diff --git "a/3105.jsonl" "b/3105.jsonl" new file mode 100644--- /dev/null +++ "b/3105.jsonl" @@ -0,0 +1,1292 @@ +{"seq_id":"40312401159","text":"from django.urls import path, include\n\nfrom .views import event_detail, event_search, location_search, EventView, CreateEventView, UpdateViewEventView, DeleteEventView, interested, attending\napp_name = 'events'\n\nurlpatterns = [\n path('search/', event_search),\n path('location/', location_search),\n path('', EventView.as_view(), name='events'),\n path('eventlisting//', EventView.as_view(), name='events_by_tags'),\n path('events//', event_detail, name='events-detail'),\n path('events//update/',\n UpdateViewEventView.as_view(), name='events-update'),\n path('events//delete/',\n DeleteEventView.as_view(), name='events-delete'),\n path('events/', CreateEventView.as_view(), name='create'),\n path('interest//', interested, name='interested'),\n path('attending//', attending, name='attending'),\n\n]\n","repo_name":"maxnze1/eventstracer","sub_path":"event/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73143075133","text":"import sys\nfrom util import Polygon, Edge, Coord\n\n_eps = 0.00001\n_huge = sys.float_info.max\n_tiny = sys.float_info.min\n\n\ndef ray_intersects(p: Coord, edge: Edge):\n a, b = edge.a, edge.b\n if a.y > b.y:\n a, b = b, a\n if p.y == a.y or p.y == b.y:\n p = Coord(p.x, p.y + _eps)\n\n intersect = False\n\n if (p.y > b.y or p.y < a.y) or (\n p.x > max(a.x, b.x)):\n return intersect\n\n if p.x < min(a.x, b.x):\n intersect = True\n else:\n if abs(a.x - b.x) > _tiny:\n m_red = (b.y - a.y) / float(b.x - a.x)\n else:\n m_red = _huge\n if abs(a.x - p.x) > _tiny:\n m_blue = (p.y - a.y) / float(p.x - a.x)\n else:\n m_blue = _huge\n intersect = m_blue >= m_red\n return intersect\n\n\ndef is_inside(p: Coord, poly: Polygon):\n intersections = filter(lambda edge: ray_intersects(p, edge), poly.edges)\n return len(list(intersections)) % 2 == 1\n","repo_name":"hamboomger/psm-2020","sub_path":"psm-3/ray_casting.py","file_name":"ray_casting.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30703449602","text":"import torch.nn.functional as F\nfrom torch import distributions\nfrom torch import nn\nimport torch\nimport numpy as np\n\n\nclass Transpose(nn.Module):\n def __init__(self, dim_1, dim_2):\n super().__init__()\n self.dim_1 = dim_1\n self.dim_2 = dim_2\n\n def forward(self, x):\n x = x.transpose(self.dim_1, self.dim_2)\n return x\n\n\nclass VariancePredictor(nn.Module):\n def __init__(self,\n enc_dim, duration_predictor_filter_sz, duration_predictor_kernel_sz, dropout=0.1):\n super(VariancePredictor, self).__init__()\n\n self.input_size = enc_dim\n self.filter_size = duration_predictor_filter_sz\n self.kernel = duration_predictor_kernel_sz\n self.conv_output_size = duration_predictor_filter_sz\n self.dropout = dropout\n\n self.conv_net = nn.Sequential(\n Transpose(-1, -2),\n nn.Conv1d(\n self.input_size, self.filter_size,\n kernel_size=self.kernel, padding=1\n ),\n Transpose(-1, -2),\n nn.LayerNorm(self.filter_size),\n nn.ReLU(),\n nn.Dropout(self.dropout),\n Transpose(-1, -2),\n nn.Conv1d(\n self.filter_size, self.filter_size,\n kernel_size=self.kernel, padding=1\n ),\n Transpose(-1, -2),\n nn.LayerNorm(self.filter_size),\n nn.ReLU(),\n nn.Dropout(self.dropout)\n )\n\n self.linear_layer = 
nn.Linear(self.conv_output_size, 1)\n        self.relu = nn.ReLU()\n\n    def forward(self, encoder_output):\n        encoder_output = self.conv_net(encoder_output)\n        \n        out = self.linear_layer(encoder_output)\n        out = self.relu(out)\n        out = out.squeeze()\n        if not self.training:\n            out = out.unsqueeze(0)\n        return out","repo_name":"aizamaksutova/FastSpeech2","sub_path":"hw_tts/variance_adaptor/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19088618579","text":"import numpy as np\nimport pandas as pd\nimport datetime\nimport json\n\n\ndef parse_caiso_load(area, file):\n    print(':: Start handling %s ...' % file)\n    assert area in ['rto', 'la'], '>> WARNING: Unexpected area keyword!'\n    if area == 'rto' and 'ENE_SLRS_DAM' in file:\n        return _parse_caiso_load_rto(file)\n    if area == 'la' and 'Demand_for_Los_Angeles' in file:\n        return _parse_ca_load_la(file)\n    print('>> WARNING: Mismatch area keyword & file name! Please double check!')\n\n\ndef _parse_caiso_load_rto(file):\n    df = pd.read_csv(file, index_col='INTERVALSTARTTIME_GMT')\n    df.index = pd.to_datetime(df.index)\n    df = df.sort_index()\n    df.index = df.index.tz_localize('GMT').tz_convert('America/Los_Angeles')\n    dfsel = df[df['TAC_ZONE_NAME'] == 'Caiso_Totals']\n    return pd.DataFrame({\n        'date': dfsel.index.date,\n        'time': dfsel.index.strftime('%H:%M'),\n        'load': dfsel['MW'],\n    })\n\n\ndef _parse_ca_load_la(file, after_date=datetime.datetime(2017, 1, 1)):\n    with open(file) as fn:\n        dict = json.load(fn)\n    df = pd.DataFrame(dict['series'][0]['data'])\n    df.columns = ['date_time', 'load']\n    df.index = pd.to_datetime(df['date_time'])\n    df.index = df.index.tz_localize('GMT').tz_convert('America/Los_Angeles')\n    if after_date is not None:\n        df = df.loc[df.index.date >= after_date.date()]\n    return pd.DataFrame({\n        'date': df.index.date,\n        'time': df.index.strftime('%H:%M'),\n        'load': df['load'],\n    })\n\n\ndef parse_miso_load(area, file):\n    print(':: Start handling %s ...' % file)\n    df = pd.read_excel(file, skiprows=[0, 1, 2, 3, 5], skipfooter=27)\n    area_mapping = {\n        'rto': 'MISO ActualLoad (MWh)',\n        'chicago': 'LRZ4 ActualLoad (MWh)',\n        'north': ['LRZ1 ActualLoad (MWh)', 'LRZ2_7 ActualLoad (MWh)'],\n        'central': ['LRZ3_5 ActualLoad (MWh)', 'LRZ4 ActualLoad (MWh)', 'LRZ6 ActualLoad (MWh)'],\n        'south': 'LRZ8_9_10 ActualLoad (MWh)',\n    }\n    assert area in area_mapping.keys(), '>> WARNING: Unexpected area keyword!'\n    dfsel = df[area_mapping[area]]\n    if len(dfsel.shape) > 1:\n        dfsel = dfsel.sum(axis=1)\n    return pd.DataFrame({\n        'date': pd.to_datetime(df['Market Day']).dt.date,\n        'time': (df['HourEnding'] - 1).astype(str).str.zfill(2) + ':00',\n        'load': dfsel.values,\n    })\n\n\ndef parse_isone_load(area, file):\n    print(':: Start handling %s ...' % file)\n    assert area in ['rto', 'boston'], '>> WARNING: Unexpected area keyword!'\n    if area == 'rto' and 'rt_hourlysysload' in file:\n        return _parse_isone_load_rto(file)\n    if area == 'boston' and 'OI_darthrmwh_iso' in file:\n        return _parse_isone_load_boston(file)\n    print('>> WARNING: Mismatch area keyword & file name! 
Please double check!')\n\n\ndef _parse_isone_load_rto(file):\n df = pd.read_csv(file, skiprows=[0, 1, 2, 3, 5], skipfooter=1, engine='python').drop(columns='H')\n if df['Hour Ending'].dtype.kind not in 'iuf': # not number\n print(':::: Find non-numeric hour records in %s' % file)\n df = df[df['Hour Ending'].astype(str).apply(lambda x: x.replace('.', '').isnumeric())] # remove records 02X\n df['Hour Ending'] = df['Hour Ending'].astype(int)\n return pd.DataFrame({\n 'date': pd.to_datetime(df['Date']).dt.date,\n 'time': (df['Hour Ending'] - 1).astype(str).str.zfill(2) + ':00',\n 'load': df['Total Load'],\n })\n\n\ndef _parse_isone_load_boston(file):\n df = pd.read_csv(file, skiprows=[0, 1, 2, 3, 4, 6], skipfooter=1, engine='python')\n df.columns = ['H', 'Date', 'Hour Ending', 'Day Ahead', 'Real Time']\n df = df.drop(columns=['H', 'Day Ahead'])\n if df['Hour Ending'].dtype.kind not in 'iuf': # not number\n print(':::: Find non-numeric hour records in %s' % file)\n df = df[df['Hour Ending'].astype(str).apply(lambda x: x.replace('.', '').isnumeric())] # remove records 02X\n df['Hour Ending'] = df['Hour Ending'].astype(int)\n return pd.DataFrame({\n 'date': pd.to_datetime(df['Date']).dt.date,\n 'time': (df['Hour Ending'] - 1).astype(str).str.zfill(2) + ':00',\n 'load': df['Real Time'],\n }).dropna()\n\n\ndef parse_nyiso_load(area, file):\n print(':: Start handling %s ...' % file)\n df = pd.read_csv(file, index_col='Time Stamp')\n df.index = pd.to_datetime(df.index)\n assert area in ['rto', 'nyc'], '>> WARNING: Unexpected area keyword!'\n if area == 'rto':\n df.index = [df.index, df['Name']]\n df = df.loc[~df.index.duplicated()]\n dfsel = df['Integrated Load'].unstack().sum(axis=1)\n elif area == 'nyc':\n dfsel = df[df['Name'] == 'N.Y.C.'].loc[:, 'Integrated Load']\n return pd.DataFrame({\n 'date': dfsel.index.date,\n 'time': dfsel.index.strftime('%H:%M'),\n 'load': dfsel.values,\n })\n\n\ndef parse_pjm_load(area, file):\n print(':: Start handling %s ...' % file)\n df = pd.read_csv(file, index_col='datetime_beginning_ept')\n df.index = pd.to_datetime(df.index)\n area_mapping = {\n 'rto': 'RTO',\n 'phila': 'PE',\n 'chicago': 'CE',\n }\n assert area in area_mapping.keys(), '>> WARNING: Unexpected area keyword!'\n dfsel = df[df['load_area'] == area_mapping[area]].loc[:, 'mw']\n return pd.DataFrame({\n 'date': dfsel.index.date,\n 'time': dfsel.index.strftime('%H:%M'),\n 'load': dfsel.values,\n })\n\n\ndef parse_spp_load(area, file):\n print(':: Start handling %s ...' % file)\n df = pd.read_csv(file, index_col='MarketHour')\n df.index = pd.to_datetime(df.index)\n df.index = df.index.tz_localize('GMT').tz_convert('America/Chicago') - pd.Timedelta(hours=1)\n area_mapping = {\n 'rto': df.columns,\n 'kck': ' KCPL',\n 'north': [' WAUE', ' NPPD', ' OPPD', ' LES', ' INDN'],\n 'south': [c for c in df.columns if c not in [' WAUE', ' NPPD', ' OPPD', ' LES', ' INDN']],\n }\n assert area in area_mapping.keys(), '>> WARNING: Unexpected area keyword!'\n dfsel = df[area_mapping[area]]\n if len(dfsel.shape) > 1:\n dfsel = dfsel.sum(axis=1)\n return pd.DataFrame({\n 'date': dfsel.index.date,\n 'time': dfsel.index.strftime('%H:%M'),\n 'load': dfsel.values,\n })\n\n\ndef parse_ercot_load(area, file):\n print(':: Start handling %s ...' 
% file)\n    if 'cdr.00013101' in file:\n        return _parse_ercot_load_daily_record(area, file)\n    elif 'Native_Load' in file:\n        return _parse_ercot_load_archive(area, file)\n    print('>> WARNING: Unexpected file name!')\n\n\ndef _parse_ercot_load_daily_record(area, file):\n    df = pd.read_csv(file, index_col='OperDay')\n    df.index = pd.to_datetime(df.index)\n    df['HourEnding'] = df['HourEnding'].str.replace(\n        pat=r'\\d\\d:', repl=lambda m: str(int(m.group(0)[0:2]) - 1).zfill(2) + ':') # avoid 24:00\n    area_mapping = {\n        'rto': 'TOTAL',\n        'houston': 'COAST',\n    }\n    assert area in area_mapping.keys(), '>> WARNING: Unexpected area keyword!'\n    return pd.DataFrame({\n        'date': df.index.date,\n        'time': df['HourEnding'],\n        'load': df[area_mapping[area]].values,\n    })\n\n\ndef _parse_ercot_load_archive(area, file):\n    df = pd.read_excel(file, index_col=0) # 'Hour Ending' or 'HourEnding'\n    df.index = df.index.astype(str).str.replace(\n        pat=r'\\d\\d:', repl=lambda m: str(int(m.group(0)[0:2]) - 1).zfill(2) + ':') # avoid 24:00\n    df.index = df.index.astype(str).str.replace('DST', '')\n    df.index = pd.to_datetime(df.index)\n    area_mapping = {\n        'rto': 'ERCOT',\n        'houston': 'COAST',\n    }\n    return pd.DataFrame({\n        'date': df.index.date,\n        'time': df.index.strftime('%H:%M'),\n        'load': df[area_mapping[area]].values,\n    })\n\n\n","repo_name":"tamu-engineering-research/COVID-EMDA","sub_path":"parser/parser_load.py","file_name":"parser_load.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"29272331882","text":"from security import Security\r\nimport pandas as pd\r\nimport datetime\r\nimport pymysql\r\nimport time\r\nimport sys\r\n\r\n\r\n\r\nd=dict()\r\ndef data_export(dataframe, dir='', file='', extension='' ,delimiter=','):\r\n    file_ = dir + file + extension\r\n\r\n    #default column sequence\r\n    dataframe.to_csv(file_, sep=delimiter, index=True, encoding='utf-8')\r\n\r\ndef output_single_record(path,file,data):\r\n\r\n    outFile = path + file\r\n    df = pd.DataFrame(data)\r\n    df.transpose().to_csv(outFile, sep=',',index=False, header=False, mode='a')\r\n    del df\r\ndef get_data(db, ticker_id_dict, tickerLong, tickerShort):\r\n    # Step 1: getting and slicing data\r\n    df1 = db.get_security_day_price_with_id(ticker_id_dict[tickerLong])[['time_x', 'ADJ_CLOSE']]\r\n    df1.index = df1['time_x']\r\n\r\n    df2 = db.get_security_day_price_with_id(ticker_id_dict[tickerShort])[['time_x', 'ADJ_CLOSE']]\r\n    df2.index = df2['time_x']\r\n    # Step 2: concatenation\r\n    df_price = pd.DataFrame()\r\n    s1 = pd.Series(df1['ADJ_CLOSE'], index=df1.index, name=tickerLong)\r\n    s2 = pd.Series(df2['ADJ_CLOSE'], index=df2.index, name=tickerShort)\r\n    df_price = pd.concat([df_price, s1], axis=1)\r\n    df_price = pd.concat([df_price, s2], axis=1)\r\n\r\n    df_price = df_price.dropna(axis=0, how='any')\r\n    print(df_price)\r\n    #print(df_price)\r\n    return df_price\r\n\r\n\r\n\r\n\r\n\r\n################## Get the idx of different month#################\r\n\r\ndef get_gap(data):\r\n    gap = []\r\n    for idx,t in enumerate(data.index):\r\n        if idx == 0:\r\n            gap.append(idx)\r\n            prevT = str(t)[5:7]\r\n        else:\r\n            curT = str(t)[5:7]\r\n            if curT != prevT:\r\n                prevT = curT\r\n                gap.append(idx)\r\n\r\n    #for item in gap:\r\n    #    print(item)\r\n    return gap\r\n\r\n\r\n\r\ndef get_gap_same_month(data,strMonth):\r\n    gap = []\r\n    flag = True\r\n    for idx,t in enumerate(data.index):\r\n        curT = str(t)[5:7]\r\n        if flag == True:\r\n            if curT == strMonth:\r\n                flag = False\r\n                gap.append(idx)\r\n        
else:\r\n            if curT != strMonth:\r\n                flag = True\r\n\r\n    return gap\r\n\r\ndef get_gap_every_month(data):\r\n    gap = []\r\n    flag = True\r\n    prevT = None\r\n    for idx,t in enumerate(data.index):\r\n        curT = str(t)[5:7]\r\n        if prevT is None:\r\n            prevT = curT\r\n            gap.append(idx)\r\n            continue\r\n        else:\r\n            if curT == prevT:\r\n                continue\r\n            else:\r\n                prevT = curT\r\n                gap.append(idx)\r\n\r\n    return gap\r\n\r\n\r\ndef get_gap_day_of_month(data,strDay):\r\n    gap = []\r\n    startFind = False\r\n    for idx,t in enumerate(data.index):\r\n        if idx == 0:\r\n            prevM = str(t)[5:7] #\r\n            startFind = True\r\n        else:\r\n            curM = str(t)[5:7]\r\n            curD = str(t)[8:10]\r\n            if curM != prevM: #month changed\r\n                startFind = True\r\n                prevM = curM\r\n                if curD >= strDay:\r\n                    startFind = False\r\n                    gap.append(idx)\r\n            else: #in the same month\r\n                if startFind == True:\r\n                    if curD >= strDay:\r\n                        startFind = False\r\n                        gap.append(idx)\r\n\r\n    #for item in gap:\r\n    #    print(data.index[item])\r\n    return gap\r\n\r\ndef get_gap_nth_day_of_month(data,NthDay):\r\n    gap = []\r\n    startFind = False\r\n    for idx,t in enumerate(data.index):\r\n        if idx == 0:\r\n            prevM = str(t)[5:7] #\r\n            startFind = True\r\n            n = NthDay\r\n            n -= 1\r\n            if n == 0:\r\n                gap.append(idx)\r\n                #startFind = False\r\n        else:\r\n            n -= 1\r\n            curM = str(t)[5:7]\r\n            if curM != prevM: #month changed\r\n                #vstartFind = True\r\n                prevM = curM\r\n                n = NthDay\r\n                n -= 1\r\n                if n == 0:\r\n                    startFind = False\r\n                    gap.append(idx)\r\n            else: #in the same month\r\n                if n == 0:\r\n                    # startFind = False\r\n                    gap.append(idx)\r\n\r\n    #for item in gap:\r\n    #    print(data.index[item])\r\n    return gap\r\n\r\n\r\n\r\n\r\n\r\ndef get_gap_day_of_week(data,weekday='MONDAY'):\r\n    weekday = weekday.upper()\r\n    if weekday == 'MONDAY':\r\n        w = 0\r\n    elif weekday == 'TUESDAY':\r\n        w = 1\r\n    elif weekday == 'WEDNESDAY':\r\n        w = 2\r\n    elif weekday == 'THURSDAY':\r\n        w = 3\r\n    elif weekday == 'FRIDAY':\r\n        w = 4\r\n    else: #default is MONDAY\r\n        w = 0\r\n\r\n    gap = []\r\n\r\n    startFind = False\r\n    for idx,t in enumerate(data.index):\r\n        curY = str(t)[0:4]\r\n        curM = str(t)[5:7]\r\n        curD = str(t)[8:10]\r\n        d1 = datetime.datetime(int(curY), int(curM), int(curD))\r\n        if d1.weekday() == w:\r\n            gap.append(idx)\r\n\r\n    # for item in gap:\r\n    #     print(data.index[item])\r\n    return gap\r\n\r\n\r\n################ Get the idx of each different month########\r\n\r\n######################### Get data into data frame#############\r\n    # Step 1: get data\r\n    # tickerLong, tickerShort = tLong, tShort\r\n\r\n    # df_price = get_data(db, ticker_id_dict, tickerLong, tickerShort)\r\n    # print(df_price)\r\n    # data_export(df_price, dir=outPath, file='price.csv', delimiter=',')\r\n\r\n#######################Get data into data frame ###########\r\n\r\n\r\ndef get_data(db, ticker_id_dict, tickerLong, tickerShort):\r\n    # Step 1: getting and slicing data\r\n    df1 = db.get_security_day_price_with_id(ticker_id_dict[tickerLong])[['time_x', 'ADJ_CLOSE']]\r\n    df1.index = df1['time_x']\r\n\r\n    df2 = db.get_security_day_price_with_id(ticker_id_dict[tickerShort])[['time_x', 'ADJ_CLOSE']]\r\n    df2.index = df2['time_x']\r\n    # Step 2: concatenation\r\n    df_price = pd.DataFrame()\r\n    s1 = pd.Series(df1['ADJ_CLOSE'], index=df1.index, name=tickerLong)\r\n    s2 = pd.Series(df2['ADJ_CLOSE'], index=df2.index, name=tickerShort)\r\n    df_price = pd.concat([df_price, s1], axis=1)\r\n    df_price = pd.concat([df_price, s2], axis=1)\r\n\r\n    df_price = df_price.dropna(axis=0, how='any')\r\n    print(df_price)\r\n    return df_price\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef action(outPath, 
tLong,tShort,testMonth,db,ticker_id_dict):\r\n # config\r\n #outPath = 'D:\\\\dataset\\\\long_short\\\\single_month\\\\'\r\n outFile = 'Long_' + tLong+'.csv'\r\n\r\n mode = 2 #from mode 1 to mode 5, edit parameters in following if else block\r\n\r\n selectMonth,selectDayOfMonth, selectWeekDay, selectNthDay, moveBuy, moveSell = None, None, None, 0, 0, 0\r\n if mode == 1: #get_gap_same_month\r\n selectMonth = testMonth # from '01' to '12'\r\n moveBuy, moveSell = 10,10\r\n elif mode == 2: #get_gap_every_month\r\n #no parameter\r\n moveBuy, moveSell = 1, 1\r\n elif mode == 3: #get_gap_day_of_month\r\n selectDayOfMonth = '01'\r\n moveBuy, moveSell = 10, 10\r\n elif mode == 4: #get_gap_day_of_week\r\n selectWeekDay = 'MONDAY' # from 'MONDAY' to 'FRIDAY'\r\n moveBuy, moveSell = 1, 1\r\n elif mode == 5: #get_gap_nth_day_of_month\r\n selectNthDay = 1 #from 1 to 30\r\n moveBuy, moveSell = 10, 10\r\n else:\r\n return\r\n\r\n # NOT EDIT FOLLOWING CODING #########################################################################\r\n if selectMonth is None:\r\n selectMonth = '01'\r\n if selectDayOfMonth is None:\r\n selectDayOfMonth = '01'\r\n if selectWeekDay is None:\r\n selectWeekDay = 'MONDAY'\r\n if selectNthDay == 0:\r\n selectNthDay = 1\r\n if moveBuy == 0:\r\n moveBuy = 10\r\n if moveSell == 0:\r\n moveSell = 10\r\n\r\n\r\n # Step 1: get data\r\n tickerLong, tickerShort = tLong, tShort\r\n\r\n df_price = get_data(db, ticker_id_dict, tickerLong, tickerShort)\r\n # print(df_price)\r\n # data_export(df_price, dir=outPath, file='price.csv', delimiter=',')\r\n\r\n\r\n # Step 2: select mode\r\n # gap = get_gap(df_price)\r\n if mode == 1:\r\n gap = get_gap_same_month(df_price, selectMonth) #for special month, around the beginning of each month\r\n elif mode == 2:\r\n gap = get_gap_every_month(df_price) #for all months, around the beginning of all month\r\n elif mode == 3:\r\n gap = get_gap_day_of_month(df_price,selectDayOfMonth) #for special day\r\n elif mode == 4:\r\n gap = get_gap_day_of_week(df_price,weekday=selectWeekDay) #for special weekday\r\n elif mode == 5:\r\n gap = get_gap_nth_day_of_month(df_price,selectNthDay) #for relative day of first day of month\r\n #print(gap)\r\n\r\n if not df_price.empty and len(gap)==212:\r\n # Step 3: data validation\r\n if gap[1] > 10: #make sure the first month has at least 10 days in previous month\r\n gap = gap[1:-1]\r\n else:\r\n gap = gap[2:-1]\r\n\r\n\r\n # Step 4: do analysis with special moving time window\r\n output_single_record(outPath, outFile,\r\n ['Start_PrevMonthLastNthDay', 'End_CurMonthFirstNthDay', 'LongWeight', 'ShortWeight',\r\n 'returnOfEachTime','successRate'])\r\n # rate = [0.05 + 0.05*x for x in range(19)] # span is 0.05\r\n # rate = [0.01 + 0.01 * x for x in range(99)] # span is 0.01\r\n return_value=dict()\r\n rate = [1]\r\n print(gap)\r\n\r\n for r in rate:\r\n value = 0\r\n # successCnt = 0\r\n for idx in gap:\r\n # buyLong = df_price[tickerLong].iloc[idx-1-buyD]\r\n # sellLong = df_price[tickerLong].iloc[idx+sellD]\r\n # buyShort = df_price[tickerShort].iloc[idx-1-buyD]\r\n # sellShort = df_price[tickerShort].iloc[idx+sellD]\r\n # val = (sellLong-buyLong)/buyLong*r + (sellShort-buyShort)/buyShort*(-1.0) *(1.0-r) #short is negative\r\n if not df_price.empty:\r\n if str(df_price.index[idx])[5:7] not in return_value.keys():\r\n return_value[str(df_price.index[idx])[5:7]]=[]\r\n return_value[str(df_price.index[idx])[5:7]].append(df_price[tickerLong].iloc[idx]/df_price[tickerLong].iloc[idx-1]-1)\r\n 
return_value[str(df_price.index[idx])[5:7]]=sum(return_value[str(df_price.index[idx])[5:7]])/len(return_value[str(df_price.index[idx])[5:7]])\r\n print(return_value)\r\n # value /= len(gap)\r\n # #print(str(buyD+1) + ',' + str(sellD+1) + ',' + str(r) + ',' + str(1.0-r) + ',' + str(value))\r\n # record = [str(buyD+1),str(sellD+1),str(r),str(1.0-r),str(value),str(successCnt/len(gap))]\r\n d[tickerLong]=[]\r\n d[tickerLong]=return_value\r\n\r\n # output_single_record(outPath,outFile,record)\r\n\r\ndef Main():\r\n outPath = 'C:\\\\project5\\\\'\r\n\r\n conn=pymysql.connect(user='readonly',passwd='123456',host='160.79.239.235',port=3306,db='gmbp')\r\n cur=conn.cursor()\r\n cur.execute(\"Select STOCK_TICKER from index_component\")\r\n tick=list()\r\n for row in cur:\r\n # print(row[0])\r\n tick.append(row)\r\n\r\n\r\n\r\n db = Security(user='readonly', password='123456', host='160.79.239.235', port=3306, db='gmbp')\r\n ticker_id_dict = db.get_security_lookup_ticker_id()\r\n tickerList=list()\r\n for name in ticker_id_dict.keys():\r\n tickerList.append(name)\r\n print(tickerList)\r\n monthList = ['01']\r\n\r\n\r\n start = time.time()\r\n for idx1 in range(len(tickerList)):\r\n for m in monthList:\r\n action(outPath, tickerList[idx1], 'IWM', m, db, ticker_id_dict)\r\n print(d)\r\n # print(str(idx1) + ',' + str(idx2) + ',' + tickerList[idx1] + ',' + tickerList[idx2] + ',M' + m)\r\n end = time.time()\r\n db.disconnect()\r\n print('runtime: ' + str(end-start) + ' secs')\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n Main()\r\n","repo_name":"RickYuankangShong-Quantitative-Finance/Quant-python","sub_path":"prj5.py","file_name":"prj5.py","file_ext":"py","file_size_in_byte":11904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10851184978","text":"# Module 5\n# Programming Assignment 6\n# Prob-6.py\n\n# eddy\nfrom graphics import *\n\n\ndef main():\n win = GraphWin(\"shapes\", 823, 620)\n\n # lego1\n lego1 = Rectangle(Point(15, 50), Point(380, 160))\n lego1.setFill(\"blue\")\n lego1.setOutline(\"black\")\n lego1.setWidth(6)\n lego1.draw(win)\n\n \"\"\"creating the top parts im calling them jegs\"\"\"\n\n jeg = Rectangle(Point(25, 30), Point(70, 50))\n jeg.setFill(\"blue\")\n jeg.setOutline(\"black\")\n jeg.setWidth(6)\n jeg.draw(win)\n\n jeg2 = jeg.clone()\n jeg2.move(70, 0)\n jeg2.draw(win)\n\n jeg3 = jeg.clone()\n jeg3.move(140, 0)\n jeg3.draw(win)\n\n jeg4 = jeg.clone()\n jeg4.move(210, 0)\n jeg4.draw(win)\n\n jeg5 = jeg.clone()\n jeg5.move(290, 0)\n jeg5.draw(win)\n\n \"\"\"new peice green\"\"\"\n\n # lego2\n lego2 = lego1.clone()\n lego2.move(420, 0)\n lego2.setFill(\"Green\")\n lego2.draw(win)\n\n jeg11 = Rectangle(Point(450, 30), Point(495, 50))\n jeg11.setFill(\"green\")\n jeg11.setOutline(\"black\")\n jeg11.setWidth(6)\n jeg11.draw(win)\n\n jeg22 = jeg11.clone()\n jeg22.move(70, 0)\n jeg22.draw(win)\n\n jeg33 = jeg11.clone()\n jeg33.move(140, 0)\n jeg33.draw(win)\n\n jeg44 = jeg11.clone()\n jeg44.move(210, 0)\n jeg44.draw(win)\n\n jeg55 = jeg11.clone()\n jeg55.move(290, 0)\n jeg55.draw(win)\n\n # lego3\n lego3 = lego1.clone()\n lego3.move(0, 220)\n lego3.setFill(\"yellow\")\n lego3.draw(win)\n\n jegy = Rectangle(Point(25, 250), Point(70, 270))\n jegy.setFill(\"yellow\")\n jegy.setOutline(\"black\")\n jegy.setWidth(6)\n jegy.draw(win)\n\n jegy2 = jegy.clone()\n jegy2.move(70, 0)\n jegy2.draw(win)\n\n jegy3 = jegy.clone()\n jegy3.move(140, 0)\n jegy3.draw(win)\n\n jegy4 = jegy.clone()\n jegy4.move(210, 0)\n jegy4.draw(win)\n\n 
jegy5 = jegy.clone()\n jegy5.move(290, 0)\n jegy5.draw(win)\n\n # lego4\n lego4 = lego1.clone()\n lego4.move(420, 220)\n lego4.setFill(\"red\")\n lego4.draw(win)\n\n jegr = Rectangle(Point(450, 250), Point(495, 270))\n jegr.setFill(\"red\")\n jegr.setOutline(\"black\")\n jegr.setWidth(6)\n jegr.draw(win)\n\n jegr2 = jegr.clone()\n jegr2.move(70, 0)\n jegr2.draw(win)\n\n jegr3 = jegr.clone()\n jegr3.move(140, 0)\n jegr3.draw(win)\n\n jegr4 = jegr.clone()\n jegr4.move(210, 0)\n jegr4.draw(win)\n\n jegr5 = jegr.clone()\n jegr5.move(290, 0)\n jegr5.draw(win)\n\n # lego5\n lego4 = lego1.clone()\n lego4.move(0, 410)\n lego4.setFill(color_rgb(0, 204, 200))\n lego4.draw(win)\n\n jegl = Rectangle(Point(25, 440), Point(70, 460))\n jegl.setFill(color_rgb(0, 204, 200))\n jegl.setOutline(\"black\")\n jegl.setWidth(6)\n jegl.draw(win)\n\n jegl2 = jegl.clone()\n jegl2.move(70, 0)\n jegl2.draw(win)\n\n jegl3 = jegl.clone()\n jegl3.move(140, 0)\n jegl3.draw(win)\n\n jegl4 = jegl.clone()\n jegl4.move(210, 0)\n jegl4.draw(win)\n\n jegl5 = jegl.clone()\n jegl5.move(290, 0)\n jegl5.draw(win)\n\n # lego6\n lego4 = lego1.clone()\n lego4.move(420, 410)\n lego4.setFill(\"black\")\n lego4.draw(win)\n\n jegb = Rectangle(Point(450, 440), Point(495, 460))\n jegb.setFill(\"black\")\n jegb.setOutline(\"black\")\n jegb.setWidth(6)\n jegb.draw(win)\n\n jegb2 = jegb.clone()\n jegb2.move(70, 0)\n jegb2.draw(win)\n\n jegb3 = jegb.clone()\n jegb3.move(140, 0)\n jegb3.draw(win)\n\n jegb4 = jegb.clone()\n jegb4.move(210, 0)\n jegb4.draw(win)\n\n jegb5 = jegb.clone()\n jegb5.move(290, 0)\n jegb5.draw(win)\n\n input()\n\n\nmain()\n","repo_name":"CTEC-121-Spring-2020/mod-4-programming-assignment-edwardbramel","sub_path":"Prob-6/Prob-6.py","file_name":"Prob-6.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73082776253","text":"import requests\n\nfrom django.conf import settings\nfrom celery import shared_task\nfrom user.models import User,UserKey\nfrom .models import TradeSignals\n\nimport uuid\n\n\nfrom .trade_set import create_my_trade\n\ndef is_valid_uuid(val):\n try:\n uuid.UUID(str(val))\n return True\n except ValueError:\n return False\n\nreply_url = f\"https://api.telegram.org/bot{settings.TELEGRAM_API_TOKEN}/sendMessage\"\ndef commands_handlers(message_text,user_telegram):\n if message_text.startswith(\"/\"):\n reply = f\"you typed {message_text}\"\n if message_text.startswith(\"/start\"):\n uuid = message_text.split(' ')[-1]\n print(\"uuid is \",uuid)\n if is_valid_uuid(uuid):\n user = User.objects.filter(user_uuid=uuid,telegram_id=user_telegram).first()\n if user:\n reply = f\"Hi {user.username} you were already registered\"\n else:\n print(\"the user is\",user)\n user = User.objects.filter(user_uuid=uuid).first()\n if user:\n user.telegram_id = user_telegram\n user.save()\n reply = f\"Hi {user.username} you were registered\"\n else:\n reply = f\"invalid key please recheck \\n use /start to setup your account\" \n else:\n reply = f\"invalid key (non uuid) please recheck \\n use /start to setup your account\"\n \n \n else:\n reply = f\"Hi\"\n \n return reply\n\n\n\ndef new_message(message):\n chat_id = message[\"message\"][\"chat\"][\"id\"]\n message_text = message[\"message\"][\"text\"]\n user_telegram = message[\"message\"][\"from\"][\"id\"]\n print(message_text)\n return message_text,chat_id,user_telegram\n\n@shared_task\ndef process_telegram_message(message):\n print(message)\n my_message = 
message.get('edited_message',None)\n print(my_message)\n \n if not my_message:\n my_message = message.get('message',None)\n if my_message:\n reply,chat_id,user_telegram = new_message(message)\n if reply.startswith(\"/\"):\n reply = commands_handlers(reply,user_telegram)\n else:\n my_message = message.get('callback_query',None)\n if my_message:\n\n reply,chat_id,user_telegram = new_message(my_message)\n click_data = my_message[\"data\"]\n print(reply,chat_id,user_telegram)\n signal= TradeSignals.objects.get(id=click_data)\n user = User.objects.filter(telegram_id=chat_id).first()\n print(user)\n userkey = UserKey.objects.filter(user=user).first()\n print(userkey)\n trade_data = create_my_trade(signal,user,userkey)\n print(signal)\n print(\"the data is\",click_data)\n reply = f\"your trade has been placed we will let you know once it is executed\"\n data = {\"chat_id\": chat_id, \"text\": reply}\n\n requests.post(reply_url, data=data)\n\n\n\n","repo_name":"saugattimsina/projx","sub_path":"signalbot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26145623826","text":"import numpy as np\nfrom gym_factory.objects import Resource\n\nfrom collections import deque\n\nclass Model:\n\n def __init__(self, name=None, description=None):\n if name is not None:\n self.name = name\n if description is not None:\n self.description = description\n\n self.objects = deque()\n self.compiled_model = False\n\n def add(self, resource: Resource):\n self.objects.append(resource)\n if len(self.objects) > 1:\n self.objects[-1].input = self.objects[-2]","repo_name":"jairotunior/gym_production","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34085860846","text":"# 216. 
Combination Sum III\n# https://leetcode.com/problems/combination-sum-iii/\n#\nfrom typing import List\n\nclass Solution:\n def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n res = []\n combSumRecursive(1, n, k, [], res)\n return res\n\n\ndef combSumRecursive(lowest, target, tries, currRes, res):\n if target < 0:\n return\n\n if target == 0 and tries == 0:\n res.append(currRes)\n\n for i in range(lowest, 10):\n if i <= target:\n cRes = currRes.copy()\n cRes.append(i)\n combSumRecursive(i + 1, target - i, tries - 1, cRes, res)\n","repo_name":"virup/leetcode","sub_path":"patterns/05_combinations/problems/216_Combination_Sum_III.py","file_name":"216_Combination_Sum_III.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"8423981774","text":"import tensorflow as tf\r\nfrom model import u_net\r\nfrom util import *\r\nfrom config import *\r\nfrom keras.models import model_from_json\r\n\r\ndef train(path, target):\r\n\tdiff = 2\r\n\tfor index in range(0, 100, diff):\r\n\t\tprint(\"Spectrograms are loading..\") \r\n\t\tx_list, y_list = load_spectrogram(path, target,index, index+diff)\r\n\t\tprint(\"Magnitudes of x are calculating..\")\r\n\t\tx_mag,x_phase = magphase_list(x_list)\r\n\t\tprint(\"Magnitudes of y are calculating..\")\r\n\t\ty_mag,_ = magphase_list(y_list)\r\n\t\twith tf.device('/device:GPU:0'):\r\n\t\t\tif index == 0:\t\r\n\t\t\t\tmodel = u_net()\r\n\t\t\telse:\r\n\t\t\t#load json and create model\r\n\t\t\t\tfile = open('./models/pretrain-{:0>2d}.json'.format((index//diff) -1), 'r')\r\n\t\t\t\tmodel_json = file.read()\r\n\t\t\t\tfile.close()\r\n\t\t\t\tmodel = model_from_json(model_json)\r\n\t\t\t\t# load weights\r\n\t\t\t\tmodel.load_weights('./models/pretrain-{:0>2d}.h5'.format((index//diff) -1))\r\n\t\t\t#model.summary()\r\n\t\t\tmodel.compile(optimizer='adam', loss=\"mean_squared_error\", metrics=['accuracy'])\r\n\t\t\tfor e in range(EPOCH):\r\n\t\t\t\t# Random sampling for training\r\n\t\t\t\tx,y = sampling(x_mag,y_mag)\r\n\t\t\t\tmodel.fit(x, y, batch_size=BATCH, verbose=1, validation_split=0.01)\r\n\t\t\t\tjson_file = model.to_json()\r\n\t\t\t\twith open('./models/pretrain-{:0>2d}.json'.format(index//diff), \"w+\") as file:\r\n\t\t\t\t\tfile.write(json_file)\r\n\t\t\t\t# serialize weights to HDF5\r\n\t\t\t\tmodel.save_weights('./models/pretrain-{:0>2d}.h5'.format(index//diff))\r\nif __name__ == '__main__':\r\n\ttrain(\"./spectrogram\", \"vocal\")\r\n\t \r\n\tprint(\"Training Complete!\")\r\n","repo_name":"voiceolation/voiceolation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"42334853455","text":"import os\nimport sys\n\nfrom PySide6.QtCore import Qt, QSize\nfrom PySide6.QtGui import QAction\nfrom PySide6.QtWidgets import (QApplication, QLineEdit, QGridLayout,\n QMainWindow, QSizePolicy, QWidget, QVBoxLayout, QPushButton,\n QStackedLayout, QStatusBar, QLabel)\n\nERROR_MSG = \"NOT CORRECT\"\n\ndirectory = os.path.dirname(__file__)\nsave_res = os.path.join(directory, \"res.txt\")\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"Calculadora\")\n\n # Fondo de la calculadora general\n self.setStyleSheet(\"background-color: none\")\n\n # Creación del menu principal que mostrara el programa\n self.widget = QWidget()\n self.setCentralWidget(self.widget)\n\n # Menu 1 
normal\n self.calcPrincipal = QWidget()\n self.layoutPrincipal = QVBoxLayout(self.calcPrincipal)\n self.calcPrincipal.setLayout(self.layoutPrincipal)\n\n # Menu 2 cientifica\n self.calcCientifica = QWidget()\n self.layoutCientifica = QVBoxLayout(self.calcCientifica)\n self.calcCientifica.setLayout(self.layoutCientifica)\n\n # Con el StackedLayout podremos acceder a los dos tipos de calculadoras\n self.stackedLayout = QStackedLayout(self.widget)\n self.stackedLayout.addWidget(self.calcPrincipal)\n self.stackedLayout.addWidget(self.calcCientifica)\n\n self.stackedLayout.setCurrentWidget(self.calcPrincipal)\n\n # QAction para cerrar aplicacion\n buttonExit = QAction(\"Quit\", self)\n buttonExit.setShortcut('Ctrl+q')\n buttonExit.setStatusTip(\"Salir\")\n buttonExit.triggered.connect(self.quitApp)\n\n # QAction para cambiar a la calculadora Principal\n buttonPrincipal = QAction(\"&Normal\", self)\n buttonPrincipal.setShortcut('Ctrl+p')\n buttonPrincipal.setStatusTip(\"Calculadora Normal\")\n buttonPrincipal.triggered.connect(self.modoPrincipal)\n\n # QAction para cambiar a la calculadora Cientifica\n buttonCientifica = QAction(\"&Cientifica\", self)\n buttonCientifica.setShortcut('Ctrl+c')\n buttonCientifica.setStatusTip(\"Calculadora Cientifica\")\n buttonCientifica.triggered.connect(self.modoCientifico)\n\n # Creacion del cuadro donde saldran las operaciones\n self.operacion = QLineEdit()\n self.operacion.setReadOnly(True)\n self.operacion.setFixedHeight(75)\n self.operacion.setStyleSheet(\"font: 15px; background-color: #707B7C; color: white\")\n self.layoutPrincipal.addWidget(self.operacion)\n\n #Status Bar\n self.setStatusBar((QStatusBar(self)))\n self.cambiarCalc = QLabel(\"Calculadora Normal\")\n self.cambiarCalc.setScaledContents(True)\n self.statusBar().addPermanentWidget(self.cambiarCalc)\n self.statusBar().addPermanentWidget(QLabel(\"|\"))\n\n # SubMenu donde se añadira el QAction para cambiar entre calculadoras\n self.menu = self.menuBar()\n submenu = self.menu.addMenu(\"&Menu\")\n submenu.addAction(buttonPrincipal)\n submenu.addAction(buttonCientifica)\n\n # Y aqui estan los botones que saldran en el submenu\n categoriaSubmenu = submenu.addMenu(\"&Submenu\")\n categoriaSubmenu.addAction(buttonPrincipal)\n categoriaSubmenu.addAction(buttonCientifica)\n categoriaSubmenu.addAction(buttonExit)\n\n # Guardar las acciones de los botones pulsados\n self.button_press = \"\"\n # Lista de buttons\n self.caractNormal = []\n # Parentesis de la calculadora\n self.parentesis_check = True\n # Layout para buttons\n button_layout = QGridLayout()\n\n # Apartado de la calculadora principal\n self.ventanaPrincipal = QLineEdit()\n self.ventanaPrincipal.setAlignment(Qt.AlignLeft)\n self.ventanaPrincipal.setFixedHeight(75)\n self.ventanaPrincipal.setReadOnly(True)\n self.layoutPrincipal.addWidget(self.ventanaPrincipal)\n\n # Listado de los caracteres que saldran en la calculadora\n caractNormal = {\n '%': (0, 0, 1, 1), '/': (0, 1, 1, 1), 'x': (0, 2, 1, 1), '<-': (0, 3, 2, 1), 'C': (0, 4, 1, 1),\n 'e': (1, 0, 1, 1), 'ln': (1, 1, 1, 1), 'n!': (1, 2, 1, 1), '-': (1, 3, 1, 1),\n '7': (1, 0, 1, 1), '8': (1, 1, 1, 1), '9': (2, 2, 1, 1), '+': (2, 3, 2, 1), 'AC': (1, 4, 1, 1),\n '4': (2, 0, 1, 1), '5': (2, 1, 1, 1), '6': (3, 2, 1, 1), '(': (2, 4, 1, 1),\n '1': (3, 0, 1, 1), '2': (3, 1, 1, 1), '3': (4, 2, 1, 1), ')': (3, 4, 1, 1),\n '0': (5, 0, 1, 2), ',': (5, 2, 1, 1), '=': (5, 3, 1, 2)\n }\n\n # Recorremos con un for las posiciones de caracteres introducidos\n for key in caractNormal.keys():\n button = 
QPushButton(key)\n self.caractNormal.append(button)\n button.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n button.setStyleSheet(\"background-color: #707B7C; color: white;\")\n button_layout.addWidget(button, caractNormal[key][0], caractNormal[key][1],\n caractNormal[key][2], caractNormal[key][3])\n\n # Seleccionamos los botones que iran en la operacion\n button.clicked.connect(self.op)\n\n # Apartado de calculadora cientifica\n self.ventanaCientifica = QLineEdit()\n self.ventanaCientifica.setAlignment(Qt.AlignLeft)\n self.ventanaCientifica.setFixedHeight(75)\n self.ventanaCientifica.setReadOnly(True)\n self.layoutCientifica.addWidget(self.ventanaCientifica)\n\n self.caractCientifica = []\n\n caractCientifica = {\n '√': (0, 0, 1, 1), 'π': (0, 1, 1, 1), '**': (0, 2, 1, 1), 'log': (0, 3, 2, 1), 'C': (0, 4, 1, 1),\n '/': (1, 0, 1, 1), 'ln': (1, 1, 1, 1), 'n!': (1, 2, 1, 1), 'e': (1, 3, 1, 1),\n '7': (2, 0, 1, 1), '8': (2, 1, 1, 1), '9': (2, 2, 1, 1), '+': (2, 3, 2, 1), 'AC': (1, 4, 1, 1),\n '4': (3, 0, 1, 1), '5': (3, 1, 1, 1), '6': (3, 2, 1, 1), '(': (2, 4, 1, 1),\n '1': (4, 0, 1, 1), '2': (4, 1, 1, 1), '3': (4, 2, 1, 1), ')': (3, 4, 1, 1),\n '0': (5, 0, 1, 2), ',': (5, 2, 1, 1), '=': (5, 3, 1, 2)\n }\n\n for key in caractCientifica.keys():\n button = QPushButton(key)\n self.caractCientifica.append(button)\n button.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n button.setStyleSheet(\"background-color: #707B7C; color: white;\")\n button_layout.addWidget(button, caractCientifica[key][0], caractCientifica[key][1],\n caractCientifica[key][2], caractCientifica[key][3])\n\n # Seleccionamos los botones que iran en la operacion\n button.clicked.connect(self.op)\n\n # Evitar fallos de diseño ya que los botones no son responsive, se arreglara mas adelante\n # Preguntar por restriccion de errores\n # Añadimos el layout con el resultado de la respuesta\n self.layoutPrincipal.addLayout(button_layout)\n self.setFixedSize(360, 360)\n\n # Funcion donde se encontraran todas las operaciones de la calculadora\n def op(self):\n if self.sender().text() == \"=\":\n # Cuando el texto de QLine este vacio este no mostrara nada\n self.actText(str(eval(self.button_press)))\n elif self.sender().text() == \"C\":\n self.delText()\n elif self.sender().text() == \"AC\":\n self.delText()\n elif self.sender().text() == \"x\":\n self.button_press += \"*\"\n self.actText(self.button_press)\n elif self.sender().text() == \"()\":\n if self.parentesis_check:\n self.button_press += \"(\"\n self.parentesis_check = False\n self.actText(self.button_press)\n elif not self.parentesis_check:\n self.button_press += \")\"\n self.parentesis_check = True\n self.actText(self.button_press)\n else:\n self.button_press += self.sender().text()\n # Actualización del texto al pulsar el \"=\"\n self.actText(self.button_press)\n\n # Actualizar el texto\n def actText(self, text):\n self.operacion.setText(text)\n self.operacion.setFocus()\n\n # Borra el Texto que se muestra en la pantalla\n def delText(self):\n self.actText(\"\")\n self.save = \"\"\n\n def evaluate(self, operation):\n try:\n res = str(eval(operation))\n except Exception:\n res = \"NOT CORRECT\"\n return res\n\n def quitApp(self):\n self.close()\n\n def changeWindow(self):\n self.hide()\n if self._ventana2 is None:\n self._ventana2 = MainWindow(self)\n self._ventana2.show()\n\n # Cambia al modo basico\n def modoPrincipal(self):\n self.stackedLayout.setCurrentWidget(self.calcPrincipal)\n self.cambiarCalc.setText(\"Calculadora 
Principal\")\n\n # Cambia a modo cientifico\n def modoCientifico(self):\n self.stackedLayout.setCurrentWidget(self.calcCientifica)\n self.cambiarCalc.setText(\"Calculadora Cientifica\")\n\n\napp = QApplication(sys.argv)\nwindow = MainWindow()\nwindow.show()\napp.exec()\n","repo_name":"GeorgiGB/GeorgiGeorgiev-DI2122","sub_path":"Calculadora/Calculadora.py","file_name":"Calculadora.py","file_ext":"py","file_size_in_byte":9300,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34734245003","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 20 21:28:05 2017\n\n@author: Enrique Alejandro\n\nDescription: this library contains functions for postprocessing results of AFM simulations\n\"\"\"\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nimport numpy as np\n\n\ndef e_diss(defl, f_ts, dt, fo1):\n \"\"\"This function calculates the tip-sample dissipation per oscillating period. \n \n Parameters\n ----------\n defl : numpy.ndarray\n tip deflection\n f_ts : numpy.ndarray\n tip-sample interacting force\n dt : float\n simulation timestep\n fo1 : float\n eigenmode resonance frequency\n \n Returns\n ------- \n energy_diss/number_of_periods : float\n total dissipated energy per oscillating period \n \"\"\"\n energy_diss = 0.0\n for i in range(1, len(defl) - 1):\n # based on integral of f_ts*dz/dt*dt, dz/dt=(defl[i+1]-defl[i-1])/(2.0*dt) Central difference approx\n energy_diss -= f_ts[i] * (defl[i + 1] - defl[i - 1]) / 2.0\n total_time = dt*len(defl)\n period1 = 1.0 / fo1\n number_of_periods = total_time/period1 \n return energy_diss/number_of_periods\n\n\ndef v_ts(defl, f_ts, dt):\n \"\"\"This function calculates the virial of the interaction.\n \n A more detailed description of this quantity is given in: \n San Paulo, Alvaro, and Ricardo García. 
Phys Rev B 64.19 (2001): 193411.\n \n Parameters\n ----------\n defl : numpy.ndarray\n tip deflection\n f_ts : numpy.ndarray\n tip-sample interacting force\n dt : float\n simulation timestep\n \n Returns\n ------- \n virial_tip_samp/(dt*len(defl)) : float\n virial of the tip-sample interaction\n \"\"\"\n virial_tip_samp = 0.0\n for i in range(len(defl)):\n virial_tip_samp = virial_tip_samp + f_ts[i] * defl[i] * dt\n\n # virial is 1/T*S(f_ts*defl*dt) from 0 to T, being T total experimental time\n return virial_tip_samp/(dt * len(defl))\n\n\ndef av_dt(array):\n \"\"\"this function returns the average of the time steps in a time array.\n \n Parameters\n ----------\n array : numpy.ndarray\n generally unequally spaced time-array\n \n Returns\n ------- \n dt: float\n averaged timestep of the unequally space time array\n \"\"\"\n k = 0.0\n dt = 0\n for ind in range(np.size(array)-1):\n k = k + (array[ind+1]-array[ind])\n dt = k/(np.size(array)-1)\n return dt\n\n\ndef amp_phase(time_vec, f_t, freq):\n \"\"\"this function calculates amplitude and phase using the in-phase and in-quadrature integrals for a given frequency\n \n Parameters\n ----------\n time_vec : numpy.ndarray\n time array of the simulation\n f_t: numpy.ndarray\n signal in time whose amplitude and phase at certain frequency is extracted\n freq : float\n distinct frequency at which the amplitude and phase will be calculated\n \n Returns\n ------- \n amp : float\n amplitude of the signal related to freq\n Phase : float\n Phase pf the signal related to freq\n \"\"\" \n if time_vec[0] > 0.0:\n time_vec -= time_vec[0]\n dt = av_dt(time_vec)\n i_val = 0.0\n k_val = 0.0\n for ind in range(np.size(f_t)):\n i_val = i_val + f_t[ind] * np.cos(2.0 * np.pi * freq * time_vec[ind]) * dt\n k_val = k_val + f_t[ind] * np.sin(2.0 * np.pi * freq * time_vec[ind]) * dt\n amp = 1.0 / (time_vec[np.size(time_vec) - 1]) * np.sqrt(i_val ** 2 + k_val ** 2) * 2.0\n phase = np.arctan(k_val/i_val)*180.0/np.pi\n if phase < 0.0:\n phase = phase + 180.0\n return amp, phase\n\n\ndef e_diss_obs(stiffness, qual_fac, amp_free, amp_ts, phase):\n \"\"\"Dissipated energy calculated from the dynamic AFM observables. \n \n Equation details can be seen in: J Tamayo, R Garcı́a Applied Physics Letters 73 (20), 2926-2928\n \n Parameters\n ----------\n stiffness : float\n eigenmode's stiffness\n qual_fac : float\n eigenmode's quality factor\n amp_free : float\n free oscillating amplitude (oscillating amplitude in the absence of tip-sample interaction)\n amp_ts : float\n tapping amplitude (oscillating amplitude in the presence of tip-sample interaction)\n phase : float\n phase\n \n Returns\n ------- \n e_diss: float\n dissipated energy per oscillating period (calculated from AFM observables)\n \"\"\"\n e_diss = (np.pi * stiffness * amp_ts ** 2 / qual_fac) * ((amp_free / amp_ts) * np.sin(phase * np.pi / 180.0) - 1.0)\n return e_diss\n\n\ndef virial_obs(stiffness, qual_fac, amp_free, amp_ts, phase):\n \"\"\"Virial of the interaction calculated from the dynamic AFM observables.\n \n Details of the equation in: San Paulo, Alvaro, and Ricardo García. 
Phys Rev B 64.19 (2001): 193411.\n \n Parameters\n ----------\n stiffness : float\n eigenmode's stiffness\n qual_fac : float\n eigenmode's quality factor\n amp_free : float\n free oscillating amplitude (oscillating amplitude in the absence of tip-sample interaction)\n amp_ts : float\n tapping amplitude (oscillating amplitude in the presence of tip-sample interaction)\n phase : float\n phase\n \n Returns\n ------- \n v_ts: float\n virial of the interaction (calculated from AFM observables)\n \"\"\"\n v_ts = -(stiffness * amp_ts * amp_free) / (2.0 * qual_fac) * np.cos(phase * np.pi / 180.0)\n return v_ts\n\n\ndef derivative_cd(f_t, time_vec):\n \"\"\"this function calculates the derivative of a given array using central difference scheme.\n \n Parameters\n ----------\n f_t: numpy.ndarray\n function trace whose 1st derivative is to be numerically calculated using the central difference scheme\n time_vec : numpy.ndarray\n time trace\n \n Returns\n ------- \n f_prime : numpy.ndarray\n first derivative of the f_t trace\n \"\"\"\n f_prime = np.zeros(np.size(f_t))\n for i in range(np.size(f_t)): # calculation of derivative using central difference scheme\n if i == 0:\n f_prime[i] = (f_t[1]-f_t[0])/(time_vec[1] - time_vec[0])\n else:\n if i == np.size(f_t)-1:\n f_prime[np.size(f_t)-1] = (f_t[np.size(f_t)-1]-f_t[np.size(f_t)-2]) / \\\n (time_vec[np.size(f_t) - 1] - time_vec[np.size(f_t) - 2])\n else:\n f_prime[i] = (f_t[i+1]-f_t[i-1])/(time_vec[i + 1] - time_vec[i - 1])\n return f_prime\n\n\ndef sparse(x, t, tr=0.1, st=1.0):\n \"\"\"this function sparses an array adjusting the time resolution and total time\n \n Parameters\n ----------\n x : numpy.ndarray\n function trace which has to be sparsed to have desired resolution and lenght \n t : numpy.ndarray\n original time trace that will be sparsed\n tr : float, optional\n time resolution desired (inverse of sampling frequency)\n st : float, optional\n desired simulation time, this has to be lower or equal than t[len(t)-1]\n \n Returns\n ------- \n np.array(x_sparse) : numpy.ndarray\n function trace sparsed to coincide with the desired time resolution and lenght\n np.array(t_sparse) : numpy.ndarray\n new time trace with the desired time resolution and total time\n \"\"\"\n nt = len(t)\n prints = 0\n i =0\n x_sparse = []\n t_sparse = []\n while i < (nt):\n if t[i] >= prints*tr and t[i]<=(st+tr) :\n x_sparse.append(x[i])\n t_sparse.append(t[i])\n prints = prints + 1\n i += 1\n return np.array(x_sparse), np.array(t_sparse)\n\n\ndef log_tw(de0, maxi, nn=10):\n \"\"\"this function generates a frequency or time array weighted in logarithmic scale\n \n Parameters\n ----------\n de0 : float\n minimum value of the function \n maxi : float\n maximum value of the function\n nn : int, optional\n number of point per decade of logarithmic scale\n \n Returns\n ------- \n np.array(epsilon) : numpy.ndarray\n function trace weighted in logarithmic scale\n \"\"\" \n epsilon = []\n w = de0\n de = de0\n prints = 1\n epsilon.append(de0)\n while w < maxi:\n w += de\n if w < maxi:\n epsilon.append(w) \n prints += 1 \n if prints == nn:\n de = de*10\n prints = 1 \n return np.array(epsilon)\n\ndef log_scale(x, t, tr=0.1, st=1.0, nn = 10):\n \"\"\"this function receives an array and sparses it weighting it in logarithmic scale\n \n warning : this function eliminates points and only takes into account certain points to have an array equally spaced in logarithmic scale\n \n Parameters\n ----------\n x : numpy.ndarray\n function trace which has to be sparsed to have desired 
resolution and length \n    t : numpy.ndarray\n        original time trace that will be sparsed\n    tr : float, optional\n        minimum time resolution, note that this is not constant because time array will be equally spaced in logarithmic scale\n    st : float, optional\n        desired simulation time, this has to be lower or equal than t[len(t)-1]\n    nn : int, optional\n    \n    Returns\n    ------- \n    np.array(x_log) : numpy.ndarray\n        function trace weighted in logarithmic scale\n    np.array(t_log) : numpy.ndarray\n        time trace equally spaced in logarithmic scale \n    \"\"\" \n    prints = 1\n    nt = len(x)\n    i =0\n    x_log = []\n    t_log = []\n    while i < nt:\n        if t[i] >= prints*tr and t[i]<=st :\n            x_log.append(x[i])\n            t_log.append(t[i])\n            prints += 1\n        i += 1\n        if prints == nn:\n            tr = tr*10\n            prints = 1\n    return np.array(x_log),np.array(t_log)","repo_name":"TommasoCostanzo/pycroscopy","sub_path":"pycroscopy/simulation/afm_calculations.py","file_name":"afm_calculations.py","file_ext":"py","file_size_in_byte":9727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"35285122162","text":"import tensorflow as tf\nimport numpy as np\n\nclass Model(object):\n    def __init__(self, args):\n\n        self.num_hidden_units = args.num_hidden_units\n        self.max_hash_bin = args.max_hash_bin\n        self.num_of_labels = args.num_of_labels\n        self.hashlookup = tf.Variable(tf.random_uniform(\n            [self.max_hash_bin, self.num_hidden_units]),\n            name = \"hash_lookup_table\"\n            )\n\n    def forward(self, ngrams, batch_labels):\n        ngram_hash = tf.string_to_hash_bucket_fast(ngrams, \n                        num_buckets = self.max_hash_bin)\n        hash_embed = tf.nn.embedding_lookup(self.hashlookup, ngram_hash)\n        hash_mean = tf.reshape(tf.reduce_sum(hash_embed, axis=0), [1,self.num_hidden_units])\n        self.logits = tf.layers.dense(hash_mean, self.num_of_labels,\n                        name = \"hidden2output\"\n                        )\n        one_hot_labels = tf.one_hot(batch_labels-1, self.num_of_labels)\n        self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(\n            labels = one_hot_labels, logits = self.logits)\n        self.preds = tf.argmax(self.logits, axis=-1)\n        \n\n        return self.logits, self.loss, self.preds\n","repo_name":"doxxitxxyoung/FastText","sub_path":"modelhv.py","file_name":"modelhv.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28803505159","text":"#!/usr/bin/env python3\n\"\"\"Tulip REPL\n\nA simple program that interfaces with the lexer to make an interactive Command Line Interface.\"\"\"\n\ndef main():\n    \"\"\"The actual REPL program. 
If run, shows a prompt to get input and then parses it.\"\"\"\n\n # Import useful modules\n import lexer\n import parser\n\n try:\n while True:\n input_ = input('repl> ')\n parser.complete_analysis(input_)\n except KeyboardInterrupt:\n print('\\nREPL finished.')\n except EOFError:\n print('\\nREPL finished.')\n\nif __name__ == '__main__':\n # Start the REPL if running it as a file.\n main()\n","repo_name":"euroboros/parsing-engine","sub_path":"repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29145436505","text":"import re\r\nimport numpy as np\r\nimport pandas as pd\r\nimport psycopg2\r\nimport typing\r\nfrom shapely.geometry import LineString, Point\r\nfrom shapely import wkb\r\nimport geopandas as gpd\r\nfrom shapely.wkt import loads\r\nfrom map_content.utils import utils\r\n# from map_content.utils.openmap import get_alphabetic_hnr_df, get_numeric_hnr_df\r\n\r\n\r\n# Function to calculate the center point of a LineString\r\ndef calculate_center_point(geometry):\r\n line = LineString(geometry)\r\n return line.centroid\r\n\r\ndef parse_hnr_tags(df: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Parses the OSM tags to extract house number range info from way\r\n\r\n :param df: DataFrame from OSM result\r\n :type df: pd.DataFrame\r\n :return: DataFrame with added columns for hnr components\r\n :rtype: pd.DataFrame\r\n \"\"\"\r\n\r\n # Create copy of\r\n df_copy = df.copy()\r\n\r\n # Split tags into list for both end point nodes and network\r\n df_copy[\"tags_list\"] = df_copy.tags.str.split('\",')\r\n df_copy[\"tags_list\"] = df_copy.tags_list.apply(\r\n lambda x: [tag.split(\"=>\") for tag in x]\r\n )\r\n\r\n df_copy[\"tags_network_list\"] = df_copy.tags_network.str.split('\",')\r\n df_copy[\"tags_network_list\"] = df_copy.tags_network_list.apply(\r\n lambda x: [tag.split(\"=>\") for tag in x]\r\n )\r\n\r\n # Parse components\r\n df_copy[\"constant\"] = df_copy[\"tags_network_list\"].apply(\r\n lambda x: [tag[1].replace('\"', \"\").strip() for tag in x if \"constant\" in tag[0]]\r\n )\r\n df_copy[\"constant\"] = df_copy[\"constant\"].apply(lambda x: x[0] if x else None)\r\n\r\n df_copy[\"interpolation_value\"] = df_copy[\"tags_network_list\"].apply(\r\n lambda x: [\r\n tag[1].replace('\"', \"\").strip() for tag in x if \"interpolation\" in tag[0]\r\n ]\r\n )\r\n df_copy[\"interpolation_value\"] = df_copy[\"interpolation_value\"].apply(\r\n lambda x: x if x else None\r\n )\r\n\r\n # Replace interpolation\r\n df_copy[\"interpolation\"] = df_copy[\"interpolation\"].fillna(\r\n df_copy[\"interpolation_value\"]\r\n )\r\n\r\n df_copy[\"intermediates\"] = df_copy[\"tags_network_list\"].apply(\r\n lambda x: [\r\n tag[1].replace('\"', \"\").strip() for tag in x if \"intermediate\" in tag[0]\r\n ]\r\n )\r\n df_copy[\"intermediates\"] = df_copy.intermediates.apply(\r\n lambda x: [hsn for value in x for hsn in value.split(\",\")] if x else None\r\n )\r\n\r\n df_copy[\"street\"] = df_copy.tags_list.apply(\r\n lambda x: [tag[1].replace('\"', \"\").strip() for tag in x if \"street\" in tag[0]]\r\n )\r\n df_copy[\"street\"] = df_copy.street.apply(lambda x: x if x else None)\r\n\r\n return df_copy.drop(columns=[\"tags_list\"])\r\n\r\ndef get_numeric_house_number_column(x: pd.Series) -> typing.List[str]:\r\n \"\"\"Extracts the numeric component of a house number\r\n\r\n :param x: column containing house numbers\r\n :type x: pd.Series\r\n :return: list containing the parsed 
housenumbers\r\n :rtype: typing.List[str]\r\n \"\"\"\r\n numeric_component = x.str.extract(\"(\\d+)[^\\d]*(\\d+)?\", expand=False).fillna(\"\")\r\n return [\" \".join(j).strip() for j in numeric_component.values.tolist()]\r\n\r\ndef preprocess_hnr_hsn(hnr_df: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Preprocesses the house numbers for a set of address ranges\r\n responses\r\n\r\n :param hnr_df: DataFrame, result of openmap_hnr_lookup\r\n :type hnr_df: pd.DataFrame\r\n :return: DataFrame with processed house numbers\r\n :rtype: pd.DataFrame\r\n \"\"\"\r\n\r\n country_hnr_df_lookup = hnr_df.copy()\r\n\r\n # Split into list and get smallest and biggest HSN in the range\r\n country_hnr_df_lookup[\"min_hsn_numeric\"] = get_numeric_house_number_column(\r\n country_hnr_df_lookup[\"min_hsn\"]\r\n )\r\n\r\n country_hnr_df_lookup[\"max_hsn_numeric\"] = get_numeric_house_number_column(\r\n country_hnr_df_lookup[\"max_hsn\"]\r\n )\r\n\r\n return country_hnr_df_lookup\r\n\r\ndef compute_alphabetic_hnr(x: pd.Series) -> typing.List[str]:\r\n \"\"\"Generates a list of alphabetic address range in the Orbis ecosystem\r\n\r\n :param x: Series where the interpolation value is 'alphabetic'\r\n :type x: pd.Series\r\n :return: list containing alphabetic housenumber ranges\r\n :rtype: typing.List[str]\r\n \"\"\"\r\n\r\n # If endpoints are equal, return list with one value\r\n if x[\"min_hsn\"] == x[\"max_hsn\"]:\r\n return [x[\"min_hsn\"]]\r\n\r\n # If cannot convert to characters, return endpoints\r\n if (not isinstance(x[\"first_char\"], str)) or (not isinstance(x[\"last_char\"], str)):\r\n return [x[\"min_hsn\"], x[\"max_hsn\"]]\r\n\r\n # Iterate over chars in order\r\n variable_part = [\r\n chr(i)\r\n for i in range(\r\n min([ord(x[\"first_char\"].lower()), ord(x[\"last_char\"].lower())]),\r\n max([ord(x[\"first_char\"].lower()), ord(x[\"last_char\"].lower())]) + 1,\r\n )\r\n ]\r\n\r\n # Add constant value\r\n hnr_array = [x[\"min_hsn_numeric\"] + char for char in variable_part]\r\n\r\n return hnr_array\r\n\r\ndef numeric_mixed_array(x: pd.Series) -> typing.List[str]:\r\n \"\"\"Parses numeric mixed array according to different posibilities\r\n\r\n :param x: Series containing constant value, and parseable numeric mixed arrays\r\n :type x: pd.Series\r\n :return: list containing the parsed address range\r\n :rtype: typing.List[str]\r\n \"\"\"\r\n\r\n # No constant value, hence iteration over endpoints\r\n if x[\"constant\"] == \"\":\r\n return x[\"hnr_array\"]\r\n\r\n # Constant value separated by '-'\r\n if \"-\" in x[\"constant\"]:\r\n return [x[\"constant\"] + hnr for hnr in x[\"hnr_array\"]]\r\n\r\n # Numeric constant value that it's possibly not separated by a dash\r\n if len(x[\"constant\"]) >= 2 and x[\"min_hsn_variable\"] == \"\":\r\n min_hsn_variable = 1\r\n max_hsn_variable = int(re.search(\"([0-9]+)\", x[\"max_hsn_variable\"]).group())\r\n variable_array = list(range(min_hsn_variable, max_hsn_variable + 1, 1))\r\n hnr_mixed_array = [x[\"min_hsn\"]] + [\r\n x[\"constant\"] + \"-\" + str(hnr) for hnr in variable_array\r\n ]\r\n return hnr_mixed_array\r\n\r\n # Last case, take endpoints and add integers between them\r\n min_hsn_variable = int(re.search(\"([0-9]+)\", x[\"min_hsn\"]).group())\r\n max_hsn_variable = int(re.search(\"([0-9]+)\", x[\"max_hsn\"]).group())\r\n variable_array = list(range(min_hsn_variable + 1, max_hsn_variable + 1, 1))\r\n hnr_mixed_array = [x[\"min_hsn\"]] + variable_array + [x[\"max_hsn\"]]\r\n\r\n return hnr_mixed_array\r\n\r\ndef get_alphabetic_hnr_df(hnr_df: pd.DataFrame) -> 
pd.DataFrame:\r\n \"\"\"Produces the housenumber array for an alphabetic address range\r\n\r\n :param hnr_df: DataFrame, result of openmap_hnr_lookup, after preprocessed\r\n :type hnr_df: pd.DataFrame\r\n :return: DataFrame containing alphabetic variance housenumber ranges\r\n :rtype: pd.DataFrame\r\n \"\"\"\r\n country_alpha_hnr_df = hnr_df.copy()\r\n\r\n # Return same input if DataFrame is empty\r\n if country_alpha_hnr_df.shape[0] == 0:\r\n return pd.DataFrame(columns=hnr_df.columns.tolist() + [\"hnr_array\"])\r\n\r\n # Compute alphabetic hnr\r\n country_alpha_hnr_df[\"first_char\"] = (\r\n country_alpha_hnr_df[\"min_hsn\"]\r\n .str.replace(\"[^a-zA-Z]\", \"\", regex=True)\r\n .str[0]\r\n .replace({\"\": \"a\"})\r\n )\r\n\r\n country_alpha_hnr_df[\"last_char\"] = (\r\n country_alpha_hnr_df[\"max_hsn\"]\r\n .str.replace(\"[^a-zA-Z]\", \"\", regex=True)\r\n .str[0]\r\n .replace({\"\": \"a\"})\r\n )\r\n\r\n country_alpha_hnr_df[\"hnr_array\"] = country_alpha_hnr_df.apply(\r\n compute_alphabetic_hnr, axis=1\r\n )\r\n\r\n return country_alpha_hnr_df\r\n\r\n\r\ndef get_numeric_hnr_df(hnr_df: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Produces the housenumber array for a numeric address range. These are\r\n address ranges with variance: 'even', 'odd', 'numeric_mixed', 'irregular'\r\n\r\n :param hnr_df: DataFrame, result of openmap_hnr_lookup, after preprocessed\r\n :type hnr_df: pd.DataFrame\r\n :return: DataFrame containing numeric variance housenumber ranges\r\n :rtype: pd.DataFrame\r\n \"\"\"\r\n country_hnr_df_lookup = hnr_df.copy()\r\n\r\n # Return emtpy DataFrame with necessary columns if input is empty\r\n if country_hnr_df_lookup.shape[0] == 0:\r\n return pd.DataFrame(columns=hnr_df.columns.tolist() + [\"hnr_array\"])\r\n\r\n country_hnr_df_lookup[\"min_hsn_numeric\"] = (\r\n country_hnr_df_lookup[\"min_hsn_numeric\"]\r\n .apply(lambda x: min(x.split(\" \")))\r\n .astype(int)\r\n )\r\n\r\n country_hnr_df_lookup[\"max_hsn_numeric\"] = (\r\n country_hnr_df_lookup[\"max_hsn_numeric\"]\r\n .apply(lambda x: max(x.split(\" \")))\r\n .astype(int)\r\n )\r\n\r\n # Recompute lowest and max depending on how the info was captured\r\n country_hnr_df_lookup[\"min_hsn_hnr\"] = country_hnr_df_lookup[\r\n [\"min_hsn_numeric\", \"max_hsn_numeric\"]\r\n ].min(axis=1)\r\n country_hnr_df_lookup[\"max_hsn_hnr\"] = country_hnr_df_lookup[\r\n [\"min_hsn_numeric\", \"max_hsn_numeric\"]\r\n ].max(axis=1)\r\n\r\n # Convert to odd number or even number depending on interpolation\r\n country_hnr_df_lookup[\"min_hsn_hnr\"] = np.where(\r\n country_hnr_df_lookup[\"interpolation\"] == \"even\",\r\n country_hnr_df_lookup[\"min_hsn_hnr\"] // 2 * 2,\r\n np.where(\r\n country_hnr_df_lookup[\"interpolation\"] == \"odd\",\r\n country_hnr_df_lookup[\"min_hsn_hnr\"] // 2 * 2 + 1,\r\n country_hnr_df_lookup[\"min_hsn_hnr\"],\r\n ),\r\n )\r\n\r\n country_hnr_df_lookup[\"max_hsn_hnr\"] = np.where(\r\n country_hnr_df_lookup[\"interpolation\"] == \"even\",\r\n country_hnr_df_lookup[\"max_hsn_hnr\"] // 2 * 2,\r\n np.where(\r\n country_hnr_df_lookup[\"interpolation\"] == \"odd\",\r\n country_hnr_df_lookup[\"max_hsn_hnr\"] // 2 * 2 + 1,\r\n country_hnr_df_lookup[\"max_hsn_hnr\"],\r\n ),\r\n )\r\n\r\n # Compute cadency and fill null values for constant\r\n country_hnr_df_lookup[\"cadency\"] = np.where(\r\n ~country_hnr_df_lookup[\"interpolation\"].isin([\"even\", \"odd\"]), 1, 2\r\n )\r\n country_hnr_df_lookup[\"constant\"] = country_hnr_df_lookup[\"constant\"].fillna(\"\")\r\n\r\n # HNR Array for even and odd cases\r\n 
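# --- Worked example (added for illustration; toy values, not from the real lookup) ---
# compute_alphabetic_hnr above expands a lettered range from its two endpoints:
row = pd.Series({"min_hsn": "2a", "max_hsn": "2e", "first_char": "a", "last_char": "e", "min_hsn_numeric": "2"})
print(compute_alphabetic_hnr(row))  # -> ['2a', '2b', '2c', '2d', '2e']
# The even/odd snapping just above relies on floor division: for an endpoint of 7,
# 7 // 2 * 2 == 6 (forced even) and 7 // 2 * 2 + 1 == 7 (forced odd), so a captured
# range 7..14 becomes 6..14 under 'even' or 7..15 under 'odd' before np.arange
# (the next step) fills it in with the cadency of 2.
# --- end example ---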
country_hnr_df_lookup[\"hnr_array\"] = country_hnr_df_lookup.apply(\r\n lambda x: list(\r\n np.arange(x[\"min_hsn_hnr\"], x[\"max_hsn_hnr\"] + x[\"cadency\"], x[\"cadency\"])\r\n ),\r\n axis=1,\r\n )\r\n country_hnr_df_lookup[\"hnr_array\"] = country_hnr_df_lookup[\"hnr_array\"].apply(\r\n lambda x: [str(j) for j in x]\r\n )\r\n country_hnr_df_lookup[\"hnr_array\"] = country_hnr_df_lookup.apply(\r\n lambda x: x[\"hnr_array\"] + [str(j) for j in x[\"intermediates\"]]\r\n if x[\"intermediates\"]\r\n else x[\"hnr_array\"],\r\n axis=1,\r\n )\r\n\r\n # Compute numeric mixed array\r\n country_hnr_df_lookup[\"min_hsn_variable\"] = country_hnr_df_lookup.apply(\r\n lambda x: x[\"min_hsn\"].replace(x[\"constant\"], \"\"), axis=1\r\n )\r\n country_hnr_df_lookup[\"max_hsn_variable\"] = country_hnr_df_lookup.apply(\r\n lambda x: x[\"max_hsn\"].replace(x[\"constant\"], \"\"), axis=1\r\n )\r\n country_hnr_df_lookup[\"hnr_numeric_mixed_array\"] = country_hnr_df_lookup.apply(\r\n lambda x: numeric_mixed_array(x)\r\n if x[\"interpolation\"] == \"numeric_mixed\"\r\n else x[\"hnr_array\"],\r\n axis=1,\r\n )\r\n\r\n # Determine final array depending on the type of interpolation\r\n country_hnr_df_lookup[\"hnr_array\"] = np.where(\r\n country_hnr_df_lookup[\"interpolation\"] == \"numeric_mixed\",\r\n country_hnr_df_lookup[\"hnr_numeric_mixed_array\"],\r\n country_hnr_df_lookup[\"hnr_array\"],\r\n )\r\n\r\n country_hnr_df_lookup[\"hnr_array\"] = country_hnr_df_lookup.hnr_array.apply(\r\n lambda x: [str(j) for j in x]\r\n )\r\n\r\n return country_hnr_df_lookup\r\n\r\ndef get_hnr_df(hnr_df: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Produces the housenumber array, first separating into alphabetic and\r\n numeric interpolation, and then concat them to produce the address range\r\n\r\n :param hnr_df: DataFrame, result of openmap_hnr_lookup, after preprocessed\r\n :type hnr_df: pd.DataFrame\r\n :return: DataFrame containing house number range\r\n :rtype: pd.DataFrame\r\n \"\"\"\r\n\r\n country_hnr_df_lookup = hnr_df.copy()\r\n\r\n # Split into alphabetic and numeric HNR and compute array\r\n country_alpha_hnr_df = country_hnr_df_lookup.loc[\r\n country_hnr_df_lookup[\"interpolation\"] == \"alphabetic\"\r\n ].reset_index(drop=True)\r\n\r\n country_numeric_hnr_df = country_hnr_df_lookup.loc[\r\n country_hnr_df_lookup[\"interpolation\"] != \"alphabetic\"\r\n ].reset_index(drop=True)\r\n\r\n country_alpha_hnr_df = get_alphabetic_hnr_df(country_alpha_hnr_df)\r\n country_numeric_hnr_df = get_numeric_hnr_df(country_numeric_hnr_df)\r\n\r\n # Join both dataframes\r\n country_hnr_df_lookup = pd.concat(\r\n [country_alpha_hnr_df, country_numeric_hnr_df], ignore_index=True\r\n )\r\n\r\n return country_hnr_df_lookup\r\n\r\n\r\n\r\ndef find_openmap_schema(\r\n country: str, latest: bool or None = True, credentials: dict or None = None\r\n) -> pd.DataFrame:\r\n \"\"\"Gets the schema in the Openmap's 3G database to make a spatial query\r\n\r\n :param country: country code in ISO2 or ISO3\r\n :type country: str\r\n :param latest: boolean or None indicating whether to return the latest version of OSM product, defaults to True\r\n :type latest: bool or None, optional\r\n :param credentials: dictionary containing the credentials for a connection\r\n :type credentials: dict or None, optional\r\n :return: DataFrame with the relevant country schemas\r\n :rtype: pd.DataFrame\r\n \"\"\"\r\n\r\n # Standarize country input to ISO3\r\n # country_iso3 = country_converter.convert(country, to=\"ISO3\")\r\n country_iso3 = 'gbr'\r\n # Initialize 
connection if not passed as parameter\r\n if credentials is None:\r\n conn = postgres_db_connection()\r\n else:\r\n conn = psycopg2.connect(**credentials)\r\n\r\n # Query all schemas in Openmap database\r\n schemas_df = pd.read_sql(\"SELECT nspname FROM pg_catalog.pg_namespace\", conn)\r\n conn.close()\r\n\r\n # Filter relevant country\r\n country_schemas = schemas_df.loc[\r\n schemas_df.nspname.str.contains(\"_\" + country_iso3, case=False)\r\n ].reset_index(drop=True)\r\n\r\n country_schemas[\"date\"] = country_schemas.nspname.str.extract(\"([0-9]+)\")\r\n country_schemas[\"schema\"] = country_schemas.nspname.str.replace(\r\n \"_[0-9]+.*\", \"\", regex=True\r\n )\r\n country_schemas[\"is_latest\"] = (\r\n country_schemas.date == country_schemas.groupby(\"schema\").date.max()[0]\r\n )\r\n country_schemas[\"country\"] = country\r\n\r\n # Return schemas\r\n if latest:\r\n return country_schemas.loc[country_schemas.is_latest == latest].reset_index(\r\n drop=True\r\n )\r\n else:\r\n return country_schemas\r\n\r\n\r\ncon = psycopg2.connect(\r\n host=\"10.137.173.68\",\r\n port=\"5432\",\r\n database=\"ggg\",\r\n user=\"ggg\",\r\n password=\"ok\"\r\n)\r\n\r\n\r\ndef postgres_db_connection():\r\n \"\"\"\r\n :param db_url: Postgres Server\r\n :return: DB Connection\r\n \"\"\"\r\n try:\r\n con = psycopg2.connect(\r\n host=\"10.137.173.68\",\r\n port=\"5432\",\r\n database=\"ggg\",\r\n user=\"ggg\",\r\n password=\"ok\"\r\n )\r\n return con\r\n except Exception as error:\r\n print(\"Oops! An exception has occured:\", error)\r\n print(\"Exception TYPE:\", type(error))\r\n\r\n\r\nschemaname = find_openmap_schema(\"gbr\").nspname[0]\r\n\r\n\r\n# Query all schemas in Openmap database\r\n\r\ndef ovAdminAreaOrder8Area(schema):\r\n adminArea = \"\"\"SELECT osm_id ,admin_level,boundary,\"name\",place,country, ST_AsText(way) as geometry\r\n FROM \"{schema_name}\".planet_osm_polygon \r\n where boundary= 'administrative' and admin_level like '8'\"\"\"\r\n adminAreaAa8 = adminArea.replace(\"{schema_name}\", str(schema))\r\n AdminOrdr8Area = pd.read_sql(adminAreaAa8, postgres_db_connection())\r\n # Convert the WKB coordinates to Shapely geometries\r\n # AdminOrdr8Area['geometry'] = AdminOrdr8Area['way'].apply(wkb.loads)\r\n return AdminOrdr8Area\r\n\r\n\r\nquery_coordinates = ovAdminAreaOrder8Area(schemaname).head(1).geometry.values[0]\r\n\r\n# Create a GeoPandas DataFrame\r\n# spatial_query_result = gpd.GeoDataFrame(AdminOrdr8Area, geometry='geometry')\r\n\r\n\r\n# Create query for addresses to reverse lookup\r\n\r\nquery = \"\"\"\r\n\r\nwith sample as (select index_searched_query\r\n ,st_geomfromtext (coordinates, 4326) coordinates\r\n from (VALUES (0, '{query_coordinates}')) as t (index_searched_query, coordinates))\r\n\r\n, tags as (\r\nselect distinct skeys(tags) keys\r\nfrom \"{schema_name}\".planet_osm_polygon pop \r\nwhere admin_level in ('4', '8')\r\n)\r\n\r\n\r\n, name_tags as (\r\nselect * \r\nfrom tags\r\nwhere (keys like '%name:%' or keys like '%alt%name') and keys not like '%pronunciation%'\r\n) \r\n\r\n\r\n, hsn_tags as (\r\nselect distinct skeys(tags) keys \r\nfrom \"{schema_name}\".planet_osm_point\r\nwhere \"addr:housenumber\" is not null or tags::text like '%addr:housenumber%'\r\n\r\n)\r\n\r\n\r\n\r\n, hsn_keys as (\r\nselect * from hsn_tags where (keys like '%addr:housenumber%')\r\n\r\n)\r\n,buffers as (\r\nselect \r\n sample.index_searched_query\r\n, sample.coordinates\r\n, coordinates as buffer\r\n, road.road as road_name\r\n, road.name_tags_array as road_names\r\nfrom 
sample\r\n\r\n\r\nleft join lateral (\r\n SELECT name as road, array_remove(tags->array((select keys from name_tags)), null) as name_tags_array\r\n FROM \"{schema_name}\".planet_osm_line road\r\n where name is not null\r\n and highway in ('motorway','motorway_link','trunk','trunk_link','primary','primary_link','secondary','secondary_link','tertiary','tertiary_link','unclassified','residential','service','living_street','road','steps', 'footway', 'path', 'pedestrian', 'bridleway', 'cycleway', 'track')\r\n ORDER BY road.way <-> sample.coordinates\r\n\r\n LIMIT 1\r\n ) AS road \r\n on true\r\n )\r\n\r\n\r\n, address_ranges as (\r\nselect \r\nbuffers.index_searched_query\r\n, buffers.coordinates\r\n, buffers.road_name\r\n, buffers.road_names\r\n, hnr.osm_id\r\n, ST_astext(hnr.way) way\r\n, hnr.\"addr:interpolation\" as interpolation\r\n, hnr.tags\r\n, hnr.tags->'addr:street' as road_name_way\r\n, hnr.tags->'addr:interpolation' as interpolation_tag\r\n, hnr.\"name\" \r\n, unnest(ways.nodes) nodes\r\n\r\nfrom \"{schema_name}\".planet_osm_line hnr\r\n\r\njoin buffers on ST_Intersects(buffers.buffer, hnr.way)\r\n\r\njoin \"{schema_name}\".planet_osm_ways ways on ways.id = hnr.osm_id\r\n\r\nwhere hnr.\"addr:interpolation\" is not null \r\n)\r\n\r\n, hsn as (\r\nselect \r\npop.tags as tags_hsn\r\n, array_remove(array_append(pop.tags -> array((select keys from hsn_keys )), pop.\"addr:housenumber\"), null) as range_hsn\r\n, address_ranges.*\r\n\r\nfrom address_ranges\r\n\r\nleft join \"{schema_name}\".planet_osm_point pop \r\non pop.osm_id = address_ranges.nodes\r\n\r\nwhere pop.tags is not null\r\n)\r\n\r\n\r\n, hsn_long as (\r\nselect \r\nhsn.osm_id\r\n, hsn.index_searched_query\r\n, hsn.coordinates\r\n, hsn.tags as tags_network\r\n, hsn.road_name_way\r\n, hsn.road_name\r\n, hsn.road_names\r\n, hsn.interpolation\r\n, hsn.interpolation_tag\r\n, hsn.way\r\n, hsn.name\r\n, first_value(tags_hsn) over(partition by osm_id) as first_tags_hsn\r\n, unnest(range_hsn) as range_hsn\r\n\r\nfrom hsn \r\n)\r\n,addressrangesfinal as (select \r\n\thsn_long.osm_id\r\n, hsn_long.road_name\r\n, hsn_long.way\r\n, min(range_hsn) as min_hsn\r\n, max(range_hsn) as max_hsn\t\r\n, hsn_long.index_searched_query\r\n, '{date}' as date\r\n, '{version}' as version\r\n, ST_AsText(hsn_long.coordinates) as coordinates\r\n, hsn_long.tags_network\r\n, hsn_long.road_name_way\r\n, hsn_long.interpolation\r\n, hsn_long.road_names\r\n, hsn_long.interpolation_tag\r\n, hsn_long.name\r\n, first_tags_hsn as tags\r\n, array_agg(distinct range_hsn) as intermediates\r\nfrom hsn_long\r\ngroup by \r\n\thsn_long.osm_id\r\n, hsn_long.index_searched_query\r\n, coordinates\r\n, hsn_long.road_name\r\n, hsn_long.road_names\r\n, hsn_long.tags_network\r\n, hsn_long.road_name_way\r\n, hsn_long.interpolation\r\n, hsn_long.interpolation_tag\r\n, hsn_long.way\r\n, hsn_long.name\r\n, first_tags_hsn\r\norder by hsn_long.osm_id)\r\n\r\nselect \r\nplarea.osm_id as place_osm_id ,\r\nplarea.name as place_name, \r\nplarea.reg_code as reg_code, \r\nplarea.region as place_region, \r\nplarea.cntry_code as place_cntry_code,\r\nplarea.country as place_country, \r\nST_AsText(plarea.way) as place_way,\r\naddressrangesfinal.*\r\nfrom \"{schema_name}\".planet_osm_polygon as plarea\r\nINNER JOIN addressrangesfinal ON ST_Intersects(ST_SetSRID(addressrangesfinal.way, 4326), ST_SetSRID(plarea.way, 4326))\r\nwhere plarea.tags->'index:level'= '8' and plarea.tags->'index:priority:8'='30'\r\n\r\n\"\"\"\r\n\r\nadminAreaAa8 = query.replace(\"{schema_name}\", 
str(schemaname))\r\n\r\naddressRanges = adminAreaAa8.replace('{query_coordinates}', query_coordinates)\r\n\r\n# pandas DataFrame\r\nAdminOrdr8Area = pd.read_sql(addressRanges, postgres_db_connection())\r\n\r\nparse_hnr_tags_df = parse_hnr_tags(AdminOrdr8Area)\r\n\r\npreprocess_hnr_hsn_df = preprocess_hnr_hsn(parse_hnr_tags_df)\r\n\r\nget_hnr_df_DF = get_hnr_df(preprocess_hnr_hsn_df)\r\n\r\n\r\n# Apply the function to the 'way' column and save the result in 'PointLocation' column\r\nget_hnr_df_DF['PointLocation'] = get_hnr_df_DF['way'].apply(lambda x: calculate_center_point(Point(float(coord.split()[0]), float(coord.split()[1])) for coord in x.strip('LINESTRING()').split(',')))\r\n\r\n# Take the first element of the street-name array\r\nget_hnr_df_DF['street'] = get_hnr_df_DF['street'].apply(lambda x: x[0])\r\n\r\n\r\n\r\nselectedColumnsGetHnr_DF = get_hnr_df_DF[['osm_id','place_name', 'street', 'way', 'min_hsn', 'max_hsn', 'hnr_array', 'hnr_numeric_mixed_array','PointLocation']]\r\n\r\n# Explode the array column so each house number becomes its own row\r\ndf_exploded = selectedColumnsGetHnr_DF.explode('hnr_array')\r\ndf_exploded['hnr_Number'] = df_exploded['hnr_array']\r\n\r\ndf_exploded.reset_index(drop=True, inplace=True)\r\n\r\n\r\n# Create the Error DataFrame\r\ndf_exploded['Error'] = ''\r\nhouseNumberError = pd.DataFrame()\r\n\r\n# Check conditions and collect offending records (DataFrame.append was removed in pandas 2.x, so use pd.concat)\r\nfor index, row in df_exploded.iterrows():\r\n if row['min_hsn'] == row['max_hsn']:\r\n row['Error'] = 'Array Issue'\r\n houseNumberError = pd.concat([houseNumberError, row.to_frame().T])\r\n\r\n\r\n# Select the columns of interest\r\ncolumns_to_check = ['place_name', 'street', 'hnr_Number']\r\n# Check for duplicate records based on the selected columns\r\nduplicates = df_exploded.duplicated(subset=columns_to_check,keep=False)\r\n\r\n# Filter the DataFrame to show only the duplicate records (.copy() so the new column below does not trigger SettingWithCopyWarning)\r\nduplicate_records = df_exploded[duplicates].copy()\r\n\r\n# Assign a unique ID to each duplicate group\r\nduplicate_records['group_id'] = duplicate_records.groupby(['place_name', 'street', 'hnr_Number']).ngroup()\r\n\r\n\r\n# Sort the DataFrame based on 'hnr_Number', 'street', and 'place_name' columns\r\nsorted_duplicates = duplicate_records.sort_values(by=['hnr_Number', 'street', 'place_name'])\r\n\r\n# Reorder the columns\r\nreordered_columns = ['osm_id','hnr_Number', 'street', 'place_name', 'group_id','min_hsn', 'max_hsn', 'hnr_array', 'hnr_numeric_mixed_array','PointLocation','way']\r\nsorted_df = sorted_duplicates[reordered_columns]\r\n\r\n# Display the duplicate records\r\nprint(sorted_df)\r\n\r\n\r\n# Reset index of Error DataFrame\r\nhouseNumberError.reset_index(drop=True, inplace=True)\r\n\r\n\r\n\r\n\r\n# if selectedColumnsGetHnr_DF[]\r\n\r\n\r\ndf_exploded.to_csv(r"E:\\\\Amol\\\\9_addressRangesPython\\\\ArryExplodAddrssRanges.csv")\r\n\r\n# creating geometry for Admin area, Place name, Address Ranges\r\n# def PandasToGeopandasGeoemtryExport():\r\n# \"\"\"\r\n# :return: # creating geometry for Admin area, Place name, Address Ranges\r\n# \"\"\"\r\n# # # Create new geometry column from the \"way\" column\r\n# get_hnr_df_DF['geometry'] = get_hnr_df_DF['way'].apply(lambda way: loads(way.split(';')[0]))\r\n# #\r\n# # # Create a GeoPandas DataFrame\r\n# spatial_query_result = gpd.GeoDataFrame(get_hnr_df_DF, geometry='geometry')\r\n# # Convert non-compatible columns to string\r\n# non_compatible_types = ['object', 'bool'] # Add more types if needed\r\n# non_string_columns = spatial_query_result.select_dtypes(\r\n# exclude=['string', 'int', 'float', 
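# --- Alternative sketch (added for illustration) ---
# The iterrows() error check above can be expressed as one vectorized filter,
# which is considerably faster on large frames; names reuse the script above.
houseNumberErrorVectorised = (
    df_exploded.loc[df_exploded["min_hsn"] == df_exploded["max_hsn"]]
    .assign(Error="Array Issue")       # flag every single-endpoint range
    .reset_index(drop=True)
)
# --- end sketch ---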
'datetime', 'geometry']).columns\r\n# for column in non_string_columns:\r\n# if spatial_query_result[column].dtype.name in non_compatible_types:\r\n# spatial_query_result[column] = spatial_query_result[column].astype(str)\r\n# # export to line\r\n# # Export to GeoPackage Adress Ranges\r\n# pathline = r\"E:\\\\Amol\\\\9_addressRangesPython\\\\AddrssRangeslineArray.gpkg\"\r\n# spatial_query_result.to_file(pathline, layer='AddrssRanges', driver='GPKG')\r\n# #### create Polygon Geometry Admin order 8 area\r\n# AA8gdf = spatial_query_result.copy()\r\n# # Remove the existing \"coordinates\" column\r\n# AA8gdf.drop(columns='geometry', inplace=True)\r\n# #\r\n# AA8gdf_duplicates = AA8gdf.drop_duplicates(subset=['coordinates'])\r\n# # Create new geometry column from the \"way\" column\r\n# AA8gdf_duplicates['geometry'] = AA8gdf_duplicates['coordinates'].apply(\r\n# lambda coordinates: loads(coordinates.split(';')[0]))\r\n# # # Create a GeoPandas DataFrame\r\n# admiAreaOrder8AreaGDF = gpd.GeoDataFrame(AA8gdf_duplicates, geometry='geometry')\r\n# # # Export to GeoPackage\r\n# admiAreaOrder8AreaGDF.to_file(pathline, layer='AdminOrder8Area', driver='GPKG')\r\n# #### create Polygon Geometry Placename\r\n# placegdf = spatial_query_result.copy()\r\n# # Remove the existing \"coordinates\" column\r\n# placegdf.drop(columns='geometry', inplace=True)\r\n# #\r\n# placegdf_duplicates = placegdf.drop_duplicates(subset=['place_way'])\r\n# # Create new geometry column from the \"way\" column\r\n# placegdf_duplicates['geometry'] = placegdf_duplicates['place_way'].apply(\r\n# lambda place_way: loads(place_way.split(';')[0]))\r\n# # # Create a GeoPandas DataFrame\r\n# placeGDF = gpd.GeoDataFrame(placegdf_duplicates, geometry='geometry')\r\n# # Export to GeoPackage\r\n# placeGDF.to_file(pathline, layer='place', driver='GPKG')\r\n#\r\n# PandasToGeopandasGeoemtryExport()\r\n","repo_name":"amolparande-tomtom/addressranges","sub_path":"hnrAdressRangesArrayFormation.py","file_name":"hnrAdressRangesArrayFormation.py","file_ext":"py","file_size_in_byte":26032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26185185591","text":"from django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . 
import views\n\nurlpatterns = [\n path('users/', views.UserList.as_view()),\n path('users//', views.UserDetail.as_view()),\n path('posts/', views.PostList.as_view()),\n path('posts//', views.PostDetail.as_view()),\n path('comments/', views.CommentsList.as_view()),\n path('comments//', views.CommentsDetail.as_view()),\n path('categories/', views.CategoriesList.as_view()),\n path('categories//', views.CategoriesDetail.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"Vinigretic/Blog-backend","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14472828753","text":"#!/bin/python3\nfrom square_chess_board import Queen, ChessBoardGame, Obstacle\n\n\nFILE_NAME = 'sample_file_1.txt'\n\n\ndef queensAttack(n, k, r_q, c_q, obstacles):\n \"\"\"\n Determine how many squares the queen can attack\n Args:\n n : The number of rows and columns in the board\n k : The number of obstacles on the board\n r_q : The row number of the queen's position\n c_q : The column number of the queen's position\n obstacles : A list where each element is an array of 2 integers, the row and column of an obstacle\n Returns:\n The number of squares that the queen can attack from position (r_q, c_q)\n Raises:\n ValueError: Validation.\n \"\"\"\n if n <= 0 or n > 10**5:\n raise ValueError('Board dimensions are not valid')\n if k < 0 or k > 10**5: # note: '^' is bitwise XOR in Python, so the original `10 ^ 5` capped k at 15\n raise ValueError('The number of obstacles is not valid')\n if [r_q, c_q] in obstacles:\n raise ValueError('There can be no obstacle in the position where the queen is')\n queen = Queen(row=r_q, column=c_q)\n game = ChessBoardGame(dimension=n)\n obs = [Obstacle(row=obs[0], column=obs[1]) for obs in obstacles]\n return game.count_valid_squares(queen=queen, obstacles=obs)\n\n\nif __name__ == '__main__':\n with open(FILE_NAME, 'r') as reader:\n n, k = map(int, reader.readline().split(' '))\n r_q, c_q = map(int, reader.readline().split(' '))\n obstacles = []\n for _ in range(k):\n obstacles.append([int(v) for v in reader.readline().split(' ')])\n try:\n result = queensAttack(n, k, r_q, c_q, obstacles)\n print(f\"The queen is standing at position ({r_q}, {c_q}) on a {n}x{n} chessboard with {k} obstacles\")\n print(f\"The number of squares she can attack from that position is {result}.\")\n except ValueError as e:\n print(e)\n","repo_name":"danordcor/-QueensAttackProblem","sub_path":"queen_attack.py","file_name":"queen_attack.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70096717374","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ciudad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('ciudad', models.CharField(unique=True, max_length=100)),\n ],\n options={\n 'verbose_name': 'Ciudad',\n 'verbose_name_plural': 'Ciudades',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Direccion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('direccion', models.CharField(max_length=500)),\n ('punto_referencia', models.CharField(max_length=250)),\n ('zip1', 
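# --- Usage sketch (added for illustration) ---
# queensAttack above delegates the counting to the square_chess_board module,
# which is not shown here, so this call is indicative only. With no obstacles,
# a queen at (4, 4) on a 4x4 board can attack 9 squares (the well-known sample
# input for this puzzle).
# print(queensAttack(4, 0, 4, 4, []))  # expected: 9
# --- end sketch ---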
models.CharField(max_length=10, blank=True)),\n ],\n options={\n 'verbose_name': 'Direccion',\n 'verbose_name_plural': 'Direcciones',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Pais',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('pais', models.CharField(unique=True, max_length=250)),\n ],\n options={\n 'verbose_name': 'Pais',\n 'verbose_name_plural': 'Paises',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Provincia',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('provincia', models.CharField(unique=True, max_length=100)),\n ('pais', models.ForeignKey(to='Direccion.Pais')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Tipo_direccion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('tipo_direccion', models.CharField(max_length=10)),\n ('activo', models.BooleanField(default=True)),\n ],\n options={\n 'verbose_name': 'Tipo de direccion',\n 'verbose_name_plural': 'Tipos de direccion',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Zona',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('zona', models.CharField(unique=True, max_length=100)),\n ('cuidad', models.ForeignKey(to='Direccion.Ciudad')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='direccion',\n name='tipo_direccion',\n field=models.ForeignKey(to='Direccion.Tipo_direccion'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='direccion',\n name='zona',\n field=models.ForeignKey(to='Direccion.Zona'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='ciudad',\n name='provincia',\n field=models.ForeignKey(to='Direccion.Provincia'),\n preserve_default=True,\n ),\n ]\n","repo_name":"jrmendozat/mtvm","sub_path":"Direccion/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14853202949","text":"import numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom keras.models import Model\nfrom keras.layers import concatenate, Input\nfrom keras.layers import Dense, Activation, Dropout, Flatten\nfrom keras.layers import BatchNormalization\n\nfrom keras import regularizers\nfrom keras import backend as K\nfrom keras.utils import np_utils\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import TensorBoard\nfrom keras.callbacks import LearningRateScheduler \nfrom keras.callbacks import EarlyStopping\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.metrics import normalized_mutual_info_score \nfrom sklearn.model_selection import train_test_split\n\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\ntf.keras.utils.set_random_seed(42)\n\nSAVE_PATH = \"/content/drive/MyDrive/Colab Notebooks/data/\"\n\ndef scheduler(epoch, lr):\n if epoch < 4:\n return lr\n else:\n return lr * tf.math.exp(-0.1)\n\ndef generate_data(N):\n pi = np.array([0.2, 0.4, 0.3, 0.1])\n mu = [[2,2], [-2,2], [-2,-2], [2,-2]]\n std = [[0.5,0.5], [1.0,1.0], [0.5,0.5], [1.0,1.0]]\n x = np.zeros((N,2), dtype=np.float32)\n y = np.zeros((N,2), 
dtype=np.float32)\n z = np.zeros((N,1), dtype=np.int32)\n for n in range(N):\n k = np.argmax(np.random.multinomial(1, pi))\n x[n,:] = np.random.multivariate_normal(mu[k], np.diag(std[k]))\n y[n,:] = mu[k]\n z[n,:] = k\n #end for\n z = z.flatten()\n return x, y, z, pi, mu, std\n\ndef tf_normal(y, mu, sigma):\n y_tile = K.stack([y]*num_clusters, axis=1) #[batch_size, K, D]\n result = y_tile - mu\n sigma_tile = K.stack([sigma]*data_dim, axis=-1) #[batch_size, K, D]\n result = result * 1.0/(sigma_tile+1e-8)\n result = -K.square(result)/2.0\n oneDivSqrtTwoPI = 1.0/math.sqrt(2*math.pi) \n result = K.exp(result) * (1.0/(sigma_tile + 1e-8))*oneDivSqrtTwoPI\n result = K.prod(result, axis=-1) #[batch_size, K] iid Gaussians\n return result\n\ndef NLLLoss(y_true, y_pred):\n out_mu = y_pred[:,:num_clusters*data_dim]\n out_sigma = y_pred[:,num_clusters*data_dim : num_clusters*(data_dim+1)] \n out_pi = y_pred[:,num_clusters*(data_dim+1):]\n\n out_mu = K.reshape(out_mu, [-1, num_clusters, data_dim])\n\n result = tf_normal(y_true, out_mu, out_sigma)\n result = result * out_pi\n result = K.sum(result, axis=1, keepdims=True)\n result = -K.log(result + 1e-8)\n result = K.mean(result)\n return tf.maximum(result, 0)\n\n#generate data\nX_data, y_data, z_data, pi_true, mu_true, sigma_true = generate_data(4096)\n\ndata_dim = X_data.shape[1]\nnum_clusters = len(mu_true)\n\nnum_train = 3500\nX_train, X_test, y_train, y_test = X_data[:num_train,:], X_data[num_train:,:], y_data[:num_train,:], y_data[num_train:,:]\nz_train, z_test = z_data[:num_train], z_data[num_train:]\n\n#visualize data\nplt.figure()\nplt.scatter(X_train[:,0], X_train[:,1], c=z_train, cmap=cm.bwr)\nplt.title('training data')\nplt.show()\n#plt.savefig(SAVE_PATH + '/mdn_training_data.png')\n\n#training params\nbatch_size = 128 \nnum_epochs = 128 \n\n#model parameters\nhidden_size = 32\nweight_decay = 1e-4\n\n#MDN architecture\ninput_data = Input(shape=(data_dim,))\nx = Dense(32, activation='relu')(input_data)\nx = Dropout(0.2)(x)\nx = BatchNormalization()(x)\nx = Dense(32, activation='relu')(x)\nx = Dropout(0.2)(x)\nx = BatchNormalization()(x)\n\nmu = Dense(num_clusters * data_dim, activation='linear')(x) #cluster means\nsigma = Dense(num_clusters, activation=K.exp)(x) #diagonal cov\npi = Dense(num_clusters, activation='softmax')(x) #mixture proportions\nout = concatenate([mu, sigma, pi], axis=-1)\n\nmodel = Model(input_data, out)\n\nmodel.compile(\n loss=NLLLoss,\n optimizer=tf.keras.optimizers.Adam(),\n metrics=[\"accuracy\"]\n)\n\nmodel.summary()\n\n#define callbacks\nfile_name = SAVE_PATH + 'mdn-weights-checkpoint.h5'\ncheckpoint = ModelCheckpoint(file_name, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\nreduce_lr = LearningRateScheduler(scheduler, verbose=1)\nearly_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=16, verbose=1)\n#tensor_board = TensorBoard(log_dir='./logs', write_graph=True)\ncallbacks_list = [checkpoint, reduce_lr, early_stopping]\n\nhist = model.fit(X_train, y_train, batch_size=batch_size, epochs=num_epochs, callbacks=callbacks_list, validation_split=0.2, shuffle=True, verbose=2)\n\ny_pred = model.predict(X_test)\n\nmu_pred = y_pred[:,:num_clusters*data_dim]\nmu_pred = np.reshape(mu_pred, [-1, num_clusters, data_dim])\nsigma_pred = y_pred[:,num_clusters*data_dim : num_clusters*(data_dim+1)] \npi_pred = y_pred[:,num_clusters*(data_dim+1):]\nz_pred = np.argmax(pi_pred, axis=-1)\n\nrand_score = adjusted_rand_score(z_test, z_pred)\nprint(\"adjusted rand score: \", rand_score)\n\nnmi_score = 
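# --- Sampling sketch (added for illustration) ---
# The MDN above predicts mixture weights pi, component means mu and a per-component
# sigma for every input; a draw from the predicted density picks a component and
# then adds Gaussian noise. Variable names follow the script; `rng` is new here.
rng = np.random.default_rng(42)
def sample_from_mdn(mu_pred, sigma_pred, pi_pred):
    samples = np.zeros((mu_pred.shape[0], mu_pred.shape[2]))
    for n in range(mu_pred.shape[0]):
        p = pi_pred[n] / pi_pred[n].sum()      # renormalise for numerical safety
        k = rng.choice(pi_pred.shape[1], p=p)  # pick a mixture component
        samples[n] = rng.normal(mu_pred[n, k], sigma_pred[n, k])  # isotropic Gaussian draw
    return samples
# e.g. x_samples = sample_from_mdn(mu_pred, sigma_pred, pi_pred)
# --- end sketch ---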
normalized_mutual_info_score(z_test, z_pred)\nprint(\"normalized MI score: \", nmi_score)\n\nmu_pred_list = []\nsigma_pred_list = []\nfor label in np.unique(z_pred):\n z_idx = np.where(z_pred == label)[0]\n mu_pred_lbl = np.mean(mu_pred[z_idx,label,:], axis=0)\n mu_pred_list.append(mu_pred_lbl)\n\n sigma_pred_lbl = np.mean(sigma_pred[z_idx,label], axis=0)\n sigma_pred_list.append(sigma_pred_lbl)\n#end for\n\nprint(\"true means:\")\nprint(np.array(mu_true))\n\nprint(\"predicted means:\")\nprint(np.array(mu_pred_list))\n\nprint(\"true sigmas:\")\nprint(np.array(sigma_true))\n\nprint(\"predicted sigmas:\")\nprint(np.array(sigma_pred_list))\n\n#generate plots\nplt.figure()\nplt.scatter(X_test[:,0], X_test[:,1], c=z_pred, cmap=cm.bwr)\nplt.scatter(np.array(mu_pred_list)[:,0], np.array(mu_pred_list)[:,1], s=100, marker='x', lw=4.0, color='k')\nplt.title('test data')\n#plt.savefig('./figures/mdn_test_data.png')\n\nplt.figure()\nplt.plot(hist.history['loss'], 'b', lw=2.0, label='train')\nplt.plot(hist.history['val_loss'], '--r', lw=2.0, label='val')\nplt.title('Mixture Density Network')\nplt.xlabel('Epochs')\nplt.ylabel('Negative Log Likelihood Loss')\nplt.legend(loc='upper left')\n#plt.savefig('./figures/mdn_loss.png')\n\n\n","repo_name":"vsmolyakov/ml_algo_in_depth","sub_path":"chp11/keras_mdn.py","file_name":"keras_mdn.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"78"} +{"seq_id":"19379682024","text":"from collections import Counter\nimport numpy as np\ndef mismatchkernel(seq1,seq2,substring_len):\n def get_substrings(seq,substring_len):\n seqlength=len(seq)\n substrings=[]\n mismatchedset=[]\n \n for i in range(seqlength-substring_len+1):\n x=seq[i:i+substring_len]\n substrings.append(x)\n mismatchedset.append(x)\n for j in range(len(x)):\n b=x[0:j]+'*'+x[j+1:]\n mismatchedset.append(b)\n return substrings,mismatchedset\n\n \n seq1_substrings,seq1_mismatchedset=get_substrings(seq1,substring_len)\n seq2_substrings,seq2_mismatchedset=get_substrings(seq2,substring_len)\n seq1_substrings_dict=dict(Counter(seq1_substrings))\n seq2_substrings_dict=dict(Counter(seq2_substrings))\n intersection=list(set(seq1_substrings) & set(seq2_substrings))\n intersection_seq1_count=np.array([seq1_substrings_dict[x] for x in intersection])\n intersection_seq2_count=np.array([seq2_substrings_dict[x] for x in intersection])\n matched_count=intersection_seq1_count*intersection_seq2_count\n penalty=sum(matched_count)*substring_len\n mismatched_intersection=list(set(seq1_mismatchedset) & set(seq2_mismatchedset))\n seq1_dict=dict(Counter(seq1_mismatchedset))\n seq2_dict=dict(Counter(seq2_mismatchedset))\n seq1_mismatched_count=np.array([seq1_dict[x] for x in mismatched_intersection])\n seq2_mismatched_count=np.array([seq2_dict[x] for x in mismatched_intersection])\n mismatched_count=(seq1_mismatched_count)*(seq2_mismatched_count)\n kern=sum(mismatched_count)-penalty\n\n return kern\n","repo_name":"aliarahim/Predicting_drug-target_interactions","sub_path":"mismatchkernel.py","file_name":"mismatchkernel.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2176163557","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nimport sys\nimport zmq\nfrom zmq.asyncio import Context, ZMQEventLoop\nimport asyncio\nfrom .task import *\nfrom random import randint\n\nclass MasterConnection(object):\n\n def 
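# --- Usage sketch (added for illustration) ---
# A string kernel like mismatchkernel above is typically consumed as a Gram
# matrix, e.g. by sklearn's SVC(kernel='precomputed'); the toy sequences are
# hypothetical. Normalising by the diagonal makes self-similarity equal to 1.
seqs = ["ACGTAC", "ACGTTC", "TTTTTT"]
gram = np.array([[mismatchkernel(a, b, 3) for b in seqs] for a in seqs])
d = np.sqrt(np.diag(gram))
gram_normalised = gram / np.outer(d, d)  # k(x,y) / sqrt(k(x,x) * k(y,y))
# --- end sketch ---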
__init__(self, context, master_addr):\n self._context = context\n self._master_addr = master_addr\n\n async def run(self):\n self._router = self._context.socket(zmq.DEALER) # if router, sending msg is not working.\n self._router.connect(self._master_addr)\n self._register() # this must be modified.\n\n asyncio.ensure_future(task_simulator.run())\n\n while len(task_manager.complete_tasks) < TaskSimulator.NUM_TASKS:\n print(\"len =\", len(task_manager.complete_tasks))\n print(\"[Master Connection] before recv.\")\n msg = await self._router.recv_multipart()\n print(\"[Master Connection] after recv.\")\n self._process(msg)\n\n def _register(self):\n self.dispatch_msg(b\"Register\")\n\n def _process(self, msg):\n header, body = self._resolve_msg(msg)\n\n if header == b\"TaskAccept\":\n print(\"[Master Connection] TaskAccept packet in.\")\n id = int.from_bytes(body[0:4], byteorder='big')\n task_identity = TaskIdentity(id)\n task_manager.change_task_status(task_identity, Task.STATUS_WAITING)\n\n elif header == b\"TaskStart\":\n print(\"[Master Connection] TaskStart packet in.\")\n id = int.from_bytes(body[0:4], byteorder='big')\n task_identity = TaskIdentity(id)\n task_manager.change_task_status(task_identity, Task.STATUS_PROCESSING)\n\n elif header == b\"TaskFinish\":\n print(\"[Master Connection] TaskFinish packet in.\")\n id = int.from_bytes(body[0:4], byteorder='big')\n task_identity = TaskIdentity(id)\n task = task_manager.find_task(task_identity)\n task.set_result_from_bytes(body[4:])\n task_manager.change_task_status(task_identity, Task.STATUS_COMPLETE)\n\n print(\"[*] Task Finish. id = {0}, comment = {1}\".format(task.id, task.result.comment))\n\n else:\n raise ValueError(\"Invalid Header.\")\n\n def _resolve_msg(self, msg):\n print(msg)\n #addr = msg[0]\n #assert msg[1] == b''\n header = msg[0]\n assert msg[1] == b''\n body = msg[2]\n\n return header, body\n\n def dispatch_msg(self, header, body = b''):\n\n async def _dispatch_msg(msg):\n print(\"_dispatch_msg(\"+str(msg)+\")\")\n await self._router.send_multipart(msg) # why server cannot receive this msg???\n print(\"_dispatch_msg finish\") # come here : okay\n\n msg = [header, b'', body]\n asyncio.ensure_future(_dispatch_msg(msg))\n\n\nclass TaskSimulator:\n\n NUM_TASKS = 10\n SLEEP_TASK_MIN_SECONDS = 1\n SLEEP_TASK_MAX_SECONDS = 10\n TASK_GAP_MIN_SECONDS = 0\n TASK_GAP_MAX_SECONDS = 10\n\n def __init__(self):\n pass\n\n def _make_task(self):\n job = SleepTaskJob(randint(TaskSimulator.SLEEP_TASK_MIN_SECONDS, TaskSimulator.SLEEP_TASK_MAX_SECONDS))\n task = SleepTask(job)\n return task\n\n def _process_task(self, task : SleepTask):\n request_body = task.to_bytes()\n request_body += task.job.to_bytes()\n master_conn.dispatch_msg(b\"SleepTask\", request_body)\n\n async def run(self):\n\n for idx in range(TaskSimulator.NUM_TASKS):\n print(\"[*] Simulate Task #{0}\".format(idx))\n task = self._make_task()\n task_manager.add_task(task)\n self._process_task(task)\n await asyncio.sleep(randint(TaskSimulator.TASK_GAP_MIN_SECONDS, TaskSimulator.TASK_GAP_MAX_SECONDS))\n\n\nMASTER_ADDR = 'tcp://127.0.0.1:5000'\n\ncontext = Context()\nmaster_conn = MasterConnection(context, MASTER_ADDR)\ntask_manager = TaskManager()\ntask_simulator = TaskSimulator()\n\nasync def run_server():\n await master_conn.run()\n\ndef main():\n try:\n loop = ZMQEventLoop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(run_server())\n except KeyboardInterrupt:\n print('\\nFinished (interrupted)')\n 
sys.exit(0)","repo_name":"DrawML/research-dist","sub_path":"taeguk/prototype_ver2/dist_system/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"22425270438","text":"import os\nimport math\nimport hashlib\nimport queue\nimport file_processing\nfrom MerkleNode import MerkleNode\n\nclass MerkleTree:\n\n def __init__(self):\n super().__init__()\n self.count_merkle_nodes = 0\n self.merkle_root = None\n \n def get_merkle_node(self,data):\n \"\"\"\n Creates and returns a new merkle node with the hash of data as the hash value.\n Data is to be provided in bytes from not unicode.\n \"\"\"\n merkle_node = MerkleNode()\n hash_value = hashlib.sha256(data).hexdigest()\n merkle_node.set_hash_value(hash_value)\n self.count_merkle_nodes += 1\n\n return merkle_node\n\n def join_merkle_roots(self,root1: MerkleNode(), root2: MerkleNode()):\n \"\"\"\n Joins two Merkle Roots to form a single tree\n \"\"\"\n s = root1.get_hash_value() + root2.get_hash_value()\n new_root = self.get_merkle_node(s.encode())\n \n new_root.set_left_child(root1)\n new_root.set_right_child(root2)\n\n root1.set_parent(new_root)\n root2.set_parent(new_root)\n\n return new_root\n\n def construct_merkle_tree_helper(self,data):\n \"\"\"\n A recursive function which creates a Merkle Tree by hashing the given data\n 1 Merkle Node for 1 unit of data\n Returns the root of the Merkle Tree\n \"\"\"\n\n n = len(data)\n if n == 1:\n return self.get_merkle_node(data[0])\n\n k = math.floor(math.log2(n))\n k = pow(2,k)\n if k == n:\n k = n // 2\n\n merkle_root1 = self.construct_merkle_tree_helper(data[:k])\n merkle_root2 = self.construct_merkle_tree_helper(data[k:n])\n\n merkle_root = self.join_merkle_roots(merkle_root1, merkle_root2)\n\n self.merkle_root = merkle_root\n\n return merkle_root\n\n def construct_merkle_tree(self, file_path, block_count = 128):\n\n block_list = file_processing.divide_file_into_blocks(file_path, block_count)\n\n return self.construct_merkle_tree_helper(block_list)\n\n def get_tree_illustration(self, root, n_tabs, prefix = \"\"):\n \"\"\"\n A recursive function to illustrate the Merkle Tree\n Sample Illustration: root\n |---left_child\n | |---left_child\n | |---right_child\n |---right_child\n |---left_child\n |---right_child\n \"\"\"\n if root == None:\n return\n\n print(prefix[:-2], '|', end='')\n #print(prefix, end='')\n print(\"|--\", root.get_hash_value())\n self.get_tree_illustration(root.get_left_child(), n_tabs + 1, prefix + \" |\")\n self.get_tree_illustration(root.get_right_child(), n_tabs + 1, prefix + \" \")\n\n\ndef divide_file_into_blocks(file,block_size):\n \"\"\"\n A function to divide the data, for which the Merkle Tree has to be formed, \n into blocks of given size\n \"\"\"\n\n block_list = []\n\n with open(file, 'rb') as f:\n block = f.read(block_size)\n while block:\n # print(\"Block: \", block)\n block_list.append(block)\n block = f.read(block_size)\n \n return block_list\n\ndef bfs(root):\n \"\"\"\n My initial, not so efficient, attempt to visualize the tree structure\n TODO: Write code to traverse the tree\n and print the hash values stored in each node in each level.\n \"\"\"\n print(root.get_hash_value())\n q = queue.Queue()\n q.put(root) \n q.put(None)\n cnt = 0;\n while q.qsize() > 0:\n curr = q.get()\n if curr == None:\n print(cnt)\n cnt = 0\n print(\"-----------------\")\n if q.qsize() > 0:\n q.put(None)\n continue\n cnt += 1\n 
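# --- Usage sketch (added for illustration) ---
# The practical payoff of a Merkle tree is a cheap integrity check: files split
# with the same block size produce the same root hash exactly when their
# contents match (up to SHA-256 collisions). Paths below are hypothetical.
def files_match(path_a, path_b, block_count=128):
    root_a = MerkleTree().construct_merkle_tree(path_a, block_count)
    root_b = MerkleTree().construct_merkle_tree(path_b, block_count)
    return root_a.get_hash_value() == root_b.get_hash_value()
# --- end sketch ---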
print(curr.get_hash_value(), end=' || ')\n if curr.get_left_child() != None:\n q.put(curr.get_left_child())\n if curr.get_right_child() != None:\n q.put(curr.get_right_child())\n \nfile_path = \"/home/nikhil/Desktop/btp/Paper_BlockSim-final.pdf\"\nblock_count = 256\n\nmTree = MerkleTree()\nmerkle_root = mTree.construct_merkle_tree(file_path, block_count)\n\nprint(merkle_root)\nprint(mTree.count_merkle_nodes)\n\nmTree.get_tree_illustration(mTree.merkle_root, 0)\n","repo_name":"nikhilk1701/MerkleTrees","sub_path":"MerkleTree.py","file_name":"MerkleTree.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15605275775","text":"import unittest\n\n\nimport jpeg_exif\n\n\nclass TestParseExifLittleEndian(unittest.TestCase):\n def test_leaves(self):\n with open('leaves.jpg', 'rb') as f:\n self.assertEqual({'Compression': 6,\n 'DateTime': '2015:11:09 11:59:11',\n 'ExifIFDPointer': 14248,\n 'JPEGInterchangeFormat': 35652,\n 'JPEGInterchangeFormatLength': 3459,\n 'Make': 'EASTMAN KODAK COMPANY',\n 'Model': 'KODAK EASYSHARE C195 Digital Camera',\n 'Orientation': 1,\n 'ResolutionUnit': 2,\n 'XResolution': '72/1',\n 'YCbCrPositioning': 1,\n 'YResolution': '72/1'},\n jpeg_exif.parse_exif(f))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rvinitraumass/DigitalForensics590F","sub_path":"05 - JPEG and EXIF/test_parse_exif_little_endian.py","file_name":"test_parse_exif_little_endian.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23890204024","text":"from functools import partial\nfrom threading import Thread\n\nfrom kivy.lang.builder import Builder\nfrom kivy.uix.screenmanager import ScreenManager\nfrom kivymd.uix.button import MDFlatButton\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.screen import MDScreen\n\nfrom Class.globalF import globalFuncs\nfrom Screens.HELPERS import introScreenHelper, setupExistingHelper, setupNewHelper, setupPracticeHelper, \\\n setupAdminAccHelper\n\n\nclass SetupScreen(MDScreen):\n def __init__(self):\n super(SetupScreen, self).__init__()\n self.name = \"SETUP\"\n self.sm = ScreenManager()\n self.add_widget(self.sm)\n\n self.introScreen = Builder.load_string(introScreenHelper)\n\n self.setupExisting = Builder.load_string(setupExistingHelper) # enter practice id here\n self.setupNew = Builder.load_string(setupNewHelper) # enter org ID here\n\n self.setupPractice = Builder.load_string(setupPracticeHelper) # Enter practice details here\n self.setupAdmin = Builder.load_string(setupAdminAccHelper) # Enter admin account details here\n\n self.sm.add_widget(self.introScreen)\n self.sm.add_widget(self.setupExisting)\n self.sm.add_widget(self.setupNew)\n self.sm.add_widget(self.setupPractice)\n self.sm.add_widget(self.setupAdmin)\n\n def switchScreen(self, name, *args):\n self.sm.current = name\n\n globalFuncs.closeDialog()\n\n def saveSets(self):\n globalFuncs.config.set(\"appinfo\",\"org\",self.practice.to_dict()[\"organisation\"])\n globalFuncs.config.set(\"appinfo\",\"practice\",self.practice.id)\n globalFuncs.config.set(\"appinfo\",\"firstboot\",\"False\")\n globalFuncs.saveConfig()\n\n def completeSetup(self, *args): ##only for setting up an existing practice\n self.parent.current = \"LOGIN\"\n\n # saving all the info\n self.saveSets()\n globalFuncs.closeDialog()\n\n def verifyPracCode(self, code):\n\n if code.replace(\" \", \"\") == \"\":\n 
globalFuncs.dialog = MDDialog(title=\"Error\", text=\"Please enter a link code\").open()\n return\n result = globalFuncs.database.returnPracticeByLink(code)\n if result == []:\n MDDialog(title=\"No Practice Found\", text=\"Please enter a valid link code\").open()\n return\n\n elif result != []:\n self.practice = result[0]\n globalFuncs.dialog = MDDialog(\n title=\"Found your practice!\",\n text=\"Is your practice called {}?\".format(self.practice.to_dict()[\"name\"]),\n buttons=[\n MDFlatButton(text=\"No\", on_release=globalFuncs.closeDialog),\n MDFlatButton(text=\"Yes\", on_release=self.completeSetup)\n ],\n auto_dismiss=False\n )\n\n globalFuncs.dialog.open()\n\n def verifyOrgCode(self, code):\n if code.replace(\" \", \"\") == \"\":\n globalFuncs.dialog = MDDialog(title=\"Error\", text=\"Please enter a link code\").open()\n return\n\n result = globalFuncs.database.returnOrgByLink(code)\n if result == []:\n MDDialog(title=\"No Practice Found\", text=\"Please enter a valid link code\").open()\n return\n\n elif result != []:\n self.org = result[0]\n globalFuncs.dialog = MDDialog(\n title=\"Found your organisation!\",\n text=\"Is your organisation called {}?\".format(self.org.to_dict()[\"name\"]),\n buttons=[\n MDFlatButton(text=\"No\", on_release=globalFuncs.closeDialog),\n MDFlatButton(text=\"Yes\", on_release=partial(self.switchScreen, \"SETUPPRACTICE\"))\n ],\n auto_dismiss=False\n )\n\n globalFuncs.dialog.open()\n return\n\n def createPractice(self):\n def function():\n self.setupPractice.ids.spinner.active = True\n name = self.setupPractice.ids.pracNameEntry.text\n admin = self.setupPractice.ids.adminNameEntry.text\n email = self.setupPractice.ids.emailEntry.text\n phone = self.setupPractice.ids.phoneEntry.text\n addr = self.setupPractice.ids.addy1Entry.text\n post = self.setupPractice.ids.postcodeEntry.text\n\n ''' # just a test string here\n name = \"specsaver optom\"\n admin = \"speccies bridgend\"\n email = \"specsavers@gmail.com\"\n phone = \"07951308773\"\n addr = \"anfwibfekjl sdfsdf\"\n post = \"CF315BG\"\n '''\n\n if name.replace(\" \", \"\") == \"\" or admin.replace(\" \", \"\") == \"\" or email.replace(\" \",\n \"\") == \"\" or phone.replace(\n \" \", \"\") == \"\" or addr.replace(\" \", \"\") == \"\" or post.replace(\" \", \"\") == \"\":\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"Please fill out all boxes\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupPractice.ids.spinner.active = False\n return\n\n if globalFuncs.validation.validatePlainString(name.replace(\" \", \"\")) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\" Practice Name is not valid\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupPractice.ids.spinner.active = False\n return\n\n if globalFuncs.validation.validatePlainString(admin.replace(\" \", \"\"), numCheck=True) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"Admin name is not valid, make sure only alphabetic characters are used.\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupPractice.ids.spinner.active = False\n return\n '''\n if globalFuncs.validation.checkEmail(email) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"invalid email\",\n buttons=[MDFlatButton(text=\"Ok\",on_release = 
globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupPractice.ids.spinner.active = False\n return\n '''\n '''\n if globalFuncs.validation.validatePlainString(addr.replace(\" \",\"\")) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"Address line invalid, only alphanumeric characters can be used\",\n buttons=[MDFlatButton(text=\"Ok\",on_release = globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n return\n '''\n\n if globalFuncs.validation.checkPostcode(post.replace(\" \", \"\")) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"Invalid postcode provided, only alphanumeric characters can be used\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupPractice.ids.spinner.active = False\n return\n\n self.setupPractice.ids.spinner.active = False\n self.practice = globalFuncs.database.addNewPractice(name, admin, email, phone, addr, post, self.org.id)\n self.practice = globalFuncs.database.fsdb.collection(u\"practices\").document(self.practice.id).get()\n\n self.sm.current = \"SETUPADMIN\"\n\n Thread(target=function, daemon=True).start()\n\n def createAdmin(self):\n def function():\n self.setupAdmin.ids.spinner.active = True\n fname = self.setupAdmin.ids.fnameEntry.text\n lname = self.setupAdmin.ids.lnameEntry.text\n email = self.setupAdmin.ids.emailEntry.text\n user = self.setupAdmin.ids.usernameEntry.text\n passw = self.setupAdmin.ids.passwordEntry.text\n\n if fname.replace(\" \", \"\") == \"\" or lname.replace(\" \", \"\") == \"\" or email.replace(\" \",\n \"\") == \"\" or user.replace(\n \" \", \"\") == \"\" or passw.replace(\" \", \"\") == \"\":\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"Please fill out all boxes\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupAdmin.ids.spinner.active = False\n return\n if globalFuncs.validation.validatePlainString(fname,\n numCheck=True) == False or globalFuncs.validation.validatePlainString(\n lname, numCheck=True) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\" First or last name is not valid, please make sure only one name is entered\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupAdmin.ids.spinner.active = False\n return\n\n if globalFuncs.validation.checkEmail(email) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"invalid email\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n self.setupAdmin.ids.spinner.active = False\n return\n\n if globalFuncs.validation.validatePlainString(user) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"The username is not valid, usernames can only be letters\",\n buttons=[MDFlatButton(text=\"Ok\", on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n\n self.setupAdmin.ids.spinner.active = False\n return\n\n if globalFuncs.password.checkPasswordIsValid(passw) == False:\n globalFuncs.dialog = MDDialog(\n title=\"Error\",\n text=\"The password entered is not valid, the password must be at least 8 characters long, and contains numbers,uppercase and lowercase letters.\",\n buttons=[MDFlatButton(text=\"Ok\", 
on_release=globalFuncs.closeDialog)],\n auto_dismiss=False\n )\n globalFuncs.dialog.open()\n\n self.setupAdmin.ids.spinner.active = False\n return\n\n # saving all the info\n self.saveSets()\n result = globalFuncs.database.createUser(fname, lname, email, user, passw, 1, self.practice.id)\n self.setupAdmin.ids.spinner.active = False\n self.parent.current = \"LOGIN\"\n\n Thread(target=function, daemon=True).start()\n","repo_name":"spades1404/OpthaBotDevelopment","sub_path":"Screens/SETUP.py","file_name":"SETUP.py","file_ext":"py","file_size_in_byte":11971,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"73158476093","text":"import torch\r\nimport torch.nn as nn\r\n\r\nclass GRUModel(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers, dropout):\r\n super(GRUModel, self).__init__()\r\n self.hidden_dim = hidden_dim\r\n self.num_layers = num_layers\r\n \r\n self.gru = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True, dropout=dropout)\r\n self.fc = nn.Linear(hidden_dim, output_dim)\r\n\r\n def forward(self, x):\r\n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(x.device) # initialize hidden state\r\n out, _ = self.gru(x, h0) # out: batch_size, seq_length, hidden_dim\r\n out = out[:, -1, :] # get the output of the last time step\r\n out = self.fc(out)\r\n return out","repo_name":"kangmincho1/ShinhanAI-competition","sub_path":"models/GRU.py","file_name":"GRU.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28029186034","text":"from pyengine.common.components.component import Component\nfrom pyengine.common.utils import Vec2\n\n\nclass CollisionComponent(Component):\n def __init__(self, game_object):\n super().__init__(game_object)\n self.name = \"CollisionComponent\"\n self.solid = True\n self.callback = None\n self.size = Vec2.zero()\n\n def to_dict(self):\n return {\n \"name\": self.name,\n \"solid\": self.solid,\n \"callback\": self.callback,\n \"size\": self.size.coords()\n }\n\n @classmethod\n def from_dict(cls, game_object, values):\n comp = CollisionComponent(game_object)\n comp.solid = values.get(\"solid\", True)\n comp.callback = values.get(\"callback\", None)\n comp.size = Vec2(*values.get(\"size\", (0, 0)))\n return comp\n","repo_name":"Lycos-Novation/PyEngine4","sub_path":"pyengine/common/components/collision_component.py","file_name":"collision_component.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"4470437662","text":"from django import forms\nfrom .models import Item\nfrom django.db.models.fields import BLANK_CHOICE_DASH\n\n\nclass SearchForm(forms.Form):\n\n\tbyBrand = forms.ChoiceField(\n\t\tlabel='Find by brand',\n\t\tchoices=BLANK_CHOICE_DASH + list(Item.BRANDS), \n\t\trequired=False, \n\t\twidget=forms.Select(attrs={'placeholder': 'Brand', 'required': False}),\n\t\t#null=True,\n\t\t#blank=True\n\t\t)\n\n\tbyCat = forms.ChoiceField(\n\t\tlabel='Find by category', \n\t\tchoices=BLANK_CHOICE_DASH + list(Item.CATEGORIES), \n\t\trequired=False, \n\t\twidget=forms.Select(attrs={'placeholder': 'Category', 'required': 
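# --- Usage sketch (added for illustration; dimensions are hypothetical) ---
# GRUModel above expects input of shape (batch, seq_len, input_dim) because
# batch_first=True, and returns one output vector per sequence, taken from
# the final time step.
gru = GRUModel(input_dim=8, hidden_dim=32, output_dim=1, num_layers=2, dropout=0.2)
x = torch.randn(16, 24, 8)  # 16 sequences, 24 time steps, 8 features each
y = gru(x)                  # -> torch.Size([16, 1])
# --- end sketch ---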
False}),\n\t\t#null=True,\n\t\t#blank=True\n\t\t)\n\n","repo_name":"mohamaddroubi/graduation_project","sub_path":"grad_project/grad_project/recommend/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42755375840","text":"\"\"\"Run a simple web server which responds to requests to monitor a directory\r\ntree and identify files over a certain size threshold. The directory is\r\nscanned to pick up the current situation and then monitored for new / removed\r\nfiles. By default, the page auto-refreshes every 60 seconds and shows the\r\ntop 50 files ordered by size.\r\n\"\"\"\r\nfrom __future__ import with_statement\r\nimport os, sys\r\nimport cgi\r\nimport datetime\r\nimport operator\r\nimport socket\r\nimport threading\r\nimport time\r\nimport traceback\r\nimport Queue\r\nimport urllib\r\nimport urlparse\r\nfrom wsgiref.simple_server import make_server\r\nfrom wsgiref.util import shift_path_info\r\nimport win32timezone\r\n\r\nimport error_handler\r\nfrom winsys import core, fs, misc\r\nprint(\"Logging to %s\" % core.log_filepath)\r\n\r\ndef deltastamp(delta):\r\n\r\n def pluralise(base, n):\r\n if n > 1:\r\n return \"%d %ss\" % (n, base)\r\n else:\r\n return \"%d %s\" % (n, base)\r\n\r\n if delta > datetime.timedelta(0):\r\n output_format = \"%s ago\"\r\n else:\r\n output_format = \"in %s\"\r\n\r\n days = delta.days\r\n if days != 0:\r\n wks, days = divmod(days, 7)\r\n if wks > 0:\r\n if wks < 9:\r\n output = pluralise(\"wk\", wks)\r\n else:\r\n output = pluralise(\"mth\", int(round(1.0 * wks / 4.125)))\r\n else:\r\n output = pluralise(\"day\", days)\r\n else:\r\n mins, secs = divmod(delta.seconds, 60)\r\n hrs, mins = divmod(mins, 60)\r\n if hrs > 0:\r\n output = pluralise(\"hr\", hrs)\r\n elif mins > 0:\r\n output = pluralise(\"min\", mins)\r\n else:\r\n output = pluralise(\"sec\", secs)\r\n\r\n return output_format % output\r\n\r\nclass x_stop_exception(Exception):\r\n pass\r\n\r\ndef get_files(path, size_threshold_mb, results, stop_event):\r\n \"\"\"Intended to run inside a thread: scan the contents of\r\n a tree recursively, pushing every file which is at least\r\n as big as the size threshold onto a results queue. Stop\r\n if the stop_event is set.\r\n \"\"\"\r\n size_threshold = size_threshold_mb * 1024 * 1024\r\n root = fs.dir(path)\r\n top_level_folders = sorted(root.dirs(), key=operator.attrgetter(\"written_at\"), reverse=True)\r\n try:\r\n for tlf in top_level_folders:\r\n for f in tlf.flat(ignore_access_errors=True):\r\n if stop_event.isSet():\r\n print(\"stop event set\")\r\n raise x_stop_exception\r\n try:\r\n if f.size > size_threshold:\r\n results.put(f)\r\n except fs.exc.x_winsys:\r\n continue\r\n except x_stop_exception:\r\n return\r\n\r\ndef watch_files(path, size_threshold_mb, results, stop_event):\r\n \"\"\"Intended to run inside a thread: monitor a directory tree\r\n for file changes. Convert the changed files to fs.File objects\r\n and push them onto a results queue. Stop if the stop_event is set.\r\n \"\"\"\r\n size_threshold = size_threshold_mb * 1024 * 1024\r\n BUFFER_SIZE = 8192\r\n MAX_BUFFER_SIZE = 1024 * 1024\r\n\r\n #\r\n # The double loop is because the watcher process\r\n # can fall over with an internal error which is (I think)\r\n # related to a small buffer size. 
If that happens,\r\n # restart the process with a bigger buffer up to a\r\n # maximum size.\r\n #\r\n buffer_size = BUFFER_SIZE\r\n while True:\r\n watcher = fs.watch(path, True, buffer_size=buffer_size)\r\n\r\n while True:\r\n if stop_event.isSet(): break\r\n try:\r\n action, old_file, new_file = watcher.next()\r\n core.warn(\"Monitored: %s - %s => %s\" % (action, old_file, new_file))\r\n if old_file is not None:\r\n if (not old_file) or (old_file and old_file.size > size_threshold):\r\n results.put(old_file)\r\n if new_file is not None and new_file != old_file:\r\n if new_file and new_file.size > size_threshold:\r\n results.put(new_file)\r\n except fs.exc.x_winsys:\r\n pass\r\n except RuntimeError:\r\n try:\r\n watcher.stop()\r\n except:\r\n pass\r\n buffer_size = min(2 * buffer_size, MAX_BUFFER_SIZE)\r\n print(\"Tripped up on a RuntimeError. Trying with buffer of\", buffer_size)\r\n\r\nclass Path(object):\r\n \"\"\"Keep track of the files and changes under a particular\r\n path tree. No attempt is made to optimise the cases where\r\n one tree is contained within another.\r\n\r\n When the Path is started, it kicks of two threads: one to\r\n do a complete scan; the other to monitor changes. Both\r\n write back to the same results queue which is the basis\r\n for the set of files which will be sorted and presented\r\n on the webpage.\r\n\r\n For manageability, the files are pulled off the queue a\r\n chunk at a time (by default 1000).\r\n \"\"\"\r\n\r\n def __init__(self, path, size_threshold_mb, n_files_at_a_time):\r\n self._path = path\r\n self._size_threshold_mb = size_threshold_mb\r\n self._n_files_at_a_time = n_files_at_a_time\r\n self._changes = Queue.Queue()\r\n self._stop_event = threading.Event()\r\n self._files = set()\r\n\r\n self.file_getter = threading.Thread(\r\n target=get_files,\r\n args=(path, size_threshold_mb, self._changes, self._stop_event)\r\n )\r\n self.file_getter.setDaemon(1)\r\n self.file_getter.start()\r\n\r\n self.file_watcher = threading.Thread(\r\n target=watch_files,\r\n args=(path, size_threshold_mb, self._changes, self._stop_event)\r\n )\r\n self.file_watcher.setDaemon(1)\r\n self.file_watcher.start()\r\n\r\n def __str__(self):\r\n return \"\" % (self._path, len(self._files), self._size_threshold_mb)\r\n __repr__ = __str__\r\n\r\n def updated(self):\r\n \"\"\"Pull at most _n_files_at_a_time files from the queue. If the\r\n file exists, add it to the set (which will, of course, ignore\r\n duplicates). If it doesn't exist, remove it from the set, ignoring\r\n the case where it isn't there to start with.\r\n \"\"\"\r\n for i in range(self._n_files_at_a_time):\r\n try:\r\n f = self._changes.get_nowait()\r\n if f:\r\n self._files.add(f)\r\n else:\r\n self._files.discard(f)\r\n except Queue.Empty:\r\n break\r\n return self._files\r\n\r\n def finish(self):\r\n self._stop_event.set()\r\n\r\n def status(self):\r\n status = []\r\n if self.file_getter.isAlive():\r\n status.append(\"Scanning\")\r\n if self.file_watcher.isAlive():\r\n status.append(\"Monitoring\")\r\n return \" & \".join(status)\r\n\r\nclass App(object):\r\n \"\"\"The controlling WSGI app. On each request, it looks up the\r\n path handler which corresponds to the path form variable. 
It then\r\n pulls any new entries and displays them according to the user's\r\n parameters.\r\n \"\"\"\r\n\r\n PATH = \"\"\r\n N_FILES_AT_A_TIME = 1000\r\n SIZE_THRESHOLD_MB = 100\r\n TOP_N_FILES = 50\r\n REFRESH_SECS = 60\r\n HIGHLIGHT_DAYS = 0\r\n HIGHLIGHT_HRS = 12\r\n HIGHLIGHT_MINS = 0\r\n\r\n def __init__(self):\r\n self._paths_lock = threading.Lock()\r\n self.paths = {}\r\n self._paths_accessed = {}\r\n\r\n def doc(self, files, status, form):\r\n path = form.get(\"path\", self.PATH)\r\n top_n_files = int(form.get(\"top_n_files\", self.TOP_N_FILES) or 0)\r\n size_threshold_mb = int(form.get(\"size_threshold_mb\", self.SIZE_THRESHOLD_MB) or 0)\r\n refresh_secs = int(form.get(\"refresh_secs\", self.REFRESH_SECS) or 0)\r\n highlight_days = int(form.get(\"highlight_days\", self.HIGHLIGHT_DAYS) or 0)\r\n highlight_hrs = int(form.get(\"highlight_hrs\", self.HIGHLIGHT_HRS) or 0)\r\n highlight_mins = int(form.get(\"highlight_mins\", self.HIGHLIGHT_MINS) or 0)\r\n highlight_delta = datetime.timedelta(days=highlight_days, hours=highlight_hrs, minutes=highlight_mins)\r\n highlight_deltastamp = deltastamp(highlight_delta)\r\n if files:\r\n title = cgi.escape(\"Top %d files on %s over %dMb - %s\" % (min(len(files), self.TOP_N_FILES), path, size_threshold_mb, status))\r\n else:\r\n title = cgi.escape(\"Top files on %s over %dMb - %s\" % (path, size_threshold_mb, status))\r\n\r\n doc = []\r\n doc.append(\"%s\" % title)\r\n doc.append(\"\"\"\"\"\")\r\n doc.append(\"\"\"\"\"\")\r\n doc.append(\"\")\r\n doc.append(\"\"\"
\r\n Scan  \r\n for files over Mb\r\n showing the top  files\r\n refreshing every  secs\r\n highlighting the last   days\r\n   hrs\r\n   mins\r\n \r\n
\"\"\" % locals())\r\n\r\n now = win32timezone.utcnow()\r\n if path:\r\n doc.append(\"

%s

\" % title)\r\n latest_filename = \"\\\\\".join(files[-1].parts[1:]) if files else \"(no file yet)\"\r\n doc.append(u'

Last updated %s

' % time.asctime())\r\n doc.append(u'')\r\n for i, f in enumerate(files[:top_n_files]):\r\n try:\r\n doc.append(\r\n u'' % (\r\n \"odd\" if i % 2 else \"even\",\r\n \"highlight\" if ((now - max(f.written_at, f.created_at)) <= highlight_delta) else \"\",\r\n f.relative_to(path).lstrip(fs.seps),\r\n f.size / 1024.0 / 1024.0,\r\n max(f.written_at, f.created_at)\r\n )\r\n )\r\n except fs.exc.x_winsys:\r\n pass\r\n doc.append(\"
Filename  Size (Mb)  Updated
%s  %5.2f  %s
\")\r\n\r\n doc.append(\"\")\r\n return doc\r\n\r\n def handler(self, form):\r\n path = form.get(\"path\", self.PATH)\r\n size_threshold_mb = int(form.get(\"size_threshold_mb\", self.SIZE_THRESHOLD_MB) or 0)\r\n refresh_secs = int(form.get(\"refresh_secs\", self.REFRESH_SECS) or 0)\r\n status = \"Waiting\"\r\n if path and fs.Dir(path):\r\n #\r\n # Ignore any non-existent paths, including garbage.\r\n # Create a new path handler if needed, or pull back\r\n # and existing one, and return the latest list.\r\n #\r\n with self._paths_lock:\r\n if path not in self.paths:\r\n self.paths[path] = Path(path, size_threshold_mb, self.N_FILES_AT_A_TIME)\r\n path_handler = self.paths[path]\r\n if path_handler._size_threshold_mb != size_threshold_mb:\r\n path_handler.finish()\r\n path_handler = self.paths[path] = Path(path, size_threshold_mb, self.N_FILES_AT_A_TIME)\r\n self._paths_accessed[path] = win32timezone.utcnow()\r\n files = sorted(path_handler.updated(), key=operator.attrgetter(\"size\"), reverse=True)\r\n status = path_handler.status()\r\n\r\n #\r\n # If any path hasn't been queried for at least\r\n # three minutes, close the thread down and delete\r\n # its entry. If it is queried again, it will just\r\n # be restarted as new.\r\n #\r\n for path, last_accessed in self._paths_accessed.iteritems():\r\n if (win32timezone.utcnow() - last_accessed).seconds > 180:\r\n path_handler = self.paths.get(path)\r\n if path_handler:\r\n path_handler.finish()\r\n del self.paths[path]\r\n del self._paths_accessed[path]\r\n\r\n else:\r\n files = []\r\n return self.doc(files, status, form)\r\n\r\n def __call__(self, environ, start_response):\r\n \"\"\"Only attempt to handle the root URI. If a refresh interval\r\n is requested (the default) then send a header which forces\r\n the refresh.\r\n \"\"\"\r\n path = shift_path_info(environ).rstrip(\"/\")\r\n if path == \"\":\r\n form = dict((k, v[0]) for (k, v) in cgi.parse_qs(list(environ['QUERY_STRING']).iteritems()) if v)\r\n if form.get(\"path\"):\r\n form['path'] = form['path'].rstrip(\"\\\\\") + \"\\\\\"\r\n refresh_secs = int(form.get(\"refresh_secs\", self.REFRESH_SECS) or 0)\r\n headers = []\r\n headers.append((\"Content-Type\", \"text/html; charset=utf-8\"))\r\n if refresh_secs:\r\n headers.append((\"Refresh\", \"%s\" % refresh_secs))\r\n start_response(\"200 OK\", headers)\r\n return (d.encode(\"utf8\") + \"\\n\" for d in self.handler(form))\r\n else:\r\n start_response(\"404 Not Found\", [(\"Content-Type\", \"text/plain\")])\r\n return []\r\n\r\n def finish(self):\r\n for path_handler in self.paths.itervalues():\r\n path_handler.finish()\r\n\r\nif __name__ == '__main__':\r\n misc.set_console_title(\"Monitor Directory\")\r\n PORT = 8000\r\n HOSTNAME = socket.getfqdn()\r\n threading.Timer(\r\n 2.0,\r\n lambda: os.startfile(\"http://%s:%s\" % (HOSTNAME, PORT))\r\n ).start()\r\n\r\n app = App()\r\n try:\r\n make_server('', PORT, app).serve_forever()\r\n except KeyboardInterrupt:\r\n print(\"Shutting down gracefully...\")\r\n finally:\r\n app.finish()\r\n","repo_name":"tjguk/winsys","sub_path":"winsys/extras/monitor_directory.py","file_name":"monitor_directory.py","file_ext":"py","file_size_in_byte":15377,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"78"} +{"seq_id":"14699453183","text":"import argparse\n\nfrom rasa.constants import DEFAULT_DATA_PATH, DEFAULT_RASA_X_PORT\n\nfrom rasa.cli.arguments.default_arguments import add_model_param, add_data_param\nfrom rasa.cli.arguments.run import 
add_server_arguments\n\n\ndef set_x_arguments(parser: argparse.ArgumentParser):\n add_model_param(parser, add_positional_arg=False)\n\n add_data_param(parser, default=DEFAULT_DATA_PATH, data_type=\"stories and Rasa NLU \")\n\n parser.add_argument(\n \"--no-prompt\",\n action=\"store_true\",\n help=\"Automatic yes or default options to prompts and oppressed warnings.\",\n )\n\n parser.add_argument(\n \"--production\",\n action=\"store_true\",\n help=\"Run Rasa X in a production environment.\",\n )\n\n parser.add_argument(\n \"--rasa-x-port\",\n default=DEFAULT_RASA_X_PORT,\n type=int,\n help=\"Port to run the Rasa X server at.\",\n )\n\n add_server_arguments(parser)\n","repo_name":"mahbubcseju/Rasa_Japanese","sub_path":"rasa/rasa/cli/arguments/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"32588249210","text":"import math\n\na = int(input())\nb = int(input())\n\nc = math.sqrt(a**2 + b**2)\nprint(c)\nd = int(round(math.degrees(math.acos(b/c))))\nprint(d)\n\nt = u\"\\u00b0\"\nprint (str(d) + t)\n","repo_name":"nadyrbek97/hak_python_task","sub_path":"hak_triangle.py","file_name":"hak_triangle.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30972142942","text":"import os, re\nfrom os import sys\nfrom distutils.core import setup\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom imp import find_module\n#from importlib import find_module\n\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\n\"\"\" Check for required modules \"\"\"\ntry:\n find_module('numpy')\nexcept:\n sys.exit('### Error: python module numpy not found')\n \ntry:\n find_module('scipy')\nexcept:\n sys.exit('### Error: python module scipy not found')\n \ntry:\n find_module('astropy')\nexcept ImportError:\n try:\n find_module('pyfits')\n except ImportError:\n sys.exit('### Error: Neither astropy nor pyfits found.')\n\ntry:\n find_module('matplotlib')\nexcept ImportError:\n sys.exit('### Error: python module matplotlib not found')\n\ntry:\n find_module('cdfutils')\nexcept ImportError:\n sys.exit('### Error: python module cdfutils not found. 
'\n 'Download and install from github cdfassnacht/cdfutils')\n\n\n#try: find_module('MySQLdb')\n#except: sys.exit('### Error: python module MySQLdb not found')\n\n\nverstr = \"unknown\"\ntry:\n parentdir = os.getcwd()+'/'\n verstrline = open(parentdir+'/specim/_version.py', \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n verstr = mo.group(1)\n else:\n raise RuntimeError(\"unable to find version in \" + parentdir + \"+specim/_version.py\")\n\n\nsetup(\n name = 'specim',\n version = verstr,#'0.1.3',\n author = 'Chris Fassnacht',\n author_email = 'cdfassnacht@ucdavis.edu',\n scripts=[],\n license = 'LICENSE.txt',\n description = 'Code for visualizing fits images and for'\n 'extracting and plotting spectra',\n #long_description = open('README.txt').read(),\n requires = ['numpy','scipy','astropy','matplotlib','cdfutils'],\n packages = ['specim', 'specim.imfuncs', 'specim.specfuncs'],\n #package_dir = {'':'src'},\n package_data = {'specim.specfuncs' : ['Data/*']}\n)\n","repo_name":"pmozumdar/specim","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"5287084071","text":"import unittest\nimport datetime as dt\nfrom functional_tests.TestCase import TestCase\nfrom functional_tests.homepage.HomePage import HomePage\n\nclass TestCase(TestCase):\n\n def setUp(self):\n super().setUp()\n self.create_user('voong.david@gmail.com', 'password')\n self.sign_in('voong.david@gmail.com', 'password')\n\ndef check_transactions(test_case, home_page, expected):\n\n transactions = home_page.get_transactions()\n test_case.assertEqual(len(expected), len(transactions))\n\n for t, exp in zip(transactions, expected):\n test_case.assertEqual(\n (t.date, t.size, t.description, t.balance),\n exp\n )\n\nclass TestUpdateTransaction(TestCase):\n\n def setUp(self):\n super().setUp()\n\n url = '{}/home?start=2018-01-01&end=2018-01-22'.format(self.live_server_url)\n self.driver.get(url)\n \n home_page = HomePage(self.driver)\n home_page.create_transaction(\n date=dt.date(2018, 1, 1),\n size=1,\n description='a',\n repeats='weekly',\n ends={'how': 'never_ends'})\n \n home_page.show_repeat_transactions_view()\n\n repeat_transactions = home_page.get_repeat_transactions()\n self.assertEqual(len(repeat_transactions), 1)\n\n rt = repeat_transactions[0]\n self.assertEqual(rt.start_date, dt.date(2018, 1, 1))\n self.assertEqual(rt.size, 1)\n self.assertEqual(rt.description, 'a')\n self.assertEqual(rt.frequency, 'weekly')\n self.assertEqual(rt.ends, 'never')\n\n self.repeat_transaction = rt\n self.home_page = home_page\n\n def check_transactions(self, expected):\n return check_transactions(self, self.home_page, expected)\n \n def test_make_transaction_earlier(self):\n\n # change start date to a week earlier\n rt = self.repeat_transaction\n rt.start_date = dt.date(2017, 12, 25)\n rt.save()\n\n home_page = self.home_page\n url = '{}/home?start=2017-12-25&end=2018-01-22'.format(self.live_server_url)\n self.driver.get(url)\n home_page.reload()\n \n expected = [\n (dt.date(2017, 12, 25), 1, 'a', '£1.00'),\n (dt.date(2018, 1, 1), 1, 'a', '£2.00'),\n (dt.date(2018, 1, 8), 1, 'a', '£3.00'),\n (dt.date(2018, 1, 15), 1, 'a', '£4.00'),\n (dt.date(2018, 1, 22), 1, 'a', '£5.00')\n ]\n\n self.check_transactions(expected)\n\n def test_make_transaction_later(self):\n\n # change 
start date to a week later\n rt = self.repeat_transaction\n rt.start_date = dt.date(2018, 1, 8)\n rt.save()\n\n self.home_page.reload()\n expected = [\n (dt.date(2018, 1, 8), 1, 'a', '£1.00'),\n (dt.date(2018, 1, 15), 1, 'a', '£2.00'),\n (dt.date(2018, 1, 22), 1, 'a', '£3.00'),\n ]\n self.check_transactions(expected)\n \n def test_change_size(self):\n\n # change size\n rt = self.repeat_transaction\n rt.size = 2\n rt.save()\n\n self.home_page.reload()\n\n expected = [\n (dt.date(2018, 1, 1), 2, 'a', '£2.00'),\n (dt.date(2018, 1, 8), 2, 'a', '£4.00'),\n (dt.date(2018, 1, 15), 2, 'a', '£6.00'),\n (dt.date(2018, 1, 22), 2, 'a', '£8.00')\n ]\n\n self.check_transactions(expected)\n\n def test_change_description(self):\n\n # change size\n rt = self.repeat_transaction\n rt.description = 'b'\n rt.save()\n\n self.home_page.reload()\n\n expected = [\n (dt.date(2018, 1, 1), 1, 'b', '£1.00'),\n (dt.date(2018, 1, 8), 1, 'b', '£2.00'),\n (dt.date(2018, 1, 15), 1, 'b', '£3.00'),\n (dt.date(2018, 1, 22), 1, 'b', '£4.00')\n ]\n \n self.check_transactions(expected)\n\n def test_change_end_criteria(self):\n\n # change date\n rt = self.repeat_transaction\n rt.ends = dt.date(2018, 1, 15)\n rt.save()\n\n self.home_page.reload()\n\n expected = [\n (dt.date(2018, 1, 1), 1, 'a', '£1.00'),\n (dt.date(2018, 1, 8), 1, 'a', '£2.00'),\n (dt.date(2018, 1, 15), 1, 'a', '£3.00'),\n ]\n\n self.check_transactions(expected)\n \n\nclass TestUpdateRepeatTransactionThousands(TestCase):\n\n def test(self):\n\n url = '{}/home?start=2018-01-01&end=2018-01-22'.format(self.live_server_url)\n self.driver.get(url)\n \n home_page = HomePage(self.driver)\n home_page.create_transaction(\n date=dt.date(2018, 1, 1),\n size=1000,\n description='a',\n repeats='weekly',\n ends={'how': 'ends_after_#_transactions', 'when': 2})\n \n home_page.show_repeat_transactions_view()\n\n repeat_transactions = home_page.get_repeat_transactions()\n self.assertEqual(len(repeat_transactions), 1)\n\n rt = repeat_transactions[0]\n self.assertEqual(rt.start_date, dt.date(2018, 1, 1))\n self.assertEqual(rt.size, 1000)\n self.assertEqual(rt.description, 'a')\n self.assertEqual(rt.frequency, 'weekly')\n self.assertEqual(rt.ends, dt.date(2018, 1, 8))\n\n # change date\n rt.ends = dt.date(2018, 1, 15)\n import time\n time.sleep(15)\n rt.save()\n\n home_page.reload()\n\n expected = [\n (dt.date(2018, 1, 1), 1000, 'a', '£1,000.00'),\n (dt.date(2018, 1, 8), 1000, 'a', '£2,000.00'),\n (dt.date(2018, 1, 15), 1000, 'a', '£3,000.00'),\n ]\n\n check_transactions(self, home_page, expected)\n\n \n","repo_name":"dvoong/voong_finance_3","sub_path":"functional_tests/homepage/test_repeat_transaction_update_and_deletion.py","file_name":"test_repeat_transaction_update_and_deletion.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14115346580","text":"# THIS was my first attempt at getting the numerical simulation up and running\r\n#theres a couple problems with this. 
One is I used Euler angles and the small angle approximation\r\n# that throws off the stability of the numerical integration and the small angles\r\n# are not valid after a certain period diverging from small angles\r\n\r\n\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom scipy.integrate import odeint\r\n\r\n# satellite parameters\r\nm = 1500 # kg\r\njx = 1440 # kg*m^2\r\njy = 2500\r\njz = 3850\r\n\r\nr_c = (6378.135 + 650) # km\r\nh_w = 1000 # kgm^2/s\r\nq1 = -.5\r\nq2 = -.5\r\nq3 = .5\r\nq4 = .5\r\n\r\nn = np.sqrt(3.986004e5/r_c**3)\r\n\r\n\r\ndef num_integrate(y, t, Jx, Jy, Jz, n, h):\r\n omega1, omega2, omega3, phi, theta, psi = y\r\n\r\n dydt = [1/jx * (n*(4*n*(Jz-Jy) + h)*phi + (n*(Jx-Jy+Jz) + h)*omega3),\r\n 1/jy * (3*n**2*(Jz-Jx)*theta),\r\n 1/jz * (n*(n*(Jx-Jy) + h)*psi - (n*(Jx-Jy+Jz) + h)*omega1),\r\n omega1,\r\n omega2,\r\n omega3]\r\n return dydt\r\n\r\n\r\n# phi = np.arctan2(2*(q4*q1 + q2*q3), 1 -2*(q1**2 + q2**2))\r\n# theta = np.arcsin(2*(q4*q2 - q3*q1))\r\n# psi = np.arctan2(2*(q4*q3 + q1*q2), 1-2*(q2**2 + q3**2))\r\n# print(180/np.pi*phi)\r\n# print(180/np.pi*theta)\r\n# print(180/np.pi*psi)\r\n\r\ny0 = [.01, .1, .01, -np.pi/2, 0, np.pi/2]\r\nperiod = 2*np.pi / np.sqrt(2.896004e5) * r_c**1.5\r\n#t = np.linspace(0, int(3*period), int(3*period))\r\nt = np.linspace(0, 1000, 1000)\r\n\r\nsol = odeint(num_integrate, y0, t, args=(jx, jy, jz, n, h_w))\r\n\r\nphi = sol[:, 3]\r\ntheta = sol[:, 4]\r\npsi = sol[:, 5]\r\n\r\nfor i in range(len(phi)):\r\n # bound upper range\r\n while phi[i] > np.pi:\r\n phi[i] = phi[i] - 2*np.pi\r\n while theta[i] > np.pi:\r\n theta[i] = theta[i] - 2*np.pi\r\n while psi[i] > np.pi:\r\n psi[i] = psi[i] + 2*np.pi\r\n # Bound lower range\r\n while phi[i] < -np.pi:\r\n phi[i] = phi[i] + 2 * np.pi\r\n while theta[i] < -np.pi:\r\n theta[i] = theta[i] + 2*np.pi\r\n while psi[i] < -np.pi:\r\n psi[i] = psi[i] + 2 * np.pi\r\n\r\nplt.plot(t, sol[:, 0], label=\"phidot\")\r\nplt.plot(t, sol[:, 1], label=\"thetadot\")\r\nplt.plot(t, sol[:, 2], label=\"psidot\")\r\nplt.title(\"Numerical integration First attempt: Angular rates\")\r\nplt.xlabel(\"Time (s)\")\r\nplt.ylabel(\" w (rad/s)\")\r\nplt.legend()\r\nplt.grid()\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\nplt.plot(t, sol[:, 3], label=\"phi\")\r\nplt.plot(t, sol[:, 4], label=\"theta\")\r\nplt.plot(t, sol[:, 5], label=\"psi\")\r\nplt.title(\"Numerical Integration: Attitude Angles\")\r\nplt.xlabel(\"Time (s)\")\r\nplt.ylabel(\"Angle (rad)\")\r\nplt.legend()\r\nplt.grid()\r\nplt.show()","repo_name":"stephent0987/AERO424_Spacecraft_dynamics","sub_path":"AERO424_HW3.py","file_name":"AERO424_HW3.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37409003593","text":"#!/usr/bin/python3\n\ndef unique(s):\n for c in range(0, len(s)):\n for d in range(c+1, len(s)):\n if s[c] == s[d]:\n return False\n return True\n\nbefore = [None] * 100\ndef permut(n,r):\n if n == -1:\n return [\"\"]\n else:\n if before[n-1] == None:\n before[n-1] = permut(n-1, r)\n\n\n res = []\n for i in r:\n if n > 4:\n print(i,n)\n for e in before[n-1]:\n candidate = i + e;\n if(unique(candidate)):\n res.append(candidate)\n\n return res\n\np = permut(9, [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", 
\"9\"])\nprint(len(p))\nprint(p[1000000])\n","repo_name":"superboum/code-bazaar","sub_path":"algo/euler/24/bruteforce.py","file_name":"bruteforce.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15405844562","text":"\"\"\"\r\n @name : b1030\r\n @version : 21.0105\r\n @author : zhangpeng96\r\n @pass_rate : p4 timeout\r\n\"\"\"\r\n\r\ndef bisect_right(a, x, lo, hi):\r\n while lo < hi:\r\n mid = (lo+hi)//2\r\n if x < a[mid]: hi = mid\r\n else: lo = mid+1\r\n return lo\r\n\r\n\r\nans = []\r\ncount, p = map(int, input().split())\r\ndigits = sorted(map(int, input().split()))\r\n# count, p = map(int, '10 8'.split())\r\n# digits = sorted(map(int, '2 3 20 4 5 1 6 7 8 9'.split()))\r\ndigits.sort()\r\n\r\nfor i, digit in enumerate(digits):\r\n ans.append(bisect_right(digits, digit*p, i, count)-i)\r\n\r\nprint(max(ans))","repo_name":"zhangpeng96/Programming-Ability-Practice","sub_path":"PAT-B/b1030/python-1030-stl-2.py","file_name":"python-1030-stl-2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32355983718","text":"# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# This program is made to investigate the Mandelbrot Set. This is done by\n# calculating and plotting the plane of numbers for which the Mandelbrot\n# function does not converge. It is the first part of the first assignment for\n# the Stochastic Simulation course on the UvA in the master Computational Science.\n#\n# Tristan Assenmacher and Natasja Wezel\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\nimport matplotlib.pyplot as plt\nfrom numba import jit\nimport numpy as np\nimport random\nimport time\nimport os\n\n\ndef main():\n\n # for each point we do 80 iterations to calculate whether it is in the set or not\n max_iterations_list = [100, 200, 500, 1000, 1500, 2000, 2500]\n max_iterations_list = [100]\n # discretization steps\n width, height = 20000, 20000\n\n print(\"Starting now............\")\n\n for max_iterations in max_iterations_list:\n print(max_iterations)\n\n # # zoom coordinates 1\n # real_min, real_max = -0.7463, -0.7413\n # im_min, im_max = 0.1102, 0.1152\n\n # plane = mandelbrot_set(real_min, real_max, im_min, im_max, width, height, max_iterations)\n # # calculate and plot the mandelbrot set\n # plot_mandelbrot(plane, max_iterations)\n\n # # zoom coordinates 2\n real_min, real_max = -0.74877, -0.74872\n im_min, im_max = 0.065053, 0.065103\n plane = mandelbrot_set(real_min, real_max, im_min, im_max, width, height, max_iterations)\n plot_mandelbrot(plane, max_iterations, real_min, real_max, im_min, im_max, width)\n\n # normal coordinates\n # real_min, real_max = -2, 1\n # im_min, im_max = -1.25, 1.25\n # plane = mandelbrot_set(real_min, real_max, im_min, im_max, width, height, max_iterations)\n\n # plot_mandelbrot(plane, max_iterations, real_min, real_max, im_min, im_max, width)\n\n\ndef plot_mandelbrot(plane, iters, real_min, real_max, im_min, im_max, width):\n \"\"\" Plots the set. 
\"\"\"\n\n colormaps = [plt.cm.magma, plt.cm.twilight, plt.cm.hot]\n # colormaps = [plt.cm.magma]\n\n for colormap in colormaps:\n plt.figure()\n\n # TODO: fix the x/y ticks (from -2,1 and from -1 to 1)\n colormap.set_under(color='black')\n plt.imshow(plane.T, origin='lower', cmap=colormap, vmin=0.0001)\n plt.ylabel(\"Imaginary axis\")\n plt.xlabel(\"Real axis\")\n plt.title(\"The Mandelbrot set \\niterations: \" + str(iters))\n\n # get current axes\n ax = plt.gca()\n\n # set x/y ticks\n x_ticks = np.linspace(real_min, real_max, 5)\n y_ticks = np.linspace(im_min, im_max, 5)\n locs = np.linspace(0, width, 5)\n\n plt.xticks(locs, x_ticks)\n plt.yticks(locs, y_ticks)\n\n if not os.path.isdir(\"figures\"):\n os.makedirs(\"figures\")\n\n try:\n plt.savefig(\"figures/mandelbrot_\" + str(time.time()) + \".png\")\n except ValueError:\n print(\"Everything is 0?\", iters)\n\n plt.close()\n\n@jit(nopython=True)\ndef not_in_mandelbrot(c, maxiter):\n \"\"\" Calculates whether a given complex number (c) is in the Mandelbrot Set\n or not. \"\"\"\n\n real = c.real\n imag = c.imag\n\n for n in range(maxiter):\n real2 = real * real\n imag2 = imag * imag\n\n # if abs(z) > 2, it is in the set: this is computationally expensive tho\n if real2 + imag2 > 4.0:\n return n\n\n # update z using the Mandelbrot formula.\n imag = 2 * real * imag + c.imag\n real = real2 - imag2 + c.real\n\n # if not in the set, return False\n return False\n\n\n@jit(nopython=True, parallel=True)\ndef mandelbrot_set(real_min, real_max, im_min, im_max, width, height, maxiter):\n \"\"\" For each point on a given grid, calculates whether it belongs to the\n Mandelbrot set or not. \"\"\"\n\n # define axis and plane\n real_axis = np.linspace(real_min, real_max, width)\n im_axis = np.linspace(im_min, im_max, height)\n plane = np.empty((width,height))\n\n # loop over each point in the plane and call the mandelbrot() function\n for i in range(width):\n if i % 100 ==0:\n print(\"progres: \", i, \"/\", width)\n\n for j in range(height):\n plane[i,j] = not_in_mandelbrot(real_axis[i] + 1j * im_axis[j], maxiter)\n\n return plane\n\nif __name__ == '__main__':\n main()\n","repo_name":"NatasjaWezel/StochasticSimulation","sub_path":"assignment1/mandelbrot_plots.py","file_name":"mandelbrot_plots.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42430047235","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nimport torch.optim as optim\nfrom skorch import NeuralNetClassifier\nimport pandas as pd\nimport numpy as np\nfrom skorch.utils import check_indexing\nfrom skorch.utils import multi_indexing\nfrom skorch.utils import to_numpy\nfrom skorch.utils import is_pandas_ndframe\nfrom skorch.utils import flatten\nfrom functools import partial\nfrom scipy import sparse\n\nclass RegressionModel(nn.Module):\n#class RegressionModel(pl.LightningModule):\n def __init__(self, emb_szs, n_cont, emb_drop, out_sz, szs, drops, y_range, use_bn=True):\n super().__init__()\n\n # embeddings\n for i, (c, s) in enumerate(emb_szs): assert c > 1, f\"cardinality must be >=2, got emb_szs[{i}]: ({c},{s})\"\n self.embs = nn.ModuleList([nn.Embedding(c+1, s) for c, s in emb_szs])\n\n for emb in self.embs: emb_init(emb)\n n_emb = sum(e.embedding_dim for e in self.embs)\n self.n_emb, self.n_cont = n_emb, n_cont\n\n\n # linear & batch norm/group norm)\n szs = [n_emb + n_cont] + szs\n self.lins = nn.ModuleList([nn.Linear(szs[i], 
szs[i + 1]) for i in range(len(szs) - 1)])\n self.bns = nn.ModuleList([nn.GroupNorm(1, sz) for sz in szs[1:]])\n for o in self.lins: nn.init.kaiming_normal_(o.weight.data)\n self.outp = nn.Linear(szs[-1], out_sz)\n nn.init.kaiming_normal_(self.outp.weight.data)\n\n # output\n self.emb_drop = nn.Dropout(emb_drop)\n self.drops = nn.ModuleList([nn.Dropout(drop) for drop in drops])\n self.bn = nn.GroupNorm(1,n_cont)\n self.use_bn, self.y_range = use_bn, y_range\n self.activation = nn.Sigmoid()\n\n def forward(self, x_cat, x_cont):\n # Split one output into two\n # x_cat = D1.get_X_cat()\n# x_cont = D1.get_X_cont()\n\n\n # embedding for categorical variables\n if self.n_emb != 0:\n x = [e(x_cat[:, i]) for i, e in enumerate(self.embs)]\n x = torch.cat(x, 1)\n x = self.emb_drop(x)\n\n # embedding for continuous variables\n if self.n_cont != 0:\n x2 = self.bn(x_cont.float())\n x = torch.cat([x, x2], 1) if self.n_emb != 0 else x2\n for l, d, b in zip(self.lins, self.drops, self.bns):\n x = F.relu(l(x))\n if self.use_bn: x = b(x)\n x = d(x)\n\n # regression layer\n x = self.outp(x)\n if self.y_range:\n x = self.activation(x)\n x = x * (self.y_range[1] - self.y_range[0])\n x = x + self.y_range[0]\n# x = torch.where(torch.isnan(x), torch.zeros_like(x), x)\n# x = torch.where(torch.isinf(x), torch.zeros_like(x), x)\n return x.squeeze()\n\n\n def configure_optimizers(self):\n # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0)\n optimizer = optim.SGD(model.parameters(), lr=args.lr)\n return optimizer\n\n def BCELoss(self, output, target):\n return nn.BCELoss()\n\n def name(self):\n return \"RegressionModel\"\n\n\n def emb_init(x):\n x = x.weight.data\n sc = 2/(x.size(1)+1)\n x.uniform_(-sc,sc)\n\n# nope to tuples\n# A class for sklearns predict and fit in pytorch\nclass SampleWeightNeuralNet(NeuralNetClassifier):\n def __init__(self, *args, criterion__reduce=False, **kwargs):\n #def __init__(self, *args, criterion__reduce=False, **kwargs):\n #super().__init__(*args, criterion__reduce = criterion__reduce, **kwargs)\n super().__init__(*args, criterion__reduce=criterion__reduce, **kwargs)\n\n\n\n def fit(self, X_cat, X_cont, y, sample_weight=None):\n X = torch.cat([X_cat, X_cont], 1)\n # X = D1.get_X()\n # y = D1.get_y()\n # X_cat = D1.get_X_cat()\n # X_cont = D1.get_X_cont()\n\n if isinstance(X, (pd.DataFrame, pd.Series)) :\n #//category and data point\n # X_tuple = (X_cat, X_cont)\n #cat_array = X_tuple[0].numpy()\n #cont_array = X_tuple[1].numpy()\n #X_tuple = (cat_array, cont_array)#\n\n #X_cat = X_cat.to_numpy().astype('float32')\n X = X.to_numpy().astype('float32')\n #if isinstance(X_cont, (pd.DataFrame, pd.Series)):\n # X_cont = X_cont.to_numpy().astype('float32')\n if isinstance(y, (pd.DataFrame, pd.Series)):\n y = y.to_numpy()\n if sample_weight is not None and isinstance(sample_weight, (pd.DataFrame, pd.Series)):\n sample_weight = sample_weight.to_numpy()\n y = y.reshape([-1,1])\n sample_weight = sample_weight if sample_weight is not None else np.ones_like(y)\n #X_cat = {'X':X_cat, 'sample_weight': sample_weight}\n #X_cont = {'X':X_cont, 'sample_weight': sample_weight}\n #X = {**X_cat, **X_cont}\n X = {'X':X, 'sample_weight': sample_weight}\n return super().fit(X, y)\n\n def predict(self, X):\n if isinstance(X, (pd.DataFrame, pd.Series)):\n X = X.to_numpy().astype('float32')\n return (super().predict_proba(X) > 0.5).astype(np.float)\n\n def get_loss(self, y_pred, y_true, X, *args, **kwargs):\n loss_unreduced = super().get_loss(y_pred, y_true.float(), X, *args, **kwargs)\n 
sample_weight = X['sample_weight']\n sample_weight = sample_weight.to(loss_unreduced.device).unsqueeze(-1)\n #sample weights on GPU\n loss_reduced = (sample_weight * loss_unreduced).mean()\n return loss_reduced\n\n'''\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n net = SampleWeightNeuralNet(\n RegressionModel,\n max_epochs = 20,\n #optimizer = optim.Adam,\n lr = 0.001,\n #batch_size = 512,\n #train_split = None,\n iterator_train_shuffle = True,\n criterion = nn.BCELoss,\n device = device )\n\n def fit(X, y):\n fit = net.fit(X,y)\n return fit\n\n def pred(y):\n y_pred = net.predict(X)\n return y_pred\n'''\n\ndef emb_init(x):\n x = x.weight.data\n sc = 2/(x.size(1)+1)\n x.uniform_(-sc,sc)\n\ndef _apply_to_data(data, func, unpack_dict=False):\n \"\"\"Apply a function to data, trying to unpack different data\n types.\n \"\"\"\n apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)\n\n if isinstance(data, dict):\n if unpack_dict:\n return [apply_(v) for v in data.values()]\n return {k: apply_(v) for k, v in data.items()}\n\n if isinstance(data, (list, tuple)):\n try:\n # e.g.list/tuple of arrays\n return [apply_(x) for x in data]\n except TypeError:\n return func(data)\n\n return func(data)\n\ndef _is_sparse(x):\n try:\n return sparse.issparse(x) or x.is_sparse\n except AttributeError:\n return False\n\ndef _len(x):\n if _is_sparse(x):\n return x.shape[0]\n return len(x)\n\ndef get_len(data):\n lens = [_apply_to_data(data, _len, unpack_dict=True)]\n lens = list(flatten(lens))\n len_set = set(lens)\n if len(len_set)!=1:\n raise ValueError(\"Dataset doesn't have consistent lengths\")\n return list(len_set)[0]\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, X_cat, X_cont, y, length= None):\n self.X_cat = X_cat\n self.X_cont = X_cont\n self.y = y\n\n X = torch.cat([X_cat, X_cont], 1)\n print(X)\n print(\"####### THIS IS X ^^^ #########\")\n self.X = X\n self.X_indexing = check_indexing(X)\n\n self.y_indexing = check_indexing(y)\n\n # self.X_cat_indexing = check_indexing(X_cat)\n # self.X_cont_indexing = check_indexing(X_cont)\n # self.X_cat_is_ndframe = is_pandas_ndframe(X_cat)\n # self.X_cont_is_ndframe = is_pandas_ndframe(X_cont)\n self.X_is_ndframe = is_pandas_ndframe(X)\n\n\n if length is not None:\n self._len = length\n return\n\n len_X = get_len(X)\n #len_X_cont = get_len(X_cont)\n if y is not None:\n len_y = get_len(y)\n if len_y!= len_X:\n print(\"len_y: \", len_y)\n print(\"\\n\")\n print(\"len_X: \", len_X)\n raise ValueError(\"Xs and y have inconsistent lengths\")\n self._len = len_X\n\n def __len__(self):\n return len(self._len)\n\n def transform(self, X, y):\n y = torch.Tensor([0]) if y is None else y\n if sparse.issparse(X):\n X = X.toarray().squeeze(0)\n return X, y\n\n def __getitem__(self, i):\n X, y = self.X, self.y\n if self.X_is_ndframe:\n X = {k: X[k].values.reshape(-1,1) for k in X}\n #if self.X_cont_is_ndframe:\n # X_cont = {k:X_cont[k].values.reshape(-1,1) for k in X_cont}\n Xi = multi_indexing(X, i, self.X_indexing)\n #X_conti = multi_indexing(X_cont, i, self.X_cont_indexing)\n yi = multi_indexing(y, i, self.y_indexing)\n return self.transform(Xi, yi)\n\n def get_X(self):\n return(self.X)\n\n def get_y(self):\n return(self.y)\n\n def get_X_cat(self):\n return (self.X_cat)\n\n def get_X_cont(self):\n return 
(self.X_cont)\n","repo_name":"raphaelletseng/bias-mitigation-sgd","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41502255406","text":"\"\"\"\nSimple demo of error handling in Webber DAGs:\n\n- Tracebacks are printed.\n\n- Dependent tasks are skipped.\n\n- The DAG continues to execute independent tasks.\n\"\"\"\nimport sys\nimport webber\n\ndef erroneous():\n \"\"\"Force an exit.\"\"\"\n print(\"I am an error.\")\n sys.exit(1)\n\ndef independent():\n \"\"\"Make a statement.\"\"\"\n print(\"I am independent.\")\n\ndef dependent():\n \"\"\"Make a statement (if you can!)\"\"\"\n print(\"I am dependent.\")\n\nif __name__ == \"__main__\":\n\n dag = webber.DAG()\n\n err_event: str = dag.add_node(erroneous)\n ind_event: str = dag.add_node(independent)\n dep_event: str = dag.add_node(dependent)\n\n _ = dag.add_edge(err_event, dependent)\n _ = dag.add_edge(independent, dependent)\n\n dag.execute()\n","repo_name":"WebberTeam/Webber","sub_path":"examples/dag_err.py","file_name":"dag_err.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"39571072202","text":"# mac_change/consumers.py\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nimport json, subprocess, re\n\nclass MacChange(AsyncWebsocketConsumer):\n\n async def connect(self):\n await self.accept()\n\n async def disconnect(self, close_code):\n print(\"disconnect\", close_code)\n pass\n\n async def receive(self, text_data):\n text_data_json = json.loads(text_data)\n\n if \"reset_mac\" in text_data:\n result = subprocess.check_output([\"ethtool\", \"-P\", \"eth0\"], encoding=\"UTF-8\")\n mac_address = re.findall(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", result)[0]\n await self.send_mac(\"eth0\", mac_address)\n elif \"interface\" in text_data and \"new_mac\" in text_data and len(text_data_json[\"interface\"]) == 4 and len(text_data_json[\"new_mac\"]) == 17:\n print(\"recieve\", text_data_json[\"interface\"], text_data_json[\"new_mac\"])\n\n await self.send_mac(text_data_json[\"interface\"], text_data_json[\"new_mac\"])\n\n else:\n await self.send(text_data=json.dumps({\n 'error': \"Please, fill all fields correctly\"\n }))\n\n async def change_mac(self, interface, new_mac):\n await self.send(text_data=json.dumps({\n 'message': \"[+] Changing MAC address for \" + interface + \" to \" + new_mac\n }))\n print(\"[+] Changing MAC address for \" + interface + \" to \" + new_mac)\n\n subprocess.call([\"ifconfig\", interface, \"down\"])\n subprocess.call([\"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n subprocess.call([\"ifconfig\", interface, \"up\"])\n\n async def get_current_mac(self, interface):\n ifconfig_result = subprocess.check_output([\"ifconfig\", interface], encoding=\"UTF-8\")\n mac_address_search_result = re.search(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", ifconfig_result)\n\n if mac_address_search_result:\n return mac_address_search_result.group(0)\n else:\n await self.send(text_data=json.dumps({\n 'error': \"[-] Could not read MAC address \"\n }))\n print(\"[-] Could not read MAC address \")\n\n async def send_mac(self, interface=\"eth0\", new_mac=\"04:D4:C4:E6:E4:F3\"):\n current_mac = str(await self.get_current_mac(interface))\n if current_mac != \"None\":\n await self.send(text_data=json.dumps({\n 'message': \"Current MAC: \" + current_mac\n }))\n 
print(\"Current MAC: \" + current_mac)\n await self.change_mac(interface, new_mac)\n\n current_mac = await self.get_current_mac(interface)\n print(current_mac, new_mac)\n if str(current_mac).upper() == new_mac.upper():\n await self.send(text_data=json.dumps({\n 'message': \"[+] MAC address was successfylly changed to \" + str(current_mac)\n }))\n else:\n await self.send(text_data=json.dumps({\n 'error': \"[-] MAC address did not get changed.\"\n }))\n","repo_name":"ramapitecusment/hacktool_web_application_python","sub_path":"mac_change/source_code/MacChange.py","file_name":"MacChange.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13839417209","text":"# %%\nimport pandas as pd\n\nshipment = pd.read_csv('https://raw.githubusercontent.com/MIDS-at-Duke/pds2021-opioids-team-2-ids720/data_merging/20_intermediate_files/merged_pop_and_ship_and_fips.csv?token=AVKGWHYI27VQWUKTL76WBFDBUZJ6O')\n\n# aggregate the data by state and year\nship_grouped = shipment.groupby(['BUYER_STATE','Year'], as_index= False)[['MME', 'Population']].sum()\n\n# add a calculation for shipments per capita\nship_grouped['ships_per_cap'] = ship_grouped['MME']/ship_grouped['Population']\n\n\n\n\n # %%\n# subset the data for only Florida\ntreatment_state = ship_grouped[ship_grouped['BUYER_STATE']=='FL']\n# subset the data for only the control states\ncontrols = ['OR','NV','SC']\ncontrol_states = ship_grouped[ship_grouped['BUYER_STATE'].isin(controls)]\n\n# %%\n# specify the years needed before the policy change\nyear = [2006, 2007, 2008, 2009]\n# create new dataframe with only data from those years\npre_FL_ship = treatment_state.loc[treatment_state['Year'].isin(year)]\npost_FL_ship = treatment_state.loc[~treatment_state['Year'].isin(year)]\n\npre_crtl_ship = control_states.loc[control_states['Year'].isin(year)]\npost_crtl_ship = control_states.loc[~control_states['Year'].isin(year)]\n\n#%%\npre_FL_ship.head()\n\n# %%\nprint(\"pre-policy FL sum = \" + str(pre_FL_ship[\"ships_per_cap\"].sum()))\nprint(\"post-policy FL sum = \" + str(post_FL_ship[\"ships_per_cap\"].sum()))\nprint(\"pre-policy control sum = \" + str(pre_crtl_ship[\"ships_per_cap\"].sum()))\nprint(\"post-policy control sum = \" + str(post_crtl_ship[\"ships_per_cap\"].sum()))\n\n# %%\npre_FL_ship.describe()\n\n# %%\npost_FL_ship.describe()\n\n# %%\npre_crtl_ship.describe()\n\n# %%\npost_crtl_ship.describe()\n\n\n\n# %%\n","repo_name":"MIDS-at-Duke/pds2021-opioids-team-2-ids720","sub_path":"10_code/shipment_summary_stats.py","file_name":"shipment_summary_stats.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"74653562813","text":"##exercise 3:\nglossary = {\"print\":\"prints string or value\",\n \"list\":\"stores multiple values\",\n \"loop\":\"repeated execution\",\n \"input\":\"takes input\",\n \"dictionary\":\"stores both keys and value\"}\nfor x in glossary:\n print(x + \":\" + glossary[x])\nprint(\"\\n\\nNew Keys:\\n\") \nglossary.update({\"function\": \"perfom specific tasks\"})\nglossary.update({\"operator\": \"a symbol used for operations\"})\nglossary.update({\"operand\": \"the values in operation\"})\nglossary.update({\"control flow\": \"decision to change flow of program\"})\nglossary.update({\"boolean\": \"True/false,1/0,Yes/No\"})\nfor x in glossary:\n print(x + \":\" + glossary[x]) 
","repo_name":"Code-Lab-1/programming-skills-portfolio-mqasimkhan143","sub_path":"Chapter 5- Dictionaries/Exercises/exercise 3.py","file_name":"exercise 3.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30108556038","text":"from sys import stdin\ninput = stdin.readline\n\nn = int(input())\nnumbers = list(map(int,input().split()))\n\nm = int(input())\nfinds = list(map(int,input().split()))\n\nre_dict = dict()\nfor i in range(n):\n if numbers[i] in re_dict:\n re_dict[numbers[i]] += 1\n else:\n re_dict[numbers[i]] = 1\n\nresult = [0] * m\nfor i in range(m):\n if finds[i] in re_dict:\n result[i] = re_dict[finds[i]]\nprint(*result)","repo_name":"hs-ryu/TIL","sub_path":"python/알고리즘/baek/수학/10816_숫자카드2.py","file_name":"10816_숫자카드2.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15784419017","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('New1.jpg')\nimg = cv2.GaussianBlur(img, (7,7), 0)\ncimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nret3,normal = cv2.threshold(cimg,150,255,cv2.THRESH_BINARY)\ninvnorm = cv2.bitwise_not(normal)\n\n\t\t\t\ncv2.namedWindow('Image' ,cv2.WINDOW_NORMAL)\ncv2.resizeWindow('Image', 600,600)\ncv2.imshow('Image', normal)\ncv2.waitKey(0)\n\ncircles = cv2.HoughCircles(normal,cv2.HOUGH_GRADIENT,2,500,\n param1=400,param2=70,minRadius=100,maxRadius=500)\n\nprint(circles)\ncircles = np.uint16(np.around(circles))\nfor i in circles[0,:]:\n # draw the outer circle\n cv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2)\n # draw the center of the circle\n cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\n\ncv2.namedWindow('Image' ,cv2.WINDOW_NORMAL)\ncv2.resizeWindow('Image', 600,600)\ncv2.imshow('Image', img)\ncv2.waitKey(0)\n\n","repo_name":"gbiswas0/ZoneDetection","sub_path":"Houghtest.py","file_name":"Houghtest.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2918638662","text":"import os\nimport sys\nfrom multiprocessing import Value, Lock, Queue\n\nsys.path.append(\"../tuna\")\nsys.path.append(\"tuna\")\n\nthis_path = os.path.dirname(__file__)\n\nfrom tuna.fin_eval import FinEvaluator\nfrom tuna.sql import DbCursor\nfrom tuna.dbBase.sql_alchemy import DbSession\nfrom tuna.tables import DBTables\nfrom dummy_machine import DummyMachine\nfrom tuna.tables import ConfigType\nfrom utils import CfgImportArgs\n\n\ndef test_fin_evaluator():\n res = None\n\n num_gpus = Value('i', 1)\n v = Value('i', 0)\n e = Value('i', 0)\n\n args = CfgImportArgs()\n config_type = ConfigType.convolution\n dbt = DBTables(config_type=args.config_type)\n with DbSession() as session:\n dbt.session_id = session.query(dbt.job_table.session).filter(dbt.job_table.state=='compiled')\\\n .filter(dbt.job_table.reason=='tuna_pytest_fin_builder').first().session\n\n kwargs = {\n 'machine': DummyMachine(False),\n 'gpu_id': 0,\n 'num_procs': num_gpus,\n 'barred': v,\n 'bar_lock': Lock(),\n 'envmt': [\"MIOPEN_LOG_LEVEL=7\"],\n 'reset_interval': False,\n 'app_test': False,\n 'label': 'tuna_pytest_fin_builder',\n 'fin_steps': ['miopen_find_eval'],\n 'use_tuner': False,\n 'job_queue': Queue(),\n 'queue_lock': Lock(),\n 'fetch_state': ['compiled'],\n 'end_jobs': e,\n 'session_id': dbt.session_id\n }\n\n # test get_job true branch\n fin_eval = FinEvaluator(**kwargs)\n ans = fin_eval.get_job('compiled', 
'evaluating', False)\n assert (ans is True)\n\n with DbSession() as session:\n count = session.query(dbt.job_table).filter(dbt.job_table.state=='evaluating')\\\n .filter(dbt.job_table.reason=='tuna_pytest_fin_builder').count()\n assert (count == 1)\n\n # test get_fin_input\n file_name = fin_eval.get_fin_input()\n assert (file_name)\n\n # test check gpu with \"bad\" GPU\n # the job state will set back to \"compiled\" from \"evaluating\"\n fin_eval.check_gpu()\n with DbSession() as session:\n count = session.query(dbt.job_table).filter(dbt.job_table.state=='evaluating')\\\n .filter(dbt.job_table.reason=='tuna_pytest_fin_builder').count()\n assert (count == 0)\n\n # test check gpu with \"good\" GPU\n # the job state will remain 'evaluated'\n ans = fin_eval.get_job('compiled', 'evaluated', False)\n assert (ans is True)\n fin_eval.machine.set_gpu_state(True)\n fin_eval.check_gpu()\n with DbSession() as session:\n count = session.query(dbt.job_table).filter(dbt.job_table.state=='evaluated')\\\n .filter(dbt.job_table.reason=='tuna_pytest_fin_builder').count()\n assert (count == 1)\n\n with DbSession() as session:\n count = session.query(dbt.job_table).filter(dbt.job_table.session==dbt.session_id)\\\n .filter(dbt.job_table.state=='evaluated')\\\n .filter(dbt.job_table.reason=='tuna_pytest_fin_builder').delete()\n\n #test get_job false branch\n fin_eval = FinEvaluator(**kwargs)\n ans = fin_eval.get_job('new', 'evaluating', False)\n assert (ans is False)\n","repo_name":"technicalgrp89/MITuna","sub_path":"tests/test_fin_evaluator.py","file_name":"test_fin_evaluator.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73067727931","text":"#!/usr/bin/python3\n\n\ndef format_file(rows):\n commands = []\n for row in rows:\n row = row.split(\"->\")\n move = []\n for coordinate in row:\n coordinate=coordinate.strip()\n coordinate=coordinate.split(\",\")\n move.append(list(map(int,coordinate)))\n commands.append(move)\n return commands\n\nclass Board:\n matrix = []\n \n def __init__(self):\n self.matrix = [[0 for x in range(1000)] for y in range(1000)]\n\n def mark(self, command):\n if command[0][0] == command[1][0]:\n x = command[0][0]\n start = min([command[0][1],command[1][1]])\n end = max([command[0][1],command[1][1]])\n for y in range (start,end+1):\n self.matrix[x][y] += 1\n elif command[0][1] == command[1][1]:\n y = command[0][1]\n start = min([command[0][0],command[1][0]])\n end = max([command[0][0],command[1][0]])\n for x in range (start,end+1):\n self.matrix[x][y] += 1\n else:\n print(f\"Line Not Horiz or Vert : {command}\")\n return self.matrix\n\n def overlap(self):\n result=0\n for x in range(len(self.matrix)):\n for y in range(len(self.matrix)):\n if self.matrix[x][y] > 1 :\n result += 1\n return result\n\n\nwith open('input.txt') as f:\n rows = f.readlines()\n commands = format_file(rows)\n print(f\"Commands : {commands}\")\n board = Board()\n for command in commands:\n board.mark(command)\n print(f\"Result : {board.overlap()}\")\n","repo_name":"LucVanw/AdventOfCode2021","sub_path":"05/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73816710653","text":"from math import radians\nfrom pytouhou.vm import spawn_enemy\nfrom pytouhou.game import NextStage\n\n\ndef disk(enemy, game):\n if enemy.frame == 0:\n enemy.set_anim(0)\n\n enemy.set_hitbox(32, 32)\n\n 
enemy.death_anim = 1\n\n enemy.update_mode = 0\n enemy.angle, enemy.speed = radians(90), 1.5\n\n elif enemy.frame == 10000:\n enemy.removed = True\n\n\ndef boss(enemy, game):\n if enemy.frame == 0:\n enemy.set_anim(3)\n enemy.set_hitbox(8, 32)\n enemy.death_flags = 1\n enemy.set_boss(True)\n\n enemy.timeout = 20 * 60\n enemy.timeout_callback.enable(some_spellcard, (enemy, game))\n\n enemy.low_life_trigger = 0x40\n enemy.low_life_callback.enable(some_spellcard, (enemy, game))\n\n elif enemy.frame == 10000:\n enemy.removed = True\n\n if enemy.frame % 10 == 0:\n enemy.set_bullet_attributes(67, 0, 0, 3 if game.spellcard is not None else 1, 1, 6., 6., 0., radians(3), 0)\n\n\ndef some_spellcard(enemy, game):\n enemy.life = 0x40\n enemy.difficulty_coeffs = (-.5, .5, 0, 0, 0, 0)\n game.change_bullets_into_star_items()\n game.spellcard = (42, 'Some Spellcard', 0)\n game.enable_spellcard_effect()\n\n enemy.timeout = 10 * 60\n enemy.timeout_callback.enable(on_boss_death, (enemy, game))\n enemy.death_callback.enable(on_boss_death, (enemy, game))\n enemy.low_life_callback.disable()\n\n\ndef on_boss_death(enemy, game):\n enemy.timeout_callback.disable()\n enemy.death_callback.disable()\n game.disable_spellcard_effect()\n enemy.removed = True\n\n raise NextStage\n\n\ndef stage1(game):\n if game.frame == 0x10:\n spawn_enemy(game, disk, x=50., y=-32., life=20, score=300)\n elif game.frame == 0x20:\n spawn_enemy(game, disk, x=60., y=-32., life=20, score=300)\n elif game.frame == 0x30:\n spawn_enemy(game, disk, x=70., y=-32., life=20, score=300)\n elif game.frame == 0x40:\n spawn_enemy(game, disk, x=80., y=-32., life=20, score=300)\n elif game.frame == 0x50:\n spawn_enemy(game, disk, x=90., y=-32., life=20, score=300)\n elif game.frame == 0x60:\n spawn_enemy(game, disk, x=100., y=-32., life=20, score=300)\n elif game.frame == 0x100:\n spawn_enemy(game, boss, x=192., y=64., life=1000, item=-2, score=10000)\n","repo_name":"GovanifY/PyTouhou","sub_path":"pytouhou/games/sample/enemies.py","file_name":"enemies.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"38120669992","text":"from django.core.cache import cache\nfrom django.shortcuts import render, redirect\nfrom .models import *\nimport json\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\ndef index(request):\n pizza = Pizza.objects.all()\n orders = Order.objects.filter(user = request.user)\n context = {'pizza' : pizza , 'orders' : orders}\n return render(request,'index.html',context)\n\ndef details(request):\n return render(request,'details.html')\n\n\ndef order(request , order_id):\n if cache.get(order_id):\n print('data from Cache Redis')\n order = cache.get(order_id)\n else:\n order = Order.objects.filter(order_id=order_id).first()\n print('data from DB')\n cache.set(order_id, order)\n if order is None:\n return redirect('/')\n \n context = {'order' : order}\n return render(request , 'details.html', context)\n \n@csrf_exempt\ndef order_pizza(request):\n user = request.user\n data = json.loads(request.body)\n \n try:\n pizza = Pizza.objects.get(id=data.get('id'))\n order = Order(user=user, pizza=pizza , amount = pizza.price)\n order.save()\n return JsonResponse({'message': 'Success'})\n \n except Pizza.DoesNotExist:\n return JsonResponse({'error': 'Something went 
wrong'})","repo_name":"pravin130794/pythonDjangoOrderTracking","sub_path":"OrderTracking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72753260093","text":"from flask import Flask, render_template, request\nimport pickle\n\napp = Flask(__name__)\n# load the model\nmodel = pickle.load(open('savedmodel.sav', 'rb'))\n\n@app.route('/')\ndef home():\n result = ''\n return render_template('index.html', **locals())\n\n\n@app.route('/predict', methods=['POST', 'GET'])\ndef predict():\n sepal_length = float(request.form['sepal_length'])\n sepal_width = float(request.form['sepal_width'])\n petal_length = float(request.form['petal_length'])\n petal_width = float(request.form['petal_width'])\n result = model.predict([[sepal_length, sepal_width, petal_length, petal_width]])[0]\n return render_template('index.html', **locals())\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"aswintechguy/Machine-Learning-Projects","sub_path":"Iris dataset analysis - Classification/Deploy model using Flask/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":565,"dataset":"github-code","pt":"78"} +{"seq_id":"41900773660","text":"# -*- coding: utf-8 -*-\n# (c) 2025 Alfredo de la Fuente - AvanzOSC\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\nimport openerp.tests.common as common\n\n\nclass TestSaleOrderLineServiceView(common.TransactionCase):\n\n def setUp(self):\n super(TestSaleOrderLineServiceView, self).setUp()\n self.sale_model = self.env['sale.order']\n self.wiz_model = self.env['wiz.delete.sale.line']\n service_product = self.env.ref('product.product_product_consultant')\n sale_vals = {\n 'name': 'sale order 1',\n 'partner_id': self.ref('base.res_partner_1'),\n 'partner_shipping_id': self.ref('base.res_partner_1'),\n 'partner_invoice_id': self.ref('base.res_partner_1'),\n 'pricelist_id': self.env.ref('product.list0').id,\n }\n sale_line_vals = {\n 'product_id': service_product.id,\n 'name': service_product.name,\n 'product_uom_qty': 7,\n 'product_uos_qty': 7,\n 'product_uom': service_product.uom_id.id,\n 'price_unit': service_product.list_price}\n sale_vals['order_line'] = [(0, 0, sale_line_vals)]\n self.sale_order = self.sale_model.create(sale_vals)\n\n def test_sale_order_line_service_view(self):\n wiz = self.wiz_model.with_context(\n active_ids=self.sale_order.ids).create({})\n wiz.lines.write({'delete_record': True})\n wiz.button_delete_sale_lines()\n self.assertEqual(\n len(self.sale_order.order_line), 0, 'Sale order with line')\n\n def test_sale_order_line_service(self):\n self.assertEquals(len(self.sale_order.order_line), 1)\n for line in self.sale_order.order_line:\n line.invalidate_cache()\n self.assertEquals(self.sale_order.order_line,\n self.sale_order.service_order_line |\n self.sale_order.no_service_order_line)\n self.assertEquals(len(self.sale_order.service_order_line), 1)\n","repo_name":"alfredoavanzosc/sale-addons","sub_path":"sale_order_line_service_view/tests/test_sale_order_line_service_view.py","file_name":"test_sale_order_line_service_view.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"2158910030","text":"from typing import NamedTuple\n\nimport gwv.filters as filters\nfrom gwv.helper import categorize, cjk_sources, is_gokan_kanji_cp, \\\n 
is_togo_kanji_cp\nfrom gwv.validatorctx import ValidatorContext\nfrom gwv.validators import Validator, ValidatorErrorEnum, error_code\n\n\nclass RelatedValidatorError(ValidatorErrorEnum):\n @error_code(\"0\")\n class WRONG_RELATED(NamedTuple):\n \"\"\"間違った関連字\"\"\"\n related: str\n correct_related: str\n\n @error_code(\"1\")\n class MISSING_RELATED(NamedTuple):\n \"\"\"関連字なし\"\"\"\n correct_related: str\n\n @error_code(\"2\")\n class ENTITY_NOT_FOUND(NamedTuple):\n \"\"\"実体が存在しない\"\"\"\n entity_name: str\n\n @error_code(\"10\")\n class WRONG_ENTITY_RELATED(NamedTuple):\n \"\"\"実体の関連字が違う\"\"\"\n entity_name: str\n entity_related: str\n correct_related: str\n\n @error_code(\"11\")\n class MISSING_ENTITY_RELATED(NamedTuple):\n \"\"\"実体が関連字なし\"\"\"\n entity_name: str\n correct_related: str\n\n\nE = RelatedValidatorError\n\n\nclass RelatedValidator(Validator):\n\n @filters.check_only(+filters.is_of_category({\"ucs-kanji\"}))\n def is_invalid(self, ctx: ValidatorContext):\n expected_related = \"u\" + ctx.category_param[1][0]\n if is_gokan_kanji_cp(int(expected_related[1:], 16)):\n u = cjk_sources.get(\n expected_related, cjk_sources.COLUMN_COMPATIBILITY_VARIANT)\n if u is None:\n return False\n expected_related = \"u\" + u[2:].lower()\n\n if ctx.glyph.related != \"u3013\" and \\\n expected_related != ctx.glyph.related:\n # 間違った関連字\n return E.WRONG_RELATED(ctx.glyph.related, expected_related)\n\n if ctx.glyph.entity_name is not None:\n entity_category, entity_param = categorize(ctx.glyph.entity_name)\n if entity_category == \"ucs-kanji\" and \\\n is_togo_kanji_cp(int(entity_param[0], 16)):\n return False\n if ctx.glyph.entity_name not in ctx.dump:\n # 実体が存在しない\n return E.ENTITY_NOT_FOUND(ctx.glyph.entity_name)\n\n related = ctx.entity.related\n if related == \"u3013\":\n # 実体が関連字なし\n return E.MISSING_ENTITY_RELATED(\n ctx.glyph.entity_name, expected_related)\n\n if expected_related != related:\n # 実体の関連字が違う\n return E.WRONG_ENTITY_RELATED(\n ctx.glyph.entity_name, related, expected_related)\n\n elif ctx.glyph.related == \"u3013\":\n return E.MISSING_RELATED(expected_related) # 関連字なし\n\n return False\n","repo_name":"kurgm/gwv","sub_path":"gwv/validators/related.py","file_name":"related.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73837089850","text":"#oxford_api = '65e664d4'\r\n\r\n#python -m pip install requests\r\n\r\nimport requests\r\nimport json\r\nimport pprint\r\nimport flask\r\n\r\napp_id = '65e664d4'\r\napp_key = '7ede5606bf2c1551bafc98676fd4bac9'\r\n\r\nlanguage = 'en'\r\nword_id = 'selfie'\r\n\r\nurl = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word_id.lower()\r\n\r\nr = requests.get(url, headers = {'app_id' : app_id, 'app_key' : app_key})\r\nprint(\"code {}\\n\".format(r.status_code))\r\nprint(\"text \\n\" + r.text)\r\n#print(\"json \\n\" + json.dumps(r.json()))\r\n\r\ndata = json.dumps(r.json())\r\n\r\njsonToPython = json.loads(data)\r\n\r\n#The pp will make indent the json file so it is easily readable\r\npp = pprint.PrettyPrinter(indent=4)\r\npp.pprint (jsonToPython)\r\n\r\nfor word in jsonToPython['results']:\r\n theWord = word['id']\r\n print(\"Word:\", theWord)\r\n #print(word['definitions'])\r\n\r\nfor word in jsonToPython['results']:\r\n for entries in (word['lexicalEntries']):\r\n for deff in (entries['entries']):\r\n for line in (deff['senses']):\r\n if line == \",\" or line == \"[\" or line == \"]\":\r\n print(\"\")\r\n else:\r\n 
theDefinition = line[\"definitions\"]\r\n print(\"Definition:\", theDefinition)\r\n\r\n#Info to export to html\r\n","repo_name":"David-Quan00/VastVocab","sub_path":"oxford_api.py","file_name":"oxford_api.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21080426454","text":"from Layer import *\nimport json\n\nm = 10000\nbytes_to_read = m * 784\ntheta_filename = 'parameters/min_cost.json'\n# theta_filename = 'parameters/last_epoch.json'\ntestfile_image = 'data/t10k-images-idx3-ubyte'\ntestfile_label = 'data/t10k-labels-idx1-ubyte'\n# testfile_image = 'data/train-images-idx3-ubyte'\n# testfile_label = 'data/train-labels-idx1-ubyte'\n\n# Read thetas\nwith open(theta_filename, 'r') as f:\n js_obj = json.load(f)\n\n# Read testing set\nwith open(testfile_image, 'rb') as f:\n meta = f.read(16)\n raw = f.read(bytes_to_read)\nx = reshape(array([raw[i] for i in range(bytes_to_read)]), (m,784))\nx = insert(x, 0, 1, axis=1)\n\n# Read testing set labels\nwith open(testfile_label, 'rb') as f:\n meta = f.read(8)\n raw = f.read(m)\ny = array([raw[i] for i in range(m)])\n\nnetwork = []\n# Input layer\nnetwork.append(Layer(a=x,theta=reshape(array(js_obj['theta1']),(50,785))))\n# Two hidden layers\nnetwork.append(Layer(theta=reshape(array(js_obj['theta2']),(50,51))))\nnetwork.append(Layer(theta=reshape(array(js_obj['theta3']),(10,51))))\n# Output layer\nnetwork.append(Layer())\n\nnetwork[1].activate(network[0],next_to_input=True)\nfor i in range(2,len(network)):\n network[i].activate(network[i-1])\n\nnetwork_predictions = array([])\nfor a in network[3].a:\n temp, prediction = 0, 0\n for confidence in range(a.size):\n if a[confidence] > temp:\n temp = a[confidence]\n prediction = confidence\n network_predictions = append(network_predictions, prediction)\n\ncorrect_predictions = 0\nfor i in range(m):\n if y[i] == network_predictions[i]:\n correct_predictions += 1\n\nprint('Correct network predictions:', str(100*correct_predictions/m)+'%')","repo_name":"stephenwang5/handwritten-digit-reader","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25364870159","text":"\"\"\"Entry point for game\"\"\"\nfrom view import viewfactory as vf\nfrom model import core as pp\n\ndef main():\n \"\"\"simple game loop to link a view with our model logic\"\"\"\n model = pp.PerpendicularPaths()\n view = vf.factory_create()\n view.init(model)\n while 1:\n view.handle_events()\n view.update()\n view.display()\n view.quit()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jagermeister/PerpendicularPaths","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9869595845","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom item.models import Item, ItemCategory, Bid\nfrom checkout.models import Rating\n\n\n# home_page\ndef home_page(request):\n if 'search_filter' in request.GET:\n search_filter = request.GET['search_filter']\n filtered_items = Item.objects.filter(name__icontains=search_filter)\n\n if request.headers.get('x-requested-with') == 'XMLHttpRequest':\n return JsonResponse({'data': [{\n 'id': x.id,\n 'name': x.name,\n 'description': x.description,\n 'condition': x.condition,\n 'firstImage': 
x.images_set.first().image,\n } for x in filtered_items]})\n return render(request, 'fire_sale/home_page.html', {\n 'items': filtered_items\n })\n\n if 'sort_by' in request.GET:\n order_by = request.GET['sort_by']\n order_by_items = Item.objects.order_by(order_by)\n\n if request.headers.get('x-requested-with') == 'XMLHttpRequest':\n return JsonResponse({'data': [{\n 'id': x.id,\n 'name': x.name,\n 'description': x.description,\n 'condition': x.condition,\n 'firstImage': x.images_set.first().image,\n } for x in order_by_items]})\n return render(request, 'fire_sale/home_page.html', {\n 'items': order_by_items\n })\n\n bid_status = Bid.objects.filter(buyer__id=request.user.id)\n notification = 'False'\n for i in bid_status:\n if i.status == \"accepted\":\n notification = 'True'\n ratings = Rating.objects.filter(seller__id=request.user.id).all()\n all_ratings = []\n for i in ratings:\n all_ratings.append(i.rating)\n if len(all_ratings) != 0:\n average_rating = round(sum(all_ratings)/len(all_ratings), 1)\n else:\n average_rating = \"\"\n\n return render(request, 'fire_sale/home_page.html', {\n 'items': Item.objects.all().order_by('name'),\n 'categories': ItemCategory.objects.all(),\n 'average_rating': average_rating,\n 'notification': notification\n })\n\n","repo_name":"birnarunkarls/FireSale","sub_path":"fire_sale/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73123833211","text":"import sys\nimport os\nimport re\nimport string\nimport logging\nfrom crccheck.crc import Crc15\nfrom pathlib import Path\nfrom TipiConfig import TipiConfig\nfrom unidecode import unidecode\nfrom ti_files import ti_files\nfrom ti_files.BasicFile import basicSuffixes\nfrom tinames.NativeFlags import *\n\n# Transform a name supplied by the 4A into our storage path\n\nlogger = logging.getLogger(__name__)\n\ntipi_config = TipiConfig.instance()\n\nTIPI_DIR = \"/home/tipi/tipi_disk\"\n\nWILDCARD = '#?'\n\n\ndef __driveMapping(key):\n path = tipi_config.get(key)\n\n if path == \"\" or path is None:\n return None\n\n if path == \".\":\n return TIPI_DIR\n\n path = \"/\".join([x.replace(\"/\", \".\") for x in path.split(\".\")])\n path = TIPI_DIR + \"/\" + path\n return path\n\n\ndef __cs1Mapping():\n path = tipi_config.get(\"CS1_FILE\")\n\n if path == \"\" or path is None:\n return None\n\n path = \"/\".join([x.replace(\"/\", \".\") for x in path.split(\".\")])\n path = TIPI_DIR + \"/\" + path\n return path\n\n\ndef __scanForVolume(volume):\n # If it is literally DSK.TIPI. 
act like it matches DSK0.\n if volume == 'TIPI':\n return TIPI_DIR\n\n # next check if one of the mapped drives has the name\n disks = (\"DSK1_DIR\", \"DSK2_DIR\", \"DSK3_DIR\", \"DSK4_DIR\", \"DSK5_DIR\", \"DSK6_DIR\", \"DSK7_DIR\", \"DSK8_DIR\", \"DSK9_DIR\",)\n for disk in disks:\n path = __driveMapping(disk)\n if path != None and path.endswith(\"/\" + volume):\n return path\n\n # None of the Disks are mapped to this volume...\n # fall back to top level directories\n path = os.path.join(TIPI_DIR, volume)\n if os.path.exists(path):\n return path\n return None\n\n\ndef nativeFlags(devname):\n parts = str(devname).split(\".\")\n startpart = 1\n if parts[0] == \"DSK\":\n startpart = 2\n if parts[0] == \"CS1\":\n return \"\"\n flags = parts[startpart]\n if flags in NATIVE_FLAGS:\n return flags\n target_path = devnameToLocal(devname)\n if not target_path:\n return \"\"\n return nativeTextDir(target_path)\n\n\ndef nativeTextDir(target_path):\n if not os.path.isfile(target_path):\n target_path += '/'\n # check if any of text_dirs is a prefix of target_path\n native_text_dirs = [f\"TIPI.{a.strip()}\" for a in tipi_config.get(\"NATIVE_TEXT_DIRS\").split(',') if a]\n if native_text_dirs and len(native_text_dirs):\n text_dirs = [devnameToLocal(dir) for dir in native_text_dirs]\n if True in [(f\"{td}/\" in target_path) for td in text_dirs]:\n return TEXT_WINDOWS\n return \"\"\n\n\ndef devnameToLocal(devname, prog=False):\n parts = str(devname).split(\".\")\n path = None\n startpart = 1\n if parts[0] == \"TIPI\":\n path = TIPI_DIR\n elif parts[0] == \"DSK0\":\n path = TIPI_DIR\n elif parts[0] in (\"DSK1\", \"DSK2\", \"DSK3\", \"DSK4\", \"DSK5\", \"DSK6\", \"DSK7\", \"DSK8\", \"DSK9\",):\n path = __driveMapping(f\"{parts[0]}_DIR\")\n elif parts[0] == \"DSK\":\n path = __scanForVolume(parts[1])\n startpart = 2\n elif parts[0] == \"CS1\":\n path = __cs1Mapping()\n\n if path == None or path == \"\":\n logger.debug(\"no path matched\")\n return None\n\n # skip native file modes when finding linux path\n if len(parts) > startpart and parts[startpart] in NATIVE_FLAGS:\n startpart = startpart + 1\n\n for part in parts[startpart:]:\n if part != \"\":\n logger.debug(\"matching path part: %s\", part)\n if part == parts[-1]:\n path += \"/\" + findpath(path, part, prog=prog)\n else:\n path += \"/\" + findpath(path, part, dir=True)\n logger.debug(\"building path: %s\", path)\n\n path = str(path).strip()\n logger.debug(\"%s -> %s\", devname, path)\n\n return path\n\n\n# Transform long host filename to 10 character TI filename\ndef asTiShortName(name):\n parts = name.split(\"/\")\n lastpart = parts[len(parts) - 1]\n name = lastpart.replace(\".\", \"/\")\n return encodeName(name)\n\n\ndef encodeName(name):\n bytes = bytearray(name, 'utf-8')\n if len(bytes) == len(name) and len(name) <= 10:\n return name\n else:\n crc = Crc15.calc(bytearray(name, 'utf-8'))\n prefix = unidecode(name)[:6]\n shortname = f'{prefix}`{baseN(crc, 36)}'\n return shortname\n\n\ndef baseN(num, b, numerals=\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\n return ((num == 0) and numerals[0]) or (\n baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b]\n )\n\n\n# Use the context of actual files to transform TI file names to possibly\n# long TI names\n\n\ndef findpath(path, part, prog=False, dir=False):\n part = part.replace(\"/\", \".\").replace(\"\\\\\", \".\")\n # if the file actually exists (or dir) then use literal name\n if os.path.exists(os.path.join(path, part)):\n return part\n else:\n # if it doesn't exist, and the part has a 
short name hash, then search\n # for a os match\n if re.match(\"^[^ ]{6}[`][0-9A-Z]{3}$\", part):\n # Now we must find all the names in 'path' and see which one we\n # should load.\n candidates = list(\n filter(lambda x: asTiShortName(x) == part, os.listdir(path))\n )\n if candidates:\n return candidates[0]\n if WILDCARD in part:\n # return the first item that matches the wildcard expression\n globpart = part.replace(WILDCARD, \"*\")\n candidates = [p for p in Path(path).glob(globpart)]\n if candidates:\n candidates.sort()\n for item in candidates:\n if dir:\n # item must be a directory... \n if os.path.isdir(os.path.join(path, item.name)):\n return item.name\n elif prog:\n # item must be a Program image, or convertable type\n if isProgramLike(os.path.join(path, item.name)):\n return item.name\n else:\n return candidates[0].name\n return part\n\n\ndef isProgramLike(path):\n if os.path.exists(path):\n type = ti_files.get_file_type(path)\n if type == \"PRG\" or (type == \"native\" and path.lower().endswith(basicSuffixes)):\n return True\n return False\n\n\ndef local2tipi(localpath):\n \"\"\" transform a unix local path to a ti path relative to TIPI. \"\"\"\n if localpath.startswith(TIPI_DIR + \"/\"):\n idx = len(TIPI_DIR) + 1\n tipart = localpath[idx:]\n return tipart.replace(\"/\", \".\")\n else:\n return \"\"\n","repo_name":"jedimatt42/tipi","sub_path":"services/tinames/tinames.py","file_name":"tinames.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"78"} +{"seq_id":"35211491485","text":"# 15685 <사다리 조작>\n\nimport sys\ninput = lambda: sys.stdin.readline()\n\ndef validate():\n for i in range(1,N+1):\n cur_n = i\n for j in range(1, H+1):\n if l[cur_n][j] == 1:\n cur_n +=1\n elif l[cur_n-1][j] == 1:\n cur_n -=1\n if cur_n != i:\n # print(f\"cur_n : {cur_n}\")\n return False\n return True\n\ndef dfs(cnt, h, num):\n global answer\n \n if cnt == num:\n if validate() == True:\n answer=cnt\n return \n \n for i in range(h, H+1):\n for j in range(1,N):\n if l[j][i] == 1:\n continue\n if j-1>0 and l[j-1][i] == 1:\n continue\n if j+1 < N and l[j+1][i] == 1:\n continue\n l[j][i]=1\n dfs(cnt+1,i,num)\n l[j][i]=0\n\nif __name__ == \"__main__\":\n N,M,H=list(map(int, input().split()))\n\n l = [list(0 for _ in range(H+1)) for _ in range(N+1)]\n for _ in range(M):\n a,b=list(map(int, input().split()))\n l[b][a]=1\n answer=-1\n condition = False\n for n in range(4):\n dfs(0,1,n)\n if answer > -1:\n print(answer)\n condition=True\n break\n if condition == False:\n print(-1)\n\n","repo_name":"HyungJunGoo/AlgorithmProblems","sub_path":"Baekjun/Simulation/15684.py","file_name":"15684.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37598756132","text":"from .common import InfoExtractor\nfrom ..utils import (\n js_to_json,\n traverse_obj,\n unified_timestamp\n)\n\n\nclass BoxCastVideoIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://boxcast\\.tv/(?:\n view-embed/|\n channel/\\w+\\?(?:[^#]+&)?b=|\n video-portal/(?:\\w+/){2}\n )(?P[\\w-]+)'''\n _EMBED_REGEX = [r']+src=[\"\\'](?Phttps?://boxcast\\.tv/view-embed/[\\w-]+)']\n _TESTS = [{\n 'url': 'https://boxcast.tv/view-embed/in-the-midst-of-darkness-light-prevails-an-interdisciplinary-symposium-ozmq5eclj50ujl4bmpwx',\n 'info_dict': {\n 'id': 'da1eqqgkacngd5djlqld',\n 'ext': 'mp4',\n 'thumbnail': r're:https?://uploads\\.boxcast\\.com/(?:[\\w+-]+/){3}.+\\.png$',\n 
'title': 'In the Midst of Darkness Light Prevails: An Interdisciplinary Symposium',\n 'release_timestamp': 1670686812,\n 'release_date': '20221210',\n 'uploader_id': 're8w0v8hohhvpqtbskpe',\n 'uploader': 'Children\\'s Health Defense',\n }\n }, {\n 'url': 'https://boxcast.tv/video-portal/vctwevwntun3o0ikq7af/rvyblnn0fxbfjx5nwxhl/otbpltj2kzkveo2qz3ad',\n 'info_dict': {\n 'id': 'otbpltj2kzkveo2qz3ad',\n 'ext': 'mp4',\n 'uploader_id': 'vctwevwntun3o0ikq7af',\n 'uploader': 'Legacy Christian Church',\n 'title': 'The Quest | 1: Beginner\\'s Bay | Jamie Schools',\n 'thumbnail': r're:https?://uploads.boxcast.com/(?:[\\w-]+/){3}.+\\.jpg'\n }\n }, {\n 'url': 'https://boxcast.tv/channel/z03fqwaeaby5lnaawox2?b=ssihlw5gvfij2by8tkev',\n 'info_dict': {\n 'id': 'ssihlw5gvfij2by8tkev',\n 'ext': 'mp4',\n 'thumbnail': r're:https?://uploads.boxcast.com/(?:[\\w-]+/){3}.+\\.jpg$',\n 'release_date': '20230101',\n 'uploader_id': 'ds25vaazhlu4ygcvffid',\n 'release_timestamp': 1672543201,\n 'uploader': 'Lighthouse Ministries International - Beltsville, Maryland',\n 'description': 'md5:ac23e3d01b0b0be592e8f7fe0ec3a340',\n 'title': 'New Year\\'s Eve CROSSOVER Service at LHMI | December 31, 2022',\n }\n }]\n _WEBPAGE_TESTS = [{\n 'url': 'https://childrenshealthdefense.eu/live-stream/',\n 'info_dict': {\n 'id': 'da1eqqgkacngd5djlqld',\n 'ext': 'mp4',\n 'thumbnail': r're:https?://uploads\\.boxcast\\.com/(?:[\\w+-]+/){3}.+\\.png$',\n 'title': 'In the Midst of Darkness Light Prevails: An Interdisciplinary Symposium',\n 'release_timestamp': 1670686812,\n 'release_date': '20221210',\n 'uploader_id': 're8w0v8hohhvpqtbskpe',\n 'uploader': 'Children\\'s Health Defense',\n }\n }]\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n webpage_json_data = self._search_json(\n r'var\\s*BOXCAST_PRELOAD\\s*=', webpage, 'broadcast data', display_id,\n transform_source=js_to_json, default={})\n\n # Ref: https://support.boxcast.com/en/articles/4235158-build-a-custom-viewer-experience-with-boxcast-api\n broadcast_json_data = (\n traverse_obj(webpage_json_data, ('broadcast', 'data'))\n or self._download_json(f'https://api.boxcast.com/broadcasts/{display_id}', display_id))\n view_json_data = (\n traverse_obj(webpage_json_data, ('view', 'data'))\n or self._download_json(f'https://api.boxcast.com/broadcasts/{display_id}/view',\n display_id, fatal=False) or {})\n\n formats, subtitles = [], {}\n if view_json_data.get('status') == 'recorded':\n formats, subtitles = self._extract_m3u8_formats_and_subtitles(\n view_json_data['playlist'], display_id)\n\n return {\n 'id': str(broadcast_json_data['id']),\n 'title': (broadcast_json_data.get('name')\n or self._html_search_meta(['og:title', 'twitter:title'], webpage)),\n 'description': (broadcast_json_data.get('description')\n or self._html_search_meta(['og:description', 'twitter:description'], webpage)\n or None),\n 'thumbnail': (broadcast_json_data.get('preview')\n or self._html_search_meta(['og:image', 'twitter:image'], webpage)),\n 'formats': formats,\n 'subtitles': subtitles,\n 'release_timestamp': unified_timestamp(broadcast_json_data.get('streamed_at')),\n 'uploader': broadcast_json_data.get('account_name'),\n 'uploader_id': broadcast_json_data.get('account_id'),\n }\n","repo_name":"yt-dlp/yt-dlp","sub_path":"yt_dlp/extractor/boxcast.py","file_name":"boxcast.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","stars":60520,"dataset":"github-code","pt":"78"} 
+{"seq_id":"21329203042","text":"database = {\r\n\t'home': 'ngoi nha',\r\n\t'baby': 'em be'\r\n}\r\ndef show_menu():\r\n\tprint(\"-----------------------------------\")\r\n\tprint(\"CHUONG TRINH TU DIEN\")\r\n\tprint(\"1. Them tu\")\r\n\tprint(\"2. Tim tu\")\r\n\tprint(\"3. Xoa tu\")\r\n\tprint(\"4. Xem tat ca tu\")\r\n\tprint(\"An 0 de thoat chuong trinh\")\r\n\r\nshow_menu()\r\n\r\nchoice = input(\"Ban muon lam gi\")\r\n\r\nwhile choice != 0:\r\n\tif choice == 0:\r\n\t\tbreak\r\n\telif choice == 1:\r\n\t\tadd()\r\n\telif choice == 2:\r\n\t\tfind()\r\n\telif choice == 3:\r\n\t\tdelete()\r\n\telif choice == 4:\r\n\t\tview_all()\r\nelse:\r\n\t\tprint(\"Khong co lua chon nao\")\r\n\r\n\t","repo_name":"maxvinh/family","sub_path":"tudien.py","file_name":"tudien.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30980196803","text":"import glob\nimport pandas as pd\nimport math\nfrom os import listdir, mkdir\nfrom os.path import isfile, join\n\nroot = '../data/'\ndirectories = ['pan2011_mails']\n# directories = ['gutenberg_authors', 'gutenberg_categories', 'song_artists', 'song_genres', 'pan2011_mails']\n\nfor directory in directories:\n files = [join(root, f) for f in listdir(join(root, directory))]\n classes = [data_class.split('/')[-1].split('.')[0] for data_class in files]\n data_dict = {'label': None, 'text': None}\n\n for file_name in glob.glob(join(root, directory, '*.txt')):\n with open(file_name, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if line != '\\n':\n if data_dict['label'] == None:\n data_dict['label'] = [file_name.split('/')[-1].split('.')[0]]\n else:\n data_dict['label'].append(file_name.split('/')[-1].split('.')[0])\n if data_dict['text'] == None:\n data_dict['text'] = [line]\n else:\n data_dict['text'].append(line) \n\n df = pd.DataFrame(data_dict, columns=['label', 'text'])\n\n selected_dfs = {}\n\n for curr_class in classes:\n selected_dfs[curr_class] = df[df['label'] == curr_class].sample(math.ceil(df[df['label'] == curr_class].shape[0] * 0.2))\n\n df_test = pd.DataFrame(columns=['label', 'text'])\n\n for key, value in selected_dfs.items():\n df_test = df_test.append(value)\n df = df.drop(list(value.index))\n\n df_train = df\n\n print(directory)\n print('train', df_train.shape)\n print('test', df_test.shape)\n\n mkdir(join(root, directory, 'train'))\n for curr_class in classes:\n df_curr_class = df_train[df_train['label'] == curr_class]\n with open(join(root, directory, 'train', curr_class + '.txt'), 'a', encoding=\"utf8\", errors='ignore') as f:\n for key, row in df_curr_class.iterrows():\n f.write(row['text'])\n f.write('\\n\\n')\n\n f.close()\n\n mkdir(join(root, directory, 'test'))\n\n for curr_class in classes:\n df_curr_class = df_test[df_test['label'] == curr_class]\n with open(join(root, directory, 'test', curr_class + '.txt'), 'a', encoding=\"utf8\", errors='ignore') as f:\n for key, row in df_curr_class.iterrows():\n f.write(row['text'])\n f.write('\\n\\n')\n\n f.close()","repo_name":"mateibejan1/ad-nlp","sub_path":"datasets/split_datasets.py","file_name":"split_datasets.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3652820361","text":"def is_late(d1, m1, y1, d2, m2, y2):\n\n if y2 > y1: #2001 > 2000, not late\n return False\n elif y2 == y1: #2000 == 2000, same year, continue\n if m2 > m1: # Feb > Jan, not late\n return False\n elif m2 
== m1: # Feb = Feb, same month, continue\n            if d2 >= d1: # 13 >= 13, not late\n                return False\n    return True # otherwise\n\n# Complete the libraryFine function below.\ndef libraryFine(d1, m1, y1, d2, m2, y2):\n\n    if is_late(d1, m1, y1, d2, m2, y2):\n\n        if y1 > y2:\n            return 10000\n        elif m1 > m2:\n            return 500 * (m1-m2)\n        else:\n            return 15*(d1-d2)\n    else:\n        return 0\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    d1M1Y1 = input().split()\n\n    d1 = int(d1M1Y1[0])\n\n    m1 = int(d1M1Y1[1])\n\n    y1 = int(d1M1Y1[2])\n\n    d2M2Y2 = input().split()\n\n    d2 = int(d2M2Y2[0])\n\n    m2 = int(d2M2Y2[1])\n\n    y2 = int(d2M2Y2[2])\n\n    result = libraryFine(d1, m1, y1, d2, m2, y2)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"AilingLiu/hackerrank","sub_path":"libraryFine.py","file_name":"libraryFine.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"38510413202","text":"import os, sys\r\nimport pygame\r\nfrom pygame.locals import*\r\nimport time\r\nit = time.time()\r\npygame.mixer.init()\r\n\r\ndef getdate():\r\n    import datetime\r\n    return datetime.datetime.now()\r\n\r\ndef mint():\r\n    fi = int(time.asctime(time.localtime(time.time()))[14:16])\r\n    return fi\r\nmint()\r\n\r\ndef hr():\r\n    ti = int(time.asctime(time.localtime(time.time()))[11:13])\r\n    return ti\r\nprint(hr())\r\n# print(mint())\r\ndef log_eye():\r\n    \"\"\"To set a 45 minute timer for the eye exercise reminder.\"\"\"\r\n    # hr() is on the 24-hour clock, so the afternoon slots are 13-17, not 1-5\r\n    return((hr() == 9 and mint() == 45)\r\n           or (hr() == 10 and mint() == 30)\r\n           or (hr() == 11 and mint() == 15)\r\n           or (hr() == 12 and mint() == 0)\r\n           or (hr() == 12 and mint() == 45)\r\n           or (hr() == 13 and mint() == 30)\r\n           or (hr() == 14 and mint() == 15)\r\n           or (hr() == 15 and mint() == 0)\r\n           or (hr() == 15 and mint() == 45)\r\n           or (hr() == 16 and mint() == 30)\r\n           or (hr() == 17 and mint() == 15)\r\n#           or (hr() == 23 and mint() == 30) #for testing\r\n           )\r\n\r\n# log_eye()\r\n\r\nc=0\r\n# while((hr() >= 16) and (hr() <= 24)):\r\nwhile((hr() == 0)):\r\n    log_eye()\r\n    print(\"hour: \",hr(),\" minute: \", mint())\r\n    l = 3500\r\n#    print(l-c)\r\n\r\n    if ((mint() == 30)\r\n        or (mint() == 15)\r\n        or (mint() == 5)\r\n        or (mint() == 45)):\r\n        \"\"\"for drinking water\"\"\"\r\n\r\n        pygame.mixer.music.load('water.mp3')\r\n        pygame.mixer.music.play(-1)\r\n\r\n        print(\"________Time to drink Water_____\")\r\n        usr = (input(\"Type drank:\")).lower()\r\n\r\n        if usr == \"drank\":\r\n            a = input(\"How much you drank in ml: \")\r\n            c = c + int(a)\r\n            if int(a) > 0:\r\n                print(l-c)\r\n                with open (\"LogFile.txt\",'a+') as f:\r\n                    f.write(str(getdate()) + ': Water drank: ' + a +' ml and '\r\n                            + str(l - int(c)) + \" ml water left\\n\")\r\n                with open (\"LogFile.txt\") as f:\r\n                    print(f.readlines())\r\n                pygame.mixer.music.stop()\r\n                break\r\n\r\n        else:\r\n            print(\"just type drank\")\r\n\r\n    if ((mint() == 30)\r\n        or (mint() == 0)):\r\n        \"\"\"For pyhsical workout\"\"\"\r\n\r\n        pygame.mixer.music.load('pyhsical.mp3')\r\n        pygame.mixer.music.play(-1)\r\n\r\n        print(\"________Time to do Physical Workout_____\")\r\n        usr = (input(\"Type done:\")).lower()\r\n\r\n        if usr == \"done\":\r\n            a = input(\"Press 1: After Completing Workout-\")\r\n            with open (\"LogFile.txt\",'a+') as f:\r\n                f.write(str(getdate()) + ': Workout done\\n')\r\n            with open (\"LogFile.txt\") as f:\r\n                print(f.readlines())\r\n            if (int(a) > 0):\r\n                pygame.mixer.music.stop()\r\n                break\r\n        else:\r\n            print(\"just type done\")\r\n\r\n    if (log_eye()):\r\n        \"\"\"For eye workout\"\"\"\r\n\r\n        
pygame.mixer.music.load('eyes.mp3')\r\n pygame.mixer.music.play(-1)\r\n\r\n print(\"________Time to do Eye excercise_____\")\r\n usr = (input(\"Type done:\")).lower()\r\n\r\n if usr == \"done\":\r\n a = input(\"Press 1: For Completing Workout\")\r\n with open (\"LogFile.txt\",'a+') as f:\r\n f.write(str(getdate()) + ': Eye excercise done\\n')\r\n with open (\"LogFile.txt\") as f:\r\n print(f.readlines())\r\n if (int(a) > 0):\r\n pygame.mixer.music.stop()\r\n break\r\n else:\r\n print(\"just type done\")\r\n\r\n else:\r\n print(\"Breakkk...\")\r\n break\r\n#To remove all the break function, loop will run endlessly\r\n#Challenge is to do with recurssive loop.\r\n#Recursion possible\r\n","repo_name":"Umraooo7/Fitness-Reminder","sub_path":"Fitness-Alarm.py","file_name":"Fitness-Alarm.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1054514331","text":"import time\n\nimport hydra\nimport torch\n\nfrom salina import instantiate_class\n\n\n@hydra.main(config_path=\"configs/\", config_name=\"csp.yaml\")\ndef main(cfg):\n _start = time.time()\n logger = instantiate_class(cfg.logger)\n logger.save_hps(cfg, verbose =False)\n framework = instantiate_class(cfg.framework)\n scenario = instantiate_class(cfg.scenario)\n #logger_evaluation = logger.get_logger(\"evaluation/\")\n #logger_evaluation.logger.modulo = 1\n stage = framework.get_stage()\n for train_task in scenario.train_tasks()[stage:]:\n framework.train(train_task,logger)\n evaluation = framework.evaluate(scenario.test_tasks(),logger)\n metrics = {}\n for tid in evaluation:\n for k,v in evaluation[tid].items():\n logger.add_scalar(\"evaluation/\"+str(tid)+\"_\"+k,v,stage)\n metrics[k] = v + metrics.get(k,0)\n for k,v in metrics.items():\n logger.add_scalar(\"evaluation/aggregate_\"+k,v / len(evaluation),stage)\n m_size = framework.memory_size()\n for k,v in m_size.items():\n logger.add_scalar(\"memory/\"+k,v,stage)\n stage+=1\n logger.close()\n logger.message(\"time elapsed: \"+str(round((time.time()-_start),0))+\" sec\")\n\nif __name__ == \"__main__\":\n import torch.multiprocessing as mp\n mp.set_start_method(\"spawn\")\n CUDA_AVAILABLE = torch.cuda.is_available()\n if CUDA_AVAILABLE:\n v = torch.ones(1, device=\"cuda:0\")\n main()","repo_name":"facebookresearch/salina","sub_path":"salina_cl/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"78"} +{"seq_id":"11977113999","text":"# author: Luke Collins\n# date: 2021-11-25\n\n\"\"\"\nThis script runs preprocessed data through Wilcoxon rank-sum tests\nfor significance of difference of median substance_use_total\nbetween 'pre' and 'post' datasets.\n\nUsage: stat_tests.py --data_path= [--output=]\nOptions:\n--data_path= path to dir in which preprocessed data is stored\n--output= file to write csv output [default: './analysis/stat_tests/stat_tests_output.csv']\n\"\"\"\n\nimport pandas as pd\nimport os\nfrom scipy.stats import ranksums\nimport glob\nfrom docopt import docopt\n\nopt = docopt(__doc__)\n\ndef main(data_path, output_file):\n \"\"\"\n Performs Wilcoxon rank-sum tests on pre/post data files\n -----\n data_path: str\n path to dir in which preprocessed data csvs are stored\n output_file: str\n path to dir in which csv output of stat tests will be written.\n default val is './stat_tests_output.csv'\n Returns\n -----\n None\n \"\"\"\n # read the datasets\n # 
print(glob.glob(data_path + '/*.csv'))\n files = [datafile.split(os.sep)[-1] for datafile in glob.glob(data_path + '/*.csv')]\n \n results = {}\n for i, file in enumerate(files):\n # analysis\\preprocessing\\*.csv\n df = pd.read_csv(data_path + '/' + file)\n pre = df.loc[df['period'] == 'pre']['substance_use_total']\n post = df.loc[df['period'] == 'post']['substance_use_total']\n test_statistic, p_val = ranksums(pre, post, alternative='two-sided')\n results[file.split('_')[0]] = {'test_statistic': test_statistic,\n 'p_value': p_val}\n\n pd.DataFrame(results).T.to_csv(output_file)\n\n\nif __name__ == \"__main__\":\n data_path = opt[\"--data_path\"]\n output_file = opt[\"--output\"]\n main(data_path, output_file)","repo_name":"elgohr-update/UBC-MDS-covid_reddit_behaviour","sub_path":"src/stat_tests.py","file_name":"stat_tests.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"2662034123","text":"\nline = open(\"inputs\\day9.txt\",\"r\").read()\n\ndef decompSection(section, start, stop, recurse):\n p=start\n decomp = 0\n while p < stop:\n l = section[p]\n if l != '(': \n decomp += 1\n p += 1\n else:\n close = section.find(\")\",p)\n marker = section[p+1:close]\n t1, t2 = list(map(int, marker.split('x')))\n p = close + 1\n if recurse:\n newString = section[p:p+t1] * t2\n decomp += decompSection(section, p, p+t1, True) * t2\n else:\n decomp += t1 * t2\n p += t1\n return decomp\n\npart1 = decompSection(line, 0, len(line), False)\nprint(\"Part 1:\", part1)\npart2 = decompSection(line, 0, len(line), True)\nprint(\"Part 2:\", part2)\n\n","repo_name":"hanken68/aoc2016","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20166937493","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom multiview_pose.core.camera import CustomSimpleCameraTorch as SimpleCameraTorch\nfrom mmpose.core.post_processing.post_transforms import (\n affine_transform_torch, get_affine_transform)\n\n\ndef compute_grid(space_size, space_center, cube_size):\n if isinstance(space_size, int) or isinstance(space_size, float):\n space_size = [space_size, space_size, space_size]\n if isinstance(space_center, int) or isinstance(space_center, float):\n space_center = [space_center, space_center, space_center]\n if isinstance(cube_size, int):\n cube_size = [cube_size, cube_size, cube_size]\n\n grid_1D_x = torch.linspace(\n -space_size[0] / 2, space_size[0] / 2, cube_size[0])\n grid_1D_y = torch.linspace(\n -space_size[1] / 2, space_size[1] / 2, cube_size[1])\n grid_1D_z = torch.linspace(\n -space_size[2] / 2, space_size[2] / 2, cube_size[2])\n grid_x, grid_y, grid_z = torch.meshgrid(\n grid_1D_x + space_center[0],\n grid_1D_y + space_center[1],\n grid_1D_z + space_center[2],\n )\n grid_x = grid_x.contiguous().view(-1, 1)\n grid_y = grid_y.contiguous().view(-1, 1)\n grid_z = grid_z.contiguous().view(-1, 1)\n grid = torch.cat([grid_x, grid_y, grid_z], dim=1)\n\n return grid\n\n\nclass NonGridProjectLayer(nn.Module):\n def __init__(self, feature_map_size):\n \"\"\"Project layer to get multi-view features.\n Args:\n cfg (dict):\n image_size: input size of the 2D model\n feature_map_size: output size of the 2D model\n \"\"\"\n super(NonGridProjectLayer, self).__init__()\n # image_size = cfg['image_size']\n # feature_map_size = cfg['feature_map_size']\n\n # if isinstance(image_size, 
int):\n # image_size = [image_size, image_size]\n if isinstance(feature_map_size, int):\n feature_map_size = [feature_map_size, feature_map_size]\n\n # self.register_buffer('image_size', torch.tensor(image_size))\n self.register_buffer('feature_map_size', torch.tensor(feature_map_size))\n\n def forward(self, feature_maps, meta, multiview_sample_points, discard_nan=True):\n \"\"\"\n\n Args:\n feature_maps: NxVxCxHxW\n meta:\n multiview_sample_points: [num_candidates_i x 5] i=0:N-1\n\n Returns:\n\n \"\"\"\n device = feature_maps.device\n batch_size, num_cameras, num_channels = feature_maps.shape[:3]\n multiview_features = []\n bounding = []\n for sample_points in multiview_sample_points:\n # multiview_features.append(torch.zeros(num_cameras, num_channels,\n # sample_points.shape[0], device=device))\n bounding.append(torch.ones(num_cameras, 1,\n sample_points.shape[0], device=device))\n # w, h = self.feature_map_size[0].item(), self.feature_map_size[1].item()\n h, w = feature_maps.shape[-2:]\n for i, sample_points in enumerate(multiview_sample_points):\n multiview_sample_points_norm = []\n for c in range(num_cameras):\n center = meta[i]['center'][c]\n scale = meta[i]['scale'][c]\n width, height = center * 2\n\n trans = torch.as_tensor(\n get_affine_transform(center, scale / 200.0, 0,\n [w, h]),\n dtype=torch.float,\n device=device)\n\n cam_param = meta[i]['camera'][c].copy()\n\n single_view_camera = SimpleCameraTorch(\n param=cam_param, device=device)\n xy = single_view_camera.world_to_pixel(sample_points[:, :3])\n\n bounding[i][c, 0] *= (xy[:, 0] >= 0\n ) & (xy[:, 1] >= 0\n ) & (xy[:, 0] < width) & (xy[:, 1] < height)\n sample_points_pixel_ = torch.clamp(xy, -1.0,\n max(width, height))\n sample_points_pixel = affine_transform_torch(sample_points_pixel_, trans)\n # sample_points_pixel = sample_points_pixel * self.feature_map_size[\n # None].float() / self.image_size[None].float()\n sample_points_norm = sample_points_pixel / (self.feature_map_size[\n None].float() - 1) * 2.0 - 1.0\n sample_points_norm = torch.clamp(\n sample_points_norm.view(1, 1, -1, 2), -1.1, 1.1)\n\n multiview_sample_points_norm.append(sample_points_norm)\n # multiview_features[i][c] = F.grid_sample(\n # feature_maps[c][i:i + 1],\n # sample_points_norm,\n # align_corners=True)[0]\n multiview_sample_points_norm = torch.cat(multiview_sample_points_norm, dim=0) # Vx1xPx2\n multiview_features.append(F.grid_sample(\n feature_maps[i],\n multiview_sample_points_norm,\n align_corners=True)[:, :, 0]) # [(VxCxP)]\n\n for i, multiview_feature in enumerate(multiview_features):\n multiview_features[i] = (multiview_feature *\n bounding[i]).permute(2, 0, 1).contiguous()\n is_nan = multiview_features[i].isnan().sum([1, 2]).ge(1)\n is_not_nan = is_nan.logical_not()\n if discard_nan:\n multiview_features[i] = multiview_features[i][is_not_nan]\n bounding[i] = bounding[i][:, 0, is_not_nan].sum(0) > 0\n multiview_sample_points[i] = multiview_sample_points[i][is_not_nan]\n else:\n multiview_features[i][is_nan] = 0.0 # disable nans\n bounding[i] = (bounding[i][:, 0].sum(0) > 0) * is_not_nan\n\n return multiview_features, bounding, multiview_sample_points\n","repo_name":"wusize/multiview_pose","sub_path":"multiview_pose/models/gcn_modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"78"} +{"seq_id":"9198145070","text":"\"\"\"\nTries to separate text/line foreground and background by 2D median filter\nwhitening.\n\nExample 
usage:\n\n```\nimport PIL.Image\n\nfrom whitening import whiten\n\n# possible to use numpy array as input/output\nimage = np.asarray(PIL.Image.open('image.jpg'), dtype='uint8')\nforeground, background = whiten(image, kernel_size=20, downsample=4)\nPIL.Image.fromarray(foreground).save('foreground.jpg', 'jpeg')\n\n# or directly a PIL image\nimage = PIL.Image.open('image.jpg')\nforeground, background = whiten(image, kernel_size=20, downsample=4)\nforeground.save('foreground.jpg', 'jpeg')\n```\n\nSelect kernel size that's enough for not making artifacts while small enough\nto keep computation fast. A good starting point is 50 pixels.\n\nA 9.5 Mpx image can be processed on a MacBook in 15 s, with grayscale and\ndownsampling 4x the run time can be reduced to 1 s! Quite good results can be\nobtained even with kernel size 10 and downsampling 16x.\n\nMore info: http://bohumirzamecnik.cz/blog/2015/image-whitening/\n\"\"\"\n\nimport PIL.Image\nimport numpy as np\nimport skimage.filters\nimport skimage.morphology\nimport skimage.transform\nfrom skimage.color import rgb2gray\n\n\ndef whiten(image, kernel_size=10, downsample=1):\n \"\"\"\n Tries to separate text/line foreground and background by 2D median filter\n whitening.\n\n The idea is that foreground (text/lines) are spikes that can be removed by\n spatial median filter, thus leaving the background. We can then normalize\n the original image by the background, leaving the foreground.\n\n Input:\n `image` - input image (np.ndarray or PIL.Image.Image)\n `kernel_size` - width of the median filter kernel\n `downsample` - downsampling factor to speedup the median calculation,\n can be useful since background is usually low-frequency image\n\n All images are represented as a numpy array of shape (height, width,\n channels) and dtype uint8 or PIL.Image.Image.\n\n Output: `foreground`, `background`\n \"\"\"\n input_is_image = issubclass(type(image), PIL.Image.Image)\n if input_is_image:\n # RGB/RGBA images can be converted without copying\n # L (grayscale) images must be copied to avoid\n # https://github.com/cython/cython/issues/1605\n image = np.array(image, copy=image.mode == 'L')\n\n is_grayscale = len(image.shape) < 3\n if is_grayscale:\n image = image.reshape(image.shape + (1,))\n\n channels = image.shape[-1]\n\n input_image = image\n shape = np.array(image.shape)\n\n if downsample != 1:\n downsampled_shape = (shape[:2] // downsample) + (channels,)\n # converts to np.float32 with scale [0., 1.]\n resized = skimage.transform.resize(image, downsampled_shape, mode='edge')\n input_image = to_byte_format(resized)\n\n # apply 2D median filter on each channel separately\n\n kernel = skimage.morphology.square(kernel_size)\n\n def filter_channel(channel_index):\n \"\"\"\n Filter an RGB image channel via median filter, ignore alpha channel.\n input/output data format: uint8\n \"\"\"\n image_channel = input_image[:, :, channel_index]\n if channel_index < 3:\n return skimage.filters.median(image_channel, footprint=kernel)\n else:\n # do not filter alpha channel\n return image_channel\n\n background = np.dstack([filter_channel(i) for i in range(channels)])\n\n if downsample != 1:\n # upsample the computed background to original size\n # resize converts to np.float32 with scale [0., 1.]\n background_float = skimage.transform.resize(background, shape, mode='edge')\n background = to_byte_format(background_float)\n else:\n background_float = from_byte_format(background)\n\n # We assume the original images is a product of foreground and background,\n # thus we can 
recover the foreground by dividing the image by the background:\n # I = F * B => F = I / B\n # For division we use float32 format instead of uint8.\n # Inputs are scaled [0., 255.], output is scaled [0., 1.]\n image_float = from_byte_format(image)\n # prevent division by zero\n background_float = np.maximum(0.001, background_float)\n foreground = (image_float / background_float)\n # Values over 1.0 has to be clipped to prevent uint8 overflow.\n foreground = np.minimum(foreground, 1)\n foreground = to_byte_format(foreground)\n\n if is_grayscale:\n foreground = foreground[:, :, 0]\n background = background[:, :, 0]\n\n if input_is_image:\n foreground = PIL.Image.fromarray(foreground)\n background = PIL.Image.fromarray(background)\n\n return foreground, background\n\n\ndef to_grayscale(image):\n return to_byte_format(rgb2gray(image))\n\n\ndef to_rgb(image):\n if image.shape[-1] == 1:\n return np.broadcast_to(image, image.shape[:2] + (3,))\n else:\n return image\n\n\ndef to_byte_format(array):\n return (array * 255).astype(np.uint8)\n\n\ndef from_byte_format(array):\n return array.astype(np.float32) / 255\n\n","repo_name":"rossumai/whitening","sub_path":"whitening/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"70152552572","text":"import sys ;\n\nold = open(sys.argv[1]);\nnew = open(sys.argv[2]);\n\nfound = [];\n\nfor blokk in old.read().split('\\n\\n'): #{\n\n\tf = blokk.split('\\n')[0];\n\tfound.append(f);\n#}\n\nfor blokk in new.read().split('\\n\\n'): #{\n\n\t\n\tf = blokk.split('\\n')[0];\n\tif f not in found: #{\n\t\tprint(blokk);\n\t\tprint('')\n\t#}\n#}\n","repo_name":"apertium/apertium-crh","sub_path":"dev/annot-comp.py","file_name":"annot-comp.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"20730019051","text":"\"\"\"\nscrap usf related comments from College Confidential\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nTOTAL = 8\nFILE = open('cconfidential', 'w')\n\ni = 1\ncount = 0\n\nwhile i <= TOTAL:\n response = requests.get(\"http://talk.collegeconfidential.com/university-san\" +\n \"-francisco/p\" + str(i) + \"/\")\n data = BeautifulSoup(response.content, \"html.parser\")\n\n\n for page in data.find_all(\"a\", class_=\"Title\"):\n link = page['href']\n\n\n pageResponse = requests.get(link)\n pageData= BeautifulSoup(pageResponse.content, \"html.parser\")\n\n for review in pageData.find_all(\"div\", {\"class\":\"Message\"}):\n count += 1\n FILE.write(str(count) + \". 
\")\n FILE.write(review.get_text().encode('utf-8'))\n FILE.write(\"\\n\\n\")\n\n i += 1\n\n\nFILE.write(\"The count is: \" + str(count))\nFILE.close()\n\n \n \n \n","repo_name":"akcieslak/CIPEProject1","sub_path":"code/collegeconfidential.py","file_name":"collegeconfidential.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4095370959","text":"import sys\nimport random\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport utils\n\nclass Critic(nn.Module):\n def __init__(self, state_dim, action_dim, learning_rate, device):\n super(Critic, self).__init__()\n #self.apply(utils.weight_init) # Weight initialize\n\n self.device = device\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.learning_rate = learning_rate\n\n self.fc1 = nn.Linear(state_dim, 64)\n self.fc2 = nn.Linear(64, 32)\n self.fc3 = nn.Linear(32, 16)\n self.fc4 = nn.Linear(16, 1)\n\n self.optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)\n\n def _forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = self.fc4(x)\n return x\n\n def train(self, states, td_targets):\n\n values = self._forward(states)\n\n self.optimizer.zero_grad()\n loss = F.smooth_l1_loss(values ,td_targets.detach())\n loss.backward()\n self.optimizer.step()\n\n\n\n\n","repo_name":"dldnxks12/DelayedControl","sub_path":"ppo_b/ppo_critic.py","file_name":"ppo_critic.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"24188344862","text":"#!/usr/bin/python3\n# coding=utf-8\n\nfrom Tkinter import *\nimport attractor_scipy as attractor\n\nfields = 'a (sigma)', 'b', 'c (r)'\n\n\ndef fetch(entries):\n a = float(entries[0][1].get())\n b = float(entries[1][1].get())\n c = float(entries[2][1].get())\n\n attractor.showcase(a, b, c)\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = Frame(root)\n lab = Label(row, width=15, text=field, anchor='w')\n ent = Entry(row)\n ent.place(width=10)\n row.pack(side=TOP, fill=X, padx=5, pady=5)\n lab.pack(side=LEFT)\n ent.pack(side=RIGHT, expand=YES, fill=X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = Tk()\n root.geometry(\"%dx%d\" % (500, 160))\n root.title('Strange Attractor Showcase, Mownit 2014, Lukasz Raduj')\n\n coefficients_text = StringVar()\n coefficients_label = Label(root, textvariable=coefficients_text)\n coefficients_label.pack()\n coefficients_text.set('Set coefficients for presentation:')\n ents = makeform(root, fields)\n\n root.bind('', (lambda event, e=ents: fetch(e)))\n b1 = Button(root, text='Show',\n command=(lambda e=ents: fetch(e)))\n b1.pack(side=LEFT, padx=5, pady=5)\n b2 = Button(root, text='Quit', command=root.quit)\n b2.pack(side=LEFT, padx=5, pady=5)\n root.mainloop()","repo_name":"raduy/strange-attractor-mownit","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"425231867","text":"\na = input()\nif a == 'продукты':\n b = int(input('Введите цену: '))\n if b < 100:\n print('Попробуйте нашу выпечку!')\n elif b >= 100 and b < 500:\n print('Как насчёт орехов в шоколаде')\n elif b >= 500:\n print('��опробуйте экзотические 
фрукты!')\nelse:\n print('Загляните в товары для дома!')\n","repo_name":"nverenchik/main","sub_path":"14.09/задача про рекомендации товаров.py","file_name":"задача про рекомендации товаров.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15402535858","text":"from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException\nfrom selenium import webdriver\nimport time\nimport pandas as pd\n\ndef get_wines(path, time_slp, num_wines, Print):\n options = webdriver.ChromeOptions()\n driver = webdriver.Chrome(executable_path=path, options=options)\n driver.set_window_size(1120, 1000)\n\n url = 'https://www.vivino.com/explore?e=eJzLLbI11jNVy83MszVQy02ssDU2NzBQS660dQxSSwYSwWoFtoZq6Wm2ZYlFmakliTlq-Um2RYklmXnpxfHJ-aV5JWr5timpxclAPcXRsbaJRQAoERtp'\n driver.get(url)\n\n wines = []\n while num_wines > len(wines):\n time.sleep(time_slp)\n wines_cards = driver.find_elements_by_xpath('//div[@class=\"explorerCard__explorerCard--3Q7_0 explorerPageResults__explorerCard--3q6Qe\"]')\n for wine_card in wines_cards:\n if len(wines) >= num_wines:\n break\n time.sleep(time_slp)\n collected_successfully = False\n while not collected_successfully:\n try:\n title = wine_card.find_element_by_xpath('.//span[@class=\"vintageTitle__winery--2YoIr\"]').text\n grape = wine_card.find_element_by_xpath('.//span[@class=\"vintageTitle__wine--U7t9G\"]').text\n location = wine_card.find_element_by_xpath('.//div[@class=\"vintageLocation__vintageLocation--1DF0p\"]/a[3]').text\n rating = wine_card.find_element_by_xpath('.//div[@class=\"vivinoRatingWide__averageValue--1zL_5\"]').text\n print (title)\n collected_successfully = True\n except:\n print ('No encontro')\n time.sleep(6)\n\n try:\n time.sleep(4)\n wine_card.find_element_by_xpath('.//button[@class=\"button__button--247vZ button__themeFlat--WqIBc button__sizeDefault--3HuoB explorerCard__button--3HZ-g\"]').click()\n time.sleep(12)\n price = driver.find_element_by_xpath('//a[@class=\"button__button--m8RH9 button__themePrimary--3H_zH button__sizeMedium--1-uHP shop__priceButton--33XSU button__link--h-Ho-\"]/span').text\n time.sleep(1)\n driver.find_element_by_xpath('//*[name()=\"path\" and @fill=\"#a8a5a3\" and @fill-rule=\"evenodd\"]/..').click()\n print (price)\n except:\n print ('No encontro precio')\n time.sleep(6)\n\n if Print:\n print (f'Title: {title}')\n print (f'Grape: {grape}')\n print (f'Price: {price}' )\n print (f'Location: {location}')\n print (f'Rating: {rating}')\n\n \n wines.append({\n 'Title': title,\n 'Grape': grape,\n 'Location': location,\n 'Rating': rating,\n 'Price': price\n })\n\n return pd.DataFrame(wines)\n\n \n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ramiVolodarsky/PersonalProjects","sub_path":"Wines_Intento/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36226998761","text":"from numpy import genfromtxt\nfrom utils.neural_network import NeuralNetwork\n\n\ndef train():\n training_full_dataset = genfromtxt(\"data/training_spam.csv\", delimiter=',')\n test_full_dataset = genfromtxt(\"data/testing_spam.csv\", delimiter=',')\n training_dataset = training_full_dataset[:, 1:]\n training_labels = training_full_dataset[:, 0]\n test_dataset = test_full_dataset[:, 1:]\n test_labels = test_full_dataset[:, 0]\n learning_rate = 0.5\n layer_settings = [{'nodes': 54, 'learning_rate': 
learning_rate},\n {'nodes': 3, 'learning_rate': learning_rate},\n {'nodes': 1, 'learning_rate': learning_rate}]\n nn = NeuralNetwork(layer_settings=layer_settings)\n nn.set_training_data(features=training_dataset,\n labels=training_labels)\n nn.train(epochs=5000,\n batch_size=500)\n nn.predict(test_dataset)\n acc = nn.classification_accuracy(features=test_dataset,\n labels=test_labels).round(3)\n nn.save_weights_and_biases(\"data/weights_and_bias.json\")\n print(acc)\n\n\ndef classify(data):\n layer_settings = [{'nodes': 54},\n {'nodes': 3},\n {'nodes': 1}]\n nn = NeuralNetwork(layer_settings=layer_settings)\n nn.load_weights_and_biases(\"data/weights_and_bias.json\")\n return nn.predict(data).round(0)\n\n\nif __name__ == '__main__':\n test_dataset = genfromtxt(\"data/testing_spam.csv\", delimiter=',')[:, 1:]\n a = classify(test_dataset)\n print(a)\n","repo_name":"mjpcollins/spam-filter","sub_path":"classifiers/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"24303525346","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt \nimport prototype\nimport numpy as np\nimport explain\nimport pandas as pd\nimport tensorflow as tf\nimport utils\nimport os\nimport random as rn\n\nSEED = 123\nos.environ['PYTHONHASHSEED'] = str(SEED)\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\ntf.random.set_seed(SEED)\nnp.random.seed(SEED)\nrn.seed(SEED)\n\nX_train, X_test, y_train, y_test = utils.load_mnist()\n\nautoencoder_fun = prototype.image_autoencoder\n\nlatent_dim = 5\nnum_neurons = (X_train[0].shape[0] * X_train[0].shape[1]) // 2\n\nmnist_model = prototype.prototype_model(\n num_classes=10, \n latent_dim=latent_dim,\n num_neurons=num_neurons,\n num_prototypes=10,\n autoencoder_fun=autoencoder_fun,\n **{'img_shape':X_train[0].shape}\n )\n\nmnist_model.load_weights(os.path.join('..','weights','mnist.h5'))\n\nmnist_encoder = prototype.encoder(mnist_model)\n\nmnist_decoder = prototype.image_decoder(mnist_model, img_shape=(7,7,32))\n\nindices = [0,1,2,3,4,7,8,11,18,61]\n\narg_sort = np.argsort(y_test[indices])\n\nX_test_subset = X_test[indices][arg_sort]\n\nnum_iter = 100\nsize = (X_test_subset.shape[0],num_iter,7,7,32)\np0 = .1\np1 = .9\n\ndiff = explain.explain(tf.convert_to_tensor(X_test_subset),num_iter=num_iter,\n size=size,p0=p0,p1=p1,\n encoder=mnist_encoder,\n decoder=mnist_decoder,seed=SEED)\n\nnoisy_model = prototype.prototype_model(\n num_classes=10, \n latent_dim=latent_dim,\n num_neurons=num_neurons,\n num_prototypes=10,\n autoencoder_fun=autoencoder_fun,\n **{'img_shape':X_train[0].shape}\n )\n\nnoisy_encoder = prototype.encoder(noisy_model)\n\nnoisy_decoder = prototype.image_decoder(noisy_model, img_shape=(7,7,32))\n\nnum_iter = 100\nsize = (X_test_subset.shape[0],num_iter,7,7,32)\np0 = .1\np1 = .9\n\nnoisy_diff = explain.explain(tf.convert_to_tensor(X_test_subset),num_iter=num_iter,\n size=size,p0=p0,p1=p1,\n encoder=noisy_encoder,\n decoder=noisy_decoder,seed=SEED)\n\nshuffled_model = prototype.prototype_model(\n num_classes=10, \n latent_dim=latent_dim,\n num_neurons=num_neurons,\n num_prototypes=10,\n autoencoder_fun=autoencoder_fun,\n **{'img_shape':X_train[0].shape}\n )\nshuffled_model.load_weights(os.path.join('..','weights','mnist_shuffled.h5'))\n\nshuffled_encoder = prototype.encoder(shuffled_model)\n\nshuffled_decoder = prototype.image_decoder(shuffled_model, img_shape=(7,7,32))\n\nnum_iter = 100\nsize = (X_test_subset.shape[0],num_iter,7,7,32)\np0 
= .1\np1 = .9\n\nshuffled_diff = explain.explain(tf.convert_to_tensor(X_test_subset),num_iter=num_iter,\n size=size,p0=p0,p1=p1,\n encoder=shuffled_encoder,\n decoder=shuffled_decoder,seed=SEED)\n\n\nfor i in arg_sort:\n fig = plt.figure(figsize=(3,3))\n plt.imshow(diff[i,:,:,0],cmap='jet')\n plt.axis('off')\n plt.savefig(os.path.join('..','plots',f'rm_heatmap{i}.pdf'),\n bbox_inches='tight', pad_inches=0) \n plt.close()\n \n fig = plt.figure(figsize=(3,3))\n plt.imshow(X_test_subset[i,:,:,0])\n plt.axis('off')\n plt.savefig(os.path.join('..','plots',f'base_{i}.pdf'),\n bbox_inches='tight', pad_inches=0) \n plt.close()\n \n fig = plt.figure(figsize=(3,3))\n plt.imshow(noisy_diff[i,:,:,0],cmap='jet')\n plt.axis('off')\n plt.savefig(os.path.join('..','plots',f'noisy_heatmap{i}.pdf'),\n bbox_inches='tight', pad_inches=0) \n plt.close()\n \n fig = plt.figure(figsize=(3,3))\n plt.imshow(shuffled_diff[i,:,:,0],cmap='jet')\n plt.axis('off')\n plt.savefig(os.path.join('..','plots',f'shuffled_heatmap{i}.pdf'),\n bbox_inches='tight', pad_inches=0) \n plt.close()","repo_name":"halliwelln/prototype-explanations","sub_path":"code/mnist_explanations.py","file_name":"mnist_explanations.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16943513053","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom xtt._ffi import ffi as _ffi\nfrom xtt._ffi import lib as _lib\nfrom xtt._ffi_utils import to_bytes, to_text, DataStruct\n\nfrom xtt.exceptions import error_from_code, ReturnCode as RC\n\n__all__ = [\n 'create_ecdsap256_key_pair',\n 'ECDSAP256PublicKey', 'ECDSAP256PrivateKey'\n]\n\nclass ECDSAP256PublicKey(DataStruct):\n struct = \"xtt_ecdsap256_pub_key\"\n\nclass ECDSAP256PrivateKey(DataStruct):\n struct = \"xtt_ecdsap256_priv_key\"\n\ndef create_ecdsap256_key_pair():\n \"\"\"\n Create a new ECDSAP256 key pair.\n\n :returns: a tuple of the public and private keys\n \"\"\"\n pub = ECDSAP256PublicKey()\n priv = ECDSAP256PrivateKey()\n rc = _lib.xtt_crypto_create_ecdsap256_key_pair(pub.native, priv.native)\n if rc == RC.SUCCESS:\n return (pub, priv)\n else:\n raise error_from_code(rc)\n","repo_name":"xaptum/xtt-python","sub_path":"xtt/crypto/ecdsap256.py","file_name":"ecdsap256.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42572251458","text":"\nfrom bs4 import BeautifulSoup\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nfrom selenium import webdriver\nimport time\nimport datetime\nimport pandas as pd\nimport sys, errno \nfrom requests.exceptions import HTTPError\n\n\ndef description_scraper(url):\n driver = webdriver.Firefox(executable_path=r'C:/Users/jazmi/Documents/geckodriver.exe')\n driver.get(url)\n #print(url)\n soup = BeautifulSoup(driver.page_source,'html.parser')\n driver.close()\n #print(soup)\n res = soup.find_all('div',class_=\"story-contents__content\")\n \n \n string = \"\"\n #print(res)\n #print(type(res))\n #print(\"-------------\")\n for link in res:\n string += link.get_text()\n \n return string\n \n \ndef load_page(url):\n \n driver =webdriver.Firefox(executable_path=r'C:/Users/jazmi/Documents/geckodriver.exe')\n driver.get(url)\n SCROLL_PAUSE_TIME = 30\n\n # Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n 
while True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n \n return driver\n\ndef scrap_page1(fecha):\n url = \"https://elcomercio.pe/archivo/todas/\" + fecha +\"/\"\n driver = load_page(url)\n \n soup = BeautifulSoup(driver.page_source,'html.parser')\n driver.close()\n links = []\n headings = []\n #description = [] \n for item in soup.find_all('a',class_=\"story-item__title\"):\n headings.append(item.text) # .find('a')['href'])\n links.append(\"https://elcomercio.pe\" + item['href'])\n #print(\"https://elcomercio.pe\" + item['href'])\n #description.append(description_scraper(\"https://elcomercio.pe\" + item['href']))\n #break\n fechas = [str(fecha)] * len(links)\n print(fecha)\n print(len(headings))\n #print(description)\n #return headings, links, fechas, description\n return headings, links, fechas\n\n\n\n\n\n\n\n\n\n\ndef scrap_page(fecha):\n url = \"https://elcomercio.pe/archivo/todas/\" + fecha +\"/\"\n driver = load_page(url)\n \n soup = BeautifulSoup(driver.page_source,'html.parser')\n driver.close()\n links = []\n headings = []\n for item in soup.find_all('a',class_=\"story-item__title\"):\n headings.append(item.text)\n links.append(\"https://elcomercio.pe\" + item['href'])\n fechas = [str(fecha)] * len(links)\n print(fecha)\n print(len(headings))\n return headings, links, fechas\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"JazmineAlfaro/TCG","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28194898172","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\nfrom django.views.generic import TemplateView\n\nfrom objects import views\nfrom objects.views import AllObjects, ObjectDetailView, ObjectEditView\n\nurlpatterns = [\n path('', AllObjects.as_view(), name=\"all_objects\"),\n path('/', ObjectDetailView.as_view(), name=\"object\"),\n path('edit//', ObjectEditView.as_view(), name=\"edit_object\"),\n path('filter/', views.filter, name=\"filter\"),\n path('add_wishlist/', views.add_wishlist, name=\"add_wishlist\"),\n path('filter_ind/', views.filter_ind, name=\"filter_ind\"),\n path('filtering/', views.filtering, name=\"filtering\"),\n path('add_object/filter_add_object/', views.cat_add_object, name=\"filter_add_object\"),\n path('add_object/', views.add_object, name=\"add_object\"),\n path('add_object/select_cat/', views.select_cat, name=\"select_cat\"),\n path('delete_obj/', views.delete_obj, name=\"delete_obj\"),\n path('edit_status/', views.edit_status, name=\"edit_status\"),\n path('edit_status_ind/', views.edit_status_ind, name=\"edit_status_ind\"),\n path('delete_photo/', views.delete_photo, name=\"delete_photo\"),\n path('modalslider/', views.modalslider, name=\"modalslider\"),\n]","repo_name":"ArtSb2604/focuscity","sub_path":"objects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14044691174","text":"#!/bin/python3\n\n'''Script de Atualizações de Ordem de Serviço!\n\nPython Version >= 3.7.3\nModules Used = os, 
cx_Oracle, time\n\n'''\nimport os\nfrom time import sleep\n\ntry:\n import cx_Oracle as cxo\nexcept:\n os.system('pip3 install cx_Oracle')\n import cx_Oracle as cxo\n\nwith open('/opt/Projeto_OS/software/ora.conf', 'r') as data:\n read = data.read().split('\\n')\n list_data = []\n for x in read:\n list_data.append(x.split('='))\n\ndata = dict(list_data) \nhost, db = data['host'], data['string']\nuser, _pass = data['user'], data['pass'] \n\ncon_orcl = cxo.connect(f\"{user}/{_pass}@{host}/{db}\")\ncon_orcl.autocommit = True\ncursor = con_orcl.cursor()\n\n\ndef verify_pcprest(numos):\n sql2 = f\"\"\" SELECT COUNT(*) \n FROM PCPREST \n WHERE NUMOS = {numos}\n \"\"\"\n \n sqlr2 = cursor.execute(sql2).fetchone()\n return sqlr2[0]\n \ndef insert_pcprest(codcli, prest, duplic, valor, dtvenc, codcob, dtemissao, codfilial,\n status, codusur, dtvencorig, numtransvenda, dtsaida, codsupervisor, numos, dtfecha):\n sql3 = f\"\"\"\n INSERT INTO PCPREST (CODCLI, PREST, DUPLIC, VALOR, DTVENC, CODCOB, DTEMISSAO, CODFILIAL,\n\t STATUS, CODUSUR, DTVENCORIG, NUMTRANSVENDA, DTSAIDA, CODSUPERVISOR, NUMOS, DTFECHA)\n VALUES ({codcli}, {prest}, {duplic}, {valor}, to_date('{dtvenc}', 'DD/MM/YYYY'), '{codcob}', \n to_date('{dtemissao}', 'DD/MM/YYYY'), {codfilial}, '{status}', {codusur}, \n to_date('{dtvencorig}', 'DD/MM/YYYY'), {numtransvenda}, to_date('{dtsaida}', 'DD/MM/YYYY'), \n {codsupervisor}, {numos}, to_date('{dtfecha}', 'DD/MM/YYYY'))\n \"\"\"\n print(sql3)\n sqlr3 = cursor.execute(sql3)\n\ndef stores_transvenda():\n sql4 = \"SELECT PROXNUMTRANSVENDA FROM PCCONSUM\"\n sqlr4 = cursor.execute(sql4).fetchone()\n cursor.execute(\"UPDATE PCCONSUM SET PROXNUMTRANSVENDA = PROXNUMTRANSVENDA+1\") \n for _ in sqlr4:\n print(_)\n numtrans = _\n return numtrans\n\ndef del_pcprest(numos):\n sql5 = f\"delete from pcprest where numos = {numos}\"\n sqlr5 = cursor.execute(sql5)\n\ndef modify_situation(numos):\n sql6 = f\"\"\"SELECT COUNT(*) \n FROM PCPREST \n WHERE NUMOS = {numos}\n AND DTPAG IS NOT NULL\"\"\"\n sqlr6 = cursor.execute(sql6).fetchone()\n if sqlr6[0] > 0:\n cursor.execute(f\"UPDATE PCORDEMSERVICO SET SITUACAO = 2 WHERE NUMOS = {numos}\")\n #print(f\"OS {numos} foi colocada em execução\") #DEBUG\n\ndef update_pcprest(numos, valor):\n sql8 = f\"UPDATE PCPREST SET VALOR = {valor} where numos = {numos}\"\n sqlr8 = cursor.execute(sql8).fetchall()\n\n# Start Program\n # --Consulting open OS\n # Looping iniciado com a função sleep com 15 segundos ao final do Looping\n\nwhile 0 == 0:\n \n # Primeira consulta para buscar os dados para inserção na PCPREST.\n\n sql1 = \"\"\" SELECT O.CODCLI, \n '1' PREST, \n (SELECT PROXNUMTRANSVENDA FROM PCCONSUM) DUPLIC, \n (SELECT SUM(QTDE*PUNIT) FROM PCORDEMSERVICOI WHERE NUMOS = O.NUMOS) VALOR, \n TO_CHAR(SYSDATE, 'DD/MM/YYYY') DTVENC, \n 'ORDS' CODCOB, \n TO_CHAR(SYSDATE, 'DD/MM/YYYY') DTEMISSAO, \n O.CODFILIAL,\n 'A' STATUS, \n O.CODRCA, \n TO_CHAR(SYSDATE, 'DD/MM/YYYY') DTVENCORIG, \n (SELECT PROXNUMTRANSVENDA FROM PCCONSUM) NUMTRANSVENDA, \n TO_CHAR(SYSDATE, 'DD/MM/YYYY') DTSAIDA, \n (SELECT CODSUPERVISOR FROM PCUSUARI WHERE CODUSUR = O.CODRCA) CODSUPERVISOR, \n O.NUMOS,\n TO_CHAR(SYSDATE, 'DD/MM/YYYY') DTFECHA\n FROM PCORDEMSERVICO O\n WHERE O.SITUACAO = 1\n AND (SELECT SUM(QTDE*PUNIT) \n FROM PCORDEMSERVICOI \n WHERE NUMOS = O.NUMOS) > 0\n \"\"\"\n result = cursor.execute(sql1).fetchall()\n\n # Segunda consulta para buscar OS cancelada.\n\n sql7 = \"\"\" SELECT O.NUMOS\n FROM PCORDEMSERVICO O\n WHERE O.SITUACAO = 3\n AND (SELECT SUM(QTDE*PUNIT) \n FROM PCORDEMSERVICOI \n WHERE 
NUMOS = O.NUMOS) > 0\n             AND (SELECT COUNT(*) FROM PCPREST WHERE NUMOS = O.NUMOS) <> 0\n          \"\"\"\n    sqlr7 = cursor.execute(sql7).fetchall()\n\n    # Third query: fetch OS whose value differs from PCPREST\n\n    sql8 = \"\"\"SELECT P.NUMOS, \n                     (SELECT SUM(QTDE*PUNIT) FROM PCORDEMSERVICOI WHERE NUMOS = P.NUMOS) VALOR\n                FROM PCPREST P\n               WHERE P.VALOR <> (SELECT SUM(I.QTDE*I.PUNIT) \n                                   FROM PCORDEMSERVICO O, \n                                        PCORDEMSERVICOI I \n                                  WHERE I.NUMOS = P.NUMOS\n                                    AND O.SITUACAO = 1)\n           \"\"\" \n    sqlr8 = cursor.execute(sql8).fetchall()\n    \n    # for loop to unpack the variables\n\n    for os in result:\n        codcli, prest, duplic, valor = os[0], os[1], os[2], os[3]\n        dtvenc, codcob, dtemissao, codfilial = os[4], os[5], os[6], os[7] \n        status, codusur, dtvencorig = os[8], os[9], os[10],\n        dtsaida, codsupervisor, numos, dtfecha = os[12], os[13], os[14], os[15]\n\n        #Checking whether the bill already exists: \n        \n        value = verify_pcprest(numos)\n        \n        #If the bill does not exist, run the insert: \n        \n        if value == 0:\n            #Store the NUMTRANSVENDA from PCCONSUM and add +1\n            numtransvenda = stores_transvenda()\n            \n            #Inserting the bill\n\n            insert_pcprest(codcli, prest, duplic, valor, dtvenc, codcob, dtemissao, \n            codfilial, status, codusur, dtvencorig, numtransvenda, dtsaida, codsupervisor, numos, dtfecha) \n\n        #Updating the status of paid bills for this OS row\n        #(kept inside the loop so numos is always defined and every row is checked)    \n        modify_situation(numos)    \n    \n    #Deleting bills whose OS status is cancelled    \n    for _os in sqlr7:\n        os = _os[0]\n        os_del = verify_pcprest(os)\n        if os_del >0:\n            del_pcprest(os)\n\n    #Updating bill values that differ between PCPREST and PCORDEMSERVICOI    \n    for _os in sqlr8:\n        os = _os[0]\n        valor = _os[1]\n        update_pcprest(os, valor)    \n\n    #con_orcl.close()\n    #print('Waiting 15 seconds before starting again.')\n    sleep(15)\n\n","repo_name":"kurtizinho/Projeto_OS","sub_path":"software/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6706,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27525510441","text":"# Prints the even integers between two numbers \n# a and b, entered by the user, a and b included\n# accepts floats but rounds them down\n\na = int(float (input('enter the first number: ')))\nb = int(float (input('enter the second number: ')))\neven_nums = ''\n\nif a > b:\n    a,b = b,a\n\neven_nums = '\\nThe even numbers between ' + str(a) + ' and ' + str(b) + ' are: '\n\nwhile a<=b: \n    if a % 2 == 0:\n        even_nums += str(a) + ' '\n    a += 1\nprint (even_nums, '\\n')","repo_name":"roy-marquez/_IntroProg","sub_path":"s04/P01_pares_entre_a_y_b/pares_entre_a_y_b.py","file_name":"pares_entre_a_y_b.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"86318107986","text":"#coding:utf-8\nfrom config.api_config import *\nfrom ..common.log import *\nfrom ..common.common import *\nfrom public.xlrd_handle import *\n\n#parameter substitution for cell data from the excel sheet\n#convert variables wrapped in || inside a cell to their parameter values; all substitution parameters are stored in the common_data dict\ndef var_convert_para(need_convert):\n\n    try:\n        temp_str = str(need_convert)\n    except:\n        logging.error('Variable-to-parameter conversion failed: cast to str failed'.decode('utf-8'))\n        return False\n\n    #split the str into a list using '|' as the separator\n    temp_list = temp_str.split('|')\n\n    #start converting when the list length is greater than 1\n    if len(temp_list) > 1:\n        for i in range(0, len(temp_list)):\n            #all variables to substitute sit at the odd indices of the list\n            if i % 2 == 1:\n                try:\n                    temp_list[i] = common_data[temp_list[i]]\n                except:\n                    logging.error('Parameter substitution failed.\\n\\tVariable to substitute:\\t'.decode('utf-8') + 
str(temp_list[i]))\n                    return False\n        #return the substituted str\n        return ''.join(temp_list)\n    \n    else:\n        return temp_str\n    \n\n#takes one row of interface data from the excel sheet as a list and returns it as a usable dict\ndef excel_row_handle(row_value):\n\n    request_value = {}\n    request_value['instruction'] = row_value[0]\n    request_value['tag'] = row_value[1]\n    request_value['interfacestatus'] = row_value[2]\n    request_value['method'] = row_value[3]\n    request_value['url'] = row_value[4]\n    #substitute parameters in the body and convert it to json\n    temp_body_value = var_convert_para(row_value[5])\n    if temp_body_value != False:\n        temp_body_json = str_to_json(temp_body_value)\n        if temp_body_json != False:\n            request_value['body'] = temp_body_json\n        else:\n            logging.error( 'Interface '.decode('utf-8') + request_value['instruction'] + ':' + request_value['tag'] + ' body data json conversion error'.decode('utf-8'))\n            return False\n    else:\n        logging.error('Interface '.decode('utf-8') + request_value['instruction'] + ':' + request_value['tag'] + ' body data parameter substitution error'.decode('utf-8'))\n        return False\n    #convert headers to json\n    temp_headers_json = str_to_json(row_value[6])\n    if temp_headers_json != False:\n        request_value['headers'] = temp_headers_json\n    else:\n        logging.error( 'Interface '.decode('utf-8') + request_value['instruction'] + ':' + request_value['tag'] + ' headers data json conversion error'.decode('utf-8'))\n        return False\n\n    request_value['checkpoint'] = row_value[7]\n\n    return request_value\n\n#read all data from the sheet with the given path and sheet name\ndef excel_sheet_value(path, sheet_name):\n\n    # instantiate xlrd_handle\n    sheet_obj = xlrd_handle(path, sheet_name)\n\n    workbook_status = sheet_obj.open_workbook()\n    if workbook_status == False:\n        return False\n    sheet_status = sheet_obj.open_sheet()\n    if sheet_status == False:\n        return False\n\n    # fetch all data from the given sheet and return it\n    sheet_value = sheet_obj.value_to_array()\n    return sheet_value","repo_name":"testervic/auto_test","sub_path":"public/api_test/api_global.py","file_name":"api_global.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24653983921","text":"import image_editor\nimport classification_model\nimport numpy as np\nimport cv2\nimport glob\n\n\ndef classify_video_frames(model, gridsize=3, output_filename='output.avi', label_binarizer=None):\n\n    # At least .mp4 video files work\n    filename = input('Enter video file name to edit: ')\n    capture = cv2.VideoCapture(filename)\n\n    # Check if camera opened successfully\n    if (capture.isOpened() is False):\n        print(\"Error opening video stream or file\")\n\n    # Default resolutions of the frame are obtained. The default resolutions are system dependent.\n    # We convert the resolutions from float to integer.\n    frame_width = int(capture.get(3))\n    frame_height = int(capture.get(4))\n\n    # Define the codec and create VideoWriter object. The output is stored in 'outpy.avi' file.\n    fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n    out = cv2.VideoWriter(output_filename, fourcc, 10.0, (frame_width, frame_height))\n\n    # Read until video is completed\n    while(capture.isOpened()):\n        # Capture frame-by-frame\n        ret, frame = capture.read()\n        if ret is True:\n\n            # Display the resulting frame\n            cv2.imshow('Frame', frame)\n\n            # Edit the frame according to the model's grid predictions\n            edited_frame = image_editor.split_and_predict_grid_images(model, frame, gridsize, first_call=True)\n            # write the edited frame\n            out.write(edited_frame)\n\n            # Press Q on keyboard to exit\n            if cv2.waitKey(25) & 0xFF == ord('q'):\n                break\n\n        # Break the loop\n        else:\n            break\n    # When everything done, release the video capture and video write objects\n    capture.release()\n    out.release()\n    
# Closes all the frames\n cv2.destroyAllWindows()\n\n return\n\n\ndef load_and_resize_images_from_paths(filepaths):\n\n images = []\t\n for imagepath in filepaths:\n img = cv2.imread(imagepath)\n if img.shape[0] != 224:\n img = cv2.resize(img, (224,224))\n img = np.array(img)\n images.append(img)\n\n return images\n\n\ndef load_image_paths(globPath, target, test_data=False):\n\n image_files = []\n for filename in glob.glob(globPath):\n image_files.append(filename)\n\n filepaths = []\n targets = []\n for file in image_files:\n filepaths.append(file)\n targets.append(target)\n\n # For testing 500 first loaded images\n if test_data is True:\n filepaths = filepaths[:500]\n targets = targets[:500]\n\n return filepaths, targets\n\n\ndef main():\n\n input_str = str(input('Train or Load model (Train/Load): '))\n\n if input_str == \"Train\":\n\n input_str = str(input('Test (y/n): '))\n if input_str == \"y\":\n test = True\n elif input_str == \"n\":\n test = False\n\n # Water images\n target = \"water\"\n path = 'water_images/*.jpg'\n waterimagepaths, water_targets = load_image_paths(path, target, test)\n water_images = load_and_resize_images_from_paths(waterimagepaths)\n\n # Other images\n target = \"other\"\n path = 'ResizedImages224x224/*.jpg'\n other_images_paths, other_targets = load_image_paths(path, target, test)\n other_images = load_and_resize_images_from_paths(other_images_paths)\n\n # Join the images\n images = water_images + other_images\n targets = water_targets + other_targets\n images = np.array(images)\n\n # binarize targets\n label_binarizer = classification_model.binarize_targets(targets)\n\n # Generate model\n shape = images[0].shape\n model = classification_model.generate_model(shape)\n\n # Train the model\n weights_filepath = \"weights.best.hdf5\"\n history = classification_model.train_model(images, targets, model, label_binarizer, weights_filepath)\n\n elif input_str == \"Load\":\n\n model = classification_model.generate_model((224, 224, 3))\n try:\n weights_filepath = \"weights.best.hdf5\"\n model.load_weights(weights_filepath)\n except IOError:\n print(\"no weights file with the name '{}'\".format(weights_filepath))\n\n else:\n print(\"Wrong input argument\")\n\n if model is not None:\n while True:\n input_str = str(input('Classify video? (y/n):')) \n\n if input_str == 'y':\n\n output_filename = str(input('output filename (.avi will be added): '))\n output_filename = output_filename + \".avi\"\n\n if output_filename == '':\n output_filename = 'output.avi'\n\n gridsize = input('input gridsize (x*x): ')\n try:\n gridsize = int(gridsize)\n except ValueError:\n print(\"Cannot cast grid size, setting default 3\")\n gridsize = 3\n pass\n\n classify_video_frames(model, gridsize, output_filename)\n\n else:\n break\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Bobojambo/RR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20688558016","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals # isort:skip\r\n\r\n# Bibliotecas de terceiros\r\nfrom django.conf.urls import url\r\n\r\n# Modulos locais\r\nfrom . 
import views\r\n\r\n# função para definir padrões de URL em Django.\r\n\r\nurlpatterns = [\r\n url(r'^admin/$', views.perfil_admin, name='perfil_admin'),\r\n url(r'^comarcas/$', views.perfil_comarcas, name='perfil_comarcas'),\r\n url(r'^atualizacoes/$', views.atualizacoes, name='atualizacoes_perfil'),\r\n url(r'^editar/$', views.editar_perfil, name='editar_perfil'),\r\n url(r'^editar/senha/$', views.editar_senha, name='editar_senha'),\r\n url(r'^editar/email/$', views.editar_email, name='editar_email'),\r\n url(r'^editar/configurar-visualizacao-chat-por-atuacao/$', views.configurar_visualizacao_chat_por_atuacao,\r\n name='configurar_visualizacao_chat_por_atuacao'),\r\n url(r'^editar/foto/$', views.editar_foto, name='editar_foto'),\r\n url(r'^editar/senha-eproc/$', views.editar_senha_eproc, name='editar_senha_eproc'),\r\n url(r'^editar/usuario-projudi/$', views.editar_usuario_projudi, name='editar_usuario_projudi'),\r\n url(r'^editar/#tab-eproc', views.editar_perfil, name='editar_perfil_defensor'),\r\n url(r'^config/$', views.get_config_situacoes_sigilosas, name='get_config_situacoes_sigilosas'),\r\n # url(r'^salvar/$', views.salvar_perfil, name='salvar_perfil'),\r\n]\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"perfil/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3421394285","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\n\nfrom kazoo.client import KazooClient\n\ntry:\n from ansible.utils.vault import VaultLib\nexcept ImportError:\n # Ansible 2.0 has changed the vault location\n from ansible.parsing.vault import VaultLib\n\nos.system(\"docker-compose up -d zk\")\nvault = VaultLib(open(os.path.expanduser(\"~/.vault-password.nestor\")).read().strip())\nDEFAULT_HOSTS = os.environ.get(\"ZK_HOST\", \"localhost:2181\")\n\nzk = KazooClient(hosts=DEFAULT_HOSTS)\nzk.start()\ndir = 'config'\npath = '/nestor/config'\nfor root, dirs, files in os.walk(dir):\n outroot = root.replace(dir, \"\").strip(\"/\")\n outroot = os.path.join(path, outroot)\n for name in files:\n inpath = os.path.join(root, name)\n outpath = os.path.join(outroot, name)\n zk.ensure_path(outpath)\n with open(inpath) as f:\n data = f.read()\n if data.startswith(\"$ANSIBLE_VAULT\"):\n data = vault.decrypt(data)\n zk.set(outpath, data)\n print(\"created\", outpath, \":\")\n print(data)\n","repo_name":"tardyp/nestor-compose","sub_path":"provision.py","file_name":"provision.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41427484168","text":"from common import ScModule, ScKeynodes, ScPythonEventType\nfrom keynodes import Keynodes\nfrom TextGenAgent import TextGenAgent\n\nfrom sc import *\n\n\nclass TextGenModule(ScModule):\n\n def __init__(self):\n ScModule.__init__(\n self,\n ctx=__ctx__,\n cpp_bridge=__cpp_bridge__,\n keynodes=[\n ],\n )\n\n def OnInitialize(self, params):\n print('Initialize chinese text generation module') \n agent = TextGenAgent(self) \n fa_addr = self.ctx.HelperResolveSystemIdtf(\"question_generate_text\", ScType.NodeConstClass) \n agent.Register(fa_addr, ScPythonEventType.AddOutputEdge) \n\n def OnShutdown(self): \n print('Shutting down text generation module') \n\n\nservice = 
TextGenModule()\nservice.Run()\n","repo_name":"qian2020/BSUIRproject","sub_path":"problem-solver/py/services/generateText/TextGenModule.py","file_name":"TextGenModule.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70124819133","text":"from typing import Dict, Sequence\n\nfrom deepvariant import variant_caller\nfrom deepvariant.protos import deepvariant_pb2\nfrom deepvariant.python import allelecounter\n\n\nclass VerySensitiveCaller(variant_caller.VariantCaller):\n \"\"\"Call variants and gvcf records from an AlleleCounter.\"\"\"\n\n def __init__(self, options, use_cache_table=True, max_cache_coverage=100):\n super(VerySensitiveCaller, self).__init__(\n options=options,\n use_cache_table=use_cache_table,\n max_cache_coverage=max_cache_coverage)\n\n def get_candidates(\n self, allele_counters: Dict[str, allelecounter.AlleleCounter],\n sample_name: str) -> Sequence[deepvariant_pb2.DeepVariantCall]:\n # AlleleCounter doesn't have copy constructor therefore we pass\n # allele counts only (which is a member of AlleleCounter).\n allele_counts = {\n sample_id: allele_counters[sample_id].counts()\n for sample_id in allele_counters\n }\n return self.cpp_variant_caller.calls_from_allele_counts(\n allele_counts, sample_name)\n\n def get_candidate_positions(\n self, allele_counters: Dict[str, allelecounter.AlleleCounter],\n sample_name: str):\n allele_counts = {\n sample_id: allele_counters[sample_id].counts()\n for sample_id in allele_counters\n }\n return self.cpp_variant_caller.call_positions_from_allele_counts(\n allele_counts, sample_name)\n","repo_name":"google/deepvariant","sub_path":"deepvariant/very_sensitive_caller.py","file_name":"very_sensitive_caller.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":2970,"dataset":"github-code","pt":"78"} +{"seq_id":"135023906","text":"A, B = input().split()\nA = A[::-1]\nB = B[::-1]\nLA = list(map(int, A))\nLB = list(map(int, B))\ngap = len(LA) - len(LB)\nif (gap > 0):\n while gap != 0:\n LB.append(0)\n gap -= 1\nelif (gap < 0):\n while gap != 0:\n LA.append(0)\n gap += 1\ni = 0\nanswer = []\nup = 0\nwhile i < len(LA) or i < len(LB):\n answer.append(up)\n if LA[i] + LB[i] + up >= 10:\n up = 1\n else:\n up = 0\n answer[i] += (LA[i] + LB[i] - (up * 10))\n i += 1\nif (up == 1):\n answer.append(1)\nanswer.reverse()\nprint(''.join(str(s) for s in answer))","repo_name":"YunDongHwan/baekjoon","sub_path":"BasicMath1/10757.py","file_name":"10757.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23500818454","text":"import torch\nfrom typing import Sequence, Union\n\nfrom ignite.metrics import Metric\nfrom ignite.metrics.metric import sync_all_reduce, reinit__is_reduced\nfrom ignite.exceptions import NotComputableError\n\n\nclass ElementwiseMae(Metric):\n \"\"\"\n Calculates the element-wise mean absolute error.\n \"\"\"\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._elementwise_sum_abs_errors = None\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n # Unpack Sequence[Tensor] input\n y_pred, y = output\n\n # Initialize elementwise errors as zeros\n if self._elementwise_sum_abs_errors is None:\n self._elementwise_sum_abs_errors = torch.zeros_like(y_pred[0])\n\n # Sum absolute errors element-wise\n absolute_errors = 
torch.abs(y_pred - y.view_as(y_pred))\n        absolute_errors = torch.sum(absolute_errors, 0)\n\n        self._elementwise_sum_abs_errors += absolute_errors\n        self._num_examples += y.shape[0]\n\n    @sync_all_reduce(\"_elementwise_sum_abs_errors\", \"_num_examples\")\n    def compute(self) -> Union[float, torch.Tensor]:\n        if self._num_examples == 0:\n            raise NotComputableError(\n                \"ElementwiseMae must have at least one \"\n                \"example before it can be computed.\"\n            )\n\n        # Average by number of examples to produce elementwise MAE\n        return self._elementwise_sum_abs_errors / self._num_examples\n","repo_name":"aritraghsh09/GaMPEN","sub_path":"ggt/metrics/elementwise_mae.py","file_name":"elementwise_mae.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"71915619452","text":"import math\n\ndef integer_to_binary(x,off):\n    y=x\n    ans=\"\"\n    while(y>0):\n        ans+=str(y%2)\n        y=y//2\n    ans=ans[::-1]\n    # pad with leading zeros up to the requested width 'off'\n    while(len(ans)<off):\n        ans=\"0\"+ans\n    return ans\n\n# convert the fractional part of x to a fixed-width binary string\ndef decimal_to_binary(x,off):\n    ans=\"\"\n    for i in range(0,off):\n        x*=2\n        if(x>=1):\n            ans+=\"1\"\n            x-=1\n        else:\n            ans+=\"0\"\n    return ans\n\ndef binary_to_integer(x):\n    x=x[::-1]\n    sum=0\n    for i in range(0,len(x)):\n        sum+=int(x[i])*math.pow(2,i)\n    return sum\n\ndef binary_to_decimal(x):\n    sum=0\n    for i in range(1,len(x)+1):\n        sum+=int(x[i-1])*math.pow(2,-i)\n    return sum \n\ndef binary_to_float(x):\n    if(int(x)!=0):\n        exponent=binary_to_integer(x[1:12])-1023\n        mantissa=x[12:]\n        number=binary_to_integer(\"1\"+mantissa[:int(exponent)])\n        decimal=binary_to_decimal(mantissa[int(exponent):])\n        ans=number+decimal\n        if(x[0]==\"1\"):\n            ans= -1*ans\n        return ans\n    else:\n        return 0\n\ndef floating_to_binary(x):\n    if(x!=0):\n        ans=\"\"\n        sign=\"0\"\n        if(x<0):\n            sign=\"1\"\n            x=abs(x)\n        integer_part=integer_to_binary(int(x),1)\n        mantissa=decimal_to_binary(x-int(x),53-len(integer_part))\n        normalised_mantissa=\"\"\n        if(len(integer_part)>1):\n            normalised_mantissa=integer_part[1:]+mantissa\n        else:\n            normalised_mantissa=mantissa\n        exponent=len(integer_part)-1+1023\n        exponent=integer_to_binary(exponent,11)\n        ans=sign+exponent+normalised_mantissa\n        return ans\n    else:\n        return \"0\"*64\n    \n\n    \ndef initialize_main_memory(main_memory,no_of_blocks,size_of_block):\n    if(main_memory=={}): \n        for i in range(0,int(no_of_blocks)):\n            b={}\n            for j in range(0,int(size_of_block)):\n                w=\"0\"*64\n                b[integer_to_binary(j,math.log2(size_of_block))]=w\n            main_memory[integer_to_binary(i,math.log2(no_of_blocks))]=b\n\n\n    \ndef print_main_memory(main_memory):\n    print(\"main memory\")\n    for i in main_memory:\n        for j in main_memory[i]:\n            print(i+\" \"+j,end=\" \")\n            print(main_memory[i][j])\n    print()\n\ndef search_in_main_memory(main_memory,no_of_blocks):\n    s=0\n    for i in main_memory:\n        s=len(main_memory[i])\n        break\n    while(True):\n        address=input(\"Enter the address: \")\n        if(len(address)==math.log2(s*no_of_blocks)):\n            break\n        else:\n            print(\"address requires \"+str(int(math.log2(s*no_of_blocks)))+\" bits\")\n    x=main_memory[address[:int(math.log2(no_of_blocks))]][address[int(math.log2(no_of_blocks)):]]\n    print(x)\n    print(binary_to_float(x))\n\n","repo_name":"chintanpuggalok/2-level-cache","sub_path":"helpercode.py","file_name":"helpercode.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6023474258","text":"from flask import Flask, request\ntry:\n    from urllib.parse import urlparse\nexcept ImportError:\n    from urlparse import urlparse\nimport os\nimport json\nimport re\nimport serial\nser = serial.Serial('/dev/ttyAMA0')\nser.baudrate = 
9600\n\napp = Flask(__name__)\n\ndef parse_request(req):\n \"\"\"\n Parses application/json request body data into a Python dictionary\n \"\"\"\n payload = float(req.get_data())\n return payload\n\n@app.route('/robovac/', methods = ['POST'])\ndef index():\n payload = request.get_data()\n payload = str(payload)\n print(\"payload {}\".format(payload))\n matched = re.findall(r\"(^\\d*)([a-z]+$)\",payload)\n print(\"match: {}\".format(matched))\n if len(matched) != 0:\n out = matched[0][1]\n if len(matched[0][0]) > 0:\n out *= int(matched[0][0]) # string multiplication\n \n print(\"payload out {}\".format(out))\n ser.write(payload)\n # f = open(\"/home/pi/clapper_wait.txt\", 'w')\n # f.write(payload)\n # f.close()\n return(\"Success\")\n else:\n return(\"No input\")\n\nport = os.environ['CLAPPER_PORT']\nip = os.environ['IP_ADDRESS']\n\n\napp.run(host=ip, port=port)\n","repo_name":"devincody/IRbeacon","sub_path":"web_hook_controller.py","file_name":"web_hook_controller.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70357187451","text":"import discord\nimport openai\n\n# Set your OpenAI API key\napi_key = \"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\"\n\n# Initialize the OpenAI API client\nopenai.api_key = api_key\n\nintents = discord.Intents.default()\nintents.typing = False\nintents.presences = False\n\nclient = discord.Client(intents=intents)\n\n@client.event\nasync def on_ready():\n print(f'Logged in as {client.user.name}')\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return # Avoid responding to our own messages\n \n # Check if the message is from the target user (replace 'target_user_id' with the user's actual ID)\n target_user_id = '484378655561351172'\n if str(message.author.id) == target_user_id:\n # Use the user's message as a prompt for GPT-3\n user_message = message.content\n \n # Generate a response from GPT-3\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt=user_message,\n temperature=0.7,\n max_tokens=50,\n )\n \n # Send the GPT-3 response to the Discord channel\n await message.channel.send(response.choices[0].text)\n return\n\n# Replace 'YOUR_BOT_TOKEN' with the token you copied earlier\nclient.run('MTE1NTU4OTU3Njk0MTkxNjE2MA.GXp4a-.HaynWd11gGnIvfC5WiN-XWHU6FKLfMop0AkmM4')\n","repo_name":"TeamSphere/sphere","sub_path":"discord/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"40075885600","text":"from functools import wraps\r\nfrom Common.AuditProcess.Auditinginterface import AuditingInterface\r\n\r\n\r\n\r\nclass Auditordecorator(object):\r\n def __init__(self):\r\n self.auditobj=AuditingInterface()\r\n\r\n\r\n def __call__(self, orginal_func):\r\n @wraps(orginal_func)\r\n def wrapped_f(*args):\r\n if 'workflow' in args[2] and args[3] not in ('S','E','RC','PC','WC'):\r\n workflow_exec_id = self.auditobj.auditwriterable(args[2],args[3],args[4])\r\n else:\r\n workflow_exec_id = self.auditobj.auditwriterable(args[2], args[3])\r\n args=(*args,workflow_exec_id)\r\n retval=orginal_func(*args)\r\n if 'step_cd' in args[2]:\r\n exec_id,no_of_record_updated = self.auditobj.auditupdatable(workflow_exec_id,'S',args[2],args[3],retval)\r\n elif 'task_cd' in args[2]:\r\n exec_id, no_of_record_updated = self.auditobj.auditupdatable(workflow_exec_id, 'T', args[2], args[3],\r\n 
retval)\r\n elif 'workflow' in args[2]:\r\n exec_id, no_of_record_updated = self.auditobj.auditupdatable(workflow_exec_id, 'W', args[2], args[3],\r\n retval)\r\n if workflow_exec_id is None:\r\n workflow_exec_id=exec_id\r\n return retval,workflow_exec_id\r\n return wrapped_f\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"lokeshmittal08/OrchestrationTool","sub_path":"Common/AuditProcess/Auditordecorator.py","file_name":"Auditordecorator.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39545342415","text":"import sys\r\n\r\n\r\ndef bellman_ford(start):\r\n\r\n dist = [float(\"Inf\")] * n\r\n dist[start] = 0\r\n\r\n for i in range(n - 1):\r\n for u, v, w in edges:\r\n if dist[v] > dist[u] + w:\r\n dist[v] = dist[u] + w\r\n\r\n for u, v, w in edges:\r\n if dist[v] > dist[u] + w:\r\n print(\"possible\")\r\n return\r\n print(\"not possible\")\r\n\r\n\r\ndata = sys.stdin.read().split(\"\\n\")\r\nl = 0\r\ntests = int(data[l])\r\nl += 1\r\nfor test in range(tests):\r\n n, m = map(int, data[l].split())\r\n l += 1\r\n edges = []\r\n for i in range(m):\r\n u, v, w = map(int, data[l].split())\r\n edges.append([u, v, w])\r\n l += 1\r\n bellman_ford(0)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\ndef build_graph(n, m, l):\r\n graph = {}\r\n\r\n for i in range(0, n):\r\n graph[i] = {}\r\n\r\n for i in range(0, m):\r\n a, b, w = map(int, data[l].split())\r\n l += 1\r\n graph[a][b] = w\r\n\r\n return graph, l\r\n\r\ndef bellman_ford():\r\n\r\n for i in range(n-1):\r\n for u in graph:\r\n for v in graph[u]:\r\n if dist[v] > dist[u] + graph[u][v]:\r\n dist[v] = dist[u] + graph[u][v]\r\n pre[v] = u\r\n\r\n for u in graph:\r\n for v in graph[u]:\r\n if dist[v] > dist[u] + graph[u][v]:\r\n print(\"possible\")\r\n return\r\n print(\"not possible\")\r\n\r\n\r\n\r\n\r\n for i in range(1, n):\r\n dist[i] = float('Inf')\r\n pre[i] = None\r\n bellman_ford()\r\n\"\"\"\r\n","repo_name":"maspri/Lab-Efficient-Algorithms","sub_path":"Sheet4/Task05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19390463551","text":"import MDAnalysis\nimport MDAnalysis.analysis.contacts\nfrom MDAnalysis.tests.datafiles import PSF,DCD\n\n# example trajectory (transition of AdK from closed to open)\nu = MDAnalysis.Universe(PSF,DCD)\n\n# crude definition of salt bridges as contacts between NH/NZ in ARG/LYS and OE*/OD* in ASP/GLU.\n# You might want to think a little bit harder about the problem before using this for real work.\nsel_basic = \"(resname ARG or resname LYS) and (name NH* or name NZ)\"\nsel_acidic = \"(resname ASP or resname GLU) and (name OE* or name OD*)\"\n\n# reference groups (first frame of the trajectory, but you could also use a separate PDB, eg crystal structure)\nacidic = u.selectAtoms(sel_acidic)\nbasic = u.selectAtoms(sel_basic)\n\n# set up analysis of native contacts (\"salt bridges\"); salt bridges have a distance <6 A\nCA1 = MDAnalysis.analysis.contacts.ContactAnalysis1(u, selection=(sel_acidic, sel_basic), refgroup=(acidic, basic), radius=6.0, outfile=\"qsalt.dat\")\n\n# iterate through trajectory and perform analysis of \"native contacts\" q\n# (force=True ignores any previous results, force=True is useful when testing)\nCA1.run(force=True)\n\n# plot time series q(t) [possibly do \"import pylab; pylab.clf()\" do clear the figure first...]\nCA1.plot(filename=\"adk_saltbridge_contact_analysis1.pdf\", 
linewidth=3, color=\"blue\")\n\n# or plot the data in qsalt.dat yourself.\nCA1.plot_qavg(filename=\"adk_saltbridge_contact_analysis1_matrix.pdf\")","repo_name":"PatrickObrien3/SULI2014","sub_path":"contact_example.py","file_name":"contact_example.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15143160608","text":"from argparse import ArgumentParser\nimport logging\n\nimport pandas as pd\n\nfrom component_utils.general import create_artifact_folder\n\nlogger = logging.getLogger()\n\ndef go(args):\n artifact_path = create_artifact_folder(__file__)\n\n raw_data = pd.read_csv(args.input_path, sep = \"\\t\", index_col = 0)\n raw_data.drop(columns = [\"Gender\", \"Account Type\"], inplace = True)\n raw_data.rename(columns = {\"Thread Entry Type\": \"Tweet Type\"}, inplace = True)\n\n demo = pd.read_csv(args.demo_path)\n\n news = pd.read_html(\"https://memeburn.com/2010/09/the-100-most-influential-news-media-twitter-accounts/\", header = 0)[0]\n news[\"@name\"] = news[\"@name\"].str.replace(\"@\", \"\").str.lower()\n news_users = {u.lower(): \"news\" for u in news[\"@name\"]}\n\n brands_df = pd.read_excel(\"../../data/brandfulllist.xlsx\", sheet_name = \"All 1558\", usecols = [\"Twitter Handle\"])\n brands = (\n brands_df[brands_df[\"Twitter Handle\"] != \"NOT AVAILABLE\"]\n [\"Twitter Handle\"]\n .str.replace(\"@\", \"\")\n .str.lower()\n )\n brands_map = {k: \"business\" for k in brands.values}\n\n companies_df = pd.read_excel(\"../../data/companyfulllist.xlsx\", usecols = [\"TwitterHandle\", \"TwitterHandle2\"])\n companies = (\n pd.concat([companies_df[\"TwitterHandle\"], companies_df[\"TwitterHandle2\"]], axis = 0)\n .dropna()\n .str.replace(\"@\", \"\")\n .str.lower()\n )\n companies_map = {k: \"business\" for k in companies.values}\n\n demo[\"Account Type\"] = (\n demo[\"followers_count\"].apply(lambda x: \"influencer\" if x > args.influencer_thresh else \"core\")\n .mask(demo[\"Account Type\"] != \"individual\")\n .combine_first(demo[\"Account Type\"])\n )\n\n demo[\"Account Type\"] = demo[\"screen\"].str.lower().map(brands_map).combine_first(demo[\"Account Type\"])\n demo[\"Account Type\"] = demo[\"screen\"].str.lower().map(companies_map).combine_first(demo[\"Account Type\"])\n demo[\"Account Type\"] = demo[\"screen\"].str.lower().map(news_users).combine_first(demo[\"Account Type\"])\n demo[\"Gender\"] = demo[\"Gender\"].mask(~demo[\"Account Type\"].isin([\"core\", \"influencer\"]))\n\n data = raw_data.merge(right = demo, how = \"left\", left_on = \"Author\", right_on = \"screen\")\n data = data.merge(right = demo, how = \"left\", left_on = \"Thread Author\", right_on = \"screen\", suffixes = (\"\", \"_originator\"))\n \n data.to_csv(artifact_path / \"metoo_data.csv\", sep = \"\\t\")\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"mode\", type = str, help = \"Run mode (local or remote)\")\n parser.add_argument(\"input_path\", type = str, help = \"Path to data\")\n parser.add_argument(\"demo_path\", type = str, help = \"Path to inferred author demographics\")\n parser.add_argument(\"influencer_thresh\", type = int, help = \"follower count threshold for influencer designation\")\n args = parser.parse_args()\n\n 
go(args)","repo_name":"seano660/metoo_research","sub_path":"components/postprocess_data/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15104578004","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom multiconfpy import observables as obs\n\n\nclass GroundState:\n \"\"\"Class to manipulate output data from imag time computations\n\n This class provide an interface to load data from numerical simulations\n and compute some important observables. Some key quantities, as natural\n occupations and orbitals are computed directly on initialization due to\n their central role in many-body physics analysis. The grid and trap can\n also be consulted through some attributes kept in initialization. Below\n some important attributes corresponding to specific job set by method\n ``self.lock_job``\n\n Main Attributes\n ---------------\n npar - number of particles\n norb - number of orbitals\n npts - number of grid points\n grid - numpy.array with grid points\n rho1 - numpy.ndarray([norb, norb]) raw-orbitals 1-body density matrix\n rho2 - numpy.array(norb ** 4) raw-orbitals 2-body density matrix\n occ - numpy.array(norb) natural occupation fraction\n natorb - numpy.ndarray([norb, npts]) natural orbitals in rows\n energy - numpy.array(njobs) energy per particle for every job\n njobs - number of jobs found\n\n Better documentation on routines to compute observables can be found in\n `multiconfpy.observables` which provides the core implementation and is\n the backend for this class methods.\n\n \"\"\"\n\n def __init__(self, files_prefix, dir_path):\n \"\"\"Struct for many-body state loading from main program output files\n\n Parameters\n ----------\n `files_prefix` : ``str``\n string with prefix common to all files\n `dir_path` : ``str``\n string with path containing the files\n \"\"\"\n self.__dir_path = dir_path\n self.__files_prefix = files_prefix\n self.path_prefix = os.path.join(dir_path, files_prefix)\n eq_setup = np.loadtxt(self.path_prefix + \"_mctdhb_parameters.dat\")\n if eq_setup.ndim == 1:\n eq_setup = eq_setup.reshape(1, eq_setup.size)\n self.eq_setup = eq_setup\n self.njobs = eq_setup.shape[0]\n self.colormap = \"gnuplot\"\n self.lock_job(1)\n\n def lock_job(self, job_id):\n \"\"\"\n Lock on specific job, reading files pattern `*_job?*` where the `?`\n mark is integer number given in argument `job_id`. 
The coefficients\n and orbitals read must correspond to stationary state obtained from\n line `job_id` of parameters file\n The following object attributes are set:\n `self.npar`\n `self.norb`\n `self.npts`\n `self.xi`\n `self.xf`\n `self.dx`\n `self.grid`\n `self.coef`\n `self.trap`\n `self.orbitals`\n `self.occ`\n `self.natorb`\n \"\"\"\n if job_id > self.njobs:\n print(\"job {} not available\".format(job_id))\n print(\"Locked in job {} of {}\".format(self.job_id, self.njobs))\n return\n self.job_id = job_id\n coef_fname = self.path_prefix + \"_job{}_coef.dat\".format(job_id)\n orb_fname = self.path_prefix + \"_job{}_orb.dat\".format(job_id)\n pot_fname = self.path_prefix + \"_job{}_obpotential.dat\".format(job_id)\n self.orbitals = np.loadtxt(orb_fname, dtype=np.complex128).T\n self.coef = np.loadtxt(coef_fname, dtype=np.complex128)\n self.trap = np.loadtxt(pot_fname)\n row = job_id - 1\n self.npar = int(self.eq_setup[row, 0])\n self.norb = int(self.eq_setup[row, 1])\n self.npts = int(self.eq_setup[row, 2])\n self.xi = self.eq_setup[row, 3]\n self.xf = self.eq_setup[row, 4]\n self.dx = (self.xf - self.xi) / (self.npts - 1)\n self.grid = np.linspace(self.xi, self.xf, self.npts)\n self.g = self.eq_setup[row, 9]\n self.energy = self.eq_setup[row, 9]\n self.rho1 = self.onebody_dm()\n self.rho2 = self.twobody_dm()\n self.occ = self.natural_occupations()\n self.natorb = self.natural_orbitals()\n\n def locked_job_info(self):\n \"\"\"Print on screen the current job information\"\"\"\n print(\n \"\\nJob setup files: {} at dir {}\".format(\n self.__files_prefix, self.__dir_path\n )\n )\n print(\n \"Locked on job {} of {} with parameters:\".format(\n self.job_id, self.njobs\n )\n )\n print(\"\\t(n)particles : {}\".format(self.npar))\n print(\"\\t(m)orbitals : {}\".format(self.norb))\n print(\"\\tg : {}\\n\".format(self.g))\n print(\"\\tenergy/n : {}\\n\".format(self.energy))\n\n def onebody_dm(self):\n return obs.onebody_dm(self.npar, self.norb, self.coef)\n\n def twobody_dm(self):\n return obs.twobody_dm(self.npar, self.norb, self.coef)\n\n def natural_occupations(self):\n return obs.natural_occupations(self.npar, self.rho1)\n\n def natural_orbitals(self):\n return obs.natural_orbitals(self.rho1, self.orbitals)\n\n def occupation_entropy(self):\n return obs.occupation_entropy(self.occ)\n\n def density(self):\n return obs.density(self.occ, self.natorb)\n\n def condensate_density(self):\n return abs(self.natorb[0]) ** 2\n\n def subset_density(self, subset_ind):\n return obs.density(self.occ, self.natorb, subset_ind)\n\n def momentum_density(self, kmin=-10, kmax=10, bound=0, gf=7):\n return obs.momentum_density(\n self.occ, self.natorb, self.dx, kmin, kmax, bound, gf\n )\n\n def subset_momentum_density(\n self, subset_ind, kmin=-10, kmax=10, bound=0, gf=7\n ):\n return obs.momentum_density(\n self.occ, self.natorb, self.dx, kmin, kmax, bound, gf, subset_ind\n )\n\n def position_rdm(self):\n return obs.position_rdm(self.occ, self.natorb)\n\n def position_onebody_correlation(self):\n return obs.position_onebody_correlation(self.occ, self.natorb)\n\n def momentum_rdm(self, kmin=-10, kmax=10, bound=0, gf=7):\n return obs.momentum_rdm(\n self.occ, self.natorb, self.dx, kmin, kmax, bound, gf\n )\n\n def momentum_onebody_correlation(self, kmin=-10, kmax=10, bound=0, gf=7):\n return obs.momentum_onebody_correlation(\n self.occ, self.natorb, self.dx, kmin, kmax, bound, gf\n )\n\n def position_mutual_probability(self):\n return obs.position_mutual_probability(\n self.npar, self.rho2, self.orbitals\n )\n\n def 
momentum_mutual_probability(self, kmin=-10, kmax=10, bound=0, gf=7):\n return obs.momentum_mutual_probability(\n self.npar, self.rho2, self.orbitals, self.dx, kmin, kmax, bound, gf\n )\n\n def position_twobody_correlation(self):\n return obs.position_twobody_correlation(\n self.npar, self.rho2, self.orbitals, self.density()\n )\n\n def momentum_twobody_correlation(self, kmin=-10, kmax=10, bound=0, gf=7):\n den = self.momentum_density(kmin, kmax, bound, gf)[1]\n extra_args = (den, self.dx, kmin, kmax, bound, gf)\n return obs.momentum_twobody_correlation(\n self.npar, self.rho2, self.orbitals, *extra_args\n )\n\n def average_onebody_operator(self, op_action, args=(), subset_ind=None):\n \"\"\"\n Compute average of an arbritrary extensive 1-body operator\n\n Parameters\n ----------\n `op_action` : ``callable``\n function which apply the operator to a state\n see examples ``multiconfpy.operator_action``\n `args` : ``tuple``\n arguments needed for specific for each evaluation\n `subset_ind` : ``list[int] / numpy.array(dtype=int)``\n restrict evaluation of average to subset of natural orbitals\n by default no restriction is applied (``None``)\n `0 <= subset_ind[i] < self.norb` without repetitions if provided\n\n Return\n ------\n ``float``\n \"\"\"\n return obs.average_onebody_operator(\n self.occ, self.natorb, op_action, self.dx, args, subset_ind\n )\n\n def covariance(self, op_left, op_right, args_left=(), args_right=()):\n return obs.manybody_operator_covariance(\n self.npar,\n self.rho1,\n self.rho2,\n self.orbitals,\n self.dx,\n op_left,\n op_right,\n args_left,\n args_right,\n )\n\n def plot_density(self, show_trap=False, show_condensate=False):\n den = self.density()\n plt.plot(self.grid, den, color=\"black\", label=\"Gas density\")\n ax = plt.gca()\n ax.set_xlabel(\"position\", fontsize=16)\n ax.set_ylabel(\"Density\", fontsize=16)\n ax.set_xlim(self.xi, self.xf)\n ax.set_ylim(0, den.max() * 1.1)\n if show_trap:\n ax_trap = ax.twinx()\n ax_trap.plot(self.grid, self.trap, color=\"orange\")\n ax_trap.tick_params(axis=\"y\", labelcolor=\"orange\")\n ax_trap.set_ylim(0, self.trap.max() * 1.1)\n ax_trap.set_ylabel(\"Trap Potential\", fontsize=16)\n if show_condensate:\n condensate_orb = abs(self.natural_orbitals()[0]) ** 2\n ax.plot(self.grid, condensate_orb, label=\"Condensate density\")\n ax.legend()\n plt.show()\n\n def plot_momentum_density(self, kmin=-10, kmax=10, bound=0, gf=7):\n k, den = self.momentum_density(kmin, kmax, bound, gf)\n plt.plot(k, den, color=\"black\")\n ax = plt.gca()\n ax.set_xlim(k.min(), k.max())\n ax.set_ylim(0, den.max() * 1.1)\n ax.set_ylabel(\"Momentum distribution\", fontsize=16)\n ax.set_xlabel(\"Fourier frequency\", fontsize=16)\n plt.show()\n\n def __data_image_view(self, data, ext, cmap):\n \"\"\"General routine to plot 2d data within matplotlib imshow\"\"\"\n fig = plt.figure(figsize=(9, 7))\n ax = fig.add_subplot()\n im = ax.imshow(\n abs(data) ** 2,\n extent=ext,\n origin=\"lower\",\n aspect=\"equal\",\n cmap=cmap,\n )\n fig.colorbar(im, ax=ax)\n plt.show()\n\n def imshow_position_abs_rdm(self):\n \"\"\"Absolute square value of `self.position_rdm` mapped to colors\"\"\"\n obcorr = self.position_rdm()\n self.__data_image_view(\n abs(obcorr) ** 2,\n [self.xi, self.xf, self.xi, self.xf],\n self.colormap,\n )\n\n def imshow_momentum_abs_rdm(self, kmin=-10, kmax=10, bound=0, gf=7):\n \"\"\"Absolute square value of `self.momentum_rdm` mapped to colors\"\"\"\n k, obcorr = self.momentum_rdm(kmin, kmax, bound, gf)\n im_min = k.min()\n im_max = k.max()\n 
self.__data_image_view(\n            abs(obcorr) ** 2, [im_min, im_max, im_min, im_max], self.colormap\n        )\n\n    def imshow_position_onebody_correlation(self):\n        \"\"\"Display `self.position_onebody_correlation` mapped to colors\"\"\"\n        obcorr = self.position_onebody_correlation()\n        self.__data_image_view(\n            obcorr,\n            [self.xi, self.xf, self.xi, self.xf],\n            self.colormap,\n        )\n\n    def imshow_momentum_onebody_correlation(\n        self, kmin=-10, kmax=10, bound=0, gf=7\n    ):\n        \"\"\"Display `self.momentum_onebody_correlation` mapped to colors\"\"\"\n        k, obcorr = self.momentum_onebody_correlation(kmin, kmax, bound, gf)\n        im_min = k.min()\n        im_max = k.max()\n        self.__data_image_view(\n            obcorr, [im_min, im_max, im_min, im_max], self.colormap\n        )\n\n    def imshow_position_mutual_probability(self):\n        \"\"\"Display ``self.position_mutual_probability`` mapped to colors\"\"\"\n        mutprob = self.position_mutual_probability()\n        self.__data_image_view(\n            mutprob,\n            [self.xi, self.xf, self.xi, self.xf],\n            self.colormap,\n        )\n\n    def imshow_momentum_mutual_probability(\n        self, kmin=-10, kmax=10, bound=0, gf=7\n    ):\n        \"\"\"Display ``self.momentum_mutual_probability`` mapped to colors\"\"\"\n        k, mutprob = self.momentum_mutual_probability(kmin, kmax, bound, gf)\n        im_min = k.min()\n        im_max = k.max()\n        self.__data_image_view(\n            mutprob, [im_min, im_max, im_min, im_max], self.colormap\n        )\n\n    def imshow_position_twobody_correlation(self):\n        \"\"\"Display ``self.position_twobody_correlation`` mapped to colors\"\"\"\n        tbcorr = self.position_twobody_correlation()\n        self.__data_image_view(\n            tbcorr,\n            [self.xi, self.xf, self.xi, self.xf],\n            self.colormap,\n        )\n\n    def imshow_momentum_twobody_correlation(\n        self, kmin=-10, kmax=10, bound=0, gf=7\n    ):\n        \"\"\"Display ``self.momentum_twobody_correlation`` mapped to colors\"\"\"\n        k, tbcorr = self.momentum_twobody_correlation(kmin, kmax, bound, gf)\n        im_min = k.min()\n        im_max = k.max()\n        self.__data_image_view(\n            tbcorr, [im_min, im_max, im_min, im_max], self.colormap\n        )\n","repo_name":"andriati-alex/1d-mctdhb","sub_path":"multiconfpy/stationary.py","file_name":"stationary.py","file_ext":"py","file_size_in_byte":13028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14726272552","text":"import requests\r\nimport re\r\n\r\ndef getHTMLGet(url):\r\n\ttry:\r\n\t\tr = requests.get(url, timeout = 30)\r\n\t\tr.raise_for_status()\r\n\t\tr.encoding = r.apparent_encoding\r\n\t\treturn r.text\r\n\texcept:\r\n\t\treturn \"\"\r\n\r\ndef parsePage(infoList, html):\r\n\ttry:\r\n\t\tplt = re.findall(r'\"view_price\":\"[\\d.]*\"',html)\r\n\t\ttlt = re.findall(r'\"raw_title\":\".*?\"',html)\r\n\t\tfor i in range(len(plt)):\r\n\t\t\tprice = eval(plt[i].split(':')[1])\r\n\t\t\ttitle = eval(tlt[i].split(':')[1])\r\n\t\t\tinfoList.append([price, title])\r\n\texcept:\r\n\t\tprint(\"\")\r\n\r\ndef printGoodsList(infoList):\r\n\ttplt = \"{:8}\\t{:8}\\t{:32}\"\r\n\tprint(tplt.format(\"No.\", \"Price\", \"Item name\"))\r\n\tcount = 0\r\n\tfor g in infoList:\r\n\t\tcount = count + 1\r\n\t\tprint(tplt.format(count, g[0], g[1]))\r\n\r\ndef main():\r\n\tgoods = '书包'  # search keyword, meaning 'backpack'\r\n\tdepth = 2\r\n\tstart_url = 'https://s.taobao.com/search?q=' +goods\r\n\tinfoList = []\r\n\tfor i in range(depth):\r\n\t\ttry:\r\n\t\t\turl = start_url + '&s=' + str(44*i)\r\n\t\t\thtml = getHTMLGet(url)\r\n\t\t\tparsePage(infoList, 
html)\r\n\t\texcept:\r\n\t\t\tcontinue\r\n\tprintGoodsList(infoList)\r\n\r\nmain()","repo_name":"maoyouyu/Oldyouyu","sub_path":"pachong/taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12030166251","text":"\"\"\"\nFeatureListNamespaceService class\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any, Optional\n\nfrom bson import ObjectId\n\nfrom featurebyte.models.feature_list import FeatureListNamespaceModel\nfrom featurebyte.persistent import Persistent\nfrom featurebyte.routes.catalog.catalog_name_injector import CatalogNameInjector\nfrom featurebyte.schema.feature_list_namespace import FeatureListNamespaceServiceUpdate\nfrom featurebyte.schema.info import (\n EntityBriefInfoList,\n FeatureListNamespaceInfo,\n TableBriefInfoList,\n)\nfrom featurebyte.service.base_document import BaseDocumentService\nfrom featurebyte.service.entity import EntityService, get_primary_entity_from_entities\nfrom featurebyte.service.feature_namespace import FeatureNamespaceService\nfrom featurebyte.service.table import TableService\n\n\nclass FeatureListNamespaceService(\n BaseDocumentService[\n FeatureListNamespaceModel, FeatureListNamespaceModel, FeatureListNamespaceServiceUpdate\n ],\n):\n \"\"\"\n FeatureListNamespaceService class\n \"\"\"\n\n document_class = FeatureListNamespaceModel\n\n def __init__(\n self,\n user: Any,\n persistent: Persistent,\n catalog_id: Optional[ObjectId],\n entity_service: EntityService,\n table_service: TableService,\n feature_namespace_service: FeatureNamespaceService,\n catalog_name_injector: CatalogNameInjector,\n ):\n super().__init__(user, persistent, catalog_id)\n self.entity_service = entity_service\n self.table_service = table_service\n self.feature_namespace_service = feature_namespace_service\n self.catalog_name_injector = catalog_name_injector\n\n async def get_feature_list_namespace_info(\n self, document_id: ObjectId, verbose: bool\n ) -> FeatureListNamespaceInfo:\n \"\"\"\n Get feature list namespace info\n\n Parameters\n ----------\n document_id: ObjectId\n Document ID\n verbose: bool\n Verbose or not\n\n Returns\n -------\n FeatureListNamespaceInfo\n \"\"\"\n _ = verbose\n namespace = await self.get_document(document_id=document_id)\n entities = await self.entity_service.list_documents_as_dict(\n page=1, page_size=0, query_filter={\"_id\": {\"$in\": namespace.entity_ids}}\n )\n primary_entity = get_primary_entity_from_entities(entities)\n\n tables = await self.table_service.list_documents_as_dict(\n page=1, page_size=0, query_filter={\"_id\": {\"$in\": namespace.table_ids}}\n )\n\n # get catalog info\n catalog_name, updated_docs = await self.catalog_name_injector.add_name(\n namespace.catalog_id, [entities, tables]\n )\n entities, tables = updated_docs\n\n # get default feature ids\n feat_namespace_to_default_id = {}\n async for feat_namespace in self.feature_namespace_service.list_documents_as_dict_iterator(\n query_filter={\"_id\": {\"$in\": namespace.feature_namespace_ids}}\n ):\n feat_namespace_to_default_id[feat_namespace[\"_id\"]] = feat_namespace[\n \"default_feature_id\"\n ]\n\n return FeatureListNamespaceInfo(\n name=namespace.name,\n created_at=namespace.created_at,\n updated_at=namespace.updated_at,\n entities=EntityBriefInfoList.from_paginated_data(entities),\n primary_entity=EntityBriefInfoList.from_paginated_data(primary_entity),\n tables=TableBriefInfoList.from_paginated_data(tables),\n 
default_version_mode=namespace.default_version_mode,\n            default_feature_list_id=namespace.default_feature_list_id,\n            dtype_distribution=namespace.dtype_distribution,\n            version_count=len(namespace.feature_list_ids),\n            feature_count=len(namespace.feature_namespace_ids),\n            status=namespace.status,\n            catalog_name=catalog_name,\n            feature_namespace_ids=namespace.feature_namespace_ids,\n            default_feature_ids=[\n                feat_namespace_to_default_id[feat_namespace_id]\n                for feat_namespace_id in namespace.feature_namespace_ids\n            ],\n            description=namespace.description,\n        )\n","repo_name":"featurebyte/featurebyte","sub_path":"featurebyte/service/feature_list_namespace.py","file_name":"feature_list_namespace.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"} +{"seq_id":"19138539664","text":"# ch05/example6.py\n\nimport threading\nimport requests\nimport time\n\nUPDATE_INTERVAL = 0.01\n\nclass MyThread(threading.Thread):\n    def __init__(self, url):\n        threading.Thread.__init__(self)\n        self.url = url\n        self.result = f'{self.url}: Custom timeout'\n\n    def run(self):\n        res = requests.get(self.url)\n        self.result = f'{self.url}: {res.text}'\n\ndef process_requests(threads, timeout=5):\n    def alive_count():\n        alive = [1 if thread.isAlive() else 0 for thread in threads]\n        return sum(alive)\n\n    while alive_count() > 0 and timeout > 0:\n        timeout -= UPDATE_INTERVAL\n        time.sleep(UPDATE_INTERVAL)\n    for thread in threads:\n        print(thread.result)\n\nurls = [\n    'http://httpstat.us/200',\n    'http://httpstat.us/200?sleep=4000',\n    'http://httpstat.us/200?sleep=20000',\n    'http://httpstat.us/400'\n]\n\nstart = time.time()\n\nthreads = [MyThread(url) for url in urls]\nfor thread in threads:\n    thread.setDaemon(True)\n    thread.start()\nprocess_requests(threads)\n\nprint(f'Took {time.time() - start : .2f} seconds')\n\nprint('Done.')\n\n\n'''\ndef alive_count():\n    alive = [1 if thread.isAlive() else 0 for thread in threads]\n    return sum(alive)\n\nwhile alive_count() > 0 and timeout > 0:\n    timeout -= UPDATE_INTERVAL\n    time.sleep(UPDATE_INTERVAL)\n\nafter about 5 seconds timeout will be < 0, because of the \nthread.setDaemon(True)\nWithout daemon threads, you'd have to keep track of threads, and tell them to exit, before your program can completely quit. 
\n By setting them as daemon threads, you can let them run and forget about them, and when your program quits, any daemon \n threads are killed automatically.\n\nhttp://httpstat.us/200: 200 OK\nhttp://httpstat.us/200?sleep=4000: 200 OK\nhttp://httpstat.us/200?sleep=20000: Custom timeout\nhttp://httpstat.us/400: 400 Bad Request\nTook 5.87 seconds\n\n'''","repo_name":"wayne676/Python_Concurrency_Notes","sub_path":"page100_timeout_requestpy.py","file_name":"page100_timeout_requestpy.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74390345853","text":"import os\nimport time\n\nimport numpy as np\nimport torch\nimport torchvision.datasets\nimport torchvision.transforms as transforms\nfrom torch.nn import DataParallel\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils import data\nfrom torchvision.models import resnet50\n\nimport settings\nfrom arcface import Arcface\n\nfrom focal_loss import FocalLoss\n\n\ndef save_model(model, save_path, name, iter_cnt):\n save_name = os.path.join(save_path, name + '_' + str(iter_cnt) + '.pth')\n torch.save(model.state_dict(), save_name)\n return save_name\n\n\nclass NormalizeImage:\n def __call__(self, pic):\n # Our processing step is as follows\n # Each pixel is normalized by subtracting 127.5 and dividing by 128 after\n pic_as_array = np.array(pic)\n pic_as_array = (pic_as_array - 127.5) / 128.0\n return pic_as_array\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nimage_transforms = transforms.Compose([\n NormalizeImage(),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.ConvertImageDtype(dtype=torch.float32)\n])\n\nimage_dataset = torchvision.datasets.ImageFolder(settings.image_dataset_dir, transform=image_transforms)\ntrain_loader = data.DataLoader(image_dataset, shuffle=True)\n\nnum_of_classes = len(train_loader.dataset.class_to_idx)\ncriterion = torch.nn.CrossEntropyLoss() if settings.loss_function == \"softmax\" \\\n else FocalLoss(gamma=2)\nsoftmax = torch.nn.Softmax()\nmodel = resnet50(pretrained=False)\nif settings.loss_function == \"softmax\":\n model.fc = torch.nn.Linear(in_features=2048, out_features=num_of_classes)\n optimizer = torch.optim.SGD([{'params': model.parameters()},\n ],\n lr=1e-3, weight_decay=5e-4)\n model.to(device)\n model = DataParallel(model)\nelse:\n model.fc = torch.nn.Linear(in_features=2048, out_features=512)\n metric_fc = Arcface(512, num_of_classes=num_of_classes)\n optimizer = torch.optim.SGD([{'params': model.parameters()},\n {'params': metric_fc.parameters()}\n ],\n lr=1e-3,\n weight_decay=5e-4,\n momentum=0.9)\n model.to(device)\n model = DataParallel(model)\n metric_fc.to(device)\n metric_fc = DataParallel(metric_fc)\n\nscheduler = StepLR(optimizer, step_size=10, gamma=0.1)\ncorrect_guesses = torch.tensor([], device=device, dtype=torch.bool)\n\nfor i in range(settings.max_epoch):\n\n correct_guesses = torch.tensor([], device=device, dtype=torch.bool)\n correct_guesses.to(device)\n model.train()\n for ii, (image_data, image_label_id) in enumerate(train_loader):\n optimizer.zero_grad()\n image_data, image_label_id = image_data.to(device), image_label_id.to(device)\n feature = model(image_data)\n # output = feature\n\n output = feature if settings.loss_function == \"softmax\" else softmax(metric_fc(feature, image_label_id))\n correct_guesses = torch.cat([correct_guesses, output.argmax() == image_label_id])\n correct_guesses.to(device)\n loss = 
criterion(output, image_label_id)\n loss.backward()\n optimizer.step()\n\n iters = i * len(train_loader) + ii\n acc = correct_guesses.sum() / len(correct_guesses)\n time_str = time.asctime(time.localtime(time.time()))\n print('{} train epoch {} iter {} iters/s loss {} acc {}'.format(time_str, i, ii, loss.item(), acc))\n\n scheduler.step()\n model.eval()\n\n save_model(model, settings.models_dir,\n f\"resnet50-{settings.loss_function}\", i)\n","repo_name":"yazeed44/arcface-pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23298172811","text":"#!/usr/local/bin/python3\nfrom random import randrange\nfrom sympy import isprime\nfrom os.path import dirname, realpath, join\nfrom json import load\nfrom signal import SIGINT, signal\nimport sys\n\nclass EllipticCurve:\n\n\tdef __init__(self, field_order, params):\n\t\tself.a, self.b = params\n\t\tself.M = field_order\n\n\tdef set_point(self, xy):\n\t\tif not (xy[1]**2) % self.M == (xy[0]**3 + self.a*xy[0] + self.b) % self.M:\n\t\t\traise Exception(f'No such point {xy} lies on the curve.')\n\t\treturn Point(xy, self)\n\nclass Point:\n\n\tdef __init__(self, xy, curve):\n\t\tself.x, self.y = xy\n\t\tself.curve = curve\n\n\tdef extended_euclid(self, a, b):\n\t\tif not a:\n\t\t\treturn (0, 1, b)\n\t\tp, q, r = self.extended_euclid(b % a, a)\n\t\tq = q - (b//a)*p\n\t\treturn (q, p, r)\n\n\tdef inverse_mod(self, a, m):\n\t\tp, q, r = self.extended_euclid(a, m)\n\t\tif not r == 1:\n\t\t\traise Exception(f'Modular inverse of {a} mod {m} doesn\\'t exist.')\n\t\treturn p % m\n\n\tdef double(self):\n\t\tslope = (3*(self.x**2) + self.curve.a) % self.curve.M\n\t\tslope *= self.inverse_mod(2*self.y, self.curve.M)\n\t\tslope %= self.curve.M\n\t\t_x = (slope**2 - 2*self.x) % self.curve.M\n\t\t_y = (-self.y - slope*(_x - self.x)) % self.curve.M\n\t\treturn Point((_x, _y), self.curve)\n\n\tdef __add__(self, P):\n\t\ttry:\n\t\t\tassert self.curve.M == P.curve.M\n\t\t\tassert self.curve.a == P.curve.a\n\t\t\tassert self.curve.b == P.curve.b\n\t\t\tif (self.x, self.y) == (P.x, P.y):\n\t\t\t\treturn P.double()\n\t\t\tsign = 1 if P.x < self.x else -1\n\t\t\tslope = (sign*(self.y - P.y)) % self.curve.M\n\t\t\tslope *= self.inverse_mod(sign*(self.x - P.x), self.curve.M)\n\t\t\tslope %= self.curve.M\n\t\t\t_x = (slope**2 - self.x - P.x) % self.curve.M\n\t\t\t_y = (-self.y - slope*(_x - self.x)) % self.curve.M\n\t\t\treturn Point((_x, _y), self.curve)\n\t\texcept AssertionError:\n\t\t\traise Exception('Addition is defined only for points lying on the same curve.')\n\n\tdef multiply(self, n):\n\t\tresult = None\n\t\taddend = self\n\t\twhile n:\n\t\t\tif n & 1:\n\t\t\t\tif not result:\n\t\t\t\t\tresult = addend\n\t\t\t\telse:\n\t\t\t\t\tresult += addend\n\t\t\tn = n >> 1\n\t\t\taddend = addend.double()\n\t\treturn result\n\n\tdef xy(self):\n\t\treturn (self.x, self.y)\n\n\nclass Challenge:\n\n\tdef __init__(self, field_order, params, generator, public_key, url):\n\t\tself.curve = EllipticCurve(field_order, params)\n\t\tself.G = self.curve.set_point(generator)\n\t\tself.P = self.curve.set_point(public_key)\n\t\tself.url = url\n\n\tdef random_prime(self, digits):\n\t\tlower = 10**(digits - 1)\n\t\tupper = 10*lower\n\t\tprime = randrange(lower, upper)\n\t\twhile not isprime(prime):\n\t\t\tprime = randrange(lower, upper)\n\t\treturn prime\n\n\tdef ask_questions(self, sequence):\n\t\tcount = 0\n\t\tfor bit in 
sequence:\n\t\t\tfactor, correct = (randrange(2, 10), 'n') if bit == '1' else (1, 'y')\n\t\t\tprime = factor*self.random_prime(38)\n\t\t\tresponse = input(f\"{prime}: \")\n\t\t\tif not response.lower() == correct:\n\t\t\t\tbreak\n\t\t\tcount += 1\n\t\treturn count == len(sequence)\n\n\tdef welcome(self):\n\t\tprint(\"*\"*50)\n\t\tprint(\"Sorry we forgot to tell you curve parameters :(\")\n\t\tprint(\"Well, now is an opportunity.\")\n\t\tprint(\"Just answer the following questions and you shall get what you seek.\")\n\t\tprint(\"Are the given numbers prime? (y/n)\")\n\t\tprint(\"*\"*50)\n\n\tdef endgame(self):\n\t\tprint(\"LOL did you seriously think we would give up the flag that easy?\")\n\t\tprint(f\"Hurry up {self.url}\")\n\n\tdef run(self):\n\t\tself.welcome()\n\t\tsequence = ''.join('{:08b}'.format(ord(ch)) for ch in f'Here you go, a = {self.curve.a}')\n\t\ttry:\n\t\t\tassert self.ask_questions(sequence)\n\t\t\tanswer = int(input(f'SECRET KEY (hex): '), 16)\n\t\t\tassert self.P.xy() == self.G.multiply(answer).xy()\n\t\t\tself.endgame()\n\t\texcept:\n\t\t\tprint(\"OOPS! Keep trying ...\")\n\ndef handler(sig, frame):\n\tsys.exit(0)\n\ndef main():\n\tcwd = dirname(realpath(__file__))\n\tconfig_file = join(cwd, 'config.json')\n\tdata = load(open(config_file, 'r'))\n\tdata = [data[i] for i in data.keys()]\n\n\tsignal(SIGINT, handler)\n\tChallenge(*data).run()\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"Jayashrri/PCTF21","sub_path":"Cryptography/He'll love me ... or not/Docker/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}
+{"seq_id":"72351488253","text":"# Implementations of the following topics regarding Performance Metrics\n# 1. Confusion Matrix\n# 2. Calculate Precision, Recall and F1\n# 3. Plot ROC\n\nimport sys\nsys.path.append('../Utilities/')\nimport readData as RD\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score, f1_score\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score\n\nimport matplotlib.pyplot as plt\nfrom scipy import interp\nimport numpy as np\n\nprint('Implementation of Performance Metrics related topics')\nprint('1. Read WDBC data using Utility API')\nDataMap = RD.readDataFromWDBC()\nX_train = DataMap['X_train']\ny_train = DataMap['y_train']\nX_test = DataMap['X_test']\ny_test = DataMap['y_test']\n\nprint('\\n2. 
Calculate and plot the confusion matrix')\npipe_svc = Pipeline([('scl', StandardScaler()),\n\t\t\t\t\t ('clf', SVC(random_state = 1))])\npipe_svc.fit(X_train, y_train)\ny_pred = pipe_svc.predict(X_test)\nconfmat = confusion_matrix(y_true = y_test, y_pred = y_pred)\nprint('After calculation, Confusion Matrix is')\nprint(confmat)\nprint('Then, we plot Confusion Matrix in the following graph')\nfig, ax = plt.subplots(figsize = (2.5, 2.5))\nax.matshow(confmat, cmap = plt.cm.Blues, alpha = 0.3)\n\nfor i in range(confmat.shape[0]):\n\tfor j in range(confmat.shape[1]):\n\t\tax.text(x = j, y = i,\n\t\t\t s = confmat[i, j],\n\t\t\t va = 'center',\n\t\t\t ha = 'center')\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.show()\n\nprint('\\n3. Precision, Recall and F1')\nprint('Precision of the Test Samples is %.3f' % \n\t  precision_score(y_true = y_test, y_pred = y_pred))\nprint('Recall of the Test Samples is %.3f' % \n\t  recall_score(y_true = y_test, y_pred = y_pred))\nprint('F1 Score of the Test Samples is %.3f' % \n\t  f1_score(y_true = y_test, y_pred = y_pred))\n\nprint('\\n4. Plot ROC Curve')\npipe_lr = Pipeline([('scl', StandardScaler()),\n\t    ('pca', PCA(n_components = 2)),\n\t    ('clf', LogisticRegression(penalty = 'l2',\n\t    \t\t\t\t\t\t   random_state = 0,\n\t    \t\t\t\t\t\t   C = 100.0))])\nprint('After establishing the pipeline, we only take two features \\\nfrom the Breast Cancer Dataset')\nX_train2 = X_train[:, [4, 14]]\ncv = StratifiedKFold(y_train, n_folds = 3, random_state = 1)\nfig = plt.figure(figsize = (7, 5))\nmean_tpr = 0.0\nmean_fpr = np.linspace(0, 1, 100)\nall_tpr = []\n\nprint('Now go through each CV fold to compute the FPR \\\nand TPR of its ROC curve')\nfor i, (train, test) in enumerate(cv):\n\tprint('Begin to process CV fold {0}.'.format(i))\n\tprobas = pipe_lr.fit(X_train2[train], y_train[train]).predict_proba(\n\t\tX_train2[test])\n\t\t# The return has shape (# of samples) x (# of classes)\n\t\t# Each matrix element [i, j] is the predicted probability that\n\t\t# sample i belongs to class j\n\tfpr, tpr, threshold = roc_curve(y_train[test],\n\t\t\t\t\t\t\t\t\tprobas[:, 1], \n\t\t\t\t\t\t\t\t\tpos_label = 1)\n\t\t# For each i in all three arrays, fpr[i] and tpr[i] are the corresponding\n\t\t# values when score > threshold[i]\n\tmean_tpr += interp(mean_fpr, fpr, tpr)\n\tmean_tpr[0] = 0.0\n\troc_auc = auc(fpr, tpr)\n\tprint('CV fold {0} has ROC_AUC_score = {1}\\n'.format(i+1, round(roc_auc, 3)))\n\tplt.plot(fpr, tpr, lw = 1,\n\t\t\t label = 'ROC fold {0} (area = {1})'.format(i+1, round(roc_auc, 3)))\n\n# Plot the random guess line, which is a straight line from (0,0) to (1,1)\nplt.plot([0, 1],\n\t\t [0, 1],\n\t\t linestyle = '--',\n\t\t color = (0.6, 0.6, 0.6),\n\t\t label = 'random guessing')\n\n# Plot the mean ROC line and compute its AUC_score\nmean_tpr /= len(cv)\nmean_tpr[-1] = 1.0 # Mark the last item in the array as 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nplt.plot(mean_fpr, mean_tpr, 'k--',\n\t\t label = 'Mean ROC (area = {0})'.format(round(mean_auc, 3)),\n\t\t lw = 2)\n\n# Plot the perfect performance\nplt.plot([0, 0, 1],\n\t [0, 1, 1],\n\t lw = 2,\n\t linestyle = ':',\n\t color = 'black',\n\t label = 'Perfect Performance')\n\nplt.xlim([-0.05, 1.05])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver Operator Characteristic')\n\nplt.legend(loc = 'lower right')\nplt.show()\n\nprint('\\n5. 
Run against Test Data')\npipe_lr = pipe_lr.fit(X_train2, y_train)\ny_pred2 = pipe_lr.predict(X_test[:, [4, 14]])\nprint('ROC AUC score = {0}'.format(round(roc_auc_score(\n\t\ty_true = y_test, y_score = y_pred2), 3)))\n\nprint('Accuracy score = {0}'.format(round(accuracy_score(\n\t\ty_true = y_test, y_pred = y_pred2), 3)))\n","repo_name":"yuyue730/PythonMachineLearning","sub_path":"ModelEvaluationAndHyperparameterTuning/PerformanceMetricsROC.py","file_name":"PerformanceMetricsROC.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"75045908733","text":"# Client Script\nimport socket\n\nHEADER = 64\nFORMAT = 'utf-8'\nDISCONNECT_MESSAGE = \"!DISCONNECT\"\nPORT = 6060\nSERVER = \"192.168.43.4\"\nADDR = (SERVER, PORT)\nreceived = ''\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(ADDR)\n\ndef send_server(msg):\n    message = msg.encode(FORMAT)\n    msg_length = len(message)\n    send_length = str(msg_length).encode(FORMAT)\n    send_length += b' ' * (HEADER - len(send_length))\n    client.send(send_length)\n    client.send(message)\n    global received\n    received = client.recv(2048)\n    return received\n","repo_name":"Jay-Mehta-13/Chat","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"28657311866","text":"from fastapi import FastAPI\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.middleware.cors import CORSMiddleware\n\n# Needed for setting up the DB,\n# since it stores information\n# about the structure of all tables\nfrom .config.db import Base\n\n# Engine for connecting to the DB\nfrom .config.db import engine\n\n# Hook up the routers\nfrom .routers import works, authors, types\n\napp = FastAPI(\n    title='College Portfolio Api',\n    docs_url='/documentation',\n    redoc_url=None\n)\n\napp.mount(\"/static\", StaticFiles(directory=\"app/static\"), name=\"static\")\n\norigins = [\n    'http://localhost',\n    'http://localhost:3000'\n]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=['*'],\n    allow_headers=['*']\n)\n\n# Create the tables in the database from the models.\n# If the tables already exist, nothing happens\nBase.metadata.create_all(bind=engine)\n\n# Add each included router to the app object\napp.include_router(works.router)\napp.include_router(authors.router)\napp.include_router(types.router)\n","repo_name":"Aqua-Lively/college-porfolio-backend","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29692477663","text":"# ========== EASY =========\n# Link: https://leetcode.com/problems/find-the-highest-altitude/\n\nclass Solution:\n    def largestAltitude(self, gain: List[int]) -> int:\n        gain.insert(0, 0)\n        relative_height = 0\n        max_val = 0\n        for i in range(len(gain) - 1):\n            relative_height += gain[i + 1]\n            max_val = max(relative_height, max_val)\n        return max_val","repo_name":"AryanK1511/Data-Structures-and-Algorithms-in-Python","sub_path":"LeetCode_Problems/1-Easy_Problems/highestAltitude.py","file_name":"highestAltitude.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"373895837","text":"n, c = map(int, input().split())\nm = int(input())\n\nl 
= []\nfor i in range(0, m) :\n    l.append(list(map(int, input().split())))\n\nl.sort(key=lambda x:x[1]) # sort so that deliveries with the earliest arrival village come first\n\ncount = 0\nb = [c]*n # number of boxes each village can still accept\nfor i in range(0, m) :\n    m = c \n    # among the villages passed on the way to the destination, pick the smallest remaining capacity\n    for j in range(l[i][0]-1, l[i][1]-1) :\n        m = min(m, b[j])\n    # at most that minimum can be carried; if the requested boxes exceed it, carry only the minimum\n    m = min(m, l[i][2])\n    # the remaining capacity of every village passed on the way to the destination decreases\n    for j in range(l[i][0]-1, l[i][1]-1) :\n        b[j] -= m\n    count += m\n\nprint(count)\n\n","repo_name":"jinaur/python","sub_path":"8980택배.py","file_name":"8980택배.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41554124120","text":"from argparse import RawTextHelpFormatter\nfrom fa import utils\n\n\nDESCRIPTION = '''adds a python-range to resultset\n\nEXAMPLE:\n    result = [0, 0x200]\n    -> add-offset-range 0 4 8\n    result = [0, 4, 8, 0x200, 0x204, 0x208]\n'''\n\n\ndef get_parser():\n    p = utils.ArgumentParserNoExit('add-offset-range',\n                                   description=DESCRIPTION,\n                                   formatter_class=RawTextHelpFormatter)\n    p.add_argument('start')\n    p.add_argument('end')\n    p.add_argument('step')\n    return p\n\n\ndef add_offset_range(addresses, start, end, step):\n    for ea in addresses:\n        for i in range(start, end, step):\n            yield ea + i\n\n\ndef run(segments, args, addresses, interpreter=None, **kwargs):\n    gen = add_offset_range(addresses, eval(args.start), eval(args.end),\n                           eval(args.step))\n    return list(gen)\n","repo_name":"doronz88/fa","sub_path":"fa/commands/add_offset_range.py","file_name":"add_offset_range.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"78"}
+{"seq_id":"19064739453","text":"import os\nfrom os import getenv\nfrom dotenv import load_dotenv\n\nload_dotenv() # environment\n\nAPI_ID = int(getenv(\"API_ID\"))\nAPI_HASH = getenv(\"API_HASH\")\nBOT_TOKEN = getenv(\"BOT_TOKEN\")\nSESSION_NAME = getenv(\"SESSION_NAME\", \"session\")\n\nASS_ID = int(getenv(\"ASS_ID\"))\nMONGO_DB_URI = getenv(\"MONGO_DB_URI\")\nLOG_GROUP_ID = int(getenv(\"LOG_GROUP_ID\"))\nDURATION_LIMIT = int(getenv(\"DURATION_LIMIT\", \"54000\"))\nOWNER_ID = list(map(int, getenv(\"OWNER_ID\", \"1757169682\").split()))\nCOMMAND_PREFIXES = list(getenv(\"COMMAND_PREFIXES\", \"/ ! . 
$\").split())\nSUDO_USERS = list(map(int, getenv(\"SUDO_USERS\", \"1757169682\").split()))\n","repo_name":"maxsupun/Music-Yukki","sub_path":"Yukki/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"4099268840","text":"#!/usr/bin/python -t\n\n# linked list\n\n\"\"\"\nDefinition of ListNode\n\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param head: n\n @return: The new head of reversed linked list.\n \"\"\"\n def reverse(self, head):\n # write your code here\n if not head or not head.next:\n return head\n \n prev = None\n \n while head:\n n = head.next\n head.next = prev\n prev = head\n head = n\n \n return prev\n \n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/0035_reverse_linked_list.py","file_name":"0035_reverse_linked_list.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"34672818853","text":"\nimport matplotlib.pyplot as plt\n\ntitulo = \"Scatterplot: gráfico de dispersão\"\n# titulo = \"Gráfico de barras 2\"\neixox= \"Eixo X\"\neixoy = \"Eixo Y\"\n\nx1 = [1, 3, 5, 7, 9]\ny1 = [2, 3, 7, 1, 0]\n\nx2 = [2, 4, 6, 8, 10]\ny2 = [5, 1, 3, 7, 4]\nz = [200, 25, 400, 3300, 100]\n\n# Legenda\nplt.title(titulo)\nplt.xlabel(eixox)\nplt.ylabel(eixoy)\n\n# plt.bar(x1, y1, label = \"Grupo 1\")\n# plt.bar(x2, y2, label = \"Grupo 2\")\n# plt.legend()\n\nplt.scatter(x1, y1, label = \"Meus pontos\", \n color = \"#000000\", marker=\"h\", s = z)\nplt.plot(x1, y1, color=\"k\", linestyle = \"--\")\nplt.legend()\n\n#plt.show()\nplt.savefig(\"figura1.png\", dpi=300)\n\n'''\nMatplotlib - documentação\nDocumentação oficial do Matplotlib\nA seguir, alguns exemplos de argumentos que podem ser aplicados ao método plot( ).\n\n\n\ncolor: cor (ver exemplos abaixo)\n\nlabel: rótulo\n\nlinestyle: estilo de linha (ver exemplos abaixo)\n\nlinewidth: largura da linha\n\nmarker: marcador (ver exemplos abaixo)\n\n\n\nCORES (color)\n'b' blue\n\n'g' green\n\n'r' red\n\n'c' cyan\n\n'm' magenta\n\n'y' yellow\n\n'k' black\n\n'w' white\n\n\n\nMarcadores (marker)\n'.' 
point marker\n\n',' pixel marker\n\n'o' circle marker\n\n'v' triangle_down marker\n\n'^' triangle_up marker\n\n'<' triangle_left marker\n\n'>' triangle_right marker\n\n'1' tri_down marker\n\n'2' tri_up marker\n\n'3' tri_left marker\n\n'4' tri_right marker\n\n's' square marker\n\n'p' pentagon marker\n\n'*' star marker\n\n'h' hexagon1 marker\n\n'H' hexagon2 marker\n\n'+' plus marker\n\n'x' x marker\n\n'D' diamond marker\n\n'd' thin_diamond marker\n\n'|' vline marker\n\n'_' hline marker\n'''\n","repo_name":"IgorPereira1997/Python-SQL-Basics","sub_path":"banco_de_dados/grafico_linhas.py","file_name":"grafico_linhas.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"24232633016","text":"# Import the required modules and classes\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Question, UserQuiz\nfrom accounts.models import UserProfile\nfrom .forms import AnswerForm\nfrom django.db.models import Q\n\n# Define the view functions\n@login_required\ndef hello_view(request):\n    return render(request, 'myapp/hello.html') # Render the hello.html template\n\n@login_required\ndef quiz_already_answered(request, question_id=None):\n    user = request.user\n\n    # Find the next unanswered question\n    unanswered_questions = Question.objects.exclude(\n        userquiz__user=user, userquiz__user_answer__isnull=False)\n    next_question = unanswered_questions.first()\n\n    if question_id:\n        # If a question_id argument was provided, redirect to the take_quiz view\n        return redirect('take_quiz', question_id=question_id)\n\n    if next_question:\n        # Check that the next question's index is within the unanswered range\n        if next_question.id <= unanswered_questions.count():\n            # If a next question is available, redirect to it\n            return redirect('take_quiz', question_id=next_question.pk)\n\n    return render(request, 'quiz_app/quiz_already_answered.html')\n\n@login_required\ndef take_quiz(request, question_id):\n    user = request.user\n    user_profile = UserProfile.objects.get(user=user)\n    question = Question.objects.get(pk=question_id)\n\n    try:\n        user_quiz = UserQuiz.objects.get(user=user, question=question)\n    except UserQuiz.DoesNotExist:\n        user_quiz = UserQuiz(user=user, question=question)\n\n    form = AnswerForm()\n\n    if request.method == \"POST\":\n        form = AnswerForm(request.POST)\n        if form.is_valid():\n            user_answer = form.cleaned_data['user_answer']\n            if user_answer == question.correct_option:\n                user_profile.cp += 1\n                user_profile.save()\n\n            user_quiz.user_answer = user_answer\n            user_quiz.save()\n\n            # Look up the next unanswered question\n            unanswered_questions = Question.objects.exclude(\n                userquiz__user=user, userquiz__user_answer__isnull=False)\n            next_question = unanswered_questions.first()\n\n            if next_question:\n                # Redirect to the next question, passing question_id along\n                return redirect('take_quiz', question_id=next_question.pk)\n            else:\n                # Otherwise redirect to the quiz_already_answered view\n                return redirect('quiz_already_answered')\n\n    return render(request, 'quiz_app/quiz_question.html', {\n        'question': question,\n        'form': form,\n        'user_quiz': user_quiz\n    })","repo_name":"thinkchild/Django_SDGs1026","sub_path":"quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"30115146603","text":"import sys # for argument parsing\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport PIL\n\nimport os.path as path\nimport os\n\n\ndef read_graph_with_preprocessing():\n\n\n\twith open('homework_graph.uml','r') as f:\n\t\tpreproc_txt = f.read();\n\n\tpreproc_txt = 
preproc_txt.replace('\"','')\n\n\twith open('homework_graph_preprocessed.uml','w') as f:\n\t\tf.write(preproc_txt)\n\n\n\tG = nx.read_edgelist('homework_graph_preprocessed.uml',\n\t create_using=nx.DiGraph,\n\t nodetype=str, delimiter=\" --> \")\n\n\treturn G\n\n\ndef ensure_center_in_graph(center, G):\n\tfor e in G.edges():\n\t\tif e[0] == center or e[1] == center:\n\t\t\treturn True\n\n\traise RuntimeError(f'center {center} not found in graph!')\n\n\n\n\n\n\n\ndef generate_subgraph(center, destdir='_generated_graphs', dry_run = False, legend = False):\n\n\tprint(f'generating subgraph for {center}')\n\n\tif not os.path.exists(destdir):\n\t\tos.mkdir(destdir)\n\n\n\n\tG = read_graph_with_preprocessing()\n\n\n\n\tensure_center_in_graph(center, G)\n\n\n\n\t\n\n\t\n\tdef decide_subgraph(d):\n\t\tkeep_me = []\n\t\tnode_type = {center: 'center'}\n\t\tfor n in G.nodes():\n\t\t\ttry:\n\t\t\t\tdist_this = nx.shortest_path_length(G,n,center)\n\n\t\t\t\tif n not in node_type:\n\t\t\t\t\tnode_type[n] = 'ancestor'\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\tdist_this = nx.shortest_path_length(G,center, n)\n\n\t\t\t\t\tif n not in node_type:\n\t\t\t\t\t\tnode_type[n] = 'descendant'\n\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\n\t\t\tif dist_this <= d:\n\t\t\t\tkeep_me.append(n)\n\n\t\treturn keep_me, node_type\n\n\n\tdef trim_long_paths(subgraph, keep_me, node_type, threshold=3):\n\n\t\t# print('\\n\\ntrimming long paths\\n')\n\n\n\t\tno_really_keep_me = [center]\n\n\t\t# print(keep_me)\n\t\tfor n in keep_me:\n\t\t\t\n\n\t\t\tif node_type[n]=='ancestor':\n\t\t\t\tpaths_to_center = nx.all_simple_paths(subgraph, n, center)\n\t\t\telif node_type[n]=='descendant':\n\t\t\t\tpaths_to_center = nx.all_simple_paths(subgraph, center, n)\n\t\t\telif n == center:\n\t\t\t\tcontinue\n\n\n\t\t\t# print(f'{center} {n}')\n\t\t\ttry:\n\t\t\t\tlongest_path_length = len(max(paths_to_center, key=lambda x: len(x)))-1\n\t\t\t\t# print(n, center, longest_path_length)\n\t\t\t\tif longest_path_length<=threshold:\n\t\t\t\t\tno_really_keep_me.append(n)\n\n\t\t\texcept ValueError as e:\n\t\t\t\tpass # can't get to it, remove it.\n\n\n\t\tassert center in no_really_keep_me\n\t\treturn no_really_keep_me\n\n\n\tdef trim_high_degree_nodes(subgraph, keep_me, node_type, threshold=3):\n\t\t# print('\\n\\ntrimming high-degree nodes\\n')\n\n\t\tno_really_keep_me = set(keep_me) # subtractive synthesis for this\n\n\t\tfor n in keep_me:\n\t\t\td = nx.degree(subgraph, n)\n\t\t\t# print(n, d)\n\n\t\t\tif d > threshold and node_type[n] == 'descendant':\n\t\t\t\t# print(f'trimming successors from {n}')\n\t\t\t\tfor s in subgraph.successors(n):\n\t\t\t\t\t# print(f'removing {s}')\n\t\t\t\t\tif s in no_really_keep_me:\n\t\t\t\t\t\tno_really_keep_me.remove(s)\t\n\n\t\t\tif d > threshold and node_type[n] == 'ancestor':\n\t\t\t\t# print(f'trimming predecessors from {n}')\n\t\t\t\tfor s in subgraph.predecessors(n):\n\t\t\t\t\t# print(f'removing {s}')\n\t\t\t\t\tif s in no_really_keep_me:\n\t\t\t\t\t\tno_really_keep_me.remove(s)\n\n\t\tassert center in no_really_keep_me\n\t\treturn no_really_keep_me\n\n\n\tkeep_me, node_type = decide_subgraph(2)\n\n\tsubgraph = nx.induced_subgraph(G,keep_me)\n\n\n\n\tbig_graph_threshold = 12\n\n\tif len(keep_me)>big_graph_threshold:\n\t\tkeep_me = trim_high_degree_nodes(subgraph, keep_me, node_type)\n\t\tsubgraph = nx.induced_subgraph(G, keep_me)\n\n\n\tif len(keep_me)>big_graph_threshold:\n\t\tkeep_me = trim_long_paths(subgraph, keep_me, node_type)\n\t\tsubgraph = nx.induced_subgraph(G, keep_me)\n\n\n\n\tcenter_nicename = 
center.lower().replace('\"','').replace(' ','_')\n\tuml_filename = path.join(destdir, f'{center_nicename}.uml')\n\timage_filename = path.join(destdir, f'{center_nicename}.png')\n\n\n\tif dry_run:\n\t\treturn image_filename\n\n\n\n\tencountered = set()\n\ttype_to_plantuml_tag_map = {'center': \"<< Current >>\", 'ancestor': \"<< EarlierContent >>\", 'descendant': \"<< LaterContent >>\"}\n\n\n\twith open(uml_filename,'w') as file: \n\t\tfile.write('@startuml\\n')\n\t\tfile.write('!include style.uml\\n')\n\n\t\tfile.write('partition \"Connection graph\" {\\n\\n')\n\n\n\t\tfor e in subgraph.edges():\n\n\t\t\tsource = e[0]\n\t\t\ttarget = e[1]\n\n\t\t\tsource_node_type = \"\"\n\t\t\ttarget_node_type = \"\"\n\n\t\t\tif source not in encountered:\n\t\t\t\tencountered.add(source)\n\t\t\t\tsource_node_type = type_to_plantuml_tag_map[node_type[source]]\n\n\t\t\tif target not in encountered:\n\t\t\t\tencountered.add(target)\n\t\t\t\ttarget_node_type = type_to_plantuml_tag_map[node_type[target]]\n\n\t\t\tif target == center:\n\t\t\t\ttarget = '📍 ' + center\n\t\t\tif source == center:\n\t\t\t\tsource = '📍 ' + center\n\t\t\tfile.write(f'\"{source}\" {source_node_type} --> \"{target}\" {target_node_type}\\n')\n\n\n\n\n\n\n\t\tfile.write('}\\n')\n\n\t\tif legend:\n\t\t\tfile.write('!include legend.uml\\n')\n\n\t\tfile.write('@enduml\\n')\n\n\n\n\n\timport subprocess\n\tsubprocess.run([\"plantuml\", \"-tpng\", uml_filename]) \n\n\treturn image_filename\n\n\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) == 1:\n\t\traise RuntimeError('this script must be passed a name of a node')\n\tcenter = sys.argv[1]\n\n\tgenerate_subgraph(center, destdir='_generated_graphs')\n\n\n\n\n\n\t# https://networkx.org/documentation/stable/auto_examples/drawing/plot_custom_node_icons.html\n\n\t# pos = nx.planar_layout(G)\n# pos = nx.nx_agraph.graphviz_layout(subgraph)\n# fig, ax = plt.subplots()\n\n# nx.draw_networkx(\n#     subgraph,\n#     pos=pos,\n#     ax=ax,\n#     arrows=True,\n#     arrowstyle=\"->\",\n#     min_source_margin=15,\n#     min_target_margin=15,\n# )\n\n# fig.savefig('the_graph.pdf')\n","repo_name":"ofloveandhate/calculus_1","sub_path":"connection_graph/subgraph_of_related.py","file_name":"subgraph_of_related.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70268698494","text":"#!/usr/bin/python\nimport sys\nimport os\n\nImport('env')\n\ndefs = []\n\ncflags_libmv = Split(env['CFLAGS'])\nccflags_libmv = Split(env['CCFLAGS'])\ncxxflags_libmv = Split(env['CXXFLAGS'])\n\ndefs.append('V3DLIB_ENABLE_SUITESPARSE')\ndefs.append('GOOGLE_GLOG_DLL_DECL=')\n\nsrc = env.Glob(\"*.cpp\")\nsrc += env.Glob('libmv/image/*.cc')\nsrc += env.Glob('libmv/multiview/*.cc')\nsrc += env.Glob('libmv/numeric/*.cc')\nsrc += env.Glob('libmv/simple_pipeline/*.cc')\nsrc += env.Glob('libmv/tracking/*.cc')\nsrc += env.Glob('third_party/fast/*.c')\nsrc += env.Glob('third_party/gflags/*.cc')\nsrc += env.Glob('third_party/ldl/Source/*.c')\nsrc += env.Glob('third_party/ssba/Geometry/*.cpp')\nsrc += env.Glob('third_party/ssba/Math/*.cpp')\n\nincs = '. 
../Eigen3'\nincs += ' ' + env['BF_PNG_INC']\nincs += ' ' + env['BF_ZLIB_INC']\n\nif env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc'):\n    incs += ' ./third_party/glog/src/windows ./third_party/glog/src/windows/glog'\n    if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'):\n        incs += ' ./third_party/msinttypes'\n\n    src += ['./third_party/glog/src/logging.cc', './third_party/glog/src/raw_logging.cc', './third_party/glog/src/utilities.cc', './third_party/glog/src/vlog_is_on.cc']\n    src += ['./third_party/glog/src/windows/port.cc']\n\n    if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'):\n        cflags_libmv.append('/Od')\n        ccflags_libmv.append('/Od')\n        cxxflags_libmv.append('/Od')\n\n        if not env['BF_DEBUG']:\n            defs.append('NDEBUG')\n    else:\n        if not env['BF_DEBUG']:\n            cflags_libmv += Split(env['REL_CFLAGS'])\n            ccflags_libmv += Split(env['REL_CCFLAGS'])\n            cxxflags_libmv += Split(env['REL_CXXFLAGS'])\nelse:\n    src += env.Glob(\"third_party/glog/src/*.cc\")\n    incs += ' ./third_party/glog/src'\n    if not env['BF_DEBUG']:\n        cflags_libmv += Split(env['REL_CFLAGS'])\n        ccflags_libmv += Split(env['REL_CCFLAGS'])\n        cxxflags_libmv += Split(env['REL_CXXFLAGS'])\n\nincs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'\n\nenv.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137], compileflags=cflags_libmv, cc_compileflags=ccflags_libmv, cxx_compileflags=cxxflags_libmv )\n","repo_name":"BSVino/Blender","sub_path":"extern/libmv/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"}
+{"seq_id":"41653619083","text":"import os\n\nimport numpy as np\n\n\ndef parse_model_cfg(path):\n    # Parse the yolo *.cfg file and return module definitions; path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'\n    if not path.endswith('.cfg'):\n        path += '.cfg'\n    if not os.path.exists(path) and os.path.exists('cfg' + os.sep + path):\n        path = 'cfg' + os.sep + path\n\n    with open(path, 'r') as f:\n        lines = f.read().split('\\n')\n    lines = [x for x in lines if x and not x.startswith('#')]\n    lines = [x.rstrip().lstrip() for x in lines]\n    mdefs = []  # module definitions\n    for line in lines:\n        if line.startswith('['):\n            mdefs.append({})\n            mdefs[-1]['type'] = line[1:-1].rstrip()\n            if mdefs[-1]['type'] == 'convolutional':\n                mdefs[-1]['batch_normalize'] = 0  # pre-populate with zero\n        else:\n            key, val = line.split(\"=\")\n            key = key.rstrip()\n\n            if key == 'anchors':  # return nparray\n                mdefs[-1][key] = np.array([float(x) for x in val.split(',')]).reshape((-1, 2))  # np anchors\n            elif key in ['from', 'layers', 'mask']:  # return array\n                mdefs[-1][key] = [int(x) for x in val.split(',')]\n            else:\n                val = val.strip()\n                if val.isnumeric():  # return an int or a float\n                    mdefs[-1][key] = int(val) if (int(val) - float(val)) == 0 else float(val)\n                else:\n                    mdefs[-1][key] = val  # return a string\n\n    # check that all fields are supported\n    supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',\n                 'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh', 'random',\n                 'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms', 'nms_kind',\n                 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh', 'ratio', 'reduction', 'kernelsize']\n\n    f = []  # fields\n    for x in mdefs[1:]:\n        [f.append(k) for k in x if k not in f]\n    u = [x for x in f if x not in supported]  # unsupported fields\n    assert not any(u), \"Unsupported fields %s in %s. 
\" % (u, path)\n\n return mdefs\n\n\ndef parse_data_cfg(path):\n # 解析'data/person/person.data'配置文件\n if not os.path.exists(path) and os.path.exists('data' + os.sep + path): # 如果省略,添加'data/'前缀\n path = 'data' + os.sep + path\n\n with open(path, 'r') as f:\n lines = f.readlines()\n\n options = dict()\n for line in lines:\n line = line.strip()\n if line == '' or line.startswith('#'):\n continue\n key, val = line.split('=')\n options[key.strip()] = val.strip()\n\n return options\n","repo_name":"qq979249745/pedestrian_detector","sub_path":"detector/utils/parse_config.py","file_name":"parse_config.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31530772488","text":"import urllib2\nimport threading\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nimport datetime\nimport time\nfrom django.utils import timezone\nfrom tools import safe_request\n\nurl_base = 'http://www.londonstockexchange.com'\nquerys = [\n 'http://www.londonstockexchange.com/exchange/prices-and-markets/rns/company-news.html?tidm={}&isin={}&newsType=',\n 'http://www.londonstockexchange.com/exchange/news/alliance-news/company-news.html?tidm={}'\n]\n\ndef get_stock_news(symbol, query, collection=None):\n for query_format in querys:\n html = safe_request(query_format.format(symbol, query.split('GBGB')[0]))\n soup = BeautifulSoup(html, 'html.parser')\n\n for news in soup.find_all('li', class_='newsContainer'):\n info = news.find('a')\n title = info.string.strip()\n url = url_base + info['href'].strip().split('.html')[0].strip().split(\"openWin2('\")[-1] + '.html'\n date_str = news.find('span', class_='hour').string.strip()\n if len(date_str) <=5:\n date_str = timezone.datetime.now().strftime('%d %b %Y') + ' ' + date_str\n date = timezone.datetime.strptime(date_str, '%d %b %Y %H:%M')\n\n if collection:\n obj, created = collection.objects.get_or_create(Symbol=symbol)\n try:\n obj_news, created = obj.stocknews_set.get_or_create(\n pub_date=date, url=url, title=title\n )\n if created:\n obj_news.save()\n except:\n pass\n\n\nif __name__ == '__main__':\n get_stock_news('JRP','GB00BCRX1J15GBGBXSTMM')\n","repo_name":"yuxiang-zhou/MarketAnalysor","sub_path":"DataAnalysis/extract_stock_news.py","file_name":"extract_stock_news.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72204064573","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n# Create your models here.\n\nStatus = [\n (\"Active\", \"Active\"), # Subscription is in active state or trial state\n (\"Cancelled\", \"Cancelled\"), # Subscription is cancelled\n]\n\n\nclass StripeCustomer(models.Model):\n \"\"\"Stripe Customer Model/Table for mapping customers with Stripe Subscriptions\"\"\"\n\n user = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"customer\"\n )\n stripeCustomerId = models.CharField(max_length=255)\n stripeSubscriptionId = models.CharField(max_length=255)\n status = models.CharField(max_length=200, choices=Status, default=\"Active\")\n\n def __str__(self):\n return self.user.username\n","repo_name":"avdhootmukhedkar/Python-Django-Payment-Portal-Coding-Challenge","sub_path":"Coding Challenge Python Case 
Study/challenge2/DjangoTest/stripe_subscriptions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1507757740","text":"from django.shortcuts import render, redirect\nfrom .models import *\nimport datetime\nfrom django.contrib import messages\n# Create your views here.\n\n\ndef home(request):\n user = request.user\n if user.is_authenticated:\n incomplete = tasks.objects.filter(user=user,date=datetime.date.today(),check=False)\n complete = tasks.objects.filter(user=user,date=datetime.date.today(),check=True)\n count = tasks.objects.filter(user=user,date=datetime.date.today()).count()\n if request.method == \"POST\":\n check = request.POST['check']\n id = request.POST['taskid']\n obj = tasks.objects.get(id=id)\n obj.check = check\n obj.save()\n messages.success(request, \"Task Updated\")\n parms = {\n \"incomplete\":incomplete,\n \"complete\":complete,\n \"countz\":count,\n }\n return render(request,'index.html',parms)\n else:\n return redirect('account_login')\n\ndef about(request):\n return render(request,'about.html')\n\ndef history(request):\n user = request.user\n if user.is_authenticated:\n obj = tasks.objects.filter(user=user,date__lt=datetime.date.today())\n parms = {\n \"tasks\":obj,\n \"countz\":obj.count(),\n }\n return render(request,'history.html',parms)\n else:\n return redirect('account_login')\n\ndef addtask(request):\n user = request.user\n if user.is_authenticated:\n count = tasks.objects.filter(user=user,date=datetime.date.today()).count()\n if request.method == \"POST\":\n if 'normal' in request.POST:\n name= request.POST['name']\n desc = request.POST['desc']\n obj = tasks.objects.filter(user=user,title__iexact=name,date=datetime.date.today())\n if obj.count() != 0:\n messages.error(request,\"This Task Already Exist!\")\n else:\n if 'daily' in request.POST:\n tasks.objects.create(title=name,desc=desc,user=user,check=False,types='Daily')\n else:\n tasks.objects.create(title=name,desc=desc,user=user,check=False)\n messages.success(request,\"Task Created!\")\n if 'yesterday' in request.POST:\n ob = tasks.objects.filter(user=user,date__lt=datetime.date.today())[0]\n getdate = ob.date\n obj = tasks.objects.filter(user=user,date=getdate)\n if obj is not None:\n for i in obj:\n tasks.objects.create(user=user,title=i.title,desc=i.desc,check=False)\n messages.success(request,\"Tasks Added!\")\n else:\n messages.error(request,'No Previous Task Present')\n parms = {\n \"countz\":count,\n }\n return render(request,'addtask.html',parms)\n else:\n return redirect('account_login')\n\ndef profile(request):\n return render(request,'account/Profile.html')\n\ndef deletetask(request):\n user = request.user\n if user.is_authenticated:\n taskdelete = tasks.objects.filter(user=user,date=datetime.date.today())\n count = taskdelete.count()\n if request.method == \"POST\":\n id = request.POST['taskid']\n obj = tasks.objects.get(id=id)\n obj.delete()\n messages.success(request, \"Task Deleted\")\n parms = {\n \"taskdelete\":taskdelete,\n \"countz\":count,\n }\n return render(request,'deletetask.html',parms)\n else:\n return redirect('account_login')","repo_name":"ekesel/mytasks","sub_path":"taskapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28011765666","text":"import psmove\nimport colorsys\nimport time\nfrom math import 
sqrt\nfrom multiprocessing import Process, Queue\nfrom time import sleep\n\nmoves = [psmove.PSMove(x) for x in range(psmove.count_connected())]\n\ncolors = ['FF0000','FF8000','FFFF00','80FF00','00FF00','00FF80','00FFFF','0080FF','0000FF','8000FF','FF00FF','FF0080']\n\ndef colorhex(hex):\n r = int(hex[0:2],16)\n g = int(hex[2:4],16)\n b = int(hex[4:6],16)\n return (r,g,b)\n\ncolors = [colorhex(x) for x in colors]\n\ndef color_proc(q,):\n moves = [psmove.PSMove(x) for x in range(psmove.count_connected())]\n while True:\n while not q.empty():\n colors = q.get()\n for move,color in zip(moves,colors):\n move.set_leds(*color)\n move.update_leds()\n sleep(.25)\n\nq = Queue()\nq.put(colors)\nproc = Process(target=color_proc, args=(q,))\nproc.start()\nsleep(1)\nwhile True:\n moveid = input(\"Enter move number: \")\n try:\n newcolor_string = input(\"Enter color hex: \")\n newcolor = colorhex(newcolor_string)\n colors[int(moveid)-1] = newcolor\n except:\n print('Error! Enter again.')\n q.put(colors)\n q.put(colors)\n for move,color in zip(moves,colors):\n print(\"MOVE ID: %s, COLOR %s\" % (move.get_serial(),str(color)))\n #move.set_leds(255,255,255)\n #move.update_leds()\n","repo_name":"adangert/JoustMania","sub_path":"color_tests/interactive_colortest.py","file_name":"interactive_colortest.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"78"} +{"seq_id":"37530927780","text":"from django.db.models import Q\nfrom rest_framework.generics import (\n DestroyAPIView,\n ListAPIView,\n UpdateAPIView,\n RetrieveAPIView,\n CreateAPIView\n )\nfrom rest_framework.filters import (\n SearchFilter,\n OrderingFilter\n )\nfrom rest_framework.pagination import (\n LimitOffsetPagination,\n PageNumberPagination\n )\nfrom rest_framework.permissions import (\n AllowAny,\n IsAuthenticated,\n IsAdminUser,\n IsAuthenticatedOrReadOnly\n )\nfrom ..models import Post\nfrom .serializers import PostSerializer\n\n\nclass PostListAPIView(ListAPIView):\n serializer_class = PostSerializer\n permission_classes = [IsAuthenticated]\n filter_backends = [SearchFilter,OrderingFilter]\n search_fields = ['title', 'description']\n pagination_class = LimitOffsetPagination\n\n def get_queryset(self, *args, **kwargs):\n queryset_list = Post.objects.all()\n query = self.request.GET.get(\"z\")\n if query:\n queryset_list = queryset_list.filter(\n Q(title__contains=query) |\n Q(description__contains=query)\n ).distinct()\n return queryset_list\n\n\nclass PostDetailAPIView(RetrieveAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n lookup_field = 'slug'\n #permission_classes = [IsAuthenticated]\n\n\nclass PostUpdateAPIView(UpdateAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n lookup_field = 'slug'\n\n\nclass PostDeleteAPIView(DestroyAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n lookup_field = 'slug'\n\n\nclass PostCreateAPIView(CreateAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\n","repo_name":"Sonu11kumar/Myblog","sub_path":"blogapp/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1390403934","text":"# name = input(\"Enter your name: \")\n# def sum(name):\n# print(\"Hello \" +name.upper())\n# sum(name)\n# name 
= \"x123rach\"\n# name[4:]\n# print(name[4:])\n# name1= \"x123rach25\"\n# print(name1[4:8])\n# print(name1[::-1])\nemail=[\"pn@gmail.com\",\"rc@gmail.com\",\"dr@yahoo.com\",\"gh@yahoo.com\",\"fg@gmail.com\"]\nemail_status = email.endswith[\"@gmail.com\"]\nif email_status ==True:\n num=[]\n num.append()\n print(num)\n","repo_name":"pnkuria/Pythonprogramming","sub_path":"List/assign.py","file_name":"assign.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38795537062","text":"def leiadinheiro(msg, taxa=0, taaxa=0):\n while True:\n opçao = str(input(msg)).strip().replace(',', '.')\n if opçao.isnumeric():\n opçao = int(opçao)\n return opçao\n elif opçao.isalpha() or opçao.isalnum():\n print(f'\\033[31m{opçao} é um valor invalido. Digite novamente\\033[m')\n elif opçao == '':\n print('\\033[31mNao existe valor a ser Executado, opçao invalida. Digite novamente.\\033[m')\n else:\n return float(opçao)\n\n\ndef leiaInt(n):\n while True:\n valor = str(input(f'{n}'))\n if valor.isnumeric():\n valor = int(valor) # se for digitado um valor inteiro, passo a variavel pra int\n return f'Voce acabou de digitar o numero {valor}.'\n else:\n print('\\033[31mDigite novamente um numero\\033[m')\n\n\n","repo_name":"Brunoenrique27/Exercicios-em-Python","sub_path":"ex112/utilidadesCeV/dado/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17260674882","text":"import pandas as pd\nimport numpy as np\nimport os, wandb\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils import resample\n\nchoice = input(\"Do you have the W&B API key?: (y or n): \")\nchoice = choice.lower()\nif choice == 'y':\n print(\"Make sure you did 'pip install wandb' \")\n print(\"Going to run wandb login and you can paste your API key right in the \")\n os.system(\"wandb login\")\n \n # Going to init the project with a new run \n wandb.init(project=\"Nasa Tracked Objects\", entity=\"computer-talkers\")\n\n# Create a classifier object to perform Logistic Regression tasks\nlr = LogisticRegression()\n# create a dataframe to store the data from the csv\n\n\nraw_data = pd.read_csv(\"data/neo_v2.csv\")\n# print the data so we can see it\nprint(\"Here is a preview of the data raw data: \")\nprint(raw_data.head(5))\n\nprint(\"\"\"\n\\nThe data cleaning protocol includes the following tasks:\n\n1) Removing the non-numerical columns from the data.\n2) Changing the True/False to 1/0 (True is 1 and False is 0).\n3) Removing any features that may be correlated to another.\n4) Removing class imbalance so that the data set has a 50-50 split of true and false values in the target.\"\"\")\n\n# (1) There is only one numerical column here:\nclean_data = raw_data.drop([\"orbiting_body\", \"sentry_object\", \"id\", \"name\"], axis=1)\n\n# (2) Changing sentry_object and hazardous from True/False to 1/0\nbool_columns = [\"hazardous\"]\nclean_data[bool_columns] = clean_data[bool_columns].astype(int)\n\n# (3) Removing any features that may be correlated to another.\nclean_data = clean_data.drop([\"est_diameter_min\"], axis=1)\n\n# (4) Removing class imbalance\n# Separate majority and minority classes\nhazardous_majority = clean_data[clean_data.hazardous == 0]\nhazardous_minority = 
clean_data[clean_data.hazardous == 1]\n\n# Downsample majority class\nmajority_downsampled = resample(hazardous_majority, replace=False, n_samples=8840)\n\n# Combine minority class with downsampled majority class\nclean_data = pd.concat([majority_downsampled, hazardous_minority])\n\nprint(\"Here is a preview of the cleaned data: \")\nprint(clean_data.head(5))\n\n\n'''\n# We will train our model on the training dataset and see how accurate the model is using the testing dataset.\n# train_test_split will split up and randomize the data each time it's run, meaning we will randomly select\n80% of the data set to train with and randomly select 20% of the dataset to test with. Note that the testing \nand training data sets are disjoint.\n'''\ntrain, test = train_test_split(clean_data, test_size=0.2)\n\n'''\n# Now we want to split up our training and testing datasets into their features and targets respectively.\nFeatures are the predictive variables. In our case they are the est_diameter_max(0), relative_velocity(1),\nmiss_distance(2), absolute_magnitude(3) columns. \nTargets are the response variables. In our case it's the hazardous column (4).\n\n'''\ntraining_features = train.iloc[:, [0, 1, 2, 3]] # all rows and feature columns 0 - 3 in the training data set\ntraining_targets = train[\"hazardous\"] # Training targets is a Series data type and not a DataFrame data type\n\ntesting_features = test.iloc[:, [0, 1, 2, 3]] # all rows and feature columns 0 - 3 in the testing data set\ntesting_targets = test[\"hazardous\"] # Testing targets is a Series data type and not a DataFrame data type\n\n'''\nNow we are going to fit our logistic regression model and then see the fit score for the training and testing datasets.\nResults show:\nTraining fit score: 0.9026118786811251\nTesting fit score: 0.9029612505504183\nMeaning the model labeled roughly 90.3% of cases correctly\n'''\nlr.fit(training_features, training_targets)\ntraining_score = lr.score(training_features, training_targets)\nprint(f\"Training fit score: {training_score}\\n\")\n\n\nlr.fit(testing_features, testing_targets)\ntesting_score = lr.score(testing_features, testing_targets)\nprint(f\"Testing fit score: {testing_score}\\n\")\n\n# Plotting regressor to wandb\nif choice == 'y':\n    # wandb.sklearn.plot_regressor(lr, training_features, training_targets, testing_features, testing_targets, model_name=\"Nasa Objects\")\n    # wandb.sklearn.plot_regressor(lr, training_targets, testing_targets, training_features, testing_features, model_name=\"Nasa Tracked Objects\")\n    wandb.sklearn.plot_learning_curve(lr, training_targets, training_targets)\n    # wandb.sklearn.plot_class_proportions(training_features, testing_features)\n'''\nLog-Odds / Logits / Odds Ratio\nGoogle this to find its meaning in logistic/ linear regression\n'''\nprint(\"Log-Odds\")\nprint(f\"{np.transpose(lr.coef_)}\\n\")\n\n'''\nConfusion Matrix\nGives us information on how many correct positives (cp) and correct negatives (cn) we found. 
As well as how many false \npositives (fp) and false negatives (fn) we found.\nIn scikit-learn's convention the rows are true labels and the columns are\npredicted labels, so for binary labels [0, 1] the layout is:\n[[ tn fp]\n [ fn tp]]\n'''\ntraining_features_pred = lr.predict(training_features)\ntraining_conf_matrix = confusion_matrix(training_targets, training_features_pred)\n\nprint(\"Training Confusion matrix\")\nprint(f\"{training_conf_matrix}\\n\")\n\ntesting_features_pred = lr.predict(testing_features)\ntesting_conf_matrix = confusion_matrix(testing_targets, testing_features_pred)\nif choice == 'y':\n    wandb.sklearn.plot_confusion_matrix(testing_targets, testing_features_pred)\n    wandb.finish()\nprint(\"Testing Confusion matrix\")\nprint(f\"{testing_conf_matrix}\\n\")","repo_name":"Computer-Talkers/nasa-tracked-objects","sub_path":"john.py","file_name":"john.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"17061959260","text":"from dash import Dash, dcc, html, Input, Output, dash_table\nimport numpy as np\nimport plotly.express as px\n\nfrom rdroc.models import StarCluster\n\n\ndef dash_plot(input_dict: dict[str, StarCluster]) -> Dash:\n\n    cluster_names = list(input_dict.keys())\n\n    app = Dash(__name__)\n\n    app.layout = html.Div(\n        [\n            html.Div(\n                [\n                    dcc.Dropdown(\n                        cluster_names, \"NGC_2360\", id=\"cluster-selection-dropdown\"\n                    )\n                ],\n            ),\n            html.Div(\n                [\n                    dcc.Graph(id=\"spatial-graphic\", style={\"height\": \"40vh\"}),\n                    dcc.Graph(id=\"pm-graphic\", style={\"height\": \"40vh\"}),\n                ],\n                style={\"width\": \"40vw\", \"display\": \"inline-block\"},\n            ),\n            html.Div(\n                [\n                    dcc.Graph(id=\"cmd-graphic\", style={\"height\": \"80vh\"}),\n                ],\n                style={\"width\": \"58vw\", \"height\": \"80vh\", \"display\": \"inline-block\"},\n            ),\n            html.Div(id=\"table-cluster-params\", style={\"overflow\": \"scroll\"}),\n        ]\n    )\n\n    def get_figure(df, x_col, y_col, selectedpoints, selectedpoints_local):\n        \"\"\"Get figure based on selected points. 
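Rows whose dataframe index appears in selectedpoints keep marker opacity 0.9,\n        while all other markers are dimmed to 0.1; customdata carries df.index so\n        later callbacks can map a selection back to its rows (a reading of the\n        code below).\n        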
Extracted from https://dash.plotly.com/interactive-graphing\"\"\"\n\n        fig = px.scatter(df, x=df[x_col], y=df[y_col])\n        fig.update_traces(\n            selectedpoints=selectedpoints,\n            customdata=df.index,\n            marker={\"opacity\": 0.9},\n            unselected={\n                \"marker\": {\"opacity\": 0.1},\n            },\n        )\n\n        return fig\n\n    @app.callback(\n        Output(\"spatial-graphic\", \"figure\"),\n        Output(\"pm-graphic\", \"figure\"),\n        Output(\"cmd-graphic\", \"figure\"),\n        Output(\"table-cluster-params\", \"children\"),\n        Input(\"cluster-selection-dropdown\", \"value\"),\n        Input(\"spatial-graphic\", \"selectedData\"),\n        Input(\"pm-graphic\", \"selectedData\"),\n        Input(\"cmd-graphic\", \"selectedData\"),\n    )\n    def update_figure(\n        selected_cluster, spatial_selected_data, pm_selected_data, cmd_selected_data\n    ):\n        cluster = input_dict[selected_cluster]\n        df = cluster.datatable.to_pandas()\n        df = df.rename(columns={\"pmRA\": \"pmRA_\"})\n        selectedpoints = df.index\n        for selected_data in [\n            spatial_selected_data,\n            pm_selected_data,\n            cmd_selected_data,\n        ]:\n            if selected_data and selected_data[\"points\"]:\n                selectedpoints = np.intersect1d(\n                    selectedpoints, [p[\"customdata\"] for p in selected_data[\"points\"]]\n                )\n\n        # Reverse axis for CMD plot\n        fig3 = get_figure(df, \"BP-RP\", \"Gmag\", selectedpoints, spatial_selected_data)\n        fig3[\"layout\"][\"yaxis\"][\"autorange\"] = \"reversed\"\n\n        # Create datatable\n        dfp = cluster.paramtable.to_pandas()\n        dt = dash_table.DataTable(\n            data=dfp.to_dict(\"records\"),\n            columns=[\n                {\n                    \"name\": i,\n                    \"id\": i,\n                }\n                for i in dfp.columns\n            ],\n        )\n\n        return [\n            get_figure(df, \"RA_ICRS\", \"DE_ICRS\", selectedpoints, spatial_selected_data),\n            get_figure(df, \"pmRA_\", \"pmDE\", selectedpoints, spatial_selected_data),\n            fig3,\n            dt,\n        ]\n\n    return app\n","repo_name":"jorgeanais/rdroc","sub_path":"rdroc/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9382418074","text":"#from numpy import stats\nimport numpy as np\nimport matplotlib.patches as mpatches\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\n\n\n\n\ndef gaussMulti(x,mu,covar):\n\n    aux=1/(np.sqrt(((2*np.pi)**2)*np.linalg.det(covar)))\n    res=aux*np.exp(-0.5*(((x-mu).conj().T.dot(np.linalg.inv(covar).dot((x-mu))))))\n    return res\n\nclass1=np.loadtxt(\"densEstClass1.txt\")\nclass2=np.loadtxt(\"densEstClass2.txt\")\nclass1M=np.asmatrix(class1)\nclass2M=np.asmatrix(class2)\n\nsize1=np.float64(np.size(class1,0))\nsize2=np.float64(np.size(class2,0))\nN=(size1+size2)\nprior1=size1/N\nprior2=size2/N\n\nmean1=[(1/size1)*np.sum(class1M[:,0]),(1/size1)*np.sum(class1M[:,1])]\nmean2=[(1/size2)*np.sum(class2M[:,0]),(1/size2)*np.sum(class2M[:,1])]\n\ncovariance1 = np.zeros([2, 2])\ncovariance2 = np.zeros([2, 2])\nprint(size1)\n# accumulate the scatter matrices over all n samples\nfor k in np.arange(0,int(size1)):\n    covariance1 = covariance1 + np.outer(class1M[k] - mean1, class1M[k] - mean1)\nfor k in np.arange(0,int(size2)):\n    covariance2 = covariance2 + np.outer(class2M[k] - mean2, class2M[k] - mean2)\ncovariance1 = covariance1* (1 / size1)\ncovariance2 = covariance2* (1 / size2)\n\n# unbiased estimates: rescale the MLE covariance by n/(n-1)\nuncovariance1 = covariance1* (size1 / (size1-1))\nuncovariance2 = covariance2* (size2 / (size2-1))\n\npdf1=np.zeros([int(size1),1])\npdf2=np.zeros([int(size2),1])\nfor k in np.arange(0, int(size1)):\n    pdf1[k]=gaussMulti(class1[k],mean1,uncovariance1)\nfor k in np.arange(0, int(size2)):\n    pdf2[k] = gaussMulti(class2[k], mean2, 
uncovariance2)\n\n\n#X, Y = np.meshgrid(class1[0:size1,0],class1[0:size1,1])\n#Z1 = mlab.bivariate_normal(X, Y, covariance1[0,0], covariance1[1,1], mean1[0], mean1[1],covariance1[1,0])\n#plt.contour(X,Y,Z1,linewidths=0.1,zorder=-1,inline=1)\n#X, Y = np.meshgrid(class2[0:size2,0],class2[0:size2,1])\n#Z2 = mlab.bivariate_normal(X, Y, covariance2[0,0], covariance2[1,1], mean2[0], mean2[1],covariance2[1,0])\n#plt.contour(X,Y,Z2,linewidths=0.1,zorder=-1,inline=1)\n#plt.scatter(class1M[:,0],class1M[:,1],20,color='red',zorder=1,)\n#plt.scatter(class2M[:,0],class2M[:,1],20,color='blue',zorder=1)\n#plt.show()\n\n#ll_new = 0\n#for mu, sigma in zip(mean1, covariance1):\n # ll_new += pi * mvn(mu, sigma).pdf(xs)\n# ll_new += mvn(mu, sigma).pdf(class1)\n#logLike[i] = np.log(ll_new).sum()\n\nposterior1=np.log(pdf1)*prior1/prior2\nposterior2=np.log(pdf2)*prior2/prior1\nplt.plot(posterior1)\nplt.plot(posterior2)\nplt.show()\n\n\n#class1M=np.asmatrix(class1)\n#class2M=np.asmatrix(class2)\n#plt.axis([-10, 11, -10, 11])\n#plt.scatter(class1M[:,0],class1M[:,1],30,'red')\n#plt.scatter(class2M[:,0],class2M[:,1],30,'blue')\n#plt.contour(likelihood1)\n#plt.show()#\n\n#posterior1=np.asmatrix(likelihood1*prior1)\n#posterior2=np.asmatrix(likelihood2*prior2)\n#plt.plot(posterior1[:,0],posterior1[:,1])\n#plt.plot(posterior2[:,0],posterior2[:,1])\n#plt.show()\n\n\n#priorMean=gauss(0,mean1,variance1)","repo_name":"javierdvo/MachineLearning","sub_path":"Homework2/MaximumLikelihood.py","file_name":"MaximumLikelihood.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15886555565","text":"import datetime\nimport enum\nimport pathlib\n\nimport edifice as ed\nfrom edifice.components.forms import FormDialog\n\n\nclass Color(enum.Enum):\n RED = 0\n GREEN = 1\n BLUE = 2\n\nform_data = ed.StateManager({\n \"Value 1\": 0.1,\n \"Value 2\": 1.1,\n \"Value 3\": 1.3,\n \"Color\": Color.RED,\n \"File\": pathlib.Path(\"\"),\n \"Date\": datetime.date(1970, 1, 1),\n \"Sum\": lambda d: d[\"Value 1\"] + d[\"Value 2\"] + d[\"Value 3\"]\n})\n\n\ned.App(FormDialog(form_data)).start()\nprint(form_data.as_dict())\n","repo_name":"fding/pyedifice","sub_path":"examples/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"78"} +{"seq_id":"10311813014","text":"# implementation of card game - Memory\n\nimport simplegui\nimport random\n\n# global constants\nWIDTH = 800\nHEIGHT = 100\nIM_WIDTH = WIDTH // 16 # evenly spaced 16 cards\nIM_HEIGHT = HEIGHT\nHORIZONTAL_OFFSET = 8\nVERTICAL_OFFSET = 32\n\n# external images\next_image = True\n\ncard_images = simplegui.load_image(\"\")\n\nCARD_BACK_SIZE = (71, 96)\nCARD_BACK_CENTER = (35.5, 48)\ncard_back = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/card_back.png\")\n\n\n# helper function to initialize globals\ndef new_game():\n global num_list, exposed, state, selected, moves\n \n # create a deck of cards\n num_list = range(0,8)\n num_list.extend(range(0,8))\n random.shuffle(num_list)\n \n # exposed and selected lists\n exposed = [False] * 16\n selected = [] # List of 2 lists. 
Index 0 = numb_list selected / Index 1 numb_list index\n \n # restart game state\n state = 0\n moves = 0 \n \n# define event handlers\ndef mouseclick(pos):\n global exposed, state, selected, moves\n index = pos[0] // IM_WIDTH\n \n # game state logic\n if state == 0: # game start\n if exposed[index] == False:\n exposed[index] = True\n selected.append([num_list[index], index])\n state = 1\n elif state == 1: # single exposed unpaired card\n if exposed[index] == False:\n exposed[index] = True\n selected.append([num_list[index], index])\n moves += 1\n state = 2\n else: # end of a turn\n if exposed[index] == False:\n exposed[index] = True\n if (selected[1][0] != selected[0][0]):\n exposed[selected[0][1]] = exposed[selected[1][1]] = False\n selected = []\n selected.append([num_list[index], index])\n state = 1 \n \n# cards are logically 50x100 pixels in size \ndef draw(canvas):\n for i in range(0,len(num_list)):\n if not ext_image:\n if exposed[i] == False:\n canvas.draw_polygon([[IM_WIDTH*i, 0], [IM_WIDTH*(i+1), 0], [IM_WIDTH*(i+1), IM_HEIGHT], [IM_WIDTH*i, IM_HEIGHT]], 5, \"Red\", \"Green\")\n else:\n canvas.draw_text(str(num_list[i]),[IM_WIDTH * i + HORIZONTAL_OFFSET, IM_HEIGHT - VERTICAL_OFFSET], 60, \"White\", \"sans-serif\")\n elif ext_image:\n if exposed[i] == False:\n canvas.draw_image(card_back, CARD_BACK_CENTER, CARD_BACK_SIZE, [(IM_WIDTH // 2) * (2 * i + 1), IM_HEIGHT //2], [IM_WIDTH, IM_HEIGHT])\n else:\n canvas.draw_text(str(num_list[i]),[IM_WIDTH * i + HORIZONTAL_OFFSET, IM_HEIGHT - VERTICAL_OFFSET], 60, \"White\", \"sans-serif\")\n label.set_text(\"Turns = \" + str(moves))\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", WIDTH, HEIGHT)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()","repo_name":"Maycas/Interactive-Python-Games","sub_path":"Interactive Programming in Python/Memory.py","file_name":"Memory.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"17072947781","text":"from reportlab.lib import colors\nfrom reportlab.lib.enums import TA_CENTER, TA_LEFT\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import SimpleDocTemplate, Image, Table, Paragraph\nfrom reportlab.pdfgen import canvas\nimport models\nimport dbexecutor\nimport config\nimport util\n\n\nclass FormStockMovesTemplate:\n incomingform = None\n logopath = config.AppRoot + '/static/img/mkskablo.png'\n pdfname = config.AppRoot + '/static/data/form.pdf'\n pdf = None\n styles = getSampleStyleSheet()\n entrylist = None\n columnlist = None\n doctitle = None\n tresholdcolsize = 5\n maxvsize = 490\n maxhsize = 750\n stocktypesize = 130\n stockcolorsize = 80\n stockquantitysize = 90\n stockpackagequantitysize = 90\n userfullnamesize = 85\n corporationsize = 120\n movetypesize = 110\n stockroomsize = 100\n shipinfosize = 100\n decriptionsize = 100\n datesize = 95\n\n def __init__(self, entrylist, columnlist, doctitle):\n \"\"\"\n Instantiates a new instance of IncomingStockFormTemplate class\n :param entrylist: The list of id of stockbase entries\n :type entrylist: list[int]\n :param columnlist: The list of id of columns selected by 
user\n :type columnlist: list[int]\n :param doctitle: The title of the document\n :type doctitle: str\n \"\"\"\n self.entrylist = entrylist\n self.columnlist = columnlist\n self.doctitle = doctitle\n\n if len(self.columnlist) > self.tresholdcolsize:\n # Horizontal A4 Page\n self.pdf = SimpleDocTemplate(self.pdfname, pagesize=(A4[1], A4[0]))\n else:\n # Vertical A4 Page\n self.pdf = SimpleDocTemplate(self.pdfname, pagesize=A4)\n\n pdfmetrics.registerFont(TTFont('MyCalibri', config.AppRoot + '/static/webfonts/CALIBRI.ttf'))\n pdfmetrics.registerFont(TTFont('MyCalibriBold', config.AppRoot + '/static/webfonts/CALIBRI-BOLD.ttf'))\n\n def generatePdf(self):\n elems = []\n headertable = self.createheadertable()\n\n bodytable = self.createbodytable()\n # signaturetable = self.createsignaturetable()\n\n elems.append(headertable)\n elems.append(bodytable)\n # elems.append(signaturetable)\n self.pdf.build(elems)\n\n return self.pdfname\n\n def createheadertable(self):\n headerdata = [\n [self.getlogo(), self.gettitle(), '', 'BELGE TARİHİ'],\n ['', '', '', self.getdate()]\n ]\n\n hwidth = 0\n\n if len(self.columnlist) > self.tresholdcolsize:\n # Horizontal A4 Page\n hwidth = (self.maxhsize - 120 - 130) / 2.0\n else:\n # Vertical A4 Page\n hwidth = (self.maxvsize - 120 - 130) / 2.0\n\n header = Table(headerdata, colWidths=[120, hwidth, hwidth, 130])\n headerstyle = [\n ('GRID', (0, 0), (-1, -1), 0.25, colors.black),\n ('SPAN', (0, 0), (0, 1)), # Header-left image\n ('VALIGN', (0, 0), (0, 1), 'MIDDLE'),\n ('ALIGN', (0, 0), (0, 1), 'CENTER'),\n ('SPAN', (1, 0), (2, 1)), # Header-center title\n ('ALIGN', (1, 0), (2, 1), 'CENTER'),\n ('VALIGN', (1, 0), (2, 1), 'MIDDLE'),\n ('BOTTOMPADDING', (1, 0), (2, 1), 15),\n ('VALIGN', (3, 0), (-1, 1), 'MIDDLE'), # Header-right date\n ('ALIGN', (3, 0), (-1, 1), 'CENTER'),\n ('FONTNAME', (3, 0), (-1, 1), 'MyCalibriBold'),\n ('FONTSIZE', (3, 0), (-1, 1), 14),\n ('SPAN', (0, 2), (3, 2)), # Come from title\n ('BOTTOMPADDING', (0, 2), (1, 2), 7),\n ('VALIGN', (0, 2), (1, 2), 'MIDDLE')\n ]\n header.setStyle(headerstyle)\n return header\n\n def createbodytable(self):\n bodydata = []\n\n headerlist = []\n\n sizelist = []\n\n if 1 in self.columnlist:\n headerlist.append(self.getstocktableheader('MALZEMENİN CİNSİ'))\n sizelist.append(self.stocktypesize)\n\n if 2 in self.columnlist:\n headerlist.append(self.getstocktableheader('RENGİ'))\n sizelist.append(self.stockcolorsize)\n\n if 3 in self.columnlist:\n headerlist.append(self.getstocktableheader('MİKTARI'))\n sizelist.append(self.stockquantitysize)\n\n if 4 in self.columnlist:\n headerlist.append(self.getstocktableheader('AMBALAJ SAYISI'))\n sizelist.append(self.stockpackagequantitysize)\n\n if 5 in self.columnlist:\n headerlist.append(self.getstocktableheader('KULLANICI ADI'))\n sizelist.append(self.userfullnamesize)\n\n if 6 in self.columnlist:\n headerlist.append(self.getstocktableheader('ŞİRKET İSMİ'))\n sizelist.append(self.corporationsize)\n\n if 7 in self.columnlist:\n headerlist.append(self.getstocktableheader('HAREKET TİPİ'))\n sizelist.append(self.movetypesize)\n\n if 8 in self.columnlist:\n headerlist.append(self.getstocktableheader('AMBAR'))\n sizelist.append(self.stockroomsize)\n\n if 9 in self.columnlist:\n headerlist.append(self.getstocktableheader('ARAÇ BİLGİLERİ'))\n sizelist.append(self.shipinfosize)\n\n if 10 in self.columnlist:\n headerlist.append(self.getstocktableheader('AÇIKLAMA'))\n sizelist.append(self.decriptionsize)\n\n if 11 in self.columnlist:\n 
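\n # id 11 -> the date column; ids 1-10 above follow the same header/width pattern.\n # A table-driven variant could map {id: (header, width)} and loop once (sketch only).\n 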
headerlist.append(self.getstocktableheader('TARİH'))\n sizelist.append(self.datesize)\n\n bodydata.append(headerlist)\n\n totalsize = 0\n\n for size in sizelist:\n totalsize += size\n\n if len(self.columnlist) > self.tresholdcolsize:\n # Horizontal A4 Page\n unitsize = self.maxhsize / totalsize\n else:\n # Vertical A4 Page\n unitsize = self.maxvsize / totalsize\n\n colwidths = []\n\n for size in sizelist:\n colwidths.append(size*unitsize)\n\n bodystyle = [\n ('GRID', (0, 0), (-1, -1), 0.25, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('BOTTOMPADDING', (0, 0), (-1, 0), 10),\n ('BOTTOMPADDING', (0, 1), (-1, -1), 7)\n ]\n i = 0\n\n for ertyid in self.entrylist:\n stockbase = dbexecutor.getStockBase(ertyid)\n formstockbase = dbexecutor.getFormStockBasesByStockBaseId(stockbase.id)\n stockform = dbexecutor.getStockForm(formstockbase.stockformid)\n\n bodyrowlist = []\n if 1 in self.columnlist:\n bodyrowlist.append(self.getstockentry(stockbase.getStockTypeName()))\n\n if 2 in self.columnlist:\n bodyrowlist.append(self.getstockentry(stockbase.getStockColorName()))\n\n if 3 in self.columnlist:\n bodyrowlist.append(self.getstockentry(stockbase.getQuantityText()))\n\n if 4 in self.columnlist:\n bodyrowlist.append(self.getstockentry(formstockbase.getPackageQuantityText()))\n\n if 5 in self.columnlist:\n userfullname = dbexecutor.getUser(stockbase.userid).getFullName()\n bodyrowlist.append(self.getstockentry(userfullname))\n\n if 6 in self.columnlist:\n bodyrowlist.append(self.getstockentry(stockform.getCorporationName()))\n\n if 7 in self.columnlist:\n if stockbase.actiontype:\n bodyrowlist.append(self.getstockentry('Depo Giriş Formu'))\n else:\n bodyrowlist.append(self.getstockentry('Ürün Sevkiyat Formu'))\n\n if 8 in self.columnlist:\n if stockbase.actiontype:\n bodyrowlist.append(self.getstockentry('-'))\n else:\n outgoingstockform = dbexecutor.getOutgoingStockFormByStockFormId(stockform.id)\n bodyrowlist.append(self.getstockentry(outgoingstockform.getStockroomName()))\n\n if 9 in self.columnlist:\n if stockbase.actiontype:\n bodyrowlist.append(self.getstockentry('-'))\n else:\n outgoingstockform = dbexecutor.getOutgoingStockFormByStockFormId(stockform.id)\n bodyrowlist.append(self.getstockentry(outgoingstockform.shipinfo))\n\n if 10 in self.columnlist:\n bodyrowlist.append(self.getstockentry(formstockbase.note))\n\n if 11 in self.columnlist:\n bodyrowlist.append(self.getstockentry(stockbase.createdate.strftime('%d.%m.%Y %I:%M:%S')))\n\n i = i + 1\n bodydata.append(bodyrowlist)\n\n body = Table(bodydata, colWidths=colwidths, spaceBefore=0)\n # body = Table(bodydata, [210, 100, 120, 120], rowheights, spaceBefore=0)\n body.setStyle(bodystyle)\n return body\n\n def createsignaturetable(self):\n signaturedata = [\n [self.getsignatureitem('TESLİM EDEN: '),\n self.getsignatureitem('TESLİM ALAN: ')],\n [self.getsignatureitem('İMZA: '), self.getsignatureitem('İMZA: ')]\n ]\n\n signaturestyle = [\n ('GRID', (0, 0), (-1, -1), 0.25, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE')\n ]\n\n signature = Table(signaturedata, [275, 275], [25, 25], spaceBefore=20)\n signature.setStyle(signaturestyle)\n return signature\n\n def getlogo(self):\n picture = Image(self.logopath)\n picture.drawWidth = 110\n picture.drawHeight = 55\n return picture\n\n def gettitle(self):\n titleparagrapfstyle = ParagraphStyle(name='center', fontName='MyCalibriBold', fontSize=26,\n parent=self.styles['Normal'], leading=24, alignment=TA_CENTER)\n titleparagraph = 
Paragraph(self.doctitle, titleparagrapfstyle)\n return titleparagraph\n\n def getdate(self):\n date = util.turkishTimeNow()\n dateparagrapfstyle = ParagraphStyle(name='center', fontName='MyCalibri', fontSize=12,\n parent=self.styles['Normal'], alignment=TA_CENTER)\n dateparagraph = Paragraph(date.strftime('%d.%m.%Y %I:%M:%S'), dateparagrapfstyle)\n return dateparagraph\n\n def getcomefrom(self):\n stockform = self.incomingform.getStockForm()\n getcomefromparagrapfstyle = ParagraphStyle(name='center', fontName='MyCalibriBold', fontSize=14,\n parent=self.styles['Normal'], alignment=TA_LEFT)\n getcomefromparagraph = Paragraph(stockform.getCorporationName(), getcomefromparagrapfstyle)\n return getcomefromparagraph\n\n def getcountheader(self):\n countparagrapfstyle = ParagraphStyle(name='center', fontName='MyCalibriBold', fontSize=14,\n parent=self.styles['Normal'], alignment=TA_CENTER)\n counttableparagraph = Paragraph('SIRA NO', countparagrapfstyle)\n return counttableparagraph\n\n def getnoteheader(self):\n paragrapfstyle = ParagraphStyle(name='center', fontName='MyCalibriBold', fontSize=14,\n parent=self.styles['Normal'], alignment=TA_CENTER)\n paragraph = Paragraph('AÇIKLAMA', paragrapfstyle)\n return paragraph\n\n def getstocktableheader(self, headername):\n stocktableparagrapfstyle = ParagraphStyle(name='center', fontName='MyCalibriBold', fontSize=14,\n parent=self.styles['Normal'], alignment=TA_CENTER)\n stocktableparagraph = Paragraph(headername, stocktableparagrapfstyle)\n return stocktableparagraph\n\n def getstocktypeentry(self, description):\n typeparagraphstyle = ParagraphStyle(name='center', fontName='MyCalibri', fontSize=12,\n parent=self.styles['Normal'], alignment=TA_LEFT)\n typeparagraph = Paragraph(description, typeparagraphstyle)\n return typeparagraph\n\n def getstockentry(self, stockproperty):\n stockparagraphstyle = ParagraphStyle(name='center', fontName='MyCalibri', fontSize=12,\n parent=self.styles['Normal'], alignment=TA_CENTER)\n stockparagraph = Paragraph(stockproperty, stockparagraphstyle)\n return stockparagraph\n\n def getsignatureitem(self, prop):\n signatureparagraphstyle = ParagraphStyle(name='center', fontName='MyCalibri', fontSize=10,\n parent=self.styles['Normal'], alignment=TA_LEFT)\n signatureparagraph = Paragraph(prop, signatureparagraphstyle)\n return signatureparagraph\n","repo_name":"obirler/MksWebServer","sub_path":"doctemplates/FormStockMovesTemplate.py","file_name":"FormStockMovesTemplate.py","file_ext":"py","file_size_in_byte":12899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31793230338","text":"import logging\nimport threading\n\ntry:\n import grpc\n from newrelic.core.infinite_tracing_pb2 import Span, RecordStatus\nexcept ImportError:\n grpc = None\n\n_logger = logging.getLogger(__name__)\n\n\nclass StreamingRpc(object):\n \"\"\"Streaming Remote Procedure Call\n\n This class keeps a stream_stream RPC alive, retrying after a timeout when\n errors are encountered. 
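\n\n Hypothetical wiring (names as defined in this module):\n\n rpc = StreamingRpc(endpoint, stream_buffer, metadata, record_metric)\n rpc.connect() # starts the response-processing thread\n\n 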
If grpc.StatusCode.UNIMPLEMENTED is encountered, a\n retry will not occur.\n \"\"\"\n\n PATH = \"/com.newrelic.trace.v1.IngestService/RecordSpan\"\n\n def __init__(self, endpoint, stream_buffer, metadata, record_metric, ssl=True):\n if ssl:\n credentials = grpc.ssl_channel_credentials()\n channel = grpc.secure_channel(endpoint, credentials)\n else:\n channel = grpc.insecure_channel(endpoint)\n self.channel = channel\n self.metadata = metadata\n self.request_iterator = stream_buffer\n self.response_processing_thread = threading.Thread(\n target=self.process_responses, name=\"NR-StreamingRpc-process-responses\"\n )\n self.response_processing_thread.daemon = True\n self.notify = self.condition()\n self.rpc = self.channel.stream_stream(\n self.PATH, Span.SerializeToString, RecordStatus.FromString\n )\n self.record_metric = record_metric\n\n @staticmethod\n def condition(*args, **kwargs):\n return threading.Condition(*args, **kwargs)\n\n def close(self):\n channel = None\n with self.notify:\n if self.channel:\n channel = self.channel\n self.channel = None\n self.notify.notify_all()\n\n if channel:\n _logger.debug(\"Closing streaming rpc.\")\n channel.close()\n try:\n self.response_processing_thread.join(timeout=5)\n except Exception:\n pass\n _logger.debug(\"Streaming rpc close completed.\")\n\n def connect(self):\n self.response_processing_thread.start()\n\n def process_responses(self):\n response_iterator = None\n\n while True:\n with self.notify:\n if self.channel and response_iterator:\n code = response_iterator.code()\n details = response_iterator.details()\n\n self.record_metric(\n \"Supportability/InfiniteTracing/Span/gRPC/%s\" % code.name,\n {\"count\": 1},\n )\n\n if code is grpc.StatusCode.OK:\n _logger.debug(\n \"Streaming RPC received OK \"\n \"response code. The agent will attempt \"\n \"to reestablish the stream immediately.\"\n )\n else:\n self.record_metric(\n \"Supportability/InfiniteTracing/Span/Response/Error\",\n {\"count\": 1},\n )\n\n if code is grpc.StatusCode.UNIMPLEMENTED:\n _logger.error(\n \"Streaming RPC received \"\n \"UNIMPLEMENTED response code. \"\n \"The agent will not attempt to \"\n \"reestablish the stream.\"\n )\n break\n\n _logger.warning(\n \"Streaming RPC closed. \"\n \"Will attempt to reconnect in 15 seconds. 
\"\n \"Code: %s Details: %s\",\n code,\n details,\n )\n self.notify.wait(15)\n\n if not self.channel:\n break\n\n response_iterator = self.rpc(\n self.request_iterator, metadata=self.metadata\n )\n _logger.info(\"Streaming RPC connect completed.\")\n\n try:\n for response in response_iterator:\n _logger.debug(\"Stream response: %s\", response)\n except Exception:\n pass\n\n self.close()\n _logger.info(\"Process response thread ending.\")\n","repo_name":"rocarmo/robocopy","sub_path":"eng_env/Lib/site-packages/newrelic/core/agent_streaming.py","file_name":"agent_streaming.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74625788410","text":"from PIL import Image\nimport cv2 as cv\nimport numpy as np\nimport os\nimport glob\n\n\ndef image_bin_conversion(image_dir):\n img_arr = cv.imread(image_dir, cv.IMREAD_GRAYSCALE)\n _, thresh_img = cv.threshold(img_arr, 127, 255, cv.THRESH_BINARY_INV)\n return thresh_img\n\n\ndef batch_image_to_grayscale(raw_data_dir, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n image_files = glob.glob(os.path.join(raw_data_dir, '*.png'))\n\n for image_file in image_files:\n # Load the image\n bin_img = image_bin_conversion(image_file)\n\n # Extract the filename without extension\n image_filename = os.path.splitext(os.path.basename(image_file))[0]\n\n # Split the image into individual clips\n clip_filename = f\"bw_{image_filename}.png\"\n\n # Save the clip as a PyTorch tensor file\n clip_path = os.path.join(output_dir, clip_filename)\n # torch.save(clip_tensor, clip_path)\n params = [cv.IMWRITE_PNG_COMPRESSION, 1, cv.IMWRITE_PNG_BILEVEL, 1]\n cv.imwrite(clip_path, bin_img, params)\n\n\ndef edge_pixel_extraction(image_dir, size, extraction_width):\n image = cv.imread(image_dir, cv.IMREAD_UNCHANGED)\n image = Image.open(image_dir)\n image = np.asarray(image)\n img_arr = image\n edge_arr = np.zeros([size * 4 - 4, extraction_width])\n for j in range(size - 1):\n edge_arr[j, :] = img_arr[0:extraction_width, j]\n for j in range(size - 1):\n edge_arr[j + size - 1, :] = np.flip(np.transpose(img_arr[j+1, 0:extraction_width]), 0)\n for j in range(size - 1):\n edge_arr[j + size * 2 - 2, :] = img_arr[size - extraction_width:size, j+1]\n for j in range(size - 1):\n edge_arr[j + size * 3 - 3, :] = np.flip(np.transpose(img_arr[j, (size - extraction_width):size]), 0)\n edge_arr_int = edge_arr.astype(np.int8)\n return edge_arr_int\n\n\ndef edge_pin_extraction(edge_arr):\n edge_tot_len = np.size(edge_arr, 0)\n extraction_width = np.size(edge_arr, 1)\n edge_side_len = int(edge_tot_len / 4)\n edge_metal_raw = np.zeros(edge_tot_len) # stores metal existence at the edge and pin orientation\n edge_pin_info = np.zeros(edge_tot_len) # first col pin num\n pin_num = 0\n for j in range(edge_tot_len):\n if sum(edge_arr[j, :]) > extraction_width - 1:\n # if all pixels are filled, out metal pin exists\n edge_metal_raw[j] = 1\n # operate on each side\n for side in range(4):\n for j in range(edge_side_len):\n idx = j + edge_side_len * side\n if edge_metal_raw[idx] == 1:\n edge_pin_info[idx] = pin_num\n if j < edge_side_len:\n if edge_metal_raw[idx + 1] == 0:\n pin_num += 1\n elif edge_metal_raw[idx] == 0:\n edge_pin_info[idx] = -1\n edge_pin_info = edge_pin_info.astype(np.int8)\n # now store pins by pin top location and corresponding width\n side_pins = [[], [], [], []]\n\n for side in range(4):\n pin_width_cnt = 0\n first_metal_pix_flag = 1\n # keeps track of total 
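pin entries appended so far\n # Hypothetical example: side_pins[0] == [[12, 3], [40, 2]] means two pins on\n # side 0 starting at offsets 12 and 40 with widths 3 and 2.\n # i.e. 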
num of pins\n pin_num_cnt = 0\n for pin_loc in range(edge_side_len):\n # index for the overall arr\n idx = pin_loc + edge_side_len * side\n # pin nums\n curr_pin_num = edge_pin_info[idx]\n\n # end of arr exception\n if idx < edge_tot_len-1:\n next_pin_num = edge_pin_info[idx+1]\n else:\n next_pin_num = -1\n\n # if current location has a pin\n if curr_pin_num >= 0:\n # add pin width\n pin_width_cnt += 1\n # if starting new pin\n if first_metal_pix_flag:\n # append new pin to list\n side_pins[side].append([pin_loc, 0])\n # disable new pin\n first_metal_pix_flag = 0\n # pin num count + 1\n pin_num_cnt += 1\n # not starting new pin\n else:\n # if pin ends\n if next_pin_num < 0:\n # enable new pin\n first_metal_pix_flag = 1\n # set pin width for this pin\n side_pins[side][pin_num_cnt-1][1] = pin_width_cnt\n # reset pin width for next pin\n pin_width_cnt = 0\n return side_pins\n\n\nif __name__ == '__main__':\n sp_edge = edge_pixel_extraction(\"C:\\\\Users\\\\Siyang_Wang_work\\\\USC\\\\SPORT_LAB\\\\DRC_ML\\\\DataFiles\\\\all_data\\\\train_dataset_processed\\\\images1024_bin\\\\bw_patid_MX_Benchmark2_clip_hotspot1_7_orig_0.png\", 1024, 4)\n sp_edge = np.array(sp_edge)\n for i in range(1024):\n print(type(sp_edge[i, 0]))\n print(sp_edge[i, :])\n\n sp_pins = edge_pin_extraction(sp_edge)\n for edge_side in range(4):\n print(sp_pins[edge_side])\n\n \"\"\"\n batch_image_to_grayscale(\"C:\\\\Users\\\\Siyang_Wang_work\\\\USC\\\\SPORT_LAB\\\\DRC_ML\\\\DataFiles\\\\all_data\\\\train_dataset\",\n \"C:\\\\Users\\\\Siyang_Wang_work\\\\USC\\\\SPORT_LAB\\\\DRC_ML\\\\DataFiles\\\\all_data\\\\train_dataset_processed\\\\images1024_bin\")\n \"\"\"\n","repo_name":"wangsiyangusc/LayoutConnect","sub_path":"edge_extraction.py","file_name":"edge_extraction.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17242528067","text":"from utils import *\nfrom solution import *\nimport random\n\nclass Search:\t\t\n\tbestScorePerStar = 0\n\n\tdef __init__(self, links, getRandomSolution=False):\n\t\tself.numChecked = 0\n\t\tself.bestScore = 0\n\t\tself.bestSolutions = []\n\t\tself.boundedPaths = []\n\n\t\tself.complete = False\n\n\t\tself.links = list(set(links))\n\t\tself.getRandomSolution = getRandomSolution\n\n\tdef __str__(self):\n\t\tif self.complete:\n\t\t\tout = \"Search: \" + str(self.bestSolutions[0]) + \" + \" + str(len(self.bestSolutions)) + \"\\n\"\n\t\t\tout += \" \" + str([c.name for c in self.links])\n\t\telse:\n\t\t\tout = \"Search from: \" + str([c.name for c in self.links])\n\t\treturn out\n\n\tdef start(self):\n\t\tself.next(Search.wanted, Search.model.points, Solution([], Search.model), self.links)\n\t\tself.complete = True\n\t\tself.bestSolutions.sort(key=lambda s: s.score, reverse=True)\n\t\treturn self.bestSolutions[0]\n\n\tdef next(self, wanted, points, solution, remainingLinks, moveStr=\"\"):\n\t\tif self.complete:\n\t\t\treturn\n\t\tself.numChecked += 1\n\n\t\tub = solution.score + points*Search.bestScorePerStar\n\t\tif ub < self.bestScore and solution.score < ub:\n\t\t\t# print ub, \"<\", globalMetadata[\"bestScore\"]\n\t\t\t# print points, \"points left before trim\"\n\t\t\tsolution.kill()\n\t\t\treturn\n\n\t\tif self.boundPath(solution):\n\t\t\t# print \"Killing bounded solution:\", solution\n\t\t\tsolution.kill()\n\t\t\treturn\n\n\t\tneededAffinities = Solution.maxAffinities - solution.provides\n\n\t\tremainingLinks = [c for c in remainingLinks if 
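\n\t\t\t# prune: keep only links that can still supply a missing affinity\n\t\t\t# (neededAffinities is maxAffinities minus what the partial solution provides)\n\t\t\t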
neededAffinities.intersects(c.provides)]\n\t\tpossibleMoves = wanted + remainingLinks\n\n\t\tnextMoves = self.getNextMoves(solution, possibleMoves, points)\n\n\t\tif self.getRandomSolution:\n\t\t\tmethod = random.randint(1,3)\n\t\t\tif method == 1:\n\t\t\t\trandom.shuffle(nextMoves)\n\t\t\telif method == 2:\n\t\t\t\tnextMoves = sortByScore(nextMoves, Search.model)\n\t\t\t\tnextMoves = nextMoves[:int(len(nextMoves)*.75)]\n\t\t\telif method == 3:\n\t\t\t\tnextMoves = sortConstellationsByProvidesValueScore(nextMoves, Search.model, Solution.valueVector)\n\t\t\t\tnextMoves = nextMoves[:int(len(nextMoves)*.75)]\n\t\t\t\n\t\telse:\n\t\t\tnextMoves = sortConstellationsByProvidesValueScore(nextMoves, Search.model, Solution.valueVector)\n\n\n\t\tisSolution = True\n\t\tnewWanted = wanted[:]\n\t\tlinks = remainingLinks[:]\n\n\t\tfor move in nextMoves:\n\t\t\tisSolution = False\n\t\t\tnewMoveStr = moveStr + move.id + \"(\"+ str(int(move.evaluate(Search.model))) +\")\" +\" {\"+str(nextMoves.index(move)+1)+\"/\"+str(len(nextMoves))+\"}, \"\n\n\t\t\ttry:\n\t\t\t\tlinks.remove(move)\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\ttry:\n\t\t\t\tnewWanted.remove(move)\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\tnextSolution = Solution(solution.constellations+[move], Search.model)\n\t\t\tif not nextSolution.isDead:\n\t\t\t\tself.next(newWanted, points-len(move.stars), nextSolution, links, newMoveStr)\n\n\t\tsolution.kill()\n\n\t\tif isSolution:\n\t\t\tif self.getRandomSolution:\n\t\t\t\tself.bestScore = solution.score\n\t\t\t\tself.bestSolutions += [solution]\n\t\t\t\tself.complete = True\n\t\t\tif solution.score >= self.bestScore:\n\t\t\t\tself.bestScore = solution.score\n\t\t\t\tself.bestSolutions += [solution]\n\n\tdef boundPath(self, solution, maxLength=5):\n\t\tif len(solution.constellations) > maxLength:\n\t\t\treturn False\n\n\t\tfor bpi in range(len(self.boundedPaths)-1, -1, -1):\n\t\t\tbp = self.boundedPaths[bpi]\n\t\t\tif solution <= bp and not solution == bp:\n\t\t\t\treturn True\n\t\t\telif solution >= bp:\n\t\t\t\tself.boundedPaths[bpi] = solution\n\n\t\tif len(solution.constellations) <= maxLength:\n\t\t\tself.boundedPaths += [solution]\n\t\tself.boundedPaths = list(set(self.boundedPaths))\n\n\t\treturn False\n\n\tdef getNextMoves(self, current, possibles, points):\n\t\tmoves = [c for c in possibles if len(c.stars) <= points and c.canActivate(current.provides, current.constellations) and c not in current.constellations]\n\n\t\ttempMoves = moves[:]\n\t\tfor move in tempMoves:\n\t\t\tfor other in moves:\n\t\t\t\tif other in move.redundancies:\n\t\t\t\t\tmoves.remove(move)\n\t\t\t\t\tbreak\n\n\t\treturn moves\n","repo_name":"issuefree/GrimDawn","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1990892569","text":"import json\nimport os\nimport time\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.common.exceptions import WebDriverException\nfrom country import const_countries\n\n\nclass Human_test(Exception):\n\n def __init__(self, text):\n self.txt = text\n\n\nclass Similar:\n \"\"\" Class for working with API Similarweb. \"\"\"\n\n def __init__(self, headless=True):\n \"\"\" Parameters needed for operation. 
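Set headless=False to watch the browser drive the pages.\n\n Example (mirrors the __main__ block at the bottom of this file):\n similar = Similar(headless=False)\n similar.run()\n 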
\"\"\"\n try:\n if headless:\n self.options = webdriver.ChromeOptions()\n self.options.add_argument('headless')\n self.driver = webdriver.Chrome(options=self.options)\n else:\n self.driver = webdriver.Chrome()\n except WebDriverException:\n print('chromedriver.exe needed, please put it in the script folder')\n print(\"If you don't have one download it from the link below\")\n print('https://chromedriver.storage.googleapis.com/84.0.4147.30/chromedriver_win32.zip')\n print('or download the latest version from here https://chromedriver.chromium.org/downloads')\n input('To exit the program, click - Enter!')\n quit()\n\n self.file_input = 'input.txt'\n self.file_output = 'output.txt'\n self.bad_file = 'bad_links.txt'\n self.all_months = 'all_months.txt'\n self.domain = []\n self.monthly_visits = []\n self.count = 0\n self.file_domain_count = 0\n self.path = os.getcwd()\n\n def __create_list_of_domains(self):\n \"\"\" Creating a clean list of domains from a file. \"\"\"\n try:\n with open(file=self.file_input, mode='r', encoding='utf-8') as file:\n for url in file:\n url = url.replace('\\n', '')\n if '//' in url:\n need_url = url.replace('http://', '').replace('https://', '').replace('www.', '').split('/')[0]\n self.domain.append(need_url)\n else:\n need_url = url.replace('www.', '')\n self.domain.append(need_url)\n except FileNotFoundError:\n print('Please create a file input.txt and put data there')\n input('To exit the program, click - Enter!')\n quit()\n\n def __create_files_to_write(self):\n with open(file=self.file_output, mode='w', encoding='utf-8') as file_out:\n file_out.write('Domain' + '\\t' + 'Country' + '\\t' + 'Amount of traffic' + '\\t' + 'Comment' + '\\n')\n with open(file=self.bad_file, mode='w', encoding='utf-8') as bad_file:\n bad_file.write('These links must be checked manually:\\n')\n with open(file=self.all_months, mode='w', encoding='utf-8') as all_months:\n all_months.write('Domain' + '\\t' + 'Date' + '\\t' + 'Amount of traffic' + '\\n')\n\n def __write_to_file(self, file_name, str_to_write):\n with open(file=file_name, mode='a', encoding='utf-8') as file:\n file.write(str_to_write)\n\n def __count_of_links(self):\n \"\"\" Counting the number of domains in a file. \"\"\"\n with open(self.file_input, 'r', encoding='utf-8') as file:\n for _ in file:\n self.file_domain_count += 1\n\n def __rounding(self, number_to_round):\n \"\"\" Rounding traffic by type similarweb. \"\"\"\n number = str(number_to_round)\n if len(number) >= 7:\n h = number[:3] + '.' + number[3:]\n j = round(float(h))\n g = str(j) + '0' * (len(number) - 3)\n return g\n elif 5 <= len(number) <= 6:\n h = number[:2] + '.' + number[2:]\n j = round(float(h))\n g = str(j) + '0' * (len(number) - 2)\n return g\n elif 1 <= len(number) <= 4:\n h = number[:1] + '.' + number[1:]\n j = round(float(h))\n g = str(j) + '0' * (len(number) - 1)\n return g\n\n def __preparing_data(self, soup):\n \"\"\" Preparing data for further use. \"\"\"\n if 'HTTP ERROR 429' in str(soup) or '' in str(soup):\n print('---> Banned. We need to wait!')\n time.sleep(61)\n self.count -= 1\n elif '{}' in str(soup):\n need_write = 'https://www.similarweb.com/website/' + str(self.domain[self.count - 1]) + '\\n'\n self.__write_to_file(file_name=self.bad_file, str_to_write=need_write)\n elif 'invalid payload' in str(soup):\n print('---> This is not a domain! Check - ' + str(self.domain[self.count - 1]))\n elif 'Please verify you are a human' in str(soup):\n raise Human_test('---> Please verify you are a human! 
- ')\n\n else:\n find_json = soup.find('pre').text\n _json = json.loads(find_json)\n monthly_visits_top5 = _json['EstimatedMonthlyVisits']\n top_country = _json['TopCountryShares']\n site_name = _json['SiteName']\n\n if self.domain[self.count - 1] == site_name:\n for date in monthly_visits_top5:\n self.monthly_visits.append(monthly_visits_top5[date])\n need_write = str(site_name) + '\\t' + str(date) + '\\t' + str(self.__rounding(monthly_visits_top5[date])) + '\\n'\n self.__write_to_file(file_name=self.all_months, str_to_write=need_write)\n\n need_write_ = str(site_name) + '\\t' + 'Total Traffic' + '\\t' + str(self.__rounding(self.monthly_visits[-1]) + '\\t' + 'Total Traffic' + '\\n')\n self.__write_to_file(file_name=self.file_output, str_to_write=need_write_)\n\n number_top_list = 1\n for country in top_country:\n country_name = const_countries[str(country['Country'])]\n top5_traffic = self.__rounding(round(self.monthly_visits[-1] * country['Value']))\n _need_write = str(site_name) + '\\t' + str(country_name) + '\\t' + str(top5_traffic) + '\\t' + 'TOP-' + str(number_top_list) + '\\n'\n self.__write_to_file(file_name=self.file_output, str_to_write=_need_write)\n number_top_list += 1\n else:\n need_write = 'https://www.similarweb.com/website/' + str(self.domain[self.count - 1]) + '\\n'\n self.__write_to_file(file_name=self.bad_file, str_to_write=need_write)\n\n def run(self):\n \"\"\" Main function. \"\"\"\n self.__create_files_to_write()\n self.__create_list_of_domains()\n self.__count_of_links()\n while True:\n if self.count == len(self.domain):\n self.driver.quit()\n break\n self.count += 1\n print('---> Passed domains: ' + str(self.count) + ' from ' + str(self.file_domain_count) +\n ' domain - ' + str(self.domain[self.count - 1]))\n self.driver.get(f'https://data.similarweb.com/api/v1/data?domain={self.domain[self.count - 1]}')\n source = self.driver.page_source\n soup = BeautifulSoup(source, 'html.parser')\n self.__preparing_data(soup=soup)\n\n\nif __name__ == '__main__':\n start_time = time.time()\n similar = Similar(headless=False) # if False - the browser will be visible, if True - will not be\n print('---> Starting data collection')\n similar.run()\n finish_time = time.time()\n print('---> Done!\\n')\n print('---> See the positive result in the file - output.txt')\n print('---> For raw links, see the file - bad_links.txt')\n print('---> Traffic for all months see the file - all_months.txt')\n print(f'---> Time spent on data collection - {round(finish_time - start_time, 2)} second')\n input('---> To exit the program, click - Enter!')\n","repo_name":"Anton-Bolotov/Similarweb_New","sub_path":"similarweb_new.py","file_name":"similarweb_new.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"32493830215","text":"import os\nimport subprocess\n\nspamdir='enron1/spam'\n\nspam_mails=os.listdir(spamdir)\n\nls=len(spam_mails)\n\n\ndict={}\n#for filename in spam_mails[0]:\nnewfile=open('spamdata.txt','w')\n\nnofemails=0\nfor filename in spam_mails:\n\temail=open('enron1/spam/'+filename,'r')\n\tx=email.read().split()\n\tfor words in x:\n\t\tif(words not in dict and words.isalpha() and len(words)>=4):\n\t\t\tdict[words]=len(subprocess.check_output('grep -il '+words+' 
'+spamdir+'/*.txt',shell=True).splitlines())\n\t\t\tpwordgivenspam=dict[words]/(ls*1.0)\n\t\t\tnewfile.write(words+','+str(pwordgivenspam)+',')\n\temail.close()\n\tnofemails=nofemails+1;\n\tprint(nofemails)\n\n\n#print(dict['grayish'])\n","repo_name":"avinashr175/Spam-classifier","sub_path":"spam_classifier.py","file_name":"spam_classifier.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"9251310164","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\") as readme_file:\n README = readme_file.read()\n\nsetup_args = dict(\n author=\"Tal Leibman\",\n author_email=\"leibman2@gmail.com\",\n url=\"https://github.com/Tal-Leibman/scrapy-selenium-middleware\",\n name=\"scrapy_selenium_middleware\",\n version=\"0.0.5\",\n description=\"\"\"Scrapy middleware for downloading a page html source using selenium,\n and interacting with the web driver in the request context\n eventually returning an HtmlResponse to the spider\n \"\"\",\n long_description=README,\n keywords=[\n \"scrapy\",\n \"selenium\",\n \"middleware\",\n \"proxy\",\n \"web scraping\",\n \"render javascript\",\n \"selenium-wire\",\n \"headless browser\",\n ],\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n)\ninstall_requires = [\n \"scrapy==2.4.0\",\n \"selenium-wire==2.1.1\",\n \"selenium==3.141.0\",\n]\nif __name__ == \"__main__\":\n setup(**setup_args, install_requires=install_requires)\n","repo_name":"Tal-Leibman/scrapy-selenium-middleware","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"24628929128","text":"import requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool, Queue\nimport multiprocessing\n\nclass Douban(object):\n\tdef __init__(self):\n\t\tself.ua = UserAgent()\t\n\t\tself.headers = {\n\t\t\t'User-Agent': self.ua.random\n\t\t}\n\t\tself.MAX_WORKER_NUM = multiprocessing.cpu_count()\n\t\tself.urls = ['https://movie.douban.com/top250?start={}'.format(str(i)) for i in range(0,250,25)]\n\t\tself.queue_list = Queue()\n\n\tdef get_html(self, url):\n\t\ttry:\n\t\t\tr = requests.get(url, headers=self.headers)\n\t\t\tif r.status_code == 200:\n\t\t\t\treturn r.content.decode('utf-8')\n\t\texcept:\n\t\t\treturn None\n\n\tdef parse_html(self, url):\n\t\tprint('hi am in')\n\t\thtml = self.get_html(url)\n\t\tsoup = BeautifulSoup(html, 'lxml')\n\t\titems = soup.select('ol.grid_view li')\n\t\tfor item in items:\n\t\t\ttitle = item.select('span.title')[0].get_text()\n\t\t\tscore = item.select('span.rating_num')[0].get_text()\n\t\t\tprint(title, score)\n\n\t# 类中学多进程。。 实现不了。。。\n\tdef multiprocess_get(self):\n\t\tprint(self.MAX_WORKER_NUM)\n\t\tpool = Pool(self.MAX_WORKER_NUM)\n\t\tfor url in self.urls:\n\t\t\tself.queue_list.put(url)\n\t\twhile self.queue_list.qsize() > 0:\n\t\t\tpool.apply_async(self.parse_html, args=(self.queue_list.get(), ))\n\t\tpool.close()\n\t\tpool.join()\n\n\tdef main(self):\n\t\tt1 = time.time()\n\t\tself.multiprocess_get()\n\t\tprint(time.time() - t1)\n\n\nif __name__ == '__main__':\n\td = 
Douban()\n\td.main()\n","repo_name":"BattlesSymphony/spider_with_celery","sub_path":"douban_top_250/top_250_with_pool.py","file_name":"top_250_with_pool.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5069105366","text":"import os\n\nfrom flask import Flask, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if \"username\" in session:\n username = session[\"username\"]\n return redirect(url_for(\"chatroom\"))\n else: \n if request.method == \"POST\":\n username = request.form[\"username\"]\n session[\"username\"] = username\n return redirect(url_for(\"chatroom\"))\n else: \n return render_template(\"index.html\")\n\n@app.route(\"/chatroom\")\ndef chatroom():\n if \"username\" in session:\n username = session[\"username\"]\n return render_template(\"chatroom.html\", username=username)\n else:\n return redirect(url_for(\"index\"))\n\n@socketio.on(\"plus clicked\")\ndef plusclicked(data):\n newchannel = data[\"newchannel\"]\n emit(\"addnewchannel\", {\"newchannel\": newchannel}, broadcast=True)\n\n@socketio.on(\"message sent\")\ndef messagesent(data):\n newmessage = data[\"newmessage\"]\n username = data[\"username\"]\n channel = data[\"channel\"]\n emit(\"addnewmessage\", {\"newmessage\": newmessage, \"username\": username, \"channel\": channel}, broadcast=True)\n\n@app.route(\"/logout\")\ndef logout():\n session.pop(\"username\", None)\n return redirect(url_for(\"index\"))\n","repo_name":"jashmerchant/Flack","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73457295291","text":"\n\nimport serial\nimport socket\nimport time\nimport threading\nimport subprocess\nimport math\nfrom halo import Halo\n\n#low latency (15 fps /!\\)\n#/usr/local/vigiclient/processdiffvideo -r 15 -i udp://0.0.0.0:11111 -c:v h264_omx -profile:v baseline -b:v 500k -flags:v +global_header -bsf:v dump_extra -f rawvideo udp://127.0.0.1:9999\n#high quality\n#/usr/local/vigiclient/processdiffvideo -i udp://0.0.0.0:11111 -c:v h264_omx -profile:v baseline -b:v 3M -flags:v +global_header -bsf:v dump_extra -f rawvideo udp://127.0.0.1:9999\n\n#loool !/usr/bin/env pypy\n\n# const values\nTELLO_IP = \"192.168.10.1\" #\"127.0.0.1\"\nTELLO_PORT = 8889\nPI_PORT = 8890\nPI_LISTENIP = \"0.0.0.0\"\nTRAMESIZE = 17\nRESPONSETRAMESIZE = 30\nCONNECTCOMMAND = \"command\"\nSTREAMONCOMMAND = \"streamon\"\nSTREAMOFFCOMMAND = \"streamoff\"\nTAKEOFFCOMMAND = \"takeoff\"\nLANDCOMMAND = \"land\"\nEMERGENCYCOMMAND = \"emergency\"\nFLIPFCOMMAND = \"flip f\"\nRCCOMMAND = \"rc %s %s %s %s\" # leftright, forward/backward, up/down, yaw,\nTELLOSTATESTRUCTURE = [\"pitch\", \"roll\", \"yaw\", \"vgx\", \"vgy\", \"vgz\", \"templ\", \"temph\", \"tof\", \"h\", \"bat\", \"baro\", \"time\", \"agx\", \"agy\", \"agz\", \"\\r\\n\"]\n\n\n\npayloadSize = TRAMESIZE - 4 \nvigiSerial = serial.Serial(\"/dev/pts/0\", 115200)\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsyncTime = time.time()\nsyncRC = 
time.time()\nsyncTelloTime = time.time()\nfreqRC = 0.0\nfreqTello = 0\n\nlastTrame = bytearray(payloadSize)\ntelloStateTrame = [0]*len(TELLOSTATESTRUCTURE) #todo : create class\nupdatedRC = False # simple hack to match Tello frequency from Vigibot frequency\n\n#hqTranscoder = subprocess.run([\"/usr/local/vigiclient/processdiffvideo\", \"-r\", \"15\", \"-i\", \"udp://0.0.0.0:1111\", \"-c:v h264_omx\", \"-profile:v\", \"baseline\", \"-b:v\", \"1M\", \"-flags:v\", \"+global_header\", \"-bsf:v\", \"dump_extra\", \"-f\", \"rawvideo\", \"udp://127.0.0.1:9999\"])\n\n\nspinner = Halo(text='Starting', spinner='dots')\nspinner.start()\nwarningLogs = \"\"\n\n\n\n\nclass parsedTrame():\n\n def __init__(self):\n self.yaw = 0\n self.pitch = 0\n self.choixCam = 0\n self.vX = 0\n self.vY = 0\n self.vT= 0\n self.takeOff= 0 \n self.land = 0\n self.videoStream = 0\n self.emergency = 0\n self.flipF = 0\n\n def update(self, trameData: bytearray()):\n self.freq = \"RX : %.0f Hz\" % (1/elapsed)\n self.yaw = round(int.from_bytes(trameData[0:2], byteorder='little', signed=True) / 245.75)\n self.pitch = round(int.from_bytes(trameData[2:4], byteorder='little', signed=True) / 100.12)\n\n self.choixCam = trameData[8]\n self.vX = round(bytetoInt8(trameData[9]) / 1.27)\n self.vY = round(bytetoInt8(trameData[10]) / 1.27)\n self.vT = - round(bytetoInt8(trameData[11]) / 1.27)\n\n self.takeOff = bool(trameData[12] & 1)\n self.land = bool(trameData[12] & 2)\n # bool(trameData[12] & 4)\n # bool(trameData[12] & 8)\n self.videoStream = bool(trameData[12] & 16)\n self.emergency = bool(trameData[12] & 32)\n self.flipF = bool(trameData[12] & 64)\n\n\nparsedT = parsedTrame()\n\n\ndef bytetoInt8(byte):\n if byte > 127:\n return (256-byte) * (-1)\n else:\n return byte\n\n\ndef sendCommandtoTello(command : str):\n command = command\n return sock.sendto(command.encode(\"utf-8\"), (TELLO_IP, TELLO_PORT))\n\n\ndef telloReceiveState():\n global syncTelloTime, freqTello\n sockR = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sockR.bind((PI_LISTENIP, PI_PORT))\n while True:\n data, addr = sockR.recvfrom(1024) # buffer size is 1024 bytes, quite enough for the short tello state trames\n result = data.decode().split(\";\")\n \n if len(result) == len(TELLOSTATESTRUCTURE):\n for ind, r in enumerate(result):\n if r.find(TELLOSTATESTRUCTURE[ind]) > -1 and r.find(\":\") > -1:\n telloStateTrame[ind] = float(r.split(\":\")[1])\n\n newT = time.time()\n freqTello = 1/(newT - syncTelloTime)\n syncTelloTime = newT\n #print(data)\n\ndef getTelloStateValue(state :str):\n val = telloStateTrame[TELLOSTATESTRUCTURE.index(state)]\n return val\n\ndef clamp(x, minimum, maximum):\n return max(minimum, min(x, maximum))\n\ndef scaleToInt8(value, minv, maxv, signed = False):\n clippedVal = clamp(value, minv, maxv)\n result = int(255*clippedVal/(maxv-minv))\n if not signed or result >= 0:\n return result\n else:\n return result + 256\n\n\n\n\ndef generateResponseTrame(): # -> bytearray(30):\n responseTrame = bytearray(b'\\x00') * RESPONSETRAMESIZE\n # copy original trame \n responseTrame[4:4 + len(lastTrame)] = lastTrame\n responseTrame[0] = int(\"0x24\", 0)\n responseTrame[1] = int(\"0x52\", 0)\n responseTrame[2] = int(\"0x20\", 0)\n responseTrame[2] = int(\"0x20\", 0)\n\n # 16 bits values \n #responseTrame[18] # Voltage (will be updated by Raspberry pi)\n #responseTrame[19] # Percent (will be updated by Raspberry pi)\n\n # 8 bits values\n responseTrame[16] = lastTrame[8] # recopy choix camera\n responseTrame[17] = lastTrame[9] # recopy v\n responseTrame[18] = 
lastTrame[10]\n responseTrame[19] = lastTrame[11]\n responseTrame[20] = lastTrame[12] # recopy switches\n\n # bytes 21 to 24 = overidded by Raspi\n responseTrame[21] # cpu load\n responseTrame[22] # soc temp\n responseTrame[23] # link\n responseTrame[24] # rssi\n responseTrame[25] = scaleToInt8(getTelloStateValue(\"bat\"), 0, 100) #bat\n responseTrame[26] = scaleToInt8(getTelloStateValue(\"temph\"), 0, 100) # VPU temp\n responseTrame[27] = scaleToInt8(getTelloStateValue(\"tof\"), 0, 255) # indoor altitude (tof sensor)\n responseTrame[28] = scaleToInt8( -getTelloStateValue(\"vgz\"), -100, 100, signed=True) # tello V speed\n hSpeed = math.sqrt( getTelloStateValue(\"vgx\")**2 + getTelloStateValue(\"vgy\")**2 ) \n responseTrame[29] =scaleToInt8(hSpeed, 0, 100) # tello H speed = sqrt(vgx²+vgy²)\n\n # print(responseTrame.hex())\n vigiSerial.write(responseTrame)\n threading.Timer(1/19.9, generateResponseTrame).start() # return responseTrame # \n\ngenerateResponseTrame()\n\n\ndef telloUpdateRC():\n global updatedRC, syncRC, freqRC, warningLogs\n # One time command\n if parsedT.takeOff:\n warningLogs += \" Take off command\"\n sendCommandtoTello(TAKEOFFCOMMAND)\n if parsedT.land:\n warningLogs += \" Land command\"\n sendCommandtoTello(LANDCOMMAND)\n if parsedT.flipF:\n warningLogs += \" Flip command\"\n sendCommandtoTello(FLIPFCOMMAND)\n if parsedT.emergency:\n warningLogs += \" Emergency command\"\n sendCommandtoTello(EMERGENCYCOMMAND)\n\n # RC commands : leftright, forward/backward, up/down, yaw,\n #rc = RCCOMMAND % (parsedT.vY, parsedT.pitch, parsedT.vX, parsedT.yaw) # config 1\n rc = RCCOMMAND % (parsedT.yaw, parsedT.vY, parsedT.pitch, parsedT.vT) # config 2\n # print(rc)\n if not updatedRC:\n sendCommandtoTello(rc)\n newT = time.time()\n freqRC = 1/(newT - syncRC)\n syncRC = newT\n updatedRC = not updatedRC # skip next update\n\ndef tellloHandleStreamOnOff():\n sendCommandtoTello(CONNECTCOMMAND)\n \n if parsedT.videoStream:\n sendCommandtoTello(STREAMONCOMMAND)\n else:\n sendCommandtoTello(STREAMOFFCOMMAND)\n\n threading.Timer(1, tellloHandleStreamOnOff).start()\n\n\ntellloHandleStreamOnOff()\ntelloReceiveThread = threading.Thread(target=telloReceiveState)\ntelloReceiveThread.start()\n\n\n# main thread is sync on vigibot serial update\n\nwhile 1 :\n lastByte = vigiSerial.read()\n \n #sync check\n if (lastByte == b'$') :\n currentByte = vigiSerial.read()\n if(currentByte == b'S'):\n #in sync, flush 2 (sync padding) and read trame\n vigiSerial.read(2)\n lastTrame = vigiSerial.read(payloadSize)\n elapsed = (time.time() - syncTime)\n syncTime = time.time()\n\n trameHex = lastTrame.hex()\n freq = (1/elapsed)\n\n logs = \"Main frequency : %4.1f Hz | Tello telemetry : %4.1f Hz | Tello commands : %4.1f Hz\" % (freq, freqTello, freqRC) + \" | \" + warningLogs\n warningLogs = \"\"\n spinner.text = logs\n \n parsedT.update(lastTrame)\n\n telloUpdateRC()\n\n\n \n \n\n","repo_name":"Blafy/Telloclient","sub_path":"telloclient.py","file_name":"telloclient.py","file_ext":"py","file_size_in_byte":8291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73128741693","text":"import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport segmentation_models_pytorch as smp\nfrom torchvision.transforms import *\nfrom utils.dataloader.car_dataset import *\nfrom unet import ResNetUNet\n\nvalidation_path = 
\"E:/dtu/2sem/deeplearn/project/data_folder_2/validation/\"\ntrain_path = \"E:/dtu/2sem/deeplearn/project/data_folder_2/train/\"\ntest_path = \"E:/dtu/2sem/deeplearn/project/data_folder_2/test/\"\n\ntransform = transforms.Compose([\n RandomHorizontalFlip(p=0.5),\n RandomPerspective(distortion_scale=0.5, p=0.5),\n transforms.RandomApply(transforms=[\n RandomResizedCrop(size=(256, 256)),\n ], p=0.5),\n transforms.RandomApply(transforms=[\n GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5)),\n ], p=0.5),\n ])\n\ntrain_dataset = CarDataset(train_path, num_gan=0, num_deloitte_aug=0, num_opel=300, num_door=300, num_primary_multiple=8, augmentation=transform)\nvalidation_dataset = CarDataset(validation_path, num_gan=0, num_deloitte_aug=0, num_opel=0, num_door=0, num_primary_multiple=1)\n\ntrain_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=0, drop_last=True)\nvalid_loader = DataLoader(validation_dataset, batch_size=1, shuffle=False, num_workers=0)\n\nmodel = ResNetUNet(n_class=9).cuda()\ncriterion = smp.utils.losses.DiceLoss()\noptimizer = torch.optim.Adam([ \n dict(params=model.parameters(), lr=0.0001),\n ])\n# optimizer = torch.optim.SGD(model.parameters(), lr = 0.1, momentum=0.9, nesterov=True)\nmin_score = 1.0\n\nDEVICE = 'cuda'\n\ntrain_logs = []\nvalid_logs = []\n\nEPOCHS = 5\nfor i in range(0, EPOCHS):\n print('\\nEpoch: {}'.format(i))\n train_log = []\n model.train()\n for image, mask in train_loader:\n image = image.to(DEVICE)\n mask = mask.to(DEVICE)\n \n optimizer.zero_grad()\n\n pred = model(image)\n\n loss = criterion(pred, mask)\n loss.backward()\n \n optimizer.step()\n\n train_log.append(loss.item())\n\n train_mean = np.mean(train_log)\n print(\"Mean Training loss: \",train_mean)\n train_logs.append(train_mean)\n\n valid_log = []\n model.eval()\n for image, mask in valid_loader:\n image = image.to(DEVICE)\n mask = mask.to(DEVICE) \n\n pred = model(image)\n\n loss = criterion(pred,mask)\n\n valid_log.append(loss.item())\n\n valid_mean = np.mean(valid_log)\n print(\"Mean Validation loss: \",valid_mean)\n valid_logs.append(valid_mean)\n\n if (min_score > valid_mean):\n min_score = valid_mean\n torch.save(model.state_dict(), 'best_model_dict.pth')\n print(\"Model saved!\")\n if i == EPOCHS/2:\n optimizer.param_groups[0]['lr'] = 1e-5\n print('---- Decrease Learning Rate to 1e-5! 
----')\n","repo_name":"AndersBensen/02456-deep-learning-segmentation","sub_path":"unet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31272610435","text":"import random\n\ncharacters = [\"My\", \" My friend's\", \"My neighbor's\", \"One of my family member's\", \"Some one known had\",\n \"One of my known\"]\n\nwhat = [\"nursing experience\", \"meeting experience\", \"introduction \"]\n\nwith_whom = [\"with COVID 19.\", \"with cancer patient.\", \"with a patient.\", \" with freedom fighters.\",\n \" with an orphan.\",\n \"with a disabled person.\"]\n\nhow_was_this = [\"It was painful.\", \"It was inspiring.\", \"It was surprising.\", \"It was life saver.\",\n \"It was deep.\"]\n\nthird_person = [\"You\", \"I\", \"Anyone\", \"Any person\", \"Anyone in this world\"]\ncant_do = [\"can not figure it out\", \"can not imagine\", \"thought of it\"]\n\nconclusion = ['.Narrative is']\nwhat_to_do = [\"Keep smiling\", \"Keep trying\", \"keep fighting\", \"be positive\", \"be strong\", \"keep faith\"]\n\n\ndef inspiring_story(characters, what, with_whom, how_was_this, third_person, cant_do, what_to_do, conclusion):\n story = random.choice(characters) + \" \" + random.choice(what) + \" \" + random.choice(with_whom) \\\n + \" \" + random.choice(how_was_this) + \" \" + random.choice(third_person) \\\n + \" \" + random.choice(cant_do) + \" \" + random.choice(conclusion) + \" \" + random.choice(what_to_do)\n print(story)\n\n\ninspiring_story(characters, what, with_whom, how_was_this, third_person, cant_do, what_to_do, conclusion)\n","repo_name":"farhat-jahan/storyGenerator","sub_path":"sharestory.py","file_name":"sharestory.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1429115590","text":"from flask import Flask\n\nfrom routes import short\nfrom extensions import db\n\n\ndef create_app(config_file='config.py'):\n app = Flask(__name__)\n app.config.from_pyfile(config_file)\n\n db.init_app(app)\n app.register_blueprint(short)\n\n with app.app_context():\n\n db.create_all()\n return app\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run()\n\n\n","repo_name":"ggcarmi/url-shortener","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3140243484","text":"# BOJ 7662\nimport heapq\nimport sys\n\n\"\"\"\nOn delete, remove either the highest-priority or the lowest-priority item, depending on the command.\nDelete operations come in two kinds:\n1. pop the highest priority\n2. 
pop the lowest priority\nThe same integer may be inserted more than once, so a set cannot be used.\n[-5643, 16, 123]\n\"\"\"\n\nsi = sys.stdin.readline\nMAX = 1000000 + 1 # sized to the maximum number of commands\n\nt = int(si())\nfor _ in range(t):\n res = []\n max_q = []\n min_q = []\n visited = [False] * MAX\n k = int(si())\n for i in range(k):\n op, num = si().split()\n if op == \"I\":\n heapq.heappush(max_q, (-int(num), i))\n heapq.heappush(min_q, (int(num), i))\n visited[i] = True\n elif int(num) == 1:\n while max_q and not visited[max_q[0][1]]:\n heapq.heappop(max_q)\n if max_q:\n visited[max_q[0][1]] = False\n heapq.heappop(max_q)\n else:\n while min_q and not visited[min_q[0][1]]:\n heapq.heappop(min_q)\n if min_q:\n visited[min_q[0][1]] = False\n heapq.heappop(min_q)\n while min_q and not visited[min_q[0][1]]:\n heapq.heappop(min_q)\n while max_q and not visited[max_q[0][1]]:\n heapq.heappop(max_q)\n res.append(f'{-max_q[0][0]} {min_q[0][0]}' if max_q and min_q else 'EMPTY')\n print(\"\\n\".join(res))\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"BOJ/greedy_implementation_boj/dual_priority_queue.py","file_name":"dual_priority_queue.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12892437610","text":"from flask import Flask, jsonify, request, abort\nfrom flask_cors import CORS, cross_origin\nfrom turbineDAO import turbineDAO\n\napp = Flask(__name__, static_url_path='', static_folder='.')\nCORS(app)\n\n\n#curl \"http://127.0.0.1:5000/turbines\"\n@app.route('/turbines')\ndef getAll():\n results = turbineDAO.getAll()\n return jsonify(results)\n\n\n#curl \"http://127.0.0.1:5000/turbines/2\"\n@app.route('/turbines/<int:ID>')\ndef findByID(ID):\n foundTurbine = turbineDAO.findByID(ID)\n return jsonify(foundTurbine)\n\n\n#curl -i -H \"Content-Type:application/json\" -X POST -d \"{\\\"Model\\\":\\\"SG 2.6-114\\\",\\\"Manufacturer\\\":\\\"Siemens\\\",\\\"Rating\\\":2.6}\" http://127.0.0.1:5000/turbines\n@app.route('/turbines', methods=['POST'])\ndef create():\n if not request.json:\n abort(400)\n turbine = {\n \"Model\": request.json['Model'],\n \"Manufacturer\": request.json['Manufacturer'],\n \"Rating\": request.json['Rating'],\n }\n values = (turbine['Model'],turbine['Manufacturer'],turbine['Rating'])\n newID = turbineDAO.create(values)\n turbine['ID'] = newID\n return jsonify(turbine)\n\n\n#curl -i -H \"Content-Type:application/json\" -X PUT -d \"{\\\"Model\\\":\\\"E-70\\\",\\\"Manufacturer\\\":\\\"Enercon\\\",\\\"Rating\\\":1.9}\" http://127.0.0.1:5000/turbines/1\n@app.route('/turbines/<int:ID>', methods=['PUT'])\ndef update(ID):\n foundTurbine = turbineDAO.findByID(ID)\n if not foundTurbine:\n abort(404)\n if not request.json:\n abort(400)\n reqJson = request.json\n if 'Rating' in reqJson and not isinstance(reqJson['Rating'], (int, float)):\n abort(400)\n if 'Model' in reqJson:\n foundTurbine['Model'] = reqJson['Model']\n if 'Manufacturer' in reqJson:\n foundTurbine['Manufacturer'] = reqJson['Manufacturer']\n if 'Rating' in reqJson:\n foundTurbine['Rating'] = reqJson['Rating']\n values = (foundTurbine['Model'],foundTurbine['Manufacturer'],foundTurbine['Rating'],foundTurbine['ID'])\n turbineDAO.update(values)\n return jsonify(foundTurbine)\n\n\n#curl -i -H \"Content-Type:application/json\" -X DELETE http://127.0.0.1:5000/turbines/1\n@app.route('/turbines/<int:ID>' , methods=['DELETE'])\ndef delete(ID):\n turbineDAO.delete(ID)\n return jsonify({\"Done\":True})\n\n\nif __name__ == '__main__':\n app.run(debug= 
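\n # debug=True enables the reloader and interactive debugger; Flask binds\n # 127.0.0.1:5000 by default, so this is a local development setting.\n 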
True)","repo_name":"grace-burke/Data-Representation-Project","sub_path":"server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28060995222","text":"# -*- coding: utf-8 -*-\n# some code from http://www.djangosnippets.org/snippets/310/ by simon\n# and from examples/djopenid from python-openid-2.2.4\nimport urlparse\nimport logging\nfrom urllib import urlencode, quote\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect, QueryDict\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom django.utils.encoding import smart_str\ntry:\n from django.views.decorators.csrf import csrf_exempt\nexcept ImportError:\n from django.contrib.csrf.middleware import csrf_exempt\n\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\n\nfrom openid.association import default_negotiator, encrypted_negotiator\nfrom openid.consumer.discover import OPENID_IDP_2_0_TYPE, OPENID_2_0_TYPE\nfrom openid.extensions import sreg, ax\nfrom openid.server.server import Server, BROWSER_REQUEST_MODES\nfrom openid.yadis.constants import YADIS_CONTENT_TYPE\n\nfrom openid_provider import conf\nfrom openid_provider.utils import add_sreg_data, add_ax_data, get_store, \\\n trust_root_validation, get_trust_session_key, prep_response\nfrom openid_provider.models import TrustedRoot\n\nlogger = logging.getLogger(__name__)\n\n\n# Special URL which means 'let the user choose whichever identity'.\nIDENTIFIER_SELECT_URL = 'http://specs.openid.net/auth/2.0/identifier_select'\n\n\n@csrf_exempt\ndef openid_server(request):\n \"\"\"\n This view is the actual OpenID server - running at the URL pointed to by \n the <link> tag. 
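The request is decoded with python-openid's Server class; browser-mode\n requests fall through to the landing and decide pages when user\n interaction is needed.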
\n \"\"\"\n logger.debug('server request %s: %s',\n request.method, request.POST or request.GET)\n server = openid_get_server(request)\n\n if not request.is_secure():\n # if request is not secure allow only encrypted association sessions\n server.negotiator = encrypted_negotiator\n\n # Clear AuthorizationInfo session var, if it is set\n if request.session.get('AuthorizationInfo', None):\n del request.session['AuthorizationInfo']\n\n querydict = dict(request.REQUEST.items())\n orequest = server.decodeRequest(querydict)\n if not orequest:\n orequest = server.decodeRequest(request.session.get('OPENID_REQUEST', None))\n if orequest:\n # remove session stored data:\n del request.session['OPENID_REQUEST']\n else:\n # not request, render info page:\n data = {\n 'host': request.build_absolute_uri('/'),\n 'xrds_location': request.build_absolute_uri(\n reverse('openid-provider-xrds')),\n }\n logger.debug('invalid request, sending info: %s', data)\n return render_to_response('openid_provider/server.html',\n data,\n context_instance=RequestContext(request))\n\n if orequest.mode in BROWSER_REQUEST_MODES:\n if not request.user.is_authenticated():\n logger.debug('no local authentication, sending landing page')\n return landing_page(request, orequest)\n\n openid = openid_is_authorized(request, orequest.identity,\n orequest.trust_root)\n\n # verify return_to:\n trust_root_valid = trust_root_validation(orequest)\n validated = False\n\n if conf.FAILED_DISCOVERY_AS_VALID:\n if trust_root_valid == 'DISCOVERY_FAILED':\n validated = True\n else:\n # if in decide already took place, set as valid:\n if request.session.get(get_trust_session_key(orequest), False):\n validated = True\n\n if openid is not None and (validated or trust_root_valid == 'Valid'):\n if orequest.identity == IDENTIFIER_SELECT_URL:\n id_url = request.build_absolute_uri(\n reverse('openid-provider-identity', args=[openid.openid]))\n else:\n # We must return exactly the identity URL that was requested,\n # otherwise the openid.server module raises an error.\n id_url = orequest.identity\n\n oresponse = orequest.answer(True, identity=id_url)\n logger.debug('orequest.answer(True, identity=\"%s\")', id_url)\n elif orequest.immediate:\n logger.debug('checkid_immediate mode not supported')\n raise Exception('checkid_immediate mode not supported')\n else:\n request.session['OPENID_REQUEST'] = orequest.message.toPostArgs()\n request.session['OPENID_TRUSTROOT_VALID'] = trust_root_valid\n logger.debug(\n 'Set OPENID_REQUEST to %s in session %s',\n request.session['OPENID_REQUEST'], request.session)\n logger.debug(\n 'Set OPENID_TRUSTROOT_VALID to %s in session %s',\n request.session['OPENID_TRUSTROOT_VALID'], request.session)\n logger.debug('redirecting to decide page')\n return HttpResponseRedirect(reverse('openid-provider-decide'))\n else:\n oresponse = server.handleRequest(orequest)\n if request.user.is_authenticated():\n add_sreg_data(request, orequest, oresponse)\n if conf.AX_EXTENSION:\n add_ax_data(request, orequest, oresponse)\n\n return prep_response(request, orequest, oresponse, server)\n\ndef openid_xrds(request, identity=False, id=None):\n if identity:\n types = [OPENID_2_0_TYPE]\n else:\n types = [OPENID_IDP_2_0_TYPE, sreg.ns_uri]\n if conf.AX_EXTENSION:\n types.append(ax.AXMessage.ns_uri)\n endpoints = [request.build_absolute_uri(reverse('openid-provider-root'))]\n return render_to_response('openid_provider/xrds.xml', {\n 'host': request.build_absolute_uri('/'),\n 'types': types,\n 'endpoints': endpoints,\n }, 
context_instance=RequestContext(request), content_type=YADIS_CONTENT_TYPE)\n\n\ndef url_for_openid(request, openid):\n return request.build_absolute_uri(\n reverse('openid-provider-identity', args=[openid.openid]))\n\n\ndef openid_not_found_error_message(request, identity_url):\n ids = request.user.openid_set\n if ids.count() == 0:\n message = \"You have no OpenIDs configured. Contact the administrator.\"\n else:\n id_urls = [url_for_openid(request, id) for id in ids.iterator()]\n id_urls = ', '.join(id_urls)\n if ids.count() != 1:\n message = \"You somehow have multiple OpenIDs: \" + id_urls\n else:\n message = \"Your OpenID URL is: \" + id_urls\n return \"You do not have the OpenID '%s'. %s\" % (identity_url, message)\n\n\ndef openid_decide(request):\n \"\"\"\n The page that asks the user if they really want to sign in to the site, and\n lets them add the consumer to their trusted whitelist.\n # If user is logged in, ask if they want to trust this trust_root\n # If they are NOT logged in, show the landing page\n \"\"\"\n server = openid_get_server(request)\n orequest = server.decodeRequest(request.session.get('OPENID_REQUEST'))\n trust_root_valid = request.session.get('OPENID_TRUSTROOT_VALID')\n\n logger.debug('Got OPENID_REQUEST %s, OPENID_TRUSTROOT_VALID %s from '\n 'session %s', orequest, trust_root_valid, request.session)\n\n if not request.user.is_authenticated():\n return landing_page(request, orequest)\n\n if orequest is None:\n # This isn't normal, but can occur if the user uses the 'back' button\n # or if the session data is otherwise lost for some reason.\n return error_page(\n request, \"I've lost track of your session now. Sorry! Please go \"\n \"back to the site you are logging in to with a Baserock \"\n \"OpenID and, if you're not yet logged in, try again.\")\n\n openid = openid_get_identity(request, orequest.identity)\n if openid is None:\n # User should only ever have one OpenID, created for them when they\n # registered.\n message = openid_not_found_error_message(request, orequest.identity)\n return error_page(request, message)\n\n if request.method == 'POST' and request.POST.get('decide_page', False):\n if request.POST.get('allow', False):\n TrustedRoot.objects.get_or_create(\n openid=openid, trust_root=orequest.trust_root)\n if not conf.FAILED_DISCOVERY_AS_VALID:\n request.session[get_trust_session_key(orequest)] = True\n return HttpResponseRedirect(reverse('openid-provider-root'))\n\n oresponse = orequest.answer(False)\n logger.debug('orequest.answer(False)')\n return prep_response(request, orequest, oresponse)\n\n return render_to_response('openid_provider/decide.html', {\n 'title': _('Trust this site?'),\n 'trust_root': orequest.trust_root,\n 'trust_root_valid': trust_root_valid,\n 'return_to': orequest.return_to,\n 'identity': orequest.identity,\n }, context_instance=RequestContext(request))\n\ndef error_page(request, msg):\n return render_to_response('openid_provider/error.html', {\n 'title': _('Error'),\n 'msg': msg,\n }, context_instance=RequestContext(request))\n\nclass SafeQueryDict(QueryDict):\n \"\"\"\n A custom QueryDict class that implements a urlencode method\n knowing how to excempt some characters as safe.\n\n Backported from Django 1.3\n \"\"\"\n def urlencode(self, safe=None):\n output = []\n if safe:\n encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))\n else:\n encode = lambda k, v: urlencode({k: v})\n for k, list_ in self.lists():\n k = smart_str(k, self.encoding)\n output.extend([encode(k, smart_str(v, self.encoding))\n for v in 
list_])\n return '&'.join(output)\n\ndef landing_page(request, orequest, login_url=None,\n redirect_field_name=REDIRECT_FIELD_NAME):\n \"\"\"\n The page shown when the user attempts to sign in somewhere using OpenID \n but is not authenticated with the site. For idproxy.net, a message telling\n them to log in manually is displayed.\n \"\"\"\n request.session['OPENID_REQUEST'] = orequest.message.toPostArgs()\n logger.debug(\n 'Set OPENID_REQUEST to %s in session %s',\n request.session['OPENID_REQUEST'], request.session)\n if not login_url:\n login_url = settings.LOGIN_URL\n path = request.get_full_path()\n login_url_parts = list(urlparse.urlparse(login_url))\n if redirect_field_name:\n querystring = SafeQueryDict(login_url_parts[4], mutable=True)\n querystring[redirect_field_name] = path\n login_url_parts[4] = querystring.urlencode(safe='/')\n return HttpResponseRedirect(urlparse.urlunparse(login_url_parts))\n\ndef openid_is_authorized(request, identity_url, trust_root):\n \"\"\"\n Check that they own the given identity URL, and that the trust_root is \n in their whitelist of trusted sites.\n \"\"\"\n if not request.user.is_authenticated():\n return None\n\n openid = openid_get_identity(request, identity_url)\n if openid is None:\n return None\n\n if openid.trustedroot_set.filter(trust_root=trust_root).count() < 1:\n return None\n\n return openid\n\n\ndef url_is_equivalent(a, b):\n \"\"\"\n Test if two URLs are equivalent OpenIDs.\n \"\"\"\n return a.rstrip('/') == b.rstrip('/')\n\n\ndef openid_get_identity(request, identity_url):\n \"\"\"\n Select openid based on claim (identity_url).\n If none was claimed identity_url will be\n 'http://specs.openid.net/auth/2.0/identifier_select'\n - in that case return default one\n - if user has no default one, return any\n - in other case return None!\n \"\"\"\n logger.debug('Looking for %s in user %s set of OpenIDs %s',\n identity_url, request.user, request.user.openid_set)\n for openid in request.user.openid_set.iterator():\n if url_is_equivalent(identity_url, url_for_openid(request, openid)):\n return openid\n if identity_url == IDENTIFIER_SELECT_URL:\n # no claim was made, choose user default openid:\n openids = request.user.openid_set.filter(default=True)\n if openids.count() == 1:\n return openids[0]\n if request.user.openid_set.count() > 0:\n return request.user.openid_set.all()[0]\n return None\n\n\ndef openid_get_server(request):\n return Server(\n get_store(request),\n op_endpoint=request.build_absolute_uri(\n reverse('openid-provider-root')))\n","repo_name":"devcurmudgeon/cida","sub_path":"infrastructure/baserock_openid_provider/openid_provider/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70513037693","text":"import numpy as np\nimport itertools\n\nfrom hilbert import *\nfrom polvect6C import *\n\nclass structtype():\n pass\n\ndef music6C(data,test_param,wtype,v_scal,samp,W,l,stab,l_auto_perc):\n ''' \n Six-component MUSIC algorithm after Sollberger et al. (2018)\n \n data: Nx6 Matrix containing 6-C data (N samples) ordered as \n [v_x,v_y,v_z,omega_x,omega_y,omega_z]. 
Acceleration and\n    rotation rate can be used instead of particle velocity and\n    rotation angle\n\n    test_param: STRUCTURE ARRAY CONTAINING PARAMETER SPACE TO BE SEARCHED\n                ONLY THE PARAMETERS OF THE SPECIFIED WAVE TYPE ARE REQUIRED\n                test_param.vp   : vector containing P-wave velocities (m/s)\n                test_param.vs   : vector containing S-wave velocities\n                test_param.vR   : vector containing Rayleigh wave velocities\n                test_param.vL   : vector containing Love wave velocities\n                test_param.theta: vector containing incidence angles (degree)\n                test_param.phi  : vector containing azimuth angles (degree)\n                test_param.xi   : vector containing ellipticity angles (radians)\n\n    wtype: WAVE TYPE\n           'P' : P-wave\n           'SV': SV-wave\n           'SH': SH-wave\n           'L' : Love-wave\n           'R' : Rayleigh-wave\n\n    v_scal: SCALING VELOCITY (m/s)\n\n    samp: SAMPLE AT WHICH THE WINDOW IS CENTERED\n\n    W: WINDOW LENGTH (IN SAMPLES)\n\n    l: DETERMINES THE DIMENSION OF THE NULL-SPACE OF THE\n       COHERENCY MATRIX (see Eq.45 in Sollberger et al., 2018)\n       l=4: isolated wave, one dominant eigenvalue\n       l=3: two waves, two dominant eigenvalues\n       l='auto': automatically determine the size of the null space from\n                 the eigenvalue range, determines the number of eigenvalues\n                 that are smaller than l_auto_perc*lambda_max\n\n    stab: OPTIONAL STABILISATION PARAMETER TO AVOID DIVISION BY 0.\n          DEFAULT VALUE IS stab=1e-9\n\n    l_auto_perc: OPTIONAL PARAMETER FOR THE AUTOMATIC DETERMINATION OF THE\n                 DIMENSION OF THE NULL-SPACE. THE DIMENSION OF THE NULL\n                 SPACE IS ESTIMATED BY DETERMINING THE NUMBER OF EIGENVALUES\n                 THAT ARE SMALLER THAN l_auto_perc*lambda_max. DEFAULT VALUE\n                 IS l_auto_perc=0.01 (one percent)\n    '''\n    param = structtype()\n\n    # Calculate null space of coherency matrix\n    if np.remainder(W,2):\n        W = W + 1 # make window length even\n\n    data = hilbert(data, axis=0) # convert to the analytic signal\n\n    C = np.matrix.getH(data[int(samp-W/2):int(samp+W/2),:]) @ data[int(samp-W/2):int(samp+W/2),:] # compute coherency matrix\n    C = C / W # average over window length\n\n    Cprime,Q = np.linalg.eigh(C,UPLO='U') # eigenvalue decomposition (Q: eigenvectors, Cprime: eigenvalues)\n    lambda_ = np.sort(Cprime)[::-1] # sort eigenvalues in descending order\n    loc = np.argsort(Cprime)[::-1]\n    Q = Q[:,loc] # sort eigenvectors\n\n    # determination of the size of the null space\n    I = np.nonzero(lambda_[1:]/lambda_[0] < l_auto_perc)\n    I = (list(itertools.chain.from_iterable(I)))[0] + 1\n    if l == 'auto':\n        l = (5 - I) - 1\n    Q = Q[:,5-l:5] @ np.matrix.getH(Q[:,5-l:5]) # null space\n\n    ## P-wave\n    if wtype == 'P':\n        L = np.zeros((len(test_param.theta),len(test_param.phi),len(test_param.vp),len(test_param.vs)))\n        for it1 in range(0,len(test_param.theta)):\n            for it2 in range(0,len(test_param.phi)):\n                for it3 in range(0,len(test_param.vp)):\n                    for it4 in range(0,len(test_param.vs)):\n                        param.theta = test_param.theta[it1]\n                        param.phi = test_param.phi[it2]\n                        param.vp = test_param.vp[it3]\n                        param.vs = test_param.vs[it4]\n                        v = polvect6C(param,v_scal,'P') # calculate test polarization vector\n                        v = v / np.linalg.norm(v) # convert to unit vector\n                        L[it1,it2,it3,it4] = 1/(np.matrix.getH(v) @ Q @ v + stab) # MUSIC estimator\n        return L\n\n    ## SV-wave\n    if wtype == 'SV':\n        L = np.zeros((len(test_param.theta),len(test_param.phi),len(test_param.vp),len(test_param.vs)))\n        for it1 in range(0,len(test_param.theta)):\n            for it2 in range(0,len(test_param.phi)):\n                for it3 in range(0,len(test_param.vp)):\n                    for it4 in range(0,len(test_param.vs)):\n                        param.theta = test_param.theta[it1]\n                        param.phi = test_param.phi[it2]\n
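                        # (added note) each grid point evaluates the MUSIC pseudo-spectrum
                        # L = 1 / (v^H Q v + stab); it peaks when the steering vector v is
                        # near-orthogonal to the noise-subspace projector Q built above.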
param.vp = test_param.vp[it3]\n param.vs = test_param.vs[it4]\n v = polvect6C(param,v_scal,'SV') # calculate test polarization vector\n v = v / np.linalg.norm(v) # convert to unit vector\n L[it1,it2,it3,it4] = 1/(np.matrix.getH(v) @ Q @ v + stab) # MUSIC estimator\n return L\n \n ## SH-wave\n if wtype == 'SH':\n L = np.zeros((len(test_param.theta),len(test_param.phi),len(test_param.vs)))\n for it1 in range(0,len(test_param.theta)):\n for it2 in range(0,len(test_param.phi)):\n for it3 in range(0,len(test_param.vs)):\n param.theta = test_param.theta[it1]\n param.phi = test_param.phi[it2]\n param.vs = test_param.vs[it3]\n v = polvect6C(param,v_scal,'SH') # calculate test polarization vector\n v = v / np.linalg.norm(v) # convert to unit vector\n L[it1,it2,it3] = 1/(np.matrix.getH(v) @ Q @ v + stab) # MUSIC estimator\n return L\n \n ## Rayleigh-wave\n if wtype == 'R':\n L = np.zeros((len(test_param.phi),len(test_param.xi),len(test_param.vR)))\n for it1 in range(0,len(test_param.phi)):\n for it2 in range(0,len(test_param.xi)):\n for it3 in range(0,len(test_param.vR)):\n param.phi = test_param.phi[it1]\n param.xi = test_param.xi[it2]\n param.vR = test_param.vR[it3]\n v = polvect6C(param,v_scal,'R') # calculate test polarization vector\n v = v / np.linalg.norm(v) # convert to unit vector\n L[it1,it2,it3] = 1/(np.matrix.getH(v) @ Q @ v + stab) # MUSIC estimator\n return L\n \n ## Love-wave\n if wtype == 'L':\n L = np.zeros((len(test_param.phi),len(test_param.vL)))\n for it1 in range(0,len(test_param.phi)):\n for it2 in range(0,len(test_param.vL)):\n param.phi = test_param.phi[it1]\n param.vL = test_param.vL[it2]\n v = polvect6C(param,v_scal,'L') # calculate test polarization vector\n v = v / np.linalg.norm(v) # convert to unit vector\n L[it1,it2] = 1/(np.matrix.getH(v) @ Q @ v + stab) # MUSIC estimator\n return L","repo_name":"git-taufiq/MESS-2018","sub_path":"MESS-2018-Exercise-04/music6C.py","file_name":"music6C.py","file_ext":"py","file_size_in_byte":7316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1861511700","text":"import json\nimport io\n\nfrom flask import Flask, request, send_file\nimport pyproj\nfrom pyproj.exceptions import CRSError\n\nfrom heatmap import Heatmap\n\napp = Flask(__name__, static_url_path=\"\")\n\nwith open(\"earthquakes.geojson\") as f:\n data = json.load(f)\n\n\n@app.route(\"/\")\ndef root():\n return app.send_static_file(\"index.html\")\n\n\n@app.route(\"/wms\", methods=[\"GET\"])\ndef wms():\n width = int(request.args.get(\"width\", default=400, type=int))\n height = int(request.args.get(\"height\", default=300, type=int))\n bbox = request.args.get(\"bbox\", default=\"13.25638,52.43927,13.53790,52.58177\")\n # layers = request.args.get(\"layers\")\n srs = request.args.get(\"srs\", default=\"EPSG:4326\")\n\n west, south, east, north = (float(q) for q in bbox.split(\",\"))\n\n try:\n proj = pyproj.Proj(srs)\n except CRSError as err:\n return f\"Provided projection {srs} invalid. 
Error: {str(err)}\", 400\n\n heatmap = Heatmap(width, height, west, south, east, north)\n\n for feature in data[\"features\"]:\n lonlat = feature[\"geometry\"][\"coordinates\"][:2]\n magnitude = feature[\"properties\"][\"mag\"]\n if srs != \"EPSG:4326\":\n lonlat = proj(*lonlat, errcheck=True)\n heatmap.add_point(*lonlat, val=magnitude)\n\n heatmap.update_pixel_grid_rgba()\n image_bytes = heatmap.get_heatmap_image_bytes()\n return send_file(io.BytesIO(image_bytes), mimetype=\"image/png\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\")\n","repo_name":"aksakalli/heatmap-wms","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"21545218040","text":"# -*- coding: utf-8 -*\n\nimport json\nimport requests\nimport codecs\nimport datetime\nimport os\nimport configparser\nimport io\nimport redis\nimport log\nimport header\nimport sys\nfrom dateutil.parser import parse\n\nlog._logger.info('Start of the program for air quality')\n\nroot = os.environ.get('BEGOOD_PATH')\n\n# Connexion au serveur redis\nc = redis.ConnectionPool(host='127.0.0.1', port='6379', db=0, decode_responses=True)\nr = redis.StrictRedis(connection_pool=c)\n\nconf = root+'/init/aquality.ini'\nconfig = configparser.ConfigParser()\nconfig.read(conf)\n\ntry:\n url = config['URL']['URL']\nexcept ValueError as error:\n log._logger.error(error)\n sys.exit(0)\n\nheader.headerCheck(url, root)\n\ntry:\n content = requests.get(url)\n data = content.json()\nexcept requests.ConnectionError as e:\n log._logger.error(\"Except error \" +str(e))\nexcept requests.Timeout as e:\n log._logger.error(\"Except error \" +str(e))\nexcept requests.TooManyRedirects as e:\n log._logger.error(\"Except error \" +str(e))\nexcept requests.HTTPError as e:\n log._logger.error(\"Except error \" +str(e))\n\ntoday = datetime.datetime.today()\ntomorrow = today + datetime.timedelta(days = 1)\nyesterday = today - datetime.timedelta(days = 1)\n\nnow = today.strftime(\"%Y/%m/%d\")\ntoom = tomorrow.strftime(\"%Y/%m/%d\")\nyest = yesterday.strftime(\"%Y/%m/%d\")\n\n# Debut de le transaction\np = r.pipeline()\nif r.exists(\"air-quality\") == 1: \n p.delete(*r.keys('air-quality'))\n\nnew_data = {}\nnew_data['list'] = []\nfor key,value in sorted(data.items()):\n if key == \"error\":\n log._logger.error(\"Error: Bad url request\")\n sys.exit(1)\n else:\n if isinstance(value,list):\n if key == \"features\":\n for element in value:\n date = parse(str(element['properties']['date_ech']))\n str_date = date.strftime('%Y/%m/%d')\n if str_date == now or str_date == yest or str_date == toom:\n dico = {}\n dico = element\n new_data['list'].append(dico)\n\ncity_name = \"\"\ni = 1\nfor key,value in sorted(new_data.items()):\n if isinstance(value,list):\n for element in value:\n for sub_key, sub_value in element.items():\n if sub_value == 'Montargis' or sub_value == 'Orleans':\n dico1 = {}\n str_qual = u''.join(element['qualif']).encode('utf-8')\n dico1['qualificatif'] = str_qual\n date = dateparser.parse(str(element['date_ech']))\n str_date = date.strftime(\"%Y/%m/%d\")\n dico1['date_echeance'] = str_date\n dico1['valeur'] = element['valeur']\n dico1['y'] = element['y']\n dico1['x'] = element['x']\n str_nom_zone = u''.join(element['lib_zone']).encode('utf-8')\n dico1['nom_zone'] = str_nom_zone\n json_string = str(dico1)\n\n p.hset(\"air-quality\", i, json.dumps(dico1, ensure_ascii=False).decode('UTF-8',\"ignore\"))\n i = i + 
+{"seq_id":"6466708313","text":"from kubernetes.client import ApiClient, Configuration, CoreV1Api, AppsV1Api\nfrom dataclasses import dataclass\nimport urllib3\n\nurllib3.disable_warnings()\n\ndef getConfig(host : str, certPath: str, keyPath: str) -> Configuration:\n    config = Configuration()\n\n    config.host = host\n    config.cert_file = certPath\n    config.key_file = keyPath\n    config.verify_ssl = False\n\n    return config\n\n@dataclass\nclass KubernetesClient():\n\n    __slots__ = ('coreClient', 'appsClient')\n\n    coreClient: CoreV1Api\n    appsClient: AppsV1Api\n\n    def __init__(self, config : Configuration) -> None:\n        apiClient = ApiClient(configuration=config)\n        self.coreClient = CoreV1Api(api_client=apiClient)\n        self.appsClient = AppsV1Api(api_client=apiClient)\n","repo_name":"hokiegeek2/kubernetes-integration","sub_path":"src/org/hokiegeek2/integration/kubernetes/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
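The client.py record above only constructs the wrapped API clients; a short usage sketch under stated assumptions (hypothetical API-server endpoint and certificate paths, module name taken from the record's sub_path):

from client import getConfig, KubernetesClient

config = getConfig(host='https://127.0.0.1:6443',   # hypothetical endpoint
                   certPath='/tmp/client.crt',      # hypothetical client cert
                   keyPath='/tmp/client.key')       # hypothetical client key
k8s = KubernetesClient(config)

# the two wrapped clients expose the usual CoreV1/AppsV1 calls
pods = k8s.coreClient.list_namespaced_pod(namespace='default')
deploys = k8s.appsClient.list_namespaced_deployment(namespace='default')
print([p.metadata.name for p in pods.items])
print([d.metadata.name for d in deploys.items])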
+{"seq_id":"3142336304","text":"# Terrain editing\n\"\"\"\nhttps://programmers.co.kr/learn/courses/30/lessons/12984\nFind the total cost when levelling the terrain to the cheapest target height.\n* First approach attempted -> bisection search; but since the two endpoints are not both minima\n* and the cost curve is roughly quadratic (convex), finding the minimum would need ternary search, not binary search.\n* It also brings no real gain in running time.\n\n> Solution: analyse the grid one height level at a time instead of one cell at a time (greedy algorithm)\n1. Processing by level rather than by cell requires sorting first\n2. Levelling everything to blocks[0] initially costs (total - blocks[0] * len(blocks)) removals (every cell keeps blocks[0] layers)\n3. From there, raise the target one level at a time, adding the cost of raising and subtracting the cost of lowering\n4. At each step the height changes by blocks[i] - blocks[i - 1] layers, and the same delta applies to the lowering cost as well.\n\"\"\"\n\n\ndef solution(land, p, q):\n    blocks = []\n    total = 0\n    for i in range(len(land)):\n        for j in range(len(land)):\n            blocks.append(land[i][j])\n            total += land[i][j]\n    blocks.sort()\n\n    cost = (total - blocks[0] * (len(blocks))) * q\n    answer = cost\n    for i in range(1, len(blocks)):\n        # i cells sit below the new target and must be raised by the delta\n        up = i * (blocks[i] - blocks[i - 1]) * p\n        # the remaining len(blocks) - i cells need that much less lowering\n        down = (len(blocks) - i) * (blocks[i] - blocks[i - 1]) * q\n        cost += up - down\n        answer = min(answer, cost)\n    return answer\n\n\nif __name__ == '__main__':\n    land = [[4, 4, 3], [3, 2, 2], [2, 1, 0]]\n    p = 5\n    q = 3\n    print(solution(land, p, q))\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"programmers/lv4/edit_ground_1.py","file_name":"edit_ground_1.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
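The greedy recurrence in edit_ground_1.py can be sanity-checked against a direct brute force over every candidate target height; for the record's sample grid with p=5, q=3 both approaches give 33 (a verification sketch, reusing the record's sample input):

def brute_force(land, p, q):
    blocks = [h for row in land for h in row]
    # cost of levelling every cell to one fixed target height
    def cost(target):
        return sum((target - b) * p if b < target else (b - target) * q for b in blocks)
    return min(cost(t) for t in range(min(blocks), max(blocks) + 1))

assert brute_force([[4, 4, 3], [3, 2, 2], [2, 1, 0]], 5, 3) == 33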
+{"seq_id":"41727709754","text":"import random # IMPORTING RANDOM, WILL BE USED LATER TO SHUFFLE THE DECK\n\n# Global Variable\n# CONTAINS ALL THE NECESSARY INGREDIENTS TO CREATE THE DECK.\nsuits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')\nranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')\nvalues = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,\n          'Queen':10, 'King':10, 'Ace':11}\nplaying = True\n\n# BLACKJACK OR 21 GAME\n# THE PLANNING PHASE\n\n# Create a deck of 52 cards: Completed\n\n\nclass Card: # CREATING THE CARD TYPES\n\n\tdef __init__(self,suit,rank):\n\t\tself.suit = suit\n\t\tself.rank = rank\n\n\tdef __str__(self):\n\t\treturn f'{self.rank} of {self.suit}.'\n\n# TEST FILES FOR ADMIN USE ONLY:\n\t#A = Card('Hearts', 'Nine')\n\t#print(A.__str__())\n\n\nclass Deck: #CREATING THE DECK\n\tdef __init__(self): # This creates the deck, we put it in the init method because we want it to stay constant\n\t\tself.deck = []\n\t\tfor suit in suits:\n\t\t\tfor rank in ranks:\n\t\t\t\tself.deck.append(Card(suit,rank))\n\n\tdef __str__(self): # lists the deck\n\t\tdeck_comp = \"\"\n\t\tfor card in self.deck:\n\t\t\tdeck_comp += '\\n'+ card.__str__()\n\t\treturn f\"The deck has {deck_comp}\"\n\n\tdef shuffle(self): #Shuffle the deck\n\t\trandom.shuffle(self.deck)\n\n\tdef deal(self): # After a player chooses a card, the card is removed from the deck\n\t\tsingle_card = self.deck.pop()\n\t\treturn single_card\n\n\n\n# TEST FILES FOR ADMIN USE ONLY:\n\t#B = Deck()\n\t#B.shuffle()\n\t#print(B.__str__())\n\n\n\n\n# Players hand\n\nclass Hand:\n\n\tdef __init__(self):\n\t\tself.cards = [] # empty list to collect the cards in the players hand\n\n\t\tself.value = 0 # keeps track of the value of the players hand\n\n\t\tself.aces = 0 # keeps track of the aces that we have\n\n\tdef add_card(self,card): # adds cards from the deal method in the Deck class\n\t\tself.cards.append(card)\n\t\tself.value += values[card.rank]\n\n\t\tif card.rank == 'Ace':\n\t\t\tself.aces += 1 # add 1 ace to our ace attribute to know that we have one in our hand\n\n\tdef adjust_for_ace(self):\n\n\t\twhile self.value > 21 and self.aces: # in this instance self.aces is the same as self.aces > 0.\n\t\t\tself.value -= 10 # if the hand contains an ace, that ace becomes a 1\n\t\t\tself.aces -= 1 # this prevents us from adjusting the same ace more than once\n\n\n\n# TEST FILES FOR ADMIN USE ONLY:\n\n\n\t#test_deck = Deck()\n\t#test_deck.shuffle()\n\t#test_1 = Hand()\n\t#test_1.add_card(test_deck.deal())\n\t#test_1.add_card(test_deck.deal())\n\t#test_1.adjust_for_ace()\n\t#print(test_1.value)\n\n\t#for i in test_1.cards:\n\t\t#print(i)\n\n\n\n\n\n\t#Ask the Player for their bet\n\t#Make sure that the Player's bet does not exceed their available chips\n\nclass Chips: # keeps track of players starting chips before and after bets\n\n\tdef __init__(self):\n\t\tself.total = int(input(\"Enter in a value: \"))\n\t\tself.bet = 0\n\n\tdef win_bet(self):\n\t\tself.total += self.bet # if the player wins the bet, then the bet is added to their total\n\n\n\tdef lose_bet(self):\n\t\tself.total -= self.bet # if the player loses the bet, then it is subtracted from their total\n\n\n\n\ndef take_bet(chips):\n\n    while True:\n\n        try:\n            chips.bet = int(input('Enter in your bet: '))\n\n        except ValueError:\n            print(\"You did not enter in an integer. \")\n\n        else:\n            if chips.bet > chips.total:\n                print('Sorry try again, you do not have enough chips to make this bet.', chips.total)\n\n            else:\n                break\n\n\n\n\ndef hit(deck,hand): # allows both players to take hits from the deck\n    hand.add_card(deck.deal()) # same function used to get the single_card from the deal method\n    hand.adjust_for_ace() # adjust for the amount of aces that you have.\n\n\n\n\ndef hit_or_stand(deck,hand): # asks the player if they want to draw another card or not\n    global playing # to control an upcoming while loop\n\n    while True:\n        x = input(\"Would you like to Hit or Stand? Enter 'h' or 's' \")\n\n        if x[0].lower() == 'h':\n            hit(deck,hand) # hit() function defined above\n\n        elif x[0].lower() == 's':\n            print(\"Player stands. Dealer is playing.\")\n            playing = False\n\n        else:\n            print(\"Sorry, please try again.\")\n            continue\n        break\n\n\n\ndef show_some(player,dealer): # this function will be passed through the Hand class\n\tprint(\"Dealers Card\")\n\tprint(\"
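# The record is cut off inside show_some(); the following is a hedged,
# standalone sketch of the usual display step in this style of Blackjack
# tutorial (the dealer's first card stays hidden while the player acts),
# not a reconstruction of the missing source:
def show_some_sketch(player, dealer):
    print('Dealers Hand: <card hidden>,', dealer.cards[1])
    print('Players Hand:')
    for card in player.cards:
        print(' ', card)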