seq_id
string
text
string
repo_name
string
sub_path
string
file_name
string
file_ext
string
file_size_in_byte
int64
program_lang
string
lang
string
doc_type
string
stars
int64
dataset
string
pt
string
api
list
71897328507
import os import time import psutil def run_excel(): while True: os.system(f'start excel possible_duplicate_people.xlsx') while True: time.sleep(1) excel_running = False for process in psutil.process_iter(attrs=['pid', 'name']): if "EXCEL.EXE" in process.info['name']: excel_running = True break if not excel_running: break break print("Excel file has been closed. Now, running additional code.")
FrontendPony/XML-parser
open_excel.py
open_excel.py
py
550
python
en
code
0
github-code
6
[ { "api_name": "os.system", "line_number": 7, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 9, "usage_type": "call" }, { "api_name": "psutil.process_iter", "line_number": 11, "usage_type": "call" } ]
11691605009
import imp import re from PySide2.QtWidgets import QMainWindow from PySide2.QtCore import Slot from ui_mainwindow import Ui_MainWindow from particulasact.particula import Particula from particulasact.index import Nodo, Lista_ligada class MainWindow(QMainWindow): def __init__(self): super(MainWindow, self).__init__() self.lista_ligada = Lista_ligada() self.ui = Ui_MainWindow() self.ui.setupUi(self) self.ui.agregarFinal_pushButton.clicked.connect( self.click_agregarFinal) self.ui.agregarInicio_pushButton.clicked.connect( self.click_agregarInicio) self.ui.mostrar_pushButton.clicked.connect(self.click_mostrar) def creadorDeParticulas(self): destinoX = self.ui.destinoX_spinBox.value() origenX = self.ui.origenX_spinBox.value() destinoY = self.ui.destinoY_spinBox.value() origenY = self.ui.origenY_spinBox.value() velocidad = self.ui.velocidad_spinBox.value() red = self.ui.red_spinBox.value() green = self.ui.green_spinBox.value() blue = self.ui.blue_spinBox.value() return Particula(self.lista_ligada.no_elements+1, origenX, origenY, destinoX, destinoY, velocidad, red, green, blue) @Slot() def click_mostrar(self): self.ui.salida.clear() self.ui.salida.insertPlainText(str(self.lista_ligada)) @Slot() def click_agregarFinal(self): particula = self.creadorDeParticulas() nodo = Nodo(particula) self.lista_ligada.agregar_final(nodo) self.ui.salida.clear() self.ui.salida.insertPlainText("Agregado al Final") """ self.ui.salida.insertPlainText( f"ID:{particula.id}\nOrigen X:{particula.origen_x}\nDestino X: {particula.destino_x}\nOrigen Y:{particula.origen_y}\nDestino Y: {particula.destino_y}\nVelocidad: {particula.velocidad}\nDistancia:{particula.distancia}\nRed: {particula.red}\nGreen: {particula.green}\nBlue: {particula.blue}") """ @Slot() def click_agregarInicio(self): particula = self.creadorDeParticulas() nodo = Nodo(particula) self.lista_ligada.agregar_inicio(nodo) self.ui.salida.clear() self.ui.salida.insertPlainText("Agregado al Inicio")
arturos8617/actividad06
mainwindow.py
mainwindow.py
py
2,290
python
es
code
0
github-code
6
[ { "api_name": "PySide2.QtWidgets.QMainWindow", "line_number": 10, "usage_type": "name" }, { "api_name": "particulasact.index.Lista_ligada", "line_number": 13, "usage_type": "call" }, { "api_name": "ui_mainwindow.Ui_MainWindow", "line_number": 14, "usage_type": "call" },...
72136709947
#Plots import re import plotly.express as px #DASHBOARD import dash import dash_html_components as html import dash_core_components as dcc from dash.dependencies import Input, Output import dash_bootstrap_components as dbc import dash_table from dash.exceptions import PreventUpdate import tweepy ########## from data import * from update_db import * from graphs import * from semanticsearch import get_similar_sentences app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) server = app.server # for Heroku deployment tabs_styles = { # 'height': '44px', 'background': '#393939' } tab_style = { 'borderBottom': '1px solid #d6d6d6', 'padding': '6px', 'fontWeight': 'bold' } tab_selected_style = { 'borderTop': '1px solid #d6d6d6', 'borderBottom': '1px solid #d6d6d6', 'backgroundColor': '#119DFF', 'color': 'white', 'padding': '6px' } ################################ DATA PROCESSING ######################################### stockprice_number_of_days = '8d' stocks_screenerview = pd.read_csv('socialmediadata/stocks_screenerview_sectors.csv') consumer_key = "" consumer_secret = "" access_token = "326146455-" access_token_secret = "" # Creating the authentication object auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # Setting your access token and secret auth.set_access_token(access_token, access_token_secret) # Creating the API object by passing in auth information api = tweepy.API(auth) def top_words_on_key(df, colum_name, Source, key, keytype, top_n): df_words = " ".join(df[colum_name]).split(' ') if key == '@': df_ticker_list = re.findall(r'[@][A-Za-z]+', str(df_words)) elif key == '#': df_ticker_list = re.findall(r'[#][A-Za-z]+', str(df_words)) else: df_ticker_list = re.findall(r'[$][A-Za-z]+', str(df_words)) # print(df_ticker_list) df_top_tickers = pd.Series(df_ticker_list).value_counts()[:top_n].to_frame('count').reset_index() df_top_tickers['Source'] = Source df_top_tickers['keytype'] = keytype # print(df_top_tickers) return df_top_tickers 
LEFT_COLUMN = dbc.Navbar( [ dbc.Row( [ dbc.Col(html.H4(" Stock Market Insights"),md=6), html.Br(), dbc.Col( html.Label(" Stock Ticker:"),md=2), dbc.Col( dcc.Dropdown(id='dropdown',options=[ {'label': 'Microsoft (MSFT)', 'value': 'MSFT'}, {'label': 'Tesla (TSLA)', 'value': 'TSLA'}, {'label': 'TMobile (TMUS)', 'value': 'TMUS'}],value='TMUS',clearable=False ),md=4) ] ) ] ) def homepage_stockmarket_fig(): namedent_agg_sorted = stocks_screenerview.groupby(['Sector']).apply( lambda x: x.sort_values(['Volume'], ascending=False)).reset_index( drop=True) namedent_agg_top5_df = namedent_agg_sorted.groupby(['Sector']).head(5) namedent_agg_top5_df['Ticker_Company'] = namedent_agg_top5_df['Ticker'] + '(' + namedent_agg_top5_df[ 'Company'] + ')' stockvolume_sunburst_fig = px.sunburst(namedent_agg_top5_df, path=['Sector', 'Ticker'], values='Volume', hover_name="Ticker_Company",color_discrete_sequence=px.colors.qualitative.Pastel) stockvolume_sunburst_fig.update_layout( title=dict(text="<b>Top Tickers based on <br> Actual Stock Volume</b>"), plot_bgcolor='#5B5959', font=dict(size=13) ) tweet_df = get_data_from_db( db_table_name = 'socialmediadata.stock_twitterdata') reddit_df = get_data_from_db(db_table_name='socialmediadata.stock_redditdata') reddit_top_tickers = top_words_on_key(reddit_df, 'text', 'Reddit', '$', 'Tickers', 5) twitter_top_tickers = top_words_on_key(tweet_df, 'text', 'Twitter', '$', 'Tickers', 5) top_social_media = pd.concat([reddit_top_tickers, twitter_top_tickers]) top_tickers_socialmedia_sunburstfig = px.sunburst(top_social_media, path=['Source','index'], values='count',color_discrete_sequence=px.colors.qualitative.Pastel) top_tickers_socialmedia_sunburstfig.update_layout( title=dict(text="<b>Top Tickers based on <br> Volume(No Of Tweets) on Social Media</b>"), #treemapcolorway=['#0000A0', '#E6A000', '#009FEB'], plot_bgcolor='#5B5959', font=dict(size=12) ) df_top_reddit_users = pd.Series(reddit_df['user']).value_counts()[:5].to_frame('count').reset_index() 
df_top_reddit_users['Source']='Reddit' df_top_twitter_users = pd.Series(tweet_df['user']).value_counts()[:5].to_frame('count').reset_index() df_top_twitter_users['Source'] = 'Twitter' top_social_users = pd.concat([df_top_reddit_users, df_top_twitter_users]) top_users_socialmedia_sunburstfig = px.bar(top_social_users, x='count', y='index', color="Source", barmode='group') top_users_socialmedia_sunburstfig.update_layout( title=dict(text="<b>Top Users on Social Media</b>"), # treemapcolorway=['#0000A0', '#E6A000', '#009FEB'], plot_bgcolor='#5B5959', font=dict(size=12) ) final_namedentitydf = pd.read_csv('socialmediadata/namedentitydf.csv') socialmedia_namedentity_fig = px.treemap(final_namedentitydf, path=['source', 'Label', 'Text'], color_discrete_sequence=px.colors.qualitative.Pastel,values='count') socialmedia_namedentity_fig.update_layout( title=dict(text="<b>Stock News Named Entities from Twitter,Reddit,News and Blogs </b>"), #treemapcolorway=['#0000A0', '#E6A000', '#009FEB'], font=dict(size=14) ) return stockvolume_sunburst_fig,top_tickers_socialmedia_sunburstfig,socialmedia_namedentity_fig,top_users_socialmedia_sunburstfig stockvolume_sunburst_fig,top_tickers_socialmedia_sunburstfig,\ socialmedia_namedentity_fig,top_users_socialmedia_sunburstfig = homepage_stockmarket_fig() HOME_BODY = [ dbc.Row( [ dbc.Col(dcc.Graph(id="stockvolume_sunburst_fig",figure=stockvolume_sunburst_fig),width=4), dbc.Col(dcc.Graph(id="top_tickers_socialmedia_sunburstfig",figure=top_tickers_socialmedia_sunburstfig),width=4), dbc.Col(dcc.Graph(id="top_users_socialmedia_sunburstfig",figure=top_users_socialmedia_sunburstfig),width=4) ] ), dbc.Row( [ dbc.Col(dcc.Graph(id="socialmedia_namedentity_fig", figure=socialmedia_namedentity_fig), width=12) ] ), html.Br(), dbc.Col(html.H2("Semantic Search on Twitter,Reddit,News and Blogs"),md=11), html.Br(), dbc.Row( [ dbc.Col(dbc.Card(dcc.Input(id='semantic_search', type="text", value="Stock news related to Healthcare Sector", placeholder="Twitter 
Search")), md=8) ]), dbc.Row( [ dbc.Col(dbc.Card(html.Label(id='semanticsearchtable')), width=11) ]), html.Br(), dbc.Col(html.H2("Real Time Twitter Streaming Insights"),md=11), html.Br(), dbc.Row( [ dbc.Col(dcc.Input(id='twitter_search',type="text", value="stockmarket",placeholder="Twitter Search"), md=8) ]), html.Br(), dcc.Interval( id='interval-component', interval=1 * 80000, # in milliseconds n_intervals=0 ), dbc.Row( [ dbc.Col(dbc.Card(html.Label(id='tweettable')), width=7) ]) ] SOCIALMEDIA_BODY = [ html.Br(), dbc.Row( [ dbc.Col(dbc.Card(html.Div(id='stockdescription')), width=12) ], ), dbc.Row( [ #dbc.Col(dcc.Graph(id="tophashtagmentionfunnelchart"),width=3), dbc.Col(dbc.Spinner(dcc.Graph(id="tickertopmentionssunburstplot"), type = "grow"),width=4), dbc.Col(dbc.Spinner(dcc.Graph(id="stockfundchart"), type = "grow"),width=7) ], ), dbc.Row( [ dbc.Col(dbc.Spinner(dcc.Graph(id="stockchart"), type="grow"), width=11) ], ), dbc.Row( [ dbc.Col(dbc.Spinner(dcc.Graph(id="stocksentimentlinechart"), type = "grow"),width=11) ], ), dbc.Row( [ dbc.Col(dbc.Spinner(dcc.Graph(id="stocksentimentfunnelallchart"), type="grow"),width=4), dbc.Col(dbc.Spinner(dcc.Graph(id="stocksentimentfunnelchart"), type="grow"),width=8) ], ), dbc.Row( [ dbc.Col(dcc.RangeSlider(id='sentiment-slider',min=-1,max=1,step=0.2,value=[0, 0.5]),width=4), ], ), dbc.Row( [ dbc.Col( [ dbc.Row(dbc.Spinner(dcc.Graph(id="twitterwordcloudplot"),type = "grow")), dbc.Row(dbc.Spinner(dcc.Graph(id="redditwordcloudplot"),type = "grow")) ],width=3 ), dbc.Col(dbc.Card(html.Label(id='sentimenttable')), width=7) ] ), dbc.Col(html.H2("Real Time Tweets from STOCKTWITS"),md=11), dbc.Col(dbc.Spinner(html.Label(id='stocktwits-output'),type = "grow"), width=11) ] NEWS_BODY = [ dbc.Col(dbc.Spinner(dbc.Card(html.Label(id='newsarticletable'))), width=11), dbc.Row( [ dbc.Col(dbc.Spinner(dcc.Graph(id="newsngramfig")),width=4), dbc.Col(dbc.Spinner(dcc.Graph(id="newswordcloudfig")), width=4) ], ), 
dbc.Col(dbc.Spinner(dbc.Card(dcc.Graph(id='newsnamedentity'))),width=11) ] BODY = dbc.Container\ ([ dbc.Row( dbc.Col( dcc.Tabs(id="tabs-styled-with-inline", value='home', children=[ dcc.Tab(label='HOME', value='home',style=tab_style, selected_style=tab_selected_style), dcc.Tab(label='TICKER-SOCIALMEDIA SENTIMENTS', value='socialmedia',style=tab_style, selected_style=tab_selected_style), dcc.Tab(label='TICKER-NEWS ', value='news',style=tab_style, selected_style=tab_selected_style), ] ),width={"size": 8, "offset": 2},md={"size": 10, "offset": 1},lg={"size": 12, "offset": 0} ),className="bottom32"), #html.Div(dcc.Loading(html.Div(id="main_div"),type="cube", style={"marginTop": "150px"}),style={"minHeight": "500px"}) html.Div(html.Div(id="main_div")) ], style={"maxWidth": "1340px"} ) app.layout = html.Div(children=[LEFT_COLUMN,BODY]) ########################################################################################### ##########################################CALLBACKS######################################## ########################################################################################### @app.callback([Output('stockdescription', 'children'), Output('stockfundchart', 'figure'), Output('stockchart', 'figure') ], Input('dropdown', 'value') ) def update_graphs(value): ##### stockdescription, stock_fundament = get_stock_fundamentals(value) stock_fund_treefig = px.treemap(stock_fundament, path=['index', 'value'], color_discrete_sequence = px.colors.qualitative.Pastel,height=400) data = get_stockpricedata(value, stockprice_number_of_days) stock_fig = candlestick_chart(data) return stockdescription, stock_fund_treefig,stock_fig @app.callback( Output('tickertopmentionssunburstplot', 'figure'), Input('dropdown', 'value') ) def update_graphs(value): ##### tweet_df = get_data_from_db(db_table_name='socialmediadata.ticker_twitterdata1') reddit_df = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata') tweet_df = tweet_df[tweet_df['ticker'] == 
'$'+value] reddit_df = reddit_df[reddit_df['ticker'] == value] tweet_df['DataSource'] = 'Twitter' reddit_df['DataSource'] = 'Reddit' reddit_top_mentions = top_words_on_key(reddit_df, 'text', 'Reddit', '@', 'Mentions', 5) reddit_top_hashtags = top_words_on_key(reddit_df, 'text', 'Reddit', '#', 'Hashtags', 5) twitter_top_mentions = top_words_on_key(tweet_df, 'text', 'Twitter', '@', 'Mentions', 5) twitter_top_hashtags = top_words_on_key(tweet_df, 'text', 'Twitter', '#', 'Hashtags', 5) top_social_media = pd.concat( [reddit_top_mentions, reddit_top_hashtags, twitter_top_mentions, twitter_top_hashtags]) top_mentions_hastags_sunburstfig = px.sunburst(top_social_media, path=['keytype', 'Source', 'index'], values='count',color_discrete_sequence=px.colors.qualitative.Pastel) top_mentions_hastags_sunburstfig.update_layout( plot_bgcolor='#5B5959', title=dict(text="<b>Top Mentions/Hashtags on Social Media</b>"), font=dict(size=11) ) return top_mentions_hastags_sunburstfig @app.callback( Output('stocksentimentlinechart', 'figure') , Input('dropdown', 'value') ) def update_graphs(value): ##### tweet_df = get_data_from_db(db_table_name='socialmediadata.ticker_twitterdata1') reddit_df = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata') tweet_df = tweet_df[tweet_df['ticker'] == '$'+ value] reddit_df = reddit_df[reddit_df['ticker'] == value] tweet_df['DataSource'] = 'Twitter' reddit_df['DataSource'] = 'Reddit' twitter_reddit_sentiment_df = pd.concat([tweet_df, reddit_df]) twitter_reddit_sentiment_df['datehour'] = pd.to_datetime( twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y, %H")) twitter_reddit_sentiment_df['Date'] = pd.to_datetime(twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y")) finaldf = twitter_reddit_sentiment_df.groupby(['datehour', 'DataSource'])['sentiment_score'].mean().reset_index() stock_sentiment_line_fig = update_sentiment_linechart(finaldf, x='datehour', y='sentiment_score', color='DataSource') return 
stock_sentiment_line_fig @app.callback([ Output('stocksentimentfunnelallchart', 'figure'), Output('stocksentimentfunnelchart', 'figure') ], Input('dropdown', 'value') ) def update_graphs(value): ##### tweet_df = get_data_from_db(db_table_name='socialmediadata.ticker_twitterdata1') reddit_df = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata') tweet_df = tweet_df[tweet_df['ticker'] == '$'+value] reddit_df = reddit_df[reddit_df['ticker'] == value] tweet_df['DataSource'] = 'Twitter' reddit_df['DataSource'] = 'Reddit' twitter_reddit_sentiment_df = pd.concat([tweet_df, reddit_df]) twitter_reddit_sentiment_df['datehour'] = pd.to_datetime( twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y, %H")) twitter_reddit_sentiment_df['Date'] = pd.to_datetime(twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y")) finaldf_date = twitter_reddit_sentiment_df.groupby(['Date', 'DataSource', 'sentiment']).size().reset_index() final__ = finaldf_date.sort_values(0, ascending=False) stock_sentiment_funnel_all_fig = px.bar(final__, x=0, y='sentiment', color="DataSource") #stock_sentiment_funnel_all_fig.update_layout(showlegend=False) finaldf_ = twitter_reddit_sentiment_df.groupby(['datehour', 'DataSource', 'sentiment']).size().reset_index() finaldf_2 = finaldf_[finaldf_['sentiment'] != "Neutral"] stock_sentiment_funnel_fig = update_stock_sentiment_funnel(finaldf_2, x="datehour", y=0, text="DataSource", color="sentiment") return stock_sentiment_funnel_all_fig,stock_sentiment_funnel_fig @app.callback([ Output('sentimenttable', 'children'), Output('redditwordcloudplot', 'figure'), Output('twitterwordcloudplot', 'figure') ], Input('dropdown', 'value') ) def update_graphs(value): tweetdf = get_data_from_db( db_table_name = 'socialmediadata.ticker_twitterdata1') redditdf = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata') tweet_df = tweetdf[tweetdf['ticker'] == '$'+value] reddit_df = redditdf[redditdf['ticker'] == value] tweet_df['DataSource'] = 
'Twitter' reddit_df['DataSource'] = 'Reddit' twitter_reddit_sentiment_df = pd.concat([tweet_df, reddit_df]) nlpinsight = nlpinsights(reddit_df, column_name="text") reddit_wordcloud_fig = nlpinsight.visualize_wordclouds() nlpinsight = nlpinsights(tweet_df, column_name="text") twitter_wordcloud_fig = nlpinsight.visualize_wordclouds() twitter_reddit_sentiment_df['datehour'] = pd.to_datetime( twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y, %H")) twitter_reddit_sentiment_df['Date'] = pd.to_datetime(twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y")) print(twitter_reddit_sentiment_df.columns) twitter_reddit_sentiment_fil = twitter_reddit_sentiment_df[['date_hour', 'text', 'sentiment_score','DataSource','url']] twitter_reddit_sentiment_fil = twitter_reddit_sentiment_fil.round(3) def f(row): l = "[{0}]({0})".format(row["url"]) return l print(twitter_reddit_sentiment_fil.head(2)) twitter_reddit_sentiment_fil["url"] = twitter_reddit_sentiment_fil.apply(f, axis=1) #twitter_reddit_sentiment_fil = twitter_reddit_sentiment_fil[ # (twitter_reddit_sentiment_fil['sentiment_score'] > int(slidervalue[0])) & ( # twitter_reddit_sentiment_fil['sentiment_score'] < int(slidervalue[1]))] sentiments_table = dash_table.DataTable( id='datatable-output1', style_data={ 'whiteSpace': 'normal', # 'height': 'auto' }, data=twitter_reddit_sentiment_fil.to_dict('records'), row_selectable="multi", selected_rows=[], columns=[{'id': c, 'name': c ,'type':'text', 'presentation':'markdown'} for c in twitter_reddit_sentiment_fil.columns], # columns=[{'name': 'Link', 'id': 'Link', 'type': 'text', 'presentation': 'markdown'}], filter_action='native', sort_action='native', css=[ {'selector': '.row-1', 'rule': 'background: #E6A000;'} ], page_size=4, style_header={'backgroundColor': '#7DF180', 'fontWeight': 'bold', 'border': '1px solid black', 'font_size': '18px'}, style_cell={'font_size': '11px', 'whiteSpace': 'normal', 'height': 'auto', 'padding': '15px'}, # export_format='csv', 
export_format='csv', style_cell_conditional=[ {'if': {'column_id': 'date_hour'}, 'width': '10%', 'textAlign': 'left'}, {'if': {'column_id': 'sentiment_score'}, 'width': '5%', 'textAlign': 'left'}, {'if': {'column_id': 'text'}, 'width': '65%', 'textAlign': 'left'}, {'if': {'column_id': 'DataSource'}, 'width': '10%', 'textAlign': 'left'}, {'if': {'column_id': 'url'}, 'width': '10%', 'textAlign': 'left'} ] ) #top_mentions_hastags_sunburstfig return sentiments_table,reddit_wordcloud_fig,twitter_wordcloud_fig @app.callback( Output('newsngramfig', 'figure'), Output('newswordcloudfig', 'figure'), Input('dropdown', 'value') ) def update_graphs(value): news_df = get_data_from_db(db_table_name='socialmediadata.ticker_newsdata') news_df = news_df[news_df['ticker'] == value] news_df = news_df.round(3) news_df = news_df.head(15) nlpinsight = nlpinsights(news_df, column_name="text") news_wordcloud_fig = nlpinsight.visualize_wordclouds() news_ngram_fig = nlpinsight.visualize_ngrams(2,5) return news_ngram_fig,news_wordcloud_fig @app.callback( Output('newsnamedentity', 'figure'), Input('dropdown', 'value') ) def update_graphs(value): news_df = get_data_from_db(db_table_name='socialmediadata.ticker_newsdata') news_df = news_df[news_df['ticker'] == value] news_df = news_df.round(3) news_df = news_df.head(5) nlpinsight = nlpinsights(news_df, column_name="text") news_namedentity_fig = nlpinsight.visualize_namedentities() return news_namedentity_fig @app.callback(Output('newsarticletable', 'children'), Input('dropdown', 'value') ) def update_graphs(value): news_df = get_data_from_db(db_table_name='socialmediadata.ticker_newsdata') news_df = news_df[news_df['ticker'] == value] news_df = news_df.round(3) #newsarticle_df = news_df[news_df['ticker'] == value] newsarticle_df = news_df[['date','title','summary','sentiment_score','link']] newsarticle_df = newsarticle_df[newsarticle_df['summary']!='Invalid'] #print(newsarticle_df) def f(row): l = "[{0}]({0})".format(row["link"]) return l 
newsarticle_df["link"] = newsarticle_df.apply(f, axis=1) newsarticle_table = dash_table.DataTable( id='datatable-output1', style_data={ 'whiteSpace': 'normal', # 'height': 'auto' }, data=newsarticle_df.to_dict('records'), row_selectable="multi", selected_rows=[], columns=[{'id': c, 'name': c ,'type':'text', 'presentation':'markdown'} for c in newsarticle_df.columns], # columns=[{'name': 'Link', 'id': 'Link', 'type': 'text', 'presentation': 'markdown'}], filter_action='native', sort_action='native', css=[ {'selector': '.row-1', 'rule': 'background: #E6A000;'} ], page_size=4, style_header={'backgroundColor': '#7DF180', 'fontWeight': 'bold', 'border': '1px solid black', 'font_size': '18px'}, style_cell={'font_size': '11px', 'whiteSpace': 'normal', 'height': 'auto', 'padding': '15px'}, # export_format='csv', export_format='csv', style_cell_conditional=[ {'if': {'column_id': 'Date'}, 'width': '15%', 'textAlign': 'left'}, {'if': {'column_id': 'Title'}, 'width': '20%', 'textAlign': 'left'}, {'if': {'column_id': 'Link'}, 'width': '10%', 'textAlign': 'left'}, {'if': {'column_id': 'summary'}, 'width': '45%', 'textAlign': 'left'}, {'if': {'column_id': 'sentiment_score'}, 'width': '10%', 'textAlign': 'left'}, {'if': {'column_id': 'sentiment'}, 'width': '5%', 'textAlign': 'left'} ] ) return newsarticle_table @app.callback(Output('tweettable', 'children'), [Input('twitter_search', 'value'), Input('interval-component', 'n_intervals')] ) def update_graphs(value,n): mainlis = [] res = api.search(value) for i in res: lis = [] lis.append([i.id, i.created_at, i.text]) mainlis.append(lis) tweetstream_df = pd.DataFrame(mainlis) tweetstream_table = dash_table.DataTable( id='datatable-output', style_data={ 'whiteSpace': 'normal', 'height': 'auto', 'lineHeight': '15px' }, data=tweetstream_df.to_dict('records'), css=[ {'selector': '.row-1', 'rule': 'background: #E6A000;'} ], columns=[{'id': c, 'name': c} for c in tweetstream_df.columns], page_size=8, style_header={'backgroundColor': 
'#E6A000', 'fontWeight': 'bold', 'border': '1px solid black', 'font_size': '18px'}, style_cell={'font_size': '11px', 'font_family': "Arial", 'whiteSpace': 'normal', 'height': 'auto', 'padding': '15px' }, # export_format='csv', export_format='csv', export_headers='display', style_data_conditional=[ { 'if': {'row_index': 'odd'}, 'backgroundColor': 'rgb(248, 248, 248)' } ], style_cell_conditional=[ {'if': {'column_id': 'UserTweetDate'}, 'width': '10%', 'textAlign': 'center'}, {'if': {'column_id': 'Time'}, 'width': '10%', 'textAlign': 'center'}, {'if': {'column_id': 'Tweet'}, 'width': '55%', 'textAlign': 'left'}, {'if': {'column_id': 'sentiment'}, 'width': '15%', 'textAlign': 'left'}, ] ) return tweetstream_table @app.callback(Output('semanticsearchtable', 'children') , Input('semantic_search', 'value') ) def update_graphs(value): stock_socialmediasemanticdata = get_data_from_db(db_table_name='socialmediadata.stock_socialmediasemanticdata') semantic_df = get_similar_sentences(stock_socialmediasemanticdata,[value]) def f(row): l = "[{0}]({0})".format(row["link"]) return l semantic_df["link"] = semantic_df.apply(f, axis=1) print("Semantic Searchhh") print(semantic_df.head()) tweetstream_table = dash_table.DataTable( id='datatable-output1', style_data={ 'whiteSpace': 'normal', # 'height': 'auto' }, data=semantic_df.to_dict('records'), columns=[{'id': c, 'name': c, 'type': 'text', 'presentation': 'markdown'} for c in semantic_df.columns], # columns=[{'name': 'Link', 'id': 'Link', 'type': 'text', 'presentation': 'markdown'}], #filter_action='native', sort_action='native', css=[ {'selector': '.row-1', 'rule': 'background: #E6A000;'} ], page_size=4, style_header={'backgroundColor': '#E6A000', 'fontWeight': 'bold', 'border': '1px solid black', 'font_size': '18px'}, style_cell={'font_size': '11px', 'whiteSpace': 'normal', 'height': 'auto', 'padding': '15px'}, # export_format='csv', export_format='csv' ) return tweetstream_table @app.callback(Output('stocktwits-output', 
'children'), [Input('dropdown', 'value')]) def get_data_table2(option): df2 = getstocktwitsdata(option) #print('---STOCKTWITS---') #print(df2) df = df2[['date','time','text','sentiment']] df.columns = ['UserTweetDate', 'Time', 'Tweet', 'sentiment'] filtereddf = df.copy() filteredtable = dash_table.DataTable( id='datatable-output', style_data={ 'whiteSpace': 'normal', 'height': 'auto', 'lineHeight': '15px' }, data=filtereddf.to_dict('records'), css=[ { 'selector': '.row-1', 'rule': 'background: #E6A000;' } ], columns=[{'id': c, 'name': c} for c in filtereddf.columns], page_size=8, style_header={'backgroundColor': '#E6A000', 'fontWeight': 'bold', 'border': '1px solid black', 'font_size': '18px'}, style_cell={'font_size': '11px', 'font_family':"Arial",'whiteSpace': 'normal', 'height': 'auto', 'padding': '15px' }, #export_format='csv', export_format='csv', export_headers='display', style_data_conditional=[ { 'if': {'row_index': 'odd'}, 'backgroundColor': 'rgb(248, 248, 248)' } ], style_cell_conditional=[ {'if': {'column_id': 'UserTweetDate'}, 'width': '10%', 'textAlign': 'center'}, {'if': {'column_id': 'Time'}, 'width': '10%', 'textAlign': 'center'}, {'if': {'column_id': 'Tweet'}, 'width': '55%', 'textAlign': 'left'}, {'if': {'column_id': 'sentiment'}, 'width': '15%', 'textAlign': 'left'}, ] ) return filteredtable @app.callback( Output('main_div', 'children'), [Input('tabs-styled-with-inline', 'value')]) def update_graph(tab_btn): if tab_btn == "socialmedia": return SOCIALMEDIA_BODY elif tab_btn == "home": return HOME_BODY elif tab_btn == "news": return NEWS_BODY if __name__ == "__main__": app.run_server(port = 8053)
balasubramaniamniit/StockMarket-Insights
app.py
app.py
py
30,384
python
en
code
0
github-code
6
[ { "api_name": "dash.Dash", "line_number": 23, "usage_type": "call" }, { "api_name": "dash_bootstrap_components.themes", "line_number": 23, "usage_type": "attribute" }, { "api_name": "tweepy.OAuthHandler", "line_number": 60, "usage_type": "call" }, { "api_name": "t...
43785018579
from __future__ import print_function import argparse import sys import rospy import os import numpy as np from geometry_msgs.msg import Twist import time import cv2 import tensorflow as tf pre_path = os.path.abspath('../') sys.path.append(pre_path) from utils import imagezmq # ============================== Pretrained Model ========================== meta_path = './Unitedmap_0906_reload-0/RobotBrain/model-12063494.cptk.meta' ckpt_path = './Unitedmap_0906_reload-0/RobotBrain/' # ========================================================================== config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True) config.gpu_options.allow_growth = True # RL model class class RL_Model: # for testing, linear speed would be set to 0.2, while angular speed set to 0.3 or 0.4186 def __init__(self, dir, linear_speed=0.2): # ------------------------------------------------ # dir: RL model's directory # linear_speed: linear speed (x-axis) for AGV, default is 0.2 # ------------------------------------------------ tf.reset_default_graph() self.sess = tf.Session() self.saver = tf.train.import_meta_graph(path) graph = tf.get_default_graph() # get variable by name from tensorflow graph self.visual_in = graph.get_tensor_by_name('visual_observation_0:0') self.action = graph.get_tensor_by_name('action:0') self.action_mask = graph.get_tensor_by_name('action_masks:0') self.action_pub = rospy.Publisher('twitch', Twist, queue_size=1) self.linear_speed = linear_speed self.angular_speed = angular_speed self.move_command = Twist() # create mask to enable three action self.mask = np.array([[1, 1, 1]]) self.saver.restore(self.sess, tf.train.latest_checkpoint(ckpt_path)) def restore_and_run(self, img): # ---------------------------------------------------- # img_test: input image from segmentation module # ---------------------------------------------------- # initialize parameters self.move_command.angular.z = 0 self.move_command.linear.x = self.linear_speed # for 
multinomial sampling, using " act = tf.multinomial(self.action, 1) " and revise session in next row prob = self.sess.run([self.action], feed_dict = {self.visual_in:img, self.action_mask:self.mask}) direction = np.argmax(prob) # 3-Action # Keep moving forward if direction == 0 : self.move_command.angular.z = 0 # Turn Left elif direction == 1: self.move_command.angular.z = 1 # Turn Right elif direction == 2: self.move_command.angular.z = -1 # publish Twist self.action_pub.publish(self.move_command) # External class for RL model class PolicyModel: def __init__(self): self.RLmodel = RL_Model(meta_path) self.last_time = time.time() def callback(self,resize_image): # -------------------------------------------------- # resize_image: image received from segmentation module # -------------------------------------------------- self.RLmodel.restore_and_run(resize_image) self.last_time = time.time() # RL model's test function def test(self): for i in range(100): fake_image = np.zeros((1, 80, 120, 3)) self.RLmodel.restore_and_run(fake_image) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--port', type=str, help='connected port', dest='port', default='5555') args = parser.parse_args() rospy.init_node('control_model', anonymous=True) hub = imagezmq.ImageHub(open_port='tcp://*:%s' %(args.port)) cm = PolicyModel() while True: name, image = hub.recv_image() # recieve image start = time.time() cv2.imshow("Image", image) image = [image] cm.callback(image) # process the image print(time.time()-start) hub.send_reply() # get ready for next image cv2.waitKey(1)
KaiChen1008/Sim-to-Real-Virtual-Guidance-for-Robot-Navigation
control_policy_module/control_policy.py
control_policy.py
py
3,752
python
en
code
72
github-code
6
[ { "api_name": "os.path.abspath", "line_number": 13, "usage_type": "call" }, { "api_name": "os.path", "line_number": 13, "usage_type": "attribute" }, { "api_name": "sys.path.append", "line_number": 14, "usage_type": "call" }, { "api_name": "sys.path", "line_num...
23261593645
import pygame import math import random #CONST SCREENWIDTH=1280 SCREENHEIGHT=720 # pygame setup pygame.init() screen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT)) clock = pygame.time.Clock() running = True dt = 0 FPS=60 pygame.display.set_caption('TANK') #variables x_target=[] y_target=[] checkVar =1 showHelp=False #functions #Start screen isStartingScreen=True def start_Screen(): screen.fill('white') font = pygame.font.Font('fonts/TitleFont.ttf', 150) sfont = pygame.font.Font('fonts/TitleFont.ttf', 50) title = font.render('TANKS',True,'black','white') titleRect = screen.get_rect(center=(SCREENWIDTH//2+SCREENWIDTH//3+30,SCREENHEIGHT//2+SCREENHEIGHT//3)) subtext= sfont.render("PRESS 'E' TO CONTINUE", True,'black','white') subRect = screen.get_rect(center=(SCREENWIDTH//2+SCREENWIDTH//3,SCREENHEIGHT//2+SCREENHEIGHT//3+200)) screen.blit(title, titleRect) screen.blit(subtext, subRect) class Target: def __init__(self,x,y): self.x = int(x) self.y = int(y) #spawnTarget def spawnTarget(self): for i in range(10): x= random.randint(SCREENWIDTH//2,SCREENWIDTH*0.9) y = random.randint(50,SCREENHEIGHT*0.8) x_target.append(x) y_target.append(y) #drawTarget def drawTarget(self): targetIMG = pygame.image.load('images/target.png').convert_alpha() for i in range(0,10): screen.blit(targetIMG,(x_target[i],y_target[i])) #classes class Player: KEYS = pygame.key.get_pressed() def __init__(self,x,y,alpha,g,V0,t,dt,isShoted,score,bx,by): self.x = int(x) self.y = int(y) self.alpha =int(alpha) self.g = g self.V0 = V0 self.t = t self.dt = dt self.isShoted = isShoted self.score = score self.bx = bx self.by = by def draw(self): #tank_body tank= pygame.image.load('images/Tank.png').convert_alpha() tank= pygame.transform.scale(tank, (65, 40)) screen.blit(tank, (self.x,self.y)) def move(self): if(self.isShoted==False): if(KEYS[pygame.K_a]): self.x-=3 if(KEYS[pygame.K_d]): self.x+=3 def tankTurret(self): global turretPointX,turretPointY if(self.isShoted==False): if(KEYS[pygame.K_w]): 
self.alpha+=1 if(KEYS[pygame.K_s]): self.alpha-=1 if(self.alpha<=0): self.alpha=0 if(self.alpha>=90): self.alpha=90 theta = math.radians(self.alpha) end_x = (self.x+35) + 40 * math.cos(theta) end_y = (self.y+5) - 40 * math.sin(theta) pygame.draw.line(screen, 'black', (self.x+35,self.y+5), (end_x,end_y),5) pygame.draw.circle(screen, 'black', (end_x,end_y), 2) pygame.draw.circle(screen, 'black', (self.x+35,self.y+5), 2) turretPointX,turretPointY=end_x,end_y def wallCollision(self): if(self.x<=0): self.x=0 if(self.x>=231): self.x=231 def shoot(self): #meth needed for this math XD theta = math.radians(self.alpha) V0x = self.V0 * math.cos(theta) V0y = self.V0 * math.sin(theta) self.bx= turretPointX+V0x *self.t self.by=turretPointY- V0y*self.t+0.5*self.g+self.t**2 pygame.draw.circle(screen, 'black', (int(self.bx),int(self.by)), 5) if(self.bx<0 or self.bx>1280 or self.by>720): self.bx=(self.x+35) self.by=(self.y+5) V0x = self.V0 * math.cos(theta) V0y = self.V0 * math.sin(theta) self.isShoted=False self.t=0 def checkColl(self): for i in range(10): if(self.bx>=x_target[i] and self.bx<=x_target[i]+50 and self.by>=y_target[i] and self.by<=y_target[i]+50 and self.bx<=x_target[i]+50 and self.by<=y_target[i]+50): self.score+=1 x_target[i]=2000 y_target[i]=2000 class gameGUI: def __init__(self,x,y): self.x = int(x) self.y = int(y) def draw(self): ground = pygame.image.load('images/ground.png').convert_alpha() ground = pygame.transform.scale(ground, (300,300)) screen.blit(ground, (0,SCREENHEIGHT*0.62)) font = pygame.font.Font('fonts/TitleFont.ttf', 30) Q = pygame.image.load('images/Q.png').convert_alpha() Q = pygame.transform.scale(Q, (50,50)) screen.blit(Q, (SCREENWIDTH*0.75,SCREENHEIGHT*0.9+5)) Qtxt = font.render("PRESS FOR HELP ", True, 'black','white') QtxtRect = screen.get_rect(center=(SCREENWIDTH+SCREENWIDTH-900,SCREENHEIGHT+SCREENHEIGHT-420)) screen.blit(Qtxt, QtxtRect) angle = font.render("ANGLE "+str(player.alpha), True, 'black','white') angleRect = 
screen.get_rect(center=(SCREENWIDTH//2+20,SCREENHEIGHT+SCREENHEIGHT-550)) screen.blit(angle, angleRect) power = font.render("POWER "+str(player.V0), True, 'black','white') powerRect = screen.get_rect(center=(SCREENWIDTH//2+20,SCREENHEIGHT+SCREENHEIGHT-500)) screen.blit(power, powerRect) scoreText = font.render("SCORE "+str(player.score), True, 'black','white') scoreRect = screen.get_rect(center=(SCREENWIDTH//2+20,SCREENHEIGHT+SCREENHEIGHT-450)) screen.blit(scoreText, scoreRect) def helpMenu(self): pUD = pygame.image.load('images/powerUPpowerDown.png').convert_alpha() pUD = pygame.transform.scale(pUD, (260,130)) screen.blit(pUD, (0,SCREENHEIGHT*0.1)) font = pygame.font.Font('fonts/TitleFont.ttf', 30) ptext = font.render("POWER +1 ", True, 'black','white') ptextRect = screen.get_rect(center=(SCREENWIDTH-530,SCREENHEIGHT-268)) screen.blit(ptext, ptextRect) ptext2 = font.render("POWER -1 ", True, 'black','white') ptextRect2 = screen.get_rect(center=(SCREENWIDTH-530,SCREENHEIGHT-213)) screen.blit(ptext2, ptextRect2) wasd = pygame.image.load('images/WASD.png').convert_alpha() wasd = pygame.transform.scale(wasd, (260,130)) screen.blit(wasd, (30,SCREENHEIGHT*0.2+50)) WASDT1 = font.render("W,S- ANGLE", True, 'black','white') ptextRect = screen.get_rect(center=(SCREENWIDTH-450,SCREENHEIGHT-140)) screen.blit(WASDT1, ptextRect) WASDT2 = font.render("A,D- MOVE", True, 'black','white') ptextRect = screen.get_rect(center=(SCREENWIDTH-450,SCREENHEIGHT-90)) screen.blit(WASDT2, ptextRect) #class variables player = Player(50, SCREENHEIGHT*0.6,0,9.81,10,0,0.01,False,0,0,0) gui = gameGUI(0,SCREENHEIGHT-200) target= Target(0, 0) while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False #Controls KEYS = pygame.key.get_pressed() if(KEYS[pygame.K_ESCAPE]): running=False if(KEYS[pygame.K_q]): showHelp= not showHelp if(KEYS[pygame.K_e]): isStartingScreen=False if(KEYS[pygame.K_SPACE]): player.isShoted = True if(player.isShoted==False): 
if(KEYS[pygame.K_UP]): player.V0 +=1 if(KEYS[pygame.K_DOWN]): player.V0 -=1 if(player.V0<=10): player.V0=10 if(player.V0>=50): player.V0=50 #GAME section if(isStartingScreen==True): start_Screen() else: screen.fill('white') gui.draw() target.spawnTarget() target.drawTarget() if(player.score/10==checkVar and player.score!=0): x_target.clear() y_target.clear() target.spawnTarget() target.drawTarget() checkVar+=1 if(showHelp==True): gui.helpMenu() player.checkColl() player.draw() player.move() player.tankTurret() player.wallCollision() if(player.isShoted): player.shoot() player.t+=0.2 #pygame essentials pygame.display.flip() dt = clock.tick(FPS) / 1000 pygame.quit()
stefanstojkoviic/Tenkici
game.py
game.py
py
8,210
python
en
code
0
github-code
6
[ { "api_name": "pygame.init", "line_number": 10, "usage_type": "call" }, { "api_name": "pygame.display.set_mode", "line_number": 11, "usage_type": "call" }, { "api_name": "pygame.display", "line_number": 11, "usage_type": "attribute" }, { "api_name": "pygame.time.C...
27251458016
""" 文件名: Code/Chapter09/C05_FastText/main.py 创建时间: 2023/7/22 10:31 上午 作 者: @空字符 公众号: @月来客栈 知 乎: @月来客栈 https://www.zhihu.com/people/the_lastest """ import logging from gensim.models import KeyedVectors import fasttext from fasttext.util import reduce_model import sys import os sys.path.append('../../') from utils import DATA_HOME def load_fasttext_model(): path_to_model = os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.300.bin') # path_to_model = os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.300.vec.gz') # model = KeyedVectors.load_word2vec_format(path_to_model, binary=False) ft = fasttext.load_model(path_to_model) logging.info(f"词向量的维度: {ft.get_dimension()}") logging.info(f"中国: {ft.get_word_vector('中国')}") logging.info(f"与中国最相似的5个词为: {ft.get_nearest_neighbors('中国', k=5)}") logging.info(ft.get_subwords("跟我一起学深度学习")) reduce_model(ft, 100) # 降维 logging.info(f"词向量的维度: {ft.get_dimension()}") path_to_model = os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.100.bin') ft.save_model(path_to_model) def get_get_analogies(): ft = fasttext.load_model(os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.300.bin')) logging.info('有凤来仪' in ft.words) logging.info(f"与东坡居士最相似的5个词为: {ft.get_nearest_neighbors('有凤来仪', k=5)}") logging.info(ft.get_analogies("柏林", "德国", "法国", k=5)) # False # 与有凤来仪最相似的5个词为: [(0.457183, 'Viscosity'), (0.454175, 'viscosity'), (0.361536, 'thb'), (0.343013, 'kg/m2'), (0.335760, 'Dirham')] # [(0.743810, '巴黎'), (0.583832, '里昂'), (0.555544, '法國'), (0.547275, '斯特拉斯堡'), (0.536760, '坎城')] ft = fasttext.load_model(os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.en.300.bin')) logging.info('accomodtion' in ft.words) logging.info(f"与accomodtion最相似的5个词为: {ft.get_nearest_neighbors('accomodation', k=5)}") logging.info(ft.get_analogies("berlin", "germany", "france", k=5)) # False # 与accomodtion最相似的5个词为: [(0.858731, 'accomadation'), (0.828016, 'acommodation'), (0.822644, 'accommodation'), (0.821873, 'accomdation'), (0.793275, 
'Accomodation')] # [(0.730373, 'paris'), (0.640853, 'france.'), (0.639331, 'avignon'), (0.631667, 'paris.'), (0.589559, 'montpellier')] if __name__ == '__main__': # load_fasttext_model() get_get_analogies()
moon-hotel/DeepLearningWithMe
Code/Chapter09/C05_FastText/main.py
main.py
py
2,582
python
en
code
116
github-code
6
[ { "api_name": "sys.path.append", "line_number": 16, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 16, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 21, "usage_type": "call" }, { "api_name": "utils.DATA_HOME", "lin...
3037264375
import os import numpy as np import pandas as pd from PIL import Image import matplotlib.pyplot as plt import seaborn as sns; sns.set() from sklearn.manifold import TSNE from sklearn.manifold import Isomap from sklearn.decomposition import PCA from sklearn.cluster import KMeans from matplotlib import offsetbox #directory = './avatars' directory = '/home/kayla/pylearn/manifold/cupcakes' pics = [] data = pd.DataFrame() count = 0 for filename in os.listdir(directory): f = os.path.join(directory, filename) # checking if it is a file if os.path.isfile(f): # creating a object pics.append(Image.open(f)) pics[count] = pics[count].resize((100,100)) image = pd.Series(np.asarray(pics[count]).ravel()) data = pd.concat([data,image.to_frame().T],ignore_index=True) count += 1 dims = np.asarray(pics[0]).shape data = data.fillna(data.mean()) model = Isomap(n_components=10) proj = model.fit_transform(data) kmeans = KMeans(n_clusters=10) kmeans.fit(proj) clusters = kmeans.predict(proj) data['cluster'] = clusters #avg_data = data.groupby(by=['cluster']).sample().drop(['cluster'], \ # axis=1) avg_data = data.groupby(by=['cluster']).median() avg_image = [] for i in avg_data.index.astype('int'): avg_image.append(avg_data.loc[i].to_numpy().reshape(dims).astype('int')) fig, ax = plt.subplots(2,5,figsize=(8,3)) for axi, img in zip(ax.flat, avg_image): axi.set(xticks=[],yticks=[]) axi.imshow(img,interpolation='nearest') plt.show()
estimatrixPipiatrix/decision-scientist
pythonCode/kmeans/kmeans_avatars.py
kmeans_avatars.py
py
1,534
python
en
code
0
github-code
6
[ { "api_name": "seaborn.set", "line_number": 6, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "call" }, { "api_name": "os.listdir", "line_number": 19, "usage_type": "call" }, { "api_name": "os.path.join", "line_numbe...
30789833021
from django.contrib import messages from django.shortcuts import render import random import smtplib from.models import Onlinepayment,Offlinepayment,Onlineapproval,Offlineapproval,Send def home(request): return render(request,'f1.html') def tv(request): return render(request,'tv.html') def mobile(request): return render(request,'mobile.html') def theatre(request): return render(request,'hometheatre.html') def air(request): return render(request,'airconditioner.html') def buy(request): if request.method == 'POST': return render(request,'otpver.html') else: return render(request,'otpver.html') def offpay(request): if request.method == 'POST': return render(request, 'offlinepayment.html') else: return render(request,'offlinepayment.html') def onpay(request) : if request.method == 'POST': return render(request,'onlinepayment.html') else: return render(request,'onlinepyment.html') def onlineok(request): if request.method == 'POST': name = request.POST['name'] address = request.POST['address'] city = request.POST['city'] ph = request.POST['ph'] mail = request.POST['mail'] item = request.POST['item'] imo = request.POST['imo'] credit = request.POST['credit'] Onlineapproval(name=name, address=address, city=city, ph=ph, mail=mail, item=item, imo=imo, credit=credit).save() msg = name + ", your order has been registered you will get your product within " \ "one week. Your payment has been received. 
for any quiries contact-xxxx " s = smtplib.SMTP('smtp.gmail.com', 587) s.starttls() s.login("nimishachandran98@gmail.com", "Nimishakc@98") s.sendmail("nimishachandran98@gmail.com",mail, msg) s.quit() messages.success(request, 'SUCCESSFULLY ORDERED YOU WILL RECIEVE AN EMAIL SHORTLY') return render(request,'onlinepayment.html') else: messages.success(request, 'REGISTRATION FAILED TRY AGAIN LATER') return render(request,'onlinepayment.html') def offlineok(request): if request.method == 'POST': name = request.POST['name'] address = request.POST['address'] city = request.POST['city'] ph = request.POST['ph'] mail = request.POST['mail'] item = request.POST['item'] imo = request.POST['imo'] Offlineapproval(name=name, address=address, city=city, ph=ph, mail=mail, item=item, imo=imo).save() msg = name + ", your order has been registered you will get your product within " \ "one week. Your have opted offline payment system. for any quiries contact-xxxx " s = smtplib.SMTP('smtp.gmail.com', 587) s.starttls() s.login("nimishachandran98@gmail.com", "Nimishakc@98") s.sendmail("nimishachandran98@gmail.com", mail, msg) s.quit() messages.success(request, 'SUCCESSFULLY ORDERED YOU WILL RECIEVE AN EMAIL SHORTLY') return render(request,'offlinepayment.html') else: messages.success(request, 'REGISTRATION FAILED TRY AGAIN LATER') return render(request,'offlinepayment.html') def staff(request): return render(request,'staff.html') def app(request): if request.method == 'POST': username = request.POST['username'] password = request.POST['password'] if username == 'app' and password == 'app': return render(request, 'dashboard.html') else: return render(request, 'staff.html') def onappro(request): x = Onlineapproval.objects.all() return render(request, 'onlineapproval.html', {'all': x}) def offapro(request): x = Offlineapproval.objects.all() return render(request, 'offlineapproval.html', {'all': x}) def onpays(request): if request.method == 'POST': return render(request,'onlinestatus.html') 
else: return render(request, 'onlinestatus.html') def offpays(request): if request.method == 'POST': return render(request,'offlinestatus.html') else: return render(request, 'offlinestatus.html') val=None def send(request): if request.method == 'POST': email = request.POST['email'] x = str(random.randint(1000,9999)) c=x global val def val(): return c s = smtplib.SMTP('smtp.gmail.com', 587) s.starttls() s.login("nimishachandran98@gmail.com", "Nimishakc@98") s.sendmail("nimishachandran98@gmail.com",email, x) s.quit() messages.success(request, 'OTP SENT CHECK YOUR MAIL') return render(request,'otpver.html') else: return render(request,'otpver.html') def verify(request): if request.method == 'POST': otpcheck = request.POST['otpcheck'] ok=val() if otpcheck == ok: messages.success(request, 'LOGGED IN') return render(request,'registration.html') else: messages.success(request, 'FAILED TO VERIFY OTP') return render(request,'otpver.html') def homea(request): return render(request,'dashboard.html') def logout(request): return render(request,'staff.html')
Nimishakc/NimishaFinal
eapp/views.py
views.py
py
5,321
python
en
code
0
github-code
6
[ { "api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call" }, { "api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call" }, { "api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call" }, { "api_name"...
2668239466
import multiprocessing from time import ctime def consumer(input_q): print("Into consumer:",ctime()) while True: # 处理项 item = input_q.get() print ("pull",itemm,"out of q")#此处代替为有用的工作 input_q.tast_done()#发出信号通知任务完成 print("Out of consumer:",ctime())##此句末执行,因为q.join()信号后,主进程启动,为等到print此语句完 def prodeuce(sequence,out_q): print("Inpo producer ",ctime()) for item in sequence: output_q.put(item) print("put",item,"into q") print("Out of producer",ctime()) if __name__ == '__main__': q = multiprocessing.JoinableQueue() #运行消费者进程 cons_p = multiprocessing.Process(target= consumer, args = (q,1)) cons_p.daemon = True cons_p.start() #在生产多个项,sqeuence代表要发送个消费者的项的序列 #在实践中,这可能是生成器的输出或通过一些其他生产方式出来 sequence = [1,2,3,4] prodeuce(sequence,q) q.join()
Sssssww/pycharm
多线程/22.py
22.py
py
1,065
python
en
code
0
github-code
6
[ { "api_name": "time.ctime", "line_number": 6, "usage_type": "call" }, { "api_name": "time.ctime", "line_number": 12, "usage_type": "call" }, { "api_name": "time.ctime", "line_number": 16, "usage_type": "call" }, { "api_name": "time.ctime", "line_number": 20, ...
10012294494
#!/usr/bin/python # coding=utf-8 import paramiko import xlrd import time import os import handle_file server_info_list = handle_file.read_excel_xlsx('documents/server_info.xlsx', 'Sheet1') Host = server_info_list[0][0] Port = server_info_list[0][1] Username = server_info_list[0][2] Password = server_info_list[0][3] def ssh_exec_cmd(): '''ssh远程登录:windows客户端连接Linux服务器,并输入指令''' ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #连接Linux服务器 #ssh.connect('10.20.24.89', 22, 'root', 'Cfcs2@07380') ssh.connect(Host, Port, Username, Password) #执行Linux相关命令 #不管环境上是否存在kafka进程,都kill -9 kafka进程 stdin, stdout, stderr = ssh.exec_command("ps -ef | grep kafka | grep -v grep | awk '{print $2}' | xargs kill -9") #不管环境上是否存在zookeeper进程,都kill -9 zookeeper进程 stdin, stdout, stderr = ssh.exec_command("ps -ef | grep zookeeper | grep -v grep | awk '{print $2}' | xargs kill -9") #不管环境上是否存在redis进程,都kill -9 redis进程 stdin, stdout, stderr = ssh.exec_command("ps -ef | grep redis | grep -v grep | awk '{print $2}' | xargs kill -9") #不管环境上是否存在nginx进程,都kill -9 nginx进程 stdin, stdout, stderr = ssh.exec_command("ps -ef | grep nginx | grep -v grep | awk '{print $2}' | xargs kill -9") """ 解压zookeeper安装包,并启动zookeeper服务 """ stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf zookeeper " ) stdin, stdout, stderr = ssh.exec_command("cd /usr/local && unzip -o zookeeper-3.4.12.zip -d /usr/local ") time.sleep(3) stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv zookeeper-3.4.12 zookeeper && chmod -R 777 zookeeper ") stdin, stdout, stderr = ssh.exec_command("source .bash_profile; sh /usr/local/zookeeper/bin/zkServer.sh start ") time.sleep(3) """ 解压kafka安装包,并修改kafka配置文件,然后启动kafka服务 """ stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf kafka ") stdin, stdout, stderr = ssh.exec_command("cd /usr/local && unzip -o kafka_2.11-0.11.0.3.zip -d /usr/local") time.sleep(3) stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv 
kafka_2.11-0.11.0.3 kafka && chmod -R 777 kafka") #修改kafka配置文件 # stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i 's#zookeeper.connect=.*#zookeeper.connect=10.20.158.33:2181#g' server.properties") # stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i 's#listeners=.*#listeners=PLAINTEXT://10.20.158.33:9092#g' server.properties") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i " + "'" + "s#zookeeper.connect=.*#zookeeper.connect=" + Host + ":2181#g" + "'" + " server.properties") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i " + "'" + "s#listeners=.*#listeners=PLAINTEXT://" + Host + ":9092#g" + "'" + " server.properties") #启动kafka服务 stdin, stdout, stderr = ssh.exec_command("source .bash_profile; sh /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties") time.sleep(3) """ 解压redis安装包,并修改redis.conf配置文件,然后启动redis服务 """ stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf redis ") print("删除redis文件夹") stdin, stdout, stderr = ssh.exec_command("cd /usr/local && tar xzvf redis-4.0.14.tar.gz -C /usr/local" ,get_pty=True) time.sleep(3) print("成功解压redis包") stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv redis-4.0.14 redis && chmod -R 777 redis") print("redis文件夹赋予权限777") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && make > log_make.log", get_pty=True) time.sleep(100) print("make命令执行结束") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && make install PREFIX=/usr/local/redis > log_makeinstall.log", get_pty=True) time.sleep(100) print("make install命令执行结束") # 修改redis.conf配置文件 stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && sed -i " + "'" + "s#bind 127.0.0.1#bind " +'"' + Host +'"' + "#g' redis.conf", get_pty=True) time.sleep(5) stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && sed -i 's/# requirepass .*/requirepass taredis/g' redis.conf", 
get_pty=True) time.sleep(5) print("修改redis.conf文件成功") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis/bin ; ./redis-server ../redis.conf >> redis.log 2>&1 &") time.sleep(5) print("启动redis服务成功") """ 解压nginx安装包 """ stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf nginx") stdin, stdout, stderr = ssh.exec_command("cd /usr/local && tar xzvf nginx-1.12.2.tar.gz -C /usr/local", get_pty=True) time.sleep(3) stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv nginx-1.12.2 nginx && chmod -R 777 nginx") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx; ./configure --prefix=/usr/local/nginx --conf-path=/usr/local/nginx/nginx.conf > log_configure.log", get_pty=True) time.sleep(3) err = stderr.readlines() #out = stdout.readlines() print("./configure命令输出结果打印开始:") if (err): print('error:') print(err) # else: # print('out:') # print(out) print("./configure命令输出结果打印结束!") print("ending") stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx; make > log_make.log", get_pty=True) time.sleep(30) stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx; make install > log_make_install.log", get_pty=True) time.sleep(30) #将环境变量/etc/profile中删除包含的NGINX_HOME变量的行 stdin, stdout, stderr = ssh.exec_command("sed -i '/export NGINX_HOME=/d' /etc/profile") #将环境变量/etc/profile中使用到$NGINX_HOME变量的地方删除,主要目的是删除;$NGINX_HOME/sbin字符串 stdin, stdout, stderr = ssh.exec_command("sed -i 's#;$NGINX_HOME/sbin##g' /etc/profile") #在环境变量/etc/profile中添加NGINX_HOME变量 stdin, stdout, stderr = ssh.exec_command("sed -i '/export PATH=/i\export NGINX_HOME=/usr/local/nginx' /etc/profile") #在环境变量PATH路径末尾添加;$NGINX_HOME/sbin内容 stdin, stdout, stderr = ssh.exec_command("sed -i 's#export PATH=.*#&;$NGINX_HOME/sbin#g' /etc/profile") #本地上传一份nginx.conf模板,并修改里面的IP信息 ############################### sftp_upload_file() ############################### #修改nginx.conf配置文件 stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx/conf; sed -i " + "'" + "s#server 10.20.24.89#server 
" + Host + "#g' nginx.conf", get_pty=True) time.sleep(5) stdin, stdout, stderr = ssh.exec_command("source /etc/profile; /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf", get_pty=True) time.sleep(5) err = stderr.readlines() #out = stdout.readlines() if (err): print('error:') print(err) # else: # print('out:') # print(out) print("ending") ssh.close() def sftp_upload_file(): ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #ssh.connect(Host, Port, Username, Password) #连接Linux服务器 ssh.connect(Host, Port, Username, Password) transport = paramiko.Transport((Host, Port)) transport.banner_timeout = 10 transport.connect(username=Username, password=Password) sftp = paramiko.SFTPClient.from_transport(transport) try: sftp.put('./documents/nginx.conf', '/usr/local/nginx/conf/nginx.conf') print("上传成功") except Exception as e: print(e) transport.close() if __name__ == '__main__': ssh_exec_cmd()
July666/python_workspace_component
main.py
main.py
py
8,134
python
en
code
0
github-code
6
[ { "api_name": "handle_file.read_excel_xlsx", "line_number": 9, "usage_type": "call" }, { "api_name": "paramiko.SSHClient", "line_number": 17, "usage_type": "call" }, { "api_name": "paramiko.AutoAddPolicy", "line_number": 18, "usage_type": "call" }, { "api_name": "...
26302348125
import frappe, requests from frappe.model.document import Document class DjangoProperty(Document): def db_insert(self): d = self.get_valid_dict(convert_dates_to_str=True) res = requests.post(f'{self.get_url()}/propertycreate/', data=dict(d)) return res.json() def load_from_db(self): print(self.doctype, self.name, 'demonstration\n\n\n') if(self.name!=self.doctype): res = requests.get(f'{self.get_url()}/propertydetail/{self.name}/') if(res.status_code==200): for key, value in res.json()[0].items(): setattr(self, key, value) def db_update(self): d = self.get_valid_dict(convert_dates_to_str=True) # print(type(d), type(dict(d)), '\n\n\n') res = requests.post(f'{self.get_url()}/propertycreate/', data=dict(d)) return res.json() def get_list(self, args): # print(args, 'ARGS, \n\n\n') url = f"{self.get_url()}/propertylist/" res = requests.get(url) if(res.status_code==200): return res.json() return json.dumps([]) def get_url(self): return "http://192.168.1.156:8000/property"
mymi14s/estate_app
estate_app/estate_app/doctype/djangoproperty/djangoproperty.py
djangoproperty.py
py
1,046
python
en
code
16
github-code
6
[ { "api_name": "frappe.model.document.Document", "line_number": 4, "usage_type": "name" }, { "api_name": "requests.post", "line_number": 8, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 15, "usage_type": "call" }, { "api_name": "requests.post...
13301338601
from bson.objectid import ObjectId from flask import Blueprint, jsonify from assets.extensions import mongo from assets.decors import errorhandler, tokenrequired accounts = Blueprint("accounts", __name__, url_prefix="/accounts") # STATUS @accounts.route("/<account_id>/status", methods=["GET"]) @tokenrequired @errorhandler def get_status(current_user, account_id): # query the user collection for account status accounts = mongo.db.users.find_one({"_id": ObjectId(current_user["_id"]["$oid"])})[ "Accounts"] if account_id in accounts: status = accounts[account_id]["Active"] return jsonify({"account_status": status, "account_id": account_id}), 200 return jsonify({"error": "invalid account id", "account_id": account_id}), 400 @accounts.route("/<account_id>/status", methods=["PUT"]) @tokenrequired @errorhandler def update_status(current_user, account_id): resp = mongo.db.users.update_one({"_id": ObjectId(current_user["_id"]["$oid"])}, [{"$set": {f"Accounts.{account_id}.Active": {"$eq": [False, f"$Accounts.{account_id}.Active"]}}}]) # if update did not occur because no account id found if resp.matched_count == 0 or resp.modified_count == 0: return jsonify({"error": "failed to update status", "account_id": account_id}), 400 return jsonify({"success": "status updated", "account_id": account_id}), 201
TreyThomas93/tos-python-web-app-server
api/accounts/__init__.py
__init__.py
py
1,434
python
en
code
0
github-code
6
[ { "api_name": "flask.Blueprint", "line_number": 6, "usage_type": "call" }, { "api_name": "assets.extensions.mongo.db.users.find_one", "line_number": 16, "usage_type": "call" }, { "api_name": "assets.extensions.mongo.db", "line_number": 16, "usage_type": "attribute" }, ...
8384623961
from __future__ import print_function from __future__ import absolute_import import sys import math import heapq import gzip import warnings from xml.sax import handler, parse from copy import copy from collections import defaultdict from itertools import chain import sumolib from . import lane, edge, netshiftadaptor, node, connection, roundabout # noqa from .connection import Connection class TLS: """Traffic Light Signal for a sumo network""" def __init__(self, id): self._id = id self._connections = [] self._maxConnectionNo = -1 self._programs = {} def addConnection(self, inLane, outLane, linkNo): self._connections.append([inLane, outLane, linkNo]) if linkNo > self._maxConnectionNo: self._maxConnectionNo = linkNo def getConnections(self): return self._connections def getID(self): return self._id def getLinks(self): links = {} for the_connection in self._connections: if the_connection[2] not in links: links[the_connection[2]] = [] links[the_connection[2]].append(the_connection) return links def getEdges(self): edges = set() for c in self._connections: edges.add(c[0].getEdge()) return edges def addProgram(self, program): self._programs[program._id] = program def removePrograms(self): self._programs.clear() def toXML(self): ret = "" for p in self._programs: ret = ret + self._programs[p].toXML(self._id) return ret def getPrograms(self): return self._programs class Phase: def __init__(self, duration, state, minDur=-1, maxDur=-1, next=None, name=""): self.duration = duration self.state = state self.minDur = minDur # minimum duration (only for actuated tls) self.maxDur = maxDur # maximum duration (only for actuated tls) self.next = [] if next is None else next self.name = name def __repr__(self): name = "" if self.name == "" else ", name='%s'" % self.name next = "" if len(self.next) == 0 else ", next='%s'" % self.next return ("Phase(duration=%s, state='%s', minDur=%s, maxDur=%s%s%s" % (self.duration, self.state, self.minDur, self.maxDur, name, next)) class TLSProgram: 
def __init__(self, id, offset, type): self._id = id self._type = type self._offset = offset self._phases = [] self._params = {} def addPhase(self, state, duration, minDur=-1, maxDur=-1, next=None, name=""): self._phases.append(Phase(duration, state, minDur, maxDur, next, name)) def toXML(self, tlsID): ret = ' <tlLogic id="%s" type="%s" programID="%s" offset="%s">\n' % ( tlsID, self._type, self._id, self._offset) for p in self._phases: minDur = '' if p.minDur < 0 else ' minDur="%s"' % p.minDur maxDur = '' if p.maxDur < 0 else ' maxDur="%s"' % p.maxDur name = '' if p.name == '' else ' name="%s"' % p.name next = '' if len(p.next) == 0 else ' next="%s"' % ' '.join(map(str, p.next)) ret += ' <phase duration="%s" state="%s"%s%s%s%s/>\n' % ( p.duration, p.state, minDur, maxDur, name, next) for k, v in self._params.items(): ret += ' <param key="%s" value="%s"/>\n' % (k, v) ret += ' </tlLogic>\n' return ret def getPhases(self): return self._phases def getType(self): return self._type def setParam(self, key, value): self._params[key] = value def getParam(self, key, default=None): return self._params.get(key, default) def getParams(self): return self._params class Net: """The whole sumo network.""" def __init__(self): self._location = {} self._id2node = {} self._id2edge = {} self._crossings_and_walkingAreas = set() self._macroConnectors = set() self._id2tls = {} self._nodes = [] self._edges = [] self._tlss = [] self._ranges = [[10000, -10000], [10000, -10000]] self._roundabouts = [] self._rtreeEdges = None self._rtreeLanes = None self._allLanes = [] self._origIdx = None self._proj = None self.hasInternal = False # store dijsktra heap for reuse if the same origin is used repeatedly self._shortestPathCache = None def setLocation(self, netOffset, convBoundary, origBoundary, projParameter): self._location["netOffset"] = netOffset self._location["convBoundary"] = convBoundary self._location["origBoundary"] = origBoundary self._location["projParameter"] = projParameter def 
addNode(self, id, type=None, coord=None, incLanes=None, intLanes=None): if id is None: return None if id not in self._id2node: n = node.Node(id, type, coord, incLanes, intLanes) self._nodes.append(n) self._id2node[id] = n self.setAdditionalNodeInfo( self._id2node[id], type, coord, incLanes, intLanes) return self._id2node[id] def setAdditionalNodeInfo(self, node, type, coord, incLanes, intLanes=None): if coord is not None and node._coord is None: node._coord = coord self._ranges[0][0] = min(self._ranges[0][0], coord[0]) self._ranges[0][1] = max(self._ranges[0][1], coord[0]) self._ranges[1][0] = min(self._ranges[1][0], coord[1]) self._ranges[1][1] = max(self._ranges[1][1], coord[1]) if incLanes is not None and node._incLanes is None: node._incLanes = incLanes if intLanes is not None and node._intLanes is None: node._intLanes = intLanes if type is not None and node._type is None: node._type = type def addEdge(self, id, fromID, toID, prio, function, name, edgeType=''): if id not in self._id2edge: fromN = self.addNode(fromID) toN = self.addNode(toID) e = edge.Edge(id, fromN, toN, prio, function, name, edgeType) self._edges.append(e) self._id2edge[id] = e if function: self.hasInternal = True return self._id2edge[id] def addLane(self, edge, speed, length, width, allow=None, disallow=None): return lane.Lane(edge, speed, length, width, allow, disallow) def addRoundabout(self, nodes, edges=None): r = roundabout.Roundabout(nodes, edges) self._roundabouts.append(r) return r def addConnection(self, fromEdge, toEdge, fromlane, tolane, direction, tls, tllink, state, viaLaneID=None): conn = connection.Connection( fromEdge, toEdge, fromlane, tolane, direction, tls, tllink, state, viaLaneID) fromEdge.addOutgoing(conn) fromlane.addOutgoing(conn) toEdge._addIncoming(conn) if viaLaneID: try: # internal lanes are only available when building with option withInternal=True viaLane = self.getLane(viaLaneID) viaEdge = viaLane.getEdge() viaEdge._addIncoming(connection.Connection( fromEdge, 
viaEdge, fromlane, viaLane, direction, tls, tllink, state, '')) except Exception: pass return conn def getEdges(self, withInternal=True): if not withInternal: return [e for e in self._edges if e.getFunction() == ''] else: return self._edges def getRoundabouts(self): return self._roundabouts def hasEdge(self, id): return id in self._id2edge def getEdge(self, id): return self._id2edge[id] def getLane(self, laneID): edge_id, lane_index = laneID.rsplit("_", 1) return self.getEdge(edge_id).getLane(int(lane_index)) def _initRTree(self, shapeList, includeJunctions=True): import rtree # noqa result = rtree.index.Index() result.interleaved = True for ri, shape in enumerate(shapeList): result.add(ri, shape.getBoundingBox(includeJunctions)) return result # Please be aware that the resulting list of edges is NOT sorted def getNeighboringEdges(self, x, y, r=0.1, includeJunctions=True, allowFallback=True): edges = [] try: if self._rtreeEdges is None: self._rtreeEdges = self._initRTree(self._edges, includeJunctions) for i in self._rtreeEdges.intersection((x - r, y - r, x + r, y + r)): e = self._edges[i] d = sumolib.geomhelper.distancePointToPolygon( (x, y), e.getShape(includeJunctions)) if d < r: edges.append((e, d)) except ImportError: if not allowFallback: raise warnings.warn("Module 'rtree' not available. 
Using brute-force fallback.") for the_edge in self._edges: d = sumolib.geomhelper.distancePointToPolygon((x, y), the_edge.getShape(includeJunctions)) if d < r: edges.append((the_edge, d)) return edges def getNeighboringLanes(self, x, y, r=0.1, includeJunctions=True, allowFallback=True): lanes = [] try: if self._rtreeLanes is None: for the_edge in self._edges: self._allLanes += the_edge.getLanes() self._rtreeLanes = self._initRTree(self._allLanes, includeJunctions) for i in self._rtreeLanes.intersection((x - r, y - r, x + r, y + r)): the_lane = self._allLanes[i] d = sumolib.geomhelper.distancePointToPolygon((x, y), the_lane.getShape(includeJunctions)) if d < r: lanes.append((the_lane, d)) except ImportError: if not allowFallback: raise warnings.warn("Module 'rtree' not available. Using brute-force fallback.") for the_edge in self._edges: for the_lane in the_edge.getLanes(): d = sumolib.geomhelper.distancePointToPolygon((x, y), the_lane.getShape(includeJunctions)) if d < r: lanes.append((the_lane, d)) return lanes def hasNode(self, id): return id in self._id2node def getNode(self, id): return self._id2node[id] def getNodes(self): return self._nodes def getTLS(self, tlid): return self._id2tls[tlid] def getTLSSecure(self, tlid): if tlid in self._id2tls: tls = self._id2tls[tlid] else: tls = TLS(tlid) self._id2tls[tlid] = tls self._tlss.append(tls) return tls def getTrafficLights(self): return self._tlss def addTLS(self, tlid, inLane, outLane, linkNo): tls = self.getTLSSecure(tlid) tls.addConnection(inLane, outLane, linkNo) return tls def addTLSProgram(self, tlid, programID, offset, type, removeOthers): tls = self.getTLSSecure(tlid) program = TLSProgram(programID, offset, type) if removeOthers: tls.removePrograms() tls.addProgram(program) return program def setFoes(self, junctionID, index, foes, prohibits): self._id2node[junctionID].setFoes(index, foes, prohibits) def forbids(self, possProhibitor, possProhibited): return 
possProhibitor.getFrom().getToNode().forbids(possProhibitor, possProhibited) def getDownstreamEdges(self, edge, distance, stopOnTLS, stopOnTurnaround): """return a list of lists of the form [[firstEdge, pos, [edge_0, edge_1, ..., edge_k], aborted], ...] where firstEdge: is the downstream edge furthest away from the intersection, [edge_0, ..., edge_k]: is the list of edges from the intersection downstream to firstEdge pos: is the position on firstEdge with distance to the end of the input edge aborted: a flag indicating whether the downstream search stopped at a TLS or a node without incoming edges before reaching the distance threshold """ ret = [] seen = set() toProc = [] toProc.append([edge, 0, []]) while not len(toProc) == 0: ie = toProc.pop() if ie[0] in seen: continue seen.add(ie[0]) if ie[1] + ie[0].getLength() >= distance: ret.append( [ie[0], ie[0].getLength() + ie[1] - distance, ie[2], False]) continue if len(ie[0]._incoming) == 0: ret.append([ie[0], ie[0].getLength() + ie[1], ie[2], True]) continue mn = [] stop = False for ci in ie[0]._incoming: if ci not in seen: prev = copy(ie[2]) if stopOnTLS and ci._tls and ci != edge and not stop: ret.append([ie[0], ie[1], prev, True]) stop = True elif (stopOnTurnaround and ie[0]._incoming[ci][0].getDirection() == Connection.LINKDIR_TURN and not stop): ret.append([ie[0], ie[1], prev, True]) stop = True else: prev.append(ie[0]) mn.append([ci, ie[0].getLength() + ie[1], prev]) if not stop: toProc.extend(mn) return ret def getEdgesByOrigID(self, origID): if self._origIdx is None: self._origIdx = defaultdict(set) for the_edge in self._edges: for the_lane in the_edge.getLanes(): for oID in the_lane.getParam("origId", "").split(): self._origIdx[oID].add(the_edge) return self._origIdx[origID] def getBBoxXY(self): """ Get the bounding box (bottom left and top right coordinates) for a net; Coordinates are in X and Y (not Lat and Lon) :return [(bottom_left_X, bottom_left_Y), (top_right_X, top_right_Y)] """ return 
[(self._ranges[0][0], self._ranges[1][0]), (self._ranges[0][1], self._ranges[1][1])] # the diagonal of the bounding box of all nodes def getBBoxDiameter(self): return math.sqrt( (self._ranges[0][0] - self._ranges[0][1]) ** 2 + (self._ranges[1][0] - self._ranges[1][1]) ** 2) def getGeoProj(self): if self._proj is None: import pyproj try: self._proj = pyproj.Proj(projparams=self._location["projParameter"]) except RuntimeError: if hasattr(pyproj.datadir, 'set_data_dir'): pyproj.datadir.set_data_dir('/usr/share/proj') self._proj = pyproj.Proj(projparams=self._location["projParameter"]) raise return self._proj def getLocationOffset(self): """ offset to be added after converting from geo-coordinates to UTM""" return list(map(float, self._location["netOffset"].split(","))) def getBoundary(self): """ return xmin,ymin,xmax,ymax network coordinates""" return list(map(float, self._location["convBoundary"].split(","))) def convertLonLat2XY(self, lon, lat, rawUTM=False): x, y = self.getGeoProj()(lon, lat) if rawUTM: return x, y else: x_off, y_off = self.getLocationOffset() return x + x_off, y + y_off def convertXY2LonLat(self, x, y, rawUTM=False): if not rawUTM: x_off, y_off = self.getLocationOffset() x -= x_off y -= y_off return self.getGeoProj()(x, y, inverse=True) def move(self, dx, dy, dz=0): for n in self._nodes: n._coord = (n._coord[0] + dx, n._coord[1] + dy, n._coord[2] + dz) for e in self._edges: for l in e._lanes: l._shape = [(p[0] + dx, p[1] + dy, p[2] + dz) for p in l.getShape3D()] e.rebuildShape() def getInternalPath(self, conn, fastest=False): minInternalCost = 1e400 minPath = None for c in conn: if c.getViaLaneID() != "": viaCost = 0 viaID = c.getViaLaneID() viaPath = [] while viaID != "": viaLane = self.getLane(viaID) viaCost += viaLane.getLength() if not fastest else viaLane.getLength() / viaLane.getSpeed() viaID = viaLane.getOutgoing()[0].getViaLaneID() viaPath.append(viaLane.getEdge()) if viaCost < minInternalCost: minInternalCost = viaCost minPath = viaPath 
return minPath, minInternalCost def getOptimalPath(self, fromEdge, toEdge, fastest=False, maxCost=1e400, vClass=None, reversalPenalty=0, includeFromToCost=True, withInternal=False, ignoreDirection=False, fromPos=0, toPos=0): """ Finds the optimal (shortest or fastest) path for vClass from fromEdge to toEdge by using using Dijkstra's algorithm. It returns a pair of a tuple of edges and the cost. If no path is found the first element is None. The cost for the returned path is equal to the sum of all edge costs in the path, including the internal connectors, if they are present in the network. The path itself does not include internal edges except for the case when the start or end edge are internal edges. The search may be limited using the given threshold. """ def speedFunc(edge): return edge.getSpeed() if fastest else 1.0 if self.hasInternal: appendix = () appendixCost = 0. while toEdge.getFunction() == "internal": appendix = (toEdge,) + appendix appendixCost += toEdge.getLength() / speedFunc(toEdge) toEdge = list(toEdge.getIncoming().keys())[0] fromCost = fromEdge.getLength() / speedFunc(fromEdge) if includeFromToCost else 0 q = [(fromCost, fromEdge.getID(), (fromEdge, ), ())] if fromEdge == toEdge and fromPos > toPos and not ignoreDirection: # start search on successors of fromEdge q = [] startCost = (fromEdge.getLength() - fromPos) / speedFunc(fromEdge) if includeFromToCost else 0 for e2, conn in fromEdge.getAllowedOutgoing(vClass).items(): q.append((startCost + e2.getLength() / speedFunc(e2), e2.getID(), (fromEdge, e2), ())) seen = set() dist = {fromEdge: fromEdge.getLength() / speedFunc(fromEdge)} while q: cost, _, e1via, path = heapq.heappop(q) e1 = e1via[-1] if e1 in seen: continue seen.add(e1) path += e1via if e1 == toEdge: if self.hasInternal: return path + appendix, cost + appendixCost if includeFromToCost and toPos == 0: # assume toPos=0 is the default value return path, cost return path, cost + (-toEdge.getLength() + toPos) / speedFunc(toEdge) if cost > 
maxCost: return None, cost for e2, conn in chain(e1.getAllowedOutgoing(vClass).items(), e1.getIncoming().items() if ignoreDirection else []): # print(cost, e1.getID(), e2.getID(), e2 in seen) if e2 not in seen: newCost = cost + e2.getLength() / speedFunc(e2) if e2 == e1.getBidi(): newCost += reversalPenalty minPath = (e2,) if self.hasInternal: viaPath, minInternalCost = self.getInternalPath(conn, fastest=fastest) if viaPath is not None: newCost += minInternalCost if withInternal: minPath = tuple(viaPath + [e2]) if e2 not in dist or newCost < dist[e2]: dist[e2] = newCost heapq.heappush(q, (newCost, e2.getID(), minPath, path)) return None, 1e400 def getShortestPath(self, fromEdge, toEdge, maxCost=1e400, vClass=None, reversalPenalty=0, includeFromToCost=True, withInternal=False, ignoreDirection=False, fromPos=0, toPos=0): """ Finds the shortest path from fromEdge to toEdge respecting vClass, using Dijkstra's algorithm. It returns a pair of a tuple of edges and the cost. If no path is found the first element is None. The cost for the returned path is equal to the sum of all edge lengths in the path, including the internal connectors, if they are present in the network. The path itself does not include internal edges except for the case when the start or end edge are internal edges. The search may be limited using the given threshold. """ return self.getOptimalPath(fromEdge, toEdge, False, maxCost, vClass, reversalPenalty, includeFromToCost, withInternal, ignoreDirection, fromPos, toPos) def getFastestPath(self, fromEdge, toEdge, maxCost=1e400, vClass=None, reversalPenalty=0, includeFromToCost=True, withInternal=False, ignoreDirection=False, fromPos=0, toPos=0): """ Finds the fastest path from fromEdge to toEdge respecting vClass, using Dijkstra's algorithm. It returns a pair of a tuple of edges and the cost. If no path is found the first element is None. 
The cost for the returned path is equal to the sum of all edge costs in the path, including the internal connectors, if they are present in the network. The path itself does not include internal edges except for the case when the start or end edge are internal edges. The search may be limited using the given threshold. """ return self.getOptimalPath(fromEdge, toEdge, True, maxCost, vClass, reversalPenalty, includeFromToCost, withInternal, ignoreDirection, fromPos, toPos) class NetReader(handler.ContentHandler): """Reads a network, storing the edge geometries, lane numbers and max. speeds""" def __init__(self, **others): self._net = others.get('net', Net()) self._currentEdge = None self._currentNode = None self._currentConnection = None self._currentLane = None self._crossingID2edgeIDs = {} self._withPhases = others.get('withPrograms', False) self._latestProgram = others.get('withLatestPrograms', False) if self._latestProgram: self._withPhases = True self._withConnections = others.get('withConnections', True) self._withFoes = others.get('withFoes', True) self._withPedestrianConnections = others.get('withPedestrianConnections', False) self._withMacroConnectors = others.get('withMacroConnectors', False) self._withInternal = others.get('withInternal', self._withPedestrianConnections) if self._withPedestrianConnections and not self._withInternal: sys.stderr.write("Warning: Option withPedestrianConnections requires withInternal\n") self._withInternal = True self._bidiEdgeIDs = {} def startElement(self, name, attrs): if name == 'location': self._net.setLocation(attrs["netOffset"], attrs["convBoundary"], attrs[ "origBoundary"], attrs["projParameter"]) if name == 'edge': function = attrs.get('function', '') if (function == '' or (self._withInternal and function in ['internal', 'crossing', 'walkingarea']) or (self._withMacroConnectors and function == 'connector')): prio = -1 if 'priority' in attrs: prio = int(attrs['priority']) # get the ids edgeID = attrs['id'] fromNodeID = 
attrs.get('from', None) toNodeID = attrs.get('to', None) # for internal junctions use the junction's id for from and to node if function == 'internal': fromNodeID = toNodeID = edgeID[1:edgeID.rfind('_')] # remember edges crossed by pedestrians to link them later to the crossing objects if function == 'crossing': self._crossingID2edgeIDs[edgeID] = attrs.get('crossingEdges').split(' ') self._currentEdge = self._net.addEdge(edgeID, fromNodeID, toNodeID, prio, function, attrs.get('name', ''), attrs.get('type', '')) self._currentEdge.setRawShape(convertShape(attrs.get('shape', ''))) bidi = attrs.get('bidi', '') if bidi: self._bidiEdgeIDs[edgeID] = bidi else: if function in ['crossing', 'walkingarea']: self._net._crossings_and_walkingAreas.add(attrs['id']) elif function == 'connector': self._net._macroConnectors.add(attrs['id']) self._currentEdge = None if name == 'lane' and self._currentEdge is not None: self._currentLane = self._net.addLane( self._currentEdge, float(attrs['speed']), float(attrs['length']), float(attrs.get('width', 3.2)), attrs.get('allow'), attrs.get('disallow')) self._currentLane.setShape(convertShape(attrs.get('shape', ''))) if name == 'neigh' and self._currentLane is not None: self._currentLane.setNeigh(attrs['lane']) if name == 'junction': if attrs['id'][0] != ':': intLanes = None if self._withInternal: intLanes = attrs["intLanes"].split(" ") self._currentNode = self._net.addNode(attrs['id'], attrs['type'], tuple( map(float, [attrs['x'], attrs['y'], attrs['z'] if 'z' in attrs else '0'])), attrs['incLanes'].split(" "), intLanes) self._currentNode.setShape( convertShape(attrs.get('shape', ''))) if 'fringe' in attrs: self._currentNode._fringe = attrs['fringe'] if name == 'succ' and self._withConnections: # deprecated if attrs['edge'][0] != ':': self._currentEdge = self._net.getEdge(attrs['edge']) self._currentLane = attrs['lane'] self._currentLane = int( self._currentLane[self._currentLane.rfind('_') + 1:]) else: self._currentEdge = None if name == 
'succlane' and self._withConnections: # deprecated lid = attrs['lane'] if lid[0] != ':' and lid != "SUMO_NO_DESTINATION" and self._currentEdge: connected = self._net.getEdge(lid[:lid.rfind('_')]) tolane = int(lid[lid.rfind('_') + 1:]) if 'tl' in attrs and attrs['tl'] != "": tl = attrs['tl'] tllink = int(attrs['linkIdx']) tlid = attrs['tl'] toEdge = self._net.getEdge(lid[:lid.rfind('_')]) tolane2 = toEdge._lanes[tolane] tls = self._net.addTLS( tlid, self._currentEdge._lanes[self._currentLane], tolane2, tllink) self._currentEdge.setTLS(tls) else: tl = "" tllink = -1 toEdge = self._net.getEdge(lid[:lid.rfind('_')]) tolane = toEdge._lanes[tolane] viaLaneID = attrs['via'] self._net.addConnection(self._currentEdge, connected, self._currentEdge._lanes[ self._currentLane], tolane, attrs['dir'], tl, tllink, attrs['state'], viaLaneID) if name == 'connection' and self._withConnections and (attrs['from'][0] != ":" or self._withInternal): fromEdgeID = attrs['from'] toEdgeID = attrs['to'] if ((self._withPedestrianConnections or not (fromEdgeID in self._net._crossings_and_walkingAreas or toEdgeID in self._net._crossings_and_walkingAreas)) and (self._withMacroConnectors or not (fromEdgeID in self._net._macroConnectors or toEdgeID in self._net._macroConnectors))): fromEdge = self._net.getEdge(fromEdgeID) toEdge = self._net.getEdge(toEdgeID) fromLane = fromEdge.getLane(int(attrs['fromLane'])) toLane = toEdge.getLane(int(attrs['toLane'])) if 'tl' in attrs and attrs['tl'] != "": tl = attrs['tl'] tllink = int(attrs['linkIndex']) tls = self._net.addTLS(tl, fromLane, toLane, tllink) fromEdge.setTLS(tls) else: tl = "" tllink = -1 try: viaLaneID = attrs['via'] except KeyError: viaLaneID = '' self._currentConnection = self._net.addConnection( fromEdge, toEdge, fromLane, toLane, attrs['dir'], tl, tllink, attrs['state'], viaLaneID) # 'row-logic' is deprecated!!! 
if self._withFoes and name == 'ROWLogic': self._currentNode = attrs['id'] if name == 'logicitem' and self._withFoes: # deprecated self._net.setFoes( self._currentNode, int(attrs['request']), attrs["foes"], attrs["response"]) if name == 'request' and self._withFoes: self._currentNode.setFoes( int(attrs['index']), attrs["foes"], attrs["response"]) # tl-logic is deprecated!!! NOTE: nevertheless, this is still used by # netconvert... (Leo) if self._withPhases and name == 'tlLogic': self._currentProgram = self._net.addTLSProgram( attrs['id'], attrs['programID'], float(attrs['offset']), attrs['type'], self._latestProgram) if self._withPhases and name == 'phase': self._currentProgram.addPhase( attrs['state'], int(attrs['duration']), int(attrs['minDur']) if 'minDur' in attrs else -1, int(attrs['maxDur']) if 'maxDur' in attrs else -1, list(map(int, attrs['next'].split())) if 'next' in attrs else [], attrs['name'] if 'name' in attrs else "" ) if name == 'roundabout': self._net.addRoundabout( attrs['nodes'].split(), attrs['edges'].split()) if name == 'param': if self._currentLane is not None: self._currentLane.setParam(attrs['key'], attrs['value']) elif self._currentEdge is not None: self._currentEdge.setParam(attrs['key'], attrs['value']) elif self._currentNode is not None: self._currentNode.setParam(attrs['key'], attrs['value']) elif self._currentConnection is not None: self._currentConnection.setParam(attrs['key'], attrs['value']) elif self._withPhases and self._currentProgram is not None: self._currentProgram.setParam(attrs['key'], attrs['value']) def endElement(self, name): if name == 'lane': self._currentLane = None if name == 'edge': self._currentEdge = None if name == 'junction': self._currentNode = None if name == 'connection': self._currentConnection = None # 'row-logic' is deprecated!!! if name == 'ROWLogic' or name == 'row-logic': self._haveROWLogic = False # tl-logic is deprecated!!! 
if self._withPhases and (name == 'tlLogic' or name == 'tl-logic'): self._currentProgram = None if name == 'net': for edgeID, bidiID in self._bidiEdgeIDs.items(): self._net.getEdge(edgeID)._bidi = self._net.getEdge(bidiID) def endDocument(self): # set crossed edges of pedestrian crossings for crossingID, crossedEdgeIDs in self._crossingID2edgeIDs.items(): pedCrossing = self._net.getEdge(crossingID) for crossedEdgeID in crossedEdgeIDs: pedCrossing._addCrossingEdge(self._net.getEdge(crossedEdgeID)) def getNet(self): return self._net def convertShape(shapeString): """ Convert xml shape string into float tuples. This method converts the 2d or 3d shape string from SUMO's xml file into a list containing 3d float-tuples. Non existant z coordinates default to zero. If shapeString is empty, an empty list will be returned. """ cshape = [] for pointString in shapeString.split(): p = [float(e) for e in pointString.split(",")] if len(p) == 2: cshape.append((p[0], p[1], 0.)) elif len(p) == 3: cshape.append(tuple(p)) else: raise ValueError( 'Invalid shape point "%s", should be either 2d or 3d' % pointString) return cshape def readNet(filename, **others): """ load a .net.xml file The following named options are supported: 'net' : initialize data structurs with an existing net object (default Net()) 'withPrograms' : import all traffic light programs (default False) 'withLatestPrograms' : import only the last program for each traffic light. This is the program that would be active in sumo by default. (default False) 'withConnections' : import all connections (default True) 'withFoes' : import right-of-way information (default True) 'withInternal' : import internal edges and lanes (default False) 'withPedestrianConnections' : import connections between sidewalks, crossings (default False) """ netreader = NetReader(**others) try: parse(gzip.open(filename), netreader) except IOError: parse(filename, netreader) return netreader.getNet()
ngctnnnn/DRL_Traffic-Signal-Control
sumo-rl/sumo/tools/sumolib/net/__init__.py
__init__.py
py
35,544
python
en
code
17
github-code
6
[ { "api_name": "connection.Connection", "line_number": 205, "usage_type": "call" }, { "api_name": "connection.Connection", "line_number": 215, "usage_type": "call" }, { "api_name": "rtree.index.Index", "line_number": 243, "usage_type": "call" }, { "api_name": "rtre...
42300472031
# -*- coding: utf-8 -*- """ Created on Fri Jun 14 12:43:40 2019 @author: Minh """ import json import requests import tweepy import base64 import sys import time import os import csv from AlarmDialogUtil import showAlertDialog class TwitterMonitor(): twitter_public = twitter_private = access_token = access_token_secret = '' def __init__(self): #fill out with keys self.twitter_public = 'CONSUMER_KEY' self.twitter_private = 'CONSUMER_SECRET' self.access_token = 'ACCESS_TOKEN' self.access_token_secret = 'ACCESS_SECRET' def monitor(self): twitter_ids = list(123432, 32423432, 23432423) #random examples auth = tweepy.OAuthHandler(self.twitter_public, self.twitter_private) auth.set_access_token(self.access_token, self.access_token_secret) api = tweepy.API(auth) streamListener = self.AlertStreamListener() stream = tweepy.Stream(auth = api.auth, listener=streamListener) stream.filter(follow=twitter_ids) class AlertStreamListener(tweepy.StreamListener): def on_status(self, status): if(not hasattr(status, 'retweeted_status') and status.in_reply_to_status_id_str == None and status.in_reply_to_user_id_str == None): title = status.user.name text = "text: {} \n\n url:{}".format(status.text, status.entities['urls']) showAlertDialog(title, text) with open("twitter/TwitterMonitorLog", "a") as file: file.write(json.dumps(status._json) + "\n\n") def on_error(self, status_code): if status_code == 420: print("error on connecting to stream: 420 ; time: {}".format(time.time())) #returning False in on_error disconnects the stream return False if __name__ == "__main__": while True: try: TwitterMonitor().monitor() except Exception as e: showAlertDialog("Error", "Program exited:\n" + str(e)) time.sleep(60)
BeePete/SBV1
twitter_monitor.py
twitter_monitor.py
py
2,214
python
en
code
0
github-code
6
[ { "api_name": "tweepy.OAuthHandler", "line_number": 31, "usage_type": "call" }, { "api_name": "tweepy.API", "line_number": 34, "usage_type": "call" }, { "api_name": "tweepy.Stream", "line_number": 36, "usage_type": "call" }, { "api_name": "tweepy.StreamListener", ...
7261113141
import matplotlib
matplotlib.use('TkAgg')
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt


def main():
    """Detect red regions in a frame: normalize, quantize, threshold, denoise, locate."""
    #img= cv2.imread('primeiroFrame.jpg',0)
    img = cv2.imread('primeiroFrame.jpg', cv2.IMREAD_COLOR)
    print(img.shape)
    # Normalize the image (CLAHE on the L channel).
    img = histogramaNormalizadoColor(img)
    # Apply a Gaussian blur.
    img = cv2.GaussianBlur(img, (5, 5), 0)
    # Quantize colors with k-means clustering.
    img = calculakmeans(img)
    # Threshold, keeping only the red hues.
    mask = trashholdingVermelho(img)
    # Apply closing/opening operations on the mask to remove noise.
    retKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 1))
    openImage = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, retKernel)
    retKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 10))
    closeImage = cv2.morphologyEx(openImage, cv2.MORPH_OPEN, retKernel)
    retKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    openImage = cv2.morphologyEx(closeImage, cv2.MORPH_CLOSE, retKernel)
    # Find the blobs:
    #img_blob = deteccaoDeBlobs(openImage)
    # Using image moments:
    img_blob = detectMomento(openImage)
    #cv2.imshow("Final",openImage)
    #cv2.imshow("CloseiImage",closeImage)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    #cap.release()


def detectMomento(img):
    """Locate the first (last-found) contour via moments and draw its bounding box."""
    #img= cv2.imread('untitled.png',0)
    ret, thresh = cv2.threshold(img, 125, 255, 0)
    countourn, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    countourn = list(reversed(countourn))
    cnt = countourn[0]
    M = cv2.moments(cnt)
    # Centroid from the spatial moments.
    cx = int(M["m10"] / M["m00"])
    cy = int(M["m01"] / M["m00"])
    print(cx)
    print(cy)
    x, y, w, h = cv2.boundingRect(countourn[0])
    # Bounding rectangle with a 10px margin.
    cv2.rectangle(img, (x - 10, y - 10), (x + w + 10, y + h + 10), (255, 255, 255), 1)
    cv2.imshow("img_contours", img)
    cv2.waitKey(0)
    """
    for c in countourn:
        M = cv2.moments(c)
        cx = int(M["m10"]/M["m00"])
        cy = int(M["m01"]/M["m00"])
        cv2.circle(img, (cX, cY), 5, (255, 255, 255), -1)
        cv2.imshow("img_contourn", img)
    print(countourn)
    cv2.drawContours(img, countourn, -1, (0,255,0), 3)
    cv2.imshow("img_contours", img)
    cv2.waitKey(0)
    """


def deteccaoDeBlobs(img):
    """Detect white blobs with SimpleBlobDetector and display the first keypoint."""
    #img= cv2.imread('untitled.png',cv2.IMREAD_GRAYSCALE)
    params = cv2.SimpleBlobDetector_Params()
    #img = cv2.bitwise_not(img)
    params.minDistBetweenBlobs = 10
    params.filterByColor = True
    params.blobColor = 255
    params.filterByCircularity = False
    params.filterByConvexity = False
    params.filterByInertia = False
    params.filterByArea = True
    # params.minArea = 1
    # params.maxArea = 100000
    # Create a detector with the parameters (API differs before OpenCV 3).
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img)
    print(type(keypoints))
    keypoints = list(reversed(keypoints))
    #np.invert(keypoints)
    for i in keypoints:
        im_with_keypoints = cv2.drawKeypoints(img, [i], np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        print("################")
        print(i.class_id)
        print(i.pt)
        objectCentroid = i.pt
        print("################")
        break
    black = np.zeros((540, 960, 3))
    #print(im_with_keypoints.size)
    #im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow("Keypoints", im_with_keypoints)
    cv2.waitKey(0)


def trashholdingVermelho(colorImage):
    """Return a binary mask of red pixels (both red hue ranges in HSV)."""
    # Convert to HSV.
    hsv = cv2.cvtColor(colorImage, cv2.COLOR_BGR2HSV)
    # Low red hues (0-10).
    lower_red = np.array([0, 70, 50])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    # High red hues (170-180).
    lower_red = np.array([170, 70, 50])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)
    mask = mask1 | mask2
    res = cv2.bitwise_and(colorImage, colorImage, mask=mask)
    #cv2.imshow('frame',colorImage)
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',res)
    return mask


def calculaHistogramaColor(colorImage):
    """Plot the per-channel (B, G, R) histogram of a color image."""
    color = ('b', 'g', 'r')
    for i, col in enumerate(color):
        print("passei")
        histr = cv2.calcHist(colorImage, [i], None, [255], [0, 255])
        plt.plot(histr, color=col)
        plt.xlim([0, 255])
    plt.show()


def calculaHistograma(greyImage):
    """Plot the histogram of a grayscale image."""
    histr = cv2.calcHist(greyImage, [0], None, [255], [0, 256])
    plt.plot(histr,)
    plt.xlim([0, 255])


def histogramaNormalizadoColor(ColorImg):
    """Apply CLAHE to the L channel of a BGR image (contrast normalization)."""
    lab = cv2.cvtColor(ColorImg, cv2.COLOR_BGR2LAB)
    lab_planes = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return bgr


# Contrast Limited Adaptive Histogram Equalization
# create a CLAHE object (Arguments are optional).
def histogramaNormalizadoCinza(img):
    """Apply CLAHE to a grayscale image."""
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # BUG FIX: the original applied CLAHE to an undefined name `image`
    # (NameError); the parameter is `img`.
    cl1 = clahe.apply(img)
    return cl1


def calculakmeans(img):
    """Quantize the image into K=15 colors using k-means clustering."""
    z = img.reshape((-1, 3))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K = 15
    ret, label, center = cv2.kmeans(z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    # Map every pixel back to its cluster center color.
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    return res2


if __name__ == "__main__":
    main()
felipemateus/vis-oCompEstudo
histogram.py
histogram.py
py
5,685
python
en
code
0
github-code
6
[ { "api_name": "matplotlib.use", "line_number": 2, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 12, "usage_type": "call" }, { "api_name": "cv2.IMREAD_COLOR", "line_number": 12, "usage_type": "attribute" }, { "api_name": "cv2.GaussianBlur", ...
22425229606
import typing

from pydantic import BaseModel, root_validator

from candid import CandidApiEnvironment
from candid.client import CandidApi, AsyncCandidApi
from candid.resources.auth.client import AuthClient, AsyncAuthClient
from candid.resources.auth.resources.v_2 import AuthGetTokenRequest
from candid.resources.billing_notes.client import BillingNotesClient, AsyncBillingNotesClient
from candid.resources.encounters.client import EncountersClient, AsyncEncountersClient
from candid.resources.expected_network_status.client import ExpectedNetworkStatusClient, \
    AsyncExpectedNetworkStatusClient
from candid.resources.payers.client import PayersClient, AsyncPayersClient


class CandidApiClientOptions(BaseModel):
    """Credentials for the Candid API.

    Either a ready-made `token`, or a `client_id`/`client_secret` pair that
    will be exchanged for a token; the validator enforces that at least one
    complete credential set is present.
    """

    token: typing.Optional[str]
    client_id: typing.Optional[str]
    client_secret: typing.Optional[str]

    @root_validator(pre=False)
    def token_or_client_id_and_secret_present(cls, values):
        # Accept a token alone, or a complete id/secret pair; anything else
        # is a configuration error.
        if values.get("token") is not None:
            return values
        elif values.get("client_id") is not None and values.get("client_secret") is not None:
            return values
        raise ValueError("Either token or client_id and client_secret must be provided")


def get_token_for_options(options: CandidApiClientOptions, environment: CandidApiEnvironment) -> str:
    """Return `options.token` if set; otherwise exchange the id/secret pair for an access token."""
    if options.token is not None:
        return options.token
    else:
        # A temporary unauthenticated client performs the token exchange.
        unauthed_client = CandidApi(environment=environment, token=None)
        response = unauthed_client.auth.v_2.get_token(request=AuthGetTokenRequest(client_id=options.client_id,
                                                                                 client_secret=options.client_secret))
        return response.access_token


class CandidApiClient:
    """Synchronous facade exposing the Candid API resource sub-clients as attributes."""

    def __init__(
        self,
        *,
        options: CandidApiClientOptions,
        environment: CandidApiEnvironment = CandidApiEnvironment.PRODUCTION
    ):
        candid = CandidApi(token=get_token_for_options(options, environment), environment=environment)
        self.auth = candid.auth
        self.encounters = candid.encounters
        self.billing_notes = candid.billing_notes
        self.expected_network_status = candid.expected_network_status
        self.payers = candid.payers


class AsyncCandidApiClient:
    """Async facade mirroring CandidApiClient.

    NOTE(review): the token exchange in get_token_for_options is performed
    synchronously even for the async client — confirm this is intentional.
    """

    def __init__(
        self,
        *,
        options: CandidApiClientOptions,
        environment: CandidApiEnvironment = CandidApiEnvironment.PRODUCTION
    ):
        candid = AsyncCandidApi(token=get_token_for_options(options, environment), environment=environment)
        self.auth = candid.auth
        self.encounters = candid.encounters
        self.billing_notes = candid.billing_notes
        self.expected_network_status = candid.expected_network_status
        self.payers = candid.payers
candidhealth/candid-python
src/candid/candid_api_client.py
candid_api_client.py
py
2,650
python
en
code
0
github-code
6
[ { "api_name": "pydantic.BaseModel", "line_number": 16, "usage_type": "name" }, { "api_name": "typing.Optional", "line_number": 17, "usage_type": "attribute" }, { "api_name": "typing.Optional", "line_number": 18, "usage_type": "attribute" }, { "api_name": "typing.O...
31501456333
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings

from . import views

# URL routes for the app, plus static/media file serving in development.
urlpatterns = [
    path("", views.index, name='index'),
    path("news/<int:id>/", views.see_full_news),
    path("main_page/", views.main_page, name='main_page'),
    path("dataselect/", views.dataselect, name='dataselect'),
    path("station/", views.station, name='station'),
    path("geofrom", views.geoform, name='geofrom'),
    path('captcha/', include('captcha.urls')),
    path("req_form", views.requsets_list, name='req_form'),
    path("structure", views.struct, name="structure"),
    # BUG FIX: the captcha include was registered a second time at the end of
    # the original urlpatterns; the duplicate has been removed.
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
  + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
AlekseiMuryskin/SiteGS
gsras/urls.py
urls.py
py
809
python
en
code
0
github-code
6
[ { "api_name": "django.urls.path", "line_number": 6, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 7, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 8, "usage_type": "call" }, { "api_name": "django.urls.path", ...
43243448951
"""Shared utilities for thumbor: MIME-type tables, deprecation decorator,
and ICC color-profile helpers built on Pillow's ImageCms (LittleCMS)."""
import logging
from functools import wraps
from io import BytesIO

from PIL import Image

try:
    from PIL import ImageCms  # pylint: disable=ungrouped-imports
except ImportError:
    # Pillow built without LittleCMS support: color-management helpers
    # degrade gracefully (see get_color_space / ensure_srgb).
    ImageCms = None
    DEFAULT_SRGB_PROFILE = None
    TRANSFORM_FLAGS = 0
else:
    DEFAULT_SRGB_PROFILE = ImageCms.ImageCmsProfile(
        ImageCms.createProfile("sRGB")
    )
    # LittleCMS transform flags applied by ensure_srgb's conversion.
    TRANSFORM_FLAGS = (
        ImageCms.FLAGS["NOTCACHE"]
        | ImageCms.FLAGS["NOTPRECALC"]
        | ImageCms.FLAGS["BLACKPOINTCOMPENSATION"]
        | ImageCms.FLAGS["HIGHRESPRECALC"]
    )

# File extension -> MIME content type.
CONTENT_TYPE = {
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".gif": "image/gif",
    ".png": "image/png",
    ".webp": "image/webp",
    ".mp4": "video/mp4",
    ".webm": "video/webm",
    ".svg": "image/svg+xml",
    ".tif": "image/tiff",
    ".tiff": "image/tiff",
    ".avif": "image/avif",
    ".heic": "image/heif",
    ".heif": "image/heif",
}

# MIME content type -> canonical file extension.
EXTENSION = {
    "image/jpeg": ".jpg",
    "image/gif": ".gif",
    "image/png": ".png",
    "image/webp": ".webp",
    "video/mp4": ".mp4",
    "video/webm": ".webm",
    "image/svg+xml": ".svg",
    "image/tiff": ".tif",
    "image/avif": ".avif",
    "image/heif": ".heic",
}

logger = logging.getLogger("thumbor")


def deprecated(message):
    """Decorator factory: log a deprecation warning each time the wrapped function is called."""

    def decorator_deprecated(func):
        @wraps(func)
        def wrapper_deprecated(*args, **kwargs):
            logger.warning(
                "Deprecated function %s%s",
                func.__name__,
                message,
            )
            return func(*args, **kwargs)

        return wrapper_deprecated

    return decorator_deprecated


def get_profile_and_color_space(icc):
    """Parse raw ICC profile bytes.

    Returns (ImageCmsProfile, color-space string) or (None, None) when the
    bytes cannot be parsed as a profile.
    """
    with BytesIO(icc) as buf:
        try:
            profile = ImageCms.ImageCmsProfile(buf)
            return profile, profile.profile.xcolor_space.strip()
        except (AttributeError, OSError, TypeError, ValueError):
            return None, None


def get_color_space(img):
    """Return the image's ICC color space name.

    'RGB' when no profile is embedded (implicitly sRGB); None when ImageCms
    is unavailable or the profile cannot be parsed.
    """
    icc = img.info.get("icc_profile")

    if not icc:
        return "RGB"

    if ImageCms is None:
        return None

    _, color_space = get_profile_and_color_space(icc)

    return color_space


def ensure_srgb(img, srgb_profile=None):
    """
    Ensures that an image either has no ICC profile (and so is implicitly
    sRGB) or has an sRGB color profile.

    If the image is sRGB, it is returned unchanged. If it has a CMYK or Gray
    color profile, this function will return an image converted to sRGB. Any
    color profiles in other color spaces will return None.
    """
    img_info = dict(img.info)
    icc = img_info.pop("icc_profile", None)

    if not icc:
        # No embedded profile: already implicitly sRGB.
        return img

    if ImageCms is None:
        raise RuntimeError("ImageCms is required for color profile utilities")

    if srgb_profile is not None:
        srgb_profile = ImageCms.ImageCmsProfile(srgb_profile)
    else:
        srgb_profile = DEFAULT_SRGB_PROFILE

    orig_profile, color_space = get_profile_and_color_space(icc)
    if not color_space:
        return None

    if color_space == "RGB":
        logger.debug("Returning img (RGB)")
        return img

    if color_space not in ("GRAY", "CMYK"):
        # Other color spaces are rare, but best not to try to convert them.
        # Upstream understands a None return as meaning it should not
        # use it for the target encoder.
        logger.debug("Cannot convert to sRGB; color space = %s", color_space)
        return None

    # Probably not possible to have an animated image with CMYK or GRAY icc
    # profile, but best leave it alone if we have one
    if getattr(img, "is_animated", False):
        return None

    if color_space == "GRAY":
        pil_mode = "L"
    else:
        pil_mode = "CMYK"

    logger.debug("Converting from %s to sRGB", color_space)

    transform = ImageCms.ImageCmsTransform(
        orig_profile,
        srgb_profile,
        pil_mode,
        "RGBA",
        intent=ImageCms.Intent.RELATIVE_COLORIMETRIC,
        flags=TRANSFORM_FLAGS,
    )

    # Flatten onto a white background before/after the transform so the
    # alpha channel composites predictably.
    src_im = Image.new(pil_mode, img.size, "white")
    src_im.paste(img)

    dst_im = Image.new("RGBA", img.size, "white")
    dst_im.info = img_info
    dst_im = transform.apply(src_im, dst_im)
    dst_im = dst_im.convert("RGB")
    dst_im.info = img_info
    return dst_im
thumbor/thumbor
thumbor/utils.py
utils.py
py
4,272
python
en
code
9,707
github-code
6
[ { "api_name": "PIL.ImageCms", "line_number": 9, "usage_type": "name" }, { "api_name": "PIL.ImageCms.ImageCmsProfile", "line_number": 13, "usage_type": "call" }, { "api_name": "PIL.ImageCms", "line_number": 13, "usage_type": "name" }, { "api_name": "PIL.ImageCms.cr...
23391926789
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import json
from pathlib import Path
from time import sleep

from selm.gales import settings


class Gale():
    """Thin convenience wrapper around a Selenium-driven Chrome browser:
    navigation, cookie persistence, element lookup, explicit waits,
    and screenshots."""

    def __init__(self, implicitly_wait=12):
        option = webdriver.ChromeOptions()
        # Suppress the automation banner and some useless Chrome log output.
        option.add_experimental_option(
            "excludeSwitches", ['enable-automation', 'enable-logging'])
        # Initialize the driver.
        self.driver = webdriver.Chrome(options=option)
        # Set the browser window size/position.
        # self.driver.maximize_window()
        self.driver.set_window_rect(-7, 0, 1190, 1047)
        # Set the implicit wait timeout (seconds).
        self.driver.implicitly_wait(implicitly_wait)

    def get_url(self, url: str):
        '''Navigate to a page.

        Args:
            url (str): the URL to open
        '''
        self.driver.get(url)

    def get_url_by_cookies(self, url: str, cookies_path: Path):
        '''Navigate to a page with previously stored cookies applied.

        Args:
            url (str): login URL
            cookies_path (Path): path of the stored cookies file (JSON)
        '''
        self.get_url(url)
        cookies = json.loads(open(cookies_path).read())
        for c in cookies:
            self.driver.add_cookie(c)
        # Load the page again so the freshly-added cookies take effect.
        self.get_url(url)
        # self.driver.refresh()

    def get_cookies(self, url: str, file_path: Path, user_data: dict, input_time: int = 12):
        '''Perform a manual-assisted login once and persist the session cookies.

        Args:
            url (str): login URL
            file_path (Path): where to store the cookies (JSON)
            user_data (dict): login form data, e.g.
                {
                    "account": (css_selector, str),
                    "password": (css_selector, str),
                    ...
                    "login": css_selector
                }
            input_time (int, optional): Defaults to 12.
                Seconds granted to the user to type a captcha by hand.
        '''
        self.get_url(url)
        for i in user_data:
            if i == 'login':
                continue
            # NOTE(review): this unpacks each element of user_data[i] as a
            # (selector, content) pair, which only works if the value is a
            # sequence of pairs rather than the single (css_selector, str)
            # tuple the docstring suggests — verify against callers.
            for selector, content in user_data[i]:
                self.find_by_selector(selector).send_keys(content)
                # button_cache.send_keys(content)
        # Wait for the user to type the captcha manually.
        sleep(input_time)
        self.find_by_selector(user_data["login"]).click()
        cookies = self.driver.get_cookies()
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(json.dumps(cookies))

    def find_by_id(self, id):
        '''Find an element by its id attribute.

        Args:
            id (str): id value

        Returns:
            WebElement: the matching element
        '''
        return self.driver.find_element_by_id(id)

    def find_by_selector(self, selector):
        '''Find an element by CSS selector.

        Args:
            selector (str): CSS selector

        Returns:
            WebElement: the matching element
        '''
        return self.driver.find_element_by_css_selector(selector)

    def find_by_xpath(self, xpath):
        '''Find an element by XPath.

        Args:
            xpath (str): XPath expression

        Returns:
            WebElement: the matching element
        '''
        return self.driver.find_element_by_xpath(xpath)

    def wait_clickable(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is clickable, then return it.'''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.element_to_be_clickable(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_presence(self, ele_path, by="selector", time=10):
        '''Explicitly wait for the element to appear in the DOM.

        Args:
            ele_path (str): locator matching *by*
            by (str, optional): Defaults to "selector"; may also be "xpath".
            time (int, optional): Defaults to 10. Explicit wait timeout.

        Raises:
            TypeError: unsupported *by* method

        Returns:
            WebElement: the element, if found within the timeout
        '''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.presence_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_not_presence(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is no longer present in the DOM.'''
        if by == "selector":
            return WebDriverWait(self.driver, time).until_not(EC.presence_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until_not(EC.presence_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_visibility(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is visible to the user.'''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.visibility_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.visibility_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_invisibility(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is invisible to the user.'''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.invisibility_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.invisibility_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def save2png(self, file_name):
        '''Save a screenshot of the current browser window.

        Args:
            file_name (str): image name (stored under the data directory)
        '''
        file_path = settings.DATA_DIR / file_name
        self.driver.get_screenshot_as_file(str(file_path))
        sleep(1)

    def close(self):
        '''Close the current window.'''
        self.driver.close()

    def quit(self):
        '''Quit the whole browser.'''
        self.driver.quit()

    def refresh(self):
        '''Refresh the current page.'''
        self.driver.refresh()
tyutltf/xaioliangzatan
selm/gales/gale.py
gale.py
py
6,838
python
en
code
1
github-code
6
[ { "api_name": "selenium.webdriver.ChromeOptions", "line_number": 14, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 14, "usage_type": "name" }, { "api_name": "selenium.webdriver.Chrome", "line_number": 19, "usage_type": "call" }, { "api...
9202556126
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Index-splitting helpers for K-fold cross-validation, plus an image loader."""

from PIL import Image


def make_data_index(args, index):
    '''Split a shuffled reference index into validation and test slices.

    :param args: namespace with K_fold (number of folds) and k_test
        (which fold, 1-based, is held out for testing)
    :param index: sequence (typically a numpy array) of reference indices
    :return: (valindex, testindex); the remaining indices form the train set
    '''
    # K is for K-fold cross-validation
    # k is the k-th fold used for test
    K = args.K_fold  # Here, we don't use this because we use 10-fold cross-validation in default
    k = args.k_test
    # print('The index is ', index, 'Length of Index', len(index))

    # Here we assume 10-fold cross-validation, i.e., K = 10:
    # approximately 60% training set, 20% validation set, 20% testing set.
    num_test_ref = int(len(index) * 0.2)
    num_val_ref = int(len(index) * 0.2)
    num_train_ref = len(index) - num_test_ref - num_val_ref  # NOTE: computed but unused here

    # Assume k = 1 : 10 for 10-fold cross-validation.
    threshold = int(len(index) / num_test_ref)
    if k < threshold:
        # Both slices fit without wrapping.
        testindex = index[(k - 1) * num_test_ref: k * num_test_ref]
        valindex = index[k * num_val_ref: (k + 1) * num_val_ref]
    elif k == threshold:
        testindex = index[(k - 1) * num_test_ref: k * num_test_ref]
        # Check if the validation slice is short; wrap around to the start
        # of the index to top it up to num_val_ref entries.
        valindex = index[k * num_val_ref: (k + 1) * num_val_ref]
        if len(valindex) < num_val_ref:
            valindex = valindex.tolist()
            for i in range(0, num_val_ref - len(valindex)):
                valindex.append(index[i])
    elif k == threshold + 1:
        # Test slice may run past the end; wrap around from the start.
        testindex = index[k * num_test_ref: (k + 1) * num_test_ref]
        if len(testindex) < num_test_ref:
            testindex = testindex.tolist()
            for i in range(0, num_test_ref - len(testindex)):
                testindex.append(index[i])
        k -= threshold
        valindex = index[(k + 2) * num_val_ref: (k + 3) * num_val_ref]
    else:
        # k beyond threshold+1: shift k back into range, wrap both slices
        # as needed (test slice wraps with an offset of num_test_ref).
        k -= threshold
        testindex = index[k * num_test_ref: (k + 1) * num_test_ref]
        if len(testindex) < num_test_ref:
            testindex = testindex.tolist()
            for i in range(0, num_test_ref - len(testindex)):
                testindex.append(index[i + num_test_ref])
        valindex = index[(k + 2) * num_val_ref: (k + 3) * num_val_ref]
        if len(valindex) < num_val_ref:
            valindex = valindex.tolist()
            for i in range(0, num_val_ref - len(valindex)):
                valindex.append(index[i])

    return valindex, testindex


def default_loader(path, channel=3):
    """
    Load an image with PIL.

    :param path: image path
    :param channel: # image channels (1 = grayscale 'L', 3 = 'RGB')
    :return: PIL.Image in the requested mode
    """
    if channel == 1:
        return Image.open(path).convert('L')
    else:
        assert (channel == 3)
        return Image.open(path).convert('RGB')
SuperBruceJia/NLNet-IQA
Cross Database Evaluations/lib/make_index.py
make_index.py
py
2,658
python
en
code
8
github-code
6
[ { "api_name": "PIL.Image.open", "line_number": 75, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 75, "usage_type": "name" }, { "api_name": "PIL.Image.open", "line_number": 78, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number"...
14471424043
'''
There are N network nodes, labelled 1 to N.

Given times, a list of travel times as directed edges times[i] = (u, v, w), where u is the source node, v is the target node, and w is the time it takes for a signal to travel from source to target.

Now, we send a signal from a certain node K. How long will it take for all nodes to receive the signal? If it is impossible, return -1.

Example 1:

Input: times = [[2,1,1],[2,3,1],[3,4,1]], N = 4, K = 2
Output: 2


Note:

N will be in the range [1, 100].
K will be in the range [1, N].
The length of times will be in the range [1, 6000].
All edges times[i] = (u, v, w) will have 1 <= u, v <= N and 0 <= w <= 100.
'''
# FIX: `List` is used in the annotations below but was never imported,
# which raises NameError when the class bodies are executed.
from typing import List

# NOTE: the three Solution classes below are alternative implementations;
# each redefinition replaces the previous one, so the DFS version is the
# one bound to the name `Solution` after import.

# Dijkstra's heap O(E logE) time and O(N + E) space
from collections import defaultdict
from heapq import *


class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        graph = defaultdict(list)
        for u, v, w in times:
            graph[u].append((w, v))

        pq = [(0, K)]
        dist = {}
        while pq:
            d, node = heappop(pq)
            if node in dist:
                continue
            dist[node] = d
            for d2, nei in graph[node]:
                if nei not in dist:
                    heappush(pq, (d + d2, nei))

        return max(dist.values()) if len(dist) else -1


# Dijkstra's basic O(N**2 + E) and O(N + E) space
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        graph = defaultdict(list)
        for u, v, w in times:
            graph[u].append((w, v))

        dist = {node: float('inf') for node in range(1, N + 1)}
        seen = [False] * (N + 1)
        dist[K] = 0

        while True:
            # Pick the unseen node with the smallest tentative distance.
            cand_node = -1
            cand_dist = float('inf')
            for i in range(1, N + 1):
                if not seen[i] and dist[i] < cand_dist:
                    cand_dist = dist[i]
                    cand_node = i

            if cand_node < 0:
                break

            seen[cand_node] = True
            for d, nei in graph[cand_node]:
                dist[nei] = min(dist[nei], dist[cand_node] + d)

        ans = max(dist.values())
        return ans if ans < float('inf') else -1


# dfs O(N**N + ElogE) time and O(N + E) space
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        graph = defaultdict(list)
        for u, v, w in times:
            graph[u].append((w, v))

        dist = {node: float('inf') for node in range(1, N + 1)}

        def dfs(node, elapsed):
            # Prune paths that cannot improve the best known arrival time.
            if elapsed >= dist[node]:
                return
            dist[node] = elapsed
            for time, nei in sorted(graph[node]):
                dfs(nei, elapsed + time)

        dfs(K, 0)
        ans = max(dist.values())
        return ans if ans < float('inf') else -1
loganyu/leetcode
problems/743_network_delay_time.py
743_network_delay_time.py
py
3,027
python
en
code
0
github-code
6
[ { "api_name": "collections.defaultdict", "line_number": 32, "usage_type": "call" }, { "api_name": "collections.defaultdict", "line_number": 55, "usage_type": "call" }, { "api_name": "collections.defaultdict", "line_number": 86, "usage_type": "call" } ]
31263506941
"""Scrape 99acres property listings with Selenium and write them to a CSV."""
from selenium import webdriver
from ordered_set import OrderedSet
import time
import os
import csv

os.system("cls")  # Windows-only: clear the console

# FIX: files handed to csv.writer must be opened with newline='' per the csv
# module docs; otherwise every row is followed by a blank line on Windows.
f = open("99acres.csv", "w", encoding='utf-8', newline='')
csv_writer = csv.writer(f)
csv_writer.writerow(['Project', 'specification', 'area', 'Value'])

driver = webdriver.Chrome('D:/virtualenvs_muthu/selenium_twitter/chromedriver_win32/chromedriver.exe')
os.system("cls")

# Collect the raw text of each listing tuple, de-duplicated but in order.
scroll_list = OrderedSet()
for x in range(1, 3):
    driver.get(f"https://www.99acres.com/property-in-hadapsar-pune-ffid-page-{x}")
    try:
        mutiple_properties = driver.find_elements_by_class_name('srpTuple__tupleDetails')
        time.sleep(2)
        for elem in (mutiple_properties):
            scroll_list.add(elem.text)
    except Exception:  # FIX: narrowed from a bare except (which also traps Ctrl-C)
        continue

temp = list(scroll_list)
my_actual_list = []
for x in temp:
    xt = x.split("\n")
    print(xt)
    try:
        if xt != ['']:
            # Re-order the scraped fields into [project, spec, area, value].
            my_actual_list = [xt[2], xt[1], xt[4], xt[3]]
    except IndexError:  # FIX: narrowed — only the indexing above can fail here
        temp_i = temp.index(x)
        os.system("cls")
        print("previous:")
        print(temp[temp_i - 1])
        print("error:")
        print(xt)
    print(my_actual_list)
    csv_writer.writerow(my_actual_list)
    my_actual_list.clear()
f.close()
Muthu1612/Optimizing-real-estate-price-prediction-with-various-machine-learning-algorithms
Data extraction/extract_data.py
extract_data.py
py
1,450
python
en
code
1
github-code
6
[ { "api_name": "os.system", "line_number": 7, "usage_type": "call" }, { "api_name": "csv.writer", "line_number": 11, "usage_type": "call" }, { "api_name": "selenium.webdriver.Chrome", "line_number": 14, "usage_type": "call" }, { "api_name": "selenium.webdriver", ...
18598085905
from __future__ import annotations

from .base import BaseDAO

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from dto import PlayerDTO


class PlayerDAO(BaseDAO):
    """Data-access object for player rows and their linked profile rows."""

    def create(self, data: PlayerDTO) -> None:
        """Insert the player's profile first, then the player row referencing it."""
        self._db_gateway.cursor.execute("INSERT INTO profiles (phone, username, description, age, height, weight) "
                                        "VALUES (?, ?, ?, ?, ?, ?);",
                                        (data.profile.phone, data.profile.username, data.profile.description,
                                         data.profile.age, data.profile.height, data.profile.weight))
        # lastrowid of the profile INSERT links the new player to its profile.
        profile_id = self._db_gateway.cursor.lastrowid
        self._db_gateway.cursor.execute("INSERT INTO players (email, name, surname, country_id, sport_type_id, "
                                        "team_id, profile_id) VALUES (?, ?, ?, ?, ?, ?, ?);",
                                        (data.email, data.name, data.surname, data.country_id, data.sport_type_id,
                                         data.team_id, profile_id))
        self._db_gateway.connection.commit()

    def get_ids_list(self) -> list[int]:
        """Return all player ids.

        NOTE(review): fetchall() returns a list of 1-tuples, not plain ints —
        the list[int] annotation may be inaccurate; verify against callers.
        """
        result = self._db_gateway.cursor.execute("SELECT id FROM players;")
        return result.fetchall()

    def get_list(self) -> list[tuple]:
        """Return (id, name, surname, age, country name, team name) for every player."""
        result = self._db_gateway.cursor.execute("SELECT players.id, players.name, players.surname, profiles.age, countries.name, teams.name "
                                                 "FROM players JOIN profiles ON players.profile_id = profiles.id JOIN countries ON "
                                                 "players.country_id = countries.id JOIN teams ON players.team_id = teams.id;")
        return result.fetchall()
pyteacher123/py35-onl
sports_db_project/data_access/dao/player.py
player.py
py
1,747
python
en
code
2
github-code
6
[ { "api_name": "typing.TYPE_CHECKING", "line_number": 4, "usage_type": "name" }, { "api_name": "base.BaseDAO", "line_number": 8, "usage_type": "name" }, { "api_name": "dto.PlayerDTO", "line_number": 9, "usage_type": "name" } ]
32149738267
# Upload BOJ Gold-3 Topological Sorting & Graph 2252 (line people up)
# https://velog.io/@kimdukbae/%EC%9C%84%EC%83%81-%EC%A0%95%EB%A0%AC-Topological-Sorting
# reference link

from collections import deque

# N students, M ordering constraints; edge A -> B means A stands before B.
N, M = map(int, input().split())
graph = [[] for _ in range(N + 1)]
indegree = [0] * (N + 1)
result = []

for _ in range(M):
    A, B = map(int, input().split())
    graph[A].append(B)
    indegree[B] += 1


def topology_sort():
    """Kahn's algorithm: repeatedly emit nodes whose in-degree has dropped to zero."""
    # Seed the queue with every node that has no prerequisites.
    queue = deque(node for node in range(1, N + 1) if indegree[node] == 0)
    while queue:
        current = queue.popleft()
        result.append(current)
        # Releasing `current` lowers the in-degree of its successors.
        for successor in graph[current]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                queue.append(successor)


topology_sort()
print(*result)
HS980924/Algorithm
src/14.그래프/B#2252_줄세우기.py
B#2252_줄세우기.py
py
808
python
en
code
2
github-code
6
[ { "api_name": "collections.deque", "line_number": 17, "usage_type": "call" } ]
71992716667
# -*- coding: utf-8 -*-
# python3
"""TCP collector for Avaya CDR records: receives fixed-width (102-byte)
records over a socket and stores them via a PostgreSQL function."""
from loguru import logger
import time
import socket
import re
import psycopg2
from psycopg2 import Error
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import time  # NOTE(review): duplicate import of `time` — harmless but redundant

logger.add("/var/log/cdr_collector/cdr_collector_{time:DD-MM-YYYY}.log",
           format="{time:DD-MM-YYYY at HH:mm:ss} {level} {message}",
           rotation="20 MB", compression="zip")

# Listening server socket on all interfaces, port 9000.
serv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto=0)
serv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv_sock.bind(('', 9000))
serv_sock.listen(10)


def recieve_msg(client_sock):
    """Read from the client socket and return a list of decoded 102-byte
    CDR records, or False when the client disconnects (0 bytes read).

    Frames shorter than 102 bytes that are not empty are treated as the
    ACM greeting/keepalive and only logged.
    """
    while True:
        len_msg = 102                      # fixed CDR record length, bytes
        len_msg_socket = len_msg * 9000    # receive buffer: up to 9000 records
        valid_msg = []
        msg = client_sock.recv(len_msg_socket)  # .decode("utf-8") # I got the error when I put server.recv
        if len(msg) == len_msg:
            # Exactly one record.
            logger.debug(f"Received {len(msg)} byte")
            # NOTE(review): start_time is a module-level global set here and
            # read in the disconnect branch; if a client disconnects before
            # ever sending a full record, that branch raises NameError.
            global start_time
            start_time = time.time()
            msg = msg.decode("utf-8")
            valid_msg.append(msg)
            msg = valid_msg
            return msg
        elif len(msg) > len_msg:
            # Several records in one read: slice into 102-byte chunks and
            # keep only the complete ones (a trailing partial is dropped).
            logger.debug(f"Received {len(msg)} byte")
            i = r"b'\d{9}"  # NOTE(review): unused leftover regex; `i` is rebound below
            ls = [msg[i:i + len_msg] for i in range(0, len(msg), len_msg)]
            for i in ls:
                if len(i) == len_msg:
                    i = i.decode("utf-8")
                    valid_msg.append(i)
            msg = valid_msg
            return msg
            break  # NOTE(review): unreachable — follows a return
        elif len(msg) == 0:
            # Zero-byte read means the peer closed the connection.
            logger.info(f"Received {len(msg)} byte client DISCONNECTED!!!")
            # NOTE(review): this yields a negative duration (start - now);
            # presumably `time.time() - start_time` was intended — verify.
            timing = start_time - time.time()
            logger.info(f"Время обработки = {timing}")
            return False
            break  # NOTE(review): unreachable — follows a return
        else:
            # Short non-empty frame: the ACM connect banner with a timestamp.
            logger.info(f"ACM is CONNECTED, time {msg.decode('utf-8')}")


def start(serv_sock):
    """Accept clients one at a time and feed their records into the database."""
    serv_sock.listen(1)
    while True:
        logger.info(f"waiting connection, socket is OPEN!!!")
        client_sock, client_addr = serv_sock.accept()
        logger.info(f"Connected to socket by, {client_addr}")
        while True:
            msg = recieve_msg(client_sock)
            if msg == False:
                client_sock.close()
                break
            else:
                put_to_db(msg)
        logger.info(f"Socket connection by {client_addr} is CLOSED!!!")
        client_sock.close()


def put_to_db(msg):
    """Pass the list of decoded records to the cdr_unformatted_func_py
    PostgreSQL function as a VARIADIC text array."""
    try:
        # Connect to the existing database.
        connection = psycopg2.connect(user="user",
                                      # password chosen at PostgreSQL installation time
                                      password="password",
                                      host="127.0.0.1",
                                      port="5432")
        connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        # Cursor for executing database operations.
        cursor = connection.cursor()
        #sql_insert_query = f"SELECT cdr_unformatted_func_py({tst})"
        # !r keeps the Python repr so each record is passed as a quoted string.
        sql_insert_query = f"SELECT cdr_unformatted_func_py(VARIADIC ARRAY{msg!r})"
        cursor.execute(sql_insert_query)
        connection.commit()
    except (Exception, Error) as error:
        logger.error(f"Error at work PostgreSQL, {error}")
    finally:
        # NOTE(review): if connect() itself failed, `connection` is unbound
        # here and this finally-block raises NameError — verify.
        if connection:
            cursor.close()
            connection.close()
            logger.debug(f"Data length={len(msg)} has been successfully written to the database")


start(serv_sock)
jgnom/avaya_cdr_collector
main.py
main.py
py
3,660
python
en
code
0
github-code
6
[ { "api_name": "loguru.logger.add", "line_number": 14, "usage_type": "call" }, { "api_name": "loguru.logger", "line_number": 14, "usage_type": "name" }, { "api_name": "socket.socket", "line_number": 17, "usage_type": "call" }, { "api_name": "socket.AF_INET", "l...
6365552957
import threading from flask import Flask, jsonify, Response import asyncio from camera import Camera from websocketServer import VideoWs from flask_cors import CORS from config import ip, http_port from queue import Queue app = Flask(__name__) CORS(app) camera = Camera() event = threading.Event() queue = Queue() @app.route('/open') def open_camera(): open_ws_conn() dic = { 'code': 1, 'msg': 'open camera success' } return jsonify(dic) @app.route('/close') def close_camera(): camera.close_camera() return jsonify({ 'code': 1 }) def open_ws_conn(): openWs() def run(): print('执行') app.run(host=ip, port=http_port) def main(): flask_thread = threading.Thread(target=run, name='flask thread') flask_thread.start() def openWs(): t = threading.Thread(target=openws1, name='ws thread', args=(1, event, queue)) t1 = threading.Thread(target=openVideo1, name='video thread', args=(1, event, queue)) t.start() t1.start() def openws1(args, event, queue): openws2(args, event, queue).send(None) def openws2(args, event, queue): asyncio.run(VideoWs.start(args, event, queue)) def openVideo1(args, event, queue): openVideo(args, event, queue).send(None) def openVideo(args, event, queue): asyncio.run(camera.open_camera(args, event, queue)) if __name__ == '__main__': main()
l5769389/py-vue-ws-camera
router.py
router.py
py
1,400
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 10, "usage_type": "call" }, { "api_name": "flask_cors.CORS", "line_number": 11, "usage_type": "call" }, { "api_name": "camera.Camera", "line_number": 12, "usage_type": "call" }, { "api_name": "threading.Event", "line...
43356641346
#!/usr/bin/python3 from scipy import stats import numpy as np import matplotlib.pyplot as plt import sys NUM_TRIAL = 2000 def calcPlotData(numSamplings, percentiles, dist): plotXData = np.array([]) plotYData = np.array([]) for percentile in percentiles: tmpPlotXData = np.array([]) tmpPlotYData = np.array([]) for numSampling in numSamplings: scores = calcScoresAtPercentile(numSampling, percentile, dist) tmpPlotXData = np.append(tmpPlotXData, numSampling) tmpPlotYData = np.append(tmpPlotYData, np.std(scores, ddof=1)) plotXData = np.append(plotXData, tmpPlotXData) plotYData = np.append(plotYData, tmpPlotYData) return plotXData.reshape(len(percentiles), len(numSamplings)), plotYData.reshape(len(percentiles), len(numSamplings)) def calcScoresAtPercentile(numSampling, percentile, dist): scoresAtPercentile = np.array([]) for i in range(NUM_TRIAL): samples = dist.rvs(size=numSampling) scoresAtPercentile = np.append(scoresAtPercentile, stats.scoreatpercentile(samples, percentile)) return scoresAtPercentile def plot(title, plotXData, plotYData, percentiles): plt.clf() plt.figure(figsize=(15, 9)) plt.rcParams["font.size"] = 24 plt.xticks(np.arange(0, np.max(plotXData)+10, 10)) plt.grid() for i, x in enumerate(plotXData): plt.plot(x, plotYData[i], marker='o', label='percentile='+str(percentiles[i])) plt.title(title) plt.xlabel("The number of samples") plt.ylabel("Standard deviation") plt.legend() plt.savefig(title.replace(" ", "_").lower() + ".png", dpi=200) def main(): numSamplings = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] percentiles = [1, 10, 50, 90, 99] dists = { "Uniform distribution": stats.uniform(), "Normal distribution": stats.norm(), "Exponential distribution": stats.expon() } for distName, dist in dists.items(): print("dist: {}".format(distName)) plotXData, plotYData = calcPlotData(numSamplings, percentiles, dist) print(plotYData) plot(distName, plotXData, plotYData, percentiles) if __name__ == "__main__": main()
peng225/blog
230114/percentile.py
percentile.py
py
2,211
python
en
code
0
github-code
6
[ { "api_name": "numpy.array", "line_number": 11, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 12, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 14, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": ...
12161841216
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed May 26 14:10:45 2021 @author: manssandsjo """ import matplotlib.animation as ani import matplotlib.pyplot as plt import numpy as np import pandas as pd lstm_lw = pd.read_pickle('./pickle_graphs/totValueLSTM_LW.pkl') #.to_numpy() lstm_scm = pd.read_pickle('./pickle_graphs/totValueLSTM_SCM.pkl') #.to_numpy() sma_scm = pd.read_pickle('./pickle_graphs/totValueSMA_SCM.pkl') #.to_numpy() df = pd.DataFrame(columns=['SMA_SCM','LSTM_LW','LSTM_SCM']) df['LSTM_LW'] = lstm_lw.iloc[:,0] df['LSTM_SCM'] = lstm_scm.iloc[:,0] df['SMA_SCM'] = sma_scm.iloc[:,0] color = ['red', 'green', 'blue'] fig = plt.figure() plt.subplots_adjust(bottom = 0.2, top = 0.9) #ensuring the dates (on the x-axis) fit in the screen plt.ylabel('Total value of portfolio, SEK') plt.xlabel('Year') def buildmebarchart(i=int): plt.legend(df.columns) p = plt.plot(df[:i].index, df[:i].values) #note it only returns the dataset, up to the point i for i in range(0,3): p[i].set_color(color[i]) #set the colour of each curve anim = ani.FuncAnimation(fig, buildmebarchart, interval = 100) from matplotlib import rc # equivalent to rcParams['animation.html'] = 'html5' rc('animation', html='html5') anim
MansSandsjo/StockBot
src/Data/testGraphs.py
testGraphs.py
py
1,267
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_pickle", "line_number": 13, "usage_type": "call" }, { "api_name": "pandas.read_pickle", "line_number": 14, "usage_type": "call" }, { "api_name": "pandas.read_pickle", "line_number": 15, "usage_type": "call" }, { "api_name": "pandas.DataF...
21700191213
# Reference : https://textgears.com/api/ import requests text = input() text = text.replace(' ','+') url = "https://api.textgears.com/check.php?text=" + text + "&key=DEMO_KEY" resp = requests.get(url) resp = resp.json() if resp['result']: for err in resp['errors']: print("Error at position : ",err['offset']+1) print("Error : ",err['bad']) print("Possible corrections are : ") for corr in err['better']: print(corr) print() else: print(resp['description'])
kapoor-rakshit/pyfiddle
grammar.py
grammar.py
py
475
python
en
code
0
github-code
6
[ { "api_name": "requests.get", "line_number": 8, "usage_type": "call" } ]
3668384154
#!/usr/local/bin/python3 import boto3 networkAclId = 'acl-26cc1243' ignoreEntries = [100, 32767] session = boto3.Session() client = session.client('ec2') acls = client.describe_network_acls() def getMaxOfRuleNumbers(): result = [0] for acl in acls['NetworkAcls']: if acl['NetworkAclId'] == networkAclId: for entries in acl['Entries']: if entries['RuleNumber'] not in ignoreEntries: if entries['RuleAction'] == 'deny': result.append(entries['RuleNumber']) return max(result) def existEntry(cidrBlock): for acl in acls['NetworkAcls']: if acl['NetworkAclId'] == networkAclId: for entries in acl['Entries']: if entries['RuleNumber'] not in ignoreEntries: if entries['RuleAction'] == 'deny': if cidrBlock == entries['CidrBlock']: return True return False def createNetworkAclIngressEntry(ruleNumber, cidrBlock): params = {} params["NetworkAclId"] = networkAclId params["RuleNumber"] = ruleNumber params["Protocol"] = '-1' params["CidrBlock"] = cidrBlock params["Egress"] = False params["RuleAction"] = "DENY" client.create_network_acl_entry(**params) def blockIp(ip): ip = ip + '/32' if not existEntry(ip): maxId = getMaxOfRuleNumbers() maxId = maxId + 1 if maxId not in ignoreEntries: createNetworkAclIngressEntry(maxId, ip) print("BlockIP: %s" % ip) blockIp('174.129.214.250')
debeando/MyScripts
aws/aws_network_acl_block_ip.py
aws_network_acl_block_ip.py
py
1,606
python
en
code
1
github-code
6
[ { "api_name": "boto3.Session", "line_number": 8, "usage_type": "call" } ]
44914350526
#!/usr/bin/env python3 #/* # Terminal User input # Manual Mode where the coordinate and orientation variables are input # Doesn't use accelerometer #*/ # Import essential libraries import requests #type: ignore import numpy as np #type: ignore import imutils #type: ignore import time import math from datetime import datetime import adafruit_adxl34x # type: ignore import sys import cv2 #type: ignore import os import RPi.GPIO as GPIO #type: ignore import serial #type: ignore import matplotlib.pyplot as plt #type: ignore sys.path.append('/home/pi/Chromebook-projects/projects/proj_Hexclaw') from IK_module import * from h2_module import * from board import SCL, SDA # type: ignore import busio # type: ignore from adafruit_motor import servo # type: ignore from adafruit_servokit import ServoKit # type: ignore from adafruit_pca9685 import PCA9685 # type: ignore i2c = busio.I2C(SCL, SDA) pca = PCA9685(i2c) pca.frequency = 50 servo = [servo.Servo(pca.channels[0]), servo.Servo(pca.channels[1]), servo.Servo(pca.channels[2]), servo.Servo(pca.channels[3]), servo.Servo(pca.channels[4]), servo.Servo(pca.channels[5]), ] for i in range(6): servo[i].set_pulse_width_range(500, 2500) sendToServo(servo,[135,45,180,45,180,90],0,mode=0) time.sleep(1) GPIO.setmode(GPIO.BCM) # GPIO Numbers instead of board numbers ledRelay = 23 GPIO.setup(ledRelay, GPIO.OUT) # GPIO Assign mode GPIO.output(ledRelay, GPIO.LOW) # out GPIO.output(ledRelay, GPIO.HIGH) # on time.sleep(0.75) sendToServo(servo,[90,115,135,90,115,90],1,mode=2) if False: for _ in range(4): GPIO.output(ledRelay, False) time.sleep(0.03) GPIO.output(ledRelay, True) time.sleep(0.03) time.sleep(1.5) GPIO.output(ledRelay, False) time.sleep(0.25) GPIO.output(ledRelay, True) time.sleep(0.5) GPIO.output(ledRelay, False) time.sleep(0.1) GPIO.output(ledRelay, True) time.sleep(2) GPIO.output(ledRelay, False) print("------") time.sleep(2) GPIO.output(ledRelay, True) diagnostics = False if diagnostics: ard_port = '/dev/ttyUSB0' ser_arduino = 
serial.Serial(ard_port, 9600, timeout=0.1) ser_arduino.reset_input_buffer() x_Values = [] #time/seconds passed since start of program y0_Values = [] #voltage y1_Values = 6*[[]] #absolute rotation difference/jump for each *individual motor* y2_Values = [] #total absolute rotation difference (i.e. the "jump" in movement for that iteration) y3_Values = 6*[[]] #*estimated* stagnant load on each motor (Nm) GraphTitles = ["Voltage","new rotation","total new rotation","torque per motor"] Graph_yLabel = ["Voltage [V]","Angle [degrees]","Angle [degrees]","Torque [Nm]"] axisFilter = 0.7 #On the new value end xScaling, yScaling, zScaling = 0.8, 0.8, 1.2 brightVal = 75 diffCheck = 100 showImage = False globalPrint = True endAnglePrint = False firstAnglePrint = False posOption = '-' q = [0]*6 #NOTE: q = q[0] = servo[0] s = [0, 0, 0, 0, 0, 0, 0] #The variables that are sent to the servos zMax = 300 a, b, Y = toRadians(0), toRadians(-45), toRadians(90) PP = [0, 200, 200] coord = "" drawing = False # true if mouse is pressed buttonPressed = False x1,y1,x2,y2 = -1,-1,-1,-1 windowRes = (600,300) # mouse callback function def mouseTrack(event,x,y,flags,param): global x1,y1,x2,y2,drawing,buttonPressed,img,temp x2,y2 = x,y if drawing: img = np.zeros((windowRes[1],windowRes[0],3), np.uint8) if event == cv2.EVENT_LBUTTONDOWN: drawing = True x1,y1 = x2,y2 elif event == cv2.EVENT_MOUSEMOVE: buttonPressed = True if drawing == True: cv2.line(img,(0,y2),(windowRes[0],y2),(255,255,255),1) cv2.line(img,(x2,0),(x2,windowRes[1]),(255,255,255),1) cv2.circle(img,(x2,y2),10,(0,0,255),1) cv2.putText(img,"("+str(int(x2-windowRes[0]*0.5))+","+str(int(windowRes[1]-y2))+")",(x2+10,y2),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255)) cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),50,(255,255,255),1) cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),100,(255,255,255),1) cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),150,(255,255,255),1) 
cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),200,(255,255,255),1) cv2.putText(img,str(PP),(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255)) temp = img elif event == cv2.EVENT_LBUTTONUP: buttonPressed = False drawing = False img = temp #type: ignore # cv2.line(img,(0,y2),(windowRes[0],y2),(255,255,255),1) # cv2.line(img,(x2,0),(x2,windowRes[1]),(255,255,255),1) # cv2.circle(img,(x2,y2),10,(0,0,255),1) # cv2.putText(img,"("+str(int(x2-windowRes[0]*0.5))+","+str(int(windowRes[1]-y2))+")",(x2+10,y2),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255)) def main(): global PP, a, b, Y mode = 0 mod_code = "q4" print(" Different modes for tracking/moving:") print(" 0. Run movement commands from .dat file") print(" 1. Enter position and orientation in terminal") print(" 2. Mouse position tracking on window") print(" 3. Move end-effector in a pattern") option = input(" input: ") if option == "exit": return else: mode = int(option) # "under" = given < 0 # "over" = given < 180 servoExceeded = False whichServoExceeded = 6*[False] typeOfExceeded = 6*["null"] start_time = time.time() x = 1 # displays the frame rate every 1 second counter = 0 while True: # os.system("clear") print("\n ---Enter mode_(n) to change mode to (n)--- ") print(" - \"debug\" to enter debug-mod menu") print(" - \"mode_(n)\" to change mode to n") PP = [0, 200, 200] while True: isReachable = [True] if mode==0: tempInput_1 = input("Enter file path:") if tempInput_1 == "exit": return elif tempInput_1[:4] == "mode": mode=int(tempInput_1[5:]) else: movCommPath = tempInput_1 runFromFile(movCommPath, servo) break if mode==1: tempInput_1 = input("Enter coordinates [x y z] in mm: ").split() if tempInput_1[0] == "exit": return elif tempInput_1[0][:4] == "mode": mode=int(tempInput_1[0][5:]) elif tempInput_1[0] == "debug": debug_mod_menu(mod_dict) else: PP[0] = (float(tempInput_1[0])) # type: ignore PP[1] = (float(tempInput_1[1])) # type: ignore PP[2] = (float(tempInput_1[2])) # type: ignore break if mode==2: 
tempInput_1 = input("Enter z-value in mm: ") if tempInput_1 == "exit": return elif tempInput_1[:4] == "mode": mode=int(tempInput_1[5:]) elif tempInput_1 == "debug": debug_mod_menu(mod_dict) else: PP[2] = float(tempInput_1) # type: ignore break if mode==3: patternOpt = 1 print("Options:") print(" 0.run a custom program/course of motions") print(" 1.choose a pre-defined pattern from a dictionary") print(" 2.move end-effector along an axis") print(" 3.move end-effector orientation with a fixed position") tempInput_1 = input("input: ") if tempInput_1 == "exit": return elif tempInput_1[:4] == "mode": mode=int(tempInput_1[5:]) elif tempInput_1 == "debug": debug_mod_menu(mod_dict) else: patternOpt = int(tempInput_1) break if mode==1: tempInput_2 = input("Enter orientation values [a b Y] in degrees: ").split() a,b,Y = toRadians(float(tempInput_2[0])), toRadians(float(tempInput_2[1])), toRadians(float(tempInput_2[2])) if diagnostics: print("x:", PP[0], " y:", PP[1], " z:", PP[2], " a:", toDegrees(a), " b:", toDegrees(b), " Y:", toDegrees(Y), sep='') q = getAngles(PP,a,b,Y,'-', debug=mod_dict, positionIsReachable=isReachable) # print(q) print("read:",[toDegrees(q) for q in q], "posIsReachable:", isReachable) if isReachable[0]: # custom_sendToServo(servo,[toDegrees(angle) for angle in q],0,True) sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=2,printResult=True) elif mode==2: tempInput_2 = input("Enter orientation values [a b Y] in degrees: ").split() a,b,Y = toRadians(float(tempInput_2[0])), toRadians(float(tempInput_2[1])), toRadians(float(tempInput_2[2])) img = np.zeros((windowRes[1],windowRes[0],3), np.uint8) cv2.namedWindow('tracking_window') cv2.setMouseCallback('tracking_window',mouseTrack) temp = img print("\n 'Esc' - change z-value and orientation\n") while True: cv2.imshow('tracking_window', img) #use this if the previous drawings are not to be used k = cv2.waitKey(1) & 0xFF if k == 27: break elif k == 119: PP[2]+=10 #type: ignore elif k 
== 115: PP[2]-=10 #type: ignore if drawing: PP[0], PP[1] = x2-windowRes[0]*0.5,windowRes[1]-y2 # type: ignore q = getAngles(PP,a,b,Y,'-',positionIsReachable=isReachable, debug=mod_dict) # print(q) if isReachable[0]: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=0) counter+=1 if (time.time() - start_time) > x : print("FPS: ", counter / (time.time() - start_time)) counter = 0 start_time = time.time() cv2.destroyAllWindows() elif mode==3: if patternOpt==0: #type: ignore print("Pick any of these programs") for key,_ in mov_Programs.items(): print(" - \"",key,"\"", sep='') key = input("input the program name: ") if key=="exit": break presetAngles = 6*[0] for joint in range(6): presetAngles[joint] = servo[joint].angle / constants_q[joint]["fixed"] if key=="axisTest" and diagnostics: mov_Programs[key]( servo, [diagnostics,x_Values, [y0_Values,y1_Values,y2_Values,y3_Values],ser_arduino] ) else: mov_Programs[key](servo) sendToServo(servo,presetAngles,0,mode=2) if diagnostics and key=="axisTest": fig = plt.figure(figsize=(19, 6)) ax = [0,0,0,0] y_Values = [y0_Values,y1_Values,y2_Values,y3_Values] for axis in range(len(ax)): ax[axis] = fig.add_subplot(1,4,axis+1) #type: ignore ax[axis].set_xlim(0,round(x_Values[-1])) #type: ignore ax[axis].set_title(GraphTitles[axis]) #type: ignore ax[axis].set_xlabel("time [seconds]") #type: ignore ax[axis].set_ylabel(Graph_yLabel[axis]) #type: ignore ax[axis].grid() #type: ignore if axis==1 or axis==3: for j in range(6): ax[axis].plot(x_Values,y_Values[axis][j],linestyle='solid',label='q'+str(j+1)) #type: ignore else: ax[axis].plot(x_Values,y_Values[axis],linestyle='solid') #type: ignore if axis==0: ax[axis].axhline( #type: ignore y=(sum(y_Values[axis])/len(y_Values[axis])), color='red',linestyle='-', label=f"avg:{round(sum(y_Values[axis])/len(y_Values[axis]))}V" ) ax[axis].legend(loc='upper right',framealpha=0.3) #type: ignore fig.tight_layout(pad=5.0) fig.suptitle("") currentDate = 
str(datetime.now()).replace(" ", ";") relativePath = "/home/pi/Chromebook-projects/projects/proj_Hexclaw/hexclaw_files/voltage_readings/media/" fileTitle = "mov_Voltage_" plt.savefig(relativePath+fileTitle+currentDate+".png") #type: ignore plt.show() if patternOpt==1: #type: ignore print("Pick any of these patterns") for key,_ in mov_Patterns.items(): print(" - \"",key,"\"", sep='') key = input("input a key:") if key=="exit": break for i in range(len(mov_Patterns[key])): q = getAngles( [mov_Patterns[key][i][0],mov_Patterns[key][i][1],mov_Patterns[key][i][2]], toRadians(mov_Patterns[key][i][3]), toRadians(mov_Patterns[key][i][4]), toRadians(mov_Patterns[key][i][5]), '-', positionIsReachable=isReachable, debug=mod_dict ) print(mov_Patterns[key][i]) if isReachable[0]: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=2) time.sleep(1) elif patternOpt==2: #type: ignore axis = input("\nEnter what axis to move [x, y or z] [unit: mm]:") if axis=="exit": break orientToUse = input("\nEnter orientation for axis test [a, b and Y]:").split() fullPos = [0,link[4]+link[5]+120,150] presetAngles = [0,0,0,0,0,0] for joint in range(6): presetAngles[joint] = servo[joint].angle / constants_q[joint]["fixed"] prevSent=False for direction in range(1, -2, -2): for pos in range(-200, 200): if axis == "x": fullPos[0] = direction*pos*0.75 #type: ignore #300 if axis == "y": fullPos[1] = direction*pos*0.5+100 #type: ignore #200 if axis == "z": fullPos[2] = direction*pos*0.5+200 #type: ignore #200 q = getAngles( fullPos, toRadians(int(orientToUse[0])),toRadians(int(orientToUse[1])),toRadians(int(orientToUse[2])), '-', positionIsReachable=isReachable, debug=mod_dict ) if isReachable[0]: if prevSent: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=0) else: sendToServo(servo,[toDegrees(joint) for joint in q],2,useDefault=True,mode=2) time.sleep(1) prevSent=True if axis == "x": time.sleep(0.005) else: time.sleep(0.001) time.sleep(1.5) 
sendToServo(servo,presetAngles,0,mode=2) elif patternOpt==3: #type: ignore orientToUse = input("\nEnter what orientation to test [a, b or Y] [unit: degrees]:") posToUse = input("\nEnter coordinate for position test [x, y and z]:").split() for _ in range(3): posToUse[_] = int(posToUse[_]) #type: ignore fullOrient = [0,0,0] presetAngles = [0,0,0,0,0,0] for joint in range(6): presetAngles[joint] = servo[joint].angle / constants_q[joint]["fixed"] for direction in range(1, -2, -2): for angle in range(-90, 90): if orientToUse == "a": fullOrient[0] = direction*angle if orientToUse == "b": fullOrient[1] = direction*angle if orientToUse == "Y": fullOrient[2] = direction*angle q = getAngles(posToUse,toRadians(fullOrient[0]),toRadians(int(fullOrient[1])),toRadians(int(fullOrient[2])), '-',positionIsReachable=isReachable, debug=mod_dict ) if isReachable[0]: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=0) time.sleep(0.01) time.sleep(1.5) sendToServo(servo,presetAngles,0,mode=2) # input("\npaused. Press enter to continue...") if __name__ == "__main__": main() sendToServo(servo,[135,45,180,45,180,90],1,mode=2) GPIO.output(ledRelay, False) pca.deinit()
Quark3e/Chromebook-projects
projects/proj_Hexclaw/in rpi/Hexclaw_Main_2.py
Hexclaw_Main_2.py
py
17,353
python
en
code
2
github-code
6
[ { "api_name": "sys.path.append", "line_number": 24, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 24, "usage_type": "attribute" }, { "api_name": "busio.I2C", "line_number": 36, "usage_type": "call" }, { "api_name": "board.SCL", "line_number"...
20955203674
from selenium import webdriver from selenium.webdriver.common.by import By import time # from selenium.webdriver.common.keys import Keys # from selenium.webdriver.support import expected_conditions as EC # from selenium.webdriver.support.ui import WebDriverWait def Scrape(today): # setup chrome_options = webdriver.ChromeOptions() chrome_options.add_argument("--mute-audio") driver = webdriver.Chrome(options=chrome_options) driver.get(f"https://nytcrosswordanswers.org/nyt-crossword-answers-{today}/") print(driver.title) time.sleep(3) uls = driver.find_elements(By.TAG_NAME, "ul") across = uls[1].find_elements(By.TAG_NAME, "li") down = uls[2].find_elements(By.TAG_NAME, "li") across_clues = [] across_answers = [] for group in across: clue = group.find_element(By.TAG_NAME, "a").get_attribute('innerText') answer = group.find_element(By.TAG_NAME, "span").get_attribute('innerText') across_clues.append(clue) across_answers.append(answer) down_clues = [] down_answers = [] for group in down: clue = group.find_element(By.TAG_NAME, "a").get_attribute('innerText') answer = group.find_element(By.TAG_NAME, "span").get_attribute('innerText') down_clues.append(clue) down_answers.append(answer) driver.quit() return across_clues, across_answers, down_clues, down_answers
colin-lankau/Crossword-Practice-Tool
Scrape.py
Scrape.py
py
1,416
python
en
code
0
github-code
6
[ { "api_name": "selenium.webdriver.ChromeOptions", "line_number": 11, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 11, "usage_type": "name" }, { "api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call" }, { "api...
71750331067
from flask import Flask, render_template, request, session, redirect, url_for from flask_session import Session import sqlite3 from ast import literal_eval import datetime app = Flask(__name__) # Set the secret key to some random bytes. Keep this really secret! app.secret_key = b'_5#y2L"F4Q8z\n\xec]/' @app.route('/') def index(): if session.get("user_id") is None: session["user_id"]=[] #Get settings conn = sqlite3.connect('my_database.db') settings = conn.execute("select start,stop from settings order by rowid DESC limit 1").fetchone() try: start = literal_eval(settings[0]) stop = literal_eval(settings[1]) start = datetime.time(start[0],start[1],start[2]) stop = datetime.time(stop[0],stop[1],stop[2]) recent_settings = (str(start),str(stop)) conn.close() print(recent_settings) return render_template('index.html', username=session["user_id"], settings=recent_settings) except: return render_template('index.html', username=session["user_id"]) @app.route('/login', methods=["POST", "GET"]) def login(): if session.get("user_id") is None: session["user_id"]=[] if request.method == 'POST': conn = sqlite3.connect('my_database.db') username = request.form.get('username') # access the data inside password = request.form.get('password') if conn.execute("SELECT * FROM users WHERE user=?", (username,)).fetchone() is None: return render_template("login.html", message="Username not found.", username=session["user_id"]) else: # Username exists, check pw user = conn.execute("SELECT * FROM users where user=?", (username,)).fetchone() print(user, user[0], user[1]) if user[1] == password: session["user_id"] = user[0] print(session["user_id"],'test') return render_template("login.html", message=f"Logged in as {user[0]}.", username=session["user_id"]) else: return render_template("login.html", message="Incorrect password, try again", username=session["user_id"]) else: return render_template("login.html", username=session["user_id"]) @app.route('/update', methods=["POST", "GET"]) def 
update_settings(): if session.get("user_id") is None: session["user_id"]=[] #Get current settings conn = sqlite3.connect('my_database.db') message = '' if request.method == 'POST': start_time = request.form.get('start_time').split(':') # access the data inside stop_time = request.form.get('stop_time').split(':') print(start_time) try: start_time = [int(i) for i in start_time] stop_time = [int(i) for i in stop_time] conn.execute("insert into settings(start, stop) values (?,?)", (str(start_time), str(stop_time))) conn.commit() message="Updated successfully" times = (start_time,stop_time) except: message = "Failed to split into datetime format, make sure to use format HH:MM:SS" settings = conn.execute("select start,stop from settings order by rowid DESC limit 1").fetchone() try: start = literal_eval(settings[0]) stop = literal_eval(settings[1]) start = datetime.time(start[0],start[1],start[2]) stop = datetime.time(stop[0],stop[1],stop[2]) current_settings = (str(start),str(stop)) print(current_settings) conn.close() return render_template("update_settings.html", username=session["user_id"], settings=current_settings, message=message) except: return render_template("update_settings.html", username=session["user_id"], message=message) @app.route('/logout') def logout(): # remove the username from the session if it's there session.pop('user_id', None) return redirect(url_for('index')) if __name__ == '__main__': app.run(host='127.0.0.1', port=8000, debug=True)
mason-landry/lighttimer
lighttimer_app/app.py
app.py
py
4,048
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 7, "usage_type": "call" }, { "api_name": "flask.session.get", "line_number": 15, "usage_type": "call" }, { "api_name": "flask.session", "line_number": 15, "usage_type": "name" }, { "api_name": "flask.session", "line_...
8665774694
import os import json from botocore.exceptions import ClientError from unittest import TestCase from unittest.mock import patch from exceptions import YahooOauthError from login_yahoo_authorization_url import LoginYahooAuthorizationUrl class TestLoginYahooAuthorizationUrl(TestCase): @classmethod def setUpClass(cls): os.environ['YAHOO_CLIENT_ID'] = 'fake_yahoo_consumer_key' os.environ['YAHOO_SECRET'] = 'fake_yahoo_consumer_secret' os.environ['YAHOO_OAUTH_CALLBACK_URL'] = 'http://localhost' def test_exec_main_ok(self): with patch('login_yahoo_authorization_url.YahooUtil') as yahoo_mock: yahoo_mock.return_value.get_authorization_url.return_value = 'oauth_url' response = LoginYahooAuthorizationUrl({}, {}).main() self.assertEqual(response['statusCode'], 200) self.assertEqual( json.loads(response['body']), {'url': 'oauth_url'} ) def test_exec_main_ng_with_clienterror(self): with patch('login_yahoo_authorization_url.YahooUtil') as yahoo_mock: yahoo_mock.return_value.generate_auth_url.side_effect = ClientError( {'Error': {'Code': 'UserNotFoundException'}}, 'operation_name' ) response = LoginYahooAuthorizationUrl({}, {}).main() self.assertEqual(response['statusCode'], 500) self.assertEqual( json.loads(response['body']), {'message': 'Internal server error: LoginYahooAuthorizationUrl'} ) def test_exec_main_ng_with_yahoo(self): with patch('login_yahoo_authorization_url.YahooUtil') as yahoo_mock: yahoo_mock.return_value.generate_auth_url.side_effect = YahooOauthError( endpoint='http://example.com', status_code=400, message='error' ) response = LoginYahooAuthorizationUrl({}, {}).main() self.assertEqual(response['statusCode'], 500) self.assertEqual( json.loads(response['body']), {'message': 'Internal server error: LoginYahooAuthorizationUrl'} )
AlisProject/serverless-application
tests/handlers/login/yahoo/authorization_url/test_login_yahoo_authorization_url.py
test_login_yahoo_authorization_url.py
py
2,213
python
en
code
54
github-code
6
[ { "api_name": "unittest.TestCase", "line_number": 10, "usage_type": "name" }, { "api_name": "os.environ", "line_number": 13, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 14, "usage_type": "attribute" }, { "api_name": "os.environ", "l...
72528403069
import os, csv from random import sample import nltk as nlp import pandas as pd import matplotlib.pyplot as plt sampleSize = 200 sampleTimes = 50 TTR = [] years =[] # Iterate through word count/list file with open('wordCountsNLTK.csv', 'r', encoding="ISO-8859-1") as csvFile: reader = csv.reader(csvFile) next(reader) for row in reader: tokens = nlp.word_tokenize(row[2]) TTRSampled =[] print(row[0] + " " + row[1] + " " + "Population: " + str(len(tokens))) if len(tokens) > sampleSize: for i in range(sampleTimes): tokensSample = sample(tokens,sampleSize) types = nlp.Counter(tokensSample) TTRSampled.append(len(types)/len(tokensSample)*100) years.append(row[0]) TTR.append(sum(TTRSampled)/sampleTimes) else: pass # Store values into PD dataframe and plot average word count values dfTTR = pd.DataFrame({"Year": years, "TTR": TTR}) print(dfTTR) dfTTR.groupby(["Year"]).mean().plot() plt.xlabel('Year', fontsize=15) plt.ylabel('TTR', fontsize=15) plt.ylim([30, 90]) plt.title("Sampled Type Token Ratio per year") plt.show()
stkeller/Replication-Thesis
Code/LexicalTTRSampling.py
LexicalTTRSampling.py
py
1,075
python
en
code
0
github-code
6
[ { "api_name": "csv.reader", "line_number": 14, "usage_type": "call" }, { "api_name": "nltk.word_tokenize", "line_number": 18, "usage_type": "call" }, { "api_name": "random.sample", "line_number": 25, "usage_type": "call" }, { "api_name": "nltk.Counter", "line_...
23559591731
import numpy as np import matplotlib.pyplot as plt N = 1000 #Nombre de tirages X = np.random.rand(N) #Tirages independants de la loi uniforme sur [0,1] ##################################### # But: calculer la suite des moyennes # empiriques pour n de 1 a N ##################################### ## Calcul de la moyenne empirique: ## avec une boucle ##################################### sumEmp = X[0] moyenneEmp = X[0] * np.ones(N) for i in range(1,N): sumEmp = sumEmp + X[i] moyenneEmp[i] = sumEmp / (i+1) ###################################### ### Calcul de la moyenne empirique: ### avec des fonctions numpy ###################################### integers1toN = np.arange(1,N+1) #Un array contenant les entiers de 1 a N moyenneEmp = np.cumsum(X) / integers1toN ###################################### ## Affichage ###################################### plt.plot(integers1toN, moyenneEmp, color="b", label="Moyenne empirique") plt.axhline(0.5, color="r", label="Esperance") plt.legend(loc="best") plt.show()
FanJiang718/Courses-Exercises
MAP556/TP1/TP1 Python solutions-20181211/MAP556_PC1_Exo1_1_sol.py
MAP556_PC1_Exo1_1_sol.py
py
1,073
python
fr
code
0
github-code
6
[ { "api_name": "numpy.random.rand", "line_number": 5, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 5, "usage_type": "attribute" }, { "api_name": "numpy.ones", "line_number": 17, "usage_type": "call" }, { "api_name": "numpy.arange", "line...
73307642428
import pandas as pd import psycopg2 as ps import datetime as dt from datetime import datetime from dateutil import tz from config import POSTGRES_DBNAME, POSTGRES_PASSWORD, POSTGRES_USERNAME, POSTGRES_PORT, POSTGRES_ADDRESS, API_KEY """ Functions for Flask App """ # dict with available exchange/trading pair for trade predictions exchanges={'bitfinex': ['btc_usd', 'eth_usd', 'ltc_usd'], 'coinbase_pro': ['btc_usd', 'eth_usd', 'ltc_usd'], 'hitbtc': ['btc_usdt', 'eth_usdt', 'ltc_usdt']} # Insert DB Credentials - Don't push to GH credentials = {'POSTGRES_ADDRESS': POSTGRES_ADDRESS, 'POSTGRES_PORT': POSTGRES_PORT, 'POSTGRES_USERNAME': POSTGRES_USERNAME, 'POSTGRES_PASSWORD': POSTGRES_PASSWORD, 'POSTGRES_DBNAME': POSTGRES_DBNAME, 'API_KEY': API_KEY } # dictionary used to rename column values with correct time period model_periods = {'bitfinex_ltc_usd': '1440T', 'bitfinex_btc_usd':'1200T', 'bitfinex_eth_usd': '1200T', 'hitbtc_ltc_usdt': '1440T', 'hitbtc_btc_usdt': '360T', 'hitbtc_eth_usdt': '1440T', 'coinbase_pro_btc_usd': '960T', 'coinbase_pro_eth_usd': '960T', 'coinbase_pro_ltc_usd': '960T'} def create_conn(credentials): """ Function that creates a connection with DB """ # creating connection conn = ps.connect(host=credentials['POSTGRES_ADDRESS'], database=credentials['POSTGRES_DBNAME'], user=credentials['POSTGRES_USERNAME'], password=credentials['POSTGRES_PASSWORD'], port=credentials['POSTGRES_PORT']) # creating cursor cur = conn.cursor() return conn, cur def retrieve_one_trp(exchange, trading_pair, model_periods): """ Retrieve the last trade recommender prediction from DB """ # to retrieve period of trp model period = model_periods[exchange + '_' + trading_pair] # create connection and cursor conn, cur = create_conn(credentials) # Change limit number to whatever amount of rows you want to retrieve cur.execute("""SELECT * FROM prediction.trp WHERE exchange = '{exchange}' AND trading_pair = '{trading_pair}' ORDER by p_time desc limit 
1;""".format(trading_pair=trading_pair, exchange=exchange)) result = cur.fetchall() # creates dataframe from results and rename columns result = pd.DataFrame(result) result = result.rename( columns={0: 'p_time', 1: 'c_time', 2: 'exchange', 3: 'trading_pair', 4: 'prediction'}) # get the one value in p_time pt = result['p_time'].values # get UTC timezone from_zone = tz.gettz('UTC') # get PST timezone to_zone = tz.gettz('US/Pacific') utc = datetime.strptime(str(pt[0]), '%Y-%m-%d %H:%M:%S') utc = utc.replace(tzinfo=from_zone) pcf = utc.astimezone(to_zone) # returning new timezone value to the df result['p_time'] = str(pcf)[:-6] # creating df to dictionary result = result.to_dict() # add the new period key and value to dictionary result.update({'period': period}) # close connection conn.close() return result # This is for retrieving arbitrage predictions from DB def retrieve_one_arb(exchange_1, exchange_2, trading_pair): ''' Retrieve the last arbitrage prediction from DB ''' # create connection and cursor conn, cur = create_conn(credentials) try: cur.execute("""SELECT * FROM prediction.arp WHERE exchange_1 = '{exchange_2}' AND exchange_2 = '{exchange_1}' AND trading_pair = '{trading_pair}' OR exchange_1 = '{exchange_1}' AND exchange_2 = '{exchange_2}' AND trading_pair = '{trading_pair}' ORDER by p_time desc limit 1;""".format(trading_pair=trading_pair, exchange_2=exchange_2, exchange_1=exchange_1)) result = cur.fetchall() # creates dataframe from results and rename columns result = pd.DataFrame(result) result = result.rename( columns={0: 'p_time', 1: 'c_time', 2: 'exchange_1', 3: 'exchange_2', 4: 'trading_pair', 5: 'prediction'}) # get the one value in p_time pt = result['p_time'].values # get UTC time zone from_zone = tz.gettz('UTC') # get PST time zone to_zone = tz.gettz('US/Pacific') utc = datetime.strptime(str(pt[0]), '%Y-%m-%d %H:%M:%S') utc = utc.replace(tzinfo=from_zone) pcf = utc.astimezone(to_zone) # returning new timezone value to the df result['p_time'] = 
str(pcf)[:-6] # creating df to dictionary result = result.to_dict() # closing connection conn.close() return result except: pass def retrieve_tr_pred(): """ Retrieves trade recommender predictions from DB and returns result in JSON format """ # create connection and cursor conn, cur = create_conn(credentials) # Gets last 20 prediction results from trp table cur.execute("""SELECT * FROM prediction.trp ORDER by p_time desc limit 500;""") result = cur.fetchall() # creates dataframe from results and rename columns result = pd.DataFrame(result) result = result.rename(columns={0: 'p_time', 1: 'c_time', 2: 'exchange', 3: 'trading_pair', 4: 'prediction'}) # filter predictions to get one for each combination result = result.drop_duplicates(subset=['exchange','trading_pair']) # creating new column with exchange_trading_pair name combined result['period'] = result['exchange'] +'_'+ result['trading_pair'] # use the values in period to rename them with the dict 'model_periods' values result['period'] = result['period'].apply(lambda x: model_periods[x]) # drop unnecessary columns result.drop(columns=['c_time'], inplace=True) # Creating List of prediction time values pt = result['p_time'].values # getting UTC timezone from_zone = tz.gettz('UTC') # getting PST timezone to_zone = tz.gettz('US/Pacific') nt = [] # Looping thru 'p_time' values to change time to PST for p in pt: utc = datetime.strptime(str(p), '%Y-%m-%d %H:%M:%S') utc = utc.replace(tzinfo=from_zone) pcf = utc.astimezone(to_zone) # append new PST time to nt list nt.append(str(pcf)[:-6] + ' PST') # Give new PST time value to 'p_time" column result['p_time'] = nt # Create json output result = (result.groupby(['exchange', 'trading_pair'], as_index=True) .apply(lambda x: x[['p_time', 'period', 'prediction']].to_dict('r')).to_dict()) # close connection conn.close() return result def retrieve_arb_pred(): """ Retrieves arbitrage predictions from DB and returns result in JSON format """ # create connection and cursor conn, cur 
= create_conn(credentials) # Gets last 500 prediction results from arp table cur.execute("""SELECT * FROM prediction.arp ORDER by p_time desc limit 500;""") result = cur.fetchall() # creates dataframe from results and rename columns result = pd.DataFrame(result) result = result.rename( columns={0: 'p_time', 1: 'c_time', 2: 'exchange_1', 3: 'exchange_2', 4: 'trading_pair', 5: 'prediction'}) # result = result.drop(columns='c_time') result = result.drop_duplicates(subset=['exchange_1', 'exchange_2', 'trading_pair']) # converts p_time column to datetime result['datetime'] = pd.to_datetime(result['p_time']) # create time threshold to 15 minutes, to only return results in the last 15 min # filters result to the last 15 min t = dt.datetime.now() - dt.timedelta(minutes=15) result = result[result['datetime'] > t] # drop unnecessary columns result.drop(columns=['datetime', 'c_time'], inplace=True) # creating a list of prediction time values pt = result['p_time'].values # Getting UTC timezone from_zone = tz.gettz('UTC') # Getting PST timezone to_zone = tz.gettz('US/Pacific') nt = [] # Looping thru 'p_time' values to change time to PST for p in pt: utc = datetime.strptime(str(p), '%Y-%m-%d %H:%M:%S') utc = utc.replace(tzinfo=from_zone) pcf = utc.astimezone(to_zone) # appends new time to nt list nt.append(str(pcf)[:-6] + ' PST') # give new PST time value to 'p_time" column result['p_time'] = nt # Create json output result = (result.groupby(['exchange_1', 'exchange_2', 'trading_pair'], as_index=True) .apply(lambda x: x[['p_time', 'prediction']].to_dict('r')).to_dict()) # close connection to DB conn.close() return result
dbreddyAI/cryptolyticapp
api_source_code/Flask_App/utils.py
utils.py
py
9,012
python
en
code
2
github-code
6
[ { "api_name": "config.POSTGRES_ADDRESS", "line_number": 16, "usage_type": "name" }, { "api_name": "config.POSTGRES_PORT", "line_number": 17, "usage_type": "name" }, { "api_name": "config.POSTGRES_USERNAME", "line_number": 18, "usage_type": "name" }, { "api_name": ...
72931175228
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns #Leio os datasets e retiro o que preciso df= pd.read_csv('DATASET_MobileRobotNav.csv', sep=';') #Mostra a descrição do dataset (Média, Conta o número de tuplas, min, máx, etc) print(df.describe()) #print(df) #Separa o modelo para fazer a from sklearn.model_selection import train_test_split #labels = np.array(df['Out_Vel_Linear(m/s)','Out_Vel_Angula(rad/s)']) features= df.drop(columns=['Out_Vel_Linear(m/s)','Out_Vel_Angula(rad/s)'], axis = 1) #print(features) labels= df['Out_Vel_Linear(m/s)'] #y2= df['Out_Vel_Angula(rad/s)'] features_list = list(features.columns) print(features_list) features_train, features_test, labels_train, labels_test= train_test_split(features,labels,test_size= 0.3) print('Training Features Shape:', features_train.shape) print('Training Labels Shape:', labels_train.shape) print('Testing Features Shape:', features_train.shape) print('Testing Labels Shape:', labels_test.shape) from sklearn.neural_network import MLPRegressor mlp = MLPRegressor(hidden_layer_sizes=(8,8,8), activation='tanh', solver='adam', max_iter=500, alpha = 0.001) mlp.fit(features_train,labels_train) #Faz os testes separados anteriormente predictions= mlp.predict(features_test) print("a" ,predictions, "B:" ,labels_test) #Calcula erros absolutos (mudar pra erro quadrático depois?) errors = abs(predictions - labels_test) print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.') #Calcula qtd de erros mape = 100 * (errors / labels_test) #Mostra a precisão accuracy = 100 - np.mean(mape) print('Accuracy:', round(accuracy, 2), '%.')
jpavargasf/ML_RandomForest_ANN
Pedro/neural_network.py
neural_network.py
py
1,716
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 27, "usage_type": "call" }, { "api_name": "sklearn.neural_network.MLPRegressor", "line_number": 37, "usage_type": "call" }...
22449340075
""" Реализация программу взаимодействия виджетов друг с другом: Форма для приложения (ui/d_eventfilter_settings.ui) Программа должна обладать следующим функционалом: 1. Добавить для dial возможность установки значений кнопками клавиатуры(+ и -), выводить новые значения в консоль 2. Соединить между собой QDial, QSlider, QLCDNumber (изменение значения в одном, изменяет значения в других) 3. Для QLCDNumber сделать отображение в различных системах счисления (oct, hex, bin, dec), изменять формат отображаемого значения в зависимости от выбранного в comboBox параметра. 4. Сохранять значение выбранного в comboBox режима отображения и значение LCDNumber в QSettings, при перезапуске программы выводить в него соответствующие значения """ from PySide6 import QtWidgets, QtCore, QtGui from PySide6.QtCore import Qt class Window(QtWidgets.QWidget): def __init__(self, parent=None): super().__init__(parent) self.initUi() self.initSignal() self.settings = QtCore.QSettings('Data') self.loadData() def initUi(self): self.dial = QtWidgets.QDial() self.dial.setRange(0, 100) self.dial.installEventFilter(self) self.comboBox = QtWidgets.QComboBox() self.comboBox.addItem("dec") self.comboBox.addItem("hex") self.comboBox.addItem("oct") self.comboBox.addItem("bin") self.LCDNumber = QtWidgets.QLCDNumber() self.slider = QtWidgets.QSlider() self.slider.setOrientation(Qt.Orientation.Horizontal) layout1 = QtWidgets.QVBoxLayout() layout2 = QtWidgets.QHBoxLayout() layout3 = QtWidgets.QVBoxLayout() layout1.addWidget(self.comboBox) layout1.addWidget(self.LCDNumber) layout2.addWidget(self.dial) layout2.addLayout(layout1) layout3.addLayout(layout2) layout3.addWidget(self.slider) self.setLayout(layout3) def initSignal(self): self.slider.valueChanged.connect(self.dial.setValue) self.slider.valueChanged.connect(self.LCDNumber.display) self.dial.valueChanged.connect(self.slider.setValue) self.dial.valueChanged.connect(self.LCDNumber.display) self.comboBox.currentTextChanged.connect(self.updateLcd) def 
keyPressEvent(self, event: QtGui.QKeyEvent): if event.key() == QtCore.Qt.Key_Plus: self.dial.setValue(self.dial.value() + 1) elif event.key() == QtCore.Qt.Key_Minus: self.dial.setValue(self.dial.value() - 1) else: super().keyPressEvent(event) def updateLcd(self): if self.comboBox.currentText() == "dec": self.LCDNumber.setDecMode() elif self.comboBox.currentText() == "bin": self.LCDNumber.setBinMode() elif self.comboBox.currentText() == "oct": self.LCDNumber.setOctMode() elif self.comboBox.currentText() == "hex": self.LCDNumber.setHexMode() def loadData(self): self.LCDNumber.display(self.settings.value("Value", "")) self.comboBox.setCurrentText(self.settings.value("Text", "")) def closeEvent(self, event: QtGui.QCloseEvent): self.settings.setValue("Value", self.LCDNumber.intValue()) self.settings.setValue("Text", self.comboBox.currentText()) if __name__ == "__main__": app = QtWidgets.QApplication() window = Window() window.show() app.exec()
julsmi/DevPyQt
scripts/Практика 2/b_Самостоятельная + домашняя работа/d_eventfilter_settings.py
d_eventfilter_settings.py
py
3,953
python
ru
code
null
github-code
6
[ { "api_name": "PySide6.QtWidgets.QWidget", "line_number": 25, "usage_type": "attribute" }, { "api_name": "PySide6.QtWidgets", "line_number": 25, "usage_type": "name" }, { "api_name": "PySide6.QtCore.QSettings", "line_number": 31, "usage_type": "call" }, { "api_nam...
41550685644
import os, sys, serial, time, traceback from . codes import CMDTYPE, LEDTYPE, SPIChipsets, BufferChipsets from . devices import Devices from . import io from .. channel_order import ChannelOrder from .. driver_base import DriverBase from ... util import exception, log, util from ... drivers.return_codes import ( RETURN_CODES, print_error, raise_error, BiblioSerialError) class Serial(DriverBase): """Main driver for Serial based LED strips and devices like the AllPixel Provides the same parameters of :py:class:`bibliopixel.drivers.driver_base.DriverBase` as well as those below: :param ledtype: LED protocol type. One of :py:func:`bibliopixel.drivers.ledtype.LEDTYPE` :param str dev: Serial device address/path. If left empty, first device found will be used. :param int spi_speed: SPI datarate for applicable LED types, in MHz :param int restart_timeout: Seconds to wait between reconfigure reboot and reconnection attempt :param int device_id: Device ID to connect to. :param str hardwareID: A valid USB VID:PID pair such as "1D50:60AB" :param int baudrate: Baud rate to connect to serial device """ def __init__(self, ledtype=None, num=0, dev='', c_order='RGB', spi_speed=2, gamma=None, restart_timeout=3, device_id=None, hardwareID="1D50:60AB", baudrate=921600, **kwds): if ledtype is None: raise ValueError('Must provide ledtype value!') if num == 0: raise ValueError('Must provide num value >0!') super().__init__(num, c_order=c_order, gamma=gamma, **kwds) self.devices = Devices(hardwareID, baudrate) if not (1 <= spi_speed <= 24 and ledtype in SPIChipsets): spi_speed = 1 self._spi_speed = spi_speed self._com = None self._ledtype = ledtype self._bufPad = 0 self.dev = dev self.device_version = 0 self.device_id = device_id self._sync_packet = util.generate_header(CMDTYPE.SYNC, 0) if self.device_id is not None and not (0 <= self.device_id <= 255): raise ValueError("device_id must be between 0 and 255") resp = self._connect() if resp == RETURN_CODES.REBOOT: # reboot needed 
log.info(REBOOT_MESSAGE) self._close() time.sleep(restart_timeout) resp = self._connect() if resp != RETURN_CODES.SUCCESS: raise_error(resp) else: log.info("Reconfigure success!") elif resp != RETURN_CODES.SUCCESS: raise_error(resp) if type in SPIChipsets: log.info("Using SPI Speed: %sMHz", self._spi_speed) def cleanup(self): if self._com: log.info("Closing connection to: %s", self.dev) exception.report(self._close) self._com = None def _connect(self): try: if not self.dev: self.devices.find_serial_devices() idv = self.devices.get_device(self.device_id) self.device_id, self.dev, self.device_version = idv try: self._com = serial.Serial( self.dev, baudrate=self.devices.baudrate, timeout=5) except serial.SerialException: ports = self.devices.devices.values() error = "Invalid port specified. No COM ports available." if ports: error = ("Invalid port specified. Try using one of: \n" + "\n".join(ports)) log.info(error) raise BiblioSerialError(error) packet = util.generate_header(CMDTYPE.SETUP_DATA, 4) packet.append(self._ledtype) # set strip type byteCount = self.bufByteCount() if self._ledtype in BufferChipsets: if self._ledtype == LEDTYPE.APA102 and self.device_version >= 2: pass else: self._bufPad = BufferChipsets[ self._ledtype](self.numLEDs) * 3 byteCount += self._bufPad packet.append(byteCount & 0xFF) # set 1st byte of byteCount packet.append(byteCount >> 8) # set 2nd byte of byteCount packet.append(self._spi_speed) self._write(packet) code = self._read() if code is None: self.devices.error() return code except serial.SerialException as e: error = ("Unable to connect to the device. 
Please check that " "it is connected and the correct port is selected.") log.error(traceback.format_exc()) log.error(error) raise e def set_device_brightness(self, brightness): packet = util.generate_header(CMDTYPE.BRIGHTNESS, 1) packet.append(self._brightness) self._write(packet) code = self._read() if code == RETURN_CODES.SUCCESS: return True print_error(code) def _send_packet(self): if not self._com: return self._write(self._packet) code = self._read() if code is None: self.devices.error(fail=False) elif code != RETURN_CODES.SUCCESS: print_error(code) else: self._flushInput() return True def _compute_packet(self): count = self.bufByteCount() + self._bufPad self._packet = util.generate_header(CMDTYPE.PIXEL_DATA, count) self._render() self._packet.extend(self._buf) self._packet.extend([0] * self._bufPad) def _send_sync(self): self._write(self._sync_packet) def _read(self): return io.read_byte(self._com) def _close(self): try: return self._com and self._com.close() except Exception: log.exception('Serial exception in close') finally: self._com = None def _write(self, packet): try: return self._com and self._com.write(packet) except Exception: log.exception('Serial exception in write') def _flushInput(self): try: return self._com and self._com.flushInput() except Exception: log.exception('Serial exception in flushInput') class TeensySmartMatrix(Serial): """Variant of :py:class:`Serial` for use with the Teensy and SmartMatrix library. The following provides compatible firmware: https://github.com/ManiacalLabs/BiblioPixelSmartMatrix All parameters are the same as with :py:class:`Serial`, except the default hardwareID is changed to match the Teensy. The main difference is that SmartMatrix requires a sync command to keep multiple instances of this driver running smoothly. 
""" def __init__(self, width, height, dev="", device_id=None, hardwareID="16C0:0483", **kwds): super().__init__(ledtype=LEDTYPE.GENERIC, num=width * height, device_id=device_id, hardwareID=hardwareID, **kwds) self.sync = self._send_sync REBOOT_MESSAGE = """Reconfigure and reboot needed! Waiting for controller to restart...""" from ... util import deprecated if deprecated.allowed(): # pragma: no cover DriverSerial = Serial DriverTeensySmartMatrix = TeensySmartMatrix
ManiacalLabs/BiblioPixel
bibliopixel/drivers/serial/driver.py
driver.py
py
7,526
python
en
code
263
github-code
6
[ { "api_name": "driver_base.DriverBase", "line_number": 13, "usage_type": "name" }, { "api_name": "devices.Devices", "line_number": 45, "usage_type": "call" }, { "api_name": "codes.SPIChipsets", "line_number": 47, "usage_type": "name" }, { "api_name": "util.util.ge...
34570396569
from django import forms from .models import Post, Comment, Category2 from treebeard.forms import movenodeform_factory, MoveNodeForm # from markdownx.fields import MarkdownxFormField class CategoryForm(MoveNodeForm): class Meta: model = Category2 exclude = ('sib_order', 'parent') class PostCreateUpdateForm(forms.ModelForm): class Meta: model = Post fields = ('body','title', 'category2', 'is_published' ) labels = { 'category2': 'دسته بندی', 'is_published': ' پست منتشر شود؟', } widgets = { 'body': forms.Textarea(attrs={'id':'markdown_input' , 'class':'form-control shadow-lg'}), 'category2': forms.Select(attrs={'class':'form-control shadow-lg'}), 'title': forms.TextInput(attrs={'class':'form-control shadow-lg'}), # 'is_published': forms.BooleanField(), # 'title': forms.TextInput(attrs={'class':'form-control shadow-lg'}), } class CommentCreateForm(forms.ModelForm): class Meta: model = Comment fields = ('body',) widgets = { 'body': forms.Textarea(attrs={'class':'form-control'}) } class CommentReplyForm(forms.ModelForm): class Meta: model = Comment fields = ('body',) class PostSearchForm(forms.Form): search = forms.CharField()
MohammadGoodarzi/hamkav_core
HamkavBlog/forms.py
forms.py
py
1,271
python
en
code
0
github-code
6
[ { "api_name": "treebeard.forms.MoveNodeForm", "line_number": 7, "usage_type": "name" }, { "api_name": "models.Category2", "line_number": 9, "usage_type": "name" }, { "api_name": "django.forms.ModelForm", "line_number": 12, "usage_type": "attribute" }, { "api_name"...
18100446664
""" https://leetcode.com/problems/jump-game-iv/ 1345. Jump Game IV Given an array of integers arr, you are initially positioned at the first index of the array. In one step you can jump from index i to index: i + 1 where: i + 1 < arr.length. i - 1 where: i - 1 >= 0. j where: arr[i] == arr[j] and i != j. Return the minimum number of steps to reach the last index of the array. Notice that you can not jump outside of the array at any time. """ from typing import List, Deque, DefaultDict, Tuple from collections import deque, defaultdict from unittest import TestCase, main class Solution: def minJumps(self, arr: List[int]) -> int: # Edge case if len(arr) == 1: return 0 steps = 0 visited = [True] + [False] * (len(arr) - 1) q: Deque[int] = deque() q.append(0) hm: DefaultDict[int, List[int]] = defaultdict(list) # Create hash map for i in range(len(arr)): hm[arr[i]].append(i) while q: for _ in range(len(q)): i = q.popleft() # If i is the last index, return steps if i == len(arr) - 1: return steps # Search i - 1 if 0 <= i - 1 and not visited[i - 1]: visited[i - 1] = True q.append(i - 1) # Search i + 1 if i + 1 < len(arr) and not visited[i + 1]: visited[i + 1] = True q.append(i + 1) # Search hm[arr[i]] while hm[arr[i]]: j = hm[arr[i]].pop() if not visited[j]: visited[j] = True q.append(j) steps += 1 raise ValueError("Could't find the answer") class Test(TestCase): cases: List[Tuple[List[int], int]] = [ ([7, 7, 2, 1, 7, 7, 7, 3, 4, 1], 3), ] def test_solution(self): solution = Solution() for arr, expected in self.cases: self.assertEqual(solution.minJumps(arr), expected) if __name__ == "__main__": main()
hirotake111/leetcode_diary
leetcode/1345/solution.py
solution.py
py
2,162
python
en
code
0
github-code
6
[ { "api_name": "typing.List", "line_number": 21, "usage_type": "name" }, { "api_name": "typing.Deque", "line_number": 28, "usage_type": "name" }, { "api_name": "collections.deque", "line_number": 28, "usage_type": "call" }, { "api_name": "typing.DefaultDict", "...
35384638116
from setuptools import setup

# The long description rendered on PyPI is taken verbatim from the README.
with open('README.md', 'r') as readme_file:
    readme = readme_file.read()

# Package metadata collected in one place and splatted into setup().
_METADATA = dict(
    name='youtube-dl-service',
    version='0.0.2',
    author='Dmitriy Pleshevskiy',
    author_email='dmitriy@ideascup.me',
    description='Using youtube-dl as service in python code',
    long_description=readme,
    long_description_content_type='text/markdown',
    package_data={'': ['LICENSE', 'README.md']},
    include_package_data=True,
    license='MIT',
    packages=['youtube_dl_service'],
    install_requires=[
        'youtube-dl==2020.12.29',
    ],
)

if __name__ == '__main__':
    setup(**_METADATA)
pleshevskiy/youtube-dl-service
setup.py
setup.py
py
655
python
en
code
0
github-code
6
[ { "api_name": "setuptools.setup", "line_number": 9, "usage_type": "call" } ]
2298576928
#!/usr/bin/env python
"""Build a feature phasogram (average signal profile around gene starts/ends)
from a bigWig coverage track and a gff3 annotation."""

import sys
import logging
import time
import argparse
import pyBigWig
import math
import numpy as np

from sklearn import preprocessing  # NOTE(review): imported but unused here

from MSTS.version import __version__
from MSTS.Parser.SimpleGffParser import SimpleGffParser
from MSTS.Db.FeatureDB import FeatureDB
from MSTS.Graphics import Graphics


def getCoordinatesFreeFromOtherGenes(lFeatures, feature, start, pivot='start'):
    """Shrink the analysis window bound so it does not run into a neighbouring
    feature on the same strand side; returns the adjusted coordinate."""
    startNew = start
    endNew = start
    for feat in lFeatures:
        if feature.strand == 1:
            if pivot == 'start':
                if feat.end <= feature.start and feat.end > startNew:
                    startNew = feat.end
            elif pivot == 'end':
                if feat.start >= feature.end and feat.start < endNew:
                    endNew = feat.start
        elif feature.strand == -1:
            if pivot == 'start':
                if feat.start >= feature.end and feat.start < startNew:
                    startNew = feat.start
            elif pivot == 'end':
                if feat.end <= feature.start and feat.end > endNew:
                    endNew = feat.end
    if pivot == 'start':
        return startNew
    elif pivot == 'end':
        return endNew


def getBasesOverlappingOtherGenes(lOverlappingFeatures, Start, End, regionStart, regionEnd):
    """Per-base count, over [regionStart, regionEnd), of how many *other*
    features (those not fully spanning [Start, End]) cover each position."""
    lOtherFeatures = []
    for feat in lOverlappingFeatures:
        # A feature fully containing the analyzed one is the feature itself.
        if feat.start <= Start and feat.end >= End:
            pass
        else:
            lOtherFeatures.append(feat)
    dBases = {}
    for val in range(regionStart, regionEnd):
        dBases[val] = 0
    for feat in lOtherFeatures:
        for base in range(feat.start, feat.end):
            if base in dBases:
                dBases[base] += 1
    return [value for (key, value) in sorted(dBases.items())]


def gaussianSmoothing(data, windowWidth=3, stdev=20):
    """Smooth *data* with a normalized Gaussian kernel.

    The kernel spans 2*windowWidth*stdev+1 taps.  The first and last
    windowWidth*stdev output points lack full kernel support and are set
    to NaN.  The input list is NOT modified (the previous version extended
    it in place).
    """
    half = windowWidth * stdev
    kernel = [0.0] * (2 * half + 1)
    sumt = 0.0
    for i in range(len(kernel)):
        # FIX: center the kernel on the middle tap.  The previous code used
        # the hard-coded offset 3*stdev, which was only correct for the
        # default windowWidth=3.
        x = float(i - half)
        value = math.exp(-(x * x) / (2 * stdev * stdev))
        kernel[i] = value
        sumt += value
    kernel = [v / sumt for v in kernel]

    # Zero-pad both sides on a copy so the caller's list is left untouched.
    padded = [0.0] * half + list(data) + [0.0] * half
    smoothed = [0.0] * len(data)
    for i in range(len(smoothed)):
        acc = 0.0
        for j in range(len(kernel)):
            acc += padded[i + j] * kernel[j]
        smoothed[i] = acc
    if half > 0:
        smoothed[0:half] = [np.nan] * half
        smoothed[-half:] = [np.nan] * half
    return smoothed


def readFeatureIds(lIdsFile):
    """Read one feature ID per non-empty line from *lIdsFile*; exits on error."""
    lIds = []
    try:
        with open(lIdsFile, 'r') as f:
            for line in f:
                line = line.rstrip()
                if line:
                    lIds.append(line)
        logging.info("{} features to analyze".format(len(lIds)))
    except Exception as e:
        logging.error("Can not read feature file: {}".format(lIdsFile))
        sys.exit(1)
    return lIds


def getMeanValue(bw):
    """Mean signal over all features of the analyzed type.

    NOTE(review): relies on the module-level globals `db` and `featType`
    set in the __main__ block -- only callable from this script.
    """
    lValues = []
    for chrom in db.selectReferences():
        logging.info('Requesting genes in sequence: {}'.format(chrom))
        lFeatures = db.selectFeatureTypeFromReference(chrom, featType)
        for feat in lFeatures:
            values = bw.values(chrom, feat.start, feat.end)
            lValues.extend(values)
    return np.mean(lValues)


if __name__ == "__main__":

    program = sys.argv[0]
    version = __version__
    description = 'todo, ...'

    # FIX: a single parser keeps both prog and description (the second
    # ArgumentParser() call previously discarded prog).
    parser = argparse.ArgumentParser(prog=program, description=description)
    parser.add_argument('--version', action='version', version='{} {}'.format(program, version))
    parser.add_argument("bigWig", help="Input bigWig File", type=str)
    parser.add_argument("gff3", help="Input genome annotation in gff3 format", type=str)
    parser.add_argument("-wb", "--windowBefore", help="window size to analyze before the feature, default=1000", type=int, default=1000)
    parser.add_argument("-wa", "--windowAfter", help="window size to analyze after the feature, default=1000", type=int, default=1000)
    parser.add_argument("-ft", "--featureType", help="feature type to analyze, default=gene", type=str, default='gene')
    parser.add_argument("-p", "--pivot", help="feature bound to use, default=start, possible values=[start,end]", type=str, default='start')
    parser.add_argument("--context", help="if set, defined features in context matter", action="store_true", default=False)
    parser.add_argument("-o", "--out", help="name of output graph", type=str, default="graph.png")
    parser.add_argument("-t", "--title", help="title text", type=str, default="title")
    parser.add_argument("-x", "--xax", help="x axis text", type=str, default="window, bp")
    parser.add_argument("-y", "--yax", help="y axis text", type=str, default="nb bases")
    parser.add_argument("-z", "--zax", help="z axis text", type=str, default="signal coverage")
    parser.add_argument("-d", "--sqliteDB", help="provide sqlite DB to avoid insertion, useful for multi-analysis", type=str, default=None)
    parser.add_argument("-n", "--noDeleteDB", help="Do not delete SQLite DB", action="store_true", default=False)
    parser.add_argument("-s", "--GaussianSmoothing", help="Perform Gaussian Smoothing on data, ", action="store_true", default=False)
    parser.add_argument("-w", "--windowWidth", help="window size for Gaussian smoothing, default=3", type=int, default=3)
    parser.add_argument("-sd", "--stdev", help="stdev for Gaussian smoothing, default=20", type=int, default=20)
    parser.add_argument("--norm", help="Normalize signal value with the average signal of all features of the same type", action="store_true", default=False)
    parser.add_argument("--flush", help="print phases on stdout to save in file, > phases.out", action="store_true", default=False)
    parser.add_argument("-l", "--lIds", help="txt file with ID list (one ID per line), limit phasogram to the features specified in the file. Features must be of the same type as featureType", type=str, default=None)
    parser.add_argument("--heatmap", help="export coresponding heatmap", action="store_true", default=False)
    parser.add_argument("--heatmapsmooth", help="Perform Gaussian smoothing on each feature (time consuming)", action="store_true", default=False)
    parser.add_argument("--keepLIdOrder", help="export heatmap with respect to LIds order if supplied", action="store_true", default=False)
    parser.add_argument("-v", "--verbosity", type=int, choices=[1, 2, 3],
                        help="increase output verbosity 1=error, 2=info, 3=debug")
    args = parser.parse_args()

    logLevel = 'ERROR'
    if args.verbosity == 1:
        logLevel = 'ERROR'
    if args.verbosity == 2:
        logLevel = 'INFO'
    if args.verbosity == 3:
        logLevel = 'DEBUG'
    logging.getLogger().setLevel(logLevel)

    featType = args.featureType
    featTypeContext = featType
    pivot = args.pivot
    if pivot not in ['start', 'end']:
        logging.error("Provided pivot: '{}' not allowed, choose 'start' or 'end'".format(pivot))
        sys.exit(1)
    context = args.context

    # Load annotation into a (possibly temporary) SQLite feature database.
    if not args.sqliteDB:
        logging.info("Parsing gff3 gene file")
        parser = SimpleGffParser(args.gff3, logLevel)
        lFeatures = []
        for feat in parser.parse():
            lFeatures.append(feat)
        logging.info("{} features parsed".format(parser.nbFeatures))
        logging.info("Inserting features in SQLite DB")
        timestamp = int(time.time())
        db = FeatureDB('sqlite-{}.db'.format(timestamp), False, logLevel)
        db.insertlFeatures(lFeatures)
        logging.info("Insertion done")
    else:
        logging.info("Using {} file as SQLite db".format(args.sqliteDB))
        db = FeatureDB(args.sqliteDB, noCreate=True, logLevel=logLevel)

    bw = pyBigWig.open(args.bigWig)
    winBefore = args.windowBefore
    winAfter = args.windowAfter
    lPhases = [0] * (1 + winBefore + winAfter)      # summed signal per window position
    lAllPhases = []                                 # one row of values per feature
    lPhasesNb = [0] * (1 + winBefore + winAfter)    # nb of features contributing per position
    lOtherGenesNb = [0] * (1 + winBefore + winAfter)
    lIds = []
    d = {}
    if args.lIds:
        lIds = readFeatureIds(args.lIds)

    for chrom in db.selectReferences():
        logging.info('Requesting genes in sequence: {}'.format(chrom))
        if len(lIds) > 0:
            lFeatures = db.selectFeatureFromIdListAndType(chrom, lIds, featType)
        else:
            lFeatures = db.selectFeatureTypeFromReference(chrom, featType)
        nbAnalyzedFeat = len(lAllPhases)
        for ftIdx, feat in enumerate(lFeatures):
            if args.keepLIdOrder:
                d[feat.id] = ftIdx + nbAnalyzedFeat
            lAllPhases.append([0] * (1 + winBefore + winAfter))
            # Window bounds around the chosen pivot, clipped to the sequence.
            if feat.strand == 1:
                if pivot == 'start':
                    start = max(1, feat.start - winBefore)
                    end = min(bw.chroms(chrom), feat.start + winAfter)
                elif pivot == 'end':
                    start = max(1, feat.end - winBefore)
                    end = min(bw.chroms(chrom), feat.end + winAfter)
            elif feat.strand == -1:
                if pivot == 'start':
                    start = min(bw.chroms(chrom), feat.end + winBefore)
                    end = max(1, feat.end - winAfter)
                elif pivot == 'end':
                    start = min(bw.chroms(chrom), feat.start + winBefore)
                    end = max(1, feat.start - winAfter)
            else:
                logging.error("Cannot perform analysis on feature without strand")
                sys.exit(1)

            startNew = start
            endNew = end
            if pivot == 'start':
                lOverlappingFeatures = db.selectFeatureTypeFromCoordinates(featTypeContext, chrom, min(start, end), max(start, end))
                if context:
                    startNew = getCoordinatesFreeFromOtherGenes(lOverlappingFeatures, feat, start)
            elif pivot == 'end':
                lOverlappingFeatures = db.selectFeatureTypeFromCoordinates(featTypeContext, chrom, min(start, end), max(start, end))
                if context:
                    endNew = getCoordinatesFreeFromOtherGenes(lOverlappingFeatures, feat, end, pivot='end')

            index = 0
            lOtherGenesBases = []
            if feat.strand == 1:
                if pivot == 'start':
                    lValues = bw.values(chrom, startNew - 1, end)
                    decal = 0
                    if context:
                        lValues = bw.values(chrom, startNew - 1, feat.end)
                        decal = (startNew - start)
                    for i in range(0 + decal, min(len(lValues) + decal, winBefore + winAfter + 1)):
                        lPhases[i] += lValues[i - decal]
                        lAllPhases[ftIdx + nbAnalyzedFeat][i] = lValues[i - decal]
                        lPhasesNb[i] += 1
                        index = i
                    for i in range(0, 0 + decal):
                        lAllPhases[ftIdx + nbAnalyzedFeat][i] = np.nan
                    for i in range(min(len(lValues) + decal, winBefore + winAfter + 1), winBefore + winAfter + 1):
                        lAllPhases[ftIdx + nbAnalyzedFeat][i] = np.nan
                    lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures, feat.start, feat.end, start, end)
                elif pivot == 'end':
                    lValues = bw.values(chrom, start - 1, endNew)
                    decal = 0
                    if context:
                        lValues = bw.values(chrom, feat.start - 1, endNew)
                        decal = max((winBefore - (feat.end - feat.start)), 0)
                    for i in range(decal, min(decal + len(lValues), winBefore + winAfter + 1)):
                        lPhases[i] += lValues[i - decal]
                        lAllPhases[ftIdx + nbAnalyzedFeat][i] = lValues[i - decal]
                        lPhasesNb[i] += 1
                        index = i
                    for i in range(0, decal):
                        lAllPhases[ftIdx + nbAnalyzedFeat][i] = np.nan
                    for i in range(min(len(lValues) + decal, winBefore + winAfter + 1), winBefore + winAfter + 1):
                        lAllPhases[ftIdx + nbAnalyzedFeat][i] = np.nan
                    lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures, feat.start, feat.end, start, end)
            elif feat.strand == -1:
                if pivot == 'start':
                    lValues = bw.values(chrom, end, startNew)
                    decal = 0
                    if context:
                        lValues = bw.values(chrom, feat.start - 1, startNew)
                        decal = (startNew - start)
                    for i in range(-1 + decal, max(-len(lValues) + decal - 1, (-winAfter) + (-winBefore) + (-1) - 1), -1):
                        lPhases[-i - 1] += lValues[i - decal]
                        lAllPhases[ftIdx + nbAnalyzedFeat][-i - 1] = lValues[i - decal]
                        lPhasesNb[-i - 1] += 1
                        index = i
                    for i in range(-1, -1 + decal, -1):
                        lAllPhases[ftIdx + nbAnalyzedFeat][-i - 1] = np.nan
                    for i in range(max(-len(lValues) + decal - 1, (-winAfter) + (-winBefore) + (-1) - 1), (-winAfter) + (-winBefore) + (-1) - 1, -1):
                        lAllPhases[ftIdx + nbAnalyzedFeat][-i - 1] = np.nan
                    lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures, feat.start, feat.end, end, start)[::-1]
                elif pivot == 'end':
                    lValues = bw.values(chrom, endNew - 1, start)
                    decal = 0
                    if context:
                        lValues = bw.values(chrom, endNew - 1, feat.end)
                        decal = max((winBefore - (feat.end - feat.start)), 0)
                    # NOTE(review): `lPhases` is indexed with lValues[i+decal]
                    # while `lAllPhases` uses lValues[i-decal]; these differ
                    # whenever decal != 0 and one of them is probably wrong --
                    # behavior kept as-is, to be confirmed against MSTS upstream.
                    for i in range(-1 - decal, max(-len(lValues) - decal, (-winAfter) + (-winBefore) + (-1) - 1), -1):
                        lPhases[-i - 1] += lValues[i + decal]
                        lAllPhases[ftIdx + nbAnalyzedFeat][-i - 1] = lValues[i - decal]
                        lPhasesNb[-i - 1] += 1
                        index = i
                    for i in range(-1, -1 + decal, -1):
                        lAllPhases[ftIdx + nbAnalyzedFeat][-i - 1] = np.nan
                    for i in range(max(-len(lValues) + decal - 1, (-winAfter) + (-winBefore) + (-1) - 1), (-winAfter) + (-winBefore) + (-1) - 1, -1):
                        lAllPhases[ftIdx + nbAnalyzedFeat][-i - 1] = np.nan
                    lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures, feat.start, feat.end, end, start)[::-1]
            else:
                pass

            for i in range(0, len(lOtherGenesBases)):
                lOtherGenesNb[i] += lOtherGenesBases[i]

    lAveragePhases = [0] * (1 + winBefore + winAfter)

    # Reorder heatmap rows to follow the supplied ID list, if requested.
    if args.keepLIdOrder:
        lSortedAllPhases = []
        for i in lIds:
            lSortedAllPhases.append(lAllPhases[d[i]])
        lAllPhases = lSortedAllPhases

    heatvalues = np.array(lAllPhases, dtype=np.float64)

    # Per-feature min/max normalization (NaNs are preserved).
    lAllPhasesNorm = []
    for s in lAllPhases:
        mi = np.nanmin(s)
        ma = np.nanmax(s)
        row = []
        for x in s:
            if np.isnan(np.array([x])):
                row.append(np.nan)
            else:
                row.append((x - mi * 1.0) / (ma - mi))
        lAllPhasesNorm.append(row)
    # FIX: np.float was removed in NumPy 1.20+; it was an alias of float64.
    heatvalues = np.array(lAllPhasesNorm, dtype=np.float64)

    lSmoothedAllPhases = []
    if args.heatmapsmooth:
        heatvaluesNoNaN = np.nan_to_num(heatvalues)
        for idx, feature_phase in enumerate(heatvaluesNoNaN):
            lSmoothedAllPhases.append(gaussianSmoothing(list(feature_phase), args.windowWidth, args.stdev))
            if not (idx + 1) % 1000:
                logging.info("{} feature values smoothed on {}".format(idx + 1, len(lAllPhases)))
        # Restore NaNs that nan_to_num zeroed out before smoothing.
        for i, s in enumerate(heatvalues):
            for j, x in enumerate(s):
                if np.isnan(x):
                    lSmoothedAllPhases[i][j] = np.nan

    for a, total in enumerate(lPhases):
        if lPhasesNb[a] != 0:
            lAveragePhases[a] = total / lPhasesNb[a]

    if args.norm:
        mean = getMeanValue(bw)
        lAveragePhases = [v / mean for v in lAveragePhases]
        logging.info("Normalizing values with average coverage of {}: {}".format(args.featureType, mean))

    if args.GaussianSmoothing:
        logging.info("Smoothing data with Gaussian blur, window: {}, stdev: {}".format(args.windowWidth, args.stdev))
        lAveragePhases = gaussianSmoothing(lAveragePhases, args.windowWidth, args.stdev)
        lenFilter = 2 * args.windowWidth * args.stdev + 1

    if args.noDeleteDB:
        logging.info('SQLite db: {} not removed'.format(db.getDbFileName()))
    else:
        logging.info('SQLite db: {} removed'.format(db.getDbFileName()))
        db.deleteDB()

    logging.info("Drawing graph in {}".format(args.out))
    if args.GaussianSmoothing:
        Graphics.plotDistributionWithGeneHistogram([x for x in range(-winBefore, winAfter + 1)], lAveragePhases[0:(winBefore + winAfter + 1)], lPhasesNb[0:(winBefore + winAfter + 1)], lOtherGenesNb[0:(winBefore + winAfter + 1)], out=args.out, title=args.title, xax=args.xax, yax=args.yax, yax2=args.zax)
    else:
        Graphics.plotDistributionWithGeneHistogram([x for x in range(-winBefore, winAfter + 1)], lAveragePhases[0:(winBefore + winAfter + 1)], lPhasesNb[0:(winBefore + winAfter + 1)], lOtherGenesNb[0:(winBefore + winAfter + 1)], out=args.out, title=args.title, xax=args.xax, yax=args.yax, yax2=args.zax)

    if args.heatmap:
        if args.heatmapsmooth:
            Graphics.plotHeatmap(lSmoothedAllPhases, out="heatmap-{}".format(args.out), title="Heatmap - {}".format(args.title), xax="position bp", yax="#features", mi=args.windowBefore, ma=args.windowAfter)
        else:
            Graphics.plotHeatmap(heatvalues, out="heatmap-{}".format(args.out), title="Heatmap - {}".format(args.title), xax="position bp", yax="#features", mi=args.windowBefore, ma=args.windowAfter)

    if args.flush:
        for x in range(0, winBefore + winAfter + 1):
            print("{}\t{}".format(x - winBefore, lAveragePhases[x]))
nlapalu/MSTS
bin/MSTS_feature_phasogram.py
MSTS_feature_phasogram.py
py
18,845
python
en
code
0
github-code
6
[ { "api_name": "math.exp", "line_number": 77, "usage_type": "call" }, { "api_name": "numpy.nan", "line_number": 93, "usage_type": "attribute" }, { "api_name": "numpy.nan", "line_number": 94, "usage_type": "attribute" }, { "api_name": "logging.info", "line_numbe...
16319784906
"""ZIP - Compactando / Descompactando arquivos""" from zipfile import ZipFile import os """ Este módulo fornece ferramentas para: criar, ler, escrever, adicionar, e listar um arquivo ZIP -https://docs.python.org/pt-br/3/library/zipfile.html """ way = (r'C:\Users\Igor\Desktop\Estudos\Programação-em-Python' r'\Mundo-invertido\Udemy\4-Módulos-Python') # Cria um arquivo e colocar arquivos num zip with ZipFile('file.zip', 'w') as zip: for file in os.listdir(way): full_path = os.path.join(way, file) zip.write(full_path, file) with ZipFile('file.zip', 'r') as zip: # Exibi os arquivos dentro no zip for file in zip.namelist(): print(file) with ZipFile('file.zip', 'r') as zip: # Descompacta os arquivos do zip zip.extractall(f'{way}/unzipped') # descompactado
igorfreits/Studies-Python
Udemy/4-Módulos-Python/aula 87 - ZIP.py
aula 87 - ZIP.py
py
814
python
pt
code
1
github-code
6
[ { "api_name": "zipfile.ZipFile", "line_number": 14, "usage_type": "call" }, { "api_name": "os.listdir", "line_number": 15, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 16, "usage_type": "call" }, { "api_name": "os.path", "line_number": ...
3337691584
'''
reres has trouble replacing local files,
so try serving the file over HTTP on a local port instead
'''
from flask import Flask

app = Flask(__name__)


@app.route("/udc.js")
def udc():
    """Serve the contents of the local udc2.js file."""
    # FIX: the file handle was opened but never closed; a context manager
    # guarantees it is released even if read() raises.
    with open('/Users/xiaodu/Desktop/udc2.js', 'r', encoding='utf-8') as fill:
        js_dt = fill.read()
    return js_dt


if __name__ == '__main__':
    app.run()
zml1996/learn_record
myflask/file2link.py
file2link.py
py
307
python
en
code
2
github-code
6
[ { "api_name": "flask.Flask", "line_number": 6, "usage_type": "call" } ]
74180016507
import json


def _read_table(path):
    """Read *path* and split every line on ', ' into a row of fields.

    FIX: the original used bare open(...).read(), leaving four file
    handles unclosed; a context manager closes each one.
    """
    with open(path) as handle:
        return [line.split(', ') for line in handle.read().split('\n')]


poptab = _read_table('country-list.csv')
captab = _read_table('country-capitals.csv')
flgtab = _read_table('country-flags.csv')
curtab = _read_table('country-currency.csv')

# Index each table by country name; rows with fewer than two fields
# (blank/malformed lines) are skipped.
popmap = dict([[x[1], {'population': x[2], 'area': x[3], 'density': x[4]}]
               for x in poptab if len(x) > 1])
capmap = dict([[x[0], x[1:]] for x in captab if len(x) > 1])
curmap = dict([[x[0], {'currency': x[1], 'code': x[2]}] for x in curtab if len(x) > 1])
flgmap = dict([[x[1], x[0]] for x in flgtab if len(x) > 1])

countries = [x[1] for x in poptab if len(x) > 1]

# Merge all per-country records into one JSON-serializable list.
res = [{
    'serial': i + 1,
    'name': countries[i],
    'capitals': capmap[countries[i]],
    'currency': curmap[countries[i]]['currency'],
    'currency_code': curmap[countries[i]]['code'],
    'population': popmap[countries[i]]['population'],
    'area': popmap[countries[i]]['area'],
    'population_density': popmap[countries[i]]['density'],
    'flag': flgmap[countries[i]]
} for i in range(len(countries))]

print(json.dumps(res, indent=4))
GnsP/countries
assemble.py
assemble.py
py
1,156
python
en
code
0
github-code
6
[ { "api_name": "json.dumps", "line_number": 26, "usage_type": "call" } ]
73829652666
import asyncio
import multiprocessing as mp
import os
import time

from bs4 import BeautifulSoup

from src.download_utils import download_page


class DownloadRepository:
    """Crawl a Gitea-style repository page tree and download every file found.

    A background process drains the queue of discovered file links while the
    async crawler keeps filling it.
    """

    def __init__(self, address: str):
        self.files = []
        self.address = address
        self.queue = mp.Queue()
        self.process = mp.Process(target=self.start_download)

    async def start_collect(self, start_page: str) -> None:
        """Start the downloader process, crawl from *start_page*, then stop.

        NOTE(review): the fixed 2s sleep before terminate() may cut off the
        downloader while the queue still holds links -- confirm acceptable.
        """
        self.process.start()
        await self.search_repository(start_page)
        time.sleep(2)
        self.process.terminate()

    async def search_repository(self, url: str) -> None:
        """Depth-first crawl: enqueue file pages, recurse into directories."""
        raw_html = await download_page(self.address, url)
        is_file = self.check_page(raw_html)
        if not is_file:
            embedded_links = self.parse_html_code(raw_html)
            for link in embedded_links:
                await self.search_repository(link)

    def check_page(self, html_text: str) -> bool:
        """Return True (and enqueue the raw link) if the page is a file view."""
        soup = BeautifulSoup(html_text, 'lxml')
        tool_bar = soup.find(
            name='h4',
            attrs='file-header ui top attached header df ac sb',
        )
        if tool_bar:
            tag = tool_bar.find('div', class_='ui buttons mr-2')
            if tag is None:
                return False
            link = tag.find('a', string='Raw').get('href')
            print(f'[+]Found file -> {self.address}{link}')
            self.queue.put(link)
            return True
        return False

    def parse_html_code(self, html_code: str) -> list[str]:
        """Collect hrefs of directory-listing entries (anchors with a title)."""
        soup = BeautifulSoup(html_code, 'lxml')
        links = []
        if soup.tbody:
            for tag in soup.tbody.find_all('a'):
                link = tag.get('href')
                title = tag.get('title')
                if link and title:
                    links.append(link)
            return links
        return []

    def start_download(self):
        """Entry point of the background process: run the async saver."""
        asyncio.run(self.save_file())

    @staticmethod
    def create_temp_folder():
        if not os.path.exists('repository'):
            os.mkdir('repository')

    @staticmethod
    def create_saving_directory(path: str) -> None:
        if not os.path.exists(path):
            os.makedirs(path)

    @staticmethod
    def extract_paths(link: str) -> tuple[str, str]:
        """Map a raw-file link to (local file path, local folder path)."""
        paths = link.split('/')
        project = paths[2]
        directory = '/'.join(paths[6:-1])
        filename = paths[-1]
        folder = f'repository/{project}/{directory}'
        # FIX: `filename` was computed but never used; the file path ended
        # in a placeholder literal instead of the actual file name.
        full_path = f'{folder}/{filename}'
        return full_path, folder

    async def save_file(self):
        """Download and save downloaded files to repository directory."""
        self.create_temp_folder()
        while True:
            if not self.queue.empty():
                link = self.queue.get()
                text = await download_page(self.address, link)
                full_path, directory = self.extract_paths(link)
                self.create_saving_directory(directory)
                with open(f'{full_path}', 'w') as repo_file:
                    repo_file.write(text)
                print(f'[+]File saved in {full_path}')
FeltsAzn/TestTaskRadium
src/download_recursive.py
download_recursive.py
py
3,102
python
en
code
0
github-code
6
[ { "api_name": "multiprocessing.Queue", "line_number": 15, "usage_type": "call" }, { "api_name": "multiprocessing.Process", "line_number": 16, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 21, "usage_type": "call" }, { "api_name": "src.download...
33526996747
'''
Evaluate the pretrained VGG model on the Caltech101 test split
(same pretrained weights as CNN_DPL).
'''
import torch as t
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import data
from torchvision import transforms, datasets
import model
import warnings
warnings.filterwarnings('ignore')

batch_size = 128

test_dataset = datasets.ImageFolder(root='/data2/ci2p_user_data/kwei/Caltech101/test',
                                    transform=data.transform)
test_loader = DataLoader(dataset=test_dataset,
                         shuffle=True,
                         batch_size=batch_size,
                         )

device = t.device("cuda:1" if t.cuda.is_available() else "cpu")
net = model.VGG_Test()
net = net.to(device)


def test():
    """Run the network over the whole test loader and print the accuracy."""
    net.eval()
    correct = 0
    total = 0
    with t.no_grad():
        # FIX: the loop variable was named `data`, shadowing the imported
        # `data` module inside this function.
        for batch in test_loader:
            images, labels = batch
            images = images.to(device)
            labels = labels.to(device)
            outputs = net(images)
            # argmax over class logits -> predicted labels.
            _, pred = t.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()
    print('正确率:{}%'.format(100 * correct / total))


if __name__ == '__main__':
    test()
huifeidetiaotu96/CNN_DPL
test_VGG.py
test_VGG.py
py
1,266
python
en
code
0
github-code
6
[ { "api_name": "warnings.filterwarnings", "line_number": 12, "usage_type": "call" }, { "api_name": "torchvision.datasets.ImageFolder", "line_number": 15, "usage_type": "call" }, { "api_name": "torchvision.datasets", "line_number": 15, "usage_type": "name" }, { "api...
27215468512
# An earlier urllib.request.urlretrieve approach for the album cover was
# replaced by the requests-based download below.

import requests

# Open the destination first, then stream the thumbnail bytes into it;
# the context manager closes the file afterwards.
with open('image.jpg', 'wb') as image_file:
    image_file.write(requests.get('https://i.ytimg.com/vi/uijHb5U1pD8/default.jpg').content)
iostate/PythonProjects
DownloadMP4/open_image.py
open_image.py
py
343
python
en
code
0
github-code
6
[ { "api_name": "requests.get", "line_number": 9, "usage_type": "call" } ]
44904287123
import torch

from experiment import Experiment, AutoDateSet, train, get_args
from model.Tree import Tree
from dataset.criteo import CriteoDataset


def get_model(args):
    """Build the Tree model from the parsed arguments."""
    return Tree(args)


def get_dataset(args):
    """Load Criteo splits: train/valid come from a 90/10 random split of
    the first path, test from the second path.

    Returns ([train, valid, test], field_dims).
    """
    full_train = CriteoDataset(dataset_path=args.dataset_paths[0])
    n_train = int(len(full_train) * 0.9)
    n_valid = len(full_train) - n_train
    train_split, valid_split = torch.utils.data.random_split(
        full_train, (n_train, n_valid))
    test_split = CriteoDataset(dataset_path=args.dataset_paths[1])
    return [train_split, valid_split, test_split], full_train.field_dims


def main():
    """Wire up args, data and model, then run the training loop."""
    args = get_args()
    args.hidden = [1024, 512, 256, 128, 64]
    args.embed_dim = 16
    args.activation = 'relu'
    args.label = 'y'
    args.num_class = 2
    args.dataset_paths = ['data/criteo/train.txt', 'data/criteo/test.txt']

    datasets, field_dims = get_dataset(args)
    print(field_dims)
    args.field_dims = field_dims

    experiment = Experiment(model=get_model(args), args=args)
    data = AutoDateSet(datasets, args.batch_size, args.batch_size,
                       args.num_workers, args.pin_memory)
    train(args, experiment, data)


if __name__ == "__main__":
    main()
Jarlene/Experiment
tree_main.py
tree_main.py
py
1,237
python
en
code
0
github-code
6
[ { "api_name": "model.Tree", "line_number": 10, "usage_type": "name" }, { "api_name": "model.Tree.Tree", "line_number": 10, "usage_type": "call" }, { "api_name": "model.Tree", "line_number": 11, "usage_type": "name" }, { "api_name": "dataset.criteo", "line_numb...
39991774220
import os
from glob import glob
from pathlib import Path

SOURCE_DIRECTORY = "CSVdatabases"
TARGET_DIRECTORY = "cleaned_databases"
CURRENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
DATABASES_FOLDER = f"{CURRENT_DIRECTORY}/{SOURCE_DIRECTORY}"
CLEANED_FOLDER = f"{CURRENT_DIRECTORY}/{TARGET_DIRECTORY}"


def clean_file_contents(contents: list[str]):
    """Drop the 4-line header and 2-line footer of a raw export."""
    return contents[4:-2]


def clean_all_databases():
    """Copy every source CSV into the target tree, stripped of header/footer."""
    # NOTE(review): glob() is called without recursive=True, so '**' here
    # matches exactly one directory level -- confirm that is intended.
    for source_file in glob(f"{DATABASES_FOLDER}/**/*.csv"):
        destination_dir = os.path.dirname(source_file).replace(
            SOURCE_DIRECTORY, TARGET_DIRECTORY)
        destination_file = source_file.replace(SOURCE_DIRECTORY, TARGET_DIRECTORY)
        if not os.path.exists(destination_dir):
            os.makedirs(destination_dir)
        with open(source_file, 'r') as src, open(destination_file, 'w') as dst:
            dst.writelines(clean_file_contents(src.readlines()))


def remove_empty_databases():
    """Delete cleaned CSVs with fewer than two lines (i.e. no data rows)."""
    for cleaned_file in glob(f"{CLEANED_FOLDER}/**/*.csv"):
        with open(cleaned_file, 'r') as handle:
            is_empty = len(handle.readlines()) < 2
        if is_empty:
            Path(cleaned_file).unlink()


if __name__ == '__main__':
    clean_all_databases()
    remove_empty_databases()
DekunZhang/UCL_IFRC_IXN_PUBLIC
DesInventar/data_cleaner.py
data_cleaner.py
py
1,417
python
en
code
0
github-code
6
[ { "api_name": "os.path.dirname", "line_number": 7, "usage_type": "call" }, { "api_name": "os.path", "line_number": 7, "usage_type": "attribute" }, { "api_name": "os.path.realpath", "line_number": 7, "usage_type": "call" }, { "api_name": "glob.glob", "line_numb...
44900991873
import torch
import torch.nn as nn
from argparse import Namespace

from layers.Layers import DualAttenion, Transpose


class CARD(nn.Module):
    """CARD time-series forecaster: patch embedding + cls token, a stack of
    dual-attention (over tokens and over channels) layers, and an MLP decoder."""

    def __init__(self, args: Namespace) -> None:
        super(CARD, self).__init__()
        self.patch_len = args.patch_len
        self.stride = args.stride
        self.embed_dim = args.embed_dim
        self.task_name = args.task_name
        patch_num = int((args.seq_len - self.patch_len)/self.stride + 1)
        self.patch_num = patch_num
        self.pos_embed = nn.Parameter(
            torch.randn(patch_num, args.embed_dim)*1e-2)
        self.total_token_number = self.patch_num + 1
        args.total_token_number = self.total_token_number
        # FIX: `model_token_number` was read below but never assigned,
        # raising AttributeError at construction time.  forward() prepends
        # exactly one cls token, so the decoder consumes (patch_num + 1)
        # tokens and no extra model tokens exist -> 0.
        self.model_token_number = 0

        # embedding layer related
        self.input_projection = nn.Linear(self.patch_len, args.embed_dim)
        self.input_dropout = nn.Dropout(args.dropout)
        self.cls = nn.Parameter(torch.randn(1, args.embed_dim)*1e-2)

        # mlp decoder: flattens all tokens of a channel into one vector
        self.out_proj = nn.Linear(
            (patch_num+1+self.model_token_number)*args.embed_dim, args.pred_len)

        # dual attention encoder related
        self.Attentions_over_token = nn.ModuleList(
            [DualAttenion(args) for i in range(args.hiden_layer_num)])
        self.Attentions_over_channel = nn.ModuleList(
            [DualAttenion(args, over_channel=True) for i in range(args.hiden_layer_num)])
        self.Attentions_mlp = nn.ModuleList(
            [nn.Linear(args.embed_dim, args.embed_dim) for i in range(args.hiden_layer_num)])
        self.Attentions_dropout = nn.ModuleList(
            [nn.Dropout(args.dropout) for i in range(args.hiden_layer_num)])
        self.Attentions_norm = nn.ModuleList([nn.Sequential(Transpose(1, 2),
                                                            nn.BatchNorm1d(args.embed_dim,
                                                                           momentum=args.momentum),
                                                            Transpose(1, 2)) for i in range(args.hiden_layer_num)])

    def forward(self, z: torch.Tensor):
        """z: (batch, channels, seq_len) -> (batch, channels, pred_len)."""
        b, c, s = z.shape

        # inputs normalization (per series, over the time axis)
        z_mean = torch.mean(z, dim=(-1), keepdims=True)
        z_std = torch.std(z, dim=(-1), keepdims=True)
        z = (z - z_mean)/(z_std + 1e-4)

        # tokenization: unfold into overlapping patches, project, add pos emb
        zcube = z.unfold(dimension=-1, size=self.patch_len, step=self.stride)
        z_embed = self.input_dropout(
            self.input_projection(zcube)) + self.pos_embed
        # prepend one learned cls token per (batch, channel)
        cls_token = self.cls.repeat(z_embed.shape[0], z_embed.shape[1], 1, 1)
        z_embed = torch.cat((cls_token, z_embed), dim=-2)

        # dual attention encoder: alternate attention over channels and
        # over tokens, merge with an MLP, residual, then batch-norm
        inputs = z_embed
        b, c, t, h = inputs.shape
        for a_2, a_1, mlp, drop, norm in zip(self.Attentions_over_token,
                                             self.Attentions_over_channel,
                                             self.Attentions_mlp,
                                             self.Attentions_dropout,
                                             self.Attentions_norm):
            output_1 = a_1(inputs.permute(0, 2, 1, 3)).permute(0, 2, 1, 3)
            output_2 = a_2(output_1)
            outputs = drop(mlp(output_1+output_2))+inputs
            outputs = norm(outputs.reshape(b*c, t, -1)).reshape(b, c, t, -1)
            inputs = outputs

        # mlp decoder over the flattened token dimension
        z_out = self.out_proj(outputs.reshape(b, c, -1))

        # denormalization back to the input scale
        z = z_out * (z_std+1e-4) + z_mean
        return z
Jarlene/AlgoRepo
models/ts/card.py
card.py
py
3,357
python
en
code
0
github-code
6
[ { "api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 8, "usage_type": "name" }, { "api_name": "argparse.Namespace", "line_number": 9, "usage_type": "name" }, { "api_name": "torch.nn.Parameter", ...
10917250777
from collections import namedtuple
from backend.DBEntry import DBEntry, to_db_obj_name
from backend.Ingredient import Ingredient


class Recipe(DBEntry):
    """A recipe of a dish. Consists of ingredients with optional amount (in optional units)."""
    table_main = "recipes"
    # (table, fk column, cascade-delete?) triples consumed by DBEntry machinery.
    associations = [
        ("user_meals", "recipe_id", False),
        ("recipe_contents", "recipe_id", True)
    ]

    def __init__(self, name, instructions="", contents=None, db=None, id=None):
        """Constructor. Returns functional object.

        :param name: Inherits from DBEntry.
        :param instructions: A string, instructios on how to cook the dish.
        :param contents: A set of Content namedtuples - ingredients and their amounts
            that comprise the dish. Defaults to a new empty set.
        :param db: Inherits from DBEntry.
        :param id: Inherits from DBEntry.
        """
        super().__init__(name=name, db=db, id=id)
        self.instructions = instructions
        # BUGFIX: the default used to be the mutable literal set(), which Python
        # evaluates once at def-time — every Recipe created without explicit
        # contents then shared (and could mutate) the same set object.
        self.contents = set() if contents is None else contents

    @classmethod
    def from_db(cls, db, id=None, name=None):
        """Search db for recipe entry by id or name (in that priority) and return
        constructed object. Returns None if id is not found.
        """
        recipe = super().from_db(db=db, id=id, name=name)
        if not recipe:
            return None
        id = recipe.id

        # Constructing contents set: one Content per recipe_contents row, with
        # the full Ingredient object materialized for each.
        needle = (id,)
        db.c.execute(
            'SELECT ingredient_id, amount, units FROM recipe_contents '
            'WHERE recipe_id = ?', needle
        )
        rows = db.c.fetchall()
        contents = set()
        for row in rows:
            ingredient = Ingredient.from_db(db, row["ingredient_id"])
            content = Content(ingredient, row["amount"], row["units"])
            contents.add(content)
        recipe.contents = contents

        return recipe

    def new_to_db(self):
        """Write a new recipe entry to the DB. Return id assigned by the DB."""
        table_main = to_db_obj_name(self.table_main)

        # Inserting values to the main table
        recipe = (self.name, self.instructions)
        self.db.c.execute(f'INSERT INTO "{table_main}" (name, instructions) VALUES (?, ?)',
                          recipe)

        # Remembering id assigned by the DB (lastrowid -> application-level id)
        new_row_id = (self.db.c.lastrowid,)
        self.db.c.execute(f'SELECT id FROM "{table_main}" WHERE rowid = ?', new_row_id)
        row = self.db.c.fetchone()
        id = row["id"]

        # inserting contents to the associative table
        contents = {(id, c.ingredient.id, c.amount, c.units) for c in self.contents}
        self.db.c.executemany(
            'INSERT INTO recipe_contents (recipe_id, ingredient_id, amount, units) '
            '    VALUES (?, ?, ?, ?)', contents
        )

        return id

    def edit_in_db(self):
        """Edit existing DB recipe to match current object state.
        Return number of affected rows.
        """
        table_main = to_db_obj_name(self.table_main)
        rows_affected = 0

        # Updating values to the main table
        recipe = (self.name, self.instructions, self.id)
        self.db.c.execute(f'UPDATE "{table_main}" SET name = ?, instructions = ? WHERE id = ?',
                          recipe)
        rows_affected += self.db.c.rowcount

        # Constructing sets of the recipe's old and new contents' ingredient ids
        new_ingredient_ids = {c.ingredient.id for c in self.contents}
        needle = (self.id,)
        old_contents = self.db.c.execute('SELECT ingredient_id as id FROM recipe_contents WHERE '
                                         'recipe_id = ?', needle).fetchall()
        old_ingredient_ids = {c["id"] for c in old_contents}

        # Removing contents missing in the new set
        to_remove = {(self.id, i_id) for i_id in old_ingredient_ids - new_ingredient_ids}
        self.db.c.executemany('DELETE FROM recipe_contents WHERE recipe_id = ? '
                              'AND ingredient_id = ?', to_remove)
        rows_affected += self.db.c.rowcount

        # Adding contents missing in the old set
        new_contents = {c for c in self.contents
                        if c.ingredient.id in new_ingredient_ids - old_ingredient_ids}
        to_add = {(self.id, c.ingredient.id, c.amount, c.units) for c in new_contents}
        self.db.c.executemany(
            'INSERT INTO recipe_contents (recipe_id, ingredient_id, amount, units) '
            '    VALUES (?, ?, ?, ?)', to_add
        )
        rows_affected += self.db.c.rowcount

        # Updating contents present in both the old and the new sets
        # (self.contents minus the freshly-added ones == the intersection).
        updated_contents = self.contents - new_contents
        to_update = {(c.amount, c.units, self.id, c.ingredient.id) for c in updated_contents}
        self.db.c.executemany(
            'UPDATE recipe_contents SET amount = ?, units = ? '
            '    WHERE recipe_id = ? AND ingredient_id = ?', to_update
        )

        return rows_affected

    @classmethod
    def get_summary(cls, db, name_sort=False):
        """Return summary table for Recipe objects in DB as dictionary list.

        id: recipe db_id.
        name: recipe name.
        instructions: instructions on how to prepare the dish.
        contents: list of contents (ingredient name, amount, units).
        dependents: number of other class entries referencing this id as a
            foreign key.

        :param name_sort: A boolean. If True, summary will be recursively sorted
            by object name ascending.
        """
        summary = []

        # Get main table data (ordered by id so the merge loops below can walk
        # summary and the id-ordered detail queries in lockstep).
        db.c.execute('SELECT id, name, instructions '
                     'FROM recipes '
                     'ORDER BY id ASC'
                     )
        for db_row in db.c.fetchall():
            row = {x: y for x, y in zip(db_row.keys(), db_row)}
            summary.append(row)

        # Get content lists
        db.c.execute(
            'SELECT recipe_contents.recipe_id, ingredients.name as ingredient, '
            '    recipe_contents.amount, recipe_contents.units '
            'FROM recipe_contents '
            'LEFT JOIN ingredients ON recipe_contents.ingredient_id = ingredients.id '
            'ORDER BY recipe_id ASC'
        )
        db_rows = db.c.fetchall()
        if db_rows:
            it_summary = iter(summary)
            s_row = next(it_summary)
            for db_row in db_rows:
                while not db_row["recipe_id"] == s_row["id"]:
                    # Ensure at least an empty 'cell' exists for this recipe
                    # before moving to next
                    try:
                        s_row["contents"]
                    except KeyError:
                        s_row["contents"] = []
                    s_row = next(it_summary)
                content = {
                    "ingredient": db_row["ingredient"],
                    "amount": db_row["amount"],
                    "units": db_row["units"],
                }
                try:
                    s_row["contents"].append(content)
                except KeyError:
                    s_row["contents"] = [content]
            # Fill remaining rows with empty content lists
            finished = False
            while not finished:
                try:
                    s_row = next(it_summary)
                    s_row["contents"] = []
                except StopIteration:
                    finished = True

        # Get dependents (user_meals rows referencing each recipe)
        db.c.execute(
            'SELECT recipe_id, COUNT(user_id) as dependents FROM user_meals '
            'GROUP BY recipe_id '
            'ORDER BY recipe_id ASC'
        )
        db_rows = db.c.fetchall()
        if db_rows:
            it_summary = iter(summary)
            s_row = next(it_summary)
            for db_row in db_rows:
                while not db_row["recipe_id"] == s_row["id"]:
                    # Set dependents = 0 for recipes that have no user_meals rows
                    try:
                        s_row["dependents"]
                    except KeyError:
                        s_row["dependents"] = 0
                    s_row = next(it_summary)
                s_row["dependents"] = db_row["dependents"]
            # Fill remaining rows with dependents = 0
            finished = False
            while not finished:
                try:
                    s_row = next(it_summary)
                    s_row["dependents"] = 0
                except StopIteration:
                    finished = True

        if name_sort:
            summary.sort(key=lambda x: x["name"].lower())
            for row in summary:
                try:
                    row["contents"].sort(key=lambda x: x["ingredient"].lower())
                except KeyError:
                    pass

        return summary

    def toJSONifiable(self):
        """Return a JSON-serializable dict: parent fields plus contents as dicts."""
        dct = super().toJSONifiable()
        contents_list = [item._asdict() for item in self.contents]
        dct["contents"] = contents_list
        return dct


# constructor validation from kindall"s answer at
# https://stackoverflow.com/a/42146452
ContentTuple = namedtuple("ContentTuple", "ingredient amount units")


class Content(ContentTuple):
    """Represents quantity or amount of a specific ingredient in a dish"""
    __slots__ = ()

    def __new__(cls, ingredient, amount, units=None):
        # NOTE(review): a non-numeric amount raises RuntimeError here, but a
        # negative one propagates as ValueError, while _replace converts the
        # ValueError to RuntimeError — callers should confirm which is intended.
        try:
            if not amount >= 0:
                raise ValueError("amount must be a positive number")
        except TypeError:
            raise RuntimeError("amount must be a positive number")
        return ContentTuple.__new__(cls, ingredient, amount, units)

    def _replace(self, **kwargs):
        try:
            if not kwargs["amount"] >= 0:
                raise ValueError("amount must be a positive number")
        except ValueError:
            raise RuntimeError("amount must be a positive number")
        except KeyError:
            pass  # amount not being replaced — nothing to validate
        return super()._replace(**kwargs)

    def toJSONifiable(self):
        return self._asdict()
Longneko/demo_food_app
backend/Recipe.py
Recipe.py
py
10,035
python
en
code
0
github-code
6
[ { "api_name": "backend.DBEntry.DBEntry", "line_number": 7, "usage_type": "name" }, { "api_name": "backend.Ingredient.Ingredient.from_db", "line_number": 50, "usage_type": "call" }, { "api_name": "backend.Ingredient.Ingredient", "line_number": 50, "usage_type": "name" },...
7035699251
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 21 13:24:47 2018

@author: kausic

Walk the SUN RGB-D dataset tree, copy each annotated RGB/depth image pair into
a flat output directory, and record one "img_NNNNN <scene label>" line per
pair in data.txt.
"""

import os
import cv2
import numpy as np
from tqdm import tqdm

root = "/home/kausic/Desktop/My_research/dataset/sunrgbd/SUNRGBD"
save_location = "/home/kausic/ASU_MS/SML/project/sunrgbd_images/"

count = 0
# "with" guarantees data.txt is flushed and closed even if a cv2 call raises
# (the original left the handle open on any exception).
with open(save_location + "data.txt", 'w') as data_file:
    for dirn, subn, fileList in tqdm(os.walk(root, True), desc="Files recorded"):
        if 'annotation' not in subn:
            continue
        (_, __, files) = os.walk(dirn + '/image/').__next__()
        if not files:  # guard: a malformed sample dir without an image file
            continue
        image_path = dirn + '/image/' + files[0]
        (_, __, files) = os.walk(dirn + '/depth/').__next__()
        if not files:  # guard: missing depth file would raise IndexError
            continue
        depth_path = dirn + '/depth/' + files[0]
        with open(dirn + '/scene.txt') as scene_file:
            scene = scene_file.read()
        rgb_img = cv2.imread(image_path)
        depth_img = cv2.imread(depth_path)
        if rgb_img is None or depth_img is None:  # unreadable/corrupt pair
            continue
        final_string = "img_{0:05d} ".format(count)
        img_name = "rgb_img_{0:05d}.jpg".format(count)
        depth_name = "depth_img_{0:05d}.jpg".format(count)
        final_string += scene
        data_file.write(final_string + '\n')
        cv2.imwrite(save_location + img_name, rgb_img)
        cv2.imwrite(save_location + depth_name, depth_img)
        count += 1
Bharathgc/Evaluating-Fusion-points-for-multi-stream-networks-handling-cross-modal-data
data_extractor_script.py
data_extractor_script.py
py
1,306
python
en
code
11
github-code
6
[ { "api_name": "tqdm.tqdm", "line_number": 19, "usage_type": "call" }, { "api_name": "os.walk", "line_number": 19, "usage_type": "call" }, { "api_name": "os.walk", "line_number": 22, "usage_type": "call" }, { "api_name": "os.walk", "line_number": 24, "usage...
31454964996
from typing import Any
from django.db.models import (
    Model,
    CharField,
    ForeignKey,
    BooleanField,
    ManyToManyField,
    UniqueConstraint,
    DateTimeField,
    CASCADE,
)
from abstracts.models import AbstractDateTime
from subjectss.models import Topic
from subjectss.models import Student


class QuizType(AbstractDateTime):
    """Kind of quiz (subject-wide, per-topic or per-class)."""
    # Numeric codes for the three quiz categories.
    SUBJECT_QUIZ_TYPE = 1
    TOPIC_QUIZ_TYPE = 2
    CLASS_QUIZ_TYPE = 3
    QUIZ_NAME_LIMIT = 100
    name: CharField = CharField(
        max_length=QUIZ_NAME_LIMIT,
        unique=True,
        db_index=True,
        verbose_name="Наименование типа теста"
    )

    class Meta:
        verbose_name: str = "Тип теста"
        verbose_name_plural: str = "Типы тестов"
        ordering: tuple[str] = ("-datetime_updated",)

    def __str__(self) -> str:
        return self.name


class Question(AbstractDateTime):
    """A single question, attached to a Topic."""
    TEST_NAME_LIMIT = 240
    name: CharField = CharField(
        max_length=TEST_NAME_LIMIT,
        unique=True,
        db_index=True,
        verbose_name="Наименование"
    )
    # Deleting the Topic cascades to its questions.
    attached_subject_class: Topic = ForeignKey(
        to=Topic,
        on_delete=CASCADE,
        related_name="questions",
        verbose_name="Вопрос к теме"
    )

    class Meta:
        verbose_name_plural: str = "Вопросы"
        verbose_name: str = "Вопрос"
        ordering: tuple[str] = ("-datetime_updated",)

    def __str__(self) -> str:
        return self.name


class Answer(AbstractDateTime):
    """One answer option for a Question; is_correct marks the right one(s)."""
    ANSWER_NAME_LIMIT = 250
    name: CharField = CharField(
        max_length=ANSWER_NAME_LIMIT,
        verbose_name="Ответ"
    )
    question: Question = ForeignKey(
        to=Question,
        on_delete=CASCADE,
        related_name="answers",
        verbose_name="Ответ к вопросу"
    )
    is_correct: BooleanField = BooleanField(
        default=False,
        verbose_name="Правильный ответ?"
    )

    class Meta:
        verbose_name_plural: str = "Ответы"
        verbose_name: str = "Ответ"
        ordering: tuple[str] = ("-datetime_updated",)
        # A question cannot have two answer options with the same text.
        constraints: tuple[Any] = (
            UniqueConstraint(
                fields=['name', 'question'],
                name="unique_asnwer_name_question"
            ),
        )

    def __str__(self) -> str:
        return self.name


class Quiz(Model):
    """A quiz taken by a Student.

    questions (through QuizQuestionAnswer) records the student's answers;
    attached_questions is a separate read-only M2M of the questions included.
    """
    QUIZ_MAX_NAME = 250
    name: CharField = CharField(
        max_length=QUIZ_MAX_NAME,
        verbose_name="Название теста"
    )
    student: Student = ForeignKey(
        to=Student,
        on_delete=CASCADE,
        related_name="subject_quizes",
        verbose_name="Зарегестрированный стедент"
    )
    quiz_type: QuizType = ForeignKey(
        to=QuizType,
        on_delete=CASCADE,
        related_name="quizes",
        verbose_name="Тип куиза"
    )
    # Through-model M2M: each link also stores the user's chosen Answer.
    questions: ManyToManyField = ManyToManyField(
        to=Question,
        through="QuizQuestionAnswer",
        through_fields=["quiz", "question"],
        verbose_name="Вопросы на теста"
    )
    datetime_created: DateTimeField = DateTimeField(
        verbose_name="время и дата создания",
        auto_now_add=True
    )
    attached_questions: ManyToManyField = ManyToManyField(
        to=Question,
        blank=True,
        related_name="quizess",
        verbose_name="Прикрепленные вопросы теста (для чтения)"
    )

    class Meta:
        verbose_name: str = "Тест"
        verbose_name_plural: str = "Тесты"

    def __str__(self) -> str:
        return f"Студент: '{self.student}' Тип теста: '{self.quiz_type}'"


class QuizQuestionAnswer(Model):
    """Through model linking a Quiz to a Question plus the user's Answer."""
    quiz: Quiz = ForeignKey(
        to=Quiz,
        on_delete=CASCADE,
        related_name="quiz_questions",
        verbose_name="Тест"
    )
    question: Question = ForeignKey(
        to=Question,
        on_delete=CASCADE,
        related_name="quiz_questions",
        verbose_name="Вопрос"
    )
    user_answer: Answer = ForeignKey(
        to=Answer,
        on_delete=CASCADE,
        related_name="user_answer",
        verbose_name="Ответ пользователя"
    )

    class Meta:
        verbose_name: str = "Ответ на вопрос теста"
        verbose_name_plural: str = "Ответы на вопросы тестов"
        # Each question may be answered at most once per quiz.
        constraints: tuple[Any] = (
            UniqueConstraint(
                fields=['quiz', 'question'],
                name="unique_quiz_question"
            ),
        )

    def __str__(self) -> str:
        return f"{self.quiz} {self.question} {self.user_answer}"
NNaraB/education_site_back
apps/tests/models.py
models.py
py
4,723
python
en
code
0
github-code
6
[ { "api_name": "abstracts.models.AbstractDateTime", "line_number": 19, "usage_type": "name" }, { "api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "name" }, { "api_name": "abstracts.models.AbstractDateTime", "line_number": 40, "usage_type": "name" ...
27132094648
import logging
# BUGFIX: logging.config is a submodule; a plain "import logging" does not
# guarantee it is loaded, so logging.config.fileConfig below could raise
# AttributeError depending on import order.
import logging.config

from operations.operation_manager import Operation
from operations import *

logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
logger = logging.getLogger('rvapi')


#process that data!!
def process_queue_data(rqueue, queue_exists, agent_id, username, customer_name,
                       uri, method):
    """Drain the agent's queue and stamp a pickup time on every operation.

    :param rqueue: queue object exposing get_all_objects_in_queue().
    :param queue_exists: whether a queue exists for this agent at all.
    :param agent_id: id of the agent picking up the operations.
    :returns: the list of queued operations, or [] when no queue exists.
    """
    if not queue_exists:
        return []

    agent_queue = rqueue.get_all_objects_in_queue()
    for operation in agent_queue:
        # Only operations that carry an id can have their pickup time recorded.
        if operation.get(OperationKey.OperationId):
            oper = Operation(username, customer_name, uri, method)
            oper.update_operation_pickup_time(
                operation[OperationKey.OperationId], agent_id, CHECKIN
            )
    return agent_queue
SteelHouseLabs/vFense
tp/src/receiver/corehandler.py
corehandler.py
py
837
python
en
code
5
github-code
6
[ { "api_name": "logging.config.fileConfig", "line_number": 6, "usage_type": "call" }, { "api_name": "logging.config", "line_number": 6, "usage_type": "attribute" }, { "api_name": "logging.getLogger", "line_number": 7, "usage_type": "call" }, { "api_name": "operatio...
16233043242
"""Flask backend for a simple quiz application (users, quizzes, leaderboards)."""

from flask import Flask, url_for, request, json, session
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
import random
from passlib.hash import sha256_crypt

app = Flask(__name__)
app.secret_key = "alon"
app.config.from_object(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///site.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
CORS(app)

db = SQLAlchemy(app)


def generate_pin(digits):
    """Return a random numeric pin of the given number of digits (as a string)."""
    # NOTE(review): random is not cryptographically secure; if pins must be
    # unguessable, switch to the secrets module.
    return "".join(str(random.randint(0, 9)) for _ in range(digits))


class User(db.Model):
    __tablename__ = "user"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(100), unique=True, nullable=False)
    password = db.Column(db.String(100), nullable=False)  # stores sha256_crypt hash
    quizzes_done = db.Column(db.Integer, default=0)
    correct_answers = db.Column(db.Integer, default=0)

    # relationship with Quiz
    quizzes = db.relationship("Quiz", backref="author", lazy=True, cascade="all, delete-orphan")


class Quiz(db.Model):
    __tablename__ = "quiz"
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): f"Quiz{id}" formats the *Column object* defined above, not a
    # row id — the default is effectively garbage, but it is always overridden
    # by the creation endpoint (name="MyQuiz"); confirm before relying on it.
    name = db.Column(db.String(100), default=f"Quiz{id}")
    pin = db.Column(db.String(8), unique=True)
    published = db.Column(db.Boolean, default=False, nullable=False)

    # connection with user
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)

    # relationship with ChoiceQuestion
    choice_questions = db.relationship("ChoiceQuestion", backref="author", lazy=True,
                                       cascade="all, delete-orphan")

    # relationship with Status
    statuses = db.relationship("Status", backref="author", lazy=True,
                               cascade="all, delete-orphan")

    def __repr__(self):
        return f"{self.name}, pin={self.pin}"

    def get_json(self, with_answers=False):
        """Serialize the quiz; include correct flags only when with_answers."""
        my_json = {"name": self.name, "pin": self.pin, "published": self.published,
                   "choice_questions": []}
        for choice_question in self.choice_questions:
            my_json["choice_questions"].append(choice_question.get_json(with_answers))
        return my_json

    def get_statuses_json(self):
        """Return all player statuses of this quiz as a list of dicts."""
        statuses_json = []
        for status in self.statuses:
            statuses_json.append(status.get_json())
        return statuses_json


class ChoiceQuestion(db.Model):
    __tablename__ = "choice_question"
    id = db.Column(db.Integer, primary_key=True)
    number = db.Column(db.Integer)
    question = db.Column(db.String(120))

    # connection to quiz
    quiz_id = db.Column(db.Integer, db.ForeignKey("quiz.id"), nullable=False)

    # relationship with choice
    choices = db.relationship("Choice", backref="author", lazy=True,
                              cascade="all, delete-orphan")

    def __repr__(self):
        string = f"Q{self.number}"
        string += f"\nquestion: {self.question}"
        for choice in self.choices:
            string += "\n" + choice.__repr__()
        return string

    def get_json(self, with_answers=False):
        my_json = {"number": self.number, "question": self.question, "choices": []}
        for choice in self.choices:
            my_json["choices"].append(choice.get_json(with_answers))
        return my_json


class Choice(db.Model):
    __tablename__ = "choice"
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(120))
    correct = db.Column(db.Boolean)

    # connection to question
    choice_question_id = db.Column(db.Integer, db.ForeignKey("choice_question.id"),
                                   nullable=False)

    def __repr__(self):
        return f"text={self.text}, correct={self.correct}"

    def get_json(self, with_answers=False):
        # Hide the correct flag from players; expose it only to the quiz editor.
        if not with_answers:
            return {"text": self.text}
        else:
            return {"text": self.text, "correct": self.correct}


class Status(db.Model):
    """Best grade and play count of one user on one quiz."""
    __tablename__ = "status"
    id = db.Column(db.Integer, primary_key=True)
    grade = db.Column(db.Integer)
    amount_played = db.Column(db.Integer, default=1)
    user_id = db.Column(db.Integer, nullable=False)

    # connection to quiz
    quiz_id = db.Column(db.Integer, db.ForeignKey("quiz.id"), nullable=False)

    def get_json(self):
        user = User.query.filter_by(id=self.user_id).first()
        return {"username": user.username, "grade": self.grade,
                "amount": self.amount_played}


# create a new user. Return true if created, false otherwise
@app.route("/user/signup", methods=["POST"])
def sign_up():
    response = request.get_json()
    username = response["username"]
    password = response["password"]
    # .hash replaces the deprecated .encrypt alias (removed in passlib 1.8).
    hashed_password = sha256_crypt.hash(password)
    user = User.query.filter_by(username=username).first()
    if user is not None:
        return {"created": "false"}
    new_user = User(username=username, password=hashed_password)
    db.session.add(new_user)
    db.session.commit()
    return {"created": "true"}


# try to login. Return user id if able to login
@app.route("/user/login", methods=["GET"])
def login():
    data = json.loads(request.args.get("data"))
    username = data["username"]
    password = data["password"]
    user = User.query.filter_by(username=username).first()
    if user is None or not sha256_crypt.verify(password, user.password):
        return {"user_id": "None"}
    return {"user_id": user.id}


# get user id. Returns stats of user with this id
@app.route("/home/userinfo", methods=["GET"])
def user_info():
    user_id = json.loads(request.args.get("data"))
    user = User.query.filter_by(id=user_id).first()
    if user is None:
        return {"found": "false"}
    return_data = {"username": user.username,
                   "quizzes_done": user.quizzes_done,
                   "correct_answers": user.correct_answers,
                   "quizzes_made": len(user.quizzes),
                   "found": "true"
                   }
    return return_data


# create a new quiz for user with given id. Returns the pin.
@app.route("/create/newQuiz", methods=["GET"])
def new_quiz():
    user_id = json.loads(request.args.get("data"))
    # Re-roll until the pin is unique among existing quizzes.
    pin = generate_pin(8)
    while Quiz.query.filter_by(pin=pin).first() is not None:
        pin = generate_pin(8)
    quiz = Quiz(name="MyQuiz", pin=pin, user_id=user_id)
    db.session.add(quiz)
    db.session.commit()
    return {"pin": pin}


# get current state of quiz questions and update quiz accordingly for user.
@app.route("/create/postQuestions", methods=["POST"])
def post_questions():
    response = request.get_json()["quiz"]
    user_id = request.get_json()["user_id"]
    pin = response["pin"]
    quiz = Quiz.query.filter_by(pin=pin, user_id=user_id).first()
    if quiz is None or quiz.published:
        return {"posted": "false"}
    questions = response["questions"]
    quiz.name = response["name"]
    quiz.choice_questions = []  # replace all existing questions wholesale
    for question in questions:
        if question["type"] == "ChoiceQuestion":
            number = question["number"]
            question_text = question["question"]
            question_db = ChoiceQuestion(number=number, question=question_text,
                                         quiz_id=quiz.id)
            db.session.add(question_db)
            db.session.commit()
            for choice in question["choices"]:
                text = choice["text"]
                correct = choice["correct"]
                choice_db = Choice(text=text, correct=correct,
                                   choice_question_id=question_db.id)
                db.session.add(choice_db)
    db.session.commit()
    return {"posted": "true"}


# publish quiz of certain pin, allowing others to play it.
@app.route("/create/publishQuiz", methods=["POST"])
def publish_quiz():
    pin = request.get_json()["pin"]
    user_id = request.get_json()["user_id"]
    quiz = Quiz.query.filter_by(pin=pin, user_id=user_id).first()
    if quiz is None:
        return {"published": "false"}
    quiz.published = True
    db.session.commit()
    return {"published": "true"}


# get pin of quiz and return whether a quiz with that pin exists and was published
@app.route("/enterPin/quizExists", methods=["GET"])
def quiz_exists():
    data = json.loads(request.args.get("data"))
    pin = data["pin"]
    quiz = Quiz.query.filter_by(pin=pin).first()
    if (quiz is None) or (not quiz.published):
        return {"exists": "false", "pin": pin}
    return {"exists": "true", "pin": pin}


# get pin and return a published quiz with that pin
@app.route("/play/getQuiz", methods=["GET"])
def get_quiz():
    data = json.loads(request.args.get("data"))
    pin = data["pin"]
    quiz = Quiz.query.filter_by(pin=pin).first()
    if (quiz is None) or (not quiz.published):
        return {"exists": "false", "pin": pin}
    return quiz.get_json()


# gets pin of quiz, user and what player answered. Returns number of questions he got right.
# also updates user stats accordingly
@app.route("/play/correctAnswers", methods=["POST"])
def correct_answers():
    response = request.get_json()["quiz"]
    user_id = request.get_json()["user_id"]
    pin = response["pin"]
    quiz = Quiz.query.filter_by(pin=pin).first()
    if (quiz is None) or (not quiz.published):
        return {"error": "cannot play quiz"}
    correct = 0
    # go over each question sent; a question counts as correct only if every
    # choice's correct-flag matches the stored one.
    for question in response["questions"]:
        if question["type"] == "ChoiceQuestion":
            is_correct = True
            # find matching question in quiz
            number = question["number"]
            question_text = question["question"]
            quiz_question = ChoiceQuestion.query.filter_by(quiz_id=quiz.id, number=number,
                                                           question=question_text).first()
            # go over each choice in question sent
            for choice in question["choices"]:
                # find matching choice in question
                text = choice["text"]
                question_choice = Choice.query.filter_by(text=text,
                                                         choice_question_id=quiz_question.id).first()
                if choice["correct"] != question_choice.correct:
                    is_correct = False
                    break
            if is_correct:
                correct += 1
    user = User.query.filter_by(id=user_id).first()
    grade = (correct * 100) / len(quiz.choice_questions)
    # update user stats
    user.correct_answers += correct
    user.quizzes_done += 1
    # create new status, or update the existing one keeping the best grade
    status = Status.query.filter_by(user_id=user_id, quiz_id=quiz.id).first()
    if status is None:
        new_status = Status(grade=grade, user_id=user_id, quiz_id=quiz.id)
        db.session.add(new_status)
    else:
        status.amount_played += 1
        status.grade = max(grade, status.grade)
    db.session.commit()
    return {"correctAnswers": correct}


# gets quiz pin and returns all user statuses for that quiz
@app.route("/leaderboard/getStatuses", methods=["GET"])
def get_statuses():
    data = json.loads(request.args.get("data"))
    pin = data["pin"]
    quiz = Quiz.query.filter_by(published=True, pin=pin).first()
    if quiz is None:
        return {"found": "false"}
    return {"found": "true", "statuses": quiz.get_statuses_json()}


# get list of all quizzes this user has creates and not published
@app.route("/edit/getUserQuizzes", methods=["GET"])
def get_user_quizzes():
    data = json.loads(request.args.get("data"))
    user = User.query.filter_by(id=data).first()
    json_to_return = []
    for quiz in user.quizzes:
        if not quiz.published:
            json_to_return.append(quiz.get_json())
    return {"quizzes": json_to_return}


# get quiz with answers for user to edit
@app.route("/create/getQuizWithAnswers", methods=["GET"])
def get_quiz_with_answers():
    data = json.loads(request.args.get("data"))
    user_id = data["user_id"]
    pin = data["pin"]
    quiz = Quiz.query.filter_by(user_id=user_id, pin=pin, published=False).first()
    return {"quiz": quiz.get_json(True)}


# delete quiz of certain pin created by user
@app.route("/edit/deleteQuiz", methods=["GET"])
def delete_quiz():
    data = json.loads(request.args.get("data"))
    user_id = data["user_id"]
    pin = data["pin"]
    quiz = Quiz.query.filter_by(user_id=user_id, pin=pin, published=False).first()
    if quiz is not None:
        db.session.delete(quiz)
        db.session.commit()
        return {"deleted": "true", "pin": pin}
    return {"deleted": "false", "pin": pin}


if __name__ == "__main__":
    # BUGFIX: db.create_all() used to be placed AFTER app.run(), so it only
    # executed once the server had already shut down and the tables were never
    # created before serving requests.  Create the schema first, then serve.
    db.create_all()
    app.run()
nivschuman/QuizServer
main.py
main.py
py
12,314
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 8, "usage_type": "call" }, { "api_name": "flask_cors.CORS", "line_number": 15, "usage_type": "call" }, { "api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 18, "usage_type": "call" }, { "api_name": "random.randint...
42104820373
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession


class Reporter(object):
    """Reads source files (JSON/CSV on GCS) into a Spark DataFrame and writes
    them to a MySQL table via JDBC.  All connection settings come from an
    INI-style config object."""

    def __init__(self, project, src_files, dst_table, keyfile, config):
        """:param project: GCP project id.
        :param src_files: source file glob/path(s) readable by Spark.
        :param dst_table: destination MySQL table name.
        :param keyfile: path to a Google service-account JSON key.
        :param config: configparser-like object with [mysql]/[environment] sections.
        """
        self.project = project
        self.src_files = src_files
        self.dst_table = dst_table
        self.keyfile = keyfile

        self.driver = config.get('mysql', 'driver')
        self.url = config.get('mysql', 'url')
        self.user = config.get('mysql', 'user')
        self.password = config.get('mysql', 'password')

        self.jar_dir = config.get('environment', 'jar_dir')
        self.submit_host = config.get('environment', 'submit_host')
        self.python_lib = config.get('environment', 'python_lib')
        self.python_files = self.get_list(config.get('environment', 'python_files'))

    def get_list(self, csv):
        """Split a comma-separated string into a list of stripped values."""
        raw_list = csv.split(',')
        stripped_list = [v.strip() for v in raw_list]
        return stripped_list

    def get_context(self, app_name, project, keyfile, submit_host, python_lib,
                    python_files):
        """Build a SparkContext configured for GCS access via a service account."""
        # generate environment variables shipped to the executors
        full_path_list = ['file:{python_lib}/{file}'.format(python_lib=python_lib, file=file)
                          for file in python_files]
        full_paths = ','.join(full_path_list)
        python_path = ':'.join(python_files)

        # cluster execution
        # BUGFIX: the builder chain previously ended with a stray trailing "\"
        # line-continuation, which glued the following assignment onto this
        # expression and made the module a syntax error.
        conf = SparkConf() \
            .setMaster(submit_host) \
            .setAppName(app_name) \
            .set('spark.yarn.dist.files', '{full_paths}'.format(full_paths=full_paths)) \
            .setExecutorEnv('PYTHONPATH', '{python_path}'.format(python_path=python_path))

        context = SparkContext(conf=conf)

        # Setup gcs Hadoop Configurations programmatically
        # Require Google Service account
        context._jsc.hadoopConfiguration().set(
            "fs.gs.impl", "com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem")
        context._jsc.hadoopConfiguration().set("fs.gs.project.id", project)
        context._jsc.hadoopConfiguration().set(
            "google.cloud.auth.service.account.enable", "true")
        context._jsc.hadoopConfiguration().set(
            "google.cloud.auth.service.account.json.keyfile", keyfile)
        return context

    def get_session(self, context):
        """Return a SparkSession sharing the given context's configuration."""
        session = SparkSession.builder \
            .config(conf=context.getConf()) \
            .getOrCreate()
        return session

    def get_src_df(self, session, src_files, type='json'):
        """Load src_files as a DataFrame.

        :param type: 'json' for JSON input; any other value is treated as the
            CSV delimiter character.
        """
        if type == 'json':
            df = session.read.format("json") \
                .option("header", "false") \
                .option("inferSchema", "true") \
                .load(src_files)
        else:
            df = session.read.format("csv") \
                .option("delimiter", type) \
                .option("header", "false") \
                .load(src_files)
        return df

    def write_df_to_mysql(self, df):
        """Append the DataFrame to the configured MySQL table over JDBC."""
        df.write.format('jdbc').options(
            url=self.url,
            driver=self.driver,
            dbtable=self.dst_table,
            user=self.user,
            password=self.password).mode('append').save()
ubermen/anomaly_detector
estimator/reporter/engines.py
engines.py
py
2,795
python
en
code
1
github-code
6
[ { "api_name": "pyspark.SparkConf", "line_number": 37, "usage_type": "call" }, { "api_name": "pyspark.SparkContext", "line_number": 43, "usage_type": "call" }, { "api_name": "pyspark.sql.SparkSession.builder.config", "line_number": 56, "usage_type": "call" }, { "ap...
28204866764
from automata.Automata import Automata
from tokenizer.Tokenizer import tokenizerSimple

# Read the data needed to start processing the automaton; each answer is
# tokenized so we get it back as an array.
simbolos_entrada, estados, estado_inicial, estados_aceptacion = [
    tokenizerSimple(input(prompt))
    for prompt in (
        "Simbolos de entrada >> ",
        "Estados >> ",
        "Estado inicial >> ",
        "Estados de Aceptacion >> ",
    )
]

# Build a new Automata instance from the collected pieces.
automata = Automata(simbolos_entrada, estados, estado_inicial,
                    estados_aceptacion)
ephelsa/Teoria_Lenguaje
1. Automata/proyecto/__init__.py
__init__.py
py
799
python
es
code
0
github-code
6
[ { "api_name": "tokenizer.Tokenizer.tokenizerSimple", "line_number": 9, "usage_type": "call" }, { "api_name": "tokenizer.Tokenizer.tokenizerSimple", "line_number": 10, "usage_type": "call" }, { "api_name": "tokenizer.Tokenizer.tokenizerSimple", "line_number": 11, "usage_ty...
39861129033
"""Imports xml statements from privatbank, optained via p24-cli. See https://github.com/dimboknv/p24-cli The xml format is identical to that of p24 merchant api. """ import datetime from xml.etree import ElementTree as ET import beangulp import dateutil.parser from beancount.core import data, flags from beancount.core.number import D from uabean.importers.mixins import IdentifyMixin class Importer(IdentifyMixin, beangulp.Importer): FLAG = flags.FLAG_OKAY matchers = [ ("content", "<statements status="), ("mime", "application/xml"), ] unknown_account = "Assets:Unknown" def __init__( self, card_to_account_map, *args, fee_account="Expenses:Fees:Privatbank", **kwargs ): self.card_to_account_map = card_to_account_map self.fee_account = fee_account super().__init__(*args, **kwargs) def date_from_elem(self, elem): return dateutil.parser.parse(" ".join(elem.attrib["trandate"].split(" ")[:2])) def account(self, _): return "privatbank" def extract(self, filename, existing_entries=None): entries = [] tree = ET.parse(filename) root = tree.getroot() assert root.tag == "statements" max_date = None max_elem = None for elem in root: meta = data.new_metadata(filename, 0) entries.append(self.entry_from_elem(meta, elem)) date = self.date_from_elem(elem) if max_date is None or date > max_date: max_date = date max_elem = elem if max_elem is not None: rest_num, rest_currency = max_elem.attrib["rest"].split(" ", 1) amount = data.Amount(D(rest_num), rest_currency) entries.append( data.Balance( data.new_metadata(filename, 0), max_date.date() + datetime.timedelta(days=1), self.card_to_account_map[max_elem.attrib["card"]], amount, None, None, ) ) return entries def entry_from_elem(self, meta, elem): dt = self.date_from_elem(elem) meta["time"] = dt.strftime("%H:%M:%S") account = self.card_to_account_map.get( elem.attrib["card"], self.unknown_account ) num, currency = elem.attrib["amount"].split(" ", 1) num = D(num) card_num, card_currency = elem.attrib["cardamount"].split(" ", 1) card_num = 
D(card_num) postings = [ data.Posting( account, data.Amount(card_num, card_currency), None, None, None, None ) ] if currency != card_currency: meta["converted"] = elem.attrib["amount"] elif abs(card_num) != num: fee_amount = data.Amount(abs(card_num) - num, currency) postings.append( data.Posting(self.fee_account, fee_amount, None, None, None, None) ) return data.Transaction( meta, dt.date(), self.FLAG, None, elem.attrib["description"], data.EMPTY_SET, data.EMPTY_SET, postings, ) def get_test_importer(): return Importer( { "1234": "Assets:Privatbank:Universal", "5678": "Assets:Privatbank:Social", } ) if __name__ == "__main__": from beangulp.testing import main main(get_test_importer())
OSadovy/uabean
src/uabean/importers/privatbank_xml.py
privatbank_xml.py
py
3,577
python
en
code
18
github-code
6
[ { "api_name": "uabean.importers.mixins.IdentifyMixin", "line_number": 19, "usage_type": "name" }, { "api_name": "beangulp.Importer", "line_number": 19, "usage_type": "attribute" }, { "api_name": "beancount.core.flags.FLAG_OKAY", "line_number": 20, "usage_type": "attribute...
34596295886
import argparse import itertools import re import sys from typing import Optional from graphviz import Digraph import numpy as np import wandb from app.type import APPSProblem, Node from app.const import ( CONCURRENCY_LIMIT, DEFAULT_WANDB_PROJECT_NAME, K, NO_CUDA, NUM_ROLLOUTS, TERMINAL_TOKEN, PROBLEM_INDEX, ) from app.config import experiments sys.path.append("Code-AI-Tree-Search/eval") from compute_reward import compute_reward as _compute_reward # type: ignore def extract_code(text: str, terminal_token: str = TERMINAL_TOKEN) -> str: pattern = rf"ANSWER:\n(.*?){re.escape(terminal_token)}" match = re.search(pattern, text, re.DOTALL) return match.group(1).strip() if match else "" def compute_reward(code: str, problem: APPSProblem, mode: str = "train") -> int: return _compute_reward( problem.dir, code + "\n", mode=mode, public_test_cases="half" ) def log_info( num_actions: int, node: Node, token: Optional[str], elapsed: Optional[float], ): print( f"Step: {('Prediction' if elapsed is not None else 'Selection'):<10} |", f"Action #: {num_actions:<2} |", f"Action: {node.display_action if node else 'N/A':<6} |", f"Token: {repr(token) if token is not None else 'N/A':<8} |", f"Elapsed: {(str(np.round(elapsed, 3)) + 's' if elapsed is not None else 'N/A'):<7} |", ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--remote", action="store_true", default=False) parser.add_argument( "--debug", action="store_true", help="Debug mode", default=False ) parser.add_argument("--dry", action="store_true", default=False) parser.add_argument("--no_cuda", action="store_true", default=NO_CUDA) parser.add_argument("--K", type=int, help="Number of expanded children", default=K) parser.add_argument("--num_rollouts", type=int, default=NUM_ROLLOUTS) parser.add_argument("--concurrency_limit", type=int, default=CONCURRENCY_LIMIT) parser.add_argument("--experiment_name", type=str, default=None) parser.add_argument( "--problem_index", type=str, default=PROBLEM_INDEX, 
choices=APPSProblem.problem_indices, ) args, _ = parser.parse_known_args() return args def traverse_and_visualize(node, graph, tokenizer, node_id=0): if node is None: return node_id # Create a label for the node with its statistics action = tokenizer.decode([node.action]) if node.action != "root" else "root" label = f"Action ID: {node.action}\n Action: {action}\nVisits: {node.visits}\nSelected: {node.selected}" if node.action != "root": label += f"\nProb: {node.prob:.2f}\nValue: {node.value:.2f}" graph.node(str(node_id), label) current_id = node_id children = [c for c in node.children if c.selected > 0 or c.visits > 0] for child in children: next_id = node_id + 1 graph.edge(str(current_id), str(next_id)) node_id = traverse_and_visualize(child, graph, tokenizer, next_id) return node_id def visualize_tree(root, tokenizer): graph = Digraph(comment="Tree Visualization") traverse_and_visualize(root, graph, tokenizer) graph.render("tree", format="png") def get_wandb_runs(experiment_name, project_name=DEFAULT_WANDB_PROJECT_NAME): return wandb.Api().runs(project_name, filters={"group": experiment_name}) def compose_configs( problem_indices, experiment_name, dry, project_name=DEFAULT_WANDB_PROJECT_NAME ): runs = get_wandb_runs(experiment_name, project_name) already_run = [run.config for run in runs] exp = experiments[experiment_name] configs = [] for idx in problem_indices: for cfg in list(itertools.product(*exp.values())): cfg = { **dict(zip(exp.keys(), cfg)), "problem_index": idx, } if cfg not in already_run or dry: configs.append(cfg) return configs
cavaunpeu/mcts-llm-codegen
app/util.py
util.py
py
4,017
python
en
code
1
github-code
6
[ { "api_name": "sys.path.append", "line_number": 23, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 23, "usage_type": "attribute" }, { "api_name": "app.const.TERMINAL_TOKEN", "line_number": 27, "usage_type": "name" }, { "api_name": "re.escape", ...
70396698747
import lcqp_manip import casadi import numpy as np import lcqpow # create the objects box_ly = 0.15 vmax = 0.035 wmax = 0.035 box = lcqp_manip.Box(w=0.24, h=0.08, m=0.1, g=9.81, vmax=casadi.SX([vmax, vmax, wmax])) gripper = lcqp_manip.Gripper(h=0.08, rmax=0.115, rmin=0, vmax=casadi.SX([vmax, vmax, wmax, vmax])) ground = lcqp_manip.Ground() wall = lcqp_manip.Wall(-0.25) objects = [gripper, box, ground, wall] # create the contact points and surfaces box_TR_corner = lcqp_manip.BoxTopRightCorner(box) box_BR_corner = lcqp_manip.BoxBottomRightCorner(box) box_TL_corner = lcqp_manip.BoxTopLeftCorner(box) box_BL_corner = lcqp_manip.BoxBottomLeftCorner(box) box_T_surface = lcqp_manip.BoxTopSurface(box) box_B_surface = lcqp_manip.BoxBottomSurface(box) box_R_surface = lcqp_manip.BoxRightSurface(box) box_L_surface = lcqp_manip.BoxLeftSurface(box) gripper_BR_corner = lcqp_manip.GripperBottomRightCorner(gripper, offset=0.0035) gripper_BL_corner = lcqp_manip.GripperBottomLeftCorner(gripper, offset=0.0035) # create contacts fmax = casadi.SX([10*box.m*box.g, 10*box.m*box.g, 10*box.m*box.g]) mu_ground = 0.4 mu = 0.5 # contacts between the box (contact points) and ground (contact surface) contact_box_TR_ground = lcqp_manip.RelaxedContact(contact_point=box_TR_corner, contact_surface=ground, contact_name="box_TR_ground", mu=mu, fmax=fmax, inv_force_dir=False) contact_box_BR_ground = lcqp_manip.RelaxedContact(contact_point=box_BR_corner, contact_surface=ground, contact_name="box_BR_ground", mu=mu, fmax=fmax, inv_force_dir=False) contact_box_TL_ground = lcqp_manip.RelaxedContact(contact_point=box_TL_corner, contact_surface=ground, contact_name="box_TL_ground", mu=mu, fmax=fmax, inv_force_dir=False) contact_box_BL_ground = lcqp_manip.RelaxedContact(contact_point=box_BL_corner, contact_surface=ground, contact_name="box_BL_ground", mu=mu, fmax=fmax, inv_force_dir=False) # contacts between the box (contact points) and ground (contact surface) contact_box_TR_wall = 
lcqp_manip.RelaxedContact(contact_point=box_TR_corner, contact_surface=wall, contact_name="box_TR_wall", mu=mu, inv_force_dir=False) contact_box_BR_wall = lcqp_manip.RelaxedContact(contact_point=box_BR_corner, contact_surface=wall, contact_name="box_BR_wall", mu=mu, inv_force_dir=False) contact_box_TL_wall = lcqp_manip.RelaxedContact(contact_point=box_TL_corner, contact_surface=wall, contact_name="box_TL_wall", mu=mu, inv_force_dir=False) contact_box_BL_wall = lcqp_manip.RelaxedContact(contact_point=box_BL_corner, contact_surface=wall, contact_name="box_BL_wall", mu=mu, inv_force_dir=False) # contacts between the gripper (contact points) and box (contact surfaces) contact_gripper_BR_box_R = lcqp_manip.RelaxedContact(contact_point=gripper_BR_corner, contact_surface=box_R_surface, contact_name="gripper_BR_box_R", mu=mu, fmax=fmax, inv_force_dir=True) contact_gripper_BL_box_R = lcqp_manip.RelaxedContact(contact_point=gripper_BL_corner, contact_surface=box_R_surface, contact_name="gripper_BL_box_L", mu=mu, fmax=fmax, inv_force_dir=True) contacts = [contact_box_TL_ground, contact_box_BL_ground, contact_box_TR_ground, contact_box_BR_ground, contact_box_TL_wall, contact_box_BL_wall, contact_box_TR_wall, contact_box_BR_wall, contact_gripper_BR_box_R] # create an LCQP lcqp = lcqp_manip.LCQP(objects, contacts) lcqp.set_force_balance(box) lcqp.set_position_limit(gripper_BR_corner, box_T_surface, margin=0.02, inv_dir=True) lcqp.set_position_limit(gripper_BR_corner, box_B_surface, margin=0.02, inv_dir=True) box_x0 = 0.5 box_y0 = wall.w0 + box.w / 2 + 0.03 # goal configuration box_center_to_gripper_top = gripper.h / 2 goal_height = 0.0 goal_angle = np.pi / 2 gripper_z_start = box.h + box_center_to_gripper_top box_z_goal = box.w / 2 q_goal = np.array([box_y0, 0, -np.pi/6, 0.01, box_y0, box_z_goal, goal_angle]) # set config cost q_weight = np.array([0.0, 0.0, 0.001, 1000, 0, 1, 1000]) v_weight = 1.0e-02 f_weight = 1.0e-02 slack_penalty = 1.0e04 lcqp.set_config_cost(q_goal, 
q_weight, v_weight, f_weight, slack_penalty) # create the LCQP solver lcqp_solver = lcqp_manip.LCQPSolver(lcqp) lcqp_solver.options.setMaxRho(1.0e12) lcqp_solver.options.setComplementarityTolerance(1.0e-06) lcqp_solver.options.setStationarityTolerance(1.0e-04) # create the simulation environment sim = lcqp_manip.PyBulletSimulation(time_step=0.05, sim_time=30, gui=True) sim.robot_q0 = np.array([-np.pi/2, 0, -np.pi/2, np.pi/2, -np.pi/2, -np.pi/2, -np.pi/2, -0.01, 0.01]) sim.joint_rest_poses = sim.robot_q0.tolist() sim.add_box(lx=box_ly, ly=box.w, lz=box.h, mass=box.m, x=box_x0, y=box_y0) # create the controller controller = lcqp_manip.LCQPController(lcqp_solver=lcqp_solver, projection_axis='x', z_offset=sim.table_height) controller.joint_rest_poses = sim.robot_q0 controller.position_gain = np.concatenate([np.full(7, 1), np.full(2, 0.1)]) controller.op_space_gain = np.concatenate([np.full(3, 100), np.full(3, 100)]) # run a simulation pre_grasp_orn = np.array([[ 0, 1, 0], [ 0, 0, -1], [ -1, 0, 0]]) from scipy.spatial.transform import Rotation pre_grasp_orn = Rotation.from_matrix(pre_grasp_orn).as_quat() controller.set_pre_grasp(pre_grasp_pos=np.array([box_x0, box_y0+box.w/2+gripper.h, sim.table_surface_height+box.h/2+gripper.h/2]), pre_grasp_orn=pre_grasp_orn, pre_pre_grasp_dist=np.array([0.0, 0.1, 0.0]), pre_grasp_tol=0.01, pre_pre_grasp_tol=0.02) controller.set_post_grasp(post_grasp_pos=np.array([box_x0, 0.0, gripper_z_start+sim.table_surface_height+0.1]), post_grasp_tol=0.01) sim.set_camera_side() sim.run(controller, record=False) # sim.run(controller, anim_2D=False, record=True)
omron-sinicx/lcqp
examples/pivot.py
pivot.py
py
6,805
python
en
code
1
github-code
6
[ { "api_name": "lcqp_manip.Box", "line_number": 11, "usage_type": "call" }, { "api_name": "casadi.SX", "line_number": 11, "usage_type": "call" }, { "api_name": "lcqp_manip.Gripper", "line_number": 12, "usage_type": "call" }, { "api_name": "casadi.SX", "line_num...
349034442
import pandas as pd import numpy as np from zipfile import ZipFile import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from pathlib import Path import matplotlib.pyplot as plt from keras.layers import Concatenate, Dense, Dropout from keras.layers import Add, Activation, Lambda from keras.models import Model from keras.layers import Input, Reshape, Dot from keras.layers.embeddings import Embedding from keras.optimizers import Adam from keras.regularizers import l2 class RecommenderNet(keras.Model): def __init__(self, num_users, num_products, embedding_size, **kwargs): super(RecommenderNet, self).__init__(**kwargs) self.num_users = num_users self.num_products = num_products self.embedding_size = embedding_size self.user_embedding = layers.Embedding( num_users, embedding_size, embeddings_initializer="he_normal", embeddings_regularizer=keras.regularizers.l2(1e-6), ) self.user_bias = layers.Embedding(num_users, 1) self.product_embedding = layers.Embedding( num_products, embedding_size, embeddings_initializer="he_normal", embeddings_regularizer=keras.regularizers.l2(1e-6), ) self.product_bias = layers.Embedding(num_products, 1) def call(self, inputs): user_vector = self.user_embedding(inputs[:, 0]) user_bias = self.user_bias(inputs[:, 0]) product_vector = self.product_embedding(inputs[:, 1]) product_bias = self.product_bias(inputs[:, 1]) dot_user_product = tf.tensordot(user_vector, product_vector, 2) # Add all the components (including bias) x = dot_user_product + user_bias + product_bias # The sigmoid activation forces the rating to between 0 and 1 return tf.nn.sigmoid(x) class EmbeddingLayer: def __init__(self, n_items, n_factors): self.n_items = n_items self.n_factors = n_factors def __call__(self, x): x = Embedding(self.n_items, self.n_factors, embeddings_initializer='he_normal', embeddings_regularizer=l2(1e-6))(x) x = Reshape((self.n_factors,))(x) return x def ModRecommenderNet(n_users, n_movies, n_factors, min_rating, 
max_rating): user = Input(shape=(1,)) u = EmbeddingLayer(n_users, n_factors)(user) movie = Input(shape=(1,)) m = EmbeddingLayer(n_movies, n_factors)(movie) x = Concatenate()([u, m]) x = Dropout(0.05)(x) x = Dense(10, kernel_initializer='he_normal')(x) x = Activation('relu')(x) x = Dropout(0.5)(x) x = Dense(1, kernel_initializer='he_normal')(x) x = Activation('sigmoid')(x) x = Lambda(lambda x: x * (max_rating - min_rating) + min_rating)(x) model = Model(inputs=[user, movie], outputs=x) opt = Adam(lr=0.001) model.compile(loss='mean_squared_error', optimizer=opt) return model if __name__ == "__main__": print('main')
wmcfarlan/health_recommender
src/helper/keras_class.py
keras_class.py
py
3,069
python
en
code
0
github-code
6
[ { "api_name": "tensorflow.keras.Model", "line_number": 18, "usage_type": "attribute" }, { "api_name": "tensorflow.keras", "line_number": 18, "usage_type": "name" }, { "api_name": "tensorflow.keras.layers.Embedding", "line_number": 24, "usage_type": "call" }, { "ap...
37204907362
# 打开数据库连接 import pymysql as pymysql import math EARTH_REDIUS = 6378.137 pi = 3.1415926 def rad(d): return d * pi / 180.0 def getDistan(lat1, lng1, lat2, lng2): radLat1 = rad(lat1) radLat2 = rad(lat2) a = radLat1 - radLat2 b = rad(lng1) - rad(lng2) s = 2 * math.asin(math.sqrt(math.pow(math.sin(a / 2), 2) + math.cos(radLat1) * math.cos(radLat2) * math.pow( math.sin(b / 2), 2))) s = s * EARTH_REDIUS return s db = pymysql.connect(host='bj-cdb-cwu7v42u.sql.tencentcdb.com', port=62864, user='user', passwd='xmxc1234', db='test', charset='utf8') # charset='utf8'查询的有汉字此代码必须加上否则导出的是??? # 使用cursor()方法获取操作游标 cursor = db.cursor() # sql 查询语句 sql = "select id, client_name, latitude, longitude FROM t_client_elm limit 100" # 执行sql语句 cursor.execute(sql) # 获取所有记录列表 results = cursor.fetchall() a = [["酒仙桥",116.687288, 40.2342], ["望京",116.462054, 39.914102], ["三里屯",116.442054, 39.954102]] print(a.__len__()) for i in range(0, a.__len__()): i = 1 # 坐标定义 name: str = a[i][0] lat2: float = a[i][1] lng2: float = a[i][2] i = i + 1 for row in results: lat1=float(row[2]) lng1=float(row[3]) result = getDistan(lat1, lng1, lat2,lng2) if result <= 1: print(name + ":" + str(result))
tangleibest/untitled
test/a.py
a.py
py
1,444
python
en
code
0
github-code
6
[ { "api_name": "math.asin", "line_number": 15, "usage_type": "call" }, { "api_name": "math.sqrt", "line_number": 15, "usage_type": "call" }, { "api_name": "math.pow", "line_number": 15, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 15, "u...
17451718192
import numpy as np import pandas as pd from model_zoo.xgb import train_xgb from model_zoo.catboost import train_catboost from model_zoo.lgbm import train_lgbm from sklearn.metrics import roc_auc_score from sklearn.model_selection import KFold TRAIN_FCTS = { "lgbm": train_lgbm, "xgb": train_xgb, "catboost": train_catboost, } def k_fold( df, config, log_folder=None, ): train_fct = TRAIN_FCTS[config.model] ft_imps, models = [], [] pred_oof = np.zeros(len(df)) if config.split == "kf": kf = KFold(n_splits=config.n_folds, shuffle=True, random_state=13) splits = kf.split(df) elif config.split == "gkf": splits = [(i, i) for i in range(config.n_folds)] else: raise NotImplementedError() for fold, (train_idx, val_idx) in enumerate(splits): if fold in config.selected_folds: print(f"\n------------- Fold {fold + 1} / {config.n_folds} -------------\n") if config.split == "kf": df_train = df.iloc[train_idx].reset_index(drop=True) df_val = df.iloc[val_idx].reset_index(drop=True) else: df_train = df[ (df["fold_1"] != fold) & (df["fold_2"] != fold) ].reset_index(drop=True) df_val = df[(df["fold_1"] == fold) | (df["fold_2"] == fold)] val_idx = ( df_val.index.values if isinstance(df, pd.DataFrame) else df_val.index.values.get() ) print(f" -> {len(df_train)} training pairs") print(f" -> {len(df_val)} validation pairs\n") pred_val, model = train_fct( df_train, df_val.reset_index(drop=True), None, config.features, config.target, params=config.params, cat_features=config.cat_features, use_es=config.use_es, ) pred_oof[val_idx] += pred_val try: try: ft_imp = model.feature_importance except AttributeError: ft_imp = model.feature_importances_ ft_imp = pd.DataFrame( pd.Series(ft_imp, index=config.features), columns=["importance"] ) ft_imps.append(ft_imp) except Exception: pass models.append(model) if log_folder is None: return pred_oof, models, ft_imp if config.model == "xgb": model.save_model(log_folder + f"{config.model}_{fold}.json") elif config.model == "lgbm": try: 
model.booster_.save_model(log_folder + f"{config.model}_{fold}.txt") except Exception: model.save_model(log_folder + f"{config.model}_{fold}.txt") else: # catboost, verif model.save_model(log_folder + f"{config.model}_{fold}.txt") if config.split == "gkf": pred_oof = pred_oof / (1 + (df["fold_1"] != df["fold_2"])) y = df[config.target].values if isinstance(df, pd.DataFrame) else df[config.target].get() auc = roc_auc_score(y, pred_oof) print(f"\n Local CV is {auc:.4f}") ft_imp = pd.concat(ft_imps, axis=1).mean(1) ft_imp.to_csv(log_folder + "ft_imp.csv") np.save(log_folder + "pred_oof.npy", pred_oof) return pred_oof, models, ft_imp
TheoViel/kaggle_foursquare
src/training/main_boosting.py
main_boosting.py
py
3,551
python
en
code
13
github-code
6
[ { "api_name": "model_zoo.lgbm.train_lgbm", "line_number": 10, "usage_type": "name" }, { "api_name": "model_zoo.xgb.train_xgb", "line_number": 11, "usage_type": "name" }, { "api_name": "model_zoo.catboost.train_catboost", "line_number": 12, "usage_type": "name" }, { ...
71578351547
""" An example trainer for a simply policy gradient implementation. """ import time import json import torch from torch.optim import Adam import gym from oxentiel import Oxentiel from asta import dims, shapes from vpg import ( get_action, compute_policy_loss, compute_value_loss, finish, ActorCritic, RolloutStorage, ) SETTINGS_PATH = "settings_vpg.json" def train(ox: Oxentiel) -> None: """ Training loop. """ env: gym.Env = gym.make(ox.env_name) shapes.OB = env.observation_space.shape dims.N_ACTS = env.action_space.n ac = ActorCritic(shapes.OB[0], dims.N_ACTS, ox.hidden_dim) actor_optimizer = Adam(ac.pi.parameters(), lr=ox.lr) critic_optimizer = Adam(ac.v.parameters(), lr=ox.lr) rollouts = RolloutStorage() ob = env.reset() done = False t_start = time.time() for i in range(ox.iterations): ob_t = torch.Tensor(ob) act, val = get_action(ac, ob_t) # Critical: add prev ob to rollouts buffer. prev_ob = ob ob, rew, done, _ = env.step(act) rollouts.add(prev_ob, act, val, rew) # If we're done, or we finished a batch. 
if done or (i > 0 and i % ox.batch_size == 0): rews = rollouts.rews vals = rollouts.vals last_val = 0 if done else vals[-1] ep_weights, ep_rets = finish(ox, rews, vals, last_val) rollouts.rews = [] rollouts.vals = [] rollouts.lens.append(len(ep_weights)) rollouts.weights.extend(ep_weights) rollouts.rets.extend(ep_rets) ob, done = env.reset(), False if i > 0 and i % ox.batch_size == 0: mean_ret, mean_ep_len = rollouts.stats() obs, acts, weights, rets = rollouts.get() actor_optimizer.zero_grad() policy_loss = compute_policy_loss(ac, obs, acts, weights) policy_loss.backward() actor_optimizer.step() critic_optimizer.zero_grad() value_loss = compute_value_loss(ac, obs, rets) value_loss.backward() critic_optimizer.step() print(f"Iteration: {i} \t ", end="") print(f"Time: {time.time() - t_start:.3f} \t ", end="") print(f"Loss: {policy_loss:.3f} \t ", end="") print(f"Mean episode length: {mean_ep_len:.3f} \t ", end="\n") t_start = time.time() def main() -> None: """ Run the trainer. """ with open(SETTINGS_PATH, "r") as settings_file: settings = json.load(settings_file) ox = Oxentiel(settings) train(ox) if __name__ == "__main__": main()
langfield/polstead
demos/vpg/primitive/trainer.py
trainer.py
py
2,664
python
en
code
0
github-code
6
[ { "api_name": "oxentiel.Oxentiel", "line_number": 25, "usage_type": "name" }, { "api_name": "gym.Env", "line_number": 28, "usage_type": "attribute" }, { "api_name": "gym.make", "line_number": 28, "usage_type": "call" }, { "api_name": "asta.shapes.OB", "line_nu...
32740927638
#!/bin/python3 import re import getopt, sys from kazoo.client import KazooClient import json def getRollupRules(zookeeperHostsIn, zNodePath): zk = KazooClient(hosts=zookeeperHostsIn); zk.start(); result = {}; if zk.exists(zNodePath): for zookeeperChild in zk.get_children(zNodePath): zookeeperChildPath = zNodePath + "/" + zookeeperChild if zk.exists(zookeeperChildPath): Data, Stat = zk.get(zookeeperChildPath) result[zookeeperChild] = json.loads(Data) else: print("Reported ZK path no longer exists: " + Zookeeper_Child_Path) zk.stop(); return result;
ModdingFox/AutomaticDruidRollup
Zookeeper/Fetch_Druid_Rules.py
Fetch_Druid_Rules.py
py
682
python
en
code
0
github-code
6
[ { "api_name": "kazoo.client.KazooClient", "line_number": 9, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 18, "usage_type": "call" } ]
38049949342
from gtts import gTTS from pydub import AudioSegment from pydub.playback import play from screeninfo import get_monitors import numpy as np import cv2 import json import tempfile import os import time def get_screen_resolution(use_default): if use_default: return 640, 480 # Default resolution monitors = get_monitors() if monitors: return monitors[0].width, monitors[0].height else: print("Unable to determine screen resolution.") return None def text_to_speech(text, lang='en'): tts = gTTS(text=text, lang=lang, slow=False) with tempfile.NamedTemporaryFile(delete=False) as temp_audio: tts.save(temp_audio.name) audio_segment = AudioSegment.from_file(temp_audio.name) os.remove(temp_audio.name) return audio_segment input_file = 'results.json' with open(input_file, 'r') as f: results_data = json.load(f) use_default_resolution = 1 # 0 to use device's resolution screen_width, screen_height = get_screen_resolution(use_default_resolution) # Total duration of audio (in milliseconds) total_duration = 10000 # 10sec # Silent audio segment audio = AudioSegment.silent(duration=total_duration) for frame_idx, frame_results in enumerate(results_data): frame_duration = total_duration / len(results_data) frame_audio = AudioSegment.silent(duration=frame_duration) for obj_result in frame_results: bounding_box = obj_result['bounding_box'] class_name = obj_result['class_name'] # Adjusting amplitude based on distance between y-coordinates distance_y = bounding_box['y'] + bounding_box['height'] - bounding_box['y'] amplitude = np.clip(1 - distance_y / screen_height, 0.1, 1) # Amplitude range = [0.1, 1] # Midpoint of bounding box for panning audio midpoint_x = bounding_box['x'] + bounding_box['width'] / 2 midpoint_y = bounding_box['y'] + bounding_box['height'] / 2 # Distance between midpoints and pan accordingly distance_to_center = midpoint_x - screen_width / 2 pan = np.clip(distance_to_center / (screen_width / 2), -1, 1) # Pan range = [-1, 1] voice_audio = text_to_speech(class_name) 
voice_audio = voice_audio - (1 - amplitude) * 50 voice_audio = voice_audio.pan(pan) frame_audio = frame_audio.overlay(voice_audio) # print(f"Frame {frame_idx + 1}, Object: {class_name}, Pan: {pan}, Amplitude: {amplitude:.2f}") audio = audio.overlay(frame_audio, position=int(frame_idx * frame_duration)) if (frame_idx + 1) % 3 == 0: # Speak out the class name every 3 seconds text_to_speech("Attention, " + class_name).play() # Export the stereo audio to a file audio.export('stereo_audio_with_speech_amplitude.wav', format='wav')
aman071/Smartphone-audio-cues-for-visually-impaired
audio_generation.py
audio_generation.py
py
2,862
python
en
code
0
github-code
6
[ { "api_name": "screeninfo.get_monitors", "line_number": 16, "usage_type": "call" }, { "api_name": "gtts.gTTS", "line_number": 24, "usage_type": "call" }, { "api_name": "tempfile.NamedTemporaryFile", "line_number": 25, "usage_type": "call" }, { "api_name": "pydub.A...
41137173523
#to run, 'sudo python' then 'import gamepad' (this file), then 'gamepad.test()' #to install pygame: apt-get install python-pygame import pygame, time, serial, csv, motor_func, math pygame.init() j = pygame.joystick.Joystick(0) j.init() # This is for the output write (change it accordingly, i.e: /dev/ttyUSB0): #output_ser_path = raw_input("Please enter your serial port number: ") output_delay = 0.1 """ for i in range(10): try: output_ser_path = str(i) except Exception: pass print(output_ser_path) ser = serial.Serial("Port_#0002.Hub_#0004") ser.baudrate = 9600 ser.write('Initialized Joystick : %s' % j.get_name()) print('Initialized Joystick : %s' % j.get_name()) ser.timeout = 1 """ def get(): out = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] it = 0 #iterator pygame.event.pump() #Read input from the two joysticks for i in range(0, j.get_numaxes()): out[it] = round(j.get_axis(i), 2) it+=1 #Read input from buttons for i in range(0, j.get_numbuttons()): #print (j.get_numbuttons()) out[it] = j.get_button(i) it+=1 return out for i in range(0, j.get_numhats()): out[it] = j.get_hat(i) it+=1 return out def test(): while True: time.sleep(float(output_delay)) joystick_info = get() print (joystick_info) #ser.write(str(joystick_info)) #def motor_move(motor, speed_fb,speed_lr,ser) # motor_func.motor_move(1,joystick_info[1]*0.5*(joystick_info[3] + 1),joystick_info[0]*0.5*(joystick_info[3] + 1),joystick_info[2]*0.5*(joystick_info[3] + 1),ser) # motor_func.motor_move(2,joystick_info[1]*0.5*(joystick_info[3] + 1),joystick_info[0]*0.5*(joystick_info[3] + 1),joystick_info[2]*0.5*(joystick_info[3] + 1),ser) if __name__ == '__main__': test()
rsx-utoronto/galaxy
ground_station/main_ui/joystick.py
joystick.py
py
1,815
python
en
code
1
github-code
6
[ { "api_name": "pygame.init", "line_number": 6, "usage_type": "call" }, { "api_name": "pygame.joystick.Joystick", "line_number": 7, "usage_type": "call" }, { "api_name": "pygame.joystick", "line_number": 7, "usage_type": "attribute" }, { "api_name": "pygame.event.p...
32108115946
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("orgs", "0014_auto_20150722_1419")] operations = [ migrations.CreateModel( name="ContactField", fields=[ ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)), ("label", models.CharField(max_length=36, verbose_name="Label")), ("key", models.CharField(max_length=36, verbose_name="Key")), ("value_type", models.CharField(max_length=1, verbose_name="Field Type")), ( "org", models.ForeignKey( related_name="contactfields", on_delete=models.PROTECT, verbose_name="Org", to="orgs.Org" ), ), ], ) ]
rapidpro/ureport
ureport/contacts/migrations/0001_initial.py
0001_initial.py
py
990
python
en
code
23
github-code
6
[ { "api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute" }, { "api_name": "django.db.migrations", "line_number": 7, "usage_type": "name" }, { "api_name": "django.db.migrations.CreateModel", "line_number": 11, "usage_type": "call" }, ...
39104149472
from django.urls import path from . import views app_name = 'orders' urlpatterns = [ path('checkout', views.checkout, name='checkout'), path('order_details', views.order_detail, name='order_details'), path('orders', views.orders, name='orders'), path('create_order/', views.create_order, name='create_order'), ]
suveydacan/book_shopping_microservice
MyProject/orders/urls.py
urls.py
py
333
python
en
code
1
github-code
6
[ { "api_name": "django.urls.path", "line_number": 7, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 8, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 9, "usage_type": "call" }, { "api_name": "django.urls.path", ...
30763965983
# Obj: Data persistance # Opt1: External files # Opt2: DB # Procedure: # Create the external file. # Open the file # Manipulate the file # Close the file from io import open # First parameter file name, second parameter mode to open (read, write) textFile = open('file.txt', 'w') line = 'Great day to code Python \nIsn\'t it?' textFile.write(line) # writing on the file textFile.close() # closing the file textFile = open('file.txt', 'r') # Opens the file on read mode text = textFile.read() # reads the file textFile.close() # closing the file print(text) textFile = open('file.txt', 'r') # Opens the file on read mode # reads the file line by line saving each one of themn on a list textLines = textFile.readlines() textFile.close() # closes the file print(textLines[0]) # a parameter allows to append lines to the text file textFile = open('file.txt', 'a') textFile.write('\nEveryday it\'s a god day to code') textFile.close() textFile = open('file.txt', 'r') print(textFile.read()) print(textFile.read()) # After executing the first read command, the pointer stays at the end of the file, so the second time it's executed there are no more lines ahead and it won't print anything # seek sets the pointer to the given position, in this case index = 0 textFile.seek(0) print(textFile.read()) print(textFile.read(11)) # Starts reading on the given position (11) textFile.close() # Writing and reading mode, sets the pointer on the first postion textFile = open('file.txt', 'r+')
Giorc93/PythonCourse
ExternalFiles/TextFiles/externalText.py
externalText.py
py
1,508
python
en
code
1
github-code
6
[ { "api_name": "io.open", "line_number": 15, "usage_type": "call" }, { "api_name": "io.open", "line_number": 21, "usage_type": "call" }, { "api_name": "io.open", "line_number": 29, "usage_type": "call" }, { "api_name": "io.open", "line_number": 39, "usage_t...
24348533870
import shutil import zipfile import os import sys import warnings from urllib.request import urlretrieve from tqdm import tqdm from zrp import about # This is used to show progress when downloading. # see here: https://github.com/tqdm/tqdm#hooks-and-callbacks class TqdmUpTo(tqdm): """Provides `update_to(n)` which uses `tqdm.update(delta_n)`.""" def update_to(self, b=1, bsize=1, tsize=None): """ b : int, optional Number of blocks transferred so far [default: 1]. bsize : int, optional Size of each block (in tqdm units) [default: 1]. tsize : int, optional Total size (in tqdm units). If [default: None] remains unchanged. """ if tsize is not None: self.total = tsize self.update(b * bsize - self.n) # will also set self.n = b * bsize def download_progress(url, fname): """ Download a file and show a progress bar. :param url: A string for the url of the release zip to download. :param fname: A string for the local file name under which the downloaded file can be found. :return: """ with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t: # all optional kwargs print(f"Retrieving url at {url}") urlretrieve(url, filename=fname, reporthook=t.update_to, data=None) t.total = t.n return fname def download_and_clean_lookup_tables(url, lookup_tables_output_fname, lookup_tables_output_zip_fname, geo_yr="2019", acs_yr="2019", acs_range="5yr"): """ Downloads look up tables and file them within the module. This downloads the zip file from the repository, extracts it, renames it, then moves the tables to the correct directory, and removes large files not used at runtime. :param lookup_tables_output_fname: A string for the name of the file downloaded after unzipping. :param acs_range: A string for the year range the acs lookup table data will be from. :param acs_yr: A string for the year the acs lookup table data will be from. :param geo_yr: A string for the year the geo lookup table data will be from. 
:param url: A string for the url of the release zip to download. :param lookup_tables_output_zip_fname: A string for the name of the zip file downloaded. :return: """ cwd = os.path.dirname(os.path.abspath(__file__)) path_to_lt_zip = os.path.join(cwd, lookup_tables_output_zip_fname) print("Downloading zrp release...", file=sys.stderr) download_progress(url, path_to_lt_zip) print("Finished download") print("\n") print("Filing extras...") with zipfile.ZipFile(path_to_lt_zip, 'r') as zf: zf.extractall(cwd) os.remove(path_to_lt_zip) # Get rid of prefix that unzipping prepends # curr_folder = cwd.split("/")[-1] # unzipped_src_fname = curr_folder + "-" + lookup_tables_output_fname # path_to_unzipped_src = os.path.join(cwd, unzipped_src_fname) path_to_lookup_tables = os.path.join(cwd, lookup_tables_output_fname) # os.rename(path_to_unzipped_src, path_to_lookup_tables) # Clear old look up table directories data_dir = os.path.join(cwd, 'data') geo_data_dir = os.path.join(data_dir, f'processed/geo/{geo_yr}') acs_data_dir = os.path.join(data_dir, f'processed/acs/{acs_yr}/{acs_range}') if os.path.isdir(geo_data_dir): shutil.rmtree(geo_data_dir) if os.path.isdir(acs_data_dir): shutil.rmtree(acs_data_dir) print("Old geo lookup table data cleared out.") # Migrate lookup tables dl_geo_dir = os.path.join(cwd, lookup_tables_output_fname, f'geo/{geo_yr}') dl_acs_dir = os.path.join(cwd, lookup_tables_output_fname, f'acs/{acs_yr}/{acs_range}') if os.path.isdir(dl_geo_dir): shutil.move(dl_geo_dir, geo_data_dir) print(f"New geo lookup tables successfully migrated from {dl_geo_dir} to {geo_data_dir}.") else: warnings.warn(f"The geo lookup data was not found in {dl_geo_dir}. Ensure you're requesting a valid year. 
" "Consult the lookup_tables release to troubleshoot.") if os.path.isdir(dl_acs_dir): shutil.move(dl_acs_dir, acs_data_dir) print(f"New geo lookup tables successfully migrated from {dl_acs_dir} to {acs_data_dir}.") else: warnings.warn(f"The acs lookup data was not found in {dl_acs_dir}. Ensure you're requesting a valid year and" "year range. Consult the lookup_tables release to troubleshoot.") # Remove rest of lookup table folder shutil.rmtree(path_to_lookup_tables) # save a version file so we can tell what it is vpath = os.path.join(data_dir, 'version') with open(vpath, 'w') as vfile: vfile.write('zrp release --> {}'.format(lookup_tables_output_fname)) print("Filed lookup tables successfully,", file=sys.stderr) print(" to", data_dir) def download_and_clean_pipelines(url, pipelines_output_fname, pipelines_output_zip_fname): """ Downloads pipeline pickle files and file them within the module. This downloads the zip file from the repository, extracts it, renames it, then moves the tables to the correct directory, and removes large files not used at runtime. :param pipeline_output_fname: A string for the name of the file downloaded after unzipping. :param url: A string for the url of the release zip to download. :param pipelines_output_zip_fname: A string for the name of the zip file downloaded. 
:return: """ cwd = os.path.dirname(os.path.abspath(__file__)) path_to_ppln_zip = os.path.join(cwd, pipelines_output_zip_fname) print("Downloading zrp release...", file=sys.stderr) download_progress(url, path_to_ppln_zip) print("Finished download") print("\n") print("Filing extras...") with zipfile.ZipFile(path_to_ppln_zip, 'r') as zf: zf.extractall(cwd) os.remove(path_to_ppln_zip) # Get rid of prefix that unzipping prepends # curr_folder = cwd.split("/")[-1] # unzipped_src_fname = curr_folder + "-" + pipelines_output_fname # path_to_unzipped_src = os.path.join(cwd, unzipped_src_fname) path_to_pipelines = os.path.join(cwd, pipelines_output_fname) # os.rename(path_to_unzipped_src, path_to_pipelines) # Clear old look up table directories model_dir = os.path.join(cwd, 'modeling/models') block_group_dir = os.path.join(model_dir, 'block_group') census_tract_dir = os.path.join(model_dir, 'census_tract') zip_code_dir = os.path.join(model_dir, 'zip_code') block_group_pipeline = os.path.join(block_group_dir, 'pipe.pkl') census_tract_pipeline = os.path.join(census_tract_dir, 'pipe.pkl') zip_code_pipeline = os.path.join(zip_code_dir, 'pipe.pkl') if os.path.isfile(block_group_pipeline): os.remove(block_group_pipeline) if os.path.isfile(census_tract_pipeline): os.remove(census_tract_pipeline) if os.path.isfile(zip_code_pipeline): os.remove(zip_code_pipeline) print("Old pipelines cleared out.") # Migrate pipelines dl_bg_pipe_file = os.path.join(path_to_pipelines, 'block_group_pipe.pkl') dl_ct_pipe_file = os.path.join(path_to_pipelines, 'census_tract_pipe.pkl') dl_zp_pipe_file = os.path.join(path_to_pipelines, 'zip_code_pipe.pkl') if os.path.isfile(dl_bg_pipe_file): shutil.move(dl_bg_pipe_file, os.path.join(block_group_dir, 'pipe.pkl')) print("Block group pipeline successfully migrated.") else: warnings.warn(f"The block group pipeline was not found in {dl_bg_pipe_file}." 
"Consult the pipelines release to troubleshoot.") if os.path.isfile(dl_ct_pipe_file): shutil.move(dl_ct_pipe_file, os.path.join(census_tract_dir, 'pipe.pkl')) print("Census tract pipeline successfully migrated.") else: warnings.warn(f"The census tract pipeline was not found in {dl_ct_pipe_file}." "Consult the pipelines release to troubleshoot.") if os.path.isfile(dl_zp_pipe_file): shutil.move(dl_zp_pipe_file, os.path.join(zip_code_dir, 'pipe.pkl')) print("Zip code pipeline successfully migrated.") else: warnings.warn(f"The zip code pipeline was not found in {dl_zp_pipe_file}." "Consult the pipelines release to troubleshoot.") # Remove rest of pipelines folder shutil.rmtree(path_to_pipelines) # save a version file so we can tell what it is data_dir = os.path.join(cwd, 'data') vpath = os.path.join(data_dir, 'version') with open(vpath, 'w') as vfile: vfile.write('zrp release --> {}'.format(pipelines_output_fname)) print("Filed pipelines successfully", file=sys.stderr) print(" to", model_dir) def get_release(): version = about.__version__ dl_tpl = "{m}-{v}" return dl_tpl.format(m="zrp", v=version) def download(): release_pkg = get_release() # lookup_tables_output_fname = release_pkg + "_lookup_tables" lookup_tables_output_fname = "lookup_tables" lookup_tables_output_zip_fname = release_pkg + "_lookup_tables" + ".zip" lookup_table_url = about.__download_url_prefix__ + release_pkg + "/lookup_tables.zip" download_and_clean_lookup_tables(lookup_table_url, lookup_tables_output_fname, lookup_tables_output_zip_fname) pipelines_output_fname = "pipelines" pipelines_output_zip_fname = release_pkg + "_pipelines" + ".zip" pipelines_url = about.__download_url_prefix__ + release_pkg + "/pipelines.zip" download_and_clean_pipelines(pipelines_url, pipelines_output_fname, pipelines_output_zip_fname)
zestai/zrp
zrp/download.py
download.py
py
9,695
python
en
code
25
github-code
6
[ { "api_name": "tqdm.tqdm", "line_number": 13, "usage_type": "name" }, { "api_name": "urllib.request.urlretrieve", "line_number": 41, "usage_type": "call" }, { "api_name": "os.path.dirname", "line_number": 60, "usage_type": "call" }, { "api_name": "os.path", "l...
29057620857
#!/usr/bin/python3 """ base module contains the Base class """ import json class Base: """ Base class: manage id attribute in all the subclass Attributes: __nb_objects - class attribute initialized with 0 __init__ - class constructor """ __nb_objects = 0 def __init__(self, id=None): """assign the public instance attribute id""" if id: self.id = id else: type(self).__nb_objects += 1 self.id = type(self).__nb_objects @staticmethod def to_json_string(list_dictionaries): """returns the JSON string repr""" if list_dictionaries: return json.dumps(list_dictionaries) return "[]" @classmethod def save_to_file(cls, list_objs): """writes the JSON string repr of list_objs to a file""" list_dict = [] if list_objs: for i in list_objs: list_dict.append(i.to_dictionary()) objs_json = cls.to_json_string(list_dict) filename = cls.__name__ + ".json" with open(filename, 'w', encoding="utf-8") as f: f.write(objs_json) @staticmethod def from_json_string(json_string): """deserialises the json obj and returns the python object""" import json if not json_string: return [] return json.loads(json_string) @classmethod def create(cls, **dictionary): """returns an instance with all attr already set""" if not dictionary: return cls if cls.__name__ == "Rectangle": rectangle = cls(1, 1) rectangle.update(**dictionary) return rectangle square = cls(1) square.update(**dictionary) return square @classmethod def load_from_file(cls): """returns a list of instances""" import os filename = cls.__name__ + ".json" if not os.path.exists(filename): return [] with open(filename, "r", encoding='utf-8') as f: json_str = f.read() list_dict = cls.from_json_string(json_str) list_obj = [] for item in list_dict: instance = cls.create(**item) list_obj.append(instance) return list_obj @classmethod def save_to_file_csv(cls, list_objs): """parse list_objs to csv""" import csv """if not list_objs: return None""" if cls.__name__ == "Rectangle": fields = ["id", "width", "height", "x", "y"] elif cls.__name__ == "Square": 
fields = ["id", "size", "x", "y"] filename = cls.__name__ + ".csv" with open(filename, 'w', newline='', encoding='utf-8') as f: csvwriter = csv.writer(f) csvwriter.writerow(fields) list_dict = [] for item in list_objs: cls_dict = item.to_dictionary() instance_value = [] for key in fields: instance_value.append(cls_dict[key]) list_dict.append(instance_value) csvwriter.writerows(list_dict) @classmethod def load_from_file_csv(cls): """load a csv to list_obj""" import csv filename = cls.__name__ + ".csv" list_objs = [] with open(filename, 'r', newline='', encoding='utf-8') as f: csvreader = csv.reader(f) fields = next(csvreader) key_value = {} for row in csvreader: i = 0 for attr in fields: key_value[attr] = int(row[i]) i += 1 python_obj = cls.create(**key_value) list_objs.append(python_obj) return list_objs
ZIHCO/alx-higher_level_programming
0x0C-python-almost_a_circle/models/base.py
base.py
py
3,809
python
en
code
0
github-code
6
[ { "api_name": "json.dumps", "line_number": 32, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 54, "usage_type": "call" }, { "api_name": "os.path.exists", "line_number": 75, "usage_type": "call" }, { "api_name": "os.path", "line_number": 75,...
13900998846
from flask import Flask,render_template,request,send_file; from flask_wtf import FlaskForm; from wtforms import FileField,SubmitField; import pandas as pd import logging as logger from werkzeug.utils import secure_filename from openpyxl.styles import Border, Side, Alignment,PatternFill from openpyxl.utils import get_column_letter from datetime import datetime import re import os import io app = Flask(__name__) app.config['SECRET_KEY'] = 'supersecretkey' app.config['UPLOAD_FOLDER'] = 'static/files' class UploadFileForm(FlaskForm): file = FileField("File") submit = SubmitField("Upload File") @app.route('/',methods = ['GET','POST']) @app.route('/download') def index(): form = UploadFileForm() # @app.route('/upload', methods=['GET', 'POST']) if form.validate_on_submit(): file = form.file.data df = pd.read_excel(file) unique_surveyor_names = df["Surveyor Name"].unique() # Create a dictionary to store the results for each surveyor name results = {} # Loop through each unique surveyor name for surveyor_name in unique_surveyor_names: # Filter the dataframe to only include data for the current surveyor name surveyor_data = df[df["Surveyor Name"] == surveyor_name] # Group the data by surveyor name and calculate the number of samples samples = len(surveyor_data) # Group the data by surveyor name and calculate the total audio duration duration = surveyor_data["Audio Duration (in secs)"].sum() # Calculate the starting time and ending time for each surveyor start_time = surveyor_data["Timestamp"].min() end_time = surveyor_data["Timestamp"].max() # Group the data by gender and calculate the percentage of male and female gender_group = surveyor_data.groupby("Gender").count()["Timestamp"] #print(gender_group) gender_percentage = (gender_group / len(surveyor_data) * 100).astype(float) def count_valid_contact_numbers(x): pattern = re.compile(r"^\d{10}$") mask = x.apply(lambda x: not bool(pattern.match(str(x)))) count = mask.sum() return count invalid_numbers = 
count_valid_contact_numbers(surveyor_data['Contact Number']) duplicate = surveyor_data.duplicated('Location').sum() # Group the data by age group and calculate the percentage of each age group age_group = surveyor_data.groupby("Age Group").count()["Timestamp"].astype(float) age_percentage = (age_group / len(surveyor_data) * 100) print(age_group) # Group the data by political party and calculate the percentage of each party party_group = surveyor_data.groupby('ఇప్పటికి ఇప్పుడు ఎన్నికలు జరిగితే మీరు ఏ పార్టీ కి మద్దతు ఇస్తారు ?').count()["Timestamp"] party_percentage = (party_group / len(surveyor_data) * 100).astype(float) party_group1 = surveyor_data.groupby('మీ MLA పరిపాలన పట్ల మీ అభిప్రాయం?').count()["Timestamp"] party_percentage2 = (party_group1 / len(surveyor_data) * 100).astype(float) party_group2 = surveyor_data.groupby('వైయెస్ జగన్మోహన్ రెడ్డిగారి పరిపాలన పట్ల మీ అభిప్రాయం ఏమిటి?').count()["Timestamp"] party_percentage3 = (party_group2 / len(surveyor_data) * 100).astype(float) # Save the results in the dictionary results[surveyor_name] = { "NO OF SAMPLES": samples, "DURATION": duration, "STARTING TIME": start_time, "ENDING TIME": end_time, "FEMALE": gender_percentage.get("Female", 0), "MALE": gender_percentage.get("Male", 0), "DUPLICATE LOCATION":duplicate, 'INVALID CONTACT': invalid_numbers, "18-30": age_percentage.get("18-30", 0), "30-45": age_percentage.get("30-45", 0), "45-60": age_percentage.get("45-60", 0), "60+": age_percentage.get("60+", 0), "YSRCP": party_percentage.get("YSRCP", 0), "TDP": party_percentage.get("TDP",0), "JSP": party_percentage.get("JSP", 0), "BJP": party_percentage.get("BJP", 0), "INC": party_percentage.get("INC", 0), "Not Decided": party_percentage.get("Not Decided", 0), "బాగుంది.":party_percentage3.get('బాగుంది' ,0), "బాగోలేదు.":party_percentage3.get('బాగోలేదు' ,0), "బాగా చేస్తున్నారు" : party_percentage2.get("బాగా చేస్తున్నారు" , 0), "బాగా చేయడం లేదు": party_percentage2.get("బాగా చేయడం లేదు" , 0), } 
#results[surveyor_name]['INVALID CONTACT'] = invalidnum.Contact_Number.apply(count_valid_contact_numbers).sum() def color_format(val): text_color = 'black' try: if val >= "60": color = 'red' font_weight = 'bold' text_color = 'white' else: color = 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) except ValueError: return '' def color_format2(val): text_color = 'black' try: if val.endswith("%") and float(val.strip("%")) >= 30: color = 'red' font_weight = 'bold' text_color = 'white' else: color = 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) except ValueError: return '' def color_format3(val): text_color = 'black' try: if val.endswith("%") and float(val.strip("%")) >= 50: color = 'red' font_weight = 'bold' text_color = 'white' else: color = 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) except ValueError: return '' def color_format4(val): text_color = 'black' try: if val.endswith("%") and float(val.strip("%")) >= 60: color = 'red' font_weight = 'bold' text_color = 'white' else: color = 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) except ValueError: return '' def duration_format(val): text_color = 'black' if val >= '05:00:00' : color = 'red' font_weight = 'bold' text_color = 'white' else: color= 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) def duplicate_location(val): text_color = 'black' try: if val >= 5: color = 'red' font_weight = 'bold' text_color = 'white' else: color = 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) except ValueError: return '' def invalid_number(val): text_color = 'black' try: if val >= 5: color 
= 'red' font_weight = 'bold' text_color = 'white' else: color = 'white' font_weight = 'normal' return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color) except ValueError: return '' results_df = pd.DataFrame.from_dict(results, orient='index') #results_df = results_df.transpose() results_df.reset_index(inplace=True) results_df.rename(columns={"index": "Surveyor Name"}, inplace=True) results_df['BJP'] = results_df['BJP'].apply(lambda x: "{:.0f}%".format(x)) results_df['INC'] = results_df['INC'].apply(lambda x: "{:.0f}%".format(x)) results_df['JSP'] = results_df['JSP'].apply(lambda x: "{:.0f}%".format(x)) results_df['TDP'] = results_df['TDP'].apply(lambda x: "{:.0f}%".format(x)) results_df['YSRCP'] = results_df['YSRCP'].apply(lambda x: "{:.0f}%".format(x)) results_df['Not Decided'] = results_df['Not Decided'].apply(lambda x: "{:.0f}%".format(x)) results_df['18-30']= results_df['18-30'].apply(lambda x: "{:.0f}%".format(x)) results_df['30-45']= results_df['30-45'].apply(lambda x: "{:.0f}%".format(x)) results_df['45-60']= results_df['45-60'].apply(lambda x: "{:.0f}%".format(x)) results_df['60+']= results_df['60+'].apply(lambda x: "{:.0f}%".format(x)) results_df['MALE']= results_df['MALE'].apply(lambda x: "{:.0f}%".format(x)) results_df['FEMALE']= results_df['FEMALE'].apply(lambda x: "{:.0f}%".format(x)) results_df['బాగా చేయడం లేదు']= results_df['బాగా చేయడం లేదు'].apply(lambda x: "{:.0f}%".format(x)) results_df['బాగా చేస్తున్నారు']= results_df['బాగా చేస్తున్నారు'].apply(lambda x: "{:.0f}%".format(x)) results_df['బాగోలేదు.']= results_df['బాగోలేదు.'].apply(lambda x: "{:.0f}%".format(x)) results_df['బాగుంది.']= results_df['బాగుంది.'].apply(lambda x: "{:.0f}%".format(x)) results_df['STARTING TIME'] = results_df['STARTING TIME'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')) results_df['ENDING TIME'] = results_df['ENDING TIME'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')) results_df['DURATION'] = 
results_df['ENDING TIME'] - results_df['STARTING TIME'] results_df['DURATION'] = results_df['DURATION'].apply(lambda x: f"{x.days * 24 + x.seconds // 3600:0>2}:{(x.seconds % 3600) // 60:0>2}:{x.seconds % 60:0>2}") results_df = results_df.style.applymap(color_format, subset=['MALE', 'FEMALE']) \ .applymap(duration_format, subset=['DURATION']) \ .applymap(duplicate_location, subset=['DUPLICATE LOCATION']) \ .applymap(invalid_number, subset=['INVALID CONTACT']) \ .applymap(color_format2, subset=['18-30','30-45','45-60','60+'])\ .applymap(color_format3, subset=['YSRCP','TDP','JSP','BJP','INC','Not Decided'])\ .applymap(color_format4, subset=['బాగుంది.','బాగోలేదు.' ])\ .applymap(color_format4, subset=['బాగా చేస్తున్నారు', 'బాగా చేయడం లేదు' ]) #results_df.to_excel(r"C:\Users\dell\Downloads\Sai_Swapnill Cons_Results.xlsx") # Save the DataFrame to an Excel file writer = pd.ExcelWriter("result.xlsx", engine='openpyxl') results_df.to_excel(writer, index=False) # Get the active worksheet worksheet = writer.book.active worksheet.freeze_panes = worksheet.cell(1,2) # Set the column width to automatically adjust to the size of the contents in the column for column_cells in worksheet.columns: length = max(len(str(cell)) for cell in column_cells) worksheet.column_dimensions[column_cells[0].column_letter].width = length worksheet.insert_rows(1, 1) worksheet['C1']="TIME" worksheet.merge_cells('C1:E1') worksheet['F1']="GENDER" worksheet.merge_cells('F1:G1') worksheet['J1']="AGE GROUP" worksheet.merge_cells('J1:M1') worksheet['N1'] = 'ఇప్పటికి ఇప్పుడు ఎన్నికలు జరిగితే మీరు ఏ పార్టీ కి మద్దతు ఇస్తారు ?' worksheet.merge_cells('N1:S1') worksheet['T1']= "వైయెస్ జగన్మోహన్ రెడ్డిగారి పరిపాలన పట్ల మీ అభిప్రాయం ఏమిటి?""మీ MLA పరిపాలన పట్ల మీ అభిప్రాయం?" worksheet.merge_cells('T1:U1') worksheet['V1'] = "మీ MLA పరిపాలన పట్ల మీ అభిప్రాయం?" 
worksheet.merge_cells('V1:W1') fill_colors = ["D8E4BCFF", "D8E4BCFF", "D8E4BCFF", "D8E4BCFF", "D8E4BCFF","D8E4BCFF",] for i, merged_cell_range in enumerate(worksheet.merged_cell_ranges): range_string = str(merged_cell_range) merged_row = worksheet[range_string] for row in merged_row: for cell in row: cell.fill = PatternFill(start_color=fill_colors[i], end_color=fill_colors[i], fill_type="solid") for row in worksheet.iter_rows(): for cell in row: cell.border = cell.border + Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) cell.alignment = Alignment(horizontal='center') # Set the background color of the first row (the column names) for cell in worksheet[2]: cell.fill = PatternFill(start_color="B8CCE4FF", end_color="B8CCE4FF", fill_type = "solid") #Add filter to each column worksheet.auto_filter.ref = "A2:%s2" % (get_column_letter(worksheet.max_column)) # data = Reference(worksheet, min_col=2, min_row=1, max_row=worksheet.max_row, max_col=2) # Save the changes to the Excel file writer.save() print("Analysis complete!") # Convert categorical columns to numeric # Return the file for download return send_file("result.xlsx", mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", as_attachment=True) # rendered=render_template('data.html',res = res,length = length) # df1 = pd.DataFrame({'Data': [rendered]}) # output = io.BytesIO() # writer = pd.ExcelWriter(output, engine='xlsxwriter') # df1.to_excel(writer, index=False, sheet_name='Sheet1') # writer.save() # output.seek(0) # return send_file(output, # attachment_filename='combined.xlsx', # as_attachment=True) # result.to_excel("swapnil_New.xlsx", index=False) #return render_template('data.html',res = res,length = length) #result.to_excel(os.path.join(app.config['UPLOAD_FOLDER'], "swapnil_New.xlsx"), index=False) # return "Report Generated Successfully" return render_template('index.html',form = form) if __name__ == '__main__': app.run()
Swapnil-mindbrick-2022/reportgen
App.py
App.py
py
18,656
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 17, "usage_type": "call" }, { "api_name": "flask_wtf.FlaskForm", "line_number": 23, "usage_type": "name" }, { "api_name": "wtforms.FileField", "line_number": 24, "usage_type": "call" }, { "api_name": "wtforms.SubmitField...
11601984964
import sqlite3 """ Two functions to help the main.py functions to validate the reference variable. """ # Open the database and create a cursor conn = sqlite3.connect("candidate.db") c = conn.cursor() """ ************************** Args - ref - str Return - Bool A validation function that takes the reference as an argument, checks the length is equal to 8 and then if it is made up of only letters and numbers. If either of these steps fail, a relevant message is sent to the user explaining why. **************************""" def valid_reference(ref): if len(ref) != 8: print("Reference must be 8 characters long.") return False else: count = 0 for i in ref: if (57 >= ord(i) >= 48) or (90 >= ord(i) >= 65) or (122 >= ord(i) >= 97): count += 1 if count == 8: return True else: print("Reference must be only letters/digits.") return False """ ************************** Args - ref - str Return - either DB row or False This function takes the reference as an argument and checks the database to see if it exists. If it does it messages the user and return the record. If not, then it returns False **************************""" def check_reference_exists(ref): with conn: c.execute("SELECT * FROM candidate_table WHERE reference=?", (ref,)) candidate_selected = c.fetchone() if candidate_selected: print("Reference exists...") return candidate_selected return False
JohnEHughes/arctic_shores_test_v1
validators.py
validators.py
py
1,584
python
en
code
0
github-code
6
[ { "api_name": "sqlite3.connect", "line_number": 10, "usage_type": "call" } ]
27472315206
from flask import Flask, request,jsonify import util from flask_cors import CORS app= Flask(__name__) CORS(app) @app.route('/get_location_names',methods=['GET']) def get_location_names(): response = jsonify({ 'location': util.get_location() }) response.headers.add('Access-Control-Allow-Origin', '*') return response @app.route('/predict_home_price',methods=['GET','POST']) def predict_home_price(): toal_sqft=float(request.form['total_sqft']) bhk=int(request.form['bhk']) balcony=int(3) bath=int(request.form['bath']) location=request.form['location'] response=jsonify({ 'estimated_price':util.estimated_price(location,toal_sqft,bhk,bath,balcony) }) response.headers.add('Access-Control-Allow-Origin', '*') return response if __name__ == "__main__": print("staring the server") util.get_location() app.run()
Shinchan3/Home_price_linear_regression
server/server.py
server.py
py
922
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 4, "usage_type": "call" }, { "api_name": "flask_cors.CORS", "line_number": 5, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 8, "usage_type": "call" }, { "api_name": "util.get_location", "line_...
36356263355
from PIL import ImageDraw from configs.cfgs import args def read_class_names(class_file_name): '''loads class name from a file''' names = {} with open(class_file_name, 'r') as data: for ID, name in enumerate(data): names[ID] = name.strip('\n') return names def draw_boxes(img, boxes): """ :param img: :param boxes: :return: """ draw = ImageDraw.Draw(img) for box in boxes: draw.rectangle(list(box), outline='red') return img class UnNormalizer(object): def __init__(self, mean=None, std=None): if mean == None: self.mean = [0.485, 0.456, 0.406] else: self.mean = mean if std == None: self.std = [0.229, 0.224, 0.225] else: self.std = std def __call__(self, tensor): """ Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. Returns: Tensor: Normalized image. """ for t, m, s in zip(tensor, self.mean, self.std): t.mul_(s).add_(m) return tensor def test(): class_name = read_class_names(args.classes) print(class_name) if __name__ == "__main__": test()
alexchungio/RetinaNet-Pytorch
utils/tools.py
tools.py
py
1,245
python
en
code
0
github-code
6
[ { "api_name": "PIL.ImageDraw.Draw", "line_number": 21, "usage_type": "call" }, { "api_name": "PIL.ImageDraw", "line_number": 21, "usage_type": "name" }, { "api_name": "configs.cfgs.args.classes", "line_number": 52, "usage_type": "attribute" }, { "api_name": "confi...
44098268965
import tensorflow as tf
import numpy as np
from typing import Union, Optional, Sequence
from pathlib import Path

from dui.datasets.hdf5datasetfactory import HDF5DatasetFactory
from dui.utils.signal import compress_db
from dui.layers.utils import get_channel_axis


def create_image_dataset(
        path: Union[str, Path],
        name: str,
        factor: Union[str, float] = '0db',  # TODO: None as default or 1?
        signal_type: str = 'rf',  # TODO: None or 'raw' as default?
        data_format: str = 'channels_last',
        # TODO: patch paddings typing elsewhere if validated
        # paddings: Optional[Union[Sequence[int], np.ndarray]] = None,
        paddings: Optional[Union[Sequence[Sequence[int]], np.ndarray]] = None,
        start: int = 0,
        stop: Optional[int] = None,
        step: int = 1,
        slicer: Optional[Sequence[slice]] = None,
        batch_size: int = 1,
        shuffle: bool = False,
        num_parallel_calls: Optional[int] = None,
        seed: Optional[int] = None,
) -> tf.data.Dataset:
    """Build a batched, pre-processed tf.data.Dataset of 2D image samples
    read from an HDF5 dataset.

    `factor` is either a numeric normalization divisor or the name of an
    HDF5 attribute holding one. `signal_type` selects the ultrasound
    signal transformation applied in `_image_preproc_fun`.
    Raises ValueError for non-2D samples or a missing factor attribute,
    TypeError for an unsupported `factor` type.
    """
    # Factory wrapping the HDF5 dataset (handles slicing/shuffling).
    dataset_factory = HDF5DatasetFactory(
        path=path,
        name=name,
        start=start,
        stop=stop,
        step=step,
        slicer=slicer,
        shuffle=shuffle,
        seed=seed
    )

    # Check sample shape: downstream pre-processing assumes 2D samples.
    # NOTE(review): reads a private factory attribute — confirm stability.
    base_sample_shape = dataset_factory._output_sample_shape
    if len(base_sample_shape) != 2:
        raise ValueError(
            "Dataset sample must be a 2D array. Current shape: {}".format(
                base_sample_shape
            )
        )

    # Normalization factor: a string is treated as an HDF5 attribute key.
    if isinstance(factor, str):
        attr_key = factor
        factor = dataset_factory.attrs.get(attr_key)
        if factor is None:
            raise ValueError(
                "No attribute '{}' for dataset '{}' in '{}'".format(
                    attr_key,
                    dataset_factory._dataset.name,
                    dataset_factory._dataset.file.filename
                )
            )
    elif type(factor) in (int, float):
        pass
    else:
        raise TypeError("Unsupported type for 'factor'")

    # Create dataset
    dataset = dataset_factory.create_dataset()
    # TODO: include factor directly and specialize the pre-processing
    #  for US-specific only?
    # Hack to avoid having an <unknown> shape (probably unsafe)
    # TODO: handle this in factory or by sub-classing tf.data.Dataset
    # Note: Probably below some Dataset._element_structure properties
    # Note: most probably not compatible with 1.15
    dataset._element_structure._shape = tf.TensorShape(base_sample_shape)

    # Pre-processing: batch first, then map the per-batch transform.
    dataset = dataset.batch(batch_size=batch_size)
    # TODO: use `dataset.padded_batch` instead and remove following
    #  `paddings` option from following pre-processing
    # TODO: apply normalization factor before
    dataset = _preprocess_image_dataset(
        dataset=dataset,
        factor=factor,
        data_format=data_format,
        signal_type=signal_type,
        paddings=paddings,
        num_parallel_calls=num_parallel_calls
    )

    return dataset


def _preprocess_image_dataset(
        dataset: tf.data.Dataset,
        factor: Optional[float] = None,
        data_format: str = 'channels_last',
        signal_type: Optional[str] = None,
        paddings: Optional[Union[Sequence[int], np.ndarray]] = None,
        num_parallel_calls: Optional[int] = None
) -> tf.data.Dataset:
    """Map `_image_preproc_fun` over `dataset` with the given settings."""
    # Specify pre-processing function as a mapping function
    def map_func(x: tf.Tensor) -> tf.Tensor:
        return _image_preproc_fun(
            x,
            factor=factor,
            data_format=data_format,
            signal_type=signal_type,
            paddings=paddings
        )

    return dataset.map(
        map_func=map_func,
        num_parallel_calls=num_parallel_calls
    )


def _image_preproc_fun(
        x: tf.Tensor,
        factor: Optional[float] = None,
        data_format: str = 'channels_last',
        signal_type: Optional[str] = None,
        paddings: Optional[Union[Sequence[int], np.ndarray]] = None,
) -> tf.Tensor:
    """Normalize, pad, and convert a batch of 2D samples to the requested
    ultrasound signal representation ('rf', 'iq', 'env', 'bm' or 'raw').

    Returns a tensor with an added channel axis (except for 'iq', whose
    real/imaginary parts are stacked on the channel axis).
    """
    # TODO: check inputs
    x = tf.convert_to_tensor(x)

    # Normalization factor
    # NOTE(review): `if factor:` also skips a factor of exactly 0 — confirm
    # a zero factor is never expected here.
    if factor:
        # TODO: apply factor before and keep this pre-proc
        #  function only for US-specific transformations?
        x /= factor

    # Paddings
    if paddings is not None:
        # TODO: would probably make more sense to remove paddings
        #  from this US-specific pre-processing function
        # x = _batched_pad(x, paddings=paddings)
        paddings = np.array(paddings)
        valid_pad_shape = 2, 2
        pad_shape = paddings.shape
        # TODO: this test is too restrictive in general (e.g. 3D)
        #  but ok for now as we only work on 2D images
        if pad_shape != valid_pad_shape:
            raise ValueError(
                "Incompatible 'paddings' shape. Current: {}. "
                "Expected {}".format(pad_shape, valid_pad_shape)
            )
        # Prepend a no-op padding for the batch dimension.
        paddings = [[0, 0], *paddings.tolist()]
        pad_kwargs = {
            'paddings': tf.constant(paddings, dtype='int32'),
            'mode': 'CONSTANT',
            'constant_values': 0
        }
        x = tf.pad(x, **pad_kwargs)

    # Channel axis
    channel_axis = get_channel_axis(data_format=data_format)

    # Signal type
    if signal_type is not None:
        if signal_type == 'rf':
            x = tf.math.real(x)
        elif signal_type == 'iq':
            # Stack complex components in channels
            x = tf.stack((tf.math.real(x), tf.math.imag(x)), axis=channel_axis)
        elif signal_type == 'env':
            # Takes modulus of complex IQ signal
            x = tf.math.abs(x)
        elif signal_type == 'bm':
            # Takes modulus of complex IQ signal
            x = tf.math.abs(x)
            # Compress to dB
            x = compress_db(tensor=x)
        elif signal_type == 'raw':
            pass
        else:
            raise ValueError("Invalid signal type")

    # Expand dimension ('iq' already created its channel axis via tf.stack)
    if signal_type != 'iq':
        x = tf.expand_dims(x, axis=channel_axis)

    return x
dperdios/dui-ultrafast
dui/datasets/utils.py
utils.py
py
6,117
python
en
code
14
github-code
6
[ { "api_name": "typing.Union", "line_number": 12, "usage_type": "name" }, { "api_name": "pathlib.Path", "line_number": 12, "usage_type": "name" }, { "api_name": "typing.Union", "line_number": 14, "usage_type": "name" }, { "api_name": "typing.Optional", "line_nu...
43332148964
import logging
import json
import gzip
import ipaddress
import datetime
from c99api import EndpointClient
from typing import List, Dict, Optional
from os.path import exists
from pydantic import BaseModel

logger = logging.getLogger()


def enrich_object_c99(object, c99_key: str = ""):
    """Enrich a parsed Shodan object with hostnames/domains from the c99 API.

    Mutates `object` in place, appending new entries to its
    'hostname_list' and 'domain_list' lists.
    """
    c99 = EndpointClient
    c99.key = c99_key
    ip = object["IPAddress"]

    resp = c99.gethostname(ip)
    if resp["success"] and ip != resp["hostname"] and resp["hostname"] not in object["hostname_list"]:
        logging.info(f"gethostname: {resp['hostname']}")
        object["hostname_list"].append(resp["hostname"])

    resp = c99.ip2domains(ip)
    if resp["success"] and resp["count"] >= 1:
        logging.info(f"ip2domains: {resp['data']}")
        object["domain_list"].extend([hname for hname in resp["data"] if hname not in object["domain_list"]])


def merge_config(current_config: Optional[Dict] = None, custom_config: Optional[Dict] = None):
    """Recursively merge `custom_config` into `current_config` and return it.

    Lists are merged as the de-duplicated union of both lists, dicts are
    merged recursively, and scalar values from `custom_config` override
    those in `current_config`. `current_config` is mutated in place.
    """
    # Avoid the mutable-default-argument pitfall: fresh dicts per call.
    current_config = {} if current_config is None else current_config
    custom_config = {} if custom_config is None else custom_config
    for key, value in custom_config.items():
        if key in current_config:
            if isinstance(value, list):
                # BUGFIX: previous code did
                #   list(set(current_config[key].extend(current_config[key])))
                # which (a) extended the list with *itself* instead of the
                # incoming value and (b) passed None (extend's return value)
                # to set(), raising TypeError. Build the union instead.
                current_config[key] = list(set(current_config[key] + value))
            elif isinstance(value, dict):
                current_config[key] = merge_config(current_config[key], custom_config[key])
            else:
                current_config[key] = value
        else:
            current_config.update({key: value})
    return current_config


def load_config(default_config: str = "config.default.json", override_config: str = ""):
    """Load the default JSON config and overlay an optional override file.

    Raises ValueError if the default config file does not exist; override
    errors are logged but non-fatal.
    """
    config_builder = {}
    if exists(default_config):
        with open(default_config, "r", encoding="utf-8") as config_file:
            config_builder = json.load(config_file)
    else:
        raise ValueError("config file not found")

    if exists(override_config):
        with open(override_config, "r", encoding="utf-8") as config_file:
            try:
                configData = json.load(config_file)
                config_builder = merge_config(current_config=config_builder, custom_config=configData)
            except Exception as e:
                logger.error(f"Error adding override config\n{e}")
    return config_builder


def decode_shodan(obj: dict = {}):
    """Flatten one raw Shodan JSON record into the internal schema.

    Returns {} if the mandatory fields cannot be parsed; SSL fields fall
    back to empty defaults when the certificate block is absent/broken.
    """
    try:
        parsed_object = {
            "domain_list": obj["domains"] if "domains" in obj else [],
            "hostname_list": [obj["_shodan"]["options"]["hostname"]] if "hostname" in obj["_shodan"]["options"] else [],
            "cloud_provider": None,
            "operating_system": obj["os"],
            "product": obj["product"] if "product" in obj else "",
            "IPAddress": ipaddress.ip_address(obj["ip_str"]),
            "timestamp": datetime.datetime.fromisoformat(obj["timestamp"]),
            "protocol": obj["transport"] if "transport" in obj else "",
            "internet_service_provider": obj["isp"],
            "version": obj["version"] if "version" in obj else "",
            "organisation": obj["org"],
            "country": obj["location"]["country_name"] if "country_name" in obj["location"] else "",
            "city": obj["location"]["city"] if "city" in obj["location"] else "",
            "port": obj["port"]
        }
        parsed_object["hostname_list"].extend([hname.strip() for hname in obj["hostnames"]])
    except Exception as e:
        logging.error(e)
        return {}

    try:
        if "ssl" in obj and "cert" in obj["ssl"]:
            cert = obj["ssl"]
            #parsed_object["ssl_fingerprint"] = cert["cert"]["fingerprint"]["sha256"]
            #parsed_object["ssl_serial"] = cert["cert"]["serial"]
            # BUGFIX: membership test was `"CN" in cert["cert"]["subject"]["CN"]`
            # (checking inside the CN *value*, raising KeyError when absent);
            # the subject dict itself must be tested for the "CN" key.
            parsed_object["ssl_SAN"] = [cert["cert"]["subject"]["CN"]] if "CN" in cert["cert"]["subject"] else []
            # Parse backslash-escaped subjectAltName blobs into separate names.
            for alt in cert["cert"]["extensions"]:
                if alt["name"] == "subjectAltName" and alt["data"]:
                    i = 0
                    while i < len(alt["data"]):
                        if alt["data"][i] == "\\":
                            i += 4
                            continue
                        next_slash = alt["data"][i:].find("\\")
                        if next_slash >= 0:
                            parsed_object["ssl_SAN"].append(alt["data"][i:i+next_slash])
                            i += next_slash
                        else:
                            parsed_object["ssl_SAN"].append(alt["data"][i:])
                            i = len(alt["data"])
                    if parsed_object["ssl_SAN"][-1] == "0.":
                        parsed_object["ssl_SAN"].pop()
            parsed_object["ssl_SAN"] = list(set(parsed_object["ssl_SAN"]))
            parsed_object["ssl_issuer"] = cert["cert"]["issuer"]["O"] if "O" in cert["cert"]["issuer"] else cert["cert"]["issuer"]["CN"]
            #parsed_object["ssl_ja3"] = cert["ja3s"]
            #parsed_object["ssl_jarm"] = cert["jarm"]
            parsed_object["ssl_expiration"] = datetime.datetime.strptime(cert["cert"]["expires"], "%Y%m%d%H%M%SZ")
        else:
            # No certificate present: neutral defaults.
            #parsed_object["ssl_fingerprint"] = ""
            #parsed_object["ssl_serial"] = -1
            parsed_object["ssl_SAN"] = []
            parsed_object["ssl_issuer"] = ""
            #parsed_object["ssl_ja3"] = ""
            #parsed_object["ssl_jarm"] = ""
            parsed_object["ssl_expiration"] = datetime.datetime.fromordinal(1)
    except Exception as e:
        # Malformed certificate data: fall back to the same neutral defaults.
        #parsed_object["ssl_fingerprint"] = ""
        #parsed_object["ssl_serial"] = -1
        parsed_object["ssl_SAN"] = []
        parsed_object["ssl_issuer"] = ""
        #parsed_object["ssl_ja3"] = ""
        #parsed_object["ssl_jarm"] = ""
        parsed_object["ssl_expiration"] = datetime.datetime.fromordinal(1)
        logging.error(e)
    return parsed_object


def load_shodan_files(filename: str = "", config: Dict = {}):
    """Load and decode a Shodan export file (plain or gzipped JSON lines).

    Returns a list of decoded records; unparseable lines are counted and
    reported via logging.
    """
    if not exists(filename):
        # NOTE(review): message does not interpolate `filename` — confirm intended.
        logging.error(f"File not found: (unknown)")
        raise FileNotFoundError
    logging.info(f"Loading file: (unknown)")

    if filename.endswith(".json.gz"):
        with gzip.open(filename, "rb") as archive:
            lines = archive.readlines()
    else:
        with open(filename, "rb") as raw_file:
            lines = raw_file.readlines()

    data = []
    error_count = 0
    for line in lines:
        try:
            json_obj = json.loads(line)
            try:
                obj = decode_shodan(obj=json_obj)
                data.append(obj)
            except Exception as e:
                logger.warning(f"JSON data could not be parsed")
                logger.warning(e)
        except Exception:  # was a bare `except:` — do not swallow KeyboardInterrupt/SystemExit
            error_count += 1
            continue
    if error_count > 0:
        logging.error(f"(unknown) - Errors occurred during loading of data: {error_count}")
    return data


if __name__ == "__main__":
    configFileName = "../../config/config.default.json"
    loaded_config = load_config(default_config=configFileName)
    logConfig = loaded_config["logging"]
    logging.basicConfig(
        level=logConfig["log_level"],
        format=logConfig["log_fstr_std"],
        datefmt=logConfig["log_date_formt"]
    )
    pass
Magnus1990P/shodan_extractor
src/ShodanExtractor/common.py
common.py
py
7,089
python
en
code
0
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 13, "usage_type": "call" }, { "api_name": "c99api.EndpointClient", "line_number": 17, "usage_type": "name" }, { "api_name": "logging.info", "line_number": 22, "usage_type": "call" }, { "api_name": "logging.info", ...
2026787329
import csv
import matplotlib.pyplot as plt

# Read the gold CSV and plot the third column against the first.
# Use a context manager so the file handle is always closed (the original
# opened the file and never closed it); drop the unused Zb / lenght_l locals.
with open('../gold.csv', 'r', encoding='UTF-8') as file:  # open the csv file
    data = list(csv.reader(file))  # materialize all rows

x = [row[0] for row in data]  # first column -> x values
y = [row[2] for row in data]  # third column -> y values

plt.plot(x, y)  # draw the line chart
plt.show()  # display it
Nienter/mypy
personal/gold.py
gold.py
py
775
python
zh
code
0
github-code
6
[ { "api_name": "csv.reader", "line_number": 10, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name" }, { "api_name": "matplotlib.pyplot...
39824039269
import random
import numpy as np
from scipy.stats import entropy


class CNF:
    """A CNF formula read from a DIMACS file or built from a clause list.

    Provides structural feature extractors (size, incidence-graph degrees,
    variable co-occurrence degrees, polarity balance, Horn statistics)
    commonly used to characterize SAT instances.
    """

    def __init__(self, path=None, clauses=None):
        """Build from a DIMACS `path`, or directly from `clauses`."""
        if path:
            with open(path, 'r') as handle:
                raw = handle.read().split('\n')
            # Skip preamble until the problem line ("p cnf <vars> <clauses>").
            header = 0
            while raw[header][0] != 'p':
                header += 1
            problem = raw[header].split()
            self.n = int(problem[2])
            self.variables = list(range(1, self.n + 1))
            self.m = int(problem[3])
            # Each clause line carries a trailing 0 terminator, dropped here.
            self.clauses = [
                [int(tok) for tok in raw[header + 1 + offset].split()[:-1]]
                for offset in range(self.m)
            ]
        else:
            seen = {abs(lit) for cl in clauses for lit in cl}
            self.n = len(seen)
            self.variables = list(seen)
            self.m = len(clauses)
            self.clauses = clauses

    def get_size(self):
        """Return [num_clauses, num_variables, clause/variable ratio]."""
        ratio = float(self.m) / self.n
        return [self.m, self.n, ratio]

    def get_vc(self):
        """Degree statistics of the bipartite variable-clause incidence graph.

        Returns mean, coefficient of variation, min, max and entropy for
        variable-node degrees, then the same five values for clause widths.
        """
        incident = {v: set() for v in self.variables}
        for idx, clause in enumerate(self.clauses):
            for lit in clause:
                incident[abs(lit)].add(idx)
        degrees = np.array([len(incident[v]) for v in incident])
        deg_proba = np.unique(degrees, return_counts=True)[1] / float(len(degrees))
        deg_entropy = entropy(list(deg_proba))

        widths = np.array([len({abs(lit) for lit in clause}) for clause in self.clauses])
        width_proba = np.unique(widths, return_counts=True)[1] / float(len(widths))
        width_entropy = entropy(list(width_proba))

        return [degrees.mean(), degrees.std() / degrees.mean(),
                degrees.min(), degrees.max(), deg_entropy,
                widths.mean(), widths.std() / widths.mean(),
                widths.min(), widths.max(), width_entropy]

    def get_v(self):
        """Degree statistics of the variable co-occurrence graph."""
        neighbours = {v: set() for v in self.variables}
        for clause in self.clauses:
            for a in clause:
                for b in clause:
                    if abs(b) != abs(a):
                        neighbours[abs(a)].add(abs(b))
        deg = np.array([len(neighbours[v]) for v in neighbours])
        return [deg.mean(), deg.std() / deg.mean(), deg.min(), deg.max()]

    def get_balance(self):
        """Positive-literal ratios per clause and per variable, plus the
        fractions of binary and ternary clauses."""
        clause_ratios = []
        for clause in self.clauses:
            positives = sum(1 for lit in clause if lit > 0)
            clause_ratios.append(float(positives) / len(clause))
        clause_np = np.array(clause_ratios)
        clause_proba = np.unique(clause_np, return_counts=True)[1] / float(len(clause_np))
        clause_entropy = entropy(list(clause_proba))

        counts = {v: [0, 0] for v in self.variables}  # var -> [pos, neg]
        for clause in self.clauses:
            for lit in clause:
                if lit > 0:
                    counts[abs(lit)][0] += 1
                else:
                    counts[abs(lit)][1] += 1
        var_ratios = [float(counts[v][0]) / (counts[v][0] + counts[v][1]) for v in counts]
        var_np = np.array(var_ratios)
        var_proba = np.unique(var_np, return_counts=True)[1] / float(len(var_np))
        var_entropy = entropy(list(var_proba))

        binary = sum(1 for clause in self.clauses if len(clause) == 2)
        ternary = sum(1 for clause in self.clauses if len(clause) == 3)

        return [clause_np.mean(), clause_np.std() / clause_np.mean(), clause_entropy,
                var_np.mean(), var_np.std() / var_np.mean(),
                var_np.min(), var_np.max(), var_entropy,
                float(binary) / self.m, float(ternary) / self.m]

    def get_horn(self):
        """Fraction of Horn clauses and per-variable Horn-membership stats."""
        horn_clauses = 0
        membership = {v: 0 for v in self.variables}
        for clause in self.clauses:
            positives = sum(1 for lit in clause if lit > 0)
            if positives <= 1:  # Horn: at most one positive literal
                horn_clauses += 1
                for lit in clause:
                    membership[abs(lit)] += 1
        member_np = np.array([membership[v] for v in membership])
        member_proba = np.unique(member_np, return_counts=True)[1] / float(len(member_np))
        member_entropy = entropy(list(member_proba))
        return [float(horn_clauses) / self.m,
                member_np.mean(), member_np.std() / member_np.mean(),
                member_np.min(), member_np.max(), member_entropy]

    def get_features(self):
        """Concatenate all feature groups into one flat list (33 values)."""
        return (self.get_size() + self.get_vc() + self.get_v()
                + self.get_balance() + self.get_horn())

    def set_var(self, var=None):
        """Assign literal `var` true (random literal if omitted) and return
        the simplified formula as a new CNF instance."""
        if not var:
            var = random.choice(self.variables + [-i for i in self.variables])
        # Satisfied clauses disappear; the falsified literal is removed.
        reduced = [
            [lit for lit in clause if lit != -var]
            for clause in self.clauses
            if var not in clause
        ]
        return CNF(clauses=reduced)
mosin26/master_thesis
cnf.py
cnf.py
py
5,685
python
en
code
0
github-code
6
[ { "api_name": "numpy.array", "line_number": 38, "usage_type": "call" }, { "api_name": "numpy.unique", "line_number": 39, "usage_type": "call" }, { "api_name": "scipy.stats.entropy", "line_number": 40, "usage_type": "call" }, { "api_name": "numpy.array", "line_...
26703437835
"""Replay viewer for recorded Bombangerman games: renders the recorded game
state with pygame, sensor data plots with matplotlib, and optional player
face captures with OpenCV."""
import argparse
from math import sqrt, floor, ceil
from os import listdir
import io
from Player import Player
import pygame
import time
from View import View
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2

BUTTONPRESS_CSV = "buttonpress.csv"
GAME_CSV = "game.csv"
FACE_FILES = ["faces_0.txt", "faces_1.txt"]


class DummyClock:
    """Stand-in for pygame's Clock so View.draw_game gets fixed timings."""

    def __init__(self):
        pass

    def get_fps(self):
        # Pretend we always run at 60 fps.
        return 60

    def get_rawtime(self):
        return 1

    def get_time(self):
        return 1


class GameReplay:
    """Loads a replay directory and plays back the recorded game.

    Expects `game.csv` (event stream), optionally `buttonpress.csv`
    (timestamps), per-sensor CSV files (plotted), and `faces_0/1.txt`
    (48x48 grayscale face frames per player).
    """

    def __init__(self, replay_dir):
        # Mutable game state mirrored from the event stream.
        self.field = None
        self.players = []
        self.boxes = set()
        self.bombs = dict()
        self.explosions = set()
        self.inactive_traps = dict()
        self.active_traps = set()
        self.falling_boxes = dict()
        self.crushing_boxes = dict()
        self.power_ups = dict()
        self.active_taunts = dict()
        # 15x16 tiles at 32 px each — must match the recorded game's view.
        self.view = View(32 * 15, 32 * 16, "Replay Bombangerman")
        self.plots = []
        self.vlines = []
        self.replay_dir = "../replays/" + replay_dir
        # One open handle and one preloaded (timestamp, 48x48 frame) per player.
        self.face_replay_file_handles = [None,None]
        self.next_faces = [[0,np.zeros((48,48))],[0,np.zeros((48,48))]]
        plt.ion()
        plt.show()
        self.figure = plt.figure()
        self.figure.autofmt_xdate()
        plt.xticks([])
        plt.yticks([])
        self.last_timestamp = None
        self.first_timestamp = None
        replay_files = listdir(self.replay_dir)
        # Determine the replay's time window from the game event stream
        # (line 3 is the first event line; see replay() which starts at frame 3).
        if GAME_CSV in replay_files:
            with open(self.replay_dir + "/" + GAME_CSV) as f:
                lines = f.readlines()
                self.first_timestamp = float(lines[3].split(";")[0])
                self.last_timestamp = float(lines[-1].split(";")[0])
        # plot data files
        data_files = [f for f in replay_files if f not in [GAME_CSV, BUTTONPRESS_CSV] + FACE_FILES]
        self.nr_plots = len(data_files)
        for file in data_files:
            print("FILENAME:", file)
            self.replay_data(file)
        # Faces Replay display setup
        # Yes, this has constantly open file handles. but this is read only, so we will prolly get away with it.
        # Here, we preload the first entry in those files
        for i,filename in enumerate(FACE_FILES):
            if filename in replay_files:
                f = open(self.replay_dir + "/" + filename)
                self.face_replay_file_handles[i] = f
                print("Opened Face Data File for Player", i)
                self.load_next_image_and_timestamp(i,f)
        # prepare 2 windows if 2 players have replay data here
        for i,h in enumerate(self.face_replay_file_handles):
            if h is not None:
                cv2.namedWindow("Player " + str(i))
        # buttonpress: mark each recorded press as a blue vline in every plot
        if BUTTONPRESS_CSV in replay_files:
            try:
                with open(self.replay_dir + "/" + BUTTONPRESS_CSV) as f:
                    content = f.readlines()
                    bps = [float(x) for x in content]
                    for b in bps:
                        for plot in self.plots:
                            plot.axvline(x=b, c="b")
            except FileNotFoundError:
                print(BUTTONPRESS_CSV + " not found")
        # game replay: first three lines are field layout, trap list and
        # initial player data (eval'd — the file is trusted local output).
        if GAME_CSV in replay_files:
            with open(self.replay_dir + "/" + GAME_CSV) as f:
                for i, line in enumerate(f):
                    if i == 0:
                        self.field = eval(line)
                    elif i == 1:
                        for x, y, ticks in eval(line):
                            self.inactive_traps[(x, y)] = [ticks, ticks]
                    elif i == 2:
                        player_data = eval(line)
                        self.update_player_data(player_data)
                    else:
                        break

    def replay_data(self, filename):
        """Load one sensor CSV (time;player0;player1), clip it to the game's
        time window and add it as a subplot with a red time-cursor vline."""
        content = pd.read_csv(self.replay_dir + "/" + filename, delimiter=";", names=["time", "player0", "player1"], dtype="float")
        if self.last_timestamp is not None:
            content = content[content["time"] <= self.last_timestamp]
        if self.first_timestamp is not None:
            content = content[content["time"] >= self.first_timestamp]
        content = content.fillna(method='ffill').dropna()
        content = content.sort_values('time', axis=0)
        plot_nr = len(self.plots) + 1
        # Arrange all subplots in a roughly square grid.
        nr_rows = ceil(sqrt(self.nr_plots))
        nr_columns = ceil(sqrt(self.nr_plots))
        plot = self.figure.add_subplot(nr_rows, nr_columns, plot_nr)
        plot.title.set_text(filename)
        self.plots.append(plot)
        content.plot(x="time", ax=plot)
        vline = plot.axvline(x=0, c="r")
        self.vlines.append(vline)

    def new_plot(self, x, ys, title):
        """Add a subplot of several y-series over a shared x axis with a
        red time-cursor vline. (Helper; not called in this module.)"""
        plot_nr = len(self.plots) + 1
        nr_rows = floor(sqrt(self.nr_plots))
        nr_columns = ceil(sqrt(self.nr_plots))
        plot = self.figure.add_subplot(nr_rows, nr_columns, plot_nr)
        plot.title.set_text(title)
        self.plots.append(plot)
        for y in ys:
            plot.plot(x, y)
        vline = plot.axvline(x=x[0], c="r")
        self.vlines.append(vline)

    def replay(self):
        """Main playback loop: applies one event line per frame, updates
        the plots' time cursors, shows face frames and redraws the view.

        NOTE(review): re-opens and re-scans game.csv from the start for every
        frame — O(n^2) over the file, presumably acceptable for replays.
        """
        clock = DummyClock()
        run = True
        frame = 3  # event lines start after the three header lines
        last_timestamp = None
        while run:
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.QUIT:
                    run = False
                if event.type == pygame.K_ESCAPE:
                    run = False
            with open(self.replay_dir + "/" + GAME_CSV) as f:
                for i, line in enumerate(f):
                    if i == frame:
                        if line == "\n":  # no idea why but needed in windows
                            continue
                        # Line format: "<timestamp>;<event list>"
                        timestamp = eval(line.split(";")[0])
                        events = eval(line.split(";")[1])
                        self.handle_events(events)
                    elif i < frame:
                        continue
                    else:
                        break
            # Only refresh matplotlib every 30 frames (it is slow).
            if frame % 30 == 0:
                for vline in self.vlines:
                    vline.set_xdata(timestamp)  # TODO what's this line for?
                plt.pause(1e-10)
            if not last_timestamp is None:
                # Sleep the recorded frame gap, sped up 5x.
                time.sleep((timestamp - last_timestamp)/5)
            last_timestamp = timestamp
            # intermission: draw player face when present
            for i,h in enumerate(self.face_replay_file_handles):
                if h is not None:
                    if timestamp >= self.next_faces[i][0]:
                        img = self.next_faces[i][1] / 255.0
                        img = cv2.resize(img, dsize=(48*3, 48*3), interpolation=cv2.INTER_NEAREST)
                        cv2.imshow("Player " + str(i), img)
                        self.load_next_image_and_timestamp(i,h)
            # intermission end
            self.update_counters()
            self.view.draw_game(self.field, self.boxes, self.inactive_traps, self.active_traps, self.power_ups, self.bombs, self.explosions, self.falling_boxes, self.crushing_boxes, self.players, self.active_taunts, 0, clock)
            self.view.update()
            frame += 1
        pygame.quit()

    def update_counters(self):
        """ Updates all tick counters for the client """
        for bomb in self.bombs:
            self.bombs[bomb] -= 1
        for x, y in self.inactive_traps:
            data = self.inactive_traps[(x, y)]
            data[1] -= 1  # [max_ticks, remaining_ticks]
        for falling_box in self.falling_boxes:
            self.falling_boxes[falling_box] -= 1
        for crushing_box in self.crushing_boxes:
            self.crushing_boxes[crushing_box] -= 1
        # Expired taunts are collected first, then removed (cannot delete
        # from a dict while iterating it).
        remove = []
        for id, ticks in self.active_taunts.items():
            if ticks <= 0:
                remove.append(id)
            else:
                self.active_taunts[id] -= 1
        for id in remove:
            del self.active_taunts[id]

    def update_player_data(self, player_data):
        """Create or update Player objects from a list of state dicts
        (keys: id, x, y, l=lifes, b=bombs, p=power)."""
        while len(self.players) < len(player_data):
            self.players.append(None)
        for d in player_data:
            pid = d["id"]
            x = d["x"]
            y = d["y"]
            lifes = d["l"]
            bombs = d["b"]
            power = d["p"]
            player = self.players[pid] if len(self.players) > pid else None
            if player == None:
                self.players[pid] = Player(pid, x, y, lifes, power, bombs)
            else:
                player.x = x
                player.y = y
                player.lifes = lifes
                player.bombs = bombs
                player.power = power

    def handle_events(self, events):
        """Apply one frame's (type, data) event tuples to the game state.

        The numeric event type codes mirror the recorder's protocol; each
        branch is labeled with the event name it decodes.
        """
        for type, data in events:
            if type == 0:  # GENERIC
                pass
            elif type == 1:  # PLAYER_INIT
                # unused
                pass
            elif type == 2:  # PLAYER_MORTAL
                self.players[data["id"]].immortal = False
            elif type == 3:  # PLAYER_DAMAGED
                self.players[data["id"]].lifes -= data["dmg"]
                self.players[data["id"]].immortal = True
            elif type == 4:  # PLAYER_MOVED
                p = self.players[data["id"]]
                p.x = data["x"]
                p.y = data["y"]
                p.facing = data["f"]
            elif type == 12:  # PLAYER_NOT_SLIMEY
                self.players[data["id"]].slimey = False
            elif type == 13:  # PLAYER_SLIMED
                self.players[data["id"]].slimey = True
            elif type == 5:  # SPAWN_BOX
                self.boxes.add((data["x"], data["y"]))
            elif type == 6:  # SPAWN_BOMB
                self.bombs[(data["x"], data["y"])] = data["t"]  # x,y -> ticks
            elif type == 7:  # SPAWN_EXPLOSION
                self.explosions.add((data["x"], data["y"]))
            elif type == 8:  # UPDATE_TRAP
                # unused
                pass
            elif type == 9:  # REMOVE_BOX
                self.boxes.discard((data["x"], data["y"]))
            elif type == 10:  # REMOVE_BOMB
                self.bombs.pop((data["x"], data["y"]), None)
            elif type == 11:  # REMOVE_EXPLOSION
                self.explosions.discard((data["x"], data["y"]))
            elif type == 15:  # SPAWN_FALLING_BOX
                self.falling_boxes[(data["x"], data["y"])] = data["t"]  # x,y -> ticks
            elif type == 16:  # REMOVE_FALLING_BOX
                self.falling_boxes.pop((data["x"], data["y"]), None)
            elif type == 17:  # SPAWN_CRUSHING_BOX
                self.crushing_boxes[(data["x"], data["y"])] = data["t"]
            elif type == 18:  # REMOVE_CRUSHING_BOX
                self.crushing_boxes.pop((data["x"], data["y"]), None)
            elif type == 19:  # PLAYER_TAUNT
                if data["id"] not in self.active_taunts:
                    self.active_taunts[data["id"]] = data["t"]
            elif type == 20:  # SPAWN_POWER_UP
                self.power_ups[(data["x"], data["y"])] = data["t"]  # type
            elif type == 21:  # REMOVE_POWER_UP
                self.power_ups.pop((data["x"], data["y"]), None)
            elif type == 22:  # ANGER_INFO
                self.players[0].set_anger(data["0"])
                self.players[1].set_anger(data["1"])
            elif type == 23:  # ACTIVATE_TRAP
                self.inactive_traps.pop((data["x"], data["y"]), None)
                self.active_traps.add((data["x"], data["y"]))
            elif type == 24:  # RESET_TRAP
                self.inactive_traps[(data["x"], data["y"])] = [data["t"], data["t"]]  # [max_ticks, remaining_ticks]
                self.active_traps.discard((data["x"], data["y"]))
            elif type == 25:  # PLAYER_INVERT_KEYBOARD_ON
                self.players[data["id"]].inverted_keyboard = True
            elif type == 26:  # PLAYER_INVERT_KEYBOARD_OFF
                self.players[data["id"]].inverted_keyboard = False
            elif type == 27:  # PLAYER_CHANGE_BOMBS_COUNT
                self.players[data["id"]].bombs = data["b"]
            elif type == 28:  # PLAYER_CHANGE_POWER_AMOUNT
                self.players[data["id"]].power = data["p"]
            elif type == 29:  # PLAYER_AUTOWALK_ON
                self.players[data["id"]].autowalk = True
            elif type == 30:  # PLAYER_AUTOWALK_OFF
                self.players[data["id"]].autowalk = False

    def load_next_image_and_timestamp(self, player_id, opened_handle):
        """Read the next face record (one timestamp line, 48 pixel rows,
        one blank separator line) into self.next_faces[player_id].

        On EOF the handle is closed and cleared so the player's face
        window stops updating.
        """
        f = opened_handle
        timestamp = f.readline()
        if timestamp == "":
            self.face_replay_file_handles[player_id] = None
            f.close()
            return
        else:
            # print([timestamp])
            timestamp = float(timestamp)
            self.next_faces[player_id][0] = timestamp
        image_data = []
        for _ in range(48):
            line = f.readline()
            if line == "":
                self.face_replay_file_handles[player_id] = None
                f.close()
                return
            image_data.append(line.strip())
        print(len(image_data))
        # Rebuild the 48-row block as a text matrix and parse it with numpy.
        image_data = "\n".join(image_data)
        image_data = io.StringIO(initial_value=image_data.strip() + "\n")
        img = np.loadtxt(image_data)
        self.next_faces[player_id][1] = img
        # Records are separated by an empty line; warn if it is missing.
        line = f.readline().strip()
        if line != "":
            print(line)
            print("ERROR: Wanted to jump empty line but was not empty")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--replay_dir", help="The directory containing the replays for this run.", type=str)
    args = vars(parser.parse_args())
    gr = GameReplay(**args)
    gr.replay()
LanyK/TheAngerGames
bombangerman/client/GameReplay.py
GameReplay.py
py
14,488
python
en
code
1
github-code
6
[ { "api_name": "View.View", "line_number": 46, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 51, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.ion", "line_number": 53, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "...
34730801336
import logging
import json
import os
import requests

logger = logging.getLogger()
logger.setLevel(logging.INFO)


def lambda_handler(inputPayload, context):
    """AWS Lambda entry point: forward a GitHub issue-created webhook to Slack.

    Parameters
    ----------
    inputPayload : dict
        GitHub webhook event; the issue link is expected at
        inputPayload['issue']['html_url'].
    context : object
        Lambda context object (unused).

    Returns
    -------
    str
        A JSON string with 'statusCode' and 'body' keys, for both the
        error and the success path.  (The original code returned the raw
        ``requests.Response`` object on success, which the Lambda runtime
        cannot JSON-serialize.)
    """
    slack_link = os.environ['SLACK_URL']
    try:
        url = inputPayload['issue']['html_url']
    except (KeyError, TypeError) as e:
        # Payload did not have the expected webhook shape.
        logger.error(e)
        # return a 500 error code
        return json.dumps({'statusCode': 500, 'body': f'Error: {e}'})
    reply = {'text': f"Issue Created: {url}"}
    res = requests.post(slack_link, json=reply)
    # Mirror the error path: return a serializable JSON string, not the
    # Response object itself.
    return json.dumps({'statusCode': res.status_code, 'body': res.text})
ByteOfKathy/esep-webhooks
lambda_function.py
lambda_function.py
py
536
python
en
code
0
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 6, "usage_type": "call" }, { "api_name": "logging.INFO", "line_number": 7, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 10, "usage_type": "attribute" }, { "api_name": "json.dumps", "l...
24860820161
# -*- coding: utf-8 -*-
"""
Mini project 1
Dennis Brown, COMP6636, 03 MAR 2021

k-nearest-neighbors and perceptron classifiers, evaluated on the libsvm
'iris.scale' and 'a4a' datasets.  Data convention throughout: samples are
rows, column 0 holds the classification label, remaining columns hold
features.  Plots are saved as PNG files in the working directory.
"""

import numpy as np
import copy
import matplotlib.pyplot as plt


def libsvm_scale_import(filename):
    """
    Read data from a libsvm .scale file.

    Returns a (num_samples, max_feature_id + 1) array: label in
    column 0, feature values at their 1-based feature-id columns
    (features absent from a line stay 0).
    """
    datafile = open(filename, 'r')

    # First pass: get dimensions of data
    num_samples = 0
    max_feature_id = 0
    for line in datafile:
        num_samples += 1
        tokens = line.split()
        for feature in tokens[1:]:
            feature_id = int(feature.split(':')[0])
            max_feature_id = max(feature_id, max_feature_id)

    # Second pass: read data into array
    data = np.zeros((num_samples, max_feature_id + 1))
    curr_sample = 0
    datafile.seek(0)
    for line in datafile:
        tokens = line.split()
        data[curr_sample][0] = float(tokens[0])
        for feature in tokens[1:]:
            feature_id = int(feature.split(':')[0])
            feature_val = float(feature.split(':')[1])
            data[curr_sample][feature_id] = feature_val
        curr_sample += 1

    datafile.close()

    print('LOADED:', filename, ':', data.shape)

    return data


def get_neighbors(data, test_sample, num_neighbors):
    """
    Given training data, a test sample, and a number of neighbors,
    return the indices (into data) of the num_neighbors closest
    training samples by Euclidean distance over the feature columns.
    """
    # Calculate all distances from the training samples
    # to this test sample. Collect index, distance into a list.
    indices_and_distances = list()
    for i in range(len(data)):
        dist = np.linalg.norm(test_sample[1:] - (data[i])[1:]) # leave out classification at pos 0
        indices_and_distances.append([i, dist])

    # Sort list by distance
    indices_and_distances.sort(key=lambda _: _[1])

    # Make a list of requested number of closest neighbors from sorted
    # list of indices+distances
    neighbors = list()
    for i in range(num_neighbors):
        neighbors.append(indices_and_distances[i][0])

    return neighbors


def classify_one_sample(data, test_sample, num_neighbors):
    """
    Given training data, a test sample, and a number of neighbors,
    predict which classification the test sample belongs to
    (majority vote among the nearest neighbors; ties resolved by
    max() over the set of candidate labels).
    """

    # Get closest neighbors
    neighbors = get_neighbors(data, test_sample, num_neighbors)

    # Create list of classifications of the neighbors
    classifications = list()
    for i in range(len(neighbors)):
        classifications.append(data[neighbors[i]][0]) # 0 = classification

    # Return the most common classification of the neighbors
    prediction = max(set(classifications), key = classifications.count)

    return prediction


def k_nearest_neighbors(data, test_samples, num_neighbors):
    """
    Given sample data (samples are rows, columns features, and samples
    have classifications in position 0), test data, and a number of
    neighbors, predict which classification each test sample belongs
    to.  Returns the list of predicted labels, one per test sample.
    """
    classifications = list()
    for i in range(len(test_samples)):
        output = classify_one_sample(data, test_samples[i], num_neighbors)
        classifications.append(output)
        # Progress indicator every 20 samples (classification is slow)
        if ((i % 20) == 0): print('\rknn test sample', i, end='')
    print()

    return(classifications)


def check_knn_classifications(y, y_hat):
    """
    Given actual values y and classifications y_hat, return
    the number of errors (positions where they disagree).
    """
    errors = 0
    for i in range(len(y)):
        if (y[i] != y_hat[i]):
            errors += 1

    return errors


def train_perceptron(data, beta, step_limit):
    """
    Perceptron. Given a set of data (samples are rows, columns
    features, and samples have classifications in position 0),
    a step size (beta), and a step limit, train and return a
    weight vector that can be used to classify the given data.

    Labels are assumed to be +1/-1.  'steps' counts per-sample
    updates, not epochs.  w[0] acts as the bias weight.
    """

    # Initialize the weight vector including bias element
    w = np.zeros(len(data[0]))

    # Initialize y_hat
    y_hat = np.zeros(len(data))

    # Slice off y
    y = data[:,0]

    # Repeat the main loop until we have convergence or reach the
    # iteration limit
    steps = 0
    converged = False
    while(not(converged) and (steps < step_limit)):
        converged = True

        # For each sample in the data, calculate w's classification error
        # and update w.
        for i in range(len(data)):
            # Replace classification in sample[0] with a 1 to allow
            # for a biased weight vector
            biased_sample = np.copy(data[i])
            biased_sample[0] = 1

            # Get prediction and error, then update weight vector
            y_hat[i] = 1 if (np.matmul(w.T, biased_sample) > 0) else -1
            error = y[i] - y_hat[i]
            w += biased_sample * error * beta
            steps += 1

            # If error on this element is > a very small value, we have
            # not converged.
            if (abs(error) > 0.000001):
                converged = False

    print('Perceptron:' ,steps, 'steps; converged?', converged)

    return w


def multiclass_train_perceptron(data, beta, step_limit):
    """
    Perceptron. Given a set of data (samples are rows, columns
    features, and samples have classifications in position 0),
    a step size (beta), and a step limit, train and return a
    weight vector that can be used to classify the given data.
    This version works on data with multiple classes by one-vs-rest:
    it returns one weight vector per unique class, in the order the
    classes first appear in the data.
    """
    # Find unique classes
    classes = []
    for i in range(data.shape[0]):
        if (not(data[i][0] in classes)):
            classes.append(data[i][0])

    # For each classification, train perceptron on current class vs.
    # all the other classes.
    ws = []
    curr_data = copy.deepcopy(data)
    for curr_class in range(len(classes)):
        # Save original classification data
        orig_classes = copy.deepcopy(curr_data[:,0])

        # Reset classification data to 1 (for current class) or -1 for other
        for i in range(curr_data.shape[0]):
            if (curr_data[i][0] == classes[curr_class]):
                curr_data[i][0] = 1
            else:
                curr_data[i][0] = -1

        # Train and find weights
        ws.append(train_perceptron(curr_data, beta, step_limit))

        # Put original classifications back
        for i in range(curr_data.shape[0]):
            curr_data[i][0] = orig_classes[i]

    return ws


def test_perceptron(data, w):
    """
    Given test data and a weight vector w, return number of
    misclassifications when classifying the test data using the
    weights.  Labels are assumed to be +1/-1.
    """
    errors = 0

    # Initialize y_hat
    y_hat = np.zeros(len(data))

    # Slice off y
    y = data[:,0]

    # Determine how weights classify each test sample and count
    # misclassifications
    for i in range(len(data)):
        biased_sample = np.copy(data[i])
        biased_sample[0] = 1
        y_hat[i] = 1 if (np.matmul(w.T, biased_sample) > 0) else -1
        if (y[i] != y_hat[i]):
            errors += 1

    return errors


def multiclass_test_perceptron(data, ws):
    """
    Given test data and a list of per-class weight vectors ws (as
    produced by multiclass_train_perceptron), return the number of
    misclassifications for each class.
    This version works on data with multiple classes by One vs. All (OVA).
    """
    # Find unique classes
    classes = []
    for i in range(data.shape[0]):
        if (not(data[i][0] in classes)):
            classes.append(data[i][0])

    # For each classification, test perceptron on current class vs.
    # all the other classes.
    errors = []
    curr_data = copy.deepcopy(data)
    for curr_class in range(len(classes)):
        # Save original classification data
        orig_classes = copy.deepcopy(curr_data[:,0])

        # Reset classification data to 1 (for current class) or -1 for other
        for i in range(curr_data.shape[0]):
            if (curr_data[i][0] == classes[curr_class]):
                curr_data[i][0] = 1
            else:
                curr_data[i][0] = -1

        # Test using this class's weight vector
        errors.append(test_perceptron(curr_data, ws[curr_class]))

        # Put original classifications back
        for i in range(curr_data.shape[0]):
            curr_data[i][0] = orig_classes[i]

    return errors


def iris_knn():
    """
    Run kNN on the iris dataset for the various numbers of neighbors
    and save an error-rate-vs-k plot to iris_knn.png.
    """
    print("----------\niris kNN")

    # Load data
    data = libsvm_scale_import('data/iris.scale')

    # Shuffle the data because we want to split it into train & test,
    # and it is pre-sorted (we would test against classes we didn't
    # see in training)
    np.random.seed(1) # ensure consistent shuffling
    np.random.shuffle(data)

    # Split up data into training and test data based on split value
    split = 50
    train_data = data[:split]
    test_data = data[split:]

    # Test multiple values of k
    test_ks = np.arange(1, split)
    error_rates = np.zeros(test_ks.shape[0])
    for i in range(len(test_ks)):
        # Classify the test data
        print('Classify with k =', test_ks[i])
        classifications = k_nearest_neighbors(train_data, test_data, test_ks[i])

        # Check accuracy
        errors = check_knn_classifications(test_data[:,0], classifications)
        error_rates[i] = errors / test_data.shape[0]
        print(errors, 'errors in', test_data.shape[0], 'samples')

    print('ks:', test_ks)
    print('error rates:', error_rates)
    plt.clf()
    plt.plot(test_ks, error_rates, marker='.')
    plt.title('Iris kNN: error rate vs. k')
    plt.xlabel('k')
    plt.ylabel('error rate')
    plt.xlim(left = 0)
    plt.ylim(bottom = 0)
    plt.grid(True)
    plt.savefig('iris_knn.png', dpi = 600)


def iris_perceptron():
    """
    Run one-vs-rest Perceptron on the iris dataset; print per-class
    errors and save a per-class feature-weight bar chart to
    iris_weightsN.png.
    """
    print("----------\niris Perceptron")

    # Load data
    data = libsvm_scale_import('data/iris.scale')

    # Shuffle the data because we want to split it into train & test,
    # and it is pre-sorted (we would test against classes we didn't
    # see in training)
    np.random.seed(1) # ensure consistent shuffling
    np.random.shuffle(data)

    # Split up data into training and test data based on split value
    split = 50
    train_data = data[:split]
    test_data = data[split:]

    # Perform multi-class training and test and collect
    # a weight vector and number of errors for each class
    ws = multiclass_train_perceptron(train_data, 0.1, 100000)
    errors = multiclass_test_perceptron(test_data, ws)

    # Report errors
    print(errors, 'errors in', test_data.shape[0], 'samples')

    # Show sorted weights for every class
    for i in range(len(ws)):
        # Drop the bias weight at position 0; remaining weights map
        # 1:1 to feature ids
        w = list(ws[i][1:])
        feature_ids = range(1, len(w) + 1)
        print('W:', w)
        labels = []
        for id in feature_ids:
            labels.append(str(int(id)))

        # Report top weights
        plt.clf()
        plt.bar(labels, w)
        plt.title('iris Perceptron: feature weights for class = ' + str(i+1))
        plt.xlabel('feature ID')
        plt.ylabel('weight')
        plt.grid(True)
        plt.savefig('iris_weights' + str(i+1) + '.png', dpi = 600)


def a4a_knn():
    """
    Run kNN on the a4a dataset for various numbers of neighbors and
    save an error-rate-vs-k plot to a4a_knn.png.
    """
    print("----------\na4a kNN")

    # Load data
    train_data = libsvm_scale_import('data/a4a')
    test_data = libsvm_scale_import('data/a4a.t')

    # Training data has 1 fewer feature than test data, so add a column
    # of zeros to it so samples have same number of features in train and test
    zero_col = np.zeros((len(train_data), 1))
    train_data = np.hstack((train_data, zero_col))

    # Test multiple values of k
    # This takes over 3 hours to run on my fastest computer.
    test_ks = np.array([1, 3, 5, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101,
                        201, 301, 401, 501, 601, 701, 801, 901, 1001])
    error_rates = np.zeros(len(test_ks))
    for i in range(len(test_ks)):
        print('Classify with k =', test_ks[i])

        # Classify the test data
        classifications = k_nearest_neighbors(train_data, test_data, test_ks[i])

        # Check accuracy
        errors = check_knn_classifications(test_data[:,0], classifications)
        error_rates[i] = errors / test_data.shape[0]
        print(errors, 'errors in', test_data.shape[0], 'samples')

    print('ks:', test_ks)
    print('error rates:', error_rates)
    plt.clf()
    plt.plot(test_ks, error_rates, marker='.')
    plt.title('a4a kNN: error rate vs. k')
    plt.xlabel('k')
    plt.ylabel('error rate')
    plt.xlim(left = 0)
    plt.ylim(bottom = 0)
    plt.grid(True)
    plt.savefig('a4a_knn.png', dpi = 600)


def a4a_perceptron():
    """
    Run Perceptron on the a4a dataset over several step sizes; save an
    error-rate-vs-step-size plot (a4a_perceptron.png) and a chart of the
    20 largest-magnitude feature weights for the best step size
    (a4a_weights.png).
    """
    print("----------\na4a Perceptron")

    # Load data
    train_data = libsvm_scale_import('data/a4a')
    test_data = libsvm_scale_import('data/a4a.t')

    # Training data has 1 fewer feature than test data, so add a column
    # of zeros to it so samples have same number of features in train and test
    zero_col = np.zeros((len(train_data), 1))
    train_data = np.hstack((train_data, zero_col))

    # Test multiple values of beta
    test_betas = np.array([0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0])
    error_rates = np.zeros(test_betas.shape[0])
    ws = []
    best_beta = -1
    best_error_rate = 999999
    for i in range(len(test_betas)):
        print('Classify with beta =', test_betas[i])

        # Train and find weights
        ws.append(train_perceptron(train_data, test_betas[i], 100000))

        # Check accuracy
        errors = test_perceptron(test_data, ws[i])
        error_rates[i] = errors / test_data.shape[0]
        if (error_rates[i] < best_error_rate):
            best_error_rate = error_rates[i]
            best_beta = i
        print(errors, 'errors in', test_data.shape[0], 'samples')

    # Report error rates
    print('betas:', test_betas)
    print('error rates:', error_rates)
    plt.clf()
    plt.plot(test_betas, error_rates, marker='.')
    plt.title('a4a Perceptron: error rate vs. step size for 100000 iterations')
    plt.xscale('log')
    plt.xlabel('step size')
    plt.ylabel('error rate')
    plt.ylim(bottom = 0)
    plt.grid(True)
    plt.savefig('a4a_perceptron.png', dpi = 600)

    # Sort weights to find most important (by absolute value; bias
    # weight at position 0 is excluded)
    w = list(ws[best_beta][1:])
    feature_ids = range(1, len(w) + 1)
    bar_data = list(zip(feature_ids, w))
    bar_data.sort(key = lambda _: abs(_[1]), reverse = True)
    bar_data = np.array(bar_data[:20])
    labels = []
    for id in bar_data[:,0]:
        labels.append(str(int(id)))

    # Report top weights
    plt.clf()
    plt.bar(labels, bar_data[:,1])
    plt.title('a4a Perceptron: 20 most important features')
    plt.xlabel('feature ID')
    plt.ylabel('weight')
    plt.grid(True)
    plt.savefig('a4a_weights.png', dpi = 600)


def main():
    # Run every experiment in sequence (a4a kNN is very slow; see note
    # in a4a_knn).
    iris_knn()
    iris_perceptron()
    a4a_knn()
    a4a_perceptron()


if __name__ == '__main__':
    main()
dennisgbrown/classifiers-decision-trees-kNN-perceptron
MiniProj1.py
MiniProj1.py
py
15,120
python
en
code
0
github-code
6
[ { "api_name": "numpy.zeros", "line_number": 30, "usage_type": "call" }, { "api_name": "numpy.linalg.norm", "line_number": 57, "usage_type": "call" }, { "api_name": "numpy.linalg", "line_number": 57, "usage_type": "attribute" }, { "api_name": "numpy.zeros", "li...
44343755795
import sys
from itertools import combinations as comb

# Redirect stdin to the local input file so input() reads test data.
sys.stdin = open('input/20529.txt')
input = sys.stdin.readline


def d(A, B):
    """Psychological distance: number of differing letters between two MBTI types."""
    return sum(A[k] != B[k] for k in range(4))


T = int(input())
for _ in range(T):
    N = int(input())
    people = input().split()
    if len(people) > 32:
        # Pigeonhole: there are only 16 MBTI types, so more than 32 people
        # guarantees some type occurs 3+ times -> a triple with distance 0.
        print(0)
    else:
        best = 12  # maximum possible: 3 pairs x 4 differing letters
        for x, y, z in set(comb(people, 3)):
            total = d(x, y) + d(x, z) + d(y, z)
            best = min(best, total)
        print(best)
nayeonkinn/algorithm
baekjoon/[S1] 20529. 가장 가까운 세 사람의 심리적 거리.py
[S1] 20529. 가장 가까운 세 사람의 심리적 거리.py
py
493
python
en
code
0
github-code
6
[ { "api_name": "sys.stdin", "line_number": 3, "usage_type": "attribute" }, { "api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute" }, { "api_name": "itertools.combinations", "line_number": 17, "usage_type": "call" } ]
71359830907
from decimal import Decimal
import ffmpeg
import math
import gc


def get_aspect_ratio(width, height):
    """Return the reduced aspect ratio as a string, e.g. 1920x1080 -> '16x9'."""
    gcd = math.gcd(width, height)
    return f"{width // gcd}x{height // gcd}"


def get_raw_duration(video):
    """Compute the duration in seconds from frame count / average frame rate.

    Returns None when the required metadata is missing or malformed.
    """
    # ToDo: add other sources for NUMBER_OF_FRAMES => check some files
    try:
        if 'NUMBER_OF_FRAMES-eng' in video['tags'] and 'avg_frame_rate' in video:
            # 'avg_frame_rate' is a rational string such as '25/1' or
            # '30000/1001'.  Parse numerator/denominator properly; the old
            # code indexed the first two *characters*, which silently
            # produced a wrong fps for anything but 2-digit integer rates.
            num, _, den = video['avg_frame_rate'].partition('/')
            fps = int(num) / int(den or 1)
            if fps > 0:
                return int(video['tags']['NUMBER_OF_FRAMES-eng']) / fps
    except (KeyError, TypeError, ValueError, ZeroDivisionError):
        return None
    return None


def get_duration(video):
    """Return the tagged duration with the fractional seconds stripped, or None.

    Checks the language-specific DURATION tags first, then the generic one.
    """
    tags = video.get('tags', {})
    # could also search for any key containing 'DURATION' if more
    # variants show up in the wild
    for key in ('DURATION-eng', 'DURATION-de', 'DURATION'):
        if key in tags:
            return tags[key].split('.')[0]
    return None


def try_get_width(video):
    """Return the frame width, preferring 'width' over 'coded_width'; None if absent."""
    if 'width' in video:
        return video['width']
    return video.get('coded_width')


def try_get_height(video):
    """Return the frame height, preferring 'height' over 'coded_height'; None if absent."""
    if 'height' in video:
        return video['height']
    return video.get('coded_height')


def get_data(path):
    """Probe a media file with ffprobe and return a metadata summary dict.

    The returned dict contains: codec, other_codecs (list of dicts with
    name/type/language/title for the non-primary streams), bitrate in
    Mbit/s, duration, aspect_ratio ('0x0' when dimensions are unknown),
    dimensions, and raw.duration_raw.
    """
    media_file = str(path)

    # uses ffprobe command to extract all possible metadata from the media file
    probe = ffmpeg.probe(media_file)

    bitrate = 0.00
    # Guard against a missing bit_rate key: Decimal(None) would raise.
    if 'format' in probe and probe['format'].get('bit_rate') is not None:
        bitrate = round(
            Decimal(probe['format'].get('bit_rate')) / (1024 * 1024), 2)

    streams = probe["streams"]
    video = streams[0]
    codec = video['codec_name']

    # Collect name/type/language/title for every stream after the first.
    # ToDo: add FPS; could also add comma-separated audio/subtitle languages
    other_codecs = []
    for cd in streams[1:]:
        # Default to empty strings (the old code stored the *type* `str`
        # as a placeholder when a stream had no tags).
        codec_language = ''
        codec_title = ''
        if 'tags' in cd:
            codec_language = cd['tags'].get('language', '')
            codec_title = cd['tags'].get('title', '')
        other_codecs.append({"name": str(cd.get('codec_name', '')),
                             "type": cd.get('codec_type', ''),
                             "language": codec_language,
                             "title": codec_title})

    duration = get_duration(video)
    duration_raw = get_raw_duration(video)
    height = try_get_height(video)
    width = try_get_width(video)

    # might look for a better option => 16:9 - excel will convert this to datetime
    aspect_ratio = '0x0'
    if width is not None and height is not None:
        aspect_ratio = get_aspect_ratio(width, height)

    # clear data
    del streams, video
    gc.collect()

    return {"codec": codec, "other_codecs": other_codecs, "bitrate": bitrate,
            "duration": duration, "aspect_ratio": aspect_ratio,
            "dimensions": {"width": width, "height": height},
            "raw": {"duration_raw": duration_raw}}
bennischober/MetaDataScraper
src/media/read_media.py
read_media.py
py
3,958
python
en
code
0
github-code
6
[ { "api_name": "math.gcd", "line_number": 8, "usage_type": "call" }, { "api_name": "ffmpeg.probe", "line_number": 68, "usage_type": "call" }, { "api_name": "decimal.Decimal", "line_number": 72, "usage_type": "call" }, { "api_name": "gc.collect", "line_number": ...
5484857647
import ROOT import uproot from hipe4ml.tree_handler import TreeHandler import numpy as np import argparse import yaml import sys sys.path.append('utils') import utils as utils utils.set_style() kBlueC = ROOT.TColor.GetColor('#1f78b4') kOrangeC = ROOT.TColor.GetColor('#ff7f00') ROOT.gROOT.SetBatch() ## create signal extraction class class SignalExtraction: def __init__(self, input_data_hdl, input_mc_hdl=None): ## could be either a pandas or a tree handler self.data_hdl = input_data_hdl self.mc_hdl = input_mc_hdl self.is_3lh = True self.bins = 40 self.n_evts = 1e9 self.is_matter = False self.signal_fit_func = 'dscb' self.bkg_fit_func = 'pol1' self.performance = False self.additional_pave_text = '' ## additional text to be added to the ALICE performance pave ## variables self.pdf = None self.roo_dataset = None ### frames to be saved to file self.mc_frame_fit = None self.data_frame_fit = None self.local_pvalue_graph = None def process_fit(self, extended_likelihood=True, rooworkspace_path=None): if self.is_3lh: self.inv_mass_string = '#it{M}_{^{3}He+#pi^{-}}' if self.is_matter else '#it{M}_{^{3}#bar{He}+#pi^{+}}' decay_string = '{}^{3}_{#Lambda}H #rightarrow ^{3}He+#pi^{-}' if self.is_matter else '{}^{3}_{#bar{#Lambda}}#bar{H} #rightarrow ^{3}#bar{He}+#pi^{+}' tree_var_name = 'fMassH3L' else: self.inv_mass_string = '#it{M}_{^{4}He+#pi^{-}}' if self.is_matter else '#it{M}_{^{4}#bar{He}+#pi^{+}}' decay_string = '{}^{4}_{#Lambda}H #rightarrow ^{4}He+#pi^{-}' if self.is_matter else '{}^{4}_{#bar{#Lambda}}#bar{H} #rightarrow ^{4}#bar{He}+#pi^{+}' tree_var_name = 'fMassH4L' # define signal and bkg variables if self.is_3lh: mass = ROOT.RooRealVar('m', self.inv_mass_string, 2.96, 3.04, 'GeV/c^{2}') mu = ROOT.RooRealVar('mu', 'hypernucl mass', 2.97, 2.992, 'GeV/c^{2}') else: mass = ROOT.RooRealVar('m', self.inv_mass_string, 3.89, 3.97, 'GeV/c^{2}') mu = ROOT.RooRealVar('mu', 'hypernucl mass', 3.9, 3.95, 'GeV/c^{2}') sigma = ROOT.RooRealVar('sigma', 'hypernucl width', 0.001, 
0.004, 'GeV/c^{2}') a1 = ROOT.RooRealVar('a1', 'a1', 0, 5.) a2 = ROOT.RooRealVar('a2', 'a2', 0, 5.) n1 = ROOT.RooRealVar('n1', 'n1', 1, 5.) n2 = ROOT.RooRealVar('n2', 'n2', 1, 5.) c0 = ROOT.RooRealVar('c0', 'constant c0', -1., 1) c1 = ROOT.RooRealVar('c1', 'constant c1', -1., 1) if self.signal_fit_func == 'dscb': signal = ROOT.RooCrystalBall('cb', 'cb', mass, mu, sigma, a1, n1, a2, n2) elif self.signal_fit_func == 'gaus': signal = ROOT.RooGaussian('gaus', 'gaus', mass, mu, sigma) else: raise ValueError(f'Invalid signal fit function. Expected one of: dscb, gaus') # define background pdf if self.bkg_fit_func == 'pol1': background = ROOT.RooChebychev('bkg', 'pol1 bkg', mass, ROOT.RooArgList(c0)) elif self.bkg_fit_func == 'pol2': background = ROOT.RooChebychev('bkg', 'pol2 bkg', mass, ROOT.RooArgList(c0, c1)) else: raise ValueError(f'Invalid background fit function. Expected one of: pol1, pol2') if extended_likelihood: n_signal = ROOT.RooRealVar('n_signal', 'n_signal', 10., 1e6) n_background = ROOT.RooRealVar('n_background', 'n_background', 0., 1e6) else: f = ROOT.RooRealVar('f', 'fraction of signal', 0., 0.4) # fix DSCB parameters to MC if self.mc_hdl != None: mass_roo_mc = utils.ndarray2roo(np.array(self.mc_hdl['fMassH3L'].values, dtype=np.float64), mass, 'histo_mc') signal.fitTo(mass_roo_mc, ROOT.RooFit.Range(2.97, 3.01), ROOT.RooFit.PrintLevel(-1)) a1.setConstant() a2.setConstant() n1.setConstant() n2.setConstant() sigma.setRange(sigma.getVal(), sigma.getVal()*1.5) self.mc_frame_fit = mass.frame(80) self.mc_frame_fit.SetName('mc_frame_fit') mass_roo_mc.plotOn(self.mc_frame_fit) signal.plotOn(self.mc_frame_fit) fit_param = ROOT.TPaveText(0.6, 0.6, 0.9, 0.9, 'NDC') fit_param.SetBorderSize(0) fit_param.SetFillStyle(0) fit_param.SetTextAlign(12) fit_param.AddText('#mu = ' + f'{mu.getVal()*1e3:.2f} #pm {mu.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}') fit_param.AddText('#sigma = ' + f'{sigma.getVal()*1e3:.2f} #pm {sigma.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}') 
self.mc_frame_fit.addObject(fit_param) # define the fit function and perform the actual fit if extended_likelihood: self.pdf = ROOT.RooAddPdf('total_pdf', 'signal + background', ROOT.RooArgList(signal, background), ROOT.RooArgList(n_signal, n_background)) else: self.pdf = ROOT.RooAddPdf('total_pdf', 'signal + background', ROOT.RooArgList(signal, background), ROOT.RooArgList(f)) mass_array = np.array(self.data_hdl[tree_var_name].values, dtype=np.float64) self.roo_dataset = utils.ndarray2roo(mass_array, mass) self.pdf.fitTo(self.roo_dataset, ROOT.RooFit.Extended(extended_likelihood), ROOT.RooFit.Save(True), ROOT.RooFit.PrintLevel(-1)) ## get fit parameters fit_pars = self.pdf.getParameters(self.roo_dataset) sigma_val = fit_pars.find('sigma').getVal() sigma_val_error = fit_pars.find('sigma').getError() mu_val = fit_pars.find('mu').getVal() mu_val_error = fit_pars.find('mu').getError() if extended_likelihood: signal_counts = n_signal.getVal() signal_counts_error = n_signal.getError() background_counts = n_background.getVal() background_counts_error = n_background.getError() else: signal_counts = (1-f.getVal())*self.roo_dataset.sumEntries() signal_counts_error = (1-f.getVal()) * self.roo_dataset.sumEntries()*f.getError()/f.getVal() background_counts = f.getVal()*self.roo_dataset.sumEntries() background_counts_error = f.getVal() * self.roo_dataset.sumEntries()*f.getError()/f.getVal() self.data_frame_fit = mass.frame(self.n_bins) self.data_frame_fit.SetName('data_frame_fit') self.roo_dataset.plotOn(self.data_frame_fit, ROOT.RooFit.Name('data'), ROOT.RooFit.DrawOption('p')) self.pdf.plotOn(self.data_frame_fit, ROOT.RooFit.Components('bkg'), ROOT.RooFit.LineStyle(ROOT.kDashed), ROOT.RooFit.LineColor(kOrangeC)) self.pdf.plotOn(self.data_frame_fit, ROOT.RooFit.LineColor(kBlueC), ROOT.RooFit.Name('fit_func')) self.data_frame_fit.GetYaxis().SetTitleSize(0.06) self.data_frame_fit.GetYaxis().SetTitleOffset(0.9) self.data_frame_fit.GetYaxis().SetMaxDigits(2) 
self.data_frame_fit.GetXaxis().SetTitleOffset(1.1) # signal within 3 sigma mass.setRange('signal', mu_val-3*sigma_val, mu_val+3*sigma_val) signal_int = signal.createIntegral(ROOT.RooArgSet(mass), ROOT.RooArgSet(mass), 'signal') signal_int_val_3s = signal_int.getVal()*signal_counts signal_int_val_3s_error = signal_int_val_3s*signal_counts_error/signal_counts # background within 3 sigma mass.setRange('bkg', mu_val-3*sigma_val, mu_val+3*sigma_val) bkg_int = background.createIntegral(ROOT.RooArgSet(mass), ROOT.RooArgSet(mass), 'bkg') bkg_int_val_3s = bkg_int.getVal()*background_counts bkg_int_val_3s_error = bkg_int_val_3s*background_counts_error/background_counts significance = signal_int_val_3s / np.sqrt(signal_int_val_3s + bkg_int_val_3s) significance_err = utils.significance_error(signal_int_val_3s, bkg_int_val_3s, signal_int_val_3s_error, bkg_int_val_3s_error) s_b_ratio_err = np.sqrt((signal_int_val_3s_error/signal_int_val_3s)**2 + (bkg_int_val_3s_error/bkg_int_val_3s)**2)*signal_int_val_3s/bkg_int_val_3s # add pave for stats pinfo_vals = ROOT.TPaveText(0.632, 0.5, 0.932, 0.85, 'NDC') pinfo_vals.SetBorderSize(0) pinfo_vals.SetFillStyle(0) pinfo_vals.SetTextAlign(11) pinfo_vals.SetTextFont(42) pinfo_vals.AddText(f'Signal (S): {signal_counts:.0f} #pm {signal_counts_error:.0f}') pinfo_vals.AddText(f'S/B (3 #sigma): {signal_int_val_3s/bkg_int_val_3s:.1f} #pm {s_b_ratio_err:.1f}') pinfo_vals.AddText('S/#sqrt{S+B} (3 #sigma): ' + f'{significance:.1f} #pm {significance_err:.1f}') pinfo_vals.AddText('#mu = ' + f'{mu_val*1e3:.2f} #pm {mu.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}') pinfo_vals.AddText('#sigma = ' + f'{sigma_val*1e3:.2f} #pm {sigma.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}') ## add pave for ALICE performance if self.performance: pinfo_alice = ROOT.TPaveText(0.6, 0.5, 0.93, 0.85, 'NDC') else: pinfo_alice = ROOT.TPaveText(0.14, 0.6, 0.42, 0.85, 'NDC') pinfo_alice.SetBorderSize(0) pinfo_alice.SetFillStyle(0) pinfo_alice.SetTextAlign(11) pinfo_alice.SetTextFont(42) 
pinfo_alice.AddText('ALICE Performance') pinfo_alice.AddText('Run 3, pp #sqrt{#it{s}} = 13.6 TeV') if not self.performance: pinfo_alice.AddText('N_{ev} = ' f'{self.n_evts:.0f} ' '#times 10^{9}') pinfo_alice.AddText(decay_string) if self.additional_pave_text != '': pinfo_alice.AddText(self.additional_pave_text) if not self.performance: self.data_frame_fit.addObject(pinfo_vals) self.data_frame_fit.addObject(pinfo_alice) fit_stats = {'signal': [signal_counts, signal_counts_error], 'significance': [significance, significance_err], 's_b_ratio': [signal_int_val_3s/bkg_int_val_3s, s_b_ratio_err]} if rooworkspace_path != None: w = ROOT.RooWorkspace('w') sb_model = ROOT.RooStats.ModelConfig('sb_model', w) sb_model.SetPdf(self.pdf) sb_model.SetParametersOfInterest(ROOT.RooArgSet(n_signal)) sb_model.SetObservables(ROOT.RooArgSet(mass)) getattr(w, 'import')(sb_model) getattr(w, 'import')(self.roo_dataset) w.writeToFile(rooworkspace_path + '/rooworkspace.root', True) return fit_stats def compute_significance_asymptotic_calc(self, rooworkspace_path, do_local_p0plot=False): print("-----------------------------------------------") print("Computing significance with asymptotic calculator") ## get saved workspace workspace_file = ROOT.TFile(rooworkspace_path + '/rooworkspace.root', 'READ') w = workspace_file.Get('w') roo_abs_data = w.data('data') sb_model = w.obj('sb_model') poi = sb_model.GetParametersOfInterest().first() sb_model.SetSnapshot(ROOT.RooArgSet(poi)) ## create the b-only model b_model = sb_model.Clone() b_model.SetName('b_model') poi.setVal(0) b_model.SetSnapshot(poi) b_model.Print() # w.var('sigma').setConstant(True) w.var('mu').setConstant(True) asymp_calc = ROOT.RooStats.AsymptoticCalculator(roo_abs_data, sb_model, b_model) asymp_calc.SetPrintLevel(0) asymp_calc_result = asymp_calc.GetHypoTest() null_p_value = asymp_calc_result.NullPValue() null_p_value_err = asymp_calc_result.NullPValueError() significance = asymp_calc_result.Significance() significance_err = 
asymp_calc_result.SignificanceError() if do_local_p0plot: ### perform a scan in mass and compute the significance masses = [] p0_values = [] p0_values_expected = [] mass_array = np.linspace(w.var('mu').getMin(), w.var('mu').getMax(), 100) for mass in mass_array: w.var('mu').setVal(mass) w.var('mu').setConstant(True) asymp_calc_scan = ROOT.RooStats.AsymptoticCalculator(roo_abs_data, sb_model, b_model) asymp_calc_scan.SetOneSidedDiscovery(True) asym_calc_result_scan = asymp_calc_scan.GetHypoTest() null_p_value_scan = asym_calc_result_scan.NullPValue() masses.append(mass) p0_values.append(null_p_value_scan) print(f"Mass: {mass} MeV/c^2, p0: {null_p_value_scan:.10f}") ## create a graph with the p0 values self.local_pvalue_graph = ROOT.TGraph(len(masses), np.array(masses), np.array(p0_values)) self.local_pvalue_graph.SetName('p0_values') self.local_pvalue_graph.GetXaxis().SetTitle(self.inv_mass_string) self.local_pvalue_graph.GetYaxis().SetTitle('Local p-value') # log Y axis self.local_pvalue_graph.SetMarkerStyle(20) self.local_pvalue_graph.SetMarkerColor(kBlueC) self.local_pvalue_graph.SetMarkerSize(0) self.local_pvalue_graph.SetLineColor(kBlueC) self.local_pvalue_graph.SetLineWidth(2) print("****************************************************") print(f'p0: {null_p_value:.3E} +/- {null_p_value_err:.3E}') print(f'significance: {significance:.5f} +/- {significance_err:.5f}') print("****************************************************") if __name__ == '__main__': # set parameters parser = argparse.ArgumentParser( description='Configure the parameters of the script.') parser.add_argument('--config-file', dest='config_file', default='', help='path to the YAML file with configuration.') parser.add_argument('--nbins', dest='n_bins', default=30, help='number of bins in the final plot.') parser.add_argument('--performance', action='store_true', help="True for performance plot", default=False) args = parser.parse_args() config_file = open(args.config_file, 'r') config = 
yaml.full_load(config_file) input_parquet_data = config['input_parquet_data'] input_analysis_results = config['input_analysis_results'] input_parquet_mc = config['input_parquet_mc'] output_dir = config['output_dir'] output_file = config['output_file'] is_4lh = config['is_4lh'] matter_type = config['matter_type'] n_bins = config['n_bins'] performance = args.performance data_hdl = TreeHandler(input_parquet_data) mc_hdl = TreeHandler(input_parquet_mc) an_vtx_z = uproot.open(input_analysis_results)['hyper-reco-task']['hZvtx'] n_evts = an_vtx_z.values().sum() / 1e9 n_evts = round(n_evts, 0) signal_extraction = SignalExtraction(data_hdl, mc_hdl) signal_extraction.n_bins = n_bins signal_extraction.n_evts = n_evts signal_extraction.matter_type = matter_type signal_extraction.performance = performance signal_extraction.is_3lh = not is_4lh signal_extraction.bkg_fit_func = 'pol1' signal_extraction.process_fit(extended_likelihood=True, rooworkspace_path="../results") signal_extraction.compute_significance_asymptotic_calc(rooworkspace_path="../results", do_local_p0plot=True) # create output file and save frames out_file = ROOT.TFile(f'{output_dir}/{output_file}', 'recreate') out_file.cd() signal_extraction.data_frame_fit.Write() signal_extraction.mc_frame_fit.Write() signal_extraction.local_pvalue_graph.Write() out_file.Close() if is_4lh: state_label = '4lh' else: state_label = '3lh' cSignalExtraction = ROOT.TCanvas('cSignalExtraction', 'cSignalExtraction', 800, 600) signal_extraction.data_frame_fit.SetTitle('') signal_extraction.data_frame_fit.Draw() cSignalExtraction.SaveAs(f'{output_dir}/cSignalExtraction_{matter_type}_{state_label}.pdf')
lbariogl/HyperRoutine
signal_extraction.py
signal_extraction.py
py
16,102
python
en
code
0
github-code
6
[ { "api_name": "sys.path.append", "line_number": 10, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 10, "usage_type": "attribute" }, { "api_name": "utils.set_style", "line_number": 13, "usage_type": "call" }, { "api_name": "ROOT.TColor.GetColor", ...
2872283986
import requests import datetime as dt from twilio.rest import Client account_sid = 'Twilio_api_sid' auth_token = 'twilio_auth_token' STOCK = "TSLA" COMPANY_NAME = "Tesla Inc" stock_api_key = 'alpha_vantage_api_key' news_api_key = 'news_api_key' STOCK_ENDPOINT = "https://www.alphavantage.co/query" NEWS_ENDPOINT = "https://newsapi.org/v2/everything" today = dt.datetime.now().date() lag_1day = str(today - dt.timedelta(days=1)) lag_2day = str(today - dt.timedelta(days=2)) up_down = '' percent_change = 0 stock_parameters = { 'function': 'TIME_SERIES_DAILY_ADJUSTED', 'symbol': STOCK, 'outputsize': 'compact', 'apikey': stock_api_key, 'pageSize': 3, 'page': 1, } news_parameters = { 'q': COMPANY_NAME, 'from': lag_2day, 'to': lag_1day, 'sortBy': 'publishedAt', 'apiKey': news_api_key, } # Make api request to stock api stock_response = requests.get(STOCK_ENDPOINT, params=stock_parameters) stock_response.raise_for_status() stock_data = stock_response.json() # Get closing price try: lag_1day_close = float(stock_data['Time Series (Daily)'][lag_1day]['4. close']) except KeyError: lag_1day_close = None try: lag_2day_close = float(stock_data['Time Series (Daily)'][lag_2day]['4. 
close']) except KeyError: lag_2day_close = None # Find percent change, and set up_down symbol if lag_1day_close is not None and lag_2day_close is not None: difference = lag_1day_close - lag_2day_close percent_change = round((difference / lag_1day_close) * 100) if difference < 0: up_down = '🔻' else: up_down = '🔺' # Make api request to get news articles news_response = requests.get(NEWS_ENDPOINT, params=news_parameters) news_response.raise_for_status() news_data = news_response.json() top_news = news_data['articles'][:3] news_title_list = [top_news[_]['title'] for _ in range(len(top_news))] news_description_list = [top_news[_]['description'] for _ in range(len(top_news))] # Send text messages if percent_change >= 5 or percent_change <= -5: for i in range(len(news_title_list)): client = Client(account_sid, auth_token) message = client.messages \ .create( body=f'{STOCK}: {up_down}{percent_change}%\nHeadline: {news_title_list[i]}\nBrief: {news_description_list[i]}', from_='+19257226085', to='+15551234567' )
mgardner1011/UdemyProjects
Stock_news_alert/main.py
main.py
py
2,383
python
en
code
0
github-code
6
[ { "api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute" }, { "api_name": "datetime.timedelta", "line_number": 18, "usage_type": "call" }, { "api_name": "datet...
15183195346
#! usr/bin/env python # -*- coding : utf-8 -*- from skopt import gp_maximize import numpy as np from skopt.plots import plot_convergence np.random.seed(123) #%matplotlib inline import matplotlib.pyplot as plt noise_level = 0.1 def f(x, noise_level=noise_level): return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) + np.random.randn() * noise_level # Plot f(x) + contours x = np.linspace(-2, 2, 400).reshape(-1, 1) fx = [f(x_i, noise_level=0.0) for x_i in x] plt.plot(x, fx, "r--", label="True (unknown)") plt.fill(np.concatenate([x, x[::-1]]), np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx], [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])), alpha=.2, fc="r", ec="None") #plt.legend() #plt.grid() #plt.show() res = gp_maximize(f, # the function to minimize [(-2.0, 2.0)], # the bounds on each dimension of x acq_func="EI", # the acquisition function n_calls=15, # the number of evaluations of f n_random_starts=5, # the number of random initialization points noise=0.1**2, # the noise level (optional) random_state=123) # the random seed #print(res) #plot_convergence(res); plt.rcParams["figure.figsize"] = (6, 4) # Plot f(x) + contours x = np.linspace(-2, 2, 400).reshape(-1, 1) x_gp = res.space.transform(x.tolist()) fx = [f(x_i, noise_level=0.0) for x_i in x] plt.plot(x, fx, "r--", label="True (unknown)") plt.fill(np.concatenate([x, x[::-1]]), np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx], [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])), alpha=.2, fc="r", ec="None") # Plot GP(x) + contours gp = res.models[-1] y_pred, sigma = gp.predict(x_gp, return_std=True) plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$") plt.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.2, fc="g", ec="None") # Plot sampled points plt.plot(res.x_iters, res.func_vals, "r.", markersize=15, label="Observations") plt.title(r"$x^* = %.4f, f(x^*) = %.4f$" % (res.x[0], res.fun)) plt.legend(loc="best", 
prop={'size': 8}, numpoints=1) plt.grid() plt.show()
aggarwalpiush/Hyperparameter-Optimization-Tutorial
main.py
main.py
py
2,364
python
en
code
3
github-code
6
[ { "api_name": "numpy.random.seed", "line_number": 7, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 7, "usage_type": "attribute" }, { "api_name": "numpy.sin", "line_number": 16, "usage_type": "call" }, { "api_name": "numpy.tanh", "line_nu...
29246617685
# from django.contrib import messages from json import loads, dumps from .models import Link from django.db.models import Sum from django.db import OperationalError from tenacity import (retry, stop_after_attempt, wait_fixed, retry_if_exception_type) import random import string import datetime from django.shortcuts import render, redirect, get_object_or_404 from django.http import (HttpResponse, HttpResponseServerError, Http404, HttpResponseBadRequest) # For Google Web Crawler to work and website to show up on Google def robots_txt(request): lines = [ "User-Agent: *", "Disallow: /admin/" # "Disallow: /*" ] return HttpResponse("\n".join(lines), content_type="text/plain") # Returning home page def index(request): stats = get_stats() return render(request, 'shortner/index.html', context=stats) # returns stats for rendering in index.html def return_last_value(retry_state): print(f'\n\n attempt number {retry_state.attempt_number} \n \ function for which retry was called: {retry_state.fn} \n\n') @retry(retry=retry_if_exception_type(OperationalError), stop=stop_after_attempt(3), wait=wait_fixed(0.75), retry_error_callback=return_last_value) def get_stats(): # generating date information d1 = datetime.datetime(2020, 8, 30) d2 = datetime.datetime.now() time_difference = d2-d1 months = round(time_difference.days / 30) stats = { 'total_links': Link.objects.all().count(), 'total_clicks': Link.objects.aggregate(total_clicks=Sum('clicks'))['total_clicks'], 'active_months': months } return stats def check(request, shortlink): if linkExists(shortlink): return HttpResponse(dumps({'link': shortlink, 'available': False})) else: return HttpResponse(dumps({'link': shortlink, 'available': True})) # not strictly required but might be useful for debugging print('nothing got returned') def create(request): # assump1: post body exists # assump2: post body has 'longlink' defined if request.method != 'POST': return redirect('/') reqBody = loads(request.body) longlink = reqBody['longlink'] 
shortlink = '' # temporary empty value try: shortlink = reqBody['shortlink'] if shortlink == '': # ik it's wrong...sorry. raise KeyError('Empty shortlink') if linkExists(shortlink): res = HttpResponseBadRequest() res.reason_phrase = 'Shortlink already taken' res.status_code = 400 return res except KeyError: shortlink = getShortRandomLink(5) obj = Link(shortlink=shortlink, longlink=longlink) obj.save() return HttpResponse(dumps(obj.getDict())) @retry(retry=retry_if_exception_type(OperationalError), stop=stop_after_attempt(3), wait=wait_fixed(0.75), retry_error_callback=return_last_value) def rediretor(request, shortlink): shortlinkObj = get_object_or_404(Link, pk=shortlink) # uncomment below lines when adding feature shortlinkObj.clicks += 1 shortlinkObj.save() return redirect(shortlinkObj.longlink) def custom_404(request, exception): return render(request, 'shortner/404.html', status=404) def linkExists(shortlink): try: Link.objects.get(pk=shortlink) return True except Link.DoesNotExist: return False # ------- helper functions --------- def getShortRandomLink(length): temp = get_random_string(length) if linkExists(temp): # recursion! getShortRandomLink(length) return temp def get_random_string(length): letters = string.ascii_lowercase result_str = ''.join(random.choice(letters) for i in range(length)) return result_str # function to tell user how many clicks their link have gotten # usable as api/clicky/<shortlink> def clicks(request, shortlink): # print(f"shortlink of cliks is {shortlink}\n") if linkExists(shortlink): link = Link.objects.get(pk=shortlink) return HttpResponse(link.clicks) else: return HttpResponse('0')
RahulTandon1/cutshort
shortner/views.py
views.py
py
4,216
python
en
code
3
github-code
6
[ { "api_name": "django.http.HttpResponse", "line_number": 25, "usage_type": "call" }, { "api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 48, "usage_type": "call" }, { "api_name": "da...
41039653002
from __future__ import annotations import json import atexit import datetime import functools import logging import multiprocessing import os import shutil import signal import socket import subprocess import tempfile import threading import time import webbrowser from collections import namedtuple from contextlib import closing from enum import Enum, auto from io import BufferedWriter from logging.handlers import QueueHandler from multiprocessing import Queue from os import path from typing import Dict, List, Optional, Set import docker import grpc import requests import toml from urllib3.exceptions import MaxRetryError import vega_sim.api.governance as gov import vega_sim.grpc.client as vac from vega_sim import vega_bin_path, vega_home_path from vega_sim.service import VegaService from vega_sim.tools.load_binaries import download_binaries from vega_sim.tools.retry import retry from vega_sim.wallet.base import DEFAULT_WALLET_NAME, Wallet from vega_sim.wallet.slim_wallet import SlimWallet from vega_sim.wallet.vega_wallet import VegaWallet logger = logging.getLogger(__name__) PortUpdateConfig = namedtuple( "PortUpdateConfig", ["file_path", "config_path", "key", "val_func"] ) PORT_DIR_NAME = "market_sim_ports" class Ports(Enum): DATA_NODE_GRPC = auto() DATA_NODE_REST = auto() DATA_NODE_POSTGRES = auto() FAUCET = auto() WALLET = auto() VEGA_NODE = auto() CORE_GRPC = auto() CORE_REST = auto() BROKER = auto() METRICS = auto() DATA_NODE_METRICS = auto() PPROF = auto() CONSOLE = auto() PORT_UPDATERS = { Ports.DATA_NODE_GRPC: [ PortUpdateConfig( ("config", "data-node", "config.toml"), ["API"], "Port", lambda port: port, ), PortUpdateConfig( ("config", "data-node", "config.toml"), ["Gateway", "Node"], "Port", lambda port: port, ), PortUpdateConfig( ("config", "wallet-service", "networks", "local.toml"), ["API", "GRPC"], "Hosts", lambda port: [f"localhost:{port}"], ), ], Ports.DATA_NODE_REST: [ PortUpdateConfig( ("config", "data-node", "config.toml"), ["Gateway"], "Port", 
lambda port: port, ), PortUpdateConfig( ("config", "wallet-service", "networks", "local.toml"), ["API", "REST"], "Hosts", lambda port: [f"localhost:{port}"], ), PortUpdateConfig( ("config", "wallet-service", "networks", "local.toml"), ["API", "GraphQL"], "Hosts", lambda port: [f"localhost:{port}"], ), ], Ports.DATA_NODE_POSTGRES: [ PortUpdateConfig( ("config", "data-node", "config.toml"), ["SQLStore", "ConnectionConfig"], "Port", lambda port: port, ), ], Ports.FAUCET: [ PortUpdateConfig( ("config", "faucet", "config.toml"), [], "Port", lambda port: port ), ], Ports.WALLET: [ PortUpdateConfig( ("config", "wallet-service", "config.toml"), ["Server"], "Port", lambda port: port, ), PortUpdateConfig( ("config", "wallet-service", "networks", "local.toml"), [], "Port", lambda port: port, ), ], Ports.VEGA_NODE: [ PortUpdateConfig( ("config", "node", "config.toml"), ["Blockchain", "Null"], "Port", lambda port: port, ), ], Ports.CORE_GRPC: [ PortUpdateConfig( ("config", "faucet", "config.toml"), ["Node"], "Port", lambda port: port, ), PortUpdateConfig( ("config", "node", "config.toml"), ["API"], "Port", lambda port: port, ), PortUpdateConfig( ("config", "data-node", "config.toml"), ["API"], "CoreNodeGRPCPort", lambda port: port, ), ], Ports.CORE_REST: [ PortUpdateConfig( ("config", "node", "config.toml"), ["API", "REST"], "Port", lambda port: port, ), ], Ports.BROKER: [ PortUpdateConfig( ("config", "data-node", "config.toml"), ["Broker", "SocketConfig"], "Port", lambda port: port, ), PortUpdateConfig( ("config", "node", "config.toml"), ["Broker", "Socket"], "Port", lambda port: port, ), ], Ports.DATA_NODE_METRICS: [ PortUpdateConfig( ("config", "data-node", "config.toml"), ["Metrics"], "Port", lambda port: port, ), ], Ports.METRICS: [ PortUpdateConfig( ("config", "node", "config.toml"), ["Metrics"], "Port", lambda port: port, ), ], Ports.PPROF: [ PortUpdateConfig( ("config", "data-node", "config.toml"), ["Pprof"], "Port", lambda port: port, ), PortUpdateConfig( ("config", 
"node", "config.toml"), ["Pprof"], "Port", lambda port: port, ), ], Ports.CONSOLE: [ PortUpdateConfig( ("config", "wallet-service", "networks", "local.toml"), ["Console"], "LocalPort", lambda port: port, ), ], } class VegaStartupTimeoutError(Exception): pass class ServiceNotStartedError(Exception): pass class SocketNotFoundError(Exception): pass def logger_thread(q): while True: record = q.get() if record is None: break logger = logging.getLogger(record.name) logger.handle(record) def find_free_port(existing_set: Optional[Set[int]] = None): ret_sock = 0 existing_set = ( existing_set.union(set([ret_sock])) if existing_set is not None else set([ret_sock]) ) # Synchronisation to try to avoid using the same ports across processes # launching at very similar times dated_path_dir = path.join( tempfile.gettempdir(), PORT_DIR_NAME, datetime.date.today().strftime("%Y-%d-%m-%H-%M"), ) os.makedirs(dated_path_dir, exist_ok=True) existing_set.update(set(int(x) for x in os.listdir(dated_path_dir))) num_tries = 0 while ret_sock in existing_set: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(("", 0)) ret_sock = s.getsockname()[1] num_tries += 1 if num_tries >= 100: # Arbitrary high number. 
If we try 100 times and fail to find # a port it seems reasonable to give up raise SocketNotFoundError("Failed finding a free socket") open(path.join(dated_path_dir, str(ret_sock)), "x") return ret_sock def _terminate_proc( proc: subprocess.Popen[bytes], out_file: BufferedWriter, err_file: BufferedWriter ) -> None: proc.terminate() out_file.close() err_file.close() def _popen_process( popen_args: List[str], dir_root: str, log_name: str, env: Optional[Dict[str, str]] = None, ) -> subprocess.Popen[bytes]: out = open(path.join(dir_root, f"{log_name}.out"), "wb") err = open(path.join(dir_root, f"{log_name}.err"), "wb") sub_proc = subprocess.Popen( popen_args, stdout=out, stderr=err, env=env, close_fds=True ) atexit.register(functools.partial(_terminate_proc, sub_proc, out, err)) return sub_proc def _update_node_config( vega_home: str, port_config: Dict[Ports, int], transactions_per_block: int = 1, block_duration: str = "1s", use_docker_postgres: bool = False, ) -> None: config_path = path.join(vega_home, "config", "node", "config.toml") config_toml = toml.load(config_path) config_toml["Blockchain"]["Null"]["GenesisFile"] = path.join( vega_home, "genesis.json" ) config_toml["Blockchain"]["Null"]["BlockDuration"] = block_duration config_toml["Blockchain"]["Null"]["TransactionsPerBlock"] = transactions_per_block existing_ports = set(port_config.values()) for port in Ports: if port in port_config: continue new_port = find_free_port(existing_ports) existing_ports.add(new_port) port_config[port] = new_port with open(config_path, "w") as f: toml.dump(config_toml, f) for port_key, update_configs in PORT_UPDATERS.items(): for config in update_configs: file_path = path.join(vega_home, *config.file_path) config_toml = toml.load(file_path) elem = config_toml for k in config.config_path: elem = elem[k] elem[config.key] = config.val_func(port_config[port_key]) if port_key == Ports.DATA_NODE_POSTGRES: config_toml["SQLStore"]["UseEmbedded"] = not use_docker_postgres with 
open(file_path, "w") as f: toml.dump(config_toml, f) def manage_vega_processes( child_conn: multiprocessing.Pipe, log_queue, vega_path: str, data_node_path: str, vega_wallet_path: str, vega_console_path: Optional[str] = None, run_with_console: bool = False, port_config: Optional[Dict[Ports, int]] = None, transactions_per_block: int = 1, block_duration: str = "1s", run_wallet: bool = False, retain_log_files: bool = False, log_dir: Optional[str] = None, replay_from_path: Optional[str] = None, store_transactions: bool = True, log_level: Optional[int] = None, genesis_time: Optional[datetime.datetime] = None, ) -> None: logger.addHandler(QueueHandler(log_queue)) logger.setLevel(log_level if log_level is not None else logging.INFO) port_config = port_config if port_config is not None else {} try: docker_client = docker.from_env() use_docker_postgres = True except: use_docker_postgres = False # Explicitly not using context here so that crashed logs are retained tmp_vega_dir = tempfile.mkdtemp(prefix="vega-sim-") if log_dir is None else log_dir logger.info(f"Running NullChain from vegahome of {tmp_vega_dir}") if port_config.get(Ports.CONSOLE): logger.info(f"Launching Console at port {port_config.get(Ports.CONSOLE)}") if port_config.get(Ports.DATA_NODE_REST): logger.info( "Launching Datanode REST + GRAPHQL at port" f" {port_config.get(Ports.DATA_NODE_REST)}" ) if port_config.get(Ports.DATA_NODE_GRPC): logger.info( f"Launching Datanode GRPC at port {port_config.get(Ports.DATA_NODE_GRPC)}" ) if port_config.get(Ports.CORE_REST): logger.info(f"Launching Core REST at port {port_config.get(Ports.CORE_REST)}") if port_config.get(Ports.CORE_GRPC): logger.info(f"Launching Core GRPC at port {port_config.get(Ports.CORE_GRPC)}") dest_dir = f"{tmp_vega_dir}/vegahome" shutil.copytree(vega_home_path, dest_dir) for dirpath, _, filenames in os.walk(dest_dir): os.utime(dirpath, None) for file in filenames: os.utime(os.path.join(dirpath, file), None) if genesis_time is not None: with 
open(f"{dest_dir}/genesis.json", "r") as file: data = json.load(file) data["genesis_time"] = genesis_time.isoformat() + "Z" with open(f"{dest_dir}/genesis.json", "w") as file: json.dump(data, file, indent=2) tmp_vega_home = tmp_vega_dir + "/vegahome" _update_node_config( tmp_vega_home, port_config=port_config, transactions_per_block=transactions_per_block, block_duration=block_duration, use_docker_postgres=use_docker_postgres, ) if use_docker_postgres: data_node_docker_volume = docker_client.volumes.create() data_node_container = docker_client.containers.run( "timescale/timescaledb:2.11.2-pg15", command=[ "-c", "max_connections=50", "-c", "log_destination=stderr", "-c", "work_mem=5MB", "-c", "huge_pages=off", "-c", "shared_memory_type=sysv", "-c", "dynamic_shared_memory_type=sysv", "-c", "shared_buffers=2GB", "-c", "temp_buffers=5MB", ], detach=True, ports={5432: port_config[Ports.DATA_NODE_POSTGRES]}, volumes=[f"{data_node_docker_volume.name}:/var/lib/postgresql/data"], environment={ "POSTGRES_USER": "vega", "POSTGRES_PASSWORD": "vega", "POSTGRES_DB": "vega", }, remove=False, ) dataNodeProcess = _popen_process( [ data_node_path, "start", "--home=" + tmp_vega_home, "--chainID=CUSTOM", ], dir_root=tmp_vega_dir, log_name="data_node", ) vega_args = [ vega_path, "start", "--nodewallet-passphrase-file=" + tmp_vega_home + "/passphrase-file", "--home=" + tmp_vega_home, ] if store_transactions: replay_file = ( replay_from_path if replay_from_path is not None else tmp_vega_home + "/replay" ) vega_args.extend( [ f"--blockchain.nullchain.replay-file={replay_file}", "--blockchain.nullchain.record", ] ) if replay_from_path is not None: vega_args.extend( [ f"--blockchain.nullchain.replay-file={replay_from_path}", "--blockchain.nullchain.replay", ] ) vegaNodeProcess = _popen_process( vega_args, dir_root=tmp_vega_dir, log_name="node", ) for _ in range(500): try: requests.get( f"http://localhost:{port_config[Ports.CORE_REST]}/blockchain/height" ).raise_for_status() break except: 
pass vegaFaucetProcess = _popen_process( [ vega_path, "faucet", "run", "--passphrase-file=" + tmp_vega_home + "/passphrase-file", "--home=" + tmp_vega_home, ], dir_root=tmp_vega_dir, log_name="faucet", ) processes = { "data-node": dataNodeProcess, "faucet": vegaFaucetProcess, "vega": vegaNodeProcess, } if run_wallet: for _ in range(3000): try: requests.get( f"http://localhost:{port_config.get(Ports.DATA_NODE_REST)}/time" ).raise_for_status() requests.get( f"http://localhost:{port_config.get(Ports.CORE_REST)}/blockchain/height" ).raise_for_status() break except ( MaxRetryError, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, ): time.sleep(0.1) subprocess.run( [ vega_wallet_path, "wallet", "api-token", "init", f"--home={tmp_vega_home}", f"--passphrase-file={tmp_vega_home}/passphrase-file", ], capture_output=True, ) subprocess.run( [ vega_wallet_path, "wallet", "create", "--wallet", DEFAULT_WALLET_NAME, "--home", tmp_vega_home, "--passphrase-file", tmp_vega_home + "/passphrase-file", "--output", "json", ], capture_output=True, ) subprocess.run( [ vega_wallet_path, "wallet", "api-token", "generate", "--home=" + tmp_vega_home, "--tokens-passphrase-file=" + tmp_vega_home + "/passphrase-file", "--wallet-passphrase-file=" + tmp_vega_home + "/passphrase-file", "--wallet-name=" + DEFAULT_WALLET_NAME, "--description=" + DEFAULT_WALLET_NAME, ], capture_output=True, ) wallet_args = [ vega_wallet_path, "wallet", "service", "run", "--network", "local", "--home=" + tmp_vega_home, "--automatic-consent", "--load-tokens", "--tokens-passphrase-file=" + tmp_vega_home + "/passphrase-file", ] vegaWalletProcess = _popen_process( wallet_args, dir_root=tmp_vega_dir, log_name="vegawallet", ) processes["wallet"] = vegaWalletProcess if run_with_console: env_copy = os.environ.copy() env_copy.update( { "NX_VEGA_URL": ( f"http://localhost:{port_config[Ports.DATA_NODE_REST]}/graphql" ), "NX_VEGA_WALLET_URL": f"http://localhost:{port_config[Ports.WALLET]}", "NX_VEGA_ENV": 
"CUSTOM", "NX_PORT": f"{port_config[Ports.CONSOLE]}", "NODE_ENV": "development", "NX_VEGA_NETWORKS": "{}", } ) console_process = _popen_process( [ "yarn", "--cwd", vega_console_path, "nx", "serve", "-o", "trading", "--port", f"{port_config[Ports.CONSOLE]}", ], dir_root=tmp_vega_dir, log_name="console", env=env_copy, ) processes["console"] = console_process # Send process pid values for resource monitoring child_conn.send({name: process.pid for name, process in processes.items()}) # According to https://docs.oracle.com/cd/E19455-01/806-5257/gen-75415/index.html # There is no guarantee that signal will be catch by this thread. Usually the # parent process catches the signal and removes it from the list of pending # signals, this leave us with memory leak where we have orphaned vega processes # and the docker containers. Below is hack to maximize chance by catching the # signal. # We call signal.signal method as a workaround to move this thread on top of # the catch stack, then sigwait waits until singal is trapped. # As last resort We catches the `SIGCHLD` in case the parent process exited # and this is the orphan now. 
# But to provide 100% guarantee this should be implemented in another way: # - Signal should be trapped in the main process, and this should be synced # the shared memory # - or this entire process manager should be incorporated in the VegaServiceNull # and containers/processes should be removed as inline call in the __exit__ # # # Important assumption is that this signal can be caught multiple times as well def sighandler(signal, frame, logger_): if signal is None: logger_.info("VegaServiceNull exited normally") else: logger_.info(f"VegaServiceNull exited after trapping the {signal} signal") logger_.info("Received signal from parent process") logger_.info("Starting termination for processes") for name, process in processes.items(): logger_.info(f"Terminating process {name}(pid: {process.pid})") process.terminate() for name, process in processes.items(): attempts = 0 while process.poll() is None: logger_.info(f"Process {name} still not terminated") time.sleep(1) attempts += 1 if attempts > 60: logger_.warning( "Gracefully terminating process timed-out. Killing process" f" {name}." 
) process.kill() logger_.debug(f"Process {name} stopped with {process.poll()}") if process.poll() == 0: logger_.info(f"Process {name} terminated.") if process.poll() == -9: logger_.info(f"Process {name} killed.") if use_docker_postgres: def kill_docker_container() -> None: try: data_node_container.stop() with open(tmp_vega_home + "/postgres.out", "wb") as f: f.write(data_node_container.logs()) data_node_container.remove() except requests.exceptions.HTTPError as e: if e.response.status_code == 404: logger_.debug( f"Container {data_node_container.name} has been already" " killed" ) return else: raise e logger_.debug(f"Stopping container {data_node_container.name}") retry(10, 1.0, kill_docker_container) removed = False logger_.debug(f"Removing volume {data_node_docker_volume.name}") for _ in range(20): if data_node_container.status == "running": time.sleep(3) continue try: data_node_docker_volume.remove(force=True) removed = True break except requests.exceptions.HTTPError as e: if e.response.status_code == 404: removed = True logger_.debug( f"Data node volume {data_node_docker_volume.name} has been" " already killed" ) break else: time.sleep(1) except docker.errors.APIError: time.sleep(1) if not removed: logger_.exception( "Docker volume failed to cleanup, will require manual cleaning" ) if not retain_log_files and os.path.exists(tmp_vega_dir): shutil.rmtree(tmp_vega_dir) # The below lines are workaround to put the signal listeners on top of the stack, so this process can handle it. signal.signal(signal.SIGINT, lambda _s, _h: None) signal.signal(signal.SIGTERM, lambda _s, _h: None) # The process had previously created one or more child processes with the fork() function. # One or more of these processes has since died. signal.sigwait( [ signal.SIGKILL, # The process was explicitly killed by somebody wielding the kill program. signal.SIGTERM, # The process was explicitly killed by somebody wielding the terminate program. 
signal.SIGCHLD, ] ) sighandler(None, None, logger_=logger) class VegaServiceNull(VegaService): PORT_TO_FIELD_MAP = { Ports.CONSOLE: "console_port", Ports.CORE_GRPC: "vega_node_grpc_port", Ports.CORE_REST: "vega_node_rest_port", Ports.DATA_NODE_GRPC: "data_node_grpc_port", Ports.DATA_NODE_METRICS: "data_node_metrics_port", Ports.DATA_NODE_POSTGRES: "data_node_postgres_port", Ports.DATA_NODE_REST: "data_node_rest_port", Ports.FAUCET: "faucet_port", Ports.METRICS: "metrics_port", Ports.VEGA_NODE: "vega_node_port", Ports.WALLET: "wallet_port", } def __init__( self, vega_path: Optional[str] = None, data_node_path: Optional[str] = None, vega_wallet_path: Optional[str] = None, vega_console_path: Optional[str] = None, start_immediately: bool = False, run_with_console: bool = False, run_wallet_with_token_dapp: bool = False, port_config: Optional[Dict[Ports, int]] = None, warn_on_raw_data_access: bool = True, transactions_per_block: int = 1, seconds_per_block: int = 1, use_full_vega_wallet: bool = False, retain_log_files: bool = False, launch_graphql: bool = False, store_transactions: bool = True, replay_from_path: Optional[str] = None, listen_for_high_volume_stream_updates: bool = False, check_for_binaries: bool = False, genesis_time: Optional[datetime.datetime] = None, ): super().__init__( can_control_time=True, warn_on_raw_data_access=warn_on_raw_data_access, seconds_per_block=seconds_per_block, listen_for_high_volume_stream_updates=listen_for_high_volume_stream_updates, ) self.retain_log_files = retain_log_files self._using_all_custom_paths = all( [x is not None for x in [vega_path, data_node_path, vega_wallet_path]] ) self.vega_path = vega_path or path.join(vega_bin_path, "vega") self.data_node_path = data_node_path or path.join(vega_bin_path, "data-node") self.vega_wallet_path = vega_wallet_path or path.join(vega_bin_path, "vega") self.vega_console_path = vega_console_path or path.join( vega_bin_path, "console" ) self.proc = None self.run_with_console = 
run_with_console self.run_wallet_with_token_dapp = run_wallet_with_token_dapp self.genesis_time = genesis_time self.transactions_per_block = transactions_per_block self.seconds_per_block = seconds_per_block self._wallet = None self._use_full_vega_wallet = use_full_vega_wallet self.store_transactions = store_transactions self.log_dir = tempfile.mkdtemp(prefix="vega-sim-") self.launch_graphql = launch_graphql self.replay_from_path = replay_from_path self.check_for_binaries = check_for_binaries self.stopped = False self.logger_p = None self._assign_ports(port_config) if start_immediately: self.start() def __enter__(self): self.start() return self def __exit__(self, type, value, traceback): self.stop() def wait_fn(self, wait_multiple: float = 1) -> None: self.wait_for_core_catchup() self.forward(f"{int(wait_multiple * self.seconds_per_block)}s") self.wait_for_core_catchup() @property def wallet(self) -> Wallet: if self._wallet is None: if self._use_full_vega_wallet: self._wallet = VegaWallet( self.wallet_url, wallet_path=self.vega_wallet_path, vega_home_dir=path.join(self.log_dir, "vegahome"), passphrase_file_path=path.join( self.log_dir, "vegahome", "passphrase-file" ), ) else: self._wallet = SlimWallet( self.core_client, full_wallet=None, log_dir=self.log_dir, ) return self._wallet def _check_started(self) -> None: if self.proc is None: raise ServiceNotStartedError("NullChain Vega accessed without starting") def _generate_port_config(self) -> Dict[Ports, int]: return { Ports.CONSOLE: self.console_port, Ports.CORE_GRPC: self.vega_node_grpc_port, Ports.CORE_REST: self.vega_node_rest_port, Ports.DATA_NODE_GRPC: self.data_node_grpc_port, Ports.DATA_NODE_METRICS: self.data_node_metrics_port, Ports.DATA_NODE_POSTGRES: self.data_node_postgres_port, Ports.DATA_NODE_REST: self.data_node_rest_port, Ports.FAUCET: self.faucet_port, Ports.METRICS: self.metrics_port, Ports.VEGA_NODE: self.vega_node_port, Ports.WALLET: self.wallet_port, } # set ports from port_config or 
alternatively find a free port # to use def _assign_ports(self, port_config: Optional[Dict[Ports, int]]): self.console_port = 0 self.data_node_grpc_port = 0 self.data_node_metrics_port = 0 self.data_node_postgres_port = 0 self.data_node_rest_port = 0 self.faucet_port = 0 self.metrics_port = 0 self.vega_node_grpc_port = 0 self.vega_node_port = 0 self.vega_node_rest_port = 0 self.wallet_port = 0 for key, name in self.PORT_TO_FIELD_MAP.items(): if port_config is not None and key in port_config: setattr(self, name, port_config[key]) else: curr_ports = set( [getattr(self, port) for port in self.PORT_TO_FIELD_MAP.values()] ) setattr(self, name, find_free_port(curr_ports)) def start(self, block_on_startup: bool = True) -> None: if self.check_for_binaries and not self._using_all_custom_paths: download_binaries() parent_conn, child_conn = multiprocessing.Pipe() ctx = multiprocessing.get_context() port_config = self._generate_port_config() self.queue = Queue() self.logger_p = threading.Thread(target=logger_thread, args=(self.queue,)) self.logger_p.start() self.proc = ctx.Process( target=manage_vega_processes, kwargs={ "child_conn": child_conn, "log_queue": self.queue, "vega_path": self.vega_path, "data_node_path": self.data_node_path, "vega_wallet_path": self.vega_wallet_path, "vega_console_path": self.vega_console_path, "run_with_console": self.run_with_console, "port_config": port_config, "transactions_per_block": self.transactions_per_block, "block_duration": f"{int(self.seconds_per_block)}s", "run_wallet": self._use_full_vega_wallet, "retain_log_files": self.retain_log_files, "log_dir": self.log_dir, "store_transactions": self.store_transactions, "replay_from_path": self.replay_from_path, "log_level": logging.getLogger().level, "genesis_time": self.genesis_time, }, ) self.proc.start() if self.run_with_console: logger.info( "Vega Running. 
Console launched at" f" http://localhost:{self.console_port}" ) if block_on_startup: # Wait for startup started = False for _ in range(500): try: channel = grpc.insecure_channel( self.data_node_grpc_url, options=( ("grpc.enable_http_proxy", 0), ("grpc.max_send_message_length", 1024 * 1024 * 20), ("grpc.max_receive_message_length", 1024 * 1024 * 20), ), ) grpc.channel_ready_future(channel).result(timeout=5) trading_data_client = vac.VegaTradingDataClientV2( self.data_node_grpc_url, channel=channel, ) gov.get_blockchain_time(trading_data_client) requests.get( f"http://localhost:{self.data_node_rest_port}/time" ).raise_for_status() requests.get( f"http://localhost:{self.vega_node_rest_port}/blockchain/height" ).raise_for_status() requests.get( f"http://localhost:{self.faucet_port}/api/v1/health" ).raise_for_status() if self._use_full_vega_wallet: requests.get( f"http://localhost:{self.wallet_port}/api/v2/health" ).raise_for_status() started = True break except ( MaxRetryError, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, grpc.RpcError, grpc.FutureTimeoutError, ): time.sleep(0.1) if not started: self.stop() raise VegaStartupTimeoutError( "Timed out waiting for Vega simulator to start up" ) # TODO: Remove this once datanode fixes up startup timing time.sleep(6) self.process_pids = parent_conn.recv() # Initialise the data-cache self.data_cache if self.run_with_console: webbrowser.open(f"http://localhost:{port_config[Ports.CONSOLE]}/", new=2) if self.launch_graphql: webbrowser.open( f"http://localhost:{port_config[Ports.DATA_NODE_REST]}/graphql", new=2 ) # Class internal as at some point the host may vary as well as the port @staticmethod def _build_url(port: int, prefix: str = "http://"): return f"{prefix}localhost:{port}" def stop(self) -> None: logger.debug("Calling stop for veganullchain") if self.stopped: return self.stopped = True if self._core_client is not None: self.core_client.stop() if self._core_state_client is not None: 
self.core_state_client.stop() if self._trading_data_client_v2 is not None: self.trading_data_client_v2.stop() if self.proc is None: logger.info("Stop called but nothing to stop") else: os.kill(self.proc.pid, signal.SIGTERM) if self.queue is not None: if self.proc is not None: attempts = 0 while self.proc.is_alive: if attempts > 5: break time.sleep(1) attempts += 1 self.queue.put(None) self.logger_p.join() if isinstance(self.wallet, SlimWallet): self.wallet.stop() super().stop() @property def wallet_url(self) -> str: return self._build_url(self.wallet_port) @property def data_node_rest_url(self) -> str: return self._build_url(self.data_node_rest_port) @property def data_node_grpc_url(self) -> str: return self._build_url(self.data_node_grpc_port, prefix="") @property def faucet_url(self) -> str: return self._build_url(self.faucet_port) @property def vega_node_url(self) -> str: return self._build_url(self.vega_node_port) @property def vega_node_grpc_url(self) -> str: return self._build_url(self.vega_node_grpc_port, prefix="") def clone(self) -> VegaServiceNull: """Creates a clone of the service without the handle to other processes. This is required as when spinning a Nullchain service out into separate processes we need to start the various components in the main thread (as daemon processes cannot spawn daemon processes), however want to maintain a handle to these in the child. """ return VegaServiceNull( self.vega_path, self.data_node_path, self.vega_wallet_path, start_immediately=False, port_config=self._generate_port_config(), use_full_vega_wallet=self._use_full_vega_wallet, warn_on_raw_data_access=self.warn_on_raw_data_access, )
vegaprotocol/vega-market-sim
vega_sim/null_service.py
null_service.py
py
36,719
python
en
code
19
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 43, "usage_type": "call" }, { "api_name": "collections.namedtuple", "line_number": 45, "usage_type": "call" }, { "api_name": "enum.Enum", "line_number": 52, "usage_type": "name" }, { "api_name": "enum.auto", "l...
43079348194
from typing import List class Solution: def factorial(self, n: int) -> int: fact = 1 for i in range(1, n+1): fact *= i return fact def combination(self, n: int, r: int) -> int: return self.factorial(n) // (self.factorial(n-r) * self.factorial(r)) def generate(self, numRows: int) -> List[List[int]]: pascal_triangles = list() for i in range(numRows): inner_list = [] for j in range(i+1): inner_list.append(self.combination(i,j)) pascal_triangles.append(inner_list) return pascal_triangles print(Solution().generate(5))
devKhush/DSALeetCodeProblems_Python
Pascal's Triangle/GeneratePascalTriangle.py
GeneratePascalTriangle.py
py
659
python
en
code
0
github-code
6
[ { "api_name": "typing.List", "line_number": 15, "usage_type": "name" } ]
73816585466
from collections import defaultdict
from typing import ClassVar, Self

from discord import Message, Thread
from discord.errors import HTTPException
from pydis_core.utils import scheduling
from pydis_core.utils.logging import get_logger

import bot
from bot.constants import Channels
from bot.exts.filtering._filter_context import Event, FilterContext
from bot.exts.filtering._settings_types.settings_entry import ActionEntry
from bot.exts.filtering._utils import FakeContext
from bot.utils.messages import send_attachments

log = get_logger(__name__)

# Infraction reason shown to users whose nickname is superstarified.
SUPERSTAR_REASON = (
    "Your nickname was found to be in violation of our code of conduct. "
    "If you believe this is a mistake, please let us know."
)


async def upload_messages_attachments(ctx: FilterContext, messages: list[Message]) -> None:
    """Re-upload the messages' attachments for future logging."""
    if not messages:
        return
    destination = messages[0].guild.get_channel(Channels.attachment_log)
    for message in messages:
        # Skip messages whose attachments were already uploaded for this context.
        if message.attachments and message.id not in ctx.uploaded_attachments:
            ctx.uploaded_attachments[message.id] = await send_attachments(message, destination, link_large=False)


class RemoveContext(ActionEntry):
    """A setting entry which tells whether to delete the offending message(s)."""

    name: ClassVar[str] = "remove_context"
    description: ClassVar[str] = (
        "A boolean field. If True, the filter being triggered will cause the offending context to be removed. "
        "An offending message will be deleted, while an offending nickname will be superstarified."
    )

    # Whether the offending context should actually be removed when triggered.
    remove_context: bool

    async def action(self, ctx: FilterContext) -> None:
        """Remove the offending context (message, nickname, or thread) per the event type."""
        if not self.remove_context:
            return

        if ctx.event in (Event.MESSAGE, Event.MESSAGE_EDIT):
            await self._handle_messages(ctx)
        elif ctx.event == Event.NICKNAME:
            await self._handle_nickname(ctx)
        elif ctx.event == Event.THREAD_NAME:
            await self._handle_thread(ctx)

    @staticmethod
    async def _handle_messages(ctx: FilterContext) -> None:
        """Delete any messages involved in this context."""
        if not ctx.message or not ctx.message.guild:
            return

        # If deletion somehow fails at least this will allow scheduling for deletion.
        ctx.messages_deletion = True

        # Batch deletions per channel; sets because duplicates will cause
        # batch deletion to fail.
        channel_messages = defaultdict(set)
        for message in {ctx.message} | ctx.related_messages:
            channel_messages[message.channel].add(message)

        success = fail = 0
        deleted = list()
        for channel, messages in channel_messages.items():
            try:
                await channel.delete_messages(messages)
            except HTTPException:
                # Whole batch counts as failed if the bulk delete is rejected.
                fail += len(messages)
            else:
                success += len(messages)
                deleted.extend(messages)

        # Fire-and-forget: attachment re-upload happens in the background.
        scheduling.create_task(upload_messages_attachments(ctx, deleted))

        # Summarise the outcome for the mod-alert description.
        if not fail:
            if success == 1:
                ctx.action_descriptions.append("deleted")
            else:
                ctx.action_descriptions.append("deleted all")
        elif not success:
            if fail == 1:
                ctx.action_descriptions.append("failed to delete")
            else:
                ctx.action_descriptions.append("all failed to delete")
        else:
            ctx.action_descriptions.append(f"{success} deleted, {fail} failed to delete")

    @staticmethod
    async def _handle_nickname(ctx: FilterContext) -> None:
        """Apply a superstar infraction to remove the user's nickname."""
        alerts_channel = bot.instance.get_channel(Channels.mod_alerts)
        if not alerts_channel:
            log.error(f"Unable to apply superstar as the context channel {alerts_channel} can't be found.")
            return

        command = bot.instance.get_command("superstar")
        if not command:
            # The superstar extension isn't loaded; alert mods instead of failing silently.
            user = ctx.author
            await alerts_channel.send(f":warning: Could not apply superstar to {user.mention}: command not found.")
            log.warning(f":warning: Could not apply superstar to {user.mention}: command not found.")
            ctx.action_descriptions.append("failed to superstar")
            return

        # Invoke the command as if a moderator ran it in the alerts channel.
        await command(FakeContext(ctx.message, alerts_channel, command), ctx.author, None, reason=SUPERSTAR_REASON)
        ctx.action_descriptions.append("superstarred")

    @staticmethod
    async def _handle_thread(ctx: FilterContext) -> None:
        """Delete the context thread."""
        if isinstance(ctx.channel, Thread):
            try:
                await ctx.channel.delete()
            except HTTPException:
                ctx.action_descriptions.append("failed to delete thread")
            else:
                ctx.action_descriptions.append("deleted thread")

    def union(self, other: Self) -> Self:
        """Combines two actions of the same type. Each type of action is executed once per filter."""
        # Removal wins: if either entry wants the context removed, remove it.
        return RemoveContext(remove_context=self.remove_context or other.remove_context)
python-discord/bot
bot/exts/filtering/_settings_types/actions/remove_context.py
remove_context.py
py
5,173
python
en
code
1,206
github-code
6
[ { "api_name": "pydis_core.utils.logging.get_logger", "line_number": 16, "usage_type": "call" }, { "api_name": "bot.exts.filtering._filter_context.FilterContext", "line_number": 24, "usage_type": "name" }, { "api_name": "discord.Message", "line_number": 24, "usage_type": "...
5093704747
"""empty message Revision ID: b3ff59df2833 Revises: fee4d1b1d192 Create Date: 2022-04-08 07:33:52.082355 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql # revision identifiers, used by Alembic. revision = 'b3ff59df2833' down_revision = 'fee4d1b1d192' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('product', 'image', existing_type=mysql.VARCHAR(length=200), type_=sa.String(length=20000), existing_nullable=True) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('product', 'image', existing_type=sa.String(length=20000), type_=mysql.VARCHAR(length=200), existing_nullable=True) # ### end Alembic commands ###
sudiptob2/microserve-main
migrations/versions/b3ff59df2833_.py
b3ff59df2833_.py
py
934
python
en
code
1
github-code
6
[ { "api_name": "alembic.op.alter_column", "line_number": 21, "usage_type": "call" }, { "api_name": "alembic.op", "line_number": 21, "usage_type": "name" }, { "api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 22, "usage_type": "call" }, { "api_name": "...
42896269372
"""Particle-Gibbs (cSMC) kernel invariance tests.

First checks a flat potential — where the posterior is exactly the AR(1)
prior — then (in the companion test below) a linear-Gaussian SSM.
"""
import jax
import numpy as np
import numpy.testing as npt
import pytest
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf

from .common import GaussianDistribution, FlatPotential, FlatUnivariatePotential, GaussianDynamics, lgssm_data, \
    GaussianObservationPotential
from ..csmc import get_kernel


@pytest.fixture(scope="module", autouse=True)
def jax_config():
    # Force CPU so the test behaves identically regardless of attached devices.
    jax.config.update("jax_platform_name", "cpu")


@pytest.mark.parametrize("backward", [True, False])
def test_flat_potential(backward):
    # Test a flat potential, to check that we recover the prior.
    # The model is a stationary AR process with Gaussian noise.
    JAX_KEY = jax.random.PRNGKey(0)

    T = 5  # T time steps
    RHO = 0.9  # correlation

    N = 32  # use N particles in total
    M = 50_000  # get M - B samples from the particle Gibbs kernel
    B = M // 10  # Discard the first 10% of the samples

    M0 = GaussianDistribution(mu=0.0, sig=1.0)
    G0 = FlatUnivariatePotential()
    Gt = FlatPotential()
    Mt = GaussianDynamics(rho=RHO)

    init, kernel = get_kernel(M0, G0, Mt, Gt, N=N, backward=backward, Pt=Mt)

    init_key, key = jax.random.split(JAX_KEY)
    x0 = jax.random.normal(init_key, (T, 1))
    init_state = init(x0)

    def body(state, curr_key):
        # One particle-Gibbs sweep; record the sampled trajectory each step.
        state = kernel(curr_key, state)
        return state, (state.x, state.updated)

    _, (xs, ancestors) = jax.lax.scan(body, init_state, jax.random.split(key, M))

    # Drop burn-in and keep the scalar state dimension.
    xs = xs[B:, :, 0]

    # Diagnostic ACF plots for visual inspection of chain mixing.
    fig, axes = plt.subplots(ncols=2, figsize=(10, 5))
    fig.suptitle("Backward: {}".format(backward))
    plot_acf(xs[:, 0], ax=axes[0])
    axes[0].set_title("ACF of x_0")
    plot_acf(xs[:, T // 2], ax=axes[1])
    axes[1].set_title("ACF of x_T/2")
    plt.show()

    atol = 0.05
    cov = np.cov(xs, rowvar=False)
    cov = np.atleast_2d(cov)

    rows, cols = np.diag_indices_from(cov)
    cov_diag = cov[rows, cols]  # marginal variances
    sub_cov_diag = cov[rows[:-1], cols[1:]]  # Covariances between adjacent time steps

    # Under a flat potential the stationary AR(1) prior is the target:
    # zero mean, unit marginal variance, lag-1 covariance RHO.
    npt.assert_allclose(xs.mean(axis=0), 0., atol=atol)
    npt.assert_allclose(cov_diag, 1., atol=atol)
    npt.assert_allclose(sub_cov_diag, RHO, atol=atol)
@pytest.mark.parametrize("backward", [True, False]) def test_lgssm(backward): # Test a LGSSM model test JAX_KEY = jax.random.PRNGKey(0) T = 25 # T time steps RHO = 0.9 # correlation SIG_Y = 0.1 # observation noise data_key, init_key, key = jax.random.split(JAX_KEY, 3) true_xs, true_ys = lgssm_data(data_key, RHO, SIG_Y, T) N = 32 # use N particles in total M = 50_000 # get M - B samples from the particle Gibbs kernel B = M // 10 # Discard the first 10% of the samples M0 = GaussianDistribution(mu=0.0, sig=1.0) G0 = GaussianDistribution(mu=true_ys[0], sig=SIG_Y) Gt = GaussianObservationPotential(params=true_ys[1:], sig=SIG_Y) Mt = GaussianDynamics(rho=RHO) init, kernel = get_kernel(M0, G0, Mt, Gt, N=N, backward=backward, Pt=Mt) x0 = jax.random.normal(init_key, (T, 1)) init_state = init(x0) def body(state, curr_key): state = kernel(curr_key, state) return state, (state.x, state.updated) _, (xs, ancestors) = jax.lax.scan(body, init_state, jax.random.split(key, M)) xs = xs[B:, :, 0] fig, axes = plt.subplots(ncols=3, figsize=(15, 5)) fig.suptitle("Backward: {}".format(backward)) plot_acf(xs[:, 0], ax=axes[0]) axes[0].set_title("ACF of x_0") plot_acf(xs[:, T // 2], ax=axes[1]) axes[1].set_title("ACF of x_T/2") plot_acf(xs[:, -1], ax=axes[2]) axes[2].set_title("ACF of x_T") plt.show() print(xs.mean(axis=0)) print(xs.std(axis=0))
AdrienCorenflos/aux-ssm-samplers
aux_samplers/_primitives/test_csmc/test_csmc.py
test_csmc.py
py
3,688
python
en
code
7
github-code
6
[ { "api_name": "jax.config.update", "line_number": 15, "usage_type": "call" }, { "api_name": "jax.config", "line_number": 15, "usage_type": "attribute" }, { "api_name": "pytest.fixture", "line_number": 13, "usage_type": "call" }, { "api_name": "jax.random.PRNGKey",...
45364274546
import pygame

from Game.Scenes.Scene import *
from Game.Shared import GameConstant
from Game import Highscore


class HighscoreScene(Scene):
    """Scene showing the saved high-score table; F1 restarts the game."""

    def __init__(self, game):
        """Load the high-score banner sprite, pre-scaled once to 276x164."""
        super(HighscoreScene, self).__init__(game)
        self.__highScoreSprite = pygame.transform.scale(pygame.image.load(GameConstant.SPRITE_HIGHSCORE), (276, 164))

    def render(self):
        """Draw the banner, one row per (name, score) pair, and the restart hint."""
        self.getGame().screen.blit(self.__highScoreSprite, (50, 50))
        self.clearText()
        highscore = Highscore()
        x = 350
        y = 100
        # score is a (name, points) pair; rows are spaced 30px apart.
        for score in highscore.getScores():
            self.addText(score[0], x, y, size=30)
            self.addText(str(score[1]), x + 200, y, size=30)
            y += 30
        self.addText("Press F1 to start The Game", 50, 300, size=30)
        super(HighscoreScene, self).render()

    def handleEvents(self, events):
        """Handle window-close events and the F1 restart shortcut."""
        super(HighscoreScene, self).handleEvents(events)
        for event in events:
            # NOTE(review): keyboard state is polled per event rather than per
            # frame — works, but redundant when several events are queued.
            keys = pygame.key.get_pressed()
            if event.type == pygame.QUIT:
                quit()
            if keys[pygame.K_F1]:
                # Restart: reset game state and switch back to the playing scene.
                self.getGame().reset()
                self.getGame().changeScene(GameConstant.PLAYING_SCENE)
grapeJUICE1/Grape-Bricks
Game/Scenes/HighScoreScene.py
HighScoreScene.py
py
1,256
python
en
code
7
github-code
6
[ { "api_name": "pygame.transform.scale", "line_number": 10, "usage_type": "call" }, { "api_name": "pygame.transform", "line_number": 10, "usage_type": "attribute" }, { "api_name": "pygame.image.load", "line_number": 10, "usage_type": "call" }, { "api_name": "pygame...
10423288883
"""Tests for NodeDetailsPopup: round-tripping database nodes through the dialog."""
from __future__ import annotations

import pytest
from PySide6.QtCore import Qt

from randovania.game_description.db.configurable_node import ConfigurableNode
from randovania.game_description.db.dock_node import DockNode
from randovania.game_description.db.event_node import EventNode
from randovania.game_description.db.hint_node import HintNode
from randovania.game_description.db.node import GenericNode
from randovania.game_description.db.pickup_node import PickupNode
from randovania.game_description.db.teleporter_network_node import TeleporterNetworkNode
from randovania.gui.dialog.node_details_popup import NodeDetailsPopup


@pytest.mark.parametrize(
    "node_type",
    [
        GenericNode,
        DockNode,
        PickupNode,
        EventNode,
        ConfigurableNode,
        HintNode,
    ],
)
def test_unchanged_create_new_node_echoes(skip_qtbot, echoes_game_description, node_type):
    # Opening the popup on a node and immediately re-creating it must
    # round-trip to an identical node (no field lost in the widgets).
    node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, node_type))
    dialog = NodeDetailsPopup(echoes_game_description, node)
    skip_qtbot.addWidget(dialog)

    # Run
    new_node = dialog.create_new_node()

    # Assert
    assert node == new_node


@pytest.mark.parametrize(
    "node_type",
    [
        TeleporterNetworkNode,
    ],
)
def test_unchanged_create_new_node_corruption(skip_qtbot, corruption_game_description, node_type):
    # Same round-trip check for node types that only exist in Corruption.
    node = next(node for node in corruption_game_description.region_list.iterate_nodes() if isinstance(node, node_type))
    dialog = NodeDetailsPopup(corruption_game_description, node)
    skip_qtbot.addWidget(dialog)

    # Run
    new_node = dialog.create_new_node()

    # Assert
    assert node == new_node


def test_change_incompatible_dock_list(skip_qtbot, echoes_game_description):
    """Exercise the editable incompatible-weakness list model of a DockNode."""
    node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, DockNode))
    dialog = NodeDetailsPopup(echoes_game_description, node)
    skip_qtbot.addWidget(dialog)
    model = dialog.dock_incompatible_model

    # The trailing placeholder row displays "New..." and has no edit value yet.
    m = model.index(0)
    assert model.data(m, Qt.ItemDataRole.WhatsThisRole) is None
    assert model.data(m, Qt.ItemDataRole.DisplayRole) == "New..."
    assert model.data(m, Qt.ItemDataRole.EditRole) == ""

    # Writing via DisplayRole must be rejected; only EditRole commits a value.
    assert not model.setData(m, "Normal Door", Qt.ItemDataRole.DisplayRole)
    assert model.data(m, Qt.ItemDataRole.DisplayRole) == "New..."

    assert model.setData(m, "Normal Door", Qt.ItemDataRole.EditRole)
    assert model.data(m, Qt.ItemDataRole.DisplayRole) == "Normal Door"

    # The committed weakness shows up in the node created by the dialog.
    result = dialog.create_new_node()
    assert isinstance(result, DockNode)
    assert [w.name for w in result.incompatible_dock_weaknesses] == ["Normal Door"]

    # Removing the row empties the list again.
    assert model.removeRow(0, m)
    assert model.data(m, Qt.ItemDataRole.EditRole) == ""

    result = dialog.create_new_node()
    assert isinstance(result, DockNode)
    assert [w.name for w in result.incompatible_dock_weaknesses] == []


def test_on_pickup_index_button_generic(skip_qtbot, echoes_game_description):
    # For a non-pickup node the button picks the next free pickup index.
    # NOTE(review): 119 is presumably the first unused index in the Echoes
    # database at the time of writing — confirm it tracks database changes.
    node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, GenericNode))
    dialog = NodeDetailsPopup(echoes_game_description, node)
    skip_qtbot.addWidget(dialog)
    dialog.on_pickup_index_button()
    assert dialog.pickup_index_spin.value() == 119


def test_on_pickup_index_button_pickup(skip_qtbot, echoes_game_description):
    # For an existing PickupNode the button keeps the node's own index.
    node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, PickupNode))
    dialog = NodeDetailsPopup(echoes_game_description, node)
    skip_qtbot.addWidget(dialog)
    dialog.on_pickup_index_button()
    assert dialog.pickup_index_spin.value() == node.pickup_index.index


def test_on_dock_update_name_button(skip_qtbot, blank_game_description):
    # The update-name button regenerates the canonical dock name, discarding
    # whatever the user typed into the name field.
    node = next(node for node in blank_game_description.region_list.iterate_nodes() if isinstance(node, DockNode))
    dialog = NodeDetailsPopup(blank_game_description, node)
    skip_qtbot.addWidget(dialog)
    dialog.name_edit.setText("Weird Name")

    # Run
    assert dialog.name_edit.text() == "Weird Name"
    dialog.on_dock_update_name_button()
    assert dialog.name_edit.text() == node.name
randovania/randovania
test/gui/dialog/test_node_details_popup.py
test_node_details_popup.py
py
4,199
python
en
code
165
github-code
6
[ { "api_name": "randovania.gui.dialog.node_details_popup.NodeDetailsPopup", "line_number": 29, "usage_type": "call" }, { "api_name": "pytest.mark.parametrize", "line_number": 16, "usage_type": "call" }, { "api_name": "pytest.mark", "line_number": 16, "usage_type": "attribu...
37198526566
# _*_ coding: utf-8 _*_
# @author: anniequ
# @file: datapre.py
# @time: 2020/11/12 11:07
# @Software: PyCharm
#
# Trains an FCN (fully convolutional network) built on resnet34 for semantic
# segmentation of PASCAL VOC 2012, then plots metrics and sample predictions.
import os
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as tfs
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as f
import torchvision
from torch.autograd import Variable
import torchvision.models as models
from datetime import datetime

voc_root = os.path.join("data", "VOC2012")
np.seterr(divide='ignore', invalid='ignore')


# Read the train/val split files and return image and label path lists.
def read_img(root=voc_root, train=True):
    txt_frame = root + '/ImageSets/Segmentation/' + ('train.txt' if train else 'val.txt')
    with open(txt_frame, 'r') as f:
        images = f.read().split()
    data = [os.path.join(root, 'JPEGImages', i + '.jpg') for i in images]
    label = [os.path.join(root, 'SegmentationClass', i + '.png') for i in images]
    return data, label


# Images vary in size: crop data and label together to the same window.
def crop(data, label, height, width):
    'data and label both are Image object'
    # Top-left crop of (width, height).
    box = (0, 0, width, height)
    data = data.crop(box)
    label = label.crop(box)
    return data, label


# Class names of the VOC dataset.
classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
           'dog', 'horse', 'motorbike', 'person', 'potted plant',
           'sheep', 'sofa', 'train', 'tv/monitor']

# RGB colour associated with each class label above (same order).
colormap = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
            [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],
            [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128],
            [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0],
            [0, 192, 0], [128, 192, 0], [0, 64, 128]]

# Lookup table from packed RGB value to class index.
cm2lbl = np.zeros(256 ** 3)

# i is the class index, cm is its (R, G, B) triple.
for i, cm in enumerate(colormap):
    cm2lbl[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i  # store the class index under the packed RGB key


# Convert a colour label image to a per-pixel class-index map.
def image2label(im):
    data = np.array(im, dtype="int32")
    idx = (data[:, :, 0] * 256 + data[:, :, 1]) * 256 + data[:, :, 2]
    return np.array(cm2lbl[idx], dtype="int64")


def image_transforms(data, label, height, width):
    """Crop, tensorise and normalise an (image, label) pair."""
    data, label = crop(data, label, height, width)
    # Convert to tensor and normalise with the ImageNet statistics.
    im_tfs = tfs.Compose([
        tfs.ToTensor(),
        tfs.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    data = im_tfs(data)
    label = image2label(label)
    label = torch.from_numpy(label)
    return data, label


class VOCSegDataset(torch.utils.data.Dataset):
    """VOC segmentation dataset that filters out images smaller than the crop size."""

    # Constructor
    def __init__(self, train, height, width, transforms=image_transforms):
        self.height = height
        self.width = width
        self.fnum = 0  # counts how many images are filtered out
        self.transforms = transforms
        data_list, label_list = read_img(train=train)
        self.data_list = self._filter(data_list)
        self.label_list = self._filter(label_list)
        if train == True:
            print("训练集:加载了 " + str(len(self.data_list)) + " 张图片和标签" + ",过滤了" + str(self.fnum) + "张图片")
        else:
            print("测试集:加载了 " + str(len(self.data_list)) + " 张图片和标签" + ",过滤了" + str(self.fnum) + "张图片")

    # Drop images whose height < self.height or width < self.width.
    def _filter(self, images):
        img = []
        for im in images:
            if (Image.open(im).size[1] >= self.height and
                    Image.open(im).size[0] >= self.width):
                img.append(im)
            else:
                self.fnum = self.fnum + 1
        return img

    # __getitem__ makes the dataset indexable (and thus iterable).
    def __getitem__(self, idx):
        img = self.data_list[idx]
        label = self.label_list[idx]
        img = Image.open(img)
        label = Image.open(label).convert('RGB')
        img, label = self.transforms(img, label, self.height, self.width)
        return img, label

    def __len__(self):
        return len(self.data_list)


height = 224
width = 224
voc_train = VOCSegDataset(True, height, width)
voc_test = VOCSegDataset(False, height, width)

train_data = DataLoader(voc_train, batch_size=8, shuffle=True)
valid_data = DataLoader(voc_test, batch_size=8)


# Below we build an FCN network based on resnet34.

# Build a bilinear-interpolation kernel used to initialise transposed convolutions.
def bilinear_kernel(in_channels, out_channels, kernel_size):
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * \
           (1 - abs(og[1] - center) / factor)
    # Only the "diagonal" (i == j) channel pairs get the bilinear filter.
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype='float32')
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(np.array(weight))


# Load the pretrained resnet34 weights from a local checkpoint.
model_root = "./model/resnet34-333f7ec4.pth"
pretrained_net = models.resnet34(pretrained=False)
pre = torch.load(model_root)
pretrained_net.load_state_dict(pre)

# Total number of classes.
num_classes = len(classes)
print(num_classes)


class fcn(nn.Module):
    """FCN head over resnet34: fuse stage features and upsample to input size."""

    def __init__(self, num_classes):
        super(fcn, self).__init__()

        # Stage 1: 128 channels, 28x28 feature map (for a 224x224 input).
        self.stage1 = nn.Sequential(*list(pretrained_net.children())[:-4])
        # Stage 2: 256 channels, 14x14 feature map.
        self.stage2 = list(pretrained_net.children())[-4]
        # Stage 3: 512 channels, 7x7 feature map.
        self.stage3 = list(pretrained_net.children())[-3]

        # Three 1x1 convolutions projecting each stage to per-class scores.
        self.scores1 = nn.Conv2d(512, num_classes, 1)
        self.scores2 = nn.Conv2d(256, num_classes, 1)
        self.scores3 = nn.Conv2d(128, num_classes, 1)

        # Upsample the score map 8x back to the input resolution.
        self.upsample_8x = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=16, stride=8, padding=4,
                                              bias=False)
        self.upsample_8x.weight.data = bilinear_kernel(num_classes, num_classes, 16)  # bilinear kernel init

        # NOTE(review): despite the name, stride=2 means this doubles (not
        # quadruples) the spatial size per call; same for upsample_2x below.
        self.upsample_4x = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=4, stride=2, padding=1,
                                              bias=False)
        self.upsample_4x.weight.data = bilinear_kernel(num_classes, num_classes, 4)  # bilinear kernel init

        self.upsample_2x = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=4, stride=2, padding=1,
                                              bias=False)
        self.upsample_2x.weight.data = bilinear_kernel(num_classes, num_classes, 4)  # bilinear kernel init

    def forward(self, x):
        x = self.stage1(x)
        s1 = x  # 224/8 = 28
        x = self.stage2(x)
        s2 = x  # 224/16 = 14
        x = self.stage3(x)
        s3 = x  # 224/32 = 7

        s3 = self.scores1(s3)  # project to class scores
        s3 = self.upsample_2x(s3)  # upsample 7 -> 14
        s2 = self.scores2(s2)
        s2 = s2 + s3  # fuse at 14x14

        s1 = self.scores3(s1)
        s2 = self.upsample_4x(s2)  # upsample 14 -> 28
        s = s1 + s2  # fuse at 28x28
        # NOTE(review): this upsamples `s2`, not `s`, so the s1 skip
        # connection computed on the previous line is discarded — likely
        # `self.upsample_8x(s)` was intended. Confirm before fixing.
        s = self.upsample_8x(s2)  # 8x upsample back to 224x224
        return s  # per-class score map


# Compute the confusion matrix between true and predicted labels.
def _fast_hist(label_true, label_pred, n_class):
    # mask selects pixels whose true label is a valid class index;
    # label_true[mask] keeps only the elements where mask is True.
    mask = (label_true >= 0) & (label_true < n_class)
    # np.bincount counts occurrences of each value.
    """
    hist is a confusion matrix.
    It is a 2-D array indexed as hist[label_true][label_pred]:
    the entry counts how many pixels of true class `row` were predicted
    as class `column`. For example hist[0][1] is the number of pixels of
    class 1 that were predicted as class 0 (per the original convention);
    the diagonal holds the correctly predicted pixel counts.
    `n_class * label_true[mask].astype(int) + label_pred[mask]` flattens
    each (true, pred) pair into a single index, and np.bincount counts
    how often each index occurs before reshaping back to a square matrix.
    """
    hist = np.bincount(
        n_class * label_true[mask].astype(int) +
        label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
    return hist


"""
label_trues  ground-truth label maps
label_preds  label maps predicted by the model
n_class      number of classes in the dataset
"""


def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.

      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = np.zeros((n_class, n_class))
    # A batch may contain several label maps; accumulate them all.
    for lt, lp in zip(label_trues, label_preds):
        # numpy.ndarray.flatten turns each map into a 1-D array.
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    # np.diag extracts the diagonal; np.sum(axis=1) sums each row.
    """
    acc       overall pixel accuracy = correct pixels / all pixels
    acc_cls   per-class accuracy (row i = accuracy of class i), then averaged
    iu        per-class intersection-over-union (recall-like ratio)
    mean_iu   mean of iu over classes
    freq      how often each class occurs in the ground truth
    fwavacc   frequency-weighted IU
    """
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    # nanmean ignores NaN entries (classes absent from the batch).
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc


net = fcn(num_classes)
if torch.cuda.is_available():
    net = net.cuda()
criterion = nn.NLLLoss()
basic_optim = torch.optim.SGD(net.parameters(), lr=1e-2, weight_decay=1e-4)
optimizer = basic_optim

# Train the network.
EPOCHES = 20
# Metrics collected during training.
train_loss = []
train_acc = []
train_acc_cls = []
train_mean_iu = []
train_fwavacc = []
# Metrics collected during validation.
eval_loss = []
eval_acc = []
eval_acc_cls = []
eval_mean_iu = []
eval_fwavacc = []
for e in range(EPOCHES):
    _train_loss = 0
    _train_acc = 0
    _train_acc_cls = 0
    _train_mean_iu = 0
    _train_fwavacc = 0

    prev_time = datetime.now()
    net = net.train()
    for img_data, img_label in train_data:
        # NOTE(review): `torch.cuda.is_available` is not called here (missing
        # parentheses), so this branch is always taken — on a CPU-only machine
        # the .cuda() calls will fail. The validation loop below calls it
        # correctly; confirm and align.
        if torch.cuda.is_available:
            im = Variable(img_data).cuda()
            label = Variable(img_label).cuda()
        else:
            im = Variable(img_data)
            label = Variable(img_label)
        # Forward pass (log-softmax to pair with NLLLoss).
        out = net(im)
        out = f.log_softmax(out, dim=1)
        loss = criterion(out, label)
        # Backward pass.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        _train_loss += loss.item()

        # label_pred is a 21x224x224 score volume per image: take the argmax
        # over the class dimension as the predicted label, then accumulate
        # the evaluation metrics.
        label_pred = out.max(dim=1)[1].data.cpu().numpy()
        label_true = label.data.cpu().numpy()
        for lbt, lbp in zip(label_true, label_pred):
            acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lbt, lbp, num_classes)
            _train_acc += acc
            _train_acc_cls += acc_cls
            _train_mean_iu += mean_iu
            _train_fwavacc += fwavacc

    # Record this epoch's training metrics.
    train_loss.append(_train_loss / len(train_data))
    train_acc.append(_train_acc / len(voc_train))
    train_acc_cls.append(_train_acc_cls)
    train_mean_iu.append(_train_mean_iu / len(voc_train))
    train_fwavacc.append(_train_fwavacc)

    net = net.eval()
    _eval_loss = 0
    _eval_acc = 0
    _eval_acc_cls = 0
    _eval_mean_iu = 0
    _eval_fwavacc = 0
    for img_data, img_label in valid_data:
        if torch.cuda.is_available():
            im = Variable(img_data).cuda()
            label = Variable(img_label).cuda()
        else:
            im = Variable(img_data)
            label = Variable(img_label)
        # forward
        out = net(im)
        out = f.log_softmax(out, dim=1)
        loss = criterion(out, label)
        _eval_loss += loss.item()
        label_pred = out.max(dim=1)[1].data.cpu().numpy()
        label_true = label.data.cpu().numpy()
        for lbt, lbp in zip(label_true, label_pred):
            acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lbt, lbp, num_classes)
            _eval_acc += acc
            _eval_acc_cls += acc_cls
            _eval_mean_iu += mean_iu
            _eval_fwavacc += fwavacc

    # Record this epoch's validation metrics.
    eval_loss.append(_eval_loss / len(valid_data))
    eval_acc.append(_eval_acc / len(voc_test))
    eval_acc_cls.append(_eval_acc_cls)
    eval_mean_iu.append(_eval_mean_iu / len(voc_test))
    eval_fwavacc.append(_eval_fwavacc)

    # Print this epoch's results.
    cur_time = datetime.now()
    h, remainder = divmod((cur_time - prev_time).seconds, 3600)
    m, s = divmod(remainder, 60)
    epoch_str = ('Epoch: {}, Train Loss: {:.5f}, Train Acc: {:.5f}, Train Mean IU: {:.5f}, \
Valid Loss: {:.5f}, Valid Acc: {:.5f}, Valid Mean IU: {:.5f} '.format(
        e, _train_loss / len(train_data), _train_acc / len(voc_train), _train_mean_iu / len(voc_train),
        _eval_loss / len(valid_data), _eval_acc / len(voc_test), _eval_mean_iu / len(voc_test)))
    time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
    print(epoch_str + time_str)

# Plot the training curves.
epoch = np.array(range(EPOCHES))
# NOTE(review): both curves below plot `train_loss`; the second one is
# labelled "valid_loss" and was presumably meant to plot `eval_loss`.
plt.plot(epoch, train_loss, label="train_loss")
plt.plot(epoch, train_loss, label="valid_loss")
plt.title("loss during training")
plt.legend()
plt.grid()
plt.show()

plt.plot(epoch, train_acc, label="train_acc")
plt.plot(epoch, eval_acc, label="valid_acc")
plt.title("accuracy during training")
plt.legend()
plt.grid()
plt.show()

plt.plot(epoch, train_mean_iu, label="train_mean_iu")
plt.plot(epoch, eval_mean_iu, label="valid_mean_iu")
plt.title("mean iu during training")
plt.legend()
plt.grid()
plt.show()

# Evaluate the model qualitatively.
# Save the model.
PATH = "./model/fcn-resnet34.pth"
torch.save(net.state_dict(), PATH)
# Load the model:
# model.load_state_dict(torch.load(PATH))

cm = np.array(colormap).astype('uint8')


# Run the model on one image and colourise prediction and label.
def predict(img, label):
    img = Variable(img.unsqueeze(0)).cuda()
    out = net(img)
    pred = out.max(1)[1].squeeze().cpu().data.numpy()
    # Map predicted class indices to their RGB colours.
    pred = cm[pred]
    # Convert the numpy array to a PIL image.
    pred = Image.fromarray(pred)
    label = cm[label.numpy()]
    return pred, label


size = 224
num_image = 10
_, figs = plt.subplots(num_image, 3, figsize=(12, 22))
for i in range(num_image):
    img_data, img_label = voc_test[i]
    pred, label = predict(img_data, img_label)
    img_data = Image.open(voc_test.data_list[i])
    img_label = Image.open(voc_test.label_list[i]).convert("RGB")
    img_data, img_label = crop(img_data, img_label, size, size)
    figs[i, 0].imshow(img_data)  # original image
    figs[i, 0].axes.get_xaxis().set_visible(False)  # hide x axis
    figs[i, 0].axes.get_yaxis().set_visible(False)  # hide y axis
    figs[i, 1].imshow(img_label)  # ground-truth label
    figs[i, 1].axes.get_xaxis().set_visible(False)  # hide x axis
    figs[i, 1].axes.get_yaxis().set_visible(False)  # hide y axis
    figs[i, 2].imshow(pred)  # model prediction
    figs[i, 2].axes.get_xaxis().set_visible(False)  # hide x axis
    figs[i, 2].axes.get_yaxis().set_visible(False)  # hide y axis
# Add column captions under the last row of images.
figs[num_image - 1, 0].set_title("Image", y=-0.2)
figs[num_image - 1, 1].set_title("Label", y=-0.2)
figs[num_image - 1, 2].set_title("fcns", y=-0.2)
Anniequ/FCNcopy
all.py
all.py
py
16,271
python
en
code
0
github-code
6
[ { "api_name": "os.path.join", "line_number": 26, "usage_type": "call" }, { "api_name": "os.path", "line_number": 26, "usage_type": "attribute" }, { "api_name": "numpy.seterr", "line_number": 27, "usage_type": "call" }, { "api_name": "torch.nn.functional", "lin...