seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
"""
Contains author & document records in dict() form for self-contained testing
"""
import contextlib
import copy
import time
from collections import defaultdict
from itertools import zip_longest
from unittest.mock import MagicMock
import path_finder
from cache.cache_buddy import CacheMiss, AUTHOR_VERSION_NUMBER, \
DOCUMENT_VERSION_NUMBER
from names.ads_name import ADSName
# Monkey-patch path_finder to recognize our bibcodes and ORCID IDs
def _mock_is_bibcode(value):
    """A mock bibcode is anything starting with 'paper'."""
    return value.startswith("paper")


def _mock_is_orcid_id(value):
    """A mock ORCID ID is anything containing the text 'ORCID'."""
    return "ORCID" in str(value)


def _mock_normalize_orcid_id(value):
    """Mock IDs are already normalized; return them unchanged."""
    return value


path_finder.is_bibcode = _mock_is_bibcode
path_finder.is_orcid_id = _mock_is_orcid_id
path_finder.normalize_orcid_id = _mock_normalize_orcid_id
r"""
The authorship graph:
D -- J -- I
| |
K -- A == B == C == F -- H
| | \\ //
L E ---- G
"""
# Single timestamp captured at import time so every mock record agrees.
TIME = int(time.time())
# Boilerplate fields merged into every document record below.
empty_document = {
    'doctype': 'article', 'keywords': [],
    'publication': 'mock', 'pubdate': 'never',
    'citation_count': 0, 'read_count': 0,
    'timestamp': TIME, 'version': DOCUMENT_VERSION_NUMBER}
# One record per edge of the authorship graph drawn above; the dict keys
# double as bibcodes.  'orcid_ids' aligns positionally with 'authors'
# (shorter lists leave the remaining authors without an ORCID), and
# 'orcid_id_src' mirrors the real cache's comma-separated provenance codes.
documents = {
    'paperAB': {
        'title': 'Paper Linking A & B',
        'authors': ['Author, A.', 'Author, Bbb'],
        'affils': ['Univ of A', 'B Center'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperAB2': {
        'title': 'Second Paper Linking A & B',
        'authors': ['Author, B.', 'Author, Aaa'],
        'affils': ['Univ of B', 'A Institute'],
        'orcid_ids': ['ORCID B'],
        'orcid_id_src': '3',
        **empty_document
    },
    'paperAE': {
        'title': 'Paper Linking A & E',
        'authors': ['Author, Aaa', 'Author, Eee E.'],
        'affils': ['A Institute', 'E Center for E'],
        'orcid_ids': ['ORCID A'],
        'orcid_id_src': '1',
        **empty_document
    },
    'paperAK': {
        'title': 'Paper Linking A & K',
        'authors': ['Author, Aaa', 'Author, K.'],
        'affils': ['A Institute', 'K Center for K'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperBC': {
        'title': 'Paper Linking B & C',
        'authors': ['Author, C.', 'Author, B.'],
        'affils': ['University of C', 'Univ of B'],
        'orcid_ids': ['', 'ORCID B'],
        'orcid_id_src': '0,1',
        **empty_document
    },
    'paperBCG': {
        'title': 'Paper Linking B, C & G',
        'authors': ['Author, Bbb', 'Author, C. C.', 'Author, G.'],
        'affils': ['B Institute', 'Univ. C', 'G Center for G'],
        # Deliberately NOT B's real ORCID, to exercise mismatch handling.
        'orcid_ids': ['Not ORCID B'],
        'orcid_id_src': '1',
        **empty_document
    },
    'paperBD': {
        'title': 'Paper Linking B & D',
        'authors': ['Author, B.', 'Author, D.'],
        'affils': ['B Institute', 'D Center for D'],
        'orcid_ids': ['ORCID B', 'ORCID D'],
        'orcid_id_src': '1,1',
        **empty_document
    },
    'paperBG': {
        'title': 'Paper Linking B & G',
        'authors': ['Author, Bbb', 'Author, G.'],
        'affils': ['B Institute', 'G Center for G'],
        'orcid_ids': ['ORCID B'],
        'orcid_id_src': '1',
        **empty_document
    },
    'paperCF': {
        'title': 'Paper Linking C & F',
        'authors': ['Author, C.', 'Author, F.'],
        'affils': ['C Institute', 'F Center for F'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperCF2': {
        'title': 'Second Paper Linking C & F',
        'authors': ['Author, C.', 'Author, F.'],
        'affils': ['C University', 'F Center for F'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperCG': {
        'title': 'Paper Linking C & G',
        'authors': ['Author, C.', 'Author, G.'],
        'affils': ['C Institute', 'G Center for G at Gtown'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperDJ': {
        'title': 'Paper Linking D & J',
        'authors': ['Author, D.', 'Author, J. J.'],
        'affils': ['D Institute', 'J Institute, U. J. @ Jtown'],
        # NOTE(review): 'ORCID E' is attached to author J here -- presumably
        # intentional fixture data for cross-matching tests; confirm.
        'orcid_ids': ['', 'ORCID E'],
        'orcid_id_src': '0,2',
        **empty_document
    },
    'paperEG': {
        'title': 'Paper Linking E & G',
        'authors': ['Author, Eee E.', 'Author, G.'],
        'affils': ['E Institute', 'G Center for G, Gtown'],
        'orcid_ids': ['ORCID E'],
        'orcid_id_src': '3',
        **empty_document
    },
    'paperFH': {
        'title': 'Paper Linking F & H',
        'authors': ['Author, F.', 'Author, H.'],
        'affils': ['F Institute | Fville', 'H Center for H'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperFI': {
        'title': 'Paper Linking F & I',
        'authors': ['Author, F.', 'Author, I.'],
        'affils': ['F Institute, Fville, Fstate, 12345', 'I Center for I'],
        'orcid_ids': ['', 'ORCID I'],
        'orcid_id_src': '0,3',
        **empty_document
    },
    'paperIJ': {
        'title': 'Paper Linking J & I',
        'authors': ['Author, J. J.', 'Author, I.'],
        'affils': ['J Center, University of J, Other town', 'I Center for I'],
        'orcid_ids': ['', 'ORCID I'],
        'orcid_id_src': '0,2',
        **empty_document
    },
    'paperKL': {
        'title': 'Paper Linking K & L',
        'authors': ['Author, L.', 'Author, K.'],
        'affils': ['L Institute', 'K Center for K'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperKL2': {
        'title': "Paper Linking K and two L's",
        'authors': ['Author, L.', 'Author, L. L.', 'Author, K.'],
        'affils': ['L Institute', 'L U', 'K Center for K'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
    'paperUncon': {
        'title': 'Paper Linking Uncon1 & Uncon2',
        'authors': ['author, unconnected b.', 'author, unconnected a.'],
        'affils': ['B Institute', 'A Center for A'],
        'orcid_ids': [],
        'orcid_id_src': '',
        **empty_document
    },
}
# Every author name that appears anywhere in the mock corpus.
authors = set()
for doc in documents.values():
    authors.update(doc['authors'])

# The real cache stores each record's bibcode on the record itself.
for bibcode in documents:
    documents[bibcode]['bibcode'] = bibcode
def refresh():
    """No-op: there is no backing store to refresh in the mock cache."""
    return None
# Document writes are recorded but otherwise ignored; both entry points
# share one mock so tests can inspect calls through either name.
store_documents = store_document = MagicMock()
def delete_document(*_args, **_kwargs):
    """The mock cache is read-only: any delete attempt is a test failure."""
    raise RuntimeError("Should not delete from mock cache")
def load_document(key):
    """Return a defensive deep copy of the stored document record.

    Raises CacheMiss when the bibcode is unknown.
    """
    try:
        record = documents[key]
    except KeyError:
        raise CacheMiss(key)
    return copy.deepcopy(record)
def load_documents(keys):
    """Bulk form of load_document; preserves input order."""
    return list(map(load_document, keys))
# Author writes are swallowed by a mock; deletes reuse the document guard.
store_author = MagicMock()
delete_author = delete_document
def author_is_in_cache(key):
    """Report whether load_author would succeed for this key."""
    try:
        load_author(key)
    except CacheMiss:
        return False
    return True
def authors_are_in_cache(keys):
    """Bulk form of author_is_in_cache; preserves input order."""
    return list(map(author_is_in_cache, keys))
def load_author(key):
    """Synthesize an author record on the fly from the mock `documents`.

    `key` may be a name (parsed with ADSName) or an ORCID ID (anything
    containing "ORCID").  Raises CacheMiss for keys starting with the
    '<', '>' or '=' specificity modifiers and for keys matching no
    documents, unless the key ends with "nodocs".
    """
    # Modifier-prefixed names are deliberately never present in this cache.
    if key[0] in '<>=':
        raise CacheMiss(key)
    orcid = "ORCID" in key
    if orcid:
        # The display name is discovered from matching documents below.
        name = None
    else:
        name = ADSName.parse(key)
    docs = []
    coauthors = defaultdict(list)
    appears_as = defaultdict(list)
    for bibcode, document in documents.items():
        matched = None
        # Go through the document's authors until/if we find our search author
        # (zip_longest pads missing ORCID entries with None).
        for orcid_id, author in zip_longest(
                document['orcid_ids'], document['authors']):
            if orcid and orcid_id == key:
                matched = author
                aname = ADSName.parse(author)
                # Keep the most specific spelling of the name seen so far.
                if name is None or aname.is_more_specific_than(name):
                    name = aname
            elif not orcid and name == author:
                matched = author
        if matched is not None:
            docs.append(bibcode)
            # Coauthor/alias data reference documents by index into `docs`.
            idx = len(docs) - 1
            appears_as[matched].append(idx)
            for coauthor in document['authors']:
                if coauthor != matched:
                    coauthors[coauthor].append(idx)
    if len(docs) or key.endswith("nodocs"):
        # Flatten index lists into the comma-separated strings the real
        # cache stores.
        # NOTE(review): an ORCID key with no document matches but ending in
        # "nodocs" would leave `name` as None and fail below -- presumably
        # no test exercises that combination.
        for coauthor, coauthor_dat in coauthors.items():
            coauthors[coauthor] = ','.join(str(i) for i in coauthor_dat)
        for alias, alias_dat in appears_as.items():
            appears_as[alias] = ','.join(str(i) for i in alias_dat)
        return {
            # defaultdict doesn't play nicely with AuthorRecord's asdict()
            'name': name.qualified_full_name,
            'documents': docs,
            'coauthors': dict(**coauthors),
            'appears_as': dict(**appears_as),
            'timestamp': TIME,
            'version': AUTHOR_VERSION_NUMBER,
        }
    else:
        raise CacheMiss(key)
def load_authors(keys):
    """Bulk form of load_author; preserves input order."""
    return list(map(load_author, keys))
def store_progress_data(*args, **kwargs):
    """Accept and silently discard progress data; nothing is stored."""
    return None
# Progress deletes are forbidden, same as document deletes.
delete_progress_data = delete_document
def load_progress_data(*_args, **_kwargs):
    """Progress data is never readable from the mock cache."""
    raise RuntimeError("Should not load progress from mock cache")
def clear_stale_data(*args, **kwargs):
    """No-op: mock data never goes stale."""
    return None
# A dummy batch manager
class _MockBatch:
    """Stand-in for the real batching context manager."""

    def __enter__(self):
        # The real manager reports whether batching is active.
        return True

    def __exit__(self, *exc_info):
        # Do not suppress exceptions raised inside the with-block.
        return False


def batch():
    """Return a context manager that pretends batching is enabled."""
    return _MockBatch()
| svank/appa-backend | appa/tests/mock_backing_cache.py | mock_backing_cache.py | py | 9,020 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python
# coding: utf-8
# ## Import des librairies
# In[2]:
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
import matplotlib.pyplot as plt
import plotly.offline as py
import seaborn as sns
import plotly.graph_objs as go
import plotly
import plotly.figure_factory as ff
from sklearn.model_selection import train_test_split
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFE
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn import preprocessing
from sklearn import metrics
from sklearn.metrics import *
from sklearn.linear_model import LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings('ignore')
# # 1.<span style="color:red"> Lecture des Datasets </span>
# In[2]:
# Load the raw train/test splits from disk.
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# ### 1.1 Concatenate into one DataFrame so both splits receive the same
# transformations
#
# In[3]:
df = pd.concat([train,test], axis= 0)
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.describe(include = 'all' )
# # 2. EDA
# ### 2.1 Distribution of the target
# In[7]:
df['embauche'].value_counts()
# - **Strong class imbalance in 'embauche'; training will suffer unless the
#   class distribution is rebalanced**
# In[8]:
df['embauche'].value_counts().plot(kind='pie',title= 'distribution de la Target', autopct='%.f%%', legend = False, figsize=(12,6), fontsize=12,explode = [0, 0.2]);
# ### 2.2 Pandas profiling of the dataset
# In[ ]:
profile = ProfileReport(df, title="Embauche ou pas")
profile
# **Les NaN & valeurs abérrantes présentes dans ce dataset:**
#
# - 5 observations dont l'age est supérieur/égal à 70 ans
# - 479 observations dont l'age est inférieur à 16 ans
# - 2 observations dont l'expérience est inférieur à 0
# - 104 observations dont l'expérience est supérieur à l'age
# - 1465 observations dont la note est supérieur à 100.
# - 908 NaN
#
# <span style="color:blue">**2055 Outliers & 908 NaN soit près de 15% du dataset**</span>
# <span style="color:darkorange"> **Deux méthodologies se présentent:**</span>
#
# **1- Supprimer les Outliers & les NaNs**
#
# **2- Dans la compétition Kaggle, on était face à une contrainte majeure qui était de garder le set de Test complet à
# 5000 lignes, donc on a procédé à une "harmonisation" des NaN et des valeurs aberrantes**
#
#
#
#
# <span style="color:blue">**Outliers de la variable "age"**</span>
# - **On procèdera donc à la correction de l'âge en supposant un age minimal légal de travail de 16 ans et maximal de 70 ans**
#
#
# <span style="color:blue">**Outliers de la variable "diplome"**</span>
# - **On procèdera donc à l'harmonisation de cette variable en tenant compte de la variable "age" comme suit :**
#
# **diplome bac--> age 18 ans / license --> 21 ans / master --> 23 ans / doctorat --> 27 ans**
#
#
# <span style="color:blue">**Outliers de la variable "note"**</span>
# - **Etant donné le concours d'embauche est noté de 0 à 100, on considérera toutes les notes supérieures à la limite comme arrondie à 100**
#
# <span style="color:blue">**Outliers de la variable "exp"**</span>
# - **Sur des observations ou l'expérience dépasse l'âge, cette dernière sera remplacée par la moyenne de l'expérience**
#
# <span style="color:red">**Les valeurs manquantes**</span>
# - **Pour les Nan des variables numériques on imputera la moyenne (mean)**
# - **Pour les Nan des variables catégorielles on imputera le mode (mode)**
#
# <span style="color:green">**Les variables corrélées**</span>
# - **Aucune corrélation notoire ou presque n'a été détectée à part Note/Salaire à près de 40%**
# ### 2.3<span style="color:black"> Traitement des outliers </span>
# **Boxplot Diplome/Age**
# In[9]:
# Boxplot: diploma vs age
plt.figure(figsize=(12,8))
sns.boxplot(x='diplome',
            y='age',
            data=df,
            palette='winter');
# Boxplot: diploma vs experience
# In[10]:
plt.figure(figsize=(12,8))
sns.boxplot(x='diplome',
            y='exp',
            data=df,
            palette='winter');
# Boxplot: experience vs age
# In[11]:
plt.figure(figsize=(12,8))
sns.boxplot(x='exp',
            y='age',
            data=df,
            palette='winter');
# In[12]:
#------------#
# Clamp impossible ages to the mean (assumed legal working range 16-70).
df.loc[(df['age'] >= 70), 'age'] = round(df['age'].mean(), 0) #5 Observations
df.loc[(df['age'] < 16), 'age'] = round(df['age'].mean(), 0) #479 Observations
#------------#
# NOTE(review): the four assignments below overwrite 'age' for EVERY row of
# each diploma class (the observation counts equal whole class sizes),
# discarding all real age information.  The markdown above suggests only
# inconsistent ages were meant to be harmonised -- confirm before reuse.
df.loc[(df['diplome'] == "bac"), 'age'] = 18 #2453 observations
df.loc[(df['diplome'] == "licence"), 'age'] = 21 #7377 observations
df.loc[(df['diplome'] == "master"), 'age'] = 23 #7513 observations
df.loc[(df['diplome'] == "doctorat"), 'age'] = 27 #2547 observations
#------------#
# Negative or impossible experience values are replaced by the mean.
df.loc[(df['exp'] < 0), 'exp'] = round(df['exp'].mean(), 0) #2 observations
df.loc[(df['exp'] > df['age']),'exp'] = round(df['exp'].mean(),0) #104 observations
#------------#
# Exam is scored 0-100; higher values are capped at 100.
df.loc[(df['note'] > 100), 'note'] = 100 #1465 observations
#------------#
# ### 2.4<span style="color:black"> Traitement des NAN </span>
# In[13]:
# Heatmap of missing values
plt.figure(figsize=(12,8))
sns.heatmap(df.isnull(),
            yticklabels=False,
            cbar=False,
            cmap='viridis');
# In[14]:
#------Numeric variables-------#
NUMERICAL = ["age","exp","salaire","note"]
df[NUMERICAL]= df[NUMERICAL].astype(np.float32)
# Impute numeric NaN with the rounded column mean.
df[NUMERICAL] = df[NUMERICAL].fillna(round(df[NUMERICAL].mean(), 0))
#------Categorical variables-------#
CATEGORICAL = ["cheveux","sexe","diplome","specialite","dispo","date"]
df[CATEGORICAL]= df[CATEGORICAL].astype('category')
# Impute categorical NaN with the column mode.
df[CATEGORICAL] = df[CATEGORICAL].fillna(df[CATEGORICAL].mode().iloc[0])
# ### 2.5 New numeric features derived from the date
# In[15]:
df['date'] = pd.to_datetime(df['date'],format="%Y-%m-%d")
df['year']= df['date'].dt.year
df['month']= df['date'].dt.month
df['day']= df['date'].dt.day
# ### 2.6 New categorical features: quantile bins of the numeric columns
# In[16]:
df['q_exp'] = pd.qcut(df['exp'],q=3,precision=0)
df['q_age'] = pd.qcut(df['age'], q=3,precision=0)
df['q_note'] = pd.qcut(df['note'],q=4,precision=0)
df['q_salaire'] = pd.qcut(df['salaire'],q=5,precision=0)
# ### 2.4 Redefine numeric/categorical variable lists, features and target
# In[17]:
NUMERICAL = ["age","exp","salaire","note","year","month","day"]
df[NUMERICAL]= df[NUMERICAL].astype(np.float32)
# In[18]:
CATEGORICAL = ["cheveux","sexe","diplome","specialite","dispo"]
df[CATEGORICAL]= df[CATEGORICAL].astype('category')
# In[19]:
FEATURES = NUMERICAL + CATEGORICAL + ["q_exp","q_age","q_note",'q_salaire']
TARGET = "embauche"
# ### 2.5 <span style="color:black"> Data Viz </span>
# **Distribution des classes de la variable AGE par rapport à la TARGET**
# In[20]:
# Distribution of AGE by TARGET class
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["age"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["age"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# Distribution of EXP by TARGET class
# In[21]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["exp"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["exp"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Experience")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# Distribution of NOTE by TARGET class
# In[22]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["note"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["note"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Note")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# Distribution of SALAIRE by TARGET class
# In[23]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["salaire"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["salaire"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Salaire")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# Distribution of YEAR by TARGET class
# In[24]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="year",hue="embauche", edgecolor="k")
plt.xlabel("Year")
plt.ylabel("Count")
plt.show()
# Distribution of MONTH by TARGET class
# In[25]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="month",hue="embauche", edgecolor="k")
plt.xlabel("Month")
plt.ylabel("Count")
plt.show()
# Distribution of DAY by TARGET class
# In[26]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="day",hue="embauche", edgecolor="k")
plt.xlabel("day")
plt.ylabel("Count")
plt.show()
# Distribution of CHEVEUX by TARGET class
# In[27]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="cheveux",hue="embauche", edgecolor="k")
plt.xlabel("Cheveux")
plt.ylabel("Count")
plt.show()
# Distribution of DIPLOME by TARGET class
# In[28]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="diplome",hue="embauche", edgecolor="k")
plt.xlabel("Diplome")
plt.ylabel("Count")
plt.show()
# Distribution of SPECIALITE by TARGET class
# In[29]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="specialite",hue="embauche", edgecolor="k")
plt.xlabel("specialite")
plt.ylabel("Count")
plt.show()
# Distribution of DISPO by TARGET class (the original markdown said "SEXE",
# but the plot hue is 'embauche')
# In[30]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="dispo",hue="embauche", edgecolor="k")
plt.xlabel("Dispo")
plt.ylabel("Count")
plt.show()
# ### 2.6 <span style="color:black"> Tests Statistiques </span>
# In[31]:
import scipy
# **CHEVEUX / SALAIRE**
# - H0: no statistically significant relationship
# In[32]:
data_blond =df[df["cheveux"]=="blond"]
data_brun = df[df["cheveux"]=="brun"]
data_roux =df[df["cheveux"]=="roux"]
data_chatain =df[df["cheveux"]=="chatain"]
# Kruskal-Wallis: do salary distributions differ across hair colours?
stat, p_value = scipy.stats.kruskal(data_blond["salaire"], data_brun["salaire"],data_roux["salaire"] ,data_chatain["salaire"])
print('Statistics=%.3f, p_value=%.3f' % (stat, p_value))
# interpret
alpha = 0.05
if p_value > alpha:
    print('Même distributions (Hypothèse H0 non rejetée)')
else:
    print('Distributions différentes (Hypothèse H0 rejetée)')
# **SPECIALITE / SEXE**
# - H0: no statistically significant relationship
# In[33]:
data_forage =df[df["specialite"]=="forage"]
data_geologie = df[df["specialite"]=="geologie"]
data_detective =df[df["specialite"]=="detective"]
data_archeologie =df[df["specialite"]=="archeologie"]
# NOTE(review): kruskal ranks its inputs numerically, but "sexe" appears to
# still be a string/category column at this point -- confirm this cell runs
# as intended (a chi-squared test would be the usual choice here).
stat, p_value = scipy.stats.kruskal(data_forage["sexe"], data_geologie["sexe"],data_detective["sexe"] ,
                                    data_archeologie["sexe"])
print('Statistics=%.3f, p_value=%.3f' % (stat, p_value))
# interpret
alpha = 0.05
if p_value > alpha:
    print('Même distributions (Hypothèse H0 non rejetée)')
else:
    print('Distributions différentes (Hypothèse H0 rejetée)')
# **EXP / NOTE**
# - H0: no statistically significant relationship
# In[34]:
data_exp =df["exp"]
data_note = df["note"]
stat, p_value = scipy.stats.kruskal(data_exp, data_note)
print('Statistics=%.3f, p_value=%.3f' % (stat, p_value))
# interpret
alpha = 0.05
if p_value > alpha:
    print('Même distributions (Hypothèse H0 non rejetée)')
else:
    print('Distributions différentes (Hypothèse H0 rejetée)')
# In[35]:
# Spearman rank-correlation heatmap
plt.figure(dpi=150)
sns.heatmap(df.corr('spearman'),annot=False,cmap='rocket',lw=1);
# In[36]:
from scipy.stats import chi2_contingency
# In[37]:
def test_chi_2(QualVar,target,alpha):
QualVar = pd.DataFrame(QualVar)
liste_chi2 = []
liste_chi2_name = []
# ici on créé le tableau de contingence pour réaliser notre test :
for i in range(len(list(QualVar.columns))):
table = pd.crosstab(QualVar[list(QualVar.columns)[i]],QualVar[target])
stat, p, dof, expected = chi2_contingency(table)
if p <= alpha:
liste_chi2.append(i)
else:
pass
for j in liste_chi2:
liste_chi2_name.append([i.encode('ascii', 'ignore') for i in QualVar.columns][j])
return liste_chi2_name
# In[38]:
liste_chi2_name = test_chi_2(df,"embauche",0.05)
liste_chi2_name
# The columns listed above have p_value < 5% and are therefore statistically
# significant for explaining the TARGET.
# # 3. PREPROCESSING
# ### 3.1 Label Encoding
# Label encoding was preferred over one-hot encoding to keep dimensionality
# low and hyper-parameter tuning fast.
# In[39]:
df_c=df.copy()
# In[40]:
# One LabelEncoder refitted per column via apply().
label_encoder = preprocessing.LabelEncoder()
df_c[CATEGORICAL]=df[CATEGORICAL].apply(label_encoder.fit_transform)
df_c[["q_exp","q_age","q_note",'q_salaire']] = df[["q_exp","q_age","q_note",'q_salaire']].apply(label_encoder.fit_transform)
df_c[TARGET]=df[TARGET]
# ### 3.2 Type conversion (memory footprint reduction)
# In[41]:
# NOTE(review): uint8 wraps values above 255; 'salaire' (and float16 'note')
# may silently lose information here -- verify the value ranges first.
df_c['age'] = df_c['age'].astype(np.uint8)
df_c['exp'] = df_c['exp'].astype(np.uint8)
df_c['salaire'] = df_c['salaire'].astype(np.uint8)
df_c['cheveux'] = df_c['cheveux'].astype(np.uint8)
df_c['note'] = df_c['note'].astype(np.float16)
df_c['sexe'] = df_c['sexe'].astype(np.uint8)
df_c['diplome'] = df_c['diplome'].astype(np.uint8)
df_c['specialite'] = df_c['specialite'].astype(np.uint8)
df_c['dispo'] = df_c['dispo'].astype(np.uint8)
df_c['year'] = df_c['year'].astype(np.int16)
df_c['month'] = df_c['month'].astype(np.int16)
df_c['day'] = df_c['day'].astype(np.int16)
df_c['q_exp'] = df_c['q_exp'].astype(np.int16)
df_c['q_age'] = df_c['q_age'].astype(np.int16)
df_c['q_salaire'] = df_c['q_salaire'].astype(np.int16)
df_c['q_note'] = df_c['q_note'].astype(np.int16)
# ### 3.3 Train/Test split
# In[42]:
# Rows with a known target form the train split; NaN targets are the
# competition test split (recovered after the earlier concat).
train = df_c.loc[~df_c[TARGET].isna()]
# In[43]:
test = df_c.loc[df_c[TARGET].isna()]
# ### 3.4<span style="color:black"> Oversampling de la classe minoritaire "embauche = 1" </span>
# **Le SMOTETomek procédera à la création de valeurs synthétiques similaires aux vraies valeurs présentes dans le dataset avec une Embauche = 1**
# In[44]:
from imblearn.combine import SMOTETomek
# In[45]:
smotetomek_X = train[FEATURES]
smotetomek_Y = train[TARGET]
# SMOTETomek synthesises minority-class (embauche == 1) samples and cleans
# Tomek links.
smote_tomek = SMOTETomek(random_state=68, sampling_strategy=0.99) #class 1 resampled to 99% of class 0
X_resampled, y_resampled = smote_tomek.fit_resample(train[FEATURES], train[TARGET])
smotetomek_X = pd.DataFrame(data = X_resampled,columns=FEATURES)
smotetomek_Y = pd.DataFrame(data = y_resampled,columns=['embauche'])
print ((smotetomek_Y['embauche'] == 1).sum())
print ((smotetomek_Y['embauche'] == 0).sum())
# In[46]:
train_X = smotetomek_X.copy()
# In[47]:
train_Y = smotetomek_Y.copy()
# In[48]:
train_X = train_X[FEATURES]
train_Y = train_Y[TARGET]
test_X = test[FEATURES]
# In[49]:
df_oversampler = pd.concat([train_X,train_Y], axis= 1)
# Target distribution after oversampling
# In[50]:
df_oversampler['embauche'].value_counts().plot(kind='pie',title= 'distribution de la Target', autopct='%.f%%', legend = False, figsize=(12,6), fontsize=12,explode = [0, 0.2]);
# ### 3.4<span style="color:black"> Standardisation des données</span>
# **Remarque** :
#
# **La standardisation des données n'est pas nécessaire quand on utilise des algorithmes d'apprentissage non sensibles à l'amplitude des variables tels que**
# - La régression logistique
# - Le Random Forest
# - Les modèles de Gradient boosting
#
# **Hors dans ce projet, on utilisera aussi le SVC, DTC & KNN qui eux sont sensibles à l'amplitude des variables**
# In[51]:
# Inspect feature scales before standardizing.
train_X.std()
# In[52]:
test_X.std()
# In[53]:
scaler = StandardScaler()
# Fit the scaler on the training data only, then reuse the SAME statistics
# for the test set.  The original called fit_transform on test_X too, which
# re-fits the scaler on the test distribution -- that leaks test statistics
# into preprocessing and makes train/test features inconsistent.
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)
# In[54]:
train_X = train_X.astype('float32')
test_X = test_X.astype('float32')
# # 4.<span style="color:Orange"> MODELISATION </span>
# - Le projet présenté à pour but une classification de la TARGET entre 0 & 1
#
# - On choisira donc des Algorithmes d'apprentissage supervisé pour CLASSIFICATION
#
# - Régression Logistique /Decision Tree/ SVC / KNN / Random Forest / Gradient boosting / XGBoost
#
# - La comparaison des modèles se fera principalement sur le score AUC
#
# - Le tunning des hyperparamètres se fera avec HalvingGridSearchCV qui est une nouvelle classe de tunning des hyperparamètres beaucoup plus rapide que le GridsearchCV avec pratiquement les mêmes résultats
# ### 4.1<span style="color:black"> Tunning des Hyperparamètres avec HalvingGridSearchCV </span>
# In[55]:
def tunning(param_grid,model,X,Y):
    """Run a HalvingGridSearchCV over *param_grid* and print the best result.

    Scores with ROC-AUC over 5-fold CV; successive halving (factor 3) makes
    it much faster than an exhaustive GridSearchCV.
    """
    search = HalvingGridSearchCV(
        model,
        param_grid=param_grid,
        scoring="roc_auc",
        min_resources="exhaust",
        n_jobs=-1,
        cv=5,
        factor=3,
        verbose=1,
    )
    search.fit(X, Y)
    print ("Best Score: {}".format(search.best_score_))
    print ("Best params: {}".format(search.best_params_))
# ### 4.2<span style="color:black"> Evaluation du modèle </span>
# In[56]:
def evaluation(model,z,X,Y):
    """Fit *model* on (X, Y) and render an in-notebook performance dashboard.

    *z* selects how importances are extracted: "coefficients" reads
    model.coef_, "features" reads model.feature_importances_.
    NOTE(review): any other value of z leaves `coefficients` undefined and
    raises NameError below.  All metrics are computed on the same data the
    model was fit on (training metrics, not a held-out estimate).
    """
    model.fit(X,Y)
    predict = model.predict(X)
    proba = model.predict_proba(X)
    fig = plt.figure()
    #roc_auc_score
    model_roc_auc = metrics.roc_auc_score(Y,predict)
    #Confusion matrix
    conf_matrix = metrics.confusion_matrix(Y,predict)
    #plot confusion matrix
    plot1 = go.Heatmap(z = conf_matrix ,
                       x = ["Pred_0","Pred_1"],
                       y = ["Real_0","Real_1"],
                       showscale = True,autocolorscale = True,
                       name = "matrix", transpose = True, visible = True)
    #plot roc auc
    a,b,c = metrics.roc_curve(Y,proba[:,1])
    plot2 = go.Scatter(x = a,y = b,
                       name = "Roc : " + str(model_roc_auc),
                       line = dict(color = ('rgb(22, 96, 167)'),width = 2))
    # Diagonal chance line for the ROC panel.
    plot3 = go.Scatter(x = [0,1],y=[0,1],
                       line = dict(color = ('rgb(205, 12, 24)'),width = 2,
                       dash = 'dot'))
    #plot coefficients/Features
    if z == "coefficients" :
        coefficients = pd.DataFrame(model.coef_.ravel())
    elif z== "features" :
        coefficients = pd.DataFrame(model.feature_importances_)
    # Pair each importance with its feature name via a positional join.
    column_df = pd.DataFrame(FEATURES)
    coef_sumry = (pd.merge(coefficients,column_df,left_index= True,
                           right_index= True, how = "left"))
    coef_sumry.columns = ["coefficients","features"]
    coef_sumry = coef_sumry.sort_values(by = "coefficients",ascending = False)
    # NOTE(review): `trace4` is an unused alias of plot4.
    plot4 = trace4 = go.Bar(x = coef_sumry["features"],y = coef_sumry["coefficients"],
                            name = "coefficients",
                            marker = dict(color = coef_sumry["coefficients"],
                                          colorscale = "Picnic",
                                          line = dict(width = .6,color = "black")))
    #Subplots
    fig = plotly.subplots.make_subplots(rows=2, cols=2, specs=[[{}, {}], [{'colspan': 2}, None]],
                                        subplot_titles=('Confusion Matrix',
                                                        'Receiver operating characteristic',
                                                        'Feature Importances'),print_grid=False)
    fig.append_trace(plot1,1,1)
    fig.append_trace(plot2,1,2)
    fig.append_trace(plot3,1,2)
    fig.append_trace(plot4,2,1)
    fig['layout'].update(showlegend=False, title="Model performance" ,
                         autosize = False,height = 900,width = 800,
                         plot_bgcolor = 'rgba(240,240,240, 0.95)',
                         paper_bgcolor = 'rgba(240,240,240, 0.95)',
                         margin = dict(b = 195))
    fig["layout"]["xaxis2"].update(dict(title = "false positive rate"))
    fig["layout"]["yaxis2"].update(dict(title = "true positive rate"))
    fig["layout"]["xaxis3"].update(dict(showgrid = True,tickfont = dict(size = 10),
                                        tickangle = 90))
    py.iplot(fig);
    print ("ROC-AUC : ",model_roc_auc,"\n")
    print("score F1 : ", metrics.f1_score(Y, predict),"\n")
    print ("Accuracy Score : ",metrics.accuracy_score(Y,predict))
# In[57]:
def evaluation_knn(model,X,Y):
    """Lighter evaluation used for KNN and rbf-SVC, which expose neither
    coef_ nor feature_importances_.

    Metrics are computed on the training data itself (not held-out).
    NOTE(review): plot_confusion_matrix comes from `from sklearn.metrics
    import *` and was removed in scikit-learn 1.2; on newer versions use
    ConfusionMatrixDisplay.from_estimator instead.
    """
    model.fit(X,Y)
    predict = model.predict(X)
    # NOTE(review): `proba` is computed but never used below.
    proba = model.predict_proba(X)
    #roc_auc_score
    model_roc_auc = metrics.roc_auc_score(Y,predict)
    #plot confusion matrix
    plot_confusion_matrix(model, X, Y)
    plt.show();
    print ("ROC-AUC : ",model_roc_auc,"\n")
    print("score F1 : ", metrics.f1_score(Y, predict),"\n")
    print ("Accuracy Score : ",metrics.accuracy_score(Y,predict))
# In[58]:
def MetricsMaker(model):
    """5-fold stratified cross-validation of *model* on the module-level
    train_X / train_Y, returning {metric name: mean score over folds}.

    Assumes train_X supports numpy-style positional indexing with the fold
    index arrays (it is a numpy array after scaling) -- TODO confirm the
    same holds for train_Y.
    """
    # Save Models
    # Splits
    kf = StratifiedKFold(n_splits=5,shuffle=True,random_state=2021)
    split = list(kf.split(train_X,train_Y))
    Metrics = {}
    Precision, Accuracy, F1_score, Recall_score, ROC_AUC = 0, 0, 0, 0, 0
    for i,(train_index, test_index) in enumerate(split):
        data_train = train_X[train_index]
        y_train = train_Y[train_index]
        data_test = train_X[test_index]
        y_test = train_Y[test_index]
        # create a fitted model
        fittedModel = model.fit(data_train,y_train)
        y_hat_proba = fittedModel.predict_proba(data_test)[:,1]
        y_hat = fittedModel.predict(data_test)
        # Accumulate per-fold scores; averaged over the 5 folds below.
        # NOTE(review): ROC-AUC is computed from hard labels (y_hat), not
        # from y_hat_proba -- using probabilities would give the usual AUC.
        Precision += metrics.precision_score(y_test,y_hat)
        Accuracy += metrics.accuracy_score(y_test,y_hat)
        F1_score += metrics.f1_score(y_test,y_hat)
        Recall_score += metrics.recall_score(y_test,y_hat)
        ROC_AUC += metrics.roc_auc_score(y_test,y_hat)
    Metrics['Precision'] = Precision / 5
    Metrics['Accuracy'] = Accuracy / 5
    Metrics['F1_score'] = F1_score / 5
    Metrics['Recall_score'] = Recall_score / 5
    Metrics['ROC-AUC'] = ROC_AUC / 5
    return Metrics
# In[59]:
# Metric scores for every model are collected here.
Metrics = {}
# ### 4.2 Logistic Regression
# In[60]:
parameters = {'Cs': [1, 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10]
              }
logit = LogisticRegressionCV(random_state= 33,cv=10,max_iter=10000,verbose=1, n_jobs = -1)
#tunning(parameters,logit,train_X,train_Y)
# In[61]:
logReg = LogisticRegressionCV(Cs= 6, random_state= 33,cv=10,max_iter=10000,verbose=1)
Metrics['LogisticRegressionCV'] = MetricsMaker(logReg)
# In[62]:
# Evaluation with the tuned model
logit = LogisticRegressionCV(Cs= 6, random_state= 33,cv=10,max_iter=10000,verbose=1)
evaluation(logit,"coefficients",train_X,train_Y)
# ### 4.3 Decision Tree Classifier
# In[63]:
d_t_c = DecisionTreeClassifier(random_state=33)
parameters = {'max_depth': [1, 2, 3, 4, 5, 6, 7],
              'max_features': [1, 2, 3, 4, 5],
              'criterion': ['gini','entropy'],
              'splitter': ['best'],
              }
#tunning(parameters,d_t_c,train_X,train_Y.values.ravel())
# In[64]:
D_T_C = DecisionTreeClassifier(random_state=33, criterion = "gini", max_depth=7, max_features = 5, splitter = "best")
Metrics['DecisionTreeClassifier'] = MetricsMaker(D_T_C)
# In[65]:
# Evaluation with the tuned model
d_t_c = DecisionTreeClassifier(random_state=33, criterion = "gini", max_depth=7, max_features = 5, splitter = "best")
evaluation(d_t_c,"features",train_X,train_Y)
# ### 4.4<span style="color:black"> SVC </span>
# **Le Tunning s'est fait un hyperparamètre à la fois malgrè que cela peut fausser les meilleurs combinaisons mais pour éviter une attente trop longue lors de l'execution**
# In[66]:
# SVC was tuned one hyper-parameter at a time (see markdown above) to keep
# the search tractable.
s_v_c = SVC(random_state=33,verbose=2)
parameters = {'kernel': ["linear","rbf","poly"],
              'gamma': [0.1, 1, 10, 100],
              'C': [0.1, 1, 10, 100,1000],
              'degree': [0, 1, 2, 3, 4, 5, 6]
              }
#tunning(parameters,s_v_c,train_X,train_Y.values.ravel())
# In[67]:
S_V_C = SVC(random_state=33, kernel = "rbf", gamma=0.1, C = 10, degree = 4,probability=True,verbose=2 )
Metrics['SVC'] = MetricsMaker(S_V_C)
# In[68]:
# Evaluation with the tuned model
s_v_c = SVC(random_state=33, kernel = "rbf", gamma=0.1, C = 10, degree = 4,probability=True,verbose=2 )
evaluation_knn(s_v_c,train_X,train_Y) #Since rbf Kernel is used
# ### 4.5 KNN Classifier
# In[69]:
k_n_n = KNeighborsClassifier(algorithm='auto', n_jobs = -1)
parameters = {
    'leaf_size':[5,10,20,30],
    'n_neighbors':[3,4,5,8,10,11,12],
    'weights' : ['uniform', 'distance'],
    'p' : [1,2]
}
#tunning(parameters,k_n_n,train_X,train_Y)
# In[70]:
K_N_N = KNeighborsClassifier(algorithm='auto',leaf_size= 20,n_neighbors= 11, p=1, weights = "distance", n_jobs = -1)
Metrics['KNeighborsClassifier'] = MetricsMaker(K_N_N)
# In[71]:
# Evaluation with the tuned model
k_n_n = KNeighborsClassifier(algorithm='auto',leaf_size= 20,n_neighbors= 11, p=1, weights = "distance", n_jobs = -1)
evaluation_knn(k_n_n,train_X,train_Y)
# ### 4.6 Random Forest Classifier
# In[72]:
r_f_c = RandomForestClassifier(random_state=33, verbose=2,n_jobs = -1)
parameters = {
    'n_estimators': [5,10,15,20,30,40,50,60,70,80],
    'min_samples_split': [3, 5, 10],
    'max_depth': [2, 5, 15, 30,50,70,80],
    'max_features': ['auto', 'sqrt'],
    'bootstrap': [True, False],
    'criterion': ['gini','entropy']
}
#tunning(parameters,r_f_c,train_X,train_Y.values.ravel())
# In[73]:
R_F_C = RandomForestClassifier(random_state=33, verbose=2, n_estimators = 70,
min_samples_split= 3, max_depth = 70, max_features = "auto",
bootstrap = "False", criterion = "gini")
Metrics['RandomForestClassifier'] = MetricsMaker(R_F_C)
# In[74]:
#Evaluation avec le modèle tunné
r_f_c = RandomForestClassifier(random_state=33, verbose=2, n_estimators = 70,
min_samples_split= 3, max_depth = 70, max_features = "auto",
bootstrap = "False", criterion = "gini")
evaluation(r_f_c,"features",train_X,train_Y)
# ### 4.7<span style="color:black"> Gradient boosting Classifier </span>
# In[75]:
g_b_c = GradientBoostingClassifier (random_state = 33, verbose=2)
parameters = {'learning_rate' : [0.01,0.02,0.03,0.04,0.06,0.08,0.09],
'loss' : ["deviance", "exponential"],
'subsample' : [0.9, 0.5, 0.2, 0.1],
'n_estimators' : [100,500,1000, 1500],
'max_depth' : [4,6,8,10],
'criterion' : ["friedman_mse", "mse"],
'min_samples_split' : [2,4,6,8,10,12,14],
'min_samples_leaf' : [1,2,3,4],
'max_features' : ["auto", "sqrt", "log2"]
}
#tunning(parameters,g_b_c,train_X,train_Y.values.ravel())
# In[76]:
G_B_C = GradientBoostingClassifier(learning_rate=0.09, n_estimators=500, max_depth = 8, min_samples_split = 12,
max_features='auto', subsample=0.1,criterion= "friedman_mse", min_samples_leaf = 2,
loss = "exponential", random_state=33, verbose = 1)
Metrics['GradientBoostingClassifier'] = MetricsMaker(G_B_C)
# In[77]:
#Evaluation avec le modèle tunné
g_b_c = GradientBoostingClassifier(learning_rate=0.09, n_estimators=500, max_depth = 8, min_samples_split = 12,
max_features='auto', subsample=0.1,criterion= "friedman_mse", min_samples_leaf = 2,
loss = "exponential", random_state=33, verbose = 1)
evaluation(g_b_c,"features",train_X,train_Y)
# ### 4.8<span style="color:black"> XGBoost Classifier </span>
# In[78]:
x_g_c = XGBClassifier(use_label_encoder=False)
parameters = {'nthread':[4,5,6,8,10,12],
'learning_rate': [0.01,0.03,0.05,0.1,0.2,0.3,0.4,0.5],
'max_depth': range (2, 21, 1),
'min_child_weight': [10,12,14,16,18,20],
'subsample': [0.6,0.8,1],
'colsample_bytree': [0.2,0.4,0.5,0.7],
'n_estimators': [100,200,300,400,500]
}
#tunning(parameters,x_g_c,train_X,train_Y.values.ravel())
# In[79]:
X_G_B = XGBClassifier(learning_rate = 0.4,nthread = 10,max_depth = 16, subsample=0.8,colsample_bytree=0.5
,n_estimators = 200, min_child_weight = 16,
use_label_encoder=False, random_state = 33, verbosity=1)
Metrics['XGBClassifier'] = MetricsMaker(X_G_B)
# In[80]:
#Evaluation avec le modèle tunné
x_g_c = XGBClassifier(learning_rate = 0.4,nthread = 10,max_depth = 16, subsample=0.8,colsample_bytree=0.5
,n_estimators = 200, min_child_weight = 16,
use_label_encoder=False, random_state = 33, verbosity=1)
evaluation(x_g_c,"features",train_X,train_Y.values.ravel())
# # 5.<span style="color:Turquoise"> FEATURES SELECTION </span>
# ### 5.1<span style="color:black"> Select KBest </span>
# In[81]:
kbest = SelectKBest(score_func=f_classif, k='all') #Score_func peut etre f_classif ou chi2
fit = kbest.fit(train_X, train_Y.values.ravel())
# In[82]:
np.set_printoptions(precision=3) #Chaque score correspond à une colonne, les variables a retenir sont celles qui ont le meilleur score
d = { label: value for label, value in zip(FEATURES, fit.scores_) }
d
# ### 5.1<span style="color:black"> RFECV avec XGboost Classifier tunné </span>
# In[83]:
train_X = pd.DataFrame(train_X, columns = FEATURES)
# In[84]:
rfecv = RFECV(estimator=x_g_c,cv=5,scoring="f1") ## on peut choisir le min_features_to_select( 1 par défaut)
rfecv = rfecv.fit(train_X, train_Y.values.ravel())
print('Nombre optimal de variables :', rfecv.n_features_)
print('Les meilleures variables :', train_X.columns[rfecv.support_])
best_features = list(train_X.columns[rfecv.support_])
# # 5.<span style="color:Purple"> PREDICTION </span>
# **Les prédictions de la base test se feront avec chaque modèle tunné pour pouvoir comparer le meilleur modèle de classification**
# **Les métriques de comparaison**
#
# `recall` : Nombre de classes trouvées par rapport aux nombres entiers de cette même classe.
#
# `precision` : Combien de classes ont été correctements classifiées
#
# `f1-score` : La moyenne harmonique entre precision & recall
# ## Comparaison
# In[85]:
pd.DataFrame(Metrics)
| bg-mohamed/RFS677-Y | Machine Learning/Machine_Learning_Classification.py | Machine_Learning_Classification.py | py | 31,870 | python | fr | code | 1 | github-code | 36 |
4788623202 | from pytz import timezone
from datetime import datetime
import re
from urllib.parse import urlparse, urljoin
from flask import request, escape, Request
import tiktoken
from werkzeug.datastructures import ImmutableMultiDict
class HTTPMethodOverrideMiddleware(object):
allowed_methods = frozenset([
'GET',
'HEAD',
'POST',
'DELETE',
'PUT',
'PATCH',
'OPTIONS'
])
bodyless_methods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE'])
def __init__(self, app, field='_method'):
self.app = app
self._regex = re.compile('.*' + field + '=([a-zA-Z]+)(&.*|$)')
def __call__(self, environ, start_response):
method = self._regex.match(environ.get('QUERY_STRING', ''))
if method is not None:
method = method.group(1).upper()
if method in self.allowed_methods:
environ['REQUEST_METHOD'] = method
if method in self.bodyless_methods:
environ['CONTENT_LENGTH'] = '0'
return self.app(environ, start_response)
class SanitizedRequest(Request):
"""Sanitizes form fields automatically to escape HTML."""
def __init__(self, environ, populate_request=True, shallow=False):
super(SanitizedRequest, self).__init__(environ, populate_request, shallow)
self.unsanitized_form = self.form
if self.form:
sanitized_form = {}
for k, v in self.form.items():
sanitized_form[k] = escape(v)
self.form = ImmutableMultiDict(sanitized_form)
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def now_mytz():
rome = timezone('Europe/Rome')
return datetime.now(tz=rome)
class TokenCounter:
"""Returns the number of tokens used by a list of messages.
Based on: https://platform.openai.com/docs/guides/chat/managing-tokens
"""
def __init__(self, model="gpt-3.5-turbo-0301"):
self.model = model
try:
self.encoding = tiktoken.encoding_for_model(model)
except KeyError:
self.encoding = tiktoken.get_encoding("cl100k_base")
def num_tokens_from_string(self, text):
return len(self.encoding.encode(text))
def num_tokens_from_messages(self, messages):
"""Returns the number of tokens used by a list of messages.
From: https://platform.openai.com/docs/guides/chat/managing-tokens
"""
if self.model == "gpt-3.5-turbo-0301": # note: future models may deviate from this
num_tokens = 0
for message in messages:
num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
for key, value in message.items():
num_tokens += self.num_tokens_from_string(value)
if key == "name": # if there's a name, the role is omitted
num_tokens += -1 # role is always required and always 1 token
num_tokens += 2 # every reply is primed with <im_start>assistant
return num_tokens
else:
raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {self.model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
| mkmenta/chatgpt-research | utils.py | utils.py | py | 3,525 | python | en | code | 0 | github-code | 36 |
73997230824 | import unittest
from HomeWorks.Lesson_4.common.constants import *
from HomeWorks.Lesson_4.client import show_presence, proc_answer
# Класс с тестами
class TestClass(unittest.TestCase):
# тест коректного запроса
def test_def_presense(self):
test = show_presence()
# время необходимо приравнять принудительно иначе тест никогда не будет
# пройден
test[TIME] = 1.1
self.assertEqual(
test, {
ACTION: PRESENCE, TIME: 1.1, USER: {
ACCOUNT_NAME: 'Guest'}})
# тест корректтного разбора ответа 200
def test_200_ans(self):
self.assertEqual(proc_answer({RESPONSE: 200}), '200 : OK')
# тест корректного разбора 400
def test_400_ans(self):
self.assertEqual(proc_answer(
{RESPONSE: 400, ERROR: 'Bad Request'}), '400 : Bad Request')
# тест исключения без поля RESPONSE
def test_no_response(self):
self.assertRaises(ValueError, proc_answer, {ERROR: 'Bad Request'})
if __name__ == '__main__':
unittest.main()
| spoliv/Client_Server_Apps_28.10.2019 | HomeWorks/Lesson_4/unit_tests/test_client.py | test_client.py | py | 1,235 | python | ru | code | 0 | github-code | 36 |
2884377589 | # -*- coding: utf8 -*-
__author__ = 'yqzhang'
from utils.util import get_requests, form_post,login,get_code_token
def detail(gooids):
login('0086','18810432995')
url='https://jf.lagou.com/integral/mall/goods/detail.json'
data={'goodsId':gooids}
return get_requests(url=url,remark='商品详情',data=data)
# detail() | Ariaxie-1985/aria | api_script/jianzhao_web/gouinH5/detail.py | detail.py | py | 334 | python | en | code | 0 | github-code | 36 |
39983047311 | # Assignment-008/6 (Prime Numbers)
# 💡Objective:
# To improve your control flow statement skills
# and to raise your awareness of some algebraic knowledge.
# Write a Python code on any IDE,
# push it up to your GitHub repository
# and submit the GitHub page address link
# in addition to your code (answer) as a plain text.
# Task : Print the prime numbers which are between 1 to entered limit number (n).
# You can use a nested for loop.
# Collect all these numbers into a list
# The desired output for n=100 :
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
# 61, 67, 71, 73, 79, 83, 89, 97]
# Note that : This question is famous on the web,
# so to get more benefit from this assignment,
# try to complete this task on your own.
n = int(input("Enter an end point to check prime numbers: "))
prime_numbers = []
for i in range(1, n+1) :
count = 0
for j in range(1, i+1):
if i % j == 0 :
count += 1
if (i == 0) or (i == 1) or (count >=3) :
continue
else:
prime_numbers.append(i)
print(prime_numbers, "are prime numbers") | MattCon70/mypython | assigments/primenumbers2.py | primenumbers2.py | py | 1,107 | python | en | code | 0 | github-code | 36 |
2353697076 | import os
import dotenv
from telethon import sync
_users_cache = set() # to avoid double DMs
dotenv.load_dotenv()
MESSAGE_TEMPLATE = os.getenv("AUTO_DM")
CURSOR_FILE = "cursor.txt"
def _read_cursor() -> int:
if os.path.exists(CURSOR_FILE):
with open(CURSOR_FILE) as file:
return int(file.read())
return 0
def _write_cursor(cursor: int):
with open(CURSOR_FILE, "w") as file:
file.write(str(cursor))
async def _dm_user(client: sync.TelegramClient, user_id: int):
try:
if user_id in _users_cache:
return
await client.send_message(user_id, MESSAGE_TEMPLATE)
_users_cache.add(user_id)
except Exception as e:
print(f"Failed to DM user {user_id}: {e}")
async def process(client: sync.TelegramClient, channel):
min_id = _read_cursor()
logs = await client.get_admin_log(channel, join=True, min_id=min_id)
for log in logs[::-1]:
try:
if log.joined and log.input_user and hasattr(log.input_user, "user_id"):
user_id = log.input_user.user_id
await _dm_user(client, user_id)
min_id = log.id
except Exception as e:
print(f"Failed to process log {log.id}: {e}")
_write_cursor(min_id)
| rebryk/supertelega | dm.py | dm.py | py | 1,280 | python | en | code | 15 | github-code | 36 |
43427847703 | from .common import deploy
def _parse_sub(subparsers):
parser = subparsers.add_parser("rtt_isolated",
help="Round-trip time for each node in isolation (only node on network)")
return parser
def _main(args, script_fmt):
cmd_list = [
"cd mqtt-benchmark",
script_fmt.format(pub="topic", sub="topic")
]
return deploy (cmd_list, args.devices, sync=True)
| arjunr2/mqtt-benchmark | bench_scripts/rtt_isolated.py | rtt_isolated.py | py | 405 | python | en | code | 0 | github-code | 36 |
3285451205 | from commands.CleanBuildCommands.SignApkCommand import SignApkCommand
from parsers.SignApkParser import SignApkParser
class SignApkCommandBuilder:
def __init__(self, pathToBuildUtil):
assert pathToBuildUtil is not None
self.pathToBuildUtil = pathToBuildUtil
def isSignApk(self, line):
assert line is not None
parser = SignApkParser()
isValid = parser.isValidLine(line)
return isValid
def getCommandFor(self, line):
assert line is not None
parser = SignApkParser()
result = parser.parseLine(line)
slnPath = result[0]
slnConfig = result[1]
projectName = result[2]
command = SignApkCommand(self.pathToBuildUtil, slnPath, slnConfig, projectName)
return command | TouchInstinct/BuildScript | scripts/TouchinBuild/CommandBuilders/SignApkBuilder.py | SignApkBuilder.py | py | 697 | python | en | code | 1 | github-code | 36 |
29551084642 | '''
E->E+T|T
T->T*F|F
F->(E)|A
A->1A|2A|3A|4A|5A|6A|7A|8A|9A|0|1|2|3|4|5|6|7|8|9|ε
'''
'''
E->EOE|(E)|A
O->+|-|*
A->1A|2A|3A|4A|5A|6A|7A|8A|9A|0|1|2|3|4|5|6|7|8|9|ε
'''
#还是消除简单左递归
#重写LR0和SLR1中的TABLE以及ANALYSE函数,条理更清晰
import copy
LAN = {}
FIRST = {}
EXLAN = []
ITEM = []
DICT = {}
DFA = [] #[0]为代表 其中[0][0]为项目字符串,[0][1]为搜索符 [1]为图上连线
CH = []
CL = []
def isterminal(ch):
if not (ch >= "A" and ch <="Z"):
return True
else:
return False
def table():
length = len(DFA)
tmp = []
for i in LAN:
p = LAN[i]
for j in p:
for k in j:
if isterminal(k):
if k not in tmp:
tmp.append(k)
if 'ε' in tmp:
tmp.remove('ε')
tmp.sort()
tmp.append('$')
l = len(tmp)
tmp1 = []
for i in LAN:
p = LAN[i]
for j in p:
for k in j:
if not isterminal(k):
if k not in tmp1:
tmp1.append(k)
print(LAN)
print(LL1LAN)
print(tmp1)
tmp.extend(tmp1)
CH.extend(tmp)
TABLE = [[None for i in range(len(CH))] for j in range(len(DFA))]
for index, i in enumerate(DFA):
for j in i[0]: #先填reduce项
if len(j[0]) - 1 == j[0].index('.'):
string = j[0][:-1]
pos = EXLAN.index(string)
if pos == 0:
TABLE[index][CH.index(j[1])] = 'acc'
else:
TABLE[index][CH.index(j[1])] = 'r' + str(pos)
for j in i[1]: #再填Action/Goto项
if isterminal(j[0]):
TABLE[index][CH.index(j[0])] = 's' + str(j[1])
else:
TABLE[index][CH.index(j[0])] = str(j[1])
return TABLE
def closure(item):
global CL
string = item[0]
search = item[1]
if string.index('.') == len(string) - 1:
return [item]
tmp = [item]
index = string.index('.')
k = string[index + 1:]
# print(item)
# print(k)
if not isterminal(k[0]):
for i in ITEM:
if i[0] == k[0]:
if i[3] == '.':
# tmp.append([i, search]) #需增加搜索符
w = []
if len(k) == 1: # .走到了最后就继承搜索符
w = [i, search]
tmp.append(w)
if len(w[0]) > 4:
if not isterminal(w[0][4]):
if w not in CL:
CL.append(w)
# if w[0] != string:
tmp.append(closure(w))
CL.remove(w)
else:
if isterminal(k[1]):
w = [i, k[1]]
tmp.append(w)
if len(w[0]) > 4:
if not isterminal(w[0][4]):
# if w[0] != string:
if w not in CL:
CL.append(w)
tmp.append(closure(w))
CL.remove(w)
else:
t = list(set(calfirst(k[1:])))
if t == ['ε'] or len(t) == 0:
w = [i, k[1]]
tmp.append(w)
if len(w[0]) > 4:
if not isterminal(w[0][4]):
if w not in CL:
CL.append(w)
# if w[0] != string:
tmp.append(closure(w))
CL.remove(w)
else:
for x in t:
if x != 'ε':
w = [i, x]
tmp.append(w)
if len(w[0]) > 4:
if not isterminal(w[0][4]):
# if w[0] != string:
if w not in CL:
CL.append(w)
tmp.append(closure(w))
CL.remove(w)
k = []
for i in tmp:
if type(i[0]) == list:
for j in i:
if j not in k:
k.append(j)
else:
if i not in k:
k.append(i)
tmp = k
return tmp
def getdfa():
cl = closure([ITEM[0], '$'])
DFA.append([cl, []])
l = 0
while l < len(DFA):
vis = [False for i in range(len(DFA[l][0]))]
for indexi, i in enumerate(DFA[l][0]):
if i[0].index('.') == len(i[0]) - 1:
continue
p = i[0].index('.')
tmp = []
posi = i[0].index('.')
if len(i[0]) > 4:
ch = i[0][posi + 1]
else:
ch = ""
for indexj, j in enumerate(DFA[l][0]):
if not vis[indexj]:
posj = j[0].index('.')
if len(j[0]) - 1 > posj:
if j[0][posj + 1] == ch:
newstr = j[0][:posj] + j[0][posj + 1] + '.' + j[0][posj + 2:]
tmp.extend(closure([newstr, j[1]])) #没想好,对于外部应该是直接继承,先这么写看效果
#去重
k = []
for i in tmp:
if type(i[0]) == list:
for j in i:
if j not in k:
k.append(j)
else:
if i not in k:
k.append(i)
tmp = k
vis[indexj] = True
if tmp != []:
pos = -1
for index, j in enumerate(DFA):
if j[0] == tmp:
pos = index
if pos == -1:
DFA.append([tmp, []])
DFA[l][1].append([ch, len(DFA) - 1])
else:
DFA[l][1].append([ch, pos])
l += 1
def first():
for i in FIRST:
FIRST[i] = getfirst(i)
for i in FIRST:
FIRST[i].sort()
def getfirst(tar):
for i in LL1LAN[tar]:
if len(i) == 1:
if(isterminal(i)):
FIRST[tar].append(i) #是终结符直接加入first集
else:
FIRST[tar].extend(getfirst(i)) #非终结符则把这个非终结符的first集加入first集
else:
for index, j in enumerate(i):
if j == 'ε':
FIRST[tar].append(j)
continue
elif isterminal(j):
FIRST[tar].append(j)
break
else:
tmp = copy.deepcopy(getfirst(j))
if 'ε' in tmp:
if index == len(i) - 1:
FIRST[tar].extend(tmp)
else:
tmp.remove('ε')
FIRST[tar].extend(tmp)
else:
FIRST[tar].extend(tmp)
break
FIRST[tar] = list(set(FIRST[tar])) #去重
return FIRST[tar]
def calfirst(string):
if string == 'ε':
return ['ε']
elif len(string) == 1 and isterminal(string):
return [string]
tmp = []
for i in string:
if isterminal(i):
tmp.append(i)
return tmp
else:
t = copy.deepcopy(FIRST[i])
if 'ε' in t:
t.remove('ε')
tmp.extend(t)
else:
tmp.extend(t)
return tmp
return tmp
def getlan():
path = "lr1test.txt"
infile = open(path, 'r')
i = 0
for line in infile.readlines():
splitlist = line[3:].replace("\n", "").strip().split("|")
if line[0] in LAN:
LAN[line[0]].extend(splitlist)
LAN[line[0]] = list(set(LAN[line[0]]))
else:
if i == 0:
LAN['Z'] = [line[0]]
ACC = line[0]
LAN[line[0]] = splitlist
FIRST['Z'] = []
EXLAN.append('Z->' + line[0])
ITEM.append('Z->.' + line[0])
ITEM.append('Z->' + line[0] + '.')
i += 1
else:
LAN[line[0]] = splitlist
FIRST[line[0]] = []
for j in splitlist:
if j != 'ε':
EXLAN.append(line[0] + '->' + j)
for k in range(len(j)):
ITEM.append(line[0] + '->' + j[:k] + '.' + j[k:])
ITEM.append(line[0] + '->' + j + '.')
else:
ITEM.append(line[0] + '->' + '.')
EXLAN.append(line[0] + '->')
def getll1lan():
strlist = ['Y', 'X', 'W', 'V', 'U']
tmplan = {}
pos = 0
for i in LL1LAN.keys():
p = LL1LAN[i]
vis = [False for i in range(len(p))]
for indexj, j in enumerate(p):
if i == j[0]: #左递归
for indexk, k in enumerate(p):
if i != k[0] and vis[indexk] == False:
DICT[i] = strlist[pos]
p[indexk] += strlist[pos]
tmplan[strlist[pos]] = [j[1:] + strlist[pos], 'ε']
FIRST[strlist[pos]] = []
pos += 1
vis[indexk] = True
break
p.remove(j)
for i in tmplan:
LL1LAN[i] = tmplan[i]
def analyse(string, TABLE):
print('%-10s' % "序号", end = "")
print('%-16s' % "分析栈", end = "")
print('%-16s' % "输入栈", end = "")
print('%-16s' % "动作")
Analyse = [['$', 0]]
Istack = list(string)
Istack.append('$')
index = 1
state = 0
length = len(DFA)
while True:
if TABLE[state][CH.index('$')] == 'acc' and Istack == ['$']:
print('%-10s' % index, end = "")
s = ''
for i in Analyse:
s += i[0]
s += str(i[1])
print('%-16s' % s, end = "")
s = ''
for i in Istack:
s += i
print('%-16s' % s, end = "")
if Istack == ['$']:
print('%-16s' % "Acc")
else:
print('%-16s' % "分析失败!")
return
print('%-10s' % index, end = "")
s = ''
for i in Analyse:
s += i[0]
s += str(i[1])
print('%-16s' % s, end = "")
s = ''
for i in Istack:
s += i
print('%-16s' % s, end = "")
if Istack[0] not in CH:
print('分析失败!')
return
if TABLE[state][CH.index(Istack[0])] == None:
print('分析失败!')
return
else:
if TABLE[state][CH.index(Istack[0])][0] == 's':
s = 'shift ' + Istack[0]
print('%-16s' % s)
pos = int(TABLE[state][CH.index(Istack[0])][1:])
Analyse.append([Istack[0], pos])
Istack = Istack[1:]
state = pos
elif TABLE[state][CH.index(Istack[0])][0] == 'r':
pos = int(TABLE[state][CH.index(Istack[0])][1:])
s = 'reduce ' + str(pos)
print('%-16s' % s)
lan = EXLAN[pos]
p = lan.index('>')
K = lan[0]
k = lan[p+1:]
if k != '':
Analyse = Analyse[:-len(k)]
state = Analyse[-1][1]
if TABLE[state][CH.index(K)] == None:
print("分析失败!")
return
Analyse.append([K, int(TABLE[state][CH.index(K)])])
state = int(TABLE[state][CH.index(K)])
else:
s = 'shift ' + Istack[0]
print('%-16s' % s)
pos = int(TABLE[state][CH.index(Istack[0])])
Analyse.append([Istack[0], pos])
Istack = Istack[1:]
state = pos
index += 1
def main():
global LL1LAN
print("文法n行,->区分左右,$为终结符,ε为空串,大写非终结符,小写终结符,S为开始符号(放在第一行),|是或:,Z为S'")
getlan()
print("拓广文法:")
print(LAN)
print(EXLAN)
print("项目:")
print(ITEM)
LL1LAN = copy.deepcopy(LAN)
getll1lan()
print()
print(LL1LAN)
first()
print("FIRST集:", FIRST)
getdfa()
print()
print("识别文法活前缀的DFA:")
for index, i in enumerate(DFA):
print(index, i)
print()
TABLE = table()
print()
print("LR1分析表:")
p = CH.index('$')
print('%-6s' % "", end = "")
for i in CH:
print('%-6s' % i, end = "")
print()
for index, i in enumerate(TABLE):
print('%-6s' % index, end = "")
for j in i:
if j != None:
print('%-6s' % j, end = "")
else:
print('%-6s' % "", end = "")
print()
print()
string = input("请输入要分析的字符串:")
analyse(string, TABLE)
if __name__ == '__main__':
main()
| xbyige/LL1-LR0-SLR1-LR1_Parser | lr1.py | lr1.py | py | 10,054 | python | en | code | 1 | github-code | 36 |
74473655465 | from inspect import getsource
from IPython.core.display import HTML, display
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
_formatter = HtmlFormatter()
def get_source(obj, preprocess=None):
# comments = f'# decorated by: {obj.decorated_by}\n' if hasattr(obj, 'decorated_by') else ''
if hasattr(obj, 'original_function'):
obj = obj.original_function
if hasattr(obj, '__source__'):
source = obj.__source__
else:
source = getsource(obj)
if preprocess:
source = preprocess(source)
return HTML(highlight(source, PythonLexer(), _formatter))
def show_source(obj):
display(get_source(obj))
def embed_source_styling(custom_styles='.highlight{margin-left:10px!important; font-size:11px}'):
default_highlight_style = _formatter.get_style_defs('.highlight')
html = HTML(f'<style>{default_highlight_style}{custom_styles}</style>')
display(html)
| krassowski/jupyter-helpers | jupyter_helpers/source.py | source.py | py | 983 | python | en | code | 45 | github-code | 36 |
38075654293 | # https://quera.ir/problemset/293/
a = int(input())
b = int(input())
if a == 1 and b == 1:
pass
elif a == 1 and b == 2:
print(2)
elif a == 2 and b == 2:
print(2)
else:
if a == 1 or a == 2:
print(2)
if a % 2 == 0:
start_point = a+1
else:
if a == 1:
start_point = 3
else:
start_point = a
if b % 2 == 0:
end_point = b-1
else:
end_point = b
for i in range(start_point, end_point+1, 2):
is_prime = True
for j in range(3, int(i**0.5)+1, 2):
if i % j == 0:
is_prime = False
break
if is_prime:
print(i)
| MohammadNPak/quera.ir | اعداد اول/python/solution1.py | solution1.py | py | 686 | python | en | code | 40 | github-code | 36 |
73574822824 |
def community_similarity(l1, l2):
totalElementos = 0
similaridade = 0
for lista1 in l1:
taml1 = len(lista1)
totalElementos += taml1
setl1 = set(lista1)
maiorSemelhanca = 0
for lista2 in l2:
setl2 = set(lista2)
common = setl1.intersection(setl2)
if len(common) > maiorSemelhanca:
maiorSemelhanca = len(common)
similaridade += maiorSemelhanca
return similaridade/totalElementos
# Exemplo de uso
l1 = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
l2 = [[2, 3, 4], [1, 5, 6], [7, 8, 9]]
similarity = community_similarity(l1, l2)
print(f"A similaridade entre as formações de comunidades é: {similarity}")
'''
Nesse exemplo, as listas l1 e l2 representam as formações de comunidades nos grafos.
Cada sublista dentro das listas l1 e l2 representa uma comunidade separada.
O resultado do será um valor entre 0 e 1, em que 1 indica uma similaridade perfeita,
e 0 indica a ausência de similaridade.
'''
| dudu-miranda/tp-redesComplexas | comparacaoComunidades.py | comparacaoComunidades.py | py | 1,014 | python | pt | code | 0 | github-code | 36 |
30000519084 |
#Function to calculate pairs
def returnPairs(mylist):
pairs=0
myset=set()
for i in range(0,len(mylist)):
occur=0
if mylist[i] in myset:
continue
for j in range(i+1,len(mylist)):
if mylist[i]==mylist[j]:
occur+=1
myset.add(mylist[i])
if occur>=2:
occur=2
pairs+=occur
return pairs
#Inputing the numbers and initializing an empty list
val=map(int,input().split())
res=[]
#Computing the resulting values and storing them into a list
for i in val:
dup=i
large=0
small=0
while dup!=0:
k=dup%10
if k>large:
large=k
if k<small:
small=k
dup=dup//10
res_val=(large*11+small*7)%100
res.append(res_val)
#seperating into even and odd groups
length=len(res)
evn_grp=[]
odd_grp=[]
for i in range(0,length):
if i%2==0:
evn_grp.append(res[i]//10)
else:
odd_grp.append(res[i]//10)
#calculating pairs
evn_pairs=returnPairs(evn_grp)
odd_pairs=returnPairs(odd_grp)
tot_pairs=evn_pairs+odd_pairs
print(tot_pairs)
| shyamkrishnan1999/python-projects | mockvita2/digit_pairs.py | digit_pairs.py | py | 1,137 | python | en | code | 0 | github-code | 36 |
22825492843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: gpu.py
# Author: jian<jian@mltalker.com>
from __future__ import unicode_literals
import os
import re
# from antgo.utils.utils import change_env
import subprocess
import time
import numpy as np
class GPU(object):
def __init__(self):
try:
content = subprocess.check_output('nvidia-smi')
self._is_gpu_ok = True
self._driver_version = re.findall('(?<=Driver Version: )[\d.]+', content)[0]
gpu_cards_basic_info = re.findall('(?<=\|)[ ]+\d+[ ]+\w+[ ]+\w+[ ]+On[ ]+(?=\|)', content)
gpu_num = len(gpu_cards_basic_info)
self._gpu_cards = []
for gpu_index in range(gpu_num):
result = re.split('\s+', gpu_cards_basic_info[gpu_index].strip())
gpu_card_str = ' '.join(result[1:-1])
self._gpu_cards.append(gpu_card_str)
gpu_mem_info = re.findall('\d+MiB / \d+MiB', content)
self._gpu_mem_max = []
for gpu_index in range(gpu_num):
result = re.split('/', gpu_mem_info[gpu_index])
mem_max = re.findall('\d+', result[1])[0]
self._gpu_mem_max.append(int(float(mem_max) / 1000))
except:
self._is_gpu_ok = False
@property
def driver_version(self):
if not self.is_gpu_ok:
return None
return self._driver_version
@property
def is_gpu_ok(self):
return self._is_gpu_ok
def gpu_model_name(self, card_id=-1):
if not self.is_gpu_ok:
return None
if card_id == -1:
return self._gpu_cards
return self._gpu_cards[card_id]
def gpu_physical_cards(self):
if not self.is_gpu_ok:
return None
return len(self._gpu_cards)
def gpu_total_mem(self, card_id=-1):
if not self.is_gpu_ok:
return None
if card_id == -1:
return self._gpu_mem_max
return self._gpu_mem_max[card_id]
def gpu_available_mem(self, card_id=-1):
if not self.is_gpu_ok:
return None
try:
content = subprocess.check_output('nvidia-smi')
gpu_mem_info = re.findall('\d+MiB / \d+MiB', content)
gpu_mem_usage = []
for gpu_index in range(self.gpu_physical_cards()):
result = re.split('/', gpu_mem_info[gpu_index])
mem_usage = re.findall('\d+', result[0])[0]
gpu_mem_usage.append(int(float(mem_usage) / 1000))
if card_id == -1:
return gpu_mem_usage
return gpu_mem_usage[card_id]
except:
return None
def gpu_util(self, card_id=-1):
if not self.is_gpu_ok:
return None
content = subprocess.check_output('nvidia-smi')
gpu_util = re.findall('(?<=\|)[ ]+\d+(?=%)', content)
gpu_util = [int(util) for id, util in enumerate(gpu_util) if id % 2 == 1]
if card_id == -1:
return gpu_util
return gpu_util[card_id]
def running_state(self, pid, interval=10):
if not self._is_gpu_ok:
return None
content = subprocess.check_output('nvidia-smi')
pattern = '(?<=\|)[ ]+\d+[ ]+\s+(?={pid})'.format(pid=pid)
terms = re.findall(pattern,content)
occupy_gpus = []
for term in terms:
occupy_gpus.append(int(term.strip()))
if len(occupy_gpus) == 0:
return None
for _ in range(interval):
content = subprocess.check_output('nvidia-smi')
gpu_pwr_info = re.findall('\d+W / \d+W',content)
gpu_pwr_usage=[]
gpu_pwr_cap=[]
for gpu_index in range(self.gpu_physical_cards()):
result = re.split('/',gpu_pwr_info[gpu_index])
pwr_usage = re.findall('\d+',result[0])[0]
pwr_cap = re.findall('\d+',result[1])[0]
gpu_pwr_usage.append(float(pwr_usage))
gpu_pwr_cap.append(float(pwr_cap))
gpu_mem_info = re.findall('\d+MiB / \d+MiB',content)
gpu_mem_usage=[]
gpu_mem_max=[]
for gpu_index in range(self.gpu_physical_cards()):
result = re.split('/',gpu_mem_info[gpu_index])
mem_usage = re.findall('\d+',result[0])[0]
mem_max = re.findall('\d+',result[1])[0]
gpu_mem_usage.append(float(mem_usage))
gpu_mem_max.append(float(mem_max))
gpu_util = re.findall('(?<=\|)[ ]+\d+(?=%)', content)
gpu_util = [int(util) for id, util in enumerate(gpu_util) if id % 2 == 1]
pid_mem_util = np.mean([gpu_mem_usage[id] / gpu_mem_max[id] for id in occupy_gpus])
pid_gpu_util = np.mean([gpu_util[id] / 100.0 for id in occupy_gpus])
pid_pwr_util = np.mean([gpu_pwr_usage[id] / gpu_pwr_cap[id] for id in occupy_gpus])
# sleep 2 second
time.sleep(2)
return {'mem_util': pid_mem_util, 'gpu_util': pid_gpu_util, 'gpu_pwr': pid_pwr_util}
# my_gpu = GPU()
# print(my_gpu.gpu_model_name())
# print(my_gpu.gpu_available_mem())
# print(my_gpu.gpu_available_mem(1))
# print(my_gpu.gpu_util())
# print(my_gpu.driver_version)
#
# print(my_gpu.running_state(6465, 1))
# print(my_gpu.gpu_util()) | jianzfb/subgradient | subgradient/core/gpu.py | gpu.py | py | 4,831 | python | en | code | 0 | github-code | 36 |
28969560543 | from django.contrib import admin
from ..models import Player
class PlayerAdmin(admin.ModelAdmin):
list_display = (
'name',
'lastname',
'birth_date',
'team',
'photo',
'position',
'player_number',
'is_first_team',
)
admin.site.register(Player, PlayerAdmin)
| dexer13/rebus-project | world_cup/admin/player_admin.py | player_admin.py | py | 332 | python | en | code | 0 | github-code | 36 |
14761513002 | def _mimport(name, level=1):
try:
return __import__(name, globals(), level=level)
except:
return __import__(name, globals())
import ctypes as _C
_ver=_mimport('version')
_exc=_mimport('mdsExceptions')
#### Load Shared Libraries Referenced #######
#
_MdsShr=_ver.load_library('MdsShr')
#
#############################################
def pointerToObject(pointer,tree=None):
if not pointer: return None
return Descriptor(pointer)._setTree(tree).value
class Descriptor(object):
    """Base wrapper for a native MDSplus descriptor.

    A descriptor is the C-side tagged record (length, dtype, dclass,
    pointer) that MDSplus uses to describe data.  Subclasses specialise on
    the dclass code; ``Descriptor.__new__`` dispatches to the matching
    subclass through the module-level ``dclassToClass`` registry.
    """
    tree = None        # optional Tree this descriptor was read from
    dclass_id = 0
    _value = None
    _structure = None
    class _structure_class(_C.Structure):
        # Binary layout of the common descriptor header.
        _fields_=[("length",_C.c_ushort),
                  ("dtype",_C.c_ubyte),
                  ("dclass",_C.c_ubyte),
                  ("pointer",_C.c_void_p),]
    PTR = _C.POINTER(_structure_class)
    null= _C.cast(0,PTR)
    @property
    def value(self):
        """Evaluate via the dclass-specific wrapper; None when dclass is 0."""
        if self.dclass:
            return self.desc_class(self._structure,self.__dict__)._setTree(self.tree).value
    def _setTree(self,tree):
        """Attach *tree* (when it is a Tree instance) and return self."""
        _tre = _mimport('tree')
        if isinstance(tree,_tre.Tree): self.tree=tree
        return self
    @property
    def dtype_name(self):
        """Human-readable name of this descriptor's dtype code."""
        # Fixed: the original tested membership in dclassToClass (a typo)
        # before indexing dtypeToClass, so valid dtypes could raise KeyError.
        if self.dtype in dtypeToClass:
            return dtypeToClass[self.dtype].__name__
        if self.dtype in dtypeToArrayClass:
            return dtypeToArrayClass[self.dtype].__name__
        return "Unknown-%d"%int(self.dtype)
    def __str__(self):
        return "%s(%d,%s,%d,0x%x)"%(self.__class__.__name__,self.length,self.dtype_name,self.dclass,0 if self.pointer is None else self.pointer)
    def __repr__(self): return str(self)
    @property
    def desc_class(self):
        """Wrapper class registered for this descriptor's dclass code."""
        return dclassToClass[self.dclass]
    def _new_structure(self,length=0,dtype=0,dclass=0,pointer=None,**kwargs):
        """Allocate a fresh ctypes header and populate its fields."""
        self._structure = self._structure_class()
        self._structure.length = length
        self._structure.dtype = dtype
        self._structure.dclass = dclass
        # Fixed: the pointer argument was previously ignored (always None).
        self._structure.pointer = pointer
        for k,v in kwargs.items():
            # Fixed: extra keyword fields were applied via exec() against an
            # undefined name 'struct' (NameError); set them directly instead.
            setattr(self._structure, k, v)
    def __new__(cls,obj_in=None,_dict_={}):
        # Instantiating the base class with a structure/pointer dispatches
        # to the subclass registered for its dclass code.
        if cls is not Descriptor or not obj_in:
            return object.__new__(cls)
        # NOTE(review): unreachable — a falsy obj_in already returned above.
        if not obj_in and not hasattr(cls,'__del__'):
            return DescriptorNULL
        if not isinstance(obj_in,cls._structure_class):
            if isinstance(obj_in,_C.Structure):
                obj_in = _C.pointer(obj_in)
            obj_in = _C.cast(obj_in,cls.PTR).contents
        obj = dclassToClass[obj_in.dclass](obj_in,_dict_)
        obj.__init__ = lambda *a: None # don't call __init__ again
        return obj
    def __init__(self,obj_in=None,_dict_={}):
        if self.__class__ is Descriptor:
            # Fixed: the original *returned* an Exception from __init__,
            # which surfaced as an unrelated TypeError about __init__'s
            # return value; raise the intended error explicitly.
            raise TypeError("cannot instantiate Descriptor")
        for k,v in _dict_.items():
            if k not in ['ptr','ptr_']:
                self.__dict__[k] = v
        if obj_in is None:
            self._new_structure(dclass=self.dclass_id)
        elif isinstance(obj_in,self._structure_class):
            self._structure = obj_in
        else:
            if isinstance(obj_in,_C.Structure):
                obj_in = _C.pointer(obj_in)
            elif isinstance(obj_in,(int,_ver.long)):
                obj_in = _C.c_void_p(obj_in)
            self._structure=_C.cast(obj_in,self.PTR).contents
        self.ptr = _C.pointer(self._structure)
        self.ptr_= _C.cast(self.ptr,Descriptor.PTR)
    def __getattr__(self,name):
        # Delegate header fields (length/dtype/dclass/pointer) to the ctypes
        # structure.  Fixed: `is not` on a str literal relied on CPython
        # interning; use a proper inequality.  The fallback previously called
        # the nonexistent super().__getattr__; raise AttributeError directly.
        if name != '_structure' and name in dict(self._structure._fields_):
            return self._structure.__getattribute__(name)
        raise AttributeError(name)
    def __setattr__(self,name,value):
        # Route header-field writes into the ctypes structure; everything
        # else goes through normal attribute assignment.
        if name != '_structure' and name in dict(self._structure._fields_):
            return self._structure.__setattr__(name,value)
        return super(Descriptor,self).__setattr__(name,value)
    @property
    def addressof(self):
        """Integer address of the underlying ctypes structure."""
        return _C.addressof(self._structure)
    @property
    def ref(self):
        """byref() reference to the underlying structure (for C calls)."""
        return _C.byref(self._structure)
class DescriptorNULL(Descriptor):
    # Singleton stand-in for a null descriptor: every header field reads as
    # 0 and all pointer views alias Descriptor.null.
    dclass = length = dtype = addressof = pointer = 0
    ref=ptr_=ptr=Descriptor.null
    def __init__(self):pass
DescriptorNULL=DescriptorNULL()  # replace the class with its only instance
class Descriptor_s(Descriptor):
    # Static (scalar) descriptor, dclass 1.
    dclass_id = 1
    @property
    def value(self):
        # Resolve the dtype code to its registered scalar wrapper class;
        # returns None when dtype is 0 (undefined).
        if self.dtype:
            return dtypeToClass[self.dtype].fromDescriptor(self)._setTree(self.tree)
class Descriptor_d(Descriptor_s):
    # Dynamic descriptor, dclass 2: the pointed-to storage is released via
    # MdsFree1Dx when the wrapper is garbage collected.
    dclass_id = 2
    def __del__(self):
        _MdsShr.MdsFree1Dx(self.ptr,0)
class Descriptor_xs(Descriptor_s):
    # Extended static descriptor, dclass 193: adds a 32-bit payload length
    # and treats `pointer` as a reference to a nested descriptor.
    dclass_id = 193
    class _structure_class(_C.Structure):
        _fields_=Descriptor_s._structure_class._fields_ + [
            ("l_length",_C.c_uint32)]
    def _new_structure(self,l_length=0,**kwarg):
        # Build the base header, then fill the extended length field.
        super(Descriptor_xs,self)._new_structure(**kwarg)
        self._structure.l_length = l_length
    PTR = _C.POINTER(_structure_class)
    null= _C.cast(0,PTR)
    @property
    def value(self):
        # Dereference and evaluate the nested descriptor; None when empty.
        if self.l_length and self.pointer:
            return Descriptor(self.pointer,self.__dict__)._setTree(self.tree).value
class Descriptor_xd(Descriptor_xs):
    # Extended dynamic descriptor, dclass 192: like _xs, but the storage is
    # owned by MDSplus and freed via MdsFree1Dx on garbage collection.
    dclass_id = 192
    dtype_dsc = 24
    def __del__(self):
        _MdsShr.MdsFree1Dx(self.ptr,0)
class Descriptor_r(Descriptor_s):
    # Compound ("record") descriptor, dclass 194: carries up to 256 child
    # descriptor pointers, of which the first `ndesc` are valid.
    dclass_id = 194
    class _structure_class(_C.Structure):
        # Pack to pointer size so the dscptrs array lines up with the
        # native layout.
        _pack_ = _C.sizeof(_C.c_void_p)
        _fields_=Descriptor_s._structure_class._fields_ + [
            ("ndesc",_C.c_ubyte),
            ("dscptrs",Descriptor.PTR*256)]
    PTR = _C.POINTER(_structure_class)
    null= _C.cast(0,PTR)
## HINT: arrays
class Descriptor_a(Descriptor):
    # Array descriptor, dclass 4.
    dclass_id = 4
    class _structure_class(_C.Structure):
        # Extends the base header with array geometry.  The anonymous
        # zero/2- and zero/3-byte pads reproduce the native field alignment
        # on non-Windows platforms.
        _fields_ = Descriptor._structure_class._fields_ + [
            ("scale",_C.c_byte),
            ("digits",_C.c_ubyte),
            ("",_C.c_ubyte * (0 if _ver.iswin else 2)),
            ("aflags",_C.c_ubyte),
            ("",_C.c_ubyte * (0 if _ver.iswin else 3)),
            ("dimct",_C.c_ubyte),
            ("arsize",_C.c_uint),
            ("a0",_C.c_void_p),
            ("coeff_and_bounds",_C.c_int32 * 24)]
    def _new_structure(self,arsize=0,**kwarg):
        super(Descriptor_a,self)._new_structure(**kwarg)
        self._structure.arsize = arsize
        # 48 = redim (16) | column (32), per the flag properties below.
        self._structure.aflags=48
    PTR = _C.POINTER(_structure_class)
    null= _C.cast(0,PTR)
    @property
    def value(self):
        # Resolve dtype to its registered array wrapper; None when dtype is 0.
        if self.dtype:
            return dtypeToArrayClass[self.dtype].fromDescriptor(self)._setTree(self.tree)
    # Each property below exposes one bit of the aflags byte.
    @property
    def binscale(self):
        # bit 0x08
        return bool(self.aflags & 8)
    @binscale.setter
    def binscale(self,value):
        if value:
            self.aflags|= 8
        else:
            self.aflags&= ~8
    @property
    def redim(self):
        # bit 0x10
        return bool(self.aflags & 16)
    @redim.setter
    def redim(self,value):
        if value:
            self.aflags|= 16
        else:
            self.aflags&= ~16
    @property
    def column(self):
        # bit 0x20
        return bool(self.aflags & 32)
    @column.setter
    def column(self,value):
        if value:
            self.aflags|= 32
        else:
            self.aflags&= ~32
    @property
    def coeff(self):
        # bit 0x40
        return bool(self.aflags & 64)
    @coeff.setter
    def coeff(self,value):
        if value:
            self.aflags|= 64
        else:
            self.aflags&= ~64
    @property
    def bounds(self):
        # bit 0x80
        return bool(self.aflags & 128)
    @bounds.setter
    def bounds(self,value):
        if value:
            self.aflags|= 128
        else:
            self.aflags&= ~128
class Descriptor_ca(Descriptor_a):
    # Compressed array descriptor, dclass 195.
    dclass_id = 195
    @property
    def value(self):
        # Decompress into a fresh xd descriptor, then evaluate that.
        xd = Descriptor_xd()
        _exc.checkStatus(_MdsShr.MdsDecompress(self.ptr,xd.ptr))
        return xd._setTree(self.tree).value
class Descriptor_apd(Descriptor_a):
    # Array of pointers to descriptors, dclass 196.
    dclass_id = 196
# Registry mapping MDSplus descriptor class codes (dclass) to their wrapper
# classes; consulted by Descriptor.__new__ and Descriptor.desc_class.
dclassToClass={Descriptor_s.dclass_id : Descriptor_s,
               Descriptor_d.dclass_id : Descriptor_d,
               Descriptor_xs.dclass_id : Descriptor_xs,
               Descriptor_xd.dclass_id : Descriptor_xd,
               Descriptor_r.dclass_id : Descriptor_r,
               Descriptor_a.dclass_id : Descriptor_a,
               Descriptor_ca.dclass_id : Descriptor_ca,
               Descriptor_apd.dclass_id : Descriptor_apd}
# dtype-code registries, filled in by the scalar/array wrapper modules.
dtypeToClass = {}
def addDtypeToClass(Class):
    """Register a scalar wrapper class under its MDSplus dtype code."""
    dtypeToClass[Class.dtype_id] = Class
dtypeToArrayClass = {}
def addDtypeToArrayClass(Class):
    """Register an array wrapper class under its MDSplus dtype code."""
    dtypeToArrayClass[Class.dtype_id] = Class
| bcao19/my-python-code | MDSplus/descriptor.py | descriptor.py | py | 8,565 | python | en | code | 0 | github-code | 36 |
30012376702 |
__author__ = 'rockie yang'
import os
from os import path, listdir
from hanzi2pinyin import hanzi2pinyin
def name_converter(old):
    """Transliterate *old* to pinyin, then drop any characters that did not
    convert to plain ASCII."""
    as_pinyin = hanzi2pinyin(old)
    return as_pinyin.encode('ascii', 'ignore').decode('ascii')
def tranform(root_path, the_path):
    """Recursively rename files/directories under *the_path* to ASCII pinyin
    and write an .m3u playlist per directory.

    NOTE(review): each recursion level opens its own playlist named after the
    full directory path plus '.m3u', and entries are written relative to
    root_path — confirm this is the intended playlist layout.
    """
    m3u_file = os.path.join(root_path, the_path + ".m3u")
    with open(m3u_file, "w") as m3u:
        for sub_path in listdir(the_path):
            old_full_path = os.path.join(the_path, sub_path)
            if os.path.isdir(old_full_path):
                # Directory: rename to pinyin, then recurse into the new name.
                new_path = name_converter(sub_path)
                new_full_path = os.path.join(the_path, new_path)
                print(sub_path, new_path)
                if old_full_path != new_full_path:
                    os.rename(old_full_path,
                              new_full_path)
                tranform(root_path, os.path.join(the_path, new_path))
            elif sub_path.lower().endswith(".mp3"):
                # Track: rename to pinyin and append to this playlist.
                new_path = name_converter(sub_path)
                old_full_path = os.path.join(the_path, sub_path)
                new_full_path = os.path.join(the_path, new_path)
                print(root_path, new_full_path)
                if old_full_path != new_full_path:
                    os.rename(old_full_path,
                              new_full_path)
                try:
                    # Playlist entry: path relative to root_path.
                    m3u.write(new_full_path[(len(root_path) + 1):])
                    m3u.write('\n')
                except Exception as ex:
                    print('could not write', new_full_path, ex)
#
# def tranform(sourcePath):
# for sub_path in listdir(sourcePath):
# print(sub_path)
#
# for dirname, dirnames, filenames in os.walk(sourcePath):
# # print(dirname)
# for subdirname in dirnames:
# # pass
# print (subdirname) #os.path.join(dirname, subdirname)
#
# # print path to all filenames.
# # for filename in filenames:
# # if filename.lower().endswith(".mp3"):
# # print (filename) #os.path.join(dirname, filename)
# Entry point: rename the music library in place and emit playlists.
# NOTE(review): hard-coded user path — parameterize before reuse.
tranform(u"/Users/yangyoujiang/Music/music",
         u"/Users/yangyoujiang/Music/music")
#
# #!/usr/bin/env python
#
# import os
# import sys
# import glob
# from mutagen.mp3 import MP3
# from mutagen.easyid3 import EasyID3
#
# #
# # MP3 playlist generator
# #
# # Generate an mp3 playlist file (.m3u), sorted by album track number.
# #
# # DEPENDENCIES
# #
# # - Mutagen (http://code.google.com/p/mutagen/)
# #
# # NOTE: To install `mutagen`, run:
# #
# # $ cd /path/to/mutagen/download/dir && python setup.py install
# #
# # USAGE
# #
# # You can pass directories two ways this script - as arguments or
# # via standard input.
# #
# # $ m3u.py /AphexTwin/Drukqs
# #
# # or multiple directories:
# #
# # $ find /dir/Music -type d -links 2 | m3u.py -
# #
# # Author: Jon LaBelle <jon@tech0.com>
# # Date: Sun Jul 28 2013 06:27:42 GMT-0500 (CDT)
# #
#
# def create_m3u(dir="."):
#
# try:
# print "Processing directory '%s'..." % dir
#
# playlist = ''
# mp3s = []
# glob_pattern = "*.[mM][pP]3"
#
# os.chdir(dir)
#
# for file in glob.glob(glob_pattern):
# if playlist == '':
# playlist = EasyID3(file)['album'][0] + '.m3u'
#
# meta_info = {
# 'filename': file,
# 'length': int(MP3(file).info.length),
# 'tracknumber': EasyID3(file)['tracknumber'][0].split('/')[0],
# }
#
# mp3s.append(meta_info)
#
# if len(mp3s) > 0:
# print "Writing playlist '%s'..." % playlist
#
# # write the playlist
# of = open(playlist, 'w')
# of.write("#EXTM3Un")
#
# # sorted by track number
# for mp3 in sorted(mp3s, key=lambda mp3: int(mp3['tracknumber'])):
# of.write("#EXTINF:%s,%sn" % (mp3['length'], mp3['filename']))
# of.write(mp3['filename'] + "n")
#
# of.close()
# else:
# print "No mp3 files found in '%s'." % dir
#
# except:
# print "ERROR occured when processing directory '%s'. Ignoring." % dir
# print "Text: ", sys.exc_info()[0] | rockie-yang/mp3 | mp3.py | mp3.py | py | 4,280 | python | en | code | 0 | github-code | 36 |
27502593085 | #!/usr/bin/env python
# coding: utf-8
# In[33]:
import pandas as pd
import streamlit as st
import requests
# In[34]:
# SECURITY: a GitHub personal access token is hard-coded in source here.
# Revoke this token and load credentials from an environment variable or
# Streamlit secrets instead of committing them.
username = 'ContainiumTE'
token = 'RRopW0EJvVEcfS5EGt1rxxswfGF5IfzU3Bh4VkPHS10'
github_session = requests.Session()
github_session.auth = (username,token)
# In[30]:
st.title("Discontinuity Weighting Tool")
# NOTE(review): `url` is built but never used below; the header template is
# read from the local CSV instead.
url = "https://raw.githubusercontent.com/ContainiumTE/discontinuity_refinement/main/table_header.csv"
df_header = pd.read_csv("table_header.csv")
menu = ["Home","Other"]
choice = st.sidebar.selectbox("Menu",menu)
if choice == "Home":
    st.subheader("Home")
    st.subheader("Import Table format with Headers as follows:")
    st.table(df_header)
    data_file = st.file_uploader("Upload CSV", type=["csv"])
    if data_file is not None:
        #st.write(type(data_file))
        df_rmr = pd.read_csv(data_file)
# In[50]:
#df_rmr = pd.read_csv('Qjr_selection.csv')
pd.set_option('display.max_columns',500)
pd.set_option('display.max_rows',500)
# In[51]:
# NOTE(review): df_rmr only exists after a CSV is uploaded on the Home page;
# the statements below raise NameError until then — consider guarding.
# Normalize headers: lower-case, spaces -> underscores, parentheses stripped.
df_rmr.columns = df_rmr.columns.str.strip().str.lower().str.replace(' ','_').str.replace('(', '').str.replace(')', '')
df_rmr.head()
# In[52]:
hole_id = df_rmr['hole_id'].unique()
#hole_id
# In[53]:
def joint_roughness1(jr1, jr1_count):
    """Distribute the J1 open-joint count over the nine micro-roughness classes.

    Parameters
    ----------
    jr1 : str
        Micro-roughness label, e.g. ``'1 - Polished'``.
    jr1_count : int or float
        Number of open joints logged for set J1.

    Returns
    -------
    tuple
        18 values: per-class counts (9) followed by per-class weighted
        ratings (9), where rating = count * class weight.  Only the class
        matching *jr1* is non-zero.

    Replaces a 60-line copy-pasted if/elif chain with a table-driven
    lookup; the returned tuple and printed output are unchanged.
    """
    # (label, weight) per class; weights fall from polished (0.45)
    # to rough stepped (0.05).
    classes = [
        ('1 - Polished', 0.45),
        ('2 - Smooth Planar', 0.4),
        ('3 - Rough Planar', 0.35),
        ('4 - Slickensided Undulating', 0.3),
        ('5 - Smooth Undulating', 0.25),
        ('6 - Rough Undulating', 0.2),
        ('7 - Slickensided Stepped', 0.15),
        ('8 - Smooth Stepped', 0.1),
        ('9 - Rough Stepped / Irregular', 0.05),
    ]
    counts = [0] * 9
    ratings = [0] * 9
    for idx, (label, weight) in enumerate(classes):
        if jr1 == label:
            counts[idx] = jr1_count
            ratings[idx] = jr1_count * weight
            print("Jr1 Allocated to: " + label)
            break
    else:
        if jr1 == '':
            print("No Jr1")
        else:
            print("None")
    return tuple(counts + ratings)
# In[54]:
def joint_roughness2(jr2, jr2_count):
    """Distribute the J2 open-joint count over the nine micro-roughness classes.

    Same contract as joint_roughness1 (18-tuple: 9 counts then 9 weighted
    ratings); only the "no value" sentinel differs ('NaN' here).
    """
    classes = [
        ('1 - Polished', 0.45),
        ('2 - Smooth Planar', 0.4),
        ('3 - Rough Planar', 0.35),
        ('4 - Slickensided Undulating', 0.3),
        ('5 - Smooth Undulating', 0.25),
        ('6 - Rough Undulating', 0.2),
        ('7 - Slickensided Stepped', 0.15),
        ('8 - Smooth Stepped', 0.1),
        ('9 - Rough Stepped / Irregular', 0.05),
    ]
    counts = [0] * 9
    ratings = [0] * 9
    for idx, (label, weight) in enumerate(classes):
        if jr2 == label:
            counts[idx] = jr2_count
            ratings[idx] = jr2_count * weight
            print("Jr2 Allocated to: " + label)
            break
    else:
        # NOTE(review): pandas NaN is a float and never equals the string
        # 'NaN', so missing values fall through to "None" — as the original
        # did.
        if jr2 == 'NaN':
            print("No Jr2")
        else:
            print("None")
    return tuple(counts + ratings)
# In[55]:
def joint_roughness3(jr3, jr3_count):
    """Distribute the J3 open-joint count over the nine micro-roughness classes.

    Same contract as joint_roughness1 (18-tuple: 9 counts then 9 weighted
    ratings); 'NaN' is the "no value" sentinel, as for J2.
    """
    classes = [
        ('1 - Polished', 0.45),
        ('2 - Smooth Planar', 0.4),
        ('3 - Rough Planar', 0.35),
        ('4 - Slickensided Undulating', 0.3),
        ('5 - Smooth Undulating', 0.25),
        ('6 - Rough Undulating', 0.2),
        ('7 - Slickensided Stepped', 0.15),
        ('8 - Smooth Stepped', 0.1),
        ('9 - Rough Stepped / Irregular', 0.05),
    ]
    counts = [0] * 9
    ratings = [0] * 9
    for idx, (label, weight) in enumerate(classes):
        if jr3 == label:
            counts[idx] = jr3_count
            ratings[idx] = jr3_count * weight
            print("Jr3 Allocated to: " + label)
            break
    else:
        if jr3 == 'NaN':
            print("No Jr3")
        else:
            print("None")
    return tuple(counts + ratings)
# In[56]:
def sum_of_weighting(count_oj,polished_1,smooth_planar_2,rough_planar_3,slickensided_undulating_4,smooth_undulating_5,rough_undulating_6,slickensided_stepped_7,smooth_stepped_8,rough_stepped_9,pol_rat_1,smoot_rat_2,rou_rat_3,slick_rat_4,smoot_und_rat_5,rou_und_rat_6,slick_ste_rat_7,smoot_step_rat_8, rou_step_rat_9,polished_1_2,smooth_planar_2_2,rough_planar_3_2,slickensided_undulating_4_2,smooth_undulating_5_2,rough_undulating_6_2,slickensided_stepped_7_2,smooth_stepped_8_2,rough_stepped_9_2,pol_rat_1_2,smoot_rat_2_2,rou_rat_3_2,slick_rat_4_2,smoot_und_rat_5_2,rou_und_rat_6_2,slick_ste_rat_7_2,smoot_step_rat_8_2, rou_step_rat_9_2,polished_1_3, smooth_planar_2_3, rough_planar_3_3, slickensided_undulating_4_3, smooth_undulating_5_3, rough_undulating_6_3, slickensided_stepped_7_3, smooth_stepped_8_3, rough_stepped_9_3,pol_rat_1_3,smoot_rat_2_3,rou_rat_3_3,slick_rat_4_3,smoot_und_rat_5_3,rou_und_rat_6_3,slick_ste_rat_7_3,smoot_step_rat_8_3, rou_step_rat_9_3):
sum_total_weighting = pol_rat_1 + smoot_rat_2 + rou_rat_3 + slick_rat_4 + smoot_und_rat_5 + rou_und_rat_6 + slick_ste_rat_7 + smoot_step_rat_8 + rou_step_rat_9 + pol_rat_1_2 + smoot_rat_2_2 + rou_rat_3_2+slick_rat_4_2+smoot_und_rat_5_2+rou_und_rat_6_2+slick_ste_rat_7_2+smoot_step_rat_8_2+ rou_step_rat_9_2+pol_rat_1_3+smoot_rat_2_3+rou_rat_3_3+slick_rat_4_3+smoot_und_rat_5_3+rou_und_rat_6_3+slick_ste_rat_7_3+smoot_step_rat_8_3+ rou_step_rat_9_3
if (count_oj>0) and (sum_total_weighting>0):
count = count_oj
weighting_1 = (polished_1+polished_1_2+polished_1_3)/count
weighting_2 = (smooth_planar_2+smooth_planar_2_2+smooth_planar_2_3)/count
weighting_3 = (rough_planar_3+rough_planar_3_2+rough_planar_3_3)/count
weighting_4 = (slickensided_undulating_4+slickensided_undulating_4_2+slickensided_undulating_4_3)/count
weighting_5 = (smooth_undulating_5+smooth_undulating_5_2+smooth_undulating_5_3)/count
weighting_6 = (rough_undulating_6+rough_undulating_6_2+rough_undulating_6_3)/count
weighting_7 = (slickensided_stepped_7+slickensided_stepped_7_2+slickensided_stepped_7_3)/count
weighting_8 = (smooth_stepped_8+smooth_stepped_8_2+smooth_stepped_8_3)/count
weighting_9 = (rough_stepped_9+rough_stepped_9_2+rough_stepped_9_3)/count
weighting_rating_1 = (pol_rat_1+pol_rat_1_2+pol_rat_1_3)/sum_total_weighting
weighting_rating_2 = (smoot_rat_2+smoot_rat_2_2+smoot_rat_2_3)/sum_total_weighting
weighting_rating_3 = (rou_rat_3+rou_rat_3_2+rou_rat_3_3)/sum_total_weighting
weighting_rating_4 = (slick_rat_4+slick_rat_4_2+slick_rat_4_3)/sum_total_weighting
weighting_rating_5 = (smoot_und_rat_5+smoot_und_rat_5_2+smoot_und_rat_5_3)/sum_total_weighting
weighting_rating_6 = (rou_und_rat_6+rou_und_rat_6_2+rou_und_rat_6_3)/sum_total_weighting
weighting_rating_7 = (slick_ste_rat_7+slick_ste_rat_7_2+slick_ste_rat_7_3)/sum_total_weighting
weighting_rating_8 = (smoot_step_rat_8+smoot_step_rat_8_2+smoot_step_rat_8_3)/sum_total_weighting
weighting_rating_9 = (rou_step_rat_9+rou_step_rat_9_2+rou_step_rat_9_3)/sum_total_weighting
total_rating_1 = weighting_1*weighting_rating_1
total_rating_2 = weighting_2*weighting_rating_2
total_rating_3 = weighting_3*weighting_rating_3
total_rating_4 = weighting_4*weighting_rating_4
total_rating_5 = weighting_5*weighting_rating_5
total_rating_6 = weighting_6*weighting_rating_6
total_rating_7 = weighting_7*weighting_rating_7
total_rating_8 = weighting_8*weighting_rating_8
total_rating_9 = weighting_9*weighting_rating_9
max_rating = max(total_rating_1,total_rating_2,total_rating_3,total_rating_4,total_rating_5,total_rating_6,total_rating_7,total_rating_8,total_rating_9)
ratings = [total_rating_1,total_rating_2,total_rating_3,total_rating_4,total_rating_5,total_rating_6,total_rating_7,total_rating_8,total_rating_9]
index = ratings.index(max_rating)
print("1 ","Polished",polished_1," - ",total_rating_1)
print("2 ","Smoothe Planar",smooth_planar_2," - ",total_rating_2)
print("3 ","Rough Planar",rough_planar_3," - ",total_rating_3)
print("4 ","Slickensided Undulating",slickensided_undulating_4," - ",total_rating_4)
print("5 ","Smooth Undulating",smooth_undulating_5," - ",total_rating_5)
print("6 ","Rough Undulating",rough_undulating_6," - ",total_rating_6)
print("7 ","Slickensided Stepped",slickensided_stepped_7," - ",total_rating_7)
print("8 ","Smoothe Stepped",smooth_stepped_8," - ",total_rating_8)
print("9 ","Rough Stepped",rough_stepped_9," - ",total_rating_9)
#print("The selected Micro Joughness is ",max_rating)
#print(index)
selected_roughness = 0
if index==0:
selected_roughness = '1 - Polished'
elif index==1:
selected_roughness = '2 - Smooth Planar'
elif index==2:
selected_roughness = '3 - Rough Planar'
elif index==3:
selected_roughness = '4 - Slickensided Undulating'
elif index==4:
selected_roughness = '5 - Smooth Undulating'
elif index==5:
selected_roughness = '6 - Rough Undulating'
elif index==6:
selected_roughness = '7 - Slickensided Stepped'
elif index==7:
selected_roughness = '8 - Smooth Stepped'
elif index==8:
selected_roughness = '9 - Rough Stepped/Irregular'
else:
selected_roughness = 'None'
#
else:
print("No Micro Roughness Allocated")
return selected_roughness
# In[57]:
# Walk every interval of every hole, pick its representative micro-roughness
# and collect one summary row per interval.
discon_data1 = {'hole_id': [], 'from': [], 'to': [], 'Oj1': [], 'Jr1': [],
                'Oj2': [], 'Jr2': [], 'Oj3': [], 'Jr3': [], 'Selected Jr': []}
QJr = pd.DataFrame(discon_data1)
# Rows are collected in a list and concatenated once at the end:
# DataFrame.append was deprecated and removed in pandas 2.0, and a single
# concat avoids quadratic re-allocation.
rows = []
for i in hole_id:
    df_b = df_rmr[(df_rmr['hole_id'] == i)]
    print("Hole ID: ", i)
    for k in df_b.index:
        from_1 = df_b['from_m'][k]
        to_1 = df_b['to_m'][k]
        print("Interval Depth (m): ", from_1, " - ", to_1)
        jr1 = df_b['j1_-_micro_roughness'][k]
        jr1_count = df_b['j1_-_oj_count'][k]
        jr2 = df_b['j2_-_micro_roughness'][k]
        jr2_count = df_b['j2_-_oj_count'][k]
        jr3 = df_b['j3_-_micro_roughness'][k]
        jr3_count = df_b['j3_-_oj_count'][k]
        count_oj = jr1_count + jr2_count + jr3_count
        if count_oj > 0:
            jr1_result = joint_roughness1(jr1, jr1_count)
            jr2_result = joint_roughness2(jr2, jr2_count)
            jr3_result = joint_roughness3(jr3, jr3_count)
            # Each joint_roughnessN call returns its 18 per-class values in
            # exactly the order sum_of_weighting expects, so splice the
            # tuples straight into the call instead of unpacking all 54
            # values into named locals first.
            Qjr = sum_of_weighting(count_oj, *jr1_result, *jr2_result, *jr3_result)
            print("Selected Roughness: ", Qjr)
            rows.append({'hole_id': i, 'from': from_1, 'to': to_1,
                         'Oj1': jr1_count, 'Jr1': jr1,
                         'Oj2': jr2_count, 'Jr2': jr2,
                         'Oj3': jr3_count, 'Jr3': jr3, 'Selected Jr': Qjr})
        else:
            # No open joints logged on this interval: emit an empty row.
            rows.append({'hole_id': i, 'from': from_1, 'to': to_1,
                         'Oj1': 0, 'Jr1': '', 'Oj2': 0, 'Jr2': '',
                         'Oj3': 0, 'Jr3': '', 'Selected Jr': ''})
if rows:
    QJr = pd.concat([QJr, pd.DataFrame(rows)], ignore_index=True)
#QJr.to_csv('QJr_export.csv')
def convert_df(QJr):
    """Serialize *QJr* to index-free CSV bytes (UTF-8) for st.download_button."""
    csv_text = QJr.to_csv(index=False)
    return csv_text.encode('utf-8')
# Offer the assembled QJr table as a CSV download from the app.
csv = convert_df(QJr)
st.download_button("Press to Download",csv,"discontinuity_weighting.csv","text/csv",key='download-csv')
print('Data Export Complete')
# In[ ]:
| ContainiumTE/discontinuity_refinement | Discontinuity_Selector.py | Discontinuity_Selector.py | py | 18,910 | python | en | code | 0 | github-code | 36 |
41566426879 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_readonly(host):
    # The read-only mount must reject writes: touching a file as the
    # unprivileged 'test' user should exit non-zero and leave nothing behind.
    f = '/mnt/ro/hello-ro'
    with host.sudo('test'):
        c = host.run('touch %s', f)
    assert c.rc == 1
    assert not host.file(f).exists
def test_readwrite(host):
    # The read-write mount must accept both file creation and deletion by
    # the unprivileged 'test' user.
    f = '/mnt/rw/hello-rw'
    with host.sudo('test'):
        c1 = host.run('touch %s', f)
    assert c1.rc == 0
    assert host.file(f).exists
    with host.sudo('test'):
        c2 = host.run('rm %s', f)
    assert c2.rc == 0
    assert not host.file(f).exists
| ome/ansible-role-nfs-mount | molecule/default/tests/test_default.py | test_default.py | py | 640 | python | en | code | 14 | github-code | 36 |
15857631571 | import shutil
import os
from os.path import exists
import glob
import random
x_path = '../images/'
y_path = './'
if not exists(y_path + 'train'):
os.mkdir(y_path + 'train')
if not exists(x_path + 'train'):
os.mkdir(x_path + 'train')
def duplicate_im_and_ann(y_fname, amount):
    """Copy a label file and its matching image into train/ *amount* times,
    each copy prefixed 'd<i>_' so the duplicates get unique names."""
    for copy_idx in range(amount):
        x_fname = y_fname.replace('.txt', '.jpg')
        prefix = 'd' + str(copy_idx) + '_'
        # Duplicate the annotation into the training folder.
        shutil.copyfile(y_path + y_fname,
                        y_path + 'train/' + prefix + y_fname)
        # Duplicate the image, preserving an upper-case .JPG extension when
        # that is what exists on disk.
        if exists(x_path + x_fname):
            shutil.copyfile(
                x_path + x_fname,
                x_path + 'train/' + prefix + x_fname
            )
        elif exists(x_path + x_fname.replace('.jpg', '.JPG')):
            shutil.copyfile(
                x_path + x_fname.replace('.jpg', '.JPG'),
                x_path + 'train/' + prefix + x_fname.replace('.jpg', '.JPG')
            )
# Also move the original to the training folder to prevent leakage between sets
# shutil.move(y_path + y_fname,
# y_path + 'train/' + y_fname)
# if exists(x_path + x_fname):
# shutil.move(x_path + x_fname, x_path + 'train/' + x_fname)
#
# elif exists(x_path + x_fname.replace('.jpg', '.JPG')):
# shutil.move(
# x_path + x_fname,
# x_path + 'train/' + x_fname.replace('.jpg', '.JPG')
# )
def line_count(file):
    """Return the number of lines in *file* that are not just a newline.

    A non-empty final line without a trailing newline still counts.
    """
    # Context manager replaces the original's manual open/close pair (which
    # leaked the handle on error) and drops the local variable that shadowed
    # the function's own name.
    with open(file) as handle:
        return sum(1 for row in handle if row != "\n")
### 2/ Extracting the list of files that contain 1 line:
def single_cat():
for root, dir, file in os.walk('.'):
#print(file)
file_list = []
for name in file:
if line_count(name) == 1:
file_list.append(name)
return file_list
#print(file_list)
#print(len(file_list))
single_cat_list = single_cat()
# print(len(single_cat_list))
##
# Also move the original to the training folder to prevent leakage between sets
# (duplicates go to train/, so the source pair must not remain in the
# validation pool).  Upper-case .JPG extensions are preserved on move.
for y_fname in single_cat_list:
    x_fname = y_fname.replace('.txt', '.jpg')
    # print(x_fname)
    shutil.move(y_path + y_fname,
                y_path + 'train/' + y_fname)
    if exists(x_path + x_fname):
        shutil.move(x_path + x_fname, x_path + 'train/' + x_fname)
    elif exists(x_path + x_fname.replace('.jpg', '.JPG')):
        shutil.move(
            x_path + x_fname.replace('.jpg', '.JPG'),
            x_path + 'train/' + x_fname.replace('.jpg', '.JPG')
        )
##
def add_fnames_to_list(cat_id, file_list=None):
    """Return the annotation files whose first whitespace-separated token on
    any line equals *cat_id*.

    Parameters
    ----------
    cat_id : int
        Category id to look for (compared as a string against each line's
        first token).
    file_list : list of str, optional
        Files to scan.  Defaults to the module-level ``single_cat_list``,
        preserving the original behavior; passing it explicitly makes the
        function reusable and testable.
    """
    if file_list is None:
        file_list = single_cat_list
    fnames = []
    target = str(cat_id)
    for path in file_list:
        # Context manager replaces the original open-without-close; any()
        # short-circuits on the first matching line.
        with open(path, 'r') as handle:
            if any(line.split(' ')[0] == target for line in handle):
                fnames.append(path)
    return fnames
# cat_0 = add_fnames_to_list(cat_id = 0) # 1 138 # 100
# cat_1 = add_fnames_to_list(cat_id = 1) # 32 127 # 7
# cat_2 = add_fnames_to_list(cat_id = 2) # 44 184 # 5
# cat_3 = add_fnames_to_list(cat_id = 3) # 15 104 # 13
# cat_4 = add_fnames_to_list(cat_id = 4) # 3 99 # 50
# cat_5 = add_fnames_to_list(cat_id = 5) # 37 198 # 4
# cat_6 = add_fnames_to_list(cat_id = 6) # 14 62 # 3
# cat_7 = add_fnames_to_list(cat_id = 7) # 19 119 # 9
# cat_8 = add_fnames_to_list(cat_id = 8) # 23 148 # 12
# cat_11 = add_fnames_to_list(cat_id = 11) # 64 260 # 4
# cat_12 = add_fnames_to_list(cat_id = 12) # 39 263 # 6
# cat_14 = add_fnames_to_list(cat_id = 14) # 27 157 # 6
#
# print(cat_15)
# Per-category oversampling plan: each single-class annotation of category
# `id` is duplicated `amount` times — presumably to balance class
# frequencies (see the counts noted in the comments above); confirm the
# amounts against the current dataset before rerunning.
duplicate_cat_list = [{'id' : 0, 'amount' : 100},
                      {'id' : 1, 'amount' : 7},
                      {'id' : 2, 'amount' : 5},
                      {'id' : 3, 'amount' : 13},
                      {'id' : 4, 'amount' : 50},
                      {'id' : 5, 'amount' : 4},
                      {'id' : 6, 'amount' : 3},
                      {'id' : 7, 'amount' : 9},
                      {'id' : 8, 'amount' : 12},
                      {'id' : 11, 'amount' : 4},
                      {'id' : 12, 'amount' : 6},
                      {'id' : 14, 'amount' : 6},
                      ]
# for dict in duplicate_cat_list:
# files_to_dup = add_fnames_to_list(cat_id=dict['id'])
# for fname in files_to_dup:
# # print(dict['id'], dict['amount'], fname)
# duplicate_im_and_ann(fname, dict['amount'])
| fastai-trash-team/TACO-data-preprocessing | replicate.py | replicate.py | py | 4,744 | python | en | code | 0 | github-code | 36 |
71685398505 | __author__ = 'apple'
from turtle import *
from random import randint
K = 20
def reg(szer, n):  # szer: bookcase width in pixels, n: number of shelves
    """Draw a bookcase of width *szer* with *n* shelves of random books."""
    start_position(szer, n)
    regal(szer, n)
    fd(K)
    # Draw each shelf, moving up 7 grid units (one shelf bay) between them.
    for _ in range(n):
        polka(szer // K - 2)
        up(7)
def polka(k):
    """Draw one shelf of *k* book slots: a white backdrop, then per slot a
    randomly chosen book (red/green/darkblue, 3-5 units tall); finishes by
    returning the turtle to the shelf's left edge."""
    rect(k * K, 6*K, "white")
    pendown()
    for _ in range(k):
        r = randint(1, 3)
        if r == 1:
            rect(K, 3 * K, "red")
        elif r == 2:
            rect(K, 4 * K, "green")
        elif r == 3:
            rect(K, 5 * K, "darkblue")
        fd(K)
    penup()
    bk(k * K)
def rect(a, b, col):
    """Draw a filled a-wide, b-tall rectangle in color *col*; the turtle ends
    where it started, with its heading unchanged (two half-turn passes)."""
    fillcolor(col)
    begin_fill()
    for _ in range(2):
        fd(a)
        lt(90)
        fd(b)
        lt(90)
    end_fill()
def border(szer):
    """Draw the two K-square sienna end blocks of a width-*szer* border,
    leaving the turtle back at its starting position."""
    rect(K, K, "sienna")
    fd(szer - K)
    rect(K, K, "sienna")
    bk(szer - K)
def up(k):
    """Shift the turtle *k* grid units upward (negative k moves down),
    preserving its original heading."""
    lt(90)
    fd(k * K)
    rt(90)
def start_position(szer, n):
    """Lift the pen and move to the bottom-left corner so an n-shelf,
    szer-wide bookcase ends up centered on the canvas."""
    penup()
    bk(szer / 2)
    up(-(n * 7 + 3) / 2)
def regal(szer, n):
    """Draw the bookcase frame: bottom border, sienna back panel tall enough
    for n shelf bays, and top border; ends just above the bottom border."""
    border(szer)
    up(1)
    rect(szer, n * 7 * K + K, "sienna")
    up(n * 7 + 1)
    border(szer)
    up(-n * 7)
#speed(0)
reg(300, 4)
done()
| chinski99/minilogia | 2015/etap 2/reg.py | reg.py | py | 1,142 | python | hu | code | 0 | github-code | 36 |
16528703119 | from scipy import signal
from pywebio.input import *
from pywebio.output import *
from pywebio import start_server
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import io
def fig2img(fig):
    """Render a Matplotlib figure into an in-memory PIL Image."""
    buffer = io.BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return Image.open(buffer)
def plot_mag(w, mag):
    """Draw the Bode magnitude curve on a fresh semilog-x figure."""
    plt.close()
    figure = plt.figure(figsize=(12, 5))
    plt.title("Magnitude plot", fontsize=16)
    plt.semilogx(w, mag)
    plt.grid(True)
    return figure
def plot_freqrsp(w, H):
    """Plot the complex frequency response: H in blue, its conjugate mirror
    in red."""
    # Close the previous figure first, matching the other plot_* helpers;
    # the original omitted this and leaked one figure per call.
    plt.close()
    plt.figure(figsize=(12, 5))
    plt.title("Frequency response", fontsize=16)
    plt.plot(H.real, H.imag, "b")
    plt.plot(H.real, -H.imag, "r")
    plt.grid(True)
    return plt.gcf()
def plot_phase(w, phase):
    """Draw the Bode phase curve on a fresh semilog-x figure."""
    plt.close()
    figure = plt.figure(figsize=(12, 5))
    plt.title("Phase plot", fontsize=16)
    plt.semilogx(w, phase)
    plt.grid(True)
    return figure
def plot_impulse(t, y):
    """Draw the time-domain impulse response."""
    plt.close()
    figure = plt.figure(figsize=(12, 5))
    plt.title("Impulse response", fontsize=16)
    plt.plot(t, y)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.grid(True)
    return figure
def plot_step(t,y):
    """
    Plot the system's step response over time and return the figure.
    """
    plt.close()  # close the previous figure so open figures do not accumulate
    plt.figure(figsize=(12,5))
    plt.title("Step response",fontsize=16)
    plt.plot(t,y)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.grid(True)
    return plt.gcf()
def system(num,den):
    """
    Render all analysis plots for the LTI system num(s)/den(s) into the
    PyWebIO scope named 'raw' (the scope is recreated on every call).

    Args:
        num: numerator polynomial coefficients.
        den: denominator polynomial coefficients.
    """
    remove(scope='raw')
    with use_scope(name='raw',clear=True,) as img:
        #sys = signal.TransferFunction([20,5], [10, 100,1])
        # NOTE(review): the name `sys` shadows the stdlib module inside
        # this function — harmless here, but worth renaming.
        sys = signal.TransferFunction(num, den)
        # Logarithmic frequency grid from 10^-3 to 10^4 rad/s.
        w=[10**(i/10) for i in range(-30,41)]
        # Bode
        w, mag, phase = signal.bode(sys,w=w)
        f1 = plot_mag(w,mag)
        im1 = fig2img(f1)
        put_image(im1)
        f2 = plot_phase(w,phase)
        im2 = fig2img(f2)
        put_image(im2)
        # Freq response
        w, H = signal.freqresp(sys,w=w)
        f3 = plot_freqrsp(w,H)
        im3 = fig2img(f3)
        put_image(im3)
        # Impulse response
        t, y = signal.impulse(sys)
        f4 = plot_impulse(t,y)
        im4 = fig2img(f4)
        put_image(im4)
        # Step response
        t, y = signal.step(sys)
        f5 = plot_step(t,y)
        im5 = fig2img(f5)
        put_image(im5)
def app():
"""
Main app
"""
put_markdown("""
# LTI system demo (using `Scipy.signal`)
## [Dr. Tirthajyoti Sarkar](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/)
## What is a LTI system anyway?
In system analysis, among other fields of study, a linear time-invariant system (or *"LTI system"*) is a system that produces an output signal from any input signal subject to the constraints of **linearity** and **time-invariance**. LTI system theory is an area of applied mathematics which has direct applications in electrical circuit analysis and design, signal processing and filter design, control theory, mechanical engineering, image processing, the design of measuring instruments of many sorts, NMR spectroscopy, and many other technical areas where systems of ordinary differential equations present themselves.
## What are we doing here?
From a given transfer function, we calculate and display the following,
- Bode magnitude plot
- Bode phase plot
- Frequency response plot (real vs. imaginary)
- Impulse response plot
- Step response plot
""", strip_indent=4)
tf = input_group("Transfer function",[input("Input the coefficients of numerator:", type=TEXT,name='num',
help_text='Example: 2,1. No gap between a number and the commas, please.'),
input("Input the coefficients of denominator:", type=TEXT,name='den',
help_text='Example: 5,-2,11. No gap between a number and the commas, please.')],
)
num = [float(n) for n in tf['num'].split(',')]
den = [float (n) for n in tf['den'].split(',')]
system(num,den)
if __name__ == '__main__':
start_server(app,port=9999,debug=True) | tirthajyoti/PyWebIO | apps/bode.py | bode.py | py | 4,531 | python | en | code | 9 | github-code | 36 |
889427858 | from connection import create_connection
import numpy as np,numpy.random
from numpy.core.fromnumeric import size
import requests
from bson.objectid import ObjectId
from tag_classes import classifications
import random
def random_classification():
    """Pick one random option for every tag defined in `classifications`."""
    return {tag: random.choice(options)
            for tag, options in classifications.items()}
def classify_tags(par,document_id):
    """Classify every paragraph in `par` via the local tagging service and
    store the collected tag lists on the MongoDB document `document_id`.

    Returns "Updated" on success; on any failure prints the exception and
    returns it to the caller.
    """
    try:
        tag_1_applicability = []
        tag_2_area = []
        tag_3_covenant_type = []
        tag_4_covenant_title_tag = []
        tag_5_covenant_description_sub_tags = []
        Tag_6_User_Defined = []
        # One HTTP request per paragraph; each response contributes one
        # entry to every per-tag list.
        for i in par:
            res = requests.post("http://127.0.0.1:5000/classify/tags",json = {"data":i}).json()
            tag_1_applicability.append(res["tag_1_applicability"])
            tag_2_area.append(res["tag_2_area"])
            tag_3_covenant_type.append(res["tag_3_covenant_type"])
            tag_4_covenant_title_tag.append(res["tag_4_covenant_title_tag"])
            tag_5_covenant_description_sub_tags.append(res["tag_5_covenant_description_sub_tags"])
            Tag_6_User_Defined.append(res["Tag_6_User_Defined"])
        tags = {"tag_1_applicability":tag_1_applicability,"tag_2_area":tag_2_area,
        "tag_3_covenant_type":tag_3_covenant_type,"tag_4_covenant_title_tag":tag_4_covenant_title_tag,
        "tag_5_covenant_description_sub_tags":tag_5_covenant_description_sub_tags,
        "Tag_6_User_Defined":Tag_6_User_Defined}
        db= create_connection()
        # NOTE(review): Collection.update() was removed in PyMongo 4 —
        # presumably this runs on PyMongo 3.x (update_one is the modern
        # replacement); confirm the pinned driver version.
        for i in tags:
            db.update({'_id': ObjectId('{}'.format(document_id)) },{ "$set" : {i:tags[i]}})
        print("Tags inserted in Document ID {}".format(document_id))
        return "Updated"
    except Exception as e:
        print(e)
        return e
| saarthakbabuta1/loan-agreement | classify.py | classify.py | py | 1,858 | python | en | code | 0 | github-code | 36 |
20031150368 | #encoding: utf-8
#description: 数字序号下的粗抽取
from __future__ import print_function
import os
import re
def produce_filename(targetdir):
    """Run attr_get on every .txt file directly inside `targetdir`.

    Fixes: suffix test via str.endswith, and os.path.join instead of the
    hard-coded Windows "\\" separator so the scan works on any OS.
    """
    for name in os.listdir(targetdir):
        if name.endswith('.txt'):
            print("//"*20,name,"//"*20)
            print(name,'OK')
            attr_get(os.path.join(targetdir, name))
def attr_get(filename):
    """Extract known attribute sections from one insurance-clause text file.

    Writes the preamble (text before the first 阅/请 marker) and every
    matched attribute section, wrapped in ROUNDBEGIN/ROUNDEND markers, to
    "attr_get_" + the last 50 characters of `filename` (append mode).

    Fixes: both files are closed via a context manager even on error, the
    hard-coded `while i < 79` loop now iterates the attribute list itself,
    and bare `except:` clauses are narrowed to `except Exception`.
    """
    newname = "attr_get_" + filename[-50:]
    with open(filename, 'r', encoding='utf-8') as f, \
            open(newname, 'a+', encoding='utf-8') as save:
        s = f.read()
        # Preamble: everything before the first 阅/请 character.
        try:
            make = re.search(r"阅|请", s, re.M)
            print(s[:make.start()], file=save)
        except Exception:
            # No marker found (make is None): record the failure and continue.
            print('Error', file=save)
        data_set = ["保险责任", "责任免除","保险事故", "保险费", "保险期间", "解除合同", "保险金给付", "保险金额",
                    "保险事故通知", "犹豫期", "效力恢复", "宽限期", "投保范围", "续保","缴费方式",
                    "疾病身故保险金","护理保险金","健康护理保险金","长寿护理保险金","健康维护保险金",
                    "观察期","最低保证利率","保单贷款政策","部分领取","等待期","保险金额计算方式",
                    "保险费率的调整","宽限期","退保","自动垫交保险费","重大疾病保险金","身故保险金","重大疾病的范围",
                    "是否有多次给付","重大疾病保险金给付日","给付总额的保证","基本保险金额的变更",
                    "退保/解除合同","首个重大疾病保险金给付日","承保人群","重度失能保险金","一般失能保险金",
                    "身体全残保险金","一般失能的范围","重度失能的范围","保费豁免","给付标准和保险期间的关系",
                    "减保","减保(减额交清保险)","减额交清保险","身故给付","身故给付(可能以特殊退费形式)",
                    "可能以特殊退费形式","定期复查","保单年度累计给付限额","保单年度累计给付限额(年限额)",
                    "年限额","所有保险期间内最高给付限额","所有保险期间内最高给付限额(最高给付金额)",
                    "最高给付金额","每日给付限额","每日给付限额(日限额)","日限额","保单年度内累计最高给付日数",
                    "住院及手术医疗保险金","门诊医疗保险金","参加社会医疗保险","社保补偿","是否存在提额情况",
                    "保险人不同意续保下","住院费用和门诊费用的范围","无保险事故优惠","保险事故通知时间","合同终止与满期之间间隔限制",
                    "合作医院","预授权","未及时核定补偿","险种转换","是否存在保额提升情况"]
        for name in data_set:
            try:
                # Numbered heading such as "1.2  <attribute>  <text>".
                pattern = "\d([.]\d)+\s+" + name + "\s+[\u4e00-\u9fa5]+\s*"
                match = re.search(pattern, s, re.M)
                begin = match.start()
                start = match.end()
                # The section ends where the next numbered heading begins.
                match1 = re.search("\d[.]\d*\s+[\u4e00-\u9fa5]+", s[start:], re.M)
                end = match1.start()
                print('ROUNDBEGIN', name, file=save)
                print(s[begin:end + start], file=save)
                print('ROUNDEND', name, file=save)
            except Exception:
                # Attribute absent from this document: skip it silently.
                pass
produce_filename('D:\\KG\\testa')
#attr_get('D:\\PyCharmProjects\\attrifind\\test\\健康保险_疾病保险\\.txt') | Wilson-ZHANG/AttributeExtraction | find_numNo.py | find_numNo.py | py | 3,557 | python | en | code | 2 | github-code | 36 |
27478835009 | # 1 задание
my_list = [1, 1.2, None, True, 'Text', ['list'], {'key_1':'Val_1'}]
for itam in my_list:
print(type(itam))
# 2 задание
my_list2 = input('Введите элементы списка через запятую: ')
my_list2 = my_list2.split(',')
print(my_list2)
my_list2_len = len(my_list2) if len(my_list2) % 2 ==0 else len(my_list2)-1
i=0
while i <= my_list2_len-1:
if i%2 ==0:
my_list2[i], my_list2[i+1] = my_list2[i+1], my_list2[i]
i+=1
else:
i+=1
print(my_list2)
# 3 задание
month = {'1':'winter', '2':'winter', '3':'spring', '4':'spring', '5':'spring', '6':'summer', '7':'summer', '8':'summer', '9':'autumn', '10':'autumn', '11':'autumn', '12':'winter'}
try:
print(month[input('Введите номер месяца: ')])
except KeyError:
print(f'Такого номера месяца не существует')
try:
month_input = int(input('Введите номер месяца: '))
except:
print(f'Такого номера месяца не существует')
month_input = int(input('Введите номер месяца заново: '))
winter = [1,2,12]
spring = [3,4,5]
summer = [6,7,8]
autumn = [9,10,11]
if month_input in winter:
print("Winter")
elif month_input in spring:
print('Spring')
elif month_input in summer:
print('Summer')
elif month_input in autumn:
print('Autumn')
else: print(f'Такого номера месяца не существует')
# 4 задание
# Fix: the original bound the user's text to the name `str`, shadowing the builtin.
user_text = input('Введите строку: ')
str_list = user_text.split(' ')
print(str_list)
print(str_list[1])  # NOTE(review): IndexError on a one-word input; looks like leftover debug output — confirm
# Print each word numbered from 1, truncated to its first 10 characters.
for i, word in enumerate(str_list, start=1):
    print(f'{i}. {word[:10]}')
# 5 задание
# Fixes: the bare `tuple(my_list5)` no-op and debug type() prints are gone,
# every entered value is now kept in the rating (the old loop rebuilt
# `result` from the original five values each pass, so only the last input
# survived), and zero inputs no longer raises NameError on the final print.
ratings = [7, 5, 3, 3, 2]
am_inputs = int(input('Введите количество вводов в рейтинг: '))
for _ in range(am_inputs):
    user_input = int(input('Введите значение в рейтинг: '))
    # Insert and keep the list sorted in descending order.
    ratings = sorted(ratings + [user_input], reverse=True)
print(ratings)
# 6 задание
import sys
import os
import json
with open('goods_base.jon', 'r') as f:
lines = (f.readlines())
def add_good():
    """Prompt for one product and append it as a JSON line to goods_base.jon.

    The record is a (line_number, fields) tuple where line_number is one
    past the number of lines read at module import time.

    Fix: removed the dead `json_new_good = json.dumps(new_good)` assignment
    (its result was never used; json.dump below does the writing).
    """
    goods_dict = {}
    goods_dict['Название'] = input('Введите название товара: ')
    goods_dict['Цена'] = int(input('Введите цену товара: '))
    goods_dict['Количество'] = int(input('Введите количество товара: '))
    goods_dict['Единицы измерения'] = input('Введите единицы измерения товара: ')
    new_good = (len(lines) + 1, goods_dict)
    print(type(new_good))  # NOTE(review): debug output — consider removing
    with open('goods_base.jon', 'a', encoding='utf-8') as f:
        json.dump(new_good, f)
        f.write('\n')
    print(len(lines))  # NOTE(review): debug output — consider removing
add_good()
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
print(goods)
print(len(lines))
names = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
names.append(goods[1]['Название'])
price = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
price.append(goods[1]['Цена'])
ammount = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
ammount.append(goods[1]['Количество'])
units = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
units.append(goods[1]['Единицы измерения'])
analis = {
'Название':[names],
'Цена':[price],
'Количество':[ammount],
'Единицы измерения':[units]
}
for key,val in analis.items():
print(key, val[0]) # не понял почему у меня массив вложен в массив, откуда второй массив взялся???
| Glen1679/GeekBrains | Homework2.py | Homework2.py | py | 4,241 | python | ru | code | 0 | github-code | 36 |
22846496457 | from DatabaseContextManager import DatabaseContextManager
def create_table_jobs():
    """Create the `jobs` table (MySQL syntax) with foreign keys into the
    companies and categories tables; `id` auto-increments."""
    query = """CREATE TABLE `jobs`(
                `id` integer NOT NULL AUTO_INCREMENT,
                `company_id` integer,
                `category_id` integer,
                `job_title` varchar(255),
                `salary` DECIMAL(50, 2),
                `description` varchar(255),
                `location` varchar(100),
                `position` varchar(100),
                `category` varchar(100),
                PRIMARY KEY (id),
                FOREIGN KEY (company_id) REFERENCES companies(id),
                FOREIGN KEY (category_id) REFERENCES categories(id));"""
    with DatabaseContextManager() as db:
        cursor = db.cursor()
        cursor.execute(query)
def create_jobs(company_id, category_id, job_title, salary, description, location, position, category):
    """Insert one job row; all values are bound through %s placeholders."""
    query = """INSERT INTO jobs
    (company_id, category_id, job_title, salary, description, location, position, category)
    VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
    """
    parameters = [company_id, category_id, job_title, salary, description, location, position, category]
    with DatabaseContextManager() as db:
        cursor = db.cursor()
        cursor.execute(query, parameters)
def get_jobs():
    """Fetch and print every row of the jobs table (returns nothing)."""
    query = """SELECT * FROM jobs"""
    with DatabaseContextManager() as db:
        cursor = db.cursor()
        cursor.execute(query)
        print(cursor.fetchall())
def delete_jobs(job_id):
    """Delete the job row with the given id.

    Fix: the original used the sqlite-style `?` placeholder and called
    `execute` directly on the connection object; every other helper in this
    module binds parameters with the MySQL `%s` placeholder through a cursor.
    """
    query = """DELETE FROM jobs
                WHERE id = %s"""
    parameters = [job_id]
    with DatabaseContextManager() as db:
        cursor = db.cursor()
        cursor.execute(query, parameters)
def get_all_tables():
    """Print jobs joined with companies.

    NOTE(review): NATURAL JOIN matches on every identically named column
    (e.g. `id` exists in both tables), which can over-restrict the join —
    confirm the intended join keys. This is also the only helper passing
    is_select=True to the context manager; verify that is deliberate.
    """
    query = """SELECT * FROM jobs
                NATURAL JOIN companies """
    with DatabaseContextManager(is_select=True) as db:
        cursor = db.cursor()
        cursor.execute(query)
        print(cursor.fetchall())
| Zydrunas-Sir/RemoteJob | TasksInMySQL/Jobs.py | Jobs.py | py | 1,790 | python | en | code | 0 | github-code | 36 |
28984078467 | import sys
# Fast stdin reader (rebinds the builtin `input` on purpose).
input = sys.stdin.readline
n = int(input())
# m[i] — presumably how many already-placed students student i+1 steps past
# (Baekjoon 2605-style input); the code only relies on it as an insert offset.
m = list(map(int, input().split()))
answer = []
for i in range(n):
    # len(answer) == i here, so index i - m[i] places student i+1 exactly
    # m[i] positions back from the current end of the line.
    answer.insert(i-m[i], i+1)
print(*answer)
| youkyoungJung/solved_baekjoon | 백준/Bronze/2605. 줄 세우기/줄 세우기.py | 줄 세우기.py | py | 182 | python | en | code | 0 | github-code | 36 |
11532738343 | OpacInfo = provider(
doc = "opa cli toolchain",
fields = ["opa", "capabilities_json", "builtin_metadata_json", "opa_signer"],
)
def _opa_toolchain_impl(ctx):
    """Wrap the rule's attributes in an OpacInfo and expose it as ToolchainInfo."""
    toolchain_info = platform_common.ToolchainInfo(
        # Consumers read these via ctx.toolchains[...].opacinfo.
        opacinfo = OpacInfo(
            opa = ctx.executable.opa,
            capabilities_json = ctx.file.capabilities_json,
            builtin_metadata_json = ctx.file.builtin_metadata_json,
            opa_signer = ctx.executable.opa_signer,
        ),
    )
    return [toolchain_info]
# Declares an OPA toolchain instance; register it via native toolchain()
# wrappers so rules can request the opa binary and its metadata files.
opa_toolchain = rule(
    implementation = _opa_toolchain_impl,
    attrs = {
        # The OPA CLI binary itself (built for the execution platform).
        "opa": attr.label(
            executable = True,
            allow_single_file = True,
            mandatory = True,
            cfg = "exec",
        ),
        # Capabilities description consumed alongside the binary.
        "capabilities_json": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
        "builtin_metadata_json": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
        # Helper used to sign bundles.
        "opa_signer": attr.label(
            executable = True,
            cfg = "exec",
            default = "//tools:opa_signer",
        ),
        # NOTE(review): opa_ctx is declared here but never read by
        # _opa_toolchain_impl — confirm whether it is still needed.
        "opa_ctx": attr.label(
            executable = True,
            cfg = "exec",
            default = "//tools:opa_ctx",
        ),
    },
)
| ticketmaster/rules_opa | opa/private/opa_toolchain.bzl | opa_toolchain.bzl | bzl | 1,292 | python | en | code | 4 | github-code | 36 |
20681014178 | __author__ = 'elmira'
import numpy as np
import itertools
from matplotlib import mlab
import re
with open('corpus1.txt', encoding='utf-8') as f:
news = f.read()
with open('corpus2.txt', encoding='utf-8') as f:
anna = f.read()
anna_sentences = re.split(r'(?:[.]\s*){3}|[.?!]', anna)
news_sentences = re.split(r'(?:[.]\s*){3}|[.?!]', news)
def words(sentence):
    """Lower-case `sentence` and split it on runs of whitespace."""
    lowered = sentence.lower()
    return lowered.split()
def word_lens(sentence):
    """Return the length of every word in `sentence` (a list of words)."""
    return list(map(len, sentence))
def different_letters(sentence):
    """Count the distinct Russian letters used across all words of `sentence`."""
    russian_letters = set('ёйцукенгшщзхъфывапролджэячсмитьбю')
    seen = {letter for word in sentence for letter in word}
    # Anything outside the Russian alphabet (digits, latin, punctuation) is ignored.
    return len(seen & russian_letters)
def vowels(word):
    """Count the Russian vowel letters in `word`."""
    vowel_set = set('ёуеэоаыяию')
    return sum(1 for letter in word if letter in vowel_set)
def vowels_in_sent(sentence):
    """Return the vowel count of every word in `sentence`."""
    return list(map(vowels, sentence))
anna_sent = [words(sentence) for sentence in anna_sentences if len(words(sentence)) > 0]
news_sent = [words(sentence) for sentence in news_sentences if len(words(sentence)) > 0]
anna_data = [(sum(word_lens(sentence)), # длина предложения в буквах,
different_letters(sentence), # число различных букв в предложении,
sum(vowels_in_sent(sentence)), # число гласных в предложении,
np.median(word_lens(sentence)), # медиана числа букв в слове,
np.median(vowels_in_sent(sentence))) # медиана числа гласных в слове.
for sentence in anna_sent]
news_data = [(sum(word_lens(sentence)),
different_letters(sentence),
sum(vowels_in_sent(sentence)),
np.median(word_lens(sentence)),
np.median(vowels_in_sent(sentence)))
for sentence in news_sent]
from matplotlib import pyplot as plt
anna_data = np.array(anna_data)
news_data = np.array(news_data)
# Homework part: project both corpora onto the first two principal components.
data = np.vstack((anna_data, news_data))
# NOTE(review): matplotlib.mlab.PCA was removed from modern matplotlib —
# this line only runs on old releases; confirm the pinned version.
p = mlab.PCA(data, True)
N = len(anna_data)
# 'og' = green circles for the first N rows (anna corpus),
# 'sb' = blue squares for the remaining rows (news corpus).
plt.plot(p.Y[:N,0], p.Y[:N,1], 'og', p.Y[N:,0], p.Y[N:,1], 'sb')
plt.show()
print(p.Wt) | elmiram/homework | seminar9/task1 (2 points)/genre-by-letters.py | genre-by-letters.py | py | 2,438 | python | en | code | 0 | github-code | 36 |
7183231615 | #!/usr/bin/env python3
"""Finds the optimal number of clusters"""
import numpy as np
kmeans = __import__('1-kmeans').kmeans
variance = __import__('2-variance').variance
def optimum_k(X, kmin=1, kmax=None, iterations=1000):
    """Run K-means for every cluster count in [kmin, kmax].

    Args:
        X: np.ndarray of shape (n, d) containing the dataset.
        kmin: smallest cluster count to analyse (positive int).
        kmax: largest cluster count; defaults to `iterations` when None.
        iterations: maximum iterations per K-means run.

    Returns:
        (results, d_vars): one (centroids, assignments) pair per k, and the
        variance decrease of each run relative to the kmin run; or
        (None, None) on invalid input.
    """
    if not isinstance(X, np.ndarray) or len(X.shape) != 2:
        return None, None
    if not isinstance(iterations, int) or iterations < 1:
        return None, None
    if kmax is None:
        # NOTE(review): defaulting kmax to the iteration count is unusual —
        # confirm this matches the exercise checker's expectation.
        kmax = iterations
    if kmin is None:
        kmin = 1
    if not isinstance(kmin, int):
        return None, None
    if not isinstance(kmax, int):
        return None, None
    if kmin < 1:
        return None, None
    if kmax <= kmin:
        return None, None
    res = []
    d_vars = []
    var = 0
    for k in range(kmin, kmax + 1):
        C, clss = kmeans(X, k, iterations)
        var = variance(X, C)
        if k == kmin:
            # Baseline variance; later drops are measured against it.
            new_var = var
        if C is not None and clss is not None:
            res.append((C, clss))
        if isinstance(var, float):
            d_vars.append(new_var - var)
    return res, d_vars
| JohnCook17/holbertonschool-machine_learning | unsupervised_learning/0x01-clustering/3-optimum.py | 3-optimum.py | py | 1,126 | python | en | code | 3 | github-code | 36 |
20580965010 | from django import forms
from .models import Recipe
from channel.models import Channel
class RecipeForm(forms.ModelForm):
    """ModelForm for Recipe that also exposes the channel behind the chosen
    trigger/action so the channel dropdowns can be pre-filled when editing."""
    def __init__(self, *args, **kwargs):
        super(RecipeForm, self).__init__(*args, **kwargs)
        # When editing an existing recipe, preselect the channels that the
        # saved trigger and action belong to.
        if self.instance.id:
            self.fields['trigger_channel'].initial = self.instance.trigger.channel
            self.fields['action_channel'].initial = self.instance.action.channel
    # Extra (non-model) selectors; they are not in Meta.fields, so they are
    # never saved directly — presumably consumed by the view or client-side
    # code to filter the trigger/action choices. Confirm.
    trigger_channel = forms.ModelChoiceField(queryset=Channel.objects.all())
    action_channel = forms.ModelChoiceField(queryset=Channel.objects.all())
    class Meta:
        model = Recipe
        fields = ('trigger', 'action')
| theju/dtwt | recipe/forms.py | forms.py | py | 650 | python | en | code | 9 | github-code | 36 |
39398980966 | # Python Project B
# Multinomial Naive Bayes
# By
# Valdar Rudman
# R00081134
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
import numpy as np
# Read a file in and split on the white space
def readFile(source):
    """Read a text file and return its whitespace-separated tokens.

    Fixes: the original leaked the file handle (never closed) and relied on
    the platform default encoding; UTF-8 is now explicit.
    """
    with open(source, encoding='utf-8') as fin:
        return fin.read().split()
# Read a file in and split on new line
def readTweetsFile(source):
    """Read a file and return its lower-cased lines (split on newline).

    Fixes: the original leaked the file handle (never closed) and relied on
    the platform default encoding; UTF-8 is now explicit.
    """
    with open(source, encoding='utf-8') as fin:
        return fin.read().lower().split("\n")
# Gets the probability of the word. This can be the probability
# of a word being positive or negative
def prob_of_word(percentDict, fullDict):
    """For every word in `percentDict`, return its count divided by the
    total count recorded for that word in `fullDict`."""
    return {word: percentDict[word] / fullDict[word] for word in percentDict}
# Takes a list of words in and returns list without stopwords
def removeStopWords(sentence):
    """Tokenize `sentence` with NLTK and drop English stop words."""
    stopWords = set(stopwords.words('english'))
    return [token for token in word_tokenize(sentence) if token not in stopWords]
# Working out if tweets are positive or negative
def posNegTweets(tweets, wordsPos, wordsNeg):
    """Classify each tweet by its average positive vs. negative word score.

    Args:
        tweets: list of tweet strings.
        wordsPos / wordsNeg: word -> probability the word is positive/negative.

    Returns:
        [percent positive, percent negative, percent undecided] of `tweets`.

    Raises:
        ZeroDivisionError: if `tweets` is empty.

    Fix: removed the unused `uknownWord` local and the typo'd counter names.
    """
    posTweets, negTweets, unknownTweets = 0, 0, 0
    for tweet in tweets:
        tweet_words = tweet.split()
        posScore, negScore = 0, 0
        # NOTE(review): count starts at 1 (not 0), so the averages divide by
        # len(words) + 1 — kept as-is to preserve the original scoring; confirm.
        count = 1
        for word in tweet_words:
            if word in wordsPos:
                posScore += wordsPos[word]
            if word in wordsNeg:
                negScore += wordsNeg[word]
            count += 1
        posScore = posScore / count
        negScore = negScore / count
        if posScore > negScore:
            posTweets += 1
        elif negScore > posScore:
            negTweets += 1
        else:
            unknownTweets += 1
    # Percentages of the whole batch.
    return [((posTweets / len(tweets)) * 100), ((negTweets / len(tweets)) * 100), ((unknownTweets / len(tweets)) * 100)]
# Graph the before and after results of pre-processing for both negative and positive
def graph(PositiveBeforePP, positiveAfterPP, negativeBeforePP, negativeAfterPP):
    """Show a bar chart comparing classification results before and after
    stop-word pre-processing for the positive and negative test sets.

    Args are percentages (0-100); plt.show() blocks until the window closes.
    """
    BarTitles = ('Pos Before Pre-Processing',
                 'Pos After Pre-Processing',
                 'Neg before Pre-Processing',
                 'Neg After Pre-Processing')
    plot = [PositiveBeforePP, positiveAfterPP,
            negativeBeforePP, negativeAfterPP]
    y_pos = np.arange(len(BarTitles))
    plt.bar(y_pos, plot, align='center', alpha=0.1)
    plt.xticks(y_pos, BarTitles)
    plt.ylabel("Percentage")
    plt.xlabel("Data")
    plt.title("Tweets Accuracy")
    plt.show()
def main():
    """Train the word probabilities on the training corpora, score the test
    tweets before and after stop-word removal, and chart the results.

    Fix: removed two no-op `tweet.lower()` statements whose return value was
    discarded (readTweetsFile already lower-cases the text).
    """
    print("Reading in Training Files...")
    posList = readFile("train\\trainPos.txt")
    negList = readFile("train\\trainNeg.txt")
    posList = [item.lower() for item in posList]
    negList = [item.lower() for item in negList]
    print("Removing stopwords from training files...")
    # print(negList)
    posList = removeStopWords(' '.join(posList))
    negList = removeStopWords(' '.join(negList))
    # Getting unique words for positive and negative as well as getting a full set of them
    posSet = set(posList)
    negSet = set(negList)
    fullSet = posSet|negSet
    print("Creating dictionaries...")
    # Per-word occurrence counts for each class and overall.
    posDict = dict.fromkeys(posSet, 0)
    negDict = dict.fromkeys(negSet, 0)
    fullDict = dict.fromkeys(fullSet, 0)
    for word in posList:
        posDict[word] = posDict[word] + 1
        fullDict[word] = fullDict[word] + 1
    for word in negList:
        negDict[word] = negDict[word] + 1
        fullDict[word] = fullDict[word] + 1
    # print("Negative: ", negDict)
    # print("Full: ", fullDict)
    print("Calculate words pos/neg value...")
    wordsPos = prob_of_word(posDict, fullDict)
    wordsNeg = prob_of_word(negDict, fullDict)
    print("Reading in Pos Tweets and removing stopwords...")
    posTweets = readTweetsFile("test\\testPos.txt")
    posTweetsCleanedUp = []
    for tweet in posTweets:
        posTweetsCleanedUp.append(' '.join(removeStopWords(tweet)))
    print("Reading in Neg Tweets and removing stopwords...")
    negTweets = readTweetsFile("test\\testNeg.txt")
    negTweetsCleanedUp = []
    for tweet in negTweets:
        negTweetsCleanedUp.append(' '.join(removeStopWords(tweet)))
    print("Calculating Pre results...")
    posPreResults = posNegTweets(posTweets, wordsPos, wordsNeg)
    negPreResults = posNegTweets(negTweets, wordsPos, wordsNeg)
    print("Pre Results\nPositive: ", posPreResults, "\nNegative: ", negPreResults)
    print("Calculating Post results...")
    posPostResults = posNegTweets(posTweetsCleanedUp, wordsPos, wordsNeg)
    negPostResults = posNegTweets(negTweetsCleanedUp, wordsPos, wordsNeg)
    print("Post Results\nPositive: ", posPostResults, "\nNegative: ", negPostResults)
    graph(posPreResults[0], posPostResults[0], negPreResults[1], negPostResults[1])
if __name__ == '__main__':
main()
| ValdarRudman/Multinomial-Naive-Bayes | Multinomial Naive Bayes.py | Multinomial Naive Bayes.py | py | 5,233 | python | en | code | 0 | github-code | 36 |
29207899758 | # Definition for a QuadTree node.
class Node:
    """Quad-tree node.

    Fix: Solution.construct builds leaves as `Node(1, True)` / `Node(0, True)`
    and internal nodes as `Node(0, False)`, but the original __init__ required
    all six arguments, so every call raised TypeError. Defaults make those
    call sites valid while remaining backward-compatible.
    """
    def __init__(self, val=0, isLeaf=False, topLeft=None, topRight=None,
                 bottomLeft=None, bottomRight=None):
        self.val = val
        self.isLeaf = isLeaf
        self.topLeft = topLeft
        self.topRight = topRight
        self.bottomLeft = bottomLeft
        self.bottomRight = bottomRight
class Solution:
    def construct(self, grid) -> 'Node':
        """Build a quad tree for a square 0/1 `grid`: uniform regions become
        leaves, mixed regions split into four recursively."""
        def process(r1, r2, c1, c2):
            # Count zeros and ones in the half-open region [r1,r2) x [c1,c2).
            # NOTE: this rescans the region at every recursion level, so the
            # total work is O(n^2 log n) for an n x n grid.
            cnt0 = 0
            cnt1 = 0
            for i in range(r1, r2):
                for j in range(c1, c2):
                    if grid[i][j] == 0:
                        cnt0 += 1
                    else:
                        cnt1 += 1
            # NOTE(review): Node(1, True) passes only two of Node.__init__'s
            # six parameters — confirm Node provides defaults for the rest.
            if cnt0 == 0:
                node = Node(1, True)   # all ones -> leaf with val 1
                return node
            if cnt1 == 0:
                node = Node(0, True)   # all zeros -> leaf with val 0
                return node
            # Mixed region: split into four quadrants at the midpoints.
            node = Node(0, False)
            node.topLeft = process(r1, (r1 + r2)//2, c1, (c1+c2)//2)
            node.topRight = process(r1, (r1 + r2)//2, (c1+c2)//2, c2)
            node.bottomLeft = process((r1 + r2)//2, r2, c1, (c1+c2)//2)
            node.bottomRight = process((r1 + r2)//2, r2, (c1+c2)//2, c2)
            return node
        if len(grid) == 0:
            return None
        return process(0, len(grid), 0, len(grid))
| sakshi5250/6Companies30Days | INTUIT/Question11.py | Question11.py | py | 1,293 | python | en | code | 0 | github-code | 36 |
41763630844 | import random
from terminaltables import AsciiTable
import curses
GAME_TITLE = "`•.,¸¸ [ JEU DU TAQUIN ] ¸¸,.•´"
# Nombre de cases par côté
TAQUIN_SIZE = 4
# Valeur de la case vide
EMPTY_CASE_VALUE = ""
# Taquin correct, dans l'ordre
CORRECT_SOLUTION = [list(a) for a in zip(*[iter(list(range(1, TAQUIN_SIZE ** 2)) + [EMPTY_CASE_VALUE])] * TAQUIN_SIZE)]
# Jeu en cours
CURRENT_STATE = []
def get_available_movements():
    """Exercise stub: must return the list of legal moves for the empty
    tile, e.g. ["LEFT", "UP"]. As written, no move is ever allowed."""
    # TODO: return a list of possible movements, e.g. ["LEFT", "UP"]
    return []
def move(direction=None):
    """Exercise stub: apply `direction` ("UP"/"DOWN"/"LEFT"/"RIGHT") to the
    empty tile in CURRENT_STATE.

    Fix: handle_keypress invokes move("DOWN") etc., but the stub accepted no
    parameter, so any legal key press would raise TypeError.
    """
    # TODO: apply the movement of the empty tile
    pass
def has_won():
    """Exercise stub: must report whether the puzzle is solved — presumably
    by comparing CURRENT_STATE to CORRECT_SOLUTION. Currently returns None."""
    # TODO: check whether the game is won
    pass
def handle_keypress(screen):
    """Poll for one key press and react to it.

    Arrow keys request a move of the empty tile (only when reported legal by
    get_available_movements); 'q' exits by raising KeyboardInterrupt, which
    the main loop catches.
    """
    try:
        key = screen.getkey().upper()
    except:
        # In nodelay mode getkey raises when no key is pending — treat as
        # "no input" (presumably curses.error; confirm).
        return
    height, width = screen.getmaxyx()
    screen.erase()
    available_movements = get_available_movements()
    # NOTE(review): move() is called with an argument below, but the stub
    # `def move():` above takes none — these calls raise TypeError once
    # moves become available; confirm the intended signature.
    if key == "KEY_DOWN":
        screen.addstr(height - 1, 0, "↓ DOWN - A FAIRE", curses.A_REVERSE)
        if "DOWN" in available_movements:
            move("DOWN")
    elif key == "KEY_UP":
        screen.addstr(height - 1, 0, "↑ UP - A FAIRE", curses.A_REVERSE)
        if "UP" in available_movements:
            move("UP")
    elif key == "KEY_LEFT":
        screen.addstr(height - 1, 0, "← LEFT - A FAIRE", curses.A_REVERSE)
        if "LEFT" in available_movements:
            move("LEFT")
    elif key == "KEY_RIGHT":
        screen.addstr(height - 1, 0, "→ RIGHT - A FAIRE", curses.A_REVERSE)
        if "RIGHT" in available_movements:
            move("RIGHT")
    elif key in ("Q",):
        raise KeyboardInterrupt
def get_state_as_str(state):
    """Render the board (a list of rows) as an ASCII-art table string."""
    table = AsciiTable(state)
    table.inner_heading_row_border = False
    table.inner_row_border = True
    # NOTE(review): only columns 0 and 1 are centred although the board has
    # TAQUIN_SIZE columns — confirm whether all columns should be centred.
    table.justify_columns[0] = "center"
    table.justify_columns[1] = "center"
    return table.table
def display_output(screen, state):
    """Draw the title, the board and the help lines onto the curses screen."""
    # Title
    screen.addstr(0, 0, GAME_TITLE, curses.color_pair(1))
    # Board rendered as an ASCII table
    screen.addstr(2, 0, get_state_as_str(state), curses.color_pair(1))
    # Controls
    # NOTE(review): the help text advertises (r)eset and (s)olution, but
    # handle_keypress only implements 'q' — confirm.
    screen.addstr(4 + TAQUIN_SIZE * 2, 0, "Utiliser les flêches pour déplacer la case vide.")
    screen.addstr(5 + TAQUIN_SIZE * 2, 0, "(r)eset | (s)olution | (q)uitter")
def init_state():
    """Return a freshly shuffled TAQUIN_SIZE x TAQUIN_SIZE board as a list
    of rows, with EMPTY_CASE_VALUE somewhere in the grid."""
    cases = list(range(1, TAQUIN_SIZE ** 2)) + [EMPTY_CASE_VALUE]
    random.shuffle(cases)
    # zip over TAQUIN_SIZE copies of a single iterator chunks the flat list
    # into rows. NOTE(review): a uniform shuffle can produce unsolvable
    # 15-puzzle configurations — confirm whether solvability matters here.
    return [list(a) for a in zip(*[iter(cases)] * TAQUIN_SIZE)]
def main():
global CURRENT_STATE
"""Fonction principale de l'application"""
try:
# Initalisation de l'UI
stdscr = curses.initscr()
curses.start_color()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.noecho()
stdscr.keypad(True)
stdscr.nodelay(True)
# Récupération d'un taquin tiré aléatoirement
CURRENT_STATE = init_state()
while True:
# Attend une action et affiche le résultat
handle_keypress(stdscr)
display_output(stdscr, CURRENT_STATE)
# Frequence de rafraichissement
curses.napms(50) # ms
except KeyboardInterrupt:
pass
finally:
# Lorsqu'on quite, on restaure l'environnement du terminal
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
if __name__ == "__main__":
main()
| martync/taquin-py | taquin.py | taquin.py | py | 3,373 | python | en | code | 0 | github-code | 36 |
4801788061 | from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from manager.models import Task, TaskType
class TaskPublicTest(TestCase):
    """Anonymous visitors must be redirected to the login page from the
    task list view."""
    def test_task_list_public(self):
        res = self.client.get(reverse("manager:task-list"))
        # Fix: assertNotEquals is a long-deprecated unittest alias.
        self.assertNotEqual(res.status_code, 200)
        self.assertRedirects(res, "/accounts/login/?next=%2Ftasks%2F")
class TaskPrivateTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="testuser",
password="testpassword"
)
self.client = Client()
self.client.force_login(self.user)
self.task_type = TaskType.objects.create(
name="personal"
)
self.task = Task.objects.create(
title="Test Task",
owner=self.user,
description="This is a test task description.",
priority="URGENT",
task_type=self.task_type
)
def test_task_list(self):
url = reverse("manager:task-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "manager/tasks_list.html")
self.assertIn(self.task, response.context["object_list"])
def test_task_create(self):
url = reverse("manager:task-create")
data = {
"title": "New Task",
"description": "This is a new task description.",
"date": timezone.datetime(2023, 7, 24).date(),
"priority": "TO-DO",
"task_type": self.task_type.id,
}
res = self.client.post(url, data)
self.assertEqual(res.status_code, 302)
self.assertEqual(Task.objects.count(), 2)
new_task = Task.objects.get(title="New Task")
self.assertEqual(new_task.owner, self.user)
def test_task_update(self):
url = reverse("manager:task-update", args=[self.task.id])
data = {
"title": "Updated Task",
"description": "This is an updated task description.",
"date": timezone.datetime(2023, 7, 24).date(),
"priority": "TO-DO",
"task_type": self.task_type.id,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
updated_task = Task.objects.get(id=self.task.id)
self.assertEqual(updated_task.title, "Updated Task")
def test_task_delete(self):
url = reverse("manager:task-delete", args=[self.task.id])
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Task.objects.count(), 0)
def test_task_complete(self):
url = reverse("manager:task-complete", args=[self.task.id])
data = {"complete": "true"}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.task.refresh_from_db()
self.assertTrue(self.task.completed)
| kovaliskoveronika/task_manager | manager/tests/test_views_task.py | test_views_task.py | py | 3,088 | python | en | code | 0 | github-code | 36 |
7979146061 | # -*- coding: utf-8 -*-
"""
Fenêtre de gestion des notes
Par Geoffrey VENANT et Antoine CASTEL
En classe 2PD2
"""
import tkinter as tk
import definition as df
import Tkinter_GN_ajt as tkgnajt
def open_gn (fenetre_parent):
#Initialisation des paramètres de la fenêtre
fenetre_GN = tk.Toplevel(fenetre_parent)
fenetre_GN.iconbitmap("logo_chapeau.ico")
fenetre_GN.geometry("800x600")
fenetre_GN.title("Gestion Notes")
fenetre_GN.configure(bg="#313131")
df.tree_note_merge(fenetre_GN)
#Label principal
label1 = tk.Label(fenetre_GN, text = "Gestion des notes", font = ("arial",20), relief = "raised", bg = "#646464", bd = "6", fg = "white" )
label1.place(relx = -0.08, rely = 0.05, relwidth = 1.16, relheight = 0.12)
#Boutton Refresh de l'affichage du CSV
reload_button = tk.PhotoImage(file = "reload1.png")
Butaff = tk.Button(fenetre_GN, image = reload_button, font = ("arial",10), overrelief = "groove", activebackground = "grey", command = lambda : df.tree_note_merge(fenetre_GN), bg = "#646464", fg = "white")
Butaff.place(relx = 0.47, rely = 0.222, relwidth = 0.025, relheight = 0.042)
#Boutton menant à la fenêtre d'ajout de note
Butajt = tk.Button(fenetre_GN, text = "Ajouter une note", font = ("arial",10), overrelief = "groove", activebackground = "grey", command = lambda : tkgnajt.open_gn_ajt(fenetre_GN), bg = "#646464", fg = "white")
Butajt.place(relx = 0.15, rely = 0.3, relwidth = 0.2, relheight = 0.07)
#Boutton menant à la fenêtre de modification de note
Butmod = tk.Button(fenetre_GN, text = "Modifier une note", font = ("arial",10), overrelief = "groove", activebackground = "grey", command = lambda : tkgnajt.open_gn_mod(fenetre_GN), bg = "#646464", fg = "white")
Butmod.place(relx = 0.15, rely = 0.45, relwidth = 0.2, relheight = 0.07)
#Boutton menant à la fenêtre de suppression de note
Butsup = tk.Button(fenetre_GN, text = "Supprimer une note", font = ("arial",10), overrelief = "groove", activebackground = "grey", command = lambda : tkgnajt.open_gn_supp(fenetre_GN), bg = "#646464", fg = "white")
Butsup.place(relx = 0.15, rely = 0.6, relwidth = 0.2, relheight = 0.07)
#Boutton de fermeture de la fenêtre
ReturnGN = tk.Button(fenetre_GN, text = "Retour", font = ("arial",10), overrelief = "groove", command = lambda : df.close(fenetre_GN), activebackground = "red", bg = "#8BA0AC")
ReturnGN.place(relx = 0.17, rely = 0.8, relwidth = 0.16, relheight = 0.05)
#Lancement de la fenêtre
fenetre_GN.mainloop() | antoinecstl/Grand-Projet-2021-2022 | grand_projet/Tkinter_GN.py | Tkinter_GN.py | py | 2,693 | python | en | code | 0 | github-code | 36 |
4392041881 | import json
import base64
import pymongo
import time
from json.encoder import JSONEncoder
from azure.storage.queue import (
QueueClient,
BinaryBase64EncodePolicy,
BinaryBase64DecodePolicy
)
azure_storage_account = None
mongo_connect = None
queue = "test"
queue = "general-image-2-crawl"
cookies = []
with open("local.settings.json") as fin:
settings = json.load(fin)
azure_storage_account = settings.get("AzureStorageAccount")
mongo_connect = settings.get("MongoDBConnectionString")
if not azure_storage_account or not mongo_connect:
raise Exception("Null Settings on AzureStorageAccount or mongo connect")
# Setup Base64 encoding and decoding functions
base64_queue_client = QueueClient.from_connection_string(
conn_str=azure_storage_account, queue_name=queue,
message_encode_policy = BinaryBase64EncodePolicy(),
message_decode_policy = BinaryBase64DecodePolicy()
)
mongo_client = pymongo.MongoClient(mongo_connect)
mongo_db = 'dev'
mongo_collection = "mingju5"
mongo_docs = mongo_client[mongo_db][mongo_collection]
with open("data/mingju.csv", 'r', encoding='utf-8') as fin:
fin.readline()
for idx, line in enumerate(fin):
if idx < 4747:
continue
gs = line.split(",")
assert len(gs) == 4
doc = mongo_docs.find_one({"url":gs[1]})
if doc and 'sent_baidu_img_res' in doc and doc['sent_baidu_img_res'] and 'data' in doc['sent_baidu_img_res'] and doc['sent_baidu_img_res']['data']:
for i, image_info in enumerate(doc['sent_baidu_img_res']['data']):
d_int, d_str = {}, {}
if 'thumbURL' not in image_info:
continue
for key, value in image_info.items():
if value:
if type(value) is int:
d_int[key] = value
if type(value) is str:
d_str[key] = value
d_str["source_mingju"] = gs[0]
d_str["source_mingju_url"] = gs[1]
d_str["source_mingju_author_title"] = gs[2]
d_str["source_mingju_poem_url"] = gs[3]
d_int['bdDisplayNum'] = doc['sent_baidu_img_res'].get('displayNum', 0)
d = {
"image_url" : image_info['thumbURL'],
"add_string_info" : d_str,
"add_int_info" : d_int
}
base64_queue_client.send_message(JSONEncoder().encode(d).encode('utf-8'))
if doc:
doc['crawled'] = int(time.time())
mongo_docs.update_one({'url':gs[1]}, {"$set":doc})
print(idx, gs[0], "Done") | harveyaot/AlphaTaiBai | scripts/send_imageurl2crawl.py | send_imageurl2crawl.py | py | 2,867 | python | en | code | 24 | github-code | 36 |
6123605649 | import pandas as pd
import time
import numpy as np
from AI.models import NLPModel
# Architecture of the Muser Data Builder
class MuserDataBuilder:
    """Enrich music-analysis.csv with Spotify audio features and popularity.

    For each row the track is looked up on Spotify; rows whose track cannot
    be found fall back to an NLP model that returns the features of the most
    similar known document. The CSV is rewritten in place at the end.
    """
    # The constructor keeps the Spotify client (`sp`) and DB connection
    # (`conn`) and loads the working dataframe from disk.
    def __init__(self, sp, conn):
        self.sp = sp
        self.conn = conn
        self.df = pd.read_csv('music-analysis.csv')
    # Function to add feature columns to the muser data
    # Replace the existing csv
    def build_muser_data(self):
        # Initialise the new columns to empty strings ('' * n is still '',
        # so each assignment just broadcasts the empty-string scalar).
        self.df['acousticness'] = '' * self.df.shape[0]
        self.df['danceability'] = '' * self.df.shape[0]
        self.df['energy'] = '' * self.df.shape[0]
        self.df['instrumentalness'] = '' * self.df.shape[0]
        self.df['liveness'] = '' * self.df.shape[0]
        self.df['loudness'] = '' * self.df.shape[0]
        self.df['speechiness'] = '' * self.df.shape[0]
        self.df['tempo'] = '' * self.df.shape[0]
        self.df['valence'] = '' * self.df.shape[0]
        self.df['popularity'] = '' * self.df.shape[0]
        # Throttle: every 5th Spotify request sleeps a random 2-5 s to avoid
        # rate limiting.
        sleep_min = 2
        sleep_max = 5
        request_count = 0
        for idx in self.df.index:
            album = self.df.loc[idx, 'song_album_name']
            track = self.df.loc[idx, 'song_name']
            artist = self.df.loc[idx, 'song_artist_name']
            query = 'album:{} track:{} artist:{}'.format(album, track, artist)
            spotify_search = self.sp.search(query, limit=1, offset=0, type='track', market=None)
            request_count += 1
            if request_count % 5 == 0:
                time.sleep(np.random.uniform(sleep_min, sleep_max))
            if len(spotify_search['tracks']['items']) > 0:
                # Exact Spotify hit: pull audio features and popularity by URI.
                track_uri = spotify_search['tracks']['items'][0]['uri']
                audio_features = self.sp.audio_features(track_uri)[0]
                self.df.loc[idx, 'popularity'] = self.sp.track(track_uri)['popularity']
            else:
                # No Spotify hit: ask the NLP model for the most similar
                # document's features instead.
                target = album + ' ' + track + ' ' + artist
                nlp_model = NLPModel(self.sp, self.conn)
                audio_features = nlp_model.most_similar_doc(target)
                self.df.loc[idx, 'popularity'] = audio_features['popularity']
            self.df.loc[idx, 'acousticness'] = audio_features['acousticness']
            self.df.loc[idx, 'danceability'] = audio_features['danceability']
            self.df.loc[idx, 'energy'] = audio_features['energy']
            self.df.loc[idx, 'instrumentalness'] = audio_features['instrumentalness']
            self.df.loc[idx, 'liveness'] = audio_features['liveness']
            self.df.loc[idx, 'loudness'] = audio_features['loudness']
            self.df.loc[idx, 'speechiness'] = audio_features['speechiness']
            self.df.loc[idx, 'tempo'] = audio_features['tempo']
            self.df.loc[idx, 'valence'] = audio_features['valence']
        # NOTE(review): to_csv writes the index column; re-running this class
        # on its own output would accumulate 'Unnamed: 0' columns — consider
        # index=False after verifying downstream readers.
        self.df.to_csv('music-analysis.csv')
| CUTR-at-USF/muser-data-analysis | AI/muserdatabuilder.py | muserdatabuilder.py | py | 2,889 | python | en | code | 0 | github-code | 36 |
41852000033 | from flask import send_file, Flask, redirect, render_template, url_for
# from crypt import methods
import logging
from nltk.stem import WordNetLemmatizer
from fuzzywuzzy import fuzz
from nltk.corpus import wordnet
import nltk
from flask import send_from_directory, Flask, request, render_template, url_for, redirect, jsonify
from firebase_admin import credentials, firestore, initialize_app
import requests
import os.path
from werkzeug.utils import secure_filename
app = Flask(__name__)
app.secret_key = "somesecretkey"  # NOTE(review): hard-coded secret; move to env/config for production
app.config['ALLOWED_EXTENSIONS'] = ['.jpg', '.png']
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024  # cap request size at 1 MiB
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
# [logging config
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(filename)s:%(funcName)s:%(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO)
# logging config]
# Firestore initialisation; the service-account key is expected at ./key.json.
cred = credentials.Certificate('key.json')
default_app = initialize_app(cred)
db = firestore.client()
# <<<<<<< HEAD
# NOTE(review): leftover merge-conflict markers survive as comments below;
# the 'todos' branch won, and UPLOAD_FOLDER is re-assigned to the same value.
todo_ref = db.collection('todos')
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
# =======
# todo_ref = db.collection('keywords')
# >>>>>>> 84dd66fafd764c527993fc9ae8ebd16abc773985
BASE = "http://127.0.0.1:5000/"  # base URL used by the commented-out self-calling endpoints
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('wordnet')
# nltk.download('omw-1.4')
# Lemmatize with POS Tag
# Init the Wordnet Lemmatizer
lemmatizer = WordNetLemmatizer()
it = {}
# it = {'1.Welcome to Python.org':'Python is a popular general-purpose programming language. It is used in machine learning, web development, desktop applications, and many other fields.','Introduction to Python - W3Schools' : '2.Python is a popular programming language. It was created by Guido van Rossum, and released in 1991. It is used for: web development (server-side),',
# '3.Python Programming Language - GeeksforGeeks':' Python is a high-level, general-purpose and a very popular programming language. Python programming language (latest Python 3) is being used ...',
# '4.Lists in python' : 'In Python, a list is created by placing elements inside square brackets [] , separated by commas. ... A list can have any number of items and they may be of ...' ,
# '5. Data Structures — Python 3.10.6 documentation':'List comprehensions provide a concise way to create lists. Common applications are to make new lists where each element is the result of some operations applied ...',
# '6.Python Lists and List Manipulation | by Michael Galarnykhttps://towardsdatascience.com › python-basics-6-lists-a...':'Each item in a list has an assigned index value. It is important to note that python is a zero indexed based language. All this means is that the first item in ...',
# '7.Python Programming - Wikibooks, open books for an open world' : 'This book describes Python, an open-source general-purpose interpreted programming language available for the most popular operating systems.',
# '8.Complete Python Programming Python Basics to Advanced ...https://www.udemy.com › ... › Python':'10-Aug-2022 — Learn Python programming Python functions Python loops Python files Python DB Python OOP Python regex Python GUI game.',
# '9.Python 3 Programming Specialization - Courserahttps://www.coursera.org › ... › Software Development':'Offered by University of Michigan. Become a Fluent Python Programmer. Learn the fundamentals and become an independent programmer. Enroll for free.'
# }
def get_wordnet_pos(word):
    """Return the WordNet POS constant for *word*, defaulting to NOUN.

    Tags the single word with NLTK's POS tagger and maps the tag's first
    letter (J/N/V/R) onto the part-of-speech constant that
    ``WordNetLemmatizer.lemmatize`` accepts.
    """
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    pos_by_letter = {
        "J": wordnet.ADJ,
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "R": wordnet.ADV,
    }
    return pos_by_letter.get(first_letter, wordnet.NOUN)
@app.route("/")
def home():
return render_template("form.html")
@app.route("/learn", methods=['GET', 'POST'])
def lear():
return render_template("index.html",it = it)
@app.route('/res', methods=['POST'])
def my_form_post():
    """Detect programming-topic keywords in the submitted text.

    Lemmatizes the posted text, fuzzy-matches every token against a fixed
    keyword list, stores a summary in the module-level `it` dict for each
    matched keyword that has one, then redirects to the /learn page.
    """
    text = request.form['text']
    # Init Lemmatizer
    lemmatizer = WordNetLemmatizer()
    # Lemmatize a Sentence with the appropriate POS tag
    sentence = text
    dict_keywords = {"class": 0, "variable": 0, "setup": 0,
                     "object": 0, "function": 0, "comment": 0,"python":0 , "list" : 0,"dictionary": 0, "tuple":0 }
    sentence_list = [lemmatizer.lemmatize(
        w, get_wordnet_pos(w)) for w in nltk.word_tokenize(sentence)]
    print(sentence_list)
    # Fuzzy match (ratio > 50) so inflected or misspelled forms still count.
    for word in sentence_list:
        for key in dict_keywords:
            if fuzz.ratio(word, key) > 50:
                dict_keywords[key] = dict_keywords[key] + 1
    print(dict_keywords)
    words = []
    list_labels = {
        "list" : "Lists are one of 4 built-in data types in Python used to store collections of data, the other 3 are Tuple, Set, and Dictionary, all with different qualities and usage.Python Lists are just like dynamically sized arrays, declared in other languages (vector in C++ and ArrayList in Java). In simple language, a list is a collection of things, enclosed in [ ] and separated by commas.... read more ",
        "python": "Python is a high-level, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured, object-oriented and functional programming.Dictionaries are used to store data values in key:value pairs. A dictionary is a collection which is ordered*, changeable and do not allow duplicates...... read more",
        "tup" : "xyz"
    }
    for key in dict_keywords:
        if dict_keywords[key] > 0:
            words.append(key)
            # Fix: the original did `it[key] = list_labels[key]` unconditionally
            # and raised KeyError (HTTP 500) for any detected keyword without a
            # summary (e.g. "class", "variable"). Only store known summaries.
            if key in list_labels:
                it[key] = list_labels[key]
    print(words)
    return redirect("http://127.0.0.1:5000/learn", code=302)
@app.route('/download/<path:filename>', methods=['GET'])
def download(filename):
    """Send `<filename>.txt` from the uploads directory as an attachment."""
    txt_name = "{}.format".__class__ and "{}.txt".format(filename)
    logging.info('Downloading file= [%s]', txt_name)
    logging.info(app.root_path)
    uploads_dir = os.path.join(app.root_path, UPLOAD_FOLDER)
    logging.info(uploads_dir)
    return send_from_directory(uploads_dir, txt_name, as_attachment=True)
# @app.route('/download')
# def download_file():
# p = "lists.txt"
# return send_file(p,as_attachment=True)
# @app.route("/<name>")
# def user(name):
# return f"Hello {name}!"
if __name__ == "__main__":
app.run()
# @app.route('/add', methods=['POST'])
# def create():
# """
# create() : Add document to Firestore collection with request body
# Ensure you pass a custom ID as part of json body in post request
# e.g. json={'id': '1', 'title': 'Write a blog post'}
# """
# try:
# id = request.json['id']
# todo_ref.document(id).set(request.json)
# return jsonify({"success": True}), 200
# except Exception as e:
# return f"An Error Occured: {e}"
# @app.route('/list', methods=['GET'])
# def read():
# """
# read() : Fetches documents from Firestore collection as JSON
# todo : Return document that matches query ID
# all_todos : Return all documents
# """
# try:
# # Check if ID was passed to URL query
# todo_id = request.args.get('id')
# if todo_id:
# todo = todo_ref.document(todo_id).get()
# return jsonify(todo.to_dict()), 200
# else:
# all_todos = [doc.to_dict() for doc in todo_ref.stream()]
# return jsonify(all_todos), 200
# except Exception as e:
# return f"An Error Occured: {e}"
# @app.route('/callDelete', methods=['GET'])
# def callDelete():
# return render_template("delete.html")
# @app.route('/deleteByPost', methods=['POST'])
# def deleteByPost():
# id = request.form.get('id')
# response = requests.delete(
# BASE + f"delete?id={id}")
# response.raise_for_status() # raises exception when not a 2xx response
# if response.status_code != 204:
# return response.json()
# return False
# @app.route('/delete', methods=['GET', 'DELETE'])
# def delete():
# """
# delete() : Delete a document from Firestore collection
# """
# try:
# # Check for ID in URL query
# todo_id = request.args.get('id')
# todo_ref.document(todo_id).delete()
# return jsonify({"success": True}), 200
# except Exception as e:
# return f"An Error Occured: {e}"
# @app.route('/addByPost', methods=['POST'])
# def addByPost():
# id = request.form.get('id')
# title = request.form.get('title')
# response = requests.post(
# BASE + "add", json={'id': id, 'title': title})
# response.raise_for_status() # raises exception when not a 2xx response
# if response.status_code != 204:
# return response.json()
# return False
# @app.route('/callAdd', methods=['GET'])
# def callAdd():
# return render_template("add.html")
| Rohit-S-Singh/Research-Project | app.py | app.py | py | 9,288 | python | en | code | 0 | github-code | 36 |
15062131417 | import requests
import pandas
import datetime_translator
brandIds = { 202, 88, 31, 123, 101, 122, 36, 48, 135 }
data = {}
for brandId in brandIds:
headers = { 'User-Agent': '', 'content-type': 'application/json' }
jsonData = '{"variables":{"area":"salt-lake","brandId":%d,"countryCode":"US","criteria":{"location_type":"county"},"fuel":1,"maxAge":0,"regionCode":"UT"},"query":"query LocationByArea($area: String, $brandId: Int, $countryCode: String, $criteria: Criteria, $fuel: Int, $maxAge: Int, $regionCode: String) { locationByArea( area: $area countryCode: $countryCode criteria: $criteria regionCode: $regionCode ) { displayName locationType stations(brandId: $brandId, fuel: $fuel, maxAge: $maxAge) { results { address { country line1 line2 locality postalCode region } brands { brandId brandingType imageUrl name } latitude longitude fuels id name prices(fuel: $fuel) { cash { nickname postedTime price } credit { nickname postedTime price } discount fuelProduct } } } } }"}' %(brandId)
#jsonData = '{"variables":{"area":"davis","brandId":%d,"countryCode":"US","criteria":{"location_type":"county"},"fuel":1,"maxAge":0,"regionCode":"UT"},"query":"query LocationByArea($area: String, $brandId: Int, $countryCode: String, $criteria: Criteria, $fuel: Int, $maxAge: Int, $regionCode: String) { locationByArea( area: $area countryCode: $countryCode criteria: $criteria regionCode: $regionCode ) { displayName locationType stations(brandId: $brandId, fuel: $fuel, maxAge: $maxAge) { results { address { country line1 line2 locality postalCode region } brands { brandId brandingType imageUrl name } latitude longitude fuels id name prices(fuel: $fuel) { cash { nickname postedTime price } credit { nickname postedTime price } discount fuelProduct } } } } }"}' %(brandId)
response = requests.post('https://www.gasbuddy.com/graphql', headers=headers, data=jsonData)
jsonResponse = response.json()
stations = jsonResponse['data']['locationByArea']['stations']
for station in stations['results']:
if int(station['brands'][0]['brandId']) in brandIds and (station['prices'][0]['credit']['price'] != 0):
stationId = station['id']
data[stationId] = {
'StationName': station['name'],
'BrandName': station['brands'][0]['name'],
'AddressLine1': station['address']['line1'],
'City': station['address']['locality'],
'RegularFuelPrice': '${:.2f}'.format(station['prices'][0]['credit']['price']),
'TimeSinceReported': datetime_translator.translate(station['prices'][0]['credit']['postedTime']),
'ReportedBy': station['prices'][0]['credit']['nickname']
}
# Stations keyed by id, one row per column; transpose puts stations on rows.
output = pandas.DataFrame(data)
# Fix: transpose() and sort_values() return NEW DataFrames; the original
# discarded the result, so the subsequent print showed the unsorted,
# untransposed frame. Assign it back so the sorted view is what gets printed.
output = output.transpose().sort_values(by='RegularFuelPrice')
print(output) | ryanbarlow1/cheapest_gas_prices | get_gas_prices.py | get_gas_prices.py | py | 2,836 | python | en | code | 0 | github-code | 36 |
41214332774 | """ In this script I load both the original openML and the abello ones. Then I print the elements of abello which are not
in the original openML. This is because in the original openML table there are only the active ones. """
import pandas as pd
# It checks if lst1 is contained in lst2
def sublist(lst1, lst2):
    """Return True when every element of lst1 also occurs in lst2.

    Order and multiplicity are ignored (pure set containment).
    """
    return set(lst2).issuperset(lst1)
# Load the clean original openML csv (contains only the *active* datasets).
original_with_ids = pd.read_csv('csv/original/original_with_ids_all_metafeatures.csv')
original_with_ids_only_name = original_with_ids['Name'].values.tolist()
# Load the abello csv
abello = pd.read_csv('csv/abello/abello_simply_metafeatures.csv')
abello_only_name = abello['Name'].values.tolist()
# Check if the abello datasets are all contained in the original openML datasets
print('Are all the datasets active? ' + str(sublist(abello_only_name, original_with_ids_only_name)))
# Show the non active datasets
print('\nHere the non active datasets:')
# Perf fix: build the membership set once instead of scanning the name list
# (O(n)) for every record — output is identical, total cost drops to O(n).
original_names_set = set(original_with_ids_only_name)
for record in abello_only_name:
    if record not in original_names_set:
        print(record)
| josephgiovanelli/openML-datasets-profiling | tests/check_abello_active.py | check_abello_active.py | py | 1,048 | python | en | code | 0 | github-code | 36 |
16505504555 | from nltk.tag.hmm import *
import codecs
import statistics
import numpy as np
from sklearn.metrics import confusion_matrix
import metrics
from metrics import EditDistance
from hmm import HMM
from memm import MEMM
from crf_word import CRF as CRF_WORD
from crf_sentence import CRF as CRF_SENT
from rnn import Encoder as RNN
from post_proc.syllabification import syllabification
from post_proc.post_processing import romanize
# Human-readable names for pipeline stages 1..3 (index 0 is unused padding).
stage_names = ['', 'Vowels', 'Syllabification', 'Romanization']
def PrintConfMat(conf_mat):
    """Print micro- and macro-averaged precision/recall/F1 plus average
    accuracy for the given confusion matrix."""
    precision, recall = metrics.MicroAvg(conf_mat)
    f1 = metrics.Fscore(precision, recall, 1)
    print('MicroAvg:')
    print(' Precision = {}\n Recall = {}\n F1 = {}'.format(precision, recall, f1))
    precision, recall = metrics.MacroAvg(conf_mat)
    # Consistency fix: the original passed (recall, precision) here, the
    # opposite order of the MicroAvg call above. Harmless at beta=1 (F1 is
    # symmetric) but wrong for any other beta.
    f1 = metrics.Fscore(precision, recall, 1)
    print('MacroAvg:')
    print(' Precision = {}\n Recall = {}\n F1 = {}'.format(precision, recall, f1))
    print('Avg Accuracy:', metrics.AvgAcc(conf_mat))
def LoadTestData(file='data/HaaretzOrnan_annotated_test.txt'):
    """Parse the annotated test corpus.

    Returns a 4-tuple:
      sents      - list of sentences, each a list of unvowelled words (column 2)
      vow_words  - flat list of vowelled words with syllable dashes stripped (column 3)
      syll_words - flat list of vowelled words with syllable dashes kept (column 3)
      rom_words  - flat list of romanized words (column 4)
    Lines starting with '#' are comments; blank lines separate sentences.
    """
    sents = [[]]
    vow_words, syll_words, rom_words = [], [], []
    with codecs.open(file, encoding='utf-8') as f:
        for raw in f.readlines():
            line = raw.rstrip()
            if line.startswith(u'#'):
                continue
            if not line:
                # A blank line closes a sentence; never create empty ones.
                if sents[-1]:
                    sents.append([])
                continue
            cols = line.split(u' ')
            sents[-1].append(cols[2])
            vow_words.append(cols[3].replace(u'-', u''))
            syll_words.append(cols[3])
            rom_words.append(cols[4])
    # Drop a trailing empty sentence left by blank line(s) at end of file.
    if not sents[-1]:
        sents.pop()
    return sents, vow_words, syll_words, rom_words
def CalcConfMatrix(pred, gold):
    """Build a 6x6 vowel confusion matrix (rows = predicted, cols = gold).

    Vowel symbols ('e','u','i','o','a','*') occupy the odd indices of each
    word string; consonants at even indices are ignored.
    """
    symbols = u'euioa*'
    index_of = {sym: pos for pos, sym in enumerate(symbols)}
    mat = np.zeros((len(symbols), len(symbols)))
    for offset, p_sym in enumerate(pred[1::2]):
        mat[index_of[p_sym], index_of[gold[2 * offset + 1]]] += 1
    return mat
def TestModel(model, data):
    """Run `model` over the test sentences and score all three pipeline stages.

    `data` is the 4-tuple from LoadTestData(): (sents, vow_words, syll_words,
    rom_words). Returns (conf_mat, dist): conf_mat is the accumulated 6x6
    vowel confusion matrix; dist[stage] holds (avg, median, min, max) edit
    distance for stages 1..3 (dist[0] is a None placeholder so indices line
    up with stage numbers).
    """
    conf_mat = None
    dist = [None]
    pred_stage = [None]
    pred_stage.append(model.predict(data[0])) # predict test data
    pred_stage[1] = [w for sent in pred_stage[1] for w in sent] # flatten sentences for metric calculation
    pred_stage.append([syllabification(w) for w in pred_stage[1]]) # calculate syllabification
    pred_stage.append([romanize(w) for w in pred_stage[2]]) # calculate romanization
    # Accumulate the vowel confusion matrix over all predicted words.
    conf_mat = np.zeros((6,6))
    for i, w in enumerate(pred_stage[1]):
        conf_mat += CalcConfMatrix(w, data[1][i])
    # Per-stage edit distance of every predicted word vs. its gold form.
    for stage in range(1,4):
        tmp_dist = [EditDistance(w, data[stage][i]) for i, w in enumerate(pred_stage[stage])]
        dist.append((sum(tmp_dist)/len(tmp_dist), statistics.median(tmp_dist), min(tmp_dist), max(tmp_dist))) # avg, median, min, max
    return conf_mat, dist
def test():
    """Evaluate HMM, MEMM, CRF and a pre-trained RNN on the annotated test set.

    The first three models are trained from scratch; the RNN is loaded from
    rnn_model.bin. For each model, vowel confusion-matrix metrics and
    per-stage edit distances are printed.
    """
    data = LoadTestData()
    untrained_models = []
    config = {'ngram': 3, 'est': 'add-delta', 'delta': 0.3}
    untrained_models.append((HMM(config), 'HMM. config: {}'.format(config)))
    config = {'ftrs': ('IS_FIRST', 'IS_LAST', 'VAL', 'PRV_VAL', 'NXT_VAL', 'FRST_VAL', 'LST_VAL', 'SCND_VAL', 'SCND_LST_VAL')}
    untrained_models.append((MEMM(config), 'MEMM. config: {}'.format(config)))
    config = {'ftrs': ('IS_FIRST', 'IS_LAST', 'IDX', 'VAL', 'PRV_VAL', 'NXT_VAL', 'FRST_VAL', 'LST_VAL', 'SCND_VAL', 'SCND_LST_VAL')}
    untrained_models.append((CRF_WORD(config), 'CRF. config: {}'.format(config)))
    # NOTE(review): shuffle(0xfab1e) fixes the RNG seed; split(0) presumably
    # holds out nothing for validation — confirm against the model API.
    trained_models = [(model.prep_data().shuffle(0xfab1e).split(0).train(),name) for model,name in untrained_models]
    config = {'n_layers': 3, 'hidden_dim': 32, 'embedding': 'mds', 'win_len': 4,"device":"cpu"}
    rnn = RNN(config)
    trained_models.append((rnn.prep_model().load('rnn_model.bin'), 'RNN. config: {}'.format(config)))
    # Print a metrics report for each model.
    for model,name in trained_models:
        trained_model = model
        conf_mat, dist = TestModel(trained_model, data)
        print('\n')
        print(name)
        print('='*80)
        print('Vowel metrics:')
        print('-'*50)
        PrintConfMat(conf_mat)
        print('-'*50)
        print('Edit distance:')
        print('-'*50)
        for stage in range(1,4):
            print('Stage = {}:'.format(stage_names[stage]))
            print(' Average = {}\n Median = {}\n Min = {}\n Max = {}'.format(dist[stage][0],dist[stage][1],dist[stage][2],dist[stage][3]))
| albert-shalumov/nlp_proj | test.py | test.py | py | 4,674 | python | en | code | 1 | github-code | 36 |
30453697938 | import struct
import random
def get_checksum(msg: bytes) -> int:
    """Compute the 16-bit ones'-complement Internet checksum (RFC 1071).

    Bytes are summed as big-endian 16-bit words. The carry is folded back
    into the low 16 bits after every addition, so the running sum always
    fits in 0..0xffff; the final value is bit-inverted.
    """
    if len(msg) % 2:
        # Fix: the original indexed msg[i + 1] unconditionally and raised
        # IndexError on odd-length input; RFC 1071 pads the trailing byte
        # with zero instead.
        msg = msg + b'\x00'
    checksum = 0
    for i in range(0, len(msg), 2):
        word = (msg[i] << 8) + (msg[i + 1])
        checksum += word
        # End-around carry: fold bits above 16 back into the sum.
        checksum = (checksum >> 16) + (checksum & 0xffff)
    return checksum ^ 0xffff
class IcmpPack:
    """Minimal ICMP echo-request builder / reply parser (RFC 792)."""

    def __init__(self, icmp_type: int, icmp_code: int):
        self.icmp_type = icmp_type
        self.icmp_code = icmp_code

    @staticmethod
    def pack_icmp() -> bytes:
        """Build an 8-byte ICMP echo-request header with a valid checksum.

        Fix: RFC 792 requires the checksum to cover the whole ICMP message
        (type, code, zeroed checksum field, identifier, sequence). The
        original computed it over only the first 4 bytes, so the packets it
        produced carried an invalid checksum once id/seq were appended.
        """
        icmp_type = 8  # echo request
        icmp_code = 0
        identifier = 1
        sequence = random.randint(256, 3000)
        # Checksum is computed with the checksum field set to zero.
        zeroed = struct.pack('!BBHHH', icmp_type, icmp_code, 0, identifier, sequence)
        current_sum = get_checksum(zeroed)
        return struct.pack('!BBHHH', icmp_type, icmp_code, current_sum, identifier, sequence)

    @classmethod
    def get_icmp(cls, data: bytes):
        """Parse the type and code fields from a raw ICMP message."""
        icmp_type, icmp_code = struct.unpack('!BB', data[:2])
        return cls(icmp_type, icmp_code)
| OxyEho/icmp-traceroute | icmp.py | icmp.py | py | 881 | python | en | code | 0 | github-code | 36 |
71877366824 | #!/usr/bin/env python
# coding: utf-8
import sys
import io
import json
import numpy as np
from matplotlib import pyplot as plt
from tensorflow import keras
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from pathlib import Path
import cv2
import skimage
from tensorflow.keras.applications import ResNet50V2, ResNet50
from tensorflow.keras.regularizers import l2
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Conv2DTranspose
from tensorflow.keras.layers import concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dense, Flatten, MaxPooling2D, BatchNormalization, Conv2D, Dropout, LeakyReLU
from tensorflow.keras.regularizers import l2
from adamp_tf import AdamP
from sgdp_tf import SGDP
from collections import Callable
import time
from tensorflow.keras import backend as K
# Let TF grow GPU memory on demand instead of reserving it all up front.
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# Probe GPU availability (return values intentionally unused; logged by TF).
tf.test.is_gpu_available()
tf.config.list_physical_devices('GPU')
# Mixed precision: float16 compute with float32 variables.
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
from skimage.transform import resize
import albumentations as A
def augment_img_mask(x, y):
    """Apply the same random flips / elastic distortion to an image and its mask.

    Each transform fires with probability 0.5; image and mask always receive
    identical spatial transforms so they stay aligned.
    """
    pipeline = A.Compose([
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.ElasticTransform(p=0.5, alpha=240, sigma=240 * 0.05, alpha_affine=240 * 0.03),
    ])
    augmented = pipeline(image=x, mask=y)
    return augmented['image'], augmented['mask']
class DataGeneratorDivide(tf.keras.utils.Sequence):
    """Keras Sequence yielding (image, mask) batches from an on-disk dataset.

    `path_to_dataset` must contain `images/` and `gt/` subfolders with
    matching .npy filenames of the form *_<sliceNum>.npy. The train/val
    split is done by slice number (not by file), so every image of a given
    slice lands in the same split. Samples are resized to 256x256 and given
    a trailing channel axis.
    """
    def __init__(self, path_to_dataset, batch_size=32,
                 shuffle=True, use_augmentations=False,
                 mode='train', val_percent=0.3):
        """
        mode: 'train' or 'val' — selects which side of the slice split to use.
        val_percent: fraction of slices assigned to the validation split.
        """
        self.batch_size = batch_size
        self.path_to_dataset = path_to_dataset
        self.val_percent = val_percent
        self.mode = mode
        self.initialize()
        self.shuffle = shuffle
        self.on_epoch_end()
        self.use_aug = use_augmentations
    def __len__(self):
        'Denotes the number of batches per epoch (trailing partial batch dropped)'
        return int(np.floor(len(self.X) / self.batch_size))
    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Generate data
        X, Y = self.__data_generation(indexes)
        return X, Y
    def initialize(self):
        # Collect the distinct slice numbers present in gt/ and deterministically
        # split them: first `val_percent` of the sorted slices go to 'val'.
        slice_nums = list(set(
            int(file.name.split('_')[-1].split('.')[0]) for file in (self.path_to_dataset / 'gt').iterdir()
        ))
        slice_nums = sorted(list(slice_nums))
        num_of_slices = len(slice_nums)
        val_num = int(num_of_slices * self.val_percent)
        if self.mode == 'train':
            curr_slices_to_use = slice_nums[val_num:]
        else:
            curr_slices_to_use = slice_nums[:val_num]
        self.curr_slices_to_use = curr_slices_to_use
        # X holds image paths, Y the matching ground-truth paths (same name).
        self.X, self.Y = [], []
        for file in (self.path_to_dataset / 'images').iterdir():
            slice_num = int(file.name.split('_')[-1].split('.')[0])
            if slice_num in self.curr_slices_to_use:
                self.X.append(file)
                self.Y.append(self.path_to_dataset / 'gt' / file.name)
    def on_epoch_end(self):
        'Updates (and optionally reshuffles) sample indexes after each epoch'
        self.indexes = np.arange(len(self.X))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
    def __data_generation(self, indexes):
        'Loads, resizes and (optionally) augments one batch of samples'
        # NOTE(review): samples are assumed to be 2-D single-channel arrays
        # (`X[i].shape` unpacks into w, h) — confirm against the dataset dump.
        X = [np.load(self.X[ind]) for ind in indexes]
        Y = [np.load(self.Y[ind]) for ind in indexes]
        # max_w/max_h and the commented-out np.pad calls are leftovers from an
        # abandoned pad-to-max approach; plain resize to 256x256 is what runs.
        max_w, max_h = 256, 256
        for i, img in enumerate(X):
            w, h = X[i].shape
            X[i] = resize(X[i], (256, 256), preserve_range=True)
            Y[i] = resize(Y[i], (256, 256), preserve_range=True)
            if self.use_aug:
                X[i], Y[i] = augment_img_mask(X[i], Y[i])
            # X[i] = (np.pad(X[i], pad_width=((0, max_w - w), (0, max_h - h), (0, 0))))
            # X[i] = tf.keras.applications.resnet.preprocess_input(X[i])
        # Stack to (batch, 256, 256, 1).
        X, Y = np.array(X)[:, :, :, np.newaxis], np.array(Y)[:, :, :, np.newaxis]
        return X, Y
def get_model(
    weight_decay=0.0001,
    start_neuron_number=16
):
    """Build a U-Net for 256x256x1 inputs with a linear single-channel output.

    weight_decay: L2 factor applied to (almost) all conv kernels.
    start_neuron_number: filters in the first encoder stage; deeper stages
    use multiples (x2, x4, x8). The final 1x1 conv is forced to float32 so
    the output stays numerically stable under the mixed_float16 policy.
    """
    keras.backend.clear_session()
    wd_reg = l2(weight_decay)
    inputs = Input((256, 256, 1))
    x = inputs
    # s = Lambda(lambda x: x / 255) (inputs)
    # ---- Encoder: conv-BN-conv + 2x2 max-pool per stage (c1..c4) ----
    c1 = Conv2D(start_neuron_number * 1, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (x)
    c1 = BatchNormalization()(c1)
    c1 = Conv2D(start_neuron_number * 1, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c1)
    p1 = MaxPooling2D((2, 2)) (c1)
    c2 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p1)
    c2 = BatchNormalization()(c2)
    c2 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c2)
    p2 = MaxPooling2D((2, 2)) (c2)
    c3 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p2)
    c3 = BatchNormalization()(c3)
    c3 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c3)
    p3 = MaxPooling2D((2, 2)) (c3)
    c4 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p3)
    c4 = BatchNormalization()(c4)
    c4 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal',kernel_regularizer=wd_reg, padding='same') (c4)
    p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
    # ---- Bottleneck ----
    c5 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p4)
    c5 = BatchNormalization()(c5)
    c5 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c5)
    # ---- Decoder: transpose-conv upsample + skip concat per stage (u6..u9) ----
    u6 = Conv2DTranspose(start_neuron_number * 8, (4, 4), strides=(2, 2), padding='same', kernel_regularizer=wd_reg) (c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal',kernel_regularizer=wd_reg, padding='same') (u6)
    c6 = BatchNormalization()(c6)
    c6 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c6)
    u7 = Conv2DTranspose(start_neuron_number * 4, (4, 4), strides=(2, 2), padding='same', kernel_regularizer=wd_reg) (c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(0.2)(u7)
    c7 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (u7)
    c7 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c7)
    u8 = Conv2DTranspose(start_neuron_number * 2, (4, 4), strides=(2, 2), padding='same', kernel_regularizer=wd_reg) (c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(0.2)(u8)
    c8 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (u8)
    c8 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c8)
    # NOTE(review): this transpose conv has no kernel_regularizer, unlike the
    # others — possibly an oversight, confirm before "fixing".
    u9 = Conv2DTranspose(start_neuron_number, (4, 4), strides=(2, 2), padding='same') (c8)
    u9 = concatenate([u9, c1], axis=3)
    u9 = Dropout(0.2)(u9)
    c9 = Conv2D(start_neuron_number, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (u9)
    c9 = BatchNormalization()(c9)
    c9 = Conv2D(start_neuron_number, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c9)
    # Head: 1x1 linear conv in float32 (regression output under mixed precision).
    outputs = Conv2D(1, (1, 1), activation='linear', dtype='float32') (c9)
    model = keras.Model(inputs=[inputs], outputs=[outputs])
    return model
def l1(y_true, y_pred):
    """Per-sample mean absolute error, averaged over every non-batch axis.

    Accepts 4-D (B, H, W, C) or 3-D (B, H, W) tensors; anything else raises.
    """
    rank = K.ndim(y_true)
    if rank == 4:
        return K.mean(K.abs(y_pred - y_true), axis=[1, 2, 3])
    if rank == 3:
        return K.mean(K.abs(y_pred - y_true), axis=[1, 2])
    raise NotImplementedError("Calculating L1 loss on 1D tensors? should not occur for this network")
def compute_perceptual(vgg_out, vgg_gt):
    """Perceptual loss: summed L1 distance between matching VGG feature maps
    (paper eq. 3)."""
    total = 0
    for out_fmap, gt_fmap in zip(vgg_out, vgg_gt):
        total = total + l1(out_fmap, gt_fmap)
    return total
def gram_matrix(x, norm_by_channels=False):
    """Gram matrix of a (B, H, W, C) tensor, normalized by C*H*W.

    Used by the style loss. `norm_by_channels` is accepted for API
    compatibility but never read; normalization is always applied.
    """
    assert K.ndim(x) == 4, 'Input tensor should be a 4d (B, H, W, C) tensor'
    assert K.image_data_format() == 'channels_last', "Please use channels-last format"
    # Channels-first layout so each row of `flat` is one channel's activations.
    chan_first = K.permute_dimensions(x, (0, 3, 1, 2))
    dims = K.shape(chan_first)
    batch, chans, height, width = dims[0], dims[1], dims[2], dims[3]
    flat = K.reshape(chan_first, K.stack([batch, chans, height * width]))
    gram = K.batch_dot(flat, flat, axes=2)
    # Normalize with channels, height and width.
    return gram / K.cast(chans * height * width, chan_first.dtype)
def compute_style(vgg_out, vgg_gt):
    """Style loss: summed L1 distance between Gram matrices of matching
    feature maps (paper eq. 4 & 5)."""
    total = 0
    for out_fmap, gt_fmap in zip(vgg_out, vgg_gt):
        total = total + l1(gram_matrix(out_fmap), gram_matrix(gt_fmap))
    return total
def get_extracted_values(feature_extractor, y_true, y_pred):
    """Run the extractor on both tensors and return float32 feature lists.

    Returns (features_of_y_pred, features_of_y_true) — note the order: the
    FIRST element comes from y_pred, the SECOND from y_true, matching how
    the original code returned (vgg_gt, vgg_out).
    """
    feats_of_true = feature_extractor(y_true)
    feats_of_pred = feature_extractor(y_pred)
    if not isinstance(feats_of_true, list):
        feats_of_true = [feats_of_true]
        feats_of_pred = [feats_of_pred]
    # Cast to float32 so the loss math is unaffected by the model's
    # mixed-precision (float16) layer outputs.
    feats_of_true = [K.cast(t, 'float32') for t in feats_of_true]
    feats_of_pred = [K.cast(t, 'float32') for t in feats_of_pred]
    return feats_of_pred, feats_of_true
def compute_loss_tv(P):
    """Total-variation loss: L1 difference between vertically adjacent rows
    plus horizontally adjacent columns of P."""
    row_diff = l1(P[:,1:,:,:], P[:,:-1,:,:])
    col_diff = l1(P[:,:,1:,:], P[:,:,:-1,:])
    return row_diff + col_diff
def loss_total(
    feature_extractor_content,
    feature_extractor_style
):
    """
    Build the inpainting loss closure for a fixed rectangular hole.

    NOTE(review): despite the original description ("sums all the loss
    components and multiplies by their weights"), the returned closure
    yields the five UNWEIGHTED components as a tuple
    (valid-L1, hole-L1, perceptual, style, total-variation); weighting and
    summation must happen in the caller — confirm.
    """
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, 'float32')
        # Here I assume that rectangular shape is always the same:
        # hard-coded hole box, dilated by 20 px on each side.
        # NOTE(review): np.zeros(y_true.shape) needs a fully-defined static
        # shape — this likely only works eagerly / with a fixed batch size.
        mask = np.zeros(y_true.shape)
        xmin, xmax, ymin, ymax = (55, 200, 86, 169)
        mask[:, xmin-20:xmax+20, ymin-20:ymax+20] = 1
        mask = K.cast(mask, 'float32')
        vgg_gt_c, vgg_out_c = get_extracted_values(feature_extractor_content, y_true, y_pred)
        vgg_gt_s, vgg_out_s = get_extracted_values(feature_extractor_style, y_true, y_pred)
        loss_mae_hole = l1(mask * y_true, mask * y_pred)
        loss_mae_valid = l1((1 - mask) * y_true, (1 - mask) * y_pred)
        loss_perceptual = compute_perceptual(vgg_out_c, vgg_gt_c)
        loss_style = compute_style(vgg_out_s, vgg_gt_s)
        loss_tv_val = compute_loss_tv(P=mask * y_pred)
        # Return the individual components (not their weighted sum).
        return loss_mae_valid, loss_mae_hole, loss_perceptual, loss_style, loss_tv_val
    return loss
def make_linear_lr(min_lr, max_lr, number_of_steps):
    """Return step -> LR rising linearly from min_lr (step 0) to max_lr
    (step == number_of_steps)."""
    slope = (max_lr - min_lr) / number_of_steps
    def gen_lr(step):
        return min_lr + slope * step
    return gen_lr
def make_cosine_anneal_lr(learning_rate, alpha, decay_steps):
    """Return step -> LR for cosine annealing.

    Decays from `learning_rate` down to alpha * learning_rate over
    `decay_steps`; steps beyond that are clamped, so the LR stays at the
    floor afterwards.
    """
    def gen_lr(global_step):
        step = tf.cast(tf.minimum(global_step, decay_steps), tf.float32)
        # Hard-coded pi literal kept from the original implementation.
        cosine = 0.5 * (1 + tf.math.cos(3.1415926 * step / decay_steps))
        return learning_rate * ((1 - alpha) * cosine + alpha)
    return gen_lr
def make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps):
gen_lr_1 = make_linear_lr(min_lr, max_lr, number_of_steps)
gen_lr_2 = make_cosine_anneal_lr(max_lr, alpha, decay_steps)
def gen_lr(global_step):
a = global_step < number_of_steps
a = tf.cast(a, tf.float32)
b = 1. - a
return a * gen_lr_1(global_step) + b * gen_lr_2(global_step - number_of_steps)
return gen_lr
class CosineAnnealingWithWarmUP(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, min_lr, max_lr, number_of_steps, alpha, decay_steps):
super(CosineAnnealingWithWarmUP, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.number_of_steps = number_of_steps
self.alpha = alpha
self.decay_steps = decay_steps
self.gen_lr_ca = make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps)
def __call__(self, step):
return self.gen_lr_ca(step)
def get_config(self):
config = {
'min_lr': self.min_lr,
'max_lr': self.max_lr,
'number_of_steps': self.number_of_steps,
'alpha': self.alpha,
'decay_steps': self.decay_steps
}
return config
def choose_optimizer(
optimizer_name='Adam',
learning_rate_fn=0.001
):
# (learning_rate=learning_rate_fn)
if optimizer_name == 'Adam':
optimizer = tf.keras.optimizers.Adam
elif optimizer_name == 'SGD':
optimizer = tf.keras.optimizers.SGD
elif optimizer_name == 'AdamP':
optimizer = AdamP
else:
print('Choosing SGDP')
optimizer = SGDP
optimizer_with_lr = optimizer(learning_rate_fn)
return optimizer_with_lr
def choose_learning_rate_func(
type_lr_func='constant', max_lr = 0.001,
warmup_steps = 900, max_number_of_steps = 60_000,
epochs=60
):
if type_lr_func == 'constant':
return max_lr
else:
return CosineAnnealingWithWarmUP(.0000001, max_lr, warmup_steps, 0, max_number_of_steps)
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def main(params):
weight_decay = params['weight_decay']
start_neuron_number = params['start_neuron_number']
optimizer_name = params['optimizer_name']
type_lr_func = params['type_lr_func']
max_lr = params['max_lr']
warmup_steps = params['warmup_steps']
max_number_of_steps = params['max_number_of_steps']
epochs = params['epochs']
save_model_tensorboard = params['save_model_tensorboard']
style_layer_names = params['style_layer_names']
content_layer_name = params['content_layer_name']
mae_valid_weight = params['mae_valid_weight']
mae_hole_weight = params['mae_hole_weight']
perceptual_weight = params['perceptual_weight']
style_weight = params['style_weight']
tv_weight = params['tv_weight']
model = get_model(weight_decay, start_neuron_number)
path_to_dataset = Path('./dataset')
autoencoder = tf.keras.models.load_model('./best_weights_24.h5', compile=False)
autoencoder.trainable = False
feature_extractor_style = keras.Model(
inputs=autoencoder.input,
outputs=[autoencoder.get_layer(l).output for l in style_layer_names]
)
feature_extractor_content = keras.Model(
inputs=autoencoder.input,
outputs=[autoencoder.get_layer(content_layer_name).output]
)
optimizer = choose_optimizer(
optimizer_name,
choose_learning_rate_func(type_lr_func, max_lr, warmup_steps, max_number_of_steps, epochs)
)
dg_train = DataGeneratorDivide(
path_to_dataset, mode='train',
val_percent=0.2, use_augmentations=True,
batch_size=6
)
dg_val = DataGeneratorDivide(path_to_dataset, mode='val', val_percent=0.2, batch_size=6)
writer = tf.summary.create_file_writer(save_model_tensorboard)
global_step = 0
for ind in range(epochs):
model.save(f'./{save_model_tensorboard}.h5')
print(f'{ind} epoch')
dg_train.on_epoch_end()
for ind, (x, y) in enumerate(dg_val):
if ind == 1:
break
prediction = model.predict(x)
fig, axes = plt.subplots(1, 3, figsize=(10, 5))
for pred, x_, y_ in zip(prediction, x, y):
axes[0].imshow(pred, cmap='gray')
axes[1].imshow(x_, cmap='gray')
axes[2].imshow(y_, cmap='gray')
# plt.show()
with writer.as_default():
tf.summary.image("Val data", plot_to_image(fig), step=global_step)
start = time.time()
for step_num, (inputs, targets) in enumerate(dg_train):
global_step += 1
with tf.GradientTape() as tape:
predictions = model(inputs)
func = loss_total(feature_extractor_content, feature_extractor_style)
loss_value_list = func(targets, predictions)
loss_value =\
mae_valid_weight * loss_value_list[0] +\
mae_hole_weight * loss_value_list[1] +\
perceptual_weight * loss_value_list[2] +\
style_weight * loss_value_list[3] +\
tv_weight * loss_value_list[4]
gradients = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
if step_num % 10 == 0:
with writer.as_default():
tf.summary.scalar("loss_train", loss_value.numpy().mean(), step=global_step)
tf.summary.scalar("loss_train_mae_valid", loss_value_list[0].numpy().mean(), step=global_step)
tf.summary.scalar("loss_train_mae_hole", loss_value_list[1].numpy().mean(), step=global_step)
tf.summary.scalar("loss_train_percept" , loss_value_list[2].numpy().mean(), step=global_step)
tf.summary.scalar("loss_train_style", loss_value_list[3].numpy().mean(), step=global_step)
tf.summary.scalar("loss_train_tv", loss_value_list[4].numpy().mean(), step=global_step)
if isinstance(optimizer.lr, Callable):
cur_lr = optimizer.lr(global_step).numpy()
else:
cur_lr = optimizer.lr.numpy()
tf.summary.scalar("learning_rate", cur_lr, step=global_step)
writer.flush()
end = time.time()
print(f'Training took {end - start}')
start = time.time()
val_loss_value = 0
corr_coef_value = 0
batch_num = 0
for step_num, (inputs, targets) in enumerate(dg_val):
predictions = model(inputs)
corr_coefs = []
for pred, x_, y_ in zip(predictions, inputs, targets):
xmin, xmax = min(np.where(x_ < 0.001)[0]), max(np.where(x_ < 0.001)[0])
ymin, ymax = min(np.where(x_ < 0.001)[1]), max(np.where(x_ < 0.001)[1])
y_ = y_[xmin-10:xmax+10, ymin-10:ymax+10]
pred = pred[xmin-10:xmax+10, ymin-10:ymax+10]
corr_coef = np.corrcoef(y_.ravel(), pred.numpy().ravel())[0, 1]
corr_coefs.append(corr_coef)
corr_coef_value += np.mean(corr_coefs)
func = loss_total(feature_extractor_content, feature_extractor_style)
loss_value_list = func(targets, predictions)
loss_value =\
mae_valid_weight * loss_value_list[0] +\
mae_hole_weight * loss_value_list[1] +\
perceptual_weight * loss_value_list[2] +\
style_weight * loss_value_list[3] +\
tv_weight * loss_value_list[4]
val_loss_value += loss_value.numpy().mean()
batch_num += 1
with writer.as_default():
tf.summary.scalar("loss_val", val_loss_value / batch_num, step=global_step)
tf.summary.scalar("corr_coeff_val", corr_coef_value / batch_num, step=global_step)
writer.flush()
end = time.time()
print(f'Val took {end - start}')
if __name__ == '__main__':
path_to_json = sys.argv[1]
with open(path_to_json, 'r') as f:
params = json.load(f)
main(params)
| DanilKonon/Seismic_Data_Inpainting | unet_autoencoder.py | unet_autoencoder.py | py | 22,825 | python | en | code | 0 | github-code | 36 |
6347790339 | def readPropertiesFile():
configDict = dict(line.strip().split('=') for line in open('config.properties'))
# print(H["application.name"])
for key in configDict:
print(key + "<---------->" + configDict[key])
print("Operating System Name : ", configDict["os.name"])
if __name__ == "__main__":
readPropertiesFile()
| debjava/py-read-properties-file | main.py | main.py | py | 352 | python | en | code | 0 | github-code | 36 |
74974903784 | """empty message
Revision ID: 3c8f0856b635
Revises: a7b5e34eac58
Create Date: 2018-02-24 13:05:25.721719
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3c8f0856b635'
down_revision = 'a7b5e34eac58'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('API',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('api', sa.String(length=255), nullable=True),
sa.Column('method', sa.String(length=24), nullable=True),
sa.Column('desc', sa.String(length=512), nullable=True),
sa.Column('param', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('API')
# ### end Alembic commands ###
| LDouble/cernet_ipv6_server | migrations/versions/3c8f0856b635_.py | 3c8f0856b635_.py | py | 911 | python | en | code | 0 | github-code | 36 |
35020634837 | # Jordan Callero
# Project Euler
# May 4, 2016
# This function will sum all of the positive integers which
# can not be written as the sum of two abudant numbers.
# Note: An abundant number is a number where the factors added
# together is larger than the number itself.
def nonAbuSum():
abudantList = abuList()
abudantAddList = abuAdd(abudantList)
total = 0
for i in range(1, 28123):
if (i not in abudantAddList):
total += i
return total
def abuList():
AbudantList = []
for i in range(1,28123):
factorList = (factors(i))
factorSum = 0
for factor in factorList:
if(i != factor):
factorSum += factor
if(i < factorSum):
AbudantList.append(i)
return AbudantList
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
def abuAdd(abudantList):
abuAddlist = []
for i in abudantList:
for j in abudantList:
#if(i+j not in abuAddlist):
abuAddlist.append(i+j)
return set(abuAddlist)
| jgcallero/projectEuler | Problem_023/Non-abundant Sums.py | Non-abundant Sums.py | py | 1,220 | python | en | code | 0 | github-code | 36 |
22502437388 | # -*- coding: utf-8 -*-
__docformat__ = "restructuredtext en"
"""
list actions
File: obj_list_act.py
Copyright: Blink AG
Author: Steffen Kube <steffen@blink-dx.com>
"""
import os
from blinkapp.code.lib.main_imports import *
from blinkapp.code.lib.app_plugin import gPlugin
from blinkapp.code.lib.tab_abs_sql import table_sql
from blinkapp.code.lib.f_clip import clipboard
from blinkapp.code.lib.obj_mod_meta import obj_mod_meta
from blinkapp.code.lib.gui.obj_list_sub import Obj_list_sub
from blinkapp.code.lib.oDB_USER import oDB_USER
import sys, traceback
class plug_XPL(gPlugin) :
'''
* @package folder.py
* @author Steffen Kube (steffen@blink-dx.com)
:var self._req_data:
't' : table
'action':
'delete'
'set_mdo' : set MDO group
'''
action=''
def register(self) :
action = self._req_data.get( 'action' , '' )
self.action = action
table_nice='???'
self.tablib = None
if 't' not in self._req_data:
self.table = ''
else:
self.table = self._req_data['t']
self.tablib = table_cls(self.table)
table_nice = self.tablib.nice_name()
self.infoarr['title'] = 'List action of ' + table_nice
self.infoarr['layout'] = 'ADM/obj_list_act'
self.infoarr['objtype'] = self.table
self.infoarr['viewtype']= 'list'
self.infoarr['list.check_sel'] = 1
self.infoarr['js.scripts'] = ['x_modal.js']
self.infoarr['locrow'] = [
{'url':'ADM/home', 'text':'Home'},
]
def act_delete(self, db_obj, db_obj2, sql_from_order):
"""
delete objects
"""
table = self.table
# check role rights
user_id = session['sesssec']['user_id']
user_lib = oDB_USER.mainobj(user_id)
acc_matrix = user_lib.role_rights_tab(db_obj, table)
if not acc_matrix['delete']:
raise BlinkError(4, 'You have no "delete" right for this table.')
modi_lib = obj_mod_meta(db_obj,table, None)
pk_col = self.tablib.pk_col_get()
sql_cmd = "x."+ pk_col +" from " + sql_from_order
db_obj2.select_tuple(sql_cmd)
cnt=0
infoarr=[]
while db_obj2.ReadRow():
objid = db_obj2.RowData[0]
try:
modi_lib.set_obj(db_obj,objid)
modi_lib.delete(db_obj)
except:
message = str(sys.exc_info()[1])
infoarr.append( ['OBJ-ID:'+str(objid)+' delete failed: ' + message] )
cnt = cnt + 1
if len(infoarr):
self.setMessage( 'ERROR', str(len(infoarr)) + ' Problem(s) occurred.')
self._html.add_meta('error_list', infoarr )
self.setMessage('OK', str(cnt) + ' Elements deleted.')
def act_set_mdo(self, db_obj, db_obj2, sql_from_order, argu):
"""
set MDO group of objects
"""
table = self.table
try:
mdo_grp = int(argu.get('mdo_grp', 0))
except:
mdo_grp = 0
if not mdo_grp:
self.setMessage('ERROR', 'No input given.')
debug.printx( __name__, 'MDO: ' + str(mdo_grp) )
# check role rights
user_id = session['sesssec']['user_id']
user_lib = oDB_USER.mainobj(user_id)
acc_matrix = user_lib.role_rights_tab(db_obj, table)
if not acc_matrix['write']:
raise BlinkError(4, 'You have no "write" right for this table.')
modi_lib = obj_mod_meta(db_obj,table, None)
pk_col = self.tablib.pk_col_get()
sql_cmd = "x."+ pk_col +" from " + sql_from_order
db_obj2.select_tuple(sql_cmd)
cnt=0
infoarr=[]
while db_obj2.ReadRow():
objid = db_obj2.RowData[0]
try:
modi_lib.set_obj(db_obj,objid)
args={
'access': {
'OWN_GRP_ID': mdo_grp
}
}
modi_lib.update(db_obj, args)
except:
message = str(sys.exc_info()[1])
infoarr.append( ['OBJ-ID:'+str(objid)+' SET_MDO failed: ' + message] )
cnt = cnt + 1
if len(infoarr):
self.setMessage( 'ERROR', str(len(infoarr)) + ' Problem(s) occurred.')
self._html.add_meta('error_list', infoarr )
self.setMessage('OK', str(cnt) + ' Elements modified.')
def startMain(self) :
db_obj = self._db_obj1
db_obj2 = self.db_obj2()
table = self.table
sql_select_lib = table_sql(table)
self.data_out = {}
self.sql_from = sql_select_lib.get_sql_from(db_obj)
self.sql_from_order = sql_select_lib.get_sql_from_order(db_obj)
sql_nice = sql_select_lib.get_sql_nice()
debug.printx( __name__, 'SQL: ' + self.sql_from )
pk_col = self.tablib.pk_col_get()
sql_cmd = "count(1) from " + self.sql_from
db_obj.select_tuple(sql_cmd)
db_obj.ReadRow()
objcnt = db_obj.RowData[0]
action = self._req_data.get( 'action' , '' )
debug.printx( __name__, 'ACTION: ' + action )
while 1:
if action=='delete':
if int(self._req_data.get('go', 0)) < 1:
self.data_out['form'] = {
'init': { 'title':'Do you want to delete '+str(objcnt)+' objects?', 'submit.text':'Delete', 'editmode':'edit' },
'hidden': {
"mod": self._mod,
"t" : table,
"action" : 'delete',
"go": 1,
},
'main': [ ]
}
break
self.act_delete(db_obj, db_obj2, self.sql_from_order)
break
if action=='set_mdo':
if int(self._req_data.get('go', 0)) < 1:
self.data_out['form'] = {
'init': {
'title':'Set MDO-Group for '+str(objcnt)+' objects?',
'submit.text':'Set',
'editmode':'edit',
'app.space.prefix': 'ADM/'
},
'hidden': {
"mod": self._mod,
"t" : table,
"action" : 'set_mdo',
"go": 1,
},
'main': [ {
'object':'objlink',
'name': 'argu[mdo_grp]',
'edit':1,
'id':1,
'val.nice': '',
'fk_t':'USER_GROUP'
}
]
}
break
self.act_set_mdo(db_obj, db_obj2, self.sql_from_order, self._req_data.get('argu', {}))
break
self.setMessage('WARN', 'Action "'+action+'" unknown.')
break # main loop break
def mainframe(self):
db_obj = self._db_obj1
self.sh_main_layout(massdata=self.data_out) | qbicode/blinkdms | blinkdms/ADM/plugin/obj_list_act.py | obj_list_act.py | py | 8,003 | python | en | code | 0 | github-code | 36 |
18446560206 | # -*- coding: utf-8 -*-
"""
@author: DongXiaoning
"""
import numpy as np
import operator
import collections
import sklearn.datasets
# compute gini index
def compute_gini(group):
m,n = group.shape
data = group[:,:-1]
label = group[:,-1]
dict_label = collections.Counter(label)
group_size = float(m)
if group_size == 0:
gini_index = 0
else:
proportion = np.array(list(dict_label.values()))/group_size
gini_index = 1 - np.dot(proportion,proportion)
return gini_index
def compute_information_gain(gini_group,gini_subgroup1,weight1,gini_subgroup2,weight2):
return gini_group - (gini_subgroup1 * weight1 + gini_subgroup2 * weight2)
def predict(data,stump):
if data[stump[1]] >= stump[4]:
return 0
return 1
if __name__ == '__main__':
breast_dataset = sklearn.datasets.load_breast_cancer()
breast_data = breast_dataset.data
m,n = breast_data.shape
breast_label =breast_dataset.target
breast_label = breast_dataset.target.reshape(m,1)
group = np.concatenate((breast_data,breast_label),axis = 1)
m,n = group.shape
gini = compute_gini(group)
# compute info gain
largest_info_gain_list = [] # on each attributes
info_gain_dict = {}
for i in range(n-1): # traverse each attribute/col
for j in range(m-1): # traverse each row
# split into two groups
mask = group[:,i] >= group[j][i] # mask is like a filter, which compares each element in space object
index = np.where(mask) # (here is group[:,j]) with group[i][j].
group1 = group[index] # index is a tuple and only has an element(size = 1), the element is a list.
row,col = group1.shape # thus, group[index,:] will output undesirable result.
group1_size = float(row)
mask = group[:,i] < group[j][i]
index = np.where(mask)
group2 = group[index]
row,col = group2.shape
group2_size = float(row)
# group1 : gini and weight
gini_group1 = compute_gini(group1)
weight_group1 = group1_size / m
# group2 : gini and weight
gini_group2 = compute_gini(group2)
weight_group2 = group2_size / m
# info gain
info_gain = compute_information_gain(gini,gini_group1,weight_group1,gini_group2,weight_group2)
info_gain_dict[j] = info_gain
largest_info_gain = max(info_gain_dict.items(),key=operator.itemgetter(1))
print(f'Attribute {i}\'s name is \'{breast_dataset.feature_names[i]}\', split node is in row {largest_info_gain[0]} ---> value is {group[largest_info_gain[0]][i]}, info gain is: {largest_info_gain[1]}')
largest_info_gain_list.append((f'attribute {i}',i,breast_dataset.feature_names[i],largest_info_gain[0],group[largest_info_gain[0]][i],largest_info_gain[1]))
s = max(largest_info_gain_list,key = operator.itemgetter(-1))
print(f'Best split attribute is \'{s[0]}\' : {s[2]}, and split node is in row {s[3]}, value is {s[4]}')
# add test code to test our result
mask = group[:,20] >= 16.82
index = np.where(mask)
group3 = group[index]
mask = group[:,20] < 16.82
index = np.where(mask)
group4 = group[index]
| xndong/ML-foundation-and-techniques | Decision stump/decision_stump.py | decision_stump.py | py | 3,428 | python | en | code | 0 | github-code | 36 |
40588074038 | import cadquery as cq
from math import sin, pi
import numpy as np
plateRadius = 15
plateCenterHole = 4
pinRadius = 3.5/2
pinInter = 18
SCALE= 100 # scale profile dimentions
# input data from csv file of wing profile
data = np.genfromtxt('data/s7075-il.csv',delimiter=',')
pts = data[9:89]
# if we can normalize vectors< then we can scale it
def normalize(data: np.array) -> np.array:
'''
Input numpy 2D array, that describes wing profile
Putput as list of vector Tuples
cq.Sketch doent accepts any other format, even list of lists
'''
res = data/np.linalg.norm(data)
res = res*SCALE
res = [tuple(item) for item in res.tolist()]
return res
pts2 = normalize(pts)
################
# Sketch zone
prof = (
cq.Sketch()
.spline(pts2)
.close()
.assemble()
)
# Sketch Zone end
plate = (
cq.Workplane()
.circle(plateRadius)
.circle(plateCenterHole)
.rect(pinInter,pinInter,forConstruction=True)
.vertices()
.circle(pinRadius)
.extrude(5)
)
###########
#sweep test
path = (
(10,-1,10),
(50,15,-15),
(100,0,0)
)
pathWire = cq.Workplane().spline(path)
"""
res = (
cq.Workplane('YZ')
.placeSketch(prof)
.sweep(pathWire)
)
"""
###########
def makeIt(pts):
wp = cq.Workplane("XY").polyline(pts).close().workplane()
result = None
for i in range(0,20):
wp2 = (
wp.transformed(offset=cq.Vector(0, -20, 5),
rotate=cq.Vector(1, 0, 0))
.polyline(pts).close()
.workplane()
)
if result == None:
result = wp2.loft(combine=True)
else:
nextpart = wp2.loft(combine=True)
result = result.union(nextpart)
wp = wp.transformed(offset=cq.Vector(0, -5, 5),
rotate=cq.Vector(18, 0, 0)).polyline(pts).close().workplane()
show_object(result, options=dict(alpha=0.8,color='blue'))
def makeSweep(pts):
path = (
cq.Workplane()
.parametricCurve(lambda t:(100*sin(t*pi/180),t,0),
start=0, stop = 10, N = 1000)
)
debug(path)
res = (
cq.Workplane('YZ')
.polyline(pts)
.close()
.sweep(path)
)
show_object(res, options=dict(aplha=0.7, color='magenta'))
makeSweep(pts2)
| Opezdol/pohhmann | src/cooling/carlson.py | carlson.py | py | 2,475 | python | en | code | 0 | github-code | 36 |
74197853543 | from collections.abc import Iterable
from circkit import Circuit, Operation, Node
import logging
log = logging.getLogger("Transformer")
class Transformer:
"""Base transformer class."""
START_FROM_VARS = False
source_circuit: Circuit = None
current_node: Node = None
current_operation: Operation = None
def transform(self, circuit, **kwargs):
self.before_transform(circuit, **kwargs)
self.visit_all(circuit)
self.output = [
self.make_output(node, self.result[node])
for node in circuit.outputs
]
self.transform_output = self.output
self.after_transform(circuit, **kwargs) # can change self.transform_output
return self.transform_output
def before_transform(self, circuit, **kwargs):
self.source_circuit = circuit
self.result = {}
self._current_stack = []
def after_transform(self, circuit, **kwargs):
self.source_circuit = None
assert not self._current_stack
def visit_all(self, circuit):
if self.START_FROM_VARS:
nodes_to_visit = (
list(circuit.inputs)
+ [node for node in circuit if not node.is_INPUT()]
)
else:
nodes_to_visit = list(circuit)
for node in nodes_to_visit:
self.before_visit(node)
self.visit(node, *[self.result[sub] for sub in node.incoming])
self.after_visit(node)
def before_visit(self, node):
"""Event handler before visiting node"""
self._current_stack.append((
self.current_node,
self.current_operation
))
self.current_node = node
self.current_operation = node.operation
def after_visit(self, node):
"""Event handler after visiting node"""
self.current_node, self.current_operation = self._current_stack.pop()
def on_visit_error(self, node, err):
log.error(f"node: {node} err: {err}")
if hasattr(node, "show_debug"):
node.show_debug()
def visit(self, node, *args):
method_name = f"visit_{node.operation._name}"
method = getattr(self, method_name, self.visit_generic)
try:
result = self.result[node] = method(node, *args)
except Exception as err:
if not self.on_visit_error(node, err):
raise
return result
def visit_generic(self, node, *args):
raise NotImplementedError(
f"Visit method for {node.operation._name} "
f"is not implemented in {type(self)}"
)
def visit_GET(self, node, multi_result):
return multi_result[node.operation.index]
def make_output(self, node, result):
return result
class CircuitTransformer(Transformer):
"""Base class for circuit->circuit transformers."""
DEFAULT_CIRCUIT_CLASS = None
DEFAULT_BASE_RING = None
AUTO_OUTPUT = True
NAME_SUFFIX = None
FORCE_MANY_TO_ONE = False
def create_target_circuit(
self,
source_circuit,
# keyword-only
*, name=None, circuit_class=None, base_ring=None, **kwargs):
if name is None and source_circuit.name and self.NAME_SUFFIX:
name = source_circuit.name + self.NAME_SUFFIX
if circuit_class:
target_circuit_class = circuit_class
elif self.DEFAULT_CIRCUIT_CLASS:
target_circuit_class = self.DEFAULT_CIRCUIT_CLASS
else:
target_circuit_class = type(source_circuit)
if base_ring:
target_base_ring = base_ring
elif self.DEFAULT_BASE_RING:
target_base_ring = self.DEFAULT_BASE_RING
else:
target_base_ring = source_circuit.base_ring
log.debug(
f"{type(self)}: create target circuit {target_circuit_class} "
f"with ring {base_ring}"
)
target_circuit = target_circuit_class(
base_ring=target_base_ring,
name=name,
)
return target_circuit
@property
def base_ring(self):
return self.target_circuit.base_ring
# VSN: It is better to write this prototype in a clearer way
# so that we can understand what we need to pass for kwargs
# (circuit_class, base_ring, etc for create_target_circuit)
def transform(self, circuit, **kwargs):
if not isinstance(circuit, Circuit):
raise TypeError(
"Transformers are defined only for Circuits,"
f" passed: {type(circuit)}"
)
self.source_circuit = circuit
if "target_circuit" in kwargs:
self.target_circuit = kwargs["target_circuit"]
else:
self.target_circuit = self.create_target_circuit(circuit, **kwargs)
super().transform(circuit, **kwargs)
return self.target_circuit
def visit_generic(self, node, *args):
return node.reapply(*args, circuit=self.target_circuit)
def make_output(self, node, result):
""" Default implementation: mark images of output notes as outputs in
new circuit. """
if not self.AUTO_OUTPUT:
return
if isinstance(result, self.target_circuit.Node):
return self.target_circuit.add_output(result)
elif isinstance(result, Iterable):
ret = []
for result_node in result:
ret.append(self.target_circuit.add_output(result_node))
return ret
else:
log.error(f"{type(result)} cannot be outputted")
raise NotImplementedError(f"{type(result)} cannot be outputted")
| hellman/ches2022wbc | circkit/transformers/core.py | core.py | py | 5,704 | python | en | code | 18 | github-code | 36 |
72788289704 | import subprocess
import re
import os.path
import sheetFeeder as gs
def main():
saxon_path = 'saxon-9.8.0.12-he.jar'
xslt1_path = 'ead_merge.xsl'
xslt2_path = 'ead_cleanup_1.xsl'
xslt3_path = 'ead_cleanup_2.xsl'
data_folder1 = '/path/to/exported/legacy/ead/files'
data_folder2 = '/path/to/as/exported/ead'
output_folder = '/path/to/output/folder'
the_sheet='[google-sheet-id]'
the_tab='migrate-grid'
default_range = str(the_tab + '!A1:Z1400')
try:
print("Gathering data from spreadsheet...")
the_mig_data = get_migration_grid(the_sheet, default_range)
except:
print("*** Error: there was a problem collecting data from the spreadsheet.***")
quit()
for a_record in the_mig_data:
the_bibid = a_record.pop(0)
the_rel_path = a_record.pop(0)
the_flags = a_record
print('BibID: ' + the_bibid)
the_params = ['asXMLFolder=' + data_folder2 + ' ']
for a_flag in the_flags:
the_params.append('m_' + a_flag + '=Y')
the_params = ' '.join(the_params)
the_path1 = str(data_folder1 + '/'+ the_rel_path)
the_path2 = str(data_folder2 + '/'+ the_bibid + '_ead.xml')
# Check to see if the two files exist before processing.
if (not(os.path.isfile(the_path1))):
print('*** Error: File ' + the_path1+ ' not found! ***')
continue
if (not(os.path.isfile(the_path2))):
print('*** Error: File ' + the_path2+ ' not found! ***')
continue
out_file = str(output_folder + '/' + the_bibid + '_MERGED-CLEAN_ead.xml')
print('Processing file: ' + the_rel_path + " to: " + out_file + ' with params: ' + the_params)
saxon_process_pipe(saxon_path, the_path1, out_file, [[xslt1_path, the_params], [xslt2_path, ' '], [xslt3_path, ' ']])
quit()
def get_migration_grid(theSheet,theRange):
the_data = []
the_values = gs.getSheetData(theSheet, theRange)
# the_values = x["values"]
the_heads = the_values[0]
for a_row in the_values:
my_bibid = a_row[0]
my_path = a_row[3]
my_row_data = [my_bibid,my_path]
for index, item in enumerate(a_row):
if item == "X":
the_name = the_heads[index]
my_row_data.append(the_name)
the_data.append(my_row_data)
del the_data[0:2]
return the_data
def saxon_process(saxonPath, inFile, transformFile, outFile, theParams):
cmd = 'java -jar ' + saxonPath + ' ' + inFile + ' ' + transformFile + ' ' + theParams + ' ' + '--suppressXsltNamespaceCheck:on' + ' > ' + outFile
p = subprocess.Popen([cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
result = p.communicate()
return result[0]
def saxon_process_pipe(saxonPath, in_file, out_file, the_pipes):
# This is a multi-step transform; stdout from first is input to next.
the_cmds = []
for i in range(len(the_pipes)):
if i == 0:
the_cmds.append('java -jar ' + saxonPath + ' ' + in_file + ' ' + the_pipes[i][0] + ' ' + the_pipes[i][1] + ' ' + '--suppressXsltNamespaceCheck:on' + ' ')
else:
the_cmds.append('java -jar ' + saxonPath + ' - ' + ' ' + the_pipes[i][0] + ' ' + the_pipes[i][1] + '--suppressXsltNamespaceCheck:on' + ' ')
the_cmd = ' | '.join(the_cmds)
the_cmd += ' > ' + out_file
# print('Executing command: ' + the_cmd)
p = subprocess.Popen([the_cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
result = p.communicate()
return result[0]
if __name__ == '__main__':
main()
| cul/rbml-archivesspace | ead_merge/ead_merge.py | ead_merge.py | py | 3,727 | python | en | code | 6 | github-code | 36 |
8828088539 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import math
import numpy as np
class trackerPoint(object):
def __init__(self, x, y, size, frame):
# KCF tracker init
self.tracker = cv2.TrackerKCF_create()
self.bbox = (x-size/2, y-size/2, size,size)
self.tracker.init(frame, self.bbox)
self.x = x
self.y = y
self.size = size
self.ptsize = 4
def update(self, frame, frameToDrawOn):
ok, self.bbox = self.tracker.update(frame)
if ok:
# Draw the new point
self.x = int(self.bbox[0] + self.size/2)
self.y = int(self.bbox[1] + self.size/2)
p1 = (self.x-self.ptsize, self.y-self.ptsize)
p2 = (self.x+self.ptsize, self.y+self.ptsize)
cv2.rectangle(frameToDrawOn, p1, p2, (0,0,255), -1)
def Dist(p1, p2):
x1 = p1.x
y1 = p1.y
x2 = p2.x
y2 = p2.y
return math.sqrt(math.pow((x2-x1), 2)+math.pow((y2-y1), 2))
def calcAngle(pTrack):
a = Dist(pTrack[0], pTrack[1])
b = Dist(pTrack[1], pTrack[2])
c = Dist(pTrack[2], pTrack[0])
angRad = math.acos(((a*a)+(b*b)-(c*c))/(2*a*b))
return math.degrees(angRad)
def drawLine(frame, pTrack):
cv2.line(frame, (pTrack[0].x,pTrack[0].y), (pTrack[1].x,pTrack[1].y), (255,0,255), 2)
cv2.line(frame, (pTrack[1].x,pTrack[1].y), (pTrack[2].x,pTrack[2].y), (255,0,255), 2)
def main():
# Init kernel for erode / dilate
kernel = np.ones((3,3), np.uint8)
# Init media in/out
cap = cv2.VideoCapture('Video.mp4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 30.0, (1280,720))
# Read first frame for trackers
ret, frame = cap.read()
# Instantiate trackers at known positions
pList = [(561,421),(656,385),(584,263)]
pTrack = []
for pt in pList:
pTrack.append(trackerPoint(pt[0], pt[1], 80, frame))
while(cap.isOpened()):
# Read new frame
ret, frame = cap.read()
if(frame is None):
break
# Thresholde / Erosion / Dilatation for arm detection
thresh1 = cv2.inRange(frame, (170,170,170), (255,255,255))
thresh1 = cv2.erode(thresh1, kernel, iterations = 3)
thresh1 = cv2.dilate(thresh1, kernel, iterations = 3)
# Mask
res = cv2.bitwise_and(frame, frame, mask=thresh1)
# Update trackers
for p in pTrack:
p.update(res, frame)
drawLine(frame, pTrack)
# Calculate angle between points
ang = calcAngle(pTrack)
strAng = "%2.2f deg" % ang
# Display it
cv2.putText(frame, strAng, (pTrack[1].x+40,pTrack[1].y), cv2.FONT_HERSHEY_DUPLEX, 1, (255,255,255))
# Show image
cv2.imshow('frame', frame)
# Write to output video
out.write(frame)
# "q" key to escape
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release everything
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| ThibaudMZN/GeneralWork | ArmAngleCalculation/ArmAngle.py | ArmAngle.py | py | 3,041 | python | en | code | 0 | github-code | 36 |
5255273641 | import os
from re import I
import sys
from openpyxl import Workbook
from openpyxl.styles import Border, Side, PatternFill, Font, Alignment
from datetime import datetime
sys.path.insert(0, os.path.abspath('..\\pycatia'))
from pycatia import catia
from pycatia.enumeration.enumeration_types import cat_work_mode_type
caa = catia()
documents = caa.documents
document = caa.active_document
product = document.product
product.apply_work_mode(cat_work_mode_type.index("DESIGN_MODE"))
class excel:
    """Build the BOM workbook.

    Two sheets are created:
      * "开料与加工清单" (cutting & machining list) -- one row per part with
        drawing number, category, material, spec, dimensions, weight, etc.
      * "图纸清单" (drawing list) -- one row per unique drawing number.
    Headers, merged cells and column widths are configured up front; the
    repetitive per-cell statements of the original are collapsed into
    data-driven loops with identical final worksheet state.
    """

    # Machining-sheet header values, keyed by cell coordinate.
    # NOTE(review): "M1" was originally "(后)" which appears to be a typo for
    # "(厚)" (thickness) -- column M is filled with the THK parameter by
    # process.excel_write below.
    _WS_HEADERS = {
        'A1': "编号", 'B1': "图号", 'H1': "类型", 'I1': "材质", 'J1': "规格",
        'K1': "(长)", 'L1': "(宽)", 'M1': "(厚)", 'N1': "重量(kg)",
        'O1': "总量", 'U1': "加工方式#1", 'V1': "加工方式#2", 'W1': "备注",
    }
    # Machining-sheet column widths.
    _WS_WIDTHS = {
        'A': 5, 'B': 10, 'C': 10, 'D': 10, 'E': 10, 'F': 10, 'G': 10,
        'H': 9, 'I': 12, 'J': 30, 'K': 7, 'L': 7, 'M': 7, 'N': 10,
        'O': 3, 'P': 3, 'Q': 3, 'R': 3, 'S': 3, 'T': 3,
        'U': 12, 'V': 12, 'W': 12,
    }
    # Drawing-list header values (row 2) and column widths.
    _DS_HEADERS = {
        'A2': "编号", 'B2': "类型", 'C2': "图号", 'D2': "图", 'E2': "幅",
        'F2': "页数", 'G2': "版本", 'H2': "总生产数量",
    }
    _DS_WIDTHS = {
        'A': 4.5, 'B': 8, 'C': 15, 'D': 3, 'E': 3, 'F': 4.5, 'G': 4.5,
        'H': 11,
    }

    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.create_sheet("开料与加工清单", 0)
        self.ds = self.wb.create_sheet("图纸清单", 1)
        center = Alignment(horizontal="center", vertical="center")
        # --- Machining sheet: headers, merges, alignment, widths. ---
        for coord, text in self._WS_HEADERS.items():
            self.ws[coord].value = text
        self.ws.merge_cells('B1:G1')   # "图号" spans columns B..G
        self.ws.merge_cells('O1:T1')   # "总量" spans columns O..T
        # Net alignment identical to the original code: A1..O1 plus U1..W1.
        # NOTE(review): the original aligned ws A1..H1 in a second batch that
        # was probably intended for the ds headers; the ws-only effect is kept.
        for col in 'ABCDEFGHIJKLMNO':
            self.ws['%s1' % col].alignment = center
        for coord in ('U1', 'V1', 'W1'):
            self.ws[coord].alignment = center
        for col, width in self._WS_WIDTHS.items():
            self.ws.column_dimensions[col].width = width
        # --- Drawing-list sheet: headers and widths. ---
        for coord, text in self._DS_HEADERS.items():
            self.ds[coord].value = text
        for col, width in self._DS_WIDTHS.items():
            self.ds.column_dimensions[col].width = width

    def input(self, input_row, input_column, input_value):
        """Write *input_value* into the machining sheet at (row, column)."""
        self.ws.cell(row=input_row, column=input_column).value = input_value

    def save_excel(self):
        """Persist the workbook next to the script."""
        self.wb.save("BOM and process list.xlsx")
class process:
    """Walk the CATIA product tree and fill the machining-list worksheet."""

    def __init__(self):
        # Next worksheet row to write; rows 1..2 hold header and root product.
        self.iteration = 2
        self.blank = " "
        self.excel = excel()
        self.fillrowno = 1
        self.partlist = []

    def prod_process(self, obje, current_layer, listofpart, newlistofpart):
        """Append *obje* as a new sheet row at assembly depth *current_layer*.

        Only part numbers containing "APC" are tracked in *listofpart* for
        quantity counting; the quantity cell is column current_layer+13.
        """
        if "APC" in obje.part_number:
            listofpart.append(obje.part_number)
        self.iteration += 1
        self.excel.input(self.iteration, 1, self.iteration-1)
        self.excel_write(self.iteration, current_layer, obje)
        self.excel.input(self.iteration, current_layer+13, listofpart.count(obje.part_number))
        # Saving after every row is slow but keeps a usable file if CATIA dies.
        self.excel.save_excel()

    def quantity_update(self, obje1, current_layer1, listofpart1, newlistofpart1, indexlist):
        """Refresh the quantity cell of a part that was already written."""
        self.excel.input(indexlist[newlistofpart1.index(obje1.part_number)], current_layer1+13, listofpart1.count(obje1.part_number))

    def excel_write(self, rowno, columnno, target_obj):
        """Fill one row: part number, weight, category, material and spec text.

        The category ("钢板"/"圆管"/"圆钢"/"扁通"/"方通"/"角钢"/"槽钢"/...) is
        inferred from which named parameters exist on the CATPart.
        NOTE(review): locals such as thkv/Len/Diav/model are only bound when
        the matching parameter exists; an unexpected parameter combination
        would raise NameError in the branches below -- confirm all templates
        define the parameters their branch consumes.
        """
        weight = round(target_obj.analyze.mass,2)
        partno = target_obj.part_number
        definition = target_obj.definition
        self.excel.input(rowno, columnno, partno)
        self.excel.input(rowno, 14, weight)
        category = " "
        definition_text = " "
        if target_obj.is_catpart():
            part_parameters = target_obj.parameters
            # Pull every recognised parameter that is present on the part.
            if part_parameters.is_parameter("Material"):
                materialv = part_parameters.item("Material").value
            if part_parameters.is_parameter("THK"):
                thkv = round(part_parameters.item("THK").value,1)
            if part_parameters.is_parameter("W"):
                Wid = part_parameters.item("W").value
            if part_parameters.is_parameter("L"):
                Len = float(part_parameters.item("L").value)
            if part_parameters.is_parameter("D_in"):
                D_inv = float(part_parameters.item("D_in").value)
            if part_parameters.is_parameter("D_out"):
                Diav = float(part_parameters.item("D_out").value)
            if part_parameters.is_parameter("D"):
                Diav = float(part_parameters.item("D").value)
            if part_parameters.is_parameter("A"):
                Ah = part_parameters.item("A").value
            if part_parameters.is_parameter("B"):
                Bh = part_parameters.item("B").value
            if part_parameters.is_parameter("t"):
                tv = part_parameters.item("t").value
            if part_parameters.is_parameter("model"):
                model = part_parameters.item("model").value
            if part_parameters.is_parameter("Model"):
                model = part_parameters.item("Model").value
            # Classify by which parameter combination is present.
            if part_parameters.is_parameter("W"):
                if part_parameters.is_parameter("L"):
                    if part_parameters.is_parameter("THK"):
                        category = "钢板"
                        definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Wid)) + "x"+ str(int(Len))
            elif part_parameters.is_parameter("D_in"):
                if part_parameters.is_parameter("D_out"):
                    if part_parameters.is_parameter("L"):
                        category = "圆管"
                        definition_text = str(category) + " " + str(int(Diav)) + "x" + str(int(D_inv)) + "x" + "L=" + str(int(Len))
                    elif part_parameters.is_parameter("THK"):
                        category = "钢板"
                        definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Diav))
            elif part_parameters.is_parameter("D"):
                if part_parameters.is_parameter("THK"):
                    category = "钢板"
                    definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Diav))
                elif part_parameters.is_parameter("L"):
                    category = "圆钢"
                    definition_text = str(category) + " " + "D" + str(int(Diav)) + "x" + "L=" + str(int(Len))
            elif part_parameters.is_parameter("D_out"):
                if part_parameters.is_parameter("THK"):
                    category = "钢板"
                    definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Diav))
                elif part_parameters.is_parameter("L"):
                    category = "圆钢"
                    definition_text = str(category) + " " + "D" + str(int(Diav)) + "x" + "L=" + str(int(Len))
            elif part_parameters.is_parameter("A"):
                if part_parameters.is_parameter("t"):
                    if part_parameters.is_parameter("B"):
                        category = "扁通"
                        definition_text = str(model) + "," + "L=" + str(int(Len))
                    else:
                        category = "方通"
                        definition_text = str(model) + "," + "L=" + str(int(Len))
            elif "角钢" in definition:
                category = "角钢"
                if part_parameters.is_parameter("model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
                elif part_parameters.is_parameter("Model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
            elif "槽钢" in definition:
                category = "槽钢"
                if part_parameters.is_parameter("model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
                elif part_parameters.is_parameter("Model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
            else :
                category = "其他"
                definition_text = target_obj.definition
            '''
            elif "扁通" in definition:
                category = "扁通"
                if part_parameters.is_parameter("Model"):
                    definition_text = str(category) + str(model) + "L=" + str(int(Len)) + "mm"
                else:
                    definition_text = target_obj.definition
            elif "圆通" in definition:
                category = "圆通"
                if part_parameters.is_parameter("Model"):
                    definition_text = str(category) + str(model) + "L=" + str(int(Len)) + "mm"
                else:
                    definition_text = target_obj.definition
            elif "方通" in definition:
                category = "方通"
                if part_parameters.is_parameter("Model"):
                    definition_text = str(category) + str(model) + "L=" + str(int(Len)) + "mm"
                else:
                    definition_text = target_obj.definition
            elif "钢板" in definition:
                category = "钣金"
            '''
            # Dimension / material / spec columns (K=11, L=12, M=13, I=9, J=10).
            self.excel.input(rowno, 8, category)
            if part_parameters.is_parameter("L"):
                self.excel.input(rowno, 11, Len)
            if part_parameters.is_parameter("W"):
                self.excel.input(rowno, 12, Wid)
            if part_parameters.is_parameter("THK"):
                self.excel.input(rowno, 13, thkv)
            elif part_parameters.is_parameter("t"):
                self.excel.input(rowno, 13, tv)
            if part_parameters.is_parameter("Material"):
                self.excel.input(rowno, 9, materialv)
            self.excel.input(rowno, 10, definition_text)
        else:
            # Sub-assemblies (CATProduct) are simply tagged "组装件".
            category = "组装件"
            self.excel.input(rowno, 8, category)
            self.excel.input(rowno, 10, definition_text)
# Walk the CATIA assembly tree (hand-unrolled to 6 levels of nesting).
# Per level N the script keeps four parallel lists:
#   qlN    - every "APC" part number seen (for quantity counting)
#   plN    - part numbers already fully processed at this level
#   nplN   - unique part numbers, in first-seen order
#   indexN - worksheet row used for each unique part number
p = process()
list_1 = []
newlist_1 = []
pl1 = []
npl1 = []
ql1 = []
index1 = []
stime = datetime.now()
p.excel.input(2,2,product.part_number)
p.excel.input(2,1,1)
for product1 in product.products:
    if "APC" in product1.part_number:
        ql1.append(product1.part_number)
    if product1.part_number not in pl1:
        npl1.append(product1.part_number)
        p.prod_process(product1, 3, list_1, newlist_1)
        index1.append(p.iteration)
        print("-------------")
        print(index1)
        print(npl1)
        print("-------------")
        if product1.is_catproduct():
            list_2 = []
            newlist_2 = []
            pl2 = []
            npl2 = []
            ql2 = []
            index2 = []
            for product2 in product1.products:
                if "APC" in product2.part_number:
                    ql2.append(product2.part_number)
                if product2.part_number not in pl2:
                    npl2.append(product2.part_number)
                    p.prod_process(product2, 4, list_2, newlist_2)
                    index2.append(p.iteration)
                    if product2.is_catproduct():
                        list_3 = []
                        newlist_3 = []
                        pl3 = []
                        npl3 = []
                        ql3 = []
                        index3 = []
                        for product3 in product2.products:
                            if "APC" in product3.part_number:
                                ql3.append(product3.part_number)
                            if product3.part_number not in pl3:
                                npl3.append(product3.part_number)
                                p.prod_process(product3, 5, list_3, newlist_3)
                                index3.append(p.iteration)
                                if product3.is_catproduct():
                                    list_4 = []
                                    newlist_4 = []
                                    pl4 = []
                                    npl4 = []
                                    ql4 = []
                                    index4 = []
                                    for product4 in product3.products:
                                        if "APC" in product4.part_number:
                                            ql4.append(product4.part_number)
                                        if product4.part_number not in pl4:
                                            npl4.append(product4.part_number)
                                            p.prod_process(product4, 6, list_4, newlist_4)
                                            index4.append(p.iteration)
                                            if product4.is_catproduct():
                                                list_5 = []
                                                newlist_5 = []
                                                pl5 = []
                                                npl5 = []
                                                ql5 = []
                                                index5 = []
                                                for product5 in product4.products:
                                                    if "APC" in product5.part_number:
                                                        ql5.append(product5.part_number)
                                                    if product5.part_number not in pl5:
                                                        npl5.append(product5.part_number)
                                                        p.prod_process(product5, 7, list_5, newlist_5)
                                                        index5.append(p.iteration)
                                                        if product5.is_catproduct():
                                                            list_6 = []
                                                            newlist_6 = []
                                                            pl6 = []
                                                            npl6 = []
                                                            ql6 = []
                                                            index6 = []
                                                            for product6 in product5.products:
                                                                if "APC" in product6.part_number:
                                                                    ql6.append(product6.part_number)
                                                                if product6.part_number not in pl6:
                                                                    npl6.append(product6.part_number)
                                                                    p.prod_process(product6, 8, list_6, newlist_6)
                                                                    index6.append(p.iteration)
                                                                elif product6.part_number in npl6:
                                                                    p.quantity_update(product6, 8, ql6, npl6, index6)
                                                        #else :
                                                        #    p.prod_process(product5, 6, list_5, newlist_5)
                                                        #    index5.append(p.iteration)
                                                        pl5.append(product5.part_number)
                                                    # NOTE(review): tests product4 (not product5) against npl5 --
                                                    # looks like a copy/paste slip; verify level-5 quantity updates.
                                                    elif product4.part_number in npl5:
                                                        p.quantity_update(product5, 7, ql5, npl5, index5)
                                            #else :
                                            #    p.prod_process(product4, 5, list_4, newlist_4)
                                            #    index4.append(p.iteration)
                                            pl4.append(product4.part_number)
                                        elif product4.part_number in npl4:
                                            p.quantity_update(product4, 6, ql4, npl4, index4)
                                #else :
                                #    p.prod_process(product3, 4, list_3, newlist_3)
                                #    index3.append(p.iteration)
                                pl3.append(product3.part_number)
                            elif product3.part_number in npl3:
                                p.quantity_update(product3, 5, ql3, npl3, index3)
                    #else :
                    #    p.prod_process(product2, 3, list_2, newlist_2)
                    #    index2.append(p.iteration)
                    pl2.append(product2.part_number)
                elif product2.part_number in npl2:
                    p.quantity_update(product2, 4, ql2, npl2, index2)
        #else:
        #    p.prod_process(product1, 2, list_1, newlist_1)
        #    index1.append(p.iteration)
        pl1.append(product1.part_number)
    elif product1.part_number in npl1:
        p.quantity_update(product1, 3, ql1, npl1, index1)
p.excel.save_excel()
# Collect every distinct drawing number written on the machining sheet,
# then list them (sorted) on the drawing-list sheet starting at row 3.
drawinglist =[]
max = int(p.excel.ws.max_row)
# NOTE(review): range(2, max) skips the last populated row, and only
# columns B..E are scanned while parts are written in columns C..H
# (depths 3..8) -- confirm deeper levels are intentionally excluded.
for r in range(2, max):
    for c in range(2, 6):
        drawingno = p.excel.ws.cell(r,c).value
        if drawingno not in drawinglist and drawingno != None:
            drawinglist.append(drawingno)
drawinglist.sort()
for i in range(0,len(drawinglist)):
    p.excel.ds.cell(row=i+3,column=1).value = i+1
    p.excel.ds.cell(row=i+3,column=3).value = drawinglist[i]
    p.excel.ds.cell(row=i+3,column=4).value = "A"
    p.excel.ds.cell(row=i+3,column=5).value = "3"
    p.excel.ds.cell(row=i+3,column=7).value = "A"
# Fill column H ("总生产数量", total production quantity) of the drawing-list
# sheet: for every listed drawing number, count how many part numbers in the
# product tree contain it.
def _count_occurrences(node, dwgno):
    """Recursively count descendants of *node* whose part_number contains
    *dwgno*.

    Generalizes the original hand-unrolled traversal (limited to 6 assembly
    levels) to arbitrary depth; containers are descended into only when
    is_catproduct() is true, exactly as before.
    """
    total = 0
    for child in node.products:
        if dwgno in child.part_number:
            total = total + 1
        if child.is_catproduct():
            total = total + _count_occurrences(child, dwgno)
    return total

# Drawing numbers live in column C starting at row 3 (row 2 is the header);
# range(2, max_row) with row=ii+1 reproduces the original row coverage.
# (Renamed from `max`, which shadowed the builtin.)
max_row = int(p.excel.ds.max_row)
for ii in range(2, max_row):
    dwgno = str(p.excel.ds.cell(row=ii+1, column=3).value)
    print(dwgno)
    p.excel.ds.cell(row=ii+1,column=8).value = _count_occurrences(product, dwgno)
p.excel.save_excel()
etime = datetime.now()
print("Start Time: ", stime.strftime("%H:%M:%S"))
print("End Time: ", etime.strftime("%H:%M:%S"))
| kang851216/CATIA_macro | manufacturing and process list_adding drawing list_test.py | manufacturing and process list_adding drawing list_test.py | py | 24,204 | python | en | code | 0 | github-code | 36 |
1528415930 |
import cv2
import tensorflow as tf
import numpy as np
import glob
import os
import time
import argparse
import configparser
from auto_pose.ae import factory, utils
# CLI: positional experiment name ("group/name" or just "name") plus -f with
# an image file or a folder of images.
parser = argparse.ArgumentParser()
parser.add_argument("experiment_name")
parser.add_argument("-f", "--file_str", required=True, help='folder or filename to image(s)')
# parser.add_argument("-gt_bb", action='store_true', default=False)
arguments = parser.parse_args()
# Split "group/name" into experiment_group and experiment_name.
full_name = arguments.experiment_name.split('/')
experiment_name = full_name.pop()
experiment_group = full_name.pop() if len(full_name) > 0 else ''
print('experiment name: ', experiment_name)
print('experiment group: ', experiment_group)
file_str = arguments.file_str
# A directory expands to all .png/.jpg/.JPG files inside (sorted); a plain
# file path is used as-is.
if os.path.isdir(file_str):
    files = sorted(glob.glob(os.path.join(str(file_str),'*.png'))+glob.glob(os.path.join(str(file_str),'*.jpg'))+glob.glob(os.path.join(str(file_str),'*.JPG')))
else:
    files = [file_str]
# Resolve the AE workspace from the environment; abort early with usage help
# if it is not configured.
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
if workspace_path is None:  # identity check per PEP 8 (was `== None`)
    print('Please define a workspace path:\n')
    print('export AE_WORKSPACE_PATH=/path/to/workspace\n')
    exit(-1)

log_dir = utils.get_log_dir(workspace_path,experiment_name,experiment_group)
ckpt_dir = utils.get_checkpoint_dir(log_dir)

# Build the encoder graph (weights are restored later, inside the session).
start_time = time.time()
encoder = factory.build_codebook_from_name(experiment_name, experiment_group, return_encoder=True)
end_time = time.time()
print("encoder loading: ", str(end_time - start_time))
with tf.Session() as sess:
    # Restore the trained weights once, then run inference per image.
    start_time = time.time()
    factory.restore_checkpoint(sess, tf.train.Saver(), ckpt_dir)
    end_time = time.time()
    print("restoring checkpoint: ", str(end_time - start_time))
    # for i in range(1, 8):
    for file in files:
        # Preprocess: 256x256 grayscale with an explicit channel axis (HxWx1).
        im = cv2.imread(file)
        im = cv2.resize(im, (256, 256))
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im = np.expand_dims(im, axis=2)
        start_time = time.time()
        latent_vector = encoder.latent_vector(sess, im)
        end_time = time.time()
        print('latent vector: ', latent_vector)
        print("inference time: ", int(1000 * (end_time - start_time)) / 1000., " fps: ",
              int(1 / (end_time - start_time)))
| logivations/AugmentedAutoencoder | auto_pose/test/encoder_inference.py | encoder_inference.py | py | 2,190 | python | en | code | 1 | github-code | 36 |
35387136484 | #!/usr/bin/env python3
from sys import stderr
from multilanguage import Env, Lang, TALcolors
from TALinputs import TALinput
from TALfiles import TALfilesHelper
import os
import random
import networkx as nx
import vertex_cover_lib as vcl
import matplotlib
import multiprocessing
# METADATA OF THIS TAL_SERVICE:
# Declared service arguments: (name, type) pairs parsed by Env below.
args_list = [
    ('source',str),
    ('collection',str),
    ('instance_id',int),
    ('instance_format',str),
    ('num_vertices',int),
    ('num_edges',int),
    ('plot',bool),
    ('plot_sol',bool),
    ('seed',str),
    ('vc_sol_val',str),
    ('display',bool),
    ('silent',bool),
    ('lang',str),
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
# NOTE(review): the Lang callback eval()s an f-string built from `fstring`;
# acceptable only because feedback templates come from the service itself,
# never from user input.
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"), print_opening_msg = 'now')
TALf = TALfilesHelper(TAc, ENV)
# True when the active matplotlib backend is one vcl can plot with
# (case-insensitive membership test); replaces the flag-then-if original.
chk_backend = matplotlib.get_backend().lower() in map(str.lower, vcl.backends)
## Input Sources
# The instance graph can come from: an attached file, interactive terminal
# input, the random generator, or the instance catalogue.
if TALf.exists_input_file('instance'):
    instance = vcl.get_instance_from_str(TALf.input_file_as_str('instance'), instance_format_name=ENV["instance_format"])
    TAc.print(LANG.render_feedback("successful-load", 'The file you have associated to `instance` filehandler has been successfully loaded.'), "yellow", ["bold"])
elif ENV["source"] == 'terminal':
    instance = {}
    instance['num_vertices'] = ENV['num_vertices']
    instance['num_edges'] = ENV['num_edges']
    #TAc.print(LANG.render_feedback("waiting-line", f'#? Waiting for the graph.\nGraph format: (x,y) (w,z) ... (n,m)\n'), "yellow")
    TAc.print(LANG.render_feedback("waiting-line", f'#? Waiting for the graph.\n'), "yellow")
    TAc.print(LANG.render_feedback("insert-edges", f'Given {ENV["num_vertices"]} vertices labelled with the naturals in the interval [0,{ENV["num_vertices"]-1}], you are now expected to enter {ENV["num_edges"]} edges. To specify an edge, simply enter its two endonodes separated by spaces.'), "yellow", ["bold"])
    edges = []
    for i in range(1,1+ENV["num_edges"]):
        TAc.print(LANG.render_feedback("insert-edge", f'Insert the two endpoints of edge {i}, that is, enter a line with two naturals in the interval [0,{ENV["num_vertices"]-1}], separated by spaces.'), "yellow", ["bold"])
        u,v = TALinput(int, 2, TAc=TAc)
        edges.append([u,v])
    # Validate endpoints after the fact; abort on the first out-of-range one.
    for u,v in edges:
        if u not in range(ENV['num_vertices']) or v not in range(ENV['num_vertices']):
            TAc.print(f'Edge ({u}, {v}) is not a valid edge for the graph. Aborting.\n', "red", ["bold"], flush=True)
            exit(0)
    if len(edges) != ENV['num_edges']:
        TAc.print(LANG.render_feedback("wrong-edges-number", f'\nWrong number of edges ({len(edges)} instead of {ENV["num_edges"]})\n'), "red", ["bold"])
        exit(0)
    G = nx.Graph()
    G.add_nodes_from([int(v) for v in range(ENV['num_vertices'])])
    G.add_edges_from(edges)
    instance['graph'] = G
    # NOTE(review): instance_str/output_filename are computed here but never
    # used afterwards in this script -- possibly leftover from file export.
    instance_str = vcl.instance_to_str(instance, format_name=ENV['instance_format'])
    output_filename = f"terminal_instance.{ENV['instance_format']}.txt"
elif ENV["source"] == 'randgen_1':
    # Get random instance
    instance = vcl.instances_generator(1, 1, ENV['num_vertices'], ENV['num_edges'], ENV['seed'])[0]
else: # take instance from catalogue
    #instance_str = TALf.get_catalogue_instancefile_as_str_from_id_and_ext(ENV["instance_id"], format_extension=vcl.format_name_to_file_extension(ENV["instance_format"],'instance'))
    instance_str = TALf.get_catalogue_instancefile_as_str_from_id_collection_and_ext(ENV["collection"], ENV["instance_id"], format_extension=vcl.format_name_to_file_extension(ENV["instance_format"],'instance'))
    instance = vcl.get_instance_from_str(instance_str, instance_format_name=ENV["instance_format"])
    TAc.print(LANG.render_feedback("instance-from-catalogue-successful", f'The instance with instance_id={ENV["instance_id"]} has been successfully retrieved from the catalogue.'), "yellow", ["bold"], flush=True)
if ENV['display']:
    TAc.print(LANG.render_feedback("this-is-the-instance", '\nThis is the instance:\n'), "white", ["bold"], flush=True)
    TAc.print(vcl.instance_to_str(instance,ENV["instance_format"]), "white", ["bold"], flush=True)
# Acquire the user's answer: either interactively (vc_sol_val == '0') or from
# the vc_sol_val argument itself.  choice[0] == 'C'/'c' means "just show me
# the computed approximation".
if ENV['vc_sol_val'] == '0': # manual insertion
    TAc.print(LANG.render_feedback("insert-opt-value", f'\nWrite here your conjectured maximal matching size for this graph if you have one. Otherwise, if you only intend to be told about the approximation, enter "C".'), "yellow", ["bold"], flush=True)
    if ENV['plot'] and chk_backend:
        # Plot in a child process so TALinput can keep reading stdin.
        proc = multiprocessing.Process(target=vcl.plot_graph, args=(instance['graph'],))
        proc.start()
        #vcl.plot_graph(instance['graph'])
    choice = TALinput(str, 1, TAc=TAc)
    if choice[0] != 'C' and choice[0] != 'c':
        if not choice[0].isdigit():
            TAc.print(LANG.render_feedback("invalid-input", f'Input must be an integer number or "C". Aborting.\n'), "red", ["bold"], flush=True)
            if ENV['plot'] and chk_backend:
                proc.terminate()
            exit(0)
        TAc.print(LANG.render_feedback("waiting-matching", f'Please, provide the maximal matching:'), "yellow", ["bold"], flush=True)
        answer = []
        for i in range(int(choice[0])):
            TAc.print(LANG.render_feedback("insert-edge", f'Insert the two endpoints of edge {i}, that is, enter a line with two naturals in the interval [0,{ENV["num_vertices"]-1}], separated by spaces.'), "yellow", ["bold"], flush=True)
            u,v = TALinput(int, 2, TAc=TAc)
            answer.append((u,v))
else:
    # NOTE(review): eval() on operator-supplied vc_sol_val tokens -- fine for
    # a trusted TA parameter, never expose to untrusted callers.
    answer = [eval(t) for t in ENV['vc_sol_val'].split()]
    choice = ' '
# Reject any provided edge that is not actually in the graph.
if choice[0] != 'C' and choice[0] != 'c':
    for t in answer:
        if t not in instance['graph'].edges():
            TAc.print(LANG.render_feedback("edge-not-in-graph", f'Edge {t} is not an edge of the graph. Aborting.\n'), "red", ["bold"], flush=True)
            if ENV['plot'] and chk_backend:
                proc.terminate()
            exit(0)
# Compute (or load from the catalogue) the reference 2-approximation:
# size_sol = |cover|, appr_sol = cover vertices, max_matching = matching edges.
if (ENV['source'] == "catalogue" and instance['exact_sol'] == 1) or (ENV['source'] != "catalogue"):
    size_sol,appr_sol,max_matching = vcl.calculate_approx_vc(instance['graph'], 'greedy')
else:
    #appr_sol = instance['sol'].replace(')(',' ').replace('(','').replace(')','').replace(',','')
    #max_matching = instance['sol']
    if not instance['weighted']:
        # Stored solution format: line 0 = cover vertices, line 1 = matching.
        sol = instance['sol'].split('\n')
        appr_sol = sol[0]
        max_matching = sol[1]
        size_sol = len([int(i) for i in appr_sol.split() ])
    else:
        size_sol,appr_sol,max_matching = vcl.calculate_approx_vc(instance['graph'], 'greedy')
if choice[0] == 'C' or choice[0] == 'c':
    # User only asked to see the computed approximation.
    TAc.print(LANG.render_feedback("best-sol", f'A possible 2-approximated vertex cover is: '), "green", ["bold"], flush=True, end='')
    TAc.print(f'{appr_sol}.', "white", ["bold"], flush=True)
    TAc.print(LANG.render_feedback("min-maximal-matching", f'A possible maximal matching is: '), "green", ["bold"], flush=True, end='')
    TAc.print(f'{max_matching}.', "white", ["bold"], flush=True)
    TAc.print(LANG.render_feedback("size-sol", f'The size of the 2-approximated vertex cover is: '), "green", ["bold"], flush=True, end='')
    TAc.print(f'{size_sol}.', "white", ["bold"], flush=True)
else:
    for e in answer:
        if e not in instance['graph'].edges():
            TAc.print(LANG.render_feedback("edge-not-in-graph", f'Edge {e} not in the graph. Aborting.'), "red", ["bold"], flush=True)
            if ENV['plot'] and chk_backend:
                proc.terminate()
            exit(0)
    # Each matching edge contributes both endpoints to the cover.
    size_ans = 2 * (len(answer))
    is_vertex_cover, reason, data = vcl.verify_approx_vc(answer, instance['graph'], 1)
    if is_vertex_cover:
        if size_ans == size_sol:
            TAc.OK()
            TAc.print(LANG.render_feedback("right-best-sol", f'We agree, the solution you provided is a valid 2-approximation vertex cover for the graph.'), "white", ["bold"], flush=True)
        elif size_ans > size_sol:
            TAc.print(LANG.render_feedback("right-sol", f'The solution you provided is a valid 2-approximation vertex cover for the graph. You can improve your approximation.'), "yellow", ["bold"], flush=True)
        else:
            TAc.OK()
            TAc.print(LANG.render_feedback("new-best-sol", f'Great! The solution you provided is a valid 2-approximation vertex cover for the graph and it\'s better than mine!'), "green", ["bold"], flush=True)
            # A better solution than the stored one: write it back to the
            # catalogue instance file.
            if ENV['source'] == 'catalogue' and not instance['exact_sol'] and not instance['weighted']:
                #path=os.path.join(ENV.META_DIR, 'instances_catalogue', 'all_instances')
                path=os.path.join(ENV.META_DIR, 'instances_catalogue', ENV['collection'])
                instance_filename = f'instance_{str(ENV["instance_id"]).zfill(3)}'
                answer = ' '.join(map(str, answer))
                risp = f'{answer.replace(",", " ").replace("(", "").replace(")","")}'
                #matching = f'{answer.replace(",",", ").replace(") (", ")(")}'
                matching = f'{answer.replace(",",", ")}'
                new_data = f'{risp}\n{matching}'
                #vcl.update_instance_txt(path, instance_filename, answer)
                vcl.update_instance_txt(path, instance_filename, new_data)
    else:
        # reason codes from verify_approx_vc: 1 = incident edges,
        # 2 = uncovered edges (data = list), 3 = repeated vertex.
        TAc.NO()
        TAc.print(LANG.render_feedback("wrong-sol", f'We don\'t agree, the solution you provided is not a valid 2-approximation vertex cover for the graph.'), "red", ["bold"], flush=True)
        if reason == 1:
            TAc.print(LANG.render_feedback("edge-incident", f'Reason: edge {data} incident to another one.'), "red", ["bold"], flush=True)
        elif reason == 2:
            TAc.print(LANG.render_feedback("not-vertex-cover", f'Reason: not a vertex cover. Edges not covered: '), "red", ["bold"], flush=True, end='')
            for t in data:
                TAc.print(f'{t} ', "red", ["bold"], flush=True, end='')
        elif reason == 3:
            TAc.print(LANG.render_feedback("node-already-visited", f'Reason: vertex {data} already visited.'), "red", ["bold"], flush=True)
print()
# Optionally plot the solution, replacing the plain-graph plot if shown.
if ENV['plot_sol'] and chk_backend:
    if ENV['plot']:
        proc.terminate()
    if choice[0] != 'C' and choice[0] != 'c':
        vertices = ' '.join(map(str, answer)).replace('(', '').replace(') (',' ').replace(')','').replace(',',' ')
        proc1 = multiprocessing.Process(target=vcl.plot_2app_vc, args=(instance['graph'],vertices,answer))
        proc1.start()
        #vcl.plot_2app_vc(instance['graph'], vertices, answer)
    else:
        proc1 = multiprocessing.Process(target=vcl.plot_2app_vc, args=(instance['graph'],appr_sol,[eval(t) for t in max_matching.replace(', ',',').split()]))
        proc1.start()
        #vcl.plot_2app_vc(instance['graph'], appr_sol, [eval(t) for t in max_matching.replace(', ',',').split()])
exit(0)
| romeorizzi/TALight | example_problems/tutorial/vertex_cover/services/check_approx_vc_driver.py | check_approx_vc_driver.py | py | 10,368 | python | en | code | 11 | github-code | 36 |
21121065737 | """File system hook for the S3 file system."""
from builtins import super
import posixpath
try:
import s3fs
except ImportError:
s3fs = None
from . import FsHook
class S3Hook(FsHook):
    """Hook for interacting with files in S3 via ``s3fs``.

    The underlying ``S3FileSystem`` is created lazily on first use and cached
    until :meth:`disconnect` is called.
    """

    def __init__(self, conn_id=None):
        super().__init__()
        self._conn_id = conn_id
        self._conn = None

    def get_conn(self):
        """Return the cached ``s3fs.S3FileSystem``, building it on first use.

        Credentials and an optional server-side ``encryption`` extra are taken
        from the Airflow connection named by ``conn_id``; without a
        ``conn_id`` the default boto credential chain is used.
        """
        if s3fs is None:
            raise ImportError("s3fs must be installed to use the S3Hook")

        if self._conn is None:
            if self._conn_id is None:
                self._conn = s3fs.S3FileSystem()
            else:
                config = self.get_connection(self._conn_id)

                extra_kwargs = {}
                if "encryption" in config.extra_dejson:
                    extra_kwargs["ServerSideEncryption"] = config.extra_dejson[
                        "encryption"
                    ]

                self._conn = s3fs.S3FileSystem(
                    key=config.login,
                    secret=config.password,
                    s3_additional_kwargs=extra_kwargs,
                )

        return self._conn

    def disconnect(self):
        # Drop the cached filesystem; the next get_conn() call reconnects.
        self._conn = None

    def open(self, file_path, mode="rb"):
        return self.get_conn().open(file_path, mode=mode)

    def exists(self, file_path):
        return self.get_conn().exists(file_path)

    def isdir(self, path):
        """Return True if *path* is a bucket or an S3 'directory' entry."""
        if "/" not in path:
            # Path looks like a bare bucket name; buckets act as directories.
            return True

        parent_dir = posixpath.dirname(path)
        for child in self.get_conn().ls(parent_dir, detail=True):
            if child["Key"] == path and child["StorageClass"] == "DIRECTORY":
                return True
        return False

    def mkdir(self, dir_path, mode=0o755, exist_ok=True):
        self.makedirs(dir_path, mode=mode, exist_ok=exist_ok)

    def listdir(self, dir_path):
        """List entry names (relative to *dir_path*) directly under it."""
        # BUG FIX: s3fs's ls() takes the keyword `detail`, not `details`
        # (the original raised TypeError; isdir() above already used `detail`).
        return [posixpath.relpath(fp, start=dir_path)
                for fp in self.get_conn().ls(dir_path, detail=False)]

    def rm(self, file_path):
        self.get_conn().rm(file_path, recursive=False)

    def rmtree(self, dir_path):
        self.get_conn().rm(dir_path, recursive=True)

    # Overridden default implementations.

    def makedirs(self, dir_path, mode=0o755, exist_ok=True):
        # NOTE(review): `mode` is accepted for interface compatibility but
        # has no S3 equivalent and is ignored, as in the original.
        if self.exists(dir_path):
            if not exist_ok:
                self._raise_dir_exists(dir_path)
        else:
            self.get_conn().mkdir(dir_path)

    def walk(self, root):
        # Trailing slashes would confuse key prefix matching in the base walk.
        root = _remove_trailing_slash(root)
        for entry in super().walk(root):
            yield entry
def _remove_trailing_slash(path):
if path.endswith("/"):
return path[:-1]
return path
| jrderuiter/airflow-fs | src/airflow_fs/hooks/s3_hook.py | s3_hook.py | py | 2,720 | python | en | code | 16 | github-code | 36 |
4855310925 | #!/usr/bin/python
# -*- coding: utf-8 -*
from fabric.api import *
from fabric.context_managers import *
from fabric.contrib.console import confirm
from fabric.contrib.files import *
from fabric.contrib.project import rsync_project
import fabric.operations
import time,os
import logging
import base64
from getpass import getpass
import json
import sys
# Deployment constants.
# Local directory holding the installation tarballs.
env.local_softdir="/opt/software/"
# Remote directory the tarballs are uploaded to.
env.remote_softdir="/opt/software/"
# Remote home/installation directory.
env.remote_dir="/opt/machtalk/"
############## MQ
@task
@roles('rabbitmq')
def rabbitmq_putfile():
    """Upload the RabbitMQ and Erlang tarballs to the remote host."""
    for archive in ("rabbitmq_server-3.6.5.tar.gz", "erlang.tar.gz"):
        put("%s%s" % (env.local_softdir, archive), env.remote_dir)
@task
@roles('rabbitmq')
def rabbitmq_deploy():
    """Unpack, configure and start a RabbitMQ cluster node.

    Node 1 creates the user/vhost/permissions; every other node joins the
    cluster rooted at rabbit1.  Assumes the tarballs were already uploaded
    (see rabbitmq_putfile).
    """
    with cd(env.remote_dir):
        # Variables passed in through fabric's env.
        ip = env.host
        info = env.info
        # Abort if the install directory already exists.
        run(""" [ -e "./rabbitmq" ] && exit 1 || echo '开始部署rabbitmq!' """)
        # Derive this node's 1-based index and name from its position in the
        # configured server list.
        ip = env.host
        ipListNumber = info['services']['rabbitmq']['servers'].index(ip) + 1
        serverName = "rabbit%s"%(ipListNumber)
        # Set the hostname (currently disabled).
        # sudo("""
#cat << 'EOF' > /etc/sysconfig/network
#NETWORKING=yes
#HOSTNAME=%s
#EOF
#hostname %s
#    """%(serverName,serverName))
        # Rewrite /etc/hosts with one entry per cluster node.
        conf_hosts = ""
        itemNumber = 0
        for item in info['services']['rabbitmq']['servers']:
            conf_hosts += """
        %s rabbit%s"""%(item, itemNumber + 1)
            itemNumber += 1
        sudo("""
        sed -i "/rabbit/d" /etc/hosts
        #service network restart
        echo '%s' >> /etc/hosts
        """%(conf_hosts))
        # Unpack the previously uploaded tarballs.
        fileNeedTransfer = []
        fileNeedTransfer.append("rabbitmq_server-3.6.5.tar.gz")
        fileNeedTransfer.append("erlang.tar.gz")
        for tarFileName in fileNeedTransfer:
            #put("%s%s" % (Const.SOURCE_DIR,tarFileName), Const.DEST_DIR)
            run("tar xzf %s"%tarFileName)
            #run("rm -f %s"%tarFileName)
        # Create the version-independent symlink.
        run("""ln -s ./rabbitmq_server-3.6.5 ./rabbitmq && echo '软链创建成功!' || echo '软链已经存在!' """)
        # Set this node's RabbitMQ node name.
        run('''
        sed -i '/RABBITMQ_NODENAME/d' ./rabbitmq/etc/rabbitmq/rabbitmq-env.conf || echo "异常,可能文件不存在!"
        echo 'RABBITMQ_NODENAME=rabbit@%s' >> ./rabbitmq/etc/rabbitmq/rabbitmq-env.conf
        '''%serverName)
        # Erlang cookie: must be identical on every cluster node.
        run(""" echo -n "KGGDOQPNUOBMMBGGVRCU" > ~/.erlang.cookie """)
        run(""" chmod 600 ~/.erlang.cookie""")
        # Put the rabbitmq sbin directory on PATH.
        run('''
        sed -i '/rabbitmq/d' ~/.bashrc
        sed -i '$a export PATH=%s/rabbitmq/sbin:$PATH' ~/.bashrc
        '''%(env.remote_dir))
        # Fix executable permissions.
        run("chmod 755 ./rabbitmq/sbin/*")
        run("chmod 755 ./erlang/bin/*")
        run("chmod 755 ./erlang/lib/erlang/erts-7.3/bin/*")
        # Start the broker (detached).
        run("set -m;./rabbitmq/sbin/rabbitmq-server -detached || echo '进程已经存在!'")
        # Enable the management plugin (web UI on port 15672).
        run("./rabbitmq/sbin/rabbitmq-plugins enable rabbitmq_management || echo '插件安装异常!' ")
        if ipListNumber == 1:
            # First node: create the application user...
            run("./rabbitmq/sbin/rabbitmqctl add_user %s %s || echo '123' "%(info['services']['rabbitmq']['usrname'],info['services']['rabbitmq']['pwd']) )
            # ...and the vhost plus its permissions.
            run("./rabbitmq/sbin/rabbitmqctl add_vhost /xcloud || echo 'vhost添加异常!' ")
            run("./rabbitmq/sbin/rabbitmqctl set_user_tags %s administrator || echo '123' "%info['services']['rabbitmq']['usrname'])
            run("""./rabbitmq/sbin/rabbitmqctl set_permissions -p /xcloud %s ".*" ".*" ".*" """%info['services']['rabbitmq']['usrname'] )
        else:
            # Other nodes: join the cluster rooted at rabbit1.
            run("""
            rabbitmqctl stop_app
            #rabbitmqctl join_cluster --ram rabbit@rabbit1
            rabbitmqctl join_cluster rabbit@rabbit1
            rabbitmqctl start_app
            """)
    '''
    # 备注
    # 可以查看15672端口
    http://192.168.3.133:15672
    '''
@task
@roles('rabbitmq')
def rabbitmq_clean():
    """Tear RabbitMQ down on every 'rabbitmq' role host: kill any running
    rabbitmq processes, then delete the unpacked Erlang/RabbitMQ dirs."""
    with cd(env.remote_dir):
        run(" ps aux | grep rabbitmq | grep -v grep | awk '{print $2}' | xargs -i kill -9 {} ")
        run("rm -rf erlang rabbitmq_server-3.6.5 rabbitmq")
@task
@roles('rabbitmq')
def rabbitmq_restart():
    """Restart RabbitMQ: force-kill existing processes, then launch the
    server detached (stderr of the kill pipeline is discarded)."""
    with cd(env.remote_dir):
        run(" ps aux | grep rabbitmq | grep -v grep | awk '{print $2}' | xargs -i kill -9 {} 2>/dev/null")
        run("set -m;./rabbitmq/sbin/rabbitmq-server -detached || echo '进程已经存在!'")
| zzlyzq/speeding | funcs/rabbitmq.py | rabbitmq.py | py | 5,005 | python | en | code | 1 | github-code | 36 |
26299614326 |
### ===== Load libraries =====
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings import CacheBackedEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.storage import LocalFileStore
from langchain.text_splitter import TokenTextSplitter
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA, LLMChain
from langchain.prompts import PromptTemplate
from huggingface_hub import login as hf_login
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel
import torch
from torch import cuda
import locale
locale.getpreferredencoding = lambda: "UTF-8"
def prepare_data():
    """Load library_data.csv and split it into 1000-token chunks.

    Chunks overlap by 200 tokens so context is not lost at chunk borders.
    Returns the list of split langchain Documents.
    """
    documents = CSVLoader("library_data.csv").load()
    splitter = TokenTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_documents(documents)
def prepare_data_retriever(library_doc):
    """Build a FAISS retriever over the documents with disk-cached embeddings.

    Embeddings are computed with a MiniLM sentence-transformer (on GPU when
    available) and cached under ./cache/ so repeated questions do not pay
    for re-embedding. The retriever uses MMR search with k=5.
    """
    embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'
    device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
    base_embeddings = HuggingFaceEmbeddings(
        model_name=embed_model_id,
        model_kwargs={'device': device},
        encode_kwargs={'device': device, 'batch_size': 32},
    )
    # CacheBackedEmbeddings saves time and money when a user asks the same
    # question again.
    cache_store = LocalFileStore("./cache/")
    cached_embedder = CacheBackedEmbeddings.from_bytes_store(
        base_embeddings, cache_store, namespace=embed_model_id
    )
    vector_index = FAISS.from_documents(library_doc, cached_embedder)
    # MMR balances relevance against diversity among the top-k hits.
    return vector_index.as_retriever(search_type="mmr", search_kwargs={"k": 5})
def load_llm(model_id):
    """Load a causal LM + tokenizer and wrap them as a LangChain pipeline.

    The fine-tuned falcon model is loaded in 8-bit with a bf16/fp16 compute
    dtype (bf16 only on compute-capability-8 GPUs); any other model id is
    loaded straight onto CUDA. Returns a HuggingFacePipeline.
    """
    import os

    # SECURITY FIX: the Hugging Face token used to be hard-coded here,
    # leaking the credential to anyone with repo access (the old "# ENV"
    # marker indicated it was meant to come from the environment).
    hf_login(token=os.environ.get("HUGGINGFACE_TOKEN"))
    if model_id == "SaloniJhalani/ft-falcon-7b-instruct":
        # bfloat16 is only supported on Ampere (capability 8.x); otherwise
        # fall back to float16.
        dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            trust_remote_code=True,
            load_in_8bit=True,
            device_map="auto",
            torch_dtype=dtype,
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map='cuda')
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    generate_text = transformers.pipeline(
        model=model,
        tokenizer=tokenizer,
        task='text-generation',
        return_full_text=True,
        temperature=0.0,
        max_new_tokens=1024,  # a higher number of tokens delays the prompt
        repetition_penalty=1.1  # avoid repeating
    )
    return HuggingFacePipeline(pipeline=generate_text)
def prepare_llm(llm, retriever):
    """Assemble the RetrievalQA chain from an LLM and a retriever.

    NOTE: the PromptTemplate built below mirrors the original code but is
    not consumed by the RetrievalQA chain.
    """
    instruction_prompt = PromptTemplate(
        input_variables=["instruction"],
        template="{instruction}"
    )
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
    )
    return qa_chain
def execute_code(code):
    """Strip markdown fences from model output, exec() it, return the code.

    Errors raised by the executed snippet are caught and printed, never
    propagated. The cleaned code string is returned either way.

    SECURITY NOTE: exec() runs arbitrary model-generated Python with full
    interpreter privileges -- only use on trusted output.
    """
    # Remove markdown fences/quotes, then keep only the part before any
    # trailing "###" commentary block.
    for marker in ("```python", "```", '"""'):
        code = code.replace(marker, "")
    code = code.split("###")[0]
    try:
        exec(code)
    except Exception as e:
        print(f"Error executing code:{str(e)}")
    return code
def init_llm_retriever(model_id):
    """End-to-end setup: data -> retriever -> LLM -> RetrievalQA chain."""
    print("\n", " Initialize the chat components ".center(100, "*"), "\n")
    documents = prepare_data()
    retriever = prepare_data_retriever(documents)
    language_model = load_llm(model_id)
    chain = prepare_llm(language_model, retriever)
    print("\n", " LLM is ready ".center(100, "*"), "\n")
    return chain
if __name__ == "__main__":
    # Script entry point: build the QA chain against a quantized CodeLlama.
    qa = init_llm_retriever("TheBloke/CodeLlama-7B-Python-GPTQ")
| Valkea/Omdena_Falcon | deployment02/backend/llm_setup.py | llm_setup.py | py | 5,289 | python | en | code | 1 | github-code | 36 |
770438494 | #Import libraries
import scipy.io as spio
from scipy import fftpack
import matplotlib.pyplot as plt
import numpy as np
#Process the dataset into samples
def process_positions(dataset, positions, output_range=10):
    """Extract a window of samples around each peak position.

    Args:
        dataset: sequence of time-domain values.
        positions: iterable of peak indices into `dataset`.
        output_range: half-width of the extracted window. Defaults to 10,
            matching the previously hard-coded value.

    Returns:
        A list containing one list of samples per position.

    Bugfix: the lower bound is now clamped at 0. Previously a position
    closer than `output_range` to the start of the dataset produced a
    negative slice start, which Python interprets as an offset from the END
    of the sequence and silently returned an empty or wrong window.
    """
    classification_input = []
    for position in positions:
        lower = max(position - output_range, 0)
        upper = position + output_range
        classification_input.append(list(dataset[lower:upper]))
    return classification_input
#Put peak through fft
def process_FFT(time_sample):
    """Return the discrete Fourier transform of one time-domain sample."""
    return fftpack.fft(time_sample)
#Put all peaks through fft and put them in a list.
def process_all_FFT(time_samples):
    """FFT every time-domain sample and flatten each complex spectrum.

    Each spectrum is converted to a real-valued list of interleaved
    [real, imag, real, imag, ...] components so downstream classifiers can
    consume it. (Inlines the one-line process_FFT helper.)
    """
    flattened = []
    for sample in time_samples:
        spectrum = fftpack.fft(sample)
        interleaved = []
        for value in spectrum:
            interleaved.extend((value.real, value.imag))
        flattened.append(interleaved)
    return flattened
#Convert the dataset into frequency series samples.
def time_freq(dataset, positions):
    """Convert peak positions in `dataset` to frequency-domain samples."""
    return process_all_FFT(process_positions(dataset, positions))
| khb00/peak_classifier_and_detector | TimeFreq.py | TimeFreq.py | py | 1,337 | python | en | code | 0 | github-code | 36 |
69889167786 | import torch
import torch.nn as nn
from attention import MultiheadedAttention
from feed_forward import PositionWiseDenseNetwork, LayerNorm
class DecoderBlock(nn.Module):
    """One Transformer decoder layer (post-norm).

    Masked self-attention, decoder-encoder (cross) attention and a
    position-wise feed-forward sublayer, each wrapped in a residual add,
    LayerNorm and dropout.
    """

    def __init__(self,
                 key_dim: int = 64,
                 embedding_dim: int = 512,
                 heads_number: int = 8,
                 hidden_dim: int = 2048,
                 dropout_prob: float = 0.1) -> None:
        super().__init__()
        self.key_dim = key_dim
        self.heads_number = heads_number
        self.embedding_dim = embedding_dim
        # Masked self-attention over the decoder's own inputs.
        self.decoder_self_attention = MultiheadedAttention(key_dim=key_dim,
                                                           embedding_dim=embedding_dim,
                                                           heads_number=heads_number)
        self.layer_norm_0 = LayerNorm(embedding_dim=embedding_dim)
        self.dropout_0 = nn.Dropout(p=dropout_prob)
        # Cross-attention: decoder queries attend over encoder outputs.
        self.decoder_encoder_attention = MultiheadedAttention(key_dim=key_dim,
                                                              embedding_dim=embedding_dim,
                                                              heads_number=heads_number)
        self.layer_norm_1 = LayerNorm(embedding_dim=embedding_dim)
        self.dropout_1 = nn.Dropout(p=dropout_prob)
        self.position_wise_dense = PositionWiseDenseNetwork(hidden_dim=hidden_dim,
                                                            embedding_dim=embedding_dim,
                                                            dropout_prob=dropout_prob)
        self.layer_norm_2 = LayerNorm(embedding_dim=embedding_dim)
        self.dropout_2 = nn.Dropout(p=dropout_prob)

    def forward(self,
                x: torch.Tensor,
                encoder_outputs: torch.Tensor,
                encoder_padding_mask: torch.Tensor,
                decoder_padding_mask: torch.Tensor) -> torch.Tensor:
        """Run one decoder layer. Masks are boolean with True = masked out."""
        tokens_in_document = x.shape[1]
        # [batch, 1, 1, tokens]: broadcasts over heads and query positions.
        decoder_mask = decoder_padding_mask.unsqueeze(dim=1).unsqueeze(dim=2)
        # Causal (subsequent-position) mask. BUGFIX: allocate it on the same
        # device as the inputs -- the original used the default (CPU) device,
        # which fails as soon as the model runs on GPU. (Also dropped the
        # unused `batch_size` local.)
        subsequent_mask = torch.ones((tokens_in_document, tokens_in_document),
                                     dtype=torch.bool, device=x.device)
        subsequent_mask = torch.triu(subsequent_mask, diagonal=1)
        subsequent_mask = subsequent_mask.unsqueeze(dim=0).unsqueeze(dim=1)
        decoder_mask = decoder_mask | subsequent_mask
        self_attention_representations = self.decoder_self_attention(x, x, x, decoder_mask)
        x = self.layer_norm_0(x + self_attention_representations)
        x = self.dropout_0(x)
        encoder_padding_mask = encoder_padding_mask.unsqueeze(dim=1).unsqueeze(dim=2)
        attention_representations = self.decoder_encoder_attention(x, encoder_outputs, encoder_outputs, encoder_padding_mask)
        x = self.layer_norm_1(x + attention_representations)
        x = self.dropout_1(x)
        position_wise_values = self.position_wise_dense(x)
        x = self.layer_norm_2(x + position_wise_values)
        x = self.dropout_2(x)
        return x
class Decoder(nn.Module):
    """Stack of DecoderBlocks followed by a linear projection to vocab logits."""

    def __init__(self,
                 vocabulary_size: int,
                 blocks_number: int = 8,
                 key_dim: int = 64,
                 embedding_dim: int = 512,
                 heads_number: int = 8,
                 hidden_dim: int = 2048,
                 dropout_prob: float = 0.1) -> None:
        super().__init__()
        self.blocks_number = blocks_number
        blocks = [
            DecoderBlock(key_dim=key_dim,
                         embedding_dim=embedding_dim,
                         heads_number=heads_number,
                         hidden_dim=hidden_dim,
                         dropout_prob=dropout_prob)
            for _ in range(self.blocks_number)
        ]
        self.decoder_blocks = nn.ModuleList(blocks)
        # Output projection weights: drawn uniformly, then immediately
        # re-initialised with Xavier (the torch.rand values are overwritten).
        self.output_weights = nn.Parameter(torch.rand(size=(embedding_dim, vocabulary_size)))
        nn.init.xavier_uniform_(self.output_weights)

    def forward(self,
                x: torch.Tensor,
                encoder_outputs: torch.Tensor,
                decoder_padding_mask: torch.Tensor,
                encoder_padding_mask: torch.Tensor) -> torch.Tensor:
        """Run all decoder blocks, then project to vocabulary logits."""
        hidden = x
        for block in self.decoder_blocks:
            hidden = block(hidden, encoder_outputs, encoder_padding_mask, decoder_padding_mask)
        # Raw logits are returned: softmax is applied inside the loss.
        output_logits = torch.matmul(hidden, self.output_weights)
        return output_logits
| KolodziejczykWaldemar/Transformers | decoder.py | decoder.py | py | 4,651 | python | en | code | 0 | github-code | 36 |
22625649989 | import pygame
import random
import time
#飞机大战
#手机上单手操作游戏
#屏幕长方形
# **************************我方飞机
class Hero(object):
    """The player's ship: a sprite that is drawn at a given (x, y)."""

    def __init__(self, _screen, _x, _y):
        self.image = pygame.image.load("images\hero.gif")
        self.rect = self.image.get_rect()
        self.width, self.height = self.rect.width, self.rect.height
        self.screen = _screen
        self.x, self.y = _x, _y

    def show(self, _x, _y):
        """Move the ship to (_x, _y) and blit it onto the screen."""
        self.x, self.y = _x, _y
        # Re-read the cached size each frame (mirrors original behaviour;
        # the rect itself never changes).
        self.width, self.height = self.rect.width, self.rect.height
        self.screen.blit(self.image, (self.x, self.y))
# --- pygame / asset initialisation (module-level game state) ---
pygame.init()
pygame.mixer.init()
font = pygame.font.Font("C:\Windows\Fonts\SimHei.ttf",25)
back_music = pygame.mixer.Sound("sound\game_music.ogg")
back_music.play()
# ****************** background music ****************************
screen = pygame.display.set_mode((495,800))
bg = pygame.image.load(r"images\background.png")
bg = pygame.transform.scale(bg, (498, 800))
# ********************************** bullets
bullet = pygame.image.load(r"images\bullet.png")
b_rect = bullet.get_rect()
b_w = b_rect.width
b_h = b_rect.height
b_x = []      # x positions of live bullets
b_y = []      # y positions of live bullets
b_v = 30      # frames between shots (fire-rate cooldown)
times = b_v   # countdown until the next bullet may fire
# *********************** enemy planes
# small fighter (enemy0): live sprite + 4 explosion frames
enemy1 = pygame.image.load(r"images\enemy0_down1.png")
enemy2 = pygame.image.load(r"images\enemy0_down2.png")
enemy3 = pygame.image.load(r"images\enemy0_down3.png")
enemy4 = pygame.image.load(r"images\enemy0_down4.png")
enemy = pygame.image.load(r"images\enemy0.png")
list_enemy_down = []
list_enemy_down.append(enemy1)
list_enemy_down.append(enemy2)
list_enemy_down.append(enemy3)
list_enemy_down.append(enemy4)
e_rect = enemy.get_rect()
e_h = e_rect.height
e_w = e_rect.width
# medium fighter (enemy1): live sprite + 4 explosion frames
mid_enemy = pygame.image.load(r"images\enemy1.png")
mid_enemy1 = pygame.image.load(r"images\enemy1_down1.png")
mid_enemy2 = pygame.image.load(r"images\enemy1_down2.png")
mid_enemy3 = pygame.image.load(r"images\enemy1_down3.png")
mid_enemy4 = pygame.image.load(r"images\enemy1_down4.png")
mid_rect = mid_enemy.get_rect()
mid_h = mid_rect.height
mid_w = mid_rect.width
mid_ex = []   # NOTE(review): appears unused in the rest of this file
mid_ey = []   # NOTE(review): appears unused in the rest of this file
heroA = Hero(screen,100,100)
# spawn coordinates for the small enemy planes
list_ex = []
list_ey = []
for i in range(5):
    enemyx = random.randint(50,400)
    enemyy = random.randint(-100,-50)
    list_ex.append(enemyx)
    list_ey.append(enemyy)
midx = random.randint(50, 400)
midy = random.randint(-300, -100)
def collsion(bullet_x,bullet_y,bullet_rect,p_x,p_y,p_rect):
    """Axis-aligned rectangle overlap test between a bullet and a plane.

    Edges that merely touch do NOT count as a collision. Prints a message
    ("collision happened") and returns True on overlap, False otherwise.
    (Name kept as-is -- "collsion" -- for caller compatibility.)
    """
    overlap_x = bullet_x + bullet_rect.width > p_x and bullet_x < p_x + p_rect.width
    overlap_y = bullet_y + bullet_rect.height > p_y and bullet_y < p_y + p_rect.height
    if overlap_x and overlap_y:
        print("发生碰撞")
        return True
    return False
# Explosion helper (kept for reference; superseded by the inline
# explosion-animation code in the main loop below).
# def boom(_screen,list_time,list_x,list_y,_flag, list_image):
#     if _flag == 1:
#         start = time.time()
#         for i in range(len(list_time)):
#             if start-list_time[i] < 0.2:
#                 _screen.blit(list_image[0], (list_x[i], list_y[i]))
#             elif 0.2 < start-list_time[i] < 0.4:
#                 _screen.blit(list_image[1], (list_x[i], list_y[i]))
#             elif 0.4 < start-list_time[i] < 0.6:
#                 _screen.blit(list_image[2], (list_x[i], list_y[i]))
#             elif 0.6 < start-list_time[i] < 0.8:
#                 _screen.blit(list_image[3], (list_x[i], list_y[i]))
shoot_speed = 5
# small fighters: explosion bookkeeping
end = []          # explosion start timestamps
boom_x = []       # explosion x positions
boom_y = []       # explosion y positions
flag = 0          # becomes 1 once any small fighter has exploded
# medium fighter: explosion bookkeeping
mid_end = []
mid_boom_x = []
mid_boom_y = []
mid_flag = 0
# score
score = 0
blood = 5         # medium fighter hit points
# gate for launching the medium fighter
send = 0
# Main game loop: draw everything, move bullets/enemies, handle collisions.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit()
    screen.blit(bg, (0, 0))
    # The hero follows the (hidden) mouse cursor, centred on it.
    hx, hy = pygame.mouse.get_pos()
    pygame.mouse.set_visible(False)
    heroA.show(hx-heroA.width/2, hy-heroA.height/2)
    # Draw the small enemy planes, scroll them downward, respawn above the
    # screen once they leave the bottom edge.
    for i in range(5):
        screen.blit(enemy,(list_ex[i],list_ey[i]))
        if list_ey[i] < 800:
            list_ey[i] += 1
        else:
            list_ey[i] = random.randint(-100,-50)
    screen.blit(mid_enemy, (midx, midy))
    # Launch the medium fighter every 12 points.
    if score != 0 and score%12 == 0:
        send = score
    if send != 0 and send % 12 == 0:
        midy += 0.5
        if midy > 800:
            send = 0
            midy = random.randint(-300, -100)
    # Fire a bullet whenever the cooldown expires.
    if times:
        times -= 1
    else:
        b_x.append(hx - b_w/2+2)
        b_y.append(hy - heroA.height / 2- b_h)
        times = b_v
    for i in range(len(b_x)):
        screen.blit(bullet, (b_x[i], b_y[i]))
        b_y[i] -= shoot_speed
        for j in range(len(list_ex)):
            if collsion(b_x[i], b_y[i], b_rect, list_ex[j], list_ey[j], e_rect):
                b_y[i] = -100  # remove the bullet
                score += 1
                flag = 1
                end.append(time.time())
                boom_x.append(list_ex[j])
                boom_y.append(list_ey[j])
                list_ey[j] = random.randint(-100, -50)  # respawn the plane
        if collsion(b_x[i], b_y[i], b_rect, midx, midy, mid_rect):
            blood -= 1
            b_y[i] = -100  # remove the bullet
            if blood <= 0:
                mid_flag = 1
                mid_end.append(time.time())
                mid_boom_x.append(midx)
                mid_boom_y.append(midy)
                midy = random.randint(-300, -100)  # respawn the plane
                midx = random.randint(50, 400)
                score += 1
                blood = 5
    # Small-fighter explosion animation (4 frames of 0.2 s each).
    if flag == 1:
        start = time.time()
        for i in range(len(end)):
            if start-end[i] < 0.2:
                screen.blit(enemy1, (boom_x[i], boom_y[i]))
            elif 0.2 < start-end[i] < 0.4:
                screen.blit(enemy2, (boom_x[i], boom_y[i]))
            elif 0.4 < start-end[i] < 0.6:
                screen.blit(enemy3, (boom_x[i], boom_y[i]))
            elif 0.6 < start-end[i] < 0.8:
                screen.blit(enemy4, (boom_x[i], boom_y[i]))
    # Medium-fighter explosion animation.
    if mid_flag == 1:
        mid_start = time.time()
        for i in range(len(mid_end)):
            # BUGFIX: the first frame originally compared start-end[i] (the
            # SMALL fighters' timers) -- wrong animation timing, and a
            # NameError if a medium fighter died before any small one.
            if mid_start-mid_end[i] < 0.2:
                screen.blit(mid_enemy1, (mid_boom_x[i], mid_boom_y[i]))
            elif 0.2 < mid_start-mid_end[i] < 0.4:
                screen.blit(mid_enemy2, (mid_boom_x[i], mid_boom_y[i]))
            elif 0.4 < mid_start-mid_end[i] < 0.6:
                screen.blit(mid_enemy3, (mid_boom_x[i], mid_boom_y[i]))
            elif 0.6 < mid_start-mid_end[i] < 0.8:
                screen.blit(mid_enemy4, (mid_boom_x[i], mid_boom_y[i]))
    # Cull removed/off-screen bullets. BUGFIX: the original popped from b_y
    # while iterating it, which skips elements and can un-pair b_x/b_y
    # entries when two bullets share the same y value. Rebuild both lists
    # in place instead (the list objects themselves must be preserved).
    kept = [(x0, y0) for x0, y0 in zip(b_x, b_y) if y0 >= 0]
    b_x[:] = [x0 for x0, _ in kept]
    b_y[:] = [y0 for _, y0 in kept]
    scorep = font.render("得分:"+str(score),True,(255,255,255))
    screen.blit(scorep,(10,20))
    pygame.display.update()
| gaicigame99/GuangdongUniversityofFinance-Economics | airplaneWar/黄海辉/飞机大战.py | 飞机大战.py | py | 7,173 | python | en | code | 3 | github-code | 36 |
3825597824 | """A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))  # NOTE(review): computed but never used below
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
    name="emu-docker-tools",
    version="0.1.0",
    description="Tools to create and deploy android emulator docker containers.",
    url="https://github.com/kneczaj/android-emulator-docker",
    author="Kamil Neczaj",
    author_email="kneczaj@protonmail.com",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: System :: Emulators",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    keywords="android emulator virtualization",
    packages=find_packages(),
    python_requires=">=3.0, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
    # Runtime dependency on Google's emu-docker tooling.
    install_requires=[
        "emu-docker",
    ],
    package_data={},
    data_files={},
    project_urls={
        "Bug Reports": "https://github.com/kneczaj/android-emulator-docker/issues",
        "Source": "https://github.com/kneczaj/android-emulator-docker",
    },
)
| kneczaj/android-emulator-docker | setup.py | setup.py | py | 1,553 | python | en | code | 0 | github-code | 36 |
21054969908 | import numpy as np
from scipy.special import logsumexp, gammaln
from astropy import constants, units as au
from astropy.units import Quantity
Gauss = 1e-4 * au.T
au.set_enabled_equivalencies(au.dimensionless_angles())
def pad_with_absorbing_boundary_conditions(k2, k02, N, *coords, dn_max=0.05):
    """Pad a squared-wavenumber field with an "absorbing" boundary layer.

    Args:
        k2: squared wavenumber field on the grid defined by `coords`.
        k02: squared vacuum wavenumber.
        N: order parameter controlling the boundary profile / thickness.
        *coords: one uniformly spaced coordinate array per axis of `k2`.
        dn_max: maximum refractive-index deviation used to set the
            extinction coefficient; if None it is estimated from `k2`.

    Returns:
        (k2_padded, m, coords_padded) where `m` lists the number of padding
        cells added per axis (same count on each side).
    """
    if dn_max is None:
        dn_max = np.max(np.abs(np.sqrt(k2 / k02) - 1.))
        print("Using the dn_max={}".format(dn_max))
    alpha = np.abs(dn_max)*np.sqrt(k02)#/(np.pi*2.)
    l = N / alpha
    print("Extinction alpha={}".format(alpha))
    print("Extinction l={}".format(l))
    def log_Pn(alpha, x, N):
        # log of the degree-N Taylor partial sum of exp(alpha*x), evaluated
        # stably in log space; 0 where x <= 0.
        log_res = -np.inf
        for n in range(N + 1):
            log_res = np.logaddexp(n * (np.log(alpha * x)) - gammaln(n + 1.), log_res)
        return np.where(x > 0, log_res, 0.)
    def evaluate_k2(alpha, x):
        # NOTE(review): the complex absorbing value computed on the next
        # line is discarded -- the function actually returns the vacuum
        # value k02 everywhere, so this "absorbing" layer currently does
        # not absorb. Confirm whether the first expression was meant to be
        # returned instead.
        k2 = k02 + alpha**2 - 2j*alpha*np.sqrt(k02)
        return k02*np.ones(x.shape)
    def _evaluate_k2(alpha, x, N):
        # Unused alternative (order-N polynomial absorber); defined but
        # never called.
        return alpha**2 * np.exp(np.log(N - alpha * x + 2j * np.sqrt(k02) * x) + (N - 1) * (np.log(alpha * x))
                                 - log_Pn(alpha, x, N) - gammaln(N + 1.)) + k02
    def _add_other_dims(v, shape, i):
        """
        Broadcast 1-D `v` along axis `i`, tiling over the other axes.
        Args:
            v: [D]
            shape: (s0,s1,s2,...)
            i: int
        Returns: same shape as `shape` except ith dim which is D.
        """
        dims = list(range(len(shape)))
        del dims[i]
        v = np.expand_dims(v, dims)
        grow = list(shape)
        grow[i] = 1
        return np.tile(v,grow)
    m = []
    out_coords = []
    for i,x in enumerate(coords):
        dx = x[1] - x[0]
        # Number of padding cells needed to cover the extinction length l.
        M = int(l / dx) + 1
        m.append(M)
        print("Dimension {} padded by {}".format(i, M))
        x_pad = np.arange(1,M+1)*dx
        k2_pad = evaluate_k2(alpha, x_pad)
        k2_before = _add_other_dims(k2_pad[::-1], k2.shape, i)
        k2_after = _add_other_dims(k2_pad, k2.shape, i)
        k2 = np.concatenate([k2_before, k2, k2_after], axis=i)
        x_out = np.concatenate([x[0] - np.arange(1,M+1)[::-1]*dx, x, x[-1]+np.arange(1,M+1)*dx])
        out_coords.append(x_out)
    return k2, m, tuple(out_coords)
def pad_with_vacuum_conditions(k2, k02, pad_size, *coords):
    """Pad a squared-wavenumber field with `pad_size` cells of vacuum (k02)
    on each side of every axis, extending the coordinate arrays to match.

    Returns (k2_padded, m, coords_padded) where m is the per-axis list of
    pad counts (always pad_size).
    """
    def _vacuum_k2(x):
        # Vacuum value at every padded coordinate.
        return k02 * np.ones(x.shape)

    def _broadcast_axis(values, shape, axis):
        # Place 1-D `values` along `axis`, tiling over all other axes.
        other_axes = [d for d in range(len(shape)) if d != axis]
        block = np.expand_dims(values, other_axes)
        reps = [1 if d == axis else n for d, n in enumerate(shape)]
        return np.tile(block, reps)

    pad_counts = []
    padded_coords = []
    for axis, axis_coords in enumerate(coords):
        print("Dimension {} padded by {}".format(axis, pad_size))
        step = axis_coords[1] - axis_coords[0]
        offsets = np.arange(1, pad_size + 1) * step
        pad_values = _vacuum_k2(offsets)
        pad_counts.append(pad_size)
        before = _broadcast_axis(pad_values[::-1], k2.shape, axis)
        after = _broadcast_axis(pad_values, k2.shape, axis)
        k2 = np.concatenate([before, k2, after], axis=axis)
        padded_coords.append(np.concatenate([
            axis_coords[0] - offsets[::-1],
            axis_coords,
            axis_coords[-1] + offsets,
        ]))
    return k2, pad_counts, tuple(padded_coords)
def appleton_hartree(ne, nu):
    """Simplified (unmagnetised, collisionless) Appleton-Hartree relation.

    Computes 1 - (omega_p / omega)**2 from electron density `ne` and wave
    frequency `nu`. NOTE(review): callers treat the returned value as a
    refractive index; in the full dispersion relation this quantity is
    n**2 -- confirm the intended convention.
    """
    plasma_omega_squared = ne * (constants.e.si ** 2 / constants.eps0 / constants.m_e)
    wave_omega_squared = (2 * np.pi * nu) ** 2
    return 1. - plasma_omega_squared / wave_omega_squared
def partial_blockage(N, nu, sinusoidal_blockage=False):
    """Squared-wavenumber field for an ionospheric bar partially blocking
    the line of sight between a source and a receiver.

        |      * source
        |
        |    _________________
        |    |   n = 1 - dn
        |    |________________
        |
        |
        | x receiver
        |(0,0)

    Args:
        N: number of grid points per axis (quarter-wavelength spacing).
        nu: observing frequency (with units attached).
        sinusoidal_blockage: if True, modulate the bar's index sinusoidally
            along x instead of using a constant ionospheric value.

    Returns:
        (x, z, k2, k0**2): grid coordinates, squared wavenumber field and
        squared vacuum wavenumber.
    """
    # Hard-coded electron density for the blocking structure.
    ne = 2e12 / au.m ** 3
    wavelength = constants.c.si / nu
    x = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    z = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    n_ionosphere = appleton_hartree(ne, nu)
    k0 = 2. * np.pi / wavelength
    X, Z = np.meshgrid(x, z, indexing='ij')
    # The bar starts half-way up in z, is 10 wavelengths thick, and spans
    # from the left edge across the full width.
    z_bar_bottom = z.min() + 0.5 * (z.max() - z.min())
    z_bar_top = z_bar_bottom + 10. * wavelength
    x_bar_left = x.min() + 0. * (x.max() - x.min())
    where_bar = (X > x_bar_left) & (Z > z_bar_bottom) & (Z < z_bar_top)
    if sinusoidal_blockage:
        refractive_index = np.where(where_bar, 1. - (1. - n_ionosphere) * np.cos(2 * np.pi * X / (10. * wavelength)),
                                    1.)
    else:
        refractive_index = np.where(where_bar, n_ionosphere, 1.)
    k2 = 4. * np.pi ** 2 * refractive_index ** 2 / wavelength ** 2
    return x, z, k2, k0 ** 2
def single_blob(N, nu, l):
    """Squared-wavenumber field for a single Gaussian refractive-index blob
    centred in the grid (the old docstring's bar diagram was copied from
    partial_blockage and did not describe this geometry).

    Args:
        N: number of grid points per axis (quarter-wavelength spacing).
        nu: observing frequency (with units attached).
        l: Gaussian 1-sigma width of the blob.

    Returns:
        (x, z, k2, k0**2): grid coordinates, squared wavenumber field and
        squared vacuum wavenumber.
    """
    # Hard-coded electron density, as in partial_blockage.
    ne = 2e12 / au.m ** 3
    wavelength = constants.c.si / nu
    x = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    z = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    n_ionosphere = appleton_hartree(ne, nu)
    k0 = 2. * np.pi / wavelength
    X, Z = np.meshgrid(x, z, indexing='ij')
    # Blob centred at the middle of the grid in both axes.
    z_blob = z.min() + 0.5 * (z.max() - z.min())
    x_blob = x.min() + 0.5 * (x.max() - x.min())
    refractive_index = (n_ionosphere - 1) * np.exp(-0.5*((X-x_blob)**2 + (Z-z_blob)**2)/l**2) + 1.
    k2 = 4. * np.pi ** 2 * refractive_index ** 2 / wavelength ** 2
    return x, z, k2, k0 ** 2
def test_partial_blockage():
    """Visual smoke test: plot the scattering potential for the constant
    and sinusoidal partial blockages, then the absorbing-boundary padding.
    Blocks on plt.show(); requires an interactive matplotlib backend."""
    import pylab as plt
    nu = 100e6 / au.s
    N = 1000
    x, z, k2, k02 = partial_blockage(N, nu)
    scattering_potential = k2 - k02
    plt.imshow(scattering_potential.T.value, interpolation='nearest', origin='lower',
               extent=(x.min().value, x.max().value, z.min().value, z.max().value),
               cmap='bone')
    plt.title(r'Partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {}'.format(nu.to(au.MHz)))
    plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
    plt.show()
    x, z, k2, k02 = partial_blockage(N, nu, sinusoidal_blockage=True)
    scattering_potential = k2 - k02
    plt.imshow(scattering_potential.T.value, interpolation='nearest', origin='lower',
               extent=(x.min().value, x.max().value, z.min().value, z.max().value),
               cmap='bone')
    plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {}'.format(nu.to(au.MHz)))
    plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
    plt.show()
    k2, m, (x,z) = pad_with_absorbing_boundary_conditions(k2, k02, 4, x, z, dn_max=0.01)
    scattering_potential = k2 - k02
    plt.imshow(np.abs(scattering_potential.T.value), interpolation='nearest', origin='lower',
               extent=(x.min().value, x.max().value, z.min().value, z.max().value),
               cmap='bone')
    print(x)
    # Red box marks the original (unpadded) domain inside the boundary layer.
    plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value, Quantity([z[m[1]], z[m[1]],z[-m[1]],z[-m[1]],z[m[1]]]).value, c='red')
    plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {} with boundary'.format(nu.to(au.MHz)))
    plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
    plt.show()
| Joshuaalbert/born_rime | born_rime/potentials.py | potentials.py | py | 7,396 | python | en | code | 1 | github-code | 36 |
7060266333 | # Hen1 Problem
# Student B
# Reads 7 daily comma-separated egg counts (one number per hen), prints a
# per-day total, then the weekly average and grand total.
HENS = 4
DAYS = 7
grand_sum = 0
for i in range(DAYS):
    # Each prompt expects a comma-separated count per hen, e.g. "1,2,0,3".
    day_sum = sum(int(s) for s in input('Enter eggs laid by each hen for day {}: '.format(i + 1)).split(','))
    print('Day {} {} egg(s)'.format(i + 1, day_sum))
    grand_sum += day_sum
print()
# NOTE(review): this divides the weekly total by HENS, i.e. average eggs
# per hen over the whole week -- confirm that is the intended statistic
# (an average per day would divide by DAYS).
print('Average number of eggs ' + str(round(grand_sum / HENS)))
print('Total number of eggs for the week ' + str(grand_sum))
| ceucomputing/automarker | test2/student_B/HEN1_B.py | HEN1_B.py | py | 421 | python | en | code | 1 | github-code | 36 |
40027862614 | # Created on 12/5/15
# Advent of Code 2015, day 5 part 2: count "nice" strings in input.txt.
if __name__ == '__main__':
    f_input = []
    with open("input.txt") as f:
        f_input = f.readlines()
    total = 0
    for line in f_input:
        check1 = False  # rule 1: a letter repeats with one letter between ("xyx")
        check2 = False  # rule 2: some two-letter pair appears at least twice
        for i in range(len(line) - 2):
            if line[i] == line[i + 2]:
                check1 = True
                break
        # For some semblance of speed, added a check to see if performing the second check is even worth it
        if check1:
            for i in range(len(line) - 1):
                pair = line[i] + line[i + 1]
                # str.count counts non-overlapping occurrences, which matches
                # the puzzle's "without overlapping" requirement.
                if line.count(pair) >= 2:
                    check2 = True
                    break
        if check1 and check2:
            total += 1
    print(total)
| liamrahav/adventofcode-2015 | day5/day5_part2.py | day5_part2.py | py | 740 | python | en | code | 0 | github-code | 36 |
72394020264 | # 1 Вычислить числить число c заданной точностью d
# Example:
# - for d = 0.001, pi = 3.141
# Input: 0.01   -> Output: 3.14
# Input: 0.001  -> Output: 3.141
import math

print(math.pi)
# Prompt text ("enter a number") kept in Russian: it is user-facing output.
num = float(input("Введите число: "))
def schet_znakov(number_to_count):
    """Return the number of significant decimal digits of a float.

    Example: 0.01 -> 2, 0.001 -> 3, 3.0 -> 0.

    Bugfix: the previous "multiply by 10 until integral" loop relied on
    exact binary floats, so it never terminated at the right count for
    inputs such as 0.001 (0.001 * 1000 != 1.0 exactly). repr() yields the
    shortest decimal string that round-trips the float, avoiding that.
    """
    text = repr(float(number_to_count))
    if "e" in text or "E" in text:
        # Scientific notation (very small/large inputs): expand it first.
        from decimal import Decimal
        text = format(Decimal(text), "f")
    if "." not in text:
        return 0
    return len(text.split(".")[1].rstrip("0"))
kol_znakov= schet_znakov(num)
# Round pi to as many decimal places as the precision value has.
print(round(math.pi, kol_znakov))
27541539070 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Travis Anderson"
"""
This is for contacting twitter, and watching a specific user or word
"""
import logging
import tweepy
import time
import os
import datetime
from threading import Thread
import threading
logger = logging.getLogger(__name__)
exit_flag = False
def _start(self, is_async):
    """Monkey patch for tweepy.Stream._start: run the stream on a daemon
    thread when is_async is True so the main program keeps running.

    NOTE(review): this replicates tweepy's internal _start; it will break
    if the installed tweepy version changes that method's signature or
    threading behaviour.
    """
    self.running = True
    if is_async:
        logger.warning("Initiating multithread")
        self._thread = Thread(
            target=self._run, name="Tweepy Thread", daemon=True)
        self._thread.start()
    else:
        self._run()
class WatchTwitter(tweepy.StreamListener):
    """Class that subscribes to keywords on twitter.

    Wraps a tweepy filtered stream: keywords can be added/removed at
    runtime (each change restarts the stream) and the stream can be paused
    and resumed.
    """

    def __init__(self):
        """Build the tweepy API client from environment credentials
        (API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_SECRET).

        NOTE(review): tweepy.StreamListener.__init__ is never called here;
        confirm the listener does not rely on base-class state.
        """
        logger.info("Creating api")
        consumer_key = os.getenv("API_KEY")
        assert consumer_key is not None
        consumer_secret = os.getenv("API_SECRET")
        access_token = os.getenv("ACCESS_TOKEN")
        access_token_secret = os.getenv("ACCESS_SECRET")
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(auth)
        # Install the monkey patch so Stream.filter(is_async=True) runs on
        # a daemon thread (see _start above).
        tweepy.Stream._start = _start
        self.subscriptions = []
        self._stop_event = threading.Event()
        self.stream_timestamp = 0
        self.master_timestamp = 0
        self.register = None
    def __enter__(self):
        return self
    # NOTE(review): parameter name `type` shadows the builtin; kept for
    # signature compatibility with the context-manager protocol.
    def __exit__(self, type, value, traceback):
        if self.stream.running:
            self.stream.running = False
    def add_subscription(self, subscribe_to):
        """If stream is running adds new subscription, restarts stream."""
        if subscribe_to not in self.subscriptions:
            logger.info('Adding subscription: {}'.format(subscribe_to))
            self.subscriptions.append(subscribe_to)
            logger.info(self.subscriptions)
            # tweepy streams cannot change filters in flight: stop+restart.
            self.stream.running = False
            self.start_stream()
        else:
            logger.info("Already subscribed: {}" .format(self.subscriptions))
    def remove_subscription(self, unsubscribe_from):
        """Drop a keyword and restart the stream without it."""
        logger.info("Attempting to remove {}".format(unsubscribe_from))
        if unsubscribe_from in self.subscriptions:
            logger.info(
                'Removing from subscriptions: {}'.format(unsubscribe_from))
            self.subscriptions.remove(unsubscribe_from)
            self.stream.running = False
            self.start_stream()
    def pause_stream(self):
        """Stop delivery without clearing the subscription list."""
        if self.stream.running:
            logger.info("Pausing all subscriptions: {}".format(
                self.subscriptions))
            self.stream.running = False
    def restart_stream(self):
        """Resume a paused stream."""
        if not self.stream.running:
            logger.info("Restarting stream")
            self.start_stream()
    def init_stream(self, string):
        """Start the stream with an initial keyword."""
        self.subscriptions.append(string)
        self.start_stream()
    def start_stream(self):
        """(Re)create the tweepy Stream and begin async filtering."""
        global exit_flag
        exit_flag = False
        logger.info('Subscriptions: {}'.format(self.subscriptions))
        self.stream = tweepy.Stream(auth=self.api.auth, listener=self)
        self.stream.filter(track=self.subscriptions, is_async=True)
    def on_status(self, status):
        """Callback for each matching tweet: currently just logs its text."""
        # need a stream handler, if not none run the stream handler and
        # send the status to slack, else return not exit flag
        logger.info(status.text)
    def on_connect(self):
        """Record the (first) connection timestamp for monitoring."""
        self.stream_timestamp = datetime.datetime.now()
        logger.info('Connected to twitter at: {}'.format(
            datetime.datetime.now()))
        if not self.master_timestamp:
            self.master_timestamp = self.stream_timestamp
def log_config():
    """Configure the root logger's message and date formats.

    logging.basicConfig always returns None; the `return` is kept only for
    interface compatibility with the original.
    """
    log_format = (
        '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s '
        '[%(threadName) -12s] %(message)s'
    )
    return logging.basicConfig(format=log_format, datefmt='%Y-%m-%d %H:%M:%S')
def log_set_level():
    """Set this module's logger threshold to DEBUG."""
    logger.setLevel(logging.DEBUG)
def init_logger():
    """Configure log format and level for this module.

    Delegates to log_config() and log_set_level() instead of duplicating
    their format string and level verbatim (as the original did), so the
    two code paths cannot drift apart.
    """
    log_config()
    log_set_level()
def main():
    """Demo driver: subscribe to 'python', then exercise pause / add /
    remove in a loop until exit_flag is set externally."""
    global exit_flag
    log_config()
    log_set_level()
    tb = WatchTwitter()
    tb.init_stream('python')
    while not exit_flag:
        time.sleep(5)
        tb.pause_stream()
        time.sleep(5)
        tb.add_subscription('Trump')
        time.sleep(5)
        tb.remove_subscription('Trump')
| tander29/backend-slackbot | twitbot.py | twitbot.py | py | 4,752 | python | en | code | 0 | github-code | 36 |
4887469879 |
instructions = []
# Each line is "<op> <arg>", e.g. "jmp +4": parse into [op, int(arg)] pairs.
with open("input.txt", "r") as f:
    instructions = [[op, int(arg)] for op,arg in (line.split(" ") for line in f)]
# Returns a (bool, int) tuple, the first bool indicating whether or not the
# program halted normally, the second int being the accumulator.
#
# Fun fact: this function is impossible to write for "real" programming
# languages: this is the "halting problem", which Alan Turing proved impossible
# to solve for general computational models. The paper where he proved this ("On
# Computable Numbers") is literally the foundation for the entire field of
# Computer Science.
def halts(instructions):
    """Execute the hand-held console program.

    Returns a (halted, accumulator) tuple: halted is True when the
    instruction pointer lands exactly one past the last instruction, False
    when an instruction is about to execute a second time (infinite loop).

    Fun fact: this function is impossible to write for "real" programming
    languages: this is the "halting problem", which Alan Turing proved
    impossible to solve for general computational models. The paper where
    he proved this ("On Computable Numbers") is literally the foundation
    for the entire field of Computer Science.
    """
    pointer = 0       # traditionally the "instruction pointer"
    accumulator = 0
    executed = set()  # indices already run once (loop detection)
    while pointer < len(instructions) and pointer not in executed:
        executed.add(pointer)
        opcode, argument = instructions[pointer]
        if opcode == "acc":
            accumulator += argument
            pointer += 1
        elif opcode == "jmp":
            pointer += argument
        else:  # "nop"
            pointer += 1
    return pointer == len(instructions), accumulator
def part1(instructions):
    """Accumulator value when the first repeated instruction is reached."""
    _, accumulator = halts(instructions)
    return accumulator
def part2(instructions):
    """Find the single nop<->jmp swap that makes the program halt.

    Returns the final accumulator of the fixed program, or -1 if no single
    swap halts it. Mirrors the original's side effect: the winning patch
    stays applied to `instructions`; failed patches are rolled back.
    """
    for idx, (op, _arg) in enumerate(instructions):
        if op == "acc":
            continue
        # Toggle "nop" <-> "jmp" at this position.
        instructions[idx][0] = "jmp" if op == "nop" else "nop"
        did_halt, accumulator = halts(instructions)
        if did_halt:
            return accumulator
        instructions[idx][0] = op  # restore and try the next candidate
    return -1
# Answers for both puzzle parts.
print(part1(instructions))
print(part2(instructions))
| OskarSigvardsson/adventofcode2020 | day8/day8.py | day8.py | py | 1,862 | python | en | code | 0 | github-code | 36 |
3521482660 | import pytest
import yaml
from meltano.core.behavior.canonical import Canonical
definition = {
    # keys "a" through "j" (range(10) yields ten letters, not a..z as the
    # old comment claimed); odd indices keep their index as the value,
    # even indices map to None so Canonical can drop them
    chr(ord("a") + i): i if i % 2 else None
    for i in range(10)
}
class TestCanonical:
    """Behavioral tests for Canonical: None stripping, YAML dumping,
    nesting, update()/with_attrs(), default values and attribute fallbacks."""
    @pytest.fixture
    def subject(self):
        return Canonical(**definition)
    def test_canonical(self, subject):
        # make sure the Nones are removed
        assert len(list(subject)) == 5
        subject.test = "hello"
        yaml_definition = "\n".join(f"{k}: {v}" for k, v in iter(subject))
        assert yaml.dump(subject).strip() == yaml_definition
    def test_false(self, subject):
        # False is a real value and must survive canonicalization
        subject.false_value = False
        assert subject.canonical()["false_value"] is False
    def test_nested(self, subject):
        nested = Canonical(test="value")
        subject.nested = nested
        assert Canonical.as_canonical(subject)["nested"] == Canonical.as_canonical(
            nested
        )
    def test_nested_empty(self, subject):
        # A nested Canonical with only empty values is dropped entirely
        nested = Canonical(test="")
        subject.nested = nested
        assert "nested" not in Canonical.as_canonical(subject)
    def test_update_canonical(self, subject):
        subject.update(Canonical(test="value"))
        assert subject.test == "value"
    def test_update_dict(self, subject):
        subject.update({"test": "value"})
        assert subject.test == "value"
    def test_update_kwargs(self, subject):
        subject.update(test="value")
        assert subject.test == "value"
    def test_with_attrs(self, subject):
        subject.test = "value"
        assert subject.with_attrs().canonical() == subject.canonical()
        new = subject.with_attrs(test="other_value")
        assert new.test == "other_value"
        assert new.canonical() == {**subject.canonical(), "test": "other_value"}
        new = subject.with_attrs(new_test="new_value")
        assert new.new_test == "new_value"
        assert new.canonical() == {**subject.canonical(), "new_test": "new_value"}
    def test_defaults(self, subject):
        with pytest.raises(AttributeError):
            subject.test
        subject.test = None
        assert subject.test is None
        # This would typically be set from a Canonical subclass
        subject._defaults["test"] = lambda _: "default"
        # Default values show up when getting an attr
        assert subject.test == "default"
        # But they're not included in the canonical representation
        assert "test" not in subject.canonical()
        subject.test = "changed"
        assert subject.test == "changed"
        assert subject.canonical()["test"] == "changed"
    def test_fallbacks(self, subject):
        # Calling an unknown attribute is not supported
        with pytest.raises(AttributeError):
            subject.unknown
        fallback = Canonical(unknown="value", known="value")
        # This would typically be set from a Canonical subclass
        subject._fallback_to = fallback
        # Unknown attributes fall back
        assert subject.unknown == "value"
        assert "unknown" not in subject.canonical()
        # Known attributes don't fall back
        subject.known = None
        assert subject.known is None
        # Unless we make them
        subject._fallbacks.add("known")
        assert subject.known == "value"
        assert "known" not in subject.canonical()
        # Unless there is nothing to fallback to
        subject._fallback_to = None
        assert subject.known is None
        # Defaults are still applied
        subject._defaults["known"] = lambda _: "default"
        assert subject.known == "default"
        assert "known" not in subject.canonical()
        # Until a value is set
        subject.known = "value"
        assert subject.known == "value"
        assert subject.canonical()["known"] == "value"
| learningequality/meltano | tests/meltano/core/behavior/test_canonical.py | test_canonical.py | py | 3,836 | python | en | code | 1 | github-code | 36 |
37591561830 | #!/usr/bin/env python
'''
Rutgers Data Science Homework Week 3, Assignment #1
To run this script:
pybank.py [--summary_file=SUMMARY_FILE] input_file_1 input_file_2 ...
<Chan Feng> 2018-02
'''
import os
import csv
from argparse import ArgumentParser
# Default output file for the generated summary.
_SUMMARY_FILE = 'pybank_summary.txt'
# Template filled in by summarize(); placeholders match its format() call.
_SUMMARY_FORMAT = '''
Financial Analysis
--------------------------------
Total Month: {total_month}
Total Revenue: ${total_revenue:,}
Average Revenue Change: ${avg_revenue_change:,}
Greatest Increase in Revenue: {greatest_increase_month} (${greatest_increase:,})
Greatest Decrease in Revenue: {greatest_decrease_month} (${greatest_decrease:,})'''
# Month abbreviation -> month number, used for chronological sorting.
_MONTH_LOOKUP = {
    'Jan': 1,
    'Feb': 2,
    'Mar': 3,
    'Apr': 4,
    'May': 5,
    'Jun': 6,
    'Jul': 7,
    'Aug': 8,
    'Sep': 9,
    'Oct': 10,
    'Nov': 11,
    'Dec': 12
}
# Input CSV files are resolved relative to this directory.
_DATA_DIR = 'raw_data'
def main():
    '''
    Parse command-line arguments, aggregate revenue from each input CSV
    (resolved under _DATA_DIR) and print/write the summary.

    return: 0 for success
    '''
    arg_parser = ArgumentParser()
    arg_parser.add_argument('input_files', type=str, nargs='+',
                            help='One or more input files')
    arg_parser.add_argument('--summary_file', type=str,
                            help='Default summary file name is ' + _SUMMARY_FILE )
    args = arg_parser.parse_args()
    data = {}
    # Accumulate all input files into one month -> revenue mapping.
    for input_file in [os.path.join(_DATA_DIR, f) for f in args.input_files]:
        gather_data(data, input_file)
    summarize(data, args.summary_file or _SUMMARY_FILE)
    return 0
def gather_data(data, input_file):
    '''
    Accumulate monthly revenue from one CSV file into *data*.

    :param data: dict mapping normalized month -> running revenue total;
        mutated in place
    :param input_file: CSV file name (header row, then month,revenue rows)
    :return: None (the previous ":return: 0 for success" was inaccurate)
    '''
    with open(input_file, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader, None)  # Skip header
        for row in reader:
            month = normalize_month(row[0])
            data[month] = data.get(month, 0) + int(row[1])
def summarize(data, summary_file=None):
    '''
    Print a financial summary of *data* and optionally write it to a file.

    :param data: dict mapping normalized months ('Jan-12') to revenue totals
    :param summary_file: optional summary file name
    :return: 0 for success
    '''
    total_revenue = 0
    change = 0
    total_change = 0
    total_change_cnt = 0
    prev_revenue = None
    increase_month = None
    increase_revenue = 0
    decrease_month = None
    decrease_revenue = 0
    for month in sorted(data, key=month_sort_key):
        revenue = data[month]
        total_revenue += revenue
        # Compare with the previous month; test against None explicitly so a
        # zero-revenue month is not silently skipped (the original truthiness
        # test mis-handled prev_revenue == 0).
        if prev_revenue is not None:
            change = revenue - prev_revenue
            if change > increase_revenue:
                increase_month = month
                increase_revenue = change
            if change < decrease_revenue:
                decrease_month = month
                decrease_revenue = change
            total_change += change
            total_change_cnt += 1
        prev_revenue = revenue
    # Guard against ZeroDivisionError when there are fewer than two months.
    if total_change_cnt:
        avg_revenue_change = int(round(total_change / total_change_cnt))
    else:
        avg_revenue_change = 0
    summary = _SUMMARY_FORMAT.format(
        total_month=len(data),
        total_revenue=total_revenue,
        avg_revenue_change=avg_revenue_change,
        greatest_increase_month=increase_month,
        greatest_increase=increase_revenue,
        greatest_decrease_month=decrease_month,
        greatest_decrease=decrease_revenue,
    )
    print(summary)
    if summary_file:
        with open(summary_file, 'w', newline='') as outfile:
            outfile.write(summary)
    return 0
def normalize_month(month):
    '''
    Normalize 'Jan-2012' to the short 'Jan-12' form.

    Months already in 'Jan-12' form are returned unchanged.  Assumes one of
    those two formats; a production system would need to be far more
    sophisticated.
    '''
    name, year_text = month.split('-')
    year = int(year_text)
    if year <= 2000:
        return month
    return '{}-{:02d}'.format(name, year - 2000)
def month_sort_key(month):
    '''
    Sort key for 'Jan-12'-style months.

    :param month: 'Jan-12' format
    :return: '12-01' (year first, then zero-padded month number)
    '''
    name, _, year = month.partition('-')
    return '%s-%02d' % (year, _MONTH_LOOKUP[name])
if __name__ == '__main__':
main() | feng443/RUDSWeek3 | PyBank/pybank.py | pybank.py | py | 3,904 | python | en | code | 0 | github-code | 36 |
21871433231 | import jieba,re
#去除标点
def get_text(file_name):
    """Read a UTF-8 text file and strip punctuation/whitespace characters.

    Removes the CJK and ASCII punctuation marks below (plus newlines and
    spaces) so the remaining text can be tokenized cleanly.
    """
    punctuation = '《,》\n。、;":,!? '
    with open(file_name, 'r', encoding='utf-8') as handle:
        content = handle.read()
    return ''.join(ch for ch in content if ch not in punctuation)
# Word-frequency script: tokenize comment.txt and append counts to res.csv.
file_name = 'comment.txt'
text = get_text(file_name)
vlist = jieba.lcut(text)  # tokenize with jieba; returns a list
res_dict = {}
# count token frequencies
for i in vlist:
    res_dict[i] = res_dict.get(i,0) + 1
res_list = list(res_dict.items())
# sort by count, descending
res_list.sort(key = lambda x:x[1], reverse = True)
fin_res_list = []
# drop single-character tokens
for item in res_list:
    if(len(item[0])>=2):
        fin_res_list.append(item)
word_list=[]
words=[]
# NOTE(review): this loop re-reads ignore_dict.txt and re-filters the ENTIRE
# word_list on every iteration, appending duplicates to `words`, so words[i]
# is not guaranteed to correspond to fin_res_list[i].  It also raises
# IndexError when fin_res_list has fewer than 1000 entries.
for i in range(1000):
    word,count = fin_res_list[i]
    pstr = str(i+1) + ':'
    word_list.append(word)
    with open('ignore_dict.txt', 'r', encoding='utf-8') as f:
        ignore_words = f.read().splitlines()
    # walk the tokens collected so far
    for word in word_list:
        if word not in ignore_words:  # skip ignored words
            word = re.sub(r'[\n ]', '', word)
            if len(word) < 1:
                continue
            words.append(word)
    with open("res.csv","a+")as fa:
        fa.write(str(words[i])+","+str(count)+"\n")
| 2412322029/bilibili-spyder | 词频.py | 词频.py | py | 1,370 | python | en | code | 0 | github-code | 36 |
32886590499 | import discord
from discord.ext import commands
from discord.ui import Select, View
from discord.ext.commands import bot
from discord import app_commands
class Select(discord.ui.Select):
    """Dropdown listing the four help sections; each choice replies with an
    ephemeral embed.

    NOTE(review): this class shadows the `Select` imported from discord.ui
    at the top of the file - consider renaming (e.g. HelpSelect).
    """
    def __init__(self):
        options=[
            discord.SelectOption(label="НАВИГАЦИЯ: команды до игры", value="1", emoji="📜", description="Команды которые вы можете использовать до игры!"),
            discord.SelectOption(label="НАВИГАЦИЯ: Команды во время игры", value="2", emoji="🔦", description="Команды которые вы можете использовать во время игры!"),
            discord.SelectOption(label="НАВИГАЦИЯ: Предметы", value="3", emoji="🛠", description="Придметы которые есть в игры, и вы их можете использовать!"),
            discord.SelectOption(label="НАВИГАЦИЯ: Призраки", value="4", emoji="🎃", description="Все призраки нашей игры!")
        ]
        super().__init__(placeholder="Помощь", max_values=1, min_values=1, options=options)
    async def callback(self, interaction: discord.Interaction):
        """Send the embed matching the selected section value ("1".."4")."""
        if self.values[0] == "1":
            emb = discord.Embed(title="НАВИГАЦИЯ: команды до игры", description='`Join` - присоединиться к игре \n`leave` - Отключиться от игры \n`Start` - начать игру', colour = discord.Color.og_blurple() )
            await interaction.response.send_message( embed = emb, ephemeral=True )
        elif self.values[0] == "2":
            emb = discord.Embed(title="НАВИГАЦИЯ: Команды во время игры", description='`end` - закончить ход \n`use_item` - использовать предмет (1/2/3...) \n`inventory` - показывает предметы в инвентаре \n`ghost` - весь список призраков на сервере\n`theend` - закончить игру (1/2/3)', colour = discord.Color.og_blurple() )
            await interaction.response.send_message( embed = emb, ephemeral=True )
        elif self.values[0] == "3":
            emb = discord.Embed(title="НАВИГАЦИЯ: Предметы", description='1 - соль (спасает один раз) \n2 - крест (спасает один раз) \n3 - Датчик Движения \n4 - Датчик Активности Призрака \n5 - Камера \n6 - пустая книга \n7 - книга (Да/не) \n8 - УФ-Фонарик \n 9 - успокоение для призрака(понимажает минимум до нуля) \n 10 - шкатулка призрака(понимажает максимум на 20 единиц)', colour = discord.Color.og_blurple() )
            await interaction.response.send_message( embed = emb, ephemeral=True )
        elif self.values[0] == "4":
            emb = discord.Embed(title="НАВИГАЦИЯ: Призраки", description='1 - ***Полтергейст*** \n2 - ***Демон*** \n3 - ***Тень*** \n4 - ***Мимик***\n5 - ***Дух***\nузнать лучше можно командой `ghost`')
            await interaction.response.send_message( embed = emb, ephemeral=True )
class SelectView(discord.ui.View):
    """View wrapping the help Select menu; times out after 30s by default."""
    def __init__(self, *, timeout=30):
        super().__init__(timeout=timeout)
        self.add_item(Select())
class help(commands.Cog):
    """Cog exposing the /help slash command with a dropdown of help topics."""
    def __init__(self, bot):
        self.bot = bot
    @app_commands.command(name = "help", description="Помощь по командам бота!")
    async def _help(self, interaction: discord.Interaction):
        await interaction.response.send_message("Помощь по командам", view=SelectView(), ephemeral=True)
async def setup(bot):
    """Extension entry point: register the help cog on *bot*."""
    cog = help(bot)
    await bot.add_cog(cog)
37502296937 | # https://school.programmers.co.kr/learn/courses/19344/lessons/242261
from collections import deque
dire = [[-1, 0], [1, 0], [0, -1], [0, 1]]
def CHECK(a, b, g):
    """Return True when cell (a, b) lies OUTSIDE grid *g* (inverted sense)."""
    rows = len(g)
    cols = len(g[0])
    inside = 0 <= a < rows and 0 <= b < cols
    return not inside
def BFS(graph, visit, RB):
    """Joint BFS over (red, blue) marble positions on *graph*.

    RB = [red_x, red_y, blue_x, blue_y]; it is extended in place with a
    depth counter and two reached-goal flags.  Cell values: 3 = red goal,
    4 = blue goal, 5 = wall (presumably; confirm against the puzzle spec).
    Returns the first depth at which both flags are set, or 0 when the
    goal state is unreachable.
    """
    global answer  # NOTE(review): `answer` is never assigned - dead declaration
    que = deque()
    RB.extend([0, False, False])
    que.append(RB)
    # Mark the start state (and its mirrored red/blue form) as visited.
    visit[RB[0]][RB[1]][RB[2]][RB[3]] = 1
    visit[RB[2]][RB[3]][RB[0]][RB[1]] = 1
    while que:
        rx, ry, bx, by, depth, R, B = que.popleft()
        if R and B:
            return depth
        # Expansion pass 1: move the red marble first, then the blue one
        for i in range(4):
            Rx, Ry = rx + dire[i][0], ry + dire[i][1]
            if R:
                Rx, Ry = rx, ry
            if CHECK(Rx, Ry, graph) : continue
            if Rx == bx and Ry == by: continue
            if Rx == RB[0] and Ry == RB[1] : continue
            if graph[Rx][Ry] == 5 : continue
            for j in range(4):
                Bx, By = bx + dire[j][0], by + dire[j][1]
                if B:
                    Bx, By = bx, by
                if CHECK(Bx, By, graph) : continue
                if Bx == RB[2] and By == RB[3]: continue
                if visit[Rx][Ry][Bx][By]: continue
                if Bx == Rx and By == Ry: continue
                if graph[Bx][By] == 5 : continue
                visit[Rx][Ry][Bx][By] = 1
                que.append([Rx, Ry, Bx, By, depth + 1, graph[Rx][Ry] == 3, graph[Bx][By] == 4])
        # Expansion pass 2: move the blue marble first, then the red one
        for i in range(4):
            Bx, By = bx + dire[i][0], by + dire[i][1]
            if B:
                Bx, By = bx, by
            if CHECK(Bx, By, graph) : continue
            if Bx == RB[2] and By == RB[3]: continue
            if Bx == rx and By == ry: continue
            if graph[Bx][By] == 5 : continue
            for j in range(4):
                Rx, Ry = rx + dire[j][0], ry + dire[j][1]
                if R:
                    Rx, Ry = rx, ry
                if CHECK(Rx, Ry, graph) : continue
                if Rx == RB[0] and Ry == RB[1] : continue
                if visit[Rx][Ry][Bx][By]: continue
                if Rx == Bx and Ry == By: continue
                if graph[Rx][Ry] == 5 : continue
                visit[Rx][Ry][Bx][By] = 1
                que.append([Rx, Ry, Bx, By, depth + 1, graph[Rx][Ry] == 3, graph[Bx][By] == 4])
    return 0
def solution(maze):
    """Entry point: locate both marbles in *maze* and run the joint BFS.

    Cell values 1 and 2 mark the red and blue marble start positions.
    """
    global answer
    RB = [None] * 4
    rows, cols = len(maze), len(maze[0])
    for i in range(rows):
        for j in range(cols):
            if maze[i][j] == 1:
                RB[0], RB[1] = i, j
            if maze[i][j] == 2:
                RB[2], RB[3] = i, j
    # Size the visited array [rx][ry][bx][by] by the actual maze dimensions;
    # the original hard-coded 4x4x4x4, which raises IndexError for any maze
    # larger than 4x4.
    visit = [[[[0] * cols for _ in range(rows)]
              for _ in range(cols)] for _ in range(rows)]
    return BFS(maze, visit, RB)
| junsgi/Algorithm | BFS_DFS/기출문제 4번_BFS.py | 기출문제 4번_BFS.py | py | 2,756 | python | en | code | 0 | github-code | 36 |
72838543143 |
# NOTE(review): executes a sibling script in the current namespace; prefer an
# explicit import if init_notebook.py exposes reusable functions.
exec(open("init_notebook.py").read())
from helper import *
import time
client = connectToClient()  # presumably defined in helper/init_notebook - verify
world = client.get_world()
spectator = set_camera_over_intersection(world)
# Built but unused below except for construction; `rotation` is never drawn.
extent = carla.Vector3D(x=100, y=100)
location = carla.Location(x=80, y=-133, z=0)
bounding_box = carla.BoundingBox(location, extent)
rotation = carla.Rotation(pitch=-90, yaw=95, roll=0)
extent1 = carla.Vector3D(z=20,y=20)
debug_helper = world.debug
# Draw a box at the spectator; 0.05 and the final 0 are presumably thickness
# and life_time - confirm against the carla DebugHelper.draw_box signature.
debug_helper.draw_box(carla.BoundingBox(spectator.get_transform().location,extent1),spectator.get_transform().rotation, 0.05, carla.Color(20,160,255,0),0)
| jawadefaj/SIP-CARLA | CARLA/PythonAPI/tutorial/position_camera.py | position_camera.py | py | 585 | python | en | code | 0 | github-code | 36 |
71648501864 |
from PIL import Image, ImageDraw
import random as rd
import imageio
def create_simple_tile(size: int, bg_color:str, fg_color: str) -> Image:
    """Square tile: solid background with the upper-left triangle filled."""
    tile_img = Image.new("RGB", (size, size))
    tile_img_draw = ImageDraw.Draw(tile_img)
    tile_img_draw.rectangle([(0, 0), (size, size)], fill = bg_color)
    tile_img_draw.polygon([(0, 0), (size, 0), (0, size)], fill = fg_color )
    return tile_img
def create_smith_tile(size: int, bg_color:str, fg_color: str) -> Image:
    """Square Truchet-Smith tile: arcs anchored on two opposite corners."""
    tile_img = Image.new("RGB", (size, size))
    tile_img_draw = ImageDraw.Draw(tile_img)
    tile_img_draw.rectangle([(0, 0), (size, size)], fill = bg_color)
    # Arc whose bounding box is centred on the top-left corner.
    tile_img_draw.arc([(-size//2,-size//2), (size//2, size//2)],0,-270,fill = fg_color)
    # Full circle centred on the bottom-right corner, clipped by the tile.
    tile_img_draw.arc([(size//2,size//2), (size +(size//2), size+(size//2))],0,360,fill = fg_color)
    return tile_img
def create_base_tile(size: int, bg_color:str, fg_color: str, kind:str) -> Image:
    """Build one base tile of the given kind ('simple' or 'smith').

    Side effect: saves the tile to base_tile.gif in the working directory.
    Raises Exception for any other kind.
    """
    if kind == 'simple':
        tile_img = create_simple_tile(size, bg_color, fg_color)
    elif kind == 'smith':
        tile_img = create_smith_tile(size, bg_color, fg_color)
    else:
        raise Exception("Sorry, this tiling kind does not exists")
    imageio.imsave("base_tile.gif", tile_img)
    return tile_img
def paint_a_truchet(how_many_tiles: int, tile_size: int, kind: str) -> Image:
    """Assemble a square Truchet mosaic of randomly rotated base tiles.

    The base tile is built once (white background, black foreground) and
    each grid cell receives it with a random quarter-turn rotation.
    """
    tile = create_base_tile(tile_size, 'white', 'black', kind)
    side = how_many_tiles * tile_size
    canvas = Image.new("RGB", (side, side))
    for row in range(how_many_tiles):
        for col in range(how_many_tiles):
            # Toss for rotation: 0, 90, 180 or 270 degrees.
            tile = tile.rotate(90 * rd.randint(0, 3))
            canvas.paste(tile, (row * tile_size, col * tile_size))
    return canvas
10495520006 | from django.test import TestCase
from djlotrek.templatetags.djlotrek_filters import (
key,
is_in,
is_not_in,
get_class,
get_sorted,
media_url,
regex_match,
)
class TemplateFiltersTestCase(TestCase):
    """Unit tests for the djlotrek template filters."""
    def test_key(self):
        """
        The `key` filter looks a key up in a dict: given a dict and a key
        name it returns the value when the key exists, otherwise None.
        """
        my_dict = {"mykey": "value"}
        self.assertEqual(key(my_dict, "mykey"), "value")
        self.assertEqual(key(my_dict, "nokey"), None)
    def test_is_in(self):
        """
        The `is_in` filter checks membership in a comma-separated string
        list: given a value and the list string, it returns True when the
        value is present.
        """
        self.assertEqual(is_in("ciao", "hello,ciao"), True)
        self.assertEqual(is_in("hola", "hello,ciao"), False)
    def test_is_not_in(self):
        """
        The `is_not_in` filter is the negation of `is_in`: it returns True
        when the value is absent from the comma-separated string list.
        """
        self.assertEqual(is_not_in("ciao", "hello,ciao"), False)
        self.assertEqual(is_not_in("hola", "hello,ciao"), True)
    def test_get_class(self):
        """
        The `get_class` filter returns the class name of the given object.
        """
        a = 1
        my_dict = {"mykey": "value"}
        self.assertEqual(get_class(a), "int")
        self.assertEqual(get_class(my_dict), "dict")
    def test_get_sorted(self):
        """
        The `get_sorted` filter returns a sorted copy of the given list.
        """
        a = [10, 2, 3, 5, 1]
        self.assertEqual(get_sorted(a), [1, 2, 3, 5, 10])
    def test_media_url(self):
        """
        The `media_url` filter returns the URL of a media object, or an
        empty string for objects that have none.
        """
        self.assertEqual(media_url(None), "")
        self.assertEqual(media_url({"a": 2}), "")
    def test_regex_match(self):
        """
        The `regex_match` filter returns True when the regex matches.
        """
        self.assertEqual(
            regex_match("Cats are smarter than dogs", "(.*) are (.*?) .*"), True
        )
        self.assertEqual(
            regex_match("Cats are smarter than dogs", "(.*) àre (.*?) .*"), False
        )
| lotrekagency/djlotrek | tests/test_templatefilters.py | test_templatefilters.py | py | 2,463 | python | en | code | 7 | github-code | 36 |
74229865385 | import configparser
from pathlib import Path
from flask import Flask
from flask_restful import Resource, Api
import sqlite3
from todo import DB_WRITE_ERROR, SUCCESS
# Default database location: a hidden "<cwd-name>_todo.db" file placed in
# the current working directory.
DEFAULT_DB_FILE_PATH = Path.cwd().joinpath(
    "." + Path.cwd().stem + "_todo.db"
)
def get_database_path(config_file: Path) -> Path:
    """Read *config_file* and return the to-do database path it names.

    Expects an INI file with a [General] section containing a ``database``
    entry; raises KeyError when either is missing.
    """
    parser = configparser.ConfigParser()
    parser.read(config_file)
    database = parser["General"]["database"]
    return Path(database)
def init_database(db_path: Path) -> int:
    """Create the to-do database with its TASKS table.

    :param db_path: filesystem path for the SQLite database file
    :return: SUCCESS, or DB_WRITE_ERROR when the database cannot be created
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path)  # Empty to-do database
        conn.execute("""CREATE TABLE TASKS
            (ID INTEGER PRIMARY KEY AUTOINCREMENT,
            NAME           TEXT    NOT NULL,
            DESCRIPTION    TEXT    NOT NULL,
            START_DATE     DATE,
            DUE_DATE       DATE,
            PRIORITY       INT,
            COMPLETE       INT,
            DELETED        INT);""")
        # Report the sqlite3 module version (the original printed the literal
        # string 'sqlite3.version' by mistake).
        print(sqlite3.version)
        return SUCCESS
    except (OSError, sqlite3.Error):
        # sqlite3 failures raise sqlite3.Error, not OSError; catch both so a
        # bad path or an existing table is reported instead of crashing.
        return DB_WRITE_ERROR
    finally:
        if conn:
            conn.close()
class DatabaseHandler(Resource):
def __init__(self, db_path: Path) -> None:
self._db_path = db_path | CR-Lough/todo_app | core/src/todo/database.py | database.py | py | 1,220 | python | en | code | 0 | github-code | 36 |
21200837689 | # coding: utf-8
import websocket
from threading import Thread
import time
from secrets import token_hex
from hashlib import sha256
import hmac
import json
class RealtimeAPIWebsocket:
    """bitFlyer Lightning Realtime API (JSON-RPC over WebSocket) client.

    Subscribes the given public channels on connect and the private ones
    after a successful HMAC auth handshake; reconnects forever in a
    background thread.  Handlers map channel name -> callable(message).
    """
    def __init__(self, logger, parameters, public_handler, private_handler):
        self.logger = logger
        self._parameters = parameters
        self._ws = None
        self.auth_retry = 0
        self.auth_try_time = 0
        self.auth_completed = False
        self.RealtimeAPIWebsocket(public_handler, private_handler)
    def _auth(self):
        # HMAC-SHA256 challenge: sign "<timestamp><nonce>" with the API secret.
        self.auth_try_time = time.time()
        if self._parameters._config['apikey'] == '' or self._parameters._config['secret'] == '':
            return
        now = int(time.time())
        nonce = token_hex(16)
        sign = hmac.new(self._parameters._config['secret'].encode(
            'utf-8'), ''.join([str(now), nonce]).encode('utf-8'), sha256).hexdigest()
        params = {'method': 'auth', 'params': {
            'api_key': self._parameters._config['apikey'], 'timestamp': now, 'nonce': nonce, 'signature': sign}, 'id': 1}
        self.logger.info("Auth process started")
        self._ws.send(json.dumps(params))
    def auth_check(self):
        # Retry when private-channel auth has not completed and more than a
        # minute has passed since the previous challenge.
        if self.auth_try_time+60 < time.time() and not self.auth_completed:
            self.auth_retry = 0
            self._auth()
        return self.auth_completed
    def RealtimeAPIWebsocket(self, public_handler, private_handler):
        # NOTE(review): this method shares the class's name, which is
        # confusing; it wires the websocket callbacks and starts the
        # reconnect-forever thread.
        # Invoke a handler
        def handler(func, *args):
            return func(*args)
        def on_message(ws, message):
            messages = json.loads(message)
            # Handle the auth response (request id 1)
            if 'id' in messages and messages['id'] == 1:
                if 'error' in messages and self.auth_retry < 10:
                    self.logger.error(
                        'auth error: {} retry({})'.format(messages["error"], self.auth_retry))
                    self.auth_retry += 1
                    self._auth()
                elif 'result' in messages and messages['result'] == True:
                    self.auth_retry = 0
                    params = [{'method': 'subscribe', 'params': {
                        'channel': c}} for c in private_handler]
                    self.logger.info("Websocket auth successed")
                    mention = '' if not 'websocket_auth' in self._parameters._strategy else self._parameters._strategy[
                        'websocket_auth']+'\n'
                    self.auth_completed = True
                    if self._parameters.no_trade_period:
                        mention = ''  # no mention during no-trade periods (daily maintenance-time mentions are noisy)
                    self._parameters._message = mention+"Websocket auth successed"
                    self._parameters._parameter_message_send()
                    self.logger.debug(
                        "send private api subscribe {}".format(params))
                    ws.send(json.dumps(params))
                return
            if messages['method'] != 'channelMessage':
                return
            params = messages["params"]
            channel = params["channel"]
            recept_data = params["message"]
            # Try the public handlers first, then the private ones.
            realtime_handler = public_handler.get(channel)
            if realtime_handler != None:
                realtime_handler(recept_data)
                return
            realtime_handler = private_handler.get(channel)
            if realtime_handler != None:
                realtime_handler(recept_data)
                return
        def on_error(ws, error):
            self.logger.error(error)
        def on_close(ws):
            self.auth_completed = False
            self._ws = None
            self.logger.info("Websocket closed")
            mention = '' if not 'websocket_close' in self._parameters._strategy else self._parameters._strategy[
                'websocket_close']+'\n'
            if self._parameters.no_trade_period:
                mention = ''  # no mention during no-trade periods (daily maintenance-time mentions are noisy)
            self._parameters._message = mention+"Websocket closed"
            self._parameters._parameter_message_send()
        def on_open(ws):
            self.auth_completed = False
            self._ws = ws
            self.logger.info("Websocket connected")
            mention = '' if not 'websocket_connect' in self._parameters._strategy else self._parameters._strategy[
                'websocket_connect']+'\n'
            self._parameters._message = mention+"Websocket connected"
            self._parameters._parameter_message_send()
            params = [{'method': 'subscribe', 'params': {'channel': c}}
                      for c in public_handler]
            ws.send(json.dumps(params))
            self._auth()
        def run(ws):
            # Reconnect forever: run_forever() returns when the socket drops.
            while True:
                ws.run_forever()
                time.sleep(3)
        ws = websocket.WebSocketApp("wss://ws.lightstream.bitflyer.com/json-rpc",
                                    on_message=on_message, on_error=on_error, on_close=on_close)
        ws.on_open = on_open
        websocketThread = Thread(target=run, args=(ws, ))
        websocketThread.start()
| PP-lib/BFS | BFS-X/libs/realtimeapi.py | realtimeapi.py | py | 5,448 | python | en | code | 2 | github-code | 36 |
27031060369 | import subprocess
import sys
import json
from workflow import Workflow3
log = None
GITHUB_SLUG = 'tilmanginzel/alfred-bluetooth-workflow'
def _read_devices():
    """Query the bundled ./blueutil binary for paired devices.

    Returns a list of Alfred item dicts, connected devices first, then
    sorted by name.  blueutil's JSON output is trusted as-is.
    """
    proc = subprocess.Popen(['./blueutil', '--paired', '--format=JSON'], stdout=subprocess.PIPE)
    devices_raw = json.loads(proc.stdout.read())
    bluetooth_devices = []
    for device in devices_raw:
        # Only devices exposing a name, address and connection flag are shown.
        if device['name'] and device['address'] and device['connected'] is not None:
            is_connected = device['connected']
            bluetooth_devices.append({
                'type': 'file:skipcheck',
                'arg': device['address'],
                'subtitle': 'Connected' if is_connected else 'Disconnected',
                'connected': is_connected,
                'title': device['name'],
                'icon': './icons/bluetooth-' + ('connected' if is_connected else 'disconnected') + '.png'
            })
    # -connected puts True (connected) entries before False ones.
    return sorted(bluetooth_devices, key = lambda x: (-x['connected'], x['title']))
def main(wf):
    """Workflow entry: filter paired devices by the user query, emit items."""
    if wf.update_available:
        wf.add_item('Update available for Bluetooth Connector!',
                    autocomplete='workflow:update',
                    valid=False)
    query = wf.args[0] if len(wf.args) else None
    devices = _read_devices()
    # Fuzzy-filter against the device title; empty query keeps everything.
    filtered_devices = wf.filter(query, devices, key=lambda k: k['title'])
    for device in filtered_devices:
        item = wf.add_item(
            type=device['type'],
            title=device['title'],
            subtitle=device['subtitle'],
            arg=device['arg'],
            icon=device['icon'],
            valid=True
        )
        item.setvar('title', device['title'])
    wf.send_feedback()
if __name__ == '__main__':
wf = Workflow3(update_settings={'github_slug': GITHUB_SLUG})
log = wf.logger
sys.exit(wf.run(main))
| tilmanginzel/alfred-bluetooth-workflow | alfred_bluetooth_workflow.py | alfred_bluetooth_workflow.py | py | 1,825 | python | en | code | 188 | github-code | 36 |
19665159792 | from flask import Flask, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required, current_user, AnonymousUser, roles_required
from flask.ext.security.utils import *
from flask.ext.security.confirmable import *
from flask.ext.principal import Principal, Permission, RoleNeed
from flask.ext.login import LoginManager
from flask_mail import Mail, Message
import hashlib
app = Flask(__name__)
# NOTE(review): database credentials, the Flask secret key and every OAuth /
# API secret below are hard-coded and committed to version control - move
# them to environment variables or an untracked config file and rotate them.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://roverpass:roverpass@localhost/roverpass'
db = SQLAlchemy(app)
BASE_URL = 'http://107.170.60.95'
app.jinja_options['extensions'].append('jinja2.ext.loopcontrols')
SQLALCHEMY_BINDS = {
    'user_db': app.config['SQLALCHEMY_DATABASE_URI'],
    'campground_db': 'postgres://postgres:postgres@localhost/campground'
}
app.secret_key = 'goforfun'
#google api info
GOOGLE_API_KEY='AIzaSyDqQU7ovrKcbjS13lifn83dG6FLmM71hFA'
GOOGLE_URL = 'https://www.googleapis.com/customsearch/v1'
GOOGLE_CX = '011939436523733206751:6hccyfxo7qc'
#flask-security
app.config['SECURITY_POST_LOGIN'] = '/'
#flask-social for facebook and twitter
app.config['SOCIAL_TWITTER'] = {
    'consumer_key': 'HXy7JHIBI5kIpfRRPnq0EWlYp',
    'consumer_secret': 'LAto3gGXRXwJzD4aKSbMVTs3LuI41GgKKcSIutSnZi5F7Uk4sn'
}
app.config['SOCIAL_FACEBOOK'] = {
    'consumer_key' : '1498934676996386',
    'consumer_secret' : '3b89f94bb85ae16093bcc550fc9e5b99'
}
#handle permissions via principal
#to restrict view to user type, add decorator:
# @permission_name.require()
#principals = Principal(app)
#flask-login prep
login_manager = LoginManager()
login_manager.login_view = 'login' #after login, there is a "next" variable in the query string that indicates where the user was trying to access
# NOTE(review): message text has a typo ("must logged in"); it is a runtime
# string, so fix it deliberately rather than in a comments-only pass.
login_manager.login_message = "You must logged in to do that."
login_manager.init_app(app)
#flask-mail
#mail = Mail(app)
#define messages here
#welcome_to_roverpass = Message()
#thank_you_for_opting_in = Message()
#forgot_password = Message()
| rparikh42790/roverpass1 | kickstart.py | kickstart.py | py | 2,047 | python | en | code | 0 | github-code | 36 |
32138192143 | import discord
from discord.ext import commands
import response
import re
import logging
from get_token import get_token
imageKWS = ['img','imgs','image','images','pic','pics','pictures','picture']
class botName(commands.Bot):
    """Thin commands.Bot subclass with default intents and '-' prefix."""
    intents = discord.Intents.default()  # class-level; shared by all instances
    def __init__(self):
        super().__init__(command_prefix='-', intents=self.intents)
        # NOTE(review): toggling message_content after super().__init__ may be
        # too late for the gateway handshake - confirm against discord.py docs.
        self.intents.message_content = True
    async def close(self):
        await super().close()
async def send_message(message, userMsg, aiMsgContent, isPrivate=False):
    """Fetch an AI response for userMsg (with reply context) and deliver it.

    isPrivate sends the reply as a DM to the author instead of the channel;
    backend failures are reported in-channel.
    """
    try:
        res = await response.get_response(userMsg, aiMsgContent)
    except Exception as e:
        await message.channel.send('Something went wrong, please try again later')
    else:
        if isPrivate:
            await message.author.send(res)
        else:
            await message.channel.send(res)
async def generate_img(message, userMsg):
    """Ask the backend for a generated image and post it to the channel."""
    try:
        res = await response.get_img(userMsg)
    except Exception as e:
        # Fallback meme posted when generation fails (e.g. filtered prompt).
        await message.channel.send('https://media.makeameme.org/created/bad-word-dont.jpg')
    else:
        await message.channel.send(res)
async def show_help(message):
    """Post the bot usage summary to the channel."""
    helpMsg = """
`@MentionBot yourmessage` : chat with AI\n`@MentionBot /h` : show help\n`@MentionBot /p yourmessage` : send private response\n`@MentionBot /i` : generate random image
    """
    await message.channel.send(helpMsg)
def run_discord_bot():
    """Build the bot, wire up the mention listener, and run it."""
    bot = botName()
    @bot.event
    async def on_ready():
        print('Bot is running')
    @bot.listen('on_message')
    async def message_monitor(message):
        # React only when the bot itself is mentioned.
        for x in message.mentions:
            if x==bot.user:
                # Strip the mention token from the message text.
                userMsg = re.sub(f" *<@{x.id}> *", '', message.content)
                if message.reference:
                    # Replying to an earlier message: pass its content as context.
                    aiMsg = await message.channel.fetch_message(message.reference.message_id)
                    aiMsgContent = aiMsg.content
                else:
                    aiMsgContent = ''
                # Dispatch: /h help, /p private reply, /i (or image keywords)
                # image generation, otherwise a normal chat response.
                if userMsg.startswith('/h'):
                    await show_help(message)
                elif userMsg.startswith('/p'):
                    await message.delete()
                    private=True
                    await send_message(message,userMsg,aiMsgContent,private)
                elif userMsg.startswith('/i') or any(word in userMsg for word in imageKWS):
                    await generate_img(message, userMsg)
                else:
                    await send_message(message,userMsg,aiMsgContent)
    bot.run(get_token("discord_token"))
run_discord_bot() | benwen2511/chatGBT-discord-bot | main.py | main.py | py | 2,311 | python | en | code | 7 | github-code | 36 |
17567102459 | # URI Problem Link: https://www.urionlinejudge.com.br/judge/en/problems/view/1011
# Programmed by Marufur Rahman.
# Sphere volume V = 4/3 * pi * r^3 with the judge-mandated pi approximation;
# printed to 3 decimal places by the statement that follows.
radius = int(input())
pi = 3.14159
volume = float(4.0 * pi * (radius* radius * radius) / 3)
print("VOLUME = %0.3f" %volume) | MarufurRahman/URI-Beginner-Solution | Solutions/URI-1011.py | URI-1011.py | py | 241 | python | en | code | 1 | github-code | 36 |
2251885893 | import math
import numpy as np
import pygame as pg
def box_l2_loss(obj1, obj2):
    """Euclidean distance between the two objects' bounding-box vectors.

    Each box is flattened to (x, y, width, height) before taking the norm.
    """
    def as_vector(obj):
        return np.array([obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height])
    return np.linalg.norm(as_vector(obj1) - as_vector(obj2))
def move_from_vector(vector):
    """Convert (angle_in_degrees, speed) into a (dx, dy) displacement."""
    angle_deg, speed = vector
    theta = math.radians(angle_deg)
    return speed * math.cos(theta), speed * math.sin(theta)
def draw_obj(list_obj):
    """Render every object by invoking its draw() method, in order."""
    for drawable in list_obj:
        drawable.draw()
def remove_corps(list_obj):
    """Return a new list containing only the objects still alive."""
    survivors = []
    for candidate in list_obj:
        if candidate.alive:
            survivors.append(candidate)
    return survivors
def predation(list_obj):
    """Resolve one round of predation among *list_obj*.

    For every object whose ``prey`` attribute names another object in the
    list, the predator grows, its ``prey`` marker is reset to -1 and the
    prey is notified via ``eated()``.  The ``obj.prey != prey.prey`` guard
    preserves the original's skip of the self-targeting corner case.
    """
    names = [obj.name for obj in list_obj]
    for predator in list_obj:
        if predator.prey == -1 or predator.prey not in names:
            # Nothing marked, or the marked prey is no longer in the list.
            # The original fell through with index -1 here and wrongly
            # grew the predator and killed the LAST list element.
            continue
        idx_prey = names.index(predator.prey)
        if predator.prey != list_obj[idx_prey].prey:
            predator.grow()
            predator.prey = -1
            list_obj[idx_prey].eated()
def check_borders(obj_list):
    """Clamp every object's (x, y) inside the current display surface.

    NOTE(review): x is clamped against *height* and y against *width*; that
    looks swapped unless x indexes rows here - confirm against the callers.
    The 20-pixel margin presumably matches the sprite size - verify.
    """
    width, height = pg.display.get_surface().get_size()
    for el in obj_list:
        if el.x < 0:
            el.x = 0
        if el.y < 0:
            el.y = 0
        if el.x > height - 20:
            el.x = height - 20
        if el.y > width - 20:
            el.y = width - 20
def matprint(mat, fmt="g"):
    """Pretty-print a 2-D numpy array with right-aligned, per-column widths."""
    spec = "{:" + fmt + "}"
    widths = [max(len(spec.format(value)) for value in column)
              for column in mat.T]
    for row in mat:
        rendered = "".join("{:{w}{f}}".format(value, w=width, f=fmt) + " "
                           for width, value in zip(widths, row))
        print(rendered)
| thbeucher/Games | life_games/utils.py | utils.py | py | 1,395 | python | en | code | 0 | github-code | 36 |
26510682653 | #!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Clean (i.e. remove commented messages) po’s in branches or trunk.
import os
import sys
import collections
try:
import settings
import utils
except:
from . import (settings, utils)
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
def do_clean(po, strict):
    """Remove all commented-out messages from one .po file (in place).

    :param po: path to the .po file; rewritten via utils.write_messages
    :param strict: when True, abort on a broken .po file
    :return: 1 when strict and the file is broken, 0 otherwise
    """
    print("Cleaning {}...".format(po))
    messages, states, u1 = utils.parse_messages(po)
    if strict and states["is_broken"]:
        print("ERROR! This .po file is broken!")
        return 1
    for msgkey in states["comm_msg"]:
        del messages[msgkey]
    utils.write_messages(po, messages, states["comm_msg"], states["fuzzy_msg"])
    print("Removed {} commented messages.".format(len(states["comm_msg"])))
    return 0
def main():
    """CLI entry point: clean the selected .po files.

    Returns the last non-zero per-file status (0 on full success) so that
    ``sys.exit(main())`` reports failures; the original computed ``ret``
    but never returned it, so the process always exited with 0.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Clean po’s in branches " \
                                                 "or trunk (i.e. remove " \
                                                 "all commented messages).")
    parser.add_argument('-t', '--trunk', action="store_true",
                        help="Clean po’s in trunk rather than branches.")
    parser.add_argument('-s', '--strict', action="store_true",
                        help="Raise an error if a po is broken.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0
    if args.langs:
        # Explicit language list: look in trunk or in each language branch.
        for lang in args.langs:
            if args.trunk:
                po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            else:
                po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
            if os.path.exists(po):
                t = do_clean(po, args.strict)
                if t:
                    ret = t
    elif args.trunk:
        # Every .po file directly under trunk.
        for po in os.listdir(TRUNK_PO_DIR):
            if po.endswith(".po"):
                po = os.path.join(TRUNK_PO_DIR, po)
                t = do_clean(po, args.strict)
                if t:
                    ret = t
    else:
        # Every .po file in every language branch.
        for lang in os.listdir(BRANCHES_DIR):
            for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
                if po.endswith(".po"):
                    po = os.path.join(BRANCHES_DIR, lang, po)
                    t = do_clean(po, args.strict)
                    if t:
                        ret = t
    # Bug fix: propagate the status to the caller (previously dropped).
    return ret
# Script entry point: announce which tool runs, then exit with main()'s status.
if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())
| patins1/raas4emf | build/mac/blender/blender.app/Contents/MacOS/2.64/scripts/modules/bl_i18n_utils/clean_po.py | clean_po.py | py | 3,338 | python | en | code | 1 | github-code | 36 |
27980759583 | """
Simulated Annealing Class
"""
import pickle
import random
import math
import numpy as np
import sklearn
import pandas as pd
import configparser
import random
from pathlib import Path
import joblib
from Utils.attack_utils import get_constrains
from Models.scikitlearn_wrapper import SklearnClassifier
from Utils.data_utils import split_to_datasets
def get_config():
    """Read configurations.txt from the working directory and return its
    DEFAULT section (empty if the file is missing)."""
    parser = configparser.ConfigParser()
    parser.read('configurations.txt')  # a missing file silently yields empty defaults
    return parser['DEFAULT']
def date_change(current):
    """Enumerate candidate dates later in the same month.

    Args:
        current: date encoded as an int YYYYMMDD (e.g. 20180200).

    Returns:
        List of ints: every successor of *current* with the same YYYYMM
        prefix, incrementing the day while it is <= 30 before stepping
        (so the list reaches day 31 relative to a day-0 start).
    """
    dates = []
    new_date = current  # ints are immutable; the original's .copy() raised AttributeError
    # Integer division (//) keeps the year/month comparison exact; the
    # original's float division broke the same-month check.
    while new_date // 100 == current // 100 and new_date % 100 <= 30:
        new_date = new_date + 1
        dates.append(new_date)
    return dates
def time_change(current):
    """Enumerate candidate times after *current* (int HHMMSS) up to 24:00:00.

    Walks seconds in steps of 29 and minutes in steps of 2 — the original
    inline comments say these "should be" 1; the step sizes are kept as-is
    to preserve behavior — rolling into the next minute/hour when a field
    reaches 60.
    """
    times = []
    new_time = current  # ints are immutable; the original's .copy() raised AttributeError
    # // replaces the original float division so field extraction stays integral.
    while new_time // 10000 < 24:
        while (new_time // 100) % 100 < 60:
            while new_time % 100 < 60:
                new_time = new_time + 29  # second step (original note: should be 1)
                times.append(new_time)
            new_time = (new_time // 100 + 2) * 100  # next minute (original note: should be +1)
            times.append(new_time)
        new_time = (new_time // 10000 + 1) * 10000  # next hour
        times.append(new_time)
    return times
def get_feature_range(dataset_name):
    """Return the per-feature candidate values used to build attack neighbors.

    Only features listed here (and marked editable in the perturbability
    table) are mutated by the search.  Unknown dataset names yield {}.
    """
    if dataset_name == "RADCOM":
        coarse = range(0, 100000, 100)
        return {
            'previous_previous_previous_previous_total_sum_of_data_to_sec': coarse,  # 11
            'previous_previous_previous_total_sum_of_data_to_sec': coarse,  # 12
            'previous_previous_total_sum_of_data_to_sec': coarse,  # 13
            'previous_total_sum_of_data_to_sec': coarse,  # 14
            'total_sum_of_data_to_sec': range(0, 1000, 10),  # 17
        }
    if dataset_name == "HATE":
        spread = np.linspace(0.1, 0.9, 100)
        return {
            'c_work_empath': spread,
            'normal_neigh': spread,
            'c_legend_empath': spread,
            'c_cleaning_empath': spread,
            'sleep_empath': spread,
            'c_furniture_empath': spread,
            'c_ridicule_empath': spread,
            'c_fire_empath': spread,
            'hate_neigh': spread,
        }
    if dataset_name == 'CREDIT':
        spread = np.linspace(0.1, 0.9, 100)
        return {
            'PREV_ACTIVE_INSTALMENT_PAYMENT_DIFF_MEAN': spread,
            'PREV_Consumer_AMT_CREDIT_SUM': spread,
            'NAME_EDUCATION_TYPE': {0, 0.25, 0.5, 0.75},
            'AMT_ANNUITY': spread,
            'PREV_Cash_SIMPLE_INTERESTS_MEAN': spread,
            'CREDIT_TO_GOODS_RATIO': spread,
            'DAYS_EMPLOYED': spread,
            'CREDIT_TO_ANNUITY_RATIO': spread,
        }
    return {}
class SimulatedAnnealing:
    """Simulated-annealing search for an adversarial example.

    Starting from a correctly classified record, the search repeatedly
    moves to a neighboring record (one editable feature changed) and tries
    to minimize the target model's probability for the record's true
    class.  The attack succeeds once that probability drops below 0.5.

    Relies on the module-level globals ``perturbability`` (editable-feature
    table) and ``feature_range`` (candidate values per feature).
    """

    def __init__(self, initialSolution, solutionEvaluator, initialTemp, finalTemp, tempReduction, neighborOperator=None,
                 iterationPerTemp=200, alpha=10, beta=5, record_id=0, record_true_class=0, model_name=""):
        """
        Args:
            initialSolution: pandas Series holding the record under attack.
            solutionEvaluator: callable returning per-class probabilities
                for a batch of records (e.g. ``model.predict_proba``).
            initialTemp / finalTemp: start and stop temperatures.
            tempReduction: "linear", "geometric", "slowDecrease", or a
                custom callable used as the decrement rule.
            neighborOperator: ignored; the built-in operator is always used.
            iterationPerTemp: neighbor moves attempted per temperature step.
            alpha, beta: cooling-schedule constants.
            record_id / record_true_class: bookkeeping for the output file.
            model_name: results/ sub-directory name for this target model.
        """
        self.solution = initialSolution
        self.evaluate = solutionEvaluator
        self.initialTemp = initialTemp
        self.currTemp = initialTemp
        self.finalTemp = finalTemp
        self.iterationPerTemp = iterationPerTemp
        self.alpha = alpha
        self.beta = beta
        self.neighborOperator = self.neighbor_operator_func
        self.record_id = record_id
        self.record_true_class = record_true_class
        # Persist the starting record so the attack can be traced later.
        df_temp = pd.DataFrame(self.solution).T
        self.path_to_file = "results/" + model_name + f"/solution_{self.record_id}_{self.record_true_class}.csv"
        output_dir = Path("results/" + model_name)
        output_dir.mkdir(parents=True, exist_ok=True)
        df_temp.to_csv(self.path_to_file, index=False)
        # True-class probability of the starting record; the search tries
        # to drive this down.
        self.max_cost = self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class]
        self.best_solution = self.solution
        if tempReduction == "linear":
            self.decrementRule = self.linearTempReduction
        elif tempReduction == "geometric":
            self.decrementRule = self.geometricTempReduction
        elif tempReduction == "slowDecrease":
            self.decrementRule = self.slowDecreaseTempReduction
        else:
            self.decrementRule = tempReduction

    def linearTempReduction(self):
        """T <- T - alpha."""
        self.currTemp -= self.alpha

    def geometricTempReduction(self):
        """T <- T * alpha.

        NOTE(review): with the default alpha=10 this *raises* the
        temperature; confirm alpha < 1 is intended with this rule.
        """
        self.currTemp *= self.alpha

    def slowDecreaseTempReduction(self):
        """T <- T / (1 + beta * T)."""
        self.currTemp = self.currTemp / (1 + self.beta * self.currTemp)

    def isTerminationCriteriaMet(self):
        """Stop once the temperature floor is reached or no neighbor exists."""
        # Bug fix: the original compared the neighbor *list* to 0, which is
        # never true; test for emptiness instead.
        return self.currTemp <= self.finalTemp or len(self.neighborOperator(self.solution)) == 0

    def neighbor_operator_func(self, current):
        """Return every neighbor of *current*.

        A neighbor differs from *current* in exactly one feature that is
        both marked editable in the perturbability table and listed in
        the feature_range mapping.
        """
        editable = perturbability
        neighbors = []
        for feature in editable.Row:  # for each feature
            if editable[editable['Row'] == feature]['Perturbability'].values[0] == 1:  # editable
                if feature in feature_range:
                    for change in feature_range[feature]:
                        if current[feature] != change:  # skip the no-op move
                            neighbor = current.copy()
                            neighbor[feature] = change
                            neighbors.append(neighbor)
        return neighbors

    def run(self):
        """Execute the annealing loop.

        Returns:
            [1, adversarial_record] on success (some candidate's true-class
            probability dropped below 0.5), otherwise [0, None].
        """
        while not self.isTerminationCriteriaMet():
            # Iterate a fixed number of moves per temperature step.
            for i in range(self.iterationPerTemp):
                neighbors = self.neighborOperator(self.solution)
                if len(neighbors) == 0:
                    continue
                # Sample candidate moves and keep the one with the lowest
                # true-class probability.  Bug fix: random.sample raises
                # ValueError when fewer than 100 neighbors exist, so cap
                # the sample size.
                random_neighbors = random.sample(neighbors, min(100, len(neighbors)))
                newSolution = random_neighbors[np.argmin(self.evaluate(random_neighbors), axis=0)[self.record_true_class]]
                curr_sol_val = self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class]
                new_sol_val = self.evaluate(newSolution.values.reshape(1, -1))[0][self.record_true_class]
                if new_sol_val < 0.5:
                    # The model no longer favors the true class: success.
                    print("find attacked sample!!!")
                    return [1, newSolution]
                cost = curr_sol_val - new_sol_val
                if cost >= 0:
                    # Improvement (or tie): always accept.
                    self.solution = newSolution
                    if new_sol_val < self.max_cost:  # new best solution
                        self.max_cost = new_sol_val
                        self.best_solution = self.solution
                    print("Best Cost: ", self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class])
                else:
                    # Worse move: accept with probability e^(-cost/(2T)).
                    # NOTE(review): cost is negative here, so the exponent
                    # is positive and the probability exceeds 1 — worse
                    # moves are always accepted; confirm the sign is
                    # intended (kept as in the original).
                    if random.uniform(0, 0.7) < math.exp(-cost / (self.currTemp * 2)):
                        self.solution = newSolution
                print("Current Cost: ", self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class])
            # Decrement (cool) the temperature.
            self.decrementRule()
        if len(self.neighborOperator(self.solution)) == 0:
            print('no neighbors')
        # Bug fix: the original could fall off the end and return None,
        # which crashed the caller's attack_res[0] check.
        return [0, None]
if __name__ == '__main__':
    # ---- Load configuration ----
    configurations = get_config()
    data_path = configurations["data_path"]
    raw_data_path = configurations["raw_data_path"]
    perturbability_path = configurations["perturbability_path"]
    results_path = configurations["results_path"]
    seed = int(configurations["seed"])
    exclude = configurations["exclude"]
    dataset_name = raw_data_path.split("/")[1]

    datasets = split_to_datasets(raw_data_path, save_path=data_path)
    x_attack = datasets.get("x_test")
    y_attack = datasets.get("y_test")

    # ---- Pick the attack set and target model for the chosen dataset ----
    if ('RADCOM' in dataset_name):
        x_attack = pd.read_csv('Datasets/RADCOM/x_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
        y_attack = pd.read_csv('Datasets/RADCOM/y_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
        model = pickle.load(open('Models/RADCOM/RADCOM_target_RF_seed-42_estimators-500_maxdepth-9.pkl', 'rb'))
    elif ('HATE' in dataset_name):
        x_attack = pd.read_csv('Datasets/HATE/x_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
        y_attack = pd.read_csv('Datasets/HATE/y_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
        model = pickle.load(open('Models/HATE/HATE_target_GB_seed-42_lr-1.0_estimators-100_maxdepth-3.pkl', 'rb'))
    elif ('CREDIT' in dataset_name):
        x_attack = pd.read_csv('Datasets/CREDIT/x_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
        y_attack = pd.read_csv('Datasets/CREDIT/y_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
        model = pickle.load(open('Models/CREDIT/CREDIT_target_RF_seed-42_estimators-200_maxdepth-9.pkl', 'rb'))

    # Editable-feature table and per-feature candidate values; both are read
    # as module globals by SimulatedAnnealing.neighbor_operator_func.
    perturbability = pd.read_csv(perturbability_path)
    feature_range = get_feature_range(dataset_name)
    model_name = model.__class__.__name__
    print("model name: ", model_name)

    # NOTE(review): attack_x/attack_y reload the generic test split,
    # shadowing the dataset-specific x_attack/y_attack above — confirm.
    attack_x = datasets.get("x_test")
    attack_y = datasets.get("y_test")
    preds = model.predict(attack_x)
    columns_names = list(attack_x.columns)

    # ---- Attack loop bookkeeping ----
    i = 0
    num_success = 0
    mis = 0   # records the model already misclassifies (skipped)
    well = 0  # records classified correctly (attacked)
    attack_set = []
    orig = []
    a = []
    while i < attack_x.shape[0]:
        record = attack_x.loc[i]
        record_true_class = int(attack_y.pred[i])
        record_pred = int(preds[i])
        prediction_record = model.predict(record.values.reshape(1, -1))[0]
        if (record_pred != prediction_record):
            print('pred != pred')
        if (record_pred != record_true_class):
            # No point attacking a record the model already gets wrong.
            print("record is misclassified")
            mis += 1
            i += 1
            continue
        i += 1
        well += 1
        SA = SimulatedAnnealing(initialSolution=record, solutionEvaluator=model.predict_proba,
                                initialTemp=100, finalTemp=0.01,
                                tempReduction="linear",
                                iterationPerTemp=100, alpha=10, beta=5, record_id=i, record_true_class=int(record_true_class),
                                model_name=model_name)
        attack_res = SA.run()
        if (attack_res[0] == 1):
            num_success = num_success + 1
            rec = list((attack_res[1].values.reshape(1, -1)).flatten())
            attack_set.append(rec)
            # NOTE(review): this appends the adversarial record, not the
            # original one — confirm `orig` is meant to hold originals.
            orig.append(rec)
        print("======================+=======================")
        print(i, " samples")
        print(num_success, " samples success attack")
        print(mis, "mis")
        print(well, "well")
        print('a', a)

    # ---- Persist the adversarial records and their originals ----
    # NOTE(review): output paths hardcode HATE regardless of dataset_name.
    attack_sets = pd.DataFrame(attack_set, columns=columns_names)
    origs = pd.DataFrame(orig, columns=columns_names)
    attack_sets.to_csv("Datasets/HATE/HATE_adv_" + model_name + ".csv", index=False)
    # Bug fix: the original wrote attack_sets to the *_orig_* file as well,
    # clobbering it with adversarial rows; write the origs frame instead.
    origs.to_csv("Datasets/HATE/HATE_orig_" + model_name + ".csv", index=False)
| adiashk/search_AI_project | Simulated_Annealing.py | Simulated_Annealing.py | py | 17,737 | python | en | code | 0 | github-code | 36 |
7504092122 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException
import time
from django.test import LiveServerTestCase
MAX_WAIT = 10
class NewVisitorTest(LiveServerTestCase):
    """Functional test: a new visitor can start a to-do list."""

    def setUp(self):
        self.browser = webdriver.Firefox()

    def tearDown(self) -> None:
        self.browser.quit()

    def wait_for_row_in_list_table(self, row_text):
        """Poll the to-do table until row_text appears or MAX_WAIT expires."""
        deadline = time.time() + MAX_WAIT
        while True:
            try:
                table = self.browser.find_element(By.ID, 'id_list_table')
                cells = table.find_elements(By.TAG_NAME, 'td')
                self.assertIn(row_text, [cell.text for cell in cells])
                return
            except (AssertionError, WebDriverException) as err:
                # Keep retrying until the deadline, then surface the error.
                if time.time() > deadline:
                    raise err
                time.sleep(0.5)

    def test_can_start_a_list_and_retrive_it_later(self):
        browser = self.browser
        browser.get(self.live_server_url)

        # Title and page header both mention to-do lists.
        self.assertIn('To-Do', browser.title)
        self.assertIn('To-Do', browser.find_element(By.TAG_NAME, 'h1').text)

        # The input box invites the user to enter an item.
        inputbox = browser.find_element(By.ID, 'id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )

        # Enter a first item and see it listed.
        inputbox.send_keys('Купить павлиньи перья')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Купить павлиньи перья')

        # Enter a second item and see both listed.
        inputbox = browser.find_element(By.ID, 'id_new_item')
        inputbox.send_keys('Сделать мушку из павлиньих перьев')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('2: Сделать мушку из павлиньих перьев')

        self.fail("End test!")
| ollko/tdd_book | functional_tests/tests.py | tests.py | py | 2,043 | python | en | code | 0 | github-code | 36 |
933471323 | import abc
from neutron import quota
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.plugins.common import constants
# Attribute-name constants exposed by this extension (the 'uos:' prefix
# marks UnitedStack-specific fields).
UOS_SERVICE_PROVIDER = 'uos:service_provider'
UOS_NAME = 'uos:name'
UOS_REGISTERNO = 'uos:registerno'
UOS_PORT_DEVICE_NAME = 'uos:port_device_name'
UOS_PORT_DEVICE_OWNER = 'uos:port_device_owner'
UOS_PORT_DEVICE_ID = 'uos:port_device_id'
UOS_RATE_LIMIT = 'rate_limit'

# Attribute map describing the 'floatingipsets' REST collection: for each
# attribute, whether it may be set on POST/PUT, its validator/converter,
# response visibility, and its default value.
RESOURCE_ATTRIBUTE_MAP = {
    'floatingipsets': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        # NOTE(review): a validation helper is passed as 'convert_to' here —
        # confirm a converter (not a validator) was intended.
        'floatingipset_address': {'allow_post': False, 'allow_put': False,
                                  'convert_to': attr._validate_dict_or_none,
                                  'is_visible': True, 'required_by_policy': True,
                                  'enforce_policy': True, 'default': list()},
        'floatingipset_subnet_id': {'allow_post': True, 'allow_put': False,
                                    'convert_to': attr.convert_to_list,
                                    'validate': {'type:uuid_list': None},
                                    'is_visible': True,
                                    'default': None},
        'floatingipset_network_id': {'allow_post': True, 'allow_put': False,
                                     'validate': {'type:uuid': None},
                                     'is_visible': True},
        'router_id': {'allow_post': False, 'allow_put': False,
                      'validate': {'type:uuid_or_none': None},
                      'is_visible': True, 'default': None},
        'port_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid_or_none': None},
                    'is_visible': True, 'default': None,
                    'required_by_policy': True},
        'fixed_ip_address': {'allow_post': True, 'allow_put': True,
                             'validate': {'type:ip_address_or_none': None},
                             'is_visible': True, 'default': None},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        UOS_NAME: {'allow_post': True, 'allow_put': False,
                   'validate': {'type:string': None},
                   'is_visible': True, 'default': ''},
        UOS_REGISTERNO: {'allow_post': True, 'allow_put': False,
                         'validate': {'type:string': None},
                         'is_visible': True, 'default': ''},
        UOS_SERVICE_PROVIDER: {'allow_post': True, 'allow_put': False,
                               'convert_to': attr.convert_to_list,
                               'is_visible': True, 'default': ''},
        UOS_PORT_DEVICE_NAME: {'allow_post': False, 'allow_put': False,
                               'validate': {'type:string': None},
                               'is_visible': True, 'default': ''},
        UOS_PORT_DEVICE_OWNER: {'allow_post': False, 'allow_put': False,
                                'validate': {'type:string': None},
                                'is_visible': True, 'default': ''},
        UOS_PORT_DEVICE_ID: {'allow_post': False, 'allow_put': False,
                             'validate': {'type:string': None},
                             'is_visible': True, 'default': ''},
        UOS_RATE_LIMIT: {'allow_post': True, 'allow_put': False,
                         'convert_to': attr.convert_to_int,
                         'validate': {'type:fip_rate_limit': None},
                         'is_visible': True, 'default': 1024}
    }
}
# Extension-specific exceptions; _() is the i18n hook the framework injects.
# Raised (BadRequest) when the named service provider is unknown.
class ServiceProviderNotExist(qexception.BadRequest):
    message = _("the service provider %(service_provider)s is not exists")
# Raised (BadRequest) when no service provider was supplied.
class InputServieProviderNull(qexception.BadRequest):
    message = _("the service provider could not be found")
# Raised (BadRequest) when a floatingipset holds more than one floating IP.
class FloatingipsLenTooLong(qexception.BadRequest):
    message = _("In the floatingipset, the num of floatingip must be only one")
# Raised (NotFound) when the requested floatingipset id does not exist.
class FloatingIPSetNotFound(qexception.NotFound):
    message = _("Floating IP Set %(floatingipset_id)s could not be found")
class Uosfloatingipset(extensions.ExtensionDescriptor):
    """Extension descriptor for the UnitedStack floatingipsets resource."""

    @classmethod
    def get_name(cls):
        return "UnitedStack Floatingipset"

    @classmethod
    def get_alias(cls):
        return "uos_floatingipsets"

    @classmethod
    def get_description(cls):
        return ("Return related resources")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/neutron/uos/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-12-25T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns floatingipset Resources."""
        # Bug fix: the class previously defined get_resources twice; the
        # first definition (returning []) was dead code silently shadowed
        # by this one and has been removed.
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.L3_ROUTER_NAT,
                                                   register_quota=True)

    def update_attributes_map(self, attributes):
        super(Uosfloatingipset, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        # Only API v2.0 carries the extra attributes.
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
# Interface that plugins implement for floatingipset CRUD operations.
# NOTE(review): the class does not use abc.ABCMeta as its metaclass, so
# the @abc.abstractmethod decorators are not actually enforced — confirm.
class FloatingipsetBase(object):
    @abc.abstractmethod
    def create_floatingipset(self, context, floatingipset):
        pass
    @abc.abstractmethod
    def update_floatingipset(self, context, id, floatingipset):
        pass
    @abc.abstractmethod
    def get_floatingipset(self, context, id, fields=None):
        pass
    @abc.abstractmethod
    def delete_floatingipset(self, context, id):
        pass
    @abc.abstractmethod
    def get_floatingipsets(self, context, filters=None, fields=None,
                           sorts=None, limit=None, marker=None,
                           page_reverse=False):
        pass
    # NOTE(review): unlike its siblings this method is not marked
    # @abc.abstractmethod — confirm whether that is intentional.
    def get_floatingipsets_count(self, context, filters=None):
        pass
| CingHu/neutron-ustack | neutron/extensions/uosfloatingipset.py | uosfloatingipset.py | py | 6,685 | python | en | code | 0 | github-code | 36 |
21546274042 | #!/Users/shounak/anaconda3/bin/python3
# This program plots histograms to depict genome-wide methylation patterns,
# comparing 5-aza-treated samples against untreated controls at validated
# methylation sites.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse
import matplotlib
import matplotlib.axes
# Monospace font and a large base size for publication-style figures.
matplotlib.rcParams['font.family']="monospace"
matplotlib.rcParams['font.monospace']="Courier New"
matplotlib.rcParams['font.size']=24
# argument handling
optparse = argparse.ArgumentParser()
optparse.add_argument("-c","--csvfile",help="list of methylation ratios")
optparse.add_argument("-t","--type",help="methlyation type:CpG, CHG or CHH")
optparse.add_argument("-l","--lookup",help="look-up table to validate methylation sites")
optparse.add_argument("-d","--date",help="day in M/DD format, enclose within quotes")
optparse.add_argument("-o","--outfile",help="output histogram file basename")
argstr = optparse.parse_args()
# Read in the data (tab-separated methylation ratios per site)
reads=pd.read_csv(argstr.csvfile,sep='\t',low_memory=False)
# Read in the validation table; rename Scaffold so it shares the "chrom"
# join key with the reads table, and drop duplicate sites
val_tab=pd.read_csv(argstr.lookup,sep=',').rename(columns={"Scaffold":"chrom"}).drop_duplicates()
# Take the intersection of the reads and validation table to filter out the
# valid calls (inner join on chrom + start position)
ratios=pd.merge(reads,val_tab,on=["chrom","start"],how='inner')
#ratios.to_csv(argstr.outfile+"_all_validated.csv",sep=',',index=False)
# extract the relevant columns; need to replace this with generalized column list
# NOTE(review): assumes replicate columns are named "Aza_N <date> meth_ratio"
# and "Co_N <date> meth_ratio" for exactly three replicates — confirm.
# For 5-aza treated samples: mean ratio across the three replicates
aza_means=ratios.loc[:,(["Aza_1 "+argstr.date+" meth_ratio","Aza_2 "+argstr.date+" meth_ratio","Aza_3 "+argstr.date+" meth_ratio"])].loc[ratios['Type']==argstr.type].mean(axis=1).to_numpy()
# For control (untreated) samples
co_means=ratios.loc[:,(["Co_1 "+argstr.date+" meth_ratio","Co_2 "+argstr.date+" meth_ratio","Co_3 "+argstr.date+" meth_ratio"])].loc[ratios['Type']==argstr.type].mean(axis=1).to_numpy()
#means=pd.concat([aza_means,co_means],axis=1).rename(columns={0:"AZA",1:"CON"})
#hist_data=means.to_numpy()
# create a histogram for the 5-aza methylation calls...
plt.hist(aza_means,bins=np.arange(0.0,1,0.05),alpha=0.5,color="blue",label="AZA")
# ... and the control methylation calls for a given methylation type (CpG, CHG or CHH)
plt.hist(co_means,bins=np.arange(0.0,1,0.05),alpha=0.5,color="red",label="CON")
# set the axis labels
plt.xlabel("Methylation ratio",fontsize=28)
plt.ylabel("Counts",fontsize=28)
# set the axis scales so we can compare plots
plt.xlim((0,1.0))
#plt.ylim((0,1.5E4))
# optional; tick label in scientific notation
plt.ticklabel_format(axis="y",scilimits=(2,4),useMathText=True)
# add the legend
plt.legend(fontsize=28,framealpha=1.0)
# and save the figure as a 300 DPI png file
plt.savefig(argstr.type+"_"+argstr.outfile+".png",dpi=300,format="png", bbox_inches='tight')
# close the plt object so that the above plots are not copied unintentionally,
# if this subroutine is called multiple times by the same parent python process
plt.close()
| lanl/DNA_methylation_analysis | Genome_meth_ratio_distribution histograms.py | Genome_meth_ratio_distribution histograms.py | py | 2,883 | python | en | code | 0 | github-code | 36 |
5091344976 | import numpy as np
import time
from ezGraph import *
from jStats import *
# Finite-difference model of a draining tube whose inflow is switched
# on and off every 5 s of model time.

# PARAMETERS
dt = 1        # time step (s)
nsteps = 100  # number of steps to simulate
r = 2.25      # tube radius (cm)
Qin = 30      # volume inflow rate dV/dt (cubic cm/s) while the tap is on
h = 0         # initial water height (cm)
k = 0.15      # outflow rate constant

# GRAPH
graph = ezGraph(xmin=0, xmax=100,
                xLabel="Time (s)",
                yLabel="Height (cm)")
graph.add(0, h)  # plot the initial state

Qflag = True  # True while the inflow tap is open

# TIME LOOP
for t in range(1, nsteps):
    modelTime = t * dt

    # Toggle the inflow every 5 s of model time (idiom fix: the original
    # flipped the flag with an if/else; `not` does the same in one step).
    if 0 == modelTime % 5:
        Qflag = not Qflag
    Qin = 30 if Qflag else 0
    print(modelTime, Qflag, Qin)

    # Filling: spread the inflow volume over the cross-section pi*r^2.
    dh = Qin * dt / (np.pi * r ** 2)  # change in height this step
    h = h + dh

    # Draining: outflow volume rate proportional to the current height.
    dVdt = -k * h
    dh = dVdt * dt / (np.pi * r ** 2)
    h = h + dh

    graph.add(modelTime, h)

# DRAW GRAPH
graph.keepOpen()
| joydunne/waterTube | ezGraph/step-wiseInflow.py | step-wiseInflow.py | py | 1,092 | python | en | code | 0 | github-code | 36 |
71578894183 | #!/usr/bin/env python
from __future__ import print_function
import vtk
def main():
    """Intersect a line with a unit square polygon and report the result."""
    # Unit square in the x-y plane.
    corners = vtk.vtkPoints()
    for cx, cy in ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)):
        corners.InsertNextPoint(cx, cy, 0.0)

    polygon = vtk.vtkPolygon()
    polygon.GetPoints().DeepCopy(corners)
    polygon.GetPointIds().SetNumberOfIds(4)  # the 4 corners of the square
    for idx in range(4):
        polygon.GetPointIds().SetId(idx, idx)

    # Line endpoints straddling the polygon's plane, plus a tolerance.
    p1 = [0.1, 0, -1.0]
    p2 = [0.1, 0, 1.0]
    tolerance = 0.001

    # Output slots filled in by IntersectWithLine.
    t = vtk.mutable(0)  # parametric coordinate along p1->p2 (0 at p1, 1 at p2)
    x = [0.0, 0.0, 0.0]
    pcoords = [0.0, 0.0, 0.0]
    subId = vtk.mutable(0)

    hit = polygon.IntersectWithLine(p1, p2, tolerance, t, x, pcoords, subId)
    print("intersected? ", 'Yes' if hit == 1 else 'No')
    print("intersection: ", x)
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/GeometricObjects/PolygonIntersection.py | PolygonIntersection.py | py | 1,059 | python | en | code | 319 | github-code | 36 |
18764507379 | """
Given a list of UQ course codes, crawl the UQ course website and scrape
information pertaining to said course.
"""
import sys
import requests
from bs4 import BeautifulSoup
# Headers for making web requests look like a real user (or they may be
# rejected by the UQ website)
headers = requests.utils.default_headers()
headers.update(
    {
        'User-Agent': 'PreReqBot 1.0',
    }
)

# The base URL we want to make requests to; a course code is appended.
BASE = 'https://my.uq.edu.au/programs-courses/course.html?course_code='

# The internal HTML ids of the page blocks of interest
INCOMPAT = "course-incompatible"
PREREQ = "course-prerequisite"
# Converts the resulting HTML to string, and converts commas to "and"
def format_courses(results):
    """Join scraped course elements into a single "A and B" string.

    Args:
        results: iterable of parsed elements exposing a .text attribute.

    Returns:
        The elements' stripped text joined by spaces, with commas turned
        into " and " and runs of multiple spaces collapsed to one.
    """
    outstring = " ".join(x.text.strip() for x in results)
    outstring = outstring.replace(",", " and ")
    # Bug fix: the original replaced a single space with a single space
    # (a no-op); collapse repeated spaces as its comment intended.
    while "  " in outstring:
        outstring = outstring.replace("  ", " ")
    return outstring
# Run it
def main():
    """Scrape incompatibility/prerequisite info for each course in a file."""
    if len(sys.argv) != 2:
        print("Usage: python3 crawl.py [file-of-courses]")
        print()
        print("[file-of-courses] is a one-course-per-line text file.")
        sys.exit(1)

    # Process each course code listed in the input file.
    with open(sys.argv[1]) as course_file:
        for raw_line in course_file:
            code = raw_line.strip()
            # Fetch the course page and parse its HTML.
            page = requests.get(BASE + code, headers=headers).content
            soup = BeautifulSoup(page, "html.parser")
            # Extract the two blocks of interest and emit them as CSV rows.
            incompat = soup.findAll('p', {'id': INCOMPAT})
            prereq = soup.findAll('p', {'id': PREREQ})
            print(code + ",incompatible," + format_courses(incompat))
            print(code + ",prerequisite," + format_courses(prereq))
# Script entry point.
if __name__ == "__main__":
    main()
| tompoek/uq-course-prereqs-viz | data-crawler/crawl.py | crawl.py | py | 1,826 | python | en | code | 0 | github-code | 36 |
29226135271 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from pymongo import MongoClient
import numpy as np
from tqdm import tqdm
def insertInfo(df):
    """Insert one user document per DataFrame row into Infodb.userInfo.

    Expects columns: id, name, birth, embedding (numpy array; stored as
    raw bytes).  Rows whose id already exists are skipped with a message.
    """
    from pymongo.errors import DuplicateKeyError

    client = MongoClient('mongodb://localhost:27017/')
    infodb = client.Infodb
    userInfo = infodb.userInfo
    for index, instance in tqdm(df.iterrows(), total=df.shape[0]):
        user = {
            '_id': instance["id"],
            'name': instance["name"],
            'birth': instance["birth"],
            # Serialize the embedding vector to bytes for storage.
            'embeddings': instance["embedding"].tobytes(),
        }
        try:
            userInfo.insert_one(user)
        except DuplicateKeyError:
            # Bug fix: the original bare `except:` swallowed *every* error
            # (connection failures included) and mislabeled them all as
            # duplicates; only a duplicate _id is expected and handled here.
            print('ID already exists.')
def load_info(ID):
    """Look up a user by id in Infodb.userInfo and return (name, embedding).

    Returns:
        (name, embedding): the stored name and the embedding decoded back
        to a float32 numpy array.  When the id is not found, name is None
        and embedding an empty list (the original raised UnboundLocalError
        on a missing id because `name` was never assigned).
    """
    client = MongoClient('mongodb://localhost:27017/')
    infodb = client.Infodb
    userInfo = infodb.userInfo
    results = userInfo.find({"_id": ID}, {'name': True, 'embeddings': True})
    name = None  # bug fix: defined even when no document matches
    embedding = []
    for result in results:
        name = result['name']
        # Embeddings are stored as raw float32 bytes; decode to an array.
        embedding = np.frombuffer(result["embeddings"], dtype='float32')
    return name, embedding
| inhye6-6/project_face_authentication | connect_db.py | connect_db.py | py | 1,131 | python | en | code | 0 | github-code | 36 |
39253734345 | from functools import reduce
from collections import Counter
import math
import operator
import numpy as np
class SpamHamClassifier(object):
    """Naive-Bayes spam/ham classifier over binary word-presence features.

    Each training item must expose ``.tokens`` (a list of words) and
    ``.label`` (``'ham'`` or ``'spam'``). Features are either the
    ``vocabulary_size`` most frequent words or, when
    ``compute_mutual_information`` is true, the words with the highest
    mutual information with the class label. ``lambda_constant`` is an
    additive (Laplace) smoothing constant; 0 disables smoothing.
    """

    def __init__(self, training_data, vocabulary_size,
                 compute_mutual_information, lambda_constant=0):
        self._num_training_data = len(training_data)
        self._lambda_constant = lambda_constant
        self._num_ham_documents = 0
        self._num_spam_documents = 0
        self._ham_counter = Counter()    # word -> # of ham docs containing it
        self._spam_counter = Counter()   # word -> # of spam docs containing it
        vocabulary = Counter()           # word -> raw corpus term frequency
        for data in training_data:
            counter = Counter(data.tokens)
            vocabulary.update(counter)
            # Binarize: each document contributes at most 1 per word.
            vectorized = self._vectorize(counter)
            if data.label == 'ham':
                self._num_ham_documents += 1
                self._ham_counter.update(vectorized)
            elif data.label == 'spam':
                self._num_spam_documents += 1
                self._spam_counter.update(vectorized)
        # Class priors P(ham), P(spam).
        self._probability_ham = np.divide(
            self.num_ham_documents,
            self.num_training_data
        )
        self._probability_spam = np.divide(
            self.num_spam_documents,
            self.num_training_data
        )
        if compute_mutual_information:
            word_mi = {}
            num_docs = len(training_data)
            for word, frequency in vocabulary.items():
                # Joint/marginal probabilities. NOTE(review): the
                # "not word" joints below are computed against ALL docs,
                # not per class — kept as in the original formulation.
                pwordspam = self.spam_counter[word] / num_docs
                pwordham = self.ham_counter[word] / num_docs
                pnotwordspam = (num_docs - self.spam_counter[word]) / num_docs
                pnotwordham = (num_docs - self.ham_counter[word]) / num_docs
                pword = frequency / num_docs
                pnotword = (num_docs - frequency) / num_docs
                # MI = sum over (presence, class) of
                #   P(x, c) * log(P(x, c) / (P(x) * P(c))).
                # np.log/np.divide are kept (rather than math.log) so a
                # zero probability yields -inf/nan instead of raising.
                mi = np.sum([
                    np.multiply(
                        pwordham,
                        np.log(np.divide(
                            pwordham,
                            np.multiply(pword, self.probability_ham)
                        ))
                    ),
                    np.multiply(
                        pwordspam,
                        np.log(np.divide(
                            pwordspam,
                            np.multiply(pword, self.probability_spam)
                        ))
                    ),
                    # BUG FIX: this P(not-word, ham) term previously used
                    # pnotwordspam inside the log numerator.
                    np.multiply(
                        pnotwordham,
                        np.log(np.divide(
                            pnotwordham,
                            np.multiply(pnotword, self.probability_ham)
                        ))
                    ),
                    np.multiply(
                        pnotwordspam,
                        np.log(np.divide(
                            pnotwordspam,
                            np.multiply(pnotword, self.probability_spam)
                        ))
                    )
                ])
                word_mi[word] = mi
            word_mi = sorted(
                word_mi.items(), key=lambda kv: kv[1], reverse=True)
            vocabulary = word_mi[:vocabulary_size]
        else:
            vocabulary = vocabulary.most_common(vocabulary_size)
        self._vocabulary = [v[0] for v in vocabulary]
        # Restrict the per-class counters to the selected vocabulary.
        # (Set lookup instead of the original O(n) list membership scan.)
        vocab_set = set(self._vocabulary)
        self._ham_counter = Counter({
            k: v for k, v in self.ham_counter.items() if k in vocab_set
        })
        self._spam_counter = Counter({
            k: v for k, v in self.spam_counter.items() if k in vocab_set
        })

    @property
    def num_training_data(self):
        """Total number of training documents."""
        return self._num_training_data

    @property
    def num_spam_documents(self):
        """Number of training documents labelled 'spam'."""
        return self._num_spam_documents

    @property
    def num_ham_documents(self):
        """Number of training documents labelled 'ham'."""
        return self._num_ham_documents

    @property
    def lambda_constant(self):
        """Additive smoothing constant (0 = no smoothing)."""
        return self._lambda_constant

    @property
    def vocabulary(self):
        """Selected feature words (list)."""
        return self._vocabulary

    @property
    def spam_counter(self):
        """Per-word spam document counts, restricted to the vocabulary."""
        return self._spam_counter

    @property
    def ham_counter(self):
        """Per-word ham document counts, restricted to the vocabulary."""
        return self._ham_counter

    @property
    def probability_spam(self):
        """Prior P(spam)."""
        return self._probability_spam

    @property
    def probability_ham(self):
        """Prior P(ham)."""
        return self._probability_ham

    def _vectorize(self, counter):
        """Collapse counts to binary presence features ({word: 1})."""
        return Counter({x: 1 for x in counter})

    def classify(self, document):
        """Return 'ham' or 'spam' for *document* (must expose .tokens)."""
        vector = self._vectorize(document.tokens)
        document_likelihood_spam = self._compute_likelihood(
            vector,
            self.num_spam_documents,
            self.spam_counter
        )
        document_likelihood_ham = self._compute_likelihood(
            vector,
            self.num_ham_documents,
            self.ham_counter
        )
        probability_ham_document = self._compute_bayes(
            document_likelihood_ham,
            document_likelihood_spam
        )
        # Ties (exactly 0.5) go to 'ham'.
        if probability_ham_document >= 0.5:
            return 'ham'
        return 'spam'

    def _compute_likelihood(self, document, label_total, labelled_counter):
        """P(document | class) under the naive independence assumption.

        Accumulates in log space and exponentiates once at the end in
        extended precision to limit underflow over large vocabularies.
        """
        log_terms = []
        vocabulary = self.vocabulary
        if self.lambda_constant:
            # With smoothing enabled, only score the words actually
            # present in the document (absent words cannot zero it out).
            vocabulary = list(document.keys())
        for word in vocabulary:
            count = labelled_counter[word]
            if not document[word]:
                # Word absent: use the number of class docs lacking it.
                count = label_total - labelled_counter[word]
            likelihood = np.divide(
                np.add(count, self.lambda_constant),
                np.add(
                    label_total,
                    np.multiply(self.lambda_constant, len(self.vocabulary))
                )
            )
            if likelihood == 0:
                return 0.0
            log_terms.append(np.log(likelihood))
        # np.longdouble instead of np.float128: same extended precision
        # where available, but defined on every platform (float128 is
        # missing e.g. on Windows builds of NumPy).
        return np.exp(np.sum(log_terms), dtype=np.longdouble)

    def _compute_bayes(self, ham_likelihood, spam_likelihood):
        """Posterior P(ham | document) via Bayes' rule."""
        return np.divide(
            np.multiply(ham_likelihood, self.probability_ham),
            np.add(
                np.multiply(ham_likelihood, self.probability_ham),
                np.multiply(spam_likelihood, self.probability_spam)
            )
        )
| jvmsangkal/spam-filter-py | spamfilter/classifier.py | classifier.py | py | 6,440 | python | en | code | 1 | github-code | 36 |
32766043528 | #!/urs/bin/python
#-*- coding:utf8 -*-
from bs4 import BeautifulSoup as bs
import urllib
import re
import json
import os
def get_musicid(url):
    """Scrape a Baidu Music chart page and return the set of song ids.

    Song ids are taken from anchors whose href matches ``/song/<digits>``
    (e.g. http://music.baidu.com/top/dayhot). Python 2 urllib is used,
    matching the rest of this script.
    """
    page = urllib.urlopen(url).read()
    soup = bs(page, 'lxml', from_encoding='utf8')
    song_links = soup.findAll('a', href=re.compile(r'/song/(\d+)'))
    # The id is the last path segment of each matching href; a set
    # removes the duplicate links that appear on the chart page.
    return {link['href'].split('/')[-1] for link in song_links}
def parser(api):
    """Query the jsonp song API and return (title, format, download link).

    The endpoint answers with ``callback(<json>)``; the payload between
    the parentheses is extracted and decoded before the fields of
    interest are read out.
    """
    raw = urllib.urlopen(api).read()
    payload = re.findall(r'\((.*)\)', raw)[0]
    info = json.loads(payload)
    title = info['songinfo']['title']
    extension = info['bitrate']['file_extension']
    download_link = info['bitrate']['file_link']
    return title, extension, download_link
def music_download(filename, downloadlink):
    """Download *downloadlink* into ./music/<filename> unless it exists.

    Bug fix: the original used ``if not exists(dir): makedirs ... elif
    not isfile(path): urlretrieve``, making directory creation and the
    download mutually exclusive — the first song after a fresh start
    created the directory but was never downloaded. Creating the
    directory and downloading are now independent steps.
    """
    dir = os.getcwd() + '/music/'
    path = dir + filename
    if os.path.exists(dir) == False:
        os.makedirs(dir)
    if os.path.isfile(path) == False:
        urllib.urlretrieve(downloadlink, dir + filename)
# Crawl the Baidu "day hot" chart and download every listed song.
url = 'http://music.baidu.com/top/dayhot'
musicidlist = get_musicid(url)
for songid in musicidlist:
    # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt) can
    # still abort the crawl; any per-song failure is reported and the
    # loop moves on to the next id.
    try:
        api = "http://musicapi.qianqian.com/v1/restserver/ting?method=baidu.ting.song.play&format=jsonp&callback=jQuery17208098337996053833_1513859108469&songid=%s&_=1513859109906" % songid
        songtitle, songformat, songdownloadlink = parser(api)
        filename = songtitle + '.' + songformat
        music_download(filename, songdownloadlink)
        print(songtitle + ' downloaded successfully!')
    except Exception:
        print('download fail')
| carloszo/Carlos_python | Crawler/BaiduMusicCrawler.py | BaiduMusicCrawler.py | py | 1,916 | python | en | code | 0 | github-code | 36 |
# program1: print each word one more time than the one before it
# (first word once, second twice, ...).
a = ['banana', 'apple', 'microsoft']
for position, word in enumerate(a):
    for _ in range(position + 1):
        print(word)
# progam 2
'''
a = range(1, 100)
total = 0
for b in a:
if b % 3 == 0 or b % 5 == 0:
print (b)
total += b
print total
'''
# program 4
'''
total = 0
for i in range(1, 100):
if i % 3 == 0:
total += i
elif i % 5 == 0:
total += i
print total
'''
# program 5
'''
total = 0
j = 1
for i in range(1, 5):
while j < 5:
total += j
j += 1
print total
'''
# program 6
'''
a = ['banana', 'apple', 'microsoft']
for element in a:
print (element)
b = [2, 2, 1]
total = 0
for e in b:
total = total + e
print(total)
c = list(range(1, 1000))
print list((c))
total2 = 0
for i in range(1, 1000):
total2 += i
print (total2)
d = range(1, 11)
print(d)
total = 0
for r in d:
if r % 3 == 0:
total += r
print (total)
'''
| Parth-Ps/python | for_loop.py | for_loop.py | py | 914 | python | en | code | 0 | github-code | 36 |
def read_graph(vertex_number, edge_number):
    """Read a weighted digraph from stdin into an adjacency matrix.

    Expects *edge_number* lines of "v1 v2 w". Missing edges are +inf and
    the diagonal is 0, as Floyd-Warshall requires.

    Bug fix: the diagonal was zeroed inside the edge loop guarded by
    ``if i < vertex_number``, so whenever edge_number < vertex_number
    some graph[i][i] stayed +inf. The diagonal is now initialized
    up front. (Side effect: an explicit self-loop edge now overrides
    the 0 diagonal instead of racing with it — confirm that is desired.)
    """
    graph = [[float('+inf')] * vertex_number for _ in range(vertex_number)]
    for i in range(vertex_number):
        graph[i][i] = 0
    for _ in range(edge_number):
        v1, v2, w = map(int, input().split())
        graph[v1][v2] = w
    return graph
def floyd_warshall(graph):
    """Return the all-pairs shortest-distance matrix for *graph*.

    *graph* is a square adjacency matrix (missing edges = +inf,
    diagonal = 0). The input matrix is not modified; the relaxation
    runs on a deep copy.
    """
    size = len(graph)
    dist = [row[:] for row in graph]
    for mid in range(size):
        for src in range(size):
            for dst in range(size):
                detour = dist[src][mid] + dist[mid][dst]
                if detour < dist[src][dst]:
                    dist[src][dst] = detour
    return dist
# Driver: first stdin line is "vertices edges"; print the shortest-path
# matrix one space-separated row per line.
num_vertices, num_edges = map(int, input().split())
adjacency = read_graph(num_vertices, num_edges)
for row in floyd_warshall(adjacency):
    print(*row)
| andrewsonin/4sem_fin_test | _16_floyd_warshall.py | _16_floyd_warshall.py | py | 707 | python | en | code | 0 | github-code | 36 |
40978312177 | # pylint: disable=E0401,E0611
import os
import json
script_dir = os.path.dirname(__file__)
from helpers.DataService import DataService
from models.InputData import InputData
from models.OutputData import OutputData
from models.DataResult import DataResult
from models.Encoder import Encoder
from models.Decoder import Decoder
from callbacks.BatchSaver import BatchSaver
from Config import BATCH_SIZE, EPOCHS, LIMIT_GPU_USAGE
from generator.TrainingGenerator import TrainingGenerator
from tensorflow.keras.models import Model, save_model
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow as tf
from tensorflow.keras import backend as ktf
def get_session(gpu_fraction=0.3):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
if (LIMIT_GPU_USAGE):
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
ktf.set_session(get_session())
# Process the dataset
print('STARTING: loading_data')
data_result = DataResult(None, None)
with open(script_dir + './temp/processed_data.json', 'r') as output:
json_data = json.load(output)
data_result.loadJSON(json_data)
print('END: loading_data')
print('')
# Create the encoder
print('STARTING: create encoder')
encoder = Encoder(data_result.input_data)
print('END: create encoder')
print('')
# Create the decoder
print('STARTING: create decoder')
decoder = Decoder(data_result.output_data, encoder)
print('STARTING: create decoder')
print('')
# Create the model
print('STARTING: create model')
model = Model([encoder.inputs, decoder.inputs], decoder.outputs)
print('END: create model')
print('')
# Compile the model
print('STARTING: compile model')
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
print('END: compile model')
print('')
# Train the model
print('STARTING: train model')
print(' Training with ' + str(data_result.input_data.num_lines) + ' lines')
generator = TrainingGenerator(data_result, BATCH_SIZE)
model.fit_generator(generator, epochs=EPOCHS, verbose=1, callbacks=[BatchSaver()])
# model.fit([token_result.encoder_input, token_result.decoder_input], token_result.decoder_output, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.2)
print('END: train model')
print('')
#Save the entire model
save_model(model, 'model.h5')
#Save the weights for cpu compatibility
model.save_weights('model_weights.h5')
| AtLeastITry/seq2seq-keras-chatBot | train.py | train.py | py | 2,470 | python | en | code | 2 | github-code | 36 |
42886474434 | import logging
from json import JSONDecodeError
from typing import Dict, Any
import requests
from .exceptions import TrefleException
from .models import Result
class RestAdapter:
    """Low-level HTTP adapter for the Trefle REST API.

    Wraps ``requests``, injects the API token into every call, and maps
    transport errors and non-2xx responses to :class:`TrefleException`.
    """
    def __init__(self, token: str,
                 logger: logging.Logger = None):
        """
        Constructor for RestAdapter
        :param token: Trefle API token, appended to every request's
            query parameters.
        :param logger: (optional) If your app has a logger,
            pass it in here.
        """
        self._logger = logger or logging.getLogger(__name__)
        self._token = token

    def _make_request(self, http_method: str, url: str, ep_params: Dict = None,
                      data: Dict = None, **kwargs) -> (Result, Any):
        """Perform one HTTP request and return ``(Result, body_text)``.

        :param http_method: HTTP verb understood by ``requests.request``.
        :param url: endpoint URL; ``{placeholder}`` fields are filled
            from **kwargs via ``str.format``.
        :param ep_params: query-string parameters (token is added here).
        :param data: JSON body for write requests.
        :raises TrefleException: on transport failure, undecodable body,
            or a status code outside 200-299.
        """
        if ep_params is None:
            # Fix: a caller passing ep_params=None previously crashed on
            # ep_params["token"] below.
            ep_params = {}
        if kwargs:
            url = url.format(**kwargs)
        ep_params["token"] = self._token
        # Security: keep the API token out of log output.
        loggable_params = {k: v for k, v in ep_params.items() if k != "token"}
        log_line_pre = f"method={http_method}, url={url}, params={loggable_params}"
        log_line_post = ', '.join((log_line_pre, "success={}, status_code={}, message={}"))
        # Log HTTP params and perform an HTTP request, catching and
        # re-raising any exceptions
        try:
            self._logger.debug(msg=log_line_pre)
            response = requests.request(method=http_method, url=url,
                                        params=ep_params,
                                        json=data,
                                        timeout=None)
        except requests.exceptions.RequestException as exception:
            self._logger.error(msg=(str(exception)))
            raise TrefleException("Request Failed") from exception
        # The raw body text is returned as-is; JSON decoding is left to
        # callers. The except clause is defensive only — Response.text
        # does not normally raise.
        try:
            data_out = response.text
        except (ValueError, JSONDecodeError) as exception:
            raise TrefleException("Bad JSON in response") from exception
        # If status_code in 200-299 range, return success Result with data,
        # otherwise raise exception
        is_success = 299 >= response.status_code >= 200  # 200 to 299 is OK
        log_line = log_line_post.format(is_success, response.status_code, response.reason)
        if is_success:
            self._logger.debug(msg=log_line)
            return Result(response.status_code, message=response.reason), data_out
        self._logger.error(msg=log_line)
        raise TrefleException(f"{response.status_code}: {response.reason}")

    def get(self, url: str, ep_params: Dict = None, **kwargs) -> (Result, Any):
        """GET *url*; returns ``(Result, body_text)``.

        Fix: URL placeholders were forwarded as a single ``kwargs=``
        keyword, so ``url.format(**kwargs)`` never saw them (and ran
        unconditionally on an always-truthy dict); they are now spread
        with ``**kwargs``.
        """
        if ep_params is None:
            ep_params = {}
        return self._make_request(http_method='get', url=url, ep_params=ep_params,
                                  **kwargs)

    def post(self, url: str, ep_params: Dict = None, data: Dict = None,
             **kwargs) -> (Result, Any):
        """POST *data* as JSON to *url*; returns ``(Result, body_text)``.

        Same ``**kwargs`` forwarding fix as :meth:`get`.
        """
        if ep_params is None:
            ep_params = {}
        return self._make_request(http_method='post', url=url, ep_params=ep_params,
                                  data=data, **kwargs)
| Overlrd/trefle | src/trefleapi/rest_adapter.py | rest_adapter.py | py | 2,929 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.