blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M โ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5d0e48c3c53e1cf2b510d508a8c698bdd55892a | c04cd0fb88a51670bc06247601dc44af7d16f50b | /Category/bar_charts_to_ppt.py | aed75fab2409c5f6fa575eeb6cba257004e7b3d9 | [] | no_license | slim-shah/Knime_Workflow_demo | e162593f5b468e490fec59dfdcfc17f2ca1d0ab2 | 871b9df14c865bdaf331eb8ea887006f66bd32e8 | refs/heads/master | 2021-04-15T17:15:14.045880 | 2018-03-26T09:53:33 | 2018-03-26T09:53:33 | 126,807,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from pptx import Presentation
#SLD_LAYOUT_TITLE_AND_CONTENT = 1
# Append one picture slide (with a title) to an existing presentation.
prs = Presentation('Growth of Category.pptx')
slide = prs.slides.add_slide(prs.slide_layouts[9])  # layout index 9 -- assumes the template defines it; verify
title_placeholder = slide.shapes.title
# BUG FIX: the original rebound `title_placeholder` to the string 'Hello',
# discarding the placeholder object and never titling the slide.
# The slide title is set through the placeholder's .text attribute.
title_placeholder.text = 'Hello'
# Picture geometry as fractions of the slide size (computed but only used
# by the commented-out add_picture call below).
pic_left = int(prs.slide_width * 0.15)
pic_top = int(prs.slide_height * 0.1)
pic_width = int(prs.slide_width * 0.7)
placeholder = slide.placeholders[1]  # assumes placeholder 1 is a picture placeholder -- confirm against layout
picture = placeholder.insert_picture('Certificates Others.png')
placeholder1 = slide.placeholders[0]
placeholder1.text = 'sdadas'
#pic = slide.shapes.add_picture('Certificates Others.png', pic_left, pic_top, pic_width)
prs.save('Growth of Category.pptx') | [
"slimshahda97@gmail.com"
] | slimshahda97@gmail.com |
5227f9d6b5eb4371104a5de1424d2bec42568366 | 59beef06efb305f1b39e85cfdfb69221241ff9df | /pororo/tasks/paraphrase_generation.py | 2225891c0cd3ddef948906807b8e8bda4a265419 | [
"Apache-2.0",
"BSD-3-Clause",
"HPND",
"MIT",
"ISC",
"Python-2.0",
"Unlicense",
"LicenseRef-scancode-secret-labs-2011",
"BSD-2-Clause"
] | permissive | saimishra/pororo | 747bb506f92b35a9da7e18f43e20d4954db5d985 | 1f61b331d07c22fb9e8ee051a5f6c0c91ae67062 | refs/heads/master | 2023-02-28T09:06:24.492863 | 2021-02-03T08:19:25 | 2021-02-03T08:19:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,253 | py | """Paraphrase Generation modeling class"""
from typing import Optional
from pororo.tasks.utils.base import PororoFactoryBase, PororoGenerationBase
from pororo.tasks.utils.download_utils import download_or_load
class PororoParaphraseFactory(PororoFactoryBase):
    """
    Paraphrase generation using Transformer Seq2Seq models.

    Multi (`transformer.large.multi.mtpg`)
        - dataset: Internal data
        - metric: BLEU score
            Average 33.00 | English 54 | Korean 50 | Japanese 20 | Chinese 8

    Multi (`transformer.large.multi.fast.mtpg`)
        - dataset: Internal data
        - metric: BLEU score
            Average 33.50 | English 56 | Korean 50 | Japanese 20 | Chinese 8

    Args:
        text (str): input sentence to be paraphrase generated
        beam (int): beam search size
        temperature (float): temperature scale
        top_k (int): top-K sampling vocabulary size
        top_p (float): top-p sampling ratio
        no_repeat_ngram_size (int): no repeat ngram size
        len_penalty (float): length penalty ratio

    Returns:
        str: generated paraphrase

    Examples:
        >>> pg = Pororo(task="pg", lang="ko")
        >>> pg("노래겠 제일 좋아. 친구들 모여라. 언제나 즐거워.")
        '노래 것이 가장 좋습니다. 친구들끼리 모여 주세요. 언제나 즐거운 시간 되세요.'
        >>> pg = Pororo("pg", lang="zh")
        >>> pg("我喜欢足球")  # "I like football"
        '我喜欢的球球'  # "I like the ball"
        >>> pg = Pororo(task="pg", lang="ja")
        >>> pg("雨の日に聴くと良い音楽をお勧めしてくれた。")  # recommend music for a rainy day
        '雨の日に聴くいい音楽を教えてください。'
        >>> pg = Pororo("pg", lang="en")
        >>> pg("There is someone at the door.")
        "Someone's at the door."
        >>> pg("I'm good, but thanks for the offer.")
        "I'm fine, but thanks for the deal."
    """

    def __init__(self, task: str, lang: str, model: Optional[str]):
        super().__init__(task, lang, model)

    @staticmethod
    def get_available_langs():
        # Languages with at least one paraphrase checkpoint available.
        return ["en", "ko", "zh", "ja"]

    @staticmethod
    def get_available_models():
        # Selectable model identifiers per language; the `multi` checkpoints
        # are shared across all four languages.
        return {
            "en": [
                "transformer.large.multi.mtpg",
                "transformer.large.multi.fast.mtpg",
                "transformer.base.en.pg",
            ],
            "ko": [
                "transformer.large.multi.mtpg",
                "transformer.large.multi.fast.mtpg",
                "transformer.base.ko.pg_long",
                "transformer.base.ko.pg",
            ],
            "zh": [
                "transformer.large.multi.mtpg",
                "transformer.large.multi.fast.mtpg",
                "transformer.base.zh.pg",
            ],
            "ja": [
                "transformer.large.multi.mtpg",
                "transformer.large.multi.fast.mtpg",
                "transformer.base.ja.pg",
            ],
        }

    def load(self, device: str):
        """
        Load user-selected task-specific model.

        Args:
            device (str): device information

        Returns:
            object: User-selected task-specific model
        """
        # Multilingual checkpoint: one shared fairseq model + one tokenizer
        # covering all four languages.
        if "multi" in self.config.n_model:
            from fairseq.models.transformer import TransformerModel
            from pororo.tasks.utils.tokenizer import CustomTokenizer
            load_dict = download_or_load(
                f"transformer/{self.config.n_model}",
                "multi",
            )
            model = (TransformerModel.from_pretrained(
                model_name_or_path=load_dict.path,
                checkpoint_file=f"{self.config.n_model}.pt",
                data_name_or_path=load_dict.dict_path,
                source_lang=load_dict.src_dict,
                target_lang=load_dict.tgt_dict,
            ).eval().to(device))
            tokenizer = CustomTokenizer.from_file(
                vocab_filename=f"{load_dict.src_tok}/vocab.json",
                merges_filename=f"{load_dict.src_tok}/merges.txt",
            )
            return PororoTransformerTransMulti(
                model,
                self.config,
                tokenizer,
            )
        # Monolingual checkpoint: Chinese runs without a subword tokenizer
        # (character-level preprocessing instead).
        if "transformer" in self.config.n_model:
            from fairseq.models.transformer import TransformerModel
            load_dict = download_or_load(
                f"transformer/{self.config.n_model}",
                self.config.lang,
            )
            tokenizer = None
            model = (TransformerModel.from_pretrained(
                model_name_or_path=load_dict.path,
                checkpoint_file=f"{self.config.n_model}.pt",
                data_name_or_path=load_dict.dict_path,
                source_lang=load_dict.src_dict,
                target_lang=load_dict.tgt_dict,
            ).eval().to(device))
            if self.config.lang != "zh":
                from pororo.tasks.utils.tokenizer import CustomTokenizer
                tokenizer = CustomTokenizer.from_file(
                    vocab_filename=f"{load_dict.src_tok}/vocab.json",
                    merges_filename=f"{load_dict.src_tok}/merges.txt",
                )
            return PororoTransformerParaphrase(model, self.config, tokenizer)
class PororoTransformerTransMulti(PororoGenerationBase):
    """Multilingual (en/ko/zh/ja) Transformer wrapper used for paraphrasing:
    the input is "translated" back into its own language."""

    def __init__(self, model, config, tokenizer):
        super().__init__(config)
        self._model = model          # fairseq TransformerModel hub interface
        self._tokenizer = tokenizer  # subword tokenizer (used for English input)
        # Suffix of the fairseq multilingual language token per language code.
        self._mapping = {"en": "_XX", "ja": "_XX", "ko": "_KR", "zh": "_CN"}

    def _langtok(self, lang: str):
        """
        Build the multilingual language token, e.g. "[en_XX]".

        Args:
            lang (str): language code

        See Also:
            https://github.com/pytorch/fairseq/blob/master/fairseq/data/multilingual/multilingual_utils.py#L34
        """
        return f"[{lang + self._mapping[lang]}]"

    def _preprocess(self, text: str) -> str:
        """
        Tokenize the input and wrap it in language tokens.

        English is segmented with the subword tokenizer; other languages are
        split into characters with "▁" standing in for original whitespace.

        Args:
            text (str): input sentence

        Returns:
            str: preprocessed sentence, framed by the language token
        """
        if self.config.lang == "en":
            pieces = " ".join(self._tokenizer.segment(text.strip()))
        else:
            pieces = " ".join([c if c != " " else "▁" for c in text.strip()])
        return f"{self._langtok(self.config.lang)} {pieces} {self._langtok(self.config.lang)}"

    def _postprocess(self, output: str) -> str:
        """
        Undo the preprocessing spacing: drop inter-token spaces and turn the
        "▁" whitespace markers back into real spaces.

        Args:
            output (str): output sentence generated by model

        Returns:
            str: postprocessed output sentence
        """
        return output.replace(" ", "").replace("▁", " ").strip()

    def predict(
        self,
        text: str,
        beam: int = 5,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
    ) -> str:
        """
        Generate a paraphrase by running the multilingual model with the
        input's own language as both source and target.

        Args:
            text (str): input sentence to be paraphrase generated
            beam (int): beam search size
            temperature (float): temperature scale
            top_k (int): top-K sampling vocabulary size (-1 disables)
            top_p (float): top-p sampling ratio (-1 disables)
            no_repeat_ngram_size (int): no repeat ngram size
            len_penalty (float): length penalty ratio

        Returns:
            str: generated paraphrase
        """
        text = self._preprocess(text)
        # Beam search by default; any explicit top-k/top-p switches to sampling.
        sampling = False
        if top_k != -1 or top_p != -1:
            sampling = True
        output = self._model.translate(
            text,
            beam=beam,
            sampling=sampling,
            temperature=temperature,
            sampling_topk=top_k,
            sampling_topp=top_p,
            max_len_a=1,
            max_len_b=50,
            no_repeat_ngram_size=no_repeat_ngram_size,
            lenpen=len_penalty,
        )
        output = self._postprocess(output)
        return output

    def __call__(
        self,
        text: str,
        beam: int = 5,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
    ):
        # Thin validating wrapper around predict().
        assert isinstance(text, str), "Input text should be string type"
        return self.predict(
            text,
            beam,
            temperature,
            top_k,
            top_p,
            no_repeat_ngram_size,
            len_penalty,
        )
class PororoTransformerParaphrase(PororoGenerationBase):
    """Monolingual Transformer seq2seq wrapper for paraphrase generation."""

    def __init__(self, model, config, tokenizer):
        super().__init__(config)
        self._model = model
        self._tokenizer = tokenizer  # None for Chinese (character-level input)

    def _preprocess(self, text: str):
        """Segment a non-Chinese sentence into space-joined subword pieces."""
        return " ".join(self._tokenizer.segment(text.strip()))

    def _zh_preprocess(self, text: str):
        """Split a Chinese sentence into space-joined single characters."""
        return " ".join(text)

    def _postprocess(self, output: str):
        """Collapse model token spacing and restore the "▁" whitespace marks."""
        collapsed = output.replace(" ", "")
        return collapsed.replace("▁", " ").strip()

    def predict(
        self,
        text: str,
        beam: int = 1,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
    ):
        """
        Generate a paraphrase of *text* with the underlying Transformer.

        Args:
            text (str): input sentence to be paraphrase generated
            beam (int): beam search size
            temperature (float): temperature scale
            top_k (int): top-K sampling vocabulary size (-1 disables)
            top_p (float): top-p sampling ratio (-1 disables)
            no_repeat_ngram_size (int): no repeat ngram size
            len_penalty (float): length penalty ratio

        Returns:
            str: generated paraphrase
        """
        # Sampling is enabled as soon as either top-k or top-p is requested.
        sampling = top_k != -1 or top_p != -1

        # Chinese models ship without a tokenizer and use per-character input.
        if self._tokenizer is None:
            source = self._zh_preprocess(text)
        else:
            source = self._preprocess(text)

        generated = self._model.translate(
            source,
            beam=beam,
            sampling=sampling,
            temperature=temperature,
            sampling_topk=top_k,
            sampling_topp=top_p,
            max_len_a=1,
            max_len_b=50,
            no_repeat_ngram_size=no_repeat_ngram_size,
            lenpen=len_penalty,
        )
        return self._postprocess(generated)
| [
"huffonism@gmail.com"
] | huffonism@gmail.com |
56e4f4d212052b25d4ddeb66cb7f8131c3cad155 | 24b45cd696fcb2001019b21ec5fb5bcf9f5794f9 | /angularJSversion.py | 8bbe5264df723abda98de28e6a4f500a03d1625b | [] | no_license | Bazskillz/coala-bears | 492d11343c7fa16bc4def624d30d66411c8573bb | 7db69b73cc73a9524b17699e59f5c4624ee2d9da | refs/heads/main | 2023-02-11T09:33:57.111982 | 2021-01-07T21:21:15 | 2021-01-07T21:21:15 | 314,247,723 | 0 | 0 | null | 2021-01-07T21:01:12 | 2020-11-19T12:54:29 | Python | UTF-8 | Python | false | false | 450 | py | import logging
from coalib.bears.LocalBear import LocalBear
class angularJSversion(LocalBear):
    """Bear that reports lines declaring an ``"angularjs"`` dependency."""

    def run(self,
            filename,
            file):
        """Scan *file* line by line and yield a result for every line that
        contains an ``"angularjs": `` key (e.g. a package.json dependency).

        :param filename: Name of the file being checked.
        :param file:     Iterable of the file's lines.
        """
        for line in file:
            if '"angularjs": ' in line:
                logging.debug("HIT!")
                # NOTE(review): `line` still carries its trailing newline, so
                # the yielded message contains a line break -- confirm intended.
                yield self.new_result("Found angular version which is:" + line + ".", file=filename)
            else:
                logging.debug("Checking line")
| [
"noreply@github.com"
] | noreply@github.com |
719131a310f356ebd894b51e12750e04e1d0b138 | 79bcfe1ef0cd82bca98b367d574d5ff6bfc7c27f | /codigo/rugbymofreak/src/classification_tests.py | c49e81eeaad41280ae6a8b65debcb827681d163c | [] | no_license | luciamr/tesina | a962819087f56462d92849c97297a16e9cbefe11 | d927c2f7d79131c7d9cc922338d41761a2c03993 | refs/heads/master | 2021-03-27T13:38:12.964831 | 2016-10-06T16:06:06 | 2016-10-06T16:06:06 | 27,589,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,359 | py | # This script is used to test different classifiers on the mofreak data. Quick and dirty script, not necessarily extensible.
import numpy
import pylab
from sklearn import svm, pipeline
from sklearn.kernel_approximation import RBFSampler, AdditiveChi2Sampler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
# Constants describing the KTH action-recognition dataset layout.
NUMBER_OF_ACTIONS = 6        # boxing, handclapping, walking, jogging, running, handwaving
NUMBER_OF_PEOPLE = 25        # subjects in the dataset
NUMBER_OF_VIDEOS_PER_PERSON_PER_ACTION = 4
NUMBER_OF_CLUSTERS = 600     # bag-of-words vocabulary size (feature dimension)
NUMBER_OF_VIDEOS = 599       # 25 * 6 * 4 minus the one missing video (person 13)
FENG = True                  # NOTE(review): never read anywhere below -- confirm it is dead
EHSAN = False                # selects alternate STEP/missing-video constants in loadTrainingAndTestData
# this class essentially implements an enum.
# For example, x = Labeling.BOXING (1)
class Labeling:
    """Enum-like holder of the numeric class id for each KTH action."""
    BOXING = 1
    HANDCLAPPING = 2
    WALKING = 3
    JOGGING = 4
    RUNNING = 5
    HANDWAVING = 6

# Dispatch table from action-folder name to its numeric Labeling value;
# replaces the previous if/elif chain.
_ACTION_LABELS = {
    "boxing": Labeling.BOXING,
    "handclapping": Labeling.HANDCLAPPING,
    "walking": Labeling.WALKING,
    "jogging": Labeling.JOGGING,
    "running": Labeling.RUNNING,
    "handwaving": Labeling.HANDWAVING,
}

def convertLabel(s):
    """Map an action name (e.g. "boxing") to its numeric class label.

    Prints the offending name and exits on an unseen class, matching the
    original behavior. (The original also had an unreachable `return -1`
    after exit(), which is removed.)
    """
    try:
        return _ACTION_LABELS[s]
    except KeyError:
        print(s)
        exit()  # error. unseen class.
def histogram_intersection(A, B):
    """Histogram-intersection similarity of two 1-D histograms A and B.

    Returns sum_i min(A[i], B[i]). Vectorized with numpy.minimum instead of
    the previous Python-level loop over bins.
    """
    return numpy.minimum(A, B).sum()
def intersection(A, B):
    """Histogram-intersection kernel matrix between the rows of A and B.

    A: (n, d) array of histograms; B: (m, d) array of histograms.
    Returns an (n, m) array where entry (i, j) = sum_k min(A[i, k], B[j, k]).
    Suitable as a custom kernel for sklearn's svm.SVC(kernel=intersection).

    Each row of A is broadcast against all of B at once, replacing the
    previous O(n*m*d) pure-Python double loop while keeping memory bounded
    to one (m, d) temporary per row.
    """
    rows = A.shape[0]
    kernel = numpy.zeros(shape = (rows, B.shape[0]))
    for row in range(rows):
        kernel[row, :] = numpy.minimum(A[row, :], B).sum(axis = 1)
    return kernel
# reprocess data so that the first column is a numeric value
# this value corresponds to the class.
def reprocessData(file_path):
    """Rewrite a histogram file so its label column is numeric.

    The input's first column is a Windows-style path whose component at
    DEPTH_OF_LABELING names the action class; it is replaced by the numeric
    label from convertLabel(). Output goes to "<file_path>.reprocessed.txt".

    Files are now opened via `with`, so handles are closed even on error
    (the original leaked them if convertLabel exited mid-file).
    """
    DEPTH_OF_LABELING = 4  # index of the action-name component in the path
    with open(file_path, 'r') as f, open(file_path + ".reprocessed.txt", 'w') as out:
        for line in f:
            # first column is the label, remainder are features.
            label_and_features = line.split(",")
            # label is currently a backslash-separated path; pull the class
            # name out of it and reprint the line with the numeric label.
            words = label_and_features[0].split("\\")
            new_line = str(convertLabel(words[DEPTH_OF_LABELING]))
            new_line += ","
            new_line += ",".join(label_and_features[1:])
            out.write(new_line)
# load and parse data for SVM
def loadTrainingAndTestData(features_file, labels_file):
    """Load histogram features + labels and group them by person.

    Returns (grouped_data, grouped_labels): one (videos, NUMBER_OF_CLUSTERS)
    feature matrix and one (videos, 3) label matrix per person, so that
    leave-one-person-out splits are trivial to assemble.

    Assumes labels_file column 1 holds the 1-based person index and that
    videos for a person/action are stored in contiguous runs of STEP rows
    (TODO confirm against the exporter that wrote these files).
    """
    # group by person.
    grouped_data = []
    grouped_labels = []
    current_indices = []
    label_data = numpy.genfromtxt(labels_file, delimiter = ',')
    training_data = numpy.genfromtxt(features_file, delimiter = ',')
    # group data by people, so we can easily leave-one-out.
    for i in xrange(NUMBER_OF_PEOPLE):
        # person 13 is missing one video, so that group gets one fewer row.
        MISSING_VIDEO_OFFSET = 0
        if i == 12:
            MISSING_VIDEO_OFFSET = -1
        data = numpy.zeros(shape = (NUMBER_OF_ACTIONS * NUMBER_OF_VIDEOS_PER_PERSON_PER_ACTION + MISSING_VIDEO_OFFSET, NUMBER_OF_CLUSTERS))
        labels = numpy.zeros(shape = (NUMBER_OF_ACTIONS * NUMBER_OF_VIDEOS_PER_PERSON_PER_ACTION + MISSING_VIDEO_OFFSET, 3))
        grouped_data.append(data)
        grouped_labels.append(labels)
        current_indices.append(0) # track current row in each group
    i = 0
    # STEP rows at a time belong to the same person; MISSING_VIDEO_I is the
    # row where person 13's missing video would have been.
    STEP = NUMBER_OF_VIDEOS_PER_PERSON_PER_ACTION
    MISSING_VIDEO_I = 148
    if EHSAN:
        # alternate dataset layout (currently identical constants)
        STEP = NUMBER_OF_VIDEOS_PER_PERSON_PER_ACTION
        MISSING_VIDEO_I = 148
    while i < NUMBER_OF_VIDEOS:
        person_index = int(label_data[i, 1])
        current_index = current_indices[person_index - 1]
        # slice corresponding piece of matrix from training data into grouping.
        if i == MISSING_VIDEO_I:
            # copy one fewer row to account for the missing video.
            grouped_data[person_index - 1][current_index : current_index + STEP - 1, :] = training_data[i : i + STEP - 1, :]
            grouped_labels[person_index - 1][current_index : current_index + STEP - 1, :] = label_data[i : i + STEP - 1, :]
            current_indices[person_index - 1] += STEP - 1
            i += STEP - 1
        else:
            print i
            grouped_labels[person_index - 1][current_index : current_index + STEP, :] = label_data[i : i + STEP, :]
            grouped_data[person_index - 1][current_index : current_index + STEP, :] = training_data[i : i + STEP, :]
            current_indices[person_index - 1] += STEP
            i += STEP
    return grouped_data, grouped_labels
def generateAllPossibleLeaveOneOutCombosForLibSVM(grouped_data, grouped_labels):
    """Write a libsvm-format train/test file pair for every
    leave-one-person-out split of the grouped data."""
    for held_out in xrange(NUMBER_OF_PEOPLE):
        # The held-out person's videos form the test set unchanged.
        test_features = grouped_data[held_out]
        test_labels = grouped_labels[held_out]

        # Everyone else's videos, stacked in person order, form the train set.
        feature_parts = [grouped_data[p] for p in xrange(NUMBER_OF_PEOPLE) if p != held_out]
        label_parts = [grouped_labels[p] for p in xrange(NUMBER_OF_PEOPLE) if p != held_out]
        train_features = numpy.concatenate(feature_parts, axis = 0)
        train_labels = numpy.concatenate(label_parts, axis = 0)

        # Emit the pair of files for this split.
        training_filename = "C:/data/kth/lucia/run1/left_out_" + str(held_out + 1) + ".train"
        setupInLibsvmFormat(train_features, train_labels, training_filename)
        testing_filename = "C:/data/kth/lucia/run1/left_out_" + str(held_out + 1) + ".test"
        setupInLibsvmFormat(test_features, test_labels, testing_filename)
# Build an SVM with a chi-squared kernel for accurate recognition.
def buildClassifiers(grouped_data, grouped_labels):
scores = []
for left_out_person in xrange(NUMBER_OF_PEOPLE):
rows = NUMBER_OF_VIDEOS - grouped_data[left_out_person].shape[0]
cols = NUMBER_OF_CLUSTERS
# the testing data is simply the data from the left out person.
testing_data = grouped_data[left_out_person]
testing_labels = grouped_labels[left_out_person]
# build the training data by concatenating all of the data from each person except the left out person.
training_data = numpy.zeros(shape = (rows, cols))
training_labels = numpy.zeros(shape = (rows, 3))
current_index = 0
for training_person in xrange(NUMBER_OF_PEOPLE):
# don't add the left out person to the training set, clearly..
if training_person == left_out_person:
continue
new_rows = grouped_data[training_person].shape[0]
training_data[current_index : current_index + new_rows, :] = grouped_data[training_person]
training_labels[current_index : current_index + new_rows, :] = grouped_labels[training_person]
current_index += new_rows
# for now, remove all columns from labels except first.
training_labels = training_labels[:, 0]
testing_labels = testing_labels[:, 0]
print "made it to training"
#kernel_svm = svm.SVC(gamma = .2, degree = 100)
#linear_svm = svm.LinearSVC()
new_svm = svm.SVC(kernel = intersection)
rf = RandomForestClassifier(n_estimators = 300, min_samples_split = 2, n_jobs = -1, oob_score = True)
# create a pipeline for kernel approximation
#feature_map = RBFSampler(gamma = .2, random_state = 1)
#feature_map = AdditiveChi2Sampler()
#approx_kernel_svm = pipeline.Pipeline([("feature_map", feature_map), ("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm.
new_svm.fit(training_data, training_labels)
new_svm_score = new_svm.score(testing_data, testing_labels)
rf.fit(training_data, training_labels)
rf_score = rf.score(testing_data, testing_labels)
#kernel_svm.fit(training_data, training_labels)
#kernel_svm_score = kernel_svm.score(testing_data, testing_labels)
#linear_svm.fit(training_data, training_labels)
#linear_svm_score = linear_svm.score(testing_data, testing_labels)
#approx_kernel_svm.fit(training_data, training_labels)
#cs_score = approx_kernel_svm.score(testing_data, testing_labels)
#score_set = [new_svm_score, kernel_svm_score, linear_svm_score, score]
score_set = [new_svm_score, rf_score]#, cs_score, rf_score]
scores.append(score_set)
#print "linear score: ", linear_svm_score
#print "kernel score: ", kernel_svm_score
print "histogram intersection score: ", new_svm_score
#print "approx chi-squared score: ", cs_score
print "RF score: ", rf_score
# for now, return this for plotting.
print scores
print "done."
print "length of scores: ", len(scores)
summed_chisquared_score = 0
summed_hi_score = 0
summed_rf_score = 0
for i in xrange(NUMBER_OF_PEOPLE):
summed_hi_score += scores[i][0]
#summed_chisquared_score += scores[i][0]
#summed_hi_score += scores[i][1]
summed_rf_score += scores[i][1]
#avg_cs_score = summed_chisquared_score/float(NUMBER_OF_PEOPLE)
avg_hi_score = summed_hi_score/float(NUMBER_OF_PEOPLE)
#avg_rf_score = summed_rf_score/float(NUMBER_OF_PEOPLE)
#print "Chi-squared average: ", avg_cs_score
print "HI average: ", avg_hi_score
print "RF average: ", avg_rf_score
return linear_svm
# visualization based on code from
# http://scikit-learn.org/dev/auto_examples/plot_kernel_approximation.html#example-plot-kernel-approximation-py
def visualize(training_features, training_labels, linear_svm):
    """Plot the SVM decision surface projected onto the first two principal
    components of the training data.

    NOTE(review): marked "[broken]" at the call site in __main__ -- the
    caller's variable names don't exist there; confirm before re-enabling.
    """
    # project the decision surface down to the 2 principal components of the dataset
    # enables us to visualize the dataset in 2D.
    pca = PCA(n_components = 2).fit(training_features)
    X = pca.transform(training_features)
    # generate grid along first 2 princ comps
    multiples = numpy.arange(-2, 2, 0.1) # from -2 to 2, on intervals of size 0.1
    # steps along first component
    first = multiples[:, numpy.newaxis] * pca.components_[0, :]
    # 2nd.
    second = multiples[:, numpy.newaxis] * pca.components_[1, :]
    # combine them
    grid = first[numpy.newaxis, :, :] + second[:, numpy.newaxis, :]
    # flatten back to feature-space points so the SVM can predict on them
    flat_grid = grid.reshape(-1, training_features.shape[1]) # this was data, not training_features
    # title for the plots
    titles = ['Linear SVM']
    pylab.figure(figsize = (12, 5))
    # predict and plot
    pylab.subplot(1, 2, 1)
    Z = linear_svm.predict(flat_grid)
    # put the result into a colour plot
    Z = Z.reshape(grid.shape[:-1])
    pylab.contourf(multiples, multiples, Z, cmap = pylab.cm.Paired)
    pylab.axis('off')
    # plot the training points.
    pylab.scatter(X[:, 0], X[:, 1], c = training_labels, cmap = pylab.cm.Paired)
    pylab.title(titles[0])
    pylab.show()
    return
def setupInLibsvmFormat(training_data, label_data, output_filename):
    """Write (features, labels) to *output_filename* in libsvm text format.

    Each output line is "<label> 1:<f1> 2:<f2> ... \n" where the label is the
    integer in column 0 of label_data and features are 1-indexed. The file is
    now opened with `with` so the handle is closed even on error, and each
    line is assembled with str.join instead of repeated concatenation.
    """
    with open(output_filename, "w") as f:
        for row in range(label_data.shape[0]):
            fields = [str(int(label_data[row, 0]))]
            for col in range(training_data.shape[1]):
                fields.append(str(col + 1) + ":" + str(training_data[row, col]))
            # trailing space before the newline matches the original writer.
            f.write(" ".join(fields) + " \n")
# entry point
if __name__ == '__main__':
    data = "C:/data/kth/lucia/run1/hist.txt"      # per-video histogram features (CSV)
    labels = "C:/data/kth/lucia/run1/label.txt"   # per-video label rows (CSV)
    # Step 1: Reprocess the data into the desired format.
    # NOTE(review): label_data / training_data are loaded here but unused by
    # the active code path below -- confirm they can be removed.
    label_data = numpy.genfromtxt(labels, delimiter = ',')
    training_data = numpy.genfromtxt(data, delimiter = ',')
    #setupInLibsvmFormat(training_data, label_data, "entire_dataset_libsvm.txt")
    #file_path = "C:/data/kth/histogramsDev.txt"
    #reprocessData(file_path)
    # Step 2: Load new data into label/feature arrays, grouped by person.
    grouped_data, grouped_labels = loadTrainingAndTestData(data, labels)
    # Step 2.5: Export all possible leave-one-out combos to libsvm format.
    #generateAllPossibleLeaveOneOutCombosForLibSVM(grouped_data, grouped_labels)
    # Step 3: Build classifiers.
    linear_svm = buildClassifiers(grouped_data, grouped_labels)
    # Step 4: Visualize. [broken] [todo]
    #visualize(training_features, training_labels, linear_svm)
| [
"lucia@cifasis-conicet.gov.ar"
] | lucia@cifasis-conicet.gov.ar |
5a4c47575345054bc275919e15ab49390207af4c | cfbb78a82b9ee9b125eb11b1a7abf607704a0b2d | /scatter.py | 74d23eb7a900b0bb81f198575473a450cf6e7323 | [] | no_license | dimpy-chhabra/sup | 4323b180f89a8188010844c4ee7f0b24015734f9 | 69ade5c0984bd9d124afe6db78e8e7762b94b958 | refs/heads/master | 2020-03-08T08:10:27.263963 | 2018-04-26T18:42:59 | 2018-04-26T18:42:59 | 128,014,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import matplotlib.pyplot
import pylab
import csv
import pandas as pd
# Load sentiment scores extracted from news articles about two subjects.
# Assumes both CSVs have 'source' and 'desc_polarity' columns -- TODO confirm.
dataSet_1 = pd.read_csv('data_news_hillary.csv')
dataSet_2 = pd.read_csv('data_news_Trump.csv')
# Expected columns:
#source_name desc_subjectivity desc_polarity titl_subjectivity titl_polarity
y = dataSet_2.source   # Trump sources (only used by the commented-out blue plot)
y2 = dataSet_1.source  # Hillary sources
#data_1 = dataSet_1.desc_polarity
data_2 = dataSet_2.desc_polarity  # Trump description polarity (currently unplotted)
data_1 = dataSet_1.desc_polarity  # Hillary description polarity
#matplotlib.pyplot.scatter(x,y)
# Scatter of article source vs. description polarity for the Hillary data.
matplotlib.pyplot.scatter(y2, data_1 , color='red')
#matplotlib.pyplot.scatter(y, data_2, color='blue')
matplotlib.pyplot.show()
| [
"dimpychhabra@yahoo.co.uk"
] | dimpychhabra@yahoo.co.uk |
7e7e0400ffe2834140e94109ce4329b16205fa98 | 3034e86347c71bf7e7af9e5f7aa44ab5ad61e14b | /pbase/day18/classMyList.py | 814391481cac5af7082c88e154e183db16732870 | [] | no_license | jason12360/AID1803 | bda039b82f43d6609aa8028b0d9598f2037c23d5 | f0c54a3a2f06881b3523fba7501ab085cceae75d | refs/heads/master | 2020-03-17T00:43:42.541761 | 2018-06-29T10:07:44 | 2018-06-29T10:07:44 | 133,127,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | class Mylist(list):
def insert_head(self,element):
self[0:0] = [element]
# Demo: build [0..4], prepend -1, and show the result.
L = Mylist(range(5))
L.insert_head(-1)
print(L)  # expected: [-1, 0, 1, 2, 3, 4]
| [
"370828117@qq.com"
] | 370828117@qq.com |
a41a3904dfdcdee0e727b00a0ec2e8dfc1657f97 | 173e0aed80b0d0c01252dd2891be6967f60ce008 | /healthcare/api-client/v1/hl7v2/hl7v2_messages_test.py | 06ae20b41a350c427386f1d25cb88cd42f6f9ec2 | [
"Apache-2.0"
] | permissive | yuriatgoogle/python-docs-samples | a59298504c73d7f272637b033662d920dfcc314b | 9fb1bf82b447e920fe9b80564cc110d1e50f43ab | refs/heads/master | 2023-04-08T03:36:18.691386 | 2021-02-28T17:17:57 | 2021-02-28T17:17:57 | 337,138,011 | 1 | 0 | Apache-2.0 | 2021-02-28T17:17:58 | 2021-02-08T16:30:52 | Python | UTF-8 | Python | false | false | 7,415 | py | # Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import uuid
import backoff
from googleapiclient.errors import HttpError
import pytest
# Add datasets for bootstrapping datasets for testing
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "datasets")) # noqa
import datasets # noqa
import hl7v2_messages # noqa
import hl7v2_stores # noqa
cloud_region = "us-central1"
project_id = os.environ["GOOGLE_CLOUD_PROJECT"]  # must be set in the test environment
# Unique per-run resource ids so parallel test runs don't collide.
dataset_id = "test_dataset_{}".format(uuid.uuid4())
hl7v2_store_id = "test_hl7v2_store-{}".format(uuid.uuid4())
hl7v2_message_file = "resources/hl7-sample-ingest.json"
# Label key/value applied in the patch test.
label_key = "PROCESSED"
label_value = "TRUE"
@pytest.fixture(scope="module")
def test_dataset():
    """Module-scoped fixture: create the shared test dataset, yield for the
    tests, then delete it. Transient HttpErrors are retried with backoff."""

    @backoff.on_exception(backoff.expo, HttpError, max_time=60)
    def create():
        try:
            datasets.create_dataset(project_id, cloud_region, dataset_id)
        except HttpError as err:
            # We ignore 409 conflict here, because we know it's most
            # likely the first request failed on the client side, but
            # the creation succeeded on the server side.
            if err.resp.status == 409:
                print("Got exception {} while creating dataset".format(err.resp.status))
            else:
                raise

    create()

    yield

    # Clean up
    @backoff.on_exception(backoff.expo, HttpError, max_time=60)
    def clean_up():
        try:
            datasets.delete_dataset(project_id, cloud_region, dataset_id)
        except HttpError as err:
            # The API returns 403 when the dataset doesn't exist.
            if err.resp.status == 404 or err.resp.status == 403:
                print("Got exception {} while deleting dataset".format(err.resp.status))
            else:
                raise

    clean_up()
@pytest.fixture(scope="module")
def test_hl7v2_store():
    """Module-scoped fixture: create the shared HL7v2 store inside the test
    dataset, yield for the tests, then delete it (with backoff retries)."""

    @backoff.on_exception(backoff.expo, HttpError, max_time=60)
    def create():
        try:
            hl7v2_stores.create_hl7v2_store(
                project_id, cloud_region, dataset_id, hl7v2_store_id
            )
        except HttpError as err:
            # We ignore 409 conflict here, because we know it's most
            # likely the first request failed on the client side, but
            # the creation succeeded on the server side.
            if err.resp.status == 409:
                print(
                    "Got exception {} while creating HL7v2 store".format(
                        err.resp.status
                    )
                )
            else:
                raise

    create()

    yield

    # Clean up
    @backoff.on_exception(backoff.expo, HttpError, max_time=60)
    def clean_up():
        try:
            hl7v2_stores.delete_hl7v2_store(
                project_id, cloud_region, dataset_id, hl7v2_store_id
            )
        except HttpError as err:
            # The API returns 403 when the HL7v2 store doesn't exist.
            if err.resp.status == 404 or err.resp.status == 403:
                print(
                    "Got exception {} while deleting HL7v2 store".format(
                        err.resp.status
                    )
                )
            else:
                raise

    clean_up()
def test_CRUD_hl7v2_message(test_dataset, test_hl7v2_store, capsys):
    """Create an HL7v2 message, then list, get, and delete it, checking the
    helper functions' console output for each step."""
    hl7v2_messages.create_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, hl7v2_message_file
    )

    @backoff.on_exception(backoff.expo, AssertionError, max_time=60)
    def wait_for_message_id():
        # Listing is eventually consistent: retry until the new message shows
        # up and its resource name parses cleanly. The id is the 10th
        # component of projects/.../datasets/.../hl7V2Stores/.../messages/<id>.
        listed = hl7v2_messages.list_hl7v2_messages(
            project_id, cloud_region, dataset_id, hl7v2_store_id
        )
        assert listed
        name_parts = listed[0].get("name").split("/", 9)
        assert len(name_parts) >= 10
        return name_parts[9]

    message_id = wait_for_message_id()

    hl7v2_messages.get_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, message_id
    )
    hl7v2_messages.delete_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, message_id
    )

    out, _ = capsys.readouterr()

    # Check that create/get/list/delete worked
    assert "Created HL7v2 message" in out
    assert "Name" in out
    assert "Deleted HL7v2 message" in out
def test_ingest_hl7v2_message(test_dataset, test_hl7v2_store, capsys):
    """Ingest an HL7v2 message, then get, list, and delete it via the API."""
    hl7v2_messages.ingest_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, hl7v2_message_file
    )

    # Listing is eventually consistent; retry with backoff until the message
    # appears and its resource name parses.
    @backoff.on_exception(backoff.expo, AssertionError, max_time=60)
    def run_eventually_consistent_test():
        hl7v2_messages_list = hl7v2_messages.list_hl7v2_messages(
            project_id, cloud_region, dataset_id, hl7v2_store_id
        )
        assert len(hl7v2_messages_list) > 0
        # The message id is the last (10th) component of the resource name.
        hl7v2_message_name = hl7v2_messages_list[0].get("name")
        elms = hl7v2_message_name.split("/", 9)
        assert len(elms) >= 10
        hl7v2_message_id = elms[9]
        return hl7v2_message_id

    hl7v2_message_id = run_eventually_consistent_test()

    hl7v2_messages.get_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, hl7v2_message_id
    )
    hl7v2_messages.delete_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, hl7v2_message_id
    )

    out, _ = capsys.readouterr()

    # Check that ingest/get/list/delete worked
    assert "Ingested HL7v2 message" in out
    assert "Name" in out
    assert "Deleted HL7v2 message" in out
def test_patch_hl7v2_message(test_dataset, test_hl7v2_store, capsys):
    """Create an HL7v2 message, patch a label onto it, then delete it.

    Uses module-level constants/fixtures (project_id, cloud_region,
    dataset_id, hl7v2_store_id, hl7v2_message_file, label_key, label_value).
    """
    hl7v2_messages.create_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, hl7v2_message_file
    )

    # Also retries on HttpError here (unlike the other tests), since the
    # freshly created message may not be patchable immediately.
    @backoff.on_exception(backoff.expo, (AssertionError, HttpError), max_time=60)
    def run_eventually_consistent_test():
        hl7v2_messages_list = hl7v2_messages.list_hl7v2_messages(
            project_id, cloud_region, dataset_id, hl7v2_store_id
        )
        assert len(hl7v2_messages_list) > 0
        hl7v2_message_name = hl7v2_messages_list[0].get("name")
        # Message ID = 10th '/'-separated segment of the resource name.
        elms = hl7v2_message_name.split("/", 9)
        assert len(elms) >= 10
        hl7v2_message_id = elms[9]
        return hl7v2_message_id

    hl7v2_message_id = run_eventually_consistent_test()

    hl7v2_messages.patch_hl7v2_message(
        project_id,
        cloud_region,
        dataset_id,
        hl7v2_store_id,
        hl7v2_message_id,
        label_key,
        label_value,
    )

    hl7v2_messages.delete_hl7v2_message(
        project_id, cloud_region, dataset_id, hl7v2_store_id, hl7v2_message_id
    )

    out, _ = capsys.readouterr()

    # Check that patch worked
    assert "Patched HL7v2 message" in out
| [
"noreply@github.com"
] | noreply@github.com |
ae28b288e8588b7a164a13119aebe56843af8a10 | 89a90707983bdd1ae253f7c59cd4b7543c9eda7e | /programming_python/Dstruct/OldIntro/stack3.py | e1a2711fb805383ddc41021d2bab73940be3a07f | [] | no_license | timothyshull/python_reference_code | 692a7c29608cadfd46a6cc409a000023e95b9458 | f3e2205dd070fd3210316f5f470d371950945028 | refs/heads/master | 2021-01-22T20:44:07.018811 | 2017-03-17T19:17:22 | 2017-03-17T19:17:22 | 85,346,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | class Stack:
def __init__(self):
self.stack = [] # initialize list
def push(self, object):
self.stack.append(object) # change in-place
def pop(self):
top = self.stack[-1] # top = end
del self.stack[-1] # delete in-place
return top
def empty(self):
return not self.stack
| [
"timothyshull@gmail.com"
] | timothyshull@gmail.com |
afc602d9bdf3bf7677eb8bf36c2ee1c2b0579314 | d7d2536a22439178eadf92e92f99422dce6d1a0d | /useraccount/urls.py | 4b5f541cc49966640b2e6867b09b76ccaf95f807 | [] | no_license | nkpc14/PlantFlix | 56dac037f9a1cadd7f6b1b2f850511525b19ab78 | 0dd71d9a0ead1a8e33c8e29008e6469870809035 | refs/heads/master | 2020-09-03T07:39:18.139394 | 2019-11-07T16:20:57 | 2019-11-07T16:20:57 | 219,418,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from django.urls import path
from .views import *
urlpatterns = [
path('', PlantFlixUserList.as_view(), name='users-list'),
path('get/<int:pk>', UserView.as_view(), name='users-list'),
path('signup', PlantFlixUserCreate.as_view(), name='users-list'),
]
| [
"32247486+nkpc14@users.noreply.github.com"
] | 32247486+nkpc14@users.noreply.github.com |
af15bb2708ac33286e3cd7ea1907e886af99d6d6 | 127fa3dd454434b4c7526afe161177af2e10226e | /2018ๆ กๆ็้ข/็ฝๆ ๆไฝๅบๅ.py | bfda124710b7fbdb1440fc50027fa803d3ac8b78 | [] | no_license | lunar-r/sword-to-offer-python | 966c46a8ddcff8ce5c95697638c988d83da3beab | fab4c341486e872fb2926d1b6d50499d55e76a4a | refs/heads/master | 2023-04-18T18:57:12.126441 | 2020-11-29T09:51:23 | 2020-11-29T09:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | # -*- coding: utf-8 -*-
"""
File Name๏ผ ็ฝๆ ๆไฝๅบๅ
Description :
Author : simon
date๏ผ 19-4-9
"""
def helper(nums):
    """Recursively rearrange *nums*: the result is the last element followed
    by the reversed rearrangement of the remaining prefix.

    Base case is the exact list [1] (as in the original contest code), so the
    input is expected to start from [1, 2, ...].
    """
    if nums == [1]:
        return [1]
    prefix = helper(nums[:-1])
    prefix.reverse()
    return [nums[-1]] + prefix
"""
ๆพ่งๅพ [nums[-1], nums[-3], ...., nums[-4],nums[-2]]
"""
def helper2(nums):
    """Arrange reversed(nums) pairwise from the outside in.

    Consecutive pairs of the reversed input are written to the two ends of
    the result, walking toward the middle; with an odd length the last
    element of the reversed input lands in the center slot.
    """
    rev = list(reversed(nums))
    size = len(rev)
    out = ['*'] * size
    if size % 2:
        out[size // 2] = rev[-1]
    for idx in range(size // 2):
        out[idx] = rev[2 * idx]
        out[size - 1 - idx] = rev[2 * idx + 1]
    return out
"""
ๅ็ฐ็ๅฆๅคไธ็ง่งๅพ
"""
n = int(input().strip())
a = input().strip().split()
b = []
if n % 2 == 0:
b.extend(a[1::2][::-1])
b.extend(a[::2])
else:
b.extend(a[::2][::-1])
b.extend(a[1::2])
print(' '.join(b))
if __name__ == '__main__':
_ = input()
nums = input().strip().split(' ')
# nums = list(map(int, nums))
res = helper2(nums)
# res = list(map(str, res))
print(' '.join(res))
| [
"2711772037@qq.com"
] | 2711772037@qq.com |
c0a102ad95fccafa50e9a55290f3204b98a38ab8 | c56d3f1e7dde1c4556759b7005a60056003b7d54 | /scrapprj/middlewares.py | 61223d127e344b565e6bba1da0636086229472a9 | [] | no_license | alefrolov/scrapprj | b73c2f98775f7d1de1e2ec4987bbea5f6ef9bb07 | 70c470327df09dd7c87dbe2d8cb652aa6aa40285 | refs/heads/master | 2023-03-05T05:28:32.771100 | 2018-11-29T22:12:31 | 2018-11-29T22:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,601 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ScrapprjSpiderMiddleware(object):
    """Spider middleware for the scrapprj project (stock Scrapy template;
    every hook is a pass-through)."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class ScrapprjDownloaderMiddleware(object):
    """Downloader middleware for the scrapprj project (stock Scrapy template;
    every hook is a pass-through)."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"Alex@ya.ru"
] | Alex@ya.ru |
fb6a14738a0a1697a8faf85de8928e5f2f6051e5 | 7c3ae114503112f1435acfad9be6be041cb7762c | /robotarena/roborally/robots/scaredycat.py | 2bc67425c87df782442e76643477764b7a91b5ce | [
"Apache-2.0"
] | permissive | sdherr/robot-arena | e95cf559b86776b4aec05b93afb1b974049449ee | 0cca149a74cb758d617912042621f642ebac9f8e | refs/heads/master | 2021-01-22T06:40:27.127685 | 2017-02-13T03:30:43 | 2017-02-13T03:30:43 | 81,780,182 | 0 | 0 | null | 2017-02-13T03:17:28 | 2017-02-13T03:17:28 | null | UTF-8 | Python | false | false | 573 | py | import random
from roborally.api import *
def move():
    """Choose this robot's next move.

    Scores every move in MOVES (from roborally.api) with score() — lower is
    safer — and returns a uniformly random choice among the lowest-scoring
    moves.
    """
    my_moves = list(MOVES)
    scored_moves = {}
    for move in my_moves:
        scored_moves[move] = score(move)
    min_score = min(scored_moves.values())
    return random.choice([move for move in scored_moves if scored_moves[move] == min_score])
def score(move):
    # Score the move based on how much damage the robot will take if it makes the move
    """Estimate the danger of *move*: 20 (worst) for falling into a pit;
    otherwise the number of shooters returned by shot_by(move), plus 1 when
    *move* is a PRIORITY_MOVES entry and no charges remain.

    (falls_into_pit / shot_by / PRIORITY_MOVES / charges come from the
    roborally.api star import.)
    """
    if falls_into_pit(move):
        return 20
    else:
        move_score = len(shot_by(move))
        if move in PRIORITY_MOVES and charges() < 1:
            move_score += 1
        return move_score
| [
"ken.ganong@gmail.com"
] | ken.ganong@gmail.com |
f7c2aa264a5e5ac7293a87fa6c6ebe060740e356 | 43e8af55bd953a3e435810574f0d4db8a00bcd82 | /nut/apps/api/views/sla.py | e3eb61cce1a35fe3874df8a51f75571aca1504c3 | [] | no_license | bopopescu/nut | 3818cc66e543870180dae943944ef026f191a385 | 39c58afe7fa7be185b1a3ac64e8c295d16601bd6 | refs/heads/master | 2022-11-18T05:28:14.209311 | 2017-09-29T10:32:54 | 2017-09-29T10:32:54 | 282,010,209 | 0 | 0 | null | 2020-07-23T17:05:57 | 2020-07-23T17:05:56 | null | UTF-8 | Python | false | false | 769 | py | from rest_framework import generics
from rest_framework.permissions import IsAdminUser
from apps.core.models import Selection_Article
from apps.api.serializers.articles import NestedSelectionArticleSerializer
from apps.api.permissions import Admin_And_Editor_Only
class RFSlaListView(generics.ListCreateAPIView):
    """List (newest first) and create Selection_Article records.

    Admin-only endpoint; paginated 20 per page, client-adjustable via the
    `page_size` query parameter up to 100.
    """
    # BUG FIX: the attribute was misspelled `permission_class` (singular),
    # which DRF silently ignores — the view was effectively unprotected.
    # DRF reads `permission_classes`.
    permission_classes = (IsAdminUser,)
    queryset = Selection_Article.objects.all().order_by('-pub_time')
    serializer_class = NestedSelectionArticleSerializer
    paginate_by = 20
    paginate_by_param = 'page_size'
    max_paginate_by = 100
class RFSlaDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve / update / delete a single Selection_Article (admin only)."""
    permission_classes = (IsAdminUser,)
    queryset = Selection_Article.objects.all()
    serializer_class = NestedSelectionArticleSerializer
| [
"anchen@guoku.com"
] | anchen@guoku.com |
113cb5acc88ecdd692bbe9af0b8d3a02610d3f18 | 3e67503851365b38a7ac0e78b5ae54eeac2cc5e8 | /information/migrations/0013_auto_20200927_1051.py | c4d18126290885d5f77520633ae0dbe60fd99a9d | [] | no_license | JOHNYXUU/Ahu-Curriculum-Design-of-Principles-of-Database | 121c52f2b77b0122ffa06e89ee3e06030af05e04 | f4298882feb5d109eb010feb39d453e6f3953508 | refs/heads/master | 2022-12-25T10:35:31.818740 | 2020-10-09T12:48:25 | 2020-10-09T12:48:25 | 302,631,512 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # Generated by Django 3.0.3 on 2020-09-27 02:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('information', '0012_auto_20200927_1033'),
]
operations = [
migrations.AlterField(
model_name='ware',
name='wname',
field=models.CharField(max_length=50),
),
]
| [
"1163444531@qq.com"
] | 1163444531@qq.com |
17c14221dd22255ed751dc3f60dafc64a8e62399 | 01857ef455ea60eccaf03b5a9059ec83e9803c2e | /nicegui/tailwind_types/text_align.py | 476e196f84b46e2c187f30f1984c510e8a9430c5 | [
"MIT"
] | permissive | zauberzeug/nicegui | f08312cc1f393deca79e0e84a2506d3a35efff16 | c61b1315f29d51e26cc1168207f5616b302f8df0 | refs/heads/main | 2023-08-18T18:09:30.937322 | 2023-08-18T15:04:00 | 2023-08-18T15:04:00 | 365,250,183 | 5,128 | 271 | MIT | 2023-09-14T01:50:56 | 2021-05-07T13:55:05 | Python | UTF-8 | Python | false | false | 129 | py | from typing import Literal
TextAlign = Literal[
'left',
'center',
'right',
'justify',
'start',
'end',
]
| [
"falko@zauberzeug.com"
] | falko@zauberzeug.com |
2ed8a5ddc32ec856e5b12247439bf6992e88fb69 | 09eee55bbff98de8c790054fe4cd5ad24c193181 | /homeassistant/config/custom_components/sensor/zte_router.py | a6ae89ba355c4db71d2aaee0bdd44ebac7cff698 | [] | no_license | peleccom/hass_config | 53d9feb1c6b5da7211e1a5bd43c39eb8d9769c26 | 26b8c90f3773a85cb4e1c4fe676869f62866fc41 | refs/heads/master | 2022-12-14T20:40:08.673476 | 2022-11-26T12:14:11 | 2022-11-26T12:21:49 | 140,076,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,367 | py | """
Sensor for ZTE router.
"""
import logging
import re
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from datetime import timedelta
from telnetlib import Telnet
from homeassistant.const import (CONF_NAME, CONF_DISPLAY_OPTIONS, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=30)
DEFAULT_PORT = 23
DEFAULT_NAME = 'ZTERouter'
DEFAULT_TIMEOUT = 5
SENSOR_TYPES = {
'uptime' : ['Uptime', None, 'mdi:clock'],
'status' : ['Status', None, 'mdi:water'],
'dl_speed' : ['Download speed', 'Kbps', 'mdi:download'],
'ul_speed' : ['Upload speed', 'Kbps', 'mdi:upload'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_DISPLAY_OPTIONS, default=list(SENSOR_TYPES.keys())):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
ICON = 'mdi:router-wireless'
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the sensor platform.

    Creates one shared ZTERouterData poller and one ZTERouterSensor entity
    per selected display option.
    """
    name = config.get(CONF_NAME)  # NOTE(review): read but never used here.
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    password = config.get(CONF_PASSWORD)
    data = ZTERouterData(host, port, password, config[CONF_DISPLAY_OPTIONS])
    devices = []
    for variable in config[CONF_DISPLAY_OPTIONS]:
        devices.append(ZTERouterSensor(config, data, variable))
    add_devices(devices)
class ZTERouterSensor(Entity):
    """Representation of one ZTE router sensor (one per display option)."""

    def __init__(self, config, data, sensor_types):
        """Initialize a ZTE router sensor; *data* is the shared poller."""
        self.data = data
        # Entity name: "<platform name>_<sensor display name>".
        self._name = '{0}_{1}'.format(config.get(CONF_NAME),
                                      SENSOR_TYPES[sensor_types][0])
        self._unit_of_measurement = SENSOR_TYPES[sensor_types][1]
        self.type = sensor_types
        self._state = None
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return SENSOR_TYPES[self.type][1] \
            if self.type in SENSOR_TYPES else None

    @property
    def icon(self):
        """Return the icon."""
        return SENSOR_TYPES[self.type][2] \
            if self.type in SENSOR_TYPES else None

    def update(self):
        """Get the latest data from router and update the states."""
        self.data.update()
        # NOTE(review): `status` is only assigned inside ZTERouterData.update();
        # this access assumes the first (unthrottled) poll has already run —
        # confirm, otherwise this raises AttributeError rather than seeing None.
        if self.data.status is None:
            #self._state = "OK"
            _LOGGER.error('No Data Received')
            return
        else:
            if self.type == 'uptime':
                self._state = self.data.status.get('uptime')
            elif self.type == 'status':
                self._state = self.data.status.get('status')
            elif self.type == 'dl_speed':
                self._state = self.data.status.get('dl_speed')
            elif self.type == 'ul_speed':
                self._state = self.data.status.get('ul_speed')
class ZTERouterData(object):
    """Poll the ZTE router over telnet and cache the parsed ADSL status."""

    # Patterns applied to the router's CLI output; group(1) is the value.
    ADSL_STATUS_REGEX = {
        "uptime": re.compile(r'ADSL uptime\s+(.*)'),
        "status": re.compile(r'current modem status:\s(.*)'),
        "dl_speed": re.compile(r'near-end interleaved channel bit rate:\s(.*)kbps'),
        "ul_speed": re.compile(r'far-end interleaved channel bit rate:\s(.*)kbps'),
    }

    def __init__(self, host, port, password, CONF_DISPLAY_OPTIONS):
        """Initialize the data object."""
        self.host = host
        self.port = port
        self.password = password
        self.options = CONF_DISPLAY_OPTIONS
        self.data = None
        # BUG FIX: `status` was never initialized, so sensors reading
        # `data.status` before the first poll completed hit AttributeError.
        self.status = None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Connect to router via telnet to gather status."""
        connection = None
        try:
            self.status = dict()
            connection = Telnet(
                self.host, self.port, timeout=DEFAULT_TIMEOUT)
            connection.read_until(b"Password: ")
            connection.write((self.password + "\n").encode('ascii'))
            connection.read_until(b"ZTE>")
            if 'uptime' in self.options:
                connection.write(("show wan adsl uptime\n").encode('ascii'))
                connection.read_until(b"\n").decode('ascii')  # skip echoed command
                uptime = connection.read_until(b"\n").decode('ascii')
                match = re.match(self.ADSL_STATUS_REGEX['uptime'], uptime)
                if match:
                    group_str = match.group(1).strip()
                    self.status['uptime'] = group_str
            if 'status' in self.options:
                connection.write(("show wan adsl status\n").encode('ascii'))
                connection.read_until(b"\n").decode('ascii')  # skip echoed command
                uptime = connection.read_until(b"\n").decode('ascii')
                match = re.match(self.ADSL_STATUS_REGEX['status'], uptime)
                if match:
                    group_str = match.group(1).strip()
                    self.status['status'] = group_str
            if 'dl_speed' in self.options or 'ul_speed' in self.options:
                connection.write(("show wan adsl chandata\n").encode('ascii'))
                connection.read_until(b"\n").decode('ascii')  # skip echoed command
                uptime = connection.read_until(b"ZTE>").decode('ascii')
                match = re.search(self.ADSL_STATUS_REGEX['dl_speed'], uptime)
                if match:
                    group_str = match.group(1).strip()
                    self.status['dl_speed'] = group_str
                match = re.search(self.ADSL_STATUS_REGEX['ul_speed'], uptime)
                if match:
                    group_str = match.group(1).strip()
                    self.status['ul_speed'] = group_str
        # BUG FIX: only ConnectionRefusedError was caught before, so timeouts,
        # DNS failures and dropped sessions crashed the update. OSError covers
        # ConnectionRefusedError and socket.timeout; EOFError is raised by
        # telnetlib when the router closes the connection.
        except (OSError, EOFError):
            _LOGGER.error('ZTE Router is not available at %s:%s', self.host, self.port)
            self.data = None
        finally:
            # BUG FIX: the telnet session was never closed, leaking a socket
            # on every poll.
            if connection is not None:
                connection.close()
| [
"peleccom@gmail.com"
] | peleccom@gmail.com |
6d436950f72fdebca3a2b9ddcdfb8295af5e2ef8 | f100a01e9629b4073968b3cf40fef1f5c07318b4 | /home/migrations/0003_auto_20200817_1405.py | ac1e0cd7bb8cde7a187d8161ed79cc84bcb640ce | [] | no_license | reiosantos/rkh_website | 2b48856a4cef1e19fa0604c291552f8e2f01de0c | 551df5a9953bf7517fc2520000618612e47ff2ff | refs/heads/master | 2022-12-05T15:45:40.517499 | 2020-08-17T19:59:26 | 2020-08-17T19:59:26 | 288,273,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # Generated by Django 3.1 on 2020-08-17 14:05
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0002_create_homepage'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='author',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())], null=True),
),
]
| [
"ronireiosantos@gmail.com"
] | ronireiosantos@gmail.com |
0f5012b740558463761ad7c2c0edf8aac3ec1da1 | 24f2b56ac7458e72f1f90abd16302a41dbaa7977 | /starter.py | 94875abe1fc3276f82d887f3786c9079988b1128 | [
"MIT"
] | permissive | rmechler/aws-swf-boto3 | 0397db99e4f45adba09d16b504b94d6c50346bf4 | 122144b11dc0e142f9e4e3797a1e96ffd39786a7 | refs/heads/master | 2021-01-21T18:46:29.073499 | 2016-05-25T22:00:18 | 2016-05-25T22:00:18 | 59,585,186 | 0 | 0 | null | 2016-05-24T15:28:06 | 2016-05-24T15:28:06 | null | UTF-8 | Python | false | false | 446 | py | #!/usr/bin/python
import swf_helper
import logging
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-w", "--workflow-id", type=str, dest="workflow_id", default="test-1", help="workflow ID", metavar="workflow_id")
args = parser.parse_args()
WORKFLOW_ID = 'test-1'
response = swf_helper.start_workflow(args.workflow_id, workflowTimeout=180, deciderTimeout=10)
print("Workflow requested: {}".format(response))
| [
"rmechler@cisco.com"
] | rmechler@cisco.com |
075c8cdbfc5841798c3a4bd57ab4ee7c7d9494c7 | 712d63f842d5dddcd21889cc5edabf2381662d3a | /py2-kms/client.py | 5410c485552160f3dd6e37c4e012a43ff4767428 | [
"Unlicense"
] | permissive | 13128228284/py-kms | 6d91e4e39291b14742d9852ce141b051da2d3aaf | 3d154f8cf5d572087103397057702596caea41e2 | refs/heads/master | 2020-04-28T01:43:28.633934 | 2019-04-17T14:20:23 | 2019-04-17T14:20:23 | 174,868,599 | 0 | 0 | Unlicense | 2019-04-17T14:20:24 | 2019-03-10T19:10:12 | Python | UTF-8 | Python | false | false | 11,643 | py | #!/usr/bin/env python
import re
import argparse
import binascii
import datetime
import random
import socket
import string
import sys
import uuid
import logging
import os
import errno
import filetimes, rpcBind, rpcRequest
from dcerpc import MSRPCHeader, MSRPCBindNak, MSRPCRequestHeader, MSRPCRespHeader
from kmsBase import kmsBase, UUID
from kmsRequestV4 import kmsRequestV4
from kmsRequestV5 import kmsRequestV5
from kmsRequestV6 import kmsRequestV6
from rpcBase import rpcBase
from kmsDB2Dict import kmsDB2Dict
from formatText import shell_message, justify
config = {}
def main():
    """Parse CLI options, bind to the KMS server over MSRPC, send one KMS
    activation request for the selected product, and log the reply.

    (Python 2 code — note the `except socket.error, e` syntax below.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("ip", action="store", help='The IP address or hostname of the KMS server.', type=str)
    parser.add_argument("port", nargs="?", action="store", default=1688,
                        help='The port the KMS service is listening on. The default is \"1688\".', type=int)
    parser.add_argument("-m", "--mode", dest="mode",
                        choices=["WindowsVista","Windows7","Windows8","Windows8.1","Windows10",
                        "Office2010","Office2013","Office2016","Office2019"], default="Windows8.1",
                        help='Use this flag to manually specify a Microsoft product for testing the server. The default is \"Windows81\".', type=str)
    parser.add_argument("-c", "--cmid", dest="cmid", default=None,
                        help='Use this flag to manually specify a CMID to use. If no CMID is specified, a random CMID will be generated.', type=str)
    parser.add_argument("-n", "--name", dest="machineName", default=None,
                        help='Use this flag to manually specify an ASCII machineName to use. If no machineName is specified,\
 a random machineName will be generated.', type=str)
    parser.add_argument("-v", "--loglevel", dest="loglevel", action="store", default="ERROR", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
                        help='Use this flag to set a Loglevel. The default is \"ERROR\".', type=str)
    parser.add_argument("-f", "--logfile", dest="logfile", action="store", default=os.path.dirname(os.path.abspath( __file__ )) + "/py2kms_client.log",
                        help='Use this flag to set an output Logfile. The default is \"pykms_client.log\".', type=str)
    config.update(vars(parser.parse_args()))

    logging.basicConfig(level=config['loglevel'], format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S', filename=config['logfile'], filemode='w')

    checkConfig()
    config['call_id'] = 1
    updateConfig()

    # Open the TCP connection to the KMS host.
    s = socket.socket()
    logging.info("Connecting to %s on port %d..." % (config['ip'], config['port']))
    s.connect((config['ip'], config['port']))
    logging.info("Connection successful !")

    # RPC bind handshake.
    binder = rpcBind.handler(None, config)
    RPC_Bind = str(binder.generateRequest())
    logging.info("Sending RPC bind request...")
    shell_message(nshell = [-1, 1])
    s.send(RPC_Bind)
    try:
        shell_message(nshell = [-4, 7])
        bindResponse = s.recv(1024)
    except socket.error, e:
        if e.errno == errno.ECONNRESET:
            logging.error("Connection reset by peer. Exiting...")
            sys.exit()
        else:
            raise
    if bindResponse == '' or not bindResponse:
        logging.error("No data received ! Exiting...")
        sys.exit()
    packetType = MSRPCHeader(bindResponse)['type']
    if packetType == rpcBase.packetType['bindAck']:
        # Bind accepted: build and send the version-appropriate KMS request.
        logging.info("RPC bind acknowledged.")
        shell_message(nshell = 8)
        kmsRequest = createKmsRequest()
        requester = rpcRequest.handler(kmsRequest, config)
        s.send(str(requester.generateRequest()))
        shell_message(nshell = [-1, 12])
        response = s.recv(1024)
        logging.debug("Response: \n%s\n" % justify(binascii.b2a_hex(response)))
        shell_message(nshell = [-4, 20])
        parsed = MSRPCRespHeader(response)
        kmsData = readKmsResponse(parsed['pduData'], kmsRequest, config)
        kmsResp = kmsData['response']
        try:
            hwid = kmsData['hwid']
            logging.info("KMS Host HWID: %s" % binascii.b2a_hex(hwid).upper())
        except KeyError:
            # Only some protocol versions return a HWID.
            pass
        logging.info("KMS Host ePID: %s" % kmsResp['kmsEpid'].decode('utf-16le').encode('utf-8'))
        logging.info("KMS Host Current Client Count: %s" % kmsResp['currentClientCount'])
        logging.info("KMS VL Activation Interval: %s" % kmsResp['vLActivationInterval'])
        logging.info("KMS VL Renewal Interval: %s" % kmsResp['vLRenewalInterval'])
        shell_message(nshell = 21)
    elif packetType == rpcBase.packetType['bindNak']:
        # Server refused the bind; dump the NAK and quit.
        logging.info(justify(MSRPCBindNak(bindResponse).dump(print_to_stdout = False)))
        sys.exit()
    else:
        logging.critical("Something went wrong.")
        sys.exit()
def checkConfig():
    """Validate user-supplied options; exit on bad CMID or machineName.

    - `cmid` (if given) must parse as a UUID.
    - `machineName` (if given) must be 2..63 characters long.
    """
    if config['cmid'] is not None:
        try:
            uuid.UUID(config['cmid'])
        except ValueError:
            logging.error("Bad CMID. Exiting...")
            sys.exit()
    if config['machineName'] is not None:
        if len(config['machineName']) < 2 or len(config['machineName']) > 63:
            logging.error("Error: machineName must be between 2 and 63 characters in length.")
            sys.exit()
def updateConfig():
    """Fill `config` with product-specific KMS parameters.

    Walks the KMS database (kmsDB2Dict) for the app/KMS item whose cleaned
    display name matches config['mode'], then picks the 'Enterprise'
    (Windows) or 'Professional Plus' (Office) SKU and records its IDs,
    protocol version and activation threshold.
    """
    kmsdb = kmsDB2Dict()
    appitems = kmsdb[2]
    for appitem in appitems:
        kmsitems = appitem['KmsItems']
        for kmsitem in kmsitems:
            # Threshold (minimum client count); default 25 when unspecified.
            try:
                count = int(kmsitem['NCountPolicy'])
            except KeyError:
                count = 25
            # Normalize the display name: strip parentheticals, '2015', spaces.
            name = re.sub('\(.*\)', '', kmsitem['DisplayName']).replace('2015', '').replace(' ', '')
            if name == config['mode']:
                skuitems = kmsitem['SkuItems']
                # Select 'Enterprise' for Windows or 'Professional Plus' for Office.
                # (improvement: choice could be also random: skuitem = random.choice(skuitems))
                for skuitem in skuitems:
                    if skuitem['DisplayName'].replace(' ','') == name + 'Enterprise' or \
                       skuitem['DisplayName'].replace(' ','') == name[:6] + 'ProfessionalPlus' + name[6:]:
                        config['KMSClientSkuID'] = skuitem['Id']
                        config['RequiredClientCount'] = count
                        config['KMSProtocolMajorVersion'] = int(float(kmsitem['DefaultKmsProtocol']))
                        config['KMSProtocolMinorVersion'] = 0
                        config['KMSClientLicenseStatus'] = 2
                        config['KMSClientAppID'] = appitem['Id']
                        config['KMSClientKMSCountedID'] = kmsitem['Id']
                        break
def createKmsRequestBase():
    """Build and return the version-independent KMS request structure.

    Uses the product parameters gathered by updateConfig(); CMID and
    machineName are randomized when the user did not supply them.
    """
    requestDict = kmsBase.kmsRequestStruct()
    requestDict['versionMinor'] = config['KMSProtocolMinorVersion']
    requestDict['versionMajor'] = config['KMSProtocolMajorVersion']
    requestDict['isClientVm'] = 0
    requestDict['licenseStatus'] = config['KMSClientLicenseStatus']
    requestDict['graceTime'] = 43200
    requestDict['applicationId'] = UUID(uuid.UUID(config['KMSClientAppID']).bytes_le)
    requestDict['skuId'] = UUID(uuid.UUID(config['KMSClientSkuID']).bytes_le)
    requestDict['kmsCountedId'] = UUID(uuid.UUID(config['KMSClientKMSCountedID']).bytes_le)
    requestDict['clientMachineId'] = UUID(uuid.UUID(config['cmid']).bytes_le if (config['cmid'] is not None) else uuid.uuid4().bytes_le)
    requestDict['previousClientMachineId'] = '\0' * 16 #requestDict['clientMachineId'] # I'm pretty sure this is supposed to be a null UUID.
    requestDict['requiredClientCount'] = config['RequiredClientCount']
    requestDict['requestTime'] = filetimes.dt_to_filetime(datetime.datetime.utcnow())
    # Random 2..63-char name when none given (string.letters is Python 2 only).
    requestDict['machineName'] = (config['machineName'] if (config['machineName'] is not None) else
                                  ''.join(random.choice(string.letters + string.digits) for i in range(random.randint(2,63)))).encode('utf-16le')
    # Pad the UTF-16LE machine name field out to 63 characters.
    requestDict['mnPad'] = '\0'.encode('utf-16le') * (63 - len(requestDict['machineName'].decode('utf-16le')))

    # Debug Stuff
    shell_message(nshell = 9)
    logging.debug("Request Base Dictionary: \n%s\n" % justify(requestDict.dump(print_to_stdout = False)))

    return requestDict
def createKmsRequest():
    """Build the full KMS request via the protocol-version-specific handler.

    Returns None when the configured major version is not 4, 5 or 6.
    """
    # Update the call ID
    config['call_id'] += 1

    # KMS Protocol Major Version selects the request builder.
    if config['KMSProtocolMajorVersion'] == 4:
        handler = kmsRequestV4(None, config)
    elif config['KMSProtocolMajorVersion'] == 5:
        handler = kmsRequestV5(None, config)
    elif config['KMSProtocolMajorVersion'] == 6:
        handler = kmsRequestV6(None, config)
    else:
        return None

    requestBase = createKmsRequestBase()
    return handler.generateRequest(requestBase)
def readKmsResponse(data, request, config):
    """Dispatch response parsing on the negotiated KMS protocol version."""
    if config['KMSProtocolMajorVersion'] == 4:
        logging.info("Received V4 response")
        response = readKmsResponseV4(data, request)
    elif config['KMSProtocolMajorVersion'] == 5:
        logging.info("Received V5 response")
        response = readKmsResponseV5(data)
    elif config['KMSProtocolMajorVersion'] == 6:
        logging.info("Received V6 response")
        response = readKmsResponseV6(data)
    else:
        # NOTE(review): `response` is never assigned on this path, so the
        # `return` below raises NameError — latent bug for unknown versions.
        logging.info("Unhandled response version: %d.%d" % (config['KMSProtocolMajorVersion'], config['KMSProtocolMinorVersion']))
        logging.info("I'm not even sure how this happened...")
    return response
def readKmsResponseV4(data, request):
    """Parse a V4 response and check its hash; returns the parsed response.

    NOTE(review): a hash mismatch only skips the log line — the response is
    returned either way; confirm whether that is intended.
    """
    response = kmsRequestV4.ResponseV4(data)
    hashed = kmsRequestV4(data, config).generateHash(bytearray(str(response['response'])))
    if hashed == response['hash']:
        logging.info("Response Hash has expected value !")
    return response
def readKmsResponseV5(data):
    """Parse and decrypt a V5 response; returns the decrypted structure."""
    response = kmsRequestV5.ResponseV5(data)
    decrypted = kmsRequestV5(data, config).decryptResponse(response)
    return decrypted
def readKmsResponseV6(data):
    """Parse and decrypt a V6 response; returns only the inner message.

    NOTE(review): uses kmsRequestV6.ResponseV5 for the V6 payload —
    presumably the V6 wire format reuses the V5 container; confirm against
    the kmsRequestV6 module.
    """
    response = kmsRequestV6.ResponseV5(data)
    decrypted = kmsRequestV6(data, config).decryptResponse(response)
    message = decrypted['message']
    return message
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
156fd1f615108a6a975110457e3f01ee5b5a7ca9 | c858d9511cdb6a6ca723cd2dd05827d281fa764d | /MFTU/lesson 2/practice_robot/robot-tasks-master/task_25.py | ed88ac2f39e4be1dd10bd7d1af830ef72db2fa21 | [] | no_license | DontTouchMyMind/education | 0c904aa929cb5349d7af7e06d9b1bbaab972ef95 | 32a53eb4086b730cc116e633f68cf01f3d4ec1d1 | refs/heads/master | 2021-03-12T11:15:02.479779 | 2020-09-17T08:19:50 | 2020-09-17T08:19:50 | 246,616,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/bin/python3
from pyrob.api import *
@task
def task_2_2():
    """Paint a repeating 'chest'-shaped group of filled cells across the grid.

    NOTE(review): the original indentation was lost in extraction; the
    grouping of the loop body below is a best-effort reconstruction —
    verify against the pyrob task checker before relying on it.
    """
    def chest():
        # Paint one chest: a 3-wide top row, a cell below-right, and a cell
        # above, ending two rows down from where it finished.
        fill_cell()
        move_right()
        fill_cell()
        move_right()
        fill_cell()
        move_down()
        move_left()
        fill_cell()
        move_up()
        move_up()
        fill_cell()
        move_down(2)
    chest()
    for i in range(4):
        move_right(3)
        move_down()
        chest()
        move_left()
if __name__ == '__main__':
run_tasks()
| [
"tobigface@gmail.com"
] | tobigface@gmail.com |
b0a7d0decaed56f6352cea3a737a4b1d712e05b7 | 1833d04a610604e2ce0ee95c1eb93e64403a6e07 | /pyassign5.py | 4ccd8f73abe9cbde7f40b1c00620dc5d665cc6d6 | [] | no_license | PoojaKamboj/AllPythonAssignments | fe9292aea3faf1dedd3ebbaeeb7d597de811ac2a | 0a538699eed22a3b59473c47e962a812ecc76cc0 | refs/heads/master | 2020-03-19T12:18:50.656520 | 2018-07-01T10:25:12 | 2018-07-01T10:25:12 | 136,510,109 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,886 | py |
# Take 10 integers from user and print it on screen
myList = list()
print("\nEnter values to List : ")
for i in range(10):
    int_val = int(input(("Enter %d value : ") %(i+1))) # Taking input from user
    myList.append(int_val)
print("Integer List : " ,myList)

# Write an infinite loop.An infinite loop never ends. Condition is always true
# NOTE(review): this loop never terminates, so every exercise below it is
# unreachable when the script runs top-to-bottom — matches the assignment
# text, but worth confirming.
while range(10): # Condition is always true
    print("It is an infinite loop : ")

# Create a list of integer elements by user input
# Make a new list which will store square of elements of previous list
myList1 = list()
myList2 = list()
print("\n\nEnter values to List to find Square List: ")
for a in range(5):
    int_val1 = int(input(("Enter %d value : ") %(a+1)))
    myList1.append(int_val1) # a list of integer
print("Integer List : " ,myList1)
for item in myList1:
    sq_val = item*item
    myList2.append(sq_val) # a list of square of elements
print("Square List : " ,myList2)

# From a list containing ints, strings and floats, make three lists to store them separately
myList3 = [ 1,2,3,"a", "b", 1.5,1.6, 1.7, 8]
int_List = [ i for i in myList3 if isinstance(i, int) ]
str_List = [ i for i in myList3 if isinstance(i, str) ]
float_List = [ i for i in myList3 if isinstance(i, float) ]
print("\n\nMixed List : " , myList3)
print("Integer List : " ,int_List)
print("String List : " ,str_List)
print("Float List : " ,float_List)

# Using range(1,101), make a list containing even and odd numbers.
eList = list()
oList = list()
for li in range(1,101):
    if(li % 2 == 0):
        eList.append(li)
    else:
        oList.append(li)
print("\nEven List : ", eList)
print("Odd List : ", oList)

# Print the pattern (right triangle of stars, rows 1 to 4 wide)
pattern = '*'
print("\n\n")
for i in range(1,5):
    print(pattern*i)

# Create a user defined dictionary and get keys corresponding to the value using for loop
myDict = dict()
print("\n\nEnter elements to dictionary as Key:Value pair : ")
for i in range(5):
    key = input(("Enter key %d") % (i+1))
    value = input(("Enter Value of %d key") % (i+1))
    myDict[key] = value
for it in myDict:
    print(it,':',myDict[it])

# Take inputs from user to make a list. Again take one input from user and
# search it in the list and delete that element, if found. Iterate over list using for loop.
num = int(input('\n\nEnter the length of List : '))
user_list = list()
print("Enter the elements to List : ")
for x in range(num):
    in_val = input()
    user_list.append(in_val)
print(" List : ", user_list)
del_item = input(("Enter the input to delete : "))
if( del_item in user_list):
    user_list.remove(del_item)
    print(" List after deletion : ", user_list)
else:
    print("Input value is not in List")

# Termination of code
print("\n\nEnd of code")
| [
"noreply@github.com"
] | noreply@github.com |
11f133b5743530c70732435cdbdab5330259df50 | 75b9d4487eac9e9017d528df0a5c9a0e93822ffc | /ClaseVehiculoNuevo.py | 9ef8be7bd58adfaa17279cb21aac5bfe69918558 | [] | no_license | Marcos556149/UNIDAD-3-ACTIVIDAD-6 | 8072b7c216640932f2c4a6998363778947fdb2cc | a0f3a98da5e045adb2a326f724fbf0c5ec1077dd | refs/heads/main | 2023-05-05T07:59:22.526828 | 2021-05-31T04:57:41 | 2021-05-31T04:57:41 | 372,386,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | from ClaseVehiculo import Vehiculo
class VehiculoNuevo(Vehiculo):
    """A brand-new vehicle: extends Vehiculo with a trim version string."""

    Marca1 = 'Tesla'   # brand shared by every new vehicle
    __version = ''     # class-level default for the (name-mangled) version

    def __init__(self, mod='', cant=0, col='', pre=0, ver=''):
        super().__init__(mod, cant, col, pre)
        self.__version = ver

    def __str__(self):
        # NOTE: printing inside __str__ is a side effect kept on purpose
        # for behavioural compatibility.
        print("VEHICULO NUEVO")
        template = ('Modelo: {}, CantPuertas: {}, Color: {}, '
                    'PrecioBase: {}, Marca: {}, Version: {}')
        return template.format(super().getModelo(),
                               super().getCantidadPuertas(),
                               super().getColor(),
                               super().getPrecioBase(),
                               self.Marca1,
                               self.__version)

    @classmethod
    def getMarca1(cls):
        """Return the shared brand name."""
        return cls.Marca1

    def importeVenta(self):
        """Sale price: base plus 10%, plus an extra 2% for the 'full' trim."""
        base = self.getPrecioBase()
        total = base + (base * 0.1)
        if self.__version == 'full':
            total += base * 0.02
        return total

    def toJSON(self):
        """Return a JSON-serializable dict describing this instance."""
        return dict(
            __class__=self.__class__.__name__,
            __atributos__=dict(
                mod=super().getModelo(),
                cant=super().getCantidadPuertas(),
                col=super().getColor(),
                pre=super().getPrecioBase(),
                ver=self.__version
            )
        )
| [
"noreply@github.com"
] | noreply@github.com |
ddf58b250c7f058d2c834a8d0fa0382a06543ac1 | d82de8384b06dc5788b60d08c93f5f20214f60c6 | /defs/keras_mlp.py | b838746655925fd05195c19314883845a5405ab9 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | hyunghunny/hyperband | 810d79f5f2600bddcdcb73d997ae1f1ab6e85995 | ac07fa5a41eabbbdd6cf8c02628ade183207e5ec | refs/heads/master | 2020-05-22T21:52:30.022693 | 2019-05-14T11:04:29 | 2019-05-14T11:04:29 | 186,536,539 | 0 | 0 | NOASSERTION | 2019-05-14T03:18:15 | 2019-05-14T03:18:14 | null | UTF-8 | Python | false | false | 4,624 | py | "function (and parameter space) definitions for hyperband"
"binary classification with Keras (multilayer perceptron)"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data import data
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization as BatchNorm
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import *
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, MaxAbsScaler
#
# TODO: advanced activations - 'leakyrelu', 'prelu', 'elu', 'thresholdedrelu', 'srelu'
# Upper bound on the number of hidden layers hyperband may try; also
# drives how many per-layer sub-spaces are generated below.
max_layers = 5

# Hyperopt search space: preprocessing scaler, hidden-layer count,
# weight-init scheme, batch size and optimizer, sampled per configuration.
space = {
    'scaler': hp.choice( 's',
        ( None, 'StandardScaler', 'RobustScaler', 'MinMaxScaler', 'MaxAbsScaler' )),
    'n_layers': hp.quniform( 'l', 1, max_layers, 1 ),
    #'layer_size': hp.quniform( 'ls', 5, 100, 1 ),
    #'activation': hp.choice( 'a', ( 'relu', 'sigmoid', 'tanh' )),
    'init': hp.choice( 'i', ( 'uniform', 'normal', 'glorot_uniform',
        'glorot_normal', 'he_uniform', 'he_normal' )),
    'batch_size': hp.choice( 'bs', ( 16, 32, 64, 128, 256 )),
    'optimizer': hp.choice( 'o', ( 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' ))
}

# for each hidden layer, we choose size, activation and extras individually
for i in range( 1, max_layers + 1 ):
    space[ 'layer_{}_size'.format( i )] = hp.quniform( 'ls{}'.format( i ), 2, 200, 1 )
    space[ 'layer_{}_activation'.format( i )] = hp.choice( 'a{}'.format( i ),
        ( 'relu', 'sigmoid', 'tanh' ))
    space[ 'layer_{}_extras'.format( i )] = hp.choice( 'e{}'.format( i ), (
        { 'name': 'dropout', 'rate': hp.uniform( 'd{}'.format( i ), 0.1, 0.5 )},
        { 'name': 'batchnorm' },
        { 'name': None } ))
def get_params():
    """Sample one configuration from `space`, with quniform floats coerced to ints."""
    return handle_integers( sample( space ) )
#
# print hidden layers config in readable way
def print_layers( params ):
    """Print the per-hidden-layer config (size, activation, extras), one line per layer."""
    for i in range( 1, params['n_layers'] + 1 ):
        # Python 2 print statements: the trailing commas keep all of a
        # layer's fields on one line; the bare `print` ends the line.
        print "layer {} | size: {:>3} | activation: {:<7} | extras: {}".format( i,
            params['layer_{}_size'.format( i )],
            params['layer_{}_activation'.format( i )],
            params['layer_{}_extras'.format( i )]['name'] ),
        if params['layer_{}_extras'.format( i )]['name'] == 'dropout':
            # dropout is the only extra with a parameter worth showing
            print "- rate: {:.1%}".format( params['layer_{}_extras'.format( i )]['rate'] ),
        print
def print_params( params ):
    """Pretty-print a sampled config: top-level params first, then the layer block."""
    # layer_* keys are printed by print_layers in a readable form instead
    pprint({ k: v for k, v in params.items() if not k.startswith( 'layer_' )})
    print_layers( params )
    print
def try_params( n_iterations, params ):
    """Train and evaluate one MLP configuration for hyperband.

    n_iterations -- training budget; rounded to an epoch count.
    params -- a configuration sampled from `space` (see get_params()).

    Returns a dict with 'loss' (test log loss, minimized by hyperband),
    'log_loss', 'auc' and 'early_stop' (whether EarlyStopping fired).
    """
    print "iterations:", n_iterations
    print_params( params )
    y_train = data['y_train']
    y_test = data['y_test']
    if params['scaler']:
        # NOTE(review): eval() instantiates the scaler from its class name.
        # Safe only because params['scaler'] comes from the fixed choices in
        # `space` above — never feed user input through this path.
        scaler = eval( "{}()".format( params['scaler'] ))
        x_train_ = scaler.fit_transform( data['x_train'].astype( float ))
        x_test_ = scaler.transform( data['x_test'].astype( float ))
    else:
        x_train_ = data['x_train']
        x_test_ = data['x_test']
    input_dim = x_train_.shape[1]
    # Build the MLP: first Dense needs input_dim, then optional extras
    # (dropout/batchnorm) between hidden layers, sigmoid output for the
    # binary target. Uses Keras 1.x keywords (init=, nb_epoch=).
    model = Sequential()
    model.add( Dense( params['layer_1_size'], init = params['init'],
        activation = params['layer_1_activation'], input_dim = input_dim ))
    for i in range( int( params['n_layers'] ) - 1 ):
        extras = 'layer_{}_extras'.format( i + 1 )
        if params[extras]['name'] == 'dropout':
            model.add( Dropout( params[extras]['rate'] ))
        elif params[extras]['name'] == 'batchnorm':
            model.add( BatchNorm())
        model.add( Dense( params['layer_{}_size'.format( i + 2 )], init = params['init'],
            activation = params['layer_{}_activation'.format( i + 2 )]))
    model.add( Dense( 1, init = params['init'], activation = 'sigmoid' ))
    model.compile( optimizer = params['optimizer'], loss = 'binary_crossentropy' )
    #print model.summary()
    #
    # shuffle=False keeps runs comparable across configurations; early
    # stopping watches validation loss with patience 5.
    validation_data = ( x_test_, y_test )
    early_stopping = EarlyStopping( monitor = 'val_loss', patience = 5, verbose = 0 )
    history = model.fit( x_train_, y_train,
        nb_epoch = int( round( n_iterations )),
        batch_size = params['batch_size'],
        shuffle = False,
        validation_data = validation_data,
        callbacks = [ early_stopping ])
    #
    # Training-set metrics (log_loss/AUC/accuracy presumably come from
    # common_defs — verify) are printed for reference only.
    p = model.predict_proba( x_train_, batch_size = params['batch_size'] )
    ll = log_loss( y_train, p )
    auc = AUC( y_train, p )
    acc = accuracy( y_train, np.round( p ))
    print "\n# training | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format( ll, auc, acc )
    #
    # Test-set metrics are what hyperband actually optimizes ('loss').
    p = model.predict_proba( x_test_, batch_size = params['batch_size'] )
    ll = log_loss( y_test, p )
    auc = AUC( y_test, p )
    acc = accuracy( y_test, np.round( p ))
    print "# testing | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format( ll, auc, acc )
    return { 'loss': ll, 'log_loss': ll, 'auc': auc, 'early_stop': model.stop_training }
| [
"zajac.zygmunt@gmail.com"
] | zajac.zygmunt@gmail.com |
1b3221ba8ff6401dc3e95db960e66b8d4d2943ec | 17ca4c286c70277c626735bb7959e0fc08a8be1f | /start/editor/ActivityScriptExporter.py | 095c1f94a0c806a8992183245073dbbc2e0a3880 | [] | no_license | huazhicai/kidney | 6edae18eee63a8f71cb6cde3a070526192ecdcf1 | 87ec2bcc5929e641870ec89e08fbe1177748434e | refs/heads/master | 2022-11-10T20:59:42.970794 | 2020-04-29T01:39:09 | 2020-04-29T01:39:09 | 200,326,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,143 | py | ๏ปฟ# coding: utf-8
import os
import math
import csv
import re
import traceback
"""
NOTE: ้ขๅญๆฐๆฎ็runTimeDataไป
ๆฏๆไปฅไธๅ ็ง็ฑปๅ
1. String -> string
2. Float -> float
3. Int -> int
4. None -> None
5. Vec3 -> (a, b, c) -> math3d.vector
่ฟ้ไฝฟ็จtuple่กจ็คบVec3, ๆ
tupleๆฐๆฎ็ฑปๅ่ขซๅ ็จ
"""
# TODO: ๆฐๆฎๆ ก้ช
# TODO: const function ๅ
ๆ
# TODO: Vec3
CLIENT_SCRIPT = "client"
SERVER_SCRIPT = "server"
class NoneValueError(Exception):
    """Raised when a node argument that must have a value is left None.

    Carries the offending node id and argument uuid so the editor can
    point at the exact port.
    """

    def __init__(self, message, nodeID, argTypeID):
        super().__init__(message)
        self.nodeID = nodeID
        self.argTypeID = argTypeID
class TypeMismatchError(Exception):
    """Raised when a value's runtime type contradicts its declared port type."""

    def __init__(self, message, nodeID, argTypeID):
        super().__init__(message)
        self.nodeID, self.argTypeID = nodeID, argTypeID
class Node(object):
    """Runtime representation of one editor graph node.

    Attributes are populated by generate_node_graph():
      name / nodeType / nodeDef -- identity and the matching definition dict
      preQueryNodes -- query nodes whose outputs feed this node's inputs
      args / returns -- port uuid -> {name, type, valueRef, ...} records
      eventLinks / preLinks -- outgoing / incoming event edge records
      funcs -- event arg uuid -> handler function id
    """

    def __init__(self):
        self.name = None
        self.nodeType = None
        # BUG FIX: this line had been commented out, but
        # generate_node_graph() appends to node.preQueryNodes and
        # do_work() reads it -- leaving it undefined raises AttributeError.
        self.preQueryNodes = []
        self.args = {}
        self.returns = {}
        self.eventLinks = {}
        self.preLinks = {}
        self.funcs = {}
        self.nodeDef = None

    def is_node(self, nodeName, nodeType):
        """Return True when both the display name and type uuid match.

        (The previous body re-asserted the two equalities inside the
        matching branch, which could never fail -- simplified away.)
        """
        return self.name == nodeName and self.nodeType == nodeType
class Value(object):
    """Shared slot for one runtime value.

    `idx` becomes the value's index in the exported runTimeData table;
    `value` holds the literal (or None).  Instances are identity-compared
    so several ports can alias one slot.
    """

    def __init__(self):
        self.idx, self.value = None, None
def num_events_in_args(nodeDef):
    """Return how many of a node definition's args are Event ports."""
    return sum(1 for arg in nodeDef['args'] if arg['type'] == 'Event')
def string_to_vec3(value):
    """Parse a literal like "Vec3(1, 2, 3)" into a (float, float, float) tuple.

    Raises ValueError with a diagnostic on malformed input.  (Previously
    validated with bare asserts, which vanish under `python -O` and carry
    no message; both call sites wrap this in a bare except, so the changed
    exception type stays compatible.)
    """
    if value[:5] != "Vec3(" or value[-1] != ")":
        raise ValueError('not a Vec3 literal: %r' % (value,))
    parts = value[5:-1].split(',')
    if len(parts) != 3:
        raise ValueError('Vec3 needs exactly 3 components: %r' % (value,))
    a, b, c = parts
    # float() tolerates surrounding whitespace, so "Vec3(1, 2, 3)" works.
    return (float(a), float(b), float(c))
def extract_multiple_string(value):
    """Split a comma-separated, optionally double-quoted string into a list.

    csv.reader expects an iterable of lines, so the single input string is
    wrapped in a one-element list and the first (only) parsed row returned.
    skipinitialspace=True drops the blanks after separators.
    """
    rows = csv.reader([value], quotechar='"', delimiter=',',
                      quoting=csv.QUOTE_ALL, skipinitialspace=True)
    return next(rows)
def validate_type(value, argType):
    """Check that a runtime value matches its declared port type.

    Exact type checks are intentional (`type(x) is int`, not isinstance),
    so e.g. bool is rejected where Int is declared.  Vec3 must be a
    3-tuple of floats.  Unknown type names are accepted unconditionally.
    """
    scalar = {'Int': int, 'Float': float, 'Bool': bool, 'String': str}.get(argType)
    if scalar is not None:
        return type(value) is scalar
    if argType == 'Vec3':
        return (type(value) is tuple and len(value) == 3
                and all(type(component) is float for component in value))
    return True
def validate_def_data(defData):
    """Sanity-check the node definition list before export.

    Verifies that every arg/return/node uuid is globally unique (arg uuids
    must also parse as UUIDs), that declared default values match their
    declared types, that query nodes expose no Event ports (and their port
    type names start with an upper-case letter), and that every Event arg
    on a non-query node names its handler.

    Raises AssertionError (or ValueError from UUID parsing) on the first
    violation; a diagnostic is printed before re-raising.
    """
    from uuid import UUID
    uuidSet = set()
    for nodeDef in defData:
        for arg in nodeDef['args']:
            uuid = arg['name'][1]
            try:
                assert uuid not in uuidSet
            except:
                print('Duplicate UUID !!! : ', uuid)
                raise
            assert UUID(uuid, version=4)
            uuidSet.add(uuid)
            if 'default' in arg:
                try:
                    value = arg['default']
                    if arg['type'] == 'Vec3':
                        value = string_to_vec3(value)
                    assert validate_type(value, arg['type'])
                except:
                    print('Wrong default value type: ', arg)
                    raise
        for ret in nodeDef['returns']:
            uuid = ret['name'][1]
            try:
                assert uuid not in uuidSet
            except:
                print('Duplicate UUID !!! : ', uuid)
                raise
            # assert UUID(uuid, version=4)
            uuidSet.add(uuid)
        uuid = nodeDef['name'][1]
        try:
            assert uuid not in uuidSet
        except:
            print('Duplicate UUID !!! : ', uuid)
            raise
        # assert UUID(uuid, version=4)
        uuidSet.add(uuid)
    # NOTE: query-type nodes may not use Event ports, and their port type
    # names must start with an upper-case ASCII letter.
    for nodeDef in defData:
        if nodeDef.get('query', False):
            for arg in nodeDef['args']:
                assert arg['type'] != 'Event'
                assert 65 <= ord(arg['type'][0]) <= 90
            for ret in nodeDef['returns']:
                assert ret['type'] != 'Event'
                assert 65 <= ord(ret['type'][0]) <= 90
    # NOTE: on non-query nodes, every Event arg must declare its handler.
    for nodeDef in defData:
        if nodeDef.get('query', False):
            continue
        for arg in nodeDef['args']:
            if arg['type'] != 'Event':
                continue
            try:
                # BUG FIX: was `assert 'action' or 'function' in arg`,
                # which is always true because the string literal 'action'
                # is truthy -- the intended membership check never ran.
                assert 'action' in arg or 'function' in arg
            except:
                print('Def Error, event does not have func', nodeDef)
                raise
def validate_editor_data(editorData):
    """Assert that no (start, end, startItemId, endItemId) edge appears twice."""
    seen = set()
    for edge in editorData["edges"]:
        key = (edge["start"], edge["end"], edge["startItemId"], edge['endItemId'])
        assert key not in seen
        seen.add(key)
def generate_node_graph(defData, editorData):
    """Build the runtime Node graph from editor data.

    defData -- list of node definitions; re-keyed here by type uuid.
    editorData -- {'nodes': [...], 'edges': [...]} as saved by the editor.

    Creates one Node per editor node, fills its ports from the definition,
    copies literal arg values, wires data edges (aliasing Value slots) and
    event edges, then validates every arg.  Returns {editor node id: Node}.
    Raises TypeMismatchError / NoneValueError / AssertionError on bad data.
    """
    defData = {node['name'][1]: node for node in defData}
    nodes = {}
    # Shared sentinel meaning "no literal value"; identity-compared below
    # so a hand-edited value and a link can never collide unnoticed.
    defaultNoneValue = Value()
    for editorNode in editorData["nodes"]:
        node = Node()
        nodes[editorNode['id']] = node
        node.nodeType = editorNode['type']
        node.nodeID = editorNode['id']
        nodeDef = defData[node.nodeType]  # the matching definition dict
        node.name = nodeDef['name'][0]
        node.nodeDef = nodeDef
        # Returns: Event ports become outgoing event links; everything else
        # gets a Value slot (pre-filled when the def declares 'value').
        for returnDef in nodeDef['returns']:
            returnType = returnDef['type']
            returnName = returnDef['name'][0]
            returnUUID = returnDef['name'][1]
            if returnType == 'Event':
                node.eventLinks[returnUUID] = {
                    'name': returnName,
                    'links': []
                }
            else:
                valueRef = Value()
                if 'value' in returnDef:
                    valueRef.value = returnDef['value']
                    assert validate_type(valueRef.value, returnType)
                node.returns[returnUUID] = {
                    'name': returnName,
                    'type': returnType,
                    'valueRef': valueRef,
                    'linked': False
                }
        # Args: Event ports record their handler and incoming links; data
        # ports start on the shared None sentinel, then pick up any literal
        # the user typed in the editor.
        for (order, argDef) in enumerate(nodeDef["args"]):
            argType = argDef['type']
            argName = argDef["name"][0]
            argUUID = argDef["name"][1]
            argOrder = order
            if argType == 'Event':
                node.funcs[argUUID] = argDef.get('action', None) or argDef.get('function')
                node.preLinks[argUUID] = {
                    'name': argName,
                    'links': []
                }
            else:
                # TODO
                node.args[argUUID] = {
                    'name': argName,
                    'type': argType,
                    'valueRef': defaultNoneValue,
                    'order': argOrder,
                    'argDef': argDef,
                    'dataProvider': None,
                }
                if argUUID in editorNode['args']:
                    value = Value()
                    if argType == 'Vec3':
                        try:
                            value.value = string_to_vec3(editorNode['args'][argUUID])
                        except:
                            raise TypeMismatchError(
                                'validate_type Vec3 error, argName "%s", type of (%s) is not %s, def is %s' % (
                                    argName, value.value, argType, node.nodeDef), node.nodeID, argUUID)
                    else:
                        if editorNode['args'][argUUID] is None:
                            value = defaultNoneValue
                        else:
                            value.value = editorNode['args'][argUUID]
                        try:
                            assert validate_type(value.value, argType)
                        except:
                            raise TypeMismatchError(
                                'validate_type error, argName "%s", type of (%s) is not %s, %s, def is %s' % (
                                    argName, value.value, argType, type(value.value), node.nodeDef), node.nodeID,
                                argUUID)
                    # A literal set in the editor makes the node its own
                    # data provider.
                    node.args[argUUID]['valueRef'] = value
                    node.args[argUUID]['dataProvider'] = node
    # Wire the edges: Event edges link event ports to handlers; data edges
    # alias the end arg's Value slot to the start node's return slot.
    for editorEdge in editorData["edges"]:
        startNode = nodes[editorEdge["start"]]
        endNode = nodes[editorEdge["end"]]
        if editorEdge['linktype'] == 'Event':
            assert editorEdge['endItemId'] in endNode.funcs
            startNode.eventLinks[editorEdge["startItemId"]]['links'].append({
                'node': endNode,
                'eventUUID': editorEdge['endItemId'],
                'funcID': endNode.funcs[editorEdge['endItemId']]
            })
            endNode.preLinks[editorEdge['endItemId']]['links'].append({
                'node': startNode,
                'eventUUID': editorEdge['startItemId']}
            )
        else:
            # NOTE: if an arg already has a hand-edited literal, no other
            # node may provide its value (translated from garbled comment).
            try:
                assert endNode.args[editorEdge["endItemId"]]['valueRef'] is defaultNoneValue
            except:
                print("endNode '%s', attribute '%s', value is '%s', which should be None" % (
                    endNode.name, endNode.args[editorEdge["endItemId"]]['name'],
                    endNode.args[editorEdge["endItemId"]]['valueRef'].value))
                raise
            assert endNode.args[editorEdge["endItemId"]]['dataProvider'] is None
            if startNode.nodeDef.get('query', False):
                endNode.preQueryNodes.append(startNode)
            endNode.args[editorEdge["endItemId"]]['valueRef'] = startNode.returns[editorEdge["startItemId"]]['valueRef']
            startNode.returns[editorEdge["startItemId"]]["linked"] = True
            endNode.args[editorEdge["endItemId"]]['dataProvider'] = startNode
            # NOTE: an 'Any' port accepts/feeds any type; otherwise both
            # ends must declare the same type (translated from garbled
            # comment).
            assert endNode.args[editorEdge["endItemId"]]['type'] == startNode.returns[editorEdge["startItemId"]][
                'type'] or endNode.args[editorEdge["endItemId"]]['type'] == 'Any' or \
                startNode.returns[editorEdge["startItemId"]]['type'] == 'Any'
    # Final pass: every data arg must either hold a typed value, be fed by
    # a link, or explicitly allow None; ensureStaticConst args must keep a
    # node-local literal (never a linked value).
    for node in nodes.values():
        for argUUID, arg in node.args.items():
            argDef = arg['argDef']
            argType = argDef['type']
            if argType == 'Event':
                continue
            argValueRef = arg['valueRef']
            argValue = arg['valueRef'].value
            if argValue is None:
                if argDef.get('valueCanBeNone', False):
                    pass
                else:
                    try:
                        assert arg['dataProvider'] is not None
                        assert arg['dataProvider'] is not node
                    except:
                        raise NoneValueError('value error, argName "%s" of node [ %s ] can not be None' % (
                            argDef['name'][0], node.nodeDef['name'][0]), node.nodeID, argUUID)
            else:
                try:
                    assert validate_type(argValue, argType)
                except:
                    raise TypeMismatchError(
                        'validate_type error, argName "%s", type of (%s) is not %s, %s, def is %s' % (
                            argDef['name'][0], argValue, argType, type(argValue), node.nodeDef), node.nodeID,
                        argUUID)
            if argDef.get('ensureStaticConst') and argValueRef is not defaultNoneValue:
                try:
                    assert arg['dataProvider'] is node
                except:
                    raise TypeMismatchError(
                        'validate_type error, value must be static const, argName "%s", type of (%s) is not %s, def is %s' % (
                            argDef['name'][0], argValue, argType, node.nodeDef), node.nodeID, argUUID)
    return nodes
def do_work(defData, editorData, byEditor, filename, is_city=None):
nodeGraph = generate_node_graph(defData, editorData)
# NOTE: ่ฟ้ๅไบไธไบtrick๏ผๆฅๆstring่ฝฌๆขๆlist๏ผ้ฟๅ
่ฟ่กๆถ่ฝฌๆขๅผ้
for node in nodeGraph.values():
if node.is_node('Random Select String', 'a6bfe777-df7c-484c-b16d-71259527dca4'):
assert node.nodeDef['function']
assert node.nodeDef['query'] is True
assert num_events_in_args(node.nodeDef) is 0
assert len(node.preQueryNodes) is 0
assert len(node.args) is 1
arg = next(iter(node.args.values()))
assert arg['dataProvider'] is node
value = extract_multiple_string(arg['valueRef'].value)
assert len(value) > 0
for v in value:
if len(v) <= 0 or not isinstance(v, str):
print("Wrong value format in random_select_string", node)
raise EOFError
arg['valueRef'].value = value
elif node.is_node('Play SFX', '79e89d07-876e-4b9e-a00d-c3f1221582b6'):
degreeArg = node.args['34c3310e-4df4-403f-adda-d5786a4345f5']
degreeArgValue = degreeArg['valueRef'].value
if degreeArgValue is not None:
arcArg = node.args['e5785ef1-3c37-402b-883d-20116aaa63c7']
arcArg['valueRef'].value = degreeArgValue / 180.0 * math.pi
elif node.is_node('Set Int Variable', "bf7eab3f-b0b2-426f-9ddc-355c930ec0e6"):
assert len(node.args) is 2
arg = node.args['d4444300-2f7a-4ea2-80a0-40ed7b393d78']
if arg['dataProvider'] is node:
assert type(arg['valueRef'].value) is int
elif node.is_node('Set Variable', '7532929c-3d5e-4264-92cc-7f0b5c7ca0b7'):
assert len(node.args) is 2
arg = node.args['17ddb382-4d7f-47b4-a1f1-929bd74cf91e']
assert arg['dataProvider'] is not node, "use 'Set XXX Variable' instead"
elif node.is_node('Array Data(Int)', 'bf11a6e1-e92d-4cb9-80dd-0e3cd22f164a'):
assert node.nodeDef['function']
assert node.nodeDef['query'] is True
assert num_events_in_args(node.nodeDef) is 0
assert len(node.preQueryNodes) is 0
assert len(node.args) is 1
arg = next(iter(node.args.values()))
assert arg['dataProvider'] is node
value = list(map(int, arg['valueRef'].value.split(',')))
assert len(value) > 0
arg['valueRef'].value = value
elif node.is_node('Array Data(Float)', '800cdc88-c30e-4b7f-8d39-d3f3843e53df'):
assert node.nodeDef['function']
assert node.nodeDef['query'] is True
assert num_events_in_args(node.nodeDef) is 0
assert len(node.preQueryNodes) is 0
assert len(node.args) is 1
arg = next(iter(node.args.values()))
assert arg['dataProvider'] is node
value = list(map(float, arg['valueRef'].value.split(',')))
assert len(value) > 0
arg['valueRef'].value = value
elif node.is_node('Array Data(String)', '400d6243-58e4-43e1-a1fc-34fa41a421ff'):
assert node.nodeDef['function']
assert node.nodeDef['query'] is True
assert num_events_in_args(node.nodeDef) is 0
assert len(node.preQueryNodes) is 0
assert len(node.args) is 1
arg = next(iter(node.args.values()))
assert arg['dataProvider'] is node
value = extract_multiple_string(arg['valueRef'].value)
assert len(value) > 0
for v in value:
if len(v) <= 0 or not isinstance(v, str):
print("Wrong value format in random_select_string", node)
raise Exception
arg['valueRef'].value = value
elif node.is_node('Random Select Float', 'ca345a9f-56d9-4197-b6e3-1dfc65dfae0c'):
assert node.nodeDef['function']
assert node.nodeDef['query'] is True
assert num_events_in_args(node.nodeDef) is 0
assert len(node.preQueryNodes) is 0
assert len(node.args) is 1
arg = next(iter(node.args.values()))
assert arg['dataProvider'] is node
value = list(map(float, arg['valueRef'].value.split(',')))
assert len(value) > 0
arg['valueRef'].value = value
elif node.is_node('Random Select Integer', '4e0fa583-7ba8-40d1-82d2-375d98b95500'):
assert node.nodeDef['function']
assert node.nodeDef['query'] is True
assert num_events_in_args(node.nodeDef) is 0
assert len(node.preQueryNodes) is 0
assert len(node.args) is 1
arg = next(iter(node.args.values()))
assert arg['dataProvider'] is node
value = list(map(int, arg['valueRef'].value.split(',')))
assert len(value) > 0
arg['valueRef'].value = value
elif node.is_node('Open NPC Dialog', 'e0e5d422-f970-429b-8a62-7b5fcae3a5c4'):
arg = node.args.get('d10b29b5-7276-4df1-84ca-7fec5fc44b67', None)
value = arg['valueRef'].value
if value:
talk_list = []
for item in value.split(';'):
talk_item = item.split(',')
assert len(talk_item) == 2
talk_item[1] = int(talk_item[1])
talk_item[0] = str(talk_item[0])
talk_list.append(talk_item)
arg['valueRef'].value = talk_list
elif node.is_node('Open NPC Dialog(Only In Dungeon)', '32cff2f0-7157-4d59-a658-13fdbae44b6d'):
arg = node.args.get('361036ea-5700-4e79-bed8-1c0a84c64f16', None)
value = arg['valueRef'].value
if value:
talk_list = []
for item in value.split(';'):
talk_item = item.split(',')
assert len(talk_item) == 2
talk_item[1] = int(talk_item[1])
talk_item[0] = str(talk_item[0])
talk_list.append(talk_item)
arg['valueRef'].value = talk_list
elif node.is_node('Play Cinematic', '8d21cbcb-287f-48f9-8aa9-71b9293a6348'):
def get_num(reg_str, prefix_len, line):
index = re.search(reg_str, line)
if index:
length = index.group(0)
length = length[prefix_len:-1]
return float(length)
return 0
path_arg = node.args.get('954cde6d-c8cc-4b61-bc48-19e0a0d60987', None)
path = path_arg['valueRef'].value
if path:
anim_file = os.path.join(resPath, path)
if os.path.exists(anim_file):
try:
anim_file_handle = open(anim_file, 'r')
lines = anim_file_handle.readlines()
length = get_num('''length = "[0-9.]*''', len('length = "'), lines[0])
if length:
start_black_time = get_num('''start_black_time = "[0-9.]*''', len('start_black_time = "'),
lines[0])
length += start_black_time
end_black_time = get_num('''end_black_time = "[0-9.]*''', len('end_black_time = "'),
lines[0])
length += end_black_time
path_arg['valueRef'].value = path + ';' + str(length)
else:
print('Error: can not find length in ', anim_file)
traceback
except Exception as e:
print('Error: get anim length failed, ', anim_file, e)
traceback
else:
print('Error: open anim_file failed, ', anim_file)
traceback
player_id_arg = node.args.get('4407dd3a-9af0-46f3-a82d-ca47f5ed9b8a', None)
if not player_id_arg.get('dataProvider') and is_city is True:
raise Exception('Play Cinematic, but Player EntityID == None is forbid in city level!!')
elif node.is_node('Start Unit Dialog Tips', '5c7c06b0-b06c-49b7-afaf-e2473cb12c10'):
player_id_arg = node.args.get('9eef619e-0ffc-488a-89dd-276ba67dcdb5', None)
player_id = player_id_arg['valueRef'].value
if not player_id and is_city is True:
raise Exception('Start Unit Dialog Tips, but Player EntityID == None is forbid in city level!!')
elif node.is_node('Advance Task', '217dd054-8c6e-4976-bf3c-9defbc218c74'):
for_all_players_arg = node.args.get('9ddf1c84-db67-421e-951c-4607821abdd8', None)
for_all_players = for_all_players_arg['valueRef'].value
if for_all_players and is_city is True:
raise Exception('Advance Task, but \'For All Players\' == True is forbid in city level!!')
elif node.is_node('Create Mechanism', 'f0f220b5-e584-4aff-8c86-f039d030c02b'):
arg = node.args.get('b6e0a54f-b5b2-11e5-8bb7-448a5b598860', None)
value = arg['valueRef'].value
if value:
value = value.split(',')
arg['valueRef'].value = value
elif node.is_node('Create Mechanism With Lightmap', '03cfda15-a336-4c6c-b8a4-75123e88628d'):
arg = node.args.get('cf1d99e2-a1b7-48e8-ad79-40014ef128bf', None)
value = arg['valueRef'].value
if value:
value = value.split(',')
arg['valueRef'].value = value
# NOTE: lightmap ไฟกๆฏๅฏผๅบ
arg = node.args['6fd92e9d-7e74-414a-a662-c411aee4f19d']
value = arg['valueRef'].value
value = extract_multiple_string(value)
assert len(value) == 2
arg['valueRef'].value = value
elif node.is_node('Set Enable Player Skills', '49c402b3-022a-47e8-8028-1401c78b332c'):
arg = node.args.get('ea57e264-6569-4dba-8b0a-0995f1f0825c') # Skill ID List
value = list(map(int, arg['valueRef'].value.split(',')))
assert len(value) > 0
arg['valueRef'].value = value
nodes = []
idx = 0
for node in nodeGraph.values():
node.idx = idx
nodes.append(None)
idx += 1
runTimeData = []
# ๅ
ๅฎๆๆๆ่็น็่ฝฌๆขๅๅ่ฟ่ก้ๅ
trigger_grids_all = {}
for node in nodeGraph.values():
for retUUID, value in node.returns.items():
valueRef = value['valueRef']
if valueRef.idx is None:
idx = len(runTimeData)
valueRef.idx = idx
runTimeData.append(valueRef.value)
for node in nodeGraph.values():
for argUUID, value in node.args.items():
valueRef = value['valueRef']
if valueRef.idx is None:
idx = len(runTimeData)
valueRef.idx = idx
runTimeData.append(valueRef.value)
for node in nodeGraph.values():
# if len(node.preQueryNodes) > 0:
# preQueryNodes = [(preQueryNode.idx, preQueryNode.nodeDef['function']) for preQueryNode in
# node.preQueryNodes]
# else:
# preQueryNodes = None
"""
# TODO: ็ผๅญไผๅ
args = [ (value['order'], value['valueRef'].idx) for argUUID, value in node.args.iteritems() ]
args.sort(key = lambda x : x[0])
args = tuple([ value[1] for value in args ])
"""
# TODO: Vector
args = {value['name']: value['valueRef'].idx for argUUID, value in node.args.items()}
returns = {value['name']: value['valueRef'].idx for _, value in node.returns.items()}
# returns_linked = False
# for retUUID, value in node.returns.items():
# returns.append((value['name'], value['valueRef'].idx))
# # returns_linked |= value['linked']
# returns = tuple(returns)
# eventLinks = {value['name']: {link['node'].idx: 'In' for link in value['links']} for
# eventUUID, value in node.eventLinks.items()}
eventLinks = {value['name']: [(link['node'].idx, link['funcID']) for link in value['links']] for
eventUUID, value in node.eventLinks.items()}
prelinks = {}
for value in node.preLinks.values():
# if value['links']:
prelinks[value['name']] = [node.funcs[key] for key in node.funcs][0]
if byEditor:
nodes[node.idx] = {
# 'preQueryNodes': preQueryNodes,
'eventLinks': eventLinks,
'args': args,
'returns': returns,
'preLinks': prelinks,
'nodeUUidIdx': (node.nodeID, node.idx)
}
else:
nodes[node.idx] = {
# 'preQueryNodes': preQueryNodes,
'event_actions': prelinks,
'event_links': eventLinks,
'inputs': args,
'outputs': returns,
}
# if returns_linked:
# nodes[node.idx]['returns_linked'] = True
"""
้ๅฏนๅค้จไบไปถ็็นๆฎๅค็
"""
def append_runtime_data(value):
runTimeData.append(value)
return len(runTimeData) - 1
on_script_start = []
on_player_load_scene_finish = []
on_player_unit_dead = []
levelEventListeners = {
'on_script_start': on_script_start,
'on_player_load_scene_finish': on_player_load_scene_finish,
'on_player_unit_dead': on_player_unit_dead
}
clientEventListeners = {}
taskEventListeners = {}
activityStartListeners = {}
levelTimesUseoutEventListeners = {}
openPublicInstnaceEventListeners = {}
# staticMechanisms = []
questionEventListeners = []
STATIC_MECHANISM_PARAMS = {
'Mechanism Config ID': 'mechanism_config_id',
'Block Point': 'block_point',
'Spawn Point': 'spawn_point',
'Tile Index': 'tile_index',
'Unit Type': 'unit_type'
}
autoCreatePlayerUnitNodeCount = 0
setColorThemeNodeCount = 0
BUILTIN_LEVEL_EVENTS = ['on_script_start', 'on_player_load_scene_finish', 'on_player_unit_dead']
AddTaskDetailRequiredNames = {}
CounterNames = {}
mechanism_with_blcok = set()
has_opt_mechanism = set()
for node in nodeGraph.values():
if node.name == 'On Script Start' or node.nodeType == 'f8af0dbe-14b1-415d-bc91-5eb68bd2bd06':
assert node.nodeType == 'f8af0dbe-14b1-415d-bc91-5eb68bd2bd06'
assert node.name == 'On Script Start'
assert node.nodeDef['function']
assert num_events_in_args(node.nodeDef) is 0
on_script_start.append((node.idx, node.nodeDef['function']))
elif node.is_node("Create NPC", "83e7076d-9d00-4415-9e9f-d121c6d0d2e6"):
if node.args['2c5d3d12-509f-45ce-ba8d-b8590c12739c']['dataProvider'] is None: # Position
arg = node.args['7032826b-37c5-4299-a172-2621c92ef289'] # Spawn Point
argValue = arg['valueRef'].value
if argValue is None:
assert arg['dataProvider'] is not node
assert arg['dataProvider'] is not None
elif node.is_node("Create Monster", "b8bdb1c4-48e3-4c7b-b38e-12aef4a29db0"):
if node.args['ad60708a-4917-4fef-a193-9e1d62d2e8bd']['dataProvider'] is None: # Position
arg = node.args['33228f1d-1b7a-405d-82d4-7958acc8e8dc'] # Spawn Point
argValue = arg['valueRef'].value
if argValue is None:
assert arg['dataProvider'] is not node
assert arg['dataProvider'] is not None
elif node.is_node("Create Monster Boss", "f7454550-56f5-4242-86a8-9e46b140feab"):
if node.args['8b062406-a831-4a43-baad-9c7bd65a84ef']['dataProvider'] is None: # Position
arg = node.args['40c74e07-8e28-4366-bd92-25e5d3c97dc7'] # Spawn Point
argValue = arg['valueRef'].value
if argValue is None:
assert arg['dataProvider'] is not node
assert arg['dataProvider'] is not None
elif node.is_node("Create Monster Avatar", "9d1d78e6-0591-4b88-b34f-2a6864252151"):
if node.args['8dc0f1c1-188d-4e41-979f-737bda223697']['dataProvider'] is None: # Position
arg = node.args['2c74e5fd-8695-4bd2-aa71-9a28ff8ad5a6'] # Spawn Point
argValue = arg['valueRef'].value
if argValue is None:
assert arg['dataProvider'] is not node
assert arg['dataProvider'] is not None
elif node.is_node("Create Monster World Boss", "f7454550-56f5-4242-86a8-9e46b141feab"):
if node.args['40c74e07-8e28-4366-bd92-25e5d3c97dc8']['dataProvider'] is None: # Position
arg = node.args['8b062406-a831-4a43-baad-9c7bd65a85ef'] # Spawn Point
argValue = arg['valueRef'].value
if argValue is None:
assert arg['dataProvider'] is not node
assert arg['dataProvider'] is not None
elif node.is_node("Create Mechanism", "f0f220b5-e584-4aff-8c86-f039d030c02b"):
if node.args['cb7c120d-aa4d-4adc-8a09-20ecd5b539c0']['dataProvider'] is None: # Position
arg = node.args['b84f6f5c-695f-4b56-b98b-3bd61698bd3c'] # Spawn Point
argValue = arg['valueRef'].value
if argValue is None:
assert arg['dataProvider'] is not node
assert arg['dataProvider'] is not None
elif node.name == 'On Level Loaded' or node.nodeType == 'e468c7df-0563-4f68-9c8f-daf2e77d08b7':
assert node.name == 'On Level Loaded'
assert node.nodeType == 'e468c7df-0563-4f68-9c8f-daf2e77d08b7'
assert node.nodeDef['function']
assert num_events_in_args(node.nodeDef) is 0
on_player_load_scene_finish.append((node.idx, node.nodeDef['function']))
elif node.is_node('Auto Create Player Unit', '02b886bc-365f-4913-bae0-3dbf270633f3'):
assert autoCreatePlayerUnitNodeCount == 0
assert node.nodeDef['function']
assert num_events_in_args(node.nodeDef) is 0
autoCreatePlayerUnitNodeCount += 1
on_player_load_scene_finish.append((node.idx, node.nodeDef['function']))
elif node.is_node('Add Task Detail', '3d934991-fcfe-44f4-8129-35295f0e4393'):
arg = node.args['8519ec91-87de-48a3-a049-47c107f7ac39']
assert arg['name'] == 'Counter Name'
name = arg['valueRef'].value
assert not name in AddTaskDetailRequiredNames
AddTaskDetailRequiredNames[name] = node
elif node.is_node('Counter', '7035072e-0ca7-4ec4-b36b-0a25123386fe'):
arg = node.args['fbed0f80-12ad-48a2-a3a3-e733ee0b6c01']
assert arg['name'] == 'Name'
name = arg['valueRef'].value
if name is not None:
assert not name in CounterNames
CounterNames[name] = node
elif node.is_node('Create Static Object', '9ed37096-bc78-47c9-b0d0-44fef1f8002d'):
# NOTE: ๅๅปบ้ๆๆบๅ
ณ๏ผๅ
ๆพๅจ่ฟ้๏ผไนๅๅ่ฐๆด
on_script_start.append((node.idx, 'create_static_object'))
elif node.is_node('On Player Unit Dead', '95d00220-6cc5-4ecd-bbe7-73bbfbd827a7'):
assert num_events_in_args(node.nodeDef) is 0
on_player_unit_dead.append((node.idx, node.nodeDef['function']))
elif node.name == 'On Level Event' or node.nodeType == '7c2ac230-a0dd-41a7-a3f3-c501cfce7e59':
assert node.name == 'On Level Event'
assert node.nodeType == '7c2ac230-a0dd-41a7-a3f3-c501cfce7e59'
assert len(node.args) == 1
eventName = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
assert eventName not in BUILTIN_LEVEL_EVENTS
if eventName in levelEventListeners:
levelEventListeners[eventName].append((node.idx, node.nodeDef['function']))
else:
levelEventListeners[eventName] = [(node.idx, node.nodeDef['function'])]
elif node.name == 'Response Server Event' or node.nodeType == '0a8f2f1b-1999-411c-b690-662924beef44':
assert node.name == 'Response Server Event'
assert node.nodeType == '0a8f2f1b-1999-411c-b690-662924beef44'
assert len(node.args) == 1
eventName = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
if eventName in clientEventListeners:
clientEventListeners[eventName].append((node.idx, node.nodeDef['function']))
else:
clientEventListeners[eventName] = [(node.idx, node.nodeDef['function'])]
elif node.name == 'Trigger Client Event' or node.nodeType == 'e4a4da2b-d039-4c91-9fc6-4287557cee1f':
assert node.name == 'Trigger Client Event'
assert node.nodeType == 'e4a4da2b-d039-4c91-9fc6-4287557cee1f'
assert len(node.args) == 3
elif node.name == 'On UI Event' or node.nodeType == '2415af72-a28a-4647-a484-51b94f75c89e':
assert node.name == 'On UI Event'
assert node.nodeType == '2415af72-a28a-4647-a484-51b94f75c89e'
assert len(node.args) == 1
eventName = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
assert eventName not in BUILTIN_LEVEL_EVENTS
if eventName in levelEventListeners:
levelEventListeners[eventName].append((node.idx, node.nodeDef['function']))
else:
levelEventListeners[eventName] = [(node.idx, node.nodeDef['function'])]
elif node.name == 'On Task Event' or node.nodeType == '0dafa7b6-28f5-424a-83b3-bc6cc7728016':
assert node.name == 'On Task Event'
assert node.nodeType == '0dafa7b6-28f5-424a-83b3-bc6cc7728016'
assert len(node.args) == 1
eventName = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
assert eventName not in BUILTIN_LEVEL_EVENTS # ไปไฝฟ็จไธ่ฟๆฏๆๅฟ
่ฆ
if eventName in taskEventListeners:
taskEventListeners[eventName].append((node.idx, node.nodeDef['function']))
else:
taskEventListeners[eventName] = [(node.idx, node.nodeDef['function'])]
elif node.name == 'On Level Times Useout' or node.nodeType == '1a33c290-d57a-47ab-8551-7702e0c96f8e':
assert node.name == 'On Level Times Useout'
assert node.nodeType == '1a33c290-d57a-47ab-8551-7702e0c96f8e'
assert len(node.args) == 1
levelId = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
assert levelId not in BUILTIN_LEVEL_EVENTS
if levelId in levelTimesUseoutEventListeners:
levelTimesUseoutEventListeners[levelId].append((node.idx, node.nodeDef['function']))
else:
levelTimesUseoutEventListeners[levelId] = [(node.idx, node.nodeDef['function'])]
elif node.name == 'On Open Public Instance' or node.nodeType == 'ea5801b5-68cb-4a85-bb68-36076c00f1ac':
assert node.name == 'On Open Public Instance'
assert node.nodeType == 'ea5801b5-68cb-4a85-bb68-36076c00f1ac'
assert len(node.args) == 1
levelId = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
assert levelId not in BUILTIN_LEVEL_EVENTS
if levelId in openPublicInstnaceEventListeners:
openPublicInstnaceEventListeners[levelId].append((node.idx, node.nodeDef['function']))
else:
openPublicInstnaceEventListeners[levelId] = [(node.idx, node.nodeDef['function'])]
elif node.name == 'On Activity Start' or node.nodeType == 'b7ae0919-bf04-464d-bd9c-5535a8203500':
assert node.name == 'On Activity Start'
assert node.nodeType == 'b7ae0919-bf04-464d-bd9c-5535a8203500'
assert len(node.args) == 1
activityid = list(node.args.values())[0]['valueRef'].value
assert list(node.args.values())[0]['dataProvider'] is node
assert activityid not in BUILTIN_LEVEL_EVENTS
if activityid not in activityStartListeners:
activityStartListeners[activityid] = []
activityStartListeners[activityid].append((node.idx, node.nodeDef['function']))
elif node.name == 'On Question Event' or node.nodeType == '982dd635-edaf-43d1-a302-a8757cab48f2':
assert node.name == 'On Question Event'
assert node.nodeType == '982dd635-edaf-43d1-a302-a8757cab48f2'
questionEventListeners.append((node.idx, node.nodeDef['function']))
elif node.name == 'Trigger Level Event' or node.nodeType == '59e9fcdb-1c18-45d9-896d-b85267809adc':
assert node.name == 'Trigger Level Event'
assert node.nodeType == '59e9fcdb-1c18-45d9-896d-b85267809adc'
assert len(node.args) == 1
assert list(node.args.values())[0]['valueRef'].value not in BUILTIN_LEVEL_EVENTS
elif node.name == 'Opt Mechanism':
assert node.name == 'Opt Mechanism'
if node.args['0d538da7-1471-42c6-ab7b-4035c21ccae3']['valueRef'].value == True:
mechanismNode = node.args['1411ae69-eaba-4afb-8785-1006a7d2e093']['dataProvider']
if mechanismNode:
idx = mechanismNode.idx
has_opt_mechanism.add(idx)
elif node.name == 'Create Mechanism' or node.nodeType == 'f0f220b5-e584-4aff-8c86-f039d030c02b':
assert node.name == 'Create Mechanism'
assert node.nodeType == 'f0f220b5-e584-4aff-8c86-f039d030c02b'
relevantParams = {}
for value in node.args.values():
if value['name'] in STATIC_MECHANISM_PARAMS:
paramName = STATIC_MECHANISM_PARAMS[value['name']]
paramValue = value['valueRef'].value
if paramValue is None:
if value.get('dataProvider'):
strNode = value.get('dataProvider')
if strNode.name == 'String Data':
paramValue = strNode.args['f1c40cbd-73e8-469c-afde-0375398a510c']['valueRef'].value
if paramValue is None:
continue
relevantParams[paramName] = paramValue
# if len(relevantParams) == len(STATIC_MECHANISM_PARAMS):
elif node.name == 'Create Mechanism With Lightmap' or node.nodeType == '03cfda15-a336-4c6c-b8a4-75123e88628d':
assert node.name == 'Create Mechanism With Lightmap'
assert node.nodeType == '03cfda15-a336-4c6c-b8a4-75123e88628d'
relevantParams = {}
for value in node.args.values():
if value['name'] in STATIC_MECHANISM_PARAMS:
paramName = STATIC_MECHANISM_PARAMS[value['name']]
paramValue = value['valueRef'].value
if paramValue is None:
if value.get('dataProvider'):
strNode = value.get('dataProvider')
paramValue = strNode.args['f1c40cbd-73e8-469c-afde-0375398a510c']['valueRef'].value
else:
continue
relevantParams[paramName] = paramValue
if 'mechanism_config_id' in relevantParams and 'unit_type' in relevantParams:
mechanism_config_id = relevantParams.get('mechanism_config_id')
if 'mechanism_config_id' in relevantParams:
mechanism_config_id = relevantParams.get('mechanism_config_id')
elif node.is_node('Set Color Theme', 'a712c423-d24a-4617-a049-00f4f02b9ebb'):
assert setColorThemeNodeCount == 0
assert node.nodeDef['function']
assert num_events_in_args(node.nodeDef) is 0
setColorThemeNodeCount += 1
on_script_start.append((node.idx, node.nodeDef['function']))
# nodes[node.idx]['args'].append( ('_ColorMultipliers', append_runtime_data([ (1.0, 0.0, 0.0), (0.0, 1.0, 0.0) ])) )
for idx in mechanism_with_blcok:
if idx not in has_opt_mechanism:
for node in nodeGraph.values():
if idx == node.idx:
errorNode = node
mechanism_id = errorNode.args
for value in errorNode.args.values():
if value['name'] == 'Mechanism Config ID':
mechanism_id = value['valueRef'].value
print('WARNING:mechanism %d is used without open in leve:%s' % (mechanism_id, str(filename)))
def get_node_arg_index(args, argName):
for name, argIndex in args:
if name == argName:
return argIndex
raise Exception("cannot find argName %s" % argName)
for name, node in AddTaskDetailRequiredNames.items():
assert name in CounterNames
addTaskDetailNode = nodes[node.idx]
counterNode = nodes[CounterNames[name].idx]
if counterNode['preQueryNodes'] is not None:
if addTaskDetailNode['preQueryNodes'] is None:
addTaskDetailNode['preQueryNodes'] = []
addTaskDetailNode['preQueryNodes'].extend(counterNode['preQueryNodes'])
addTaskDetailNode['args'].append(('_CountNeeded', get_node_arg_index(counterNode['args'], 'Count Needed')))
addTaskDetailNode['args'].append(('_Value', get_node_arg_index(counterNode['returns'], 'Value')))
counterNode['eventLinks']['Value Updated'].append((node.idx, 'add_task_detail_counter_updated'))
counterNode['eventLinks']['Count Reached'].append((node.idx, 'add_task_detail_counter_reached'))
ret = {
'nodes': nodes,
'runTimeData': runTimeData,
}
return ret
def single_file_export(defData, editorData, byEditor, filename):
validate_def_data(defData)
validate_editor_data(editorData)
result = do_work(defData, editorData, byEditor, filename)
# result["staticMechanisms"] = singleStaticMechanisms
return result
def multi_file_export_mode():
import sys
import os
nodeDefFilepath = sys.argv[1]
levelScriptsPath = sys.argv[2]
resPath = sys.argv[3]
outputPath = os.path.join(sys.argv[4], 'levelscripts')
# nodeDefFilepath = "F:/H43/design/tools/main/meta/nodes.json"
# levelScriptsPath = "F:/H43/design/tools/csv2py/levelscripts/"
# resPath = 'F:/H43/common/neox/res'
# outputPath = 'F:/ttt'
defData = json.loads(open(nodeDefFilepath, 'r').read())
validate_def_data(defData)
try:
os.makedirs(outputPath)
except OSError:
if not os.path.isdir(outputPath):
raise
f = open(os.path.join(outputPath, '__init__.py'), 'wb')
f.close()
from output.common.city import data as cityData
needCheckLevelName = []
CITY_TYPE_CAMP_BATTLE = 3
for data in cityData.values():
if 'use_level_config' in data and data.get('city_type') != CITY_TYPE_CAMP_BATTLE:
needCheckLevelName.append('level_%d' % data['use_level_config'])
staticMechanisms = {}
staticMechanismsFile = 'static_mechanisms.py'
for filename in os.listdir(levelScriptsPath):
fullpath = os.path.join(levelScriptsPath, filename)
if os.path.isfile(fullpath) and fullpath.endswith('.json'):
outputFilePath = os.path.join(outputPath, os.path.splitext(filename)[0] + '.py')
print('[ActivityScriptExporter]:', fullpath, '-->', outputFilePath, end=' ')
editorFilepath = fullpath
editorData = json.loads(open(editorFilepath, 'r').read())
validate_editor_data(editorData)
if os.path.splitext(filename)[0] in needCheckLevelName:
is_city = True
else:
is_city = False
result, singleStaticMechanisms = do_work(defData, editorData, False, filename.split('.')[0], resPath,
is_city)
staticMechanisms[os.path.splitext(filename)[0]] = singleStaticMechanisms
resultStr = repr(result)
if is_city:
if "drop_gift_for_instance" in resultStr:
raise Exception('Drop Gift For Instance node in city level!!')
if "open_npc_dialog_broadcast_mode" in resultStr:
raise Exception('open_npc_dialog_broadcast_mode node in city level!!')
# if "Wait NPC Submit Event Time" in resultStr:
# raise Exception('use Wait NPC Submit Event Time in city level!!')
f = open(outputFilePath, 'wb')
f.write('data = ')
f.write(resultStr)
# NOTE: ไธ่ฝ่ฝฌconst๏ผๅ ไธบvectorๆฏ็จtupleๅญ็๏ผ่ฝฌๆขไผๅฏผ่ด่ๆฌๅฑๅฏนไบ็ฑปๅๅคๅฎ้่ฏฏ
f.write('\n_reload_all = True\n')
f.close()
print(' ... OK')
outputFilePath = os.path.join(outputPath, staticMechanismsFile)
print('[ActivityScriptExporter, StaticMechanisms]:', fullpath, '-->', outputFilePath, end=' ')
f = open(outputFilePath, 'wb')
f.write('data = ')
f.write(repr(staticMechanisms))
f.write('\n_reload_all = True\n')
f.close()
# NOTE: ็ผ่พๅจ่ฐ็จ๏ผ็จไบ้ช่ฏๆฐๆฎๅนถ่ฝฌๆข
def editor_validate_and_export(defData, editorData, filename, resPath):
typeErrors = []
noneValueErrors = []
unknownErrors = []
result = {
"errors": {
"TypeErrors": typeErrors,
"NullValueError": noneValueErrors,
"UnknownErrors": unknownErrors
},
"result": None
}
try:
result['result'] = single_file_export(defData, editorData, True, filename)
except NoneValueError as e:
noneValueErrors.append({
'id': e.nodeID,
'subItemId': e.argTypeID,
'message': e.message
})
except TypeMismatchError as e:
typeErrors.append({
'id': e.nodeID,
'subItemId': e.argTypeID,
'message': e.message
})
except Exception as e:
unknownErrors.append(e)
return result
# if __name__ == '__main__':
# import sys
# import os
# import json
#
# if len(sys.argv) == 4:
# nodeDefFilepath = sys.argv[1]
# editorFilepath = sys.argv[2]
# resPath = sys.argv[3]
#
# defData = json.loads(open(nodeDefFilepath, 'r').read())
# editorData = json.loads(open(editorFilepath, 'r').read())
#
# result = single_file_export(defData, editorData, False, os.path.basename(nodeDefFilepath).split('.')[0],
# resPath)
#
# print('data = ', end=' ')
# print(repr(result))
#
# elif len(sys.argv) == 5:
# multi_file_export_mode()
# else:
# nodeDefFilepath = 'E:\\PycharmProjects\\crawler\\editor\meta\\nodes.json'
# editorFilepath = 'E:\\PycharmProjects\\crawler\\editor\\graph\\temp.json'
# # resPath = 'F:/H43/trunk/Client_Resources/res'
# defData = json.loads(open(nodeDefFilepath, 'r').read())
# editorData = json.loads(open(editorFilepath, 'r').read())
#
# # scriptType = guess_script_type(editorFilepath, editorData)
#
# result = single_file_export(defData, editorData, False, os.path.basename(nodeDefFilepath).split('.')[0])
#
# print('data = ', end=' ')
# print(repr(result))
# # raise NotImplementedError("wrong args")
#
# sys.exit(0)
| [
"936844218@qq.com"
] | 936844218@qq.com |
93f988f8434a28ac537e6aa41024262334b8bd8c | 9bedd3e8c8b84cf4c8fa3ab1b04bfa0fddd1707e | /pymela/tools/tag_creators.py | 3e9e4c0106b5f9773424cb1d8028a280ab57c6c3 | [] | no_license | ckallidonis/Pymela | 6266b9285bff84cf22ac3de17c79819030b78ab3 | 1cb0e4cd578073808ea14816a8a690909360634d | refs/heads/main | 2023-03-17T05:58:19.966750 | 2021-03-16T14:25:27 | 2021-03-16T14:25:27 | 314,623,326 | 0 | 0 | null | 2021-01-07T00:21:40 | 2020-11-20T17:25:55 | Python | UTF-8 | Python | false | false | 962 | py | '''
Created on Nov.23, 2020
@author: Christos Kallidonis
Copyright (C) 2020. All rights reserved.
Helper functions that create tags used as keys in various objects
'''
valSgnPN = lambda i: ("+" if i > 0 else "") + str(i)
valSgnN = lambda i: ("" if i > 0 else "") + str(i)
def momH5(mom,jc = '_'):
return 'mom'+ jc + jc.join([valSgnPN(i) for i in mom])
def momVec(mStr,sep=','):
return [int(i) for i in (mStr.split(sep))]
def momFile(mom,jc= '.'):
return 'momXYZ'+ jc + jc.join([str(i) for i in mom])
def momString(mom,sep=','):
return sep.join([str(i) for i in mom])
def t0(t0):
return 't0_%d'%(t0)
def tsep(tsep):
return 'tsnk_%d'%(tsep)
def src_snk(opPair):
return 'src:%s_snk:%s'%(opPair[0],opPair[1])
def row(row):
return 'row_%d'%(row)
def disp(z3):
dL = lambda i: ("z" if i != 0 else "") + ("+" if i > 0 else "") + str(i)
return 'disp_%s'%(dL(z3))
def insertion(gamma):
return 'insertion_%s'%(gamma) | [
"kallidonis@gmail.com"
] | kallidonis@gmail.com |
14b571e28b701feb5fb4fd9e579333e6895f223b | 6108da1779a14d6eb3641b1f84a2a09f36678278 | /scripts/corpus/json_utils.py | ac69e6a880ed96f76d5fba7489241286782bf21d | [] | no_license | ShawonAshraf/emotion-classification-isear | 55def5fa4f4519021d78521720ad9e0254968784 | 03035ec2db487d10fab117889c682a25f35bfd8d | refs/heads/master | 2023-08-30T02:09:19.289558 | 2021-11-15T02:52:12 | 2021-11-15T02:52:12 | 283,501,779 | 1 | 0 | null | 2021-11-15T02:52:13 | 2020-07-29T13:07:56 | Python | UTF-8 | Python | false | false | 378 | py | import json
import os
"""
read from a json file
and returns the content as a python object -> list / dict
"""
def read_json_file(json_path):
if not os.path.exists(json_path):
print("File does not exist. Exiting ....")
exit(1)
else:
with open(json_path, "r") as jsonfile:
data = json.load(jsonfile)
return data
| [
"shawon13@live.com"
] | shawon13@live.com |
f30bdf31413b0ddc5c61eee3ae905f28120a9e8c | 739a4501128c948d73b522db2fca0b2a3f3de95e | /old_stuff/blender3d_exporter/io_export_pascal3d/p3d_export_armature.py | 2dd479a1f621b2c55617d4a92c2ccfe4c5fc317d | [
"MIT"
] | permissive | soerensen3/pascal3d | 68be3af62adb473c96198ba44155c8f74cd03d8b | 4671d383bd9e4ce13142104b18ec23954f445858 | refs/heads/master | 2021-04-06T18:40:16.813755 | 2019-01-29T21:35:39 | 2019-01-29T21:35:39 | 125,377,509 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,172 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 11 23:44:04 2014
@author: johannes
"""
import bpy
from . p3d_helper import *
from mathutils import *
from . p3d_export_anim import *
def ExportBonesActions( Config, Bones ):
for Bone in Bones:
BoneMatrix = Matrix()
Config.File.write("{}bone \'{}\'\n".format(" " * Config.Whitespace, Bone.name ))
Config.Whitespace += 1
#if Config.ExportRestBone:
#Rotation = ArmatureObject.data.bones[Bone.name] \
# .matrix.to_quaternion() * \
# Bone.rotation_quaternion
PoseMatrix = Matrix()
if Bone.parent:
PoseMatrix = Bone.parent.matrix.inverted()
PoseMatrix *= Bone.matrix
WriteMatrix( Config, PoseMatrix )
ExportBonesActions( Config, Bone.children )
Config.Whitespace -= 1
Config.File.write("{}end;\n".format(" " * Config.Whitespace ))
def ExportActions(Config, Armature):
if ( Armature.animation_data == None ) or ( Armature.animation_data.action == None ):
return
AnimationData = Armature.animation_data.action
scene = bpy.context.scene
Config.File.write("{}action \'{}\'\n".format(" " * Config.Whitespace, AnimationData.name ))
Config.Whitespace += 1
Bones = Armature.data.bones
BlenderCurrentFrame = scene.frame_current
for frame in range(scene.frame_start, scene.frame_end + 1):
scene.frame_set( frame )
Config.File.write("{}frame {}\n".format(" " * Config.Whitespace, frame ))
Config.Whitespace += 1
RootBones = [Bone for Bone in Bones if Bone.parent is None]
ExportBonesActions( Config, RootBones )
Config.Whitespace -= 1
Config.File.write("{}end;\n".format(" " * Config.Whitespace ))
scene.frame_set( BlenderCurrentFrame )
Config.Whitespace -= 1
Config.File.write("{}end;\n".format(" " * Config.Whitespace ))
def ExportArmature(Config, Object):
print("Exporting Armature")
Armature = Object.data
RootBones = [Bone for Bone in Armature.bones if Bone.parent is None]
Config.File.write("{}bones\n".format(" " * Config.Whitespace ))
Config.Whitespace += 1
ExportBones( Config, RootBones )
Config.Whitespace -= 1
Config.File.write("{}end;\n".format(" " * Config.Whitespace ))
if Config.ExportAnimation:
Config.File.write("{}actions\n".format(" " * Config.Whitespace ))
Config.Whitespace += 1
ExportActions( Config, Object )
Config.Whitespace -= 1
Config.File.write("{}end;\n".format(" " * Config.Whitespace ))
def ExportBones( Config, Bones ):
for Bone in Bones:
BoneMatrix = Matrix()
Config.File.write("{}bone \'{}\'\n".format(" " * Config.Whitespace, Bone.name ))
Config.Whitespace += 1
#if Config.ExportRestBone:
if Bone.parent:
BoneMatrix = Bone.parent.matrix_local.inverted()
BoneMatrix *= Bone.matrix_local
WriteMatrix( Config, BoneMatrix )
ExportBones( Config, Bone.children )
Config.Whitespace -= 1
Config.File.write("{}end;\n".format(" " * Config.Whitespace ))
''' else:
PoseBone = self.BlenderObject.pose.bones[Bone.name]
if Bone.parent:
BoneMatrix = PoseBone.parent.matrix.inverted()
BoneMatrix *= PoseBone.matrix
'''
# BoneSafeName = self.SafeName + "_" + \
# Util.SafeName(Bone.name)
# self.__OpenBoneFrame(BoneSafeName, BoneMatrix)
# self.__WriteBoneChildren(Bone)
# self.__CloseBoneFrame(BoneSafeName) | [
"johannes.soerensen@gmail.com"
] | johannes.soerensen@gmail.com |
a8225118f48451a97437b3a5591febc28c132342 | 5afac6cca24a469fdddc7dcf9d9e48f7f5221933 | /BiDLSTM-master/nn_layers.py | 3b2c3f3e65774e7db9b0c20a79751b4052f17157 | [] | no_license | biswajyoti2607/quora-question-pairs-experiments | b5c2ffc4b3082b2c10062f923c0e93afa7b06ca6 | 8bfdaf1aa9232bf3c01408e27551f410e99db892 | refs/heads/master | 2020-03-13T17:21:30.979335 | 2017-12-01T17:29:00 | 2017-12-01T17:29:00 | 112,755,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,086 | py | """
This file contains the rest of the layers in the network.
"""
import theano
from theano import tensor
from util import numpy_floatX
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
def embeddings_layer(x, Wemb, dim_proj):
"""
Returns the one-hot vector x after encoding in the Wemb embedding space.
:param x: One-hot index vector (25, 23...)
:param Wemb: Word embeddings
:return:
"""
n_words = x.shape[0]
n_max_letters_in_word = x.shape[1]
n_batch = x.shape[2]
dist = Wemb[x.flatten()].reshape([n_words, n_max_letters_in_word, n_batch, dim_proj])
return dist
def lstm_mask_layer(proj, mask):
"""
Removes any spurious output from the LSTM that's not covered by a label
or doesn't correspond to any real input.
:param proj: Output of the LSTM layer
:param mask: 1 if the position is valid, 0 otherwise
:return: The masked values
"""
return proj * mask[:, :, None]
def per_word_averaging_layer_distrib(proj, wmask, maxw):
"""
"""
print maxw, "MAXW"
dup = [tensor.shape_padaxis(proj, 0) for _ in range(maxw)]
dup = tensor.concatenate(dup, 0)
#dup = tensor.shape_padaxis(proj, 0)
mul = tensor.mul(wmask, dup)
mul = theano.printing.Print("mul", attrs=["shape"])(mul)
# mul = mul[mul.nonzero()]
# mul = mul[mul != 0]
compare = tensor.eq(mul, numpy_floatX(0.))
mul = mul[(1-compare).nonzero()[0]]
mul = theano.printing.Print("mul", attrs=["shape"])(mul)
# mul = theano.printing.Print("mul")(mul)
return mul
def per_word_averaging_layer(proj, wmask, maxw, trim=False):
"""
:param proj: Output of the LSTM layer
:param wmask: Unravelled 4D-index tensor (represented in 2d)
:return: The per-word averages.
"""
n_chars = proj.shape[0]
n_samples = proj.shape[1]
n_proj = proj.shape[2]
dist = per_word_averaging_layer_distrib(proj, wmask, maxw)
dist = dist.dimshuffle(1, 2, 0, 3)
divider = tensor.cast(tensor.neq(dist, numpy_floatX(0.0)).sum(axis=0), theano.config.floatX)
divider += tensor.eq(divider, numpy_floatX(0.0)) # Filter NaNs
tmp = tensor.cast(dist.sum(axis=0), theano.config.floatX)
tmp /= divider
# tmp = theano.printing.Print("tmp", attrs=["shape"])(tmp)
#_max = dist.max(axis=0)
#_min = dist.min(axis=0)
#tmp = tensor.concatenate([tmp, _max, _min], axis=2)
# tmp = theano.printing.Print("tmp", attrs=["shape"])(tmp)
if not trim:
return tmp
else:
ret = tensor.zeros_like(tmp)
ret = tensor.set_subtensor(ret[:, :-1], tmp[:, 1:])
return tensor.cast(ret, theano.config.floatX)
def softmax_layer(avg_per_word, U, b, y_mask, maxw, training=False):
"""
Produces the final labels via softmax
:param avg_per_word: Output from word-averaging
:param U: Classification weight matrix
:param b: Classification bias layer
:param y_mask: Because not all fragments are the same length, set y_mask to 0 in those positions
where the output is undefined, causing this thing to output the special 0 label (for "don't care")
:return: Softmax predictions
"""
#avg_per_word = theano.printing.Print("avg_per_word")(avg_per_word)
if training:
srng = RandomStreams(seed=12345)
dropout_mask = tensor.cast(srng.binomial(size=U.shape, p=0.5), theano.config.floatX)
#U = theano.printing.Print("U", attrs=["shape"])(U)
#dropout_mask = theano.printing.Print("dropout_mask", attrs=["shape"])(dropout_mask)
raw_pred, _ = theano.scan(fn=lambda p, free_variable: tensor.nnet.softmax(tensor.dot(p, tensor.mul(U, dropout_mask)) + b),
outputs_info=None,
sequences=[avg_per_word, tensor.arange(maxw)]
)
else:
raw_pred, _ = theano.scan(fn=lambda p, free_variable: tensor.nnet.softmax(tensor.dot(p, U) + b),
outputs_info=None,
sequences=[avg_per_word, tensor.arange(maxw)]
)
return raw_pred
| [
"biswajyoti@gatech.edu"
] | biswajyoti@gatech.edu |
5d9db594141e523299b132bb63b29929f1a35ebe | aeb2f0bb7b01f87a1b6c65b88b216bed47025fe5 | /experiment/model225_3.py | 89d3aacb28fc16f8f206203bc05a981c353f8a0e | [] | no_license | kurupical/riiid | 7e68239cd50243fbb734bf433d60ebd7469cb180 | 7bab580ce03d03873748a6afc91092c11871465f | refs/heads/master | 2023-03-30T04:15:54.109815 | 2021-04-04T01:20:33 | 2021-04-04T01:20:33 | 302,828,112 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 27,225 | py | import numpy as np
import pandas as pd
import gc
import random
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from datetime import datetime as dt
import os
import glob
import pickle
import json
from feature_engineering.feature_factory_for_transformer import FeatureFactoryForTransformer
from feature_engineering.feature_factory import \
FeatureFactoryManager, \
DurationPreviousContent, \
ElapsedTimeBinningEncoder, \
UserContentRateEncoder, \
QuestionQuestionTableEncoder2, \
PreviousAnswer2, \
StudyTermEncoder2, \
MeanAggregator, \
ElapsedTimeMeanByContentIdEncoder, \
DurationFeaturePostProcess
from experiment.common import get_logger
import time
from transformers import AdamW, get_linear_schedule_with_warmup
# Fix RNG seeds so model initialization and any numpy sampling are reproducible.
torch.manual_seed(0)
np.random.seed(0)
# --- run configuration flags ---
is_debug = False  # debug switch; presumably trims the dataset/epochs elsewhere in the script -- confirm usage
is_make_feature_factory = False  # whether to (re)build the feature-factory artifacts instead of reusing them
load_pickle = True  # load precomputed features from pickle rather than recomputing them
epochs = 12  # number of training epochs
device = torch.device("cuda")  # assumes a CUDA-capable GPU is available; no CPU fallback here
wait_time = 0  # NOTE(review): purpose not visible in this chunk -- presumably a startup delay in seconds
class SAKTDataset(Dataset):
    """Fixed-length sequence dataset for the SAKT-style transformer.

    ``group`` maps user_id -> dict of equal-length per-user feature sequences
    (numpy arrays), keyed by feature name or by the tuple
    ``("content_id", "content_type_id")``.  Each item packs one user's history
    into a zero-left-padded buffer of width ``max_seq`` and splits it into
    "previous step" features (positions ``[:-1]``, the model's known inputs)
    and "current step" features/targets (positions ``[1:]``).

    In test mode (``is_test=True``) every row flagged in the per-user
    ``"is_val"`` array becomes its own sample, ending at that row; otherwise
    there is exactly one sample per user covering the whole history.
    """

    # Row index of each raw feature inside the packed per-item buffer.
    FEATURE_ROWS = {
        ("content_id", "content_type_id"): 0,
        "user_answer": 1,
        "part": 2,
        "prior_question_elapsed_time_bin300": 3,
        "duration_previous_content_bin300": 4,
        "answered_correctly": 5,
        "prior_question_had_explanation": 6,
        "rating_diff_content_user_id": 7,
        "task_container_id_bin300": 8,
        "previous_answer_index_content_id": 9,
        "previous_answer_content_id": 10,
        "timediff-elapsedtime_bin500": 11,
    }

    def __init__(self, group, n_skill, n_part=8, max_seq=100, is_test=False, predict_mode=False):
        super().__init__()
        self.max_seq = max_seq
        self.n_skill = n_skill
        self.samples = group
        self.is_test = is_test
        self.n_part = n_part
        self.predict_mode = predict_mode

        if not is_test:
            # Training: one sample per user; -1 means "use the full history".
            self.user_ids = [[user_id, -1] for user_id in group]
        else:
            # Validation: one sample per flagged row.  The stored end index is
            # exclusive, hence pos + 1.
            self.user_ids = []
            for user_id, seqs in group.items():
                val_flags = seqs["is_val"]
                for pos in range(len(seqs[("content_id", "content_type_id")])):
                    if val_flags[pos]:
                        self.user_ids.append([user_id, pos + 1])

    def __len__(self):
        return len(self.user_ids)

    def __getitem__(self, index):
        user_id, end = self.user_ids[index]
        seqs = self.samples[user_id]

        rows = self.FEATURE_ROWS
        buf = np.zeros((len(rows), self.max_seq), dtype=np.float32)

        total_len = len(seqs[("content_id", "content_type_id")])
        if self.is_test:
            # Keep at most the last max_seq rows ending at `end`.
            start = max(0, end - self.max_seq)
        else:
            start, end = 0, total_len
        window = end - start

        # Right-align the selected window inside the zero-padded buffer.
        for key, row in rows.items():
            buf[row, -window:] = seqs[key][start:end]

        def at_current(key):
            # Values known *before* the predicted step (positions 0..T-2).
            return buf[rows[key], :-1]

        def at_next(key):
            # Values aligned with the step being predicted (positions 1..T-1).
            return buf[rows[key], 1:]

        return {
            # Shifted so 1: lecture, 2: not correct, 3: correct
            # (lectures are presumably stored as -1 in answered_correctly -- confirm upstream).
            "x": at_current("answered_correctly") + 2,
            "user_answer": at_current("user_answer") + 1,
            "target_id": at_next(("content_id", "content_type_id")),
            "part": at_next("part"),
            "elapsed_time": at_next("prior_question_elapsed_time_bin300"),
            "duration_previous_content": at_next("duration_previous_content_bin300"),
            "label": at_next("answered_correctly"),
            "prior_q": at_next("prior_question_had_explanation"),
            "rate_diff": at_next("rating_diff_content_user_id"),
            "container_id": at_next("task_container_id_bin300"),
            "previous_answer_index_content_id": at_next("previous_answer_index_content_id"),
            "previous_answer_content_id": at_next("previous_answer_content_id"),
            "timediff-elapsedtime_bin500": at_next("timediff-elapsedtime_bin500"),
            "prior_content_id": at_current(("content_id", "content_type_id")),
        }
class FFN(nn.Module):
    """Position-wise feed-forward block: Linear -> LayerNorm -> ReLU -> Linear -> LayerNorm.

    Input and output share the same trailing dimension (*state_size*).
    """

    def __init__(self, state_size=200):
        super(FFN, self).__init__()
        self.state_size = state_size
        # Submodule attribute names are kept stable so existing
        # state_dict checkpoints continue to load.
        self.lr1 = nn.Linear(state_size, state_size)
        self.ln1 = nn.LayerNorm(state_size)
        self.relu = nn.ReLU()
        self.lr2 = nn.Linear(state_size, state_size)
        self.ln2 = nn.LayerNorm(state_size)

    def forward(self, x):
        """Apply the feed-forward stack; shape is preserved."""
        hidden = self.relu(self.ln1(self.lr1(x)))
        return self.ln2(self.lr2(hidden))
class ContEmbedding(nn.Module):
    # NOTE(review): dead code -- this class is redefined below with
    # hidden_size=embed_dim (instead of embed_dim // 2). Python binds the
    # later definition, so this version is never instantiated.
    def __init__(self, input_dim, embed_dim, seq_len):
        super(ContEmbedding, self).__init__()
        self.embed_dim = embed_dim
        # BatchNorm1d channel count = seq_len - 1, i.e. inputs are expected
        # as (batch, seq_len - 1, input_dim).
        self.bn = nn.BatchNorm1d(seq_len-1)
        self.gru = nn.GRU(input_size=input_dim, hidden_size=embed_dim // 2)
        self.ln2 = nn.LayerNorm(embed_dim // 2)
    def forward(self, x):
        x = self.bn(x)
        x, _ = self.gru(x)
        x = self.ln2(x)
        return x
def future_mask(seq_length):
    """Boolean upper-triangular mask of shape (seq_length, seq_length).

    Entry [i, j] is True when j > i, i.e. position i is forbidden from
    attending to the future position j (the convention expected by
    nn.TransformerEncoder's ``mask`` argument as used in this file).
    """
    # Build directly in torch instead of round-tripping through numpy,
    # and avoid shadowing the function name with a local variable.
    ones = torch.ones(seq_length, seq_length, dtype=torch.bool)
    return torch.triu(ones, diagonal=1)
class CatEmbedding(nn.Module):
    """LayerNorm -> GRU -> LayerNorm over categorical embeddings.

    The GRU halves the feature dimension: input (*, *, embed_dim) comes
    out as (*, *, embed_dim // 2).
    """

    def __init__(self, embed_dim):
        super(CatEmbedding, self).__init__()
        self.embed_dim = embed_dim
        # Attribute names kept stable for state_dict compatibility.
        self.ln1 = nn.LayerNorm(embed_dim)
        self.gru = nn.GRU(input_size=embed_dim, hidden_size=embed_dim // 2)
        self.ln2 = nn.LayerNorm(embed_dim // 2)

    def forward(self, x):
        """Normalize, run the GRU, and normalize its output sequence."""
        normed = self.ln1(x)
        seq_out, _hidden = self.gru(normed)
        return self.ln2(seq_out)
class ContEmbedding(nn.Module):
    """Embed continuous features: BatchNorm over the time axis, then a GRU.

    This redefinition rebinds the ContEmbedding name (the earlier class of
    the same name used hidden_size=embed_dim // 2); since it appears later
    in the module, it is the version SAKTModel instantiates.
    """

    def __init__(self, input_dim, embed_dim, seq_len):
        super(ContEmbedding, self).__init__()
        self.embed_dim = embed_dim
        # Channel count seq_len - 1: inputs arrive as
        # (batch, seq_len - 1, input_dim) because one timestep is dropped
        # when the dataset builds its sequences.
        self.bn = nn.BatchNorm1d(seq_len-1)
        self.gru = nn.GRU(input_size=input_dim, hidden_size=embed_dim)
        self.ln2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """Return normalized GRU features of the continuous inputs."""
        normed = self.bn(x)
        seq_out, _hidden = self.gru(normed)
        return self.ln2(seq_out)
class SAKTModel(nn.Module):
    """SAKT-style knowledge-tracing model.

    Concatenates many categorical embeddings plus a GRU-embedded continuous
    feature, feeds the result through a causal TransformerEncoder followed
    by a GRU, and predicts per-step correctness logits.
    """
    def __init__(self, n_skill, max_seq=100, embed_dim=128, num_heads=8, dropout=0.2,
                 cont_emb=None):
        super(SAKTModel, self).__init__()
        self.n_skill = n_skill
        self.embed_dim_cat = embed_dim
        # 5 "small" + 5 "middle" categorical embeddings + the big content
        # embedding; the continuous part (cont_emb) is appended afterwards.
        embed_dim_small_cat = 32
        embed_dim_middle_cat = 32
        embed_dim_cat_all = embed_dim_small_cat*5 + embed_dim_middle_cat*5 + embed_dim
        embed_dim_all = embed_dim_cat_all + cont_emb
        # x takes values {1: lecture, 2: incorrect, 3: correct} (+padding 0).
        self.embedding = nn.Embedding(4, embed_dim_small_cat)
        self.user_answer_embedding = nn.Embedding(6, self.embed_dim_cat)
        self.prior_question_had_explanation_embedding = nn.Embedding(4, embed_dim_small_cat)
        self.e_embedding = nn.Embedding(n_skill + 1, self.embed_dim_cat)
        self.part_embedding = nn.Embedding(8, embed_dim_small_cat)
        # 302 = 300 bins + overflow bucket + padding (bin300 features).
        self.elapsed_time_embedding = nn.Embedding(302, embed_dim_middle_cat)
        self.duration_previous_content_embedding = nn.Embedding(302, embed_dim_middle_cat)
        self.container_embedding = nn.Embedding(302, embed_dim_middle_cat)
        self.prev_ans_idx_embedding = nn.Embedding(302, embed_dim_middle_cat)
        self.prev_ans_content_id_embedding = nn.Embedding(4, embed_dim_small_cat)
        self.timediff_elapsedtime_embedding = nn.Embedding(502, embed_dim_middle_cat)
        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim_all, nhead=num_heads, dropout=dropout)
        self.transformer_enc = nn.TransformerEncoder(encoder_layer=encoder_layer, num_layers=4)
        self.gru = nn.GRU(input_size=embed_dim_all, hidden_size=embed_dim_all)
        self.continuous_embedding = ContEmbedding(input_dim=1, embed_dim=cont_emb, seq_len=max_seq)
        # Projects (user_answer + content) embedding down to the small width.
        self.prior_content_embedding = nn.Sequential(
            nn.Linear(self.embed_dim_cat, embed_dim_small_cat),
            nn.LayerNorm(embed_dim_small_cat)
        )
        self.cat_embedding = nn.Sequential(
            nn.Linear(embed_dim_cat_all, embed_dim_cat_all),
            nn.LayerNorm(embed_dim_cat_all)
        )
        self.layer_normal = nn.LayerNorm(embed_dim_all)
        self.ffn = FFN(embed_dim_all)
        self.dropout = nn.Dropout(dropout/2)
        self.pred = nn.Linear(embed_dim_all, 1)
    def forward(self, item, device):
        """Return per-step logits of shape (batch, seq_len - 1)."""
        x = item["x"].to(device).long()
        question_ids = item["target_id"].to(device).long()
        parts = item["part"].to(device).long()
        label = item["label"].to(device).float()
        elapsed_time = item["elapsed_time"].to(device).long()
        duration_previous_content = item["duration_previous_content"].to(device).long()
        prior_q = item["prior_q"].to(device).long()
        user_answer = item["user_answer"].to(device).long()
        rate_diff = item["rate_diff"].to(device).float()
        container_id = item["container_id"].to(device).long()
        prev_ans_idx = item["previous_answer_index_content_id"].to(device).long()
        prior_content_id_ans_correctly = item["previous_answer_content_id"].to(device).long()
        prior_content_id = item["prior_content_id"].to(device).long()
        timediff_elapsedtime = item["timediff-elapsedtime_bin500"].to(device).long()
        # Causal mask: position i may not attend to positions j > i.
        att_mask = future_mask(x.size(1)).to(device)
        e = self.e_embedding(question_ids)
        p = self.part_embedding(parts)
        prior_q_emb = self.prior_question_had_explanation_embedding(prior_q)
        user_answer_emb = self.user_answer_embedding(user_answer)
        prior_content_id_emb = self.e_embedding(prior_content_id)
        # Combine "what was answered" with "on which content" before projecting.
        prior_content_user_answer_emb = self.prior_content_embedding(user_answer_emb + prior_content_id_emb)
        timediff_elapsedtime_emb = self.timediff_elapsedtime_embedding(timediff_elapsedtime)
        # decoder
        x = self.embedding(x)
        el_time_emb = self.elapsed_time_embedding(elapsed_time)
        dur_emb = self.duration_previous_content_embedding(duration_previous_content)
        container_emb = self.container_embedding(container_id)
        prev_ans_idx_emb = self.prev_ans_idx_embedding(prev_ans_idx)
        prev_ans_content_id_emb = self.prev_ans_content_id_embedding(prior_content_id_ans_correctly)
        # Concatenation order defines the layout the Linear layers were
        # trained on -- do not reorder.
        x = torch.cat([x, el_time_emb, dur_emb, e, p, prior_q_emb, container_emb,
                       prev_ans_idx_emb, prev_ans_content_id_emb,
                       prior_content_user_answer_emb,
                       timediff_elapsedtime_emb], dim=2)
        cont = rate_diff
        cont_emb = self.continuous_embedding(cont.view(x.size(0), x.size(1), -1))
        x = self.cat_embedding(x)
        x = torch.cat([x, cont_emb], dim=2)
        x = x.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]
        att_dec = self.transformer_enc(x,
                                       mask=att_mask)
        att_dec, _ = self.gru(att_dec)
        att_dec = att_dec.permute(1, 0, 2) # att_output: [s_len, bs, embed] => [bs, s_len, embed]
        # Residual feed-forward head, then a scalar logit per step.
        x = self.layer_normal(att_dec)
        x = self.ffn(x) + att_dec
        x = self.dropout(x)
        x = self.pred(x)
        return x.squeeze(-1)
def train_epoch(model, train_iterator, val_iterator, optim, criterion, scheduler, epoch, device="cuda"):
    """Run one training epoch, then a (possibly truncated) validation pass.

    Returns (mean train loss, train last-step accuracy, train last-step AUC,
    validation last-step AUC). Labels < 0 mark padding/lectures and are
    excluded from the loss and the metrics.
    """
    model.train()
    train_loss = []
    num_corrects = 0
    num_total = 0
    labels = []
    outs = []
    tbar = tqdm(train_iterator)
    for item in tbar:
        optim.zero_grad()
        label = item["label"].to(device).float()
        output = model(item, device)
        # Loss over every valid (label >= 0) position of the sequence.
        target_idx = (label.view(-1) >= 0).nonzero()
        loss = criterion(output.view(-1)[target_idx], label.view(-1)[target_idx])
        loss.backward()
        optim.step()
        scheduler.step()
        train_loss.append(loss.item())
        # Metrics use only the final timestep of each sequence.
        output = output[:, -1]
        label = label[:, -1]
        target_idx = (label.view(-1) >= 0).nonzero()
        pred = (torch.sigmoid(output) >= 0.5).long()
        num_corrects += (pred.view(-1)[target_idx] == label.view(-1)[target_idx]).sum().item()
        num_total += len(label)
        labels.extend(label.view(-1)[target_idx].data.cpu().numpy())
        outs.extend(output.view(-1)[target_idx].data.cpu().numpy())
        tbar.set_description('loss - {:.4f}'.format(loss))
    acc = num_corrects / num_total
    auc = roc_auc_score(labels, outs)
    loss = np.mean(train_loss)
    preds = []
    labels = []
    model.eval()
    i = 0
    with torch.no_grad():
        for item in tqdm(val_iterator):
            label = item["label"].to(device).float()
            output = model(item, device)
            preds.extend(torch.nn.Sigmoid()(output[:, -1]).view(-1).data.cpu().numpy().tolist())
            labels.extend(label[:, -1].view(-1).data.cpu().numpy())
            i += 1
            # Early epochs only sample ~100 validation batches for speed.
            if i > 100 and epoch < 10:
                break
    auc_val = roc_auc_score(labels, preds)
    return loss, acc, auc, auc_val
def main(params: dict,
         output_dir: str):
    """End-to-end training pipeline: feature engineering, dataset build
    (or pickle load), SAKTModel training, validation scoring, artifact and
    mlflow logging.

    Side effects: reads/writes pickles under ../input, writes model
    checkpoints, an OOF csv and transformer_param.json into *output_dir*.
    Reads module-level globals: is_debug, load_pickle, epochs, device,
    is_make_feature_factory, dropout.
    """
    import mlflow
    print("start params={}".format(params))
    model_id = "all"
    logger = get_logger()
    df = pd.read_pickle("../input/riiid-test-answer-prediction/train_merged.pickle")
    # df = pd.read_pickle("../input/riiid-test-answer-prediction/split10/train_0.pickle").sort_values(["user_id", "timestamp"]).reset_index(drop=True)
    if is_debug:
        df = df.head(30000)
    df["prior_question_had_explanation"] = df["prior_question_had_explanation"].fillna(-1)
    # df["answered_correctly"] = df["answered_correctly"].replace(-1, np.nan)
    # Feature typing consumed by FeatureFactoryForTransformer.
    column_config = {
        ("content_id", "content_type_id"): {"type": "category"},
        "user_answer": {"type": "leakage_feature"},
        "answered_correctly": {"type": "leakage_feature"},
        "part": {"type": "category"},
        "prior_question_elapsed_time_bin300": {"type": "category"},
        "duration_previous_content_bin300": {"type": "category"},
        "prior_question_had_explanation": {"type": "category"},
        "rating_diff_content_user_id": {"type": "numeric"},
        "task_container_id_bin300": {"type": "category"},
        "previous_answer_index_content_id": {"type": "category"},
        "previous_answer_content_id": {"type": "category"},
        "timediff-elapsedtime_bin500": {"type": "category"}
    }
    # Rebuild engineered features only when not loading cached datasets.
    if not load_pickle or is_debug:
        feature_factory_dict = {"user_id": {}}
        feature_factory_dict["user_id"]["DurationPreviousContent"] = DurationPreviousContent(is_partial_fit=True)
        feature_factory_dict["user_id"]["ElapsedTimeBinningEncoder"] = ElapsedTimeBinningEncoder()
        feature_factory_dict["user_id"]["UserContentRateEncoder"] = UserContentRateEncoder(rate_func="elo",
                                                                                           column="user_id")
        feature_factory_dict["user_id"]["PreviousAnswer2"] = PreviousAnswer2(groupby="user_id",
                                                                             column="content_id",
                                                                             is_debug=is_debug,
                                                                             model_id=model_id,
                                                                             n=300)
        feature_factory_dict["user_id"]["StudyTermEncoder2"] = StudyTermEncoder2(is_partial_fit=True)
        feature_factory_dict["user_id"][f"MeanAggregatorStudyTimebyUserId"] = MeanAggregator(column="user_id",
                                                                                             agg_column="study_time",
                                                                                             remove_now=False)
        feature_factory_dict["user_id"]["ElapsedTimeMeanByContentIdEncoder"] = ElapsedTimeMeanByContentIdEncoder()
        feature_factory_dict["post"] = {
            "DurationFeaturePostProcess": DurationFeaturePostProcess()
        }
        feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
                                                        logger=logger,
                                                        split_num=1,
                                                        model_id=model_id,
                                                        load_feature=not is_debug,
                                                        save_feature=not is_debug)
        print("all_predict")
        df = feature_factory_manager.all_predict(df)
        # Clip timediff-elapsedtime (ms) to [-90, 90] after converting to seconds.
        def f(x):
            x = x // 1000
            if x < -90:
                return -90
            if x > 90:
                return 90
            return x
        df["task_container_id_bin300"] = [x if x < 300 else 300 for x in df["task_container_id"]]
        df["timediff-elapsedtime_bin500"] = [f(x) for x in df["timediff-elapsedtime"].values]
        df = df[["user_id", "content_id", "content_type_id", "part", "user_answer", "answered_correctly",
                 "prior_question_elapsed_time_bin300", "duration_previous_content_bin300",
                 "prior_question_had_explanation", "rating_diff_content_user_id", "task_container_id_bin300",
                 "previous_answer_index_content_id", "previous_answer_content_id", "row_id",
                 "timediff-elapsedtime_bin500"]]
        print(df.head(10))
    print("data preprocess")
    ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
                                                      dict_path="../feature_engineering/",
                                                      sequence_length=params["max_seq"],
                                                      logger=logger)
    ff_for_transformer.make_dict(df=df)
    n_skill = len(ff_for_transformer.embbed_dict[("content_id", "content_type_id")])
    if not load_pickle or is_debug:
        # Mark the last-2.5M rows as validation and keep the rest for training.
        df_val_row = pd.read_feather("../input/riiid-test-answer-prediction/train_transformer_last2500k_only_row_id.feather")
        if is_debug:
            df_val_row = df_val_row.head(3000)
        df_val_row["is_val"] = 1
        df = pd.merge(df, df_val_row, how="left", on="row_id")
        df["is_val"] = df["is_val"].fillna(0)
        print(df["is_val"].value_counts())
        w_df = df[df["is_val"] == 0]
        # Chunk each user's history into max_seq-sized groups so one sample
        # never exceeds the model's sequence length.
        w_df["group"] = (w_df.groupby("user_id")["user_id"].transform("count") - w_df.groupby("user_id").cumcount()) // params["max_seq"]
        w_df["user_id"] = w_df["user_id"].astype(str) + "_" + w_df["group"].astype(str)
        group = ff_for_transformer.all_predict(w_df)
        dataset_train = SAKTDataset(group,
                                    n_skill=n_skill,
                                    max_seq=params["max_seq"])
        del w_df
        gc.collect()
    ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
                                                      dict_path="../feature_engineering/",
                                                      sequence_length=params["max_seq"],
                                                      logger=logger)
    if not load_pickle or is_debug:
        # Validation uses question rows only (content_type_id == 0).
        group = ff_for_transformer.all_predict(df[df["content_type_id"] == 0])
        dataset_val = SAKTDataset(group,
                                  is_test=True,
                                  n_skill=n_skill,
                                  max_seq=params["max_seq"])
    os.makedirs("../input/feature_engineering/model225", exist_ok=True)
    # Cache (or restore) the heavy datasets to avoid recomputation.
    if not is_debug and not load_pickle:
        with open(f"../input/feature_engineering/model225/train.pickle", "wb") as f:
            pickle.dump(dataset_train, f)
        with open(f"../input/feature_engineering/model225/val.pickle", "wb") as f:
            pickle.dump(dataset_val, f)
    if not is_debug and load_pickle:
        with open(f"../input/feature_engineering/model225/train.pickle", "rb") as f:
            dataset_train = pickle.load(f)
        with open(f"../input/feature_engineering/model225/val.pickle", "rb") as f:
            dataset_val = pickle.load(f)
        print("loaded!")
    dataloader_train = DataLoader(dataset_train, batch_size=params["batch_size"], shuffle=True)
    dataloader_val = DataLoader(dataset_val, batch_size=params["batch_size"], shuffle=False)
    # NOTE(review): `dropout` here is the module-level global set in the
    # __main__ loop, not params["dropout"] -- they currently hold the same
    # value, but this is fragile; confirm which one is intended.
    model = SAKTModel(n_skill, embed_dim=params["embed_dim"], max_seq=params["max_seq"], dropout=dropout,
                      cont_emb=params["cont_emb"])
    # Standard AdamW setup: no weight decay on biases and LayerNorm weights.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=params["lr"],
                      weight_decay=0.01,
                      )
    num_train_optimization_steps = int(len(dataloader_train) * 20)
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=params["num_warmup_steps"],
                                                num_training_steps=num_train_optimization_steps)
    criterion = nn.BCEWithLogitsLoss()
    model.to(device)
    criterion.to(device)
    for epoch in range(epochs):
        loss, acc, auc, auc_val = train_epoch(model, dataloader_train, dataloader_val, optimizer, criterion, scheduler,
                                              epoch, device)
        print("epoch - {} train_loss - {:.3f} auc - {:.4f} auc-val: {:.4f}".format(epoch, loss, auc, auc_val))
        torch.save(model.state_dict(), f"{output_dir}/transformers_epoch{epoch}_auc{round(auc, 4)}.pth")
    # Full validation pass for the final out-of-fold predictions.
    preds = []
    labels = []
    with torch.no_grad():
        for item in tqdm(dataloader_val):
            label = item["label"].to(device).float()
            output = model(item, device)
            preds.extend(torch.nn.Sigmoid()(output[:, -1]).view(-1).data.cpu().numpy().tolist())
            labels.extend(label[:, -1].view(-1).data.cpu().numpy().tolist())
    auc_transformer = roc_auc_score(labels, preds)
    print("single transformer: {:.4f}".format(auc_transformer))
    df_oof = pd.DataFrame()
    # df_oof["row_id"] = df.loc[val_idx].index
    print(len(dataloader_val))
    print(len(preds))
    df_oof["predict"] = preds
    df_oof["target"] = labels
    df_oof.to_csv(f"{output_dir}/transformers1.csv", index=False)
    """
    df_oof2 = pd.read_csv("../output/ex_237/20201213110353/oof_train_0_lgbm.csv")
    df_oof2.columns = ["row_id", "predict_lgbm", "target"]
    df_oof2 = pd.merge(df_oof, df_oof2, how="inner")
    auc_lgbm = roc_auc_score(df_oof2["target"].values, df_oof2["predict_lgbm"].values)
    print("lgbm: {:.4f}".format(auc_lgbm))
    print("ensemble")
    max_auc = 0
    max_nn_ratio = 0
    for r in np.arange(0, 1.05, 0.05):
        auc = roc_auc_score(df_oof2["target"].values, df_oof2["predict_lgbm"].values*(1-r) + df_oof2["predict"].values*r)
        print("[nn_ratio: {:.2f}] AUC: {:.4f}".format(r, auc))
        if max_auc < auc:
            max_auc = auc
            max_nn_ratio = r
    print(len(df_oof2))
    """
    if not is_debug:
        mlflow.start_run(experiment_id=10,
                         run_name=os.path.basename(__file__))
        for key, value in params.items():
            mlflow.log_param(key, value)
        mlflow.log_metric("auc_val", auc_transformer)
        mlflow.end_run()
    torch.save(model.state_dict(), f"{output_dir}/transformers.pth")
    del model
    torch.cuda.empty_cache()
    with open(f"{output_dir}/transformer_param.json", "w") as f:
        json.dump(params, f)
    if is_make_feature_factory:
        # feature factory
        # Re-fit the feature factories on the full data and pickle them for
        # inference-time use (loggers stripped so the pickles stay portable).
        feature_factory_dict = {"user_id": {}}
        feature_factory_dict["user_id"]["DurationPreviousContent"] = DurationPreviousContent(is_partial_fit=True)
        feature_factory_dict["user_id"]["ElapsedTimeBinningEncoder"] = ElapsedTimeBinningEncoder()
        feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
                                                        logger=logger,
                                                        split_num=1,
                                                        model_id="all",
                                                        load_feature=not is_debug,
                                                        save_feature=not is_debug)
        ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
                                                          dict_path="../feature_engineering/",
                                                          sequence_length=params["max_seq"],
                                                          logger=logger)
        df = pd.read_pickle("../input/riiid-test-answer-prediction/train_merged.pickle")
        if is_debug:
            df = df.head(10000)
        df = df.sort_values(["user_id", "timestamp"]).reset_index(drop=True)
        feature_factory_manager.fit(df)
        df = feature_factory_manager.all_predict(df)
        for dicts in feature_factory_manager.feature_factory_dict.values():
            for factory in dicts.values():
                factory.logger = None
        feature_factory_manager.logger = None
        with open(f"{output_dir}/feature_factory_manager.pickle", "wb") as f:
            pickle.dump(feature_factory_manager, f)
        ff_for_transformer.fit(df)
        ff_for_transformer.logger = None
        with open(f"{output_dir}/feature_factory_manager_for_transformer.pickle", "wb") as f:
            pickle.dump(ff_for_transformer, f)
if __name__ == "__main__":
    # Optional startup delay (e.g. waiting for another job to free the GPU).
    if not is_debug:
        for _ in tqdm(range(wait_time)):
            time.sleep(1)
    # Output directory keyed by script name + timestamp.
    output_dir = f"../output/{os.path.basename(__file__).replace('.py', '')}/{dt.now().strftime('%Y%m%d%H%M%S')}/"
    os.makedirs(output_dir, exist_ok=True)
    # Single-point "grid": continuous embedding width 8, categorical 256.
    for cont_emb in [8]:
        for cat_emb in [256]:
            dropout = 0.2
            lr = 0.8e-3
            if is_debug:
                batch_size = 8
            else:
                batch_size = 128
            params = {"embed_dim": cat_emb,
                      "cont_emb": cont_emb,
                      "max_seq": 100,
                      "batch_size": batch_size,
                      "num_warmup_steps": 12000,
                      "lr": lr,
                      "dropout": dropout}
main(params, output_dir=output_dir) | [
"kurupical@gmail.com"
] | kurupical@gmail.com |
bbe6002915498beb2e376cc05a40e4a8f0294afc | 0f58e79ee760e54183d69d707cfbbcfd1f0aa9ed | /setup.py | 7721788f99902b49c29487a550b28e116a7f3e4d | [] | no_license | AnneYinSJ/pystream | 771896066be2cf12b7d3db592fbcc8cb17729198 | 92893a91a03b6410261de00da83068fe42d90eb4 | refs/heads/master | 2020-05-16T09:01:49.169266 | 2019-03-30T20:04:00 | 2019-03-30T20:04:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,608 | py | from setuptools import setup, Extension, find_packages
from Cython.Build import cythonize
import Cython
import numpy as np
import os
# Link against libm on POSIX (Cython extensions use C math functions).
libs = []
if os.name == 'posix':
    libs.append('m')
# Include numpy headers plus the project root for .pxd lookups.
dirs = [np.get_include(), '.']
extensions = []
# ----------------------------- algorithms -----------------------------
# ----------------------------- base -----------------------------
# Shared Cython header (.pxd) dependencies.
# NOTE(review): the original list was missing a comma between two identical
# string literals, so Python silently concatenated them into one bogus path;
# restored as a single entry. The path itself may also be stale (the
# nominal_counter extension lives under pystream/algorithms/base/) -- confirm.
NOMINAL_COUNTER_DEPS = ['pystream/algorithms/nominal_counters/nominal_counter.pxd']
ADWIN_DEPS = ['pystream/algorithms/change_detectors/adwin.pxd']


def _make_extension(module, depends=()):
    """Build an Extension whose .pyx source path mirrors its dotted module name."""
    return Extension(name=module,
                     sources=[module.replace('.', '/') + '.pyx'],
                     libraries=libs,
                     include_dirs=dirs,
                     depends=list(depends))


# (dotted module name, extra .pxd dependencies) -- order is preserved in
# the final `extensions` list.
_EXTENSION_SPECS = [
    # ----------------------------- base -----------------------------
    ('pystream.algorithms.base.vfdt', NOMINAL_COUNTER_DEPS),
    ('pystream.algorithms.base.svfdt',
     NOMINAL_COUNTER_DEPS + ['pystream/algorithms/base/vfdt.pxd']),
    ('pystream.algorithms.base.svfdt_ii',
     NOMINAL_COUNTER_DEPS + ['pystream/algorithms/base/vfdt.pxd',
                             'pystream/algorithms/base/svfdt.pxd']),
    # ----------------------------- change_detectors -----------------
    ('pystream.algorithms.change_detectors.adwin', ()),
    # ----------------------------- ensembles ------------------------
    ('pystream.algorithms.ensembles.arf', ADWIN_DEPS),
    ('pystream.algorithms.ensembles.leveraging_bagging', ADWIN_DEPS),
    ('pystream.algorithms.ensembles.oaue', ()),
    ('pystream.algorithms.ensembles.ozabagging', ()),
    ('pystream.algorithms.ensembles.ozaboosting', ()),
    # ----------------------------- nominal_counters -----------------
    ('pystream.algorithms.base.nominal_counters.nominal_counter', ()),
    # ----------------------------- numeric_estimators ---------------
    ('pystream.algorithms.base.numeric_estimators.gaussian_estimator', ()),
    # ----------------------------- statistics -----------------------
    ('pystream.algorithms.base.statistics.tree_stats', ()),
    ('pystream.algorithms.base.statistics.value_stats', ()),
    # ----------------------------- evaluation -----------------------
    ('pystream.evaluation.evaluate_prequential', ()),
    ('pystream.evaluation.performance_statistics', ()),
    # ----------------------------- utils ----------------------------
    ('pystream.utils.stream_gen', ()),
]

extensions = [_make_extension(module, depends)
              for module, depends in _EXTENSION_SPECS]
setup(name='pystream',
version='1.0',
description='Pystream',
url='http://github.com/vturrisi/pystream',
author='Victor Turrisi',
license='MIT',
packages=find_packages(exclude=['tests']),
quiet=True,
ignore_setup_xxx_py=True,
assume_default_configuration=True,
ext_modules=cythonize(extensions, nthreads=4, quiet=False),
cmdclass={'build_ext': Cython.Build.build_ext},
setup_requires=['cython>=0.x'],
install_requires=['numpy>=1.14.1', 'pandas>=0.20.0', 'cython>=0.x'],
include_package_data=True,
zip_safe=False) | [
"vt.turrisi@gmail.com"
] | vt.turrisi@gmail.com |
462611dfa57951daafc012909847ccf8fa2f644e | 60c84d8dc4b30731193745bf0580584087efe337 | /examples/hello/hello.py | 53bd3f56564ec3d0888b59c23c85a01ffe0e0ee1 | [] | no_license | danse-inelastic/pyregui | f321917f6c0c955356d8b87f6466c3acddd5b194 | 3d7f90352361cbdbaa553002be6e810e84b3f44d | refs/heads/master | 2020-04-05T18:57:14.422685 | 2013-06-14T00:30:44 | 2013-06-14T00:30:44 | 34,145,791 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
class hello(Component):
    'A greeter that says "hello"'
    # Pyre component (Python 2): the greeting word is configurable through
    # the framework's inventory system and defaults to "hello".
    class Inventory(Component.Inventory):
        import pyre.inventory
        greeting = pyre.inventory.str( 'greeting', default = 'hello' )
        pass
    def greet(self, name):
        # Print "<greeting> <name>" (Python 2 print statement).
        greeting = self.greeting
        print "%s %s" % (greeting, name)
        return
    def __init__(self, name, facility = "greeter"):
        Component.__init__(self, name, facility=facility)
        return
    def _defaults(self):
        Component._defaults(self)
        return
    def _configure(self):
        # Copy the configured greeting from the inventory onto the component.
        Component._configure(self)
        self.greeting = self.inventory.greeting
        return
    def _init(self):
        Component._init(self)
        return
def greeter(): return hello( 'hello' )
# version
__id__ = "$Id$"
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
0b16e097fdaf58999c9428e768d344bdaabaeb74 | b7e580300600eccd906817db68eadd3988a6f68d | /rest101/superhero/migrations/0003_auto_20200304_2305.py | bc98233f07e27a62473a57c087c41bf9fbd699d9 | [] | no_license | Prasan1/rest | 01c54d913300d9533930b32d1afbbf7b835a68c5 | be4549727d2118c15655ca8f21d5b015a2d5c16e | refs/heads/master | 2021-03-23T19:37:27.503022 | 2020-03-16T02:41:49 | 2020-03-16T02:41:49 | 247,478,613 | 0 | 1 | null | 2020-03-16T12:17:30 | 2020-03-15T14:07:23 | Python | UTF-8 | Python | false | false | 619 | py | # Generated by Django 3.0.3 on 2020-03-05 04:05
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated Django schema migration for the superhero app.
    dependencies = [
        ('superhero', '0002_auto_20200304_2214'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='superhero',
            options={'ordering': ['active']},
        ),
        migrations.AlterField(
            model_name='superhero',
            name='id',
            # NOTE(review): the default is a *fixed* UUID instance rather
            # than the uuid.uuid4 callable, so every new row would get the
            # same primary key. The fix belongs in the model field
            # (default=uuid.uuid4) followed by a fresh migration; this
            # historical migration is left as generated.
            field=models.UUIDField(default=uuid.UUID('066969d3-c54e-48f9-a041-607e82333003'), editable=False, primary_key=True, serialize=False),
        ),
    ]
| [
"prasanna.puaudyal@capgemini.com"
] | prasanna.puaudyal@capgemini.com |
202f024bbb7de7efa4e06907f005302147decfc9 | fdbdbf4dd1ccbdf6ff3f23ed441de58f57753c33 | /helloworld.py | 2bc20ebb1427e3b18c372e7738fa86f6dcab488e | [] | no_license | antondlf/Homework1 | bd0fa36b8a9027f7e2d4847a9a3f7f16102c292c | d36a323a5375062e8e4d9a1620415e6bbf9964c0 | refs/heads/main | 2022-12-29T15:55:01.560719 | 2020-10-19T20:04:15 | 2020-10-19T20:04:15 | 303,840,249 | 0 | 2 | null | 2020-10-19T20:04:16 | 2020-10-13T22:11:18 | Python | UTF-8 | Python | false | false | 1,308 | py | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.scriptbase import ScriptBase
class HelloWorld(ScriptBase):
    # Minimal boto pyami script: logs a greeting and prints to stdout.
    def main(self):
        self.log('Hello World!!!')
        print('Hello', 'world', 'from', 'Nick')
        print("I like cats")
| [
"noreply@github.com"
] | noreply@github.com |
a5abfa7a61778e4bbeea6fb77cdeef314fff171f | 0cd407f6a73c21bbeb52958665fc2872bd25e83b | /python/snippets/file_splicer.py | 9af41130b54a71493342a76b7d3b476c46453a1a | [] | no_license | ikenticus/blogcode | 178c6d99e7805490fbe95417d6e28d1f64b0ce9d | 550b57c46284bb2e47ac23b3984792f3e68d73ea | refs/heads/master | 2023-07-05T09:00:55.780386 | 2023-06-30T02:08:10 | 2023-06-30T02:08:10 | 1,432,663 | 4 | 3 | null | 2023-01-05T17:26:25 | 2011-03-02T22:12:14 | Python | UTF-8 | Python | false | false | 1,123 | py |
'''
Splice one file into many
- match line on cfg_pre
- split line by cfg_sep
- use index cfg_idx for output filename
'''
import os
import sys
from pprint import pprint
cfg_idx = 1  # index of the split() field used as the output file name
cfg_sep = '"'  # separator used to split a matched line
cfg_pre = 'job('  # a line starting with this prefix begins a new section
cfg_out = 'output'  # sub-directory (next to the input) receiving the pieces
def splice(data, outdir, ext, prefix='job(', sep='"', idx=1):
    """Write each line of *data* into per-section output files.

    A line starting with *prefix* opens a new section; the section name is
    field *idx* of that line split on *sep*. Every subsequent line
    (including the matching one) is appended to '<outdir>/<name>.<ext>'.
    Lines seen before the first match are skipped, as before.

    The new keyword defaults mirror the module-level cfg_* constants, so
    existing three-argument callers behave exactly as before.
    """
    name = None
    for line in data:
        if line.startswith(prefix):
            name = line.split(sep)[idx]
        if name:
            # 'with' guarantees the handle closes even if write() fails;
            # also avoids shadowing the builtin name 'file'.
            with open(os.path.join(outdir, '%s.%s' % (name, ext)), 'a') as out:
                out.write(line)
def main():
    """Entry point: splice the file named by argv[1] into per-section files.

    Usage: file_splicer.py <large-file>
    Output files are written to '<dir-of-input>/<cfg_out>/', using the
    input file's extension.
    """
    if len(sys.argv) > 1:
        # Renamed from 'input', which shadowed the builtin of the same name.
        path = sys.argv[1]
    else:
        print('Usage: %s <large-file>' % os.path.basename(sys.argv[0]))
        exit()
    # 'with' replaces the manual open/close (and the old local 'file'
    # shadowed another builtin).
    with open(path) as src:
        data = src.readlines()
    outdir = '%s/%s' % (os.path.dirname(path), cfg_out)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    ext = path.split('.')[-1]
    splice(data, outdir, ext)
if __name__ == '__main__':
main()
| [
"Kent.Lee@resy.com"
] | Kent.Lee@resy.com |
62784316908e7cbe2cae8d4846b6e889a330c571 | 3e1dd5fe52a3083fd2d98d1518cc3174a8a03658 | /myawards/migrations/0010_auto_20210124_1905.py | 6f52d3c41191054de3acd3b3862fd809cf1c2d0a | [
"MIT"
] | permissive | ucynthy12/awards | 777ba0d5878db4c6563f8dc147428a7620107193 | 51c993b2e5b779103f1a43246d939a66364a187c | refs/heads/master | 2023-02-20T12:38:29.469288 | 2021-01-24T20:57:58 | 2021-01-24T20:57:58 | 329,887,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | # Generated by Django 3.1.5 on 2021-01-24 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myawards', '0009_auto_20210123_1225'),
]
operations = [
migrations.AlterField(
model_name='rate',
name='content',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], null=True),
),
migrations.AlterField(
model_name='rate',
name='creativity',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], null=True),
),
migrations.AlterField(
model_name='rate',
name='design',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], null=True),
),
migrations.AlterField(
model_name='rate',
name='usability',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], null=True),
),
]
| [
"ucynthy12@gmail.com"
] | ucynthy12@gmail.com |
9ef0313a78d7df9a2072dd19e7b8a17ce7012b41 | fff67f30c7cbe366a84c7eed7c82258a12b53411 | /setup.py | 58e945a1b1ea918271b33d92b80fab085627826f | [
"MIT"
] | permissive | N0x1s/b64 | 98871d21a6aaef32ba55f4f53bc4c65312a7713f | 632ec603b7c558d7ca9bebe12cdf7300a3b52c5f | refs/heads/master | 2022-11-08T07:39:09.502826 | 2020-06-25T17:25:18 | 2020-06-25T17:25:18 | 274,590,737 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | from distutils.core import setup
# Package metadata for the b64 distribution.
setup(
    name='b64',
    packages=['b64'],
    version='0.4',
    license='MIT',
    description='convert html/local/online data to base64 fast, easy and clean',
    author='n0x1s',
    author_email='n0x1s0x01@gmail.com',
    url='https://github.com/n0x1s/b64',
    # download_url tag matches `version` above; keep them in sync on release.
    download_url='https://github.com/N0x1s/b64/archive/0.4.tar.gz',
    keywords=['base64', 'image to base64', 'video to base64',
              'base64 convert', 'html to base64', 'data uri'],
    install_requires=[
        'requests',
        'bs4',
        'lxml',
        # NOTE(review): the common PyPI distribution is named
        # 'cached-property'; confirm 'cached_properties' is the intended
        # package name.
        'cached_properties',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| [
"ilyasselfatimy9@gmail.com"
] | ilyasselfatimy9@gmail.com |
f31c16c5d140c382a22a09d71a9596909e6fab63 | 57d49730e08d631f590fabcc88bf30fb5dfc1a59 | /ZOAC2(18238).py | 43b7cb9be8a791f7356fb078a3521f41a0e48d6c | [] | no_license | wargin-E/Baekjoon | acb0275e240741bf8122cac07af1e91d1425ba84 | 76773699acb07ffd3d7fec3c9adfc85bfafab800 | refs/heads/master | 2021-05-21T03:53:48.631819 | 2021-02-14T10:22:49 | 2021-02-14T10:22:49 | 252,531,304 | 0 | 0 | null | 2020-04-09T09:35:24 | 2020-04-02T18:10:21 | Python | UTF-8 | Python | false | false | 1,005 | py | dictionary=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
starting_alphabet="A"
destination_alphabet=0
def minimum_distance(dictionary, starting_alphabet, destination_alphabet):
    """Return the minimum number of steps between two letters on a circular dial.

    The dial is given by ``dictionary`` (e.g. the letters A-Z in order) and
    wraps around, so the distance is the shorter of the clockwise and
    counter-clockwise paths.

    Args:
        dictionary: ordered list of the symbols on the dial.
        starting_alphabet: symbol the cursor currently points at.
        destination_alphabet: symbol to move to.

    Returns:
        int: smallest number of single-step moves between the two symbols
        (0 when they are the same symbol).

    Raises:
        ValueError: if either symbol is not present in ``dictionary``.
    """
    size = len(dictionary)
    # list.index replaces the original manual scan; abs/min replace the
    # hand-written branch chains.  Using `size` instead of the hard-coded 26
    # generalizes the function to dials of any length (identical for A-Z).
    gap = abs(dictionary.index(starting_alphabet)
              - dictionary.index(destination_alphabet))
    return min(gap, size - gap)
string_word=input()
word=[]
time=0
for k in range(0,len(string_word)):
word.append(string_word[k])
for k in range(0,len(word)):
destination_alphabet=word[k]
time=time+minimum_distance(dictionary,starting_alphabet,destination_alphabet)
starting_alphabet=destination_alphabet
print(time)
| [
"noreply@github.com"
] | noreply@github.com |
733f9358c73617a8542ae58b5af6e60c4fd3b6da | 831fc9a25077345226874a103a46724a3906ddbe | /b_list/urls.py | 5b00c938cb3bbe4efe019852ead1926ef5a8a777 | [
"BSD-3-Clause"
] | permissive | softwareprojectPHD/b_list | a2f6c0890b0dbd348c77d62cdac81db69ea2e2c3 | 7efc5d15750b0fd366a40eef4794a0356f309327 | refs/heads/master | 2020-12-25T09:27:44.477238 | 2016-04-16T03:54:13 | 2016-04-17T03:58:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | """
Root URLconf.
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from blog.views import EntryArchiveIndex
from flashpolicies.views import no_access
# Redirect views which prevent an older URL scheme from 404'ing.
from . import views
urls = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$',
EntryArchiveIndex.as_view(
template_name='home.html',
),
name='home'),
url(r'^contact/', include('contact_form.urls')),
url(r'^feeds/', include('blog.urls.feeds')),
url(r'^projects/', include('projects.urls')),
url(r'^weblog/categories/', include('blog.urls.categories')),
url(r'^weblog/', include('blog.urls.entries')),
url(r'^crossdomain.xml$', no_access),
]
legacy = [
url(r'^links/', views.gone),
url(r'^weblog/(?P<year>\d{4})/(?P<month>\d{2})/$',
views.EntryMonthRedirect.as_view()),
url(r'^weblog/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/$',
views.EntryDayRedirect.as_view()),
url(r'^weblog/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$',
views.EntryDetailRedirect.as_view()),
url(r'^media/(?P<path>.*)$', views.MediaRedirect.as_view()),
]
# The redirecting patterns come first; otherwise, the main URLs would
# catch those and 404.
urlpatterns = legacy + urls
| [
"james@b-list.org"
] | james@b-list.org |
78abd3a4e3fbcc34e0e8ec4eaef5fe5ac4e374e9 | c9fd4301e3cbbb7b1144b1985be79c05f218be1c | /application/models.py | a5a46cc6b6a6bbe892650a273866213b57692901 | [] | no_license | Solertis/Psychographic-Segmentation-Of-Consumer-Behaviors | a6d76ace3f2bb6037125c2ed530cb1f2def1de3c | f17405b7ec59e9ae8c0cde2fb9fcb12caf4d05e2 | refs/heads/master | 2020-04-23T17:16:01.603556 | 2017-08-20T10:29:30 | 2017-08-20T10:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from django.db import models
from django.contrib.auth.models import Permission, User
from mongoengine import *
def user_directory_path(instance, filename):
    """Build the upload destination for a user's file.

    Used as the ``upload_to`` callable of ``Upload.docfile``: Django calls it
    with the model instance and the original filename and stores the file at
    the returned relative path, namespaced by the owning user's id.
    """
    template = 'application/static/application/images/user_{0}/new/{1}'
    return template.format(instance.user.id, filename)
class Upload(models.Model):
    """A file uploaded by a user, stored under that user's upload directory."""
    # Stored at the per-user path produced by user_directory_path().
    docfile = models.FileField(upload_to=user_directory_path)
    # Original filename, kept separately for display purposes.
    filename = models.CharField(max_length=250)
    # NOTE(review): default=2 hard-codes a specific User primary key — this
    # assumes that row always exists in the database; confirm.
    user = models.ForeignKey(User, default=2)
    date = models.DateTimeField()
    def __str__(self):
        """Human-readable representation: the original filename."""
        return self.filename
class Table(DynamicDocument):
    """MongoDB record describing an uploaded data table.

    As a mongoengine ``DynamicDocument``, extra fields beyond those declared
    here may be attached dynamically at runtime.
    """
    # NOTE(review): despite the name this is an integer — presumably the
    # uploading user's id rather than a name; confirm against callers.
    username = IntField()
    fid = IntField()
    tablename = StringField(max_length=150)
| [
"noreply@github.com"
] | noreply@github.com |
9c079ffee4a8446340856d6c9a3f18c21f3f77a0 | a81c1492783e7cafcaf7da5f0402d2d283b7ce37 | /google/ads/google_ads/v6/proto/errors/account_link_error_pb2.py | bcb4b476f4b191721a76ed542da5e88fb13651ea | [
"Apache-2.0"
] | permissive | VincentFritzsche/google-ads-python | 6650cf426b34392d1f58fb912cb3fc25b848e766 | 969eff5b6c3cec59d21191fa178cffb6270074c3 | refs/heads/master | 2023-03-19T17:23:26.959021 | 2021-03-18T18:18:38 | 2021-03-18T18:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,974 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/errors/account_link_error.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/errors/account_link_error.proto',
package='google.ads.googleads.v6.errors',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v6.errorsB\025AccountLinkErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V6.Errors\312\002\036Google\\Ads\\GoogleAds\\V6\\Errors\352\002\"Google::Ads::GoogleAds::V6::Errors',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n7google/ads/googleads/v6/errors/account_link_error.proto\x12\x1egoogle.ads.googleads.v6.errors\x1a\x1cgoogle/api/annotations.proto\"\\\n\x14\x41\x63\x63ountLinkErrorEnum\"D\n\x10\x41\x63\x63ountLinkError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x12\n\x0eINVALID_STATUS\x10\x02\x42\xf0\x01\n\"com.google.ads.googleads.v6.errorsB\x15\x41\x63\x63ountLinkErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V6.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V6\\Errors\xea\x02\"Google::Ads::GoogleAds::V6::Errorsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ACCOUNTLINKERRORENUM_ACCOUNTLINKERROR = _descriptor.EnumDescriptor(
name='AccountLinkError',
full_name='google.ads.googleads.v6.errors.AccountLinkErrorEnum.AccountLinkError',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_STATUS', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=145,
serialized_end=213,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTLINKERRORENUM_ACCOUNTLINKERROR)
_ACCOUNTLINKERRORENUM = _descriptor.Descriptor(
name='AccountLinkErrorEnum',
full_name='google.ads.googleads.v6.errors.AccountLinkErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ACCOUNTLINKERRORENUM_ACCOUNTLINKERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=213,
)
_ACCOUNTLINKERRORENUM_ACCOUNTLINKERROR.containing_type = _ACCOUNTLINKERRORENUM
DESCRIPTOR.message_types_by_name['AccountLinkErrorEnum'] = _ACCOUNTLINKERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AccountLinkErrorEnum = _reflection.GeneratedProtocolMessageType('AccountLinkErrorEnum', (_message.Message,), {
'DESCRIPTOR' : _ACCOUNTLINKERRORENUM,
'__module__' : 'google.ads.googleads.v6.errors.account_link_error_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.errors.AccountLinkErrorEnum)
})
_sym_db.RegisterMessage(AccountLinkErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | noreply@github.com |
1ae8eddbd844dfaff811a922e66a120228c8cf01 | eb3f8ee889969a675d6d44e13a89236bcf273389 | /src/isanlp/processor_syntaxnet_remote.py | e20857109b647169b1fc4b26683eb30ae7f0b707 | [
"MIT",
"Python-2.0"
] | permissive | IINemo/isanlp | d14ce4b15e31d0eced0a1588db3f33ac73f88083 | 88e8b6edd0785718b118e2cbf3822b26f2a33cff | refs/heads/master | 2023-07-08T02:11:22.096613 | 2023-06-27T15:13:56 | 2023-06-27T15:13:56 | 121,495,819 | 65 | 9 | MIT | 2023-05-11T14:45:05 | 2018-02-14T10:05:20 | Python | UTF-8 | Python | false | false | 4,205 | py | from .annotation import WordSynt
from .annotation_repr import CSentence
from .conll_format_parser import ConllFormatStreamParser
import sys
import socket
import logging
logger = logging.getLogger('isanlp')
class ProcessorSyntaxNetRemote:
    """Processor for calling SyntaxNet remotely.

    It is intended to work with docker containers inemo/syntaxnet_ru,
    inemo/syntaxnet_eng etc.  Talks to the container over a plain TCP
    socket (it does not use gRPC).

    Args:
        host(str): hostname via which the SyntaxNet container can be accessed.
        port(int): port of the accessible docker container.
    """
    def __init__(self, host, port):
        self.host_ = host
        self.port_ = port

    def __call__(self, tokens, sentences):
        """Invokes SyntaxNet on the remote server.

        Args:
            tokens(list): list of objects Token.
            sentences(list): list of objects Sentence.

        Returns:
            Dictionary that contains:
                1. 'syntax_dep_tree': list of lists of WordSynt objects that
                   represent dependency trees.
                2. 'postag': list of lists of strings with POS tags of words.
                3. 'morph': list of lists of strings with morphological
                   features.
            None if the server returned an empty reply.
        """
        raw_input_s = self._prepare_raw_input_for_syntaxnet(tokens, sentences)

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((self.host_, self.port_))
            sock.sendall(raw_input_s)
            raw_output_s = self._read_all_from_socket(sock)
        finally:
            # Always release the socket, even when connect/send/recv raises
            # (the original leaked the socket on error).
            sock.close()

        if not raw_output_s:
            return None

        result_postag, result_morph, result_synt = \
            self._parse_conll_format(raw_output_s)

        return {'postag': result_postag,
                'morph': result_morph,
                'syntax_dep_tree': result_synt}

    def _fill_spans_in_trees(self, tokens, sentences, trees):
        # Copy character offsets from the input tokens onto the parsed words,
        # sentence by sentence and word by word.
        for in_sent, p_sent in zip(sentences, trees):
            for in_word, p_word in zip(CSentence(tokens, in_sent), p_sent):
                p_word.begin = in_word.begin
                p_word.end = in_word.end

    def _prepare_raw_input_for_syntaxnet(self, tokens, input_data):
        """Serialize input to SyntaxNet's newline-delimited text protocol.

        Each sentence becomes one space-joined line; the request ends with a
        blank line.  Returns UTF-8 encoded bytes.
        """
        if isinstance(input_data, str):
            # Bug fix: the original tested ``input_data is str`` (always False
            # for string instances) and then referenced an undefined name
            # ``text``, so the raw-text path raised NameError.
            raw_input_s = input_data + '\n\n'
        else:
            raw_input_s = ''
            for sent in input_data:
                line = ' '.join(e.text for e in CSentence(tokens, sent))
                raw_input_s += line
                raw_input_s += '\n'
            raw_input_s += '\n'

        return raw_input_s.encode('utf8')

    def _read_all_from_socket(self, sock):
        """Read until the server closes the connection; return decoded text."""
        buf = bytes()
        try:
            while True:
                data = sock.recv(51200)
                if data:
                    buf += data
                else:
                    break
        except socket.error as err:
            logger.error('Err: Socket error: {}'.format(err))
            raise

        return buf.decode('utf8')

    def _parse_conll_format(self, string):
        """Parse SyntaxNet CoNLL output into (postag, morph, syntax) lists.

        Raises:
            IndexError: if a CoNLL row has fewer columns than expected
            (logged together with the offending payload, then re-raised).
        """
        try:
            result_postag = list()
            result_morph = list()
            result_synt = list()
            for sent in ConllFormatStreamParser(string):
                new_sent_postag = list()
                new_sent_morph = list()
                new_sent_synt = list()
                for word in sent:
                    # CoNLL columns: 3 = coarse POS tag, 5 = morphological
                    # features, 6 = 1-based head index (converted to 0-based),
                    # 7 = dependency relation name.
                    new_word = WordSynt(parent=int(word[6]) - 1,
                                        link_name=word[7])
                    new_sent_synt.append(new_word)
                    new_sent_morph.append(word[5])
                    new_sent_postag.append(word[3])

                result_postag.append(new_sent_postag)
                result_morph.append(new_sent_morph)
                result_synt.append(new_sent_synt)

            return result_postag, result_morph, result_synt
        except IndexError as err:
            logger.error('Err: Index error: {}'.format(err))
            logger.error('--------------------------------')
            logger.error(string)
            logger.error('--------------------------------')
            raise
| [
"artemshelmanov@gmail.com"
] | artemshelmanov@gmail.com |
c371911079fd8ad137118cf71c9d851ae79c6e20 | d0b637b44baa2abaf76e2f7dadb138d63ea089b2 | /old_code/QualityKmerToFasta.py | deae89767ea528064b58b6e7ad01f1965f656a32 | [] | no_license | Strideradu/OverlapFind | c0af03a3c47462ca5a0c2ebecf95eb8821a8e349 | 397007293cde571dee165dc5318421812e8d6863 | refs/heads/master | 2021-01-19T11:25:23.959784 | 2018-01-29T19:19:49 | 2018-01-29T19:19:49 | 82,241,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 19 12:13:17 2017
@author: Nan
"""
import os
import sys
from Bio import SeqIO
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_dna
from collections import Counter
import quality_kmer
def count_score_kmer(record, k, kmer_list, quality_handle=None):
    """Append every k-mer of ``record.seq`` to ``kmer_list`` (in place).

    Args:
        record: sequence record exposing ``.seq`` and — either directly or
            via ``quality_handle`` — ``.letter_annotations['phred_quality']``.
        k: k-mer length.
        kmer_list: list mutated in place; gets one entry per k-mer position.
        quality_handle: optional mapping (e.g. a SeqIO index) used to look up
            the FASTQ record carrying the quality scores by ``record.id``;
            when None, ``record`` itself is assumed to carry them.
    """
    # Pick the record that carries the Phred quality scores.
    if quality_handle is None:
        fastq_record = record
    else:
        fastq_record = quality_handle[record.id]

    # Per-base error probabilities from Phred scores: P = 10**(-Q/10).
    # NOTE(review): P is currently unused — the original also computed a
    # per-k-mer score (np.sum over the P slice) and discarded it; that dead
    # computation is removed here.  Confirm whether score-based filtering of
    # k-mers was the eventual intent.
    Q = np.array(fastq_record.letter_annotations["phred_quality"])
    P = 10.0 ** (-Q / 10.0)

    for x in range(len(record.seq) + 1 - k):
        kmer_list.append(record.seq[x:x + k])
"""
fastq = sys.argv[1]
if os.path.exists(fastq):
print os.path.basename(fastq)
"""
fastq = "D:/Data/20170116/filtered_subreads_15X.fastq"
records = SeqIO.parse(fastq, "fastq")
insertion_dict = SeqIO.index("D:/Data/20161125/filtered_subreads_insertion_first1k.fastq", "fastq")
deletion_dict = SeqIO.index("D:/Data/20161125/filtered_subreads_deletion_first1k.fastq", "fastq")
substitution_dict = SeqIO.index("D:/Data/20161125/filtered_subreads_substitution_first1k.fastq", "fastq")
deletion_tag_dict = SeqIO.index("D:/Data/20161125/filtered_subreads_deletion_tag_first1k.fasta", "fasta")
substitution_tag_dict = SeqIO.index("D:/Data/20161125/filtered_subreads_substitution_tag_first1k.fasta", "fasta")
"""
output = sys.argv[2]
if os.path.exists(output):
print os.path.basename(output)
"""
output = "D:/Data/20170116/filtered_subreads_15X_kmer.fasta"
kmer_list = []
for record in records:
insertion_rec = insertion_dict[record.id]
deletion_rec = deletion_dict[record.id]
substitution_rec = substitution_dict[record.id]
sub_tag_rec = substitution_tag_dict[record.id]
del_tag_rec = deletion_tag_dict[record.id]
qual_record = quality_kmer.QualitySeq(record, insertion_rec, deletion_rec, substitution_rec, del_tag_rec, sub_tag_rec)
record_kmer = qual_record.generate_quality_kmer(11)
kmer_num = Counter(record_kmer)
kmer_fasta = []
for kmer in kmer_num:
kmer_fasta.append(SeqRecord(Seq(kmer),id=kmer + "_" + str(kmer_num[kmer]), description=""))
SeqIO.write(kmer_fasta, output, "fasta") | [
"dunan00001@hotmail.com"
] | dunan00001@hotmail.com |
6586ba17d3c64f3b0ee887616f7f4e4e07d684fb | d0bb6390880d8c14062510712664869b03cfb9bc | /IOTserver/py_resources/auth.py | 7ddeb93890df5b28ad39a29bab4a35ab826738cc | [
"Unlicense"
] | permissive | luismmorera/IoT_ESD | a4f9fba7fb7819bfdd13c747fd946af8e6d1f393 | 4b2740a6b44493c845dc3e3498f60c31136b33f4 | refs/heads/master | 2023-05-31T04:39:19.959272 | 2021-06-19T17:33:15 | 2021-06-19T17:33:15 | 367,965,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | import pickle
import os.path
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
class auth:
    """Handles Google OAuth2 authorization for the Gmail API.

    Tokens are cached in ``token.pickle`` between runs so the interactive
    browser flow only has to happen once.
    """

    def __init__(self, SCOPES, CLIENT_SECRET_FILE):
        self.SCOPES = SCOPES
        self.CLIENT_SECRET_FILE = CLIENT_SECRET_FILE

    def get_credentials(self):
        """Return valid credentials for accessing the Gmail API.

        Loads cached tokens from ``token.pickle`` when present; otherwise
        refreshes an expired token or runs the interactive installed-app
        flow, persisting the result back to ``token.pickle``.
        """
        creds = None
        # token.pickle stores the user's access and refresh tokens; it is
        # written automatically the first time authorization completes.
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)

        # Cached credentials are still good — nothing more to do.
        if creds and creds.valid:
            return creds

        # No usable cached credentials: refresh if we can, otherwise ask
        # the user to log in interactively.
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                self.CLIENT_SECRET_FILE, self.SCOPES)
            creds = flow.run_local_server(port=0)

        # Save the (new) credentials for the next run.
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
        return creds
| [
"luis.mmorera@alumnos.upm.es"
] | luis.mmorera@alumnos.upm.es |
440e4c560ae61fe94aa2b5cb5c987c940277427d | 63dfcc893b0b24e632074107a322a713b42c7508 | /lastpage/resource.py | a39404dc2360680d314dcda11efe0089aae46e33 | [
"Apache-2.0"
] | permissive | jkakar/lastpage-server | c2d206d58baa96c02b417c28f381fa8ba1aeb7ff | 313927d343101968fa9654e8710343e8a988e131 | refs/heads/master | 2020-12-25T01:50:29.802340 | 2011-08-03T22:15:04 | 2011-08-03T22:15:04 | 2,154,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,529 | py | # Copyright 2011 Fluidinfo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from jinja2.exceptions import TemplateNotFound
from twisted.internet import defer
from twisted.python import log
from twisted.web import resource, http, server
from twisted.web.resource import ErrorPage
from twisted.web.static import File
from txfluiddb.client import Object, Tag, Namespace
from txfluiddb.http import HTTPError
aboutTag = Tag(u'fluiddb', u'about')
# Content we serve statically, if static files are not being served by some
# other means (e.g., nginx).
_staticFiles = {
'/static/favicon.ico': 'static/favicon.ico',
'/robots.txt': 'static/robots.txt',
'/static/bullet.png': 'static/bullet.png',
'/static/icon.png': 'static/icon.png',
'/static/logo.png': 'static/logo.png',
'/static/style.css': 'static/style.css',
}
class LastPage(resource.Resource):
"""
Top-level resource for the lastpage.me service.
In production, requests for static files for lastpage.me should be
served by some other process. This resource is best used to handle
requests for the top-level site (/) and (via getChild) can produce
resources for children (e.g., /username). If you do not set
serveStaticFiles=True, it will 404 requests for things like
/static/style.css or anything else (these requests may come from
templates we return). It is suggested you use nginx or some other web
server to deliver those resources (including requests for
/favicon.ico).
@param endpoint: the Fluidinfo API endpoint to use.
@param env: The Jinja2 C{Environment} to use for rendering.
@param serveStaticFiles: if C{True} handle requests for known
static files.
"""
allowedMethods = ('GET',)
def __init__(self, endpoint, env, serveStaticFiles):
resource.Resource.__init__(self)
self._endpoint = endpoint
self._env = env
self._serveStaticFiles = serveStaticFiles
def getChild(self, what, request):
"""
Find and return a child resource.
@param what: The thing (either a user name or an html page) wanted.
@param request: The HTTP request.
"""
# Serve static files.
if self._serveStaticFiles:
path = request.path
if path in _staticFiles:
filename = _staticFiles[path]
log.msg('Serving static path %s -> %s.' % (path, filename))
request.setResponseCode(http.OK)
fileResource = File(filename)
fileResource.isLeaf = True
return fileResource
# Serve .html requests.
if what == '' or what.endswith('.html'):
try:
template = self._env.get_template(what or 'index.html')
except TemplateNotFound:
# There could in theory be a user whose name ends in .html,
# so we'll just let this go through.
pass
else:
self._template = template
return self
log.msg('Request for path %s assumed to be a user URL lookup.' %
request.path)
# Serve normal user redirects.
try:
# Decode the path components into unicode.
who = what.decode('utf-8')
rest = u'-'.join([x.decode('utf-8') for x in request.postpath])
except UnicodeDecodeError:
return ErrorPage(http.BAD_REQUEST, 'Bad URI UTF-8', 'Bad UTF-8')
if rest:
tag = u'%s/lastpage-%s' % (who, rest)
else:
tag = u'%s/lastpage' % who
return LastPageOf(self._endpoint, self._env, who, tag)
def render_GET(self, request):
"""
Handle a GET request. This is a request for a top-level HTML page
like http://lastpage.me/tools.html
@param request: The HTTP request.
"""
return str(self._template.render())
class LastPageOf(resource.Resource):
"""
A resource for a specific user of lastpage.me. This resource is used to
handle requests for http://lastpage.me/username.
@param endpoint: the Fluidinfo API endpoint to use.
@param env: The Jinja2 C{Environment} to use for rendering.
@param who: A C{unicode} username to redirect to, if possible.
@param tag: The C{unicode} path name of the tag to query for.
"""
allowedMethods = ('GET',)
isLeaf = True
def __init__(self, endpoint, env, who, tag):
resource.Resource.__init__(self)
self._endpoint = endpoint
self._env = env
self._who = who
self._tag = tag
def render_GET(self, request):
"""
Handle a GET request.
@param request: The HTTP request.
"""
query = u'has %s' % self._tag
# log.msg('Sending %r query to %r.' % (query, self._endpoint.baseURL))
d = Object.query(self._endpoint, query)
d.addCallback(self._finishHas, request)
d.addErrback(self._hasErr, request)
d.addErrback(log.err)
return server.NOT_DONE_YET
def _hasErr(self, fail, request):
"""
Handle an error in the get on the user's tag.
@param fail: the Twisted failure.
@param request: the original HTTP request.
"""
fail.trap(HTTPError)
errorClass = fail.value.response_headers.get('x-fluiddb-error-class')
if errorClass:
if errorClass[0] == 'TNonexistentTag':
d = Namespace(self._who).exists(self._endpoint)
d.addCallback(self._testUserExists, request)
d.addErrback(self._oops, request)
d.addErrback(log.err)
return d
else:
log.msg('Fluidinfo error class %s.' % errorClass[0])
log.err(fail)
request.setResponseCode(http.NOT_FOUND)
template = self._env.get_template('404.html')
request.write(str(template.render()))
else:
request.write('Sorry! No Fluidinfo error class. %s' % fail)
request.finish()
def _finishHas(self, results, request):
"""
Handle the result of the 'has username/lastpage' /objects query.
@param results: the result of the query.
@param request: the original HTTP request.
"""
nResults = len(results)
if nResults == 0:
# This user doesn't have a lastpage tag on any page.
template = self._env.get_template('no-pages-tagged.html')
request.write(str(template.render(user=self._who,
tag=self._tag)))
request.setResponseCode(http.OK)
request.finish()
elif nResults == 1:
# Normal case. There is a lastpage tag on one object, and we
# can do the redirect. Because txFluidDB doesn't support
# /values yet though, we need to send another request to
# Fluidinfo (to get the fluiddb/about value of the object,
# which will be the URL).
obj = Object(results[0].uuid)
d = obj.get(self._endpoint, aboutTag)
d.addCallback(self._finishSingle, request)
d.addErrback(self._hasErr, request)
return d
else:
# There are lastpage tags on multiple objects. Get the
# fluiddb/about for all of them.
log.msg('got %d results' % nResults)
deferreds = []
for result in results:
obj = Object(result.uuid)
d = obj.get(self._endpoint, aboutTag)
deferreds.append(d)
d = defer.DeferredList(deferreds, consumeErrors=True)
d.addCallback(self._finishMany, request)
return d
def _finishSingle(self, result, request):
"""
Handle the result of a GET to fetch the user's tag from a single
object.
@param result: the result of the GET.
@param request: the original HTTP request.
"""
try:
url = str(result)
except:
raise
log.msg('Redirect: %s -> %s' % (self._who.encode('utf-8'), url))
request.setResponseCode(http.TEMPORARY_REDIRECT)
request.redirect(url)
request.finish()
def _finishMany(self, results, request):
"""
Handle the result of a GET to fetch what are hopefully URLs
(fluiddb/about values) of the many objects that have a
username/lastpage on them.
@param results: a list of (succeeded, result) 2-tuples from
a C{DeferredList} firing. These are the results of the GET
requests to fetch the fluiddb/about tags on the objects that
have a username/lastpage tag on them.
@param request: the original HTTP request.
"""
templateURLs = []
for (succeeded, result) in results:
if succeeded:
low = result.lower()
if low.startswith('http://') or low.startswith('https://'):
result = '<a href="%s">%s</a>' % (result, result)
templateURLs.append(result)
else:
log.msg('Failure getting %r/lastpage tag:' % self._who)
log.err(result)
if templateURLs:
request.setResponseCode(http.OK)
template = self._env.get_template('multiple-pages-tagged.html')
request.write(str(template.render(user=self._who,
tag=self._tag,
pages=templateURLs)))
request.setResponseCode(http.OK)
request.finish()
else:
# We only got errors back...
request.write('Oops, sorry, all we got were errs!')
request.finish()
def _testUserExists(self, exists, request):
"""
Produce an informative page to indicate that we can't help because
either the user doesn't exist or the tag isn't present.
@param exists: C{True} if the user exists, else C{False}.
@param request: the original HTTP request.
"""
if exists:
template = self._env.get_template('no-pages-tagged.html')
else:
template = self._env.get_template('no-user.html')
request.write(str(template.render(user=self._who,
tag=self._tag)))
request.setResponseCode(http.OK)
request.finish()
def _oops(self, fail, request):
"""
Produce an internal server error page to indicate that we had a
severed problem.
@param fail: the Twisted failure.
@param request: the original HTTP request.
"""
log.err(fail)
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
template = self._env.get_template('500.html')
request.write(str(template.render()))
request.finish()
| [
"terry@fluidinfo.com"
] | terry@fluidinfo.com |
95f1540887394030c26f2f55d4910567d0642e49 | 6dfa271fb41c9d4a1a74ce34c4bee252a2d86291 | /sympy/thirdparty/pyglet/pyglet/window/xlib/cursorfont.py | 16c5e740ecd81ff97be40c31104783842b690ad0 | [
"BSD-3-Clause"
] | permissive | gnulinooks/sympy | 7e4776cd1ea24bde56dbc17207611a9bc7523e50 | 46f63841f96cd025289b91ba9db3e261138d720a | refs/heads/master | 2016-09-10T18:56:49.556138 | 2009-04-05T14:10:49 | 2009-04-05T14:10:49 | 169,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: cursorfont.py 1322 2007-10-23 12:58:03Z Alex.Holkner $'
# /usr/include/X11/cursorfont.h
XC_num_glyphs = 154
XC_X_cursor = 0
XC_arrow = 2
XC_based_arrow_down = 4
XC_based_arrow_up = 6
XC_boat = 8
XC_bogosity = 10
XC_bottom_left_corner = 12
XC_bottom_right_corner = 14
XC_bottom_side = 16
XC_bottom_tee = 18
XC_box_spiral = 20
XC_center_ptr = 22
XC_circle = 24
XC_clock = 26
XC_coffee_mug = 28
XC_cross = 30
XC_cross_reverse = 32
XC_crosshair = 34
XC_diamond_cross = 36
XC_dot = 38
XC_dotbox = 40
XC_double_arrow = 42
XC_draft_large = 44
XC_draft_small = 46
XC_draped_box = 48
XC_exchange = 50
XC_fleur = 52
XC_gobbler = 54
XC_gumby = 56
XC_hand1 = 58
XC_hand2 = 60
XC_heart = 62
XC_icon = 64
XC_iron_cross = 66
XC_left_ptr = 68
XC_left_side = 70
XC_left_tee = 72
XC_leftbutton = 74
XC_ll_angle = 76
XC_lr_angle = 78
XC_man = 80
XC_middlebutton = 82
XC_mouse = 84
XC_pencil = 86
XC_pirate = 88
XC_plus = 90
XC_question_arrow = 92
XC_right_ptr = 94
XC_right_side = 96
XC_right_tee = 98
XC_rightbutton = 100
XC_rtl_logo = 102
XC_sailboat = 104
XC_sb_down_arrow = 106
XC_sb_h_double_arrow = 108
XC_sb_left_arrow = 110
XC_sb_right_arrow = 112
XC_sb_up_arrow = 114
XC_sb_v_double_arrow = 116
XC_shuttle = 118
XC_sizing = 120
XC_spider = 122
XC_spraycan = 124
XC_star = 126
XC_target = 128
XC_tcross = 130
XC_top_left_arrow = 132
XC_top_left_corner = 134
XC_top_right_corner = 136
XC_top_side = 138
XC_top_tee = 140
XC_trek = 142
XC_ul_angle = 144
XC_umbrella = 146
XC_ur_angle = 148
XC_watch = 150
XC_xterm = 152
| [
"ondrej@certik.cz"
] | ondrej@certik.cz |
5e0fa71248a72d116471e1ebbb5d85a675191bed | 4429c23d283d28d0e6a3d784517146ee69b82f74 | /strings.py | 05be2839ad570445c6db9dbdc746b56dee5584de | [] | no_license | rajesh0025/intermediate-python | f8d8e64887bf7b1182e91f156bf62efa07b36c62 | 0cd25724c3a03cdc4bcba17159bde7bdb5933fc0 | refs/heads/main | 2023-07-11T18:33:08.658845 | 2021-08-16T10:52:18 | 2021-08-16T10:52:18 | 314,479,542 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | #strings orederd immutable text representation
# we can use sclicese in strings
my_string ="hello world"
sub=my_string[5:2:-1] #[start::end::gap b/w each char]
print(sub)
my_string ="hello world"
print(my_string.startswith("hello"))
print(my_string.startswith("world"))
print(my_string.endswith("world"))
print(my_string.find("l"))#index
print(my_string.count("l"))#count
print(my_string.replace("world","universe"))
print(my_string)
my_string='how are you doing'
print(my_string)#gives string
my_list=my_string.split(" ")#where to split here space is to split
print(my_list)#gives list
join=' '.join(my_list) #to join listn with given space like split
print(join) #gives string
print("rajesh")
join=' '.join(my_list) #to join listn with given space like split
print(join)
nums="""1
2
3
4
5 """
numms=nums.split("""
""")
print(numms)
join=" ".join(numms)
print(join)
my_string='how are you doing and where are you and whats up'
my_list=my_string.split("and")#here and is the point to split
print(my_list)
for ti in my_list:
print(ti)
my_list= ['a'] *10
print(my_list)
#to print like a string (aaaaaaaaaa)(BAD METHOD)
my_string=''
for i in my_list:
my_string+=i
print(my_string)
#GOOD METHOD TO PRINT LIKE A STRING
my_string=''.join(my_list)
print(my_string)
#to know the time taken by each method
from timeit import default_timer as timer
#we can calculate time in this way
staet=timer()
my_list= ['a'] *1000000
print(my_list)
stop=timer()
print(stop-staet)
#to print like a string (aaaaaaaaaa)(BAD METHOD)
start=timer()
my_string=''
for i in my_list:
my_string+=i
stop=timer()
print(stop-start)
#GOOD METHOD TO PRINT LIKE A STRING
start=timer()
my_string=''.join(my_list)
stop=timer()
print(stop -start)
#% in string
var="rajesh"
bar=10.12345
my_string='%s is the student'%var
my_bring='%.3s is the student'%var
my_list="the percetage is %f"%bar
my_fist="the percetage is %.3f"%bar
print(my_string)
print(my_bring)
print(my_list)
print(my_fist)
| [
"noreply@github.com"
] | noreply@github.com |
c6371779de8aeb3db8983e514c6240c6528cb326 | 08efe66527ef088c985b2731a80ec217f6d64e27 | /DiscordBot.py | a8732cb40fd4fb92c627f8cd7eadbc2af99cc2cf | [] | no_license | cmd-k/DiscordBot | 14d17e793bcce59b0d17a57f11e6be4c0ecef44f | e221e802232c2757ba552a7b96199637d8a04d92 | refs/heads/master | 2021-03-18T22:30:03.514327 | 2020-03-13T15:03:40 | 2020-03-13T15:03:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,471 | py | import asyncio
import time
import discord
from discord import Member
client = discord.Client()
# Global mutable state shared by the event handlers below.
messages = 0  # messages seen since the last flush by update_stats()
anzahlZรผge = 0  # tic-tac-toe move counter ("number of moves")
feld = [['1','2','3'],['4','5','6'],['7','8','9']]  # 3x3 board; digits mark free cells
@client.event
async def on_ready(): # console text printed when the bot has started and is ready
    """Log the bot's name and start the rotating-status background task."""
    print("Hey dudes me is {}".format(client.user.name))
    client.loop.create_task(status_task())
async def update_stats(): # write message stats to stats.txt
    """Append "Time, Messages" to stats.txt every 5 seconds and reset the
    global message counter; on error, log and keep looping."""
    await client.wait_until_ready()
    global messages
    while not client.is_closed():
        try:
            with open("stats.txt", "a") as f:
                f.write(f"Time: {int(time.time())}, Messages: {messages}\n")
            messages = 0
            await asyncio.sleep(5)
        except Exception as e:
            print("Fehler beim reinschreiben in Datei: ", e)
            await asyncio.sleep(5)
async def status_task(): # change the bot's status line
    """Alternate the bot's presence text forever."""
    while True:
        await client.change_presence(activity=discord.Game("Mit dir"), status=discord.Status.online)
        await asyncio.sleep(5)
        # NOTE(review): two consecutive sleeps give a 10 s delay before the
        # switch, while there is no sleep after the second presence, so it
        # is replaced almost immediately on the next loop -- confirm intent.
        await asyncio.sleep(5)
        await client.change_presence(activity=discord.Game("ยคOยค mit dir"), status=discord.Status.online)
@client.event
async def on_message(message): # dispatches all the different chat commands
    """Handle every incoming message: count it, ignore bots, and react to
    the known commands (help embed, joke commands, user info, and the
    tic-tac-toe game driven by the module-level feld/anzahlZรผge state)."""
    global feld
    global anzahlZรผge
    global messages
    messages += 1
    commands = ['!gayTester', '!userinfo', '!negerTester', '!TicTacTo']
    if message.author.bot:
        return
    # Help command: list all known commands in an embed.
    if '!INeedSomeHelp' in message.content:
        helpEmbed = discord.Embed(title='You need some help?',
                              description='Here I give help:',
                              color=0x22a7f0)
        helpEmbed.add_field(name='Commands:', value=commands, inline=True)
        await message.channel.send(embed=helpEmbed)
    if commands[0] in message.content: #gayTester
        member = discord.utils.get(message.guild.members, name="WhitePanda")
        if message.author == member:
            await message.channel.send('you not gay')
        else:
            messone = await message.channel.send('yes you gay')
            await messone.add_reaction('<:UGAY:642807039780716604>')
    # !userinfo <name>: look up the first member whose name contains <name>
    # and post an embed with join date, roles and avatar.
    if message.content.startswith(commands[1]): #userinfo
        args = message.content.split(' ')
        if len(args) == 2:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            if member:
                embed = discord.Embed(title='Userinfo fรผr {}'.format(member.name),
                                      description='UserInfo von {}'.format(member.mention),
                                      color=0x22a7f0)
                embed.add_field(name='Server beigetreten', value=member.joined_at.strftime('%d/%m/%Y, %H:%M:%S'),
                                inline=True)
                rollen = ''
                for role in member.roles:
                    if not role.is_default():
                        rollen += '{} \r\n'.format(role.mention)
                if rollen:
                    embed.add_field(name='Rollen', value=rollen, inline=True)
                embed.set_thumbnail(url=member.avatar_url)
                embed.set_footer(text='_________________________.')
                await message.channel.send(embed=embed)
    if message.content.startswith(commands[2]): #!negerTester
        args = message.content.split(' ')
        if len(args) == 2:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            if member:
                await message.channel.send("yes {} is a neger".format(member.mention))
        else:
            await message.channel.send("yes you neger")
    # !TicTacTo: reset the shared board and move counter.
    if message.content.startswith(commands[3]): #!TicTacTo
        feld = [['1','2','3'],['4','5','6'],['7','8','9']]
        anzahlZรผge = 0
        embed = discord.Embed(title="TicTacTo",color=0x22a7f0)
        embed.add_field(name="Neustart", value="Neustart")
        await message.channel.send(embed=embed)
    # !N <cell>: play a move on cell 1-9, then report win or draw.
    if message.content.startswith("!N"):
        args = message.content.split(' ')
        if len(args) == 2:
            if(Result(feld) == False):
                SpielfeldChange(feld, int(args[1]), anzahlZรผge)
                anzahlZรผge+=1
                embed = discord.Embed(title="TicTacTo",color=0x22a7f0)
                embed.add_field(name="Feld:",value=Spielfeld(feld))
                await message.channel.send(embed=embed)
                if(Result(feld) == True):
                    gewonnenMessage = await message.channel.send("Hast Gewonnen {0}".format(Spieler(anzahlZรผge-1)))
                    await gewonnenMessage.add_reaction("<:GayAlex:642807888280027147>")
                    await message.channel.send("zum Neustarten !TicTacTo schreiben")
                if(Unentschieden(feld) == True):
                    await message.channel.send("Unentschieden")
                    await message.channel.send("zum Neustarten !TicTacTo schreiben")
def Spielfeld(feld):
    """Render the 3x3 board as a three-line string with space-separated cells."""
    # The first row keeps a trailing space to match the original layout exactly.
    zeilen = [" ".join(feld[0]) + " ", " ".join(feld[1]), " ".join(feld[2])]
    return "\n".join(zeilen)
def SpielfeldChange(spielfeld, eingabefeld, anzahlZรผge):
    """Place the current player's mark on cell number *eingabefeld* (1-9).

    Cells are numbered row by row starting at 1.  A cell that is already
    occupied ('X' or 'O') is left untouched.  The board is mutated in
    place; nothing is returned.

    Cleaned up: the unused local ``printen`` is removed and the ``raus``
    break flag is replaced by an early return.
    """
    for i in range(len(spielfeld)):
        for j in range(len(spielfeld[i])):
            eingabefeld -= 1
            if eingabefeld == 0:
                # Only claim the cell if it is still free.
                if spielfeld[i][j] != 'X' and spielfeld[i][j] != 'O':
                    spielfeld[i][j] = Spieler(anzahlZรผge)
                return
def GleicheZeichen(eins, zwei, drei):
    """Return True when all three cells carry the same player mark.

    Only 'X' and 'O' count; three equal digits (free cells) are not a
    winning triple.  The original's nested if-chains are collapsed into
    a single chained comparison.
    """
    return eins in ('X', 'O') and eins == zwei == drei
def Unentschieden(feld):
    """Return True when every cell of the board is occupied (a draw).

    Generalised from the original hard-coded count of 9 cells to a board
    of any size: the game is drawn when the number of occupied cells
    equals the total number of cells.
    """
    belegt = sum(1 for zeile in feld for zelle in zeile if zelle in ('X', 'O'))
    gesamt = sum(len(zeile) for zeile in feld)
    return belegt == gesamt
def Result(feld):
    """Return True when any row, column or diagonal of the 3x3 board
    holds three identical player marks."""
    for i in range(3):
        # Check row i, then column i (same scan order as before).
        if GleicheZeichen(feld[i][0], feld[i][1], feld[i][2]):
            return True
        if GleicheZeichen(feld[0][i], feld[1][i], feld[2][i]):
            return True
    # Finally the two diagonals.
    if GleicheZeichen(feld[0][0], feld[1][1], feld[2][2]):
        return True
    return GleicheZeichen(feld[0][2], feld[1][1], feld[2][0])
def Spieler(anzahlZรผge):
    """Return the mark of the player whose turn it is.

    'X' moves on even move counts, 'O' on odd ones.  Generalised from
    the original explicit membership lists (0..9) to any non-negative
    move count via the parity check.
    """
    return 'X' if anzahlZรผge % 2 == 0 else 'O'
# Start the periodic stats writer, then run the bot (blocking).
client.loop.create_task(update_stats())
# NOTE(review): "der token" is a placeholder -- supply the real bot token via
# configuration or an environment variable; never commit it to source control.
client.run("der token")
#https://discordapp.com/oauth2/authorize?permissions=271969383&scope=bot&client_id=678018272406405123
"finnp@schule.koeln"
] | finnp@schule.koeln |
1a7f5319009ed0666fd94a80ff9343ec1cc10202 | 1a41e00d8a585cac18e667a68a3c71b7a79cc8fe | /test_script.py | 4aecbfef74ad439a7f2d82028d320dc0a78ef445 | [] | no_license | xianhegithub/deeprm | 56aa7206b5713546dc8f5b5eb6acd5439ca11b88 | c82220249a03e02ec5f89854c09d6ba713efdcb1 | refs/heads/master | 2022-12-07T17:05:44.701290 | 2020-09-02T21:34:57 | 2020-09-02T21:34:57 | 292,399,156 | 0 | 0 | null | 2020-09-02T21:32:52 | 2020-09-02T21:32:52 | null | UTF-8 | Python | false | false | 73 | py | import os
# Launch the policy-gradient training script on GPU 0 via a Theano env flag.
os.system("THEANO_FLAGS='device=gpu0' python -u pg_re_xh.py")
| [
"noreply@github.com"
] | noreply@github.com |
7aa59aea975905e32d5a95f52ad0352ff138a8cb | 8f7dd3ff195cfb32138693c7f26b8daf860ad68b | /math_series/series.py | 5e2ee689ccd682a070b3a57e44b5b1d46bf8a006 | [] | no_license | mrobeidat/math-series | e241563a8f599169448d1e801b1189176ed5b40f | 5f6e27086227046f8d81953889afba5601912ea2 | refs/heads/main | 2023-08-29T20:35:13.507862 | 2021-11-01T14:29:24 | 2021-11-01T14:29:24 | 423,444,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py |
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Computed iteratively in O(n) instead of the original naive double
    recursion, which was exponential in n.  Returns None for negative n,
    matching the original implementation's implicit behaviour.
    """
    if n < 0:
        return None
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def lucas(n):
    """Return the n-th term of this module's Lucas-style series
    (L(0)=2, L(1)=3, L(n)=L(n-1)+L(n-2)).

    Computed iteratively in O(n) instead of the original exponential
    double recursion.  Returns None for negative n, matching the
    original implementation's implicit behaviour.
    """
    if n < 0:
        return None
    a, b = 2, 3
    for _ in range(n):
        a, b = b, a + b
    return a
def sum_series(n, first=0, second=1):
    """General additive series: s(0)=first, s(1)=first+second,
    s(n)=s(n-1)+s(n-2).

    The defaults (first=0, second=1) reproduce fibonacci(); (2, 1)
    reproduces lucas().  Computed iteratively in O(n) instead of the
    original exponential double recursion.  Returns None for negative
    n, matching the original implementation's implicit behaviour.
    """
    if n < 0:
        return None
    a, b = first, first + second
    for _ in range(n):
        a, b = b, a + b
    return a
| [
"y.linux96@gmail.com"
] | y.linux96@gmail.com |
eaed7ba6c6b86c670cb4dca633d285a1e400b6d3 | 8b21292a6b866a6b2b3a54802ecc3369840d0a1f | /APITesting.py | 3250aebbc5738fb94eb19e0bc491ec69bc97c547 | [] | no_license | Sahi1012/Automations | 2fdc71475be04ae2c929eba41e5b553d7812a662 | 55b28a0bb86fd784be1131ad6745a353399eefd6 | refs/heads/master | 2021-03-28T18:18:47.707797 | 2020-05-27T13:15:31 | 2020-05-27T13:15:31 | 247,884,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import json
import requests
import jsonpath
# Smoke test of the public reqres.in sample REST API.
url = "https://reqres.in/api/users?page=2"
get= requests.get(url)
#print(get)
#Display Response Content
#print(get.content)
#print(get.headers)
#Validate Response Code
# NOTE(review): assert statements are stripped under `python -O`; raise an
# exception instead if this check must always run.
response_code = get.status_code
assert response_code == 200
print(get.headers.get('Date'))
print(get.headers.get('Connection'))
print(get.encoding)
#Fetch Everything in a Text Format
response = json.loads(get.text)
print(response)
#Search required data with JsonPath
# Print the first_name of the first three users via JSONPath queries.
for i in range(0,3):
    data = jsonpath.jsonpath(response,'data['+str(i)+'].first_name')
    print(data)
| [
"geekergeek1012@gmail.com"
] | geekergeek1012@gmail.com |
6daaf89e48c59092496821c2c18d5ea4a9650384 | 0f02cf1470124e57b0e3f79c2adeb789dc1fb1ac | /Preparation/SimilarityNER.py | 8497af154d28335757fa57df715318e407b7182d | [
"MIT"
] | permissive | chenhan97/PersonGAN | 8fd06679df1895cfce7b38f3533a2a0bf3455c9f | 573704cbb7d2269135795c768d79dc979c821477 | refs/heads/master | 2022-04-29T13:39:03.340243 | 2022-03-29T11:14:40 | 2022-03-29T11:14:40 | 160,812,967 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | from nltk.tag import StanfordNERTagger
import sys
sys.path.append("..")
from utils import Util
import os
def TextNERExtractor(filepath):
    """Return the unique tokens in *filepath* that Stanford NER tags as a
    named entity (location, person, organization, money, percent, date,
    time).

    The file is read eagerly inside a ``with`` block and closed before
    tagging; the original kept the handle open across ``tag()`` and never
    closed it when the tagger raised.
    """
    NERList = ['LOCATION', 'PERSON', 'ORGANIZATION', 'MONEY', 'PERCENT', 'DATE', 'TIME']
    model_filename = sys.path[0] + '/Preparation/models/english.muc.7class.distsim.crf.ser.gz'
    path_to_jar = sys.path[0] + '/Preparation/stanford-ner.jar'
    with open(filepath, encoding='utf-8') as text_file:
        text = text_file.read()
    tagged = StanfordNERTagger(model_filename, path_to_jar)
    entities = tagged.tag(text.split())
    # Keep the token of every (token, tag) pair whose tag is of interest.
    NER_word_list = [word[0] for word in entities if word[1] in NERList]
    return list(set(NER_word_list))
def CompareNER(path, key_info_list, cover=1, min_corpu=500): #change for test
    """Select the files under *path* whose extracted named entities overlap
    *key_info_list* in at least *cover* tokens; pad the selection with
    random other files up to *min_corpu* entries when too few qualify."""
    all_files = Util.NERlist(path)
    matching = []
    for candidate in all_files:
        entities = TextNERExtractor(candidate)
        hits = sum(1 for word in entities if word in key_info_list)
        if hits >= cover:
            matching.append(candidate)
    if len(matching) < min_corpu:
        print("Source is limited, please show more relevant news")
        from random import sample
        remaining = [f for f in all_files if f not in matching]
        matching = matching + sample(remaining, min_corpu - len(matching))
    return matching
def MergeQualFile(key_info_list, author_name, Cover, Min_corpu):
    """Concatenate every qualifying data file (per CompareNER) into the
    per-author training file under Preparation/save/data/.

    The output file is opened once in append mode instead of being
    re-opened for every input file, as the original did.
    """
    QualifyFileList = CompareNER(sys.path[0] + "/Preparation/data", key_info_list, cover=Cover, min_corpu=Min_corpu)
    with open(sys.path[0] + "/Preparation/save/data/" + author_name, 'a', encoding='utf-8') as Writer:
        for file in QualifyFileList:
            with open(file, 'r', encoding='utf-8') as Reader:
                for line in Reader:
                    Writer.write(line)
    print("qulified files are all found")
| [
"noreply@github.com"
] | noreply@github.com |
aab90cce97303add44325afff2ac246e0fb5bed5 | 86f46a8c637726d1a69d5b326b6ff1890c901cc8 | /blog/helper.py | e1408cced2ec63ac621d8ebe87adbd6ac2260756 | [] | no_license | ErikBZ/instant-crush | 00af49145e1f0afe516c0b1b3749b19514ca8104 | 5fc0bd580bf071c43e1c736d97f3ec6b4ba683f1 | refs/heads/master | 2021-03-27T18:52:18.848277 | 2017-10-27T13:33:59 | 2017-10-27T13:33:59 | 95,374,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | # file for some functions that don't need to live in
# in the django specific files
import os
from django.conf import settings
from django.utils.timezone import localtime, now
from .models import Blog
def get_md_file(md_path):
    """Read and return the contents of content/<md_path> under BASE_DIR.

    The original bound the path to a local named ``dir``, shadowing the
    builtin; renamed to ``full_path``.
    """
    md_file = ""
    full_path = os.path.join(settings.BASE_DIR, "content/" + md_path)
    # verifying that the path was made correctly
    print(full_path)
    with open(full_path) as f:
        md_file = f.read()
    return md_file
def update(blog_name, is_proj):
    """Create or refresh the Blog entry titled *blog_name* from its
    markdown file (<blog_name>.md).

    New entries get their publication date and project flag set; existing
    entries just receive the fresh content.  The original set ``content``
    and called ``save()`` twice on the existing-entry path; both now
    happen exactly once.
    """
    file = get_md_file(blog_name + ".md")
    if Blog.objects.filter(title=blog_name).exists():
        obj = Blog.objects.get(title=blog_name)
        print("Updating query")
    else:
        obj = Blog()
        obj.title = blog_name
        obj.pub_date = now()
        obj.is_project = is_proj
        print("Entry does not exist. Creating new entry")
    obj.content = file
    obj.save()
| [
"zapatabrandon@gmail.com"
] | zapatabrandon@gmail.com |
746438e8b55784eaf8d646743e2aee5439493ec8 | 662a9f2efe3f30eff1f72dd3b7a7b89a5844f5cf | /system/README/user-prefs.conf.spec | e725c508e1b2f906cfec50ee53e13230542cfc85 | [] | no_license | roniandgit/splunk | 23e1f3a8cfb0844d423a5e756b69756574b7292c | 4faf4c75e19ae234965318ad6db7aed2ea98f33a | refs/heads/master | 2020-04-27T10:48:16.347056 | 2019-03-07T04:42:16 | 2019-03-07T04:42:16 | 174,269,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | spec | # Version 7.2.4.2
#
# This file describes some of the settings that are used, and
# can be configured on a per-user basis for use by the Splunk Web UI.
# Settings in this file are requested with user and application scope of the
# relevant user, and the user-prefs app.
# Additionally, settings by the same name which are available in the roles
# the user belongs to will be used at lower precedence.
# This means interactive setting of these values will cause the values to be
# updated in
# $SPLUNK_HOME/etc/users/<username>/user-prefs/local/user-prefs.conf where
# <username> is the username for the user altering their preferences.
# It also means that values in another app will never be used unless they
# are exported globally (to system scope) or to the user-prefs app.
# In practice, providing values in other apps isn't very interesting, since
# values from the authorize.conf roles settings are more typically sensible
# ways to defaults for values in user-prefs.
[general]
default_namespace = <app name>
* Specifies the app that the user will see initially upon login to the
Splunk Web User Interface.
* This uses the "short name" of the app, such as launcher, or search,
which is synonymous with the app directory name.
* Splunk defaults this to 'launcher' via the default authorize.conf
tz = <timezone>
* Specifies the per-user timezone to use
* If unset, the timezone of the Splunk Server or Search Head is used.
* Only canonical timezone names such as America/Los_Angeles should be
used (for best results use the Splunk UI).
* Defaults to unset.
lang = <language>
* Specifies the per-user language preference for non-webui operations, where
multiple tags are separated by commas.
* If unset, English "en-US" will be used when required.
* Only tags used in the "Accept-Language" HTTP header will be allowed, such as
"en-US" or "fr-FR".
* Fuzzy matching is supported, where "en" will match "en-US".
  * Optional quality settings are supported, such as "en-US,en;q=0.8,fr;q=0.6"
* Defaults to unset.
install_source_checksum = <string>
* Records a checksum of the tarball from which a given set of private user
configurations was installed.
* Analogous to <install_source_checksum> in app.conf.
search_syntax_highlighting = [light|dark|black-white]
* Highlights different parts of a search string with different colors.
* Defaults to light.
* Dashboards ignore this setting.
search_use_advanced_editor = <boolean>
* Specifies whether the search bar is run using the advanced editor or in just plain text.
* If set to false, search_auto_format, and search_line_numbers will be false and search_assistant can only be [full|none].
* Defaults to true.
search_assistant = [full|compact|none]
* Specifies the type of search assistant to use when constructing a search.
* Defaults to compact.
search_auto_format = <boolean>
* Specifies if auto-format is enabled in the search input.
* Default to false.
search_line_numbers = <boolean>
* Display the line numbers with the search.
* Defaults to false.
datasets:showInstallDialog = <boolean>
* Flag to enable/disable the install dialog for the datasets addon
* Defaults to true
dismissedInstrumentationOptInVersion = <integer>
* Set by splunk_instrumentation app to its current value of optInVersion when the opt-in modal is dismissed.
hideInstrumentationOptInModal = <boolean>
* Set to 1 by splunk_instrumentation app when the opt-in modal is dismissed.
[default]
# Additional settings exist, but are entirely UI managed.
<setting> = <value>
[general_default]
default_earliest_time = <string>
default_latest_time = <string>
* Sets the global default time range across all apps, users, and roles on the search page.
[role_<name>]
<name> = <value>
| [
"mrsayantabasak@gmail.com"
] | mrsayantabasak@gmail.com |
03b546b952137e54723329d1559c8288fabd29d9 | 765189a475513378ae80c97faf38b99e3ce1dc28 | /algorithms/401-500/476.number-complement.py | 2a0252d82de04ee699e259e24f88bb2856487172 | [] | no_license | huilizhou/kemu_shuati | c8afc979f57634e066f6ce98da879cf1ed4e6b95 | 55b52cfff699aa71d92dd09d150ce71628b21890 | refs/heads/master | 2020-04-29T01:13:53.784665 | 2019-03-15T01:16:06 | 2019-03-15T01:16:06 | 175,723,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
# ไบบๅฎถ็่งฃๆณ
# return 2 ** (len(bin(num)) - 2) - 1 - num
# # ๆ็่งฃๆณ
# z = bin(num)[2:]
# z1 = ''
# for i in z:
# if i == '1':
# z1 += '0'
# else:
# z1 += '1'
# res = int(z1, 2)
# return res
# ไบบๅฎถ็่งฃๆณ
# n = 2
# while n < num:
# n <<= 1
# return n - 1 - num
i = 1
while num >= i:
num ^= i
i <<= 1
return num
# Quick manual check: the complement of 5 (0b101) is 2 (0b010).
print(Solution().findComplement(5))
| [
"2540278344@qq.com"
] | 2540278344@qq.com |
9c9914865de3c1df41c80e63a0c11656069a6471 | 0455bd20bfc0fdd9b8553d033891e5b31e2e9384 | /CrunchbaseInitial/CrunchbaseDataExtraction/AsianGendersNonEntrepreneurs.py | e278a5d1b35335d8f68f2f83f007fc2ada86f1e0 | [] | no_license | kyriacosar/LInC-Eclipse-Repository | a0419305b824d8adcab0d31ab71ce9e4e2307f22 | c480e071f9e571224e55983c3e9c6d0f70d0e511 | refs/heads/master | 2020-12-02T21:25:36.537787 | 2017-07-27T09:25:00 | 2017-07-27T09:25:00 | 96,314,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | '''
Created on Jul 11, 2017
@author: kyriacos
'''
import json
import math
if __name__ == '__main__':
    # Input: one JSON face-attribute record per line; output: a short text report.
    # NOTE(review): neither file handle is ever closed -- wrap both in `with`
    # blocks when refactoring.
    e_file_in = open("../../../../Documents/Crunchbase Project Data/Microsoft/Crunchbase Results/Data Dictionaries/Crunchbase_Non_Entrepreneurs_FaceAttributes_Dictionaries.json", "r")
    e_file_out = open("../../../../Documents/Crunchbase Project Data/Microsoft/Crunchbase Results/Data Extraction/Crunchbase_Asian_Non_Entrepreneurs_Gender_Percentages.txt", "w")
    # Tallies over records whose ethnicity field equals 'Asian'.
    male = 0
    maleAge = 0
    female = 0
    femaleAge = 0
    count=0
    for record in e_file_in:
        try:
            jDict = json.loads(record)
            str_gender = str(jDict['faceAttributes']['gender'])
            try:
                str_ethn = str(jDict['faceAttributes']['ethnicity'])
                if str_ethn == 'Asian':
                    if str_gender == 'male':
                        male += 1
                        maleAge += jDict['faceAttributes']['age']
                    elif str_gender == 'female':
                        female += 1
                        femaleAge += jDict['faceAttributes']['age']
                    count += 1
            except KeyError:
                # NOTE(review): bare `print` is a no-op expression, not a call --
                # records without an ethnicity field are silently skipped.
                print
        except ValueError:
            print("Error in json to dictionary translation.")
    # NOTE(review): the divisions below raise ZeroDivisionError when no
    # matching records were found (count, male or female == 0).
    e_file_out.write("Gender percentages among Asian Entrepreneurs:\n\n")
    e_file_out.write("Males: "+str(male)+" out of "+str(count)+" with percentage "+str(male/float(count)*100.0))
    e_file_out.write("\nFemales: "+str(female)+" out of "+str(count)+" with percentage "+str(female/float(count)*100.0))
    e_file_out.write("\n\nAverage age among non Asian Entrepreneurs:\n\n")
    e_file_out.write("Male: "+str(maleAge/male))
    e_file_out.write("\nFemale: "+str(femaleAge/female))
"Kyriacos"
] | Kyriacos |
e2daf2c10b89897b95813eb9b9aa8cb2a7c61e8b | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_textbox04.py | 486daa245958fcdc1dee5ed14a655faaee774af2 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,369 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('textbox04.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'column'})
        chart.axis_ids = [61365632, 64275584]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # Write each data column and attach a matching chart series.
        for col_letter, column in zip('ABC', data):
            worksheet.write_column(col_letter + '1', column)
            chart.add_series(
                {'values': '=Sheet1!${0}$1:${0}$5'.format(col_letter)})

        worksheet.insert_chart('E9', chart)
        worksheet.insert_textbox('F25', 'This is some text')

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
fea8328430f99719be80b3244d0aa3325e8de808 | 4f08a8172142c09b4d4f05f302ff120e91ef2f09 | /src/train_att.py | c540ba36179dc736124e28a7b68dcab9d93c9629 | [] | no_license | rpmcruz/background-invariance | d6e434d2da9c32bd0e9d7456263cfda5fa741ca9 | 5c5e4587f35f817473a0cacdf76059947eb5972e | refs/heads/main | 2023-02-18T04:48:37.518278 | 2021-01-16T12:43:06 | 2021-01-16T12:43:06 | 330,155,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,217 | py | # https://towardsdatascience.com/self-attention-in-computer-vision-2782727021f6
# https://arxiv.org/pdf/1801.09927.pdf
# Command-line configuration for the training run.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dataset')  # name of a dataset loader function in mydatasets
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--batchsize', type=int, default=8)
parser.add_argument('--tau', type=float, default=0.7)  # heat-map threshold used by crop_heatmap
parser.add_argument('--output')
args = parser.parse_args()
import tensorflow as tf
# Enable on-demand GPU memory allocation so TF does not grab all VRAM upfront.
for g in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(g, True)
import numpy as np
from skimage.measure import label, regionprops
import mydatasets, mymodels, mydatagen
# Load (train X, train y) plus a list of test sets; tss[-1] doubles as validation.
(Xtr, Ytr), tss = getattr(mydatasets, args.dataset)()
g = mydatagen.Gen(Xtr, Ytr, args.batchsize)
# 1. Train global model
ce = tf.keras.losses.SparseCategoricalCrossentropy(True)  # from_logits=True
acc = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
# Larger images get the VGG19 backbone; smaller ones a plain classifier.
global_model = mymodels.vgg19 if Xtr.shape[1] >= 128 else mymodels.classifier
global_model, opt = global_model(Xtr.shape[1:], Ytr.max()+1)
global_model.summary()
global_model.compile(opt, ce, [acc])
global_model.fit(g.flow(), steps_per_epoch=g.steps(),
    epochs=args.epochs, verbose=2, validation_data=tss[-1])
# 2. Masks and bounding boxes
def crop_heatmap(x):
    """Crop *x* to the bounding box of the largest above-threshold
    heat-map region, or return None when the heat map is flat.

    NOTE(review): assumes *x* is a one-image batch (``predict(x)[0]``
    takes the first sample) with a spatial (H', W', C) model output --
    confirm against heatmap_model.
    """
    # the heatmap is generated by reducing channels using the maximum absolute
    h = heatmap_model.predict(x)[0]
    h = np.max(np.abs(h), 2)
    # mask is created by using a threshold on the normalized heatmap
    min_h = np.min(h)
    max_h = np.max(h)
    if min_h == max_h:
        return None  # no contrast: thresholding would be meaningless
    h = (h - min_h) / (max_h - min_h)
    m = h >= args.tau
    # get bounding box of largest bounding box
    # label() marks connected components; the most frequent non-zero label is
    # the largest region ([1:] skips the background count, +1 restores the id).
    labels = label(m)
    largest = labels == np.argmax(np.bincount(labels.flat)[1:])+1
    row1, col1, row2, col2 = regionprops(largest.astype(int))[0].bbox
    # crop: scale heat-map coordinates up to input-image coordinates
    return x[:,
        x.shape[1]*row1//h.shape[0]:x.shape[1]*row2//h.shape[0]+1,
        x.shape[2]*col1//h.shape[1]:x.shape[2]*col2//h.shape[1]+1]
# 3. Train the attention branch
# Unlike the paper, we train the concatenation of local and global branch at the same time.
def AttClassifier(X, Y, heatmap_model):
    """Build the attention classifier: a small conv stack over the cropped
    region, concatenated with globally pooled features from the frozen
    heat-map branch, followed by dense layers ending in class logits."""
    x = input_crop_shape = tf.keras.layers.Input((None, None, X.shape[3]))
    for filters in [128, 256, 512]:
        x = tf.keras.layers.Conv2D(filters, 3, 2, 'same', activation='relu')(x)
    x = tf.keras.layers.GlobalAveragePooling2D()(x) # crop/local branch
    h = tf.keras.layers.GlobalAveragePooling2D()(heatmap_model.output) # global branch
    x = tf.keras.layers.Concatenate()([x, h])
    x = tf.keras.layers.Dense(2048)(x)
    x = tf.keras.layers.Dense(256)(x)
    x = tf.keras.layers.Dense(Y.max()+1)(x)
    return tf.keras.models.Model([heatmap_model.input, input_crop_shape], x)
# Tap the last max-pooling layer of the global model as the heat-map output
# and freeze it, so only the attention branch trains below.
heatmap = [l for l in global_model.layers if type(l).__name__ == 'MaxPooling2D'][-1]
heatmap_model = tf.keras.models.Model(global_model.input, heatmap.output)
heatmap_model.trainable = False
local_model = AttClassifier(Xtr, Ytr, heatmap_model)
def gen(X, Y):
    """Yield ([full image, heat-map crop], label) pairs indefinitely,
    applying the per-image augmentation from mydatagen.Transform.

    Samples whose crop is empty (flat heat map or zero-area box) are
    skipped; the average crop size is printed after each full pass.
    """
    t = mydatagen.Transform()
    while True:
        ix = np.random.choice(len(X), len(X), False)  # one shuffled epoch
        sum_crop_size = np.zeros((2,))
        for i in ix:
            X_ = t.each(X[i])[np.newaxis]
            X_crop = crop_heatmap(X_)
            if X_crop is None or X_crop.shape[1] == 0 or X_crop.shape[2] == 0:
                continue # ignore crop of size zero
            sum_crop_size += X_crop.shape[1:3]
            yield [X_, X_crop], Y[i:i+1]
        print('Average crop:', sum_crop_size/len(X))
class ResultsCb(tf.keras.callbacks.Callback):
    """After every epoch, report loss/accuracy on the train set and on
    every test set in *tss*."""
    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): mutable default for *logs* -- harmless here since
        # Keras always passes it, but a well-known Python pitfall.
        for i, (Xts, Yts) in enumerate([(Xtr, Ytr)] + tss):
            scores = local_model.evaluate(gen(Xts, Yts), verbose=0, steps=len(Xts))
            print(f'score {i}:', scores)
# Train the attention branch (the heat-map branch stays frozen) and report
# per-dataset scores after every epoch and once more at the end.
local_model.compile('adam', ce, [acc])
local_model.fit(
    gen(Xtr, Ytr), steps_per_epoch=len(Xtr), epochs=args.epochs, verbose=2,
    validation_data=gen(*tss[-1]), validation_steps=len(tss[-1][0]),
    callbacks=[ResultsCb()])
for i, (Xts, Yts) in enumerate([(Xtr, Ytr)] + tss):
    scores = local_model.evaluate(gen(Xts, Yts), verbose=0, steps=len(Xts))
    print(f'score {i}:', scores)
| [
"ricardo.pdm.cruz@gmail.com"
] | ricardo.pdm.cruz@gmail.com |
977b1e7ce55933a751614e974582e0c561f72d3c | 353a149675375ba0895798933455d9c0718c9cb9 | /.venv/bin/wheel | 37a0e7d2adc33bee7dab690c9f9726a0a6934134 | [] | no_license | dhdepddl/produce101 | c69906f0de6c2ce6c407edacdfddd316364d18e4 | 538c3c211c544ec4f8f613465e8014f2883f6bad | refs/heads/master | 2021-01-23T05:46:00.122176 | 2017-05-31T20:34:37 | 2017-05-31T20:34:37 | 92,986,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/Users/Young-eunKim/Downloads/csProject/aws/produce101/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Looks like a setuptools-generated console-script shim: normalise
    # argv[0] by stripping a "-script.py(w)"/".exe" suffix, then delegate
    # to wheel's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"dhdepddl@gmail.com"
] | dhdepddl@gmail.com | |
f7af1f93ea513112a11febdbc0200461756311cf | a9ee69f84351a80cc02c971a04344c7c58d0154d | /CSGO_match_detail.py | bd8e8b561909f07a5dd36de14dbcf27153f75fa4 | [] | no_license | yanlele9654/Dota2matchIDandELOscoreupdate | 679673b17039ea8bec56690b4ed0fecb1bab41b6 | 9dad5df8bfd191cfb86d970a544727ad9da7fcb3 | refs/heads/master | 2023-02-10T12:56:36.517401 | 2021-01-08T05:51:00 | 2021-01-08T05:51:00 | 262,246,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | # -*- coding: UTF-8 -*-
#%%
import pandas as pd
import pymongo
import DataBaseAccess_c as DBA
import dota2_api
db_eng_1 = DBA.DbInfoGenerator('vpgame').info
# db_eng_2 = DBA.DbInfoGenerator('model_builder').info
# connect to the local database
client = pymongo.MongoClient(db_eng_1['host'], 7974)
# Newclient = pymongo.MongoClient(db_eng_2['host'], 27017)
# connect to the database 'admin'
db = client['admin']
# account/password for 'admin'
db.authenticate(db_eng_1['user'], db_eng_1['password'])
print('success connet the database')
# NOTE(review): hard-coded host and credentials below -- move them into
# configuration (like db_eng_1 above) and rotate the exposed password.
NewClient = pymongo.MongoClient('121.41.79.9',7979)
new_db = NewClient['crawler']
new_db.authenticate('yanziao','euOkM2gYuPdvxUbl')
print('success connet to the CSGO match database')
# query the match ids plus both team blocks and build a list
dota_match = list(new_db.csgo_match_detail.find({}, {'_id': 0, 'third_id': 1,'team1':1,'team2':1}))
#%%
print(len(dota_match))
team1_player_info = []  # NOTE(review): never populated -- the appends below are commented out
# For every match, copy each player's stat dict from team1 and team2, stamp
# it with the match id and team id, and insert it into CSGO_players_info.
for i in range(len(dota_match)):
    if 'team1' in dota_match[i].keys():
        #print('team1 in dota_match')
        if 'player_stat' in dota_match[i]['team1']:
            #print('player_stat in team1')
            for j in range(len(dota_match[i]['team1']['player_stat'])):
                try:
                    player_info = dota_match[i]['team1']['player_stat'][j]
                    #print(player_info)
                    #player_info['match_id'] = dota_match[i]['third_id']
                except KeyError:
                    continue
                else:
                    # Runs only when the lookup above raised no KeyError.
                    player_info['match_id'] = dota_match[i]['third_id']
                    player_info['team_id'] = dota_match[i]['team1']['id']
                    #print(player_info)
                    #team1_player_info = team1_player_info.append(player_info)
                    #print(team1_player_info)
                    #team1_player_info = []
                    db.CSGO_players_info.insert_one(player_info)
    if 'team2' in dota_match[i].keys():
        if 'player_stat' in dota_match[i]['team2']:
            #print('player_stat in team1')
            for j in range(len(dota_match[i]['team2']['player_stat'])):
                try:
                    player_info_2 = dota_match[i]['team2']['player_stat'][j]
                    #print(player_info)
                    #player_info['match_id'] = dota_match[i]['third_id']
                except KeyError:
                    continue
                else:
                    player_info_2['match_id'] = dota_match[i]['third_id']
                    player_info_2['team_id'] = dota_match[i]['team2']['id']
                    #print(player_info)
                    #team1_player_info = team1_player_info.append(player_info)
                    #print(team1_player_info)
                    #team1_player_info = []
                    db.CSGO_players_info.insert_one(player_info_2)
    print(i)
print('success insert data ready to calculate the elo')
# %%
# Ad-hoc inspection of one record (notebook-style cell).
dota_match[1085]['team1']['player_stat']
# %%
| [
"ziaoyan@gmail.com"
] | ziaoyan@gmail.com |
4cb2807167fc3041090d3ad932f279538d7d8e6d | 3cb84cc8fec5c4a661b35bb1206e4e1488bf24ae | /src/predictionAlgorithms/sequenceCorelation/multiImageSequenceTransformation.py | 14978da46ef4ee832f925310a582674ff836fda6 | [
"MIT"
] | permissive | aivaras-ciurlionis/meteo | dbdf3fad13884e42264c309e166381009eb2a82d | 434759d16f7cca505d280475611d1fef5176827b | refs/heads/master | 2023-07-20T03:35:51.176354 | 2020-05-24T11:50:15 | 2020-05-24T11:50:15 | 115,193,737 | 0 | 0 | MIT | 2023-07-05T21:01:45 | 2017-12-23T12:19:49 | Python | UTF-8 | Python | false | false | 2,969 | py | import numpy
from src.predictionAlgorithms.correlation.baseTransformation import BaseTransformation
from src.utilities.imageAnalysis.pixelsRainStrengthConverter import PixelsRainStrengthConverter
class MultiImageSequenceTransformation(BaseTransformation):
    """Prediction transformation that exhaustively searches a movement
    vector over several source images and extrapolates future images
    with the best one.

    ``transformation_algorithm`` is a (name, transformations) pair,
    where each transformation carries a (start, end[, step]) search
    range in its second element.
    """
    # Class-level defaults; overwritten per instance in __init__.
    transformations = []
    baseName = 'Multi image sequence'
    name = ''
    source_count = 4  # how many trailing source images are used for fitting
    errorFunction = None
    def __init__(self, transformation_algorithm, error_function, source_count=4):
        self.name = self.baseName + ' ' + transformation_algorithm[0]
        self.transformations = transformation_algorithm[1]
        self.source_count = source_count
        self.errorFunction = error_function
        super().__init__(transformation_algorithm, error_function)
    def predict(self, source_images, count):
        """Fit the best vector on the last source_count images, then
        generate *count* future images from the full sequence."""
        best_vector = self.find_best_movement_vector_multi(source_images[-self.source_count].copy(), source_images[-self.source_count+1:])
        print(best_vector)
        return self.generate_images(source_images, best_vector, count)
    def find_best_movement_vector_multi(self, start_image, evaluation_images):
        """Search the whole parameter space for the vector whose generated
        images best match *evaluation_images*."""
        base_vector = numpy.zeros(len(self.transformations))
        current_vector = numpy.zeros(len(self.transformations))
        print('-------')
        # -100 is the initial "best" sentinel (scores are maximised below).
        return self\
            .find_vector_recursive(start_image, evaluation_images, 0, -100, base_vector, current_vector)[0]
    def find_vector_recursive(self, start_image, evaluation_images, index, best_error, best_vector, current_vector):
        """Enumerate parameter *index* over its (start, end[, step]) range,
        recursing into the remaining parameters; return the best
        (vector, score) pair found.

        NOTE(review): despite the name, a *higher* value from
        find_current_error is treated as better -- the error function
        appears to return a similarity score; confirm.
        """
        if index >= len(self.transformations):
            return best_vector, best_error
        value = self.transformations[index][1][0]
        end = self.transformations[index][1][1]
        step = 1
        if len(self.transformations[index][1]) > 2:
            step = self.transformations[index][1][2]
        current_vector[index] = value
        while value < end:
            result = self\
                .find_vector_recursive(start_image, evaluation_images, index + 1, best_error, best_vector, current_vector)
            best_vector = result[0]
            best_error = result[1]
            value += step
            current_vector[index] = value
            error = self.find_current_error(start_image, evaluation_images, current_vector)
            if error > best_error:
                # Copy: current_vector keeps being mutated by the search.
                best_vector = list(current_vector)
                best_error = error
        return best_vector, best_error
    def find_current_error(self, start_image, evaluation_images, current_vector):
        """Score *current_vector*: generate source_count-1 images from
        *start_image* and average the error-function value against the
        real follow-up images."""
        generated = self.generate_images([start_image], current_vector, self.source_count-1)
        error = 0
        for i, image in enumerate(generated):
            image1 = PixelsRainStrengthConverter.normalise_image(image)
            image2 = PixelsRainStrengthConverter.normalise_image(evaluation_images[i])
            step_error = self.errorFunction.get_error(image1, image2)
            error += step_error
        return error / len(evaluation_images)
| [
"aivarasciurlionis@gmail.com"
] | aivarasciurlionis@gmail.com |
7c807805926e18c66667a6c5a5ddb2ab2e810823 | 6a675191b9104b3bd306f3e588bf7f81185071f5 | /example.py | 0392a6db5dd412b57af2e27f0f1a41b5f3943814 | [
"MIT"
] | permissive | daviskirk/subseqs | 8acc00d063dcdcb68dd0a6ef6c427bf66cac8b57 | 8e538b1d1857413721c27eb47b567e9b7fedddf6 | refs/heads/main | 2023-03-30T15:07:32.352141 | 2021-04-01T06:49:59 | 2021-04-01T06:49:59 | 353,602,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py |
import pyximport; pyximport.install()
import pandas as pd
import numpy as np
import numba as nb
from numba import jit, njit
from time import perf_counter, perf_counter_ns
from subseq import is_subseq_py, is_subseq_rs
from cy_subseq import find_loop_cy
import cython
def find_loop(seq, subseq):
n = len(seq)
m = len(subseq)
for i in range(n - m + 1):
found = True
for j in range(m):
if seq[i + j] != subseq[j]:
found = False
break
if found:
return True
return False
find_loop_jit = jit(forceobj=True, cache=True)(find_loop)
find_loop_njit = njit(cache=True)(find_loop)
subseq = ['dd', 'ee']
seq = ['a', 'b', 'c'] * 100 + subseq
np_seq = np.array(seq)
np_subseq = np.array(subseq)
pd_seq = pd.Series(seq).astype("string").values
pd_subseq = pd.Series(subseq).astype("string").values
cat_seq = pd.Series(seq).astype("category").values.codes
cat_subseq = pd.Series(subseq).astype("category").values.codes
if __name__ == "__main__":
fcn_map = {
"py": lambda: find_loop(seq, subseq),
"cy": lambda: find_loop_cy(seq, subseq),
"rs": lambda: is_subseq_rs(seq, subseq),
"rs_py": lambda: is_subseq_py(seq, subseq),
"py_np": lambda: find_loop(np_seq, np_subseq),
"py_pd": lambda: find_loop(pd_seq, pd_subseq),
"jit": lambda: find_loop_jit(pd_seq, pd_subseq),
"njit": lambda: find_loop_njit(np_seq, np_subseq),
}
for k, fcn in fcn_map.items():
result = fcn()
print(f"{k}: {result}")
n = 1000
for k, fcn in fcn_map.items():
dt = 0
for i in range(n):
t0 = perf_counter_ns()
fcn()
t1 = perf_counter_ns()
dt += t1 - t0
print(f"{k}: {dt / n}")
| [
"davis.e.kirkendall@gmail.com"
] | davis.e.kirkendall@gmail.com |
0b4d9d25c17ed1578e764ccaff622ee73acbde60 | 7f831c5c2fe6d34ff59453c58e4ebd701b4efc7f | /venv/bin/pylint | 4e14f78291791db9e42cdc9baa2ce59a75f14018 | [] | no_license | bunny1985/NotifierServer | cb1797ceb492402a3813c69f281f038565292b44 | 3c6f1c6422b4cfb360e4ac9c201119594c1f4ac5 | refs/heads/master | 2022-12-09T14:42:26.876353 | 2017-12-21T23:17:47 | 2017-12-21T23:17:47 | 115,055,570 | 0 | 0 | null | 2021-06-01T21:48:20 | 2017-12-21T23:16:06 | Python | UTF-8 | Python | false | false | 252 | #!/home/michal/Project/notifyTest/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
| [
"mb.michal.banas@gmail.com"
] | mb.michal.banas@gmail.com | |
03f5b30b8b31f117f607625f46e2d24b23d191a9 | ba7a297f75b016a286d677b55343f60de69a9453 | /model/utils/test_policy.py | 591ffda8ab89a016e1b7abca18156654a5eb41bf | [
"MIT"
] | permissive | TheMonocledHamster/Counsel | 5666d11216c3cfee49e13b13fbf5723d6b43d698 | d2e4a2566d4d571d9ba0babe9a15750d827327b7 | refs/heads/master | 2023-05-10T15:57:06.061065 | 2023-04-22T11:39:56 | 2023-04-22T11:39:56 | 553,064,046 | 3 | 0 | MIT | 2023-04-22T07:33:10 | 2022-10-17T16:53:28 | Python | UTF-8 | Python | false | false | 3,903 | py | import joblib
import csv
import os
import os.path as osp
import torch
import sys
sys.path.insert(0, '../../')
from .logx import EpochLogger
from ..env import CloudEnv
def load_policy_and_env(fpath, itr='last', params=None):
# handle which epoch to load from
if itr=='last':
# check filenames for epoch (AKA iteration) numbers, find maximum value
pytsave_path = osp.join(fpath, 'pyt_save')
# Each file in this folder has naming convention 'modelXX.pt', where
# 'XX' is either an integer or empty string. Empty string case
# corresponds to len(x)==8, hence that case is excluded.
saves = [int(x.split('.')[0][5:]) for x in os.listdir(pytsave_path) if len(x)>8 and 'model' in x]
itr = '%d'%max(saves) if len(saves) > 0 else ''
else:
assert isinstance(itr, int), \
"Bad value provided for itr (needs to be int or 'last')."
itr = '%d'%itr
# load the get_action function
get_action = load_pytorch_policy(fpath, itr)
# try to load environment from save
# (sometimes this will fail because the environment could not be pickled)
# try:
# state = joblib.load(osp.join(fpath, 'vars'+itr+'.pkl'))
# env = state['env']
env = CloudEnv(log_dir=params['log_dir'], steps_per_epoch=params['steps_per_epoch'],
budget=params['budget'], slo_latency=params['slo_latency'],
overrun_lim=params['overrun_lim'], mode=params['mode'],
nconf=params['nconf'], ncomp=params['ncomp'])
return env, get_action
def load_pytorch_policy(fpath, itr):
""" Load a pytorch policy saved with Spinning Up Logger."""
fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt')
print('\n\nLoading from %s.\n\n'%fname)
model = torch.load(f=fname)
# make function for producing an action given a single state
def get_action(x, y):
with torch.no_grad():
x = torch.as_tensor(x, dtype=torch.float32)
y = torch.as_tensor(y, dtype=torch.float32)
action, _, _ = model.step(x, y)
return action
return get_action
def run_policy(env, get_action, output_fname, num_episodes=50):
assert env is not None, \
"Environment not found!\n\n It looks like the environment wasn't saved, " + \
"and we can't run the agent in it. :( \n\n Check out the readthedocs " + \
"page on Experiment Outputs for how to handle this situation."
logger = EpochLogger(output_fname=output_fname)
obs, r, d, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0
o, m = obs
while n < num_episodes:
a = get_action(o, m)
o, m, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
if d:
logger.store(EpRet=ep_ret, EpLen=ep_len)
print('Episode %d \t EpRet %.3f \t EpLen %d'%(n, ep_ret, ep_len))
obs, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
o, m = obs
n += 1
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('fpath', type=str)
parser.add_argument('--len', '-l', type=int, default=0)
parser.add_argument('--episodes', '-n', type=int, default=100)
parser.add_argument('--norender', '-nr', action='store_true')
parser.add_argument('--itr', '-i', type=int, default=-1)
parser.add_argument('--deterministic', '-d', action='store_true')
args = parser.parse_args()
env, get_action = load_policy_and_env(args.fpath,
args.itr if args.itr >=0 else 'last',
args.deterministic)
run_policy(env, get_action, args.len, args.episodes, not(args.norender))
| [
"adithyah07@gmail.com"
] | adithyah07@gmail.com |
088f5a935085713d1f20c82d2f95e0ba31e9fc85 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/difference-of-squares/6bb2f99092244bb48dce2c00c19a804f.py | bad5f033d6a46058afa17473abbfba5fefc147e7 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,191 | py | # -*- coding: utf-8 -*-
from __future__ import division
def difference(n):
"""
differences(int) -> int
Return the difference between the square of the sums and the sums of the
squares up to n.
"""
return square_of_sum(n) - sum_of_squares(n)
def square_of_sum(n):
"""
square_of_sum(int) -> int
Return the square of the sum of all integers from 0 to n >= 0.
"""
#1 + 2 + 3 + 4 + ... + n = n (n+1) //2
#note that the division works because n or n+1 is even
return ((n * (n + 1)) // 2) ** 2
def sum_of_squares(n):
"""
square_of_sum(int) -> int
Return the sum of all integers squared from 0 to n >= 0.
"""
#Let T_n be the sum of the integers up to n
#Let S_n be the sum of the squares up to n
#Let K_n be the sum of the cubes up to n
# K_n = K_(n+1) - (n+1)**3
# = K_n + 3*S_n + 3*T_n + n - (n+1)**3
#
# <=> 3*S_n = (n+1)**3 - n - 3*T_n
# = n**3 + 3*n**2 + 3*n + 1 - 3/2(n**2 - 1)
# = n**3 + 3/2*n**2 + n/2
# <=> S_n = 1/3 * n**3 + 1/2 * n**2 + 1/6 * n
# = ((2 * n + 3) * n + 1) * n * 1/6
return (((2 * n + 3) * n + 1) * n) // 6
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
3468a788789a89e21d9f7bca64d58226d88bd41f | f329f3061e3a72b2562bb242dfe1a2ed07fe65f0 | /plugins/yongyou_zhiyuan_a6_app.py | b860deb6797eb3a2707688c9c7b7c34d87871b9e | [
"MIT"
] | permissive | ver007/getcms | 58263174355eb16bae95b74f6efaff5069b4ce56 | da03c07457abc266cacddc3ccd67126f0b03da3d | refs/heads/master | 2021-01-19T07:01:51.453626 | 2016-04-13T08:28:14 | 2016-04-13T08:28:14 | 56,134,430 | 0 | 0 | null | 2016-04-13T08:27:38 | 2016-04-13T08:27:38 | null | UTF-8 | Python | false | false | 164 | py | #!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
whatweb.recog_from_file(pluginname, "yyoa/common/js/javaSeesion.js", "f_showallCookie")
| [
"hackerlq@gmail.com"
] | hackerlq@gmail.com |
ee32f1fbf5d8f763a4e5b636a2b60dcbbde94db1 | 98a26171e36e2e6f54fb067b62f36147c5bb2f52 | /Desafios/venv/Scripts/easy_install-script.py | c73dc38f1ea23a3c7b2073b4c24a9e6f90206b2e | [] | no_license | brpadilha/exercicioscursoemvideo | d75361726b9863c34a06883ce8f26131104c1dd8 | 1b82492a1bb6b7cfaf4b74b4fa9ea9eedad4b6ff | refs/heads/master | 2020-03-11T22:48:45.747256 | 2019-05-03T17:18:11 | 2019-05-03T17:18:11 | 130,303,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!"A:\Projeto curso python\Desafios\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
)
| [
"brpadilha.dev@gmail.com"
] | brpadilha.dev@gmail.com |
ea78d6fb59ce193e9097c5c549b653d24cd1882a | 54fce28140274c1136ef58283e7f6925ba67c32f | /aws/run_ec2_instance.py | 5de82a1cdf73ae5501a65826f500af3fefe0a54e | [] | no_license | mohitsingla123/Industrial-Assistent | 0bd8b06c82ac9a290ef0a2740fc654f67ca0e0f9 | de95f5d2dd1e3ecf4f4ca3352865a8e00d5da527 | refs/heads/master | 2022-07-11T04:23:34.432295 | 2020-05-18T16:09:54 | 2020-05-18T16:09:54 | 260,149,901 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | import create_ec2_instance
import ec2_instance
import os
import sys
import time
'''To provide choice to the user whether he wants to create the instance or
change the state to stop or start of the existing instance'''
def state_choice(st_ch):
if st_ch==0:
create_ec2_instance.main()
elif st_ch==1:
ec2_instance.main()
#Defining main function where he enters the choice to perform the state_choice function
def main():
print("Welcome to the portal where you can work on AWS EC2 instance")
print("************************************************************\n")
state_ch=int(input("""Enter the choice that you want to create instance or change the state of instance,
Press 0: For creating instance
Press 1: For changing the state of existing instance\n"""))
if state_ch==0 or state_ch==1:
state_choice(state_ch)
else:
print("Invalid Choice! Please select the correct choice")
main()
#Provide choice to again run the program or exit the program
def status():
l1=int(input("""Press 0, If you want to run the program: \nPress 1, If you want to exit the program\n"""))
if l1==0:
loop()
elif l1==1:
print("*********Thanks for using this Script*********\n")
time.sleep(1)
exit()
else:
print("Invalid Choice\n")
status()
#Starting point of the program
def loop():
if __name__ == '__main__':
os.system('cls')
main()
status()
loop()
| [
"noreply@github.com"
] | noreply@github.com |
96c866757520ccc5e307a307a8fd697449d3becc | 11485f818c6b7b6cf35c84d1abab77881e4e2004 | /ambience/modules/phash/phash_web.py | fefa08cbfeca4ae0b5e46b2c151d8abd326d48b8 | [] | no_license | fu4303/photos-scenery | f398659579982d20573485b98b825cc2a2ee7cdd | c475363564f1e8cf551f24829a8104bcab88f66c | refs/heads/master | 2023-06-17T12:11:53.495380 | 2021-07-15T19:01:31 | 2021-07-15T19:01:31 | 386,404,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,946 | py | import uvicorn
if __name__ == '__main__':
uvicorn.run('phash_web:app', host='127.0.0.1', port=33336, log_level="info")
import faiss
from pydantic import BaseModel
from fastapi import FastAPI, File,Form, HTTPException
from os import listdir
import numpy as np
from scipy.fft import dct
from numba import jit
import cv2
import sqlite3
import io
conn = sqlite3.connect('phashes.db')
index=None
IMAGE_PATH="./../../../public/images"
def init_index():
global index
try:
index = faiss.read_index_binary("trained.index")
except:
d=32*8
quantizer = faiss.IndexBinaryFlat(d)
index = faiss.IndexBinaryIVF(quantizer, d, 1)
index.nprobe = 1
index.train(np.array([np.zeros(32)],dtype=np.uint8))
all_data=get_all_data()
image_ids=np.array([np.int64(x[0]) for x in all_data])
phashes=np.array([x[1] for x in all_data])
if len(all_data)!=0:
index.add_with_ids(phashes, image_ids)
print("Index is ready")
def read_img_file(image_data):
return np.fromstring(image_data, np.uint8)
@jit(nopython=True)
def bit_list_to_32_uint8(bit_list_256):
uint64_arr=[]
for i in range(32):
bit_list=[]
for j in range(8):
if(bit_list_256[i*8+j]==True):
bit_list.append(1)
else:
bit_list.append(0)
uint64_arr.append(bit_list_to_int(bit_list))
return np.array(uint64_arr,dtype=np.uint8)
@jit(nopython=True)
def bit_list_to_int(bitlist):
out = 0
for bit in bitlist:
out = (out << 1) | bit
return out
@jit(nopython=True)
def diff(dct, hash_size):
dctlowfreq = dct[:hash_size, :hash_size]
med = np.median(dctlowfreq)
diff = dctlowfreq > med
return diff.flatten()
def fast_phash(resized_image,hash_size):
dct_data = dct(dct(resized_image, axis=0), axis=1)
return diff(dct_data, hash_size)
def get_phash(image_buffer,hash_size=16, highfreq_factor=4):
img_size = hash_size * highfreq_factor
query_image=cv2.imdecode(read_img_file(image_buffer),cv2.IMREAD_GRAYSCALE)
query_image = cv2.resize(query_image, (img_size, img_size), interpolation=cv2.INTER_LINEAR) #cv2.INTER_AREA
bit_list_256=fast_phash(query_image,hash_size)
phash=bit_list_to_32_uint8(bit_list_256)
return phash
def get_phash_and_mirrored_phash(image_buffer,hash_size=16, highfreq_factor=4):
img_size = hash_size * highfreq_factor
query_image=cv2.imdecode(read_img_file(image_buffer),cv2.IMREAD_GRAYSCALE)
query_image = cv2.resize(query_image, (img_size, img_size), interpolation=cv2.INTER_LINEAR) #cv2.INTER_AREA
mirrored_query_image=cv2.flip(query_image,1)
bit_list_256=fast_phash(query_image,hash_size)
bit_list_256_mirrored=fast_phash(mirrored_query_image,hash_size)
phash=bit_list_to_32_uint8(bit_list_256)
mirrored_phash=bit_list_to_32_uint8(bit_list_256_mirrored)
return np.array([phash,mirrored_phash])
def create_table():
cursor = conn.cursor()
query = '''
CREATE TABLE IF NOT EXISTS phashes(
id INTEGER NOT NULL UNIQUE PRIMARY KEY,
phash BLOB NOT NULL
)
'''
cursor.execute(query)
conn.commit()
def check_if_exists_by_id(id):
cursor = conn.cursor()
query = '''SELECT EXISTS(SELECT 1 FROM phashes WHERE id=(?))'''
cursor.execute(query,(id,))
all_rows = cursor.fetchone()
return all_rows[0] == 1
def delete_descriptor_by_id(id):
cursor = conn.cursor()
query = '''DELETE FROM phashes WHERE id=(?)'''
cursor.execute(query,(id,))
conn.commit()
def get_all_ids():
cursor = conn.cursor()
query = '''SELECT id FROM phashes'''
cursor.execute(query)
all_rows = cursor.fetchall()
return list(map(lambda el:el[0],all_rows))
def get_all_data():
cursor = conn.cursor()
query = '''
SELECT id, phash
FROM phashes
'''
cursor.execute(query)
all_rows = cursor.fetchall()
return list(map(lambda el:(el[0],convert_array(el[1])),all_rows))
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
def adapt_array(arr):
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def add_descriptor(id,phash):
cursor = conn.cursor()
query = '''INSERT INTO phashes(id, phash) VALUES (?,?)'''
cursor.execute(query,(id,phash))
conn.commit()
def sync_db():
ids_in_db=set(get_all_ids())
file_names=listdir(IMAGE_PATH)
for file_name in file_names:
file_id=int(file_name[:file_name.index('.')])
if file_id in ids_in_db:
ids_in_db.remove(file_id)
for id in ids_in_db:
delete_descriptor_by_id(id) #Fix this
print(f"deleting {id}")
print("db synced")
def phash_reverse_search(image_buffer):
target_features=get_phash_and_mirrored_phash(image_buffer)
D, I = index.search(target_features, 1)
print(D,I)
for i in range(2):
if D[i][0]<=32:
return [int(I[i][0])]
return []
app = FastAPI()
@app.get("/")
async def read_root():
return {"Hello": "World"}
class Item_image_id(BaseModel):
image_id: int
@app.post("/phash_reverse_search")
async def phash_reverse_search_handler(image: bytes = File(...)):
found_image=phash_reverse_search(image)
print(found_image)
return found_image
@app.post("/calculate_phash_features")
async def calculate_phash_features_handler(image: bytes = File(...),image_id: str = Form(...)):
features=get_phash(image)
add_descriptor(int(image_id),adapt_array(features))
index.add_with_ids(np.array([features]), np.int64([image_id]))
return {"status":"200"}
@app.post("/delete_phash_features")
async def delete_hist_features_handler(item:Item_image_id):
delete_descriptor_by_id(item.image_id)
index.remove_ids(np.int64([item.image_id]))
return {"status":"200"}
print(__name__)
if __name__ == 'phash_web':
create_table()
sync_db()
init_index()
| [
"44163887+qwertyforce@users.noreply.github.com"
] | 44163887+qwertyforce@users.noreply.github.com |
dee47a4c698525f9918c3cb48749bf0d284021a0 | 520f9caeb5c380c42efb26f6778340e8fa7deba8 | /Novice/02-04/listkerja/list/models.py | de355f6194c5b08910e9803b62c61ef780a2d8c6 | [] | no_license | Tama96/praxis-academy | 1fda57d4ddae1d87deb5bede2e9475e01ab75214 | b4de60ea0246dac6ee5f898454e5942e0ca91ac8 | refs/heads/master | 2022-12-26T13:49:09.925185 | 2020-10-12T01:39:07 | 2020-10-12T01:39:07 | 287,658,864 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.db import models
# Create your models here.
class Tugas (models.Model):
Task = models.TextField()
#Status = models.TextField()
#Action = models.TextField()
| [
"aldypratama96@gmail.com"
] | aldypratama96@gmail.com |
5470cb1f96191a6a4abe8370fa67ee1c31561713 | 3f02c24c865c30113cc622a4fe19cfdeace0dfc5 | /dashboard/api.py | c1fb432c4021c8a0b04284dae9fb8d4b2d42f21a | [] | no_license | ravi1173/django_login | 7385dfac41e55dd849e09728e22bfe71db1bea5a | 36d17b34b5a3b8bf5ac25c585a23d7808ae49894 | refs/heads/main | 2023-08-26T08:42:55.828435 | 2021-11-05T10:40:36 | 2021-11-05T10:40:36 | 424,098,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import *
class ImagesList(APIView):
def get(self, request):
model = my_images.objects.all()
serializer = ImagesSerializer(model, many=True)
return Response(serializer.data)
def post(self, request):
serializer = ImagesSerializer(data = request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | [
"noreply@github.com"
] | noreply@github.com |
812c60a9a62f39e42d66aaf3a54af5b8ea26977c | 42ad8af851f911a12ea831d752897ecba597abcc | /wargame/migrations/0006_challange.py | 99dd38ccf69b1e8202e0b750f77fb82ac527b384 | [] | no_license | wojtekzozlak/wargame | 75d2afebbaaa19a8d57f24bfd778f4f1cb39de76 | d61b8c60be70f5fc1c514e8b08e61a2cd5691061 | refs/heads/master | 2020-12-20T12:42:00.472359 | 2016-08-04T20:43:58 | 2016-08-04T20:43:58 | 53,892,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-02 07:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wargame', '0005_shipai_representant'),
]
operations = [
migrations.CreateModel(
name='Challange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField(auto_now_add=True)),
('accepted', models.BooleanField(default=False)),
('challanged', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challanged', to=settings.AUTH_USER_MODEL)),
('challanger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challangers', to=settings.AUTH_USER_MODEL)),
('match', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='wargame.Match')),
],
),
]
| [
"wzoltak@localhost"
] | wzoltak@localhost |
098652f1ba5f7e35f2cd33828c5f556fb7f691c3 | 866efb0624cf3ceb9903ed34aaab3e7d11c8bd27 | /9y10.py | 56e042c3e52de2a8e3fcef525a71726d6921c01f | [] | no_license | StephyMunoz/ExamenADD | 9dfa4983f7f037478edc763883e062c9edc82b54 | 40db1a0f4a9811756f3ad54052f0dc452c6052f2 | refs/heads/main | 2023-02-12T19:22:19.012280 | 2021-01-19T04:46:46 | 2021-01-19T04:46:46 | 330,797,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | import pandas as pd
from pymongo import MongoClient
import csv
client = pymongo.MongoClient("mongodb+srv://esfot:esfot@cluster0.845nq.mongodb.net/animalsandRecycling?retryWrites=true&w=majority")
try:
client.admin.command('ismaster')
print('MongoDB Atlas connection: Success')
except ConnectionFailure as e:
print('MongoDB Atlas connection: failed', e)
db = client.get_database('elecciones_2021')
collection = db.get_collection('eleccionesgenerales2021')
dfMA = pd.DataFrame(list(collection.find()))
dfMA
dfMA.to_csv('elecciones2021.csv', index=True) | [
"noreply@github.com"
] | noreply@github.com |
3bf9d2b2c05304b6e664170afe451608b5884827 | 56832c284c45fa19ea0de80c38a432271ad6d144 | /app/server.py | 07bc5e35871f29dccf47f1448e1c46b9e6e19bff | [] | no_license | biancarosa/music-stats-api | c42bbf5395e9ace4fade1f6726ba54b893c171b9 | 6e855bab2e07f82c03b13d306f91ad427da1495a | refs/heads/master | 2022-07-11T20:34:04.568634 | 2018-01-09T16:00:15 | 2018-01-09T16:00:15 | 116,841,138 | 0 | 0 | null | 2019-06-09T03:53:59 | 2018-01-09T16:28:48 | Python | UTF-8 | Python | false | false | 828 | py | # encoding: utf-8
"""Inicia aplicaรงรฃo flask."""
from flask import Flask, jsonify
from app.health_check import blueprint as health_check_blueprint
from app import logger
LOG = logger.new(__name__)
def setup():
"""Monta uma API flask e registra seus blueprints."""
LOG.info("Creating API.")
api = Flask(__name__)
LOG.info("Registering blueprints.")
api.register_blueprint(health_check_blueprint.setup())
LOG.info("Registering error handlers.")
api.register_error_handler(Exception, default_error_handler)
LOG.info("Setting up config variables.")
api.config['PROPAGATE_EXCEPTIONS'] = True
return api
def default_error_handler(exception):
LOG.error(exception)
response = jsonify({"success": False, "message": exception.message})
response.status_code =500
return response | [
"me@biancarosa.com.br"
] | me@biancarosa.com.br |
8f2320db05e849c4bc3d43febb2099b487e896cd | f3cd34c154e9d791c9c70ea75ffab43257b4fedc | /Interface.py | 55c08969a3af000664cb5f9c849da86eac2a7231 | [] | no_license | rafatespindola/ProjetoIntegrador2 | b24ed1a86fb2adf8edf2813a4ea402e3a674d0a0 | 2092e86d483178aac53b72454f38cd98cc4c7628 | refs/heads/master | 2020-05-19T15:33:39.853301 | 2019-06-06T21:36:30 | 2019-06-06T21:36:30 | 185,087,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,392 | py | from Communication import *
from Treasure import *
import threading
import time
from os import system, name
#------- atributos do SS --------- #
isRobo = 0
def atualizarmapa(lista):
A = list(lista)
for x in range(0, 8):
for y in range (0, 8):
A = list(lista)
if(y==0):
print(x, end = " ")
elif(x==0):
print (" " + str(y), end = " ")
else:
while(A):
c = A.pop()
if(c.getPosx() == x and c.getPosy() == y):
print(" " + c.id, end = " ")
print(" -", end=" ")
print("\n")
def interface(mode, sendSR):
system('clear')
while(1):
#atualizarmapa(c)
if (mode == "modo,manual"):
print("Robรด Manual - Escolha uma das opรงรตes abaixo:")
entrada = input("W - Mover para frente;\n"
"A - Mover para esquerda;\n"
"S - Mover para trรกs;\n"
"D - Mover para esquerda;\n"
"V - Validar caรงa;\n")
if(entrada):
sendSR.send("c," +entrada)
time.sleep(2)
else:
pass
#atualizarmapa(c)
#teste = Communication('127.0.0.1', 7000, "teste");
#teste.connect();
#------------------------- Dados do SA ------------------------ #
modo = "modo,automatico"
cor = "cor,azul"
local = "cacas,1:1;2:2;1:1;3:3"
posin = "posin,0:0"
### teste lista de caรงas ###
c = list()
#------------------------##
send_toSR = Communication("192.168.43.248", "50009",'toSR')
receive_fromSR = Communication("192.168.43.248", "50008", "fromSR")
send_toSA = Communication("127.0.0.1", "50006",'toSA')
receive_fromSA = Communication("127.0.0.1", "50007", "fromSA")
#receivefromSS = Communication('127.0.0.1', "50009", "fromSS")
interface_t = threading.Thread(target=interface, args=(modo, send_toSR))
#receive_t = threading.Thread(target=recSS.receiveMessage())
#send_t = threading.Thread(target=sendSR.sendMessage)
#atualizarmapa_t = threading.Thread(target=atualizarmapa())
send_toSA.start()
receive_fromSA.start()
send_toSR.start()
receive_fromSR.start()
#ip teles (SR) = 191.36.10.250, porta 7000
#send_toSR.send(modo)
#send_toSR.send(cor)
#send_toSR.send(local)
#------------------------
#interface_t.start()
#print("asd)
#while(1):
# print("Iniciou")
#inicia o jogo enviando configs pro robo
# receiveSR = Communication('127.0.0.1', 7000, 'fromSS')
#receiveSR.receiveMessage()
#sendSR.send(10)
#receiveSR.start()
#print(i)
#inicia robo e menu no manual
#inicia robo e menu no automatico
#mantรฉm menu e lista de caรงas na tela
#inicia thread de atualizaรงรฃo das caรงas (recebe do SA e envia pro SR)
print("Esperando endereรงo MAC do robรด")
while (1):
if(isRobo == 0):
if(receive_fromSR.getConfigList()):
#if (len(receive_fromSR.getConfigList().pop()) == 17):
#receive_fromSR.popConfigList()
if (len(receive_fromSR.popConfigList()) == 17):
print("Endereรงo MAC recebido")
isRobo = 1
send_toSR.send("ack,OK")
time.sleep(2)
send_toSR.send(modo)
send_toSR.send(cor)
send_toSR.send(local)
send_toSR.send(posin)
if(modo == "modo,manual"):
interface_t.start()
else: #apos configurar e startar o robo, verificar listas de recebimento
if (receive_fromSR.getAttlist()): # recebeu alguma atualizacao
pass
if (receive_fromSR.getConfigList()): # recebeu alguma config
pass
if(receive_fromSR.getCommandList()):
msg = receive_fromSR.popCommandList()
if (msg in "vV"):
#resp = input("Existe caca na posicao ")
send_toSR.send("ack, OK")
if(receive_fromSA.getAttlist()):
listatt = receive_fromSA.popAttlist()
send_toSR.send("cacass," + listatt)
#send_toSR.send("ack,OK")
#send_toSR.send(local)
#time.sleep(5)
#break
#pass
#print(send_toSR.getSendList())
#time.sleep(5)
#pass
#print(len(receive_fromSR.getConfigList()))
| [
"rafatespindola@gmail.com"
] | rafatespindola@gmail.com |
8b9709abb675aab988c321e62f856a53d3a011e2 | 37ecd827fc193d9085c491bea4bf51ce971a11fa | /sklearnpractice/linearregression.py | 9d6744ebecbe23ef0ae80713772cc3aff8ecc4b7 | [] | no_license | heblade/learnPython3 | fc01bf024e1f95633c990e398a641d544b6d0b8d | dfd2cd6952f56c346ffe0dbdcf95f4f5eaca517e | refs/heads/master | 2021-09-02T14:44:26.749660 | 2018-01-03T08:31:08 | 2018-01-03T08:31:08 | 105,245,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.datasets import load_boston
from sklearn.metrics import explained_variance_score, mean_squared_error
import numpy as np
import pylab as pl
boston = load_boston()
x = boston.data
y = boston.target
linereg = LinearRegression()
linereg.fit(x, y)
yp = linereg.predict(x)
yp_cv = cross_val_predict(linereg, x, y, cv = 10)
print('linreg.coef_ = ', linereg.coef_)
print('linreg.intercept_ = ', linereg.intercept_) | [
"blade.he@morningstar.com"
] | blade.he@morningstar.com |
ba06bd7292f8e629f4ba451286d9ca78aa821f4b | ea02b744a4c87035fc9819adab761b09255ca8a0 | /2.py | 6a70eaefa6fe1e15dd84984bb761777468d3c0a8 | [] | no_license | tigerthelion/advent-of-code-2019 | 4405d5c7513c8fd984a624fabbd940918a3a23a3 | 016b19894805a71a4c31d799f18bcd5891a93595 | refs/heads/master | 2020-09-28T12:27:56.735899 | 2019-12-09T03:36:25 | 2019-12-09T03:36:25 | 226,778,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | program = [1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,13,1,19,1,6,19,23,2,23,6,27,1,5,27,31,1,10,31,35,2,6,35,39,1,39,13,43,1,43,9,47,2,47,10,51,1,5,51,55,1,55,10,59,2,59,6,63,2,6,63,67,1,5,67,71,2,9,71,75,1,75,6,79,1,6,79,83,2,83,9,87,2,87,13,91,1,10,91,95,1,95,13,99,2,13,99,103,1,103,10,107,2,107,10,111,1,111,9,115,1,115,2,119,1,9,119,0,99,2,0,14,0]
program[1] = 12
program[2] = 2
class IntCode():
def __init__(self, instructions: list) -> None:
self.seek_position = 0
self._instructions: list = instructions
self._register: list = self.load_register()
self.value = None
print(self.register)
while True:
self.value = self.operate()
if self.value:
break
self.seek()
self.load_register()
def read(self, location: int) -> int:
return self._instructions[location]
def write(self, location: int, value: int) -> None:
self._instructions[location] = value
def load_register(self) -> None:
self.register = self._instructions[self.seek_position : self.seek_position + 4]
def seek(self) -> None:
self.seek_position += 4
def operate(self) -> None:
op_code = self.register[0]
ops = {
1: self.one,
2: self.two,
99: self.ninetynine
}
if op_code in list(ops.keys()):
return ops[op_code]()
else:
raise AttributeError(f'Unknown Op Code. {op_code}')
def one(self):
self.write(self.register[3], self.read(self.register[1]) + self.read(self.register[2]))
return None
def two(self):
self.write(self.register[3], self.read(self.register[1]) * self.read(self.register[2]))
return None
def ninetynine(self):
return self._instructions[0]
# prog = IntCode(program)
# print(prog.value)
num_range = [i for i in range(0, 100)]
perms = []
for i in num_range:
for j in num_range:
perms.append((i, j))
num_to_find = 19690720
for perm in perms:
program[1], program[2] = perm
prog = IntCode(program[:])
if prog.value == num_to_find:
print(perm)
print(100 *perm[0] + perm[1])
break | [
"bjt.thompson@gmail.com"
] | bjt.thompson@gmail.com |
87c0bb86ab7e49b7da4231abad6364ea302f122e | b5b1be6063901bd0bc97c3fbc2c26be1d02ce79e | /output/__init__.py | e9b0cb52d8081c4e39506efa55060fb586d86b77 | [] | no_license | dssg/rcra | ff566ff388596a733757e4de27631cbabdc7f15c | fcdd8f95c25902e46c55d85cbc4fe54196163f3d | refs/heads/master | 2021-06-11T13:09:01.686456 | 2017-02-24T05:19:51 | 2017-02-24T05:19:51 | 61,817,642 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from drain.data import FromSQL
facilities = FromSQL("""
select rcra_id, zip_code, dedupe_id as entity_id
from output.facilities
join dedupe.unique_map using (rcra_id)
""", tables=['output.facilities', 'output.handler_names'])
facilities.target=True
| [
"eric@k2co3.net"
] | eric@k2co3.net |
919f7d6944f73974b7fe12fde93dea2c2b2ab328 | 738aee780a09095cf78f43383b74fe31968b3e61 | /fundamentals/fundamentals/function_intermediate_i.py | 266817c272af4ad0063712ca1fffb5c8e96020c8 | [] | no_license | csmatthews1/Python | 6940c710b9bb282044b109c4bf0c903344aedf89 | 0abaa4ff0b57f6ea23e2d10154d94059c1c4372c | refs/heads/main | 2023-08-06T04:15:12.218307 | 2021-10-11T17:10:53 | 2021-10-11T17:10:53 | 401,413,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | x = [ [5,2,3], [10,8,9] ]
# Practice data for the "functions intermediate" assignment.
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'}
]
sports_directory = {
    'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],
    'soccer' : ['Messi', 'Ronaldo', 'Rooney']
}
z = [ {'x': 10, 'y': 20} ]

# Update values in dictionaries and lists (all in place).
x[1][0] = 15                              # 10 -> 15 in the nested list
students[0]['last_name'] = 'Bryant'       # Jordan -> Bryant
sports_directory['soccer'][0]='Andres'    # Messi -> Andres
z[0]['y'] = 30                            # 20 -> 30

# Fresh roster used by the iteration exercises below.
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'},
    {'first_name' : 'Mark', 'last_name' : 'Guillen'},
    {'first_name' : 'KB', 'last_name' : 'Tonel'}
]
def iterateDictionary (students):
    """Print one "first name - X, last_name - Y" line per student dict."""
    for record in students:
        print(f"first name - {record['first_name']}, last_name - {record['last_name']}")
iterateDictionary(students)
#Get Values from a List of Dictionaries
def iterateDictionary2 (key_name, some_list):
    """Print the value stored under key_name for every dict in some_list."""
    for entry in some_list:
        print(entry[key_name])
iterateDictionary2 ("first_name", students)
#Iterate through a Dictionary with List Values
dojo = {
'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],
'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']
}
def printInfo(some_dict):
    """For each key: print "<count> <KEY>", then each item on its own line,
    then a blank separator line."""
    for category in some_dict:
        values = some_dict[category]
        print(str(len(values)), category.upper())
        for value in values:
            print(value)
        print()
printInfo(dojo)
| [
"86792473+csmatthews1@users.noreply.github.com"
] | 86792473+csmatthews1@users.noreply.github.com |
d14fb1d43dc3d360f35e8f8cf30463263d7b729e | 6944866ffa5b6899c812cbd06929cd29896f557d | /oauth_dropins/mastodon.py | 567dea787adeec4c951c61cbd9db7e18b92723d2 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | stedn/oauth-dropins | f7b329c69f79297dfc6f8060c10d2b069c5cb0a8 | afe5cc946e2d969a37d83e2a093195c1d5cf27db | refs/heads/master | 2021-05-23T15:56:22.354077 | 2020-03-27T17:40:51 | 2020-03-27T17:40:51 | 253,368,311 | 2 | 0 | Unlicense | 2020-04-06T01:31:32 | 2020-04-06T01:31:32 | null | UTF-8 | Python | false | false | 12,423 | py | """Mastodon OAuth drop-in.
Mastodon is an ActivityPub implementation, but it also has a REST + OAuth 2 API
independent of AP. Uh, ok, sure.
API docs: https://docs.joinmastodon.org/api/
Interestingly: as usual w/OAuth, they require registering apps beforehand...but
since AP and Mastodon are decentralized, there's no single place to register an
app. So they have an API for registering apps, per instance:
https://docs.joinmastodon.org/api/authentication/
Surprising, and unusual, but makes sense.
"""
import logging
from urllib.parse import quote_plus, unquote, urlencode, urljoin, urlparse, urlunparse
from google.cloud import ndb
import requests
from webob import exc
from . import handlers
from .models import BaseAuth
from .webutil import appengine_info, util
from .webutil.util import json_dumps, json_loads
# https://docs.joinmastodon.org/api/permissions/
ALL_SCOPES = (
'read',
'read:accounts',
'read:blocks',
'read:favourites',
'read:filters',
'read:follows',
'read:lists',
'read:mutes',
'read:notifications',
'read:reports',
'read:search',
'read:statuses',
'write',
'write:accounts',
'write:blocks',
'write:favourites',
'write:filters',
'write:follows',
'write:lists',
'write:media',
'write:mutes',
'write:notifications',
'write:reports',
'write:statuses',
'follow',
'push',
)
INSTANCE_API = '/api/v1/instance'
REGISTER_APP_API = '/api/v1/apps'
VERIFY_API = '/api/v1/accounts/verify_credentials'
# URL templates. Can't (easily) use urlencode() because I want to keep
# the %(...)s placeholders as is and fill them in later in code.
AUTH_CODE_API = '&'.join((
'/oauth/authorize?'
'response_type=code',
'client_id=%(client_id)s',
'client_secret=%(client_secret)s',
# https://docs.microsoft.com/en-us/linkedin/shared/integrations/people/profile-api?context=linkedin/consumer/context#permissions
'scope=%(scope)s',
# must be the same in the access token request
'redirect_uri=%(redirect_uri)s',
'state=%(state)s',
))
ACCESS_TOKEN_API = '/oauth/token'
def _encode_state(app, state):
    """Wrap the client's OAuth state together with the MastodonApp key.

    The wrapper is a JSON object so the callback can recover which instance's
    registered app this flow belongs to.
    """
    payload = {
        'app_key': app.key.urlsafe().decode(),
        'state': quote_plus(state),
    }
    encoded = json_dumps(payload)
    logging.debug('Encoding wrapper state: %r', encoded)
    return encoded
def _decode_state(state):
    """Inverse of _encode_state: return (app_key, original client state)."""
    logging.debug('Decoding wrapper state: %r', state)
    parsed = json_loads(state)
    return parsed['app_key'], unquote(parsed['state'])
class MastodonApp(ndb.Model):
    """A Mastodon API OAuth2 app registered with a specific instance.

    Mastodon is decentralized, so an OAuth app must be registered per
    instance; one entity is stored per (instance, app_url) pair.
    """
    instance = ndb.StringProperty(required=True)  # URL, eg https://mastodon.social/
    data = ndb.TextProperty(required=True)  # JSON; includes client id/secret
    instance_info = ndb.TextProperty()  # JSON; from /api/v1/instance
    app_url = ndb.StringProperty()   # web site of the application that registered
    app_name = ndb.StringProperty()  # user-visible name shown in the OAuth prompt
    created_at = ndb.DateTimeProperty(auto_now_add=True, required=True)
class MastodonAuth(BaseAuth):
    """An authenticated Mastodon user.

    Provides methods that return information about this user and make OAuth-signed
    requests to the Mastodon REST API. Stores OAuth credentials in the datastore.
    See models.BaseAuth for usage details.

    Key name is the fully qualified actor address, ie @username@instance.tld.

    Implements get() and post() but not urlopen() or api().
    """
    app = ndb.KeyProperty()  # key of the MastodonApp this login went through
    access_token_str = ndb.TextProperty(required=True)  # OAuth bearer token
    user_json = ndb.TextProperty()  # /api/v1/accounts/verify_credentials response

    def site_name(self):
        return 'Mastodon'

    def user_display_name(self):
        """Returns the user's full ActivityPub address, eg @ryan@mastodon.social."""
        return self.key.id()

    def instance(self):
        """Returns the instance base URL, eg https://mastodon.social/."""
        return self.app.get().instance

    def username(self):
        """Returns the user's username, eg ryan."""
        return json_loads(self.user_json).get('username')

    def user_id(self):
        """Returns the user's id, eg 123."""
        return json_loads(self.user_json).get('id')

    def access_token(self):
        """Returns the OAuth access token string."""
        return self.access_token_str

    def get(self, *args, **kwargs):
        """Wraps requests.get() and adds instance base URL and Bearer token header."""
        url = urljoin(self.instance(), args[0])
        return self._requests_call(util.requests_get, url, *args[1:], **kwargs)

    def post(self, *args, **kwargs):
        """Wraps requests.post() and adds the Bearer token header."""
        return self._requests_call(util.requests_post, *args, **kwargs)

    def _requests_call(self, fn, *args, **kwargs):
        # Shared implementation for get()/post(): inject the OAuth Bearer
        # header, invoke the requests helper, and surface HTTP errors via
        # util.interpret_http_exception before re-raising.
        headers = kwargs.setdefault('headers', {})
        headers['Authorization'] = 'Bearer ' + self.access_token_str

        resp = fn(*args, **kwargs)
        try:
            resp.raise_for_status()
        except BaseException as e:
            util.interpret_http_exception(e)
            raise
        return resp
class StartHandler(handlers.StartHandler):
    """Starts Mastodon auth. Requests an auth code and expects a redirect back.

    Attributes:
      DEFAULT_SCOPE: string, default OAuth scope(s) to request
      REDIRECT_PATHS: sequence of string URL paths (on this host) to register as
        OAuth callback (aka redirect) URIs in the OAuth app
      SCOPE_SEPARATOR: string, used to separate multiple scopes
    """
    NAME = 'mastodon'
    LABEL = 'Mastodon'
    DEFAULT_SCOPE = 'read:accounts'
    REDIRECT_PATHS = ()
    SCOPE_SEPARATOR = ' '

    @classmethod
    def to(cls, path, **kwargs):
        return super(StartHandler, cls).to(path, **kwargs)

    def app_name(self):
        """Returns the user-visible name of this application.

        To be overridden by subclasses. Displayed in Mastodon's OAuth prompt.
        """
        return 'oauth-dropins demo'

    def app_url(self):
        """Returns this application's web site.

        To be overridden by subclasses. Displayed in Mastodon's OAuth prompt.
        """
        return self.request.host_url

    def redirect_url(self, state=None, instance=None):
        """Returns the local URL for Mastodon to redirect back to after OAuth prompt.

        Args:
          state: string, user-provided value to be returned as a query parameter in
            the return redirect
          instance: string, Mastodon instance base URL, e.g.
            'https://mastodon.social'. May also be provided in the 'instance'
            request as a URL query parameter or POST body.

        Raises: ValueError if instance isn't a Mastodon instance.
        """
        # normalize instance to URL
        if not instance:
            instance = util.get_required_param(self, 'instance')

        instance = instance.strip().split('@')[-1]  # handle addresses, eg user@host.com
        parsed = urlparse(instance)
        if not parsed.scheme:
            instance = 'https://' + instance

        # fetch instance info from this instance's API (mostly to test that it's
        # actually a Mastodon instance)
        try:
            resp = util.requests_get(urljoin(instance, INSTANCE_API))
        except requests.RequestException as e:
            logging.info('Error', stack_info=True)
            resp = None

        is_json = resp and resp.headers.get('Content-Type', '').strip().startswith(
            'application/json')
        if is_json:
            logging.info(resp.text)
        # Bug fix: default 'version' to '' — a JSON response without a
        # 'version' field used to make `'Pixelfed' in None` raise TypeError
        # instead of reporting "doesn't look like a Mastodon instance."
        if (not resp or not resp.ok or not is_json or
                # Pixelfed (https://pixelfed.org/) pretends to be Mastodon but isn't
                'Pixelfed' in resp.json().get('version', '')):
            msg = "%s doesn't look like a Mastodon instance." % instance
            logging.info(resp)
            logging.info(msg)
            raise ValueError(msg)

        # if we got redirected, update instance URL
        parsed = list(urlparse(resp.url))
        parsed[2] = '/'  # path
        instance = urlunparse(parsed)

        app_name = self.app_name()
        app_url = self.app_url()
        query = MastodonApp.query(MastodonApp.instance == instance,
                                  MastodonApp.app_url == app_url)
        if appengine_info.DEBUG:
            # disambiguate different apps in dev_appserver, since their app_url will
            # always be localhost
            query = query.filter(MastodonApp.app_name == app_name)
        app = query.get()
        if not app:
            app = self._register_app(instance, app_name, app_url)

        # cache the instance metadata on every (re)use of the app
        app.instance_info = resp.text
        app.put()

        logging.info('Starting OAuth for Mastodon instance %s', instance)
        app_data = json_loads(app.data)
        return urljoin(instance, AUTH_CODE_API % {
            'client_id': app_data['client_id'],
            'client_secret': app_data['client_secret'],
            'redirect_uri': quote_plus(self.to_url()),
            'state': _encode_state(app, state),
            'scope': self.scope,
        })

    def _register_app(self, instance, app_name, app_url):
        """Register a Mastodon API app on a specific instance.

        https://docs.joinmastodon.org/api/rest/apps/

        Args:
          instance: string
          app_name: string
          app_url: string

        Returns: MastodonApp
        """
        logging.info("first time we've seen Mastodon instance %s with app %s %s! "
                     "registering an API app.", instance, app_name, app_url)

        redirect_uris = set(urljoin(self.request.host_url, path)
                            for path in set(self.REDIRECT_PATHS))
        redirect_uris.add(self.to_url())

        resp = util.requests_post(
            urljoin(instance, REGISTER_APP_API),
            data=urlencode({
                'client_name': app_name,
                # Mastodon uses Doorkeeper for OAuth, which allows registering
                # multiple redirect URIs, separated by newlines.
                # https://github.com/doorkeeper-gem/doorkeeper/pull/298
                # https://docs.joinmastodon.org/api/rest/apps/
                'redirect_uris': '\n'.join(redirect_uris),
                'website': app_url,
                # https://docs.joinmastodon.org/api/permissions/
                'scopes': self.SCOPE_SEPARATOR.join(ALL_SCOPES),
            }))
        resp.raise_for_status()

        app_data = json_loads(resp.text)
        logging.info('Got %s', app_data)
        app = MastodonApp(instance=instance, app_name=app_name,
                          app_url=app_url, data=json_dumps(app_data))
        app.put()
        return app

    @classmethod
    def button_html(cls, *args, **kwargs):
        """Adds a Mastodon-instance text input next to the standard button."""
        kwargs['form_extra'] = kwargs.get('form_extra', '') + """
<input type="url" name="instance" class="form-control" placeholder="Mastodon instance" scheme="https" required style="width: 150px; height: 50px; display:inline;" />"""
        return super(cls, cls).button_html(
            *args, input_style='background-color: #EBEBEB; padding: 5px', **kwargs)
class CallbackHandler(handlers.CallbackHandler):
    """The OAuth callback. Fetches an access token and stores it."""

    def get(self):
        # Recover the client's original state and the MastodonApp key we
        # stashed in the wrapper state before redirecting to the instance.
        app_key, state = _decode_state(util.get_required_param(self, 'state'))

        # handle errors
        error = self.request.get('error')
        desc = self.request.get('error_description')
        if error:
            # user_cancelled_login and user_cancelled_authorize are non-standard.
            # https://tools.ietf.org/html/rfc6749#section-4.1.2.1
            if error in ('user_cancelled_login', 'user_cancelled_authorize', 'access_denied'):
                logging.info('User declined: %s', self.request.get('error_description'))
                self.finish(None, state=state)
                return
            else:
                msg = 'Error: %s: %s' % (error, desc)
                logging.info(msg)
                raise exc.HTTPBadRequest(msg)

        app = ndb.Key(urlsafe=app_key).get()
        assert app
        app_data = json_loads(app.data)

        # extract auth code and request access token
        auth_code = util.get_required_param(self, 'code')
        data = {
            'grant_type': 'authorization_code',
            'code': auth_code,
            'client_id': app_data['client_id'],
            'client_secret': app_data['client_secret'],
            # redirect_uri here must be the same in the oauth code request!
            # (the value here doesn't actually matter since it's requested server side.)
            'redirect_uri': self.request.path_url,
        }
        resp = util.requests_post(urljoin(app.instance, ACCESS_TOKEN_API),
                                  data=urlencode(data))
        resp.raise_for_status()
        resp_json = resp.json()
        logging.debug('Access token response: %s', resp_json)
        if resp_json.get('error'):
            raise exc.HTTPBadRequest(resp_json)

        # verify the token and fetch the user's profile, then store the
        # credentials keyed by the full @user@instance address.
        access_token = resp_json['access_token']
        user = MastodonAuth(app=app.key, access_token_str=access_token).get(VERIFY_API).json()
        logging.debug('User: %s', user)
        address = '@%s@%s' % (user['username'], urlparse(app.instance).netloc)
        auth = MastodonAuth(id=address, app=app.key, access_token_str=access_token,
                            user_json=json_dumps(user))
        auth.put()
        self.finish(auth, state=state)
| [
"git@ryanb.org"
] | git@ryanb.org |
64b3f19b920cf2ef85bbd55f568c52dfebcb448e | 23928301ed2ff498252cf400fb22af77bc237656 | /meiduo_api/apps/contents/admin.py | 2cd59eb409e561dab8e793bfcfbc1ba7e5048281 | [] | no_license | lanxuyang/meiduo_site | 261657ad99d4fb5cecf072361f8fbb3499360908 | 777401235ebbeb99f822cafe47531ce71fe4def9 | refs/heads/master | 2020-09-22T02:30:57.617206 | 2019-03-14T00:45:30 | 2019-03-14T00:45:30 | 225,018,316 | 1 | 0 | null | 2019-11-30T13:42:21 | 2019-11-30T13:42:20 | null | UTF-8 | Python | false | false | 134 | py | from django.contrib import admin
from . import models
# Expose the content models in the Django admin site.
admin.site.register(models.ContentCategory)
admin.site.register(models.Content)
"jerry.liubing@gmail.com"
] | jerry.liubing@gmail.com |
d3496fd26e6b3bb167a75d8585d01b0454ce14ec | 27dd31929023cea60b481f2e0c05f1394cd2691a | /netchecker.py | 2dd00212e3fe5341cc3861aff1ff2b2dcd9d9ba7 | [] | no_license | adminempire/pentestpi-port | f11c4c777bb69df32f6f9008bc6b1e4c912485df | 1245b0aca94588226da0ed34bdcba2d0bd572964 | refs/heads/master | 2021-01-20T06:06:09.926404 | 2014-03-26T06:33:01 | 2014-03-26T06:33:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | #!/usr/bin/env python2
import subprocess
import re
# Constants
IWCONFIG = "iwconfig"
IFCONFIG = "ifconfig"
ipre = re.compile("addr:[0-9.]+")
class Device():
    """Class that defines a network device (interface name + assigned IP)."""
    ifname = None  # interface name, eg "eth0"
    ip = None      # dotted-quad IP string, filled in later from ifconfig output

    def __init__(self, ifname):
        self.ifname = ifname
if __name__ == "__main__":
"""Main function"""
# Get iwconfig output - no params results in error, but it's what we want
tmp = subprocess.Popen(IWCONFIG, stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[1]
# Split into array
tmp2 = tmp.split("\n")
# Remove empty indexes from devices list
tmp3 = [x for x in tmp2 if x != '']
# Clean indexs of excess whitespace
devices = []
for i in tmp3:
devices.append(i.strip())
# Parse iwconfig output
deviceList = []
for device in devices:
tmp = device.replace(' ', ":", 1)
tmp2 = tmp.split(':')
desc = tmp2[1].strip()
ifname = tmp2[0].strip()
if ifname != "lo":
if "no wireless" in desc:
dev = Device(ifname)
deviceList.append(dev)
# Get ifconfig output and parse
tmp = subprocess.Popen(IFCONFIG, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
tmp2 = re.sub("\n\n", "---", tmp)
ifoutput = tmp2.split("---")
# Parse output for ip and insert in device objects
for device in deviceList:
for i in ifoutput:
if device.ifname in i:
tmp = ipre.search(i)
if tmp is not None:
device.ip = tmp.group().split(":")[1]
# Checks if device has an active connection, the prints output
for device in deviceList:
if device.ip is not None:
print "Wired: True"
print "Interface:", device.ifname
print "IP:", device.ip + "\n"
| [
"jl@adminempire.com"
] | jl@adminempire.com |
7772812ab1b3ee82383dff5bf42fefc38b7d651f | d6474e6ecd24ca291a62371621535cc6b344bc4b | /cs61a/projects/scheme/scheme_reader.py | 6def4ce2c43c88d1cc1421c9042e461c0cba2750 | [] | no_license | williamwang0/Coursework | 1d14121dab1a843f4b82755972c6eb98228bb901 | a8eec4cd3f3175062aad64579dd747afee00826a | refs/heads/master | 2020-07-08T15:41:37.923352 | 2019-08-31T23:31:33 | 2019-08-31T23:31:33 | 203,714,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,448 | py | """This module implements the built-in data types of the Scheme language, along
with a parser for Scheme expressions.
In addition to the types defined in this file, some data types in Scheme are
represented by their corresponding type in Python:
number: int or float
symbol: string
boolean: bool
unspecified: None
The __repr__ method of a Scheme value will return a Python expression that
would be evaluated to the value, where possible.
The __str__ method of a Scheme value will return a Scheme expression that
would be read to the value, where possible.
"""
from __future__ import print_function # Python 2 compatibility
import numbers
from ucb import main, trace, interact
from scheme_tokens import tokenize_lines, DELIMITERS
from buffer import Buffer, InputReader, LineReader
import scheme
# Pairs and Scheme lists
class Pair(object):
    """A pair has two instance attributes: first and second. Second must be a Pair or nil

    >>> s = Pair(1, Pair(2, nil))
    >>> s
    Pair(1, Pair(2, nil))
    >>> print(s)
    (1 2)
    >>> print(s.map(lambda x: x+4))
    (5 6)
    """
    def __init__(self, first, second):
        from scheme_builtins import scheme_valid_cdrp, SchemeError
        # Bug fix: the original tested `type(x).__name__`, but no name `x`
        # exists in this scope, so any cdr that was neither nil nor a Pair
        # raised NameError instead of the intended SchemeError. The promise
        # check must inspect `second`.
        if not (second is nil or isinstance(second, Pair) or type(second).__name__ == 'Promise'):
            raise SchemeError("cdr can only be a pair, nil, or a promise but was {}".format(second))
        self.first = first
        self.second = second

    def __repr__(self):
        return 'Pair({0}, {1})'.format(repr(self.first), repr(self.second))

    def __str__(self):
        # Render as a Scheme list, falling back to dotted notation for an
        # improper list (one whose final cdr is not nil).
        s = '(' + repl_str(self.first)
        second = self.second
        while isinstance(second, Pair):
            s += ' ' + repl_str(second.first)
            second = second.second
        if second is not nil:
            s += ' . ' + repl_str(second)
        return s + ')'

    def __len__(self):
        # Only well-formed (nil-terminated) lists have a length.
        n, second = 1, self.second
        while isinstance(second, Pair):
            n += 1
            second = second.second
        if second is not nil:
            raise TypeError('length attempted on improper list')
        return n

    def __eq__(self, p):
        if not isinstance(p, Pair):
            return False
        return self.first == p.first and self.second == p.second

    def map(self, fn):
        """Return a Scheme list after mapping Python function FN to SELF."""
        mapped = fn(self.first)
        if self.second is nil or isinstance(self.second, Pair):
            return Pair(mapped, self.second.map(fn))
        else:
            raise TypeError('ill-formed list (cdr is a promise)')
class nil(object):
    """The empty list. A singleton: the class name is rebound to its only
    instance immediately below, so `nil` always refers to that instance."""
    def __repr__(self):
        return 'nil'

    def __str__(self):
        return '()'

    def __len__(self):
        return 0

    def map(self, fn):
        # Mapping over the empty list yields the empty list.
        return self

nil = nil() # Assignment hides the nil class; there is only one instance
# Scheme list parser
# Quotation markers
quotes = {"'": 'quote',
'`': 'quasiquote',
',': 'unquote'}
def scheme_read(src):
    """Read the next expression from SRC, a Buffer of tokens.

    >>> scheme_read(Buffer(tokenize_lines(['nil'])))
    nil
    >>> scheme_read(Buffer(tokenize_lines(['1'])))
    1
    >>> scheme_read(Buffer(tokenize_lines(['true'])))
    True
    >>> scheme_read(Buffer(tokenize_lines(['(+ 1 2)'])))
    Pair('+', Pair(1, Pair(2, nil)))
    """
    if src.current() is None:
        raise EOFError
    token = src.remove_front()  # consume the next token
    if token == 'nil':
        # The literal empty list.
        return nil
    if token == '(':
        # Start of a combination: read the remainder of the list.
        return read_tail(src)
    if token in quotes:
        # ', ` or , — desugar into (quote/quasiquote/unquote <expr>).
        return Pair(quotes[token], Pair(scheme_read(src), nil))
    if token not in DELIMITERS:
        # A primitive value or symbol.
        return token
    raise SyntaxError('unexpected token: {0}'.format(token))
def read_tail(src):
    """Return the remainder of a list in SRC, starting before an element or ).

    >>> read_tail(Buffer(tokenize_lines([')'])))
    nil
    >>> read_tail(Buffer(tokenize_lines(['2 3)'])))
    Pair(2, Pair(3, nil))
    """
    try:
        token = src.current()
        if token is None:
            raise SyntaxError('unexpected end of file')
        if token == ')':
            # Close paren: consume it and terminate the list with nil.
            src.remove_front()
            return nil
        # Otherwise read one element, then recursively read the rest.
        head = scheme_read(src)
        rest = read_tail(src)
        return Pair(head, rest)
    except EOFError:
        raise SyntaxError('unexpected end of file')
# Convenience methods
def buffer_input(prompt='scm> '):
    """Return a Buffer instance containing interactive input.

    prompt: string displayed before each line the user types.
    """
    return Buffer(tokenize_lines(InputReader(prompt)))
def buffer_lines(lines, prompt='scm> ', show_prompt=False):
    """Return a Buffer instance iterating through LINES.

    When show_prompt is False, lines are wrapped in a LineReader that echoes
    them behind the prompt as if typed interactively.
    """
    source = lines if show_prompt else LineReader(lines, prompt)
    return Buffer(tokenize_lines(source))
def read_line(line):
    """Read a single string LINE as a Scheme expression.

    Only the first complete expression is returned; any remaining tokens in
    the (discarded) buffer are ignored.
    """
    return scheme_read(Buffer(tokenize_lines([line])))
def repl_str(val):
    """Should largely match str(val), except for booleans and undefined."""
    # The Scheme singletons need identity checks (1 == True but 1 is not True).
    for singleton, text in ((True, "#t"), (False, "#f"), (None, "undefined")):
        if val is singleton:
            return text
    nonintegral_number = (isinstance(val, numbers.Number)
                          and not isinstance(val, numbers.Integral))
    # repr() for non-integral numbers: Python 2 compatibility.
    return repr(val) if nonintegral_number else str(val)
# Interactive loop
def read_print_loop():
    """Run a read-print loop for Scheme expressions."""
    while True:
        try:
            src = buffer_input('read> ')
            while src.more_on_line:
                expression = scheme_read(src)
                if expression == 'exit':
                    print()
                    return
                # Show both the Scheme rendering and the Python repr.
                print('str :', expression)
                print('repr:', repr(expression))
        except (SyntaxError, ValueError) as err:
            # Report parse errors and keep looping.
            print(type(err).__name__ + ':', err)
        except (KeyboardInterrupt, EOFError):  # <Control>-D, etc.
            print()
            return
@main
def main(*args):
    # ucb's @main decorator presumably invokes this with the command-line
    # args when the module runs as a script — TODO confirm against ucb.py.
    if len(args) and '--repl' in args:
        read_print_loop()
"wwang0430@berkeley.edu"
] | wwang0430@berkeley.edu |
5820da32b2e5e20654de32623f69f4536d704f73 | 8799e7fb96abb8922d40c8534553887b846870d0 | /client/game/MouseGame.py | f9eacca90bc62d4776bcea50cd64dbc515b0f758 | [] | no_license | awakening95/Catch-Catch | 7d0e5c0b329890954715a633f3547f021b60cd94 | 87bc4403bfa92b559dfbe7bc8d773b8a318cad29 | refs/heads/master | 2020-09-13T12:19:17.822630 | 2019-12-09T04:50:49 | 2019-12-09T04:50:49 | 222,775,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,381 | py | #!/usr/bin/evn python
import pygame
import asyncio
import random
import math
import pyautogui
import socket
import os
import glob
live_list = [True, False, False, False, False, False]  # per-mouse alive flags (index = mouse id)
control_live = False  # alive flag for the joystick-controlled mouse

WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# Play-field and sprite dimensions in pixels.
pad_width = 1280 # 1024, 512 640, 360
pad_height = 720
mouse_width = 49 # 64, 64
mouse_height = 114
pang_width = 220
pang_height = 220

dirpath = os.path.dirname(os.path.realpath(__file__))

gamepad = pygame.display.set_mode((pad_width, pad_height), pygame.FULLSCREEN) #, pygame.FULLSCREEN

socket_switch = True  # when False, run without the companion server

control_angle = 0  # joystick heading in degrees, received from the server
control_speed = 5  # joystick speed, received from the server

# Cat hitbox corners, refreshed from the server every frame.
X0 = 0
Y0 = 0
X1 = 0
Y1 = 0

socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Networking ------------------------------
if socket_switch:
    socket.connect(("127.0.0.1", 9505))
# ์ฅ๊ฐ ๋ณด๊ณ ์๋ ๋ฐฉํฅ์ ๊ณ ์์ด๊ฐ ์๋์ง ํ๋ณํ๋ ํจ์
def isCatDirection(mx, my, lx, ly, x1, y1, x2, y2):
    """Return True when the cat lies in the mouse's direction of travel.

    (mx, my): mouse position; (lx, ly): the mouse's leader/target point;
    (x1, y1)-(x2, y2): the cat's bounding box. The travel line is tested
    against the box, except when the mouse has already passed the box
    center (i.e. it is behind the cat), which counts as safe.
    """
    global cat_x, cat_y
    # print("received cat box", x1, y1, x2, y2)
    # Vertical travel line
    if lx - mx == 0:
        if x1 <= lx and lx <= x2:
            # Mouse is behind the cat's vertical center -> safe
            if ((my < ly) and (my > ((y1 + y2) / 2))) or ((my > ly) and (my < ((y1 + y2) / 2))):
                return False
            return True
    # Horizontal travel line
    elif ly - my == 0:
        if y1 <= ly and ly <= y2:
            # Mouse is behind the cat's horizontal center -> safe
            if ((mx < lx) and (mx > ((x1 + x2) / 2))) or ((mx > lx) and (mx < ((x1 + x2) / 2))):
                return False
            return True
    else:
        if (x2 - x1) == 0:  # degenerate (zero-width) box: treated as safe
            pass
        else:  # general diagonal travel line
            a = (ly - my) / (lx - mx)  # slope of the travel line
            d = my - (a * mx)  # y-intercept
            a2 = (y2 - y1) / (x2 - x1)  # slope of the box diagonal
            if (a < a2) and (a > (-a2)):  # mostly-horizontal travel: test vertical box edges
                if ((y1 <= (a * x1 + d)) and ((a * x1 + d) <= y2)) or ((y1 <= (a * x2 + d)) and ((a * x2 + d) <= y2)):
                    if ((mx < lx) and (mx > ((x1 + x2) / 2))) or (
                            (mx > lx) and (mx < ((x1 + x2) / 2))):  # mouse behind the cat
                        return False
                    return True
            else:  # mostly-vertical travel: test horizontal box edges
                if ((x1 <= ((y1 - d) / a)) and (((y1 - d) / a) <= x2)) or (
                        (x1 <= ((y2 - d) / a)) and (((y2 - d) / a) <= x2)):
                    if ((my < ly) and (my > ((y1 + y2) / 2))) or (
                            (my > ly) and (my < ((y1 + y2) / 2))):  # mouse behind the cat
                        return False
                    return True
    # Safe direction
    return False
def direction_decision(my_direction, goal_direction):
    """Pick the shorter turn from my_direction toward goal_direction.

    Both angles are in degrees within [0, 360). Returns 1 for a left
    (counter-clockwise) turn, -1 for a right (clockwise) turn.
    """
    if my_direction <= 180:
        # Left turn iff the goal sits in the half-circle just ahead of us.
        if my_direction < goal_direction <= my_direction + 180:
            return 1
        return -1
    # my_direction > 180: the "ahead" half-circle wraps past 360.
    if goal_direction > my_direction or goal_direction <= my_direction - 180:
        return 1
    return -1
# ํ๋ฉด์ ๊ฐ์ฒด ๊ทธ๋ฆฌ๊ธฐ
def instantiate(object, x, y):
    """Draw *object* (a pygame Surface) onto the global gamepad at (x, y)."""
    gamepad.blit(object, (x, y))
# ๋๋ค ์ขํ ์์ฑ ํจ์
def random_destination(x, y):
    """Return a random [x, y] point with each coordinate drawn uniformly
    from 0 to the given bound, inclusive."""
    return [random.randint(0, x), random.randint(0, y)]
# ์ฅ ํด๋์ค
class Mouse():
    """An AI-driven mouse that wanders toward random leader points while
    avoiding the cat's hitbox, and explodes ("pang") when caught."""
    global X0, X1, Y0, Y1, live_list
    default_mspeed = 10

    def __init__(self, live, idx):
        # Alive flag and slot index; mirror the flag into the global list.
        self.live = live
        self.idx = idx
        live_list[idx] = live
        self.mspeed = self.default_mspeed
        # Random spawn position.
        self.x, self.y = random_destination(pad_width - mouse_width, pad_height - mouse_height)
        self.is_rotating = False
        self.angle_sum = 0              # accumulated rotation applied to the sprite
        self.mouse_angle = 0            # target heading in degrees
        self.absolute_angle_sum = 0     # angle_sum normalized to [0, 360)
        # rotation direction (1: clockwise, -1: counter-clockwise)
        self.rotation_direction = 1
        self.rotation_mouse = None      # rotated sprite surface (0 -> None)
        # Current movement target ("leader" point).
        self.leader_x, self.leader_y = random_destination(pad_width - mouse_width, pad_height - mouse_height)
        # Where the mouse died (explosion anchor).
        self.pang_point = None
        # Explosion animation frames and pacing.
        self.panglist = ratpanglist
        self.pang_max = len(ratpang)
        self.default_pang_speed = 2
        self.pang_speed = self.default_pang_speed
        self.pang_ani_pos = None        # None until caught; then frame index
        self.ani_pos = 0                # current walking-animation frame index
        self.ani_max = len(ratani) - 1
        self.default_ani_speed = 1
        self.ani_speed = self.default_ani_speed
        self.imagelist = ratimagelist
        self.deading = False            # True while the death animation plays
        self._pos = None

    def update(self):
        # Per-frame step: move, rotate, animate, draw, and handle capture.
        if self.live:
            a = self.leader_x - self.x  # displacement to the leader point
            b = self.leader_y - self.y
            # NOTE(review): inverse distance; raises if the mouse sits exactly
            # on the leader point — presumably prevented by re-randomizing the
            # leader when close (see bottom of this method).
            c = math.pow(math.pow(a, 2) + math.pow(b, 2), -0.5)
            dx = a * c * self.mspeed
            dy = b * c * self.mspeed
            self.x += dx  # move
            self.y += dy
            # Rotation toward the travel direction.
            if self.is_rotating == True:  # every frame while turning
                if self.absolute_angle_sum >= self.mouse_angle - 10 and self.absolute_angle_sum <= self.mouse_angle + 10:
                    # Reached the target heading (within 10 degrees).
                    self.is_rotating = False
                else:
                    self.angle_sum = self.angle_sum + 8 * self.rotation_direction  # turn rate (8 deg/frame)
            elif self.is_rotating == False:  # runs once to start a turn
                self.is_rotating = True
                self.mouse_angle = math.degrees(
                    math.atan2(a, b) + math.pi)
                self.rotation_direction = direction_decision(self.absolute_angle_sum, self.mouse_angle)
            self.absolute_angle_sum = abs(self.angle_sum % 360)
            # Advance the walking animation.
            self.image = self.imagelist[self.ani_pos]
            self.ani_speed -= 1
            if self.ani_speed <= 0:
                self.ani_speed = self.default_ani_speed
                if self.ani_pos == self.ani_max:
                    self.ani_pos = 0
                else:
                    self.ani_pos += 1
            self.rotation_mouse = pygame.transform.rotate(self.image, self.angle_sum)
            self._pos = self.rotation_mouse.get_rect()
            self._pos.center = (self.x, self.y)
            if not self.deading:
                if self.x >= X0 and self.x <= X1 and self.y >= Y0 and self.y <= Y1:  # caught by the cat
                    self.deading = True
                    meat_punch_sound.play()
                    # Remember where we died and start the explosion.
                    self.pang_point = [self.x, self.y]
                    self.pang_ani_pos = 0
                    self.mspeed = 0
                else:
                    gamepad.blit(self.rotation_mouse, self._pos)
            if self.pang_ani_pos is not None:
                # Play the explosion ("pang") animation frame by frame.
                self.pang_speed -= 1
                self.pang = self.panglist[self.pang_ani_pos]
                self.pang_pos = self.pang.get_rect()
                self.pang_pos.center = (self.pang_point[0], self.pang_point[1])
                if self.pang_speed <= 0:
                    self.pang_speed = self.default_pang_speed
                    self.pang_ani_pos += 1
                    if self.pang_ani_pos >= self.pang_max:
                        # Explosion finished: this mouse is dead for good.
                        self.live = False
                        live_list[self.idx] = False
                gamepad.blit(self.pang, self.pang_pos)
            # Arrived (within half a step of the leader): pick a new target.
            if self.leader_x - (self.mspeed / 2) <= self.x and self.leader_x + (self.mspeed / 2) >= \
                    self.x and self.leader_y - (self.mspeed / 2) <= self.y and self.leader_y + (
                    self.mspeed / 2) >= self.y:
                self.leader_x, self.leader_y = random_destination(pad_width, pad_height)
            # Re-roll the target if it would lead the mouse into the cat.
            if isCatDirection(self.x, self.y, self.leader_x, self.leader_y, X0, Y0, X1, Y1):
                self.leader_x, self.leader_y = random_destination(pad_width, pad_height)
# ์ปจํธ๋กค ์ฅ ํด๋์ค
class ControlMouse():
    """The joystick-controlled mouse: heading and speed come from the server
    globals control_angle / control_speed instead of random leader points."""
    default_mspeed = 10
    live = False

    def __init__(self, live):
        global control_live
        # Alive flag; mirror it into the global control_live.
        self.live = live
        self.mspeed = self.default_mspeed
        control_live = live
        # Random spawn position.
        self.x, self.y = random_destination(pad_width - mouse_width, pad_height - mouse_height)
        self.is_rotating = False
        self.angle_sum = 0              # accumulated rotation applied to the sprite
        self.mouse_angle = 0            # target heading in degrees
        self.absolute_angle_sum = 0     # angle_sum normalized to [0, 360)
        # rotation direction (1: clockwise, -1: counter-clockwise)
        self.rotation_direction = 1
        self.rotation_mouse = None      # rotated sprite surface (0 -> None)
        # Initial steering target just ahead of the spawn point.
        self.leader_x, self.leader_y = self.x+100, self.y+100
        # Where the mouse died (explosion anchor).
        self.pang_point = None
        # Explosion animation frames and pacing.
        self.panglist = ratpanglist
        self.pang_max = len(ratpang)
        self.default_pang_speed = 2
        self.pang_speed = self.default_pang_speed
        self.pang_ani_pos = None        # None until caught; then frame index
        self.ani_pos = 0                # current walking-animation frame index
        self.ani_max = len(ratani) - 1
        self.default_ani_speed = 1
        self.ani_speed = self.default_ani_speed
        self.imagelist = cratimagelist  # indexed by booster state, then frame
        self.deading = False            # True while the death animation plays
        self._pos = None

    def update(self):
        global control_speed, control_live
        # NOTE(review): control_use is read from the socket as a *string*
        # ("True"/"False"), so as a truth value it is always truthy unless
        # empty — confirm intended gating here.
        if self.live and control_use:
            a = self.leader_x - self.x  # displacement to the steering target
            b = self.leader_y - self.y
            c = math.pow(math.pow(a, 2) + math.pow(b, 2), -0.5)  # inverse distance
            dx = a * c * control_speed
            dy = b * c * control_speed
            self.x += dx  # move
            self.y += dy
            # Clamp/wrap at an off-screen margin so the mouse can't run away.
            if self.x > pad_width+200:
                self.x = pad_width+100
            elif self.x < -pad_width-100:
                self.x = -pad_width
            if self.y > pad_height+200:
                self.y = pad_height+100
            elif self.y < -pad_height-100:
                self.y = -pad_height
            # Rotation toward the travel direction.
            if self.is_rotating == True:  # every frame while turning
                if self.absolute_angle_sum >= self.mouse_angle - 10 and self.absolute_angle_sum <= self.mouse_angle + 10:
                    # Reached the target heading (within 10 degrees).
                    self.is_rotating = False
                else:
                    self.angle_sum = self.angle_sum + 8 * self.rotation_direction  # turn rate (8 deg/frame)
            elif self.is_rotating == False:  # runs once to start a turn
                self.is_rotating = True
                self.mouse_angle = math.degrees(
                    math.atan2(a, b) + math.pi)
                self.rotation_direction = direction_decision(self.absolute_angle_sum, self.mouse_angle)
            self.absolute_angle_sum = abs(self.angle_sum % 360)
            # Advance the walking animation (sprite set depends on booster).
            self.image = self.imagelist[control_booster][self.ani_pos]
            self.ani_speed -= 1
            if self.ani_speed <= 0:
                self.ani_speed = self.default_ani_speed
                if self.ani_pos == self.ani_max:
                    self.ani_pos = 0
                else:
                    self.ani_pos += 1
            self.rotation_mouse = pygame.transform.rotate(self.image, self.angle_sum)
            self._pos = self.rotation_mouse.get_rect()
            self._pos.center = (self.x, self.y)
            if not self.deading:
                if self.x >= X0 and self.x <= X1 and self.y >= Y0 and self.y <= Y1:  # caught by the cat
                    self.deading = True
                    meat_punch_sound.play()
                    # Remember where we died and start the explosion.
                    self.pang_point = [self.x, self.y]
                    self.pang_ani_pos = 0
                    control_speed = 0
                else:
                    gamepad.blit(self.rotation_mouse, self._pos)
            if self.pang_ani_pos is not None:
                # Play the explosion ("pang") animation frame by frame.
                self.pang_speed -= 1
                self.pang = self.panglist[self.pang_ani_pos]
                self.pang_pos = self.pang.get_rect()
                self.pang_pos.center = (self.pang_point[0], self.pang_point[1])
                if self.pang_speed <= 0:
                    self.pang_speed = self.default_pang_speed
                    self.pang_ani_pos += 1
                    if self.pang_ani_pos >= self.pang_max:
                        # Explosion finished: this mouse is dead for good.
                        self.live = False
                        control_live = False
                gamepad.blit(self.pang, self.pang_pos)
            # Steering-target computation from the joystick angle: place the
            # leader 100px away along the control_angle heading.
            g = math.tan((-math.pi / 180) * control_angle)  # slope of heading line
            b = self.y - g * self.x                         # y-intercept
            nx = self.x + 100
            ny = g * nx + b
            a = nx - self.x
            b = ny - self.y
            # Unit direction scaled to 100px.
            movex = 100 * a * math.pow(math.pow(a, 2) + math.pow(b, 2), -0.5)
            movey = 100 * b * math.pow(math.pow(a, 2) + math.pow(b, 2), -0.5)
            # tan() loses the quadrant: flip direction for headings 90-270.
            if control_angle > 90 and control_angle <= 270:
                if control_speed != 0:
                    self.leader_x, self.leader_y = self.x - movex, self.y - movey
            else:
                if control_speed != 0:
                    self.leader_x, self.leader_y = self.x + movex, self.y + movey
class StageManager():
    """Respawns the wave of mice each stage and revives the joystick mouse.

    NOTE(review): ``update`` reads the module-level globals ``respawn``,
    ``level``, ``control_use``, ``control_live`` and ``controlMouse`` that
    are assigned in ``run_Game`` -- confirm it is only called after
    ``run_Game`` has initialised them.
    """
    def __init__(self, live_list):
        # BUG(review): this assigns a throwaway local; the stage level
        # actually lives in the module-level ``level`` global.
        level = 1
        #self.clear = False
        self.length = len(live_list)  # total number of mouse slots
        self.maxControlCount = 150  # frames to wait before reviving the control mouse
        self.controlRespqwnCount = 0  # respawn frame counter (sic: "Respqwn")
    def update(self, live_list, mouse_list):
        """Re-initialise mice on a new wave and count down the control-mouse respawn."""
        # Start a new wave when a respawn was requested or every mouse died:
        # the first ``level`` mice come back alive, the rest stay inactive.
        if (respawn) or (True not in live_list):
            #level += 1
            for i in range(self.length):
                if i < level:
                    mouse_list[i].__init__(True, i)
                else:
                    mouse_list[i].__init__(False, i)
        # While the controllable mouse is dead, count frames and revive it
        # once maxControlCount frames have elapsed.
        if (control_use == True) and (not control_live):
            self.controlRespqwnCount += 1
            if self.controlRespqwnCount >= self.maxControlCount:
                self.controlRespqwnCount = 0
                controlMouse.__init__(True)
# Main frame loop (repeats until ESC is pressed).
async def run_Game():
    """Per-frame loop: poll events, fetch cat/joystick state over the socket,
    update every mouse sprite and redraw the display at a 120 FPS cap.
    """
    global cat_x, cat_y, live_list, control_live
    global X0, Y0, X1, Y1
    global meat_punch_sound
    global control_angle, control_speed, control_booster, control_use, controlMouse
    global level, respawn
    pyautogui.moveTo(600, 1000)  # park the OS mouse pointer in the bottom-right corner
    cat_x = -500
    cat_y = -500
    meat_punch_sound = pygame.mixer.Sound(dirpath + '/sound/Meat_Punch01.wav')
    mouse_list = [Mouse(True, 0), Mouse(False, 1), Mouse(False, 2), Mouse(False, 3), Mouse(False, 4), Mouse(False, 5)]
    controlMouse = ControlMouse(True);
    crashed = False
    level = 1
    prevmax = 1
    stageManager = StageManager(live_list)
    while not crashed:
        for event in pygame.event.get():
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:
                    crashed = True
        # Redraw the background every frame to erase the previous frame.
        instantiate(background, 0, 0)
        send_data1 = "receive_cat_info"
        send_data2 = "receive_joystick_info"
        # Networking: request cat bounding box, then joystick state. ------
        if socket_switch:
            socket.send(send_data1.encode())
            recv_data = socket.recv(32).decode("UTF-8")
            data = recv_data.split(" ")
            X0, Y0, X1, Y1 = int(data[0]), int(data[1]), int(data[2]), int(data[3])
            socket.send(send_data2.encode())
            recv_data = socket.recv(32).decode("UTF-8")
            data = recv_data.split(" ")
            # NOTE(review): ``max`` shadows the builtin; it carries the stage level.
            control_angle, control_speed, control_booster, control_use, max = int(data[0]), int(data[1])//7, int(data[2]), data[3], int(data[4])
            # A level change on the wire triggers a wave respawn.
            if max != prevmax:
                respawn = True
            else:
                respawn = False
            prevmax = max
            level = max
            if control_booster == 1:
                control_speed *= 1.5
            # The wire value is the string 'auto' for autopilot; fold it to a bool.
            if control_use == 'auto':
                control_use = False
            else:
                control_use = True
        else:
            # Offline fallback: fake the cat box from the local cat position.
            X0, Y0, X1, Y1 = cat_x, cat_y, cat_x + 50, cat_y + 50
            control_booster = 0
            control_use = True
            respawn = False
        for x in mouse_list:
            x.update()
        controlMouse.update()
        stageManager.update(live_list, mouse_list)
        # Finally, flip the display.
        pygame.display.update()
        clock.tick(120)  # frame-rate cap
    pygame.quit()
    quit()
def init_Game():
    """Initialise pygame, load all sprite/background assets, then run the
    asyncio event loop that drives ``run_Game`` until it exits.
    """
    global gamepad, clock, grass
    global mouse, cat, ratpang
    # NOTE(review): ``ratpang`` is declared global twice (above and below).
    global ratani, ratimagelist, cratani, cratimagelist
    global ratpang, ratpanglist
    global background
    pygame.init()
    pygame.display.set_caption('Mouse Game')
    clock = pygame.time.Clock()
    # Explosion animation frames.
    ratpang = glob.glob(dirpath + '/images/Explosion/explosion_*.png')
    ratpang.sort()
    ratpanglist = []
    for i in range(len(ratpang)):
        ratpanglist.append(pygame.transform.scale(pygame.image.load(ratpang[i]), (pang_width, pang_height)))
    # Rat sprites: regular, controllable and booster variants.
    ratani = glob.glob(dirpath + '/images/rat/Armature_move_*.png')
    cratani = glob.glob(dirpath + '/images/rat/ControlMouse_*.png')
    boosterani = glob.glob(dirpath + '/images/rat/ControlBooster_*.png')
    ratani.sort()
    cratani.sort()
    boosterani.sort()
    ratimagelist = []
    firstimagelist = []
    secondimagelist = []
    cratimagelist = []
    # assumes the three sprite sets contain the same number of frames -- TODO confirm
    for i in range(len(ratani)):
        ratimagelist.append(pygame.transform.scale(pygame.image.load(ratani[i]), (mouse_width, mouse_height)))
        firstimagelist.append(pygame.transform.scale(pygame.image.load(cratani[i]), (mouse_width, mouse_height)))
        secondimagelist.append(pygame.transform.scale(pygame.image.load(boosterani[i]), (mouse_width, mouse_height)))
    cratimagelist.append(firstimagelist)  # index 0: normal control-mouse frames
    cratimagelist.append(secondimagelist)  # index 1: booster frames
    # Cat sprite.
    cat = pygame.image.load(dirpath +'/images/cat.png')
    cat = pygame.transform.scale(cat, (128, 128))
    # Background image scaled to the window size.
    background = pygame.image.load(dirpath + '/images/background/ForestFloorGreen.png')
    background = pygame.transform.scale(background, (pad_width, pad_height))
    # Start the game loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run_Game())
    loop.close()
# Module entry point: starts the game immediately on import/execution.
init_Game()
| [
"awakening95@naver.com"
] | awakening95@naver.com |
33a35015fa000b26ac58e46c1a4864e421259f6c | 4b0487c0852c24de9f3ab90e52109d425b709b9a | /colorization/util.py | d08223a4ee745925a59ed5eb312714a820365bf0 | [] | no_license | RuolinQu/CS520-Intro-to-AI-project | 327d923c9b997bf169ae3188a3dbbd3b6bb2d6df | 446c4e2db113f26cf252c7da92e6b8c78e9c0f2e | refs/heads/master | 2021-08-11T03:39:00.940314 | 2021-01-05T22:14:10 | 2021-01-05T22:14:10 | 238,278,285 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | import numpy as np
import cv2
def mean_padding(image, n):
    """Surround a 2-D array with a border of width ``n`` filled with its mean."""
    rows, cols = np.shape(image)
    fill_value = np.mean(image)
    padded = np.full((rows + 2 * n, cols + 2 * n), fill_value)
    padded[n:rows + n, n:cols + n] = image
    return padded
def initializeFilter(size, scale=1.0):
    """Zero-mean Gaussian filter init, std dev ``scale / sqrt(prod(size))``."""
    fan = np.prod(size)
    stddev = scale / np.sqrt(fan)
    return np.random.normal(loc=0, scale=stddev, size=size)
def initializeWeight(size):
    """Small random weights: standard-normal draws shrunk by a factor of 0.01."""
    draws = np.random.standard_normal(size=size)
    return draws * 0.01
def gradient_clip(w, threshold=1):
    """Rescale ``w`` so its L2 norm does not exceed ``threshold``.

    Bug fix: the norm used to be compared against the hard-coded value 1
    instead of ``threshold``, so a vector with norm in (1, threshold] was
    *scaled up* to exactly ``threshold`` instead of being left alone.

    :param w: gradient vector (numpy array).
    :param threshold: maximum allowed L2 norm.
    :return: ``w`` unchanged if its norm is within the threshold, otherwise
        ``w`` rescaled to have norm exactly ``threshold``.
    """
    norm = np.linalg.norm(w)
    if norm > threshold:
        return threshold / norm * w
    return w
"quruolin95@gamil.com"
] | quruolin95@gamil.com |
60398c62b4d0d98472e28c1228240e17ae865c49 | 63ac1ebd4c82d5128caf22800e7df6a582683a0b | /website_calendar/controllers/main.py | b52f1ef34bd70dd847ced6c82d5af09eb3aa2463 | [] | no_license | Darknroses/pedinnails | 5cce55c66330ec2e3f30d4ea0f883e85df68e56e | 56584712ee11758a5473b959ad075dcd5fc04636 | refs/heads/main | 2023-08-23T02:27:11.147977 | 2021-10-08T03:26:00 | 2021-10-08T03:26:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,445 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
from babel.dates import format_datetime, format_date
from werkzeug.urls import url_encode
from odoo import http, _, fields
from odoo.http import request
from odoo.tools import html2plaintext, DEFAULT_SERVER_DATETIME_FORMAT as dtf
from odoo.tools.misc import get_lang
class WebsiteCalendar(http.Controller):
    """Public website controller for the online appointment flow.

    Routes cover the whole booking lifecycle: choosing an appointment type,
    picking a slot, entering contact details, confirming, cancelling, and
    downloading the event as an .ics file.
    """
    @http.route([
        '/calendar',
        '/calendar/<model("calendar.appointment.type"):appointment_type>',
    ], type='http', auth="public", website=True, sitemap=True)
    def calendar_appointment_choice(self, appointment_type=None, employee_id=None, message=None, **kwargs):
        """Render the appointment-type chooser page.

        Without an explicit type, suggest types filtered by the visitor's
        GeoIP country (types with no country restriction always match) and
        preselect the first one; render the setup page if none exist.
        """
        if not appointment_type:
            country_code = request.session.geoip and request.session.geoip.get('country_code')
            if country_code:
                suggested_appointment_types = request.env['calendar.appointment.type'].search([
                    '|', ('country_ids', '=', False),
                    ('country_ids.code', 'in', [country_code])])
            else:
                suggested_appointment_types = request.env['calendar.appointment.type'].search([])
            if not suggested_appointment_types:
                return request.render("website_calendar.setup", {})
            appointment_type = suggested_appointment_types[0]
        else:
            suggested_appointment_types = appointment_type
        suggested_employees = []
        # Only expose an employee list when one was explicitly requested (and
        # actually belongs to the type) or when the visitor may choose freely.
        if employee_id and int(employee_id) in appointment_type.sudo().employee_ids.ids:
            suggested_employees = request.env['hr.employee'].sudo().browse(int(employee_id)).name_get()
        elif appointment_type.assignation_method == 'chosen':
            suggested_employees = appointment_type.sudo().employee_ids.name_get()
        return request.render("website_calendar.index", {
            'appointment_type': appointment_type,
            'suggested_appointment_types': suggested_appointment_types,
            'message': message,
            'selected_employee_id': employee_id and int(employee_id),
            'suggested_employees': suggested_employees,
        })
    @http.route(['/calendar/get_appointment_info'], type='json', auth="public", methods=['POST'], website=True)
    def get_appointment_info(self, appointment_id, prev_emp=False, **kwargs):
        """JSON endpoint: intro message, assignation method and, for the
        'chosen' method, the rendered employee-selection widget HTML.
        """
        Appt = request.env['calendar.appointment.type'].browse(int(appointment_id)).sudo()
        result = {
            'message_intro': Appt.message_intro,
            'assignation_method': Appt.assignation_method,
        }
        if result['assignation_method'] == 'chosen':
            selection_template = request.env.ref('website_calendar.employee_select')
            result['employee_selection_html'] = selection_template._render({
                'appointment_type': Appt,
                'suggested_employees': Appt.employee_ids.name_get(),
                'selected_employee_id': prev_emp and int(prev_emp),
            })
        return result
    @http.route(['/calendar/<model("calendar.appointment.type"):appointment_type>/appointment'], type='http', auth="public", website=True, sitemap=True)
    def calendar_appointment(self, appointment_type=None, employee_id=None, timezone=None, failed=False, **kwargs):
        """Render the slot-picker page for one appointment type.

        The chosen (or type-default) timezone is remembered in the session
        and used to compute the available slots.
        """
        request.session['timezone'] = timezone or appointment_type.appointment_tz
        Employee = request.env['hr.employee'].sudo().browse(int(employee_id)) if employee_id else None
        Slots = appointment_type.sudo()._get_appointment_slots(request.session['timezone'], Employee)
        return request.render("website_calendar.appointment", {
            'appointment_type': appointment_type,
            'timezone': request.session['timezone'],
            'failed': failed,
            'slots': Slots,
        })
    @http.route(['/calendar/<model("calendar.appointment.type"):appointment_type>/info'], type='http', auth="public", website=True, sitemap=True)
    def calendar_appointment_form(self, appointment_type, employee_id, date_time, **kwargs):
        """Render the contact-details form for the selected slot, pre-filling
        known partner data for logged-in visitors.
        """
        partner_data = {}
        if request.env.user.partner_id != request.env.ref('base.public_partner'):
            partner_data = request.env.user.partner_id.read(fields=['name', 'mobile', 'country_id', 'email'])[0]
        day_name = format_datetime(datetime.strptime(date_time, dtf), 'EEE', locale=get_lang(request.env).code)
        date_formated = format_datetime(datetime.strptime(date_time, dtf), locale=get_lang(request.env).code)
        return request.render("website_calendar.appointment_form", {
            'partner_data': partner_data,
            'appointment_type': appointment_type,
            'datetime': date_time,
            'datetime_locale': day_name + ' ' + date_formated,
            'datetime_str': date_time,
            'employee_id': employee_id,
            'countries': request.env['res.country'].search([]),
        })
    @http.route(['/calendar/<model("calendar.appointment.type"):appointment_type>/submit'], type='http', auth="public", website=True, method=["POST"])
    def calendar_appointment_submit(self, appointment_type, datetime_str, employee_id, name, phone, email, country_id=False, **kwargs):
        """Create the calendar event for the booked slot and redirect to its
        confirmation page.

        Re-checks employee and partner availability (a concurrent booking may
        have happened while the form was open), creates/updates the partner
        from the submitted contact details, and appends question answers to
        the event description.
        """
        timezone = request.session['timezone']
        tz_session = pytz.timezone(timezone)
        # Slot datetimes are submitted in the session timezone; store in UTC.
        date_start = tz_session.localize(fields.Datetime.from_string(datetime_str)).astimezone(pytz.utc)
        date_end = date_start + relativedelta(hours=appointment_type.appointment_duration)
        # check availability of the employee again (in case someone else booked while the client was entering the form)
        Employee = request.env['hr.employee'].sudo().browse(int(employee_id))
        if Employee.user_id and Employee.user_id.partner_id:
            if not Employee.user_id.partner_id.calendar_verify_availability(date_start, date_end):
                return request.redirect('/calendar/%s/appointment?failed=employee' % appointment_type.id)
        country_id = int(country_id) if country_id else None
        country_name = country_id and request.env['res.country'].browse(country_id).name or ''
        Partner = request.env['res.partner'].sudo().search([('email', '=like', email)], limit=1)
        if Partner:
            if not Partner.calendar_verify_availability(date_start, date_end):
                return request.redirect('/calendar/%s/appointment?failed=partner' % appointment_type.id)
            if not Partner.mobile or len(Partner.mobile) <= 5 and len(phone) > 5:
                Partner.write({'mobile': phone})
            if not Partner.country_id:
                Partner.country_id = country_id
        else:
            Partner = Partner.create({
                'name': name,
                'country_id': country_id,
                'mobile': phone,
                'email': email,
            })
        description = (_('Country: %s', country_name) + '\n' +
                       _('Mobile: %s', phone) + '\n' +
                       _('Email: %s', email) + '\n')
        # Append each answered appointment question to the description.
        for question in appointment_type.question_ids:
            key = 'question_' + str(question.id)
            if question.question_type == 'checkbox':
                answers = question.answer_ids.filtered(lambda x: (key + '_answer_' + str(x.id)) in kwargs)
                description += question.name + ': ' + ', '.join(answers.mapped('name')) + '\n'
            elif kwargs.get(key):
                if question.question_type == 'text':
                    description += '\n* ' + question.name + ' *\n' + kwargs.get(key, False) + '\n\n'
                else:
                    description += question.name + ': ' + kwargs.get(key) + '\n'
        categ_id = request.env.ref('website_calendar.calendar_event_type_data_online_appointment')
        alarm_ids = appointment_type.reminder_ids and [(6, 0, appointment_type.reminder_ids.ids)] or []
        partner_ids = list(set([Employee.user_id.partner_id.id] + [Partner.id]))
        # FIXME AWA/TDE double check this and/or write some tests to ensure behavior
        # The 'mail_notify_author' is only placed here and not in 'calendar.attendee#_send_mail_to_attendees'
        # Because we only want to notify the author in the context of Online Appointments
        # When creating a meeting from your own calendar in the backend, there is no need to notify yourself
        event = request.env['calendar.event'].with_context(mail_notify_author=True).sudo().create({
            'name': _('%s with %s', appointment_type.name, name),
            'start': date_start.strftime(dtf),
            # FIXME master
            # we override here start_date(time) value because they are not properly
            # recomputed due to ugly overrides in event.calendar (reccurrencies suck!)
            # (fixing them in stable is a pita as it requires a good rewrite of the
            # calendar engine)
            'start_date': date_start.strftime(dtf),
            'stop': date_end.strftime(dtf),
            'allday': False,
            'duration': appointment_type.appointment_duration,
            'description': description,
            'alarm_ids': alarm_ids,
            'location': appointment_type.location,
            'partner_ids': [(4, pid, False) for pid in partner_ids],
            'categ_ids': [(4, categ_id.id, False)],
            'appointment_type_id': appointment_type.id,
            'user_id': Employee.user_id.id,
        })
        event.attendee_ids.write({'state': 'accepted'})
        return request.redirect('/calendar/view/' + event.access_token + '?message=new')
    @http.route(['/calendar/view/<string:access_token>'], type='http', auth="public", website=True)
    def calendar_appointment_view(self, access_token, edit=False, message=False, **kwargs):
        """Render the confirmation page for one event (looked up by its
        access token), including a prefilled Google-Calendar link.
        """
        event = request.env['calendar.event'].sudo().search([('access_token', '=', access_token)], limit=1)
        if not event:
            return request.not_found()
        timezone = request.session.get('timezone')
        if not timezone:
            # Fall back: request tz, then type tz, then attendee/organizer tz.
            timezone = request.env.context.get('tz') or event.appointment_type_id.appointment_tz or event.partner_ids and event.partner_ids[0].tz or event.user_id.tz or 'UTC'
            request.session['timezone'] = timezone
        tz_session = pytz.timezone(timezone)
        date_start_suffix = ""
        format_func = format_datetime
        if not event.allday:
            url_date_start = fields.Datetime.from_string(event.start).strftime('%Y%m%dT%H%M%SZ')
            url_date_stop = fields.Datetime.from_string(event.stop).strftime('%Y%m%dT%H%M%SZ')
            date_start = fields.Datetime.from_string(event.start).replace(tzinfo=pytz.utc).astimezone(tz_session)
        else:
            url_date_start = url_date_stop = fields.Date.from_string(event.start_date).strftime('%Y%m%d')
            date_start = fields.Date.from_string(event.start_date)
            format_func = format_date
            date_start_suffix = _(', All Day')
        locale = get_lang(request.env).code
        day_name = format_func(date_start, 'EEE', locale=locale)
        date_start = day_name + ' ' + format_func(date_start, locale=locale) + date_start_suffix
        details = event.appointment_type_id and event.appointment_type_id.message_confirmation or event.description or ''
        # Build the "add to Google Calendar" template URL.
        params = {
            'action': 'TEMPLATE',
            'text': event.name,
            'dates': url_date_start + '/' + url_date_stop,
            'details': html2plaintext(details.encode('utf-8'))
        }
        if event.location:
            params.update(location=event.location.replace('\n', ' '))
        encoded_params = url_encode(params)
        google_url = 'https://www.google.com/calendar/render?' + encoded_params
        return request.render("website_calendar.appointment_validated", {
            'event': event,
            'datetime_start': date_start,
            'google_url': google_url,
            'message': message,
            'edit': edit,
        })
    @http.route(['/calendar/cancel/<string:access_token>'], type='http', auth="public", website=True)
    def calendar_appointment_cancel(self, access_token, **kwargs):
        """Delete the event unless the cancellation deadline has passed."""
        event = request.env['calendar.event'].sudo().search([('access_token', '=', access_token)], limit=1)
        if not event:
            return request.not_found()
        # Too close to the appointment: refuse and explain on the view page.
        if fields.Datetime.from_string(event.allday and event.start_date or event.start) < datetime.now() + relativedelta(hours=event.appointment_type_id.min_cancellation_hours):
            return request.redirect('/calendar/view/' + access_token + '?message=no-cancel')
        event.with_context(archive_on_error=True).unlink()
        return request.redirect('/calendar?message=cancel')
    @http.route(['/calendar/ics/<string:access_token>.ics'], type='http', auth="public", website=True)
    def calendar_appointment_ics(self, access_token, **kwargs):
        """Serve the event as a downloadable .ics attachment."""
        event = request.env['calendar.event'].sudo().search([('access_token', '=', access_token)], limit=1)
        if not event or not event.attendee_ids:
            return request.not_found()
        files = event._get_ics_file()
        content = files[event.id]
        return request.make_response(content, [
            ('Content-Type', 'application/octet-stream'),
            ('Content-Length', len(content)),
            ('Content-Disposition', 'attachment; filename=Appoinment.ics')
        ])
| [
"toan@syncoria.com"
] | toan@syncoria.com |
9f4252f5cb1f766315de51f2b117bd05243e6cb3 | d13d5f0faab22556a55507c3a7fe558c8639187c | /setup.py | f074c19e09c9d6ef711682f4e79a929c7c5a0fa8 | [] | no_license | st9540808/ChatBot | 6627092b5c309fd7e2580f7e843a39ece963a616 | 459a74f3b513da1c1b5a8a4dea0984c1f98672cc | refs/heads/master | 2021-09-02T07:41:04.717672 | 2017-12-31T15:27:28 | 2017-12-31T15:27:28 | 114,142,707 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | import sys
import telegram
from flask import Flask, request
import first_transition as ft
# Flask app, Telegram bot handle, and the bus-information backend.
app = Flask(__name__)
bot = telegram.Bot(token='434221090:AAGIniGBERUTy6bTkPqQgZzP3UXkhU4eif8')
bus = ft.BusInfo()
def _set_webhook():
    """Register the HTTPS endpoint as this bot's Telegram webhook.

    Exits the process if Telegram rejects the registration.
    NOTE(review): the ngrok URL is ephemeral -- it must be updated for
    every new tunnel session.
    """
    status = bot.set_webhook('https://cbd9b717.ngrok.io/hook')
    if not status:
        print('Webhook setup failed')
        sys.exit(1)
@app.route('/hook', methods=['POST'])
def webhook_handler():
    """Dispatch an incoming Telegram update to the bus-info backend.

    'activate' primes the backend, 'update' refreshes the cached result,
    a message containing both route markers queries a specific route, and
    anything else gets a usage hint. Always replies to the sender.
    """
    if request.method == "POST":
        update = telegram.Update.de_json(request.get_json(force=True), bot)
        text = update.message.text
        if text.lower() == 'activate':
            bus.activate()
            res = 'activated'
        elif text.lower() == 'update':
            bus.update()
            res = bus.res
        elif u'ๅพ' in text and u'ๅฐ' in text:
            bus.by_route(text)
            res = bus.res
        else:
            res = 'ไฝฟ็จๆนๆณ๏ผ "่ทฏ็ท"ๅพ"่ตท็ซ"ๅฐ"ไธ่ป็ซ"'
        update.message.reply_text(res)
    return 'ok'
if __name__ == "__main__":
    # Register the webhook first, then serve Flask on the default port.
    _set_webhook()
    app.run()
"st9540808@gmail.com"
] | st9540808@gmail.com |
ca4233e96235fc0c51199d4ec636baa2356b6554 | a97af2ae2b132f4a467c00135e00e7434b722403 | /prac_03/password_check.py | 71e2a7aa12bfe09a9f23e538af33fd6809e2237d | [] | no_license | daniel-bush/Practicals_CP1404 | a4dec44603d751bc37efcdffb0235c3d757fe777 | 4919d79f86a87969cec38e40cf34dc0f599390e0 | refs/heads/master | 2023-01-04T11:03:46.042788 | 2020-10-28T13:22:21 | 2020-10-28T13:22:21 | 290,442,213 | 0 | 0 | null | 2020-09-03T09:30:54 | 2020-08-26T08:37:46 | Python | UTF-8 | Python | false | false | 436 | py | MIN_LENGTH = 10
def main():
password = get_password(MIN_LENGTH)
print_asterisks(password)
def print_asterisks(password):
    """Print one asterisk per password character, with no trailing newline."""
    print("*" * len(password), end="")
def get_password(MIN_LENGTH):
    """Prompt repeatedly until the user enters at least MIN_LENGTH characters.

    The parameter keeps its original (upper-case) name to preserve the
    public signature for keyword callers.
    """
    while True:
        password = input("Password: ")
        if len(password) >= MIN_LENGTH:
            return password
        print("Password must be at least {} characters!".format(MIN_LENGTH))
# Script entry point (runs on import as well).
main()
| [
"Themepw999GH1987"
] | Themepw999GH1987 |
4a9e8aa14c6caaa64e388c73cf1955139791697f | 3a771b72dae1aae406b94726bcbcf73915577b18 | /q11.py | 7957ae29604c51312fee4b0a13e0a5bfe42decff | [] | no_license | SHANK885/Python-Basic-Programs | 4fcb29280412baa63ffd33efba56d9f59770c9dc | 157f0f871b31c4523b6873ce5dfe0d6e26a6dc61 | refs/heads/master | 2021-07-18T18:24:10.455282 | 2018-11-19T07:02:27 | 2018-11-19T07:02:27 | 138,009,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | '''
Question:
Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not. The numbers that are divisible by 5 are to be printed in a comma separated sequence.
Example:
0100,0011,1010,1001
Then the output should be:
1010
Notes: Assume the data is input by console.
'''
out = []  # binary strings (kept verbatim) whose value is divisible by 5
binary = input("Enter comma separated 4 digit binary number : ")
bin_list = [b for b in binary.split(",")]
for item in bin_list:
    # Parse each token as base-2; keep the original text when divisible by 5.
    int_item = int(item, 2)
    if int_item%5 == 0:
        out.append(item)
print(",".join(out))
"shashankshekhar885@gmail.com"
] | shashankshekhar885@gmail.com |
c51784928162c8a674b9a5deffb9654d0c8ef600 | 0dcb2348314ad4f9021f7cd1661bf8f80a4d592a | /RightTriangleApp.py | e7eab9763f8eebee32106af35af45ed63052e8c8 | [] | no_license | GhulamMustafaGM/PythonChallenges | 62cfa7a86d1bc780e75978db012408c545229778 | d3c16b173994968ce8bcb67a35a70ca860b640b9 | refs/heads/master | 2023-03-08T11:39:21.967898 | 2021-02-15T22:07:15 | 2021-02-15T22:07:15 | 325,989,469 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py |
# Right Triangle
import math
print("Welcome to the Right Triangle Solver App")
# Get user input
side_a = float(input("\nWhat is the first leg of the triangle: "))
side_b = float(input("What is the second leg of the triangle: "))
# Calculations
# Hypotenuse via the Pythagorean theorem, rounded to 3 decimal places.
side_c = math.sqrt(side_a**2 + side_b**2)
side_c = round(side_c, 3)
# Area of a right triangle: half the product of its two legs.
area = 0.5*side_a*side_b
area = round(area, 3)
# Summary
print("\nFor a triangle with legs of " + str(side_a) + " and " + str(side_b) + " hypotenuse is " + str(side_c) + ".")
print("For a triangle with legs of " + str(side_a) + " and " + str(side_b) + " the area is " + str(area) + ".")
# output
# Welcome to the Right Triangle Solver App
# What is the first leg of the triangle: 30
# What is the second leg of the triangle: 50.5
# For a triangle with legs of 30.0 and 50.5 hypotenuse is 58.739.
# For a triangle with legs of 30.0 and 50.5 the area is 757.5. | [
"mustafaji@gmail.com"
] | mustafaji@gmail.com |
1fbbba47143217fdc15bcfdfc0c824212343623f | 5fa2b72654e7676ce4cff1c5d3d115bab9a7d30f | /conanfile.py | 4911a064e42314e20bac8948c26cdc79c8d9ce32 | [] | no_license | ambroff/conan-visit_struct | 86fbf970b2c148abaa71be9cc396bac18bcb41c0 | 9b80802f3a54454f4475f37ac9e80727919316c5 | refs/heads/master | 2020-04-08T21:08:31.190979 | 2018-11-29T21:39:53 | 2018-11-29T21:39:53 | 159,730,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
class VisitStructConan(ConanFile):
    """Conan package recipe for the header-only visit_struct C++ library."""
    name = "visit_struct"
    version = "1.0"
    url = "https://github.com/ambroff/conan-visit_struct"
    description = "A miniature library for struct-field reflection in C++"
    exports_sources = "include/*"
    # Header-only: nothing to build, so consumers may use sources in place.
    no_copy_source = True
    # Indicates License type of the packaged library
    license = "BSL-1.0"
    def source(self):
        """Download and unpack the upstream release, verifying its SHA-256."""
        archive_url = 'https://github.com/cbeck88/visit_struct/archive/v{}.zip'.format(
            self.version)
        checksum = '7152df2f6b0ce4c64d94a116073b196ec1e188a0a142a5138b016562f9bdc4e4'
        tools.get(archive_url, sha256=checksum)
    def package(self):
        """Copy the license and the public headers into the package layout."""
        self.copy(pattern="LICENSE", dst="license")
        self.copy(
            '*.hpp',
            src=os.path.join('{}-{}'.format(self.name, self.version), 'include'),
            dst='include')
    def package_id(self):
        """Mark the package header-only: one package id for all settings."""
        self.info.header_only()
| [
"kyle@ambroffkao.com"
] | kyle@ambroffkao.com |
beb8b556b8292e3e60a49a4dd5625d013750d1d7 | aa480d8b09dd7ad92c37c816ebcace24a35eb34c | /third-round/43.ๅญ็ฌฆไธฒ็ธไน.py | cf3fd3f45529613e3a133392c58f55c8d88caa5a | [] | no_license | SR2k/leetcode | 7e701a0e99f9f05b21216f36d2f5ac07a079b97f | de131226159865dcb7b67e49a58d2ddc3f0a82c7 | refs/heads/master | 2023-03-18T03:37:02.916453 | 2022-09-16T01:28:13 | 2022-09-16T01:28:13 | 182,083,445 | 0 | 0 | null | 2023-03-08T05:44:26 | 2019-04-18T12:27:12 | Python | UTF-8 | Python | false | false | 1,634 | py | #
# @lc app=leetcode.cn id=43 lang=python3
#
# [43] ๅญ็ฌฆไธฒ็ธไน
#
# https://leetcode-cn.com/problems/multiply-strings/description/
#
# algorithms
# Medium (44.96%)
# Likes: 862
# Dislikes: 0
# Total Accepted: 205.5K
# Total Submissions: 457.8K
# Testcase Example: '"2"\n"3"'
#
# ็ปๅฎไธคไธชไปฅๅญ็ฌฆไธฒๅฝขๅผ่กจ็คบ็้่ดๆดๆฐย num1ย ๅย num2๏ผ่ฟๅย num1ย ๅย num2ย ็ไน็งฏ๏ผๅฎไปฌ็ไน็งฏไน่กจ็คบไธบๅญ็ฌฆไธฒๅฝขๅผใ
#
# ๆณจๆ๏ผไธ่ฝไฝฟ็จไปปไฝๅ
็ฝฎ็ BigInteger ๅบๆ็ดๆฅๅฐ่พๅ
ฅ่ฝฌๆขไธบๆดๆฐใ
#
#
#
# ็คบไพ 1:
#
#
# ่พๅ
ฅ: num1 = "2", num2 = "3"
# ่พๅบ: "6"
#
# ็คบไพย 2:
#
#
# ่พๅ
ฅ: num1 = "123", num2 = "456"
# ่พๅบ: "56088"
#
#
#
# ๆ็คบ๏ผ
#
#
# 1 <= num1.length, num2.length <= 200
# num1ย ๅ num2ย ๅช่ฝ็ฑๆฐๅญ็ปๆใ
# num1ย ๅ num2ย ้ฝไธๅ
ๅซไปปไฝๅๅฏผ้ถ๏ผ้คไบๆฐๅญ0ๆฌ่บซใ
#
#
#
# @lc code=start
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Return the product of two non-negative decimal strings.

        Grade-school multiplication: digit i of num1 (counted from the
        right) times digit j of num2 contributes at position i + j; a
        single carry pass then normalises every position to 0-9.
        """
        if '0' in (num1, num2):
            return '0'
        digits1 = [int(c) for c in reversed(num1)]
        digits2 = [int(c) for c in reversed(num2)]
        acc = [0] * (len(digits1) + len(digits2))
        for i, d1 in enumerate(digits1):
            for j, d2 in enumerate(digits2):
                acc[i + j] += d1 * d2
        # Propagate carries; acc is wide enough that no carry survives.
        carry = 0
        out = []
        for total in acc:
            carry, digit = divmod(total + carry, 10)
            out.append(str(digit))
        # Strip the (little-endian) leading zeros, then restore big-endian order.
        while out and out[-1] == '0':
            out.pop()
        return "".join(reversed(out))
# @lc code=end
# Ad-hoc sanity checks mirroring the leetcode examples (run on import).
print(Solution().multiply("2", "3"))
print(Solution().multiply("123", "456"))
| [
"luozhou.csy@alibaba-inc.com"
] | luozhou.csy@alibaba-inc.com |
e21d152f45de841949af418b3a1b04c8ac9e5f34 | 3ec04f397e18c9b8136cb5bfa26079b15568e0e3 | /downloader/context.py | aeae072516e1a9a786ddfe9780217fddbe42990b | [] | no_license | fagan2888/soundcloud-dl | 3c8a772debe27746bca80ff998ca166232a4531f | 3ff77425553e8f765ec452a83076c4cc16ad54ab | refs/heads/master | 2022-04-13T00:23:54.908666 | 2020-03-07T05:13:06 | 2020-03-07T05:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | import sys
import os
# Make the project root importable so the top-level ``config`` module
# resolves regardless of the directory the script is launched from.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from config import client_id
"suyash.behera458@gmail.com"
] | suyash.behera458@gmail.com |
6b9b9ead9132737c8e9590117045b92764a234ae | 11a4f47647bc2113641ef44b0cafc3011ca5ad69 | /VBinterface.spec | f9d675882d7dc0194bec10194138dd296530a9b0 | [] | no_license | LucioPg/VBinterface | 617d9f9006ce1666f0a575f0aee72d9fe8eb8c50 | f8db385e2cad3a74bac5433b50b5acbb644cc39c | refs/heads/master | 2020-09-09T11:21:04.646925 | 2019-11-13T10:33:12 | 2019-11-13T10:33:12 | 221,433,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['main.py'],
pathex=['C:\\Users\\Lucio\\PycharmProjects\\VBinterface'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='VBinterface',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='VBinterface')
| [
"lucio.di.capua@gmail.com"
] | lucio.di.capua@gmail.com |
4b817c90da1a1bf413b75b098e4e7aced20b4cdb | 8034442a9778043b1d886220a3c928327b6297d4 | /Case_rbm/vlan_bond/index.py | 2225ed51c99d287e815f12237d49525615f04bb8 | [] | no_license | wangqian0818/auto_test | 5efe6d7b41ff01e6a9f10211674f55e195484a1c | 803a485d9720f090f7fa5d4482092cc4e7d9aa73 | refs/heads/master | 2023-08-24T01:27:40.956398 | 2021-11-02T02:12:14 | 2021-11-02T02:12:14 | 367,355,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | #coding:utf-8
from common import baseinfo
vlanCardid = str(baseinfo.gwVlanCardid)
vlanA = str(baseinfo.vlanA)
vlanB = str(baseinfo.vlanB)
#้
็ฝฎไธๅ
#ๅ่กจ้้ข็้กบๅบไพๆฌกไธบ๏ผๆฅ่ฏขๅฝไปค๏ผ้ขๆ็ปๆ
case1_step1={
"step1":[f"export cardid={vlanCardid}&&switch-jsac --set --module 12 --switch on",f"export cardid={vlanCardid}&&switch-jsac --get | grep 12","on"],
"step2":[f"export cardid={vlanCardid}&&switch-jsac --set --module 15 --switch on",f"export cardid={vlanCardid}&&switch-jsac --get | grep 15","on"]
}
case1_step2={
"step1":[f"export cardid={vlanCardid}&&vlan-jsac --get",vlanA],
"step2":[f"export cardid={vlanCardid}&&vlan-jsac --get",vlanB],
"step3":[f"export cardid={vlanCardid}&&vlan-jsac --get |wc -l",'5']
}
case1_step11={
"step1":[f"export cardid={vlanCardid}&&switch-jsac --set --module 12 --switch off",f"export cardid={vlanCardid}&&switch-jsac --get | grep 12","off"],
"step2":[f"export cardid={vlanCardid}&&switch-jsac --set --module 15 --switch off",f"export cardid={vlanCardid}&&switch-jsac --get | grep 15","off"]
}
| [
"wangqianjob0818@163.com"
] | wangqianjob0818@163.com |
9f48906c1b9de377c3ac9cfb75b91d0f5fdffaca | b40770127462ff2ac9ca9c8c2a427839ce00c8e6 | /sdks/python/apache_beam/internal/metrics/cells.py | 44ee3953b4685ebaf1215850b40dffe49d41acee | [
"Apache-2.0"
] | permissive | PolideaInternal/beam | 99f6fcd17dfc8320152d433bd106cde93af33f62 | 7223fb3d6bb43e956a480a7f6640c62c78350aab | refs/heads/master | 2021-07-12T19:38:08.047853 | 2020-11-05T05:46:51 | 2020-11-05T05:46:51 | 232,585,848 | 1 | 1 | Apache-2.0 | 2020-11-08T14:10:37 | 2020-01-08T14:46:29 | Java | UTF-8 | Python | false | false | 5,538 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file contains internal metric cell classes. A metric cell is used to
accumulate in-memory changes to a metric. It represents a specific metric
in a single context.
For internal use only. No backwards compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from builtins import object
from typing import TYPE_CHECKING
from typing import Optional
from apache_beam.metrics.cells import MetricAggregator
from apache_beam.metrics.cells import MetricCell
from apache_beam.metrics.cells import MetricCellFactory
from apache_beam.utils.histogram import Histogram
if TYPE_CHECKING:
from apache_beam.utils.histogram import BucketType
class HistogramCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.

  Tracks the current value and delta for a histogram metric.

  Each cell tracks the state of a metric independently per context per bundle.
  Therefore, each metric has a different cell in each bundle, that is later
  aggregated.

  This class is thread safe since underlying histogram object is thread safe.
  """
  def __init__(self, bucket_type):
    # Kept so reset() can rebuild an identity histogram with the same buckets.
    self._bucket_type = bucket_type
    self.data = HistogramAggregator(bucket_type).identity_element()

  def reset(self):
    """Drop all recorded values, returning the cell to its identity state."""
    self.data = HistogramAggregator(self._bucket_type).identity_element()

  def combine(self, other):
    # type: (HistogramCell) -> HistogramCell
    """Return a new cell holding the merged data; neither input is mutated."""
    result = HistogramCell(self._bucket_type)
    result.data = self.data.combine(other.data)
    return result

  def update(self, value):
    """Record a single observation into the underlying histogram."""
    self.data.histogram.record(value)

  def get_cumulative(self):
    # type: () -> HistogramData
    return self.data.get_cumulative()

  def to_runner_api_monitoring_info(self, name, transform_id):
    # Histogram metric is currently worker-local and internal
    # use only. This method should be implemented when runners
    # support Histogram metric reporting.
    return None
class HistogramCellFactory(MetricCellFactory):
  """Callable factory producing HistogramCells for one bucket type.

  Equality and hash are defined on the bucket type so identical factories
  deduplicate when used as dictionary keys.
  """
  def __init__(self, bucket_type):
    self._bucket_type = bucket_type

  def __call__(self):
    """Create a fresh, empty HistogramCell with this factory's buckets."""
    return HistogramCell(self._bucket_type)

  def __eq__(self, other):
    if not isinstance(other, HistogramCellFactory):
      return False
    return self._bucket_type == other._bucket_type

  def __hash__(self):
    return hash(self._bucket_type)
class HistogramResult(object):
  """Read-only wrapper exposing percentile views over ``HistogramData``."""

  def __init__(self, data):
    # type: (HistogramData) -> None
    self.data = data

  def __eq__(self, other):
    # Results are equal iff they wrap equal data; any other type compares
    # unequal.
    return isinstance(other, HistogramResult) and self.data == other.data

  def __hash__(self):
    return hash(self.data)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __repr__(self):
    return '<HistogramResult({})>'.format(
        self.data.histogram.get_percentile_info())

  @property
  def p99(self):
    return self.data.histogram.p99()

  @property
  def p95(self):
    return self.data.histogram.p95()

  @property
  def p90(self):
    return self.data.histogram.p90()
class HistogramData(object):
  """For internal use only; no backwards-compatibility guarantees.

  Holds the histogram backing a histogram metric.  Not thread safe; it is
  only supposed to be modified from within a ``HistogramCell``.
  """

  def __init__(self, histogram):
    self.histogram = histogram

  def __eq__(self, other):
    return self.histogram == other.histogram

  def __hash__(self):
    return hash(self.histogram)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __repr__(self):
    return 'HistogramData({})'.format(self.histogram.get_percentile_info())

  def get_cumulative(self):
    # type: () -> HistogramData
    # Snapshot wrapper; the histogram object itself is shared, not copied.
    return HistogramData(self.histogram)

  def combine(self, other):
    # type: (Optional[HistogramData]) -> HistogramData
    # None acts as the identity element for combination.
    return self if other is None else HistogramData(
        self.histogram.combine(other.histogram))
class HistogramAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregates ``HistogramData`` values for a histogram metric during
  pipeline execution.
  """
  def __init__(self, bucket_type):
    # type: (BucketType) -> None
    self._bucket_type = bucket_type

  def identity_element(self):
    # type: () -> HistogramData
    # A fresh, empty histogram is the identity under combine().
    return HistogramData(Histogram(self._bucket_type))

  def combine(self, x, y):
    # type: (HistogramData, HistogramData) -> HistogramData
    return x.combine(y)

  def result(self, x):
    # type: (HistogramData) -> HistogramResult
    return HistogramResult(x.get_cumulative())
| [
"heejong@gmail.com"
] | heejong@gmail.com |
4fac2c031badc7ec46cb85af953b08954fb12561 | b7b43e286059b7ecaa2564b9b91cd2f10de4f5b3 | /files/default/iprule-smart-add.py | 9ab5914bcae4faaa2a3405bb9404371000ed01ea | [] | no_license | ym/chef-iproute2 | 6bd87d469488105eb2cd846c6d0ac5329dad5602 | 23e85b4434f808691ca222c958b4ce7800b5cde1 | refs/heads/master | 2021-01-20T21:11:59.610759 | 2016-06-01T10:29:53 | 2016-06-01T10:29:53 | 60,167,843 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | #!/usr/bin/env python
from pyroute2 import IPRoute
from pyroute2.netlink.rtnl.fibmsg import FR_ACT_NAMES
from netaddr import IPNetwork
from socket import AF_INET
import click
# Netlink socket used for all rule queries/changes below.
ipr = IPRoute()
# Invert FR_ACT_NAMES so numeric rule actions can be mapped back to names.
FR_ACT_NAMES_MAP = dict((v, k) for k, v in FR_ACT_NAMES.iteritems())
# Fixed-position fields of a netlink rule message to keep, keyed by output
# name.  A string value names the source field to copy; a dict value remaps
# the raw field value (see map_dict()).
IPRULES_MAP = {
    'family': 'family',
    'action': FR_ACT_NAMES_MAP,
    'dst_len': 'dst_len',
    'src_len': 'src_len',
}
# NLA attributes of a rule to keep, keyed by output name.
IPRULE_ATTRS_MAP = {
    'priority': 'FRA_PRIORITY',
    'table': 'FRA_TABLE',
    'src': 'FRA_SRC',
    'dst': 'FRA_DST',
}
def nla_slots_to_dict(slots):
    """Convert an iterable of (name, value) NLA pairs into a plain dict."""
    result = {}
    for slot in slots:
        result[slot[0]] = slot[1]
    return result
def map_dict(d, mappings):
    """Project ``d`` through ``mappings`` into a new dict.

    ``mappings`` maps each output key to either:
      * a string: the name of a key in ``d`` whose value is copied through, or
      * a dict: ``d[output_key]`` is looked up in it and the mapped value is
        stored.

    Entries missing on either side are silently skipped.

    Args:
        d: source dictionary.
        mappings: {output_key: source_key_or_value_map}.

    Returns:
        A new dict containing only the successfully mapped entries.
    """
    ret = dict()
    # .items() (rather than the Python-2-only .iteritems()) keeps this
    # helper working on both Python 2 and 3; isinstance() replaces the
    # brittle `type(x) == str` comparison.
    for k, mapping in mappings.items():
        if isinstance(mapping, str):
            if mapping in d:
                ret[k] = d[mapping]
        elif isinstance(mapping, dict):
            if k not in d:
                continue
            src = d[k]
            if src in mapping:
                ret[k] = mapping[src]
    return ret
def rule_to_dict(rule):
    """Flatten a pyroute2 rule message into a plain dict of known fields."""
    attr_dict = nla_slots_to_dict(rule['attrs'])
    # Merge the fixed-position fields and the NLA attributes we care about.
    out = map_dict(rule, IPRULES_MAP)
    out.update(map_dict(attr_dict, IPRULE_ATTRS_MAP))
    return out
def add_rule(from_cidr, table):
    """Idempotently ensure an `ip rule from <cidr> lookup <table>` exists.

    Scans the kernel's existing IPv4 rules, deletes malformed or duplicate
    entries matching the requested source prefix and table, and adds the
    rule only when no valid instance remains.

    Args:
        from_cidr: source prefix; any string netaddr's IPNetwork accepts
            (normalised to its CIDR network form).
        table: routing-table number (anything int() accepts).
    """
    cidr = IPNetwork(from_cidr).cidr
    table = int(table)
    hit = 0  # count of matching rules seen so far
    # Search existing rules
    for rule in ipr.get_rules(family=AF_INET):
        rule = rule_to_dict(rule)
        # Only src-based table rules are candidates for a match.
        if not all(k in rule for k in ('src', 'src_len', 'table')):
            continue
        _cidr = IPNetwork("%s/%s" % (rule['src'], rule['src_len']))
        if _cidr != cidr or rule['table'] != table:
            continue
        hit += 1
        # Clean up existing malformed or duplicated rule
        # NOTE(review): the string comparison presumably catches rules whose
        # src has host bits set (same network, different literal) — confirm
        # against netaddr's IPNetwork equality semantics.
        if str(_cidr) != str(cidr) or hit > 1:
            ipr.rule('delete', **rule)
            hit -= 1
    # Only add the rule if no valid copy survived the scan above.
    if hit == 0:
        ipr.rule(
            'add', action='FR_ACT_TO_TBL',
            src=str(cidr.ip), src_len=cidr.prefixlen,
            table=table, family=AF_INET
        )
# Command-line entry point: `iprule-smart-add.py SRC TABLE`.
# (Kept docstring-free on purpose: click would surface a docstring as
# --help text, changing the CLI's output.)
@click.command()
@click.argument('src')
@click.argument('table')
def add_rule_command(src, table):
    return add_rule(src, table)
if __name__ == '__main__':
    add_rule_command()
| [
"i@xswan.net"
] | i@xswan.net |
78a6189426285641334e96eb52250ffb83f8de7a | cf0c0ad8707802ab74bd7b66869ab5715359aea4 | /trading_strategy_learner/ManualStrategy.py | 6c8fc6816f93b0fd5fc24e3401eb02cbffff417d | [] | no_license | yubailibra/Machine_Learning-CourseProjects | 074dde678659fca437e95fbce15fee8f5d6a23b5 | 858db25ccf43acc79c03a007b0b58c365900f841 | refs/heads/master | 2020-04-24T11:00:15.707464 | 2019-03-10T18:50:44 | 2019-03-10T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | # name: Yu Bai; ID: ybai67
#implement testPolicy() which returns a trades data frame according to the Manual Strategy
import os
import pandas as pd
from indicators import *
from marketsimcode import *
def testPolicy(symbol="JPM", sd=dt.datetime(2008, 1, 1), ed=dt.datetime(2009, 12, 31), sv=100000):
    """Generate trades for the Manual Strategy over [sd, ed].

    Combines five technical indicators (price/SMA ratio, %B, relative
    volatility, RSI, momentum) with fixed thresholds to take long (+1),
    short (-1) or flat (0) positions, then converts the daily position
    series into a trades dataframe.

    Args:
        symbol: ticker to trade.
        sd, ed: backtest start / end dates.
        sv: starting portfolio value (unused by the rules; kept for the
            standard testPolicy interface).

    Returns:
        DataFrame of daily share trades for `symbol` (positive = buy,
        negative = sell), indexed by trading day.
    """
    # Pull prices with 20 extra leading days so the 20-day-lookback
    # indicators are already defined on the first requested trading day.
    prices = getPrice(syms=[symbol], sd=sd, ed=ed, includeSPY=True, extendPrior=20)
    sma, bbp, rsi, momentum, rel_std = cal_indicators(prices, lookback=20)[0:5]
    # Drop the SPY column and trim the warm-up period back to [sd, ed].
    prices = prices.iloc[0:, 1:]
    prices = prices.loc[prices.index >= sd]
    sma = sma.loc[sma.index >= sd]
    bbp = bbp.loc[bbp.index >= sd]
    rsi = rsi.loc[rsi.index >= sd]
    momentum = momentum.loc[momentum.index >= sd]
    rel_std = rel_std.loc[rel_std.index >= sd]
    # Mark SMA crossings: +1 when price crosses above its SMA, -1 below.
    sma_cross = pd.DataFrame(0, index=sma.index, columns=sma.columns)
    sma_cross[prices >= sma] = 1  # days when price is higher
    sma_cross[1:] = sma_cross.diff()
    sma_cross.iloc[0] = 0
    # Zero out the spurious "cross" on the first day the SMA is defined.
    # NOTE(review): .ix is removed in modern pandas; this code assumes an
    # older pandas version — confirm before upgrading.
    debug = sma_cross.ix[(np.isfinite(sma.ix[:, prices.columns[0]].values))]
    firstNonNan = (debug.index)[0]
    sma_cross.ix[firstNonNan] = 0
    positions = prices.copy()
    positions[:] = np.nan
    # Entry/exit rules over sma (vs price), bbp, rel_std, rsi, momentum:
    # oversold + calm market -> long; overbought + calm -> short; an SMA
    # cross with strong momentum -> close the position.
    positions[(prices / sma < 0.95) & (bbp < 0) & (rel_std <= 0.07) & (rsi < 35) & (momentum < -0.1)] = 1
    positions[(prices / sma > 1.05) & (bbp > 1) & (rel_std <= 0.07) & (rsi > 60) & (momentum > 0.12)] = -1
    positions[(sma_cross != 0) & (rel_std <= 0.06) & (momentum.abs() > 0.2)] = 0
    # Hold the last signal between signals; start flat.
    positions.ffill(inplace=True)
    positions.fillna(0, inplace=True)
    # Convert positions (in units of 1000 shares) into daily trade deltas.
    # (The previously computed-but-unused `holdings` cumsum was removed.)
    df_trades = positions.copy()
    df_trades[1:] = df_trades.diff() * 1000
    return df_trades
"yubailibra@yahoo.com"
] | yubailibra@yahoo.com |
d1d81bf00ebe1dfccd8e1c08d0c0757afe92f0ab | 5db12cd9e0be631afe739102b18254e15761a4e2 | /studios/account/admin.py | c7d369b9a6770535ed452f5182cb8a7bd51346b8 | [] | no_license | mustgern/sitegood | 60646544ccf762a0729f85f52f74dbcf99ed24f2 | 6aba72a7812739fb9ea6460ebc9b1641321ea50b | refs/heads/master | 2022-11-22T22:29:19.401462 | 2020-07-30T10:25:29 | 2020-07-30T10:25:29 | 283,474,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.contrib import admin
from .models import Users
# Register your models here.
admin.site.register(Users)
| [
"noreply@github.com"
] | noreply@github.com |
5d506815f5dc84a3bd1828dcf28536a7cf28de68 | e603275243725c6ef7e3d6a608994b773c79d853 | /third-party/stanza/stanza/research/metrics.py | e4ff49dcef1cb1eb413bb15a5a961756a7cd919e | [
"Apache-2.0"
] | permissive | futurulus/rl-cards | 07adad2c2572aaa58eddb90ea916b564ee16092b | 080d9c043b77794e9306111d8c3af82ad9ef4a8f | refs/heads/master | 2020-04-12T09:35:03.809920 | 2017-04-11T21:57:30 | 2017-04-11T21:57:30 | 64,797,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,314 | py | import numpy as np
'''
import warnings
try:
import nltk.translate.bleu_score as nltk_bleu
except ImportError as e:
warnings.warn('Cannot import nltk; BLEU will be unavailable: ' + str(e))
nltk_bleu = None
'''
from .bleu import corpus_bleu
from .instance import Instance # NOQA: for doctest
from .learner import Learner # NOQA: for doctest
def log_likelihood(eval_data, predictions, scores, learner='ignored'):
    """Return the log likelihood of each correct output.

    This is simply the per-instance score passed in `scores`.

    >>> log_likelihood(None, None, [-0.5, -1.0, -2.0])
    [-0.5, -1.0, -2.0]
    """
    return scores
def log_likelihood_bits(eval_data, predictions, scores, learner='ignored'):
    """Return each correct output's log likelihood converted to base 2 (bits).

    `scores` is assumed to be in base e (nats).

    >>> bits = log_likelihood_bits(None, None, [np.log(0.5), np.log(0.125), np.log(0.25)])
    >>> [round(b) for b in bits]
    [-1, -3, -2]
    """
    nats = np.asarray(scores)
    return (nats / np.log(2.0)).tolist()
def accuracy(eval_data, predictions, scores='ignored', learner='ignored'):
    """Return 1/0 exact-match accuracy for each prediction.

    Each element is 1 if the prediction equals the corresponding
    instance's `output`, 0 otherwise.
    """
    results = []
    for inst, pred in zip(eval_data, predictions):
        results.append(int(inst.output == pred))
    return results
def prec1(eval_data, predictions, scores='ignored', learner='ignored'):
    """Return precision@1 for each prediction.

    Each element is 1 if the prediction matches *any* of the correct
    outputs listed in the corresponding instance's `output`, 0 otherwise.
    """
    results = []
    for inst, pred in zip(eval_data, predictions):
        hit = any(o == pred for o in inst.output)
        results.append(int(hit))
    return results
def bleu(eval_data, predictions, scores='ignored', learner='ignored'):
    '''
    Return corpus-level BLEU score of `predictions` using the `output`
    field of the instances in `eval_data` as references. This is returned
    as a length-1 list of floats.

    This uses the NLTK unsmoothed implementation, which has been known
    to have some bugs. This function patches over the biggest bug, which
    is that NLTK ignores n-gram overlap counts of 0 (this should result
    in a zero BLEU score).

    >>> data = [Instance('input', 'this is the good'),
    ...         Instance('input', 'the bad'),
    ...         Instance('input', 'and the ugly')]
    >>> bleu(data, ['this is the good', 'the good', 'seriously really good'])  # doctest: +ELLIPSIS
    [0.65599...]
    >>> np.exp(np.mean([np.log(5. / 9.), np.log(3. / 6.),
    ...                 np.log(2. / 3.), np.log(1. / 1.)]))  # doctest: +ELLIPSIS
    0.65599...
    '''
    # Each instance's output may be a single string (one reference) or a
    # sequence of references; either way it is normalised to a list of
    # token lists.  NOTE: `basestring` makes this Python-2-only as written.
    ref_groups = ([inst.output.split()]
                  if isinstance(inst.output, basestring) else
                  [_maybe_tokenize(r) for r in inst.output]
                  for inst in eval_data)
    # corpus_bleu expects (reference_groups, hypotheses); returned as a
    # one-element list so it aggregates like the other per-corpus metrics.
    return [corpus_bleu(ref_groups, [p.split() for p in predictions])]
def _has_4gram_match(ref, pred):
'''
>>> _has_4gram_match(['four', 'lovely', 'tokens', 'here'],
... ['four', 'lovely', 'tokens', 'here'])
True
>>> _has_4gram_match(['four', 'lovely', 'tokens', 'here'],
... ['four', 'lovely', 'tokens', 'here', 'and', 'there'])
True
>>> _has_4gram_match(['four', 'lovely', 'tokens', 'here'],
... ['four', 'ugly', 'tokens', 'here'])
False
>>> _has_4gram_match(['four', 'lovely', 'tokens'],
... ['lovely', 'tokens', 'here'])
False
'''
if len(ref) < 4 or len(pred) < 4:
return False
for i in range(len(ref) - 3):
for j in range(len(pred) - 3):
if ref[i:i + 4] == pred[j:j + 4]:
return True
return False
def squared_error(eval_data, predictions, scores='ignored', learner='ignored'):
    """Return the squared error of each prediction against its instance's
    correct `output` (element-wise difference, squared, summed).
    """
    errors = []
    for inst, pred in zip(eval_data, predictions):
        diff = np.array(pred) - np.array(inst.output)
        errors.append(np.sum(diff ** 2))
    return errors
def perplexity(eval_data, predictions, scores, learner='ignored'):
    """Return per-instance perplexity `exp(-score)` for base-e `scores`.

    The correct aggregate for this metric is the geometric mean (work in
    log space, or use `scipy.stats.mstats.gmean`).

    >>> perplexities = perplexity(None, None, [np.log(0.5), np.log(0.1), np.log(0.25)])
    >>> [round(p) for p in perplexities]
    [2, 10, 4]
    """
    neg_scores = -np.asarray(scores)
    return np.exp(neg_scores).tolist()
def token_perplexity_macro(eval_data, predictions, scores, learner='ignored'):
    """Return per-token perplexity `exp(-score / num_tokens)` per instance.

    The token count includes one extra token per sequence (presumably an
    end-of-sequence marker).  The correct macro-average is the geometric
    mean.
    """
    # +1 accounts for the implicit end-of-sequence token.
    num_tokens = np.array([len(_maybe_tokenize(inst.output)) + 1
                           for inst in eval_data])
    return np.exp(-np.array(scores) / num_tokens).tolist()
def token_perplexity_micro(eval_data, predictions, scores, learner='ignored'):
    """Return the micro-averaged per-token perplexity over the whole corpus,
    as a length-1 list.

    Equivalent to a geometric mean over tokens: each instance's per-token
    log perplexity is weighted by its token count (+1 per sequence for the
    implicit end-of-sequence token).  `scores` are base-e log likelihoods.
    """
    num_tokens = np.array([len(_maybe_tokenize(inst.output)) + 1
                           for inst in eval_data])
    weighted_mean = np.average(-np.array(scores) / num_tokens,
                               weights=num_tokens)
    return [np.exp(weighted_mean)]
def _maybe_tokenize(seq):
    # Accept either a raw string (tokenized on whitespace) or an
    # already-tokenized sequence, which is returned unchanged.
    # NOTE(review): `basestring` makes this Python-2-only as written;
    # replacing it with `str` would change py2 unicode handling.
    if isinstance(seq, basestring):
        return seq.split()
    else:
        return seq
def aic(eval_data, predictions, scores, learner):
    """Return Akaike information criterion (AIC) contributions:

        aic = 2 * learner.num_params - 2 * sum(log_2(exp(scores)))

    The result is one element *longer* than `scores`: the final element is
    the parameter-count penalty, the rest are per-instance negative log
    likelihoods in base 2.  Aggregate by summing the list.
    """
    nll_bits = (-2.0 * np.array(scores) / np.log(2.0)).tolist()
    param_penalty = 2.0 * float(learner.num_params)
    return nll_bits + [param_penalty]
def aic_averaged(eval_data, predictions, scores, learner):
    """Return AIC contributions with the parameter penalty amortized.

        aic = 2 * learner.num_params - 2 * sum(log_2(exp(scores)))

    Unlike `aic`, the result has the same length as `scores`: the penalty
    is split evenly across instances, so `aic` and `aic_averaged` share a
    mean but yield different-size lists.  Aggregate by summing the list.
    """
    scores_arr = np.array(scores)
    per_item_penalty = 2.0 * float(learner.num_params) / len(scores_arr)
    return (per_item_penalty - 2.0 * scores_arr / np.log(2.0)).tolist()
# Registry of all public metric functions in this module, keyed by name.
# Built via introspection: every module-level name except the listed
# imports and private (underscore-prefixed) helpers is included, so any
# new metric function defined above is registered automatically.
METRICS = {
    name: globals()[name]
    for name in dir()
    if (name not in ['np', 'corpus_bleu', 'Instance', 'Learner']
        and not name.startswith('_'))
}
| [
"wmonroe4@stanford.edu"
] | wmonroe4@stanford.edu |
7ff5ace33b7b5f94bd27e78e54a51bb4adfe7e97 | e58fcc1467ad81084b016d2a48d672d75da2c058 | /rdkit/Code/DataStructs/Wrap/testSparseIntVect.py | 3cc02547f6b0be4e87ec009699478e2eb5f412f7 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ali1810/sol_heroku | 294375d70c656452749e959bfb851a50defc0e01 | 97b548ce7d864e6fed936c53b790c1dc8038cff2 | refs/heads/main | 2023-08-15T06:18:26.933254 | 2021-09-14T10:20:19 | 2021-09-14T10:20:19 | 405,223,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,585 | py | # $Id$
#
# Copyright (C) 2007,2008 Greg Landrum
#
# @@ All Rights Reserved @@
#
import os, sys
import io
import unittest
import pickle
from rdkit import RDConfig
from rdkit import DataStructs as ds
import random
def feq(v1, v2, tol=1e-4):
  """Approximate float equality: True iff |v1 - v2| < tol."""
  delta = v1 - v2
  return abs(delta) < tol
class TestCase(unittest.TestCase):
  """Unit tests for the SparseIntVect wrappers in rdkit.DataStructs."""
  def setUp(self):
    pass
  def test1Int(self):
    """Basic get/set, bounds, length, equality and |-merging of IntSparseIntVect."""
    v1 = ds.IntSparseIntVect(5)
    self.assertRaises(IndexError, lambda: v1[5])
    v1[0] = 1
    v1[2] = 2
    v1[3] = 3
    self.assertTrue(v1 == v1)
    self.assertTrue(v1.GetLength() == 5)
    v2 = ds.IntSparseIntVect(5)
    self.assertTrue(v1 != v2)
    v2 |= v1
    self.assertTrue(v2 == v1)
    v3 = v2 | v1
    self.assertTrue(v3 == v1)
    onVs = v1.GetNonzeroElements()
    self.assertTrue(onVs == {0: 1, 2: 2, 3: 3})
  def test2Long(self):
    """Same coverage as test1Int but for LongSparseIntVect with huge indices."""
    l = 1 << 42
    v1 = ds.LongSparseIntVect(l)
    self.assertRaises(IndexError, lambda: v1[l])
    v1[0] = 1
    v1[2] = 2
    v1[1 << 35] = 3
    self.assertTrue(v1 == v1)
    self.assertTrue(v1.GetLength() == l)
    v2 = ds.LongSparseIntVect(l)
    self.assertTrue(v1 != v2)
    v2 |= v1
    self.assertTrue(v2 == v1)
    v3 = v2 | v1
    self.assertTrue(v3 == v1)
    onVs = v1.GetNonzeroElements()
    self.assertTrue(onVs == {0: 1, 2: 2, 1 << 35: 3})
  def test3Pickle1(self):
    """Pickle/ToBinary round-trips for LongSparseIntVect, incl. a stored fixture."""
    l = 1 << 42
    v1 = ds.LongSparseIntVect(l)
    self.assertRaises(IndexError, lambda: v1[l + 1])
    v1[0] = 1
    v1[2] = 2
    v1[1 << 35] = 3
    self.assertTrue(v1 == v1)
    v2 = pickle.loads(pickle.dumps(v1))
    self.assertTrue(v2 == v1)
    v3 = ds.LongSparseIntVect(v2.ToBinary())
    self.assertTrue(v2 == v3)
    self.assertTrue(v1 == v3)
    #pickle.dump(v1,file('lsiv.pkl','wb+'))
    # Load a pickle checked into the repo to guard cross-version stability;
    # line endings are normalised before unpickling.
    with open(os.path.join(RDConfig.RDBaseDir, 'Code/DataStructs/Wrap/testData/lsiv.pkl'),
              'r') as tf:
      buf = tf.read().replace('\r\n', '\n').encode('utf-8')
      tf.close()
    with io.BytesIO(buf) as f:
      v3 = pickle.load(f)
    self.assertTrue(v3 == v1)
  def test3Pickle2(self):
    """Pickle/ToBinary round-trips for IntSparseIntVect, incl. a stored fixture."""
    l = 1 << 21
    v1 = ds.IntSparseIntVect(l)
    self.assertRaises(IndexError, lambda: v1[l + 1])
    v1[0] = 1
    v1[2] = 2
    v1[1 << 12] = 3
    self.assertTrue(v1 == v1)
    v2 = pickle.loads(pickle.dumps(v1))
    self.assertTrue(v2 == v1)
    v3 = ds.IntSparseIntVect(v2.ToBinary())
    self.assertTrue(v2 == v3)
    self.assertTrue(v1 == v3)
    #pickle.dump(v1,file('isiv.pkl','wb+'))
    # As above, verify against a fixture pickle shipped with the sources.
    with open(os.path.join(RDConfig.RDBaseDir, 'Code/DataStructs/Wrap/testData/isiv.pkl'),
              'r') as tf:
      buf = tf.read().replace('\r\n', '\n').encode('utf-8')
      tf.close()
    with io.BytesIO(buf) as f:
      v3 = pickle.load(f)
    self.assertTrue(v3 == v1)
  def test4Update(self):
    """UpdateFromSequence increments counts per occurrence of each index."""
    v1 = ds.IntSparseIntVect(5)
    self.assertRaises(IndexError, lambda: v1[6])
    v1[0] = 1
    v1[2] = 2
    v1[3] = 3
    self.assertTrue(v1 == v1)
    v2 = ds.IntSparseIntVect(5)
    v2.UpdateFromSequence((0, 2, 3, 3, 2, 3))
    self.assertTrue(v1 == v2)
  def test5Dice(self):
    """DiceSimilarity: self-similarity is 1.0 and the metric is symmetric."""
    v1 = ds.IntSparseIntVect(5)
    v1[4] = 4
    v1[0] = 2
    v1[3] = 1
    self.assertTrue(feq(ds.DiceSimilarity(v1, v1), 1.0))
    v1 = ds.IntSparseIntVect(5)
    v1[0] = 2
    v1[2] = 1
    v1[3] = 4
    v1[4] = 6
    v2 = ds.IntSparseIntVect(5)
    v2[1] = 2
    v2[2] = 3
    v2[3] = 4
    v2[4] = 4
    self.assertTrue(feq(ds.DiceSimilarity(v1, v2), 18.0 / 26.))
    self.assertTrue(feq(ds.DiceSimilarity(v2, v1), 18.0 / 26.))
  def test6BulkDice(self):
    """BulkDiceSimilarity agrees with pairwise DiceSimilarity on random vectors."""
    sz = 10
    nToSet = 5
    nVs = 6
    import random
    vs = []
    for i in range(nVs):
      v = ds.IntSparseIntVect(sz)
      for j in range(nToSet):
        v[random.randint(0, sz - 1)] = random.randint(1, 10)
      vs.append(v)
    baseDs = [ds.DiceSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
    bulkDs = ds.BulkDiceSimilarity(vs[0], vs[1:])
    for i in range(len(baseDs)):
      self.assertTrue(feq(baseDs[i], bulkDs[i]))
  def test6BulkTversky(self):
    """Bulk Tversky matches pairwise; (.5,.5) reduces to Dice, (1,1) to Tanimoto."""
    sz = 10
    nToSet = 5
    nVs = 6
    import random
    vs = []
    for i in range(nVs):
      v = ds.IntSparseIntVect(sz)
      for j in range(nToSet):
        v[random.randint(0, sz - 1)] = random.randint(1, 10)
      vs.append(v)
    baseDs = [ds.TverskySimilarity(vs[0], vs[x], .5, .5) for x in range(1, nVs)]
    bulkDs = ds.BulkTverskySimilarity(vs[0], vs[1:], 0.5, 0.5)
    diceDs = [ds.DiceSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
    for i in range(len(baseDs)):
      self.assertTrue(feq(baseDs[i], bulkDs[i]))
      self.assertTrue(feq(baseDs[i], diceDs[i]))
    bulkDs = ds.BulkTverskySimilarity(vs[0], vs[1:], 1.0, 1.0)
    taniDs = [ds.TanimotoSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
    for i in range(len(bulkDs)):
      self.assertTrue(feq(bulkDs[i], taniDs[i]))
    taniDs = ds.BulkTanimotoSimilarity(vs[0], vs[1:])
    for i in range(len(bulkDs)):
      self.assertTrue(feq(bulkDs[i], taniDs[i]))
  def test7ToList(self):
    """Iteration and ToList both yield the full dense value list."""
    l = [0]*2048
    nbits = 2048
    bv = ds.IntSparseIntVect(nbits)
    for j in range(nbits):
      x = random.randrange(0, nbits)
      l[x] = x
      bv[x] = x
    l2 = list(bv)
    l3 = bv.ToList()
    self.assertEqual(l, l2)
    self.assertEqual(l, l3)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
  unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.