blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1174cf6f242fcf0de6c0b28dfeaa51c1d81c9911 | f3046b51f3ee47dfaf6c203ac066d60c5f4bd345 | /src/TextClassifier/main_q_s.py | 31eea427e236fcb5236ce660ee5f87e5142f7558 | [] | no_license | manulaAtapattu/FYP_2 | ae1e4b1c5b1075bc5d449839f8595673e2c28b01 | dcdbc092b8a1202df6be993e329a7c1f84537fa0 | refs/heads/master | 2020-09-09T04:21:54.771491 | 2019-12-15T11:05:50 | 2019-12-15T11:05:50 | 220,775,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,300 | py | import matplotlib.pyplot as plt
import numpy as np
import os
import re
import string
import keras
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
plt.style.use('seaborn')
# WORD-level
# MAX_NUM_WORDS = 15000
MAX_NUM_WORDS = 100000
EMBEDDING_DIM = 200
#MAX_SEQ_LENGTH = 500
MAX_SEQ_LENGTH = 200
USE_GLOVE = True
#KERNEL_SIZES = [3,4,5]
KERNEL_SIZES = [2,3]
#FEATURE_MAPS = [100,100,100]
FEATURE_MAPS = [50,50]
# CHAR-level
USE_CHAR = False
ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
ALPHABET_SIZE = len(ALPHABET)
CHAR_MAX_LENGTH = 1600
CHAR_KERNEL_SIZES = [5,10,20]
CHAR_FEATURE_MAPS = [100,100,100]
# GENERAL
DROPOUT_RATE = 0.5
HIDDEN_UNITS = 200
NB_CLASSES = 2
# LEARNING
BATCH_SIZE = 64
NB_EPOCHS = 10
RUNS = 5
VAL_SIZE = 0.3
def clean_doc(doc):
"""
Cleaning a document by several methods:
- Lowercase
- Removing whitespaces
- Removing numbers
- Removing stopwords
- Removing punctuations
- Removing short words
"""
# stop_words = set(stopwords.words('english'))
# Lowercase
doc = doc.lower()
# Remove numbers
# doc = re.sub(r"[0-9]+", "", doc)
# Split in tokens
tokens = doc.split()
# Remove Stopwords
# tokens = [w for w in tokens if not w in stop_words]
# Remove punctuation
# tokens = [w.translate(str.maketrans('', '', string.punctuation)) for w in tokens]
# Tokens with less then two characters will be ignored
# tokens = [word for word in tokens if len(word) > 1]
return ' '.join(tokens)
def read_files(path):
documents = list()
# Read in all files in directory
if os.path.isdir(path):
for filename in os.listdir(path):
with open('%s/%s' % (path, filename), encoding="utf8") as f:
doc = f.read()
doc = clean_doc(doc)
documents.append(doc)
# Read in all lines in a txt file
if os.path.isfile(path):
with open(path, encoding='iso-8859-1') as f:
doc = f.readlines()
for line in doc:
documents.append(clean_doc(line))
return documents
## Sentence polarity dataset v1.0
#negative_docs = read_files('data/rt-polaritydata/rt-polarity.neg')
#positive_docs = read_files('data/rt-polaritydata/rt-polarity.pos')
## IMDB
# negative_docs = read_files('data/imdb/train/neg/')
# positive_docs = read_files('data/imdb/train/pos')
# negative_docs_test = read_files('data/imdb/test/neg')
# positive_docs_test = read_files('data/imdb/test/pos')
# ## FYP
negative_docs = read_files('data/fyp/train/negative.txt')
positive_docs = read_files('data/fyp/train/positive.txt')
complete_docs = read_files('data/fyp/train/complete.txt')
neutral_docs = read_files('data/fyp/train/neutral.txt')
# question and statement classifier
# questions = read_files('data/q_s/train/questions.txt')
# statements = read_files('data/q_s/train/statements.txt')
# negative_docs_test = read_files('data/fyp/test/negative.txt')
# positive_docs_test = read_files('data/fyp/test/positive.txt')
# complete_docs_test = read_files('data/fyp/test/complete.txt')
# neutral_docs_test = read_files('data/fyp/test/neutral.txt')
## Yelp
#negative_docs = read_files('data/yelp/neg.txt')
#positive_docs = read_files('data/yelp/pos.txt')
#negative_docs_test = negative_docs[300000:]
#positive_docs_test = positive_docs[300000:]
#negative_docs = negative_docs[:300000]
#positive_docs = positive_docs[:300000]
#equalize length
#arr = [negative_docs,positive_docs,complete_docs,neutral_docs]
# maxLength = int(max([len(i) for i in arr]))
# adjustments = []
# for j in arr:
# adjustments.append(int(maxLength/len(j)))
#
# negative_docs *=adjustments[0]
# positive_docs *=adjustments[1]
# complete_docs *=adjustments[2]
# neutral_docs *=adjustments[3]
#docs = negative_docs + positive_docs
docs = negative_docs + positive_docs + complete_docs + neutral_docs
#docs = questions + statements
#labels = [0 for _ in range(len(negative_docs))] + [1 for _ in range(len(positive_docs))]
labels = [0 for _ in range(len(negative_docs))] + [1 for _ in range(len(positive_docs))] + [2 for _ in range(len(complete_docs))] + [3 for _ in range(len(neutral_docs))]
#labels = [0 for _ in range(len(questions))] + [1 for _ in range(len(statements))]
c = list(zip(docs, labels))
import random
random.shuffle(c)
docs, labels = zip(*c)
labels = keras.utils.to_categorical(labels)
print('labels : ',labels)
print('Training samples: %i' % len(docs))
tokenizer = keras.preprocessing.text.Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(docs)
sequences = tokenizer.texts_to_sequences(docs)
word_index = tokenizer.word_index
result = [len(x.split()) for x in docs]
# Plot histogram
plt.figure(figsize=(20,5))
plt.title('Document length')
plt.hist(result, 200, density=False, range=(0,np.max(result)))
#plt.show()
print('Text informations:')
print('max length: %i / min length: %i / mean length: %i / limit length: %i' % (np.max(result),
np.min(result),
np.mean(result),
MAX_SEQ_LENGTH))
print('vacobulary size: %i / limit: %i' % (len(word_index), MAX_NUM_WORDS))
# Padding all sequences to same length of `MAX_SEQ_LENGTH`
word_data = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=MAX_SEQ_LENGTH, padding='post')
if USE_CHAR:
char2idx_dict = {}
idx2char_dict = {}
for idx, char in enumerate(ALPHABET):
char2idx_dict[char] = idx + 1
idx2char_dict = dict([(i + 1, char) for i, char in enumerate(char2idx_dict)])
# Get informations about char length
result = [len(x) for x in docs]
plt.figure(figsize=(20, 5))
plt.title('Char length')
plt.hist(result, 200, density=False, range=(0, np.max(result)))
plt.show()
print('Text informations:')
print('max length: %i / min length: %i / mean length: %i / limit length: %i' % (np.max(result),
np.min(result),
np.mean(result),
CHAR_MAX_LENGTH))
def char_vectorizer(X):
str2idx = np.zeros((len(X), CHAR_MAX_LENGTH), dtype='int64')
for idx, doc in enumerate(X):
max_length = min(len(doc), CHAR_MAX_LENGTH)
for i in range(0, max_length):
c = doc[i]
if c in char2idx_dict:
str2idx[idx, i] = char2idx_dict[c]
return str2idx
def create_glove_embeddings():
print('Pretrained embeddings GloVe is loading...')
embeddings_index = {}
f = open('glove.twitter.27B.%id.txt' % EMBEDDING_DIM, encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors in GloVe embedding' % len(embeddings_index))
embedding_matrix = np.zeros((MAX_NUM_WORDS, EMBEDDING_DIM))
for word, i in tokenizer.word_index.items():
if i >= MAX_NUM_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return keras.layers.Embedding(
input_dim=MAX_NUM_WORDS,
output_dim=EMBEDDING_DIM,
input_length=MAX_SEQ_LENGTH,
weights=[embedding_matrix],
trainable=True,
name="word_embedding"
)
# TRAINING ----------------------------------------------------------------------------------
from cnn_model import CNN
histories = []
for i in range(RUNS):
print('Running iteration %i/%i' % (i + 1, RUNS))
random_state = np.random.randint(1000)
X_train, X_val, y_train, y_val = train_test_split(word_data, labels, test_size=VAL_SIZE, random_state=random_state)
if USE_CHAR:
X_train_c, X_val_c, _, _ = train_test_split(char_vectorizer(docs), labels, test_size=VAL_SIZE,
random_state=random_state)
X_train = [X_train, X_train_c]
X_val = [X_val, X_val_c]
emb_layer = None
if USE_GLOVE:
emb_layer = create_glove_embeddings()
model = CNN(
embedding_layer=emb_layer,
num_words=MAX_NUM_WORDS,
embedding_dim=EMBEDDING_DIM,
kernel_sizes=KERNEL_SIZES,
feature_maps=FEATURE_MAPS,
max_seq_length=MAX_SEQ_LENGTH,
use_char=USE_CHAR,
char_max_length=CHAR_MAX_LENGTH,
alphabet_size=ALPHABET_SIZE,
char_kernel_sizes=CHAR_KERNEL_SIZES,
char_feature_maps=CHAR_FEATURE_MAPS,
dropout_rate=DROPOUT_RATE,
hidden_units=HIDDEN_UNITS,
nb_classes=NB_CLASSES
).build_model()
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(),
metrics=['accuracy']
)
# model.summary()
history = model.fit(
X_train, y_train,
epochs=NB_EPOCHS,
batch_size=BATCH_SIZE,
validation_data=(X_val, y_val),
callbacks=[
keras.callbacks.ModelCheckpoint(
# 'model-%i.h5' % (i + 1), monitor='val_loss', verbose=1, save_best_only=True, mode='min'
'model_q_s.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min'
),
# keras.callbacks.TensorBoard(log_dir='./logs/temp', write_graph=True)
]
)
print()
histories.append(history.history)
# EVALUATION -------------------------------------------------------------------
def get_avg(histories, his_key):
tmp = []
for history in histories:
tmp.append(history[his_key][np.argmin(history['val_loss'])])
return np.mean(tmp)
print('Training: \t%0.4f loss / %0.4f acc' % (get_avg(histories, 'loss'),
get_avg(histories, 'acc')))
print('Validation: \t%0.4f loss / %0.4f acc' % (get_avg(histories, 'val_loss'),
get_avg(histories, 'val_acc')))
def plot_acc_loss(title, histories, key_acc, key_loss):
fig, (ax1, ax2) = plt.subplots(1, 2)
# Accuracy
ax1.set_title('Model accuracy (%s)' % title)
names = []
for i, model in enumerate(histories):
ax1.plot(model[key_acc])
ax1.set_xlabel('epoch')
names.append('Model %i' % (i+1))
ax1.set_ylabel('accuracy')
ax1.legend(names, loc='lower right')
# Loss
ax2.set_title('Model loss (%s)' % title)
for model in histories:
ax2.plot(model[key_loss])
ax2.set_xlabel('epoch')
ax2.set_ylabel('loss')
ax2.legend(names, loc='upper right')
fig.set_size_inches(20, 5)
plt.show()
plot_acc_loss('training', histories, 'acc', 'loss')
plot_acc_loss('validation', histories, 'val_acc', 'val_loss')
| [
"mratapattu.15@cse.mrt.ac.lk"
] | mratapattu.15@cse.mrt.ac.lk |
dee654d6dd62c57d449909085c9e9a389e29d419 | 29b8529e02975c810b8bf7bc0493259bccf415e4 | /Level 3/Landscape Panic.py | bb88d4efc875f37530b6afddc3053f2b5580a244 | [
"MIT"
] | permissive | subhamb123/Python-Projects | 74af72151bc7613dee976c35b477f2947c679d2d | e180a3f503be8375b4cf65cda7fc01cdaaa300e1 | refs/heads/master | 2021-05-29T00:26:34.036862 | 2020-12-26T10:22:16 | 2020-12-26T10:22:16 | 254,294,524 | 1 | 0 | null | 2020-06-15T19:59:37 | 2020-04-09T06:53:36 | Python | UTF-8 | Python | false | false | 568 | py | import tsk
import pygame
pygame.init()
window = pygame.display.set_mode([1018, 573])
sky = tsk.Sprite("outdoor_sky.png", 0, 0)
back_mountains = tsk.Sprite("outdoor_mountain_b.png", 0, 0)
front_mountains = tsk.Sprite("outdoor_mountain_a.png", 0, 0)
foreground = tsk.Sprite("outdoor_foreground.png", 0, 0)
drawing = True
while drawing:
for event in pygame.event.get():
if event.type == pygame.QUIT:
drawing = False
sky.draw()
back_mountains.draw()
front_mountains.draw()
foreground.draw()
pygame.display.flip()
| [
"noreply@github.com"
] | subhamb123.noreply@github.com |
ee6f15e3182bc0c650262c869b4aa170fc6f416d | 40f8107fdd2afa1f9c41d4d02b32298258bd3ae7 | /src/app/cache.py | bdc0c3b9d8a36c32ac9b7ea12af8a10dacd4439c | [
"Apache-2.0"
] | permissive | ConvergeTP/von_tails | 91f62e722325e1a0845e766359dae94de13076d3 | 98ce984b001cd09005b6496ce10687588def53ef | refs/heads/master | 2020-05-30T23:18:30.532496 | 2019-08-21T14:30:58 | 2019-08-21T14:30:58 | 170,901,819 | 0 | 0 | Apache-2.0 | 2019-04-02T18:01:45 | 2019-02-15T17:18:29 | Python | UTF-8 | Python | false | false | 705 | py | """
Copyright 2017-2019 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from aiocache import SimpleMemoryCache
MEM_CACHE = SimpleMemoryCache()
| [
"srklump@hotmail.com"
] | srklump@hotmail.com |
9d87f084d9a02881509f3b986c12f3641d981396 | e9d15cf96d9f03761b8cda870973d1f62d1c30bb | /pp/rgbmf.py | 31e7a08e4e931970e39dc0e3ff83a60b6b828863 | [] | no_license | gkoundry/glen | 32ecb9a40c7e14ba1c1a2b603b024a7a0a7264e1 | f1bf76a2a247e19803ba4b8b3a55eea33b76708e | refs/heads/master | 2020-05-09T17:07:30.657019 | 2014-11-18T14:28:41 | 2014-11-18T14:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py | import pandas
import cPickle
import numpy as np
from sklearn.preprocessing import Imputer
from math import log
import sys
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
from rpy2.robjects.numpy2ri import numpy2ri
robjects.conversion.py2ri = numpy2ri
from rpy2.robjects.packages import importr
rgbm = importr("gbm")
LR=0.03
COL='E'
dist='bernoulli'
#dist='adaboost'
X=pandas.read_csv("train6%s.csv" % (COL,))
rid=X.pop('id').apply(str)
wt=X.pop('wt')
rest=X.pop('rest')
cvp = ([r'[12467]$',r'[35890]$'],[r'[35890]$',r'[12467]$'])
ls=X.pop('ls')
csls=X.pop('csls')
y=X.pop('y')
imp = Imputer(strategy='most_frequent')
for mf in (9,):
for mn in (30,):
ttr = 0
for tr in (500,):
scp = 0
scl = 0
rsp = 0
rsl = 0
tot = 0
for cv in cvp:
train=rid.str.contains(cv[0])
test=rid.str.contains(cv[1])
#train = np.logical_and(train,np.logical_or(np.logical_and(y.values!=ls.values,rest.values==1),np.random.randint(0,1000,train.shape[0])<200))
xtrain = X.values[train]
xtrain = imp.fit_transform(xtrain)
xtest = X.values[test]
xtest = imp.transform(xtest)
ytrain = (rest.values[train]==1).astype(int)
ytrain2 = y.values[train]
print str(sum(ytrain.tolist()))+'/'+str(xtrain.shape)
#ytest = y.values[test]
ytest = rest.values[test]
ytest2 = y.values[test]
rtest = rest.values[test]
lstest = ls.values[test]
idtest = rid.values[test].astype(float)
m=rgbm.gbm_fit(xtrain,ytrain,nTrain=xtrain.shape[0],bag_fraction=1,n_trees=tr,verbose=False,keep_data=True,n_minobsinnode=mn,distribution=dist,interaction_depth=mf,shrinkage=LR) #, w=wtrain[rows]*2.0)
pp = np.array(rgbm.predict_gbm(m,xtest,n_trees=tr,type="response"))
m2=rgbm.gbm_fit(xtrain,ytrain2,nTrain=xtrain.shape[0],bag_fraction=1,n_trees=tr,verbose=False,keep_data=True,n_minobsinnode=mn,distribution=dist,interaction_depth=mf,shrinkage=LR) #, w=wtrain[rows]*2.0)
pp2 = np.array(rgbm.predict_gbm(m2,xtest,n_trees=tr,type="response"))
print pp.shape
for i,a in enumerate(ytest):
if a==1:
scl += 1
if lstest[i]==ytest2[i]:
rsl += 1
if int(pp[i]>0.5)==a:
scp += 1
if a==1:
if pp[i]>0.6:
print '%f %d %f %d %d %d' % (pp[i],a,pp2[i],ytest2[i],lstest[i]==int(pp2[i]>0.5),int(pp2[i]>0.5)==ytest2[i])
if int(pp2[i]>0.5)==ytest2[i]:
rsp += 1
else:
if lstest[i]==ytest2[i]:
rsp += 1
tot += 1
print 'tr%d mf%d mn%d scp %d scl %d rsp %d rsl %d tot %d' % (tr,mf,mn,scp,scl,rsp,rsl,tot)
sys.stdout.flush()
| [
"glen@datarobot.com"
] | glen@datarobot.com |
f45a7c021c917211499697638ecd5c9ba9364482 | 24d374628f25549b3162672826dfd5e58f9bd952 | /test.py | 0526b70107b60bf0ba8811a9d9b461e875f0d0f0 | [] | no_license | XiaojiaoLiu/Test_project | 4c2fb7d274494ed8d440559fbf04910dfbd81f49 | 9da0dd584340b662c8ca0c4e965ec691884a5c44 | refs/heads/master | 2021-01-22T13:57:48.368654 | 2015-01-31T18:50:10 | 2015-01-31T18:50:10 | 29,825,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | ##encoding=utf8
from __future__ import print_function
print("hello kitty")
| [
"lxjiao0805@126.com"
] | lxjiao0805@126.com |
5cf1d0203417d378aac698a22fa0890bb4bffcae | ba0e07b34def26c37ee22b9dac1714867f001fa5 | /azure-mgmt-web/azure/mgmt/web/operations/recommendations_operations.py | c3e0db7d769a8e215a861a5b4665f4787afeeee7 | [
"MIT"
] | permissive | CharaD7/azure-sdk-for-python | b11a08ac7d24a22a808a18203072b4c7bd264dfa | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | refs/heads/master | 2023-05-12T12:34:26.172873 | 2016-10-26T21:35:20 | 2016-10-26T21:35:20 | 72,448,760 | 1 | 0 | MIT | 2023-05-04T17:15:01 | 2016-10-31T15:14:09 | Python | UTF-8 | Python | false | false | 14,493 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class RecommendationsOperations(object):
"""RecommendationsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_recommendation_by_subscription(
self, featured=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of recommendations associated with the specified
subscription.
:param featured: If set, this API returns only the most critical
recommendation among the others. Otherwise this API returns all
recommendations available
:type featured: bool
:param filter: Return only channels specified in the filter. Filter
is specified by using OData syntax. Example: $filter=channels eq
'Api' or channel eq 'Notification'
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`Recommendation
<azure.mgmt.web.models.Recommendation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if featured is not None:
query_parameters['featured'] = self._serialize.query("featured", featured, 'bool')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[Recommendation]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_rule_details_by_site_name(
self, resource_group_name, site_name, name, custom_headers=None, raw=False, **operation_config):
"""Gets the detailed properties of the recommendation object for the
specified web site.
:param resource_group_name: Resource group name
:type resource_group_name: str
:param site_name: Site name
:type site_name: str
:param name: Recommendation rule name
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RecommendationRule
<azure.mgmt.web.models.RecommendationRule>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/recommendations/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RecommendationRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_recommended_rules_for_site(
self, resource_group_name, site_name, featured=None, site_sku=None, num_slots=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of recommendations associated with the specified web site.
:param resource_group_name: Resource group name
:type resource_group_name: str
:param site_name: Site name
:type site_name: str
:param featured: If set, this API returns only the most critical
recommendation among the others. Otherwise this API returns all
recommendations available
:type featured: bool
:param site_sku: The name of site SKU.
:type site_sku: str
:param num_slots: The number of site slots associated to the site
:type num_slots: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`Recommendation
<azure.mgmt.web.models.Recommendation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/recommendations'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if featured is not None:
query_parameters['featured'] = self._serialize.query("featured", featured, 'bool')
if site_sku is not None:
query_parameters['siteSku'] = self._serialize.query("site_sku", site_sku, 'str')
if num_slots is not None:
query_parameters['numSlots'] = self._serialize.query("num_slots", num_slots, 'int')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[Recommendation]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_recommendation_history_for_site(
self, resource_group_name, site_name, start_time=None, end_time=None, custom_headers=None, raw=False, **operation_config):
"""Gets the list of past recommendations optionally specified by the time
range.
:param resource_group_name: Resource group name
:type resource_group_name: str
:param site_name: Site name
:type site_name: str
:param start_time: The start time of a time range to query, e.g.
$filter=startTime eq '2015-01-01T00:00:00Z' and endTime eq
'2015-01-02T00:00:00Z'
:type start_time: str
:param end_time: The end time of a time range to query, e.g.
$filter=startTime eq '2015-01-01T00:00:00Z' and endTime eq
'2015-01-02T00:00:00Z'
:type end_time: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`Recommendation
<azure.mgmt.web.models.Recommendation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/recommendationHistory'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'str')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[Recommendation]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
caa1bab989647808316c09990fc13f5e713b386c | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /Bits/nextNumber/getNext.py | 6ebc140b20a361fff4350f249e1d6384893f8d31 | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | # Given a positive number, print the next smallest and the next
# largest number that have the same number of 1 bits in their
# binary representation.
def getNext(n):
c = n
# let p denote the position of first non-trailing 0 ie a zero which is followed by 1s
c0 = 0 # number of zeros to right of position p
c1 = 0 # number of ones to right of position p
# while there are training zeros and c > 0
while (c & 1) == 0 and (c != 0):
c0 += 1
c >>= 1
while (c & 1) == 1:
c1 += 1
c >>= 1
# If n = 111...1100...000, then there is no bigger number with same number of 1s
if c0 + c1 == 31 or c0 + c1 == 0:
return -1
p = c0 + c1
n |= (1 << p) # Flip rightmost non trailing zero
n &= ~((1 << p) - 1) # Clear all bits to right of 1
n |= (1 << (c1 - 1)) - 1 # Insert (c1-1) ones on the right
return n
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
699b728edbe5c238230c82160cd3126f39a42d6e | b8a2cb1d6996f46e541e9f3a387feb3e27e39fdf | /sorting/containsdup3.py | 9017054d0d2a71eda620d0158c38447821763e80 | [] | no_license | TDress/py-algorithms | 489dba40442cd84e10d52d30a843bb660c4e522d | c851a79955130f7cc09943654efcc2ffe3ea69d6 | refs/heads/master | 2018-09-29T14:07:11.584501 | 2018-07-24T14:56:14 | 2018-07-24T14:56:14 | 113,503,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | def _isNAD(numpos1, numpos2, k, t):
pos1, val1 = numpos1
pos2, val2 = numpos2
return (abs(pos1 - pos2) <= k) and (abs(val1 - val2) <= t)
def containsNearbyAlmostDuplicate(nums, k, t):
"""
:type nums: List[int]
:type k: int
:type t: int
:rtype: bool
"""
numpos = [(i, num) for i, num in enumerate(nums)]
numpos.sort(key=lambda t: t[1])
for i in range(len(numpos) - 1):
if _isNAD(numpos[i], numpos[i + 1], k, t):
return True
return False
def main():
    """Smoke test for containsNearbyAlmostDuplicate on a known-True case."""
    nums = [1, 2, 3, 1]
    max_index_gap, max_value_gap = 3, 0
    assert containsNearbyAlmostDuplicate(nums, max_index_gap, max_value_gap)
| [
"Thomas.Dressler1@gmail.com"
] | Thomas.Dressler1@gmail.com |
3cc09a706da3a3bc86607104e7b965444bbc1e0a | 4fc6074b6d5d4cf5e943e9b11ded26a1897b2952 | /Power_prod_data_handling.py | 974adefd754d00685abbbb8a39a1f9705bfc92a4 | [] | no_license | Torbjfug/TTK4260_MultivariatDatamodellering | dd6f4d2f424dcdab5a4fe6d56c1c97b9351a54df | ebb2a5cfa209f5e4c9b56bc2dd05b91ee1142ce9 | refs/heads/master | 2020-12-23T18:20:02.281331 | 2020-02-25T10:17:07 | 2020-02-25T10:17:07 | 237,231,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | import pandas as pd
# Prepare the wind-power production dataset: join the target onto the
# features, one-hot encode categorical columns, split into
# train/validation/test sets, and write each split to CSV.
import sys  # NOTE(review): unused import
import numpy as np  # NOTE(review): unused import
filepath = 'Data/PowerProduction/'
X = pd.read_csv(filepath + 'X.csv', sep=';')
Y = pd.read_csv(filepath + 'Y.csv', sep=';')
print(len(X))
# Categorical columns to expand into one-hot indicator columns.
one_hot_columns = ['IsDayBin', 'Month']
X = X.join(Y['WindPower'])
for col in one_hot_columns:
    one_hot = pd.get_dummies(X[col], prefix=col)
    X = X.join(one_hot)
    X = X.drop(col, axis=1)
X = X.drop('Id', axis=1)
test_percent = 0.1
# NOTE(review): X.sample() is unseeded, so the test split differs on every
# run -- pass random_state for reproducibility.
X_test = X.sample(round(len(X)*test_percent))
X = X.drop(X_test.index)
# Year 2018 is held out as the validation set; everything else is training.
val_mask = X["Year"] == 2018
X_train = X[~val_mask]
X_val = X[val_mask]
Y_val = X_val['WindPower']
Y_test = X_test['WindPower']
Y_train = X_train['WindPower']
# Drop the target and the split key from the feature matrices.
X_test = X_test.drop(['WindPower', 'Year'], axis=1)
X_val = X_val.drop(['WindPower', 'Year'], axis=1)
X_train = X_train.drop(['WindPower', 'Year'], axis=1)
X_train.to_csv(filepath+'X_train.csv', sep=',')
X_test.to_csv(filepath+'X_test.csv', sep=',')
X_val.to_csv(filepath+'X_val.csv', sep=',')
Y_train.to_csv(filepath+'Y_train.csv', sep=',', header=True)
Y_val.to_csv(filepath+'Y_val.csv', sep=',', header=True)
Y_test.to_csv(filepath+'Y_test.csv', sep=',', header=True)
| [
"torbjfug@stud.ntnu.no"
] | torbjfug@stud.ntnu.no |
fe592940da0522d107051269bce908a8c55a816d | e36cd65361ccfed2bd7bf910d57da41028c45a71 | /clase2.py | 0b8dac521488a251811e5b26afc568866506b921 | [] | no_license | fzanollo/algoBio | 3647ed59b298336ed655c770aba874d1fc04804a | 3c6d79f4c1338e60ce7280f37145541d25aa395b | refs/heads/master | 2022-11-29T02:26:29.213324 | 2020-08-05T19:33:53 | 2020-08-05T19:33:53 | 285,385,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,298 | py | from pprint import pprint
def score(vi, wi):
    """Pairwise alignment score for characters vi and wi.

    Returns 1 for a match, -1 for a mismatch, and -2 when either side
    is the gap character '-'.
    """
    gap_penalty = 2       # sigma
    mismatch_penalty = 1  # mu
    if vi == '-' or wi == '-':
        return -gap_penalty
    if vi == wi:
        return 1
    return -mismatch_penalty
def alineamientoLocal(v,w):
    '''Smith-Waterman-style local alignment of strings v and w.

    Fills a score matrix s and a backpointer matrix camino while tracking
    the best-scoring cell seen so far.  Before returning, camino[0][0] is
    set to the sentinel (-1, -1) (which terminates alinear()'s traceback)
    and camino[n-1][m-1] is redirected to the position of the global
    maximum so that the traceback starts from there.

    NOTE(review): the loops iterate over range(1, n) / range(1, m), so
    v[0] and w[0] are never compared (they act as padding) -- confirm this
    off-by-one framing is intended.
    '''
    n = len(v)
    m = len(w)
    # Backpointer matrix: camino[i][j] holds the cell the best path came from.
    camino = [ [ None for i in range(m) ] for j in range(n) ]
    # Score matrix.
    s = [ [ None for i in range(m) ] for j in range(n) ]
    s[0][0] = 0
    # First column and first row get zero scores (free start, as in local
    # alignment) and point back along the border.
    for i in range(1,n):
        s[i][0] = 0
        camino[i][0] = (i-1, 0)
    for j in range(1,m):
        s[0][j] = 0
        camino[0][j] = (0, j-1)
    maxGlobalValor = 0         # best score seen so far
    maxGlobalPosicion = (0,0)  # position of that best score
    for i in range(1,n):
        for j in range(1,m):
            # Candidates: restart at 0, gap (up), gap (left), or
            # match/mismatch (diagonal).  Sorted descending; take the top
            # (ties are broken by the backpointer tuple).
            maxLocal = sorted([
                (0,(0,0)),
                (s[i-1][j] + score(v[i], '-'), (i-1,j)), # up
                (s[i][j-1] + score('-', w[j]), (i, j-1)), # left
                (s[i-1][j-1] + score(v[i], w[j]), (i-1,j-1)) # diagonal
            ], reverse=True)[0]
            s[i][j] = maxLocal[0]
            camino[i][j] = maxLocal[1]
            if s[i][j] > maxGlobalValor:
                # Debug trace of each new global maximum (Spanish output).
                print('max valor viejo', maxGlobalValor, 'nuevo', s[i][j])
                print('max valor pos vieja', maxGlobalPosicion, 'nueva', (i,j))
                maxGlobalValor = s[i][j]
                maxGlobalPosicion = (i,j)
            # Earlier backpointer-reconstruction variant, kept for reference:
            # if s[i][j] == s[i-1][j-1] + score(v[i], w[j]) and v[i] == w[j]:
            #     camino[i][j] = (i-1,j-1) # diagonal
            # elif s[i][j] == s[i-1][j] + score(v[i], '-'):
            #     camino[i][j] = (i-1,j) # up
            # elif s[i][j] == s[i][j-1] + score('-', w[j]):
            #     camino[i][j] = (i, j-1) # left
            # elif s[i][j] == 0:
            #     camino[i][j] = (0,0)
    pprint(s)
    camino[0][0] = (-1,-1)
    camino[n-1][m-1] = maxGlobalPosicion
    return camino
def alinear(camino, v, w):
    '''Traces the backpointer matrix camino from the bottom-right corner and
    builds the two aligned strings plus a human-readable list of moves.

    Returns (vAlineado, wAlineado, caminoSeguido); move labels are Spanish
    runtime strings ('Diag', 'Abajo', 'Der', 'Salto desde ... a ...').
    The loop terminates when it follows the (-1, -1) sentinel that
    alineamientoLocal() stored at camino[0][0].
    '''
    caminoSeguido = []
    vAlineado = wAlineado = ''
    i = len(v)-1
    j = len(w)-1
    while i>=0 or j>=0:
        direccion = camino[i][j]
        if direccion == (i-1,j-1): # diagonal: consume one char from each string
            caminoSeguido.append('Diag'+ str((i,j)))
            vAlineado = v[i] + vAlineado
            wAlineado = w[j] + wAlineado
        elif direccion == (i-1,j): # up: gap in w
            caminoSeguido.append('Abajo'+ str((i,j)))
            vAlineado = v[i] + vAlineado
            wAlineado = '-' + wAlineado
        elif direccion == (i, j-1): # left: gap in v
            caminoSeguido.append('Der'+ str((i,j)))
            vAlineado = '-' + vAlineado
            wAlineado = w[j] + wAlineado
        else:
            # Non-adjacent backpointer (the jump to the global maximum, or
            # the final sentinel): pad both sides with the distance covered.
            caminoSeguido.append('Salto desde '+ str(direccion)+' a '+str((i,j)))
            vAlineado = '-'*(i-direccion[0]) + vAlineado
            wAlineado = '-'*(j-direccion[1]) + wAlineado
        i, j = direccion
    return vAlineado, wAlineado, caminoSeguido[::-1]
v = "YAFDLGYTCMFPVLLGGGELHIVQKETYTAPDEIAHYIKEHGITYIKLTPSLFHTIVNTASFAFDANFESLRLIVLGGEKIIPIDVIAFRKMYGHTEFINHYGPTEATIGA"
w = "AFDVSAGDFARALLTGGQLIVCPNEVKMDPASLYAIIKKYDITIFEATPALVIPLMEYIYEQKLDISQLQILIVGSDSCSMEDFKTLVSRFGSTIRIVNSYGVTEACIDS"
v = "ATGTTATA"
w = "ATCGTCC"
alocal = alineamientoLocal(v,w)
pprint(alocal)
vAlineado, wAlineado, caminoSeguido = alinear(alocal, v, w)
print(vAlineado)
print(wAlineado)
print(caminoSeguido)
| [
"fzanollo@dc.uba.ar"
] | fzanollo@dc.uba.ar |
032935fea5faa717d6af89c63d1eb9d6fd00fda5 | 5164498df453f7c46a3fc6e1c15bda1277fa7158 | /features_featuretools.py | 4b64a5ae8578340613bce03060302571c8fc36bd | [
"MIT"
] | permissive | pnb/naep | 333b17ee4e6714fb44a54f6591c61e96095fa00c | eb01cff143cfe02819f8bec8781ae495501c06ad | refs/heads/master | 2023-03-26T13:14:57.362543 | 2021-03-24T19:05:20 | 2021-03-24T19:05:20 | 195,287,497 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,879 | py | # Automatic feature engineering with featuretools ("deep feature synthesis")
import featuretools as ft
import pandas as pd
import load_data
# Combine all dataframes so that features will be consistent across them
dfs = []
# Suffix each STUDENTID with its source split so the same student appearing
# in several splits stays distinct after concatenation.
dfs.append(load_data.train_full())
dfs[-1]['STUDENTID'] = [str(p) + '_train_30m' for p in dfs[-1].STUDENTID]
dfs.append(load_data.train_10m())
dfs[-1]['STUDENTID'] = [str(p) + '_train_10m' for p in dfs[-1].STUDENTID]
dfs.append(load_data.train_20m())
dfs[-1]['STUDENTID'] = [str(p) + '_train_20m' for p in dfs[-1].STUDENTID]
# Holdout sets get an empty 'label' column so their schema matches training.
dfs.append(load_data.holdout_10m())
dfs[-1]['STUDENTID'] = [str(p) + '_holdout_10m' for p in dfs[-1].STUDENTID]
dfs[-1].insert(7, 'label', '')
dfs.append(load_data.holdout_20m())
dfs[-1]['STUDENTID'] = [str(p) + '_holdout_20m' for p in dfs[-1].STUDENTID]
dfs[-1].insert(7, 'label', '')
dfs.append(load_data.holdout_30m())
dfs[-1]['STUDENTID'] = [str(p) + '_holdout_30m' for p in dfs[-1].STUDENTID]
dfs[-1].insert(7, 'label', '')
df = pd.concat(dfs).reset_index(drop=True)
df = df[['STUDENTID', 'AccessionNumber', 'ItemType', 'Observable', 'EventTime']]
df['row_index'] = df.index
# Featuretools variable types for the event-log columns.
var_types = {
    'STUDENTID': ft.variable_types.Index,
    'AccessionNumber': ft.variable_types.Categorical,
    'ItemType': ft.variable_types.Categorical,
    'Observable': ft.variable_types.Categorical,
    'EventTime': ft.variable_types.TimeIndex,
}
es = ft.EntitySet().entity_from_dataframe('rows', dataframe=df, index='row_index',
                                          time_index='EventTime', variable_types=var_types)
es = es.normalize_entity('rows', 'students', 'STUDENTID')
es = es.normalize_entity('rows', 'items', 'AccessionNumber', additional_variables=['ItemType'])
print('\n', es)
print('\n', es['rows'].variables)
es.plot('features_featuretools/entity_structure.png')
es.add_interesting_values(max_values=10, verbose=True)
# Restrict "interesting" AccessionNumbers to real item ids (they start 'VH').
es['rows']['AccessionNumber'].interesting_values = \
    [v for v in es['rows'].df.AccessionNumber.unique() if v.startswith('VH')]
# Basically all the primitives that seemed to make any sense -- there may be more!
ft.list_primitives().to_csv('features_featuretools/ft_primitives.csv', index=False)
aggregation_primitives = [
    'max',
    'median',
    'mode',
    'time_since_first',
    'sum',
    'avg_time_between',
    'num_unique',
    'skew',
    'min',
    'trend',
    'mean',
    'count',
    'time_since_last',
    'std',
    'entropy',
]
transform_primitives = [
    'time_since_previous',
    'divide_by_feature',
    'greater_than_equal_to',
    'time_since',
    'cum_min',
    'cum_count',
    'month',
    'cum_max',
    'cum_mean',
    'weekday',
    'cum_sum',
    'percentile',
]
feature_matrix, feature_defs = ft.dfs(entityset=es, target_entity='students', verbose=True,
                                      agg_primitives=aggregation_primitives,
                                      trans_primitives=transform_primitives,
                                      where_primitives=aggregation_primitives)
# One-hot encode categorical features where needed
feature_matrix_enc, features_defs_enc = ft.encode_features(feature_matrix, feature_defs)
ft.save_features(features_defs_enc, 'features_featuretools/feature_defs.json')
print(len(features_defs_enc), 'features after one-hot encoding')
# Re-split features into appropriate train/holdout sets
print('Saving features files')
feature_matrix_enc['source_file'] = ''
for pid in feature_matrix_enc.index.unique():
    feature_matrix_enc.at[pid, 'source_file'] = pid[pid.index('_') + 1:]
for source_file, feat_df in feature_matrix_enc.groupby('source_file'):
    # STUDENTID back to normal
    feat_df.insert(0, 'STUDENTID', [p[:p.index('_')] for p in feat_df.index])
    feat_df.reset_index(drop=True).sort_values('STUDENTID').drop(columns=['source_file']) \
        .to_csv('features_featuretools/' + source_file + '.csv', index=False)
| [
"pnigelb@gmail.com"
] | pnigelb@gmail.com |
7a5041bb2d78f7bce8b0fc577ee989af570720e0 | ff52b2b95642fc9f14c2bc1d9d4ee6d6147b70a1 | /config/settings/base.py | 727f73cf4bfd7fd0f924f761d73a94493987a284 | [] | no_license | Srednogorie/data_branch | f36f9aa5c857443d5f5daaa779aed966660a77f9 | d2c8637978bb16bc8387e7aa75d31d0b3237eed4 | refs/heads/master | 2020-03-08T14:40:57.823312 | 2018-04-05T11:10:30 | 2018-04-05T11:10:31 | 128,187,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,551 | py | """
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3  # (data_branch/config/settings/base.py - 3 = data_branch/)
APPS_DIR = ROOT_DIR.path('data_branch')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
    # OS environment variables take precedence over variables from .env
    env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# DATABASES = {
#     'default': env.db('DATABASE_URL'),
# }
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'data_branch',
        'USER': 'macbook',
        # 'PASSWORD': 'mypassword',
        'HOST': 'localhost',
        'PORT': '5432',
    },
    # NOTE(review): remote database credentials (even redacted) should come
    # from environment variables, not source control.
    'data_core': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'data_branch_core',
        'USER': '***',
        'PASSWORD': '***',
        'HOST': '46.101.0.188',
        'PORT': '5432',
    }
}
# Presumably routes data_core app models to the 'data_core' database --
# verify against config/routers.py.
DATABASE_ROUTERS = ['config.routers.DataTablesRouter']
# DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'django.contrib.humanize', # Handy template tags
    'django.contrib.admin',
]
THIRD_PARTY_APPS = [
    'crispy_forms',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    # ... include the providers you want to enable:
    'allauth.socialaccount.providers.google',
    'allauth.socialaccount.providers.twitter',
    'rest_framework',
]
LOCAL_APPS = [
    'data_branch.users.apps.UsersConfig',
    # Your stuff: custom apps go here
    'data_branch.searching.apps.SearchingConfig',
    'data_branch.data_core.apps.DataCoreConfig',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
    'sites': 'data_branch.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
    # https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
    str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = DEFAULT_FROM_EMAIL = 'thedatabranch@gmail.com'
# NOTE(review): hardcoded SMTP password -- move to an environment variable.
EMAIL_HOST_PASSWORD = '***'
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = r'^admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
    ("""Alexander Krachunov - Leno""", 'akrachunov@gmail.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'data_branch.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'data_branch.users.adapters.SocialAccountAdapter'
ACCOUNT_LOGIN_ATTEMPTS_LIMIT = None
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGOUT_ON_GET = True
# Django REST Framework
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
    # 'DEFAULT_AUTHENTICATION_CLASSES': [
    #     'data_core.api.authentication.MyAuthentication',
    #     # 'rest_framework.authentication.BasicAuthentication',
    #     'rest_framework.authentication.SessionAuthentication',
    # ],
    # 'DEFAULT_PERMISSION_CLASSES': [
    #     # 'rest_framework.permissions.IsAuthenticated',
    #     'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
    # ],
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.JSONRenderer',
        'rest_framework_csv.renderers.CSVRenderer',
        'rest_framework_xml.renderers.XMLRenderer',
    ],
    # 'DEFAULT_THROTTLE_CLASSES': [
    #     'rest_framework.throttling.AnonRateThrottle',
    #     'rest_framework.throttling.UserRateThrottle'
    # ],
    # 'DEFAULT_THROTTLE_RATES': {
    #     'anon': '50/hour',
    #     'user': '50/hour'
    # },
}
# Your stuff
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| [
"akrachunov@gmail.com"
] | akrachunov@gmail.com |
85f4e51c026da2a1a3bc7fb5c2ebd7e01c96d33e | 2a1969afe3818412140efb25921f35610dd9023d | /python/python爬虫/python爬虫/爬虫/爬虫代码/1.5ip地址爬取.py | df6ff01467963e24b1e921e23de8567edacb83ae | [] | no_license | Light2077/LightNote | 149cf42089f15bbebd62e27fe5aa6afe67f25779 | cd733014f8be44207d624a5fd02dfddcd776aad1 | refs/heads/master | 2023-09-01T07:49:05.494481 | 2023-08-24T10:00:09 | 2023-08-24T10:00:09 | 224,410,710 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import requests
import os
url ='http://m.ip138.com/ip.asp?ip='
try:
r = requests.get(url + '202.204.80.112')
r.raise_for_status()
r.encoding = r.apparent_encoding
print(r.text[-500:])
except:
print('爬取失败')
| [
"Light1912@users.noreply.github.com"
] | Light1912@users.noreply.github.com |
8fd09d2e5543ab05838d7dc9fa167237ed8e091c | c1f454623b7e5568ef58c5aa12d62d13bbb95711 | /grid.py | c94b62890aecf80d14dff5c5392b21b50aceff94 | [] | no_license | kalakuta/protein_surfaces | d9672836977f282faab18a35de5949dfe134ba37 | c642f4351a410bfea993503e8b32d01a7b54ca39 | refs/heads/master | 2020-12-02T08:15:46.163577 | 2018-08-21T11:44:03 | 2018-08-21T11:44:03 | 96,789,748 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import matplotlib.pyplot as plt
import math
plt.plot()
plt.xlim(-30,30)
plt.ylim(-30,30)
plt.gca().set_aspect('equal',adjustable='box')
#plt.suptitle(domain)
s = 3
cell_list = [(x,y) for x in range(-5*s,6*s,s) for y in range(-5*s,6*s,s) if x+y < 6*s and x+y > -6*s]
for cell in cell_list:
p = cell[0] + 0.5 * cell[1]
q = cell[1] * math.sqrt(3) / 2
plt.plot(p,q,'h',markersize=22, color=(0.9,0.9,1))
plt.show()
#plt.savefig('./runs/hex_map%s.png' % domain) | [
"patrick.macmahon@gmail.com"
] | patrick.macmahon@gmail.com |
32119cf0bfd53844d02f1fad478fdca6ab9ee59e | b5025befdf74fff3071252abaa4db09479f2d763 | /Ananthu/Aug31/Qstn1/server_socket_chat.py | 9cdbaa12df72a2298625550949f95bb2f9462c38 | [] | no_license | sidv/Assignments | d2fcc643a2963627afd748ff4d690907f01f71d8 | d50d668264e2a31581ce3c0544f9b13de18da2b3 | refs/heads/main | 2023-07-30T02:17:19.392164 | 2021-09-23T04:47:56 | 2021-09-23T04:47:56 | 392,696,356 | 1 | 20 | null | 2021-09-23T08:14:11 | 2021-08-04T13:20:43 | Python | UTF-8 | Python | false | false | 303 | py | #server socket
# Minimal single-client TCP chat server: prints whatever the client sends
# and replies with text typed on stdin.
import socket

PORT = 3002
serv = socket.socket()
serv.bind(("", PORT))  # bind on all interfaces
serv.listen(10)
print("Lisening")
conn, addr = serv.accept()
print(f"Connected to {addr[0]} on {addr[1]}")
while True:
    data = conn.recv(200)
    if not data:
        # recv() returns b'' once the peer closes the connection; the
        # original looped forever printing empty byte strings here and
        # conn.close() was unreachable.
        break
    print(data)
    inp = input("-:")
    conn.send(bytes(inp, "utf-8"))
conn.close()
serv.close()
| [
"ananthups666@gmail.com"
] | ananthups666@gmail.com |
c9a4982606ec0ac098d6647639e73c8df949a444 | 946d5814b15ec3bffd25e6a377dea7c1cca313de | /grit/node/base.py | 2d093e6b443afda23717aa369319e5389c860f87 | [] | no_license | Magister/pak-tools | bad82f124be3fd2cb63bdd2ecd820df42b286f60 | 025a1bacecbffe2121e27568dac821354b7d5d8a | refs/heads/master | 2021-01-25T05:15:30.906020 | 2015-07-18T22:38:25 | 2015-07-18T22:38:25 | 10,413,713 | 13 | 6 | null | 2015-07-18T22:38:26 | 2013-05-31T21:53:37 | Python | UTF-8 | Python | false | false | 19,868 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base types for nodes in a GRIT resource tree.
'''
import os
import sys
import types
from xml.sax import saxutils
from grit import exception
from grit import util
from grit import clique
import grit.format.interface
class Node(grit.format.interface.ItemFormatter):
'''An item in the tree that has children. Also implements the
ItemFormatter interface to allow formatting a node as a GRD document.'''
# Valid content types that can be returned by _ContentType()
_CONTENT_TYPE_NONE = 0 # No CDATA content but may have children
_CONTENT_TYPE_CDATA = 1 # Only CDATA, no children.
_CONTENT_TYPE_MIXED = 2 # CDATA and children, possibly intermingled
# Default nodes to not whitelist skipped
_whitelist_marked_as_skip = False
  def __init__(self):
    '''Initializes an empty, parentless node with no attributes.'''
    self.children = [] # A list of child elements
    self.mixed_content = [] # A list of u'' and/or child elements (this
                            # duplicates 'children' but
                            # is needed to preserve markup-type content).
    self.name = u'' # The name of this element
    self.attrs = {} # The set of attributes (keys to values)
    self.parent = None # Our parent unless we are the root element.
    self.uberclique = None # Allows overriding uberclique for parts of tree
  def __iter__(self):
    '''An in-order iteration through the tree that this node is the
    root of (delegates to inorder(): this node first, then descendants).'''
    return self.inorder()
def inorder(self):
'''Generator that generates first this node, then the same generator for
any child nodes.'''
yield self
for child in self.children:
for iterchild in child.inorder():
yield iterchild
def GetRoot(self):
'''Returns the root Node in the tree this Node belongs to.'''
curr = self
while curr.parent:
curr = curr.parent
return curr
# TODO(joi) Use this (currently untested) optimization?:
#if hasattr(self, '_root'):
# return self._root
#curr = self
#while curr.parent and not hasattr(curr, '_root'):
# curr = curr.parent
#if curr.parent:
# self._root = curr._root
#else:
# self._root = curr
#return self._root
  def StartParsing(self, name, parent):
    '''Called at the start of parsing.  Records this element's tag name and
    its parent node (None for the root element).

    Args:
      name: u'elementname'
      parent: grit.node.base.Node or subclass or None
    '''
    assert isinstance(name, types.StringTypes)
    assert not parent or isinstance(parent, Node)
    self.name = name
    self.parent = parent
  def AddChild(self, child):
    '''Adds a child to the list of children of this node, if it is a valid
    child for the node.

    Raises:
      exception.UnexpectedChild: if the child is not valid for this node
        type, or if this node is CDATA-only (which can never have children).
    '''
    assert isinstance(child, Node)
    if (not self._IsValidChild(child) or
        self._ContentType() == self._CONTENT_TYPE_CDATA):
      explanation = 'invalid child %s for parent %s' % (str(child), self.name)
      raise exception.UnexpectedChild(explanation)
    # Keep 'children' and 'mixed_content' in sync.
    self.children.append(child)
    self.mixed_content.append(child)
def RemoveChild(self, child_id):
'''Removes the first node that has a "name" attribute which
matches "child_id" in the list of immediate children of
this node.
Args:
child_id: String identifying the child to be removed
'''
index = 0
# Safe not to copy since we only remove the first element found
for child in self.children:
name_attr = child.attrs['name']
if name_attr == child_id:
self.children.pop(index)
self.mixed_content.pop(index)
break
index += 1
  def AppendContent(self, content):
    '''Appends a chunk of text as content of this node.

    Nodes whose content type is _CONTENT_TYPE_NONE only tolerate
    whitespace; any other text raises UnexpectedContent.

    Args:
      content: u'hello'

    Return:
      None
    '''
    assert isinstance(content, types.StringTypes)
    if self._ContentType() != self._CONTENT_TYPE_NONE:
      self.mixed_content.append(content)
    elif content.strip() != '':
      raise exception.UnexpectedContent()
  def HandleAttribute(self, attrib, value):
    '''Informs the node of an attribute that was parsed out of the GRD file
    for it.

    Args:
      attrib: 'name'
      value: 'fooblat'

    Return:
      None

    Raises:
      exception.UnexpectedAttribute: if the attribute is not valid for
        this node type.
    '''
    assert isinstance(attrib, types.StringTypes)
    assert isinstance(value, types.StringTypes)
    if self._IsValidAttribute(attrib, value):
      self.attrs[attrib] = value
    else:
      raise exception.UnexpectedAttribute(attrib)
  def EndParsing(self):
    '''Called at the end of parsing.

    Normalizes mixed content (trims surrounding whitespace and the optional
    triple-quote whitespace delimiters), verifies that all mandatory
    attributes are present, and fills in defaults for attributes that were
    not specified in the input file.
    '''
    # TODO(joi) Rewrite this, it's extremely ugly!
    if len(self.mixed_content):
      if isinstance(self.mixed_content[0], types.StringTypes):
        # Remove leading and trailing chunks of pure whitespace.
        while (len(self.mixed_content) and
               isinstance(self.mixed_content[0], types.StringTypes) and
               self.mixed_content[0].strip() == ''):
          self.mixed_content = self.mixed_content[1:]
        # Strip leading and trailing whitespace from mixed content chunks
        # at front and back.
        if (len(self.mixed_content) and
            isinstance(self.mixed_content[0], types.StringTypes)):
          self.mixed_content[0] = self.mixed_content[0].lstrip()
        # Remove leading and trailing ''' (used to demarcate whitespace)
        if (len(self.mixed_content) and
            isinstance(self.mixed_content[0], types.StringTypes)):
          if self.mixed_content[0].startswith("'''"):
            self.mixed_content[0] = self.mixed_content[0][3:]
        if len(self.mixed_content):
          if isinstance(self.mixed_content[-1], types.StringTypes):
            # Same stuff all over again for the tail end.
            while (len(self.mixed_content) and
                   isinstance(self.mixed_content[-1], types.StringTypes) and
                   self.mixed_content[-1].strip() == ''):
              self.mixed_content = self.mixed_content[:-1]
            if (len(self.mixed_content) and
                isinstance(self.mixed_content[-1], types.StringTypes)):
              self.mixed_content[-1] = self.mixed_content[-1].rstrip()
            if (len(self.mixed_content) and
                isinstance(self.mixed_content[-1], types.StringTypes)):
              if self.mixed_content[-1].endswith("'''"):
                self.mixed_content[-1] = self.mixed_content[-1][:-3]
    # Check that all mandatory attributes are there.
    for node_mandatt in self.MandatoryAttributes():
      mandatt_list = []
      if node_mandatt.find('|') >= 0:
        # 'a|b' means exactly one of 'a' or 'b' must be present.
        mandatt_list = node_mandatt.split('|')
      else:
        mandatt_list.append(node_mandatt)
      mandatt_option_found = False
      for mandatt in mandatt_list:
        assert mandatt not in self.DefaultAttributes().keys()
        if mandatt in self.attrs:
          if not mandatt_option_found:
            mandatt_option_found = True
          else:
            raise exception.MutuallyExclusiveMandatoryAttribute(mandatt)
      if not mandatt_option_found:
        raise exception.MissingMandatoryAttribute(mandatt)
    # Add default attributes if not specified in input file.
    for defattr in self.DefaultAttributes():
      if not defattr in self.attrs:
        self.attrs[defattr] = self.DefaultAttributes()[defattr]
def GetCdata(self):
'''Returns all CDATA of this element, concatenated into a single
string. Note that this ignores any elements embedded in CDATA.'''
return ''.join([c for c in self.mixed_content
if isinstance(c, types.StringTypes)])
  def __unicode__(self):
    '''Returns this node and all nodes below it as an XML document in a Unicode
    string, including the XML declaration header (unlike FormatXml()).'''
    header = u'<?xml version="1.0" encoding="UTF-8"?>\n'
    return header + self.FormatXml()
  # Compliance with ItemFormatter interface.
  def Format(self, item, lang_re = None):
    '''Formats 'item' as an XML fragment; 'lang_re' is ignored here.'''
    return item.FormatXml()
  def FormatXml(self, indent = u'', one_line = False):
    '''Returns this node and all nodes below it as an XML
    element in a Unicode string. This differs from __unicode__ in that it does
    not include the <?xml> stuff at the top of the string. If one_line is true,
    children and CDATA are layed out in a way that preserves internal
    whitespace.
    '''
    assert isinstance(indent, types.StringTypes)
    # Mixed content must stay on one line so internal whitespace survives.
    content_one_line = (one_line or
                        self._ContentType() == self._CONTENT_TYPE_MIXED)
    inside_content = self.ContentsAsXml(indent, content_one_line)
    # Then the attributes for this node.
    attribs = u' '
    for (attrib, value) in self.attrs.iteritems():
      # Only print an attribute if it is other than the default value.
      if (not self.DefaultAttributes().has_key(attrib) or
          value != self.DefaultAttributes()[attrib]):
        attribs += u'%s=%s ' % (attrib, saxutils.quoteattr(value))
    attribs = attribs.rstrip() # if no attribs, we end up with '', otherwise
                               # we end up with a space-prefixed string
    # Finally build the XML for our node and return it
    if len(inside_content) > 0:
      if one_line:
        return u'<%s%s>%s</%s>' % (self.name, attribs, inside_content, self.name)
      elif content_one_line:
        return u'%s<%s%s>\n%s %s\n%s</%s>' % (
          indent, self.name, attribs,
          indent, inside_content,
          indent, self.name)
      else:
        return u'%s<%s%s>\n%s\n%s</%s>' % (
          indent, self.name, attribs,
          inside_content,
          indent, self.name)
    else:
      return u'%s<%s%s />' % (indent, self.name, attribs)
def ContentsAsXml(self, indent, one_line):
'''Returns the contents of this node (CDATA and child elements) in XML
format. If 'one_line' is true, the content will be laid out on one line.'''
assert isinstance(indent, types.StringTypes)
# Build the contents of the element.
inside_parts = []
last_item = None
for mixed_item in self.mixed_content:
if isinstance(mixed_item, Node):
inside_parts.append(mixed_item.FormatXml(indent + u' ', one_line))
if not one_line:
inside_parts.append(u'\n')
else:
message = mixed_item
# If this is the first item and it starts with whitespace, we add
# the ''' delimiter.
if not last_item and message.lstrip() != message:
message = u"'''" + message
inside_parts.append(util.EncodeCdata(message))
last_item = mixed_item
# If there are only child nodes and no cdata, there will be a spurious
# trailing \n
if len(inside_parts) and inside_parts[-1] == '\n':
inside_parts = inside_parts[:-1]
# If the last item is a string (not a node) and ends with whitespace,
# we need to add the ''' delimiter.
if (isinstance(last_item, types.StringTypes) and
last_item.rstrip() != last_item):
inside_parts[-1] = inside_parts[-1] + u"'''"
return u''.join(inside_parts)
def RunGatherers(self, recursive=0, debug=False, substitute_messages=False):
'''Runs all gatherers on this object, which may add to the data stored
by the object. If 'recursive' is true, will call RunGatherers() recursively
on all child nodes first. If 'debug' is True, will print out information
as it is running each nodes' gatherers.
'''
if recursive:
for child in self.children:
assert child.name != 'translations' # <grit> node overrides
child.RunGatherers(recursive=recursive, debug=debug)
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Called as a final step of RunGatherers.
Args:
substituter: a grit.util.Substituter object.
'''
for child in self.children:
child.SubstituteMessages(substituter)
def ItemFormatter(self, type):
'''Returns an instance of the item formatter for this object of the
specified type, or None if not supported.
Args:
type: 'rc-header'
Return:
(object RcHeaderItemFormatter)
'''
if type == 'xml':
return self
else:
return None
def SatisfiesOutputCondition(self):
'''Returns true if this node is either not a descendant of an <if> element,
or if all conditions on its <if> element ancestors are satisfied.
Used to determine whether to return item formatters for formats that
obey conditional output of resources (e.g. the RC formatters).
'''
from grit.node import misc
if self.parent:
return self.parent.SatisfiesOutputCondition()
else:
return True
def _IsValidChild(self, child):
'''Returns true if 'child' is a valid child of this node.
Overridden by subclasses.'''
return False
def _IsValidAttribute(self, name, value):
'''Returns true if 'name' is the name of a valid attribute of this element
and 'value' is a valid value for that attribute. Overriden by
subclasses unless they have only mandatory attributes.'''
return (name in self.MandatoryAttributes() or
name in self.DefaultAttributes())
def _ContentType(self):
'''Returns the type of content this element can have. Overridden by
subclasses. The content type can be one of the _CONTENT_TYPE_XXX constants
above.'''
return self._CONTENT_TYPE_NONE
def MandatoryAttributes(self):
'''Returns a list of attribute names that are mandatory (non-optional)
on the current element. One can specify a list of
"mutually exclusive mandatory" attributes by specifying them as one
element in the list, separated by a "|" character.
'''
return []
def DefaultAttributes(self):
'''Returns a dictionary of attribute names that have defaults, mapped to
the default value. Overridden by subclasses.'''
return {}
def GetCliques(self):
'''Returns all MessageClique objects belonging to this node. Overridden
by subclasses.
Return:
[clique1, clique2] or []
'''
return []
def ToRealPath(self, path_from_basedir):
'''Returns a real path (which can be absolute or relative to the current
working directory), given a path that is relative to the base directory
set for the GRIT input file.
Args:
path_from_basedir: '..'
Return:
'resource'
'''
return util.normpath(os.path.join(self.GetRoot().GetBaseDir(),
os.path.expandvars(path_from_basedir)))
def FilenameToOpen(self):
'''Returns a path, either absolute or relative to the current working
directory, that points to the file the node refers to. This is only valid
for nodes that have a 'file' or 'path' attribute. Note that the attribute
is a path to the file relative to the 'base-dir' of the .grd file, whereas
this function returns a path that can be used to open the file.'''
file_attribute = 'file'
if not file_attribute in self.attrs:
file_attribute = 'path'
return self.ToRealPath(self.attrs[file_attribute])
def UberClique(self):
'''Returns the uberclique that should be used for messages originating in
a given node. If the node itself has its uberclique set, that is what we
use, otherwise we search upwards until we find one. If we do not find one
even at the root node, we set the root node's uberclique to a new
uberclique instance.
'''
node = self
while not node.uberclique and node.parent:
node = node.parent
if not node.uberclique:
node.uberclique = clique.UberClique()
return node.uberclique
def IsTranslateable(self):
'''Returns false if the node has contents that should not be translated,
otherwise returns false (even if the node has no contents).
'''
if not 'translateable' in self.attrs:
return True
else:
return self.attrs['translateable'] == 'true'
def GetNodeById(self, id):
'''Returns the node in the subtree parented by this node that has a 'name'
attribute matching 'id'. Returns None if no such node is found.
'''
for node in self:
if 'name' in node.attrs and node.attrs['name'] == id:
return node
return None
def GetChildrenOfType(self, type):
'''Returns a list of all subnodes (recursing to all leaves) of this node
that are of the indicated type.
Args:
type: A type you could use with isinstance().
Return:
A list, possibly empty.
'''
return [child for child in self if isinstance(child, type)]
def GetTextualIds(self):
'''Returns the textual ids of this node, if it has some.
Otherwise it just returns None.
'''
if 'name' in self.attrs:
return [self.attrs['name']]
return None
def EvaluateCondition(self, expr):
'''Returns true if and only if the Python expression 'expr' evaluates
to true.
The expression is given a few local variables:
- 'lang' is the language currently being output
- 'defs' is a map of C preprocessor-style define names to their values
- 'os' is the current platform (likely 'linux2', 'win32' or 'darwin').
- 'pp_ifdef(define)' which behaves just like the C preprocessors #ifdef,
i.e. it is shorthand for "define in defs"
- 'pp_if(define)' which behaves just like the C preprocessor's #if, i.e.
it is shorthand for "define in defs and defs[define]".
'''
root = self.GetRoot()
lang = ''
defs = {}
def pp_ifdef(define):
return define in defs
def pp_if(define):
return define in defs and defs[define]
if hasattr(root, 'output_language'):
lang = root.output_language
if hasattr(root, 'defines'):
defs = root.defines
variable_map = {
'lang' : lang,
'defs' : defs,
'os': sys.platform,
'is_linux': sys.platform.startswith('linux'),
'is_macosx': sys.platform == 'darwin',
'is_win': sys.platform in ('cygwin', 'win32'),
'is_posix': (sys.platform in ('darwin', 'linux2', 'linux3', 'sunos5')
or sys.platform.find('bsd') != -1),
'pp_ifdef' : pp_ifdef,
'pp_if' : pp_if,
}
return eval(expr, {}, variable_map)
def OnlyTheseTranslations(self, languages):
'''Turns off loading of translations for languages not in the provided list.
Attrs:
languages: ['fr', 'zh_cn']
'''
for node in self:
if (hasattr(node, 'IsTranslation') and
node.IsTranslation() and
node.GetLang() not in languages):
node.DisableLoading()
def PseudoIsAllowed(self):
'''Returns true if this node is allowed to use pseudo-translations. This
is true by default, unless this node is within a <release> node that has
the allow_pseudo attribute set to false.
'''
p = self.parent
while p:
if 'allow_pseudo' in p.attrs:
return (p.attrs['allow_pseudo'].lower() == 'true')
p = p.parent
return True
def ShouldFallbackToEnglish(self):
'''Returns true iff this node should fall back to English when
pseudotranslations are disabled and no translation is available for a
given message.
'''
p = self.parent
while p:
if 'fallback_to_english' in p.attrs:
return (p.attrs['fallback_to_english'].lower() == 'true')
p = p.parent
return False
def WhitelistMarkedAsSkip(self):
'''Returns true if the node is marked to be skipped in the output by a
whitelist.
'''
return self._whitelist_marked_as_skip
def SetWhitelistMarkedAsSkip(self, mark_skipped):
'''Sets WhitelistMarkedAsSkip.
'''
self._whitelist_marked_as_skip = mark_skipped
def ExpandVariables(self):
'''Whether we need to expand variables on a given node.'''
return False
class ContentNode(Node):
'''Convenience baseclass for nodes that can have content.'''
def _ContentType(self):
return self._CONTENT_TYPE_MIXED
| [
"misha.cn.ua@gmail.com"
] | misha.cn.ua@gmail.com |
791eb38cc58c33681d0a94055779d53287ea54ce | 68fb568c78dbcd4e73c2b697ab463e02fdde7960 | /_scripts/utils_databasePopulate.py | cf0c8c512c0b35083f405a6f9bafae21830b87a2 | [] | no_license | StBogdan/LightningExplorer | 114400dfb813ee039b1b73d7d3f92817b982c79b | 4ef5e5e1c633bd1ba18a5b37e3e9d1db79f1d503 | refs/heads/master | 2020-03-20T06:06:06.264959 | 2019-04-21T14:52:07 | 2019-04-21T14:52:07 | 137,238,511 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,013 | py | import os
import django
import sys
import _scripts.utils_config as config
from datetime import datetime
from nodes.models import *
"""
What: Take data from files, convert it, put it in database
Why: Automate data loading
"""
# Django setup (run in the virtual environment)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lightningExplorer.settings")
django.setup()
def get_data_files(full_data_path, one_per_day=0):
"""
Gets the data files from the given string full path
For testing, a one-datapoint-per-day flag can be set
Returns: list of fpath string to files with data
"""
files = []
for day_dir in os.listdir(full_data_path):
# print("In folder " + full_data_path+ os.sep + day_dir )
day_dir_fpath = os.path.join(full_data_path, day_dir)
if not os.path.isdir(day_dir_fpath):
continue # Only look at directories
if one_per_day: # Then only look at the first 2 files (hopefully a .graph and .netinfo pair)
day_data = [x for x in os.listdir(day_dir_fpath) if
x.endswith(".netinfo") or x.endswith(".graph")][0:2]
files += [day_dir_fpath + os.sep + x for x in day_data if x.endswith(".graph")]
else:
day_data = os.listdir(day_dir_fpath)
files += [day_dir_fpath + os.sep + x for x in day_data if x.endswith(".graph")]
return files
def get_node_capacity(nodeID, channel_dict):
capacity = 0
channels = 0
for edge in channel_dict:
if edge["node1_pub"] == nodeID or edge["node2_pub"] == nodeID:
capacity += int(edge["capacity"])
channels += 1
return [channels, capacity]
def get_net_data(file_fpath):
file_name = file_fpath.split(os.sep)[-1]
try:
date = datetime.strptime(file_name.split(".")[0], "%Y-%m-%d-%H-%M-%S")
except Exception as e:
date = datetime.strptime(file_name.split(".")[0], "%Y-%m-%d-%H:%M:%S")
netData = json.loads(open(file_fpath).read())
return [date, netData["nodes"], netData["edges"]]
def createNodeEntries(nodes_info, node_date, nodes_chans, nodes_capacity, network_origin):
new_nodes = []
new_nodes_dict = {}
new_addresses = []
index = 0
for node_info in nodes_info:
nodeObj = Node(date_logged=node_date,
network=network_origin,
last_update=node_info["last_update"],
pub_key=node_info["pub_key"],
alias=node_info["alias"],
color=node_info["color"],
channels=nodes_chans[index],
capacity=nodes_capacity[index])
new_nodes.append(nodeObj)
new_nodes_dict[node_info["pub_key"]] = nodeObj;
index += 1
# Saves the enties, making nodes_dict usable for edge creation
new_node_entries = Node.objects.bulk_create(new_nodes)
index = 0
for node_info in nodes_info:
if (new_node_entries[index].pub_key != node_info["pub_key"]):
raise Exception("Node identity mismatch")
for adAdr in node_info["addresses"]:
new_addresses.append(Address(date_logged=node_date,
node=new_node_entries[index],
addr=adAdr["addr"],
network=adAdr["network"]))
index += 1
new_addresses_entries = Address.objects.bulk_create(new_addresses)
return new_nodes_dict, new_addresses
def createChanEntries(edges_info, edge_date, nodes_entries, network_origin):
# print(edge_info)
# print("Got friends" + str(nodes_entries[edge_info["node1_pub"]][0]) + " ------AND-----" + str(nodes_entries[edge_info["node2_pub"]][0]))
new_chans = []
new_entries_policies = []
for edge_info in edges_info:
new_chans.append(Channel(date_logged=edge_date,
chan_id=edge_info["channel_id"],
last_update=edge_info["last_update"],
node1_pub=nodes_entries[edge_info["node1_pub"]],
# As first elem is node, others are the addresses
node2_pub=nodes_entries[edge_info["node2_pub"]],
capacity=edge_info["capacity"],
chan_point=edge_info["chan_point"],
network=network_origin))
new_chan_entries = Channel.objects.bulk_create(new_chans)
index = 0
for edge_info in edges_info:
if new_chans[index].chan_id != edge_info["channel_id"]:
raise Exception("Channel identity mismatch")
if edge_info["node1_policy"] != None:
new_entries_policies.append(Node_Policy(date_logged=edge_date, network=network_origin,
channel=new_chans[index],
node=nodes_entries[edge_info["node1_pub"]],
time_lock_delta=edge_info["node1_policy"]["time_lock_delta"],
min_htlc=edge_info["node1_policy"]["min_htlc"],
fee_base_msat=edge_info["node1_policy"]["fee_base_msat"],
fee_rate_milli_msat=edge_info["node1_policy"][
"fee_rate_milli_msat"]))
if (int(edge_info["node1_policy"]["time_lock_delta"]) > 2147483647 or int(
edge_info["node1_policy"]["min_htlc"]) > 2147483647):
print(edge_info["node1_policy"])
# print("\n\n\n\n")
# if("node2_policy" in edge_info):
if edge_info["node2_policy"] != None:
new_entries_policies.append(Node_Policy(date_logged=edge_date,
node=nodes_entries[edge_info["node2_pub"]],
channel=new_chans[index],
time_lock_delta=edge_info["node2_policy"]["time_lock_delta"],
min_htlc=edge_info["node2_policy"]["min_htlc"],
fee_base_msat=edge_info["node2_policy"]["fee_base_msat"],
fee_rate_milli_msat=edge_info["node2_policy"][
"fee_rate_milli_msat"]))
if (int(edge_info["node2_policy"]["time_lock_delta"]) > 2147483647 or int(
edge_info["node2_policy"]["min_htlc"]) > 2147483647):
print(edge_info["node2_policy"])
index += 1
new_entries_policies = Node_Policy.objects.bulk_create(new_entries_policies)
return new_chan_entries, new_entries_policies
def createDBentries(full_data_path, network, hourly=False):
nodes_entries = {}
edges_entries = []
policy_entries = []
data_folders = get_data_files(full_data_path) # One per day
index = 0
print(f"[DB Populate][{network}] Have to process: {len(data_folders)} folders, hourly: {hourly}")
current_hour = -1
current_day = -1
for file in sorted(data_folders):
index += 1
try:
if (hourly): # Only go through this is hourly flag is set
summaryTime = datetime.strptime(file.split(os.sep)[-1].split(".")[0], "%Y-%m-%d-%H-%M-%S")
if len(Node.objects.filter(date_logged=summaryTime)) > 0:
print("[Data Update][" + network + "] Date already in database\t" + str(summaryTime))
continue
if current_hour != summaryTime.hour or current_day != summaryTime.day:
print("[DB Populate][" + network + "][Hourly process] Process Hour: " + str(
summaryTime.hour) + " Day: " + str(summaryTime.day) + "compare to Hour:" + str(
current_hour) + " on Day:" + str(current_day))
current_hour = summaryTime.hour
current_day = summaryTime.day
else:
print("[DB Populate][" + network + "][Hourly process] Continue Hour:" + str(
summaryTime.hour) + " on Day:" + str(summaryTime.day) + "compare to last seen " + str(
current_hour) + " on " + str(current_day))
continue
date, nodes, chans = get_net_data(file)
# print(f"Got file: {file}\t with {len(nodes)} nodes\t{len(chans)} channels")
node_extra_info = [get_node_capacity(node["pub_key"], chans) for node in nodes]
nodes_entries, address_entries = createNodeEntries(nodes, date, [x for [x, y] in node_extra_info],
[y for [x, y] in node_extra_info], network)
# for node in nodes:
# node_chans,node_capacity = get_node_capacity(node["pub_key"],chans)
# nodes_entries[node["pub_key"]] =createNodeEntry(node,date,node_chans,node_capacity) #May be a list
edges_entries, policies = createChanEntries(chans, date, nodes_entries, network)
print("[DB Populate][" + network + "][ " + str(index) + "/" + str(
len(data_folders)) + " ]\t" + "Created entries for " + str(len(nodes_entries)) + " nodes and " + str(
len(edges_entries)) + " channels " + " date:" + date.strftime("%Y-%m-%d %H:%M:%S"))
except Exception as e:
print("[DB Populate][" + network + "][ " + str(index) + "/" + str(
len(data_folders)) + " ]\t" + "ERROR ON FILE: " + file + "\t" + str(e))
if ("out of range" in str(e)):
raise e
def clear_db():
print("[DB Populate] Removing all data")
print(Node.objects.all().delete())
print(Channel.objects.all().delete())
if __name__ == "__main__":
"""
Run on command line, takes info from data folder (config-given), puts it in Django-accessible db
arg1 network:
mainnet or testnet
or unsafe_reset_db (clears the local db)
arg2 data frequency:
alldata for getting all data (otherwise hourly)
"""
# Get env settings
site_config = config.get_site_config()
data_location = site_config["lndmon_data_location"]
data_location_mainnet = site_config["lndmon_data_location_mainnet"]
if len(sys.argv) > 1:
if len(sys.argv) > 2:
hourly = (sys.argv[2] != "alldata")
else:
hourly = True
print("[DB Populate] Hourly interval:\t" + str(hourly))
if sys.argv[1] == "mainnet":
print("[DB Populate] Adding mainnet data")
createDBentries(data_location_mainnet, "mainnet", hourly)
elif sys.argv[1] == "testnet":
print("[DB Populate] Adding testnet data")
createDBentries(data_location, "testnet", hourly)
elif sys.argv[1] == "unsafe_reset_db":
clear_db()
else:
print("[DB Populate] Unrecognised first parameter, please use one of mainnet|testnet|unsafe_reset_db")
else:
print("[DB Populate] Adding all network data (both networks)")
if input("Want to rebuild the database? (LOSE ALL CURRENT DATA) [y/n] ") == "y":
clear_db()
hourly_setting = input("Add all times?(default is hourly) [y/n]\t") != "y"
if input("Add new entries? [y/n] ") == "y":
createDBentries(data_location, "testnet", hourly_setting)
createDBentries(data_location_mainnet, "mainnet", hourly_setting)
'''
#For use in django shell
pathScript ="/path/to/DataBasePopulate.py"
exec(open(pathScript).read())
scriptName = "DataBasePopulate.py"
exec(open(scriptName).read()) #Yes, I know
''' | [
"bogdan.stoicescu95@gmail.com"
] | bogdan.stoicescu95@gmail.com |
621a9809e8f9a0c711fccec07ffb4f43131cc423 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/c19541276c8f809733d4587fdcc04a8c7add54b3-<draw_text>-bug.py | 4b03e81403d0a80992ec3ff20502160971ac0508 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py |
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
s = common_texification(s)
prop_cmds = _font_properties_str(prop)
s = ('%s %s' % (prop_cmds, s))
writeln(self.fh, '\\begin{pgfscope}')
alpha = gc.get_alpha()
if (alpha != 1.0):
writeln(self.fh, ('\\pgfsetfillopacity{%f}' % alpha))
writeln(self.fh, ('\\pgfsetstrokeopacity{%f}' % alpha))
rgb = tuple(gc.get_rgb())[:3]
if (rgb != (0, 0, 0)):
writeln(self.fh, ('\\definecolor{textcolor}{rgb}{%f,%f,%f}' % rgb))
writeln(self.fh, '\\pgfsetstrokecolor{textcolor}')
writeln(self.fh, '\\pgfsetfillcolor{textcolor}')
s = ('\\color{textcolor}' + s)
f = (1.0 / self.figure.dpi)
text_args = []
if (mtext and (((angle == 0) or (mtext.get_rotation_mode() == 'anchor')) and (mtext.get_va() != 'center_baseline'))):
(x, y) = mtext.get_transform().transform_point(mtext.get_position())
text_args.append(('x=%fin' % (x * f)))
text_args.append(('y=%fin' % (y * f)))
halign = {
'left': 'left',
'right': 'right',
'center': '',
}
valign = {
'top': 'top',
'bottom': 'bottom',
'baseline': 'base',
'center': '',
}
text_args.append(halign[mtext.get_ha()])
text_args.append(valign[mtext.get_va()])
else:
text_args.append(('x=%fin' % (x * f)))
text_args.append(('y=%fin' % (y * f)))
text_args.append('left')
text_args.append('base')
if (angle != 0):
text_args.append(('rotate=%f' % angle))
writeln(self.fh, ('\\pgftext[%s]{%s}' % (','.join(text_args), s)))
writeln(self.fh, '\\end{pgfscope}')
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
913058c844e36dd690c3377ac8a3221f0b13c84d | 104070a8555d0f2cddf67c1db9d7943aa1756893 | /python/InvestmentUSD.py | 36dbb4d81ccf79e1cc4d32f55083552426afd1f5 | [] | no_license | pangeon/SE-Wallet | 2cbf0c6972a21e15e080ddcaecca86f05a2fb5a8 | a6f8a3b0343b00e75fea5275cc0d70680112af85 | refs/heads/main | 2023-06-23T20:41:48.547211 | 2021-07-15T14:57:57 | 2021-07-15T14:57:57 | 376,570,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from decimal import Decimal
from python.Investment import Investment
class InvestmentUSD(Investment):
def __init__(self, name, amount, buy_price, actual_price, usd_price):
super().__init__(name, amount, buy_price, actual_price)
self._usd_price = Decimal(usd_price)
def __str__(self) -> str:
return super().__str__() + ", USD price = {}".format(round(self._usd_price, 2))
@property
def usd_price(self):
return self._usd_price
@usd_price.setter
def usd_price(self, value):
self._usd_price = value
| [
"kamil.cecherz@gmail.com"
] | kamil.cecherz@gmail.com |
6eaa337389048bb068373f464bf2fa25030f8efd | 680aa5d54a34a9a31d9ffca5b9357440e6ebc909 | /hsc/pluginnet/sun397/testing.py | e46a5cb710b8a25572113d0ba358d72f8a5e5062 | [
"MIT"
] | permissive | lemmersj/ground-truth-or-daer | 356ff4b5b5b3374994ca34688d72cf38f3f49c01 | e4e7ba43123bb97ab1fa0242093b56a15c7ba54b | refs/heads/main | 2023-08-02T18:28:41.292183 | 2021-10-06T21:27:40 | 2021-10-06T21:27:40 | 325,838,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | from pluginnet.sun397.sun397 import sun397_data_transform_val, CrossEntropyLossOneHot
from pluginnet.sun397.metrics import create_softmax_metrics_dictionary
from pluginnet.sun397.partial_evidence import build_plugins, AlexNetPartialEvidence, SUN397PE
from pluginnet.sun397.sun397 import SUN397
import torchvision
import torch
import torchvision.models as models
import pandas as pd
def create_mode_pe(conf):
data_set, net, metrics_dict, aggregator = _create__alexnet_cross_entropy_model_pe(conf)
return data_set, net, metrics_dict, aggregator
def create_mode_base(conf):
data_set, net, metrics_dict, aggregator = _create__alexnet_cross_entropy_model_base(conf)
return data_set, net, metrics_dict, aggregator
def _create__alexnet_cross_entropy_model_base(conf):
def target_transform(x):
return x[-397:]
if conf['split'] == 'train':
conf['split_file'] = conf['split_file_train']
else:
conf['split_file'] = conf['split_file_test']
data_set = get_dataset_base(conf['dataset_root'], conf['split_file'], conf['split'],
conf['hierarchy_file'], use_fraction=conf.get('train_set_size'),
target_transform=target_transform, seed=conf.get('seed'))
net = models.__dict__['alexnet'](num_classes=397)
state_dict = torch.load(conf['base_model_file'])
net.load_state_dict(state_dict)
criterion = CrossEntropyLossOneHot()
aggregator = PredictionAggregatorSUN397(data_set.samples)
return data_set, net, create_softmax_metrics_dictionary(criterion), aggregator
def _create__alexnet_cross_entropy_model_pe(conf):
def target_transform(x): return x[-397:]
if conf['split'] == 'train':
conf['split_file'] = conf['split_file_train']
else:
conf['split_file'] = conf['split_file_test']
data_set = get_dataset(conf['dataset_root'], conf['split_file'], conf['split'],
conf['hierarchy_file'], use_fraction=conf.get('train_set_size'),
target_transform=target_transform, seed=conf.get('seed'))
base_net = models.__dict__['alexnet'](num_classes=397)
plugins = build_plugins(conf['plugins'])
net = AlexNetPartialEvidence(base_net, plugins)
state_dict = torch.load(conf['base_model_file'])
net.load_state_dict(state_dict)
criterion = CrossEntropyLossOneHot()
aggregator = PredictionAggregatorSUN397(data_set.samples)
return data_set, net, create_softmax_metrics_dictionary(criterion), aggregator
def get_dataset(dataset_root, split_file, split, hierarchy_file, data_transform=sun397_data_transform_val,
use_fraction=0, target_transform=None, seed=0):
if seed is None:
seed = 0
loader_f = torchvision.datasets.folder.pil_loader
if split == 'train':
data_set = SUN397PE(dataset_root, split_file, hierarchy_file, split='train',
validation_size=0, transform=data_transform,
target_transform=target_transform, loader=loader_f,
use_fraction=use_fraction, random_seed=seed)
else:
data_set = SUN397PE(dataset_root, split_file, hierarchy_file, split=split,
validation_size=10, transform=data_transform,
target_transform=target_transform)
return data_set
def get_dataset_base(dataset_root, split_file, split, hierarchy_file, data_transform=sun397_data_transform_val,
use_fraction=0, target_transform=None, seed=0):
if seed is None:
seed = 0
loader_f = torchvision.datasets.folder.pil_loader
if split == 'train':
data_set = SUN397(dataset_root, split_file, hierarchy_file, split='train',
validation_size=0, transform=data_transform,
target_transform=target_transform, loader=loader_f,
use_fraction=use_fraction, random_seed=seed)
else:
data_set = SUN397(dataset_root, split_file, hierarchy_file, split=split,
validation_size=10, transform=data_transform,
target_transform=target_transform)
return data_set
class PredictionAggregatorSUN397(object):
def __init__(self, files=None):
self.predictions = []
self.ground_truth = []
self.files = files
def __call__(self, engine):
self.add_result_(engine)
def add_result_(self, engine):
out = engine.state.output
self.predictions.extend(out[0].detach().cpu().numpy())
self.ground_truth.extend(out[1].detach().cpu().numpy())
def save_results(self, file_name):
predictions = pd.DataFrame(self.predictions)
gt = pd.DataFrame(self.ground_truth)
if self.files is not None:
predictions = predictions.set_index(pd.Index(self.files))
gt = gt.set_index(pd.Index(self.files))
results = pd.concat([predictions, gt], axis=1, keys=['predictions', 'ground_truth'])
results.to_hdf(file_name, key='results', mode='w')
| [
"lemmersj@umich.edu"
] | lemmersj@umich.edu |
c2c4feec8873df24d9d55a2b2b043ad737963ac0 | c4cd01d4dff72486361c76d36e62bd9ca86c47b9 | /kiet/btech/b_first/en/.EN2ndsem_1st.py | a2369455bfc48c8293bf742bece698dd53099909 | [] | no_license | anuj72/result_analysis | 50e92606410ba4cc8719f7955e4ff02160d2be5d | bfcefab9ea5d03772758b9665d63338e784fe327 | refs/heads/master | 2020-11-27T09:53:38.997512 | 2018-02-14T09:25:33 | 2018-02-14T09:25:33 | 229,387,012 | 1 | 0 | null | 2019-12-21T06:27:10 | 2019-12-21T06:27:09 | null | UTF-8 | Python | false | false | 2,398 | py | from bs4 import BeautifulSoup
import os
import glob
import sys
from xlrd import open_workbook
from xlwt import Workbook
import xlsxwriter
workbook = xlsxwriter.Workbook('EN_2nd_sem_1st.xlsx') #NAME OF GENERATED FILE
worksheet = workbook.add_worksheet()
row = 1
for filename in glob.glob('*.html'):
soup = BeautifulSoup(open(filename),'html.parser')
n=0
c=0
for b in soup.table():
if(str(b.get('id'))!="None"):
n=n+1
x=str(b.get('id'))
for b in soup.table():
if(str(b.get('id'))!="None"):
c=c+1
if(c==n-1):
x=str(b.get('id'))
id_selector=x[3:5]
print(id_selector)
rollnumber = str(soup.find(id='lblRollNo').text)
name = str(soup.find(id='lblFullName').text)
fathername = str(soup.find(id='lblFatherName').text)
marks = str(soup.find(id='ctl'+id_selector+'_ctl01_lblSemesterTotalMarksObtained').text)
cp = str(soup.find(id='ctl'+id_selector+'_ctl01_lblResultStatus').text)
cop = str(soup.find(id='ctl'+id_selector+'_lblCOP').text)
i=soup.find(id='ctl'+id_selector+'_ctl01_ctl00_grdViewSubjectMarksheet')
print(rollnumber+" \n"+name+" \n"+fathername+"\n"+marks+"\n"+cp+"\n"+cop)
subjects=["RollNumber","Name","FatherName","RAS201_I","RAS201_E",'RAS251_I','RAS251_E','REE201_I','REE201_E','REE251_I','REE251_E','RAS203_I','RAS203_E','RAS254_I','RAS254_E','RAS204_I','RAS204_E','RME252_I','RME252_E','REC201_I','REC201_E',"Total","CP","Result Status"]
for heading in range(len(subjects)): # subjects name
worksheet.write(0,heading,subjects[heading])
subject_code = str("\nRAS251\n") # first subject code
t=1
flag = 0
for j in i.findAll('td'):
if (t == 8):
code = str(j.text)
#print list(code)
#print list(subject_code)
if (code == subject_code):
flag= 1
#print (code)
break;
t += 1
if(flag == 1 ):
t = 1
l = []
for j in i.findAll('td'):
#print(j.text)
if (t % 7 == 4 or t % 7 == 5) : # subject code
l.append(str(j.text))
#print(j.text)
t += 1
try:
worksheet.write(row,0,rollnumber) # subject table cell number
worksheet.write(row,1,name)
worksheet.write(row,2,fathername)
for col in range(18):
worksheet.write(row,3+col,l[col])
#print l[col]
#print col
worksheet.write(row,21,marks)
worksheet.write(row,22,cop)
worksheet.write(row,23,cp)
print("check")
except:
print("check1")
row+=1
workbook.close()
| [
"root@college.edu"
] | root@college.edu |
bf604f8d8770be82c55c3259acfbd9957a6deac9 | 1cfbe55b6f602bc8321e2294bfe73d64890326ae | /votes/migrations/0002_auto_20190123_1819.py | 5bff6e96f2b70d54d372f3fb66c73b5ddfd6fd26 | [] | no_license | BlacKnight23/midexamapigo | 60481cf2c6d4e795cd4438e535cf158d8f97fc26 | 02bfff0adb961045715cc67568e8f4a445545b30 | refs/heads/master | 2020-04-19T17:33:18.041493 | 2019-01-23T18:35:02 | 2019-01-23T18:35:02 | 168,338,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Generated by Django 2.1 on 2019-01-23 18:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('votes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='candidate',
name='birthdate',
field=models.DateField(),
),
]
| [
"amrapigo@addu.edu.ph"
] | amrapigo@addu.edu.ph |
3b96e5ce191ac951020d3af07454faec70bbb18a | 6879a8596df6f302c63966a2d27f6b4d11cc9b29 | /abc/problems030/021/b.py | 8a85c8e452bae3d0deb92785c327dd6922669e59 | [] | no_license | wkwkgg/atcoder | 41b1e02b88bf7a8291b709306e54cb56cb93e52a | 28a7d4084a4100236510c05a88e50aa0403ac7cd | refs/heads/master | 2020-07-26T03:47:19.460049 | 2020-03-01T18:29:57 | 2020-03-01T18:29:57 | 208,523,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | N = int(input())
A, B = map(int, input().split())
K = int(input())
P = list(map(int, input().split()))
print("YES" if len(P) + 2 == len(set(P + [A, B])) else "NO")
| [
"yujin@komachi.live"
] | yujin@komachi.live |
98d504f1aa5a2b0be4576b543986a18bd545d895 | 161be705c298acc6a18c0d2b391d3a75a13c99be | /fonksiyonlar2-4.soru.py | 1a9db4ea3e921c588118e24bfb6b788125c9c0c0 | [] | no_license | esrabozkurt/programlama | ba274652727eca1d7b6f8bc31b114dcaa7f610d6 | eaafc0d05bb64151aae047e08ca17d163e4cbab6 | refs/heads/master | 2021-04-26T22:25:11.944276 | 2018-04-27T12:34:46 | 2018-04-27T12:34:46 | 124,088,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py |
def donemBasi(koltuk,yatak,dolap):
stok=koltuk+yatak+dolap
global donemBasi
return stok
def donemSonu (satilanKoltuk=25,satilanYatak=20,satilanDolap=10,alinanKoltuk=10,alinanYatak=15,alinanDolap=5):
stokSon=(satilanKoltuk+satilanYatak+satilanDolap)-(alinanKoltuk+alinanYatak+alinanDolap)
global donemSonu
return stokSon
def ortalama (donemBasiStok,donemSonuStok,donem=2):
ortalamaStok=(donemBasiStok+donemSonuStok)/donem
global ortalama
return ortalamaStok
a=int(input("Dönem Başı Koltuk Sayısını Giriniz:"))
b=int(input("Dönem Başı Yatak Sayısını Giriniz:"))
c=int(input("Dönem Başı Dolap Sayısını Giriniz:"))
x=donemBasi(a,b,c)
y=x-(donemSonu (satilanKoltuk=25,satilanYatak=20,satilanDolap=10,alinanKoltuk=10,alinanYatak=15,alinanDolap=5))
z=ortalama(x,y)
print("Dönem Başı Stok Durumunuz",x)
print("Dönem İçi Satılan Koltuk Sayısı=25")
print("Dönem İçi Satılan Yatak Sayısı=20")
print("Dönem İçi Satılan Dolap Sayısı=10")
print("Dönem İçi Alınan Koltuk Sayısı=10")
print("Dönem İçi Alınan Yatak Sayısı=15")
print("Dönem İçi Alınan Dolap Sayısı=5")
print("Dönem Sonu Stok Durumunuz",y)
print("Yıllık Ortalama Stok Durumunuz=",z)
| [
"noreply@github.com"
] | esrabozkurt.noreply@github.com |
c4cdd27e818ae51036e0d3597520bdda05745122 | 4dd75662f7315e8b0e2e28c76dce7654c1b03c4c | /kaguya/lib/models/quad.py | 0d53dd61216d1334bebe3422f2deb1e9dbf4c861 | [] | no_license | quagzlor/Kaguya-Seeker | 2dec0d878a6f454040a57b2bb255d8fde8f7a0be | ab6b8a0cbb9dd394a6935afb3193a74f23103ea6 | refs/heads/main | 2023-05-01T11:31:20.091661 | 2021-05-23T07:13:21 | 2021-05-23T07:13:21 | 332,379,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from lib.db import db
class Quad(db.Model):
__tablename__ ="quad"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False)
def __init__(self, name):
self.name = name
| [
"noreply@github.com"
] | quagzlor.noreply@github.com |
891c04f0e9ac631bf85675e4d0782506fb41e0c9 | c9643f2e644875d3781412bb9a9f84274bdf55d5 | /lib/quaternion.py | 7cb9ed5510324b4278be5d676982abb1cc46af02 | [] | no_license | Humhu/python-lib | b507ae7dd321b624f17c651e9a8975f7d707ac7a | 96fe2186ade753835ad99330324c565b3a298b3e | refs/heads/master | 2020-04-02T12:28:20.600797 | 2012-10-24T01:57:39 | 2012-10-24T01:57:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from math import *
def quatGenerate(angle, axis):
a_2 = angle/2.0;
w = cos(a_2);
sina_2 = sin(a_2);
x = axis[0]*sina_2;
y = axis[1]*sina_2;
z = axis[2]*sina_2;
return (w, x, y, z)
def eulerToQuaternion(yaw, pitch, roll):
psi_2 = yaw/2.0
theta_2 = pitch/2.0
phi_2 = roll/2.0
w = cos(phi_2)*cos(theta_2)*cos(psi_2) + \
sin(phi_2)*sin(theta_2)*sin(psi_2)
x = sin(phi_2)*cos(theta_2)*cos(psi_2) - \
cos(phi_2)*sin(theta_2)*sin(psi_2)
y = cos(phi_2)*sin(theta_2)*cos(psi_2) + \
sin(phi_2)*cos(theta_2)*sin(psi_2)
z = cos(phi_2)*cos(theta_2)*sin(psi_2) - \
sin(phi_2)*sin(theta_2)*cos(psi_2)
return (w, x, y, z)
def eulerToQuaternionDeg(yaw, pitch, roll):
return eulerToQuaternion(radians(yaw), radians(pitch), radians(roll))
POLE_LIMIT = 0.499
def quaternionToEuler(q):
w = q[0];
x = q[1];
y = q[2];
z = q[3];
temp1 = w*y - z*x;
if temp1 > POLE_LIMIT:
psi = 2*atan2(w, x)
theta = -pi/2.0;
phi = 0.0;
elif temp1 < - POLE_LIMIT:
psi = -2*atan2(w, x)
theta = pi/2.0;
phi = 0.0;
else:
theta = asin(2.0*temp1)
phi = atan2(2.0*(w*x + y*z), 1.0 - 2.0*(x*x + y*y))
psi = atan2(2.0*(w*z + x*y), 1.0 - 2.0*(y*y + z*z))
return (psi, theta, phi)
| [
"humphrey.hu@gmail.com"
] | humphrey.hu@gmail.com |
82449f43a77d7008703082bf0d83768860297c65 | bd48e8af13abb5a8574b47ea3337e64a45e8f672 | /nanum/search/apis.py | c9621326855333e4e5c41e1bd2a515cdc0b21840 | [] | no_license | markui/nanum-project | d221cacfaed9d6e2e882f3d4f29dc77055a4e97b | 399064b62a7c8049b37efd77a98f17a903754070 | refs/heads/master | 2021-09-08T08:03:30.667750 | 2018-01-09T07:06:11 | 2018-01-09T07:06:11 | 110,780,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | from rest_framework import generics, permissions
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.views import APIView
from topics.models import Topic
from topics.serializers import TopicSerializer
from . import search
class TopicSearchAPIView(generics.RetrieveAPIView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
authentication_classes = (
permissions.IsAuthenticated,
)
def retrieve(self, request, *args, **kwargs):
query_params = self.request.query_params
topic_name = query_params.get("name", None)
if not topic_name:
raise ParseError(detail={"error": "name 필드가 비어있습니다."})
queryset = Topic.objects.filter(name__contains=topic_name)
if not queryset:
return Response({"result": "결과가 없습니다."})
serializer = self.get_serializer(queryset, many=True)
result = {"result": serializer.data}
return Response(result)
class SearchAPIView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, format=None):
"""
Return a list of all users.
"""
query_params = self.request.query_params
query = query_params.get("query", None)
if not query:
raise ParseError({"error": "query 필드가 비어있습니다."})
result = search.search(query)
return Response(result)
| [
"johnsuh94@gmail.com"
] | johnsuh94@gmail.com |
9b9afbb047cf6727bb42595fed496738377aa805 | 64c6134c2873ded7e84b93f10162fb6f27f25139 | /PPPDebug.py | 30ce0f3868cb09886d2cbb64b184695648871941 | [
"BSD-2-Clause"
] | permissive | umd-lhcb/UT-Aux-mapping | 1c22e1aec6eeefaa9d54f0cc48486a8162784c99 | 69f611f133ddcf1df18a9256c9ba1e9a577c1019 | refs/heads/master | 2022-01-19T11:54:26.101859 | 2022-01-09T04:31:49 | 2022-01-09T04:31:49 | 162,521,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,246 | py | #!/usr/bin/env python
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Fri May 28, 2021 at 03:43 AM +0200
from pathlib import Path
from itertools import permutations
from collections.abc import Iterable
from pyUTM.io import WirelistNaiveReader, PcadNaiveReader
from UT_Aux_mapping.const import input_dir
from UT_Aux_mapping.helpers import ppp_netname_regulator
#####################
# Read all netlists #
#####################
netlists = {}
def read_net(path, name, ext='wirelist', reader=WirelistNaiveReader):
loc_reader = reader(path / Path(name+'.'+ext))
return loc_reader.read()
ppp_vars = ['c_true_ppp_mag', 'c_mirror_ppp_mag']
netlists.update({k: read_net(input_dir, k) for k in ppp_vars})
p2b2_vars = ['true_p2b2', 'mirror_p2b2']
netlists.update({k: read_net(input_dir, k, 'net', PcadNaiveReader)
for k in p2b2_vars})
##########
# Checks #
##########
netnames = {}
def flatten(iterable, depth=0, max_depth=-1):
output = []
for item in iterable:
if isinstance(item, Iterable) and not isinstance(item, str):
if depth == max_depth:
output.append(item)
else:
output += flatten(item, depth+1, max_depth)
else:
output.append(item)
return output
def uniq_elems(l1, l2):
return [i for i in l1 if i not in l2]
def print_uniq(uniq_d):
for rule, result in uniq_d.items():
if result:
print('The following nets are {}:'.format(rule))
print('\n'.join(result))
print('')
# Check if there's nets that a unique to one variant
netnames.update({k: [ppp_netname_regulator(n) for n in netlists[k].keys()]
for k in ppp_vars})
uniq_ppp = {'in {} not {}'.format(k1, k2):
uniq_elems(netnames[k1], netnames[k2])
for k1, k2 in permutations(ppp_vars, 2)}
print_uniq(uniq_ppp)
# Check nets that are unique to P2B2
netnames.update({k: [n for n in netlists[k].keys()] for k in p2b2_vars})
uniq_p2b2 = {'in {} not {}'.format(k1, k2):
uniq_elems(netnames[k1], netnames[k2])
for k1, k2 in
flatten(map(permutations, zip(ppp_vars, p2b2_vars)), max_depth=1)}
print_uniq(uniq_p2b2)
| [
"syp@umd.edu"
] | syp@umd.edu |
fbca1cf8b47af5a1c721e5488d0216af80fd1bbd | 1f8360d8a94869b7074d47458626d8b65509a153 | /HackerRank/Easy/MinimumDistances/solution.py | 121aadca0421f808b6b3f44a903491e5b07e2de6 | [] | no_license | Duaard/ProgrammingProblems | 954812332632a5932ada310976a7f79d9285d415 | baeef97663f85aa184e93b46a26602711d3c3c94 | refs/heads/master | 2021-08-09T01:48:55.108061 | 2020-12-21T06:17:34 | 2020-12-21T06:17:34 | 233,727,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def minimumDistances(a):
# Map used to store values from a
distanceMap = {}
minDistance = None
# Loop through the entire array
# O(n) complexity
for i in range(len(a)):
# Check if value does not exists in map
if a[i] not in distanceMap:
# Register current index
distanceMap[a[i]] = i
else:
# Calculate distance from previous index
dist = i - distanceMap[a[i]]
# Check if lower than existing min val
if minDistance is None or dist < minDistance:
minDistance = dist
# Check if min is already at least value
if minDistance == 1:
break
# Register latest index
distanceMap[a[i]] = i
# Check if minDistance exists
if minDistance:
return minDistance
else:
return -1
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
a = list(map(int, input().rstrip().split()))
result = minimumDistances(a)
fptr.write(str(result) + '\n')
fptr.close()
| [
"ejllikwong@gmail.com"
] | ejllikwong@gmail.com |
ff62f296b863828db43fd5fe482e00773f5c6e5f | d8d7e40199334fb104d0d6aeff537007858e0ddd | /basic_app/migrations/0001_initial.py | eb1af47a48c09cde3803edec51e5bb24dcd14ad9 | [] | no_license | dchida3/RMJ_Final | f2816dc89dfcf3cabc812ef21ff8814ffda631ec | 635c77fff6736f8d9c2238027c3766e989356635 | refs/heads/master | 2021-08-08T00:30:08.931505 | 2020-06-26T22:23:54 | 2020-06-26T22:23:54 | 194,914,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # Generated by Django 2.1.7 on 2019-03-27 20:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfileInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, null=True)),
('portfolio_site', models.URLField(blank=True)),
('profile_pic', models.ImageField(blank=True, upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"36213822+dchida3@users.noreply.github.com"
] | 36213822+dchida3@users.noreply.github.com |
de8847b98859f45a2aa099cc3edc51818cb87fd7 | fbcdb3e66f9fce9bf8596ae9f28e14ad23da30a2 | /lib/elf/header.py | 53d8e10256a8ee921189b7ecba34a03f83eb3a03 | [
"BSD-2-Clause"
] | permissive | arizvisa/syringe | 38349e6ff81bc1d709d520b8a8d949a47a3b5f6c | e02b014dc764ed822288210248c9438a843af8a9 | refs/heads/master | 2023-08-18T11:44:50.096141 | 2023-08-16T21:15:58 | 2023-08-16T21:15:58 | 22,565,979 | 36 | 9 | BSD-2-Clause | 2021-05-24T19:38:31 | 2014-08-03T03:24:16 | Python | UTF-8 | Python | false | false | 23,782 | py | import ptypes, time, datetime, functools, operator, bisect
from . import EV_, E_IDENT, section, segment
from .base import *
class ET_(pint.enum, Elf32_Half):
_values_ = [
('NONE', 0),
('REL', 1),
('EXEC', 2),
('DYN', 3),
('CORE', 4),
# ET_LOOS(0xfe00) - ET_HIOS(0xfeff)
# ET_LOPROC(0xff00) - ET_HIPROC(0xffff)
]
class EM_(pint.enum, Elf32_Half):
_values_ = [
('EM_NONE', 0),
('EM_M32', 1),
('EM_SPARC', 2),
('EM_386', 3),
('EM_68K', 4),
('EM_88K', 5),
('EM_IAMCU', 6),
('EM_860', 7),
('EM_MIPS', 8),
('EM_S370', 9),
('EM_MIPS_RS4_BE', 10),
# ('RESERVED', 11-14),
('EM_PARISC', 15),
# ('RESERVED', 16),
('EM_VPP500', 17),
('EM_SPARC32PLUS', 18),
('EM_960', 19),
('EM_PPC', 20),
('EM_PPC64', 21),
('EM_S390', 22),
('EM_SPU', 23),
# ('RESERVED', 24-35),
('EM_V800', 36),
('EM_FR20', 37),
('EM_RH32', 38),
('EM_RCE', 39),
('EM_ARM', 40),
('EM_ALPHA', 41),
('EM_SH', 42),
('EM_SPARCV9', 43),
('EM_TRICORE', 44),
('EM_ARC', 45),
('EM_H8_300', 46),
('EM_H8_300H', 47),
('EM_H8S', 48),
('EM_H8_500', 49),
('EM_IA_64', 50),
('EM_MIPS_X', 51),
('EM_COLDFIRE', 52),
('EM_68HC12', 53),
('EM_MMA', 54),
('EM_PCP', 55),
('EM_NCPU', 56),
('EM_NDR1', 57),
('EM_STARCORE', 58),
('EM_ME16', 59),
('EM_ST100', 60),
('EM_TINYJ', 61),
('EM_X86_64', 62),
('EM_PDSP', 63),
('EM_PDP10', 64),
('EM_PDP11', 65),
('EM_FX66', 66),
('EM_ST9PLUS', 67),
('EM_ST7', 68),
('EM_68HC16', 69),
('EM_68HC11', 70),
('EM_68HC08', 71),
('EM_68HC05', 72),
('EM_SVX', 73),
('EM_ST19', 74),
('EM_VAX', 75),
('EM_CRIS', 76),
('EM_JAVELIN', 77),
('EM_FIREPATH', 78),
('EM_ZSP', 79),
('EM_MMIX', 80),
('EM_HUANY', 81),
('EM_PRISM', 82),
('EM_AVR', 83),
('EM_FR30', 84),
('EM_D10V', 85),
('EM_D30V', 86),
('EM_V850', 87),
('EM_M32R', 88),
('EM_MN10300', 89),
('EM_MN10200', 90),
('EM_PJ', 91),
('EM_OPENRISC', 92),
('EM_ARC_COMPACT', 93),
('EM_XTENSA', 94),
('EM_VIDEOCORE', 95),
('EM_TMM_GPP', 96),
('EM_NS32K', 97),
('EM_TPC', 98),
('EM_SNP1K', 99),
('EM_ST200', 100),
('EM_IP2K', 101),
('EM_MAX', 102),
('EM_CR', 103),
('EM_F2MC16', 104),
('EM_MSP430', 105),
('EM_BLACKFIN', 106),
('EM_SE_C33', 107),
('EM_SEP', 108),
('EM_ARCA', 109),
('EM_UNICORE', 110),
('EM_EXCESS', 111),
('EM_DXP', 112),
('EM_ALTERA_NIOS2', 113),
('EM_CRX', 114),
('EM_XGATE', 115),
('EM_C166', 116),
('EM_M16C', 117),
('EM_DSPIC30F', 118),
('EM_CE', 119),
('EM_M32C', 120),
# ('RESERVED', 121-130),
('EM_TSK3000', 131),
('EM_RS08', 132),
('EM_SHARC', 133),
('EM_ECOG2', 134),
('EM_SCORE7', 135),
('EM_DSP24', 136),
('EM_VIDEOCORE3', 137),
('EM_LATTICEMICO32', 138),
('EM_SE_C17', 139),
('EM_TI_C6000', 140),
('EM_TI_C2000', 141),
('EM_TI_C5500', 142),
('EM_TI_ARP32', 143),
('EM_TI_PRU', 144),
# ('RESERVED', 145-159),
('EM_MMDSP_PLUS', 160),
('EM_CYPRESS_M8C', 161),
('EM_R32C', 162),
('EM_TRIMEDIA', 163),
('EM_QDSP6', 164),
('EM_8051', 165),
('EM_STXP7X', 166),
('EM_NDS32', 167),
('EM_ECOG1', 168),
('EM_ECOG1X', 168),
('EM_MAXQ30', 169),
('EM_XIMO16', 170),
('EM_MANIK', 171),
('EM_CRAYNV2', 172),
('EM_RX', 173),
('EM_METAG', 174),
('EM_MCST_ELBRUS', 175),
('EM_ECOG16', 176),
('EM_CR16', 177),
('EM_ETPU', 178),
('EM_SLE9X', 179),
('EM_L10M', 180),
('EM_K10M', 181),
# ('RESERVED', 182),
('EM_AARCH64', 183),
# ('RESERVED', 184),
('EM_AVR32', 185),
('EM_STM8', 186),
('EM_TILE64', 187),
('EM_TILEPRO', 188),
('EM_MICROBLAZE', 189),
('EM_CUDA', 190),
('EM_TILEGX', 191),
('EM_CLOUDSHIELD', 192),
('EM_COREA_1ST', 193),
('EM_COREA_2ND', 194),
('EM_ARC_COMPACT2', 195),
('EM_OPEN8', 196),
('EM_RL78', 197),
('EM_VIDEOCORE5', 198),
('EM_78KOR', 199),
('EM_56800EX', 200),
('EM_BA1', 201),
('EM_BA2', 202),
('EM_XCORE', 203),
('EM_MCHP_PIC', 204),
('EM_INTEL205', 205),
('EM_INTEL206', 206),
('EM_INTEL207', 207),
('EM_INTEL208', 208),
('EM_INTEL209', 209),
('EM_KM32', 210),
('EM_KMX32', 211),
('EM_KMX16', 212),
('EM_KMX8', 213),
('EM_KVARC', 214),
('EM_CDP', 215),
('EM_COGE', 216),
('EM_COOL', 217),
('EM_NORC', 218),
('EM_CSR_KALIMBA', 219),
('EM_Z80', 220),
('EM_VISIUM', 221),
('EM_FT32', 222),
('EM_MOXIE', 223),
('EM_AMDGPU', 224),
# ('RESERVED', 225-242),
('EM_RISCV', 243),
('EM_LOONGARCH', 258),
]
class E_VERSION(EV_, Elf32_Word):
pass
class E_FLAGS(ptype.definition):
cache = {}
default = Elf32_Word
@E_FLAGS.define(type=EM_.byname('EM_SPARC'))
@E_FLAGS.define(type=EM_.byname('EM_SPARC32PLUS'))
@E_FLAGS.define(type=EM_.byname('EM_SPARCV9'))
class E_FLAGS_SPARC(pbinary.flags):
VENDOR_MASK = 0x00ffff00
class EF_SPARCV9_MM(pbinary.enum):
length, _values_ = 2, [
('EF_SPARCV9_TSO', 0),
('EF_SPARCV9_PSO', 1),
('EF_SPARCV9_RMO', 2),
]
class EF_SPARC_EXT_MASK(pbinary.flags):
_fields_ = [
(12, 'EF_SPARC_EXT'),
(1, 'EF_SPARC_SUN_US3'),
(1, 'EF_SPARC_HAL_R1'),
(1, 'EF_SPARC_SUN_US1'),
(1, 'EF_SPARC_32PLUS'),
]
_fields_ = [
(8, 'EF_SPARC_NONE'),
(EF_SPARC_EXT_MASK, 'EF_SPARC_EXT_MASK'),
(6, 'EF_SPARC_UNKNOWN'),
(EF_SPARCV9_MM, 'EF_SPARCV9_MM'),
]
@E_FLAGS.define
class E_FLAGS_ARM(pbinary.flags):
type = EM_.byname('EM_ARM')
ABI_MASK = 0xff000000
GCC_MASK = 0x00400FFF
class EF_ARM_GCC_MASK(pbinary.struct):
_fields_ = [
(1, 'EF_ARM_ABI_UNKNOWN'),
(1, 'EF_ARM_ABI_FLOAT_HARD'),
(1, 'EF_ARM_ABI_FLOAT_SOFT'),
(9, 'EF_ARM_GCC_UNKNOWN'),
]
_fields_ = [
(8, 'EF_ARM_ABI'),
(1, 'EF_ARM_BE8'),
(1, 'EF_ARM_GCC_LEGACY'),
(2, 'EF_ARM_GCC_ALIGN'),
(8, 'EF_ARM_UNKNOWN'),
(EF_ARM_GCC_MASK, 'EF_ARM_GCC_MASK'),
]
def summary(self):
gcc_mask_name, unk_name, subunk_name = 'EF_ARM_GCC_MASK', 'EF_ARM_UNKNOWN', 'EF_ARM_GCC_UNKNOWN'
gcc_mask, unknown = self[gcc_mask_name], self[unk_name]
flags = [field for field in ['EF_ARM_BE8', 'EF_ARM_GCC_LEGACY'] if self[field]]
subunknown, subflags = gcc_mask[subunk_name], [field for field in ['EF_ARM_ABI_UNKNOWN', 'EF_ARM_ABI_FLOAT_HARD', 'EF_ARM_ABI_FLOAT_SOFT'] if gcc_mask[field]]
summary = "({:#0{:d}x},{:d}) :> {:s}=({:#0{:d}x},{:d}){:s}".format(gcc_mask.int(), 2 + gcc_mask.bits() // 4, gcc_mask.bits(), subunk_name, subunknown, 2 + (9+3) // 4, 9, " {:s}".format(' '.join(subflags)) if subflags else '')
return "EF_ARM_ABI={:#0{:d}x} EF_ARM_GCC_ALIGN={:d}{:s}{:s} {:s}={:s}".format(self['EF_ARM_ABI'], 2 + 2, self['EF_ARM_GCC_ALIGN'], " {:s}".format(' '.join(flags)) if flags else '', " {:s}={:#0{:d}x}".format(unk_name, unknown, 2+2) if unknown else '', gcc_mask_name, summary if gcc_mask.int() else "({:#0{:d}x},{:d})".format(gcc_mask.int(), 2 + gcc_mask.bits() // 4, gcc_mask.bits()))
@E_FLAGS.define
class E_FLAGS_MIPS(pbinary.flags):
type = EM_.byname('EM_MIPS')
class EF_MIPS_ARCH_(pbinary.enum):
length, _values_ = 4, [
('EF_MIPS_ARCH_1', 0),
('EF_MIPS_ARCH_2', 1),
('EF_MIPS_ARCH_3', 2),
('EF_MIPS_ARCH_4', 3),
('EF_MIPS_ARCH_5', 4),
('EF_MIPS_ARCH_32', 5),
('EF_MIPS_ARCH_64', 6),
('EF_MIPS_ARCH_32R2', 7),
('EF_MIPS_ARCH_64R2', 8),
]
class EF_MIPS_ARCH_ASE_(pbinary.enum):
length, _values_ = 4, [
('EF_MIPS_ARCH_ASE_MDMX', 8),
('EF_MIPS_ARCH_ASE_M16', 4),
('EF_MIPS_ARCH_ASE_MICROMIPS', 2),
]
class E_MIPS_ABI_(pbinary.enum):
length, _values_ = 4, [
('E_MIPS_ABI_O32', 1),
('E_MIPS_ABI_O64', 2),
('E_MIPS_ABI_EABI32', 3),
('E_MIPS_ABI_EABI64', 4),
]
_fields_ = [
(EF_MIPS_ARCH_, 'ARCH'),
(EF_MIPS_ARCH_ASE_, 'ASE'),
(8, 'EF_MIPS_ARCH_UNUSED'),
(E_MIPS_ABI_, 'ABI'),
(1, 'EF_MIPS_ARCH_RESERVED'),
(1, 'E_MIPS_NAN2008'),
(1, 'E_MIPS_FP64'),
(1, 'EF_MIPS_32BITMODE'),
(1, 'EF_MIPS_OPTIONS_FIRST'),
(1, 'EF_MIPS_ABI_ON32'),
(1, 'EF_MIPS_ABI2'),
(1, 'EF_MIPS_64BIT_WHIRL'),
(1, 'EF_MIPS_XGOT'),
(1, 'EF_MIPS_CPIC'),
(1, 'EF_MIPS_PIC'),
(1, 'EF_MIPS_NOREORDER'),
]
@E_FLAGS.define
class E_FLAGS_LOONGARCH(pbinary.flags):
type = EM_.byname('EM_LOONGARCH')
_fields_ = [
(24, 'reserved'),
(2, 'version'),
(3, 'extension'),
(3, 'base modifier'),
]
class PN_(pint.enum):
_values_ = [
('XNUM', 0xffff),
]
class XhdrEntries(parray.type):
def iterate(self):
for index, item in self.enumerate():
yield item
return
def enumerate(self):
for index, item in enumerate(self):
yield index, item
return
def sorted(self, field, *fields):
Fgetfields = operator.itemgetter(field, *fields) if fields else operator.itemgetter(field)
Finteger = functools.partial(map, operator.methodcaller('int'))
# Start by building an index of the entire collection of elements
# by extracting the requested keys from each element.
collection = {}
for index, item in enumerate(self):
key = Fgetfields(item) if len(fields) else [Fgetfields(item)]
# Now that we have each field, convert it into a key and
# insert the array index of the item into our collection.
items = collection.setdefault(tuple(Finteger(key)), [])
bisect.insort(items, index)
# Now we can sort our collection of indices by the suggested
# fields, and fetch the index for a specific key.
for key in sorted(collection):
indices = collection[key]
# Lastly, we just need to iterate each index since they
# were inserted into the collection already sorted. With
# the index, we can then yield the item it references.
for index in indices:
yield index, self[index]
continue
return
class ShdrEntries(XhdrEntries):
def by_offset(self, ofs):
iterable = (item for item in self if item.containsoffset(ofs))
try:
result = next(iterable)
except StopIteration:
raise ptypes.error.ItemNotFoundError(self, 'ShdrEntries.by_offset', "Unable to locate Shdr with the specified offset ({:#x})".format(ofs))
return result
byoffset = by_offset
def by_address(self, va):
iterable = (item for item in self if item.containsaddress(va))
try:
result = next(iterable)
except StopIteration:
raise ptypes.error.ItemNotFoundError(self, 'ShdrEntries.by_address', "Unable to locate Shdr with the specified virtual address ({:#x})".format(va))
return result
byaddress = by_address
def sorted(self):
for index, item in super(ShdrEntries, self).sorted('sh_offset', 'sh_size'):
yield index, item
return
def filter(self, predicate):
iterable = (item for item in self if predicate(item))
return iterable
def by_field(self, field, predicate):
iterable = (item for item in self if predicate(item[field]))
return next(iterable)
def by_name(self, name):
Fcompose = lambda *Fa: functools.reduce(lambda F1, F2: lambda *a: F1(F2(*a)), builtins.reversed(Fa))
Fpredicate = Fcompose(operator.methodcaller('str'), functools.partial(operator.eq, name))
return self.by_field('sh_name', Fpredicate)
def by_type(self, type):
Fpredicate = operator.itemgetter(type)
return self.by_field('sh_type', Fpredicate)
by = by_type
class PhdrEntries(XhdrEntries):
def by_offset(self, ofs):
if isinstance(self.source, ptypes.provider.memorybase):
iterable = (item for item in self if item.loadableQ() and item.containsoffset(ofs))
else:
iterable = (item for item in self if item.containsoffset(ofs))
# Now that we have an iterable, return the first result we find
try:
result = next(iterable)
except StopIteration:
raise ptypes.error.ItemNotFoundError(self, 'PhdrEntries.by_offset', "Unable to locate Phdr with the specified offset ({:#x})".format(ofs))
return result
byoffset = by_offset
def by_address(self, va):
iterable = (item for item in self if item.loadableQ() and item.containsaddress(va))
# Now that we have an iterable, return the first result we find.
try:
result = next(iterable)
# If our iterator has no items, then we weren't able to find a match
# and we'll need to raise an exception.
except StopIteration:
raise ptypes.error.ItemNotFoundError(self, 'PhdrEntries.by_address', "Unable to locate Phdr with the specified virtual address ({:#x})".format(va))
return result
byaddress = by_address
def enumerate(self):
for index, item in super(PhdrEntries, self).enumerate():
# If our source is memory-backed, then we'll want to filter our
# items by whether they're loaded or not. So, we'll just check the
# phdr flags in order to figure that out.
if isinstance(self.source, ptypes.provider.memorybase):
flags = item['p_type']
if any(flags[fl] for fl in ['LOAD', 'DYNAMIC']):
yield index, item
continue
# Otherwise we'll just yield everything because it's in the file.
yield index, item
return
def sorted(self):
fields = ('p_vaddr', 'p_memsz') if isinstance(self.source, ptypes.provider.memorybase) else ('p_offset', 'p_filesz')
for index, item in super(PhdrEntries, self).sorted(*fields):
# If we are actually dealing with a source that's backed by
# actual memory, then only yield a phdr if it's actually loaded.
if isinstance(item.source, ptypes.provider.memory):
if item.loadableQ():
yield index, item
continue
# Otherwise, we can just yield everything without having to filter.
yield index, item
return
def by_field(self, field, predicate):
iterable = (item for item in self if predicate(item[field]))
return next(iterable)
def by_type(self, type):
Fpredicate = operator.itemgetter(type)
return self.by_field('p_type', Fpredicate)
by = by_type
### 32-bit
class Elf32_Ehdr(pstruct.type, ElfXX_Ehdr):
def _ent_array(self, entries, type, size, length):
t = dyn.clone(type, blocksize=lambda self, cb=size.int(): cb)
return dyn.clone(entries, _object_=t, length=length.int())
def _phent_array(self, type, size, length):
return self._ent_array(PhdrEntries, type, size, length)
def _shent_array(self, type, size, length):
return self._ent_array(ShdrEntries, type, size, length)
def __e_flags(self):
res = self['e_machine'].li.int()
return E_FLAGS.withdefault(res, type=res)
class e_phnum(PN_, Elf32_Half): pass
def __padding(self):
res = self['e_ehsize'].li
cb = sum(self[fld].li.size() for fld in self.keys()[:-1]) + E_IDENT().a.blocksize()
return dyn.block(res.int() - cb)
_fields_ = [
(ET_, 'e_type'),
(EM_, 'e_machine'),
(E_VERSION, 'e_version'),
(Elf32_VAddr, 'e_entry'),
(lambda self: dyn.clone(Elf32_BaseOff, _object_=lambda s: self._phent_array(segment.Elf32_Phdr, self['e_phentsize'].li, self['e_phnum'].li)), 'e_phoff'),
(lambda self: dyn.clone(Elf32_Off, _object_=lambda s: self._shent_array(section.Elf32_Shdr, self['e_shentsize'].li, self['e_shnum'].li)), 'e_shoff'),
(__e_flags, 'e_flags'),
(Elf32_Half, 'e_ehsize'),
(Elf32_Half, 'e_phentsize'),
(e_phnum, 'e_phnum'),
(Elf32_Half, 'e_shentsize'),
(Elf32_Half, 'e_shnum'),
(Elf32_Half, 'e_shstrndx'),
(__padding, 'padding'),
]
def stringtable(self):
res, index = self['e_shoff'].d.li, self['e_shstrndx'].int()
if index < len(res):
return res[index]['sh_offset'].d.li
raise ptypes.error.ItemNotFoundError(self, 'stringtable')
### 64-bit
class Elf64_Ehdr(pstruct.type, ElfXX_Ehdr):
def _ent_array(self, entries, type, size, length):
t = dyn.clone(type, blocksize=lambda self, cb=size.int(): cb)
return dyn.clone(entries, _object_=t, length=length.int())
def _phent_array(self, type, size, length):
return self._ent_array(PhdrEntries, type, size, length)
def _shent_array(self, type, size, length):
return self._ent_array(ShdrEntries, type, size, length)
def __e_flags(self):
res = self['e_machine'].li.int()
return E_FLAGS.withdefault(res, type=res)
class e_phnum(PN_, Elf64_Half): pass
def __padding(self):
res = self['e_ehsize'].li
cb = sum(self[fld].li.size() for fld in self.keys()[:-1]) + E_IDENT().a.blocksize()
return dyn.block(res.int() - cb)
_fields_ = [
(ET_, 'e_type'),
(EM_, 'e_machine'),
(E_VERSION, 'e_version'),
(Elf64_VAddr, 'e_entry'),
(lambda self: dyn.clone(Elf64_BaseOff, _object_=lambda s: self._phent_array(segment.Elf64_Phdr, self['e_phentsize'].li, self['e_phnum'].li)), 'e_phoff'),
(lambda self: dyn.clone(Elf64_Off, _object_=lambda s: self._shent_array(section.Elf64_Shdr, self['e_shentsize'].li, self['e_shnum'].li)), 'e_shoff'),
(__e_flags, 'e_flags'),
(Elf64_Half, 'e_ehsize'),
(Elf64_Half, 'e_phentsize'),
(e_phnum, 'e_phnum'),
(Elf64_Half, 'e_shentsize'),
(Elf64_Half, 'e_shnum'),
(Elf64_Half, 'e_shstrndx'),
(__padding, 'padding'),
]
def stringtable(self):
res, index = self['e_shoff'].d.li, self['e_shstrndx'].int()
if index < len(res):
return res[index]['sh_offset'].d.li
raise ptypes.error.ItemNotFoundError(self, 'stringtable')
### Archives
class Elf_Armag(pstr.string):
length = 8
def default(self, **kwargs):
archiveQ = next((kwargs.get(item) for item in kwargs if item in {'thin', 'archive'}), True)
if archiveQ:
return self.set('!<arch>\012')
return self.set('!<thin>\012')
def valid(self):
res = self.str()
if res == self.copy().default(archive=True).str():
return True
elif res == self.copy().default(thin=True).str():
return True
return False
def properties(self):
res = super(Elf_Armag, self).properties()
if self.initializedQ():
res['valid'] = self.valid()
return res
class Elf_Arhdr(pstruct.type):
class time_t(stringinteger):
length = 12
def datetime(self):
res = self.int()
return datetime.datetime.fromtimestamp(res, datetime.timezone.utc)
def gmtime(self):
res = self.int()
return time.gmtime(res)
def details(self):
tzinfo = datetime.timezone(datetime.timedelta(seconds=-(time.altzone if time.daylight else time.timezone)))
try:
res = self.datetime().astimezone(tzinfo)
except (ValueError, OverflowError):
return super(Elf_Arhdr.time_t, self).details() + '\n'
return "({:d}) {!s}".format(self.int(), res.isoformat())
repr = details
def summary(self):
tzinfo = datetime.timezone(datetime.timedelta(seconds=-(time.altzone if time.daylight else time.timezone)))
try:
res = self.datetime().astimezone(tzinfo)
except (ValueError, OverflowError):
return super(Elf_Arhdr.time_t, self).summary()
return "({:d}) {!s}".format(self.int(), res.isoformat())
class uid_t(stringinteger): length = 6
class gid_t(stringinteger): length = 6
class mode_t(octalinteger): length = 8
class size_t(stringinteger): length = 10
class _fmag(pstr.string):
length = 2
def default(self):
return self.set('`\012')
_fields_ = [
(dyn.clone(padstring, length=0x10), 'ar_name'),
(time_t, 'ar_date'),
(uid_t, 'ar_uid'),
(gid_t, 'ar_gid'),
(mode_t, 'ar_mode'),
(size_t, 'ar_size'),
(_fmag, 'ar_fmag'),
]
def summary(self):
try:
name, ts = self['ar_name'], self['ar_date'].summary()
mode, size, uid, gid = (self[fld].int() for fld in ['ar_mode', 'ar_size', 'ar_uid', 'ar_gid'])
return "ar_name=\"{!s}\" ar_mode={:o} ar_size={:+d} ar_date={:s} ar_uid/ar_gid={:d}/{:d}".format(name.str(), mode, size, ts.isoformat(), uid, gid)
except ValueError:
pass
return super(Elf_Arhdr, self).summary()
class Elf_Arnames(pstruct.type):
class _an_pointer(parray.type):
_object_ = pint.bigendian(pint.uint32_t)
def summary(self):
iterable = (item.int() for item in self)
return "[{:s}]".format(', '.join(map("{:#x}".format, iterable)))
def __an_pointer(self):
res = self['an_count'].li
return dyn.clone(self._an_pointer, length=res.int())
class _an_table(parray.type):
_object_ = pstr.szstring
def summary(self):
iterable = (item.str() for item in self)
return "[{:s}]".format(', '.join(iterable))
def __an_table(self):
res = self['an_count'].li
return dyn.clone(self._an_table, length=res.int())
_fields_ = [
(pint.bigendian(pint.uint32_t), 'an_count'),
(__an_pointer, 'an_pointer'),
(__an_table, 'an_table'),
]
class Elf_Armember(pstruct.type):
def __am_data(self):
res = self['am_hdr'].li
if res['ar_name'].str() == '//':
return dyn.clone(pstr.string, length=res['ar_size'].int())
elif res['ar_name'].str() == '/':
return Elf_Arnames
return dyn.block(res['ar_size'].int())
_fields_ = [
(Elf_Arhdr, 'am_hdr'),
(__am_data, 'am_data'),
]
| [
"arizvisa@gmail.com"
] | arizvisa@gmail.com |
e41738150e99fb09f7fe947af5117b5707b2b180 | 903822cbf59801231f44e02d4a3ee94079fe689a | /directionFinder.py | 924694c287d1626049a903a596532284731f9812 | [] | no_license | jackcmac/cuhackit-2020 | a48d12fb1aea8a27f2fdce1415a6f0fafbd2fa0e | 80df9ad72621547b1f566ce7bc576887e61cebc2 | refs/heads/master | 2020-12-21T00:44:07.315857 | 2020-01-26T12:54:41 | 2020-01-26T12:54:41 | 236,257,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | import numpy as np
import math
import pyproj
def latlong_to_3d(latr, lonr):
"""Convert a point given latitude and longitude in radians to
3-dimensional space, assuming a sphere radius of one."""
return np.array((
math.cos(latr) * math.cos(lonr),
math.cos(latr) * math.sin(lonr),
math.sin(latr)
))
def angle_between_vectors_degrees(u, v):
"""Return the angle between two vectors in any dimension space,
in degrees."""
return np.degrees(
math.acos(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))
# The points in tuple latitude/longitude degrees space
A = (-82.83707404144522, 34.67597218057034)
B = (-82.83708190352426, 34.67603521356209)
C = (-82.83709584891056, 34.67603192723286)
# Convert the points to numpy latitude/longitude radians space
a = np.radians(np.array(A))
b = np.radians(np.array(B))
c = np.radians(np.array(C))
# Vectors in latitude/longitude space
avec = a - b
cvec = c - b
# Adjust vectors for changed longitude scale at given latitude into 2D space
lat = b[0]
avec[1] *= math.cos(lat)
cvec[1] *= math.cos(lat)
# # Find the angle between the vectors in 2D space
# angle2deg = angle_between_vectors_degrees(avec, cvec)
#
#
# # The points in 3D space
# a3 = latlong_to_3d(*a)
# b3 = latlong_to_3d(*b)
# c3 = latlong_to_3d(*c)
#
# # Vectors in 3D space
# a3vec = a3 - b3
# c3vec = c3 - b3
#
# # Find the angle between the vectors in 2D space
# angle3deg = angle_between_vectors_degrees(a3vec, c3vec)
#
#
# # Print the results
# print('\nThe angle ABC in 2D space in degrees:', angle2deg)
# print('\nThe angle ABC in 3D space in degrees:', angle3deg)
def calculate_angle(point_a, point_b):
    """Signed angle from *point_b*'s direction to *point_a*'s, in [0, 360).

    Each point is an (x, y) pair; its direction is measured with
    atan2(y, x) relative to the positive x axis.
    """
    theta_a = np.arctan2(point_a[1], point_a[0])
    theta_b = np.arctan2(point_b[1], point_b[0])
    return np.rad2deg((theta_a - theta_b) % (2 * np.pi))
angleResult = calculate_angle(avec, cvec)
print('angleResult = ', angleResult)
# Classify the turn at B from the angle between the two legs (degrees).
if angleResult < 135:
    print('go right')
elif angleResult < 225:
    print('go straight')
else:
    print('go left')
# Geodesic distance and azimuths between A and B on the WGS84 ellipsoid.
# NOTE(review): A/B appear to be (lon, lat) tuples, so `lat0, lon0 = A`
# swaps the coordinates before geod.inv(lon0, lat0, ...) — verify.
geod = pyproj.Geod(ellps='WGS84')
lat0, lon0 = A
lat1, lon1 = B
azimuth1, azimuth2, distance = geod.inv(lon0, lat0, lon1, lat1)
print('distance in feet', distance * 3.28084)
# Normalise azimuths from (-180, 180] to [0, 360).
if azimuth1 < 0:
    azimuth1 += 360
if azimuth2 < 0:
    azimuth2 += 360
print(' azimuth', azimuth1, azimuth2)
# Heading change between back and forward azimuth; within +/-45 degrees
# of 0/360 counts as straight.  (The 365 bound is unreachable: net < 360.)
net = azimuth2 - azimuth1
if (net > 315 and net <= 365) or (net >= 0 and net < 45):
    print('straight')
elif net > 0:
    print('right')
else:
    print('left')
| [
"noreply@github.com"
] | jackcmac.noreply@github.com |
b6560b4059ab59a2021f313d7423356d18ca79e8 | 7ec24b34aba5e0f15737a0e02a64e527cdb3b28e | /Fluid_Mechanics_White_7th/white_9_9.py | b86c6bb5c23005d9c350411761c902d9113577ac | [
"MIT"
] | permissive | ekremekc/compressibleflow | f76be7f3d39e8df041cd8741bc767791a0b98de4 | b72af73d8e5097d2f08452c9e0dee44f4ab7b038 | refs/heads/master | 2022-12-15T21:31:39.713211 | 2020-09-05T22:33:46 | 2020-09-05T22:33:46 | 272,946,332 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | from compressibleflow import Gas, RocketNozzle, Air
# Converging-diverging nozzle example (problem 9.9 per the filename).
T0 = 500   # stagnation temperature — units per compressibleflow.Air; presumably K, TODO confirm
P0 = 1000  # stagnation pressure — presumably kPa, TODO confirm
gas = Air(T0,P0)
A_star = 0.002  # throat area
A_exit = 0.008  # exit area
#a) exit Mach number / pressure from the 'downward' branch of ma_finder,
#   plus mass flow through the choked throat
area_ratio = A_exit/A_star
Ma_e = Gas.ma_finder(gas, 'downward', area_ratio)
P_e = Gas.exit_pressure(gas, Ma_e)
m_throat = Gas.critical_m_dot(gas, 1,gas.diameter(A_star))
#b)
#c) same area ratio solved on the 'upward' branch of ma_finder
Ma_e_c = Gas.ma_finder(gas, 'upward', area_ratio)
P_e_c = Gas.exit_pressure(gas, Ma_e_c)
| [
"noreply@github.com"
] | ekremekc.noreply@github.com |
3b2eb6184a74fe1795d4723d8869074529581a3e | 3886c78ffed288379a54865ec6e494a514207d0d | /caravantone/run_app.py | 6401778510ae188dc7a1f2f93b0a3d52db940c00 | [] | no_license | Attsun1031/caravantone | 34a2a8b612af7bafc49b063f50953abe469d393b | bc5a9e481649e67f2c631aaf55384a4fce051ba7 | refs/heads/master | 2021-06-11T14:31:43.966237 | 2014-07-20T03:41:08 | 2014-07-20T03:41:08 | 18,760,513 | 0 | 0 | null | 2021-06-01T21:53:28 | 2014-04-14T12:55:19 | JavaScript | UTF-8 | Python | false | false | 226 | py | # -*- coding: utf-8 -*-
from caravantone import app
if __name__ == "__main__":
    # Start the Flask development server with host/port from the app config.
    # The commented-out variant enabled SSL; `context` is not defined here.
    #app.run(host=app.config['HOST'], port=app.config['PORT'], ssl_context=context)
    app.run(host=app.config['HOST'], port=app.config['PORT'])
| [
"atsumi.tatsuya@gmail.com"
] | atsumi.tatsuya@gmail.com |
d68fe861a80437aa7df982272ee1d513723f0492 | 69582e48fd7965df3f769c52e27caf0868a09e93 | /lista1/roberta/questao4.py | 23e35af4384697af77761db66b666f007e2d6d4d | [] | no_license | yurimalheiros/IP-2019-2 | b591cd48fd8f253dfd17f2f99d5a2327b17432c2 | 25b9e5802709a7491933d62d8300cbd7c3ef177f | refs/heads/master | 2020-09-18T16:59:50.937764 | 2020-02-26T15:42:41 | 2020-02-26T15:42:41 | 224,156,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Função Definir alarme
# Autor Roberta de Lima
from datetime import datetime, timedelta
# Static version: fixed start time (14:00) and fixed 51-hour offset
print("ALARME")
dt = datetime(2019,11,3, 14)
hrAlarme = dt + timedelta(hours=51)
print("Sendo 14hrs, daqui a 51hrs o alarme tocará às ",hrAlarme.strftime("%H:%M "))
# Dynamic version (commented out): read the offset in hours from the user
#tempo = int(input("Digite o tempo para alarme(horas): "))
#hj = datetime.now()
#hrAlarme = hj + timedelta(hours=tempo)
#print("Hora do alarme: ", hrAlarme.strftime("%H:%M %d/%m/%Y"))
| [
"yurimalheiros@gmail.com"
] | yurimalheiros@gmail.com |
29ee841cd44cf44b514b4bb084b5ce230d1b30e5 | 42ae0da51d7099c53717b8626940eb3fde7bc3a3 | /routes/__init__.py | 0a305fc281d4e6c40eaf3303ab586f7a10381840 | [] | no_license | single-person/flask-web | ad300a9212252e875e343f758d61622a55bb5d70 | e224ad3fecee065ebbc01d4ad2bdbee287735dac | refs/heads/master | 2021-08-14T18:21:57.427354 | 2017-11-16T13:12:50 | 2017-11-16T13:12:50 | 110,972,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | import uuid
from functools import wraps
from flask import session, request, abort
from models.user import User
def current_user():
    """Return the User whose id is stored in the Flask session."""
    return User.find_by(id=session['user_id'])
csrf_tokens = dict()
def csrf_required(f):
    """Decorator: abort with 401 unless the `token` query parameter is a
    valid one-time CSRF token registered for the current user.

    A matching token is consumed (popped) before the wrapped view runs.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        token = request.args.get('token')
        user = current_user()
        if token not in csrf_tokens or csrf_tokens[token] != user.id:
            abort(401)
        csrf_tokens.pop(token)
        return f(*args, **kwargs)
    return wrapper
def new_csrf_token():
    """Create, register, and return a one-time CSRF token for the current user."""
    token = str(uuid.uuid4())
    csrf_tokens[token] = current_user().id
    return token
| [
"single_person@163.com"
] | single_person@163.com |
376950281aa4bac83042e8aa5ee07e19749f02c3 | 13cf6f650a15fac4b7d9e20d55b4fcbf85b8f977 | /seed.py | c4200598f465cd19f97215df98488d94004f49a3 | [
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mrethana/sqlalchemy-more-on-one-to-many-lab-nyc-career-ds-062518 | 1c5c32f3dd7fc0a7e57b1cba0898ac683997daf3 | 07d9ed7e2cf314617e24338270018cec67ce25c3 | refs/heads/master | 2020-03-22T22:21:08.526530 | 2018-07-11T21:21:30 | 2018-07-11T21:21:30 | 140,747,263 | 0 | 0 | null | 2018-07-12T17:54:32 | 2018-07-12T17:54:32 | null | UTF-8 | Python | false | false | 1,559 | py | from models import *
from sqlalchemy import create_engine
import pandas as pd
engine = create_engine('sqlite:///actors.db')
Session = sessionmaker(bind=engine)
session = Session()
# Each CSV is read with pandas and converted to a list of row dicts:
# to_dict(orient='records') makes one dictionary per player row, keyed
# by the CSV column headers.
# ____ example ______
# la_dodgers0 = pd.read_csv('la_dodgers_baseball.csv')
# la_dodgers1 = pd.read_csv('la_dodgers_baseball.csv').to_dict()
# la_dodgers2 = pd.read_csv('la_dodgers_baseball.csv').to_dict(orient='records')
# import pdb; pdb.set_trace()
# __________________
la_dodgers = pd.read_csv('la_dodgers_baseball.csv').to_dict(orient='records')
la_lakers = pd.read_csv('la_lakers_basketball.csv').to_dict(orient='records')
ny_yankees = pd.read_csv('ny_yankees_baseball.csv').to_dict(orient='records')
ny_knicks = pd.read_csv('ny_knicks_basketball.csv').to_dict(orient='records')
# TODO (lab exercise): create the Teams, Cities and Sports first, then
# iterate these record lists to create and commit the players.
| [
"terrancekoar@gmail.com"
] | terrancekoar@gmail.com |
3830086a02340a3597a265882c952d1206107884 | db1d2871e7958209d68a1f1e9d5c0d8a003a200e | /final/trainer.py | b884178e1bd5d06ea4425bb34e3a0a0bb523096c | [] | no_license | anpoli99/GenerativePCA | 84e4ffe739a59d3c6517a0707b180198c2e173c2 | 4bc79c247928fe9d515c0e3595adfd330ac6fab4 | refs/heads/master | 2021-01-07T01:01:03.405866 | 2020-02-20T13:32:01 | 2020-02-20T13:32:01 | 241,533,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,918 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Flatten, Reshape, Input, InputLayer, MaxPooling2D, UpSampling2D, Conv2D
from keras.models import Sequential, Model
import keras
from os import listdir
from os.path import isfile, join
import tensorflow as tf
import main as m
"""
Train the neural network, save to given path.
"""
IMG_SIZE = 128     # image side length (all images square)
set_start = 30000  # starting index into the dataset
set_size = 30000   # number of samples drawn from the dataset
load = True        # load an existing network instead of initialising a new one
load_path = 'hd2048'
dense_size = 4096  # width of the dense bottleneck code in the autoencoder
epoch = 6
rep = 300          # training repetitions (can be large; terminate manually)
save_path = 'hd2048'
show = 0           # draw autoencoder results every `show` repetitions (0 = never)
check = False      # also show reconstructions of custom out-of-dataset images
def show_image(x):
    """Display image *x* (values centred on 0) by shifting into [0, 1]."""
    pixels = np.clip(x + 0.5, 0, 1)
    plt.imshow(pixels)
def build_autoencoder(img_shape, code_size):
    """Build a linear (single Dense layer) autoencoder pair.

    The encoder flattens an `img_shape` image and projects it to a
    `code_size`-dimensional code; the decoder maps the code back to
    np.prod(img_shape) values and reshapes them to `img_shape`.
    Returns the tuple (encoder, decoder).
    """
    # The encoder
    encoder = Sequential()
    encoder.add(Flatten(input_shape= img_shape))
    encoder.add(Dense(code_size))
    # The decoder
    decoder = Sequential()
    decoder.add(Dense(np.prod(img_shape), input_shape=(code_size,)))
    decoder.add(Reshape(img_shape))
    return encoder, decoder
##draws original image + result from encoding
def visualize(img, encoder, decoder):
    """Show the original image, its latent code, and the reconstruction.

    Fix: the original header ended with a stray line-continuation
    backslash (`def visualize(...):\\`), which joined the first body line
    onto the `def` and made the rest of the indented block an
    IndentationError — the module could not even be imported.
    """
    code = encoder.predict(img[None])[0]
    reco = decoder.predict(code[None])[0]

    plt.subplot(1, 3, 1)
    plt.title("Original")
    show_image(img)

    plt.subplot(1, 3, 2)
    plt.title("Code")
    plt.imshow(code.reshape([code.shape[-1] // 2, -1]))

    plt.subplot(1, 3, 3)
    plt.title("Reconstructed")
    show_image(reco)
    plt.show()
##draws original image + result from encoding
def vis2(img, autoenc):
    """Show *img* next to the autoencoder's reconstruction of it."""
    reconstructed = autoenc.predict(img[None])[0]
    plt.subplot(1, 3, 1)
    plt.title("Original")
    show_image(img)
    plt.subplot(1, 3, 3)
    plt.title("Reconstructed")
    show_image(reconstructed)
    plt.show()
# Load a dataset sample and centre pixel values on 0 (range [-0.5, 0.5]).
paths = m.initgroup("path to dataset here", set_size, set_start)
X = m.load_lfw_dataset(paths, IMG_SIZE)
X = X.astype('float32') / 255.0 - 0.5
IMG_SHAPE = X.shape[1:]
X_train, X_test = train_test_split(X, test_size=0.1, random_state=42)
# Either resume from saved encoder/decoder models or build fresh ones.
if load:
    encoder = keras.models.load_model('path containing load file here' + load_path + 'ec.h5')
    encoder.name = 'enc'
    decoder = keras.models.load_model('path containing load file here' + load_path + 'dc.h5')
    decoder.name = 'dec'
else:
    encoder, decoder = build_autoencoder(IMG_SHAPE, dense_size)
# Chain encoder and decoder into the end-to-end autoencoder graph.
inp = Input(IMG_SHAPE)
code = encoder(inp)
reconstruction = decoder(code)
autoencoder = Model(inp,reconstruction)
autoencoder.compile(optimizer='adam', loss='mse')
ct = 1
# Train in `rep` rounds of `epoch` epochs, checkpointing after each round.
for x in range(rep):
    print("Trial " + str(x))
    history = autoencoder.fit(x=X_train, y=X_train, batch_size=2000, epochs=epoch,
                    validation_data=[X_test, X_test])
    encoder.save('path containing load file here' + save_path +'ec.h5')
    decoder.save('path containing load file here' + save_path +'dc.h5')
    autoencoder.save('path containing load file here' + save_path +'ac.h5')
    ct += 1
    # Periodically visualise reconstructions of held-out images.
    if show != 0 and ct % show == 0:
        for i in range(20):
            img = X_test[i]
            visualize(img,encoder,decoder)
# Optionally inspect reconstructions of images outside the dataset.
if check:
    x_test = m.load_lfw_dataset(m.initgroup('path containing test image dataset here'))
    x_test = x_test.astype('float32') / 255.0 - 0.5
    for x in x_test:
        visualize(x,encoder,decoder)
for i in range(20):
    img = X_test[i]
    visualize(img,encoder,decoder)
| [
"noreply@github.com"
] | anpoli99.noreply@github.com |
9a77c4750eb1a8a2defb42f47f243ebc67da8d86 | 8da427b5619ecda9b611ab3cc0646832a0e60253 | /TelecomChurnPrediction_Flusk_Deployment/app.py | 4ae354edad3d74f8e00048d1cfa7f413eb7efebf | [] | no_license | anirbansarkar823/Telecom_Churn_Predection | 875135f371f92a3044ec8894e2304a70e5fc45a8 | 3e00529febc49b6471a305753cd56146b67b7cd7 | refs/heads/master | 2023-06-29T02:53:29.881001 | 2021-08-01T16:43:33 | 2021-08-01T16:43:33 | 391,682,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,154 | py |
# coding: utf-8
# loading libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from flask import Flask, request, render_template
import pickle
app = Flask("__name__") # instantiating the flask
#df_1=pd.read_csv("first_telc.csv") # specific requirement
q = ""
@app.route("/")
def loadPage():
    """Render the empty churn-prediction input form (templates/home.html)."""
    return render_template('home.html', query="")
@app.route("/", methods=['POST'])
def predict():
    """Predict churn for one customer from the submitted form.

    The form posts 19 fields named query1..query19 in this fixed order:
    SeniorCitizen, MonthlyCharges, TotalCharges, gender, Partner,
    Dependents, PhoneService, MultipleLines, InternetService,
    OnlineSecurity, OnlineBackup, DeviceProtection, TechSupport,
    StreamingTV, StreamingMovies, Contract, PaperlessBilling,
    PaymentMethod, tenure.  The row is bucketed and one-hot encoded the
    same way as the training data, scored by the pickled model, and the
    form is re-rendered with the verdict, its confidence, and every
    submitted value echoed back.

    Improvement: the 19 copy-pasted ``request.form['queryN']`` reads and
    the 19 copy-pasted template keyword arguments are replaced by loops;
    the redundant ``new_df``/``df_2.copy()`` step is removed.
    """
    column_names = ['SeniorCitizen', 'MonthlyCharges', 'TotalCharges', 'gender',
                    'Partner', 'Dependents', 'PhoneService', 'MultipleLines',
                    'InternetService', 'OnlineSecurity', 'OnlineBackup',
                    'DeviceProtection', 'TechSupport', 'StreamingTV',
                    'StreamingMovies', 'Contract', 'PaperlessBilling',
                    'PaymentMethod', 'tenure']

    # Collect query1..query19 in order.
    form_values = [request.form['query{}'.format(i)] for i in range(1, 20)]

    # NOTE(review): the model is re-loaded from disk on every request;
    # consider loading it once at module import time.
    model = pickle.load(open("model.sav", "rb"))

    df_2 = pd.DataFrame([form_values], columns=column_names)

    # Group the tenure in bins of 12 months, matching the training features.
    labels = ["{0} - {1}".format(i, i + 11) for i in range(1, 72, 12)]
    df_2['tenure_group'] = pd.cut(df_2.tenure.astype(int), range(1, 80, 12),
                                  right=False, labels=labels)
    # The raw tenure column is replaced by its bucket.
    df_2.drop(columns=['tenure'], axis=1, inplace=True)

    # One-hot encode the categorical columns only (the two numeric charge
    # columns are intentionally not part of the model input, as before).
    new_df__dummies = pd.get_dummies(df_2[['gender', 'SeniorCitizen', 'Partner', 'Dependents', 'PhoneService',
                                           'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup',
                                           'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies',
                                           'Contract', 'PaperlessBilling', 'PaymentMethod', 'tenure_group']])

    single = model.predict(new_df__dummies.tail(1))
    probability = model.predict_proba(new_df__dummies.tail(1))[:, 1]

    if single == 1:
        o1 = "This customer is likely to be churned!!"
    else:
        o1 = "This customer is likely to continue!!"
    o2 = "Confidence: {}".format(probability * 100)

    # Echo every submitted field back into the template.
    echoed = {'query{}'.format(i): request.form['query{}'.format(i)]
              for i in range(1, 20)}
    return render_template('home.html', output1=o1, output2=o2, **echoed)
app.run()
| [
"anirbansarkar823@gmail.com"
] | anirbansarkar823@gmail.com |
33315eec8cad01b618f9fb0490797a465a9ff420 | 78f08e427cceaa18d1e87d400ca90acd171a3d9c | /extjs_cc/js_profile.py | af2edd5f136a3eec8bf89c00e03ce5281d09b733 | [] | no_license | joeedh/FunLittleProject | 29e13ef743092a3c89605efd722a375dcc9be3af | ad99c8310f54e631bdc05716175108633c5c8619 | refs/heads/master | 2021-01-17T17:38:08.821410 | 2016-06-06T02:54:09 | 2016-06-06T02:54:09 | 60,484,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | from js_process_ast import *
def type_logger(node, typespace):
    """Instrument a parsed JavaScript AST for call profiling.

    Prepends to every function, method, getter and setter a snippet
    that builds a comma-separated string of each runtime argument's
    type name and passes it — together with a tag ("FUNC"/"METH"/
    "SETR"/"GETR") and the call stack — to `_profile_log`.
    `typespace` is accepted for interface symmetry but unused here.
    """
    def arg_log():
        # JS snippet that fills `_args` with a type name per argument.
        n = js_parse("""
        var _args = "";
        for (var i=0; i<arguments.length; i++) {
          if (i > 0)
            _args += ","
          if (typeof arguments[i] == "object")
            _args += arguments[i].constructor.name;
          else if (typeof arguments[i] == "number")
            _args += "number";
          else if (typeof arguments[i] == "boolean")
            _args += "boolean";
          else if (typeof arguments[i] == "string")
            _args += "string";
          else if (arguments[i] == null)
            _args += "null";
          else if (arguments[i] == undefined)
            _args += "undefined";
          else
            _args += "[type error]";
        }
        """);
        return n
    def funclog(name):
        # Wrap the arg-typing snippet with the actual _profile_log call.
        log = arg_log()
        n2 = js_parse("""
        $n;
        _profile_log("$s", _args, get_callstack());
        """, [log, name]);
        return n2
    # One prepender per node kind, each tagging the log entry differently.
    def func(n):
        n.prepend(funclog("FUNC"))
    def method(n):
        n.prepend(funclog("METH"))
    def setter(n):
        n.prepend(funclog("SETR"))
    def getter(n):
        n.prepend(funclog("GETR"))
    traverse(node, FunctionNode, func)
    traverse(node, MethodNode, method)
    traverse(node, MethodSetter, setter)
    traverse(node, MethodGetter, getter)
def crash_logger(node, typespace):
    """Placeholder for crash-logging instrumentation; not implemented yet."""
    pass
| [
"joeedh@JOESLAPTOP.localdomain"
] | joeedh@JOESLAPTOP.localdomain |
bbb923534301c4cae5c32ca5b00cd3f7312c7a8e | e89de171ca5b4762cde957dd35048af341dd457a | /metrics_mem/metrics_mem.py | 501a086ecbdd60dc60e1185aa34552dceee97585 | [] | no_license | Alexcd83/git_init | 553ac3c75e0d31279ce08791edb8fc650ed9b147 | b70a0b177361c2195a03a92d45bc6dd02547d2d8 | refs/heads/master | 2021-01-14T15:48:59.693148 | 2020-02-25T13:20:56 | 2020-02-25T13:20:56 | 242,668,651 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/env python3
import psutil
# Print a snapshot of virtual-memory and swap metrics via psutil.
m=psutil.virtual_memory()
print("Memory Metrics:")
print("virtual total - %r" % m.total)
print("virtual used - %r" % m.used)
print("virtual free - %r" % m.free)
# NOTE(review): `shared` is only available on some platforms (e.g. Linux).
print("virtual shared - %r" % m.shared)
w=psutil.swap_memory()
print("swap total - %r" % w.total)
print("swap used - %r" % w.used)
print("swap free - %r" % w.free)
| [
"localadmin@SEAD2MONGRAF1.3jzp01jr1hduhhq4whxfbs3qfd.ix.internal.cloudapp.net"
] | localadmin@SEAD2MONGRAF1.3jzp01jr1hduhhq4whxfbs3qfd.ix.internal.cloudapp.net |
81ca441693e2d19cef17640464f2ec14d72e34a1 | 9297cf5f12e1b361535653622f9c4f9571a89de1 | /VoiceActivityDetection.py | 12352c71bc47993aa6299a834e7c81b1a3fe4e59 | [
"MIT"
] | permissive | nayyarv/python_speech_features | 81b2004e1be9492705367c0a1ffd290a448c7e3e | 385af774df7bcccc4b836b7a093990ff75d47eb5 | refs/heads/master | 2021-01-15T09:09:16.197491 | 2014-11-05T05:08:08 | 2014-11-05T05:08:08 | 26,201,908 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | __author__ = 'Varun Nayyar'
__doc__ = \
"""
This file is to be modified by users to provide their own Voice Activity Detection (VAD) functions.
I.e. not all frames will have speech present and it is common to remove these frames in many situations
These functions can be used in most base functions by passing VAD = myVADfunction where
myVADfunction follows the template provided.
"""
import numpy as np
def templateVAD(frames, sig):
    """Template signature for a user-supplied Voice Activity Detection function.

    :param frames: numpy array of [NumFrames][SamplesPerFrame] of all the speech frames
    :param sig: The entire signal [signLen]
    :return: the subset of frames where there is voiced activity detected
    """
    raise NotImplementedError
def simpleVAD(frames, sig, threshold=0.01):
    """Keep only frames whose average power exceeds a fraction of the
    whole signal's average power.

    :param frames: numpy array of [NumFrames][SamplesPerFrame] speech frames
    :param sig: the entire signal the frames were cut from
    :param threshold: fraction of the signal's variance that a frame's
        variance must exceed to count as voiced
    :return: the subset of frames judged to contain activity

    The variance of a frame/signal equals its average power, so this is
    a per-frame power-threshold activity detector.
    """
    power_per_frame = np.var(frames, axis=1)
    cutoff = sig.var() * threshold
    return frames[power_per_frame > cutoff]
| [
"nayyarv@gmail.com"
] | nayyarv@gmail.com |
390a91d3204e239532b30e5056f92b38c109f406 | 4576432964a20bcd247b7705f022055e4f660681 | /1학기/생성자메소드정의.py | 8a0149ef0399c257743e672cf11e74b9471c8913 | [] | no_license | hamjihyeon/Programming-Python- | 2206d8ce01379de007bb12c7e0e09deee03ae697 | 8d62b78b4463d7e84449cdd0441e1e964771f49c | refs/heads/master | 2020-07-07T06:58:15.121982 | 2020-04-02T12:06:49 | 2020-04-02T12:06:49 | 203,284,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | class Car:
    def __init__(self, type, speed):
        # `type` shadows the builtin of the same name, but the parameter
        # name is part of the public signature, so it is kept.
        self.type = type    # car category label, e.g. "스포츠카"
        self.speed = speed  # current speed (a number)
def move(self):
print(self.type + "가 " +str(self.speed) + " 속도로 움직입니다.")
def speed_up(self, amount):
self.speed += amount
def speed_down(self, amount):
self.speed -=amount
# Quick demo: create a sports car, speed it up, report, then slow down.
c = Car("스포츠카", 100)
c.speed_up(10)
c.move()
c.speed_down(10)
c.move() | [
"gkawlgus01@naver.com"
] | gkawlgus01@naver.com |
d08f60cd524554297d85e4fe7e504e3eb05bea32 | 49d4d4ebf3a6552714b154cb53fd8efdafb08eba | /stats/forms.py | 1ca2b83f83cc337533b34bc5389342618f9f5e20 | [] | no_license | iamkjw/Stats-Website | 408e3ef3148257511a9a63e92f9aefc6c66d823a | 200576a13c206a348830b404dc859b083fff9059 | refs/heads/master | 2021-03-17T10:32:01.244204 | 2020-03-14T23:34:36 | 2020-03-14T23:34:36 | 246,983,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django import forms
from .models import Game
class GameRegisterForm(forms.ModelForm):
    """ModelForm for registering a Game; exposes the title and company fields."""
    class Meta:
        model = Game
        fields = ['title', 'company']
| [
"jakekim18@hotmail.com"
] | jakekim18@hotmail.com |
a2101c723405d6479327e1d9f436f0b70b2118dd | 6eb5e75507d0159ce5a587a2aa897cd77d4796d1 | /flask_app/app.py | 88830b05a23df64a450e0875c9ee558efff873a8 | [] | no_license | GenerationTRS80/JS_projects | a53a0ee902fd9c48d38a10492fcbbf169aac8fb6 | 0b2441c8f1e8fd8f4edd5ed36605860a4ac0d9da | refs/heads/master | 2021-06-01T22:38:02.947045 | 2020-07-29T05:11:48 | 2020-07-29T05:11:48 | 146,136,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from flask import Flask
app = Flask(__name__)
@app.route('/<name>')
def index(name):
return '<h1>Hello {}!</h1>'.format(name) | [
"gentrs80@gmail.com"
] | gentrs80@gmail.com |
0507e491e028162a6c7b92b66d02a8189977e890 | f4e8261006265b019d57ceadd52063b43f6cd0b8 | /Set4/[29][Break SHA1]/SHA1.py | 4bcdbe886251d877ed529d34a8900c8da82cbc2b | [] | no_license | rnehra01/cryptopals-solutions | 5aeb5a5792f48df4b1c51a0567951914ebdfef8d | 0142799e10554828919f3dfa4ec650aba90a0faf | refs/heads/master | 2020-06-11T17:42:23.347228 | 2017-01-21T04:36:33 | 2017-01-21T04:36:33 | 75,634,351 | 0 | 0 | null | 2016-12-16T09:22:17 | 2016-12-05T14:49:48 | Python | UTF-8 | Python | false | false | 2,519 | py |
class __SHA1():
    """Pure-Python SHA-1 with hooks for length-extension forgery.

    Fixes over the original:
    * The code was Python-2-only (`print` statement, `str.encode('hex')`,
      `str.decode('hex')`) and could not even be parsed by Python 3; it
      is ported to bytes + `struct` while keeping the exact algorithm.
    * The digest was built with `hex(h)[2:]`, which on Python 2 leaves a
      trailing 'L' when `h` is a long (32-bit builds); `'%08x' % h` is
      correct everywhere.

    Parameters
    ----------
    data : bytes or str
        The message to hash.  When forging, only the suffix appended
        after an existing (already padded) prefix.
    prev_data_len : int
        0 for a fresh hash; otherwise the byte length of the already
        hashed prefix, so the final length field covers the full forged
        message.
    a..e : int
        Initial internal state words; override with the words of a known
        digest to resume hashing for a length-extension attack.
    """

    def __init__(self, data, prev_data_len=0,
                 a=0x67452301,
                 b=0xEFCDAB89,
                 c=0x98BADCFE,
                 d=0x10325476,
                 e=0xC3D2E1F0):
        self.h = [a, b, c, d, e]
        print(self.h)  # kept from the original: debug dump of the initial state
        # Normalise to bytes so padding/word arithmetic is well defined.
        self.data = data.encode() if isinstance(data, str) else bytes(data)
        self.prev_data_len = prev_data_len

    @staticmethod
    def __ROTL(n, x, w=32):
        """Rotate *x* left by *n* bits in a *w*-bit word (caller masks)."""
        return (x << n) | (x >> (w - n))

    def pre_process(self):
        """Append SHA-1 padding to ``self.data``.

        Padding = 0x80, zero bytes up to 56 mod 64, then the 64-bit
        big-endian bit length of the whole (possibly forged) message.
        """
        import struct  # local import keeps this file self-contained
        ml = len(self.data) + self.prev_data_len
        zeros = (55 - ml) % 64
        self.data = self.data + b'\x80' + b'\x00' * zeros + struct.pack('>Q', ml * 8)

    def hash(self):
        """Run the SHA-1 compression loop; return the 40-char hex digest."""
        import struct  # local import keeps this file self-contained
        MASK = 0xFFFFFFFF
        self.pre_process()
        # Process the padded message in 512-bit (64-byte) blocks.
        for i in range(0, len(self.data), 64):
            block = self.data[i:i + 64]
            # Sixteen 32-bit big-endian words, expanded to eighty.
            w = list(struct.unpack('>16I', block))
            for j in range(16, 80):
                w.append(self.__ROTL(1, w[j - 3] ^ w[j - 8] ^ w[j - 14] ^ w[j - 16]) & MASK)
            a, b, c, d, e = self.h
            for j in range(80):
                if j < 20:
                    f = (b & c) ^ (~b & d)
                    k = 0x5A827999
                elif j < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif j < 60:
                    f = (b & c) ^ (b & d) ^ (c & d)
                    k = 0x8F1BBCDC
                else:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                temp = (self.__ROTL(5, a) + f + e + k + w[j]) & MASK
                e = d
                d = c
                c = self.__ROTL(30, b) & MASK
                b = a
                a = temp
            # Fold this block's result into the running state.
            self.h = [(x + y) & MASK for x, y in zip((a, b, c, d, e), self.h)]
        # 160-bit digest as zero-padded big-endian hex.
        return ''.join('%08x' % v for v in self.h)
| [
"hnehra1@gmail.com"
] | hnehra1@gmail.com |
3e1b72386f1c5898b627f85329a56d1f26b86e57 | 1a334401b4a951222620d9432c0714480e4bf587 | /Infodate/models.py | 9724f6ad11af4981f853ab80986577f398a3f536 | [] | no_license | maxproj/max-project | 9f3e1c7302eaeff96a0a98915980ff47926e7f4e | b27146b36861d953e169fda4acdce35589ee62d8 | refs/heads/master | 2020-04-05T06:49:59.802013 | 2018-11-22T12:17:52 | 2018-11-22T12:17:52 | 156,653,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | from django.db import models
from django.utils import timezone
# Choice tuples for Infodate fields: (stored value, human-readable label).
# Condition of the car: new / used.
STATUS_CHOICES_TYPE = (
    ('новый', 'новый'),
    ('б/у', 'б/у'),
)
# Generic yes / no choice.
STATUS_CHOICES = (
    ('да', 'да'),
    ('нет', 'нет'),
)
# Engine variants: displacement / horsepower (engine code).
STATUS_CHOICES_ENG = (
    ('1,4л/75л.с.(K7J)', '1,4л/75л.с.(K7J)'),
    ('1,6л/82л.с.(K7M710)', '1,6л/82л.с.(K7M710)'),
    ('1,6л/86л.с.(K7M710)', '1,6л/86л.с.(K7M710)'),
    ('1,6л/102л.с.(K7M800)', '1,6л/102л.с.(K7M800)'),
    ('1,6л/113л.с. (К7М800)', '1,6л/113л.с.(К7М800)')
)
# Gearbox type: automatic / manual.
STATUS_CHOICES_KPP = (
    ('АКПП', 'АКПП'),
    ('МКПП', 'МКПП'),
)
class Infodate(models.Model):
    """Car ownership/maintenance record for a user.

    Tracks mileage, purchase date, equipment, engine/gearbox options and
    the dates of the most recent service operations.  verbose_name
    labels are user-facing (Russian) and kept as-is.
    """
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    # Mileage values are stored as free-text digit strings.
    probeg_tek = models.CharField(max_length=8, verbose_name="текущий пробег",)
    date_create = models.DateField(auto_now=False, verbose_name="дата приобретения",)
    probeg_all = models.CharField(max_length=8, verbose_name="общий пробег",)
    date_publish = models.DateTimeField(auto_now_add=True, verbose_name="дата публикации",)
    conder = models.CharField(max_length=3, choices = STATUS_CHOICES, default='да', verbose_name="наличие кондиционера",)
    type_expl = models.CharField(max_length=5, choices = STATUS_CHOICES_TYPE, default='новый', verbose_name="тип эксплуатации",)
    type_engine = models.CharField(max_length=20, choices = STATUS_CHOICES_ENG, default='1,4л/75л.с.(K7J)', verbose_name="тип двигателя",)
    type_kpp = models.CharField(max_length=4, choices = STATUS_CHOICES_KPP, default='МКПП', verbose_name="тип КПП",)
    date_oil_last = models.DateField(auto_now=False, verbose_name="дата замены масла(посл.)",)
    date_liq_last = models.DateField(auto_now=False, verbose_name="дата замены ТЖ(посл.)",)
    date_brake_last = models.DateField(auto_now=False, verbose_name="дата замены ОЖ(посл.)",)
    date_to_last = models.DateField(auto_now=False, verbose_name="дата ТО(посл.)",)
    to_make = models.CharField(max_length=3, choices = STATUS_CHOICES, default='да', verbose_name="прохождение ТО",)

    def publish(self):
        """Stamp the record with the current time and save it."""
        self.date_publish = timezone.now()
        self.save()

    def __str__(self):
        # Fixed: the original defined `_str_` (single underscores), which
        # Python never calls, and it returned a tuple; __str__ must
        # return a str.
        return "{} {} {}".format(self.probeg_tek, self.probeg_all, self.conder)
| [
"noreply@github.com"
] | maxproj.noreply@github.com |
4df396dfafec126bad4587eff4e631e324b7f381 | 7e1d1d75fe1d32784ebb892f7a8cf1a3ef394af3 | /gameoflife.py | 6b73d65b5200d7917973a9ef2b237ec0963dc974 | [] | no_license | samhithaaaa/Array-2 | ca0d109ac5b08cb7765006447ecc6bb2e0052cf4 | cdd2f636feb9c960f6c9ce6f41ef727b455c6fbe | refs/heads/master | 2020-09-24T13:45:29.277034 | 2019-12-04T05:23:42 | 2019-12-04T05:23:42 | 225,772,085 | 0 | 0 | null | 2019-12-04T03:36:14 | 2019-12-04T03:36:13 | null | UTF-8 | Python | false | false | 1,036 | py | class Solution:
def gameOfLife(self, board: List[List[int]]) -> None:
if not board:
return
for i in range(len(board)):
for j in range(len(board[0])):
live = self.validneighbours(board,i,j)
if board[i][j] == 0 and live == 3:
board[i][j] = 3
if board[i][j] == 1 and (live < 2 or live > 3):
board[i][j] = 2
for i in range(len(board)):
for j in range(len(board[0])):
board[i][j] = board[i][j] % 2
def validneighbours(self, board,i,j):
directions = [(-1,0), (0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]
lives = 0
for direction in directions:
nr = i + direction[0]
nc = j + direction[1]
if nr >= 0 and nr < len(board) and nc >= 0 and nc < len(board[0]):
if board[nr][nc] == 1 or board[nr][nc] == 2:
lives += 1
return lives
| [
"samhithamamindla037@gmail.com"
] | samhithamamindla037@gmail.com |
370ff42546afff8468e9b3558b409c47aac52b59 | e63f01b9409f11322792ac9027a72576c2c46f50 | /core/urls.py | f3b219c0aa1792462fc7a952c98f23124edeeb88 | [] | no_license | danyel66/Exampleblog | b234c53343ec62e15a5ab826370db34da5209ecb | 4afe7cfc91bfbb2a5101db6ee8f118f10d452929 | refs/heads/master | 2021-06-13T21:32:33.898004 | 2020-04-09T18:43:57 | 2020-04-09T18:43:57 | 254,451,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | from django.urls import path
from .views import post_list, post_detail, post_share, post_search
from django.contrib.sitemaps.views import sitemap
from core.sitemaps import PostSitemap
from core.feeds import LatestPostsFeed
# URL namespace for {% url 'core:...' %} reversals.
app_name = 'core'
# Sitemap registry passed to django.contrib.sitemaps.views.sitemap.
sitemaps = {
    'posts': PostSitemap,
}
urlpatterns = [
    path('', post_list, name='post_list'),
    # path('', PostListView.as_view(), name='post_list'),
    path('tag/<slug:tag_slug>/', post_list, name='post_list_by_tag'),
    path('<int:year>/<int:month>/<int:day>/<slug:post>/', post_detail,
         name='post_detail'),
    path('<int:post_id>/share/', post_share, name='post_share'),
    path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
         name='django.contrib.sitemaps.views.sitemap'),
    path('feed/', LatestPostsFeed(), name='post_feed'),
    path('search/', post_search, name='post_search'),
]
| [
"danielonyeka247@gmail.com"
] | danielonyeka247@gmail.com |
5f65118ec3e3f6e131fe983f21f4e4c718a82344 | a03d595545ae7fe698ce359381551a7665bbf946 | /calc/views.py | a53750a1f08fec3a27ad3b2d1563c4ff4e704868 | [] | no_license | darwell/django_travello | f359561dca7206f5c9c0f62a4380a332fdb7498e | cd1def12745df4fcd38a621ffb41762ad777d402 | refs/heads/master | 2021-01-14T02:18:26.335109 | 2020-02-23T18:36:19 | 2020-02-23T18:36:19 | 242,555,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
    """Render the calculator form page with a greeting name."""
    return render(request, 'home.html', {'name': 'Darko!!!'})
def add(request):
    """Read num1/num2 from the POST form and render their integer sum."""
    total = int(request.POST['num1']) + int(request.POST['num2'])
    return render(request, 'result.html', {'result': total})
| [
"darusohrid@yahoo.com"
] | darusohrid@yahoo.com |
245e30a12a1d2ad46ca40d3018cb3e900a6d25a6 | 24f664aa2344d4f5d5e7b048ac4e85231715c4c8 | /experimental/dsmith/glsl/generators.py | 145d0ae0ab124b97f48c357842f95e93fd61bc24 | [] | no_license | speycode/clfuzz | 79320655e879d1e0a06a481e8ec2e293c7c10db7 | f2a96cf84a7971f70cb982c07b84207db407b3eb | refs/heads/master | 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,783 | py | #
# Copyright 2017, 2018 Chris Cummins <chrisc.101@gmail.com>.
#
# This file is part of DeepSmith.
#
# DeepSmith is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# DeepSmith is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# DeepSmith. If not, see <http://www.gnu.org/licenses/>.
#
"""
GLSL program generators.
"""
import math
import random
import string
from time import time
from experimental.dsmith.glsl.db import *
from experimental.dsmith.langs import Generator
from labm8.py import fs
class GlslGenerator(Generator):
    """
    Common baseclass for program generators.

    Subclasses supply `generate_one()`; this base provides bulk generation
    into the database, import of program sources from disk, and simple
    statistics (program count, total SLOC, accumulated generation time).
    """
    # Abstract methods (must be implemented):
    def generate_one(self, session: session_t) -> ProgramProxy:
        """ Generate a single program. """
        raise NotImplementedError("abstract class")

    # Default methods (may be overriden):
    def __repr__(self):
        # Bold green generator name for terminal output.
        return f"{Colors.BOLD}{Colors.GREEN}{self.__name__}{Colors.END}"

    def num_programs(self, session: session_t = None) -> int:
        """ return the number of generated programs in the database """
        with ReuseSession(session) as s:
            return (
                s.query(func.count(Program.id))
                .filter(Program.generator == self.id)
                .scalar()
            )

    def sloc_total(self, session: session_t = None) -> int:
        """ return the total linecount of generated programs """
        with ReuseSession(session) as s:
            return (
                s.query(func.sum(Program.linecount))
                .filter(Program.generator == self.id)
                .scalar()
            )

    def generation_time(self, session: session_t = None) -> float:
        """ return the total generation time of all programs """
        with ReuseSession(session) as s:
            return (
                s.query(func.sum(Program.generation_time))
                .filter(Program.generator == self.id)
                .scalar()
                or 0  # SQL SUM() is NULL (None) when there are no rows yet
            )

    def num_testcases(self, session: session_t = None) -> int:
        """ return the total number of testcases """
        with ReuseSession(session) as s:
            return (
                s.query(func.count(Testcase.id))
                .join(Program)
                .filter(Program.generator == self.id)
                .scalar()
            )

    def generate(self, n: int = math.inf, up_to: int = math.inf) -> None:
        """ generate 'n' new programs 'up_to' this many exist in db """
        with Session() as s:
            num_progs = self.num_programs(s)
            # Determine the termination criteria:
            if n == math.inf and up_to == math.inf:
                max_value = math.inf
                bar_max = progressbar.UnknownLength
            elif n == math.inf:
                max_value = up_to
                bar_max = max_value
            else:
                max_value = num_progs + n
                bar_max = max_value
            # Exit early if possible:
            if num_progs >= max_value:
                print(
                    f"There are already {Colors.BOLD}{num_progs}{Colors.END} "
                    "programs in the database. Nothing to be done."
                )
                return
            # Print a preamble message:
            num_to_generate = max_value - num_progs
            if num_to_generate < math.inf:
                # ETA = average time per existing program * programs remaining.
                estimated_time = (
                    self.generation_time(s) / max(num_progs, 1)
                ) * num_to_generate
                eta = humanize.Duration(estimated_time)
                print(
                    f"{Colors.BOLD}{num_to_generate}{Colors.END} programs are "
                    "to be generated. Estimated generation time is "
                    + f"{Colors.BOLD}{eta}{Colors.END}."
                )
            else:
                print(f"Generating programs {Colors.BOLD}forever{Colors.END} ...")
            bar = progressbar.ProgressBar(
                initial_value=num_progs, max_value=bar_max, redirect_stdout=True
            )
            # The actual generation loop: buffer proxies and flush them to the
            # database in batches of dsmith.DB_BUF_SIZE, deduplicated on sha1.
            buf = []
            while num_progs < max_value:
                buf.append(self.generate_one(s))
                # Update progress bar
                num_progs += 1
                bar.update(num_progs)
                if len(buf) >= dsmith.DB_BUF_SIZE:
                    save_proxies_uniq_on(s, buf, "sha1")
                    # Re-query: duplicates may have been dropped on save.
                    num_progs = self.num_programs(s)
                    buf = []
            save_proxies_uniq_on(s, buf, "sha1")
            print(
                f"All done! You now have {Colors.BOLD}{num_progs}{Colors.END} "
                f"{self} programs in the database"
            )

    def import_from_dir(self, indir: Path) -> None:
        """ import program sources from a directory """
        with Session() as s:
            start_num_progs = self.num_programs(s)

            # NOTE(review): `_save` appears to be dead code -- it is never
            # called below (the loop uses save_proxies_uniq_on) and contains
            # a hard sys.exit(0) debugging path. Consider deleting it.
            def _save(proxies):
                # Create records from proxies:
                programs = [proxy.to_record(s) for proxy in proxies]
                app.Warning(getattr(type(programs[0]), "sha1"))
                import sys
                sys.exit(0)
                # Filter duplicates in the set of new records:
                programs = dict(
                    (program.sha1, program) for program in programs
                ).values()
                # Fetch a list of dupe keys already in the database:
                sha1s = [program.sha1 for program in programs]
                dupes = set(
                    x[0] for x in s.query(Program.sha1).filter(Program.sha1.in_(sha1s))
                )
                # Filter the list of records to import, excluding dupes:
                uniq = [program for program in programs if program.sha1 not in dupes]
                # Import those suckas:
                s.add_all(uniq)
                s.commit()
                nprog, nuniq = len(programs), len(uniq)
                app.Log(1, f"imported {nuniq} of {nprog} unique programs")

            num_progs = self.num_programs(s)
            # Print a preamble message:
            paths = fs.ls(indir, abspaths=True)
            num_to_import = humanize.Commas(len(paths))
            print(
                f"{Colors.BOLD}{num_to_import}{Colors.END} files are " "to be imported."
            )
            bar = progressbar.ProgressBar(redirect_stdout=True)
            # The actual import loop: same batched, sha1-deduplicated flush
            # strategy as generate().
            buf = []
            for i, path in enumerate(bar(paths)):
                buf.append(self.import_from_file(s, path))
                if len(buf) >= dsmith.DB_BUF_SIZE:
                    save_proxies_uniq_on(s, buf, "sha1")
                    buf = []
            save_proxies_uniq_on(s, buf, "sha1")
            num_imported = humanize.Commas(self.num_programs(s) - start_num_progs)
            num_progs = humanize.Commas(self.num_programs(s))
            print(
                f"All done! Imported {Colors.BOLD}{num_imported}{Colors.END} "
                f"new {self} programs. You now have "
                f"{Colors.BOLD}{num_progs}{Colors.END} {self} programs in the "
                "database"
            )

    def import_from_file(
        self, session: session_t, path: Path
    ) -> Union[None, ProgramProxy]:
        """ Import a program from a file. """
        # app.Log(2, f"importing '{path}'")
        # Simply ignore non-ASCII chars:
        src = "".join([i if ord(i) < 128 else "" for i in fs.Read(path).strip()])
        return ProgramProxy(generator=self.id, generation_time=0, src=src)
class RandChar(GlslGenerator):
    """
    This generator produces a uniformly random sequence of ASCII characters, of
    a random length.
    """
    __name__ = "randchar"
    id = Generators.RANDCHAR

    # Arbitrary range
    charcount_range = (100, 100000)

    def generate_one(self, session: session_t) -> ProgramProxy:
        """Produce one program: a random-length run of random printable chars."""
        started = time()
        length = random.randint(*self.charcount_range)
        body = "".join(random.choices(string.printable, k=length))
        elapsed = time() - started
        return ProgramProxy(generator=self.id, generation_time=elapsed, src=body)
class GitHub(GlslGenerator):
    """
    Programs mined from GitHub.
    """
    # Import-only generator: sources arrive via GlslGenerator.import_from_dir();
    # generate_one() is intentionally left unimplemented (raises in the base).
    __name__ = "github"
    id = Generators.GITHUB
class DSmith(GlslGenerator):
    """DSmith-produced programs (identifier only; no generate_one() here)."""
    __name__ = "dsmith"
    id = Generators.DSMITH
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
e9f4ad51b2f1ff568dd13e7270be99155fef7ddc | e5b4a7a9f2e734d15c7b6e28216cf43d70ea3cfd | /exercises/utils.py | 3297a09ef4c3187670578623e6417f2b67fed2cc | [] | no_license | emarcey/cryptopals | b9eee6d1d92199a522cab3a653f56eb846f9f734 | b825a85d5e77cc6f9b998d6c34c76bcb6d120fea | refs/heads/main | 2023-09-01T19:25:54.610208 | 2021-10-14T22:40:38 | 2021-10-14T22:40:38 | 406,948,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | import time
import secrets
from typing import List
def str_to_chunks(s: str, chunk_size: int, max_chunks: int = -1, allow_partials: bool = True) -> List[str]:
    """Split *s* into consecutive substrings of *chunk_size* characters.

    max_chunks of -1 means "no limit"; otherwise at most that many pieces are
    returned.  When allow_partials is False a trailing piece shorter than
    chunk_size is dropped.
    """
    pieces: List[str] = []
    offset = 0
    while offset < len(s):
        if max_chunks != -1 and len(pieces) >= max_chunks:
            break
        piece = s[offset:offset + chunk_size]
        if len(piece) < chunk_size and not allow_partials:
            break
        pieces.append(piece)
        offset += chunk_size
    return pieces
# Cryptopals Set 1, Challenge 9
def pkcs7_pad(s: str, block_size: int) -> str:
    """Pad *s* with PKCS#7 bytes up to a multiple of *block_size*.

    Note: unlike strict PKCS#7, a non-empty string whose length is already a
    multiple of block_size is returned unchanged (no extra full block); an
    empty string is padded to one full block.
    """
    if len(s) and len(s) % block_size == 0:
        return s
    pad_len = block_size - (len(s) % block_size)
    return (s.encode() + bytes([pad_len]) * pad_len).decode()
def pkcs7_unpad(s: str) -> str:
    """Strip a PKCS#7 pad from *s*; return *s* unchanged if no valid pad."""
    if len(s) == 0:
        return s
    raw = s.encode()
    # Candidate pad: the last raw[-1] bytes; valid only if they are all equal.
    candidate = raw[-raw[-1]:]
    if len(set(candidate)) == 1:
        return raw[:-raw[-1]].decode()
    return s
def is_pkcs7_padded(s: str) -> bool:
    """Return True iff *s* ends with a well-formed PKCS#7 pad.

    The last byte gives the pad length; every byte in that tail must equal
    the tail's length for the pad to be valid.
    """
    if len(s) == 0:
        # Fix: previously returned the empty *string* itself instead of a
        # bool (only worked by accident under truthiness).
        return False
    raw = s.encode()
    pad = raw[-raw[-1]:]
    return all(c == len(pad) for c in pad)
def gen_aes_key(key_len: int = 16) -> bytes:
    """Return *key_len* cryptographically secure random bytes (AES-128 default)."""
    key = secrets.token_bytes(key_len)
    return key
def _make_salt(min_len: int = 5, max_len: int = 10) -> bytes:
salt_len = secrets.randbelow(max_len - min_len + 1) + min_len
return secrets.token_bytes(salt_len)
def salt_bytes(b: bytes) -> bytes:
    """Wrap *b* between two freshly generated random salts (5-10 bytes each)."""
    head = _make_salt()
    tail = _make_salt()
    return head + b + tail
def rand_sleep(min_val: int, max_val: int) -> None:
    """Sleep a random whole number of seconds in [min_val, max_val)."""
    delay = min_val + secrets.randbelow(max_val - min_val)
    time.sleep(delay)
| [
"evanmarcey@gmail.com"
] | evanmarcey@gmail.com |
34c7c8949c4802c431dceaa21353344f19f869fe | 39bf07efcfa20344216162d0171024c9d9628a61 | /Example_2/Git_Capon_mseed.py | cba2da6a10491a53ee23bc62a1e87e5e86ff929e | [] | no_license | davcra/IAS-Capon | 05f191abe22ca5cf94d801a93d77d06fccfba5ef | 8447a02945baf9fe1b865418a5e496f1aafc7ac8 | refs/heads/master | 2020-05-29T11:51:41.322964 | 2014-12-17T18:32:54 | 2014-12-17T18:32:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | import matplotlib.pyplot as plt
from subroutines import *
from obspy.core import read
import scipy as sp
# ==== USER INPUT PARAMETER ===
nsamp = 4000
smin = -50.0
smax = 50.0
sinc = 0.5
cap_find = 160
cap_fave = 15
dl = 1
st = read('ASAR.2012.146.00.00.SHZ.mseed',format='MSEED')
dic_meta = get_metadata('ASAR.metadata')
# =============================
nr = st.count()
dt = st[0].stats.delta
rx,ry = metric_mseed(st,dic_meta,nr)
#fk,sxopt,syopt,vel,rp,baz = FK(nsamp,nr,rx,ry,st,smin,smax,sinc,cap_find,cap_fave,dt,overlap=True,taper=True)
#fk,sxopt,syopt,vel,rp,baz = IAS_FK(nsamp,nr,rx,ry,st,smin,smax,sinc,cap_find,cap_fave,dt,overlap=True,taper=True)
#fk,sxopt,syopt,vel,rp,baz,maa,pwe = Capon(nsamp,nr,rx,ry,st,smin,smax,sinc,cap_find,cap_fave,dt,dl,overlap=True,taper=True)
fk,sxopt,syopt,vel,rp,baz,maa,pwe = IAS_Capon(nsamp,nr,rx,ry,st,smin,smax,sinc,cap_find,cap_fave,dt,dl,overlap=True,taper=True)
#fk,sxopt,syopt,vel,rp,baz = CAS_Capon(nsamp,nr,rx,ry,st,smin,smax,sinc,cap_find,cap_fave,dt,overlap=True,taper=True)
#print arrival stats
print_stats(fk,threshold=0.15)
#generating figure
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
im = ax.imshow(fk.T,extent=[smin,smax, smax, smin],cmap='gist_stern_r',interpolation='none')
plt.title('Slowness Spectrum at %.03f +- %.03f[Hz]' %(cap_find/(nsamp*dt),cap_fave/(nsamp*dt)))
ax.set_xlim([smin,smax])
ax.set_ylim([smin,smax])
ax.set_xlabel('East/West Slowness [s/km]')
ax.set_ylabel('North/South Slowness [s/km]')
circle=plt.Circle((0,0),sp.sqrt((0.3*111.19)**2),color='w',fill=False,alpha=0.4)
plt.gcf().gca().add_artist(circle)
circle=plt.Circle((0,0),sp.sqrt((0.24*111.19)**2),color='w',fill=False,alpha=0.4)
plt.gcf().gca().add_artist(circle)
cbar = fig.colorbar(im)
cbar.set_label('relative power (dB)',rotation=270)
plt.show()
| [
"martin.gal@utas.edu.au"
] | martin.gal@utas.edu.au |
06cb3dba995469d9d43334f466f944fca9c7cb06 | d726465b0e79d267a5116c35453051af38503620 | /Slack/MonitorML/send_alerts.py | e588aa2468cdd504c48d29f8b4752c3f31ea5445 | [] | no_license | anshuln/Bots | 9f9d27f7fd8a24a9029783f1830978eb9905536c | e5cd4166761a0a128add10a0716d2591ce431445 | refs/heads/master | 2020-06-22T18:57:44.514950 | 2020-03-27T17:22:36 | 2020-03-27T17:22:36 | 197,780,550 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | import requests
import json
import sys
from time import time
#TODO - ideas - 1. Sending images, 2. Read messages according to time, user to respond, can respond with either stats/ do some other action later
def send_alert(text, url):
    """POST *text* as a JSON payload to the Slack incoming-webhook *url*."""
    payload = {"text": text}
    requests.post(url, json=payload, headers={"Content-type": "application/json"})
def get_commands(config_dict_path,log_file_path):
    '''
    Fetch new Slack messages ("commands") from the configured channel.

    Returns a list of (text, ts) tuples for messages from the configured
    user that are newer than the stored 'ts' watermark; advances the
    watermark in the config file and appends the commands to the log file.
    '''
    # TODO update timestamp
    # TODO convert all to a class
    # NOTE(review): the config file handles below are opened without close().
    config = json.load(open(config_dict_path,"r"))
    headers = {"Content-type":"application/json"}
    # conversations.history with token/channel passed as query parameters.
    url = "https://slack.com/api/conversations.history?token={}&channel={}".format(config["token"],config["channel_id"])
    user = config["user"]
    ts = config["ts"]
    response = requests.get(url,headers=headers)
    print(response)
    # Keep only this user's messages newer than the watermark.
    commands = [(x['text'],x['ts']) for x in response.json()['messages'] if 'user' in x.keys() and x['user'] == user and float(x['ts']) > ts]
    # Advance the watermark so the same messages are not returned again.
    config['ts'] = time()
    json.dump(config,open(config_dict_path,"w"))
    with open(log_file_path, "a") as logfile:
        logfile.write('\n'.join(["{}: {}".format(x[1],x[0]) for x in commands]))
    return commands
print(get_commands("config.json","log.txt"))
| [
"anshulnasery@gmail.com"
] | anshulnasery@gmail.com |
fc4c273175f45a1ada5dd909d0745d74d7571f1d | cf76ea1c17414624c6b610f5384400dd8b56f2cc | /joinQuantTest/factorsTest.py | ea34f682d7ee3e1eb5ded925db0b968be8989310 | [] | no_license | yuanxiaofan/stockAnalysis | 64c1010d9f692fde9374d8d517e495f3f129c5b1 | e4395b2adc2496f3bed58f0aa6c141127ef267bc | refs/heads/master | 2023-01-24T12:03:51.392811 | 2020-11-28T13:27:28 | 2020-11-28T13:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from jqdatasdk import *
import pandas as pd
# NOTE(review): real-looking JoinQuant credentials are hard-coded in source
# control -- move them to environment variables / a secrets store.
# Also note 'id' shadows the builtin id().
id='18518326872'
password='84559515A'
auth(id,password)
# Print full DataFrames (disable row/column truncation).
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# from jqdatasdk import *
# # Fetch every factor available in the JoinQuant factor library:
# df = get_all_factors()
# print(df)
# Layered (5-quantile) backtest returns of a JoinQuant-library factor on the
# CSI 300 index, 2016-07-29 to 2020-03-20, rebalanced every 4 weeks.
# (The original note mentioned 'operating_revenue_ttm', but the call below
# actually uses the 'size' factor -- verify which one is intended.)
result=get_factor_effect('000300.XSHG','2016-07-29','2020-03-20','4W','size',5)
print(result)
| [
"thrallotaku@yeah.net"
] | thrallotaku@yeah.net |
90d24bee7d321b2f8802b9526348cdbbf68578f3 | b2414d1edac3ff490766f5d3265d0290c057c467 | /ballast/util.py | 1e6f5b5010f6edbeb2006b441a24036fad28d01b | [
"Apache-2.0"
] | permissive | git-naren/ballast | c482cacb1e61bb8a386b2a194534ee7a4f348cbb | 083b2fa649321f85ab6d5ff686c2d61917a91b7e | refs/heads/master | 2021-06-22T12:42:28.136247 | 2017-08-18T21:09:29 | 2017-08-18T21:09:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,195 | py | from ballast.compat import basestring, unicode
try:
from urllib.parse import urlparse, urlunparse, urljoin, parse_qs, urlencode
except ImportError:
from urlparse import urlparse, urlunparse, urljoin, parse_qs
from urllib import urlencode
class UrlBuilder(object):
    """Fluent builder for URLs on top of urllib's parse/unparse.

    Every setter returns self so calls can be chained; build() assembles the
    final URL string.  Query parameters are held as {key: [values]}.

    NOTE(review): a freshly-constructed builder holds _port = '' (not None),
    so build() on a builder whose port was never set appends a bare ':' to
    the host -- confirm whether that is intended.
    """
    DEFAULT_SCHEME = 'http'
    DEFAULT_PORT = 80

    def __init__(self):
        self._scheme = ''
        self._hostname = ''
        self._port = ''
        self._path = ''
        self._query = dict()
        self._username = ''
        self._password = ''
        self._fragment = ''

    @staticmethod
    def from_url(url):
        # Parse an existing URL and seed a builder from its components.
        parts = urlparse(url)
        return UrlBuilder.from_parts(
            parts.scheme,
            parts.username,
            parts.password,
            parts.hostname,
            parts.port,
            parts.path,
            parts.query,
            parts.fragment
        )

    @staticmethod
    def from_parts(
            scheme=DEFAULT_SCHEME,
            username='',
            password='',
            hostname='',
            port=None,
            path='',
            query='',
            fragment=''
    ):
        # Build from individual components; `query` may be a query string
        # or an already-structured {key: [values]} dict.
        builder = UrlBuilder().\
            scheme(scheme).\
            username(username).\
            password(password).\
            hostname(hostname).\
            port(port).\
            path(path).\
            fragment(fragment)
        if query is not None:
            if isinstance(query, basestring):
                builder._query = parse_qs(query)
            elif isinstance(query, dict):
                # TODO: validate structure?
                builder._query = query
            else:
                raise Exception('Query format unexpected!')
        return builder

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            self.build()
        )

    def __str__(self):
        return self.build()

    def __unicode__(self):
        return self.build()

    def scheme(self, value):
        """Set the URL scheme (e.g. 'http'); returns self."""
        assert value is None or isinstance(value, basestring)
        self._scheme = value
        return self

    def http(self):
        # Shorthand for scheme('http').
        self._scheme = 'http'
        return self

    def https(self):
        # Shorthand for scheme('https').
        self._scheme = 'https'
        return self

    def hostname(self, value):
        """Set the hostname; None is normalised to ''."""
        assert value is None or isinstance(value, basestring)
        self._hostname = value if value is not None else ''
        return self

    def port(self, value):
        """Set the port (int or None)."""
        assert value is None or isinstance(value, int)
        self._port = value
        return self

    def path(self, value):
        """Replace the path component."""
        assert value is None or isinstance(value, basestring)
        self._path = value
        return self

    def username(self, value):
        """Set the userinfo username (rendered as user:password@host)."""
        assert value is None or isinstance(value, basestring)
        self._username = value
        return self

    def password(self, value):
        """Set the userinfo password (only rendered when a username is set)."""
        assert value is None or isinstance(value, basestring)
        self._password = value
        return self

    def fragment(self, value):
        """Set the #fragment component."""
        assert value is None or isinstance(value, basestring)
        self._fragment = value
        return self

    def add_query_param(self, key, value):
        """Append *value* to the list of values for query parameter *key*."""
        assert key is None or isinstance(key, basestring)
        if key not in self._query:
            self._query[key] = []
        self._query[key].append(unicode(value))
        return self

    def remove_query_param(self, key, value=None):
        """Remove one value (or, with value=None, all values) for *key*."""
        assert key is None or isinstance(key, basestring)
        # nothing to do if the key isn't in there
        if key not in self._query:
            return self
        # if value is None, remove all
        # params for the specified key
        if value is None:
            del self._query[key]
            return self
        # otherwise, just remove the specified
        # value from the query param list for
        # the specified key
        l = self._query[key]
        l.remove(unicode(value))
        # if there are no more values,
        # remove the key from the dictionary
        if len(l) == 0:
            del self._query[key]
        return self

    def append_path(self, path):
        """Join *path* onto the current path (a trailing '/' is ensured first)."""
        assert path is None or isinstance(path, basestring)
        base = self._path
        if not base.endswith('/'):
            base += '/'
        self._path = urljoin(base, path)
        return self

    def build(self):
        """Assemble and return the URL string from the stored components."""
        # create the array the unparse method expects
        # and populate with our values
        parts = [''] * 6
        parts[0] = self._scheme
        parts[1] = self._build_host()
        parts[2] = self._path
        # doseq=1: emit one key=value pair per value in each list.
        parts[4] = urlencode(self._query, 1)
        parts[5] = self._fragment
        # finally, create the url from the parts
        return urlunparse(parts)

    def _build_host(self):
        """Render the netloc: [user:password@]host[:port].

        NOTE(review): the port is omitted whenever it equals DEFAULT_PORT
        (80) regardless of scheme, so https URLs on port 80 also lose it --
        confirm this is the desired behaviour.
        """
        if self._username is not None and self._username != '':
            host = '{}:{}@{}'.format(
                self._username,
                self._password,
                self._hostname
            )
        else:
            host = self._hostname
        if self._port is not None and self._port != self.DEFAULT_PORT:
            host += ':{}'.format(self._port)
        return host
| [
"smith.justin.c@gmail.com"
] | smith.justin.c@gmail.com |
c7ba61c0a3b5a899b8ee49ba4ba2fc8900cae79b | b1f748d761751e89f62cf5b8a2b13adac5bf3a29 | /setprice/apps.py | f40a45dfd11be0925a3a5178812e4788c49ae528 | [] | no_license | sangeeth-subramoniam/buildingmanagementheroku | 7b77be693fa73dbd2dff9c816bf50daf1e501029 | db26de549f7088d2ff80a303abeeaaa548d43e0b | refs/heads/master | 2023-07-08T13:46:06.384694 | 2021-08-10T06:50:14 | 2021-08-10T06:50:14 | 392,492,925 | 0 | 0 | null | 2021-08-04T02:46:57 | 2021-08-04T00:14:10 | Python | UTF-8 | Python | false | false | 148 | py | from django.apps import AppConfig
class SetpriceConfig(AppConfig):
    """Django application configuration for the 'setprice' app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'setprice'
| [
"s-sangeeth-k@sicis.co.jp"
] | s-sangeeth-k@sicis.co.jp |
b9815f46bae9c60f624d117e3a46dbb0640ad0ee | 6257aac89e77147527ed64ba9623f930e0dffb1a | /timeseries/generate_timeseries_all.py | 7479c4ddd2c7b27c8f10e621a36b21451722391e | [] | no_license | dveyarangi/wrf-visualization-scripts | f119e7dcd6ea3c2b4784c27b19c171aa4134db75 | da6fe180a878a320548ef6e2397f04b8370f5c3f | refs/heads/main | 2023-06-23T11:00:49.471340 | 2021-07-21T09:11:32 | 2021-07-21T09:11:32 | 388,043,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | from timeseries.generate_surface_timeseries_statistics_plots import generate as generate_station_event_statistics
from timeseries.generate_surface_timeseries_plots import generate as generate_station_event_plots
from timeseries.generate_surface_timeseries_avg_plots import generate as generate_stationma_event_plots
from timeseries.generate_surface_timeseries_correlation_plots import generate as generate_station_event_corr
from timeseries.generate_surface_timeseries_allstations_statistics_plots import generate as generate_allstations_event_statistics
from timeseries.generate_surface_timeseries_station_eventavg_statistics_plots import generate as generate_station_eventavg_statistics
from timeseries.generate_surface_timeseries_stationavg_statistics_plots import generate as generate_stationavg_event_statistics
from timeseries.generate_surface_timeseries_allavg_statistics_plots import generate as generate_stationavg_eventavg_statistics
from timeseries.timeseries_cfg import *
# Surface-timeseries product generation. Only the two statistics products
# are currently enabled; uncomment the others to regenerate them as well.
#generate_station_event_plots(configs, stations, domain_groups, time_groups)
#generate_stationma_event_plots(configs, stations, domain_groups, time_groups)
generate_station_event_statistics(configs, stations, domain_groups, time_groups)
#generate_station_event_corr(configs, stations, domain_groups, time_groups)
#generate_allstations_event_statistics(configs, stations, domain_groups, time_groups)
#generate_station_eventavg_statistics(configs, stations, domain_groups, time_groups)
#generate_stationavg_event_statistics(configs, stations, domain_groups, time_groups)
generate_stationavg_eventavg_statistics(configs, stations, domain_groups, time_groups)
| [
"fima.rotter@gmail.com"
] | fima.rotter@gmail.com |
30c2f318bc88b67b64ad85e35405b267191a84e7 | d4dfce43d228fb128da6c2376d480670a42b6149 | /gc-cdcl/tclap/tests/test82.py | 0916b0028df0d22ee7292b6b149b6796602aacb8 | [
"MIT"
] | permissive | marijnheule/clicolcom | e4eb7ebe37f43e02bbe3120fcd7dba4106c6edd2 | bfa3c893bb5182ccaec2ce5f0cd04bd48833a85e | refs/heads/main | 2023-02-24T01:48:59.179761 | 2023-02-10T22:53:27 | 2023-02-10T22:53:27 | 454,551,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #!/usr/bin/python
import simple_test
simple_test.test("test23", ["blah", "--blah", "-s=bill", "-i=9", "-i=8", "-B", "homer", "marge", "bart", ])
| [
"akarahal@phd-mbpro14-aka.wifi.local.cmu.edu"
] | akarahal@phd-mbpro14-aka.wifi.local.cmu.edu |
ceb926debbc962f85988a460885f5fda6e37b377 | 6de91b857a2713d4711bef0031505e7d529ea51a | /venv/bin/pip3.7 | 492dd2b5bcdba5081a40fa081334b3d528e1ed09 | [] | no_license | Jaman-dedy/flaskPython1step | 80f58d19ece6830fea7c6f69f42dcee776a63d50 | 1206a1566e85d28605cfc2078885de15a27eec05 | refs/heads/master | 2020-06-22T17:18:17.486399 | 2019-07-19T10:19:35 | 2019-07-19T10:19:35 | 197,752,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | 7 | #!/Users/workspace/PycharmProjects/flaskBlog/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools launcher: strip the '-script.py(w)'/'.exe'
    # suffix from argv[0], then dispatch to pip's console_scripts entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
| [
"jeandedieuam@gmail.com"
] | jeandedieuam@gmail.com |
89ca08a129bced908f6a12ded9781432a25b497a | 81220b8d60660300ba341015222d419d2b2f1515 | /GUI Development/Server/Logging.py | 4fedfc00061176932504cc4661489600961d1273 | [] | no_license | awoerp/Seljan-Scheduler | 32bb9b3329f88aa9f0eb1e344f51f50153c23e4d | 6938284d183a29516917eeda19771de0de07cb16 | refs/heads/master | 2021-01-10T14:00:37.860353 | 2016-02-18T04:31:24 | 2016-02-18T04:31:24 | 48,778,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | from datetime import date, datetime
from os import getcwd, chdir
class Log:
    """Append-only daily server log stored as Logs/ServerLog M-D-YYYY.txt.

    The file handle stays open for the object's lifetime and every write is
    flushed immediately so entries survive a crash.  The Logs/ directory
    must already exist.
    """

    def __init__(self):
        # Build the per-day filename, open it for appending inside Logs/,
        # then restore the caller's working directory.
        cwd = getcwd()
        self.date = date.today()
        day = str(self.date.day)
        month = str(self.date.month)
        year = str(self.date.year)
        chdir("Logs")
        self.logFile = open("ServerLog %s-%s-%s.txt" % (month, day, year), 'a')
        # Fix: message previously read "Server Stated".
        self.WriteToLogWithTimeStamp("Server Started")
        chdir(cwd)

    def _write(self, message):
        # Write and flush immediately so nothing is lost in a buffer.
        self.logFile.write(message)
        self.logFile.flush()

    def WriteToLogWithTimeStamp(self, body):
        """Append *body* as one line prefixed with the current timestamp."""
        self._write(str(datetime.now()) + ": " + body + "\n")

    def WriteToLog(self, body):
        """Append *body* as one line, without a timestamp."""
        # Fix: removed a dead `timeStamp` local that was computed and unused.
        self._write(body + "\n")

    def NewLine(self):
        """Append a blank separator line."""
        self._write("\n")

    def Close(self):
        """Close the underlying file; the Log must not be used afterwards."""
        self.logFile.close()
| [
"woerpela@uwplatt.edu"
] | woerpela@uwplatt.edu |
1c533906c8f8df75bddf91bf93afad8f57aaff14 | c1aa16af25d8701d2ae293dfef48d7522604dfe6 | /core/mo_del.py | 18ca0674aabafaefa49030cba194de19800801cd | [] | no_license | jekiapp/webpy-dashboard | 0ddda9f082db79b885c584d7fb35e6bef86ee238 | b9c8763bef67917966ca999390502b1b5034965e | refs/heads/master | 2021-06-01T06:27:52.331704 | 2016-07-16T02:54:46 | 2016-07-16T02:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | import web
from pymongo import MongoClient
from bson.son import SON
from bson.objectid import ObjectId
class mo_del:
    """Thin wrapper around a pymongo connection to the local 'pilkada' DB.

    NOTE(review): `password` is read from web.config at class-definition
    time and does not appear to be used within this class -- verify it is
    actually needed.
    """
    password = web.config.db_password;
    def __init__(self,cl=None):
        # Connect to the local MongoDB instance and select the database.
        client = MongoClient('mongodb://localhost:27017/')
        self.db = client.pilkada
        # Expose bson helpers so callers can build ObjectIds / ordered docs.
        self.Id = ObjectId
        self.SON = SON
        if cl: self.set_collection(cl)
    def set_collection(self,collection):
        # Bind self.cl to the named collection for subsequent operations.
        self.cl = self.db[collection]
| [
"muzakki.ahmad29@gmail.com"
] | muzakki.ahmad29@gmail.com |
5f5792a19f5bcf3a92dc5b959a804dbb0c63ee67 | 886e1f1ac3a03051ecd92992b917345fa62cab17 | /Atari_v1/optimize_model.py | 8a2b07cea058ffdde227a22014258bbaa4893338 | [] | no_license | kjain20/ESE_546_Project | b59f930d603e3a07bc15db7ff6a7e5423d4b590b | d122ee558f676b91c53a2d29d330114f4b5d5f86 | refs/heads/master | 2022-10-09T01:37:01.435580 | 2020-06-05T19:12:37 | 2020-06-05T19:12:37 | 269,746,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,701 | py | import copy
from collections import namedtuple
from itertools import count
import math
import random
import numpy as np
import time
from torch.autograd import Variable
import gym
from atari_wrappers import *
from models import *
from utils import *
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from pdb import set_trace as debug
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
# Replay-buffer record: next_state is None for terminal transitions
# (see the non_final_mask construction below).
Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))
def nstep_target(idx, policy_net,target_net, memory, steps = 20, device = 'cpu', BATCH_SIZE = 32, GAMMA = 0.99, double_dqn = False):
    """Build n-step bootstrapped Q-learning targets for a sampled batch.

    idx: array of replay-buffer indices of the sampled transitions
    (assumes idx + steps stays inside the buffer -- TODO confirm the
    memory.sample(idx=...) contract).  Returns a 1-D tensor of BATCH_SIZE
    values: sum of `steps` discounted rewards plus gamma**steps times the
    target network's value of the state reached `steps` transitions later.
    """
    # Buffer indices of the `steps` rewards following each sampled index.
    range_ = np.arange(1, steps + 1)
    idx_nReward = idx.reshape(-1, 1) + range_
    _batch, _ = memory.sample(idx = idx_nReward.ravel())
    n_batch = Transition(*zip(*_batch))
    # Discounted sum of the n intermediate rewards (geometric gamma weights).
    rewards = tuple((map(lambda r: torch.tensor([r], device=device), n_batch.reward)))
    n_rewards = torch.cat(rewards).view(idx_nReward.shape)
    gamma_n = np.geomspace(1, GAMMA**(steps - 1), steps)
    discounted_rewards = n_rewards*torch.from_numpy(gamma_n).float().to(device)
    discounted_rewards = torch.sum(discounted_rewards, axis = 1).to(device)
    # Bootstrap from the state reached `steps` transitions later.
    batch_future, _ = memory.sample(idx + steps)
    batch_ = Transition(*zip(*batch_future))
    non_final_mask = torch.tensor(
        tuple(map(lambda s: s is not None, batch_.next_state)),
        device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch_.next_state
                                       if s is not None]).to(device)
    # Terminal future states keep a bootstrap value of zero.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    if double_dqn:
        # Double DQN: policy net selects the action, target net evaluates it.
        max_action = policy_net(non_final_next_states).max(1, keepdim=True)[1].detach()
        next_state_values[non_final_mask] = target_net(non_final_next_states).gather(1, max_action).detach().squeeze(1)
    else:
        # NOTE(review): target_net is called with a double_dqn forward()
        # kwarg here -- verify the model's forward() accepts it.
        next_state_values[non_final_mask] = target_net(non_final_next_states, double_dqn = double_dqn).max(1)[0].detach()
    # next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    expected_state_action_values = (next_state_values * (GAMMA**steps)) + discounted_rewards
    return expected_state_action_values
def optimize_model(optimizer,policy_net, target_net, memory, device, GAMMA = 0.99, BATCH_SIZE = 32, n_steps = 20, double_dqn = False):
    """Run one DQN optimisation step on a replay-buffer minibatch.

    Uses 1-step targets when n_steps == 1, otherwise delegates to
    nstep_target().  Returns the policy_net (updated in place), or None
    when the buffer is still smaller than BATCH_SIZE.
    """
    # NOTE(review): anomaly detection slows training noticeably -- consider
    # disabling it outside of debugging sessions.
    torch.autograd.set_detect_anomaly(True)
    if len(memory) < BATCH_SIZE:
        return
    transitions, idx = memory.sample()
    """
    zip(*transitions) unzips the transitions into
    Transition(*) creates new named tuple
    batch.state - tuple of all the states (each state is a tensor)
    batch.next_state - tuple of all the next states (each state is a tensor)
    batch.reward - tuple of all the rewards (each reward is a float)
    batch.action - tuple of all the actions (each action is an int)
    """
    batch = Transition(*zip(*transitions))
    actions = tuple((map(lambda a: torch.tensor([[a]], device=device), batch.action)))
    rewards = tuple((map(lambda r: torch.tensor([r], device=device), batch.reward)))
    # Mask of transitions whose next_state is non-terminal (not None).
    non_final_mask = torch.tensor(
        tuple(map(lambda s: s is not None, batch.next_state)),
        device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state
                                       if s is not None]).to(device)
    state_batch = torch.cat(batch.state).to(device)
    action_batch = torch.cat(actions)
    reward_batch = torch.cat(rewards)
    # Q(s, a) for the actions that were actually taken.
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    if n_steps == 1:
        # Standard 1-step TD target: r + gamma * max_a Q_target(s', a).
        next_state_values = torch.zeros(BATCH_SIZE, device=device)
        if double_dqn:
            # Double DQN: policy net picks the action, target net scores it.
            max_action = policy_net(non_final_next_states).max(1, keepdim = True)[1].detach()
            next_state_values[non_final_mask] = target_net(non_final_next_states).gather(1, max_action).squeeze(1).detach()
        else:
            next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
        # next_state_values.requires_grad = False
        expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    else:
        # Multi-step bootstrapped targets.
        expected_state_action_values = nstep_target(idx=idx, policy_net=policy_net,target_net=target_net, steps=n_steps, memory=memory, device=device, double_dqn=double_dqn)
    # Huber loss between predicted and target Q-values.
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    optimizer.zero_grad()
    loss.backward()
    # Gradient clipping to [-1, 1], as in the DQN paper.
    for param in policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return policy_net
| [
"karishma@KARISHMAs-MBP-2.fios-router.home"
] | karishma@KARISHMAs-MBP-2.fios-router.home |
4d4c4e63612c45c4b704603eece349d513f62079 | 6cf08320cf675eede64328958ab791911a6c60f6 | /venv/lib/python3.5/site-packages/tbselenium/test/test_tbdriver.py | ce92fd118c786171afdbb624afa11dfe1e9d738b | [] | no_license | giao1989/Scraping | ed48aacc5d5a96612f946bf695bc9ff8205b46ec | 3994683c471bdcd9ca0f2bba67a5d27d310ca1e9 | refs/heads/master | 2023-06-01T22:57:20.441102 | 2020-05-21T06:56:28 | 2020-05-21T06:56:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,293 | py | import tempfile
import unittest
import pytest
from os import environ
from os.path import join, isdir, getmtime
from tbselenium import common as cm
from tbselenium.test import TBB_PATH
from tbselenium.test.fixtures import TBDriverFixture
class TBDriverTest(unittest.TestCase):
    """Smoke tests verifying a Tor Browser driver really browses over Tor."""

    def setUp(self):
        # Fresh Tor Browser instance per test.
        self.tb_driver = TBDriverFixture(TBB_PATH)

    def tearDown(self):
        self.tb_driver.quit()

    def test_should_load_check_tpo(self):
        # check.torproject.org reports whether the request came via Tor.
        congrats = "Congratulations. This browser is configured to use Tor."
        self.tb_driver.load_url_ensure(cm.CHECK_TPO_URL)
        status = self.tb_driver.find_element_by("h1.on")
        self.assertEqual(status.text, congrats)

    def test_should_load_hidden_service(self):
        # DuckDuckGo's onion address -- exercises hidden-service lookup.
        self.tb_driver.load_url_ensure("http://3g2upl4pq6kufc4m.onion")
        self.assertIn("DuckDuckGo", self.tb_driver.title)

    def test_should_check_environ_in_prepend(self):
        # Restarting the driver must not prepend the TBB dir to PATH twice.
        self.tb_driver.quit()
        self.tb_driver = TBDriverFixture(TBB_PATH)
        paths = environ["PATH"].split(':')
        tbbpath_count = paths.count(self.tb_driver.tbb_browser_dir)
        self.assertEqual(tbbpath_count, 1)
class TBDriverCleanUp(unittest.TestCase):
    """Verify quit() terminates the browser and removes temp profile dirs."""

    def setUp(self):
        # No tearDown: each test below calls quit() itself.
        self.tb_driver = TBDriverFixture(TBB_PATH)

    def test_browser_process_should_be_terminated_after_quit(self):
        driver = self.tb_driver
        fx_process = driver.binary.process
        # poll() is None while the Firefox process is still running.
        self.assertEqual(fx_process.poll(), None)
        driver.quit()
        self.assertNotEqual(fx_process.poll(), None)

    def test_profile_dirs_should_be_removed(self):
        driver = self.tb_driver
        tempfolder = driver.profile.tempfolder
        profile_path = driver.profile.path
        self.assertTrue(isdir(tempfolder))
        self.assertTrue(isdir(profile_path))
        driver.quit()
        # Both the profile dir and its parent temp folder must be gone.
        self.assertFalse(isdir(profile_path))
        self.assertFalse(isdir(tempfolder))
class TBDriverTorDataDir(unittest.TestCase):
    """Check whether TBB's bundled Tor data dir is touched depending on
    whether a separate tor_data_dir is supplied (via mtime comparison)."""

    TOR_DATA_PATH = join(TBB_PATH, cm.DEFAULT_TOR_DATA_PATH)

    @pytest.mark.skipif(cm.TRAVIS, reason="Requires Tor bootstrap,"
                        "unreliable on Travis")
    def test_temp_tor_data_dir(self):
        """Tor data directory in TBB should not be modified if
        we use a separate tor_data_dir.
        """
        # NOTE(review): the mkdtemp() directory is never cleaned up.
        tmp_dir = tempfile.mkdtemp()
        mod_time_before = getmtime(self.TOR_DATA_PATH)
        with TBDriverFixture(TBB_PATH, tor_data_dir=tmp_dir,
                             tor_cfg=cm.LAUNCH_NEW_TBB_TOR) as driver:
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TOR_DATA_PATH)
        self.assertEqual(mod_time_before, mod_time_after)

    @pytest.mark.skipif(cm.TRAVIS, reason="Requires Tor bootstrap,"
                        "unreliable on Travis")
    def test_non_temp_tor_data_dir(self):
        """Tor data directory in TBB should be modified if we don't
        use a separate tor_data_dir.
        """
        mod_time_before = getmtime(self.TOR_DATA_PATH)
        with TBDriverFixture(TBB_PATH,
                             tor_cfg=cm.LAUNCH_NEW_TBB_TOR) as driver:
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TOR_DATA_PATH)
        self.assertNotEqual(mod_time_before, mod_time_after)
| [
"1126happy"
] | 1126happy |
97a357ea5ffdef6f835f86617addc0cc7f749d5c | 87d5b21265c381104de8f45aa67842a4adc880eb | /257. Binary Tree Paths.py | 51ff3604e23dc618adedd7bd3014b6538442da6f | [] | no_license | MYMSSENDOG/leetcodes | ac047fe0d951e0946740cb75103fc94aae967166 | 8a52a417a903a0742034161471a084bc1e494d68 | refs/heads/master | 2020-09-23T16:55:08.579319 | 2020-09-03T19:44:26 | 2020-09-03T19:44:26 | 225,543,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from tree_node_lib import *
class Solution:
def binaryTreePaths(self, root: TreeNode) :
if not root:
return []
ret = []
def dfs(cur_node, path):
if path:
path = path + "->" + str(cur_node.val)
else:
path = str(cur_node.val)
if not cur_node.left and not cur_node.right:
ret.append(path)
return
if cur_node.left:
dfs(cur_node.left, path)
if cur_node.right:
dfs(cur_node.right, path)
dfs(root, "")
return ret
root = makeTree([1,2,3,None,5])
sol = Solution()
print(sol.binaryTreePaths(root))
| [
"fhqmtkfkd@naver.com"
] | fhqmtkfkd@naver.com |
d741e22a98c934c5375e77ef20a1a5d2583d51e3 | 1c63d082b8200e2a52621c8e0faf7ee76687ec7b | /initial/venv/Scripts/easy_install-script.py | afa1a226f67bab1459f84737348e594cc585429d | [] | no_license | mantuonweb/pynew | 072295e2e41b7e343628bc1c7fe567e18d29a2f9 | 4172b2ce9edad54d1e18165170b085afbaeb5744 | refs/heads/master | 2020-12-02T07:00:54.887961 | 2019-12-30T13:57:23 | 2019-12-30T13:57:23 | 230,924,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!D:\pylearn\initial\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"mantuonweb@hotmail.com"
] | mantuonweb@hotmail.com |
81ec603da691950b790964179afd8a252a531729 | 6887e654219cf928c2167f1baad5278d01758274 | /.copy_file_to_all.py | f711d5dd581873ada13d4f539cdf904e73fc98cf | [] | no_license | RoboticsBrno/roboruka-examples | 24e29d286596527f37e7a772629fd6b7945d9a62 | b82ac408b529bc58faa898dd0089c1429c9368c6 | refs/heads/master | 2022-05-13T08:50:17.960991 | 2022-05-06T08:08:32 | 2022-05-06T08:08:32 | 195,669,422 | 2 | 1 | null | 2020-06-14T16:02:05 | 2019-07-07T15:47:15 | Python | UTF-8 | Python | false | false | 1,009 | py | #!/usr/bin/env python3
import os
import shutil
import sys
if __name__ == "__main__":
source = os.path.normpath(sys.argv[1])
abs_source = os.path.abspath(source)
abs_pwd = os.path.abspath(".")
if os.path.commonprefix([ abs_source, abs_pwd ]) != abs_pwd:
raise Exception()
tokens = source.split(os.sep, 1)
if len(tokens) != 2:
raise Exception()
rel_path = tokens[1]
for example_dir in os.listdir("."):
if not os.path.isdir(example_dir) or example_dir.startswith(".") or example_dir == tokens[0]:
continue
dest_path = os.path.join(example_dir, rel_path)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir, 0o0755)
print(source, "->", dest_path)
if os.path.isdir(source):
shutil.copytree(source, dest_path, symlinks=True, dirs_exist_ok=True)
else:
shutil.copy2(source, dest_path, follow_symlinks=False)
| [
"vbocek@gmail.com"
] | vbocek@gmail.com |
3828a0b1c4613505ab9b4cda45351013c7a86543 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03608/s343365481.py | d46c782a99ce70c039b4156e5d7b06c88335bcd8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | import itertools
n, m, r = map(int, input().split())
d = [[float('inf')]*n for _ in range(n)]
rs = list(map(int, input().split()))
for _ in range(m):
a, b, t = map(int, input().split())
a -= 1
b -= 1
d[a][b] = t
d[b][a] = t
for i in range(n):
d[i][i] = 0
def warshall_floyd(d):
#d[i][j]: iからjへの最短距離
for k in range(n):
for i in range(n):
for j in range(n):
d[i][j] = min(d[i][j],d[i][k] + d[k][j])
return d
d = warshall_floyd(d)
ans = float('inf')
for p in itertools.permutations(rs):
tmp = 0
for i in range(len(p)-1):
tmp += d[p[i]-1][p[i+1]-1]
ans = min(ans, tmp)
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
44ff709db2626bf98ae899ca915c78fc40c26e2f | 2eadd7380b12ac5502a2c4bd146e33e2c144ae90 | /TrafficPenaltySystem/PenaltyManager/admin.py | 15bf5fd9db4f1458a8bbca7becc415b47441aa0e | [] | no_license | Tswarelo1/Traffic-Penalty-System | b1ccc052552deace0fc856acb7f53dd108dbee8b | bff3a876e67f807824ca6266c1bbe1781dc33da6 | refs/heads/master | 2023-07-22T04:53:26.514517 | 2020-05-15T01:10:39 | 2020-05-15T01:10:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.contrib import admin
from .models import Driver, Penalty
# Register your models here.
admin.site.register(Driver)
admin.site.register(Penalty)
| [
"shashwatkathuria@yahoo.co.in"
] | shashwatkathuria@yahoo.co.in |
113fed38febc8bc10f4e36ff65379425472cb200 | 7cf5019645fbedafb8e1fb1580ba5840b82205a5 | /HRS/catalog/main_test.py | 011960a59984d99fcc3c9fb24aa559f716723495 | [] | no_license | JassiGhuman/RecommenderSystem | fa459e9b277f86a633179dda7336c25cda4a4e7a | 68269e9847b66e3ba259e522b147b98a82b60d73 | refs/heads/main | 2023-05-04T00:53:54.923561 | 2021-04-21T06:55:53 | 2021-04-21T06:55:53 | 358,151,947 | 1 | 1 | null | 2021-04-15T08:21:51 | 2021-04-15T06:30:32 | Python | UTF-8 | Python | false | false | 129 | py | import sys
def main():
print(sys.argv)
for arg in sys.argv[1:]:
print(arg)
if __name__ == "__main__":
main() | [
"pamilatina.92@gmail.com"
] | pamilatina.92@gmail.com |
50b5f7129a2387157eed7c7deed5af6311e3a6ee | d6550314722c88db7c5854b62b2f228a08b30861 | /django_chinook/employee/migrations/0001_initial.py | e0c8373d7f5c22e101327250de69d46865f3472c | [] | no_license | abxsantos/django-chinook | b7549c00d728b3e3397b6e1d10eaa9e5860b6e41 | 4da79836f7a4a0cce1468cd5690c30360ddb4fff | refs/heads/main | 2023-03-31T18:31:35.062804 | 2021-04-03T22:18:46 | 2021-04-03T22:18:46 | 354,406,048 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | # Generated by Django 3.1.7 on 2021-04-03 21:38
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('first_name', models.CharField(max_length=40)),
('last_name', models.CharField(max_length=40)),
('title', models.CharField(max_length=20)),
('birth_date', models.DateTimeField()),
('hire_date', models.DateTimeField()),
('address', models.CharField(max_length=70)),
('city', models.CharField(max_length=40)),
('state', models.CharField(max_length=40)),
('country', models.CharField(max_length=40)),
('postal_code', models.CharField(max_length=10)),
('phone', models.CharField(max_length=24)),
('fax', models.CharField(max_length=24)),
('email', models.CharField(max_length=60)),
('reports_to', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='employee.employee')),
],
options={
'ordering': ('last_name',),
},
),
]
| [
"ale.bxsantos@gmail.com"
] | ale.bxsantos@gmail.com |
95ab103056e37f69c331b832b2fb1d1879059e9c | 3709da4d2e4a502fec65b410b54d19d42e829d2b | /gans_package/models/C_VAE.py | 564e7280b4043b0432bce5526df07b699f4113ab | [
"MIT"
] | permissive | chenshaogang/GANs-Implementations | 0f8c888c48c86c25fe7b1568465795d199a7c0d6 | 60aee8a48dc3cf3a6f1240f44ff9bf6c138e3e38 | refs/heads/main | 2023-07-12T03:37:16.958351 | 2021-08-15T18:05:10 | 2021-08-15T18:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,218 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class VAE(nn.Module):
def __init__(self, latent_size=100):
super(VAE, self).__init__()
self.latent_size = latent_size
self.l1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1)
self.l1b = nn.BatchNorm2d(32)
self.l2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=1)
self.l2b = nn.BatchNorm2d(64)
self.l3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1)
self.l3b = nn.BatchNorm2d(128)
self.l4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1)
self.l4b = nn.BatchNorm2d(256)
self.l41 = nn.Linear(256 * 4 * 4, self.latent_size)
self.l42 = nn.Linear(256 * 4 * 4, self.latent_size)
self.f = nn.Linear(self.latent_size, 256 * 4 * 4)
self.l5 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=4, stride=2, padding=1)
self.l6 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=4, stride=2, padding=1)
self.l7 = nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1)
self.l8 = nn.ConvTranspose2d(in_channels=32, out_channels=3, kernel_size=4, stride=2, padding=1)
def encoder(self, x_in):
h = F.leaky_relu(self.l1b(self.l1(x_in)))
h = F.leaky_relu(self.l2b(self.l2(h)))
h = F.leaky_relu(self.l3b(self.l3(h)))
h = F.leaky_relu(self.l4b(self.l4(h)))
h = h.view(h.size(0), -1)
return self.l41(h), self.l42(h)
def decoder(self, z):
z = self.f(z)
z = z.view(-1, 256, 4, 4)
z = F.leaky_relu(self.l5(z))
z = F.leaky_relu(self.l6(z))
z = F.leaky_relu(self.l7(z))
z = torch.sigmoid(self.l8(z))
return z
def sampling(self, mu, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return torch.add(eps.mul(std), mu)
def forward(self, x_in):
mu, log_var = self.encoder(x_in)
z = self.sampling(mu, log_var)
return self.decoder(z), mu, log_var
| [
"udbhavprasad072300@gmail.com"
] | udbhavprasad072300@gmail.com |
d115af47529c1ae4c0971a4c78a3cc0a5a73fd00 | 6433f34f3d935264217d2443725040e65178e606 | /hw_C1.py | b9e9fb97d9c345789d89f9d9534c403416714f50 | [] | no_license | anna-kapitonova/PythonCourse | fbe5be734d72820a76a9e7bd715f0c05e9df0ced | dbaa77986426eae94b14599f7f46617f1c6f88ae | refs/heads/master | 2020-04-27T11:19:00.528153 | 2019-04-25T14:32:49 | 2019-04-25T14:32:49 | 174,290,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | a=input()
b=[int(symbol) for symbol in a.split(',')]
c=[]
for i in range(len(b)-1):
if b[i+1]>=b[i]:
c.append(b[i+1])
print(c)
| [
"noreply@github.com"
] | anna-kapitonova.noreply@github.com |
155c17eaefb774073edb9ca8bce87a5b3cfdc588 | 812c6a3fa49ef9163e2e60a3b13478a6ea4d328b | /testing/views.py | 8be17bfa64212da32cf9dcb18a4f4c343794d501 | [] | no_license | jskonst/Reestr | 6635000868f5c9bbd67468d78eb526772236510c | 0abddd86745160df3dc7de73680aa0a937014fc3 | refs/heads/master | 2021-01-01T18:03:16.949268 | 2012-10-02T14:51:44 | 2012-10-02T14:51:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # Create your views here.
from django.shortcuts import render_to_response
from forms import testForms
def contsct(request):
if request.method=='POST':
form=testForms(request.POST)
if form.is_valid():
cd=form.cleaned_data
| [
"jskonst@yandex.ru"
] | jskonst@yandex.ru |
127c498e0eddeca1c8a6e6e61f30f47e9b5ff49d | 03e004a37171a552d1975741ff2e8b2ed4c1c0ca | /src/exp2_UxIt/exp_150_5_radial.py | 27bfaed9ad062849ca2d098c4d0b3824441872db | [] | no_license | rezeck/segregation | 7baf2ddaaaf5148caafbd9d93641d7a8a6f82c76 | 1555ae6083cb15d3af759df19d3c6ec9c5c16fc2 | refs/heads/master | 2022-09-28T12:49:26.972709 | 2020-06-05T16:28:18 | 2020-06-05T16:28:18 | 100,524,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | #!/usr/bin/env python
from segregation import Segregation
from termcolor import colored
import progressbar
import numpy as np
import time
bar = progressbar.ProgressBar(maxval=10000, \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
# Filename
filename = "data/exp_150_5_radial.npy"
# Setup
ROBOTS = 150
GROUPS = 5
WORLD = 40.0
alpha = 1.0
dAA = np.linspace(5, GROUPS*5, GROUPS) # radial
dAB = 7.5
# Structure data
datas = []
print colored("[Initializing the experiments] exp_150_5_radial_0_noise", 'yellow')
noises = [0.0, 0.01, 0.05, 0.1, 0.2]
# Make 100 Experiments
for noise in noises:
print colored("[Experiment] With Noise", "grey"), colored(noise, 'blue')
s = Segregation(ROBOTS=ROBOTS, GROUPS=GROUPS, WORLD=WORLD, alpha=alpha, noise=noise, dAA=dAA, dAB=dAB, seed=0)
bar.start()
data_control = []
for j in range(10000):
bar.update(j)
s.update()
a = sum(sum(abs(s.a)))
data_control.append(a)
datas.append(data_control)
print "\n"
bar.finish()
print colored("[Experiment has been completed!]", 'green')
print colored("[Saving Experiments]", 'grey')
np.save(filename, datas)
print colored("[Finish]", 'green')
| [
"rezeck@dcc.ufmg.br"
] | rezeck@dcc.ufmg.br |
5f9f061affb83b7c29e56f782a0ab701b3dc88bb | cad91ae76d2746a6c28ddda0f33a58f9d461378f | /CUDA-Optimized/FastSpeech/fastspeech/trt/waveglow_trt_inferencer.py | 5a71fdf6e10c5f822a6a3fbe753f8663bce8a2dc | [] | no_license | NVIDIA/DeepLearningExamples | fe677521e7e2a16e3cb0b77e358f9aab72f8c11a | a5388a45f71a949639b35cc5b990bd130d2d8164 | refs/heads/master | 2023-08-31T20:57:08.798455 | 2023-08-23T10:09:12 | 2023-08-23T10:09:12 | 131,881,622 | 11,838 | 3,124 | null | 2023-08-28T16:57:33 | 2018-05-02T17:04:05 | Jupyter Notebook | UTF-8 | Python | false | false | 4,696 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import torch
import tensorrt as trt
from fastspeech.trt import TRT_BASE_PATH, TRT_LOGGER
import fastspeech.trt.common as common
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy, to_gpu_async
from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer
from fastspeech.inferencer.denoiser import Denoiser
import pycuda.driver as cuda
class WaveGlowTRTInferencer(object):
def __init__(self, ckpt_file, engine_file, use_fp16=False, use_denoiser=False, stride=256, n_groups=8):
self.ckpt_file = ckpt_file
self.engine_file = engine_file
self.use_fp16 = use_fp16
self.use_denoiser = use_denoiser
self.stride = stride
self.n_groups = n_groups
if self.use_denoiser:
sys.path.append('waveglow')
waveglow = torch.load(self.ckpt_file)['model']
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow.eval()
self.denoiser = Denoiser(waveglow)
self.denoiser = to_gpu_async(self.denoiser)
tprint('Using WaveGlow denoiser.')
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow
torch.cuda.empty_cache()
# load engine
with open(self.engine_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
if self.engine:
tprint('TRT Engine Loaded from {} successfully.'.format(self.engine_file))
return
else:
tprint('Loading TRT Engine from {} failed.'.format(self.engine_file))
def __enter__(self):
self.context = self.engine.create_execution_context()
def __exit__(self, exception_type, exception_value, traceback):
self.context.__del__()
self.engine.__del__()
def infer(self, mels):
batch_size, _, mel_size = mels.shape
mels = mels.unsqueeze(3)
z = torch.randn(batch_size, self.n_groups, mel_size * self.stride // self.n_groups, 1)
wavs = torch.zeros(batch_size, mel_size * self.stride)
if self.use_fp16:
z = z.half()
mels = mels.half()
wavs = wavs.half()
mels = to_gpu_async(mels)
z = to_gpu_async(z)
wavs = to_gpu_async(wavs)
# create inputs/outputs buffers
input_buffers = common.create_inputs_from_torch(self.engine, [mels, z])
output_buffers = common.create_outputs_from_torch(self.engine, [wavs.shape])
# set shapes of inputs
self.context = common.set_input_shapes(self.engine, self.context, input_buffers)
# execute
stream = cuda.Stream()
bindings = [int(data.data_ptr()) for data in (input_buffers + output_buffers)]
self.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
stream.synchronize()
wavs = output_buffers[0]
# denoise
if self.use_denoiser:
wavs = self.denoiser(wavs, strength=0.01)
return wavs.float() | [
"andabi412@gmail.com"
] | andabi412@gmail.com |
be5cecfecdba0f74b4324074d8792ebd7a8fe447 | 2ae84e64d664e998334d42b44fab3798a90c0e48 | /com/demo/lib/__init__.py | 976f825fbe5d1c57a2169bb327455cb7658aea20 | [] | no_license | DXShelley/python-demo | 18ac35dfe15a15e481125b15f3e174c2e2ecd7bc | 04d743699af32391510dbdce2a6b499722669715 | refs/heads/master | 2022-12-11T04:25:39.206757 | 2020-09-11T10:04:14 | 2020-09-11T10:04:14 | 281,336,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/8/4 8:35
# @Author: yuzq
# @File : __init__.py
| [
"yu_zhenquan@163.com"
] | yu_zhenquan@163.com |
584cbb8742d2e854eb265d929bcec20f96ad30f2 | 43593a8fec43fea686f62aab03715bb683afa24f | /socorro/unittest/external/test_crashstorage_base.py | 64ec42653a6aa23a4ca9e3d02b42a20142180992 | [] | no_license | jfaust/socorro | d2130c63da006cf3f09dd892c6ea96229a4bf60a | 0bc8501b0f3dce761e48d702abdb7dc8a54ba552 | refs/heads/master | 2021-01-21T01:19:59.353202 | 2012-07-12T00:48:02 | 2012-07-12T00:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,735 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
import mock
from socorro.external.crashstorage_base import CrashStorageBase, \
PolyStorageError, \
PolyCrashStorage, \
FallbackCrashStorage
from configman import Namespace, ConfigurationManager
from mock import Mock
class A(CrashStorageBase):
required_config = Namespace()
required_config.add_option('x',
default=1)
required_config.add_option('y',
default=2
)
def __init__(self, config):
super(A, self).__init__(config)
self.raw_crash_count = 0
def save_raw_crash(self, raw_crash, dump):
pass
def save_processed_crash(self, processed_crash):
pass
class B(A):
required_config = Namespace()
required_config.add_option('z',
default=2
)
def fake_quit_check():
return False
class TestBase(unittest.TestCase):
def test_basic_crashstorage(self):
required_config = Namespace()
mock_logging = Mock()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
}]
)
with config_manager.context() as config:
crashstorage = CrashStorageBase(
config,
quit_check_callback=fake_quit_check
)
crashstorage.save_raw_crash({}, 'payload', 'ooid')
crashstorage.save_processed({})
self.assertRaises(NotImplementedError,
crashstorage.get_raw_crash, 'ooid')
self.assertRaises(NotImplementedError,
crashstorage.get_raw_dump, 'ooid')
self.assertRaises(NotImplementedError,
crashstorage.get_processed, 'ooid')
self.assertRaises(NotImplementedError,
crashstorage.remove, 'ooid')
self.assertRaises(StopIteration, crashstorage.new_crashes)
crashstorage.close()
def test_polyerror(self):
p = PolyStorageError('hell')
try:
try:
raise NameError('dwight')
except NameError:
p.gather_current_exception()
try:
raise KeyError('wilma')
except KeyError:
p.gather_current_exception()
try:
raise AttributeError('sarita')
except AttributeError:
p.gather_current_exception()
raise p
except PolyStorageError, x:
self.assertEqual(len(x), 3)
self.assertTrue(x.has_exceptions())
types = [NameError, KeyError, AttributeError]
[self.assertEqual(a[0], b) for a, b in zip(x, types)]
self.assertTrue(1 not in x)
self.assertTrue(str(x[0][1]), 'dwight')
x[0] = x[1]
self.assertEqual(x[0], x[1])
def test_poly_crash_storage(self):
n = Namespace()
n.add_option(
'storage',
default=PolyCrashStorage,
)
n.add_option(
'logger',
default=mock.Mock(),
)
value = {'storage_classes':
'socorro.unittest.external.test_crashstorage_base.A,'
'socorro.unittest.external.test_crashstorage_base.A,'
'socorro.unittest.external.test_crashstorage_base.B',
'storage1.y': 37,
}
cm = ConfigurationManager(n, values_source_list=[value])
with cm.context() as config:
self.assertEqual(config.storage0.store, A)
self.assertEqual(config.storage1.store, A)
self.assertEqual(config.storage1.y, 37)
self.assertEqual(config.storage2.store, B)
poly_store = config.storage(config)
l = len(poly_store.storage_namespaces)
self.assertEqual(l, 3, 'expected poly_store to have lenth of 3, '
'but %d was found instead' % l)
self.assertEqual(poly_store.storage_namespaces[0], 'storage0')
self.assertEqual(poly_store.storage_namespaces[1], 'storage1')
self.assertEqual(poly_store.storage_namespaces[2], 'storage2')
l = len(poly_store.stores)
self.assertEqual(l, 3,
'expected poly_store.store to have lenth of 3, '
'but %d was found instead' % l)
self.assertTrue(isinstance(poly_store.stores.storage0, A))
self.assertTrue(isinstance(poly_store.stores.storage1, A))
self.assertTrue(isinstance(poly_store.stores.storage2, B))
raw_crash = {'ooid': ''}
dump = '12345'
processed_crash = {'ooid': '', 'product': 17}
for v in poly_store.stores.itervalues():
v.save_raw_crash = Mock()
v.save_processed = Mock()
v.close = Mock()
poly_store.save_raw_crash(raw_crash, dump, '')
for v in poly_store.stores.itervalues():
v.save_raw_crash.assert_called_once_with(raw_crash, dump, '')
poly_store.save_processed(processed_crash)
for v in poly_store.stores.itervalues():
v.save_processed.assert_called_once_with(processed_crash)
poly_store.save_raw_and_processed(
raw_crash,
dump,
processed_crash,
'n'
)
for v in poly_store.stores.itervalues():
v.save_raw_crash.assert_called_with(raw_crash, dump, 'n')
v.save_processed.assert_called_with(processed_crash)
raw_crash = {'ooid': 'oaeu'}
dump = '5432'
processed_crash = {'ooid': 'aoeu', 'product': 33}
poly_store.stores['storage1'].save_raw_crash = Mock()
poly_store.stores['storage1'].save_raw_crash.side_effect = \
Exception('this is messed up')
poly_store.stores['storage2'].save_processed = Mock()
poly_store.stores['storage2'].save_processed.side_effect = \
Exception('this is messed up')
self.assertRaises(PolyStorageError,
poly_store.save_raw_crash,
raw_crash,
dump,
'')
for v in poly_store.stores.itervalues():
v.save_raw_crash.assert_called_with(raw_crash, dump, '')
self.assertRaises(PolyStorageError,
poly_store.save_processed,
processed_crash)
for v in poly_store.stores.itervalues():
v.save_processed.assert_called_with(processed_crash)
poly_store.stores['storage2'].close.side_effect = \
Exception
self.assertRaises(PolyStorageError,
poly_store.close)
for v in poly_store.stores.itervalues():
v.close.assert_called_with()
def test_fallback_crash_storage(self):
n = Namespace()
n.add_option(
'storage',
default=FallbackCrashStorage,
)
n.add_option(
'logger',
default=mock.Mock(),
)
value = {'primary.storage_class':
'socorro.unittest.external.test_crashstorage_base.A',
'fallback.storage_class':
'socorro.unittest.external.test_crashstorage_base.B',
}
cm = ConfigurationManager(n, values_source_list=[value])
with cm.context() as config:
self.assertEqual(config.primary.storage_class, A)
self.assertEqual(config.fallback.storage_class, B)
raw_crash = {'ooid': ''}
dump = '12345'
processed_crash = {'ooid': '', 'product': 17}
fb_store = config.storage(config)
# save_raw tests
fb_store.primary_store.save_raw_crash = Mock()
fb_store.fallback_store.save_raw_crash = Mock()
fb_store.save_raw_crash(raw_crash, dump)
fb_store.primary_store.save_raw_crash.assert_called_with(
raw_crash,
dump
)
self.assertEqual(fb_store.fallback_store.save_raw_crash.call_count,
0)
fb_store.primary_store.save_raw_crash = Mock()
fb_store.primary_store.save_raw_crash.side_effect = Exception('!')
fb_store.save_raw_crash(raw_crash, dump)
fb_store.primary_store.save_raw_crash.assert_called_with(
raw_crash,
dump
)
fb_store.fallback_store.save_raw_crash.assert_called_with(
raw_crash,
dump
)
fb_store.fallback_store.save_raw_crash = Mock()
fb_store.fallback_store.save_raw_crash.side_effect = Exception('!')
self.assertRaises(PolyStorageError,
fb_store.save_raw_crash,
raw_crash,
dump
)
fb_store.primary_store.save_raw_crash.assert_called_with(
raw_crash,
dump
)
fb_store.fallback_store.save_raw_crash.assert_called_with(
raw_crash,
dump
)
# save_processed tests
fb_store.primary_store.save_processed = Mock()
fb_store.fallback_store.save_processed = Mock()
fb_store.save_processed(processed_crash)
fb_store.primary_store.save_processed.assert_called_with(
processed_crash
)
self.assertEqual(fb_store.fallback_store.save_processed.call_count,
0)
fb_store.primary_store.save_processed = Mock()
fb_store.primary_store.save_processed.side_effect = Exception('!')
fb_store.save_processed(processed_crash)
fb_store.primary_store.save_processed.assert_called_with(
processed_crash
)
fb_store.fallback_store.save_processed.assert_called_with(
processed_crash
)
fb_store.fallback_store.save_processed = Mock()
fb_store.fallback_store.save_processed.side_effect = Exception('!')
self.assertRaises(PolyStorageError,
fb_store.save_processed,
processed_crash
)
fb_store.primary_store.save_processed.assert_called_with(
processed_crash
)
fb_store.fallback_store.save_processed.assert_called_with(
processed_crash
)
# close tests
fb_store.primary_store.close = Mock()
fb_store.fallback_store.close = Mock()
fb_store.close()
fb_store.primary_store.close.assert_called_with()
fb_store.fallback_store.close.assert_called_with()
fb_store.primary_store.close = Mock()
fb_store.fallback_store.close = Mock()
fb_store.fallback_store.close.side_effect = NotImplementedError()
fb_store.close()
fb_store.primary_store.close.assert_called_with()
fb_store.fallback_store.close.assert_called_with()
fb_store.primary_store.close = Mock()
fb_store.primary_store.close.side_effect = Exception('!')
fb_store.close()
fb_store.primary_store.close.assert_called_with()
fb_store.fallback_store.close.assert_called_with()
fb_store.fallback_store.close = Mock()
fb_store.fallback_store.close.side_effect = Exception('!')
self.assertRaises(PolyStorageError,
fb_store.close)
fb_store.primary_store.close.assert_called_with()
fb_store.fallback_store.close.assert_called_with()
| [
"twobraids@gmail.com"
] | twobraids@gmail.com |
9b98d7bfabb96ee12222318b92253a3d5c1d05a3 | f981775b00980b1d8fabed01f0cd49f49c32719a | /core/consumers/__init__.py | a15efc1eccc5c709e9e068547a1139951528415e | [] | no_license | code2duo/backend | c12a6768eebbc7a22758ae7f11caaf96243ee4b4 | 73f3002d8e1be354e85cf1e5ae9561dbc551dd41 | refs/heads/main | 2023-05-06T05:02:51.152553 | 2021-06-07T06:56:42 | 2021-06-07T06:56:42 | 338,546,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | from .link_matchmaking import *
| [
"debdutgoswami@gmail.com"
] | debdutgoswami@gmail.com |
60210a71a62757ae874cc9ded2d4021e2125873f | 86df33d911606ecae9996752d79b530a124c472b | /hipsr-converter-tandem.py | 0147ac8002f3f239f71b190911e4b8564052e80f | [] | no_license | telegraphic/hipsr_reduction | 905248c004e46ad7e62d05652e831c0913b32b90 | 1ec8e8877f68ee6d4ba0c7c103015c7749400280 | refs/heads/master | 2020-05-20T10:56:43.677174 | 2019-01-31T05:57:57 | 2019-01-31T05:57:57 | 9,489,410 | 0 | 0 | null | 2019-01-31T05:57:58 | 2013-04-17T04:48:16 | Python | UTF-8 | Python | false | false | 8,541 | py | #!/usr/bin/env python
"""
hipsr-converter.py
==================
This script starts a graphical user interface for converting HIPSR + MBCORR data to SD-FITS.
Use this script when converting HIPSR + MBCORR data taken in tandem.
"""
# Imports
import sys
from lib.sdfits import *
from lib import eager_weaver
try:
from termcolor import cprint
except ImportError:
def cprint_fallback(textstr, color):
print textstr
cprint = cprint_fallback
# Python metadata
__version__ = "v2.0 - Ballistic Bandicoot"
__author__ = "Danny Price"
__email__ = "dprice@cfa.harvard.edu"
__modified__ = datetime.fromtimestamp(os.path.getmtime(os.path.abspath( __file__ )))
try:
import lib.qt_compat as qt_compat
QtGui = qt_compat.import_module("QtGui")
QtCore = qt_compat.QtCore
USES_PYSIDE = qt_compat.is_pyside()
except:
print "Error: cannot load PySide or PyQt4. Please check your install."
exit()
try:
import numpy as np
except:
print "Error: cannot load Numpy. Please check your install."
exit()
try:
import pyfits as pf
except ImportError:
try:
from astropy.io import fits as pf
print "Using Astropy for FITS I/O"
except:
print "Error: cannot load PyFITS or AstroPY I/O. Please check your install."
exit()
try:
import tables as tb
except:
print "Error: cannot load PyTables. Please check your install."
exit()
class Window(QtGui.QDialog):
    """Dialog to choose HIPSR/MBCORR input directories and an output directory,
    then run the tandem HIPSR-MBCORR conversion over all matching file pairs."""

    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        # Pre-fill the three directory combo boxes with the last-used paths.
        last_in, last_mb, last_out = self.load_last()
        self.in_combox = self.createComboBox(last_in)
        self.in_label = QtGui.QLabel("HIPSR input directory:")
        self.in_browse = self.createButton("&Browse...", self.in_set)
        self.in_label.setToolTip("Select input directory (HDF files)")
        self.in_combox.setToolTip("Select input directory (HDF files)")
        self.mb_combox = self.createComboBox(last_mb)
        self.mb_label = QtGui.QLabel("MBCORR input directory:")
        self.mb_browse = self.createButton("&Browse...", self.mb_set)
        self.mb_label.setToolTip("Select MBCORR input directory (SD-FITS files)")
        self.mb_combox.setToolTip("Select MBCORR input directory (SD-FITS files)")
        self.out_combox = self.createComboBox(last_out)
        self.out_label = QtGui.QLabel("Output directory:")
        self.out_browse = self.createButton("&Browse...", self.out_set)
        self.out_label.setToolTip("Select output directory (SD-FITS)")
        self.out_combox.setToolTip("Select output directory (SD-FITS")
        self.convert_button = self.createButton("&Convert", self.convert)
        #self.rb_autos = QtGui.QRadioButton("Write autocorrs", self)
        #self.rb_xpol = QtGui.QRadioButton("Write cross-pol", self)
        #self.rb_stokes = QtGui.QRadioButton("Write Stokes", self)
        #self.rb_autos.setChecked(True)
        # Grid layout: one row per directory (label / combo / browse button).
        mainLayout = QtGui.QGridLayout()
        mainLayout.addWidget(self.in_label, 0, 0)
        mainLayout.addWidget(self.in_combox, 0, 1)
        mainLayout.addWidget(self.in_browse, 0, 2)
        mainLayout.addWidget(self.mb_label, 1, 0)
        mainLayout.addWidget(self.mb_combox, 1, 1)
        mainLayout.addWidget(self.mb_browse, 1, 2)
        mainLayout.addWidget(self.out_label, 2, 0)
        mainLayout.addWidget(self.out_combox, 2, 1)
        mainLayout.addWidget(self.out_browse, 2, 2)
        #mainLayout.addWidget(self.rb_autos, 3, 1)
        #mainLayout.addWidget(self.rb_xpol, 4, 1)
        #mainLayout.addWidget(self.rb_stokes, 5, 1)
        mainLayout.addWidget(self.convert_button, 3, 2)
        self.setLayout(mainLayout)
        self.setWindowTitle("HIPSR-MBCORR tandem observation data converter")

    def load_last(self):
        """Return the three directories saved by save_last(), or the current
        working directory for all three when the file is missing/stale.
        NOTE(review): only last_in and last_out are validated, not last_mb."""
        try:
            f = open(QtCore.QDir.currentPath()+'/.last_tandem')
            last_in = f.readline().strip('\n')
            last_mb = f.readline().strip('\n')
            last_out = f.readline().strip('\n')
            f.close()
            if os.path.exists(last_in) and os.path.exists(last_out):
                return last_in, last_mb, last_out
            else:
                raise IOError
        except:
            return QtCore.QDir.currentPath(), QtCore.QDir.currentPath(), QtCore.QDir.currentPath()

    def save_last(self):
        """Persist the currently selected directories to .last_tandem."""
        try:
            f = open(QtCore.QDir.currentPath()+'/.last_tandem', 'w')
            f.write(self.in_combox.currentText()+'\n')
            f.write(self.mb_combox.currentText()+'\n')
            f.write(self.out_combox.currentText()+'\n')
            f.close()
        except IOError:
            pass

    def in_set(self):
        """Browse for the HIPSR input directory and select it in the combo box."""
        last_in, last_mb, last_out = self.load_last()
        directory = QtGui.QFileDialog.getExistingDirectory(self, "Select HIPSR input directory",
                                                           last_in + '/..')
        if directory:
            if self.in_combox.findText(directory) == -1:
                self.in_combox.addItem(directory)
            self.in_combox.setCurrentIndex(self.in_combox.findText(directory))

    def mb_set(self):
        """Browse for the MBCORR input directory and select it in the combo box."""
        last_in, last_mb, last_out = self.load_last()
        directory = QtGui.QFileDialog.getExistingDirectory(self, "Select MBCORR input directory",
                                                           last_mb + '/..')
        if directory:
            if self.mb_combox.findText(directory) == -1:
                self.mb_combox.addItem(directory)
            self.mb_combox.setCurrentIndex(self.mb_combox.findText(directory))

    def out_set(self):
        """Browse for the SD-FITS output directory and select it in the combo box."""
        last_in, last_mb, last_out = self.load_last()
        directory = QtGui.QFileDialog.getExistingDirectory(self, "Select SD-FITS ouput directory",
                                                           last_out + '/..')
        if directory:
            if self.out_combox.findText(directory) == -1:
                self.out_combox.addItem(directory)
            self.out_combox.setCurrentIndex(self.out_combox.findText(directory))

    # NOTE(review): dead code — missing 'self' parameter, and never called.
    def updateComboBox(comboBox):
        if comboBox.findText(comboBox.currentText()) == -1:
            comboBox.addItem(comboBox.currentText())

    def createButton(self, text, member):
        """Create a push button wired to the given slot."""
        button = QtGui.QPushButton(text)
        button.clicked.connect(member)
        return button

    def createComboBox(self, text=""):
        """Create an editable, horizontally expanding combo box seeded with text."""
        comboBox = QtGui.QComboBox()
        comboBox.setEditable(True)
        comboBox.addItem(text)
        comboBox.setSizePolicy(QtGui.QSizePolicy.Expanding,
                               QtGui.QSizePolicy.Preferred)
        return comboBox

    def convert(self):
        """Pair every MBCORR file with the closest-in-time HIPSR file and run
        the eager_weaver conversion (eager_weaver/cprint are defined elsewhere
        in this module, outside this chunk)."""
        self.save_last()
        print("HIPSR-MBCORR tandem converter")
        print("-----------------------------")
        print("Input directory (HIPSR): %s"%self.in_combox.currentText())
        print("Input directory (MBCORR): %s"%self.mb_combox.currentText())
        print("Output directory: %s"%self.out_combox.currentText())
        hipsr_dir = self.in_combox.currentText()
        mbcorr_dir = self.mb_combox.currentText()
        mbcorr_files = eager_weaver.findMbcorrFiles(self.mb_combox.currentText())
        output_dir = self.out_combox.currentText()
        # Make sure output directory exists
        if not os.path.exists(output_dir):
            print("Creating directory %s"%output_dir)
            os.makedirs(output_dir)
        i = 0
        for mb_filename in mbcorr_files:
            i += 1
            cprint("\nfile %i of %i (%02d%%)"%(i, len(mbcorr_files), float(i)/len(mbcorr_files)*100), 'green')
            cprint("-------------------", 'green')
            hp_filename, t_diff = eager_weaver.filePairer(mb_filename, hipsr_dir)
            # A gap of 60 s or more means no usable match; stop processing.
            if t_diff >= 60:
                print "No match found for %s"%mb_filename
                break
            # NOTE(review): after the break above, t_diff < 60 always holds
            # here, so the else branch below is unreachable.
            if t_diff <= 60:
                print "MBCORR input file: %s"%mb_filename
                print "Closest matching file: %s"%hp_filename
                print "Time delta: %d\n"%t_diff
                out_filename = os.path.join(output_dir, 'hipsr_'+os.path.basename(mb_filename))
                eager_weaver.eagerWeaver(mb_filename, hp_filename, out_filename,
                                         hp_search_dir=hipsr_dir, sd_search_dir=mbcorr_dir, gmt_diff=0)
            else:
                print "No matching file found. Skipping..."
        print("DONE!")
if __name__ == '__main__':
    # Start the Qt application and show the converter dialog.
    import sys
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    app.exec_()
"dan@thetelegraphic.com"
] | dan@thetelegraphic.com |
564bc388405b583bc46791b82109ca7ea6a2ee5d | 77a2818ad2c81dac096a3da038bd8e90b54f5dd0 | /config.py | 8f0a9f919a927b16e422d9f904d790313c50de5c | [] | no_license | leonfrench/CLP_shared_task_code | ea7e2ad700a8a92e80b0c657b8814b346dcc34d9 | e8ac1c47e07e428d85b3410359b050a1f394ed0d | refs/heads/master | 2021-09-05T20:40:56.649857 | 2018-01-30T21:50:39 | 2018-01-30T21:50:39 | 102,999,333 | 0 | 1 | null | 2018-01-30T21:50:41 | 2017-09-10T02:59:39 | Python | UTF-8 | Python | false | false | 953 | py | import os
import platform

# Per-machine configuration: a known host gets a hard-coded data directory,
# every other machine uses ./data under the current working directory.
if platform.node() == 'RES-C02RF0T2.local':  # example of using a computer specific path
    DATA_DIR = "/Users/lfrench/Downloads/testseate/clp2017_release/data/"
    CORES = 2
else:  # using working directory as per setup in the readme file
    cwd = os.getcwd()
    DATA_DIR = os.path.join(cwd, "data")
    CORES = 1

# Fail fast when any expected raw-data folder/file is missing.
# Fixed: boolean checks now use 'and' (short-circuiting) instead of the
# bitwise '&' operator, which evaluated every os.path.exists() call.
if not (os.path.exists(os.path.join(DATA_DIR, "raw", "clpsych17-test-labels.tsv")) and
        os.path.exists(os.path.join(DATA_DIR, "raw", "clpsych16-data")) and
        os.path.exists(os.path.join(DATA_DIR, "raw", "clpsych17-data")) and
        os.path.exists(os.path.join(DATA_DIR, "raw", "clpsych17-test"))):
    raise RuntimeError(
        'ERROR: this computer (' + platform.node() + ') is not configured. Please change this in config.py')

POSTS_DIR = os.path.join(DATA_DIR, "raw")
interim_folder = os.path.join(DATA_DIR, 'interim')
# Create the interim working directory on first run.
if not os.path.exists(interim_folder):
    os.makedirs(interim_folder)
"noreply@github.com"
] | leonfrench.noreply@github.com |
668f3c2cfd522133df6c248d9298ef641cdec07a | abe81cbad04413e49b969f2a66393d01fbc8d28e | /pig_dice_game_global.py | 1b54cd46039b6da7ace66d392cd9349bb23b567d | [] | no_license | JCWDFCS/Python-Handy-Programs-Checklist | cf6ee6c6637bcb0cdfe62c10843207edca9042e4 | 4f19368d6f2123dd0ccf517c1aea2139bd45e9f2 | refs/heads/master | 2021-05-16T13:32:33.454661 | 2017-10-16T14:55:56 | 2017-10-16T14:55:56 | 105,407,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py |
import random

# Game state shared (via 'global') by the turn/scoring functions below.
turn = 1              # current turn number
score = 0             # total banked score
score_this_turn = 0   # points accumulated during the current turn
turn_over = False     # set when the current turn ends
game_over = False     # set when the banked score reaches 20
def display_rules():
    """Print the rules of the dice game Pig."""
    rule_lines = (
        "Let's play the PIG.",
        "",
        "* See how many turns it takes you to get to 20.",
        "* Turn ends when you hold or roll a 1.",
        "* If you roll a 1, you lose all points for the turn.",
        "* If you hold, you save all points for the turn.",
        "",
    )
    for line in rule_lines:
        print(line)
def play_game():
    """Run interactive turns until game_over is set by hold_turn()."""
    while not game_over:
        take_turn()
    print()
    print('Game over!')
def take_turn():
    """Prompt the player to roll or hold until the turn ends."""
    global turn_over
    print('Turn', turn)
    turn_over = False
    while not turn_over:
        choice = input('Roll or hold? (r/h):')
        if choice == 'r':
            roll_die()
        elif choice == 'h':
            hold_turn()
        else:
            print('Invalid choice, Try again.')
def roll_die():
    """Roll the die once; a 1 forfeits this turn's points and ends the turn.

    Fixed bug: the global statement misspelled 'turn_over' as 'turn_ove',
    so 'turn_over = True' created a local variable and rolling a 1 never
    actually ended the turn.
    """
    global turn, score_this_turn, turn_over
    die = random.randint(1, 6)
    print('Die:', die)
    if die == 1:
        # Rolling a 1 wipes the turn's points and advances to the next turn.
        score_this_turn = 0
        turn += 1
        print("Turn over.No score.\n")
        turn_over = True
    else:
        score_this_turn += die
        print('Scores in hand: ', score_this_turn)
def hold_turn():
    """Bank this turn's points, end the turn, and finish the game at 20+."""
    global turn, score_this_turn, score, turn_over, game_over
    print("Score for turn:", score_this_turn)
    score += score_this_turn
    # reset to zero
    score_this_turn = 0
    print("Total score:", score, '\n')
    turn_over = True
    if score >= 20:
        game_over = True
        print("You finished in", turn, 'turns!')
    turn += 1
# If started as the main module, call the main() function.
def main():
    """Show the rules, then run the interactive game loop."""
    display_rules()
    play_game()

if __name__ == "__main__":
    main()
| [
"342143690@qq.com"
] | 342143690@qq.com |
babee146b346fe4e1012cde9cba57b1cf883c4b5 | 4763ed635f98a530c0f378c3ffe8d6ccb3752b19 | /redditbot/onetime.py | 8ac99f0d3055d3e817e0c81c128f2e09f0479428 | [
"MIT"
] | permissive | mattkleinberg/AutoVodResponseBot | 661dd2de075e7ae06e3464c22de959897512f79f | 7986048c5c8e871aa40701a7cbfd6cee5578d210 | refs/heads/master | 2021-01-10T07:24:49.114981 | 2016-03-13T01:10:49 | 2016-03-13T01:10:49 | 53,755,200 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import praw
from prawoauth2 import PrawOAuth2Server

from settings import app_key, app_secret, user_agent, scopes

# One-time helper: run the local OAuth2 server, let the user authorize the
# reddit app in a browser, then print the tokens to copy into settings.
reddit_client = praw.Reddit(user_agent=user_agent)
oauthserver = PrawOAuth2Server(reddit_client, app_key=app_key,
                               app_secret=app_secret, state=user_agent,
                               scopes=scopes)
# start the server, this will open default web browser
# asking you to authenticate
oauthserver.start()
tokens = oauthserver.get_access_codes()
print(tokens)
"mkleinbe@ist.ucf.edu"
] | mkleinbe@ist.ucf.edu |
998f5b3d89a07a14d7dc41bd878db07e4902b536 | 6022fcef8175c71e3f9d702fc7098eee2a3eb9ac | /game/steam/review.py | adc766e91db223836ccb85b430ef0afc1fcb34f9 | [] | no_license | yxw19870806/Py3Crawler | f8fe8d68138dcfe3c63cc086d8b0042c814eab20 | e79889d0dbc13df90bca29e616ca5024ad2cdf18 | refs/heads/master | 2023-08-22T19:19:43.458412 | 2023-05-17T06:15:31 | 2023-05-17T06:15:31 | 139,689,380 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,030 | py | # -*- coding:UTF-8 -*-
"""
获取steam可以发布评测的游戏
https://store.steampowered.com/
@author: hikaru
email: hikaru870806@hotmail.com
如有问题或建议请联系
"""
import os
from common import *
from game.steam.lib import steam
# 打印列表
# print_type 0 全部游戏
# print_type 1 只要本体
# print_type 2 只要DLC
# print_type 3 只要本体已评测的DLC
def print_list(apps_cache_data, game_dlc_list, print_type=0):
for game_id in apps_cache_data["can_review_lists"]:
# 是DLC
if game_id in game_dlc_list:
if print_type == 1:
continue
# 本体没有评测过
if game_dlc_list[game_id] in apps_cache_data["can_review_lists"]:
if print_type == 3:
continue
else:
if print_type == 2 or print_type == 3:
continue
console.log("https://store.steampowered.com/app/%s" % game_id)
def main(check_game=True):
    """Walk the account's owned games, track which ones (and which DLC) can
    still be reviewed, and print the resulting store URLs.

    NOTE(review): nesting below is reconstructed from an indentation-stripped
    source; confirm branch placement against the original repository.
    """
    # Log in to Steam
    steam_class = steam.Steam(need_login=True)
    # Cached per-app bookkeeping (reviewed / can-review lists)
    apps_cache_data = steam_class.load_cache_apps_info()
    # List of app ids already checked in earlier runs
    checked_apps_file_path = os.path.join(steam_class.cache_data_path, "review_checked.txt")
    checked_apps_string = file.read_file(checked_apps_file_path)
    if checked_apps_string:
        checked_apps_list = checked_apps_string.split(",")
    else:
        checked_apps_list = []
    # Apps removed from the store
    deleted_app_list = steam_class.load_deleted_app_list()
    # Apps with restricted profiles
    restricted_app_list = steam_class.load_restricted_app_list()
    # Mapping: DLC id -> base game id
    game_dlc_list = steam_class.load_game_dlc_list()
    # Fetch the full list of games the account has played
    try:
        played_game_list = steam.get_account_owned_app_list(steam_class.account_id, True)
    except crawler.CrawlerException as e:
        console.log(e.http_error("个人游戏主页"))
        raise
    if check_game:
        while len(played_game_list) > 0:
            game_id = played_game_list.pop()
            if game_id in deleted_app_list:
                continue
            if game_id in checked_apps_list:
                continue
            console.log("开始解析游戏 %s,剩余数量:%s" % (game_id, len(played_game_list)))
            # Fetch the game's store page data
            try:
                game_data = steam.get_game_store_index(game_id)
            except crawler.CrawlerException as e:
                console.log("游戏 %s 解析失败,原因:%s" % (game_id, e.message))
                console.log(e.http_error("游戏%s" % game_id))
                continue
            is_change = False
            # Game has been removed from the store
            if game_data["deleted"]:
                deleted_app_list.append(game_id)
                # Persist the updated deleted list
                steam_class.save_deleted_app_list(deleted_app_list)
            else:
                # Walk every DLC of the game, if any
                for dlc_id in game_data["dlc_list"]:
                    # Already reviewed — skip the check
                    if dlc_id in apps_cache_data["review_list"]:
                        continue
                    # Record the DLC -> base game relation
                    if dlc_id not in game_dlc_list:
                        game_dlc_list[dlc_id] = game_id
                        is_change = True
                    # Fetch the DLC's store page data
                    try:
                        dlc_data = steam.get_game_store_index(dlc_id)
                    except crawler.CrawlerException as e:
                        console.log(e.http_error("游戏%s" % dlc_id))
                        continue
                    if dlc_data["owned"]:
                        # Already reviewed
                        if dlc_data["reviewed"]:
                            # Drop from the pending-review list
                            if dlc_id in apps_cache_data["can_review_lists"]:
                                apps_cache_data["can_review_lists"].remove(dlc_id)
                            # Record as reviewed
                            if dlc_id not in apps_cache_data["review_list"]:
                                apps_cache_data["review_list"].append(dlc_id)
                        # Newly reviewable DLC
                        else:
                            if dlc_id not in apps_cache_data["can_review_lists"]:
                                apps_cache_data["can_review_lists"].append(dlc_id)
                # The game itself has already been reviewed
                if game_data["reviewed"]:
                    # Drop from the pending-review list
                    if game_id in apps_cache_data["can_review_lists"]:
                        apps_cache_data["can_review_lists"].remove(game_id)
                    # Record as reviewed
                    if game_id not in apps_cache_data["review_list"]:
                        apps_cache_data["review_list"].append(game_id)
                # Newly reviewable game
                else:
                    if game_id not in apps_cache_data["can_review_lists"]:
                        apps_cache_data["can_review_lists"].append(game_id)
                if is_change:
                    steam_class.save_game_dlc_list(game_dlc_list)
                # Profile-restricted game
                if game_data["restricted"]:
                    if game_id not in restricted_app_list:
                        restricted_app_list.append(game_id)
                        # Persist the updated restricted list
                        steam_class.save_restricted_app_list(restricted_app_list)
            # Mark the game as checked and persist progress after each app
            steam_class.save_cache_apps_info(apps_cache_data)
            checked_apps_list.append(game_id)
            file.write_file(",".join(checked_apps_list), checked_apps_file_path, const.WriteFileMode.REPLACE)
    # Output
    print_list(apps_cache_data, game_dlc_list)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl+C to stop the crawl quietly.
        pass
| [
"hikaru870806@hotmail.com"
] | hikaru870806@hotmail.com |
12d4022463c4e14e4a8d07430052771096ea3c82 | 48894ae68f0234e263d325470178d67ab313c73e | /sa/profiles/Supertel/K2X/get_arp.py | 000b19c788f9e60b4779efc75da73afe20e2447b | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 2,424 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Supertel.K2X.get_arp
##----------------------------------------------------------------------
## Copyright (C) 2007-2014 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import re
## NOC modules
from noc.sa.script import Script as NOCScript
from noc.sa.interfaces import IGetARP
class Script(NOCScript):
    """NOC profile script: collect the ARP table from a Supertel K2X switch."""
    name = "Supertel.K2X.get_arp"
    implements = [IGetARP]
    cache = True
    # Matches one 'show arp' row: optional VLAN column, interface, IP, MAC,
    # and the Dynamic/Static entry type.
    rx_line = re.compile(
        r"^(VLAN\s+\d+|)\s+(?P<interface>\S+)\s+(?P<ip>\S+)\s+"
        r"(?P<mac>\S+)\s+(Dynamic|Static)\s*$",
        re.MULTILINE | re.IGNORECASE)

    def execute(self):
        r = []
        """
        # Try SNMP first
        #
        # Some time return vlan ID, not interface name!!!
        #
        if self.snmp and self.access_profile.snmp_ro:
            try:
                for v in self.snmp.get_tables(["1.3.6.1.2.1.4.22.1.1",
                                               "1.3.6.1.2.1.4.22.1.2",
                                               "1.3.6.1.2.1.4.22.1.3"],
                                              bulk=True):
                    iface = self.snmp.get("1.3.6.1.2.1.31.1.1.1.1." + v[1],
                                          cached=True)
                    mac = ":".join(["%02x" % ord(c) for c in v[2]])
                    ip = ["%02x" % ord(c) for c in v[3]]
                    ip = ".".join(str(int(c, 16)) for c in ip)
                    r.append({
                        "ip": ip,
                        "mac": mac,
                        "interface": iface,
                    })
                return r
            except self.snmp.TimeOutError:
                pass
        """
        # Fallback to CLI: parse each 'show arp' row; incomplete entries have
        # no resolved MAC/interface yet.
        for match in self.rx_line.finditer(self.cli("show arp", cached=True)):
            mac = match.group("mac")
            if mac.lower() == "incomplete":
                r.append({
                    "ip": match.group("ip"),
                    "mac": None,
                    "interface": None
                })
            else:
                r.append({
                    "ip": match.group("ip"),
                    "mac": match.group("mac"),
                    "interface": match.group("interface")
                })
        return r
| [
"dmitryluhtionov@gmail.com"
] | dmitryluhtionov@gmail.com |
eb56affbd27618bde4b06d66b69eb0980d51825f | 7cf69f4a3607de56845660d684e2bc6d016c999c | /test.py | 3a409d376454c1923a704f73935c5f5c34b6050e | [] | no_license | niubencoolboy/ids-python | f5e5d503c6f946f05f33350d7abb49053733369b | 1e9b16d1c6bc74965daaeb677abaa38ee4929e4a | refs/heads/master | 2020-05-30T13:03:53.416358 | 2017-02-23T08:47:35 | 2017-02-23T08:47:35 | 82,631,029 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | import os
import linecache

config_file = "/home/john/myrules/test.conf"
# All lines of the config file, cached in memory; mutated in place by
# add_rules()/del_rules() and written back by change_rulesfile().
filedata = linecache.getlines(config_file)
def generate_replacestr(rulename):
    """Return [enabled_line, disabled_line] for the named rules file.

    enabled_line is the full '.rules' path terminated by a newline;
    disabled_line is the same line commented out with a leading '#'.
    """
    rule_line = '/home/john/myrules/' + rulename + '.rules' + '\n'
    return [rule_line, '#' + rule_line]
def add_rules(rulename):
    """Enable a rule set by uncommenting its line in the cached config lines."""
    enabled, disabled = generate_replacestr(rulename)
    for idx, line in enumerate(filedata):
        if line == disabled:
            filedata[idx] = enabled
def del_rules(rulename):
    """Disable a rule set by commenting out its line in the cached config lines."""
    enabled, disabled = generate_replacestr(rulename)
    for idx, line in enumerate(filedata):
        if line == enabled:
            filedata[idx] = disabled
def bakfile(config_file):
    """Create/refresh a '<config_file>.bak' copy (shells out via sudo).

    Fixed bugs: the variable was misspelled 'bakefilename' in three places
    (raising NameError on every call), and the rm command removed the
    literal file named 'bakefilename' instead of the backup path.
    """
    bakfilename = config_file + '.bak'
    if os.path.exists(bakfilename):
        os.system("sudo rm %s" % bakfilename)
    os.system("sudo cp %s %s" % (config_file, bakfilename))
def change_rulesfile(config_file):
    """Back up config_file, then overwrite it with the cached 'filedata' lines.

    Fixed: the file handle is now managed by a 'with' block so it is closed
    even if a write fails (the original relied on an explicit close()).
    """
    bakfile(config_file)
    with open(config_file, 'w+') as f:
        for line in filedata:
            f.write(line)
| [
"niubencoolboy@icloud.com"
] | niubencoolboy@icloud.com |
99c86ae8260bc161f018f3421b233142726ddba9 | 14cf596d556fbf239fd16d79643c32ddd3b427d6 | /examples/example-era5-update.py | 5749e846469246fc2165feab679230995e8b0c5d | [
"Apache-2.0"
] | permissive | fxi/cdsapi | b7b668686ec60938c75de78b8f87b2c4c56018ca | 75f47f62253b51066302cdb443c263c5c931959a | refs/heads/master | 2022-11-19T23:46:06.640125 | 2020-07-20T10:01:56 | 2020-07-20T10:01:56 | 281,077,366 | 0 | 0 | Apache-2.0 | 2020-07-20T09:47:49 | 2020-07-20T09:47:48 | null | UTF-8 | Python | false | false | 1,499 | py | #!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import time

import cdsapi

# Asynchronous retrieval example: submit the request, then poll its state
# manually instead of blocking until completion.
c = cdsapi.Client(debug=True, wait_until_complete=False)

r = c.retrieve(
    "reanalysis-era5-single-levels",
    {
        "variable": "2t",
        "product_type": "reanalysis",
        "date": "2015-12-01",
        "time": "14:00",
        "format": "netcdf",
    },
)

# Seconds to wait between polls while the request is queued/running.
sleep = 30

while True:
    r.update()
    reply = r.reply
    r.info("Request ID: %s, state: %s" % (reply['request_id'], reply['state']))
    if reply['state'] == 'completed':
        break
    elif reply['state'] in ('queued', 'running'):
        r.info("Request ID: %s, sleep: %s", reply['request_id'], sleep)
        time.sleep(sleep)
    elif reply['state'] in ('failed',):
        r.error("Message: %s", reply['error'].get('message'))
        r.error("Reason:  %s", reply['error'].get('reason'))
        # Log the server-side traceback up to the first blank line.
        for n in reply.get('error', {}).get('context', {}).get('traceback', '').split('\n'):
            if n.strip() == '':
                break
            r.error("  %s", n)
        raise Exception("%s. %s." % (reply['error'].get('message'), reply['error'].get('reason')))

r.download("test.nc")
| [
"jbl@greensteam.com"
] | jbl@greensteam.com |
3a47f530d3967f60e3f9aedf78f389ee9e35e612 | 5b3a3c6df239e8dfec2d0e49a099a777259f0b15 | /repeated_string/main.py | d584986e8fb8ed324dd5ddce7781b8360ed73c99 | [] | no_license | ruthaleks/hackerank | e6090945b9f7a3805ec65c69ce48288028d173a4 | ad06966b82fb777b763e0caba078de70766a771d | refs/heads/master | 2021-07-24T21:07:48.989361 | 2020-09-12T17:05:39 | 2020-09-12T17:05:39 | 216,346,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | #!/bin/python3
import sys
from collections import Counter
from itertools import accumulate
from functools import reduce
def read_input():
    """Read n from the first stdin line and the puzzle string from the next."""
    count = int(sys.stdin.readline())
    remaining_lines = sys.stdin.readlines()
    return remaining_lines[0].strip(), count
def repeat_string(n, s):
    """Count occurrences of 'a' in the first n characters of s repeated forever.

    Fixed: the original printed many debug lines, which corrupts the
    exercise's expected stdout (only the final count should be printed by
    main()); it also crashed with ZeroDivisionError on an empty string.
    """
    if not s:
        return 0
    # Whole repetitions of s that fit in n, plus the leftover prefix length.
    full_repeats, remainder = divmod(n, len(s))
    return full_repeats * s.count('a') + s[:remainder].count('a')
def main():
    # Read the puzzle input and print the answer for the judge to check.
    data, n = read_input()
    print(repeat_string(n, data))
main()
| [
"ruth.aleks@gmail.com"
] | ruth.aleks@gmail.com |
778f8a53deea26f4067bbf8916a048fc8c644981 | 3fa2e4e97db1dace11f58f02b56ac42da58d1e91 | /statslib/_lib/gcalib.py | 7657d66cf6ef9027db4b4a72c778c4a05510f891 | [
"Apache-2.0"
] | permissive | a4shubert/statslib | 4ab932f43185ea4fe115a7e36bcf843d29487809 | 5a35c0d10c3ca44c2d48f329c4f3790c91c385ac | refs/heads/main | 2023-06-27T00:24:34.662281 | 2021-05-13T09:33:16 | 2021-05-13T09:33:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from enum import Enum
import inspect
class CalibType(Enum):
    # Identifies which ML package family a calibrator object comes from,
    # as detected from its __module__ by GeneralCalibrator below.
    sm = 'statsmodels'
    sk = 'sklearn'
class GeneralCalibrator:
    """Wraps a calibrator object and records which package family it belongs to.

    The family is inferred from the calibrator's __module__ string; only
    sklearn and statsmodels calibrators are supported.
    """

    def __init__(self, cf, kwargs=None):
        self.cf = cf
        self.kwargs = kwargs
        source_module = cf.__module__
        # First matching marker wins ('sklearn' is checked before 'statsmodels').
        for marker, calib_type in (('sklearn', CalibType.sk),
                                   ('statsmodels', CalibType.sm)):
            if marker in source_module:
                self.calib_type = calib_type
                break
        else:
            raise NotImplementedError(f'calibrator from package {source_module} not accommodated yet')
"ashubertt@gmail.com"
] | ashubertt@gmail.com |
8a1eba8f7b2520022acab7f6eb9867e9b7be9cc9 | 39d2c315ad38426a6ad799fce1c742263d793fba | /app/routing.py | 6abdddda25ce4bc0623818b3af39756e4bb3e5b6 | [] | no_license | zhangyong1005/Django | 0df02843915d2dfafdbd2dd2c589ce76c1ca1cb3 | 0cc8666271a0314dcac54a4cbd38959c3b7af0e2 | refs/heads/master | 2020-08-25T02:19:51.603325 | 2019-10-24T02:07:31 | 2019-10-24T02:07:31 | 216,947,828 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from django.urls import path
from app.consumers import ChatConsumer
websocket_urlpatterns = [
path('ws/chat/test', ChatConsumer),
]
| [
"932382068@qq.com"
] | 932382068@qq.com |
09797f52f22903fa95a3d95e0f017996c94b5f55 | 1cb3e9d74f6b7047a3fd1d04cd3028df3f4c399d | /matplot_pyplot/plt1.py | bc9b199d3aa2baaad9f717ced2fa5567fe6e3ffc | [] | no_license | twilight1223/Deep-Learning-with-TensorFlow-book | 9bcc52c506d0186b445a023d9a88ca8c5b50a257 | 0dd79b356b5548052c160bc81e05a5f136ce982c | refs/heads/master | 2020-09-15T00:06:08.081192 | 2019-11-29T01:18:47 | 2019-11-29T01:18:47 | 223,300,808 | 0 | 0 | null | 2019-11-22T01:49:03 | 2019-11-22T01:49:02 | null | UTF-8 | Python | false | false | 85 | py | # __author__:zsshi
# __date__:2019/11/28
import matplotlib.pyplot as plt

# Opens an (empty) figure window; nothing has been plotted yet.
plt.show()
"zhangshan@hzyatop.com"
] | zhangshan@hzyatop.com |
2b1e5644ef133ca17895d3b65cfd195bb32c20b6 | 801f12dec57b18598d36aeae3a6d8fd1013defa0 | /ecommerce/settings.py | 9f24716404b48af1b50f528dcffd14c66833aef4 | [] | no_license | aryan2621/Ecommerce_Website | b489c8e990fbeae0856bbeb4306893b66b77950f | d70012b279a0415126ce1c09bae388478dd00235 | refs/heads/main | 2023-02-21T06:36:01.460946 | 2021-01-22T17:55:24 | 2021-01-22T17:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | """
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = 'j7vq3s@l$gotb*i4tbuu*ivyu#rxm#d_0ahjuf$k#z4nw3#1ur'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'store'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'ecommerce.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'ecommerce.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS=[
    os.path.join(BASE_DIR,'static')
]
# User-uploaded product images are served from /images/ and stored under
# static/images.
MEDIA_URL='/images/'
MEDIA_ROOT =os.path.join(BASE_DIR,'static/images')
"noreply@github.com"
] | aryan2621.noreply@github.com |
52e3cfdf0466002035ca447e43f1cfaa5a19d947 | 5d4d5296406e7da42ce8b6abf524c601783000e0 | /main.py | 58f94546b9efcc833fa6e03906a9d2ae930abc14 | [
"BSD-2-Clause"
] | permissive | melkims/silviux | 7c0b00607320c5aa49712729850cc51278993ad3 | 69ab953be83e920e8bf397a3549ab3d955f653c1 | refs/heads/main | 2023-01-21T04:35:10.276672 | 2020-11-20T21:25:11 | 2020-11-20T21:25:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | import logging
import logging.config
import yaml

# Configure logging from the YAML file before the application modules
# create their loggers.
with open('logger.yaml', 'r') as f:
    config = yaml.safe_load(f.read())
logging.config.dictConfig(config)

# Imported after logging is configured so the module's loggers inherit it.
import silviux.stream

silviux.stream.main()
| [
"greg@weisman.dev"
] | greg@weisman.dev |
0bf7bee662a64af9821014f2e78d6df5b62b41fd | 62c2d130e1cde4a78123056093936fa79bf08ae4 | /csit/libraries/IoTDM/client_libs/onem2m_json_primitive.py | a0afa3accb63315efa18d681ee2f56ad4a4f1710 | [] | no_license | anterodev/integration-test | 5b06f7554315abfa9000f058dd8f07fca802b93d | 5f53fc0f287f26c07905f1e7efd239a0e75d68b0 | refs/heads/master | 2021-01-21T22:09:49.642573 | 2017-06-20T19:12:04 | 2017-06-22T23:40:09 | 95,173,160 | 1 | 0 | null | 2017-06-23T01:57:03 | 2017-06-23T01:57:03 | null | UTF-8 | Python | false | false | 13,940 | py | """
Specific implementation of OneM2MPrimitive abstract class which uses JSON
strings and dictionaries as well as JSON pointers to store and access data
as OneM2M primitive objects
"""
#
# Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
#
import json
from onem2m_primitive import OneM2M
from onem2m_primitive import OneM2MPrimitive
from onem2m_primitive import OneM2MPrimitiveBuilder
from onem2m_primitive import OneM2MPrimitiveBuilderException
from jsonpointer import JsonPointer
from jsonpointer import JsonPointerException
class OneM2MJsonPrimitive(OneM2MPrimitive):
"""
Implementation of OneM2M primitive which allows to use JSON as strings or dictionaries
to work with the request/response primitives.
Using particular encoder/decoder, this primitive can be encoded/decoded to/from desired
content type:
JSON short scheme, JSON long scheme,
XML short scheme, XML long scheme
"""
    def __init__(self, parameters, content,
                 protocol_name, protocol_parameters, short_scheme=True):
        # Primitive parameters (operation, request id, ...) as a dict
        self.parameters = parameters
        # Primitive content (resource representation) as a dict, may be None
        self.content = content
        # Name of the communication protocol carrying the primitive
        self.protocol = protocol_name
        # Protocol-specific parameters as a dict
        self.proto_params = protocol_parameters
        # True when the short naming scheme is used
        self.short_scheme = short_scheme
    def get_parameters(self):
        """Return the primitive parameters as a dictionary."""
        return self.parameters

    def get_parameters_str(self):
        """Return the primitive parameters serialized to a JSON string."""
        return json.dumps(self.parameters)
def _create_json_pointer(self, pointer_string):
try:
json_pointer = str(pointer_string)
# add leading slash if missing
if json_pointer[0] != '/':
json_pointer = '/' + json_pointer
# remove slash from the end if exists
if json_pointer[-1] == '/':
json_pointer = json_pointer[:-1]
json_pointer = JsonPointer(json_pointer)
except Exception as e:
raise RuntimeError("Invalid JSON pointer passed: {}, error: {}".format(pointer_string, e.message))
return json_pointer
def _get_item_by_pointer(self, data_dict, pointer):
if None is data_dict:
raise AttributeError("No JSON data passed")
if not isinstance(pointer, JsonPointer):
json_pointer = self._create_json_pointer(pointer)
else:
json_pointer = pointer
try:
item = json_pointer.resolve(data_dict)
except JsonPointerException as e:
raise RuntimeError("Failed to get JSON item by JSON pointer: {}, error: {}".format(pointer, e.message))
return item
def _has_item_by_pointer(self, data_dict, pointer):
if None is data_dict:
raise AttributeError("No JSON data passed")
if not isinstance(pointer, JsonPointer):
json_pointer = self._create_json_pointer(pointer)
else:
json_pointer = pointer
try:
item = json_pointer.resolve(data_dict)
except JsonPointerException as e:
return False
return True
    def get_param(self, param):
        """Returns container or item value identified by string or JsonPointer object"""
        return self._get_item_by_pointer(self.parameters, param)

    def has_param(self, param):
        """Returns True if parameter identified by string or JsonPointer object exists, False otherwise"""
        return self._has_item_by_pointer(self.parameters, param)

    def get_content(self):
        """Return the primitive content dictionary (may be None)."""
        return self.content
def get_content_str(self):
if not self.content:
return ""
return json.dumps(self.content)
    def get_attr(self, attr):
        """Returns container or item value identified by string or JsonPointer object"""
        return self._get_item_by_pointer(self.content, attr)

    def has_attr(self, attr):
        """Returns True if attribute identified by string or JsonPointer object exists, False otherwise"""
        return self._has_item_by_pointer(self.content, attr)

    def get_protocol_specific_parameters(self):
        """Return the protocol-specific parameters as a dictionary."""
        return self.proto_params

    def get_protocol_specific_parameters_str(self):
        """Return the protocol-specific parameters as a JSON string."""
        return json.dumps(self.proto_params)

    def get_proto_param(self, proto_param):
        """Returns container or item value identified by string or JsonPointer object"""
        return self._get_item_by_pointer(self.proto_params, proto_param)

    def has_proto_param(self, proto_param):
        """Returns True if parameter identified by string or JsonPointer object exists, False otherwise"""
        return self._has_item_by_pointer(self.proto_params, proto_param)
def get_primitive_str(self):
"""
Returns whole OneM2M primitive as JSON string including primitive
parameters and primitive content
"""
primitive = {}
if self.parameters:
primitive = self.parameters.copy()
if self.content:
primitive[OneM2M.short_primitive_content] = self.content.copy()
return json.dumps(primitive)
    def get_communication_protocol(self):
        """Return the name of the protocol carrying this primitive."""
        return self.protocol

    def _check_protocol_of_request(self):
        # Request must declare which protocol it was exchanged over.
        if not self.get_communication_protocol():
            raise AssertionError("Communication protocol of request primitive not set")

    def _check_protocol_of_response(self, response_primitive):
        # Response must declare which protocol it was exchanged over.
        if not response_primitive.get_communication_protocol():
            raise AssertionError("Communication protocol of response primitive not set")

    def _check_exchange_protocols(self, response_primitive):
        """Verify both primitives carry the same communication protocol."""
        self._check_protocol_of_request()
        self._check_protocol_of_response(response_primitive)
        if not self.get_communication_protocol() == response_primitive.get_communication_protocol():
            raise AssertionError("Request {} and response {} primitives' communication protocols doesn't match.".
                                 format(self.get_communication_protocol(),
                                        response_primitive.get_communication_protocol()))
def _check_request_common(self):
    """Validate the mandatory common parameters of a request primitive.

    Verifies that the operation parameter exists, is an integer and is one
    of the known operation values, and that the request identifier exists
    and is a string.  Returns the (operation, request id) pair.  Raises
    AssertionError on the first violation found.
    """
    op = self.get_param(OneM2M.short_operation)
    if not op:
        raise AssertionError("Request primitive without operation set")
    if not isinstance(op, int):
        raise AssertionError("Invalid data type ({}) of operation where integer is expected".format(op.__class__))
    if op not in OneM2M.operation_valid_values:
        raise AssertionError("Request primitive with unknown operation set: {}".format(op))
    rqi = self.get_param(OneM2M.short_request_identifier)
    if not rqi:
        raise AssertionError("Request primitive without request id")
    # NOTE: basestring implies this module targets Python 2.
    if not isinstance(rqi, basestring):
        raise AssertionError("Invalid data type ({}) of request identifier where string is expected".
                             format(rqi.__class__))
    return op, rqi
def _check_response_common(self, response_primitive, rqi=None, rsc=None):
    """Validate the mandatory common parameters of a response primitive.

    Verifies that the response carries a string request identifier (and,
    when *rqi* is given, that it matches the request's one) and an integer,
    supported status code (and, when *rsc* is given, exactly that code).
    Returns the response status code.  Raises AssertionError on the first
    violation found.
    """
    rsp_rqi = response_primitive.get_param(OneM2M.short_request_identifier)
    if not rsp_rqi:
        raise AssertionError("Response primitive without request id")
    # NOTE: basestring implies this module targets Python 2.
    if not isinstance(rsp_rqi, basestring):
        raise AssertionError("Invalid data type ({}) of request identifier where string is expected".
                             format(rsp_rqi.__class__))
    if rqi and rqi != rsp_rqi:
        raise AssertionError("Request IDs mismatch: req: {}, rsp: {}".format(rqi, rsp_rqi))
    r_rsc = response_primitive.get_param(OneM2M.short_response_status_code)
    if not r_rsc:
        raise AssertionError("Response primitive without status code")
    if not isinstance(r_rsc, int):
        raise AssertionError("Invalid data type ({}) of response status code where integer is expected".
                             format(r_rsc.__class__))
    if r_rsc not in OneM2M.supported_result_codes:
        raise AssertionError("Unsupported response primitive result code: {}".format(r_rsc))
    # Exact-match check only when the caller stated an expected code.
    if None is not rsc:
        if r_rsc != rsc:
            raise AssertionError("Unexpected result code: {}, expected: {}".format(r_rsc, rsc))
    return r_rsc
def _check_exchange_common(self, response_primitive, rsc=None):
    """Run the common request/response validations for one exchange and
    return the (request operation, response status code) pair."""
    self._check_exchange_protocols(response_primitive)
    operation, request_id = self._check_request_common()
    status_code = self._check_response_common(response_primitive, request_id, rsc)
    return operation, status_code
def _check_response_positive_result(self, response_rsc=None, request_operation=None):
    """Assert *response_rsc* is a positive result code and, when the request
    operation is known, that it equals the code expected for it."""
    if response_rsc and response_rsc not in OneM2M.positive_result_codes:
        raise AssertionError("Response with negative status code: {}".format(response_rsc))
    if request_operation is None:
        return
    expected = OneM2M.expected_result_codes[request_operation]
    if response_rsc != expected:
        raise AssertionError(
            "Unexpected positive result code for operation: {}, received: {}, expected: {}".format(
                request_operation, response_rsc, expected))
def check_exchange(self, response_primitive, rsc=None):
    """Validate a whole positive request/response exchange."""
    operation, status_code = self._check_exchange_common(response_primitive, rsc)
    self._check_response_positive_result(status_code, operation)
def _check_response_negative_result(self, response_primitive, error_message):
    """Verify the error message of a negative response primitive.

    When *error_message* is falsy only the presence of the response
    primitive itself is checked; otherwise the response's error message
    attribute must exist, be a string and equal *error_message*.
    """
    if not response_primitive:
        raise AttributeError("Response primitive not passed")
    if not error_message:
        return
    msg = response_primitive.get_attr(OneM2M.error_message_item)
    if not msg:
        raise AssertionError("Negative response primitive without error message, expected message: {}".format(
            error_message))
    # NOTE: basestring implies this module targets Python 2.
    if not isinstance(msg, basestring):
        raise AssertionError("Invalid data type ({}) of response error message where string is expected".
                             format(msg.__class__))
    if not msg == error_message:
        raise AssertionError("Negative response with unexpected error message: {}, expected: {}".format(
            msg, error_message))
def check_exchange_negative(self, response_primitive, rsc, error_message=None):
    """Validate a request/response exchange that is expected to fail.

    Runs the common exchange checks (which already verify the expected
    negative status code *rsc*) and then verifies the error message when
    one is given.
    """
    # The (operation, status code) pair returned by the common check is not
    # needed here, so the previous unused unpacking has been dropped.
    self._check_exchange_common(response_primitive, rsc)
    self._check_response_negative_result(response_primitive, error_message)
def check_request(self):
    """Validate this primitive as a request: protocol set plus the common
    mandatory request parameters (operation, request id)."""
    self._check_protocol_of_request()
    self._check_request_common()
def check_response(self, rqi=None, rsc=None, request_operation=None):
    """Validate this primitive as a positive response.

    Optionally verifies the request id it answers (*rqi*), an exact
    expected status code (*rsc*) and the status code implied by the
    request's operation (*request_operation*).
    """
    self._check_protocol_of_response(self)
    # Use the actual status code returned by the common check.  The old code
    # forwarded the *expected* rsc argument, so calling check_response() with
    # only request_operation set compared the expected code against None and
    # always raised.  When rsc is given the common check already asserts
    # r_rsc == rsc, so this change is backward compatible.
    r_rsc = self._check_response_common(self, rqi, rsc)
    self._check_response_positive_result(r_rsc, request_operation)
def check_response_negative(self, rqi=None, rsc=None, error_message=None):
    """Validate this primitive as a negative (error) response.

    Optionally verifies the request id it answers (*rqi*), the expected
    status code (*rsc*) and the expected error message.
    """
    self._check_protocol_of_response(self)
    self._check_response_common(self, rqi, rsc)
    self._check_response_negative_result(self, error_message)
def _compare(self, primitive2):
    """Compare this primitive with *primitive2*; subclasses must implement."""
    raise NotImplementedError()
class OneM2MJsonPrimitiveBuilder(OneM2MPrimitiveBuilder, OneM2MJsonPrimitive):
    """Generic implementation of builder class for OneM2M JSON primitives"""

    def __init__(self):
        # Builder state; filled in by the fluent set_*/append_* methods below.
        self.parameters = {}
        self.content = {}
        self.protocol = None
        self.proto_params = {}
        self.short_scheme = None

    def _prepare_params(self, params):
        """Normalize *params* to a dict: None/empty -> {}, JSON string ->
        parsed dict, dict -> shallow copy; any other type raises
        OneM2MPrimitiveBuilderException."""
        if not params:
            return {}
        # NOTE: unicode/basestring imply this module targets Python 2.
        if isinstance(params, unicode):
            params = str(params)
        if isinstance(params, basestring):
            params = json.loads(params)
            return params
        if isinstance(params, dict):
            return params.copy()
        raise OneM2MPrimitiveBuilderException("Unsupported parameters object type")

    def set_parameters(self, parameters):
        """Replace all primitive parameters; returns self for chaining."""
        self.parameters = self._prepare_params(parameters)
        return self

    def append_parameters(self, parameters):
        """Merge *parameters* into the current parameters; returns self."""
        if not parameters:
            return self
        parameters = self._prepare_params(parameters)
        self.parameters.update(parameters)
        return self

    def set_param(self, param_name, param_value):
        """Set a single primitive parameter; returns self."""
        self.parameters.update({param_name: param_value})
        return self

    def set_content(self, attributes):
        """Replace the whole primitive content; returns self."""
        self.content = self._prepare_params(attributes)
        return self

    def append_content_attributes(self, attributes):
        """Merge *attributes* into the current content; returns self."""
        if not attributes:
            return self
        attributes = self._prepare_params(attributes)
        self.content.update(attributes)
        return self

    def set_att(self, attr_name, attr_value):
        """Set a single content attribute; returns self."""
        self.content.update({attr_name: attr_value})
        return self

    def set_communication_protocol(self, proto_name):
        """Set the communication protocol name; returns self."""
        self.protocol = proto_name
        return self

    def set_protocol_specific_parameters(self, proto_params):
        """Replace all protocol specific parameters; returns self."""
        self.proto_params = self._prepare_params(proto_params)
        return self

    def append_protocol_specific_parameters(self, proto_params):
        """Merge *proto_params* into the protocol specific ones; returns self."""
        if not proto_params:
            return self
        proto_params = self._prepare_params(proto_params)
        self.proto_params.update(proto_params)
        return self

    def set_proto_param(self, param_name, param_value):
        """Set a single protocol specific parameter; returns self."""
        self.proto_params.update({param_name: param_value})
        return self

    def clone(self):
        """Cloning is not supported by this builder."""
        raise NotImplementedError()

    def build(self):
        """Build and return the OneM2MJsonPrimitive from the gathered state."""
        return OneM2MJsonPrimitive(self.parameters,
                                   self.content,
                                   self.protocol,
                                   self.proto_params)
| [
"dfarrell@redhat.com"
] | dfarrell@redhat.com |
cc07adfe77b3e7b7202ad6ff5b3d8f20ca37b6c7 | a9c487594e24fbb2ee48acf216f9d5e243a246c5 | /client/scanner_hotelmarcopolo_caorle_alike_ws.py | 62ec69d49727821a2c2bddebf5c130ba117fdb18 | [] | no_license | davidedr/meteo_data_repo | ff66770c256242354010d2deadc0538679ee4f26 | 7701e1d1c95bdb038252cd4851f60fb3e5eef11b | refs/heads/master | 2023-06-19T20:40:32.197398 | 2021-07-20T14:30:33 | 2021-07-20T14:30:33 | 304,581,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,138 | py | from datetime import datetime
import logging
import utility
#
#
#
def scan_hotelmarcopolo_caorle_alike(last_seen_timestamp, server, save=True, log=True):
    """Scrape one weather sample from a "Hotel Marco Polo Caorle"-style
    weather station page and persist it.

    ``server`` is a dict carrying at least ``location_id``, ``name`` and
    ``url``.  Returns the timestamp string of the sample just saved, or
    ``last_seen_timestamp`` unchanged when the page cannot be fetched, the
    timestamp cannot be parsed, the station published no new sample, or the
    sample lacks the minimum required data.

    NOTE(review): the ``save`` flag is currently unused - utility.save() is
    always called once the checks pass; confirm whether it should gate it.
    """
    location_id=server["location_id"]
    server_name=server["name"]
    weather_station_url=server["url"]
    tree, _ = utility.get_tree(weather_station_url, location_id, server_name)
    if tree is None:
        # Page could not be fetched/parsed; keep the previous timestamp.
        return last_seen_timestamp
    timestamp_string=None
    try:
        # The page header reads "Dati in real-time aggiornati alle: <ts>";
        # extract the timestamp part and re-format it as dd/mm/YYYY HH:MM:SS.
        timestamp_list=tree.xpath('/html/body/span')
        timestamp_ele=timestamp_list[0].text
        timestamp_string=timestamp_ele[-len('Dati in real-time aggiornati alle: ')+4:].strip()
        timestamp_obj=datetime.strptime(timestamp_string, "%a, %d %b %Y %H:%M:%S %z")
        timestamp_string=timestamp_obj.strftime("%d/%m/%Y %H:%M:%S")
        timestamp_string_date=timestamp_obj.strftime("%d/%m/%Y")
        timestamp_string_time=timestamp_obj.strftime("%H:%M:%S")
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting timestamp: "{e}"!')
        return last_seen_timestamp
    if timestamp_string==last_seen_timestamp:
        # Weather station is not updating data
        logging.info(f'{utility.get_identification_string(location_id, server_name)}, timestamp_string: {timestamp_string}, last_seen_timestamp: {last_seen_timestamp}, skip saving!')
        # TODO Raise an alert
        return last_seen_timestamp
    # Each measurement below is scraped independently: a failing xpath or
    # conversion only logs the exception and leaves that value as None.
    wind_speed_knots=None
    try:
        wind_speed_kmh_elems=tree.xpath('/html/body/table/tbody/tr[2]/td[2]/h1[2]/big/big/big/span/text()')
        wind_speed_kmh=wind_speed_kmh_elems[0].strip()
        if wind_speed_kmh:
            # Page publishes km/h; convert to knots (1 knot = 1.852 km/h).
            wind_speed_knots=float(wind_speed_kmh)/1.852
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting wind_speed_knots: "{e}"!')
    wind_direction_deg=None
    try:
        wind_direction_ele=tree.xpath('/html/body/table/tbody/tr[2]/td[2]/h4/big/big/span/big/big/text()')
        wind_direction=wind_direction_ele[0]
        # Cardinal direction label (e.g. "NE") converted to degrees.
        wind_direction_deg=utility.convert_wind_direction_to_deg(wind_direction)
        if wind_direction_deg is None:
            logging.info(f'{utility.get_identification_string(location_id, server_name)}, Unknown wind_direction: "{wind_direction}"!')
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting wind_direction_deg: "{e}"!')
    barometric_pressure_ssl_hPa=None
    try:
        barometric_pressure_ssl_ele=tree.xpath('/html/body/table/tbody/tr[2]/td[3]/h1[2]/big/span')
        barometric_pressure_ssl=barometric_pressure_ssl_ele[0].text.strip()
        if barometric_pressure_ssl:
            barometric_pressure_ssl_hPa=float(barometric_pressure_ssl)
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting barometric_pressure_ssl_hPa: "{e}"!')
    rain_today_mm=None
    try:
        rain_today_ele = tree.xpath('/html/body/table/tbody/tr[4]/td[2]/h1[2]/big/span')
        rain_today=rain_today_ele[0].text.split()[0].strip()
        if rain_today:
            rain_today_mm=float(rain_today)
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting rain_today_mm: "{e}"!')
    rain_rate_mmh=None
    try:
        rain_rate_ele=tree.xpath('/html/body/table/tbody/tr[4]/td[2]/h2')
        # Rate is the second whitespace-separated token of the cell text.
        rain_rate=rain_rate_ele[0].text.split(" ")[1].strip()
        if rain_rate:
            rain_rate_mmh=float(rain_rate)
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting rain_rate_mmh: "{e}"!')
    temperature_cels=None
    try:
        temperature_ele=tree.xpath('/html/body/table/tbody/tr[2]/td[1]/h1[3]/big/big/big')
        # Drop the trailing degree symbol and unit.
        temperature=temperature_ele[0].text.split("°")[0].strip()
        if temperature:
            temperature_cels=float(temperature)
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting temperature_cels: "{e}"!')
    rel_humidity=None
    try:
        humidity_ele=tree.xpath('/html/body/table/tbody/tr[3]/td/h1[2]/big/span')
        humidity=humidity_ele[0].text.split(" %")[0].strip()
        if humidity:
            # Stored as a 0..1 fraction, not a percentage.
            rel_humidity=float(humidity)/100
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting rel_humidity: "{e}"!')
    uv_index=None
    try:
        uv_index_ele=tree.xpath('/html/body/table/tbody/tr[4]/td[1]/h1[2]/big/span')
        uv_index=uv_index_ele[0].text.strip()
        uv_index=float(uv_index)
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting uv_index: "{e}"!')
    heat_index_cels=None
    try:
        heat_index_ele=tree.xpath('/html/body/table/tbody/tr[2]/td[1]/h3[4]/big/span')
        heat_index=heat_index_ele[0].text.strip().split(" ")[3].strip()
        if heat_index:
            heat_index_cels=float(heat_index)
    except Exception as e:
        logging.exception(f'{utility.get_identification_string(location_id, server_name)}, exception getting heat_index_cels: "{e}"!')
    # Assemble the sample record and hand it over to the utility layer.
    meteo_data_dict={}
    meteo_data_dict["timestamp_string"]=timestamp_string
    meteo_data_dict["timestamp_string_date"]=timestamp_string_date
    meteo_data_dict["timestamp_string_time"]=timestamp_string_time
    meteo_data_dict["wind_speed_knots"]=wind_speed_knots
    meteo_data_dict["wind_direction_deg"]=wind_direction_deg
    meteo_data_dict["barometric_pressure_ssl_hPa"]=barometric_pressure_ssl_hPa
    meteo_data_dict["rain_today_mm"]=rain_today_mm
    meteo_data_dict["rain_rate_mmh"]=rain_rate_mmh
    meteo_data_dict["temperature_cels"]=temperature_cels
    meteo_data_dict["rel_humidity"]=rel_humidity
    meteo_data_dict["uv_index"]=uv_index
    meteo_data_dict["heat_index_cels"]=heat_index_cels
    if log:
        utility.log_sample(location_id, server_name, meteo_data_dict)
    if not utility.check_minimum_data(location_id, server_name, meteo_data_dict):
        return last_seen_timestamp
    utility.save(location_id, server_name, meteo_data_dict)
    return timestamp_string
if __name__=="__main__":
    # Standalone run: start a test scan for location id 1.
    utility.test_starter(1)  # Location id
| [
"d.dr@libero.it"
] | d.dr@libero.it |
e193c720a834640102192c91e1ebc0f7a0c50778 | 8fa191cd4a67431a04eff62d35122ee83cc7b0af | /bookwyrm/migrations/0100_shelf_description.py | 18185b17def91702d69be55cd555a444186df05a | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | bookwyrm-social/bookwyrm | 24678676a7a58dba96641194dfae3fffbf01574d | 0f8da5b738047f3c34d60d93f59bdedd8f797224 | refs/heads/main | 2023-08-20T21:45:30.957277 | 2023-08-19T23:41:50 | 2023-08-19T23:41:50 | 236,415,735 | 1,398 | 216 | NOASSERTION | 2023-09-08T20:43:06 | 2020-01-27T03:51:54 | Python | UTF-8 | Python | false | false | 416 | py | # Generated by Django 3.2.5 on 2021-09-28 23:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional free-text
    # ``description`` column (max 500 chars) to the Shelf model.

    dependencies = [
        ("bookwyrm", "0099_readthrough_is_active"),
    ]

    operations = [
        migrations.AddField(
            model_name="shelf",
            name="description",
            field=models.TextField(blank=True, max_length=500, null=True),
        ),
    ]
| [
"mousereeve@riseup.net"
] | mousereeve@riseup.net |
d4bafe4fbfa94d284fa47da04b1100c99714a4c5 | 39665cb5ec2481bc3503a32c48d37e4ff86bece9 | /use_django/venv/Scripts/easy_install-script.py | 1c1a22b9cc10dcbc4e4c89ebaf345c085a73c2fd | [] | no_license | bingjiegu/for_django | 60d96e5678ec809a4e56388e1acf9e23f8c98d95 | cf37f517829d07f6d9ca5ef9b93e3ed0d20d8a75 | refs/heads/master | 2020-04-10T20:29:51.471653 | 2019-11-05T04:25:19 | 2019-11-05T04:25:19 | 161,269,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | #!D:\git\use_django\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper; do not edit by hand.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.py(w)"/".exe" suffix added by the
    # Windows script launcher so the entry point sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"bingjie07191223@sina.com"
] | bingjie07191223@sina.com |
02391386ed128318c6a077cce242735d4d2cb214 | aab9c365899202cffb144b682458389ae3ad3169 | /mysite/settings.py | 5e75d7d74968f203074423c30e0485c5beac1276 | [] | no_license | rocio-obeso/my-first-blog | f6ec5a8c79a01be3ddbbcf265fef533db6b66f02 | 5e54c6e16dc743a47dc9599e452ccf49af56d931 | refs/heads/master | 2022-12-25T22:15:09.781583 | 2020-10-02T17:21:57 | 2020-10-02T17:21:57 | 300,689,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,206 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.16.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; for a production
# deployment it should be rotated and loaded from the environment instead.
SECRET_KEY = '09vcs)_$336c6v-swyj%t(g)g97ri85^+%s)9k5h6$4iez1cmz'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Local development host plus any PythonAnywhere subdomain.
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog.apps.BlogConfig'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database (development default: on-disk SQLite next to the project)
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-es'

TIME_ZONE = 'Europe/Berlin'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') | [
"rocio.obeso@gmail.com"
] | rocio.obeso@gmail.com |
ed78e6a932f361509c7b2a9d7e57906f6d9a5d0f | b0485dc3e30dc0170391e131ec00ccb335768817 | /_build.py | afeb10aaecab2d7aba8fdf9720bc4faddcca2854 | [] | no_license | leisvip/djc_helper | 8a54483780bcb6ec3a5316a869d5652cfad393f7 | 9e5982047ce6db05f09db3d81e7f4df6303f21d7 | refs/heads/master | 2023-06-25T07:06:13.307176 | 2021-07-19T02:09:09 | 2021-07-19T02:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,837 | py | # 编译脚本
import argparse
import os
import shutil
import subprocess
from _init_venv_and_requirements import init_venv_and_requirements
from log import logger, color
from util import human_readable_size, show_head_line
def build(disable_douban=False):
    """Build all distributable executables with PyInstaller.

    Ensures the ``.venv`` virtualenv exists with its requirements installed,
    then compiles each entry of ``build_configs`` into a single-file
    executable and copies it to its target directory.

    :param disable_douban: when True, do not use the douban pip mirror while
        installing requirements.
    """
    # Paths of the virtualenv and of the pyinstaller binary inside it.
    venv_path = ".venv"
    pyinstaller_path = os.path.join(venv_path, "Scripts", "pyinstaller")
    # Make sure the virtualenv exists and its requirements are installed.
    init_venv_and_requirements(".venv", disable_douban)
    show_head_line(f"将使用.venv环境进行编译", color("bold_yellow"))
    # (source file, exe name, icon, target dir, excluded modules, extra args)
    build_configs = [
        ("main.py", "DNF蚊子腿小助手.exe", "utils/icons/DNF蚊子腿小助手.ico", ".", ["PyQt5"], []),
        ("auto_updater.py", "auto_updater.exe", "", "utils", ["PyQt5"], []),
        ("ark_lottery_special_version.py", "DNF蚊子腿小助手_集卡特别版.exe", "utils/icons/ark_lottery_special_version.ico", ".", ["PyQt5"], []),
        ("config_ui.py", "DNF蚊子腿小助手配置工具.exe", "utils/icons/config_ui.ico", ".", [], ["--noconsole"]),
    ]
    for idx, config in enumerate(build_configs):
        prefix = f"{idx + 1}/{len(build_configs)}"
        src_path, exe_name, icon_path, target_dir, exclude_modules, extra_args = config
        logger.info(color("bold_yellow") + f"{prefix} 开始编译 {exe_name}")
        cmd_build = [
            pyinstaller_path,
            '--name', exe_name,
            '-F',
            src_path,
        ]
        if icon_path != "":
            cmd_build.extend(['--icon', icon_path])
        for module in exclude_modules:
            cmd_build.extend(['--exclude-module', module])
        cmd_build.extend(extra_args)
        logger.info(f"{prefix} 开始编译 {exe_name},命令为:{' '.join(cmd_build)}")
        subprocess.call(cmd_build)
        logger.info(f"编译结束,进行善后操作")
        # Copy the built binary to its target directory.
        logger.info(f"复制{exe_name}到目标目录{target_dir}")
        # makedirs(..., exist_ok=True) is race-free and also handles nested
        # target directories, unlike the previous isdir()+mkdir() pair.
        os.makedirs(target_dir, exist_ok=True)
        target_path = os.path.join(target_dir, exe_name)
        shutil.copyfile(os.path.join("dist", exe_name), target_path)
        # Remove PyInstaller temporary artifacts.
        logger.info("删除临时文件")
        for directory in ["build", "dist", "__pycache__"]:
            shutil.rmtree(directory, ignore_errors=True)
        os.remove(f"{exe_name}.spec")
        filesize = os.path.getsize(target_path)
        logger.info(color("bold_green") + f"{prefix} 编译{exe_name}结束,最终大小为{human_readable_size(filesize)}")
    logger.info("done")
def parse_args():
    """Parse the build script's command line flags."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--disable_douban", action='store_true')
    return parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse flags, then build every executable.
    args = parse_args()
    build(args.disable_douban)
| [
"fzls.zju@gmail.com"
] | fzls.zju@gmail.com |
42e95b4be95d83bcba4b00923df10849d38dd895 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03393/s345995802.py | 343e2097a0d4f2b25bd715fd2830e3222965ec14 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | S = input()
def solve(s):
    """Return the lexicographically smallest "diverse word" strictly greater
    than *s*, or the string "-1" when none exists.

    A diverse word uses each lowercase letter at most once.  If *s* uses
    fewer than 26 letters, appending the smallest unused letter is the
    answer.  Otherwise *s* is a permutation of the whole alphabet, so the
    answer must be shorter: repeatedly drop the last letter and try to bump
    the new last letter to a larger letter not used by the prefix.
    """
    used = set(s)
    if len(s) < 26:
        # Some letter is unused, so this loop always finds the answer.
        for code in range(ord('a'), ord('z') + 1):
            candidate = chr(code)
            if candidate not in used:
                return s + candidate
    while len(s) > 1:
        used.remove(s[-1])
        s = s[:-1]
        # Replace the (new) last character with a larger unused letter.
        for code in range(ord(s[-1]), ord('z') + 1):
            candidate = chr(code)
            if candidate not in used:
                return s[:-1] + candidate
    return "-1"


if __name__ == "__main__":
    # Original script read stdin at import time; the guard keeps the same
    # stdin/stdout behavior while making the module import-safe.
    print(solve(input()))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.