blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6acf9c394b3567e8c8c10cc9511edefca51eb2c1 | d9d8f69d3c8bcde622f77335844266d837c0e869 | /computer-vision/image-classification/mnist_rmdl/cnn.py | 5222182808c98751a6b8076f3469746dbb3186ac | [
"MIT"
] | permissive | tyburam/paperswithcode | 3ec29bc5389170a5b747d98d9d82e0cebea2d95d | fcea3fac37e5bf10bb0284216ef7aded4c0c778b | refs/heads/master | 2020-05-03T11:08:22.282849 | 2019-03-31T09:46:17 | 2019-03-31T09:46:17 | 178,594,261 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | import tensorflow as tf
import random
from tensorflow.keras.layers import Flatten, Dense, Dropout, Conv2D, MaxPooling2D
from tensorflow.keras.constraints import MaxNorm
class CNN(tf.keras.Model):
    """Convolutional network with a randomly sampled architecture.

    Both the depth (number of conv -> maxpool -> dropout groups) and the
    width (filter count of each conv layer) are drawn with random.choice
    from the given ranges, so every instance is a different architecture
    (RMDL-style random model search).
    """

    def __init__(self, shape, number_of_classes, min_hidden_layer_cnn=3, max_hidden_layer_cnn=10,
                 min_nodes_cnn=128, max_nodes_cnn=512, dropout=0.05):
        super(CNN, self).__init__()
        # Candidate filter counts and layer counts.  NOTE: range() excludes
        # its upper bound, so max_nodes_cnn / max_hidden_layer_cnn can never
        # actually be drawn.
        values = list(range(min_nodes_cnn, max_nodes_cnn))
        l_values = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        n_layers = random.choice(l_values)
        conv_count = random.choice(values)
        # Two fixed leading conv layers sharing the same sampled width.
        self.conv0 = Conv2D(conv_count, (3, 3), padding='same', input_shape=shape, activation='relu')
        self.conv1 = Conv2D(conv_count, (3, 3), activation='relu')
        # Randomly deep stack of conv/pool/dropout groups, kept in a plain
        # list and applied strictly in order by call().  Each group samples
        # its own filter count.
        self.n_conv = []
        for i in range(n_layers):
            conv_count = random.choice(values)
            self.n_conv.append(Conv2D(conv_count, (3, 3), padding='same', activation='relu'))
            self.n_conv.append(MaxPooling2D(pool_size=(2, 2)))
            self.n_conv.append(Dropout(dropout))
        # Classifier head: flatten -> dense -> dropout -> softmax over classes.
        # MaxNorm(3) caps the L2 norm of each output unit's weight vector.
        self.flat = Flatten()
        self.d0 = Dense(256, activation='relu')
        self.drop = Dropout(dropout)
        self.d1 = Dense(number_of_classes, activation='softmax', kernel_constraint=MaxNorm(3))

    def call(self, x):
        """Forward pass: fixed convs, sampled groups, then the dense head."""
        x = self.conv0(x)
        x = self.conv1(x)
        for i in range(len(self.n_conv)):
            x = self.n_conv[i](x)
        x = self.flat(x)
        x = self.d0(x)
        x = self.drop(x)
        return self.d1(x)
| [
"tyburam@hotmail.com"
] | tyburam@hotmail.com |
8d77c1ca5725c5efe3918715e630d4c0b280af6f | cf803d382d6e0bc7492d787e91a695a2fda944b8 | /model.py | a1971dd66b502e9a7ab9cad39d075165745a907a | [
"BSD-2-Clause"
] | permissive | parenthetical-e/fmri | d676d524cf1606f098864c5bf9e98607674db1ab | 32c5571d8767684ec6586320e85485cd89ed9327 | refs/heads/master | 2021-01-02T22:17:28.243141 | 2020-04-07T06:07:26 | 2020-04-07T06:07:26 | 3,848,746 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | """
A set of functions for creating or manipulating files needed for design
matrices, both in spm and python.
"""
def spm_onsets(trialfile='', durations=3, recode=None):
    """Map <trialfile> (a 1d csv) into onset/TR time.

    Parameters
    ----------
    trialfile : str
        Path to a CSV file whose first row holds the trial codes.
    durations : int or list/tuple of int
        Trial length(s) in TRs.  An int means every trial has the same
        length; a list/tuple gives one length per trial.
    recode : dict or None
        Optional mapping {old_code: new_code}, e.g. {1: 1, 2: 1, 3: 1}
        collapses 1, 2, 3 into 1.  Any code without an entry is silently
        left as is.

    Returns
    -------
    (onsets, durations) : (list, list)
        ``onsets`` holds each (possibly recoded) trial code followed by
        ``d - 1`` zero fillers (trial 2 with duration 3 -> [2, 0, 0]).
        ``durations`` is the normalized per-trial duration list.

    Raises
    ------
    TypeError
        If <durations> is neither an int nor a list/tuple.
    """
    import csv

    # 'with' guarantees the file handle is closed even if parsing fails.
    # next() (not the Python-2-only .next()) pulls the single data row.
    with open(trialfile, 'r') as fs:
        trials = next(csv.reader(fs))

    # Normalize durations so there is exactly one entry per trial; this
    # also makes the zip() below well-defined for the int case.
    if isinstance(durations, int):
        durations = [durations, ] * len(trials)
    elif isinstance(durations, (list, tuple)):
        durations = list(durations)
    else:
        raise TypeError('<durations> must be an int, list or tuple.')

    if recode is not None:
        print('Recoding....')
        # dict.get(t, t) falls back to the original code, so any value
        # without a key is silently left as is.
        trials = [recode.get(t, t) for t in trials]

    # Map the trialfile data into TR/onset time.
    onsets = []
    for t, d in zip(trials, durations):
        onsets.extend([t, ] + [0, ] * (d - 1))
        ## if t = 2 and d = 3 then [t,] + [0,]*(d-1)
        ## should give the list: [2 0 0]
    return onsets, durations
| [
"Erik.Exists@gmail.com"
] | Erik.Exists@gmail.com |
637b2c8cd0de5637b00c4895aefbfe5ad720f5b4 | fbac1fc19ca5736aa2ed4a8d846760bec35d9ec6 | /django_tutorial/django_tutorial/settings.py | c5b9d2ade23c39d7b1a6ac6b3d5c93cde9090ed8 | [] | no_license | dsmsfans/Router | 390984b0ee1045d13f55f8935d4a974ce3a23a36 | c685d50c799abadc8405c7bb64df7781cab08587 | refs/heads/master | 2020-03-27T02:41:02.178550 | 2018-09-04T06:22:09 | 2018-09-04T06:22:09 | 145,810,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,344 | py | """
Django settings for django_tutorial project.
Generated by 'django-admin startproject' using Django 1.11.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')9&7j0-_@s=9bnh#8*7*rlr^^%d7g21=i+bhxwy$0y*iv48v1)'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: with DEBUG=True Django only serves localhost-style hosts.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local app.
    'Router'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'django_tutorial.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates in addition to
        # per-app template dirs (APP_DIRS=True).
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'django_tutorial.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development default: file-based SQLite database in the repo root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
# NOTE(review): relative 'static' resolves against the process CWD, not
# BASE_DIR -- os.path.join(BASE_DIR, 'static') is the usual intent; confirm.
STATICFILES_DIRS = (os.path.join('static'),)
| [
"aaa40066@gmail.com"
] | aaa40066@gmail.com |
4906a33c2bde49d3d89e48c0aa86f333a1aef478 | 1602b8f6f40e27269a6d9fe42dbc720a5127b175 | /fleet/category.py | 9b756897fb0f2b29b020ab0444a68c1526aa3707 | [] | no_license | mit-jp/fleet-model | a9f581c2cb56196a13e2db8ef883c1f8b61b2682 | 2c1b293299741a076384114572dc74a988bb8581 | refs/heads/master | 2020-04-11T01:30:26.634473 | 2017-01-29T04:08:31 | 2017-01-29T04:08:31 | 32,412,401 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py |
class Category:
    """Rudimentary ordered tree data structure for vehicle classes.

    Each node carries a string label, an optional parent and an ordered
    list of child Category nodes, built recursively from a nested dict
    such as ``{'car': {'sedan': {}, 'suv': {}}}``.
    """
    _parent = None
    _children = []
    _label = ''

    def __init__(self, label, children=dict(), parent=None):
        # NOTE: the dict() default is never mutated, so sharing it across
        # calls is harmless; kept for interface compatibility.
        self._parent = parent
        self._label = label
        try:
            self._children = list([Category(k, v, self) for k, v in
                                   children.items()])
        except AttributeError:
            # <children> was not dict-like: give this instance its OWN
            # empty list rather than silently sharing the class-level one.
            self._children = []

    def __str__(self):
        return self._label

    def children(self):
        """Return the labels of the direct children, in order."""
        return list(map(str, self._children))

    def parent(self):
        """Return the parent's label ('None' for a root node)."""
        return str(self._parent)

    def nodes(self):
        """Return this node's label followed by all descendant labels."""
        return sum([child.nodes() for child in self._children], [self._label])

    def leaves(self, root=None):
        """Return the labels of all leaf nodes under this node.

        <root> was never used; it is kept (now optional) so existing
        callers that passed it keep working.
        """
        if len(self._children) == 0:
            # Must be a list (not a bare string) so the sum() in the
            # recursive case concatenates label lists, not characters.
            return [self._label]
        else:
            return sum([child.leaves() for child in self._children], [])

    def find(self, label):
        """Return the subtree with *label* at its head, or None."""
        if label == self._label:
            return self
        for child in self._children:
            result = child.find(label)
            if result:
                return result
        return None
| [
"mail@paul.kishimoto.name"
] | mail@paul.kishimoto.name |
62d106841b8e0b9e817e8697892d3be053ff2269 | 7eab259c9134c862d423a3a2b98593ac9dea5cb4 | /pommerman/research/roberta_run_on_cluster.py | 018fc42b0a33cae3d8fe971b7284b17d91ccf21f | [
"Apache-2.0"
] | permissive | cinjon/playground | 73b8dc11f4dd7c3e4e4a5ad0ab219510c635a449 | ad9ec58b2157fa6102636e7f61ef9a712c507a90 | refs/heads/master | 2021-04-15T12:33:46.096603 | 2018-11-25T23:58:04 | 2018-11-25T23:58:04 | 126,177,416 | 1 | 1 | null | 2018-03-21T12:46:52 | 2018-03-21T12:46:52 | null | UTF-8 | Python | false | false | 5,020 | py | """Run on the cluster
NOTE: See local_config.template.py for a local_config template.
"""
import os
import sys
import itertools

import local_config

# All run artifacts for this experiment live under
# <cluster_directory>/florensa (see local_config.template.py).
directory = os.path.join(local_config.cluster_directory, 'florensa')
email = local_config.email
# Slurm stdout/stderr logs and generated submission scripts.
slurm_logs = os.path.join(directory, "slurm_logs")
slurm_scripts = os.path.join(directory, "slurm_scripts")
if not os.path.exists(slurm_logs):
    os.makedirs(slurm_logs)
if not os.path.exists(slurm_scripts):
    os.makedirs(slurm_scripts)
# Short codes for flag names, used by train_ppo_job to build a compact but
# distinguishable job name / filename suffix.  Flags with no entry here are
# simply omitted from the name (they still go on the command line).
# NOTE(review): 'num-mini-batch' appears twice below; the later 'nmb' entry
# silently wins in the dict literal.
abbr = {
    'lr': 'lr',
    'board-size': 'bs',
    'how-train': 'ht-',
    'num-steps': 'ns',
    'distill-epochs': 'dstlepi',
    'num-battles-eval': 'nbe',
    'gamma': 'gma',
    'set-distill-kl': 'sdkl',
    'num-channels': 'nc',
    'num-processes': 'np',
    'config': 'cfg-',
    'model-str': 'm-',
    'num-mini-batch': 'nmbtch',
    'minibatch-size': 'mbs',
    'log-interval': 'log',
    'save-interval': 'sav',
    'expert-prob': 'exprob',
    'num-steps-eval': 'nse',
    'use-value-loss': 'uvl',
    'num-episodes-dagger': 'ned',
    'num-mini-batch': 'nmb',
    'use-lr-scheduler': 'ulrs',
    'half-lr-epochs': 'hlre',
    'use-gae': 'gae',
    'init-kl-factor': 'ikl',
    'state-directory-distribution': 'sdd',
    'anneal-bomb-penalty-epochs': 'abpe',
    'begin-selfbombing-epoch': 'bsbe',
    'item-reward': 'itr',
    'use-second-place': 'usp',
}
def train_ppo_job(flags, jobname=None, is_fb=False,
                  partition="uninterrupted", time=24):
    """Write a slurm submission script for one train_ppo.py run and submit it.

    The script filename encodes every flag that has a short code in ``abbr``
    so runs are distinguishable on disk; ``is_fb`` switches between the
    partition-based cluster style and the plain ``--qos=batch`` style.
    """
    cpu_count = flags["num-processes"]
    jobname = jobname or 'pman'
    sorted_keys = sorted(flags.keys())
    # Compact identifier: <jobname>.<abbr><value>.<abbr><value>...
    encoded_flags = '.'.join('%s%s' % (abbr[key], str(flags[key]))
                             for key in sorted_keys if key in abbr)
    jobnameattrs = '%s.%s' % (jobname, encoded_flags)

    # Full training command with every flag spelled out.
    jobcommand = "python train_ppo.py " + " ".join(
        "--%s %s" % (key, str(flags[key])) for key in sorted_keys)
    print(jobcommand)

    script_path = os.path.join(slurm_scripts, jobnameattrs + '.slurm')
    with open(script_path, 'w') as script:
        lines = [
            "#!/bin/bash\n",
            "#SBATCH --job-name" + "=" + jobname + "\n",
            "#SBATCH --output=%s\n" % os.path.join(slurm_logs, jobnameattrs + ".out"),
            "#SBATCH --error=%s\n" % os.path.join(slurm_logs, jobnameattrs + ".err"),
        ]
        if is_fb:
            lines.append("#SBATCH --partition=%s\n" % partition)
        else:
            lines.append("#SBATCH --qos=batch\n")
        lines.append("#SBATCH --mail-type=END,FAIL\n")
        lines.append("#SBATCH --mail-user=%s\n" % email)
        lines.append("module purge\n")
        script.writelines(lines)
        # Cluster-specific boilerplate (module loads, env activation, ...).
        local_config.write_extra_sbatch_commands(script)
        script.write(jobcommand + "\n")

    # Submit asynchronously (trailing '&') with one GPU on one node.
    if is_fb:
        submit = "sbatch --gres=gpu:1 --nodes=1 "
    else:
        submit = "sbatch --qos batch --gres=gpu:1 --nodes=1 "
    submit += "--cpus-per-task=%s " % cpu_count
    submit += "--mem=64000 --time=%d:00:00 %s &" % (
        time, os.path.join(slurm_scripts, jobnameattrs + ".slurm"))
    os.system(submit)
### Running everything seeds 1,2 for everything except 4,5 for reg-grUBnB for the ICLR paper.
### Reg is the dataset being the optimal one.

# Base flag set shared by every submitted job; per-run fields are filled in
# (on a copy) inside the loop below.
job = {
    "how-train": "grid", "log-interval": 10, "save-interval": 1000,
    "log-dir": os.path.join(directory, "logs"), "num-stack": 1,
    "save-dir": os.path.join(directory, "models"), "num-channels": 32,
    "config": "GridWalls-v4", "model-str": "GridCNNPolicy", "use-gae": "",
    "num-processes": 8, "gamma": 0.99, "board-size": 24,
    "state-directory": os.path.join(directory, "astars110-s100"),
    "batch-size": 102400, "num-mini-batch": 20, "num-frames": 2000000000,
    "state-directory-distribution": "florensa", "genesis-epoch": 1750,
    "step-loss": 0.03,
}
counter = 0
# Cartesian sweep: dataset variant x distribution x seed x lr x step loss.
# "" selects the default astars110-s100 dataset; "-5opt"/"-10opt" select the
# 5/10-optimal variants.
for state_directory in [
        "",
        "-5opt",
        "-10opt",
]:
    for (name, distro) in [
            ("florensa", "florensa"),
    ]:
        for seed in [4,5]:
            for learning_rate in [1e-3]:
                for step_loss in [.03]:
                    # Shallow copy so the base `job` dict stays pristine.
                    j = {k:v for k,v in job.items()}
                    j["run-name"] = "%s-%d" % (name, counter)
                    j["run-name"] = "iclr%d-florensa-%s" % (seed, j["run-name"])
                    j["run-name"] += state_directory
                    if state_directory == "":
                        j["state-directory"] = os.path.join(
                            directory, "astars110-s100", "train")
                        j["log-dir"] += "-100"
                    else:
                        j["state-directory"] += "%s/train" % state_directory
                    # 48-hour wall clock for every job in this sweep.
                    time = 48
                    j["seed"] = seed
                    j["step-loss"] = step_loss
                    j["state-directory-distribution"] = distro
                    j["lr"] = learning_rate
                    j["florensa-starts-dir"] = os.path.join(directory, \
                                                "starts-%s-seed%d" % \
                                                (state_directory, seed))
                    train_ppo_job(j, j["run-name"], is_fb=False, time=time)
                    counter += 1
| [
"raileanu.roberta@gmail.com"
] | raileanu.roberta@gmail.com |
438250392a8ae34b737cfa93c6142f5cde3b8700 | f7f66d1327238f34d0b3b85c1e221616a95aae8c | /memex_dossier/models/tests/test_soft_selectors.py | d39a3a1aee1c05eb19183ae075f968e1dc3530ee | [
"MIT"
] | permissive | biyanisuraj/memex-dossier-open | 820d5afc8a5cf93afc1364fb2a960ac5ab245217 | 43bab4e42d46ab2cf1890c3c2935658ae9b10a3a | refs/heads/master | 2020-06-07T01:34:51.467907 | 2018-10-09T15:44:58 | 2018-10-09T15:44:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py |
from memex_dossier.models.soft_selectors import make_ngram_corpus
def test_make_ngram_corpus():
    """Five copies of repeated boilerplate plus five copies of a normal
    sentence: only the sentence should survive as an n-gram candidate."""
    boilerplate = '''
Enlarge Picture Enlarge Picture Enlarge Picture Enlarge Picture Enlarge Picture Enlarge Picture
'''
    sentence = '''
to decipher a hastily scribbled note
'''
    corpus_clean_visibles = [boilerplate] * 5 + [sentence] * 5
    candidates = make_ngram_corpus(corpus_clean_visibles, 6, True)
    non_empty = [c for c in candidates if c]
    # Every non-empty candidate list is exactly the sentence, and exactly
    # the five sentence documents produced one.
    for candidate in non_empty:
        assert candidate == ['to decipher a hastily scribbled note']
    assert len(non_empty) == 5
| [
"jrf@diffeo.com"
] | jrf@diffeo.com |
d7e0e79c5cab0e0de407fa78ce58b54aa7a2fbb8 | 04586203f8a68e114cfcf3e1ffdea96e93a78712 | /Drowsiness_Detector.py | be535a3f694bbd9022806a2be49276f123847082 | [] | no_license | aaronsum1102/drowsiness_detection | a6c4f5e65a1565ccc4fee8f0ae6453e61b567c7d | 6e4a0e85edf642acab204f41f3909f7de355b11d | refs/heads/master | 2021-09-06T14:18:38.588739 | 2018-02-07T12:33:13 | 2018-02-07T12:33:13 | 111,120,988 | 0 | 0 | null | 2018-02-05T14:36:59 | 2017-11-17T15:44:59 | Python | UTF-8 | Python | false | false | 3,684 | py | import cv2
import dlib
import numpy as np
from scipy.spatial import distance as dist
import playsound
from threading import Thread
import os
# Working directory holding the predictor model and alarm sound, plus the
# filenames looked up relative to it.
folder_path = "/Users/PNCHEE/Virtualenvs/tensorflow/dlib"
predictor_path = "shape_predictor_68_face_landmarks.dat"
alarm_path = "Wake-up-sounds.mp3"
image_path = "photo2.jpg"

# Globals that determine drowsiness: an eye-aspect-ratio threshold and the
# number of consecutive below-threshold frames that triggers the alarm.
ear_base = 0.28
ear_threshold_consec_frames = 15

# NOTE(review): on failure this only prints a warning and continues, so the
# later shape_predictor() load will fail with a RuntimeError instead.
try:
    os.chdir(folder_path)
except FileNotFoundError:
    print("\nNo such directory!\n")
# helper function
def rect_to_boundingbox(rect):
    """Convert a dlib rectangle into an (x, y, w, h) bounding-box tuple."""
    left = rect.left()
    top = rect.top()
    width = rect.right() - left
    height = rect.bottom() - top
    return (left, top, width, height)
def shape_to_np(shape, dtype="int", start=36, end=48):
    """Extract facial-landmark (x, y) coordinates as a numpy array.

    Parameters
    ----------
    shape : object
        Predictor output exposing ``part(i)`` points with .x / .y
        attributes (dlib full_object_detection).
    dtype : str or numpy dtype
        dtype of the returned array.
    start, end : int
        Half-open landmark index range to extract.  The defaults (36, 48)
        select the 12 eye landmarks of the 68-point model, matching the
        previously hard-coded behaviour.

    Returns
    -------
    numpy.ndarray of shape (end - start, 2), one (x, y) row per landmark.
    """
    coords = np.zeros((end - start, 2), dtype=dtype)
    for row, i in enumerate(range(start, end)):
        point = shape.part(i)
        coords[row] = (point.x, point.y)
    return coords
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) from six eye landmarks.

    Pairs (1, 5) and (2, 4) give the two vertical distances, pair (0, 3)
    the horizontal one; the ratio shrinks as the eyelids close.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2 * horizontal)
def warning(alarm):
    """Play the alarm sound file at path *alarm*.

    playsound blocks until the clip finishes, which is why the main loop
    runs this on a separate Thread.
    """
    playsound.playsound(alarm)
# Main capture/detection loop: grab webcam frames, find the face, track the
# eye aspect ratio and sound the alarm after enough consecutive low-EAR frames.
face_detector = dlib.get_frontal_face_detector()
try:
    eye_predictor = dlib.shape_predictor(predictor_path)
except RuntimeError:
    # NOTE(review): only warns; eye_predictor stays unbound and the loop
    # below will raise NameError on the first detected face.
    print("\nPlease double check file name of the predictor module!\n")

try:
    # Consecutive below-threshold frame counter.
    counter = 0
    cap = cv2.VideoCapture(0)
    while True:
        # Grab one frame; detection runs on a grayscale, resized copy while
        # the annotations are drawn on the original color frame.
        ret, video = cap.read()
        video_grey = cv2.cvtColor(video, cv2.COLOR_BGR2GRAY)
        video_grey = cv2.resize(video_grey, (640,480))
        # Run the face detector (0 = no upsampling).
        dets = face_detector(video_grey,0)
        if len(dets) > 1:
            cv2.putText(video, "Multiple face detected, adjust camera.", (160, 400),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 3)
        elif len(dets) == 1:
            # Landmarks 36-47 (both eyes) as a (12, 2) array; first six are
            # the left eye, last six the right eye.
            preds = eye_predictor(video_grey, dets[0])
            preds = shape_to_np(preds)
            left_eye = preds[0:6]
            right_eye = preds[6:]
            for x,y in left_eye:
                cv2.circle(video, (x,y), 1, (0, 0, 255), -1)
            for x,y in right_eye:
                cv2.circle(video, (x,y), 1, (0, 0, 255), -1)
            # Compute the average eye aspect ratio over both eyes.
            ear_left = eye_aspect_ratio(left_eye)
            ear_right = eye_aspect_ratio(right_eye)
            ear_average = (ear_left + ear_right) / 2
            # Overlay the live EAR next to the threshold for comparison.
            cv2.putText(video, "EAR_Base: {:.2f}".format(ear_base), (400, 70),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
            cv2.putText(video, "EAR: {:.2f}".format(ear_average), (400, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
            if ear_average < ear_base:
                counter += 1
                cv2.putText(video, "Counter: {}".format(counter), (400, 110),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                if counter >= ear_threshold_consec_frames:
                    counter = 0
                    cv2.putText(video, "Warning!!!", (10, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    # Play the (blocking) alarm on a daemon thread so the
                    # capture loop keeps running.  Fixed: was 't.deamon', a
                    # typo that created a dead attribute and left the thread
                    # non-daemonic.
                    t = Thread(target=warning, args = (alarm_path,))
                    t.daemon = True
                    t.start()
        else:
            # No face in frame: reset the streak.
            counter = 0
        cv2.imshow("Detection", cv2.resize(video, (640,480)))
        if cv2.waitKey(30) & 0xFF == ord("q"):
            break
finally:
    # Always release the camera and close the preview window.
    cap.release()
    cv2.destroyAllWindows()
| [
"noreply@github.com"
] | aaronsum1102.noreply@github.com |
87e8b16a2d83845e4d137ca080069e56f6a1690d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/awentzonline_keras-rtst/keras-rtst-master/keras_rtst/models/style_xfer.py | d88c20d29b26ef489cc52a716031330e201234f5 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,226 | py | '''Texture Network for style transfer.'''
import time
import keras_vgg_buddy
import numpy as np
from keras import activations
from keras import backend as K
from keras.layers import advanced_activations
from keras.layers.core import Layer
from keras.layers.convolutional import AveragePooling2D
from keras.models import Graph
from keras.optimizers import Adam
from .base import create_res_texture_net, create_sequential_texture_net, dumb_objective
from .regularizers import (
AnalogyRegularizer, FeatureContentRegularizer, FeatureStyleRegularizer,
MRFRegularizer, TVRegularizer)
def make_model(args, style_img=None):
    """Build the texture-network Graph, optionally wired for training.

    The texture net maps a 'content' image to a 'texture_rgb' output.  When
    args.train is set, the texnet output and the raw content input are
    concatenated along the batch axis, fed through VGG16, and the loss is
    expressed entirely as regularizers attached to VGG feature layers
    (style/content/MRF/analogy) plus total variation on the texnet output.

    :param args: parsed CLI namespace (weights, layer lists, sizes, ...).
    :param style_img: style image array; required when args.train is set
        (np.expand_dims(style_img, 0) is called unconditionally then).
    :return: compiled keras Graph model.
    """
    model = Graph()
    model.add_input('content', batch_input_shape=(args.batch_size, 3, args.max_height, args.max_width))
    try: # if it's a standard activation then just keep the string
        activations.get(args.activation)
        activation = args.activation
    # NOTE(review): bare except also hides unrelated errors; it is relied on
    # here to fall through to the advanced-activations lookup.
    except: # otherwise we need to look up the class in advanced activations (e.g. LeakyReLU)
        activation = getattr(advanced_activations, args.activation, 'activation function')
    # Generator: plain sequential stack or residual-block texture net.
    if args.sequential_model:
        texnet = create_sequential_texture_net(args.max_height, args.max_width,
            activation=activation, num_res_filters=args.num_res_filters,
            num_inner_blocks=args.num_blocks)
    else:
        texnet = create_res_texture_net(args.max_height, args.max_width,
            activation=activation, num_res_filters=args.num_res_filters,
            num_res_blocks=args.num_blocks)
    # add the texture net to the model
    model.add_node(texnet, 'texnet', 'content')
    model.add_output('texture_rgb', 'texnet')
    # hook up the training network stuff
    if args.train:
        # Stack generated output and original content on the batch axis so
        # one VGG pass produces features for both.
        model.add_node(Layer(), 'vgg_concat', inputs=['texnet', 'content'], concat_axis=0)
        # add VGG and the constraints
        keras_vgg_buddy.add_vgg_to_graph(model, 'vgg_concat', pool_mode=args.pool_mode,
            trainable=False, weights_path=args.vgg_weights)
        # add the regularizers for the various feature layers
        vgg = keras_vgg_buddy.VGG16(args.max_height, args.max_width, pool_mode=args.pool_mode, weights_path=args.vgg_weights)
        print('computing static features')
        # Only extract style-image features for layers some loss term uses.
        feature_layers = set()
        if args.style_weight:
            feature_layers.update(args.style_layers)
        if args.content_weight:
            feature_layers.update(args.content_layers)
        if args.mrf_weight:
            feature_layers.update(args.mrf_layers)
        if args.analogy_weight:
            feature_layers.update(args.analogy_layers)
        style_features = vgg.get_features(np.expand_dims(style_img, 0), feature_layers)
        regularizers = []
        # Each loss term's weight is split evenly across its layers.
        if args.style_weight != 0.0:
            for layer_name in args.style_layers:
                layer = model.nodes[layer_name]
                style_regularizer = FeatureStyleRegularizer(
                    target=style_features[layer_name],
                    weight=args.style_weight / len(args.style_layers))
                style_regularizer.set_layer(layer)
                regularizers.append(style_regularizer)
        if args.content_weight != 0.0:
            for layer_name in args.content_layers:
                layer = model.nodes[layer_name]
                content_regularizer = FeatureContentRegularizer(
                    weight=args.content_weight / len(args.content_layers))
                content_regularizer.set_layer(layer)
                regularizers.append(content_regularizer)
        if args.mrf_weight != 0.0:
            for layer_name in args.mrf_layers:
                layer = model.nodes[layer_name]
                mrf_regularizer = MRFRegularizer(
                    K.variable(style_features[layer_name]),
                    weight=args.mrf_weight / len(args.mrf_layers))
                mrf_regularizer.set_layer(layer)
                regularizers.append(mrf_regularizer)
        if args.analogy_weight != 0.0:
            # Analogy loss additionally needs features of the style *map*.
            style_map_img = keras_vgg_buddy.load_and_preprocess_image(args.style_map_image_path, width=args.max_width, square=True)
            style_map_features = vgg.get_features(np.expand_dims(style_map_img, 0), args.analogy_layers)
            for layer_name in args.analogy_layers:
                layer = model.nodes[layer_name]
                analogy_regularizer = AnalogyRegularizer(
                    style_map_features[layer_name],
                    style_features[layer_name],
                    weight=args.analogy_weight / len(args.analogy_layers))
                analogy_regularizer.set_layer(layer)
                regularizers.append(analogy_regularizer)
        if args.tv_weight != 0.0:
            # Total variation smooths the generated image itself.
            tv_regularizer = TVRegularizer(weight=args.tv_weight)
            tv_regularizer.set_layer(model.nodes['texnet'])
            regularizers.append(tv_regularizer)
        setattr(model.nodes['vgg_concat'], 'regularizers', regularizers) # Gotta put em somewhere?
    print('compiling')
    start_compile = time.time()
    adam = Adam(lr=args.learn_rate, beta_1=0.7)
    # dumb_objective: all real losses live in the regularizers above.
    model.compile(optimizer=adam, loss=dict(texture_rgb=dumb_objective))
    print('Compiled model in {:.2f}'.format(time.time() - start_compile))
    return model
| [
"659338505@qq.com"
] | 659338505@qq.com |
5d3214e939d97eed810c36824298bc5702d18f09 | c654a6f4a5168e4a5eefec1bb207a289a6a4f2fd | /mainpage/migrations/0001_initial.py | 1b0fa1ce32e6819a4c3128a9f937c4ee70a77896 | [
"MIT"
] | permissive | EnzioKam/nusmm | 98ed693b61dd0ac11a8a59ddf035a870e1519639 | ffd3c42b063ebbdefb4ab7114e216575a6b7ea2a | refs/heads/master | 2020-03-18T23:26:34.089809 | 2018-10-05T04:03:59 | 2018-10-05T04:03:59 | 135,403,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # Generated by Django 2.0.6 on 2018-06-03 15:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Mapping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nus_code', models.CharField(max_length=200)),
('nus_credits', models.FloatField()),
('pu_name', models.CharField(max_length=200)),
('pu_code', models.CharField(max_length=200)),
('pu_title', models.CharField(max_length=200)),
('pu_credits', models.FloatField()),
],
),
]
| [
"enziokamhh@gmail.com"
] | enziokamhh@gmail.com |
366ad807aedcc7af54f5060dcaa12dc46f0f7613 | 4beb10c8a8023f4945c996a1487ec1b3968cb5da | /f5_lbaas_dashboard/enabled/_1480_project_loadbalancersv2_panel.py | d89ef94a631dd6c077b8bc716031d61c12c3ef69 | [
"Apache-2.0"
] | permissive | F5Networks/f5-lbaas-dashboard | 7aebb669a27d8ebdc9feaa7f088f9158fb157046 | 62cb1dfbb87c94bdcb3f53f6ec2ab0004ac43d54 | refs/heads/master | 2023-03-28T01:59:58.666570 | 2022-09-27T01:16:34 | 2022-09-27T01:16:34 | 147,327,541 | 0 | 0 | Apache-2.0 | 2022-09-27T01:16:37 | 2018-09-04T10:15:51 | JavaScript | UTF-8 | Python | false | false | 993 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Horizon "enabled" plugin file: registers the Load Balancers panel with the
# dashboard at startup.

# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'loadbalancersv2'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'network'
# Python panel class of the PANEL to be added (dotted path, split across
# adjacent string literals).
ADD_PANEL = (
    'f5_lbaas_dashboard.dashboards.project.loadbalancersv2.panel'
    '.LoadBalancersUI')
| [
"a.zong@f5.com"
] | a.zong@f5.com |
d8017250287f045e7fcc6c74bbd3730a455096c8 | 582cc4d5d711b26d16c67795e6d0e035c737b62f | /src/data/datasets/mimic_cxr/section_parser.py | 7904075b7c11dede08810f8bfc4e12b090cad734 | [
"MIT"
] | permissive | xiaoman-zhang/lovt | 821a1731be2f35f6b641426da4de6c5fc1a0474a | 91cf2094a0e140b8431b8e4ebadc56547a8df6b2 | refs/heads/main | 2023-09-02T12:16:49.168300 | 2021-11-15T14:30:10 | 2021-11-15T14:30:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,850 | py | """
From https://zenodo.org/record/3539363
"""
import re
def section_text(text):
    """Split a radiology report into its ALL-CAPS-headed sections.

    Headers look like "\\n IMPRESSION: ..."; everything before the first
    header is labelled 'preamble', and a report with no headers at all
    becomes a single 'full report' section.  Section names are normalized,
    empty impression/findings sections are dropped, and if neither an
    impression nor a findings section survives, the report's last paragraph
    is promoted to a 'last_paragraph' section.

    Returns a three element tuple:
        sections - list containing the text of each section
        section_names - a normalized version of the section name
        section_idx - list of start indices of the text in the section
    """
    header_re = re.compile(
        r'\n ([A-Z ()/,-]+):\s', re.DOTALL)

    sections = []
    section_names = []
    section_idx = []

    match = header_re.search(text, 0)
    if match is None:
        # No recognisable headers: the whole report is one section.
        sections.append(text)
        section_names.append('full report')
        section_idx.append(0)
    else:
        # Everything before the first header is the preamble.
        sections.append(text[0:match.start(1)])
        section_names.append('preamble')
        section_idx.append(0)

        while match:
            name = match.group(1).lower()
            # Section body starts right after the header's trailing space.
            body_start = match.end()
            # Skip past the first newline to avoid some bad parses.
            skip = text[body_start:].find('\n')
            if skip == -1:
                skip = 0

            match = header_re.search(text, body_start + skip)
            body_end = len(text) if match is None else match.start()

            sections.append(text[body_start:body_end])
            section_names.append(name)
            section_idx.append(body_start)

    section_names = normalize_section_names(section_names)

    # Drop empty impression/findings sections.  This handles reports that
    # open with a finding-like title (e.g. "CHEST, PA LATERAL:") and ones
    # with multiple findings sections where one is usually empty.  Iterate
    # in reverse so popping does not shift the indices still to visit.
    for i in reversed(range(len(section_names))):
        if section_names[i] in ('impression', 'findings') and not sections[i].strip():
            sections.pop(i)
            section_names.pop(i)
            section_idx.pop(i)

    if 'impression' not in section_names and 'findings' not in section_names:
        # Neither key section survived: promote the final paragraph of the
        # last section into its own 'last_paragraph' section.
        if '\n \n' in sections[-1]:
            paragraphs = sections[-1].split('\n \n')
            sections.append('\n \n'.join(paragraphs[1:]))
            sections[-2] = paragraphs[0]
            section_names.append('last_paragraph')
            section_idx.append(section_idx[-1] + len(sections[-2]))

    return sections, section_names, section_idx
def normalize_section_names(section_names):
    """Map raw section headers to a small canonical vocabulary.

    Resolution order per name (after lower-casing/stripping):
      1. exact match in the frequency/typo table below;
      2. substring match against one of the main section names;
      3. regex match against view-description phrases -> 'findings';
      4. otherwise the name is left unchanged.
    The inline numbers are occurrence counts in the MIMIC-CXR corpus.
    """
    # first, lower case all
    section_names = [s.lower().strip() for s in section_names]

    frequent_sections = {
        "preamble": "preamble",  # 227885
        "impression": "impression",  # 187759
        "comparison": "comparison",  # 154647
        "indication": "indication",  # 153730
        "findings": "findings",  # 149842
        "examination": "examination",  # 94094
        "technique": "technique",  # 81402
        "history": "history",  # 45624
        "comparisons": "comparison",  # 8686
        "clinical history": "history",  # 7121
        "reason for examination": "indication",  # 5845
        "notification": "notification",  # 5749
        "reason for exam": "indication",  # 4430
        "clinical information": "history",  # 4024
        "exam": "examination",  # 3907
        "clinical indication": "indication",  # 1945
        "conclusion": "impression",  # 1802
        "chest, two views": "findings",  # 1735
        "recommendation(s)": "recommendations",  # 1700
        "type of examination": "examination",  # 1678
        "reference exam": "comparison",  # 347
        "patient history": "history",  # 251
        "addendum": "addendum",  # 183
        "comparison exam": "comparison",  # 163
        "date": "date",  # 108
        "comment": "comment",  # 88
        "findings and impression": "impression",  # 87
        "wet read": "wet read",  # 83
        "comparison film": "comparison",  # 79
        "recommendations": "recommendations",  # 72
        "findings/impression": "impression",  # 47
        "pfi": "history",
        'recommendation': 'recommendations',
        'wetread': 'wet read',
        # Misspellings observed in the corpus, folded to their intended name.
        'ndication': 'impression',  # 1
        'impresson': 'impression',  # 2
        'imprression': 'impression',  # 1
        'imoression': 'impression',  # 1
        'impressoin': 'impression',  # 1
        'imprssion': 'impression',  # 1
        'impresion': 'impression',  # 1
        'imperssion': 'impression',  # 1
        'mpression': 'impression',  # 1
        'impession': 'impression',  # 3
        'findings/ impression': 'impression',  # ,1
        'finding': 'findings',  # ,8
        'findins': 'findings',
        'findindgs': 'findings',  # ,1
        'findgings': 'findings',  # ,1
        'findngs': 'findings',  # ,1
        'findnings': 'findings',  # ,1
        'finidngs': 'findings',  # ,2
        'idication': 'indication',  # ,1
        'reference findings': 'findings',  # ,1
        'comparision': 'comparison',  # ,2
        'comparsion': 'comparison',  # ,1
        'comparrison': 'comparison',  # ,1
        'comparisions': 'comparison'  # ,1
    }

    # Phrases describing the X-ray view itself; a header containing one of
    # these is really describing the whole study, i.e. findings.
    p_findings = [
        'chest',
        'portable',
        'pa and lateral',
        'lateral and pa',
        'ap and lateral',
        'lateral and ap',
        'frontal and',
        'two views',
        'frontal view',
        'pa view',
        'ap view',
        'one view',
        'lateral view',
        'bone window',
        'frontal upright',
        'frontal semi-upright',
        'ribs',
        'pa and lat'
    ]
    p_findings = re.compile('({})'.format('|'.join(p_findings)))

    main_sections = [
        'impression', 'findings', 'history', 'comparison',
        'addendum'
    ]
    for i, s in enumerate(section_names):
        if s in frequent_sections:
            section_names[i] = frequent_sections[s]
            continue

        # Substring match groups similar phrasings, e.g. "impression ap"
        # -> 'impression'.
        main_flag = False
        for m in main_sections:
            if m in s:
                section_names[i] = m
                main_flag = True
                break
        if main_flag:
            continue

        m = p_findings.search(s)
        if m is not None:
            section_names[i] = 'findings'

        # if it looks like it is describing the entire study
        # it's equivalent to findings
        # group similar phrasings for impression

    return section_names
def custom_mimic_cxr_rules():
    """Return hand-curated overrides for individual MIMIC-CXR studies.

    Returns:
        tuple(dict, dict):
            * study id -> section name to use in place of the parsed one
            * study id -> [start, end] character offsets locating the
              findings text; [0, 0] marks reports with no findings at all.
    """
    # Studies whose section headers are mislabelled in the raw report text.
    section_overrides = {
        "s50913680": "recommendations",
        "s59363654": "examination",
        "s59279892": "technique",
        "s59768032": "recommendations",
        "s57936451": "indication",
        "s50058765": "indication",
        "s53356173": "examination",
        "s53202765": "technique",
        "s50808053": "technique",
        "s51966317": "indication",
        "s50743547": "examination",
        "s56451190": "note",
        "s59067458": "recommendations",
        "s59215320": "examination",
        "s55124749": "indication",
        "s54365831": "indication",
        "s59087630": "recommendations",
        "s58157373": "recommendations",
        "s56482935": "recommendations",
        "s58375018": "recommendations",
        "s54654948": "indication",
        "s55157853": "examination",
        "s51491012": "history",
    }

    # Studies where the findings span must be taken from raw character offsets.
    offset_overrides = {
        "s50525523": [201, 349],
        "s57564132": [233, 554],
        "s59982525": [313, 717],
        "s53488209": [149, 475],
        "s54875119": [234, 988],
        "s50196495": [59, 399],
        "s56579911": [59, 218],
        "s52648681": [292, 631],
        "s59889364": [172, 453],
        "s53514462": [73, 377],
        "s59505494": [59, 450],
        "s53182247": [59, 412],
        "s51410602": [47, 320],
        "s56412866": [522, 822],
        "s54986978": [59, 306],
        "s59003148": [262, 505],
        "s57150433": [61, 394],
        "s56760320": [219, 457],
        "s59562049": [158, 348],
        "s52674888": [145, 296],
        "s55258338": [192, 568],
        "s59330497": [140, 655],
        "s52119491": [179, 454],
    }
    # These reports contain no findings text anywhere in the entire report.
    for study_id in ("s58235663", "s50798377", "s54168089", "s53071062",
                     "s56724958", "s54231141", "s53607029", "s52035334"):
        offset_overrides[study_id] = [0, 0]

    return section_overrides, offset_overrides
| [
"philip.j.mueller@tum.de"
] | philip.j.mueller@tum.de |
b26436e93cc297033d7872567d5a1e8d9bfb742f | 741997b9a078969c889e3602537767341bc6fd86 | /IMDB_scrape_Top_rated.py | 7ac3f20015b8f6f5250c1cf793864ddade998b86 | [] | no_license | Jaseem-Mohammed/Scraping-IMDB-for-Movie-deatails | 8042c44776af5adb3763c5b4d4568c631bb61155 | 98b7954c444962ad771f544b4dd28462307028bc | refs/heads/main | 2023-02-05T13:03:30.537910 | 2020-12-22T07:03:16 | 2020-12-22T07:03:16 | 323,547,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,900 | py | from time import sleep
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Scrape IMDB's "advanced search" listing of feature films and collect one
# row per movie into parallel lists (one list per output column). Missing
# fields are recorded as the string 'NaN' so all lists stay the same length.
rank = []
name = []
year = []
certificate = []
runtime = []
genre = []
rating = []
story = []
director = []
cast1 = []
cast2 = []
cast3 = []
cast4 = []
gross = []

# Base search URL: feature films with at least 10,000 votes.
base = 'https://www.imdb.com/search/title/?title_type=feature&num_votes=10000'

# Page through the result list 50 titles at a time (6452 total matches).
for i in range(1, 6452, 50):
    # NOTE(review): the comma right after {base} looks accidental but is part
    # of the URL actually requested — confirm before "fixing" it.
    response = requests.get(
        f'{base},&countries=us&sort=user_rating,desc&start={i}&ref_=adv_nxt')
    print(f'Scraping progress : {i} / 6452...')
    if response.status_code == 200:
        print('Response : OK')
    # NOTE(review): the original indentation was lost; assuming only the log
    # line above is gated on the status code, so parsing proceeds regardless.
    soup = BeautifulSoup(response.content, 'html.parser')
    # One 'mode-advanced' div per movie card on the results page.
    full = soup.find_all(class_='mode-advanced')
    for d in full:
        rank.append(d.h3.span.get_text())
        name.append(d.h3.a.get_text())
        # NOTE(review): the except clauses below catch IndexError, but a
        # missing tag from .find()/attribute access would raise
        # AttributeError instead — presumably an untested path; confirm.
        try:
            year.append(d.h3.select('span')[1].get_text())
        except IndexError:
            year.append('NaN')
        try:
            certificate.append(d.p.span.get_text())
        except IndexError:
            certificate.append('NaN')
        try:
            runtime.append(d.p.find(class_='runtime').get_text())
        except IndexError:
            runtime.append('NaN')
        try:
            genre.append(d.p.find(class_='genre').get_text())
        except IndexError:
            genre.append('NaN')
        # Star rating is stored in the 'data-value' attribute of the bar div.
        rating.append(d.find(class_='ratings-bar').find('div')['data-value'])
        try:
            story.append(d.select('.text-muted')[2].get_text())
        except IndexError:
            story.append('NaN')
        # Third <p> holds "Director: ... | Stars: ..."; first link is the
        # director, the following links are up to four cast members.
        director.append(d.select('p')[2].select('a')[0].get_text())
        try:
            cast1.append(d.select('p')[2].select('a')[1].get_text())
        except IndexError:
            cast1.append('NaN')
        try:
            cast2.append(d.select('p')[2].select('a')[2].get_text())
        except IndexError:
            cast2.append('NaN')
        try:
            cast3.append(d.select('p')[2].select('a')[3].get_text())
        except IndexError:
            cast3.append('NaN')
        try:
            cast4.append(d.select('p')[2].select('a')[4].get_text())
        except IndexError:
            cast4.append('NaN')
        try:
            gross.append(d.select('div')[3].select('p')[
                3].select('span')[4].get_text())
        except IndexError:
            gross.append('NaN')
    # Throttling between pages was disabled (sleep import fused above).
    #print('waiting 2 seconds...')
    #sleep(2)

# Assemble the parallel lists into one DataFrame and persist it.
df = pd.DataFrame({
    'rank': rank,
    'name': name,
    'year': year,
    'certificate': certificate,
    'runtime': runtime,
    'genre': genre,
    'rating': rating,
    'story': story,
    'director': director,
    'cast1': cast1,
    'cast2': cast2,
    'cast3': cast3,
    'cast4': cast4,
    'gross': gross})
df.to_csv('imdb_top_movies_12-19-20.csv', index=False)
# df.info() prints its own summary and returns None, so this also prints None.
print(df.info())
| [
"noreply@github.com"
] | Jaseem-Mohammed.noreply@github.com |
f0708c24fd6f6ad48737cffa37907961622cd1ca | 911e7b25961067339c31957ff41ebdb3c355d948 | /_utils/python/libs_my_test/test_http_util2.py | bce813fa41030ee2228f882b54d33f586e0f1d73 | [] | no_license | qlong2012/notes-1 | c93efcc9a70f786929ef7e4c053e266e2bf354ad | 78913e8235845d4a94dd19f730d607df754da7fe | refs/heads/master | 2020-05-20T01:05:04.678662 | 2019-04-25T10:06:37 | 2019-04-25T10:06:53 | 185,303,355 | 1 | 0 | null | 2019-05-07T02:10:14 | 2019-05-07T02:10:14 | null | UTF-8 | Python | false | false | 17,681 | py | #!python
# -*- coding:utf-8 -*-
"""
公用函数(http请求处理) http_util2.py 的测试
Created on 2019/3/14
Updated on 2019/3/14
@author: Holemar
依赖第三方库:
tornado==3.1.1
通过用线程启动一个 tornado 服务器来测试 http 请求
(使用 mock 的方式需要很了解 urllib 库,暂没那功力,做不到)
todo:测试 压缩 和 线程 时,使用读取日志的方式,不太精确。后续需优化
"""
import os
import logging
import unittest
import threading
import __init__
from libs_my import http_util2 as http_util
from libs_my import str_util, tornado_util
# 用 Filter 类获取日志信息
NOW_LOG_RECORD = []
class TestFilter(logging.Filter):
def filter(self, record):
global NOW_LOG_RECORD
NOW_LOG_RECORD.append(record) # 把 Filter 获取到的日志信息传递出去,供测试使用
return True
http_util.logger.addFilter(TestFilter())
class TestHttpUtil(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""测试这个类前的初始化动作"""
super(TestHttpUtil, cls).setUpClass()
# 启动请求接收线程
cls.port = tornado_util.get_port()
cls.web = tornado_util.run(cls.port, worker='tornado', threads=True)
@classmethod
def tearDownClass(cls):
"""测试这个类所有函数后的结束动作"""
super(TestHttpUtil, cls).tearDownClass()
cls.web._Thread__stop() # 停掉web线程
# GET 测试
def test_get(self):
url = 'http://127.0.0.1:%d/test_get' % self.port
param_url = url + '?a=11&b=22&c=%E5%93%88&d=%E5%93%88&e='
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_get/?", method='get')
def get_test_get(self, **kwargs):
if self.request.body:
return {"result": -1, "reason":'这是POST请求,请求方式有误!'}
if (self.get_argument('a', '') == '11' and self.get_argument('b', '') == '22' and self.get_argument('c', '') == u'哈' and
self.get_argument('d', '') == u'哈' and self.get_argument('e', '') == ''):
return result
else:
return kwargs
# 普通请求,原样返回
res = http_util.get(param_url)
assert isinstance(res, basestring)
assert res == result
# 参数转换
res = http_util.get(url, {'a':11,'b':'22','c':'哈','d':u'哈','e':None})
assert isinstance(res, basestring)
assert res == result
res2 = http_util.get(url, {'b':'22','c':'哈','d':u'哈','e':0})
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == {'b':'22','c':u'哈','d':u'哈','e':'0'}
# return_json 返回结果转换
res = http_util.get(param_url, return_json=True)
assert isinstance(res, dict)
assert res == {"use_time": "0.0003", "reason": u"访问成功", "version": "2.0.0", "result": 0}
# POST 测试
def test_post(self):
url = 'http://127.0.0.1:%d/test_post' % self.port
param = {'a':11,'b':'22','c':'哈','d':u'哈','e':None}
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_post/?", method='post')
def get_test_post(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
if (self.get_argument('a', '') == '11' and self.get_argument('b', '') == '22' and self.get_argument('c', '') == u'哈' and
self.get_argument('d', '') == u'哈' and self.get_argument('e', '') == ''):
return result
else:
return kwargs
# 无参数请求,原样返回
res = http_util.post(url)
assert isinstance(res, basestring)
assert res == "{}"
# 参数转换
res = http_util.post(url, param)
assert isinstance(res, basestring)
assert res == result
res2 = http_util.post(url, {'b':'22','c':'哈','d':u'哈','e':0})
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == {'b':'22','c':u'哈','d':u'哈','e':'0'}
# return_json 返回结果转换
res = http_util.post(url, param, return_json=True)
assert isinstance(res, dict)
assert res == {"use_time": "0.0003", "reason": u"访问成功", "version": "2.0.0", "result": 0}
# put,patch 请求 测试
def test_put_patch(self):
url = 'http://127.0.0.1:%d/test_put_patch' % self.port
param = {'a':11,'b':'22','c':'哈','d':u'哈','e':None}
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
methods = ['put','patch']
@tornado_util.fn(url=r"/test_put_patch/?", method=methods)
def test_put_patch(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
if (self.get_argument('a', '') == '11' and self.get_argument('b', '') == '22' and self.get_argument('c', '') == u'哈' and
self.get_argument('d', '') == u'哈' and self.get_argument('e', '') == ''):
return result
else:
return kwargs
for method in methods:
fun = getattr(http_util, method)
# 无参数请求,原样返回
res = fun(url)
assert isinstance(res, basestring)
assert res == "{}"
# 参数转换
res = fun(url, param)
assert isinstance(res, basestring)
assert res == result
res2 = fun(url, {'b':'22','c':'哈','d':u'哈','e':0})
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == {'b':'22','c':u'哈','d':u'哈','e':'0'}
# return_json 返回结果转换
res = fun(url, param, return_json=True)
assert isinstance(res, dict)
assert res == {"use_time": "0.0003", "reason": u"访问成功", "version": "2.0.0", "result": 0}
# get 请求,访问不了
res = http_util.get(url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
# post 请求,访问不了
res = http_util.post(url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
# delete,options 请求 测试
def test_delete_options(self):
url = 'http://127.0.0.1:%d/test_delete_options' % self.port
param_url = url + '?a=11&b=22&c=%E5%93%88&d=%E5%93%88&e='
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
methods = ['delete','options']
@tornado_util.fn(url=r"/test_delete_options/?", method=methods)
def test_delete_options(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
return result
for method in methods:
fun = getattr(http_util, method)
# 普通请求,原样返回
res = fun(param_url)
assert isinstance(res, basestring)
assert res == result
# get 请求,访问不了
res = http_util.get(param_url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
# post 请求,访问不了
res = http_util.post(param_url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
# 提交 json 测试
def test_send_json(self):
url = 'http://127.0.0.1:%d/test_send_json' % self.port
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_send_json/?", method='post')
def test_send_json(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
return str_util.to_json(self.request.body)
# 提交 json
param = {'a':11,'b':'22','c':'哈','d':u'哈','e':None}
res = http_util.post(url, param, send_json=True)
assert isinstance(res, basestring)
assert str_util.to_json(res) == param
param2 = {'b':'22','c':'哈','d':u'哈','e':0}
res2 = http_util.post(url, param2, send_json=True)
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == param2
# gzip 压缩
def test_gzip(self):
url = 'http://127.0.0.1:%d/test_gzip' % self.port
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}' * 100
@tornado_util.fn(url=r"/test_gzip/?", gzip_length=10)
def get_test_gzip(self, **kwargs):
if self.request.headers.get("Accept-Encoding", "") in ('gzip', 'deflate'):
return result
else:
return {"result": -1, "reason":'这是没有压缩的请求,请求方式有误!'}
# get 请求
global NOW_LOG_RECORD
NOW_LOG_RECORD = []
res = http_util.get(url, gzip=True)
assert isinstance(res, basestring)
assert res == result
assert len(NOW_LOG_RECORD) >= 2
record = NOW_LOG_RECORD[-2] # 倒数第二条日志,正是解压日志
assert record is not None
assert record.levelno == logging.INFO
assert u'压缩请求' in record.msg
assert record.method == 'GET'
assert record.before_length # 有写入解压前长度
assert record.after_length # 有写入解压后长度
assert record.after_length == len(result)
assert record.before_length < record.after_length # 解压后长度更长
# post 请求
NOW_LOG_RECORD = []
res = http_util.post(url, gzip=True)
assert isinstance(res, basestring)
assert res == result
assert len(NOW_LOG_RECORD) >= 2
record = NOW_LOG_RECORD[-2] # 倒数第二条日志,正是解压日志
assert record is not None
assert record.levelno == logging.INFO
assert u'压缩请求' in record.msg
assert record.method == 'POST'
assert record.before_length # 有写入解压前长度
assert record.after_length # 有写入解压后长度
assert record.after_length == len(result)
assert record.before_length < record.after_length # 解压后长度更长
# headers 设值
def test_headers(self):
url = 'http://127.0.0.1:%d/test_headers/' % self.port
token = 'dfwerwer1548hgjhfre35656'
@tornado_util.fn(url=r"/test_headers/")
def get_test_test_headers(self, **kwargs):
if self.request.headers.get("Accept-Token", "") == token:
return 111
else:
return 222
# get 请求
res = http_util.get(url)
assert isinstance(res, basestring)
assert res == '222'
res = http_util.get(url, headers={"Accept-Token":token})
assert isinstance(res, basestring)
assert res == '111'
# post 请求
res = http_util.post(url)
assert isinstance(res, basestring)
assert res == '222'
res = http_util.post(url, headers={"Accept-Token":token})
assert isinstance(res, basestring)
assert res == '111'
# 出错测试
def test_error(self):
# 设成默认异步请求
http_util.init(repeat_time=3)
url = 'http://127.0.0.1:%d/test_error' % self.port
global error_times
error_times = 0
# 自定义一个出错页面
class _ExceptionHandler(tornado_util.RequestHandler):
def get(self):
global error_times
error_times += 1
raise Exception('出错测试')
post = get
# 添加到请求地址列表
tornado_util.add_apps(r"/test_error/?", _ExceptionHandler)
# GET 请求,返回 None
res = http_util.get(url)
assert res == '<html><title>500: Internal Server Error</title><body>500: Internal Server Error</body></html>'
assert error_times == 3 # 请求次数
# POST 请求,返回 None
error_times = 0
res = http_util.post(url)
assert res == '<html><title>500: Internal Server Error</title><body>500: Internal Server Error</body></html>'
assert error_times == 3 # 请求次数
# 改回默认值,避免影响其它测试
http_util.init(repeat_time=1)
# 异步测试
def test_threads(self):
# 设成默认异步请求
http_util.init(threads=True)
url = 'http://127.0.0.1:%d/test_threads' % self.port
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_threads/?")
def get_test_threads(self, **kwargs):
return result
# 异步 GET 请求,返回线程
global NOW_LOG_RECORD
NOW_LOG_RECORD = []
th1 = http_util.get(url)
assert len(NOW_LOG_RECORD) == 0 # 通过日志查看有没有发启线程,因为发启线程肯定没有这么快打印日志
assert isinstance(th1, threading.Thread)
th1.join() # 等待线程返回,以便检查日志
assert len(NOW_LOG_RECORD) >= 1
record = NOW_LOG_RECORD[0]
assert record is not None
assert record.levelno == logging.INFO
assert record.method == 'GET'
log_msg = record.getMessage()
assert url in log_msg
assert result in log_msg
# 异步 POST 请求,返回线程
NOW_LOG_RECORD = []
th2 = http_util.post(url)
assert len(NOW_LOG_RECORD) == 0 # 通过日志查看有没有发启线程,因为发启线程肯定没有这么快打印日志
assert isinstance(th2, threading.Thread)
th2.join() # 等待线程返回,以便检查日志
assert len(NOW_LOG_RECORD) >= 1
record = NOW_LOG_RECORD[0]
assert record is not None
assert record.levelno == logging.INFO
assert record.method == 'POST'
log_msg = record.getMessage()
assert url in log_msg
assert result in log_msg
# 改回默认值,避免影响其它测试
http_util.init(threads=False)
# 参数转换
def test_url_encode(self):
# 字典转请求参数
param1 = {'name' : '测试用户', 'password' : 123456}
assert http_util.url_encode(param1) == 'password=123456&name=%E6%B5%8B%E8%AF%95%E7%94%A8%E6%88%B7'
assert http_util.url_encode(param1, encode='gbk') == 'password=123456&name=%B2%E2%CA%D4%D3%C3%BB%A7'
param2 = {'name' : '测试用户', 'password' : {u'哈':[1,2,'3',u'测试']}}
assert http_util.url_encode(param2) == 'password=%7B%22%5Cu54c8%22%3A+%5B1%2C+2%2C+%223%22%2C+%22%5Cu6d4b%5Cu8bd5%22%5D%7D&name=%E6%B5%8B%E8%AF%95%E7%94%A8%E6%88%B7'
# 请求参数转字典
assert str_util.deep_str(http_util.getRequestParams(http_util.url_encode(param1))) == str_util.deep_str(param1, all2str=True)
assert str_util.deep_str(http_util.getRequestParams('http://xx.xx.com:8080/te?password=123456&name=%E6%B5%8B%E8%AF%95%E7%94%A8%E6%88%B7')) == str_util.deep_str(param1, all2str=True)
assert str_util.deep_str(http_util.getRequestParams(http_util.url_encode(param2))) == {u'password': u'{"\\u54c8": [1, 2, "3", "\\u6d4b\\u8bd5"]}', u'name': u'测试用户'}
# 请求方式测试
def test_method(self):
url = 'http://127.0.0.1:%d/get_test_method/' % self.port
# 定义处理各种请求方式的类
class MethodHandler(tornado_util.RequestHandler):
def get(self): return self.finish('get')
def post(self): return self.finish('post')
def put(self): return self.finish('put')
def delete(self): return self.finish('delete')
def patch(self): return self.finish('patch')
def options(self): return self.finish('options')
tornado_util.add_apps(r"/get_test_method/", MethodHandler)
# 测试请求
res = http_util.get(url)
assert isinstance(res, basestring) and res == "get"
res = http_util.post(url)
assert isinstance(res, basestring) and res == "post"
res = http_util.put(url)
assert isinstance(res, basestring) and res == "put"
res = http_util.delete(url)
assert isinstance(res, basestring) and res == "delete"
res = http_util.patch(url)
assert isinstance(res, basestring) and res == "patch"
res = http_util.options(url)
assert isinstance(res, basestring) and res == "options"
if __name__ == "__main__":
unittest.main() # 执行所有
'''
# 执行指定函数
suite = unittest.TestSuite()
suite.addTest(TestHttpUtil("test_send_json"))
suite.addTest(TestHttpUtil("test_delete_options"))
unittest.TextTestRunner().run(suite)
'''
| [
"daillow@gmail.com"
] | daillow@gmail.com |
0d71b5f5dcdcc4de44f8fc1705dd0fe879f6ca04 | 1494d703aeea5de217731b9c60d1b724cba299dd | /网络图片你的爬取和存储.py | 186949917035a199f47010734a7cc679db2b4b3c | [] | no_license | Thomas-Sue/Python | a4d55f04e94c70c7f6f47b6411dcd1be74b9d602 | 0f28d825e73cdb89cd8beb61fb3484ebe38b17ca | refs/heads/master | 2021-01-12T03:53:15.750428 | 2017-03-30T07:15:12 | 2017-03-30T07:15:12 | 78,280,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 11:14:56 2017
@author: hp
"""
import requests
import os
url = "http://image.nationalgeographic.com.cn/2017/0309/20170309051020267.jpg"
root = "F://"
path = root +url.split('/')[-1]
try:
if not os.path.exists(root):
os.mkdir(root)
if not os.path.exists(path):
r = requests.get(url)
with open(path,'wb') as f:
f.write(r.content)
f.close()
print("文件保存成功")
else:
print("文件已存在")
except:
print("爬取失败") | [
"noreply@github.com"
] | Thomas-Sue.noreply@github.com |
5bb9776224c4813a523963dc2805bc70a092fa60 | 40d5394eea0c1288fcdd57180a0141672cb198fa | /users/views.py | d8f37654f883f0bde0e2232915ec025e0a7e6ec4 | [
"MIT"
] | permissive | mornicamwende/ranker | 6c12b0297703ac559de84bb0b36396ec2738f970 | 107bcaad61bb5e726570a8250b55eb2e6245dc7a | refs/heads/master | 2023-01-07T00:18:54.192155 | 2020-10-27T17:07:21 | 2020-10-27T17:07:21 | 306,592,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Profile
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
# Create your views here..
def register(request):
    """Handle user sign-up.

    GET (or any non-POST) renders an empty registration form. A valid POST
    creates the user, flashes a success message, and redirects to the login
    page; an invalid POST falls through and re-renders the bound form so
    field errors are shown.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # Removed unused `username` local and dead commented-out code;
            # the message text itself is unchanged.
            messages.success(request, ' Your account has been created! You are now able to log in!')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Render the profile page; on POST, update both user and profile forms."""
    if request.method == 'POST':
        user_form = UserUpdateForm(request.POST, instance=request.user)
        pic_form = ProfileUpdateForm(
            request.POST, request.FILES, instance=request.user.profile
        )
        if user_form.is_valid() and pic_form.is_valid():
            user_form.save()
            pic_form.save()
            messages.success(request, f' Your account has been updated!')
            return redirect('profile')
        # Invalid POST: fall through and re-render the bound forms below.
    else:
        user_form = UserUpdateForm(instance=request.user)
        pic_form = ProfileUpdateForm(instance=request.user.profile)
    # Template expects the forms under the keys 'u_form' and 'p_form'.
    return render(
        request,
        'users/profile.html',
        {'u_form': user_form, 'p_form': pic_form},
    )
| [
"mornicamwende@gmail.com"
] | mornicamwende@gmail.com |
38772a45c69fa1ed780c9de5395c76f6ebc40017 | c035b7d588692d321a34c6c11d5064e99143a1fe | /idgo_admin/migrations/0038_auto_20180215_1517.py | 0abdf440cd07eec7eb8ff7068218abcc85a36da3 | [
"Apache-2.0"
] | permissive | jerbou/idgo | 2fbaf35ec3bdf62cd883c0465c3dccaaddceab75 | 89e5ddefd4d0cb1d1aea3820b0c17ba99b4cf8d0 | refs/heads/master | 2020-08-07T10:49:14.079562 | 2019-09-25T13:48:03 | 2019-09-25T13:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-15 14:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the `org_extras` field of the `accountactions` model to
    `organisation`.

    Auto-generated by Django (makemigrations); applies on top of migration
    0037_auto_20180215_1135 of the idgo_admin app. Do not hand-edit the
    operations once this migration has been applied anywhere.
    """

    dependencies = [
        ('idgo_admin', '0037_auto_20180215_1135'),
    ]

    operations = [
        migrations.RenameField(
            model_name='accountactions',
            old_name='org_extras',
            new_name='organisation',
        ),
    ]
| [
"niark.hipps@gmail.com"
] | niark.hipps@gmail.com |
449e5f4d3e112507dc7540c319584658b70805eb | 560df0c3f859ae2d4c279f4669f9ab8758c486fb | /old/Euler063.py | defeccb9479085d6e4df2722c16167b7442a9de6 | [] | no_license | gronkyzog/Puzzles | 0e7cdd7fa5ab8139d63a721cac5ee30e80728c7a | cdc145857f123a98f1323c95b5744d36ce50355f | refs/heads/master | 2021-03-13T00:01:17.715403 | 2015-02-22T11:59:03 | 2015-02-22T11:59:03 | 17,100,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import math
counter = 0
for p in range(1,1000):
for n in range(1,1000):
x = p**n
z = len(str(x))
if z == n:
counter +=1
print counter,p,n,x
if z > n:
break
| [
"smcnicol@gmail.com"
] | smcnicol@gmail.com |
ffa2de1f91cfcb7a68bd7165e4c32a091870427e | 3ee11cf1059030c5bd53c4f15c2a1c950df184ac | /app1/dino.py | f5803ebc09fe9bbbb46c7866727f36bf686ea681 | [] | no_license | kz2d/python_project | 6cb9216d029053574450d897ee75404ebd074ed5 | 7932e7d774476fcfb9ce99d3199e663a866bf94a | refs/heads/master | 2022-12-24T12:21:35.130070 | 2020-09-24T07:30:03 | 2020-09-24T07:30:03 | 296,802,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | from PIL import ImageGrab
import os
import pyautogui as pq
import keyboard
def Jump():
    """Press space if any probe pixel near the cursor is dark (red channel < 90)."""
    cursor = pq.position()
    px, py = cursor.x, cursor.y
    # Offsets relative to the cursor; the generator keeps the original
    # short-circuit behavior, sampling the screen only until the first hit.
    probe_offsets = ((0, 0), (-20, 0), (-40, 0), (-30, -40))
    if any(pq.pixel(px + dx, py + dy)[0] < 90 for dx, dy in probe_offsets):
        pq.keyDown('space')
def Jump1():
    """Screenshot-based obstacle check around the mouse; presses space on dark pixels."""
    f=ImageGrab.grab()
    x = pq.position().x
    y = pq.position().y
    z = pq.pixel(x, y)  # sampled but never used below
    c=True
    print(f.getpixel((x, y)))
    # Scan a horizontal strip to the left of the cursor; press space at most
    # once (the `c` flag) for this strip.
    for i in range(0,160,5):
        if f.getpixel((x - i, y))[0] < 90:
            if c:
                pq.keyDown('space')
                c=False
    # Second scan line 30px higher; presses space on every dark pixel.
    for i in range(-30,120, 10):
        if f.getpixel((x - i, y-30))[0] < 90:
            pq.keyDown('space')
    #pq.keyUp('space')
    # NOTE(review): `s` is never initialized inside this function, so reaching
    # this line raises UnboundLocalError, and `pq.move(f, 0)` passes the
    # screenshot where a number is expected. The original indentation was
    # lost, so the intended scope of this tail is unclear — confirm intent.
    s+=0.01
    if s==1:
        pq.move(f,0)
        s=0
    s=0
# Module-level driver: repeats the same pixel scan as Jump1 forever.
s=0
while(True):
    f = ImageGrab.grab()
    x = pq.position().x
    y = pq.position().y
    z = pq.pixel(x, y)  # sampled but unused
    c = True
    print(f.getpixel((x, y)))
    # Horizontal strip left of the cursor; press space at most once per pass.
    for i in range(0, 160, 5):
        if f.getpixel((x - i, y))[0] < 90:
            if c:
                pq.keyDown('space')
                c = False
    # Second scan line 30px higher; presses space on every dark pixel.
    for i in range(-30, 120, 10):
        if f.getpixel((x - i, y - 30))[0] < 90:
            pq.keyDown('space')
    # pq.keyUp('space')
    s += 0.01
    if s == 1:
        # NOTE(review): s accumulates 0.01 per pass, so `s == 1` relies on
        # exact float equality and may never trigger — confirm. pq.move(s, 0)
        # presumably nudges the mouse horizontally by s pixels.
        pq.move(s, 0)
        s = 0
| [
"noreply@github.com"
] | kz2d.noreply@github.com |
# Code Jam "Standing Ovation": for each case, find the minimum number of
# friends to invite so the ovation never stalls. Walking the shyness-count
# string, the running surplus is (people standing so far) - (position + 1);
# the most negative surplus is the number of friends needed.
#
# Fixed: both files are now managed by `with` (fin was never closed), and
# the `min` local no longer shadows the builtin.
with open("../../Downloads/A-small-attempt0 (1).in", "r") as fin, \
        open("standing_ovation.out", "w") as out:
    cases = int(fin.readline())
    for i in range(cases):
        d, shy = fin.readline().split()  # d (max shyness) is not needed here
        lowest = 0  # most negative running surplus seen so far
        curr = 0
        for x in shy:
            curr += int(x)  # audience members at this shyness level stand
            curr -= 1       # one more standing person is required per level
            if curr < lowest:
                lowest = curr
        out.write("Case #%d: %d\n" % (i + 1, -lowest))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
df50bd9305125ca4d70a7b757301e0385e8002ee | 61f21d076d1163529e37ef6cfe9911ce246fb653 | /hfcs-fffit/runs/r125-density-iter1/project.py | f501260a1afc15dcdd6f57f0eb9798c3bd06ab01 | [
"MIT"
] | permissive | helpscott/hfcs-fffit | e0e4cde1ef882e2146ea23781083b09d60b2d8fb | 4f94145a9473fa4b7f16ca4a2d18966d34f901b2 | refs/heads/main | 2023-06-20T07:54:56.610123 | 2021-07-13T20:55:53 | 2021-07-13T20:55:53 | 436,075,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,911 | py | import flow
from flow import FlowProject, directives
import templates.ndcrc
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
class Project(FlowProject):
    """FlowProject subclass that the operations and labels below register on."""
    pass
@Project.operation
@Project.post.isfile("ff.xml")
def create_forcefield(job):
    """Write the job-specific force-field XML (ff.xml) into the job workspace."""
    xml_text = _generate_r125_xml(job)
    with open(job.fn("ff.xml"), "w") as xml_file:
        xml_file.write(xml_text)
@Project.operation
@Project.pre.after(create_forcefield)
@Project.post.isfile("system.gro")
@Project.post.isfile("unedited.top")
def create_system(job):
    """Build a 150-molecule R125 box, type it with the job force field, and
    stage the topology plus a pre-minimized coordinate file."""
    import mbuild
    import foyer
    import shutil

    # R125 (pentafluoroethane) from its SMILES string.
    molecule = mbuild.load("C(F)(F)C(F)(F)F", smiles=True)
    box = mbuild.fill_box(molecule, n_compounds=150, density=1000)

    forcefield = foyer.Forcefield(job.fn("ff.xml"))
    typed_system = forcefield.apply(box)
    typed_system.combining_rule = "lorentz"
    typed_system.save(job.fn("unedited.top"))

    # Copy the pre-minimized coordinates shipped with the study.
    shutil.copy("data/initial_config/system_em.gro", job.fn("system.gro"))
@Project.operation
@Project.pre.after(create_system)
@Project.post.isfile("system.top")
def fix_topology(job):
    """Rewrite the topology with the GAFF 1-4 LJ scaling factor.

    Parmed writes an LJ 1-4 scaling of 1.0 into the [ defaults ] section,
    while GAFF uses 0.5; the line two below the "[ defaults ]" header is
    replaced before writing system.top.
    """
    with open(job.fn("unedited.top")) as fin:
        lines = fin.readlines()

    for index, line in enumerate(lines):
        if line.strip() == "[ defaults ]":
            defaults_index = index  # last occurrence wins

    lines[defaults_index + 2] = "1 2 no 0.5 0.8333333\n"

    with open(job.fn("system.top"), "w") as fout:
        fout.writelines(lines)
@Project.operation
@Project.post.isfile("em.mdp")
@Project.post.isfile("eq.mdp")
@Project.post.isfile("prod.mdp")
def generate_inputs(job):
    """Write the mdp files for energy minimization, equilibration, production."""
    # (filename, generator) pairs, written in the original order.
    builders = (
        ("em.mdp", _generate_em_mdp),
        ("eq.mdp", _generate_eq_mdp),
        ("prod.mdp", _generate_prod_mdp),
    )
    for filename, builder in builders:
        with open(job.fn(filename), "w") as handle:
            handle.write(builder(job))
@Project.label
def em_complete(job):
    """Label: True once the energy-minimized structure (em.gro) exists."""
    # job.isfile already yields the boolean; the if/else was redundant.
    return job.isfile("em.gro")
@Project.label
def eq_complete(job):
    """Label: True once the equilibration output (eq.gro) exists."""
    # job.isfile already yields the boolean; the if/else was redundant.
    return job.isfile("eq.gro")
@Project.label
def prod_complete(job):
    """Label: True once the production output (prod.gro) exists."""
    # job.isfile already yields the boolean; the if/else was redundant.
    return job.isfile("prod.gro")
@Project.operation
@Project.pre.after(create_system)
@Project.pre.after(fix_topology)
@Project.pre.after(generate_inputs)
@Project.post(em_complete)
@Project.post(eq_complete)
@Project.post(prod_complete)
@flow.with_job
@flow.cmd
def simulate(job):
    """Return the shell command chaining minimization, equilibration, production."""
    steps = [
        "gmx_d grompp -f em.mdp -c system.gro -p system.top -o em",
        "gmx_d mdrun -v -deffnm em -ntmpi 1 -ntomp 1",
        "gmx_d grompp -f eq.mdp -c em.gro -p system.top -o eq",
        "gmx_d mdrun -v -deffnm eq -ntmpi 1 -ntomp 1",
        "gmx_d grompp -f prod.mdp -c eq.gro -p system.top -o prod",
        "gmx_d mdrun -v -deffnm prod -ntmpi 1 -ntomp 1",
    ]
    # Joining with " && " reproduces the original command string exactly.
    return " && ".join(steps)
@Project.operation
@Project.pre.after(simulate)
@Project.post(lambda job: "density" in job.doc)
@Project.post(lambda job: "density_unc" in job.doc)
def calculate_density(job):
    """Average the production density, write block-average statistics, and
    store the mean and its uncertainty in the job document."""
    import panedr
    import numpy as np
    from block_average import block_average

    # Thermodynamic time series from the production energy file.
    thermo = panedr.edr_to_df(job.fn("prod.edr"))
    # Skip early frames (Time <= 500) before averaging the density.
    prod_density = thermo[thermo.Time > 500.0].Density.values
    job.doc.density = np.mean(prod_density)

    means_est, vars_est, vars_err = block_average(prod_density)
    with open(job.fn("density_blk_avg.txt"), "w") as stats:
        stats.write("# nblk_ops, mean, vars, vars_err\n")
        rows = zip(means_est, vars_est, vars_err)
        for nblk_ops, (mean_est, var_est, var_err) in enumerate(rows):
            stats.write("{}\t{}\t{}\t{}\n".format(nblk_ops, mean_est, var_est, var_err))

    # Uncertainty: largest sqrt of the block variance estimates (presumably
    # per-block-size variances — see block_average for their exact meaning).
    job.doc.density_unc = np.max(np.sqrt(vars_est))
#####################################################################
################# HELPER FUNCTIONS BEYOND THIS POINT ################
#####################################################################
def _generate_r125_xml(job):
    """Return the force-field XML for R-125 (pentafluoroethane) as a string,
    with the per-atom-type LJ sigma/epsilon values substituted from the job
    statepoint (job.sp.sigma_* and job.sp.epsilon_*)."""
    content = """<ForceField>
 <AtomTypes>
  <Type name="C1" class="c3" element="C" mass="12.011" def="C(C)(H)(F)(F)" desc="carbon bonded to 2 Fs, a H, and another carbon"/>
  <Type name="C2" class="c3" element="C" mass="12.011" def="C(C)(F)(F)(F)" desc="carbon bonded to 3 Fs and another carbon"/>
  <Type name="F1" class="f" element="F" mass="18.998" def="FC(C)(F)H" desc="F bonded to C1"/>
  <Type name="F2" class="f" element="F" mass="18.998" def="FC(C)(F)F" desc="F bonded to C2"/>
  <Type name="H1" class="h2" element="H" mass="1.008" def="H(C)" desc="single H bonded to C1"/>
 </AtomTypes>
 <HarmonicBondForce>
  <Bond class1="c3" class2="c3" length="0.15375" k="251793.12"/>
  <Bond class1="c3" class2="f" length="0.13497" k="298653.92"/>
  <Bond class1="c3" class2="h2" length="0.10961" k="277566.56"/>
 </HarmonicBondForce>
 <HarmonicAngleForce>
  <Angle class1="c3" class2="c3" class3="f" angle="1.9065976748786053" k="553.1248"/>
  <Angle class1="c3" class2="c3" class3="h2" angle="1.9237019015481498" k="386.6016"/>
  <Angle class1="f" class2="c3" class3="f" angle="1.8737854849411122" k="593.2912"/>
  <Angle class1="f" class2="c3" class3="h2" angle="1.898743693244631" k="427.6048"/>
 </HarmonicAngleForce>
 <PeriodicTorsionForce>
  <Proper class1="f" class2="c3" class3="c3" class4="f" periodicity1="3" k1="0.0" phase1="0.0" periodicity2="1" k2="5.0208" phase2="3.141592653589793"/>
  <Proper class1="" class2="c3" class3="c3" class4="" periodicity1="3" k1="0.6508444444444444" phase1="0.0"/>
 </PeriodicTorsionForce>
 <NonbondedForce coulomb14scale="0.833333" lj14scale="0.5">
  <Atom type="C1" charge="0.224067" sigma="{sigma_C1:0.6f}" epsilon="{epsilon_C1:0.6f}"/>
  <Atom type="C2" charge="0.500886" sigma="{sigma_C2:0.6f}" epsilon="{epsilon_C2:0.6f}"/>
  <Atom type="F1" charge="-0.167131" sigma="{sigma_F1:0.6f}" epsilon="{epsilon_F1:0.6f}"/>
  <Atom type="F2" charge="-0.170758" sigma="{sigma_F2:0.6f}" epsilon="{epsilon_F2:0.6f}"/>
  <Atom type="H1" charge="0.121583" sigma="{sigma_H1:0.6f}" epsilon="{epsilon_H1:0.6f}"/>
 </NonbondedForce>
</ForceField>
""".format(
        sigma_C1=job.sp.sigma_C1,
        sigma_C2=job.sp.sigma_C2,
        sigma_F1=job.sp.sigma_F1,
        sigma_F2=job.sp.sigma_F2,
        sigma_H1=job.sp.sigma_H1,
        epsilon_C1=job.sp.epsilon_C1,
        epsilon_C2=job.sp.epsilon_C2,
        epsilon_F1=job.sp.epsilon_F1,
        epsilon_F2=job.sp.epsilon_F2,
        epsilon_H1=job.sp.epsilon_H1,
    )
    return content
def _generate_em_mdp(job):
    """Return the GROMACS .mdp input for steepest-descent energy minimization.
    The settings are fixed; the job argument is accepted for interface
    symmetry with the other _generate_*_mdp helpers but is not used."""
    contents = """
; MDP file for energy minimization
integrator      = steep         ; Algorithm (steep = steepest descent minimization)
emtol           = 100.0         ; Stop minimization when the maximum force < 100.0 kJ/mol/nm
emstep          = 0.01          ; Energy step size
nsteps          = 50000         ; Maximum number of (minimization) steps to perform
nstlist         = 1             ; Frequency to update the neighbor list and long range forces
cutoff-scheme   = Verlet
ns-type         = grid          ; Method to determine neighbor list (simple, grid)
verlet-buffer-tolerance = 1e-5  ; kJ/mol/ps
coulombtype     = PME           ; Treatment of long range electrostatic interactions
rcoulomb        = 1.0           ; Short-range electrostatic cut-off
rvdw            = 1.0           ; Short-range Van der Waals cut-off
pbc             = xyz           ; Periodic Boundary Conditions (yes/no)
constraints     = all-bonds
lincs-order     = 8
lincs-iter      = 4
"""
    return contents
def _generate_eq_mdp(job):
contents = """
; MDP file for NVT simulation
; Run parameters
integrator = md ; leap-frog integrator
nsteps = {nsteps} ;
dt = 0.001 ; 1 fs
; Output control
nstxout = 10000 ; save coordinates every 10.0 ps
nstvout = 0 ; don't save velocities
nstenergy = 100 ; save energies every 0.1 ps
nstlog = 100 ; update log file every 0.1 ps
; Neighborsearching
cutoff-scheme = Verlet
ns-type = grid ; search neighboring grid cells
nstlist = 10 ; 10 fs, largely irrelevant with Verlet
verlet-buffer-tolerance = 1e-5 ; kJ/mol/ps
; VDW
vdwtype = Cut-off
rvdw = 1.0 ; short-range van der Waals cutoff (in nm)
vdw-modifier = None
; Electrostatics
rcoulomb = 1.0 ; short-range electrostatic cutoff (in nm)
coulombtype = PME ; Particle Mesh Ewald for long-range electrostatics
pme-order = 4 ; cubic interpolation
fourier-spacing = 0.12 ; effects accuracy of pme
ewald-rtol = 1e-5
; Temperature coupling is on
tcoupl = v-rescale ; modified Berendsen thermostat
tc-grps = System ; Single coupling group
tau-t = 0.1 ; time constant, in ps
ref-t = {temp} ; reference temperature, one for each group, in K
; Pressure coupling is off
pcoupl = berendsen
pcoupltype = isotropic
ref-p = {press}
tau-p = 0.5
compressibility = 4.5e-5
; Periodic boundary conditions
pbc = xyz ; 3-D PBC
; Dispersion correction
DispCorr = EnerPres ; apply analytical tail corrections
; Velocity generation
gen-vel = yes ; assign velocities from Maxwell distribution
gen-temp = {temp} ; temperature for Maxwell distribution
gen-seed = -1 ; generate a random seed
constraints = all-bonds
lincs-order = 8
lincs-iter = 4
""".format(
temp=job.sp.T, press=job.sp.P, nsteps=job.sp.nstepseq
)
return contents
def _generate_prod_mdp(job):
contents = """
; MDP file for NVT simulation
; Run parameters
integrator = md ; leap-frog integrator
nsteps = {nsteps} ;
dt = 0.001 ; 1 fs
; Output control
nstxout = 10000 ; save coordinates every 10.0 ps
nstvout = 0 ; don't save velocities
nstenergy = 100 ; save energies every 0.1 ps
nstlog = 100 ; update log file every 0.1 ps
; Neighborsearching
cutoff-scheme = Verlet
ns-type = grid ; search neighboring grid cells
nstlist = 10 ; 10 fs, largely irrelevant with Verlet
verlet-buffer-tolerance = 1e-5 ; kJ/mol/ps
; VDW
vdwtype = Cut-off
rvdw = 1.0 ; short-range van der Waals cutoff (in nm)
vdw-modifier = None ; standard LJ potential
; Electrostatics
rcoulomb = 1.0 ; short-range electrostatic cutoff (in nm)
coulombtype = PME ; Particle Mesh Ewald for long-range electrostatics
pme-order = 4 ; cubic interpolation
fourier-spacing = 0.12 ; effects accuracy of pme
ewald-rtol = 1e-5
; Temperature coupling is on
tcoupl = v-rescale ; Bussi thermostat
tc-grps = System ; Single coupling group
tau-t = 0.5 ; time constant, in ps
ref-t = {temp} ; reference temperature, one for each group, in K
; Pressure coupling is off
pcoupl = parrinello-rahman
pcoupltype = isotropic
ref-p = {press}
tau-p = 1.0
compressibility = 4.5e-5
; Periodic boundary conditions
pbc = xyz ; 3-D PBC
; Dispersion correction
DispCorr = EnerPres ; apply analytical tail corrections
; Velocity generation
gen-vel = no ; assign velocities from Maxwell distribution
gen-temp = {temp} ; temperature for Maxwell distribution
gen-seed = -1 ; generate a random seed
constraints = all-bonds
lincs-order = 8
lincs-iter = 4
""".format(
temp=job.sp.T, press=job.sp.P, nsteps=job.sp.nstepsprod
)
return contents
# Entry point: hand control to the signac-flow command-line interface.
if __name__ == "__main__":
    Project().main()
| [
"r.s.defever@gmail.com"
] | r.s.defever@gmail.com |
399be31c3aaa0c2b7e32f93aef8afcabd90b474c | 647ef582232f0f6c35dc31a89c1cabde60caaadf | /sec7_bunnsann.py | 3c6fdce15870f34df43980944766bdb57655283d | [] | no_license | yuko-sasaki/my_chatbot | 0472d4976e01b6bf54c0d4cb6523cd058f0ea54c | 7a77c97b27cae3aa293c0b9dc7abadaf87849d58 | refs/heads/master | 2022-12-13T15:10:25.947397 | 2020-09-08T12:45:02 | 2020-09-08T12:45:02 | 292,828,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | from gensim.models import word2vec
# Train a word2vec model on a pre-tokenized corpus ("ginga_words": a list of
# word lists) and print a few views of the learned distributed representations.
import pickle
with open("dataset/ginga_words.pickle", mode='rb') as f:
    ginga_words = pickle.load(f)
model = word2vec.Word2Vec(ginga_words,
                          size=100,      # embedding dimensionality
                          min_count=5,   # ignore words seen fewer than 5 times
                          window=5,      # context window size
                          iter=20,       # training epochs
                          sg=0)          # 0 = CBOW, 1 = skip-gram (gensim)
print(model.wv.vectors.shape)  # embedding matrix shape: (vocabulary size, hidden units)
print(model.wv.vectors)  # the embedding matrix itself
print(len(model.wv.index2word))  # vocabulary size
print(model.wv.index2word[:10])  # the first 10 words of the vocabulary
print(model.wv.vectors[0])  # the first word vector
print(model.wv.__getitem__("の"))  # the vector for the word "の"
| [
"sasakiy.2233@gmail.com"
] | sasakiy.2233@gmail.com |
97f78d057353db5df358e1e31bac1b98392279f5 | 646b0a41238b96748c7d879dd1bf81858651eb66 | /archive/memd.archive/gulp/Potential.py | ac129df6be3cb0a4594a14181a7914d174181b84 | [] | no_license | danse-inelastic/molDynamics | ded0298f8219064e086472299e1383d3dff2dac3 | c8e0bfd9cb65bcfc238e7993b6e7550289d2b219 | refs/heads/master | 2021-01-01T19:42:29.904390 | 2015-05-03T17:27:38 | 2015-05-03T17:27:38 | 34,993,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,489 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Brandon Keith
# California Institute of Technology
# (C) 2005 All Rights Reserved All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
#from molDynamics.gulp.forcefields.ForcefieldLoader import ForcefieldLoader
from memd.gulp.forcefields.InputFile import InputFile
class Potential(Component):
    '''This class serves as an API/interface for gulp potential construction.'''
    class Inventory(Component.Inventory):
        # User-configurable properties declared through pyre's inventory
        # mechanism; each becomes a configurable option of the component.
        import pyre.inventory as inv
        dispersionInRecipSpace = inv.bool('dispersionInRecipSpace', default = False)
        dispersionInRecipSpace.meta['tip'] = '''whether to calculate dispersion forces
        partly in reciprocal space'''
        useInitialBondingOnly = inv.bool('useInitialBondingOnly', default = False)
        useInitialBondingOnly.meta['tip'] = '''instead of reassigning
        bonding based on every optimization or time step, use intial geometry only to assign bonding'''
        forcefield = inv.facility('forcefield', default=InputFile('gulpLibrary'))
        forcefield.meta['tip'] = 'a class containing forcefield types'
        #forcefield.meta['known_plugins'] = ['gulpLibrary','manualEntry']
        moleculeIdentification = inv.str('moleculeIdentification', default = 'None')
        moleculeIdentification.meta['tip'] = '''identify molecules based on covalent radii
        and deal with intramolecular coulomb interactions'''
        moleculeIdentification.validator=inv.choice(['None','identify molecules; remove intramolecular Coulomb forces',
        'identify molecules; retain intramolecular Coulomb forces'])
    def __init__(self, name='potential', facility='Potential'):
        # Register this component with pyre under the given name/facility.
        Component.__init__(self, name, facility)
        self.i=self.inventory  # shorthand for the inventory used by callers
    def identifyOptions( self, visitor):
        # Double-dispatch: the visitor writes this potential's GULP options.
        return visitor.writePotentialOptions(self)
    def identifyKeywords( self, visitor):
        # Double-dispatch: the visitor writes this potential's GULP keywords.
        return visitor.writePotentialKeywords(self)
# version
__id__ = "$Id$"
# Generated automatically by PythonMill on Mon Apr 16 12:44:30 2007
# End of file | [
"jbrkeith@gmail.com"
] | jbrkeith@gmail.com |
9450412ca95624708fe0ba54ba1780d0d0691d95 | 4c639c521834f4349ba2165e72c5857ddecee625 | /acoustic_X_text_X_visual/AttComb_aXtXv/gender/attention_fusion_network/archived_models/archived_model_1_(MSE_best)/metrics.py | 9b7d3a09c1cd9ee948834703374dc115f06d923e | [] | no_license | arbaazQureshi/attention_based_multimodal_fusion_for_estimating_depression | f4ea86746d9961fe4b9cf4f88f6cec604a201656 | e4c57ac51c271c36c244c260b01a22fa1897ffcb | refs/heads/master | 2020-05-19T22:48:03.665953 | 2019-05-06T19:34:31 | 2019-05-06T19:34:31 | 185,252,875 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import numpy as np
# Evaluate the trained attention-fusion regression model on the development
# set and report RMSE, MAE and explained-variance score.
import sklearn.metrics
from load_data import load_development_data
from load_model import load_model
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"  # pin execution to GPU 1
if __name__ == "__main__":
    # Rebuild the architecture and restore the best weights.
    model = load_model()
    model.load_weights('optimal_weights.h5')
    dev_COVAREP_X_FORMANT, dev_facial_X_pose, dev_gaze_X_action, dev_transcript, dev_Y, dev_X_gender = load_development_data()
    model.compile(loss = 'mse', optimizer = 'adam', metrics = ['mean_absolute_error'])
    # Predict on the five development-set input modalities.
    dev_Y_hat = model.predict([dev_COVAREP_X_FORMANT, dev_facial_X_pose, dev_gaze_X_action, dev_X_gender, dev_transcript])
    dev_Y = np.array(dev_Y)
    # Flatten (n, 1) predictions to shape (n,) to match the targets.
    dev_Y_hat = dev_Y_hat.reshape((dev_Y.shape[0],))
    RMSE = np.sqrt(sklearn.metrics.mean_squared_error(dev_Y, dev_Y_hat))
    MAE = sklearn.metrics.mean_absolute_error(dev_Y, dev_Y_hat)
    EVS = sklearn.metrics.explained_variance_score(dev_Y, dev_Y_hat)
    print('RMSE :', RMSE)
    print('MAE :', MAE)
    #print(np.std(dev_Y - dev_Y_hat))
    print('EVS :', EVS)
with open('regression_metrics.txt', 'w') as f:
f.write('RMSE\t:\t' + str(RMSE) + '\nMAE\t\t:\t' + str(MAE) + '\nEVS\t\t:\t' + str(EVS)) | [
"arbaaz.qureshi29@gmail.com"
] | arbaaz.qureshi29@gmail.com |
a2453d90db22aca756d619b74b525d6186f4875d | 699c7f26a91106a2fc79bb15299ce0cee532a2dd | /xrayspecprocessing/multi.combine.group.py | ff57d3b4c05ec312c219fc8fc8133076e2dafd82 | [] | no_license | samconnolly/astro | 70581a4d3f2086716aace3b5db65b74aaaa5df95 | 3731be313592c13dbb8af898e9734b98d83c0cc2 | refs/heads/master | 2020-04-06T03:40:27.454279 | 2014-03-12T14:36:34 | 2014-03-12T14:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | # multi.combine.group.py
# Uses the HEADAS tool addspec and grppha to sum sets of spectra, assign their
# summed background and response files and produce a grouped spectrum
# Uses a text file of input spectra. Does so from output file from listbinmin.py
# Sam Connolly 4/3/13
import os
# ====================== PARAMATERS ============================================
# file route - directory containing spectra and spectrum list
inroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData"\
			+"/ngc1365/spectra/all/"
outroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData"\
			+"/ngc1365/spectra/summed/"
# file names (input spectrum list and output stem)
fname = "speclist.txt"
outname = "13.14.25.summed"
# Grouping command (e.g. "group min 15" for min of 15 counts per bin,
#	"group 25 150 4" to group channels 25-150 into groups of 4
#	[Swift XRT has 1024 channels] )
groupcommand = 'group min 15'
# overwrite existing files?
overwrite = False
# ==============================================================================
# get current directory, to return to
originaldir = os.getcwd()
# change to directory of spectra
os.chdir(inroute)
#===============================================================================
# sum spectra
#===============================================================================
# create the addspec command (sums spectra, responses and backgrounds)
sumcommand = "addspec " + fname + " " + outname + " qaddrmf = yes"\
	     + " qsubback = yes" + " clobber = " + str(overwrite)
# add spectra
os.system(sumcommand)
#===============================================================================
# group spectra
#===============================================================================
# file names produced by addspec / consumed by grppha
spectrum = outname + ".pha"
back = outname + ".bak"
rmf = outname + ".rsp"
output = outname + "_grp.pha"
# overwriting or not ('!' prefix tells grppha to clobber the output file)
if overwrite == True:
	over = '!'
else:
	over = ''
# generate grppha command: attach background/response keys, then group
gcommand = 'grppha ' + spectrum + ' ' + over + output + ' comm = "' + \
	   'chkey BACKFILE ' + back + \
	   ' & chkey RESPFILE ' + rmf + \
	   ' & ' + groupcommand + ' & exit"'
# execute command
os.system(gcommand)
# move files to output folder
# NOTE(review): " & " runs each mv in the background (shell job control);
# " && " (sequential, stop on failure) is probably what was intended - confirm.
movecommand = "mv " + spectrum + " " + outroute \
	      + " & mv " + back + " " + outroute \
	      + " & mv " + rmf + " " + outroute\
	      + " & mv " + output + " " + outroute
os.system(movecommand)
#-------------------------------------------------------------------------------
# switch back to original directory
os.chdir(originaldir)
| [
"sdc1g08@soton.ac.uk"
] | sdc1g08@soton.ac.uk |
afe6fb6ea78861b1fe883e0bd6d315b4ab07ef6f | deeb2ecb908bff31d529d2a62f55402f31b6f6d4 | /lib/bracket/match.py | 571746ee1a95978cf691490ea01f4fc5c54d6582 | [] | no_license | kaahanmotwani/BracketOdds | 15c1cf47d6a63026f4ea0a16208bd93c8c916b77 | 9eba08a8eb92bab49e1432e6e322c54d13cfd48b | refs/heads/main | 2023-04-01T20:22:54.294503 | 2021-04-04T19:24:17 | 2021-04-04T19:24:17 | 354,626,772 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | #!/usr/bin/env python
import json, random
from .round import Rounds
from .team import Team
class Match():
'''
Defines one game between two teams.
Attributes
----------
t1 (Team) : Team name representing one team in the game.
t2 (Team) : Team name representing the other team in the game.
rnd (Rounds) : A Rounds enum value representing the round in which the match takes place.
alpha (float) : The alpha (actual or default) value for the pair of seeds and round.
prob (float) : The probability that team 1 wins according to the model.
winner (Team) : The team object that wins according to a simulation with the probability.
'''
def __init__(self, team1: Team, team2: Team, rnd: Rounds, alpha: float=None, winner: Team=None):
'''
Constructs a Match object.
Parameters
----------
team1 (Team) : the Team object for t1.
team2 (Team) : the Team object for t2.
rnd (Rounds) : the rnd enum value representing the current round.
alpha (float) : an alpha value for the matchup
'''
self.t1 = team1
self.t2 = team2
self.rnd = rnd
self.alpha = alpha
self.prob = self.win_prob() if alpha != None else None
self.winner = winner if winner else self.get_winner()
def win_prob(self) -> float:
'''
Returns the probability that team 1 wins.
'''
return (1.0 * self.t2.seed ** self.alpha) / (self.t1.seed ** self.alpha + self.t2.seed ** self.alpha)
def get_winner(self) -> Team:
'''
Returns the winning Team object.
'''
rand = random.random()
return self.t1 if rand < self.prob else self.t2
def bits(self) -> bool:
'''
Return 0 if Team 1 wins, else return 1.
'''
return 0 if self.winner == self.t1 else 1
def to_json(self) -> str:
'''
Returns a json string represenation of a Match which is json serializeable.
'''
return json.dumps({
't1': self.t1.to_json(),
't2': self.t2.to_json(),
'rnd': self.rnd.value,
'winner': self.winner.to_json()
}) | [
"noreply@github.com"
] | kaahanmotwani.noreply@github.com |
d6be39975503b9ee74494283f53f20958abe1ebb | ed4ac9c066c717e633cabd9c5e2468536fa62eae | /csv2coco.py | 55562a83e0e01fbcdbaba8a9eee312ef138ed098 | [] | no_license | riven314/Image-AffineSynthesis | 252c9fc1eacc919dfb210fba6b76712e4542e5c9 | d6253a3f18a0c149d261fb1b48ebcb64828bb8df | refs/heads/master | 2022-03-26T02:36:20.032469 | 2020-01-07T09:29:06 | 2020-01-07T09:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,466 | py | """
REFERENCE:
https://github.com/spytensor/prepare_detection_dataset/blob/master/csv2coco.py
"""
import os
import json
import numpy as np
import pandas as pd
import glob
import cv2
import os
import shutil
from IPython import embed
from sklearn.model_selection import train_test_split
np.random.seed(41)
# 0 is background
classname_to_id = {"c": 1}
class Csv2CoCo:
    '''Convert csv-style box annotations into a COCO-format instance dict
    (the "images" / "annotations" / "categories" structure).
    Parameters
    ----------
    image_dir : directory containing the annotated images.
    total_annos : dict mapping image file name -> iterable of
        [x1, y1, x2, y2, label] rows, one row per object.
    '''
    def __init__(self, image_dir, total_annos):
        self.images = []
        self.annotations = []
        self.categories = []
        self.img_id = 0   # running COCO image id
        self.ann_id = 0   # running COCO annotation id
        self.image_dir = image_dir
        self.total_annos = total_annos
    def save_coco_json(self, instance, save_path):
        '''Serialize a COCO instance dict to save_path as indented JSON.'''
        # Use a context manager so the handle is flushed and closed; the old
        # code passed an open() result to json.dump and never closed it.
        with open(save_path, 'w') as f:
            json.dump(instance, f, ensure_ascii=False, indent=2)
    # from txt to COCO format
    def to_coco(self, keys):
        '''Build and return the COCO instance dict for the given image keys.'''
        self._init_categories()
        for key in keys:
            self.images.append(self._image(key))
            shapes = self.total_annos[key]
            for shape in shapes:
                # All columns but the last are box coordinates; the last is the label.
                bboxi = [int(cor) for cor in shape[:-1]]
                label = shape[-1]
                annotation = self._annotation(bboxi, label)
                self.annotations.append(annotation)
                self.ann_id += 1
            self.img_id += 1
        instance = {}
        instance['info'] = 'spytensor created'
        instance['license'] = ['license']
        instance['images'] = self.images
        instance['annotations'] = self.annotations
        instance['categories'] = self.categories
        return instance
    def _init_categories(self):
        '''Populate self.categories from the module-level classname_to_id map.'''
        for k, v in classname_to_id.items():
            category = {}
            category['id'] = v
            category['name'] = k
            self.categories.append(category)
    # init COCO image dict
    def _image(self, path):
        '''Build the COCO "image" record for one file (reads it for its size).'''
        image = {}
        print(path)  # progress indicator
        img = cv2.imread(os.path.join(self.image_dir, path), cv2.IMREAD_GRAYSCALE)
        image['height'] = img.shape[0]
        image['width'] = img.shape[1]
        image['id'] = self.img_id
        image['file_name'] = path
        return image
    # init COCO annotation dict
    def _annotation(self, shape, label):
        '''Build the COCO "annotation" record for one [x1, y1, x2, y2] box.'''
        # (Leftover debug prints of classname_to_id/label removed.)
        points = shape[:4]
        annotation = {}
        annotation['id'] = self.ann_id
        annotation['image_id'] = self.img_id
        annotation['category_id'] = int(classname_to_id[label])
        annotation['segmentation'] = self._get_seg(points)
        annotation['bbox'] = self._get_box(points)
        annotation['iscrowd'] = 0
        annotation['area'] = self._get_area(points)
        return annotation
    # box format: [x1, y1, w, h]
    def _get_box(self, points):
        '''Convert [x1, y1, x2, y2] corner points to COCO [x, y, w, h].'''
        min_x, min_y, max_x, max_y = points
        return [min_x, min_y, max_x - min_x, max_y - min_y]
    # calc area
    def _get_area(self, points):
        '''Return the box area; degenerate (zero-width/height) boxes are rejected.'''
        min_x, min_y, max_x, max_y = points
        assert max_x != min_x, '[ERROR] max_x cant be equal to min_x'
        assert max_y != min_y, '[ERROR] max_y cant be equal to min_y'
        return (max_x - min_x) * (max_y - min_y)
    # segmentation
    def _get_seg(self, points):
        '''Return the box outline as one flat 8-vertex polygon (corners plus
        edge midpoints), in COCO segmentation format.'''
        min_x, min_y, max_x, max_y = points
        h = max_y - min_y
        w = max_x - min_x
        return [[min_x, min_y, min_x, min_y + 0.5 * h, min_x, max_y,
                 min_x + 0.5 * w, max_y, max_x, max_y, max_x, max_y - 0.5 * h,
                 max_x, min_y, max_x - 0.5 * w, min_y]]
if __name__ == '__main__':
    # preprocessing: parse CLI arguments
    import argparse
    # NOTE(review): the description says "VOC-format" but the script emits
    # COCO format - the string is user-visible and left untouched here.
    parser = argparse.ArgumentParser(description = 'convert csv-format dataset to VOC-format dataset')
    parser.add_argument('--csv_base_dir', help = 'base dir to csv format data, the csv folder', type = str)
    parser.add_argument('--coco_base_dir', help = 'base dir to COCO format data, this dir build coco_', type = str)
    parser.add_argument('--test_fraction', help = 'how much portion for validation set (0.-1.)', type = float, default = 0.2)
    args = parser.parse_args()
    csv_base_dir = args.csv_base_dir
    csv_file = os.path.join(csv_base_dir, 'labels.csv')
    image_dir = os.path.join(csv_base_dir, 'images')
    saved_coco_path = args.coco_base_dir
    test_fraction = args.test_fraction
    assert os.path.isdir(csv_base_dir), '[ERROR] csv data base dir not exist: {}'.format(csv_base_dir)
    assert os.path.isdir(saved_coco_path), '[ERROR] COCO data base dir not exist: {}'.format(saved_coco_path)
    # collect the csv annotation rows, grouped per image file name
    total_csv_annotations = {}
    annotations = pd.read_csv(csv_file,header=None).values
    for annotation in annotations:
        key = annotation[0].split(os.sep)[-1]
        value = np.array([annotation[1:]])
        if key in total_csv_annotations.keys():
            total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)
        else:
            total_csv_annotations[key] = value
    # train/val split, stratified by the 5-character file-name prefix
    inst_cnt_set = set([i[:5] for i in total_csv_annotations.keys()])
    train_keys, val_keys = [], []
    for inst_cnt in inst_cnt_set:
        target_files = [i for i in total_csv_annotations.keys() if inst_cnt in i]
        tmp_train_files, tmp_val_files = train_test_split(target_files,
                                                          test_size = test_fraction,
                                                          random_state = 42)
        train_keys.extend(tmp_train_files)
        val_keys.extend(tmp_val_files)
    print("train_n:", len(train_keys), 'val_n:', len(val_keys))
    # init COCO folder structure (annotations + per-split image dirs)
    if not os.path.exists(os.path.join(saved_coco_path, 'coco', 'annotations')):
        os.makedirs(os.path.join(saved_coco_path, 'coco', 'annotations'))
    if not os.path.exists(os.path.join(saved_coco_path, 'coco', 'images', 'train2017')):
        os.makedirs(os.path.join(saved_coco_path, 'coco', 'images', 'train2017'))
    if not os.path.exists(os.path.join(saved_coco_path, 'coco', 'images', 'val2017')):
        os.makedirs(os.path.join(saved_coco_path, 'coco', 'images', 'val2017'))
    # convert training data into COCO format
    l2c_train = Csv2CoCo(image_dir = image_dir, total_annos = total_csv_annotations)
    train_instance = l2c_train.to_coco(train_keys)
    l2c_train.save_coco_json(train_instance, os.path.join(saved_coco_path, 'coco', 'annotations', 'instances_train2017.json'))
    # copy the split images into the COCO layout
    for file in train_keys:
        shutil.copy(os.path.join(image_dir, file), os.path.join(saved_coco_path, 'coco', 'images', 'train2017'))
    for file in val_keys:
        shutil.copy(os.path.join(image_dir, file), os.path.join(saved_coco_path, 'coco', 'images', 'val2017'))
    # convert val data into COCO format
    l2c_val = Csv2CoCo(image_dir = image_dir, total_annos = total_csv_annotations)
    val_instance = l2c_val.to_coco(val_keys)
l2c_val.save_coco_json(val_instance, os.path.join(saved_coco_path, 'coco', 'annotations', 'instances_val2017.json')) | [
"alexlauwh@gmail.com"
] | alexlauwh@gmail.com |
6081db8cf57edcbf12e218e1c892028efc17a9ee | 7bfa66eab25be71e8d7552e873904879fa86d3e4 | /WorkSpace/DL_STUDY/GradientDescent.py | 219df9a0ed7ece615c00ce82c36eb3eeff663acc | [] | no_license | imzkj/BigDataNote | 2393338cba0db1f32beb466c20d1253fff681605 | e1760e73af3b7581a07f3fe48ee438407cb52adc | refs/heads/master | 2022-12-24T12:05:58.238614 | 2020-11-14T11:00:23 | 2020-11-14T11:00:23 | 88,461,322 | 4 | 2 | null | 2022-12-16T11:39:09 | 2017-04-17T02:50:58 | Jupyter Notebook | UTF-8 | Python | false | false | 4,496 | py | #-*- coding: utf-8 -*-
# Fit y = theta1*x1 + theta2*x2 to the data below; the true relation is y = 3*x1 + 4*x2.
# BGD (batch gradient descent): every iteration uses ALL of the samples.
#   x1:  1  2  5  4
#   x2:  4  5  1  2
#   y : 19 26 19 20
input_x = [[1,4], [2,5], [5,1], [4,2]]  # training inputs (x1, x2)
y = [19,26,19,20]                       # training targets
theta = [1,1]                           # parameter vector, updated in place
loss = 10                               # sentinel so the loop body runs at least once
step_size = 0.01                        # learning rate
eps = 0.0001                            # convergence tolerance on the total loss
max_iters = 10000                       # safety cap on iterations
error = 0                               # per-sample loss term
iter_count = 0                          # iterations performed so far
while loss > eps and iter_count < max_iters:
    loss = 0
    grad1 = 0
    grad2 = 0
    # Accumulate the gradient over the full batch.
    for (x1, x2), target in zip(input_x, y):
        pred = theta[0] * x1 + theta[1] * x2
        grad1 = grad1 + (pred - target) * x1
        grad2 = grad2 + (pred - target) * x2
    # One batch update with the gradient averaged over the 4 samples.
    theta[0] = theta[0] - step_size * grad1 / 4
    theta[1] = theta[1] - step_size * grad2 / 4
    # Re-evaluate the (1/2m)-normalized squared-error loss with the new parameters.
    for (x1, x2), target in zip(input_x, y):
        pred = theta[0] * x1 + theta[1] * x2
        error = (1.0/(2*4)) * (pred - target) ** 2
        loss = loss + error
    iter_count += 1
print ('BGD theta: ',theta )
print ('BGD final loss: ', loss)
print ('BGD iters: ', iter_count)
# SGD (stochastic gradient descent): every iteration updates with ONE randomly
# chosen sample.  Fits y = theta1*x1 + theta2*x2 (true relation: y = 3*x1 + 4*x2).
import random
#   x1:  1  2  5  4
#   x2:  4  5  1  2
#   y : 19 26 19 20
input_x = [[1,4], [2,5], [5,1], [4,2]]  # training inputs
y = [19,26,19,20]                       # training targets
theta = [1,1]                           # parameter vector
loss = 10                               # sentinel so the loop is entered
step_size = 0.01                        # learning rate
eps = 0.0001                            # convergence tolerance
max_iters = 10000                       # iteration cap
error = 0                               # per-sample loss term
iter_count = 0                          # current iteration
while( loss > eps and iter_count < max_iters):  # iterate until converged
    loss = 0
    i = random.randint(0,3)  # pick one random sample for this update
    pred_y = theta[0]*input_x[i][0]+theta[1]*input_x[i][1]  # prediction
    theta[0] = theta[0] - step_size * (pred_y - y[i]) * input_x[i][0]
    theta[1] = theta[1] - step_size * (pred_y - y[i]) * input_x[i][1]
    # BUG FIX: the loss was summed over range(3), silently dropping the 4th
    # sample; evaluate it over all 4 samples like the BGD version does.
    # (The loop variable is also renamed so it no longer clobbers i.)
    for k in range (4):
        pred_y = theta[0]*input_x[k][0]+theta[1]*input_x[k][1]  # prediction
        error = 0.5*(pred_y - y[k])**2
        loss = loss + error
    iter_count += 1
print ('SGD theta: ',theta )
print ('SGD final loss: ', loss)
print ('SGD iters: ', iter_count)
# MBGD (mini-batch gradient descent): every iteration updates with a batch of
# b=2 samples.  Fits y = theta1*x1 + theta2*x2 (true relation: y = 3*x1 + 4*x2).
import random
#   x1:  1  2  5  4
#   x2:  4  5  1  2
#   y : 19 26 19 20
input_x = [[1,4], [2,5], [5,1], [4,2]]  # training inputs
y = [19,26,19,20]                       # training targets
theta = [1,1]                           # parameter vector
loss = 10                               # sentinel so the loop is entered
step_size = 0.01                        # learning rate
eps = 0.0001                            # convergence tolerance
max_iters = 10000                       # iteration cap
error = 0                               # per-sample loss term
iter_count = 0                          # current iteration
while( loss > eps and iter_count < max_iters):  # iterate until converged
    loss = 0
    # The batch is one random sample plus its (cyclically) adjacent neighbour.
    i = random.randint(0,3)  # random first sample
    j = (i+1)%4              # second sample: j = i+1 (mod 4)
    pred_y0 = theta[0]*input_x[i][0]+theta[1]*input_x[i][1]  # prediction 1
    pred_y1 = theta[0]*input_x[j][0]+theta[1]*input_x[j][1]  # prediction 2
    theta[0] = theta[0] - step_size * (1.0/2) * ((pred_y0 - y[i]) * input_x[i][0]+(pred_y1 - y[j]) * input_x[j][0])
    theta[1] = theta[1] - step_size * (1.0/2) * ((pred_y0 - y[i]) * input_x[i][1]+(pred_y1 - y[j]) * input_x[j][1])
    # BUG FIX: the loss was summed over range(3), silently dropping the 4th
    # sample; evaluate it over all 4 samples (loop variable renamed so it
    # no longer clobbers i).
    for k in range (4):
        pred_y = theta[0]*input_x[k][0]+theta[1]*input_x[k][1]  # prediction
        error = (1.0/(2*2))*(pred_y - y[k])**2  # per-sample loss term
        loss = loss + error                     # total loss
    iter_count += 1
print ('MBGD theta: ',theta )
print ('MBGD final loss: ', loss)
print ('MBGD iters: ', iter_count) | [
"kejia_zheng@suishouji.com"
] | kejia_zheng@suishouji.com |
90f3a1579b03c885d357847d03aa880941393c76 | 8b3929c64c241a7c378a8c61370ab0446d4e9627 | /mysite/settings.py | 8fd3b3d637fe2b35a611cac53bade48699469348 | [] | no_license | nianzifan/zn2169_hw15 | 0de2e2370bfa1d1687fc58f19bfe993f370b2db3 | f620f29b9f374876161b0a9be235ac1ad1caef37 | refs/heads/master | 2020-09-13T12:42:12.315119 | 2019-11-19T20:45:04 | 2019-11-19T20:45:04 | 222,784,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed here; move it to an
# environment variable before deploying this project anywhere public.
SECRET_KEY = 'l8utt-tawuy9unru1pr5-v7x14flsvef50$2_r7buqarxdk^1e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"zn2169@columbia.edu"
] | zn2169@columbia.edu |
0c625d404b4ddb129d061a2da51d7713e3882858 | ec765b7bfab4d064811545059f4adb91dc7925bb | /src/collective/hello/testing.py | 65edffc85b01fddd34758ab2d0b2429fcd027234 | [] | no_license | collective/collective.hello | e552bdc4384aa08105b1954b4a52938079ff18ce | 124dd900a63a40a6cdefea5118863ea5bbcb34e8 | refs/heads/master | 2023-06-26T15:39:12.434036 | 2015-12-15T14:15:24 | 2015-12-15T14:15:48 | 47,541,044 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | # -*- coding: utf-8 -*-
from plone.app.contenttypes.testing import PLONE_APP_CONTENTTYPES_FIXTURE
from plone.app.robotframework.testing import REMOTE_LIBRARY_BUNDLE_FIXTURE
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PloneSandboxLayer
from plone.testing import z2
from zope.configuration import xmlconfig
import collective.hello
class CollectiveHelloLayer(PloneSandboxLayer):
    """Plone test layer for the collective.hello add-on."""

    # Build on top of the plone.app.contenttypes test fixture.
    defaultBases = (PLONE_APP_CONTENTTYPES_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        """Load this package's configure.zcml into the test Zope instance."""
        xmlconfig.file(
            'configure.zcml',
            collective.hello,
            context=configurationContext
        )

    def setUpPloneSite(self, portal):
        """Install the add-on's default GenericSetup profile into the test site."""
        applyProfile(portal, 'collective.hello:default')
# Shared fixture instance used as the base of every layer below.
COLLECTIVE_HELLO_FIXTURE = CollectiveHelloLayer()

# Integration-testing layer built on the shared fixture.
COLLECTIVE_HELLO_INTEGRATION_TESTING = IntegrationTesting(
    bases=(COLLECTIVE_HELLO_FIXTURE,),
    name='CollectiveHelloLayer:IntegrationTesting'
)
# Functional-testing layer built on the shared fixture.
COLLECTIVE_HELLO_FUNCTIONAL_TESTING = FunctionalTesting(
    bases=(COLLECTIVE_HELLO_FIXTURE,),
    name='CollectiveHelloLayer:FunctionalTesting'
)
# Acceptance (Robot Framework) layer: adds the remote library bundle and a
# ZServer fixture on top of the shared fixture.
COLLECTIVE_HELLO_ACCEPTANCE_TESTING = FunctionalTesting(
    bases=(
        COLLECTIVE_HELLO_FIXTURE,
        REMOTE_LIBRARY_BUNDLE_FIXTURE,
        z2.ZSERVER_FIXTURE
    ),
    name='CollectiveHelloLayer:AcceptanceTesting'
)
| [
"simon.previdente@free.fr"
] | simon.previdente@free.fr |
d3249edfbd3bfe038c605e6a6c80a59a783bba05 | 4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446 | /Python基础笔记/19/代码/2.协程.py | b87a5c7b38c3ac5ebbe4f72a39d93ec54e0ed60b | [] | no_license | zhenguo96/test1 | fe21510aea7feb674e52fd7a86d4177666f841c5 | 0d8de7e73e7e635d26462a0bc53c773d999498be | refs/heads/master | 2020-05-03T13:09:53.592103 | 2019-04-06T07:08:47 | 2019-04-06T07:08:47 | 178,646,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # 协程
def sub():
    """Toy coroutine demonstrating the generator send() protocol.

    Prime with next(); the first send() supplies x, the second supplies y.
    The middle yield hands back x + 1 to the caller.
    """
    print("开始")
    received = yield                       # paused here after priming
    print("x = ", received)
    second = yield received + 1            # hands received + 1 back to send()
    print("x = ", received, "y = ", second)
    yield                                  # final pause so the last send() returns
# Drive the coroutine defined above.
x1 = sub()
next(x1)           # prime: advance to the first bare `yield`
print(x1.send(3))  # resumes with x=3; prints the value produced by `yield x + 1`
x1.send(4)         # resumes with y=4 and runs up to the final bare `yield`
| [
"1148421588@qq.com"
] | 1148421588@qq.com |
6fad72c47d5212ee3f01f4250924bd36d19c42dd | 9d59216e63ca62b8daa9033a3bab3fae390b3d07 | /solutions/python/shortest_word.py | 85d63cfeb9bfb1b65279cd40d6ae594140e98b08 | [
"MIT"
def find_short(s):
    """Return the length of the shortest space-separated word in *s*.

    BUGFIX: the previous implementation passed ``cmp=`` to ``sorted``,
    a Python 2-only keyword that raises TypeError on Python 3; sorting
    the whole list was also unnecessary just to find the minimum.
    """
    return min(len(word) for word in s.split(' '))
import unittest
class TestShortestWord(unittest.TestCase):
    """Unit tests for find_short (codewars kata sample cases)."""

    def test_shortest_word(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual instead.
        self.assertEqual(find_short("bitcoin take over the world maybe who knows perhaps"), 3)
        self.assertEqual(find_short("turns out random test cases are easier than writing out basic ones"), 3)
        self.assertEqual(find_short("lets talk about javascript the best language"), 3)
        self.assertEqual(find_short("i want to travel the world writing code one day"), 1)
        self.assertEqual(find_short("Lets all go on holiday somewhere very cold"), 2)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"kylgrvs@gmail.com"
] | kylgrvs@gmail.com |
50de13f4dab09e45adff88729a6fda2d7a4f4061 | 11207733bbb065123f14e42d5f2b12cbf2446317 | /prog/quast/quast_libs/fastaparser.py | 64f479b670ed6f56e8e94bcb2226424240d9294a | [] | no_license | evaldocosta/melc | 81a93bbe84cf8a55a5754931d903816d9d9210e2 | f9fd9f6994efe82cf1557f164cd8c85c43bd80a3 | refs/heads/master | 2022-03-17T17:24:42.571608 | 2022-02-27T22:45:42 | 2022-02-27T22:45:42 | 65,914,720 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,179 | py | ############################################################################
# Copyright (c) 2015-2018 Saint Petersburg State University
# Copyright (c) 2011-2015 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
from __future__ import with_statement
import os
import sys
import gzip
import zipfile
try:
from collections import OrderedDict
except ImportError:
from quast_libs.site_packages.ordered_dict import OrderedDict
try:
import bz2
except ImportError:
from quast_libs.site_packages import bz2
if sys.version_info[0] == 3:
import io
from quast_libs import qconfig
# There is a pyfasta package -- http://pypi.python.org/pypi/pyfasta/
# Use it!
from quast_libs.log import get_logger
logger = get_logger(qconfig.LOGGER_DEFAULT_NAME)
def _get_fasta_file_handler(fpath):
    """Open *fpath* and return a text-mode file handle, transparently
    decompressing .gz/.gzip, .bz2/.bzip2 and .zip archives based on the
    file extension. Logs an error and exits the process on access or
    format problems (via the QUAST logger's exit_with_code mechanism).
    """
    fasta_file = None
    _, ext = os.path.splitext(fpath)
    if not os.access(fpath, os.R_OK):
        logger.error('Permission denied accessing ' + fpath, to_stderr=True, exit_with_code=1)
    if ext in ['.gz', '.gzip']:
        fasta_file = gzip.open(fpath, mode="rt")
    elif ext in ['.bz2', '.bzip2']:
        fasta_file = bz2.BZ2File(fpath, mode="r")
        # BZ2File yields bytes; wrap it so callers always get text
        fasta_file = _read_compressed_file(fasta_file)
    elif ext in ['.zip']:
        try:
            zfile = zipfile.ZipFile(fpath, mode="r")
        except Exception:
            exc_type, exc_value, _ = sys.exc_info()
            logger.error('Can\'t open zip file: ' + str(exc_value), exit_with_code=1)
        else:
            names = zfile.namelist()
            if len(names) == 0:
                logger.error('Reading %s: zip archive is empty' % fpath, exit_with_code=1)
            if len(names) > 1:
                # only a single-member archive is supported; fall back to the first entry
                logger.warning('Zip archive must contain exactly one file. Using %s' % names[0])
            try:
                fasta_file = zfile.open(names[0])
                fasta_file = _read_compressed_file(fasta_file)
            except AttributeError:
                # ZipFile.open is unavailable on very old interpreters
                logger.error('Use python 2.6 or newer to work with contigs directly in zip.', exit_with_code=20)
    else:
        # no recognized compression extension: treat as a plain FASTA file
        try:
            fasta_file = open(fpath)
        except IOError:
            exc_type, exc_value, _ = sys.exc_info()
            logger.exception(exc_value, exit_code=1)
    return fasta_file
def _read_compressed_file(compressed_file):
    """Return a text-mode handle over the decompressed payload.

    On Python 3 the whole (bytes) payload is read and re-wrapped in a
    TextIOWrapper so the caller receives str lines; on Python 2 the
    original handle is returned unchanged.
    """
    if sys.version_info[0] != 3:
        return compressed_file
    payload = compressed_file.read()
    return io.TextIOWrapper(io.BytesIO(payload))
def __get_entry_name(line):
    """Return the identifier of a FASTA header line.

    The identifier is the text after '>' up to the first whitespace:
    ">chr1 length=100500; coverage=15;" ---> "chr1"
    """
    header_body = line[1:]
    return header_body.split()[0]
def get_chr_lengths_from_fastafile(fpath):
    """
    Takes filename of FASTA-file
    Returns an OrderedDict mapping each sequence name to its length
    """
    chr_lengths = OrderedDict()
    l = 0               # running length of the current sequence
    chr_name = None     # name of the sequence currently being read
    fasta_file = _get_fasta_file_handler(fpath)
    for raw_line in fasta_file:
        # handle files that use bare '\r' line endings
        if raw_line.find('\r') != -1:
            lines = raw_line.split('\r')
        else:
            lines = [raw_line]
        for line in lines:
            if not line:
                continue
            if line[0] == '>':
                if l: # not the first sequence in FASTA
                    chr_lengths[chr_name] = l
                    l = 0
                chr_name = __get_entry_name(line)
            else:
                l += len(line.strip())
    # record the last (or only) sequence
    # NOTE(review): a zero-length record immediately followed by another header
    # is silently dropped because of the `if l:` guard above - confirm intended.
    chr_lengths[chr_name] = l
    fasta_file.close()
    return chr_lengths
def get_genome_stats(fasta_fpath, skip_ns=False):
    """Scan a FASTA file and collect genome statistics.

    Returns a 3-tuple:
      * total genome size (with N positions excluded when *skip_ns* is True),
      * dict mapping chromosome name -> sequence length,
      * dict mapping chromosome name -> set of 1-based positions holding 'N'.
    """
    total_len = 0
    chrom_lengths = {}
    n_positions = {}
    for header, sequence in read_fasta(fasta_fpath):
        chrom = header.split()[0]
        n_positions[chrom] = {pos + 1 for pos, base in enumerate(sequence) if base == 'N'}
        chrom_lengths[chrom] = len(sequence)
        total_len += len(sequence)
        if skip_ns:
            total_len -= len(n_positions[chrom])
    return total_len, chrom_lengths, n_positions
def create_fai_file(fasta_fpath):
    """Write a samtools-style .fai index next to *fasta_fpath*.

    Each output line holds: sequence name, sequence length, byte offset of
    the sequence start, characters per line (stripped), and bytes per line
    (including the newline), tab-separated.
    """
    l = 0               # length (stripped characters) of the current sequence
    total_offset = 0    # byte offset of the current sequence's first base
    chr_offset = 0      # bytes occupied by the current sequence's data lines
    chr_name = None
    fai_fpath = fasta_fpath + '.fai'
    fai_fields = []
    with open(fasta_fpath) as in_f:
        for raw_line in in_f:
            # handle files that use bare '\r' line endings
            if raw_line.find('\r') != -1:
                lines = raw_line.split('\r')
            else:
                lines = [raw_line]
            for line in lines:
                if not line:
                    continue
                if line[0] == '>':
                    if l: # flush the previous sequence's record
                        fai_fields.append([chr_name, l, total_offset, len(chr_line.strip()), len(chr_line)])
                        total_offset += chr_offset
                        l = 0
                        chr_offset = 0
                    chr_name = __get_entry_name(line)
                    total_offset += len(line)
                else:
                    if not l:
                        # first data line of this sequence: used to report line widths
                        chr_line = line
                    l += len(line.strip())
                    chr_offset += len(line)
    # flush the final sequence
    # NOTE(review): if the file has headers but no sequence data, `chr_line`
    # is unbound here and this raises NameError - confirm expected inputs.
    fai_fields.append([chr_name, l, total_offset, len(chr_line.strip()), len(chr_line)])
    with open(fai_fpath, 'w') as out_f:
        for fields in fai_fields:
            out_f.write('\t'.join([str(fs) for fs in fields]) + '\n')
def split_fasta(fpath, output_dirpath):
    """
    Takes filename of FASTA-file and directory to output
    Creates separate FASTA-files for each sequence in FASTA-file
    Returns nothing
    Oops, similar to: pyfasta split --header "%(seqid)s.fasta" original.fasta

    BUGFIX: the input file is now opened via a context manager; previously
    ``open(fpath)`` in the for-header was never closed (handle leak).
    """
    if not os.path.isdir(output_dirpath):
        os.mkdir(output_dirpath)
    outFile = None
    with open(fpath) as in_f:
        for line in in_f:
            if line[0] == '>':
                # a new record starts: close the previous per-sequence file
                if outFile:
                    outFile.close()
                outFile = open(os.path.join(output_dirpath, __get_entry_name(line) + '.fa'), 'w')
            if outFile:
                outFile.write(line)
    if outFile: # if filename is empty
        outFile.close()
def read_fasta(fpath):
    """
    Generator that yields FASTA entries as (name, sequence) tuples.
    """
    handle = _get_fasta_file_handler(fpath)
    current_name = ''
    chunks = []
    have_entry = False   # becomes True once the first header has been seen
    for raw_line in handle:
        for line in raw_line.split('\r'):
            if not line:
                continue
            if line.startswith('>'):
                if have_entry:
                    yield current_name, ''.join(chunks)
                have_entry = True
                current_name = __get_entry_name(line)
                chunks = []
            else:
                chunks.append(line.strip())
    # emit the trailing entry (or headerless sequence data, if any)
    if current_name or chunks:
        yield current_name, ''.join(chunks)
    handle.close()
def read_fasta_one_time(fpath):
    """
    Materialize every FASTA entry as a list of (name, seq) tuples.
    """
    return list(read_fasta(fpath))
def read_fasta_str(fpath):
    """
    Concatenate all sequence lines of the file (headers excluded) into one string.
    """
    handle = _get_fasta_file_handler(fpath)
    pieces = []
    for raw_line in handle:
        for line in raw_line.split('\r'):
            if line and not line.startswith('>'):
                pieces.append(line.strip())
    handle.close()
    return ''.join(pieces)
def print_fasta(fasta):
    """Pretty-print (name, seq) entries to stdout, wrapping sequences at 60 chars."""
    for header, sequence in fasta:
        print('>%s' % header)
        wrapped = [sequence[start:start + 60] for start in range(0, len(sequence), 60)]
        for chunk in wrapped:
            print(chunk)
def write_fasta(fpath, fasta, mode='w'):
    """Write (name, seq) entries to *fpath* in FASTA format, 60 chars per line."""
    with open(fpath, mode) as out:
        for header, sequence in fasta:
            out.write('>%s\n' % header)
            for start in range(0, len(sequence), 60):
                out.write(sequence[start:start + 60] + '\n')
def comp(letter):
    """Return the complement of a single nucleotide (case-insensitive).

    Raises KeyError for characters outside A/T/C/G/N.
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
    return complements[letter.upper()]
def rev_comp(seq):
    """Return the reverse complement of *seq*, preserving letter case.

    Characters outside ATCGN/atcgn are silently dropped.
    """
    complement = {a: b for a, b in zip('ATCGNatcgn', 'TAGCNtagcn')}
    out = []
    for base in reversed(seq):
        out.append(complement.get(base, ''))
    return ''.join(out)
| [
"evaldodacosta@gmail.com"
] | evaldodacosta@gmail.com |
2e6c96eebb6bfd7df53fac17a2a7426d3b7e2769 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/metrics/regression.py | 044f99f1a540fd04348675877a6d73fce7eb1cd9 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 9,758 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from abc import abstractmethod
from functools import partial
from typing import Any, Union
import torch
from monai.metrics.utils import do_metric_reduction
from monai.utils import MetricReduction
from .metric import CumulativeIterationMetric
class RegressionMetric(CumulativeIterationMetric):
    """
    Base class for regression metrics.
    Input `y_pred` is compared with ground truth `y`.
    Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
    `y_preds` and `y` can be a list of channel-first Tensor (CHW[D]) or a batch-first Tensor (BCHW[D]).
    Args:
        reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}
            Define the mode to reduce computation result. Defaults to ``"mean"``.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
            Here `not_nans` count the number of not nans for the metric, thus its shape equals to the shape of the metric.
    """

    def __init__(
        self,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__()
        self.reduction = reduction
        self.get_not_nans = get_not_nans

    def aggregate(self):  # type: ignore
        """Reduce the accumulated per-iteration results with self.reduction."""
        data = self.get_buffer()
        if not isinstance(data, torch.Tensor):
            raise ValueError("the data to aggregate must be PyTorch Tensor.")
        f, not_nans = do_metric_reduction(data, self.reduction)
        return (f, not_nans) if self.get_not_nans else f

    def _check_shape(self, y_pred: torch.Tensor, y: torch.Tensor) -> None:
        """Raise ValueError unless y_pred and y share the same >=2-D shape."""
        if y_pred.shape != y.shape:
            raise ValueError(
                "y_pred and y shapes dont match, received y_pred: [{}] and y: [{}]".format(y_pred.shape, y.shape)
            )
        # also check if there is atleast one non-batch dimension i.e. num_dims >= 2
        if len(y_pred.shape) < 2:
            raise ValueError("either channel or spatial dimensions required, found only batch dimension")

    @abstractmethod
    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Subclasses implement the actual per-batch metric computation."""
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor):  # type: ignore
        """Validate the inputs, then delegate to the subclass metric."""
        if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
            raise ValueError("y_pred and y must be PyTorch Tensor.")
        self._check_shape(y_pred, y)
        return self._compute_metric(y_pred, y)
class MSEMetric(RegressionMetric):
    r"""Mean Squared Error metric.

    .. math::
        \operatorname {MSE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i} \right)^{2}.

    More info: https://en.wikipedia.org/wiki/Mean_squared_error
    The prediction `y_pred` (regression model output) is compared against the
    ground truth `y`; both must be real-valued tensors.

    Args:
        reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}
            Mode used to reduce the per-batch result. Defaults to ``"mean"``.
        get_not_nans: if True, aggregate() returns (metric, not_nans).
    """

    def __init__(
        self,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # element-wise squaring applied to the error term
        self.sq_func = partial(torch.pow, exponent=2.0)

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return compute_mean_error_metrics(y_pred.float(), y.float(), func=self.sq_func)
class MAEMetric(RegressionMetric):
    r"""Mean Absolute Error metric.

    .. math::
        \operatorname {MAE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left|y_i-\hat{y_i}\right|.

    More info: https://en.wikipedia.org/wiki/Mean_absolute_error
    The prediction `y_pred` (regression model output) is compared against the
    ground truth `y`; both must be real-valued tensors.

    Args:
        reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}
            Mode used to reduce the per-batch result. Defaults to ``"mean"``.
        get_not_nans: if True, aggregate() returns (metric, not_nans).
    """

    def __init__(
        self,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # element-wise absolute value applied to the error term
        self.abs_func = torch.abs

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return compute_mean_error_metrics(y_pred.float(), y.float(), func=self.abs_func)
class RMSEMetric(RegressionMetric):
    r"""Root Mean Squared Error metric.

    .. math::
        \operatorname {RMSE}\left(Y, \hat{Y}\right) ={ \sqrt{ \frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i}\right)^2 } } \
        = \sqrt {\operatorname{MSE}\left(Y, \hat{Y}\right)}.

    More info: https://en.wikipedia.org/wiki/Root-mean-square_deviation
    The prediction `y_pred` (regression model output) is compared against the
    ground truth `y`; both must be real-valued tensors.

    Args:
        reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}
            Mode used to reduce the per-batch result. Defaults to ``"mean"``.
        get_not_nans: if True, aggregate() returns (metric, not_nans).
    """

    def __init__(
        self,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # element-wise squaring applied to the error term
        self.sq_func = partial(torch.pow, exponent=2.0)

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        mse = compute_mean_error_metrics(y_pred.float(), y.float(), func=self.sq_func)
        return torch.sqrt(mse)
class PSNRMetric(RegressionMetric):
    r"""Peak Signal-To-Noise Ratio metric.

    .. math::
        \operatorname{PSNR}\left(Y, \hat{Y}\right) = 20 \cdot \log_{10} \left({\mathit{MAX}}_Y\right) \
        -10 \cdot \log_{10}\left(\operatorname{MSE\left(Y, \hat{Y}\right)}\right)

    More info: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    Implementation reference:
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py line 4139

    The prediction `y_pred` (regression model output) is compared against the
    ground truth `y`; both must be real-valued tensors.

    Args:
        max_val: dynamic range of the images/volumes (difference between the
            maximum and the minimum allowed values, e.g. 255 for a uint8 image).
        reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}
            Mode used to reduce the per-batch result. Defaults to ``"mean"``.
        get_not_nans: if True, aggregate() returns (metric, not_nans).
    """

    def __init__(
        self,
        max_val: Union[int, float],
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        self.max_val = max_val
        # element-wise squaring applied to the error term
        self.sq_func = partial(torch.pow, exponent=2.0)

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> Any:
        mse = compute_mean_error_metrics(y_pred.float(), y.float(), func=self.sq_func)
        return 20 * math.log10(self.max_val) - 10 * torch.log10(mse)
def compute_mean_error_metrics(y_pred: torch.Tensor, y: torch.Tensor, func) -> torch.Tensor:
    """Apply *func* to the error ``y - y_pred`` and average it over every
    non-batch dimension, returning one value per batch item (shape [B, 1]).

    Reduction of the batch dimension itself happens later via
    do_metric_reduction() in the calling metric class.
    """
    per_element = func(y - y_pred)
    flattened = torch.flatten(per_element, start_dim=1)
    return torch.mean(flattened, dim=-1, keepdim=True)
| [
"noreply@github.com"
] | gagandaroach.noreply@github.com |
29d0e0ec5b2fe97cccdec9c22eb438321a537b2f | 68405fe5bec0b374867f44effda2cba3b6c1ebaa | /src/wscript | f00c671bc10cc29f13fb109cbb091ca449571257 | [] | no_license | unnonouno/oxelon | fce3dfd3d6d617d0268e34ed875e152989d60859 | 3686863b81db2dc23996cf305001e2ad56332086 | refs/heads/master | 2020-04-04T01:53:12.896018 | 2014-01-23T17:08:59 | 2014-01-23T17:08:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | def build(bld):
bld.recurse(['oxelon', 'cmd'])
| [
"unnonouno@gmail.com"
] | unnonouno@gmail.com | |
30d43569f10434bcbbcdb184ca740ab472a7f4f7 | 79e04c2bdf8ad0d44770fa7266c8d8a3b8f6679a | /server/search.py | 7949bffe550f405eab2e3e859618f890562a6f2b | [] | no_license | AmitProspeed/Songle | 313374082827fffd2b93490672440a6522e7fa4f | bdedcff371bc5dc08cee46bd43ca0951534ab61f | refs/heads/main | 2023-05-04T01:36:56.126669 | 2021-05-19T18:23:38 | 2021-05-19T18:23:38 | 368,962,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,239 | py | #assuming database is filled, and somebody is searching for a song by lyrics
#query database with the lyrics the user input, based on search algorithm
#return song names that match
import re
import regex
import gc
import sys
import copy
import nltk
import math
import bson
import pickle
import os.path
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
from collections import defaultdict
from array import array
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from functools import reduce
from pymongo import MongoClient
from os import path
class Search:
    """tf-idf ranked search over a song collection stored in MongoDB.

    The inverted index maps each (lower-cased, stopword-filtered, lemmatized)
    term to a postings list ``[[songId, [pos1, pos2, ...]], ...]``.
    Three query styles are supported:
      * OWQ - one-word query, e.g.  ``love``
      * FTQ - free-text query, e.g. ``love story`` (matches any term)
      * PQ  - phrase query, e.g. ``"love story"`` (terms must appear in order)
    """

    def __init__(self):
        self.index = defaultdict(list)  # inverted index {'word': [[id1, [p0, p2]], ...]}
        self.stop_words = set(stopwords.words('english'))  # stopwords set (and, the, a, an etc)
        self.lemmatizer = WordNetLemmatizer()  # e.g. car/cars are both reduced to car
        self.client = MongoClient("mongodb+srv://testUser:bigData@cluster0.xn34j.mongodb.net")
        self.db = self.client["MusicDataset"]
        self.songIndex = self.db["index"]
        self.songInfo = self.db["songInfo"]
        self.numSongs = 0
        self.tf = defaultdict(list)    # term frequencies of terms in documents
        self.df = defaultdict(int)     # document frequencies of terms in the corpus
        self.idf = defaultdict(float)  # inverse document frequencies of terms in the corpus

    # ------------------------------------------------------------------
    # Index construction
    # ------------------------------------------------------------------
    def parseSongsFromDB(self):
        '''Return the id, title, lyrics, artist and album of every song in MongoDB.'''
        print("Reading songs from MongoDB")
        songs = []
        for song in self.songInfo.find():
            d = {}
            d['id'] = song['_id']
            d['title'] = song['title']
            d['lyrics'] = song['lyrics']
            d['artist'] = song['artist']
            d['album'] = song['album']
            songs.append(d)
        return songs

    def getWords(self, data):
        '''Preprocess text: lower-case, strip non-alphanumerics, drop stopwords, lemmatize.'''
        data = data.lower()
        data = re.sub(r'[^a-z0-9 ]', ' ', data)  # spaces instead of non-alphanumeric characters
        data_tokens = word_tokenize(data)
        data = [w for w in data_tokens if not w in self.stop_words]
        data = [self.lemmatizer.lemmatize(w) for w in data]
        return data

    def writeIndexToDB(self):
        '''Persist the index to indexFile.txt (despite the name, it writes to disk).

        First line: number of songs.  Every other line:
        ``term|docID1:pos1,pos2;docID2:pos1|tf1,tf2|idf``
        '''
        f = open("indexFile.txt", 'w')
        print(self.numSongs, file=f)
        self.numSongs = float(self.numSongs)  # force float division for idf below
        for term in self.index.keys():
            postinglist = []
            for p in self.index[term]:
                docID = p[0]
                positions = p[1]
                postinglist.append(':'.join([str(docID), ','.join(map(str, positions))]))
            postingData = ';'.join(postinglist)
            tfData = ','.join(map(str, self.tf[term]))
            idfData = '%.4f' % (self.numSongs / self.df[term])
            print('|'.join((term, postingData, tfData, idfData)), file=f)
        f.close()

    def readIndex(self):
        '''Load the index, term frequencies and idf values from indexFile.txt.'''
        f = open("indexFile.txt", 'r')
        # the first line holds the number of documents
        self.numSongs = int(f.readline().rstrip())
        for line in f:
            line = line.rstrip()
            term, postings, tf, idf = line.split('|')
            postings = postings.split(';')
            postings = [x.split(':') for x in postings]
            # BUGFIX: positions must be materialized as lists. The previous
            # ``map(int, ...)`` stored single-use iterators that also made
            # copy.deepcopy() fail inside pqSongs(), breaking phrase queries.
            postings = [[str(x[0]), list(map(int, x[1].split(',')))] for x in postings]
            self.index[term] = postings
            # term frequencies
            tf = tf.split(',')
            self.tf[term] = list(map(float, tf))
            # inverse document frequency
            self.idf[term] = float(idf)
        f.close()

    def createIndex(self):
        '''Build the inverted index from MongoDB, or load the cached one from disk.'''
        if path.exists("indexFile.txt"):
            print("Index exists, reading existing index.")
            self.readIndex()
        else:
            print("Index doesn't exist, creating it.")
            songDictArray = self.parseSongsFromDB()
            for songDict in songDictArray:
                gc.disable()  # avoid GC churn while building large temporary dicts
                if songDict != {}:
                    songId = str(songDict['id'])
                    songData = '\n'.join((songDict['title'], songDict['lyrics'], songDict['artist'], songDict['album']))
                    songTokenize = self.getWords(songData)
                    self.numSongs += 1
                    # per-song index: {"word": [id, [positions]]}
                    songItem = {}
                    for position, word in enumerate(songTokenize):
                        try:
                            songItem[word][1].append(position)
                        except KeyError:  # first occurrence of this word in the song
                            songItem[word] = [songId, array('I', [position])]
                    # normalize the document vector
                    norm = 0
                    for word, index in songItem.items():
                        norm += len(index[1]) ** 2
                    norm = math.sqrt(norm)
                    # tf and df weights
                    for word, index in songItem.items():
                        self.tf[word].append(round((len(index[1]) / norm), 4))
                        self.df[word] += 1
                    # merge the current song item into the main index
                    for word, index in songItem.items():
                        self.index[word].append(index)
                gc.enable()
            # idf for every word in the index
            for term in self.index.keys():
                self.idf[term] = round((self.numSongs / self.df[term]), 4)
            self.writeIndexToDB()
            print("Index saved to Disk")

    # ------------------------------------------------------------------
    # Querying
    # ------------------------------------------------------------------
    def dotProduct(self, vec1, vec2):
        '''Dot product of two equal-length vectors (0 if the lengths differ).'''
        if len(vec1) != len(vec2):
            return 0
        return sum([x * y for x, y in zip(vec1, vec2)])

    def getPostings(self, terms):
        '''Postings lists for terms that are all guaranteed to be in the index.'''
        return [self.index[term] for term in terms]

    def getSongsFromPostings(self, postings):
        '''Song ids only, extracted from each (non-empty) postings list.'''
        return [[x[0] for x in p] for p in postings]

    def intersectLists(self, lists):
        '''Intersection of all given lists (result order is arbitrary).'''
        if len(lists) == 0:
            return []
        # start intersecting from the smallest list
        lists.sort(key=len)
        return list(reduce(lambda x, y: set(x) & set(y), lists))

    def rankSongResults(self, terms, songIds):
        '''Rank the candidate songIds by tf-idf score (term-at-a-time evaluation).'''
        songVectors = defaultdict(lambda: [0] * len(terms))  # songId -> tf vector
        queryVector = [0] * len(terms)                       # idf weight per query term
        for termIndex, term in enumerate(terms):
            if term not in self.index:
                continue
            queryVector[termIndex] = self.idf[term]
            for songIndex, (songId, postings) in enumerate(self.index[term]):
                if songId in songIds:
                    songVectors[songId][termIndex] = self.tf[term][songIndex]
        # score = tf vector . idf vector; sort descending
        songScores = [[self.dotProduct(curSongVec, queryVector), songId] for songId, curSongVec in songVectors.items()]
        songScores.sort(reverse=True)
        rankedSongResults = [x[1] for x in songScores]
        return rankedSongResults

    def owq(self, q):
        '''One Word Query.'''
        originalQuery = q
        q = self.getWords(q)
        if len(q) == 0:
            print('Invalid query')
            return []
        elif len(q) > 1:
            # preprocessing expanded the query into several terms
            return self.ftq(originalQuery)
        # q contains only 1 term
        term = q[0]
        if term not in self.index:
            print('Query returned no match')
            return []
        else:
            p = self.index[term]
            p = [x[0] for x in p]
            return self.rankSongResults(q, p)

    def ftq(self, q):
        """Free Text Query: union of the postings of every query term."""
        q = self.getWords(q)
        if len(q) == 0:
            print('Invalid query')
            return []
        li = set()
        for term in q:
            try:
                p = self.index[term]
                p = [x[0] for x in p]
                li = li | set(p)
            except KeyError:
                # term not in index
                pass
        if not li:
            print('Query returned no match')
            return []
        li = list(li)
        return self.rankSongResults(q, li)

    def pq(self, q):
        '''Phrase Query: all terms must appear contiguously and in order.'''
        originalQuery = q
        q = self.getWords(q)
        if len(q) == 0:
            print('Invalid query')
            return []
        elif len(q) == 1:
            return self.owq(originalQuery)
        phraseSongResults = self.pqSongs(q)
        return self.rankSongResults(q, phraseSongResults)

    def pqSongs(self, q):
        """Return ids of songs containing the exact phrase (q is the term list)."""
        # every term must occur somewhere in the corpus
        for term in q:
            if term not in self.index:
                print('Query returned no match')
                return []
        postings = self.getPostings(q)  # all the terms in q are in the index
        songs = self.getSongsFromPostings(postings)
        # songs that contain every term of the query
        songs = self.intersectLists(songs)
        # keep only the postings of those songs
        for i in range(len(postings)):
            postings[i] = [x for x in postings[i] if x[0] in songs]
        # deep copy before mutating positions in place
        postings = copy.deepcopy(postings)
        # shift the i-th term's positions left by i: a phrase match then shows
        # up as the SAME position in every term's list
        for i in range(len(postings)):
            for j in range(len(postings[i])):
                postings[i][j][1] = [x - i for x in postings[i][j][1]]
        # intersect the shifted position lists per song
        result = []
        for i in range(len(postings[0])):
            li = self.intersectLists([x[i][1] for x in postings])
            if li == []:
                continue
            else:
                result.append(postings[0][i][0])  # append the docid to the result
        return result

    def queryType(self, q):
        '''Classify the query: PQ (quoted phrase), FTQ (multi-word) or OWQ.'''
        if '"' in q:
            return 'PQ'  # Phrased Query - "What I have done" (match all words in order)
        elif len(q.split()) > 1:
            return 'FTQ'  # Free Text Query - match any of the query words
        else:
            return 'OWQ'  # One Word Query

    def queryIndex(self, query):
        '''Dispatch the query to the matching handler; returns ranked song ids.'''
        qt = self.queryType(query)
        if qt == 'OWQ':
            return self.owq(query)
        elif qt == 'FTQ':
            return self.ftq(query)
        elif qt == 'PQ':
            return self.pq(query)
        return
if __name__=="__main__":
#http://www.ardendertat.com/2011/05/30/how-to-implement-a-search-engine-part-1-create-index/
#http://www.ardendertat.com/2011/05/31/how-to-implement-a-search-engine-part-2-query-index/
#http://www.ardendertat.com/2011/07/17/how-to-implement-a-search-engine-part-3-ranking-tf-idf/
print("Loading Index...Please wait")
obj=Search()
obj.createIndex()
print("Index loaded successfully")
while True:
print("Enter search query")
q=sys.stdin.readline()
if q=='':
break
results = obj.queryIndex(q)
if results:
#display results
print(results)
| [
"amitbaranroy@gmail.com"
] | amitbaranroy@gmail.com |
3745e86fb8119a35d2f2f9caf1792e4035e1ef5a | 1ea184317bc7f54850a54853a0815a8947ea227d | /api/migrations/0001_initial.py | 9dc8fb8ecb8532e626a9450ee6a80e90f6be4025 | [] | no_license | loskuta42/api_yamdb | f94d2e8bba6a941e3b0d79a87bb20f67bb7b4252 | 685013943190697706289523b35925a165d8f06f | refs/heads/master | 2023-07-16T20:49:59.920392 | 2021-08-10T12:56:03 | 2021-08-10T12:56:03 | 372,467,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,821 | py | # Generated by Django 3.0.5 on 2021-06-05 13:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Категория')),
('slug', models.SlugField(max_length=30, unique=True, verbose_name='Ссылка')),
],
options={
'verbose_name': 'Категория',
'verbose_name_plural': 'Категории',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Дата публикации комментария')),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(max_length=30, unique=True)),
],
options={
'verbose_name': 'Жанр',
'verbose_name_plural': 'Жанры',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Дата публикации отзыва')),
('score', models.IntegerField()),
('rating', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Title',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Произведение')),
('year', models.IntegerField(blank=True, null=True, verbose_name='Год выпуска')),
('description', models.TextField()),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='titles', to='api.Category', verbose_name='Категория')),
],
options={
'verbose_name': 'Произведение',
'verbose_name_plural': 'Произведения',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='TitleGenre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Genre')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Title')),
],
),
migrations.AddField(
model_name='title',
name='genre',
field=models.ManyToManyField(through='api.TitleGenre', to='api.Genre'),
),
]
| [
"loskuta42@yandex.ru"
] | loskuta42@yandex.ru |
09a1c2eadde05478740f790fe20f7e55020adb08 | dfa77374ba7f3f8bf7965e646527dedc622085d7 | /gcastle/castle/algorithms/gradient/pnl/torch/utils.py | 7a978d2524e188ff65788d77e735f87e62c67e54 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/trustworthyAI | 7aa72721df59c3c4f75b7b9a037c5b71fb1284a1 | 238cbc41865ddf629bb6ae92c2e1445be27f98b8 | refs/heads/master | 2023-08-28T13:36:10.929075 | 2023-08-15T12:51:46 | 2023-08-15T12:51:46 | 248,501,097 | 832 | 206 | Apache-2.0 | 2023-08-15T12:51:47 | 2020-03-19T12:46:47 | Python | UTF-8 | Python | false | false | 2,031 | py | # coding=utf-8
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import Dataset, DataLoader
class SampleDataset(Dataset):
    """Wrap one or more aligned sample arrays as a torch ``Dataset``.

    Parameters
    ----------
    data : sequence of array-likes
        One or more arrays; every array must have the same number of
        samples along its first axis.

    Raises
    ------
    ValueError
        If the arrays disagree on the number of samples.
    """

    def __init__(self, *data):
        super().__init__()
        sample_counts = {x.shape[0] for x in data}
        if len(sample_counts) != 1:
            raise ValueError("The number of samples in all data must be equal.")
        self.data = data
        self.n_samples = data[0].shape[0]

    def __len__(self):
        # Number of samples shared by every array.
        return self.n_samples

    def __getitem__(self, index):
        # One slice per wrapped array, aligned at `index`.
        return [d[index] for d in self.data]
def batch_loader(*x, batch_size=64, **kwargs):
    """Build a ``DataLoader`` over the given aligned arrays.

    Extra keyword arguments are forwarded verbatim to ``DataLoader``.
    """
    return DataLoader(SampleDataset(*x), batch_size=batch_size, **kwargs)
def compute_jacobian(func, inputs):
    """Jacobian of ``func`` evaluated at ``inputs``.

    ``create_graph=True`` keeps the graph so higher-order derivatives
    can be taken of the result.

    See Also
    --------
    torch.autograd.functional.jacobian
    """
    jac = torch.autograd.functional.jacobian(func, inputs, create_graph=True)
    return jac
def compute_entropy(x):
    """Differential entropy of a Gaussian fitted to ``x``.

    Fits Normal(mean(x), std(x)) and returns its analytic entropy.
    """
    fitted = torch.distributions.Normal(loc=x.mean(), scale=x.std())
    return fitted.entropy()
| [
"zhangkeli1@huawei.com"
] | zhangkeli1@huawei.com |
69d188c6f93746880df493c31c31a89e158a418c | e4d9d73b13355abdc6f0832b213d17668a5f3e83 | /Restaurant.py | 1dbcf0bc0498073f71a36cf7313bb6952e1730c2 | [] | no_license | liuhpleon1/MenuApp | 1a8f49ff5fc68ff5aa05717721104c1927b637df | 2af5ebf85654b0c9f6d462dc6c55321b9cdf6b55 | refs/heads/master | 2021-06-10T08:24:19.073434 | 2017-01-26T10:34:53 | 2017-01-26T10:34:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | from flask import Flask, render_template, request, url_for, redirect, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from DB_setup import base, Restaurant, Menu
engine = create_engine('sqlite:///restaurant_menu.db')
base.metadata.bind = engine
DBsession = sessionmaker(bind = engine)
session = DBsession()
app = Flask(__name__)
@app.route('/')
@app.route('/restaurants/')
def restaurants():
    """Render the list of all restaurants (also serves the site root)."""
    # NOTE(review): `session` is a single module-level SQLAlchemy session
    # shared by every request — acceptable for this demo, but presumably not
    # safe under a multi-threaded production server; confirm before deploying.
    restaurants = session.query(Restaurant).all();
    return render_template('restaurant.html',restaurants = restaurants)
@app.route('/restaurants/<int:restaurant_id>/')
def restaurant_Menu(restaurant_id):
    """Render the menu page for a single restaurant."""
    # .one() raises NoResultFound for an unknown id — presumably surfaces as a
    # 500 error; TODO confirm whether a 404 response is intended instead.
    restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
    items = session.query(Menu).filter_by(restaurant_id=restaurant_id).all()
    return render_template('menu.html',restaurant = restaurant,items = items)
'''
output = ""
output+=restaurant.name+"<br>"
output+="<br>"
for item in items:
output+=item.name+"<br>"
output+=item.price+"<br>"
output+=item.description+"<br>"
return output
'''
@app.route('/restaurant/<int:restaurant_id>/addItem/',methods=['GET','POST'])
def addItem(restaurant_id):
    """Create a new menu item: show the form on GET, insert on POST."""
    if request.method != 'POST':
        # GET: render the creation form.
        return render_template('add.html', restaurant_id=restaurant_id)
    form = request.form
    new_item = Menu(name=form['name'],
                    description=form['description'],
                    course=form['course'],
                    price=form['price'],
                    restaurant_id=restaurant_id)
    session.add(new_item)
    session.commit()
    flash("item created")
    return redirect(url_for('restaurant_Menu', restaurant_id=restaurant_id))
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit/',methods=['GET','POST'])
def edit(restaurant_id, menu_id):
    """Edit an existing menu item: show the form on GET, update on POST."""
    item = session.query(Menu).filter_by(id=menu_id).one()
    if request.method != 'POST':
        # GET: render the edit form pre-filled with the current item.
        return render_template('edit.html', restaurant_id=restaurant_id,
                               menu_id=item.id, item=item)
    # POST: copy the submitted fields onto the item and persist.
    for field in ('name', 'description', 'price', 'course'):
        setattr(item, field, request.form[field])
    session.add(item)
    session.commit()
    flash("item edited")
    return redirect(url_for('restaurant_Menu', restaurant_id=restaurant_id))
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete/',methods=['GET','POST'])
def delete(restaurant_id,menu_id):
    """Delete a menu item: show a confirmation page on GET, delete on POST.

    Bug fix: the original called ``session.delete`` without ``session.commit``,
    so the row was only removed from the in-memory session and the deletion
    was never written to the database (``addItem``/``edit`` both commit).
    """
    name = session.query(Menu).filter_by(id=menu_id).one()
    if request.method == 'POST':
        session.delete(name)
        session.commit()  # persist the deletion (was missing)
        flash("item deleted")
        return redirect(url_for('restaurant_Menu',restaurant_id=restaurant_id))
    else:
        return render_template('delete.html',restaurant_id=restaurant_id,menu_id=name.id,name = name)
@app.route('/restaurants/<int:restaurant_id>/menu/JSON/')
def transferToJSON(restaurant_id):
    """Return one restaurant's menu as JSON: {"Menus": [...]}."""
    items = session.query(Menu).filter_by(restaurant_id=restaurant_id).all()
    # `transfer` is presumably a serializing property on the Menu model
    # (declared in DB_setup) — TODO confirm the exact payload shape.
    return jsonify(Menus = [i.transfer for i in items])
if __name__ == '__main__':
    # NOTE(review): a hard-coded secret key plus debug=True on 0.0.0.0 is only
    # acceptable for local development — debug mode exposes the interactive
    # Werkzeug debugger to anyone who can reach port 5000.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run('0.0.0.0',port = 5000)
| [
"liuhpleon@gmail.com"
] | liuhpleon@gmail.com |
5b4e3462306a648b157f6340293fdfc26fdf4b03 | d2e128aa8c13014b01f3a3add200c0a577a87b20 | /newspaper_project/settings.py | afa2abe90a16bd37585af6040a2056d8ed40bc8c | [] | no_license | wmbusse/news | 4e3cfe8bcba6f30c18e967af3ebd093384d1845b | 7e52517d12e5ce1890d462ff39b79729e408590a | refs/heads/master | 2023-08-01T06:37:50.224820 | 2020-07-27T18:09:43 | 2020-07-27T18:09:43 | 282,973,767 | 0 | 0 | null | 2021-09-22T19:38:40 | 2020-07-27T17:51:27 | Python | UTF-8 | Python | false | false | 4,039 | py | """
Django settings for newspaper_project project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment (e.g. os.environ['DJANGO_SECRET_KEY']) instead.
SECRET_KEY = 'r+w#wpv+=r4z%+z6ku9uzuj7*(*8c7wtjw9#1i_l7(t1)bwp50'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts any Host header; restrict to the real domain(s)
# before deploying.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
# My Apps
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
'articles.apps.ArticlesConfig',
# 3rd party apps
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'newspaper_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'newspaper_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS =[os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE ='whitenoise.storage.CompressionManifestStaticFilesStorage'
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Outgoing email via SendGrid SMTP.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
DEFAULT_FROM_EMAIL = 'admin@3ktechdesigns.us'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'apikey'
# SECURITY NOTE(review): this is a live SendGrid API key committed to source
# control — revoke it immediately and load the replacement from the
# environment (e.g. os.environ['SENDGRID_API_KEY']).
EMAIL_HOST_PASSWORD = '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'
EMAIL_PORT = '587'
EMAIL_USE_TLS = True
| [
"wmbusse@gmail.com"
] | wmbusse@gmail.com |
f04c84b7d356a6ac5d58c10a9bbba43264002465 | d399440c95e932776dad729797df541c841832f4 | /accounts/migrations/0005_auto_20210103_1313.py | 28f6c02962235c087f210dc534392088339e058c | [] | no_license | Shanan93/Build_Django_Vezzeta_Site | acb0e0dece5a700c3777e2deb8bac782cdc81384 | 36260a90ea7822a1a33154548cd2e68d7f9812a0 | refs/heads/master | 2023-02-12T00:34:14.363582 | 2021-01-06T13:36:02 | 2021-01-06T13:36:02 | 326,609,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | # Generated by Django 2.2.7 on 2021-01-03 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20210103_1156'),
]
operations = [
migrations.AddField(
model_name='profile',
name='adress',
field=models.CharField(default=1, max_length=50, verbose_name='المحافظة: '),
preserve_default=False,
),
migrations.AddField(
model_name='profile',
name='adress_details',
field=models.CharField(default=1, max_length=50, verbose_name='العنوان بالتفصيل: '),
preserve_default=False,
),
migrations.AddField(
model_name='profile',
name='doctor',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='دكتور ؟'),
),
migrations.AddField(
model_name='profile',
name='doctor_specialist',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='متخصص في؟'),
),
migrations.AddField(
model_name='profile',
name='phone_number',
field=models.CharField(default=1, max_length=50, verbose_name='الهاتف'),
preserve_default=False,
),
migrations.AddField(
model_name='profile',
name='subtitle',
field=models.CharField(default=1, max_length=50, verbose_name='نبذه عنك: '),
preserve_default=False,
),
migrations.AddField(
model_name='profile',
name='waiting_time',
field=models.IntegerField(blank=True, null=True, verbose_name='مدة الانتظار '),
),
migrations.AddField(
model_name='profile',
name='working_hours',
field=models.CharField(default=1, max_length=50, verbose_name='عدد ساعات العمل: '),
preserve_default=False,
),
]
| [
"Mohamed.shanan48@gmail.com"
] | Mohamed.shanan48@gmail.com |
b1e654c2984d2ebd3ab95a77fda83557950cc4a3 | ed51198afe77d2000806665891f53c9ed1217c41 | /pythonfile.py | 35da26c0dbe1d00075c7b570891193f8ca741d96 | [] | no_license | mrubakh/Statistical-Regression-and-Python-Web-Scraping-Colgate-Enrollment-Numbers | 46c27a7361c0b744b679a7813119453f1c71e64e | 6f283598d20be043c17a0b415922fed5b695bc3b | refs/heads/master | 2020-05-30T10:46:19.876338 | 2019-07-03T15:59:14 | 2019-07-03T15:59:14 | 189,680,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import csv
# Scrape Colgate course-enrollment numbers into a CSV.
# Fixes vs. original: the output file is now closed via a context manager
# (it was opened and never closed), and anchors without an href no longer
# crash the loop (`"..." in None` raised TypeError).
dept_list = []  # all department prefixes scraped from the search form
# Hard-coded for simplicity; a few years of terms for comparison.
term_list = ['201802', '201801', '201702', '201701', '201602', '201601']

url = "http://www.colgate.edu/academics/courseofferings"

# The course-offerings page lists departments as checkbox inputs; collect
# each checkbox's `value` attribute as the department code.
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
thisTable = soup.find('table')
for checkbox in thisTable.findAll('input'):
    value = checkbox.get('value')
    if value is not None:
        dept_list.append(value)
print(dept_list)

# Result-page URLs are assembled as su1 + term + su2 + dept + su3.
su1 = "http://www.colgate.edu/academics/courseofferings/results?term="
su2 = "&core=&distribution=&credits=&status=&dept="
su3 = "&before=22:30&after=07:00&level=&meets=M;T;W;R;F;&instructor=&firstYear="

with open('C:\\Users\\vigor\\OneDrive\\Documents\\Sophomore Year\\data', 'a') as outfile:
    cf = csv.writer(outfile, lineterminator='\n')
    cf.writerow(["Term", "DeptCode", "Class Name", "Actual Enrollment", "Maximum Enrollment"])
    for term in term_list:
        for dept in dept_list:
            urlt = su1 + term + su2 + dept + su3
            pageSoup = BeautifulSoup(requests.get(urlt).text, "html.parser")
            for link in pageSoup.findAll('a'):
                href = link.get("href")
                # Guard: some anchors carry no href attribute.
                if href is None or "/academics/coursedetails" not in href:
                    continue
                detail_soup = BeautifulSoup(
                    requests.get("http://www.colgate.edu" + href).text,
                    "html.parser")
                class_name = detail_soup.find(id="lbCourseDetailHeading").string
                # Strip the non-breaking-space run between course code and title.
                nbsp = class_name.find("\xa0")
                class_name = class_name[:nbsp] + " " + class_name[nbsp + 3:]
                # Drill down to the enrollment table; the "total" row is last.
                find_table = detail_soup.findAll("div", {"class": "coursePad"})[1].table.find('table').div.findAll('table')[1]
                total_row = find_table.findAll("tr")[-1]
                actual_enroll = total_row.findAll('td')[3].string
                max_enroll = total_row.findAll('td')[2].string
                cf.writerow([term, dept, class_name, actual_enroll, max_enroll])
| [
"noreply@github.com"
] | mrubakh.noreply@github.com |
bf35281edad5c88f696d993ba8f8d3de915998fa | 20fa612597a0cbc920d400864e09d54d6847aa29 | /data_visualizer.py | b0aae8a02f45745609f39175140e8f4b6947b05f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | inspiros/pcmvda | 87670fbcec04e39385e5dc5ba2c4172b5841bb5c | a05c0d5a297983675550f029495ff96073506161 | refs/heads/master | 2023-06-07T20:40:52.095547 | 2023-05-27T10:35:15 | 2023-05-27T10:35:15 | 307,844,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,629 | py | import os
from typing import Optional, Union, Sequence, Tuple
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from matplotlib.axes import Axes
from matplotlib.cm import get_cmap
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
plt.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
# 'text.usetex': True,
'pgf.rcfonts': False,
})
def _cycle(iterable, index, default=None):
if len(iterable) == 1 and default is not None:
return default
return iterable[index % len(iterable)]
def _tensor_depth(tensor):
if hasattr(tensor, 'shape'):
return len(tensor.shape)
elif hasattr(tensor, '__getitem__') and len(tensor) > 0 and not isinstance(tensor, str):
return 1 + _tensor_depth(tensor[0])
return 0
def _to_categorical(values, unique_values=None):
if unique_values is None:
unique_values = np.unique(values)
if isinstance(unique_values, np.ndarray):
unique_values = list(unique_values)
return np.array([unique_values.index(_) for _ in values], dtype=np.int)
def _orthogonal(X, dim):
if dim == 1:
return X[:, 0], np.zeros(X.shape[0])
elif dim == 2:
return X[:, 0], X[:, 1]
return X[:, 0], X[:, 1], X[:, 2]
def _group_by(Xs, values, unique_values=None):
    """Partition samples by their label.

    For a single data matrix (depth <= 2) returns one sub-array per label
    in ``unique_values``; for a stack of views (depth > 2) returns that
    per-label partition for every view.
    """
    if unique_values is None:
        unique_values = np.unique(values)
    depth = _tensor_depth(Xs)
    if not isinstance(Xs, np.ndarray):
        Xs = [np.array(X) for X in Xs] if depth > 2 else np.array(Xs)
    if depth <= 2:
        return [Xs[np.where(values == c)[0]] for c in unique_values]
    return [[view[np.where(values == c)[0]] for c in unique_values]
            for view in Xs]
# noinspection DuplicatedCode
class DataVisualizer:
ecmaps = ['none', 'black']
linestyles = ['-', ':', '--', '-.']
borderoptions = [
dict(edgecolor='none'),
*(dict(edgecolor='black', linestyle=ls) for ls in ['-', ':', '--', '-.'])
]
markers = ['o', '^', 's', '*', 'p', 'P', 'v', 'X', 'D', 'H', "2", '$...$']
figure_params = dict(figsize=(4, 4))
ax_params = dict(facecolor=(0.9, 0.9, 0.9))
plot_params = dict(linestyle='--', marker='o')
scatter_params = dict(linewidth=1, alpha=0.6)
legend_params = dict(fancybox=True, framealpha=0.4)
grid_params = dict(which='both', linestyle=':')
axe3d_scale = 1. # 1.22
axe3d_title_offset = 1.08
    def __init__(self):
        # True while `pause()` is keeping the figure alive for incremental redraws.
        self.pausing: bool = False
        # Lazily created matplotlib figure shared by all plotting calls.
        self.fig: Optional[plt.Figure] = None
        # Axes created so far (presumably filled in by `_init_axe` — TODO confirm).
        self.axes = []
def plot(self,
*args,
title=None,
ax=None,
**kwargs):
ax = self._init_axe(ax=ax)
ax.plot(*args, **{**self.plot_params, **kwargs})
if title is not None:
ax.set_title(title)
def bar(self,
data,
err=None,
groups=None,
categories=None,
width=0.2,
group_spacing=0.05,
cat_spacing=0.5,
bar_label=False,
bar_label_padding=1,
bar_label_rotation=0,
percentage=False,
pallete=None,
group_legend=False,
xlabel=None,
ylabel=None,
title=None,
ax=None,
**kwargs,
):
n_groups = data.shape[0]
n_categories = data.shape[1]
if err is None:
err = [None] * n_groups
if groups is None:
groups = np.arange(n_groups)
if categories is None:
categories = np.arange(n_categories)
offsets = np.arange(-(n_groups - 1) / 2 * (width + group_spacing),
(n_groups - 1) / 2 * (width + group_spacing) + np.finfo(np.float32).eps,
width + group_spacing)
ax = self._init_axe(ax)
origins = np.linspace(0,
((n_groups * width + (n_groups - 1) * group_spacing) + cat_spacing) * n_categories,
n_categories)
if pallete is None:
cmap = colors.ListedColormap(seaborn.color_palette(n_colors=n_groups, as_cmap=True))
else:
cmap = get_cmap(pallete, n_groups)
for gi in range(n_groups):
bars = ax.bar(origins + offsets[gi], data[gi],
yerr=err[gi],
capsize=width * 20 if err is not None else None,
width=width,
color=cmap(gi),
alpha=0.8,
**kwargs)
if bar_label:
ax.bar_label(bars, labels=[f'{_:.01%}' if percentage else f'{_:.01}' for _ in data[gi]],
padding=bar_label_padding, rotation=bar_label_rotation, fontsize=18)
ax.set_xlim(xmin=origins[0] - (n_groups - 1) * (width + group_spacing),
xmax=origins[-1] + (n_groups - 1) * (width + group_spacing))
ax.set_xticks(origins)
ax.set_xticklabels(categories, fontsize=16)
ax.set_ylim(ymin=0, ymax=1.05 if percentage else None)
if percentage:
ax.set_yticks(np.arange(0, 1.1, .1))
ax.set_yticklabels([f"{_}%" for _ in range(0, 101, 10)], fontsize=16)
ax.grid(axis='y', **self.grid_params)
if group_legend:
ax.add_artist(self.group_legend(groups, cmap, loc='upper center', ncol=n_groups))
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=20)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=20)
if title is not None:
ax.set_title(title)
return ax, origins
def scatter(self,
X,
y=None,
y_unique=None,
z=None,
z_unique=None,
dim=None,
normalize=True,
kde=False,
cmap=None,
title=None,
class_legend=True,
grid=True,
ax=None):
if dim is None:
if X.shape[1] <= 3:
dim = X.shape[1]
else:
dim = 2
n_samples = X.shape[0]
if y is None:
y = np.array([0 for _ in range(n_samples)])
with_categories = z is not None
if with_categories and not _tensor_depth(z) == 1:
raise ValueError(f"Categories must be 1 dimensional array.")
if y_unique is None:
y_unique = np.unique(y)
n_classes = len(y_unique)
if z_unique is None:
z_unique = np.unique(z)
X = self.embed(X, _to_categorical(y, unique_values=y_unique), dim, normalize=normalize)
dim = X.shape[1]
if not 0 < dim <= 3:
raise ValueError(f"Unable to plot {dim}-dimensional data.")
if dim == 3 and z_unique is not None and len(z_unique) > 1:
raise ValueError("Categories are not availabe for 3d plot.")
ax = self._init_axe(ax=ax, dim=dim)
X = _group_by(X, y, unique_values=y_unique)
z = _group_by(z, y, unique_values=y_unique) if with_categories else [None for _ in X]
if cmap is None:
cmap = get_cmap('Spectral', n_classes)
for i, (X_i, z_i) in enumerate(zip(X, z)):
if z_i is not None:
X_i = _group_by(X_i, z_i, z_unique)
for j, X_ij in enumerate(X_i):
if len(X_ij) > 0:
ax.scatter(*_orthogonal(X_ij, dim=dim),
marker=self.markers[0],
color=cmap(i),
s=50,
**_cycle(self.borderoptions, j),
**self.scatter_params)
else:
ax.scatter(*_orthogonal(X_i, dim=dim),
marker=self.markers[0],
color=cmap(i),
s=50,
**self.scatter_params)
if kde:
if dim == 1:
inset_ax = ax.inset_axes([0.0, 0.8, 1.0, 0.2])
for i, X_i in enumerate(X):
seaborn.kdeplot(np.squeeze(X_i, 1),
color=cmap(i),
fill=True,
ax=inset_ax)
inset_ax.set_xlim(ax.get_xlim())
inset_ax.patch.set_alpha(0)
inset_ax.set_facecolor(ax.get_facecolor())
inset_ax.set_xticks([])
inset_ax.set_yticks([])
inset_ax.set_xlabel('KDE')
inset_ax.set_ylabel('')
elif dim == 2:
# inset_ax = ax.inset_axes([0.0, 0.0, 1.0, 1.0])
# for i, X_i in enumerate(X):
# seaborn.kdeplot(x=X_i[:, 0],
# y=X_i[:, 1],
# levels=10,
# color=cmap(i),
# fill=True,
# alpha=self.scatter_params['alpha'] / 2,
# ax=inset_ax)
# inset_ax.set_xlim(ax.get_xlim())
# inset_ax.patch.set_alpha(0)
# inset_ax.set_facecolor(ax.get_facecolor())
# inset_ax.set_xticks([])
# inset_ax.set_yticks([])
# inset_ax.set_xlabel('')
# inset_ax.set_ylabel('')
inset_ax_x = ax.inset_axes([0.0, 0.9, 0.9, 0.1])
inset_ax_y = ax.inset_axes([0.9, 0.0, 0.1, 0.9])
for i, X_i in enumerate(X):
seaborn.kdeplot(x=X_i[:, 0],
color=cmap(i),
fill=True,
ax=inset_ax_x)
seaborn.kdeplot(y=X_i[:, 1],
color=cmap(i),
fill=True,
ax=inset_ax_y)
inset_ax_x.set_xlim(ax.get_xlim())
inset_ax_x.patch.set_alpha(0)
inset_ax_x.set_facecolor(ax.get_facecolor())
inset_ax_x.set_xticks([])
inset_ax_x.set_yticks([])
inset_ax_x.set_xlabel('')
inset_ax_x.set_ylabel('')
inset_ax_y.set_ylim(ax.get_ylim())
inset_ax_y.patch.set_alpha(0)
inset_ax_y.set_facecolor(ax.get_facecolor())
inset_ax_y.set_xticks([])
inset_ax_y.set_yticks([])
inset_ax_y.set_xlabel('')
inset_ax_y.set_ylabel('')
ax.set_xlim(xmin=None, xmax=ax.get_xlim()[0] + np.diff(ax.get_xlim()).item() / .9)
ax.set_ylim(ymin=None, ymax=ax.get_ylim()[0] + np.diff(ax.get_ylim()).item() / .9)
ax.annotate('KDE', (.95, .95), xycoords='axes fraction',
horizontalalignment='center', verticalalignment='center')
if class_legend:
ax.add_artist(self.class_legend(y_unique, cmap, z_unique))
ax.grid(b=grid, **self.grid_params)
ax.axes.get_xaxis().set_visible(grid)
ax.axes.get_yaxis().set_visible(grid)
if normalize:
ax.set_xticks(np.arange(np.ceil(ax.get_xlim()[0] * 2) / 2, np.ceil(ax.get_xlim()[1] * 2) / 2, .5))
ax.set_xticks(np.arange(np.ceil(ax.get_xlim()[0] * 2) / 2, np.ceil(ax.get_xlim()[1] * 2) / 2, .5))
if dim == 3:
ax.set_zticks(np.arange(np.ceil(ax.get_zlim()[0] * 2) / 2, np.ceil(ax.get_zlim()[1] * 2) / 2, .5))
if dim == 3:
ax.axes.get_zaxis().set_visible(grid)
if title is not None:
ax.set_title(title, y=self.axe3d_title_offset if dim == 3 else 1)
return ax
def multiview_scatter(self,
Xs,
y=None,
y_unique=None,
z=None,
z_unique=None,
dim=None,
normalize=True,
kde=False,
cmap=None,
view_titles=None,
title=None,
class_legend=True,
view_legend=True,
grid=True,
ax=None):
if dim is None:
if Xs[0].shape[1] <= 3:
dim = Xs[0].shape[1]
else:
dim = 2
n_views = len(Xs)
n_samples = Xs[0].shape[0] if n_views > 0 else 0
if y is None:
y = [np.array([0 for _ in range(n_samples)]) for _ in range(n_views)]
elif _tensor_depth(y) == 1:
y = [y for _ in range(n_views)]
elif _tensor_depth(y) != 2:
raise ValueError("Labels must be 1 or 2 dimensional array.")
with_categories = z is not None
if with_categories:
if _tensor_depth(z) == 1:
z = [z for _ in range(n_views)]
elif _tensor_depth(z) != 2:
raise ValueError("Categories must be 1 or 2 dimensional array.")
if y_unique is None:
y_unique = np.unique(np.concatenate([np.unique(y_v) for y_v in y]))
n_classes = len(y_unique)
if z_unique is None and z is not None:
z_unique = np.unique(np.concatenate([np.unique(z_v) for z_v in z]))
Xs = self.embed(Xs, y, dim, normalize=normalize)
dims = [X.shape[1] for X in Xs]
max_dim = np.max(dims)
if not 0 < max_dim <= 3:
raise ValueError(f"Unable to plot {max_dim}-dimensional data.")
if dim == 3 and z_unique is not None and len(z_unique) > 1:
raise ValueError("Categories are not availabe for 3d plot.")
ax = self._init_axe(ax=ax, dim=max_dim)
if cmap is None:
cmap = get_cmap('Spectral', n_classes)
for v in range(n_views):
if dims[v] < max_dim:
Xs[v] = [np.array([np.concatenate([x, np.zeros(max_dim - dims[v])], axis=0) for x in X_i])
for X_i in Xs[v]]
X_v = _group_by(Xs[v], y[v], unique_values=y_unique)
z_v = _group_by(z[v], y[v], unique_values=y_unique) if with_categories else [None for _ in X_v]
for i, (X_i, z_i) in enumerate(zip(X_v, z_v)):
if z_i is not None:
X_i = _group_by(X_i, z_i, z_unique)
for j, X_ij in enumerate(X_i):
ax.scatter(*_orthogonal(X_ij, dim=dim),
marker=_cycle(self.markers, v),
color=cmap(i),
s=50,
**_cycle(self.borderoptions, j),
**self.scatter_params)
else:
ax.scatter(*_orthogonal(X_i, dim=dim),
marker=_cycle(self.markers, v),
color=cmap(i),
s=50,
**self.scatter_params)
if kde:
if dim == 1:
inset_ax = ax.inset_axes([0.0, 0.8, 1.0, 0.2])
X_all_by_cls = _group_by(np.concatenate(Xs), np.concatenate(y))
for i, X_i in enumerate(X_all_by_cls):
seaborn.kdeplot(np.squeeze(X_i, 1),
color=cmap(i),
fill=True,
ax=inset_ax)
del X_all_by_cls
inset_ax.set_xlim(ax.get_xlim())
inset_ax.patch.set_alpha(0)
inset_ax.set_facecolor(ax.get_facecolor())
inset_ax.set_xticks([])
inset_ax.set_yticks([])
inset_ax.set_xlabel('KDE')
inset_ax.set_ylabel('')
elif dim == 2:
# inset_ax = ax.inset_axes([0.0, 0.0, 1.0, 1.0])
# X_all_by_cls = _group_by(np.concatenate(Xs), np.concatenate(y))
# for i, X_i in enumerate(X_all_by_cls):
# seaborn.kdeplot(x=X_i[:, 0],
# y=X_i[:, 1],
# levels=3,
# color=cmap(i),
# fill=True,
# alpha=self.scatter_params['alpha'] / 5,
# ax=inset_ax)
# del X_all_by_cls
# inset_ax.set_xlim(ax.get_xlim())
# inset_ax.set_ylim(ax.get_ylim())
# inset_ax.patch.set_alpha(0)
# inset_ax.set_facecolor(ax.get_facecolor())
# inset_ax.set_xticks([])
# inset_ax.set_yticks([])
# inset_ax.set_xlabel('')
# inset_ax.set_ylabel('')
inset_ax_x = ax.inset_axes([0.0, 0.9, 0.9, 0.1])
inset_ax_y = ax.inset_axes([0.9, 0.0, 0.1, 0.9])
X_all_by_cls = _group_by(np.concatenate(Xs), np.concatenate(y))
for i, X_i in enumerate(X_all_by_cls):
seaborn.kdeplot(x=X_i[:, 0],
color=cmap(i),
fill=True,
ax=inset_ax_x)
seaborn.kdeplot(y=X_i[:, 1],
color=cmap(i),
fill=True,
ax=inset_ax_y)
del X_all_by_cls
inset_ax_x.set_xlim(ax.get_xlim())
inset_ax_x.patch.set_alpha(0)
inset_ax_x.set_facecolor(ax.get_facecolor())
inset_ax_x.set_xticks([])
inset_ax_x.set_yticks([])
inset_ax_x.set_xlabel('')
inset_ax_x.set_ylabel('')
inset_ax_y.set_ylim(ax.get_ylim())
inset_ax_y.patch.set_alpha(0)
inset_ax_y.set_facecolor(ax.get_facecolor())
inset_ax_y.set_xticks([])
inset_ax_y.set_yticks([])
inset_ax_y.set_xlabel('')
inset_ax_y.set_ylabel('')
ax.set_xlim(xmin=None, xmax=ax.get_xlim()[0] + np.diff(ax.get_xlim()).item() / .9)
ax.set_ylim(ymin=None, ymax=ax.get_ylim()[0] + np.diff(ax.get_ylim()).item() / .9)
ax.annotate('KDE', (.95, .95), xycoords='axes fraction',
horizontalalignment='center', verticalalignment='center')
if class_legend:
ax.add_artist(self.class_legend(y_unique, cmap, z_unique))
if view_legend:
ax.add_artist(self.view_legend(n_views, view_titles))
ax.grid(b=grid, **self.grid_params)
ax.axes.get_xaxis().set_visible(grid)
ax.axes.get_yaxis().set_visible(grid)
if normalize:
ax.set_xticks(np.arange(np.ceil(ax.get_xlim()[0] * 2) / 2, np.ceil(ax.get_xlim()[1] * 2) / 2, .5))
ax.set_xticks(np.arange(np.ceil(ax.get_xlim()[0] * 2) / 2, np.ceil(ax.get_xlim()[1] * 2) / 2, .5))
if dim == 3:
ax.set_zticks(np.arange(np.ceil(ax.get_zlim()[0] * 2) / 2, np.ceil(ax.get_zlim()[1] * 2) / 2, .5))
if dim == 3:
ax.axes.get_zaxis().set_visible(grid)
if title is not None:
ax.set_title(title, y=self.axe3d_title_offset if max_dim == 3 else 1)
return ax
def resize(self, width, height):
    """Set the canvas size of the managed figure.

    Args:
        width: horizontal size in inches.
        height: vertical size in inches.

    Returns: None
    """
    size_inches = (width, height)
    self.fig.set_size_inches(*size_inches)
def pause(self,
          title: Optional[str] = None,
          grids: Sequence[Union[str, Tuple[Union[int, slice], ...]]] = 'auto',
          size: Optional[Tuple[float, float]] = None,
          adjust: Optional[Tuple[float, ...]] = (0.06, 0.06, 0.94, 0.94, 0.15, 0.15),
          interval: float = 0.001) -> None:
    """Re-render the figure and briefly yield to the GUI event loop.

    Args:
        title: window title (figure suptitle); unchanged when None.
        grids: layout of axes inside the window.
        size: figure size in inches; unchanged when None.
        adjust: (left, bottom, right, top, wspace, hspace) margins.
        interval: how long to pause, in seconds.

    Returns: None
    """
    self.pausing = True
    if title is not None:
        self.fig.suptitle(title)
    self._rearrange_fig(grids, adjust)
    if size is not None:
        self.resize(size[0], size[1])
    plt.pause(interval)
def show(self,
         title: Optional[str] = None,
         grids: Sequence[Union[str, Tuple[Union[int, slice], ...]]] = 'auto',
         figsize: Optional[Tuple[float, float]] = None,
         adjust: Optional[Tuple[float, ...]] = (0.06, 0.06, 0.94, 0.94, 0.15, 0.15),
         block: bool = True,
         clear: bool = True) -> None:
    """Open the figure in an interactive window.

    Args:
        title: window title (figure suptitle); unchanged when None.
        grids: layout of axes inside the window.
        figsize: figure size in inches; unchanged when None.
        adjust: (left, bottom, right, top, wspace, hspace) margins.
        block: whether plt.show blocks the current thread. Default: True.
        clear: dispose of the figure after showing. Default: True.

    Returns: None
    """
    if title is not None:
        self.fig.suptitle(title)
    self._rearrange_fig(grids, adjust)
    if figsize is not None:
        self.resize(figsize[0], figsize[1])
    plt.show(block=block)
    if clear:
        self._clear_fig()
def savefig(self,
            fname: str,
            grids: Sequence[Union[str, Tuple[Union[int, slice], ...]]] = 'auto',
            figsize: Optional[Tuple[float, float]] = None,
            adjust: Optional[Tuple[float, ...]] = (0.1, 0.1, 0.95, 0.95, 0.15, 0.15),
            usetex: bool = False,
            clear: bool = False):
    """Save the figure to a file.

    Args:
        fname: destination file path.
        grids: layout of axes inside the window.
        figsize: figure size in inches; unchanged when None.
        adjust: (left, bottom, right, top, wspace, hspace) margins.
        usetex: render text with LaTeX while saving.
        clear: dispose of the figure after saving. Default: False.

    Returns: None
    """
    self._rearrange_fig(grids, adjust)
    dirname = os.path.dirname(fname)
    # Bugfix: os.makedirs('') raises FileNotFoundError when fname is a bare
    # filename with no directory component — only create real directories.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    if figsize is not None:
        self.resize(*figsize)
    # Bugfix: restore the *previous* usetex setting even if savefig raises;
    # the original unconditionally reset it to False and leaked usetex=True
    # on error.
    prev_usetex = plt.rcParams.get('text.usetex', False)
    plt.rcParams.update({'text.usetex': usetex})
    try:
        self.fig.savefig(fname)
    finally:
        plt.rcParams.update({'text.usetex': prev_usetex})
    if clear:
        self._clear_fig()
def clear(self):
    """Dispose of the current figure and forget all tracked axes.

    Returns: None
    """
    self._clear_fig()
def _init_axe(self, ax=None, dim=2):
    """Return a cleared Axes to draw on, creating figure/axes on demand.

    Args:
        ax: None to append a new subplot, an int index into self.axes, or
            an existing Axes object to adopt.
        dim: 2 for a regular subplot, 3 for a 3-D projection.
    """
    assert 0 < dim <= 3
    # Lazily create the figure on first use.
    if self.fig is None:
        self.fig = plt.figure(**self.figure_params)
    if ax is None or (isinstance(ax, int) and ax >= len(self.axes)):
        # Append a new subplot after the existing ones.
        new_plot_pos = (1, len(self.axes) + 1, 1)
        if dim <= 2:
            ax = self.fig.add_subplot(*new_plot_pos)
        else:
            ax = self.fig.add_subplot(*new_plot_pos, projection='3d')
            # Shrink the 3-D projection by axe3d_scale via a patched get_proj.
            ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([self.axe3d_scale] * 3 + [1]))
        ax.patch.set_edgecolor('black')
        ax.patch.set_linewidth(1)
        self.axes.append(ax)
        plt.rc('grid', linestyle=':', color='black', alpha=0.6)
    else:
        if isinstance(ax, Axes):
            # Adopt a caller-supplied Axes into the managed list.
            self.axes.append(ax)
        elif isinstance(ax, int):
            ax = self.axes[ax]
        else:
            raise ValueError(f'ax must be either number or Axes object, got {type(ax)}.')
    # Reset the axes for a fresh draw.
    ax.clear()
    ax.set_facecolor(self.ax_params['facecolor'])
    ax.set_axisbelow(True)
    return ax
def _rearrange_fig(self, grids='auto', adjust=(0.1, 0.1, 0.95, 0.95, 0.15, 0.15)):
    # type: (Sequence[Union[str, Tuple[Union[int, slice]]]]) -> None
    """Lay the managed axes out on the figure grid.

    'auto'/None places all axes on a single row and widens the figure
    proportionally; otherwise ``grids`` supplies one tuple per axes of
    either (rows, cols, index) or (rows, cols, row0, row1, col0, col1).
    """
    if grids in [None, 'auto']:
        # One row, one column per axes.
        gs = gridspec.GridSpec(1, len(self.axes))
        for _ in range(len(self.axes)):
            self.axes[_].set_position(gs[_].get_position(self.fig))
            self.axes[_].set_subplotspec(gs[_])
        fig_size = self.figure_params['figsize']
        self.fig.set_size_inches(fig_size[0] * len(self.axes), fig_size[1])
    else:
        for _, grid in enumerate(grids):
            grid = list(grid)
            assert len(grid) >= 3
            # Exactly three entries means a flat subplot index; more means
            # a (row-span, col-span) slice into the grid.
            use_index = len(grid) == 3
            grid = grid + [None] * (6 - len(grid))
            gs = gridspec.GridSpec(*grid[:2])
            if use_index:
                self.axes[_].set_position(gs[grid[2]].get_position(self.fig))
                self.axes[_].set_subplotspec(gs[grid[2]])
            else:
                self.axes[_].set_position(gs[grid[2]:grid[3], grid[4]:grid[5]].get_position(self.fig))
                self.axes[_].set_subplotspec(gs[grid[2]:grid[3], grid[4]:grid[5]])
        # TODO: resize
    self.fig.subplots_adjust(*adjust)
def _clear_fig(self):
    """Close the managed figure and reset all plotting state."""
    self.pausing = False
    # Bugfix: close the figure *before* dropping the reference. The original
    # set self.fig = None first, so plt.close(self.fig) received None — which
    # closes the *current* figure, not necessarily ours — leaking the managed
    # figure.
    plt.close(self.fig)
    self.fig: Optional[plt.Figure] = None
    self.axes.clear()
def group_legend(self, groups, cmap=None, loc='upper right', fontsize=10, title="Groups", **legend_kwargs):
    """Build a legend showing one coloured square marker per group name."""
    if hasattr(groups, 'tolist'):
        groups = groups.tolist()
    if cmap is None:
        cmap = get_cmap('Spectral', len(groups))
    handles = []
    for gi in range(len(groups)):
        handles.append(lines.Line2D([0], [0],
                                    label=groups[gi],
                                    marker='s',
                                    markerfacecolor=cmap(gi),
                                    markersize=fontsize,
                                    color='black',
                                    linestyle='none',
                                    alpha=0.8))
    options = dict(self.legend_params)
    options.update(legend_kwargs)
    legend = plt.legend(handles=handles, loc=loc, title=title, fontsize=fontsize,
                        prop=dict(size=fontsize), **options)
    plt.setp(legend.get_title(), fontsize=fontsize)
    return legend
def class_legend(self, y_unique, cmap=None, z_unique=None,
                 loc='upper right', fontsize=10, title="Classes", **legend_kwargs):
    """Build a legend mapping each class label to its colour patch.

    When category labels (``z_unique``) are also given, a blank spacer entry
    and one grey patch per category (with a cycling border style) follow.
    """
    if hasattr(y_unique, 'tolist'):
        y_unique = y_unique.tolist()
    if cmap is None:
        cmap = get_cmap('Spectral', len(y_unique))
    handles = []
    if len(y_unique) > 1:
        # One colormap-coloured patch per class.
        handles.extend((patches.Patch(edgecolor=None, facecolor=cmap(ci), label=cls)
                        for ci, cls in enumerate(y_unique)))
    else:
        # Single class: a neutral grey patch instead of a colormap colour.
        handles.append(patches.Patch(edgecolor=None, facecolor='grey', label=y_unique[0]))
    if z_unique is not None and (len(z_unique) > 1 or z_unique[0] is not None):
        # Empty line entry visually separates classes from categories.
        handles.append(lines.Line2D([], [], linestyle='', label=''))
        handles.extend((patches.Patch(facecolor='lightgray', label=z, **_cycle(self.borderoptions, _))
                        for _, z in enumerate(z_unique)))
    legend = plt.legend(handles=handles, loc=loc, title=title, fontsize=fontsize, prop=dict(size=fontsize),
                        **{**self.legend_params, **legend_kwargs})
    plt.setp(legend.get_title(), fontsize=fontsize)
    return legend
def category_legend(self, z_unique=None, loc='upper right', fontsize=10, title="Splits", **legend_kwargs):
    """Build a legend with one grey patch per split/category label."""
    handles = []
    for idx, z in enumerate(z_unique):
        # Border style cycles per category so splits stay distinguishable.
        border = _cycle(self.borderoptions, idx)
        handles.append(patches.Patch(facecolor='lightgray', label=z, **border))
    options = dict(self.legend_params)
    options.update(legend_kwargs)
    legend = plt.legend(handles=handles, loc=loc, title=title, fontsize=fontsize,
                        prop=dict(size=fontsize), **options)
    plt.setp(legend.get_title(), fontsize=fontsize)
    return legend
def view_legend(self, n_views, view_titles=None, loc='lower right', fontsize=10, title="Views", **legend_kwargs):
    """Build a legend with one hollow marker per view.

    Labels are the ordinal view numbers (1st, 2nd, ...) rendered as math
    text unless explicit ``view_titles`` are supplied.
    """
    def _ordinal_suffix(num):
        # Bugfix: 11/12/13 take 'th' ("11th", not "11st" as the original
        # n % 10 test produced).
        if num % 100 in (11, 12, 13):
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')

    handles = []
    for vi in range(1, n_views + 1):
        if view_titles is None:
            label = '${}^{{{}}}$'.format(vi, _ordinal_suffix(vi))
        else:
            label = view_titles[vi - 1]
        handles.append(lines.Line2D([0], [0],
                                    label=label,
                                    marker=_cycle(self.markers, vi - 1),
                                    markerfacecolor='w',
                                    markersize=fontsize,
                                    color='black',
                                    fillstyle='none',
                                    linestyle='none',
                                    alpha=0.5))
    legend = plt.legend(handles=handles, loc=loc, title=title, fontsize=fontsize, prop=dict(size=fontsize),
                        **{**self.legend_params, **legend_kwargs})
    plt.setp(legend.get_title(), fontsize=fontsize)
    return legend
@staticmethod
def embed(Xs, ys, dim=None, normalize=False):
    """Project data down to at most 3 dimensions for plotting.

    Data already <= 3-D is passed through (optionally standardised);
    otherwise t-SNE embeds it. Handles both a single matrix and a list of
    per-view matrices.

    Args:
        Xs: 2-D array, or a sequence of 2-D arrays (one per view).
        ys: labels, or a sequence of per-view label vectors.
        dim: target dimension; defaults to the source dim (if <= 3) or 2.
        normalize: standardise the features / embedding when True.
    """
    ndim = _tensor_depth(Xs)
    # Source dimensionality: column count of the (widest) view matrix.
    src_dim = Xs.shape[1] if ndim == 2 else np.max([X.shape[1] for X in Xs]) if len(Xs) > 0 else 0
    if 0 < src_dim <= 3 and (dim is None or dim == src_dim):
        # Already low-dimensional: no embedding needed.
        if _tensor_depth(Xs) == 2:
            return StandardScaler().fit_transform(Xs) if normalize else Xs
        else:
            n_views = len(Xs)
            X_all = np.concatenate([X for X in Xs])
            X_embed = StandardScaler().fit_transform(X_all) if normalize else X_all
            # Split the stacked matrix back into per-view chunks.
            # NOTE(review): this assumes every view has the same row count —
            # confirm with callers.
            return [X_embed[int(len(X_embed) / n_views * _):int(len(X_embed) / n_views * (_ + 1))]
                    for _ in range(n_views)]
    elif dim is None:
        dim = 2
    if _tensor_depth(Xs) == 2:
        # NOTE(review): perplexity = n_samples / 2 — scikit-learn requires
        # perplexity < n_samples; verify for very small inputs.
        X_embed = TSNE(n_components=dim, perplexity=Xs.shape[0] / 2).fit_transform(Xs, ys)
        return StandardScaler().fit_transform(X_embed)
    else:
        n_views = len(Xs)
        if _tensor_depth(ys) == 1:
            # Broadcast a single label vector to every view.
            ys = [ys for _ in range(n_views)]
        n_views = len(Xs)
        X_all = np.concatenate([X for X in Xs])
        y_all = np.concatenate([y for y in ys])
        X_embed = TSNE(n_components=dim, perplexity=X_all.shape[0] / 2).fit_transform(X_all, y_all)
        X_embed = StandardScaler().fit_transform(X_embed)
        return [X_embed[int(len(X_embed) / n_views * _):int(len(X_embed) / n_views * (_ + 1))]
                for _ in range(n_views)]
| [
"inspiros.tran@gmail.com"
] | inspiros.tran@gmail.com |
0ef155308b1d5158ba8cfdab7c3d8f932d9fa5ad | e7e0723b4df5a47a56dad23036f21a77b0600452 | /Old-web/src/admin.py | ae46cd78eabffd87ecb1bae4d4c3778ee7e96af8 | [] | no_license | aryanaces/MSC-Official-Website | 7c5803f34577c6cc32873df10abcf24d4e3c59bb | 7e6890701d7a81c467648c3bbfeb18f7bcca3fe3 | refs/heads/master | 2020-09-24T18:32:50.359613 | 2019-11-15T13:25:49 | 2019-11-15T13:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,095 | py | from django.contrib import admin
from .models import *
from .actions import export_as_csv_action
#from import_export.admin import ImportExportModelAdmin
# Register your models here.
class core_team_model(admin.ModelAdmin):
    """Admin list view for Secretarie (core-team members)."""
    list_display = ["name", "post", "fb_link", "git_link", "linkedin_link"]

    class Meta:
        # NOTE(review): ModelAdmin does not consume an inner Meta class
        # (that is a Model/ModelForm concept); this appears inert — confirm
        # before removing.
        model = Secretarie
class about_us_model(admin.ModelAdmin):
    """Admin configuration for the About_us model."""
    # Bugfix: the original assigned ``list_1``, which is not a ModelAdmin
    # option and therefore had no effect; ``list_display`` was clearly
    # intended (every sibling admin class in this module uses it).
    list_display = ["text", "img"]

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; kept as-is.
        model = About_us
class event_model(admin.ModelAdmin):
    """Admin configuration for the Events model."""
    # Bugfix: the original assigned ``list_2``, which is not a ModelAdmin
    # option and therefore had no effect; ``list_display`` was clearly
    # intended (every sibling admin class in this module uses it).
    list_display = ["logo", "heading", "description"]

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; kept as-is.
        model = Events
class team_model(admin.ModelAdmin):
    """Admin for Team: list by name, filterable by department."""
    list_display = ["name", "department"]
    list_filter = ["department"]

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; inert.
        model = Team
class post_model(admin.ModelAdmin):
    """Admin for Post: date-drilldown on timestamp, searchable text."""
    list_display = ["title", "timestamp"]
    date_hierarchy = 'timestamp'
    search_fields = ['title', 'content']

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; inert.
        model = Post
class msweek_event_model(admin.ModelAdmin):
    """Admin for MSWeek_Event: date-drilldown, searchable title/description."""
    list_display = ["title", "date"]
    date_hierarchy = 'date'
    search_fields = ['title', 'description']

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; inert.
        model = MSWeek_Event
class inspirus_event_model(admin.ModelAdmin):
    """Admin for Inspirus_Event: date-drilldown, searchable title/description."""
    list_display = ["title", "date"]
    date_hierarchy = 'date'
    search_fields = ['title', 'description']

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; inert.
        model = Inspirus_Event
class rumble_event_model(admin.ModelAdmin):
    """Admin for Rumble_Event: date-drilldown, searchable title/description."""
    list_display = ["title", "date"]
    date_hierarchy = 'date'
    search_fields = ['title', 'description']

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; inert.
        model = Rumble_Event
class head_model(admin.ModelAdmin):
    """Admin for Head_Team: filterable by department and post."""
    list_display = ["name", "department", "post"]
    list_filter = ["department", "post"]

    class Meta:
        # NOTE(review): ModelAdmin ignores an inner Meta class; inert.
        model = Head_Team
@admin.register(event_registration)
class EventRegistration_model(admin.ModelAdmin):
    """Admin for event_registration with a CSV-export action."""
    list_display = ["name", "year", "email"]
    # list_filter=["a","b","c","d","e","year"]
    list_filter = ["year"]
    # actions = [export_as_csv_action("CSV Export", fields=['name','year','email','contact','a','b','c','d','e'])]
    actions = [export_as_csv_action("CSV Export", fields=['name', 'email', 'contact', 'year'])]
@admin.register(registration)
class registration_model(admin.ModelAdmin):
    """Admin for registration with a CSV-export action."""
    list_display = ["name", "year", "email"]
    # list_filter=["a","b","c","d","e","year"]
    list_filter = ["a", "b", "year"]
    # actions = [export_as_csv_action("CSV Export", fields=['name','year','email','contact','a','b','c','d','e'])]
    actions = [export_as_csv_action("CSV Export", fields=['name', 'email', 'roll_number', 'contact', 'year', 'a', 'b'])]
@admin.register(hkct_register)
class hkct_model(admin.ModelAdmin):
    """Admin for hkct_register (in-institute HKCT registrations).

    NOTE(review): a second class with the *same name* is defined below for
    hkct_ouside; registration still works (the decorator runs at class
    creation), but the name collision shadows this class.
    """
    list_display = ["name", "year"]
    # list_filter=["a","b","c","d","e","year"]
    list_filter = ["a", "b", "c", "d", "e", "f", "year"]
    # actions = [export_as_csv_action("CSV Export", fields=['name','year','email','contact','a','b','c','d','e'])]
    actions = [export_as_csv_action("CSV Export", fields=['name', 'email', 'contact', 'year', 'a', 'b', 'c', 'd', 'e', 'f'])]
@admin.register(hkct_ouside)
class hkct_outside_model(admin.ModelAdmin):
    """Admin for hkct_ouside (outside-institute HKCT registrations).

    Bugfix: the original reused the class name ``hkct_model``, already taken
    by the admin for ``hkct_register`` above. Registration still worked (the
    decorator runs at class-creation time) but the rebinding shadowed the
    first class. Renamed; nothing else references the class name. The model
    name ``hkct_ouside`` (sic) is defined in .models and left unchanged.
    """
    list_display = ["name", "location"]
    list_filter = ["location", "institute"]
    actions = [export_as_csv_action("CSV Export", fields=['name', 'email', 'contact', 'location', 'institute'])]
class Contact_request(admin.ModelAdmin):
    """Admin for contact_request: show requester name and e-mail."""
    list_display = ["contact_name", "contact_email"]
# Register each model with its customised ModelAdmin (models without a
# custom admin get the default one).
admin.site.register(Secretarie, core_team_model)
admin.site.register(Head_Team, head_model)
admin.site.register(About_us, about_us_model)
admin.site.register(About_us_content)
admin.site.register(Events, event_model)
admin.site.register(Post, post_model)
admin.site.register(Team, team_model)
admin.site.register(MSWeek_Event, msweek_event_model)
admin.site.register(Inspirus_Event, inspirus_event_model)
admin.site.register(Rumble_Event, rumble_event_model)
admin.site.register(contact_request, Contact_request)
admin.site.register(index_gallery)
admin.site.register(MSWEEK_gallery)
# NOTE(review): 'INSPIRUSUS_gallery' looks like a typo, but the name must
# match the model defined in .models — fix there first if at all.
admin.site.register(INSPIRUSUS_gallery)
admin.site.register(RUMBLE_gallery)
admin.site.register(Heading_Content)
# admin.site.register(registration)
# admin.site.register(registration,registration_model)
| [
"arjavjain59998@gmail.com"
] | arjavjain59998@gmail.com |
3260dc302f4391afe755256b44ea9ca140f33a0e | 8ad8ee4e3a4e0e8ae0ed8e92c68cf122f5ba3723 | /jk_en/jk_en/sendEmail.py | e87f6f678832966b752cbb243ab64a762fe3c534 | [] | no_license | yangyangyanga/automatic_update | 5b5065713853c4a1225142ece4ea39be1a05d011 | 53c1777cbb84e489b887f38e2745477d6b6f4604 | refs/heads/master | 2020-05-25T21:18:24.979779 | 2019-05-22T08:34:02 | 2019-05-22T08:34:02 | 187,996,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.multipart import MIMEMultipart
import pymysql
def sendEmail(subject='无标题', messageText='无内容'):
    """Send a plain-text notification mail via smtp.163.com.

    Args:
        subject: mail subject (default: Chinese for "no subject").
        messageText: plain-text body (default: Chinese for "no content").

    Returns: None. Success/failure is only reported via print().
    """
    # accepter = 'zjlhyd0422@163.com'
    accepter = "625786425@qq.com"
    sender = "cyh6257@163.com"
    # Build a UTF-8 plain-text message.
    message = MIMEText(messageText, 'plain', 'utf-8')
    message['From'] = sender
    message['To'] = accepter
    message['Subject'] = Header(subject, 'utf-8')
    # SECURITY(review): the login credentials are hard-coded in source;
    # move them to environment variables or a config file kept out of VCS.
    smtp = smtplib.SMTP()
    try:
        smtp.connect('smtp.163.com', '25')
        smtp.login('cyh6257@163.com', 'cyh1995')
        smtp.sendmail(sender, accepter, message.as_string())
        print("发送成功")
    except smtplib.SMTPException as e:
        print(e, "发送失败")
    finally:
        # Bugfix: the original only called quit() on success, leaking the
        # connection whenever login/send failed.
        try:
            smtp.quit()
        except Exception:
            pass
def SendEmailMain():
    """Query changed UK-programme rows from MySQL and mail a change report.

    Sends nothing when no rows changed. Relies on the module-level
    ``sendEmail`` helper.
    """
    # SECURITY(review): database credentials are hard-coded in source.
    conn = pymysql.connect(host='172.16.10.71', port=3306, user='python_team', passwd='shiqiyu', db='hooli_school', charset="utf8")
    cursor = conn.cursor()
    # Fetch the changed-programme rows (old_id prefixed 'e' = UK records).
    conn.ping(reconnect=True)
    sql = "select old_id,url_old,university,change_context from Label_content where old_id like 'e%' and change_context like '%1%' order by university"
    cursor.execute(sql)
    result = cursor.fetchall()
    conn.commit()
    # Per-university count of changed programmes.
    sql2 = "select count(*),university from Label_content where change_context like '%1%' and old_id like 'e%' GROUP BY university"
    cursor.execute(sql2)
    conn.commit()
    result2 = cursor.fetchall()
    # print(result)
    # print(result2)
    conn.close()
    # Summary lines: "<university> has <n> changed programmes".
    sendemailschool = ''.join(list(map(lambda x: x[1] + '有' + str(x[0]) + '条专业发送变化' + '\n', result2)))
    # Detail lines: id, what changed (content/labels/both), school, link.
    sendemaillists = ''.join(list(map(lambda x: 'id为: ' + x[0] + ' 的专业' + x[3].replace('01', '内容发生变化').replace('11', '内容和标签发生变化').replace('10', '标签发生变化') + ' 学校: ' + x[2] + ' 链接为:' + x[1] + '\n', result)))
    messagetext = sendemailschool + '\n' + sendemaillists
    # Both parts empty -> messagetext is just the separator; skip sending.
    if messagetext != '\n':
        sendEmail(subject='英国变化邮件', messageText=messagetext)
# SendEmailMain()
| [
"1102213456@qq.com"
] | 1102213456@qq.com |
6e13a86a38a7074885b34f89dead98ef3c467f1b | 2dc613c1e66e20e5e400c91beee9251da20f5b23 | /cameras/migrations/0001_initial.py | 8db906111a230d9e4dfc5a2b3e1378ddc22e0fd6 | [] | no_license | Lol-Devs/what-the-pic-server | b86c6708befe024e0e4586d339bf2a95d3ff66e9 | 5a0c569576076e1871d3fe488b60d6eb3962b579 | refs/heads/main | 2023-07-06T08:27:42.198937 | 2021-08-07T11:42:50 | 2021-08-07T11:42:50 | 393,531,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # Generated by Django 3.2.6 on 2021-08-07 02:06
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: Camera and Picture tables (auto-generated by Django).

    NOTE: migrations are generated code — edit the models and re-generate
    rather than hand-editing this file.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Camera',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('ip4', models.CharField(max_length=16)),
                ('interval', models.PositiveIntegerField()),
                ('last_active', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        migrations.CreateModel(
            name='Picture',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('picture', models.ImageField(upload_to='pictures')),
                ('camera_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cameras.camera')),
            ],
        ),
    ]
| [
"ivan.gufler@tfobz.net"
] | ivan.gufler@tfobz.net |
f736b689aa16b25f7ebcc257bdcb50e7e1fb736b | 6c3aecc9c37e7e0264c508dd781d2ad20c8b41b0 | /fakecsv/settings.py | ed6e28c328ddc6affd09b84ffc54de581e1cd2b8 | [] | no_license | shams0910/fake-csv | c53186111be61c1119e3167cedcbdac42e28d3f2 | 90aefb28fd3583f5d79bbd54d0f81f8742662981 | refs/heads/master | 2023-04-23T12:54:06.472701 | 2021-05-11T07:09:35 | 2021-05-11T07:09:35 | 364,623,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,924 | py | """
"""
Django settings for fakecsv project.

Generated by 'django-admin startproject' using Django 3.1.4.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
# import django_heroku
import os
from pathlib import Path

try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    # Bugfix: the original used a bare ``except:`` which swallowed *every*
    # error (including KeyboardInterrupt and real load_dotenv() failures);
    # only a missing python-dotenv package should be tolerated here.
    print("running remotely")

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded True — drive it from the environment
# before deploying.
DEBUG = True

ALLOWED_HOSTS = ['fake-csv-demo.herokuapp.com', '127.0.0.1']

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accounts',
    'schemas'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'fakecsv.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'fakecsv.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get("DB_NAME"),
        'USER': os.environ.get("DB_USER"),
        'PASSWORD': os.environ.get("DB_PASSWORD"),
        'HOST': os.environ.get("DB_HOST"),
        'PORT': os.environ.get("DB_PORT"),
    }
}

# Auth user model
AUTH_USER_MODEL = 'accounts.User'

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'

STATIC_ROOT = os.path.join(BASE_DIR, 'static')

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'staticfiles'),
)

STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Media files
MEDIA_URL = '/media/'
# Bugfix: MEDIA_ROOT must be a single path string; the original assigned a
# one-element tuple, which Django's storage backends cannot use.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Celery configs
CELERY_BROKER_URL = os.environ.get('REDISTOGO_URL')
| [
"shamsiddin2000.09@gmail.com"
] | shamsiddin2000.09@gmail.com |
64183f6a05e4f1c1c9a3020a36e2d519263948ec | 581b4d9507f9580bbcd1a9acfad9fbff018173af | /Books/TextMiningABC/4-00.3.plot.py | fe509d5bded68c51664afebf2221a7200fb967fd | [] | no_license | matoken78/python | 0e6c79bccfa13df2f4e400dad0a1a071a8ecfd5f | d76492dfc6ba33735ca6ee5085782a139b8b7549 | refs/heads/master | 2021-06-05T01:21:45.239174 | 2020-03-01T05:13:31 | 2020-03-01T05:13:31 | 133,585,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py | import matplotlib.pyplot as plt
import numpy as np
import nltk
from nltk.corpus import inaugural
from collections import Counter
text = inaugural.raw('1789-Washington.txt')
sents = nltk.tokenize.sent_tokenize(text)
# sents の文ごとの文字数のリストを作り、Counterで頻度を数える
cnt = Counter(len(x) for x in sents)
# 長さと頻度の降順にソートして表示(頻度は全て1なので実質長さでソート)
print('ワシントン大統領就任演説 長さ×頻度')
print(sorted(cnt.items(), key=lambda x: [x[1], x[0]], reverse=True))
import utils
utils.adapt_japanese_font(plt)
# ヒストグラムの表示
nstring = np.array([len(x) for x in sents])
plt.hist(nstring)
plt.title('1789年ワシントン大統領就任演説の文ごとの文字数分布')
plt.xlabel('文の文字数')
plt.ylabel('出現頻度')
plt.show()
import re
from aozora import Aozora
aozora = Aozora('wagahaiwa_nekodearu.txt')
# 文に分解してから、文ごとに文字数をカウントする
string = '\n'.join(aozora.read())
# 全角空白を取り除く。句点・改行で分割、。」の。は改行しない
string = re.split('。(?!」)|\n', re.sub(' ', '', string))
# 空行を除く
while '' in string:
string.remove('')
# string の要素(文)の長さをリストにする
cnt = Counter([len(x) for x in string])
# 文の長さを頻度順にソートして出力する
print('吾輩は猫である 長さ×頻度')
print(sorted(cnt.items(), key=lambda x: x[1], reverse=True)[:100])
nstring = np.array([len(x) for x in string if len(x) < 150])
print('max', nstring.max())
plt.hist(nstring, bins=nstring.max())
plt.title('「吾輩は猫である」文ごとの文字数分布')
plt.xlabel('文の文字数')
plt.ylabel('出現頻度')
plt.show() | [
"matoken78@gmail.com"
] | matoken78@gmail.com |
4a6d0e9e62e869dd1c32e50729222a00bdb0eb19 | 8d6788e459af7e457bd2ceb97fc3b0aeb7a7d093 | /task1_understanding_OTP/otp.py | bba69fc5f6558e9c80dab3cc2de88fc7e0a5a973 | [] | no_license | cris391/Cryptography | bc593dd523a119d903d3f19a9f0695f54ffb75e2 | 27070d6cd043bcf66fd73852fe14e317e6d37c88 | refs/heads/master | 2020-04-01T21:21:24.347541 | 2018-12-11T09:36:10 | 2018-12-11T09:36:10 | 153,651,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from PIL import Image, ImageChops
# One-time-pad demo: XOR a 1-bit plaintext image with a same-size key image.
plaintext = Image.open("./example/plaintext.jpeg", mode='r').convert("1")
key = Image.open("./example/key.gif", mode='r').convert("1")
# Pixel-wise XOR of the two bilevel images is the OTP encryption step.
encrypted = ImageChops.logical_xor(plaintext, key)
encrypted.save('encrypted.gif', 'GIF')
# NOTE(review): prints the *plaintext* metadata, not the ciphertext's.
print(plaintext.format, plaintext.size, plaintext.mode)
"cristianpand@yahoo.com"
] | cristianpand@yahoo.com |
65b9341d74e0d9ee36c3676d0dd08ee9bd7ca9be | b35c288c0b534e2714c7ff73bf87f2265737e39e | /practice3.py | dbf4b2a78ff25fff3091627b6265ada6aa975aa7 | [] | no_license | PaulLiuEngineering/python_practice | 26ddfd82157c168d09e35113df8cae8fc957bfb6 | c5dbbd80464bcadbaa0f29915141ea29d6b92762 | refs/heads/master | 2020-04-14T06:43:36.936019 | 2018-12-31T19:55:48 | 2018-12-31T19:55:48 | 163,694,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import random
word_file = "/usr/share/dict/words"
WORDS = open(word_file).read().splitlines()
n = len(WORDS)
word = WORDS[random.randint(0,n-1)]
print(word)
# c = 12
# while(c > 0):
# letter = input("Guess a letter: ")
# if letter in word:
# a = 0
# show
# for a in len(word):
# if word[a] == letter
# print("") | [
"paulliu14@gmail.com"
] | paulliu14@gmail.com |
c424a4fe81c9efd0308c124f6b3b50fda90d747f | f87877d22e6c80f5bbc27ae44fb74518779a9afd | /ariac_behaviors/ariac_flexbe_behaviors/src/ariac_flexbe_behaviors/robotselectorstate_sm.py | d66459fa3a418b333e35f981a3aa49239f58ff36 | [] | no_license | Bgompie2000/Fase2_robotica_machinebouw | c5500860c7834f504fe05cb0611cf87caec91dd8 | b9dada0431cbfa92d5a08e7e7fdfebb14503637c | refs/heads/master | 2022-07-31T19:43:18.802065 | 2020-05-19T13:16:50 | 2020-05-19T13:16:50 | 261,993,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,964 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from ariac_support_flexbe_states.equal_state import EqualState
from ariac_support_flexbe_states.replace_state import ReplaceState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu May 14 2020
@author: Bas
'''
class RobotSelectorStateSM(Behavior):
    """Select the correct ARIAC robot arm and expose its move-group prefix.

    Maps an input ``arm_id`` ('arm1'/'arm2') to the corresponding
    ``move_group_prefix`` ('ariac/arm1'/'ariac/arm2').
    (Original docstring, Dutch: "selecteren van de juiste robot".)
    """

    def __init__(self):
        super(RobotSelectorStateSM, self).__init__()
        self.name = 'RobotSelectorState'
        # parameters of this behavior
        # references to used behaviors
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]
        # Behavior comments:

    def create(self):
        # x:533 y:79, x:130 y:365
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['arm_id'], output_keys=['move_group_prefix'])
        _state_machine.userdata.prefix_arm1 = 'ariac/arm1'
        _state_machine.userdata.prefix_arm2 = 'ariac/arm2'
        _state_machine.userdata.arm1 = 'arm1'
        _state_machine.userdata.arm2 = 'arm2'
        _state_machine.userdata.arm_id = ''
        _state_machine.userdata.move_group_prefix = ''

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        with _state_machine:
            # x:51 y:45
            # arm_id == 'arm1' -> use the arm1 prefix; otherwise try arm2.
            OperatableStateMachine.add('vergelijk1',
                                       EqualState(),
                                       transitions={'true': 'value1', 'false': 'vergelijk1_2'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'value_a': 'arm_id', 'value_b': 'arm1'})

            # x:50 y:168
            # arm_id == 'arm2' -> use the arm2 prefix; unknown ids fail.
            OperatableStateMachine.add('vergelijk1_2',
                                       EqualState(),
                                       transitions={'true': 'value1_2', 'false': 'failed'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'value_a': 'arm_id', 'value_b': 'arm2'})

            # x:280 y:49
            OperatableStateMachine.add('value1',
                                       ReplaceState(),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'value': 'prefix_arm1', 'result': 'move_group_prefix'})

            # x:279 y:169
            OperatableStateMachine.add('value1_2',
                                       ReplaceState(),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'value': 'prefix_arm2', 'result': 'move_group_prefix'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| [
"bgommers@avans.nl"
] | bgommers@avans.nl |
bfe6665bbfad17b4af69155aaa293cd1909810cf | 6d9e1aa1361f8330a4867b7f4ff337d6cfc27d81 | /main.py | 809a5b173b85366d2a810e4e9c1a17486e9ca203 | [
"CC0-1.0"
] | permissive | ykrods/pyobjc-webview-mtls-example | f97afe2455efa1c7f12d6ea0271fbdd086f9155d | d3b37c8861747d5651012fed0428514caf0bc76a | refs/heads/master | 2023-03-21T18:21:15.133047 | 2021-03-17T12:56:19 | 2021-03-17T12:56:19 | 347,983,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import logging
import sys
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
if sys.platform == "darwin":
from browser.cocoa.main import main
main()
else:
raise RuntimeError("Unsupported platform")
| [
"890082+ykrods@users.noreply.github.com"
] | 890082+ykrods@users.noreply.github.com |
16b94d1ee6177ff15cbcf4c32d01a3ff9d2f6844 | ecf53ca75324e9a293c2aecd46b27888ec541bf7 | /riam_api_client/models/inline_response20044_message.py | 6fff66167dcaf09f5028f5abdbad8b032a1e6351 | [
"MIT"
] | permissive | RiskAmerica/api-client-python | 9bcede4cac091ca42b7c534555c63223fb1c315f | 468c554a0440bef5086828631e25d99d41e28571 | refs/heads/main | 2023-07-07T14:02:25.601988 | 2021-08-20T19:33:36 | 2021-08-20T19:33:36 | 329,913,363 | 0 | 1 | MIT | 2021-08-20T19:33:37 | 2021-01-15T13:01:30 | Python | UTF-8 | Python | false | false | 7,445 | py | # coding: utf-8
"""
APIs RISKAMERICA
A continuación les presentamos la documentación las **APIs** **de** **RiskAmerica**, el cual es un servicio pagado ofrecido por RiskAmerica que se contrata por separado a nuestras otras ofertas de software. Algunas consideraciones que debe tener al momento de usar las APIs: - El APIKEY o Token lo puede conseguir solicitándolo al equipo comercial de RiskAmerica - El request necesita ser enviado con el header **Accept:** **application/json** para que responda en formato **JSON** (de no ser enviado con esto se responderá en formato **XML**) - Todos los Servicios son **REST** y sus parametros pueden ser enviados tanto en **POST** como **GET** - El uso de las APIs puede llevar un cobro asociado según se pacte en el acuerdo comercial, por lo que le recomendamos ser cuidadosos en el uso de éstas para evitar sobre-cargos innecesarios. - RiskAmerica funciona con un mecanismo de **WhiteList** **de** **IPs** para las consultas de las API. Para habilitar o modificar la lista de IPs permitidas debe contactarse al mail **contacto@riskamerica.com**. - En caso de usar **Python** como lenguaje de programación puede visitar nuestro SKD disponible en [https://github.com/RiskAmerica/api-client-python](https://github.com/RiskAmerica/api-client-python) . - En caso de usar otros lenguajes de programación puede usar el proyecto [https://github.com/swagger-api/swagger-codegen/tree/3.0.0](https://github.com/swagger-api/swagger-codegen/tree/3.0.0) para generar su propio SDK a partir del archivo [openapi.json](https://ra-public-files.s3-sa-east-1.amazonaws.com/wide-public/riam-api/openapi.json) . - Todas las APIs funcionan exclusivamente bajo el protocolo HTTPS usando TLS 1.2 o 1.3 # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Importing related models
class InlineResponse20044Message(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each model attribute to its declared Python type (consumed by the
# generated to_dict()/serialisation helpers).
swagger_types = {
    'id': 'int',
    'short_name': 'str',
    'long_name': 'str',
    'mime_type': 'str'
}

# Maps each Python attribute name to its JSON key in the API definition.
attribute_map = {
    'id': 'id',
    'short_name': 'shortName',
    'long_name': 'longName',
    'mime_type': 'mimeType'
}
def __init__(self, id=None, short_name=None, long_name=None, mime_type=None):  # noqa: E501
    """InlineResponse20044Message - a model defined in Swagger"""  # noqa: E501
    self._id = None
    self._short_name = None
    self._long_name = None
    self._mime_type = None
    self.discriminator = None
    # Route each supplied (non-None) value through its property setter,
    # exactly as the generated per-attribute if-blocks did, in the same
    # attribute order.
    provided = {
        'id': id,
        'short_name': short_name,
        'long_name': long_name,
        'mime_type': mime_type,
    }
    for attr_name, attr_value in provided.items():
        if attr_value is not None:
            setattr(self, attr_name, attr_value)
@property
def id(self):
    """int: identifier of the download format (``id`` field)."""
    return self._id

@id.setter
def id(self, id):
    """Set the identifier of the download format."""
    self._id = id
@property
def short_name(self):
    """str: title of the download format (JSON key ``shortName``)."""
    return self._short_name

@short_name.setter
def short_name(self, short_name):
    """Set the title of the download format."""
    self._short_name = short_name
@property
def long_name(self):
"""Gets the long_name of this InlineResponse20044Message. # noqa: E501
Descripción del formato de descarga # noqa: E501
:return: The long_name of this InlineResponse20044Message. # noqa: E501
:rtype: str
"""
return self._long_name
@long_name.setter
def long_name(self, long_name):
"""Sets the long_name of this InlineResponse20044Message.
Descripción del formato de descarga # noqa: E501
:param long_name: The long_name of this InlineResponse20044Message. # noqa: E501
:type: str
"""
self._long_name = long_name
@property
def mime_type(self):
"""Gets the mime_type of this InlineResponse20044Message. # noqa: E501
MimeType que describe el tipo de archivo usado para la descarga # noqa: E501
:return: The mime_type of this InlineResponse20044Message. # noqa: E501
:rtype: str
"""
return self._mime_type
@mime_type.setter
def mime_type(self, mime_type):
"""Sets the mime_type of this InlineResponse20044Message.
MimeType que describe el tipo de archivo usado para la descarga # noqa: E501
:param mime_type: The mime_type of this InlineResponse20044Message. # noqa: E501
:type: str
"""
self._mime_type = mime_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse20044Message, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20044Message):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"inicovani@riskamerica.com"
] | inicovani@riskamerica.com |
d9c6b22c0d9d0930c5c499a903e64b5ffc183378 | a356e44897f8f618af30a17b38dc73695562e01b | /Day_2_exercise/animals/fish.py | 1246c64d9df0ad817bce056a4e6b2edadef6baaf | [] | no_license | phimiwo/Advanced_Python_Programming_Philipp_Wolf | e530073d7360a5494e3b2b29649d966de9f2fd34 | 4590cb9aad433a342ca800bf01995ab73e611b24 | refs/heads/master | 2023-03-12T10:00:41.940807 | 2021-03-01T21:13:01 | 2021-03-01T21:13:01 | 339,799,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | class Fish:
def __init__(self):
''' Constructor for this class. '''
# Create some member animals
self.members = ['Salmon', 'Trout', 'Hering', 'Shark']
def printMembers(self):
print('Printing members of the Fish class')
for member in self.members:
print('\t%s ' % member)
| [
"philipp.wolf@physics.uu.se"
] | philipp.wolf@physics.uu.se |
49e767fa58dad8a46d203ecfb58f86aa51295f65 | df1518e039d1f2232705cc5e4b66f68aeb21eb31 | /utils.py | 50b4b58be5cb9e1bd3c443f439808371c9b573f8 | [
"Apache-2.0"
] | permissive | archgroove/pytorch-transformers-classification | eef2667eb57de4d1c42e3650c9fec85da29fb484 | 2eb97aa6f3081f1d0ae44210f5c13b9846f2d03d | refs/heads/master | 2020-07-28T16:51:59.723315 | 2019-09-25T01:30:26 | 2019-09-25T01:30:26 | 209,471,440 | 0 | 1 | Apache-2.0 | 2019-09-19T05:44:56 | 2019-09-19T05:43:29 | null | UTF-8 | Python | false | false | 12,933 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
logger = logging.getLogger(__name__)
csv.field_size_limit(2147483647)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class BinaryProcessor(DataProcessor):
"""Processor for the binary data sets"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_example_to_feature(example_row, pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True, sep_token_extra=False):
example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id, sep_token_extra = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3". " -4" for RoBERTa.
special_tokens_count = 4 if sep_token_extra else 3
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True,
process_count=cpu_count() - 2):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label : i for i, label in enumerate(label_list)}
examples = [(example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id) for example in examples]
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples, chunksize=100), total=len(examples)))
return features_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, sep_token_extra=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label : i for i, label in enumerate(label_list)}
examples = [(example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id, sep_token_extra) for example in examples]
process_count = cpu_count() - 2
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples, chunksize=500), total=len(examples)))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
processors = {
"binary": BinaryProcessor
}
output_modes = {
"binary": "classification"
}
GLUE_TASKS_NUM_LABELS = {
"binary": 2
}
| [
"chaturangarajapakshe@gmail.com"
] | chaturangarajapakshe@gmail.com |
363953484217fd1729ea121e7d4b99926dc59bd7 | 5d9908f1b5516ae2dcf2258f78d04f509e7554e3 | /sort.py | d6ce2fdf79c859603c9d058b0f213a0ea5d392ab | [] | no_license | farhanaqazi/sortPY | c8d7ef3d0cb24152b8b0113d3d87b5f8f412f986 | b20a347d8b6799dbe8d5f5e16f18c936177a9410 | refs/heads/master | 2020-03-25T09:13:11.246582 | 2018-08-27T23:14:04 | 2018-08-27T23:14:04 | 143,654,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | ''' sorter program'''
x = int(input("Enter first Number Please: "))
y = int(input("Enter second Number Please: "))
z = int(input("Enter third Number Please: "))
mylist=sorted([x,y,z])
print(mylist)
| [
"noreply@github.com"
] | farhanaqazi.noreply@github.com |
836aa6db7bc353f8f51112830011ad012d786779 | 6ecd18dd5a35b7d739f7f8246467eef20180a962 | /backend/XGBoost/DataGen.py | f292d5c50529dd2b4f0ea154da90f8f82f4a06eb | [] | no_license | PurdueEats/backend | 86c0416c9dbccaae74b0280e6b6ef5aaf5fa9271 | a2f3aeed7095ad9251a4eff9e20c5e6b050fbf49 | refs/heads/main | 2023-04-14T13:48:00.868914 | 2021-04-23T19:35:23 | 2021-04-23T19:35:23 | 331,550,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import requests
from datetime import date, timedelta
URL = 'https://api.hfs.purdue.edu/menus/v2/locations/'
LOCATIONS = ['Earhart', 'Hillenbrand', 'Ford', 'Windsor', 'Wiley']
DATE = ''
# Change based on local file system
FILENAME = 'temp.csv'
START_DATE = date(2016, 8, 22)
END_DATE = date(2021, 3, 1)
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
def generate_dataset():
file = open(FILENAME, 'a')
file.write('Date;LocationId;MealType;Time;MenuItem\n')
for single_date in daterange(START_DATE, END_DATE):
cur_date = single_date.strftime("%Y-%m-%d")
for loc in LOCATIONS:
response = requests.get(URL + '/' + loc + '/' + cur_date).json()
meals_list = []
try:
meals_list = [x for x in response['Meals']
if x['Status'] == 'Open']
except:
pass
for meals in meals_list:
time = meals['Hours']['StartTime'] + \
"-" + meals['Hours']['EndTime']
for station in meals['Stations']:
for item in station['Items']:
file.write(
cur_date + ';' + loc + ';' + meals['Name'] + ';' + time + ';' + item['Name'] + '\n')
file.close()
if __name__ == "__main__":
generate_dataset()
| [
"arora74@purdue.edu"
] | arora74@purdue.edu |
9a93a2420acc3eb462984f16192bf378b923dbf2 | 0f880fab72fe18a2e5c4718ba4bf78fbe800f988 | /code/CityList.py | 6dd00a6060c4d7c719719ac0c9f538ffcdc1ab89 | [] | no_license | clwater/GetCityGeo | ce208abb69130b091acaf9ac77b194035d7d96d4 | c82b922c25c07ace0245eaa20055bfe8290d7072 | refs/heads/master | 2021-01-19T17:33:09.171032 | 2017-08-24T16:23:00 | 2017-08-24T16:23:00 | 101,068,888 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,244 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import requests
import json , re
import mysql.connector
conn = mysql.connector.connect(user='root', password='root', database='Utils')
def getCityGeo(cityname):
url = 'http://ditu.amap.com/service/poiInfo?query_type=TQUERY&keywords=%s' % (cityname)
html = requests.get(url).text
print html
if len(html) < len('{"status":"2","data":"Return Failure!"12312323}') :
return -1
data = json.loads(html)
cityList = []
try:
searchList = data['data']['locres']['poi_list']
# searchList = data['data']['poi_list']
# city = searchList[0]
# _city = {'level': '', 'child_station_count': city['child_station_count'],
# 'adcode': city['adcode'], 'coords': '', 'address': city['address'],
# 'ename': '', 'name': city['name'], 'longitude': city['longitude'],
# 'latitude': city['latitude']}
# return _city
for city in searchList:
_city = { 'level' : city['level'] , 'child_station_count' : city['child_station_count'],
'adcode': city['adcode'] , 'coords' : city['coords'] , 'address' : city['address'],
'ename' : city['ename'], 'name' : city['name'] , 'longitude' : city['longitude'],
'latitude': city['latitude']}
return _city
except Exception:
return cityList
def saveInfo(cityInfo , city):
if cityInfo < 3:
print city + 'not include'
return
print city
try:
print cityInfo['ename']
cursor = conn.cursor()
tem = cityInfo['ename']
tem = str(tem).replace('\'' , '`')
_sql = 'insert into CityGeo(ename , name , level , adcode ,child_station_count,coords , address , longitude ,latitude ) values (\'%s\',\'%s\',\'%s\',\'%s\',%s, \'%s\' ,\'%s\' ,\'%s\', \'%s\')' % (
tem, city, cityInfo['level'], cityInfo['adcode'], cityInfo['child_station_count'],
# cityInfo['coords'] ,
"",
cityInfo['address'] ,cityInfo['longitude'] ,cityInfo['latitude'])
print(_sql)
cursor.execute(_sql)
conn.commit()
except Exception:
with open('errorcity' ,'a') as f:
# print city
f.write(city + '\n')
print (city + 'error')
def getCityListDB():
cursor = conn.cursor()
_sql = 'SELECT `ChinaCity`.`cityName`,`ChinaCity`.`regionName` FROM `ChinaCity` WHERE `ChinaCity`.`cityName` != \'\' and id > 248'
cursor.execute(_sql)
cityList = cursor.fetchall()
for city in cityList:
if len(city) > 1:
if '盟' in city[0]:
temp = city[0] + city[1]
else:
temp = city[0] + u'市' + city[1]
else:
temp = city[0] + u'市'
print temp
saveInfo( getCityGeo(temp) , temp)
def getCityListText():
with open('citylist' , 'r') as f:
cityList = f.readlines()
for city in cityList:
city = city.strip()
# city = city + '县'
saveInfo(getCityGeo(city), city)
getCityListText()
# getCityListDB()
# getCityGeo('北京') | [
"gengzhibo@gengzhibo.net"
] | gengzhibo@gengzhibo.net |
b4b07278d2bdd76b4fcc168e8ca4a5e2f2b61686 | 4a027b32b1e2dfebd6d65c9e7afce1f2e93f16bc | /webblog/blog/admin.py | c103bbbe9137099f88f5d13d6b08262854240b18 | [] | no_license | Jethet/DjangoProject-WebBlog | 92aa2959349129b2ef192163ab5637dbd4099224 | f64a79d889abe6a2d3caa9aa5350178d97b5c5eb | refs/heads/master | 2020-05-29T23:32:53.154542 | 2020-02-11T21:59:24 | 2020-02-11T21:59:24 | 189,438,086 | 1 | 0 | null | 2020-02-11T21:59:25 | 2019-05-30T15:28:38 | Python | UTF-8 | Python | false | false | 197 | py | from django.contrib import admin
# First import the model that you want to show up on the admin page:
from .models import Post
# Register your models on the admin page:
admin.site.register(Post)
| [
"henriette.hettinga@gmail.com"
] | henriette.hettinga@gmail.com |
044048b2ab53cf63e68a9202507e3b6e27c8705e | 82b7d00eec2ce90acb12cbc74ea5a217862d26ac | /tarkov/models.py | 6095f0529724099e3082b1051b048d470e0ba551 | [
"MIT"
] | permissive | yakumo1255/jet_py | 565242d44068f616a5507d89d6a48cd7c87ba5ce | 2f352b5e6f5d88594d08afc46e9458e919271788 | refs/heads/main | 2023-05-29T07:17:56.823366 | 2021-06-17T22:28:16 | 2021-06-17T22:28:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | from __future__ import annotations
from pathlib import Path
from typing import Any, ClassVar, Generic, Optional, Type, TypeVar
import pydantic
import yaml
from pydantic import Extra, ValidationError
from pydantic.generics import GenericModel
class Base(pydantic.BaseModel):
class Config:
extra = Extra.forbid
use_enum_values = True
validate_assignment = True
validate_all = True
allow_population_by_field_name = True
def dict(self, by_alias: bool = True, **kwargs: Any) -> dict:
return super().dict(
by_alias=by_alias,
**kwargs,
)
def json(
self,
*args: list,
by_alias: bool = True,
indent: int = 4,
**kwargs: Any,
) -> str:
# pylint: disable=useless-super-delegation
return super().json(*args, by_alias=by_alias, indent=indent, **kwargs)
ConfigType = TypeVar("ConfigType", bound="BaseConfig")
class BaseConfig(pydantic.BaseModel):
__config_path__: ClassVar[Path]
class Config:
extra = Extra.forbid
use_enum_values = True
validate_all = True
allow_mutation = False
@classmethod
def load(
cls: Type[ConfigType], path: Path = None, auto_create: bool = True
) -> ConfigType:
path = path or cls.__config_path__
if not path.exists():
if auto_create:
try:
config = cls()
if not path.parent.exists():
path.parent.mkdir(parents=True, exist_ok=True)
yaml.safe_dump(config.dict(), path.open(mode="w", encoding="utf8"))
except ValidationError as error:
raise ValueError(f"Config on {path} does not exists.") from error
with path.open(encoding="utf8") as file:
config = yaml.safe_load(file)
return cls.parse_obj(config)
ResponseType = TypeVar("ResponseType")
class TarkovSuccessResponse(GenericModel, Generic[ResponseType]):
err: int = 0
errmsg: Optional[str] = None
data: Optional[ResponseType] = None
class TarkovErrorResponse(GenericModel, Generic[ResponseType]):
err: int = True
errmsg: Optional[str]
data: Any = None
@staticmethod
def profile_id_is_none() -> "TarkovErrorResponse":
return TarkovErrorResponse(errmsg="Profile id is None")
| [
"thirvondukr@gmail.com"
] | thirvondukr@gmail.com |
978881310e18797911f55de4a40517a6a5407761 | 214264c77b802a8437cb6e4380c976afa35c0b34 | /fib.py | 032c339426d94f0602ac62b28f86610c8f92dfab | [] | no_license | ire-and-curses/python_examples | 804896e13b1300526ac21b9493c2cf61e952e508 | 71095e6c35e4ad90017f0e1bd070271cb2c5888e | refs/heads/master | 2020-04-28T12:41:54.199552 | 2012-09-26T07:52:00 | 2012-09-26T07:52:00 | 5,798,983 | 3 | 0 | null | 2012-09-13T19:24:25 | 2012-09-13T18:23:21 | Python | UTF-8 | Python | false | false | 1,325 | py | #!/usr/bin/env python
def simple_fib(n):
a = 0
b = 1
for i in range(n):
c = a + b
a = b
b = c
return c
def iter_fib(n):
a, b = 0, 1
for i in range(n-1):
a, b = b, a+b
return b
def fib_series(n):
a, b, = 0, 1
series = [0, 1]
if n == 0:
return [0]
for i in range(n-1):
a, b = b, a+b
series.append(b)
return series
def recursive_fib(n):
if n in (0, 1): return n
return recursive_fib(n-1) + recursive_fib(n-2)
stored = {
'0' : 0,
'1' : 1
}
def memoized_fib1(n):
if str(n) in stored: return n
if str(n-1) in stored:
a = stored[str(n-1)]
else:
a = memoized_fib(n-1)
stored[str(n-1)] = a
b = stored[str(n-2)]
return a + b
stored = [0,1]
def memoized_fib2(n):
if n < len(stored): return stored[n]
if n-1 <= len(stored):
a = stored[n-1]
else:
a = memoized_fib2(n-1)
stored.insert[n-1, a]
b = stored[n-2]
return a + b
if __name__ == "__main__":
n = 60
print "n =", n
print fib_series(n)
#print "Simple fib:", simple_fib(n)
print "Explicit iteration:", iter_fib(n)
#print "Recursive fib:", recursive_fib(n)
print "Memoized fib:", memoized_fib2(n)
| [
"eric.saunders@gmail.com"
] | eric.saunders@gmail.com |
ce555ee518fcfbdb43e59334bdddd885f194b341 | 1a24def8879972f21d846ffb3813632070e1cf12 | /Chapter08/0813exception-while-true.py | fa0b51fa722222322031c355be5fd5b499f32cbf | [] | no_license | mushahiroyuki/beginning-python | 03bb78c8d3f678ce39662a44046a308c99f29916 | 4d761d165203dbbe3604173c404f70a3eb791fd8 | refs/heads/master | 2023-08-16T12:44:01.336731 | 2023-07-26T03:41:22 | 2023-07-26T03:41:22 | 238,684,870 | 5 | 4 | null | 2023-09-06T18:34:01 | 2020-02-06T12:33:26 | Python | UTF-8 | Python | false | false | 397 | py | #ファイル名 Chapter08/0813exception-while-true.py
while True:
try:
x = int(input('最初の数を入れてください: '))
y = int(input('2番目の数を入れてください: '))
value = x / y
print(f'{x}/{y}は{value}です。')
except:
print('入力が正しくありません。再度入力してください。')
else:
break
| [
"hmusha@gmail.com"
] | hmusha@gmail.com |
5b3cbdb9ee3124e0fee05d82c702f0c9e56923ec | fc77fc08e983385521f7073e160cf05b8484dc9d | /Music App/mapp/db_setup.py | 7d68ace0c79aa6840b2127f24640c4be99f1da1e | [] | no_license | Icode4passion/Apps | e561a179147ab0f9bd074998f2b3e3a9bfedc539 | 51e5f2c9026a7f6a6efef33f4f54c9d7573a3070 | refs/heads/master | 2020-05-04T15:22:59.139023 | 2019-04-03T07:57:58 | 2019-04-03T07:57:58 | 179,238,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///mymusic.db', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine,))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import models
Base.metadata.create_all(bind=engine) | [
"yogeerama@gmail.com"
] | yogeerama@gmail.com |
27f2e8d188e5e25adfa5a3b17dd7b04f7506bcf8 | fc09bb758c37ce5f04f7a40f89e35476c5f9c497 | /venv/Scripts/easy_install-3.6-script.py | 562e51ddf3923e3fd6c0128389c17d5ec1ee83df | [] | no_license | sarahwang93/python-leetcode | c71b23ccd300ba49f72a375d0bc8df6f343e7999 | 59b9e22c5e15a361b5120e3bbd13a209c8dbd53f | refs/heads/master | 2022-10-29T04:15:38.284332 | 2020-06-18T18:47:28 | 2020-06-18T18:47:28 | 261,823,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!C:\pyproject\Assignment\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"wanggeo93@gmail.com"
] | wanggeo93@gmail.com |
4804af56b10ae70dcf40078e2d78cc84cb65e93f | 8ed6932e4df311177144d3c37763030c3c23bc01 | /kvadratni_koren (1).py | a220165a60a37a8932a9daf621ad72bc549c54ec | [] | no_license | pengu5055/RacunalniskiPraktikum | c3261d7a432f160e77b1c66445d5dddb563e53d7 | 3a585d5edd0967d6bf943c631beb1c13a46db587 | refs/heads/master | 2020-12-19T23:48:31.152076 | 2020-01-30T15:17:01 | 2020-01-30T15:17:01 | 235,889,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,367 | py | # =============================================================================
# Kvadratni koren
#
# Približke za kvadratni koren števila $n$ lahko izračunamo po naslednjem
# postopku. Začetni približek $x_0$ je enak $n / 2$. Vsak naslednji približek
# $x_{k + 1}$ pa izračunamo kot $(x_k + n / x_k) / 2$.
# =====================================================================@021462=
# 1. podnaloga
# Sestavite funkcijo `priblizek_po_korakih(n, k)`, ki po zgornjem postopku
# izračuna `k`. približek korena števila `n`.
# =============================================================================
def priblizek_po_korakih(n, k):
    """Return the k-th Newton (Babylonian) approximation of sqrt(n).

    The sequence starts at x_0 = n / 2 and each further term is
    x_{j+1} = (x_j + n / x_j) / 2.  For k == 0 the initial guess
    n / 2 itself is returned.
    """
    priblizek = n / 2
    for _ in range(k):
        priblizek = (priblizek + n / priblizek) / 2
    return priblizek
# =====================================================================@021463=
# 2. podnaloga
# Sestavite funkcijo `priblizek_do_natancnosti(n, eps)`, ki po zgornjem
# postopku izračuna prvi približek korena števila `n`, za katerega se kvadrat
# približka od `n` razlikuje za manj kot `eps`. Smislena vrednost za argument
# `eps` je npr. $10^{-6}$.
# =============================================================================
def priblizek_do_natancnosti(n, eps):
    """Return the first Newton approximation x of sqrt(n) whose square
    differs from n by at most eps.

    As in the step-counting variant, the sequence starts at x_0 = n / 2
    and iterates x <- (x + n / x) / 2; like the original implementation,
    the search starts with the first iterate x_1.
    """
    # First approximation x_1 (the original started its search at k = 1).
    x = n / 2
    x = (x + n / x) / 2
    # abs() makes the stopping test symmetric: the task asks that the
    # square *differ* from n by less than eps, regardless of sign.
    while abs(x * x - n) > eps:
        # Update the approximation incrementally instead of recomputing
        # the whole sequence with priblizek_po_korakih(n, k) on every
        # pass (the original did O(k) work per step, O(k**2) overall),
        # and without the leftover debug print() calls.
        x = (x + n / x) / 2
    return x
# ============================================================================@
# The three bare string literals below are intentional no-op statements
# planted by the grading framework: if the student's code above is missing
# a closing bracket, Python reports the syntax error on these lines, and
# their (Slovenian) text tells the student that the real mistake is in
# their own code above.  They must not be removed or translated.
'Če vam Python sporoča, da je v tej vrstici sintaktična napaka,'
'se napaka v resnici skriva v zadnjih vrsticah vaše kode.'
'Kode od tu naprej NE SPREMINJAJTE!'
import json, os, re, sys, shutil, traceback, urllib.error, urllib.request
import io, sys
from contextlib import contextmanager
class VisibleStringIO(io.StringIO):
    """In-memory text stream that echoes everything read from it to stdout.

    Used by the grading framework as a stand-in for ``sys.stdin`` (see
    ``Check.input`` below), so that simulated user input also shows up in
    the captured program transcript.
    """
    def read(self, size=None):
        """Read up to ``size`` characters, printing them as they are read."""
        x = io.StringIO.read(self, size)
        print(x, end='')
        return x

    def readline(self, size=None):
        """Read one line (at most ``size`` chars), printing it as it is read."""
        line = io.StringIO.readline(self, size)
        print(line, end='')
        return line
class Check:
    """Static helper used by the grading framework to track and evaluate
    the individual sub-problems ("parts") of an exercise.

    All state lives in class attributes (``parts``, ``current_part``,
    ``part_counter``); the class is never instantiated.
    """
    @staticmethod
    def has_solution(part):
        # A part counts as attempted when its solution source is non-empty
        # (ignoring surrounding whitespace).
        return part['solution'].strip() != ''
    @staticmethod
    def initialize(parts):
        """Reset the grading state for the given list of exercise parts."""
        Check.parts = parts
        for part in Check.parts:
            # Every part starts out valid, with no feedback messages and
            # no recorded secret values.
            part['valid'] = True
            part['feedback'] = []
            part['secret'] = []
        # No part is active until the first call to Check.part().
        Check.current_part = None
        Check.part_counter = None
    @staticmethod
    def part():
        """Advance to the next part and report whether it has a solution.

        Returns True when the newly selected part contains a non-empty
        solution, so callers can skip checking unsolved parts.
        """
        if Check.part_counter is None:
            Check.part_counter = 0
        else:
            Check.part_counter += 1
        Check.current_part = Check.parts[Check.part_counter]
        return Check.has_solution(Check.current_part)
    @staticmethod
    def feedback(message, *args, **kwargs):
        """Append a str.format-style message to the current part's feedback."""
        Check.current_part['feedback'].append(message.format(*args, **kwargs))
    @staticmethod
    def error(message, *args, **kwargs):
        """Record a feedback message and mark the current part as failed."""
        Check.current_part['valid'] = False
        Check.feedback(message, *args, **kwargs)
    @staticmethod
    def clean(x, digits=6, typed=False):
        """Normalise a value so it can be compared for equality.

        Floats are rounded to ``digits`` decimal places; containers are
        normalised recursively, with dicts and sets turned into sorted
        lists so ordering does not matter.  With ``typed=True`` the result
        is a ``(type, value)`` pair, so e.g. ``[1, 2]`` and ``(1, 2)``
        compare as different.
        """
        t = type(x)
        if t is float:
            x = round(x, digits)
            # Since -0.0 differs from 0.0 even after rounding,
            # we change it to 0.0 abusing the fact it behaves as False.
            v = x if x else 0.0
        elif t is complex:
            # NOTE(review): with typed=True the recursive calls return
            # (type, value) pairs, which complex() cannot accept —
            # presumably typed is never combined with complex values.
            v = complex(Check.clean(x.real, digits, typed), Check.clean(x.imag, digits, typed))
        elif t is list:
            v = list([Check.clean(y, digits, typed) for y in x])
        elif t is tuple:
            v = tuple([Check.clean(y, digits, typed) for y in x])
        elif t is dict:
            v = sorted([(Check.clean(k, digits, typed), Check.clean(v, digits, typed)) for (k, v) in x.items()])
        elif t is set:
            v = sorted([Check.clean(y, digits, typed) for y in x])
        else:
            v = x
        return (t, v) if typed else v
@staticmethod
def secret(x, hint=None, clean=None):
clean = Check.get('clean', clean)
Check.current_part['secret'].append((str(clean(x)), hint))
@staticmethod
def equal(expression, expected_result, clean=None, env=None, update_env=None):
global_env = Check.init_environment(env=env, update_env=update_env)
clean = Check.get('clean', clean)
actual_result = eval(expression, global_env)
if clean(actual_result) != clean(expected_result):
Check.error('Izraz {0} vrne {1!r} namesto {2!r}.',
expression, actual_result, expected_result)
return False
else:
return True
@staticmethod
def run(statements, expected_state, clean=None, env=None, update_env=None):
code = "\n".join(statements)
statements = " >>> " + "\n >>> ".join(statements)
global_env = Check.init_environment(env=env, update_env=update_env)
clean = Check.get('clean', clean)
exec(code, global_env)
errors = []
for (x, v) in expected_state.items():
if x not in global_env:
errors.append('morajo nastaviti spremenljivko {0}, vendar je ne'.format(x))
elif clean(global_env[x]) != clean(v):
errors.append('nastavijo {0} na {1!r} namesto na {2!r}'.format(x, global_env[x], v))
if errors:
Check.error('Ukazi\n{0}\n{1}.', statements, ";\n".join(errors))
return False
else:
return True
@staticmethod
@contextmanager
def in_file(filename, content, encoding=None):
encoding = Check.get('encoding', encoding)
with open(filename, 'w', encoding=encoding) as f:
for line in content:
print(line, file=f)
old_feedback = Check.current_part['feedback'][:]
yield
new_feedback = Check.current_part['feedback'][len(old_feedback):]
Check.current_part['feedback'] = old_feedback
if new_feedback:
new_feedback = ['\n '.join(error.split('\n')) for error in new_feedback]
Check.error('Pri vhodni datoteki {0} z vsebino\n {1}\nso se pojavile naslednje napake:\n- {2}', filename, '\n '.join(content), '\n- '.join(new_feedback))
@staticmethod
@contextmanager
def input(content, visible=None):
old_stdin = sys.stdin
old_feedback = Check.current_part['feedback'][:]
try:
with Check.set_stringio(visible):
sys.stdin = Check.get('stringio')('\n'.join(content) + '\n')
yield
finally:
sys.stdin = old_stdin
new_feedback = Check.current_part['feedback'][len(old_feedback):]
Check.current_part['feedback'] = old_feedback
if new_feedback:
new_feedback = ['\n '.join(error.split('\n')) for error in new_feedback]
Check.error('Pri vhodu\n {0}\nso se pojavile naslednje napake:\n- {1}', '\n '.join(content), '\n- '.join(new_feedback))
@staticmethod
def out_file(filename, content, encoding=None):
encoding = Check.get('encoding', encoding)
with open(filename, encoding=encoding) as f:
out_lines = f.readlines()
equal, diff, line_width = Check.difflines(out_lines, content)
if equal:
return True
else:
Check.error('Izhodna datoteka {0}\n je enaka{1} namesto:\n {2}', filename, (line_width - 7) * ' ', '\n '.join(diff))
return False
@staticmethod
def output(expression, content, env=None, update_env=None):
global_env = Check.init_environment(env=env, update_env=update_env)
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
exec(expression, global_env)
finally:
output = sys.stdout.getvalue().rstrip().splitlines()
sys.stdout = old_stdout
equal, diff, line_width = Check.difflines(output, content)
if equal:
return True
else:
Check.error('Program izpiše{0} namesto:\n {1}', (line_width - 13) * ' ', '\n '.join(diff))
return False
@staticmethod
def difflines(actual_lines, expected_lines):
actual_len, expected_len = len(actual_lines), len(expected_lines)
if actual_len < expected_len:
actual_lines += (expected_len - actual_len) * ['\n']
else:
expected_lines += (actual_len - expected_len) * ['\n']
equal = True
line_width = max(len(actual_line.rstrip()) for actual_line in actual_lines + ['Program izpiše'])
diff = []
for out, given in zip(actual_lines, expected_lines):
out, given = out.rstrip(), given.rstrip()
if out != given:
equal = False
diff.append('{0} {1} {2}'.format(out.ljust(line_width), '|' if out == given else '*', given))
return equal, diff, line_width
@staticmethod
def init_environment(env=None, update_env=None):
global_env = globals()
if not Check.get('update_env', update_env):
global_env = dict(global_env)
global_env.update(Check.get('env', env))
return global_env
@staticmethod
def generator(expression, expected_values, should_stop=None, further_iter=None, clean=None, env=None, update_env=None):
from types import GeneratorType
global_env = Check.init_environment(env=env, update_env=update_env)
clean = Check.get('clean', clean)
gen = eval(expression, global_env)
if not isinstance(gen, GeneratorType):
Check.error("Izraz {0} ni generator.", expression)
return False
try:
for iteration, expected_value in enumerate(expected_values):
actual_value = next(gen)
if clean(actual_value) != clean(expected_value):
Check.error("Vrednost #{0}, ki jo vrne generator {1} je {2!r} namesto {3!r}.",
iteration, expression, actual_value, expected_value)
return False
for _ in range(Check.get('further_iter', further_iter)):
next(gen) # we will not validate it
except StopIteration:
Check.error("Generator {0} se prehitro izteče.", expression)
return False
if Check.get('should_stop', should_stop):
try:
next(gen)
Check.error("Generator {0} se ne izteče (dovolj zgodaj).", expression)
except StopIteration:
pass # this is fine
return True
@staticmethod
def summarize():
for i, part in enumerate(Check.parts):
if not Check.has_solution(part):
print('{0}. podnaloga je brez rešitve.'.format(i + 1))
elif not part['valid']:
print('{0}. podnaloga nima veljavne rešitve.'.format(i + 1))
else:
print('{0}. podnaloga ima veljavno rešitev.'.format(i + 1))
for message in part['feedback']:
print(' - {0}'.format('\n '.join(message.splitlines())))
settings_stack = [{
'clean': clean.__func__,
'encoding': None,
'env': {},
'further_iter': 0,
'should_stop': False,
'stringio': VisibleStringIO,
'update_env': False,
}]
@staticmethod
def get(key, value=None):
if value is None:
return Check.settings_stack[-1][key]
return value
@staticmethod
@contextmanager
def set(**kwargs):
settings = dict(Check.settings_stack[-1])
settings.update(kwargs)
Check.settings_stack.append(settings)
try:
yield
finally:
Check.settings_stack.pop()
@staticmethod
@contextmanager
def set_clean(clean=None, **kwargs):
clean = clean or Check.clean
with Check.set(clean=(lambda x: clean(x, **kwargs))
if kwargs else clean):
yield
@staticmethod
@contextmanager
def set_environment(**kwargs):
env = dict(Check.get('env'))
env.update(kwargs)
with Check.set(env=env):
yield
@staticmethod
@contextmanager
def set_stringio(stringio):
if stringio is True:
stringio = VisibleStringIO
elif stringio is False:
stringio = io.StringIO
if stringio is None or stringio is Check.get('stringio'):
yield
else:
with Check.set(stringio=stringio):
yield
def _validate_current_file():
    """Extract the solutions from this file, check them locally and
    submit the attempts to the projekt Tomo server."""
    def extract_parts(filename):
        # Split the source file into exercise parts using the
        # '# ===@N=' header markers.
        with open(filename, encoding='utf-8') as f:
            source = f.read()
        part_regex = re.compile(
            r'# =+@(?P<part>\d+)=\s*\n' # beginning of header
            r'(\s*#( [^\n]*)?\n)+?'     # description
            r'\s*# =+\s*?\n'            # end of header
            r'(?P<solution>.*?)'        # solution
            r'(?=\n\s*# =+@)',          # beginning of next part
            flags=re.DOTALL | re.MULTILINE
        )
        parts = [{
            'part': int(match.group('part')),
            'solution': match.group('solution')
        } for match in part_regex.finditer(source)]
        # The last solution extends all the way to the validation code,
        # so we strip any trailing whitespace from it.
        parts[-1]['solution'] = parts[-1]['solution'].rstrip()
        return parts
    def backup(filename):
        # Find the first free 'filename.N' name and copy the file there.
        backup_filename = None
        suffix = 1
        while not backup_filename or os.path.exists(backup_filename):
            backup_filename = '{0}.{1}'.format(filename, suffix)
            suffix += 1
        shutil.copy(filename, backup_filename)
        return backup_filename
    def submit_parts(parts, url, token):
        # POST all attempted parts (solution, validity, secrets, feedback)
        # as JSON and return the decoded server response.
        submitted_parts = []
        for part in parts:
            if Check.has_solution(part):
                submitted_part = {
                    'part': part['part'],
                    'solution': part['solution'],
                    'valid': part['valid'],
                    'secret': [x for (x, _) in part['secret']],
                    'feedback': json.dumps(part['feedback']),
                }
                if 'token' in part:
                    submitted_part['token'] = part['token']
                submitted_parts.append(submitted_part)
        data = json.dumps(submitted_parts).encode('utf-8')
        headers = {
            'Authorization': token,
            'content-type': 'application/json'
        }
        request = urllib.request.Request(url, data=data, headers=headers)
        response = urllib.request.urlopen(request)
        return json.loads(response.read().decode('utf-8'))
    def update_attempts(old_parts, response):
        # Merge the server's verdicts back into the local part dicts and
        # attach the hint for the first wrong secret, if any.
        updates = {}
        for part in response['attempts']:
            part['feedback'] = json.loads(part['feedback'])
            updates[part['part']] = part
        for part in old_parts:
            valid_before = part['valid']
            part.update(updates.get(part['part'], {}))
            valid_after = part['valid']
            if valid_before and not valid_after:
                wrong_index = response['wrong_indices'].get(str(part['part']))
                if wrong_index is not None:
                    hint = part['secret'][wrong_index][1]
                    if hint:
                        part['feedback'].append('Namig: {}'.format(hint))
    filename = os.path.abspath(sys.argv[0])
    file_parts = extract_parts(filename)
    Check.initialize(file_parts)
    # Part 1: priblizek_po_korakih (sqrt approximation after k steps).
    if Check.part():
        Check.current_part['token'] = 'eyJ1c2VyIjo0NTAwLCJwYXJ0IjoyMTQ2Mn0:1iLwJf:3h8UQCiPdDE8v292xLC-HjCzq_U'
        try:
            Check.equal('priblizek_po_korakih(2, 0)', 1)
            Check.equal('priblizek_po_korakih(2, 1)', 1.5)
            Check.equal('priblizek_po_korakih(2, 2)', 1.4166666666666665)
            Check.equal('priblizek_po_korakih(2, 3)', 1.4142156862745097)
            Check.equal('priblizek_po_korakih(2, 4)', 1.4142135623746899)
            Check.equal('priblizek_po_korakih(3, 5)', 1.7320508075688772)
            for n in range(1, 1000):
                k = n % 10
                Check.secret(priblizek_po_korakih(n, k), (n, k))
        except:
            Check.error("Testi sprožijo izjemo\n  {0}",
                        "\n  ".join(traceback.format_exc().split("\n"))[:-2])
    # Part 2: priblizek_do_natancnosti (sqrt approximation to tolerance).
    if Check.part():
        Check.current_part['token'] = 'eyJ1c2VyIjo0NTAwLCJwYXJ0IjoyMTQ2M30:1iLwJf:UfPb5Yl63UDLarzUHmesbzcMMKM'
        try:
            Check.equal('priblizek_do_natancnosti(2, 0.000001)', 1.4142135623746899)
            Check.equal('priblizek_do_natancnosti(2, 0.01)', 1.4166666666666665)
            Check.equal('priblizek_do_natancnosti(2, 0.00001)', 1.4142156862745097)
            Check.equal('priblizek_do_natancnosti(3, 0.000001)', 1.7320508100147274)
            Check.equal('priblizek_do_natancnosti(12345, 0.000001)', 111.10805551354053)
            for n in range(1, 1000):
                Check.secret(priblizek_do_natancnosti(n, 0.000001), n)
        except:
            Check.error("Testi sprožijo izjemo\n  {0}",
                        "\n  ".join(traceback.format_exc().split("\n"))[:-2])
    print('Shranjujem rešitve na strežnik... ', end="")
    try:
        url = 'https://www.projekt-tomo.si/api/attempts/submit/'
        token = 'Token 9a7722a5c35aa619c25fa80ae51cafcf33363e81'
        response = submit_parts(Check.parts, url, token)
    except urllib.error.URLError:
        print('PRI SHRANJEVANJU JE PRIŠLO DO NAPAKE! Poskusite znova.')
    else:
        print('Rešitve so shranjene.')
        update_attempts(Check.parts, response)
        if 'update' in response:
            print('Updating file... ', end="")
            backup_filename = backup(filename)
            # The server may send back a refreshed version of this file.
            with open(__file__, 'w', encoding='utf-8') as f:
                f.write(response['update'])
            print('Previous file has been renamed to {0}.'.format(backup_filename))
            print('If the file did not refresh in your editor, close and reopen it.')
    Check.summarize()
# Entry point: running the exercise file validates and submits solutions.
if __name__ == '__main__':
    _validate_current_file()
| [
"urbancmarko1@gmail.com"
] | urbancmarko1@gmail.com |
d99e7ce41fbb8fefe6664bce5bb89164292919a9 | 93d1bb284dd6fee32e4aed81794d9c7a10f3d870 | /課題1-2/humanrank/calcPagerank.py | 82c2ae9355365ed02cfd47b6042e043f7bc37991 | [] | no_license | SakuragiYoshimasa/WikiPageRank | 3d88ab71318e16785f3c0ea003f4c0794869b50b | 94953541d82fb93203fd45739f80d9426b389b8d | refs/heads/master | 2021-01-10T15:39:12.949559 | 2015-05-28T14:07:28 | 2015-05-28T14:07:28 | 36,161,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | # -*- coding: utf-8 -*-
import MySQLdb
import GetTitleFromIdApi
import LinkTargetAmountApi
import LinkedPagesListApi
import page
import PageManager
import GetIdFromTitle
import numpy as np
import time
import InertHumanPageRankApi
def calc(depth):
    # Build the n-by-n link matrix for pages up to `depth` hops.
    #pageManager = PageManager.PageManager(int(depth),targetPage.page_id)
    #pageManager.makePagesCollction()
    matrix = PageManager.make(int(depth))
    # Transpose so columns describe incoming links.
    transposedMatrix = Transpose(matrix)
    print transposedMatrix
    # Initial rank vector: one entry of 1 per page.
    pageRankVector = np.array([1 for col in range(transposedMatrix.shape[0])])
    # Damped power iteration over the matrix.
    pageRankVector = operationMatrixMultiple(transposedMatrix,pageRankVector)
    print pageRankVector
    # PageRank of the input (first) page.
    print pageRankVector[0]
    # NOTE(review): the import list at the top reads `InertHumanPageRankApi`
    # (missing an 's') while this call uses `InsertHumanPageRankApi`; one of
    # the two must be wrong -- confirm the actual module name.
    InsertHumanPageRankApi.InsertPageRank(pageRankVector)
#転置
def Transpose(matrix):
    """Return the transpose of *matrix*.

    Used to turn the outgoing-link matrix into an incoming-link matrix
    before the PageRank iteration.  Delegates to numpy instead of the
    old hand-rolled O(n^2) Python loop (removed as dead code).
    """
    return np.transpose(matrix)
#行列演算
def operationMatrixMultiple(transposedMatrix,pageRankVector):
    """Run 10 damped PageRank power iterations and return the rank vector.

    Each step computes  v <- 0.15 + 0.85 * (M . v)  with M the transposed
    link matrix, i.e. the standard PageRank update with damping 0.85.
    Prints the elapsed wall-clock time of the loop (kept from the
    original for rough profiling).  Dead commented-out code removed;
    print uses parentheses so it behaves the same on Python 2 and 3.
    """
    t = time.time()
    for _ in range(10):
        # One vectorised matrix-vector product per iteration.
        pageRankVector = 0.15 + 0.85 * transposedMatrix.dot(pageRankVector)
    print(time.time() - t)
    return pageRankVector
| [
"ysrhsp@outlook.com"
] | ysrhsp@outlook.com |
6c1bf6fab2e22592689f50f485e73331b10f3260 | 8b45220fa03e6f4352e9f52532f271b33b40512a | /branches/wx/grafit/project.py | feb382213dfa3ee2a5617a1d85d89656668f0a00 | [] | no_license | BackupTheBerlios/grafity-svn | de04a2e9a646992b8b89a2ed1f24f8437e7c1d89 | 06d45668abd4c9fccc56f8a409f61c42518308b0 | refs/heads/master | 2020-05-18T18:19:48.539045 | 2006-07-28T11:07:54 | 2006-07-28T11:07:54 | 40,603,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,396 | py | import sys
import re
import time, random, socket, md5
try:
import metakit
except ImportError:
import grafit.thirdparty.metakit as metakit
from grafit.actions import action_from_methods, action_list, action_from_methods2, StopAction
from grafit.signals import HasSignals
# by (Carl Free Jr. http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/213761)
def create_id(*args):
    """Generates a universally unique ID.
    Any arguments only create more randomness.

    NOTE: Python 2 only (`long`, the `L` literal suffix and the `md5`
    module do not exist in Python 3).
    """
    t = long(time.time() * 1000)
    r = long(random.random()*100000000000000000L)
    try:
        a = socket.gethostbyname(socket.gethostname())
    except:
        # if we can't get a network address, just imagine one
        a = random.random()*100000000000000000L
    # Hash timestamp + random number + host address + extra args.
    data = str(t)+' '+str(r)+' '+str(a)+' '+str(args)
    data = md5.md5(data).hexdigest()
    return data
# The layout of the metakit project database.
# For each type of object (worksheet, graph etc) we call
# register_class(class, metakit_desc)
# Maps Item subclass -> metakit view description string.
storage_desc = {}
def register_class(cls, description):
    """Register the metakit view description used to persist *cls* instances."""
    storage_desc[cls] = description
def wrap_attribute(name, signal=None):
    """Wrap a metakit column in a class attribute.

    If the wrapped attribute is an id of an object in project.items,
    it is wrapped with an attribute referencing the object.
    If *signal* is given, it is emitted on the owner after each write.
    """
    def get_data(self):
        # Raw column value; translated to the live object when the value
        # is a known item id.
        value = getattr(self.data, name)
        if hasattr(self, 'project') and value in self.project.items:
            value = self.project.items[value]
        return value
    def set_data(self, value):
        # Accept either an object (stored by id) or a raw value.
        if hasattr(self, 'project') and hasattr(value, 'id') and value in self.project.items.values():
            value = value.id
        try:
            setattr(self.data, name, value)
        except TypeError:
            # metakit string columns want bytes; retry with utf-8.
            setattr(self.data, name, value.encode('utf-8'))
        if signal:
            self.emit(signal)
    return property(get_data, set_data)
class Item(HasSignals):
    """Base class for all items in a Project.

    Handles creation/loading of the backing metakit row, name checking
    and the undoable rename / reparent actions.
    NOTE: Python 2 code (`xrange`, `sys.maxint`).
    """
    def __init__(self, project, name=None, parent=None, location=None):
        self.project = project
        action_list.disable()
        if location is None or isinstance(location, dict):
            # this is a new item, not present in the database
            # create an entry for it
            self.view, self.data, self.id = project._create(type(self), location)
            # we have to handle creation of the top folder as a special case
            # we cannot specify its parent when we create it!
            if hasattr(self, '_isroot') and self._isroot:
                parent = self
            # parent defaults to top-level folder
            # (XXX: should this be the current folder?)
            if parent is None:
                parent = self.project.top
            if name is None:
                name = self.create_name(parent)
            if not self.check_name(name, parent):
                raise NameError
            # enter ourselves in the project dictionary
            self.project.items[self.id] = self
            # initialize
            self.name = name
            self.parent = parent.id
        else:
            # this is an item already present in the database
            self.view, self.data, self.id = location
            # enter ourselves in the project dictionary
            self.project.items[self.id] = self
        action_list.enable()
        # We have to emit the signal at the end
        # so the signal handlers can access wrapped attributes.
        # We can't emit in project.add()
        self.project.emit('add-item', self)
    def check_name(self, name, parent):
        # Valid names are identifiers, unique within the parent folder.
        if not re.match('^[a-zA-Z]\w*$', name):
            return False
        if isinstance(parent, Folder) and name in [i.name for i in parent.contents()]:
            return False
        return True
    def create_name(self, parent):
        # First free 'prefixN' name within the parent folder.
        for i in xrange(sys.maxint):
            name = self.default_name_prefix+str(i)
            if self.check_name(name, parent):
                return name
    # --- undoable reparent action -------------------------------------
    def set_parent(self, state, parent):
        state['new'], state['old'] = parent, self._parent
        oldparent = self._parent
        self._parent = parent
        self.parent.emit('modified')
        if isinstance(oldparent, Folder):
            oldparent.emit('modified')
        else:
            # Old parent was not a folder (e.g. unset); abort the action.
            raise StopAction
    def undo_set_parent(self, state):
        self._parent = state['old']
        if state['old'] != '':
            state['old'].emit('modified')
        state['new'].emit('modified')
    def redo_set_parent(self, state):
        self._parent = state['new']
        if state['old'] != '':
            state['old'].emit('modified')
        state['new'].emit('modified')
    set_parent = action_from_methods2('object/set-parent', set_parent, undo_set_parent, redo=redo_set_parent)
    def get_parent(self):
        return self._parent
    parent = property(get_parent, set_parent)
    _parent = wrap_attribute('parent')
    # --- undoable rename action ---------------------------------------
    def set_name(self, state, n):
        if not self.check_name(n, self.parent):
            raise StopAction
        state['new'], state['old'] = n, self._name
        self._name = n
        self.set_name_notify()
    def undo_set_name(self, state):
        self._name = state['old']
        self.set_name_notify()
    def redo_set_name(self, state):
        self._name = state['new']
        self.set_name_notify()
    def set_name_notify(self):
        self.emit('rename', self._name, item=self)
        if isinstance(self.parent, Folder):
            self.parent.emit('modified')
    set_name = action_from_methods2('object/rename', set_name, undo_set_name, redo=redo_set_name)
    def get_name(self):
        return self._name
    name = property(get_name, set_name)
    _name = wrap_attribute('name')
    def todict(self):
        # Dump the backing metakit row as a plain dict.
        import mk
        return mk.row_to_dict(self.view, self.data)
    default_name_prefix = 'item'
class Folder(Item, HasSignals):
    """A project folder: an Item that contains other Items.

    Supports attribute/subscript access to children by name.
    NOTE: Python 2 code (`print >>`, `raise X, y` syntax).
    """
    def __init__(self, project, name=None, parent=None, location=None, _isroot=False):
        # we have to handle creation of the top folder as a special case
        # since we cannot specify its parent when we create it.
        # see Item.__init__
        self._isroot = _isroot
        self.project = project
        Item.__init__(self, project, name, parent, location)
    def contents(self):
        # Yield every live item whose parent id is this folder.
        for desc in storage_desc.values():
            for row in self.project.db.getas(desc):
                if row.parent == self.id and row.id in self.project.items and row.id != self.id:
                    yield self.project.items[row.id]
    def ancestors(self):
        # Yield parent, grandparent, ... up to (excluding) the top folder.
        if self == self.project.top:
            return
        yield self.parent
        for f in self.parent.ancestors():
            yield f
    def subfolders(self):
        for item in self.contents():
            if isinstance(item, Folder):
                yield item
    name = wrap_attribute('name')
    _parent = wrap_attribute('parent')
    def set_parent(self, parent):
        # Reparent, refusing moves that would create a cycle.
        oldparent = self._parent
        self._parent = parent
        if oldparent != '' and isinstance(self.parent, Folder) and self in self.parent.ancestors():
            print >>sys.stderr, "are you kidding?"
            self._parent = oldparent
            return
        if oldparent != '':
            oldparent.emit('modified')
        if isinstance(self.parent, Folder) and self != self.parent:
            self.parent.emit('modified')
        self.project.top.emit('modified')
    def get_parent(self):
        return self._parent
    parent = property(get_parent, set_parent)
    default_name_prefix = 'folder'
    up = property(lambda self: self.parent)
    def __getattr__(self, key):
#        try:
#            attr = Item.__getattr__(self, key)
#        except AttributeError, err:
        try:
            return self[key]
        except KeyError:
            # NOTE(review): `err` is never bound here (leftover from the
            # commented-out code above), so a missing attribute raises
            # NameError instead of AttributeError.
            raise AttributeError, err
    def __getitem__(self, key):
        # Look up a child item by name.
        cn = [i.name for i in self.contents()]
        ci = [i.id for i in self.contents()]
        if key in cn:
            return self.project.items[ci[cn.index(key)]]
        else:
            raise KeyError, "item '%s' does not exist" % key
    def __contains__(self, key):
        try:
            self[key]
            return True
        except KeyError:
            return False
    def __repr__(self):
        return '<Folder %s>' % self.name
# Persist Folder objects in the 'folders' metakit view.
register_class(Folder, 'folders[name:S,id:S,parent:S]')
class Project(HasSignals):
    """A grafit project: a metakit database of folders, worksheets and graphs
    with undoable create/remove operations and a shell-like current folder.

    NOTE: Python 2 code (`unicode`, `iteritems`, `print >>`, `raise X, y`).
    """
    def __init__(self, filename=None):
        if isinstance(filename, unicode):
            filename = filename.encode(sys.getfilesystemencoding())
        self.filename = filename
        if self.filename is None:
            # We initially create an in-memory database.
            # When we save to a file, we will reopen the database from the file.
            self.db = metakit.storage()
#            self.filename = 'defau.gt'
#            self.db = metakit.storage(self.filename, 1)
#            for desc in storage_desc.values():
#                self.db.getas(desc)
#            self.db.commit()
        else:
            self.db = metakit.storage(self.filename, 1)
            self.cleanup()
#        self.aside = metakit.storage('grafit-storage.mka', 1)
#        self.db.aside(self.aside)
#        print >>sys.stderr, "project created"
        self._modified = False
        action_list.connect('added', self.on_action_added)
        self.items = {}
        self.deleted = {}
        self._dict = {}
        # NOTE(review): this attribute is written as `save_dict` here but
        # read/written as `_save_dict` in cd()/unset_dict() below -- the
        # naming is inconsistent; confirm which one is intended.
        self.save_dict = {}
        # Create top folder.
        # - it must be created before all other items
        # - it must be created with _isroot=True, to set itself as its parent folder
        try:
            fv = self.db.getas(storage_desc[Folder])
            row = fv.select(name='top')[0]
            self.top = self.items[row.id] = Folder(self, location=(fv, row, row.id), _isroot=True)
        except IndexError:
            # can't find it in the database, create a new one.
            self.top = Folder(self, 'top', _isroot=True)
        self.here = self.top
        self.this = None
        # create objects
        for cls, desc in [(i, storage_desc[i]) for i in (Folder, grafit.worksheet.Worksheet, grafit.graph.Graph)]:
            view = self.db.getas(desc)
            for i, row in enumerate(view):
                if row.id != self.top.id:
                    if not row.id.startswith('-'):
#                        print 'loading', cls, row.id,
                        self.items[row.id] = cls(self, location=(view, row, row.id))
#                        print 'end'
                    else:
                        # Ids prefixed with '-' are soft-deleted items.
                        self.deleted[row.id] = cls(self, location=(view, row, row.id))
    def on_action_added(self, action=None):
        # Any recorded action marks the project as modified.
        self.modified = True
    def cd(self, folder):
        """Change the current folder, updating the interactive namespace."""
        # restore dictionary
        for o in self.here.contents():
            try:
                del self._dict[o.name]
            except KeyError:
                pass
        self._dict.update(self.save_dict)
        self._save_dict = {}
        self.here = folder
        # update dictionary
        self._dict['here'] = self.here
        self._dict['up'] = self.here.up
        for o in self.here.contents():
            if o.name in self._dict:
                # Remember the shadowed binding so cd() can restore it.
                self._save_dict[o.name] = self._dict[o.name]
            self._dict[o.name] = o
        self.emit('change-current-folder', folder)
    def set_current(self, obj):
        """Make *obj* (a child of the current folder) the current object."""
        if obj not in list(self.here.contents()):
            raise NotImplementedError
        self.this = obj
        self._dict['this'] = self.this
        self.emit('set-current-object', obj)
    def set_dict(self, d):
        """Attach the interactive namespace dict and populate it."""
        self._dict = d
        self._dict['top'] = self.top
        self._dict['this'] = self.this
        self.cd(self.here)
    def unset_dict(self):
        for o in self.here.contents():
            if o.name in self._dict:
                self._save_dict[o.name] = self._dict[o.name]
            self._dict[o.name] = o
    def cleanup(self):
        """Purge all deleted items from the database"""
        for cls, desc in storage_desc.iteritems():
            view = self.db.getas(desc)
            for i, row in enumerate(view):
                if row.id.startswith('-'):
                    view.delete(i)
    def _create(self, cls, location):
        """Create a new entry for an item of class `cls` in the database.

        This method is called from the constructor of all `Item`-derived
        classes, if the item is not already in the database.
        Returns the view, row and id of the new item.
        """
        try:
            view = self.db.getas(storage_desc[cls])
        except KeyError:
            raise TypeError, "project cannot create an item of type '%s'" % cls
        id = create_id()
        from mk import addrow
        if location is None:
            row = view.append(id=id)
        else:
            row = addrow(view, location)
            view[row].id = id
        data = view[row]
        return view, data, id
    # new ##################################
    def new(self, cls, *args, **kwds):
        # Undoable creation of a new item (do part).
        obj = cls(self, *args, **kwds)
        self.items[obj.id] = obj
        if obj.parent is self.top:
            self._dict[obj.name] = obj
        # don't emit 'add-item' because it is emitted by Item.__init__
        return obj, obj
    def new_undo(self, obj):
        # Soft-delete: prefix the id with '-' and move to self.deleted.
        del self.items[obj.id]
        obj.id = '-'+obj.id
        self.deleted[obj.id] = obj
        if obj.parent is self.top and obj.name in self._dict:
            del self._dict[obj.name]
        self.emit('remove-item', obj)
        obj.parent.emit('modified')
    def new_redo(self, obj):
        # Undo the soft-delete performed by new_undo.
        del self.deleted[obj.id]
        obj.id = obj.id[1:]
        self.items[obj.id] = obj
        if obj.parent is self.top:
            self._dict[obj.name] = obj
        self.emit('add-item', obj)
        obj.parent.emit('modified')
    def new_cleanup(self, obj):
        # Permanently drop the row once the action leaves the undo stack.
        if obj.id in self.deleted:
            del self.deleted[obj.id]
        obj.view.remove(obj.view.select(id=obj.id))
    new = action_from_methods('project_new', new, new_undo, new_redo, new_cleanup)
    # remove ###############################
    def remove(self, id):
        # Undoable soft-delete of an existing item.
        obj = self.items[id]
        ind = obj.view.find(id=id)
        # NOTE(review): the condition below repeats the same test twice;
        # presumably one half was meant to be something else.
        if obj.name in self._dict and obj.name in self._dict:
            del self._dict[obj.name]
        if ind == -1:
            raise NameError
        else:
            del self.items[id]
            obj.id = '-'+obj.id
            self.deleted[obj.id] = obj
        self.emit('remove-item', obj)
        return id
    def remove_undo(self, id):
        obj = self.deleted['-'+id]
        ind = obj.view.find(id=obj.id)
        del self.deleted[obj.id]
        obj.id = obj.id[1:]
        self.items[obj.id] = obj
        if obj.parent is self.top:
            self._dict[obj.name] = obj
        self.emit('add-item', obj)
    remove = action_from_methods('project_remove', remove, remove_undo)
    # Shortcuts for creating and removing folders
    def mkfolder(self, path, parent=None):
        self.new(Folder, path, parent)
    def rmfolder(self, path):
        if path in self.here:
            self.remove(self.here[path].id)
        else:
            raise NameError, "folder '%s' does not exist" % path
#    def icommit(self):
#        print >>sys.stderr, 'icommit'
#        self.db.commit()
#        self.aside.commit()
    def commit(self):
        """Flush the database to disk and clear the modified flag."""
#        self.db.commit(1)
        self.db.commit()
#        self.aside.commit()
        self.modified = False
    def saveto(self, filename):
        """Save the whole database to *filename*."""
        if isinstance(filename, unicode):
            filename = filename.encode(sys.getfilesystemencoding())
        try:
            f = open(filename, 'wb')
            self.db.save(f)
        finally:
            f.close()
        self.modified = False
    def get_modified(self):
        return self._modified
    def set_modified(self, value):
        # Emit 'modified'/'not-modified' only on actual transitions.
#        if value:
#            self.icommit()
        if value and not self._modified:
            self.emit('modified')
        elif self._modified and not value:
            self.emit('not-modified')
        self._modified = value
    modified = property(get_modified, set_modified)
# import only in order to register object types
import grafit.worksheet
import grafit.graph
| [
"danielf@9c678b79-fe04-0410-97bf-b6137538dda5"
] | danielf@9c678b79-fe04-0410-97bf-b6137538dda5 |
2955ad1171d290d6996293625599f01a37fe7871 | 175f9f142dcee56dde4b3e98775a7990a508ee55 | /reserve2/pipelines_store_to_mongo.py | 7d03913ccf5e134d5088840e8322323203828288 | [] | no_license | estrellaouyang/spiderredis | 1bf46eedf633f3504d8f01e7ac28a31726632389 | 1417e83989c945b26240fae16ef2a8b6ed2b5839 | refs/heads/master | 2020-05-02T03:05:59.775816 | 2019-03-26T10:48:41 | 2019-03-26T10:48:41 | 177,719,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
class Store2MongoPipeline(object):
    """Scrapy item pipeline that persists every scraped item into MongoDB."""

    # Collection all scraped items are written to.
    collection_name = 'scrapy_items'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's settings."""
        settings = crawler.settings
        return cls(
            mongo_uri=settings.get('MONGO_URI'),
            mongo_db=settings.get('MONGO_DATABASE', 'items'),
        )

    def open_spider(self, spider):
        """Connect to MongoDB when the spider starts."""
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        """Close the MongoDB connection when the spider finishes."""
        self.client.close()

    def process_item(self, item, spider):
        """Insert the item as a plain dict and pass it along unchanged."""
        self.db[self.collection_name].insert_one(dict(item))
        return item
| [
"support-jec@hotmail.com"
] | support-jec@hotmail.com |
4c98cf8a1b6b479bd5006e410e7bcfef77126afc | 88c2c7e9970c7bd26f960d10366021a9f6163b6e | /Feat_script/transformer_feat.py | 3703de9d9324e40ec8318c0e1c6124d1af2a4495 | [] | no_license | soumyadip1997/qepsilon | ee71ec4056e1e2db73c8f4fe2008b3ca2609eaef | aeb70ea651f0e5e14a9b24ab10ab29d880fdb9a1 | refs/heads/main | 2023-09-03T13:59:10.217963 | 2023-08-23T20:20:07 | 2023-08-23T20:20:07 | 607,034,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | import torch
from Bio.PDB.PDBParser import PDBParser
from transformers import BertModel, BertTokenizer,XLNetLMHeadModel, XLNetTokenizer,pipeline,T5EncoderModel, T5Tokenizer
import re
import os
import warnings
import requests
from tqdm.auto import tqdm
import glob
import numpy as np
import os
import pandas as pd
import math
from multiprocessing import Pool
import pickle
import argparse
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import *
def transformer1(sequence, model=None, tokenizer=None, device=None):
    """Embed a batch of space-separated residue sequences with an encoder.

    Tokenizes *sequence*, runs the model without gradients, and returns
    one numpy array per input, trimmed to its real (unpadded) length
    minus the trailing special token.
    """
    encoded = tokenizer.batch_encode_plus(sequence, add_special_tokens=True, padding=True)
    input_ids = torch.tensor(encoded['input_ids']).to(device)
    attention_mask = torch.tensor(encoded['attention_mask']).to(device)

    # Inference only -- no gradient bookkeeping needed.
    with torch.no_grad():
        output = model(input_ids=input_ids, attention_mask=attention_mask)
    hidden = output.last_hidden_state.cpu().numpy()

    features = []
    for idx, emb in enumerate(hidden):
        real_len = (attention_mask[idx] == 1).sum()
        # Drop padding and the final special token.
        features.append(emb[:real_len - 1])
    return features
def transformer_prep(loc,model=None,tokenizer=None,device=None):
    """Parse the PDB decoy at *loc* and return its transformer embedding.

    Reads the residue sequence from the structure, converts it to the
    space-separated one-letter format the encoder expects, and embeds
    it with transformer1().
    """
    #try:
    parser = PDBParser()
    # Collect (and thereby silence) parser warnings for malformed decoys.
    with warnings.catch_warnings(record=True) as w:
        structure = parser.get_structure("1", loc)
    residues = [r.get_resname() for r in structure.get_residues()]
    req_seq=""
    for p1 in range(len(residues)):
        req_seq+=str(three_to_one(residues[p1])+" ")
    # [:-1] drops the trailing space before embedding.
    trans_feat=transformer1([req_seq[:-1]],model,tokenizer,device)
    return np.array(trans_feat)
    #except:
    #    print("No")
# Entry point: embed every CASP decoy structure and save one .npy per decoy.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Res_Feat')
    parser.add_argument('--decoy-location', type=str, default="Q-epsilon/", metavar='N',
                        help='location to the downloaded decoy 3D structures of all CASP')
    parser.add_argument('--output-location', type=str, default="Q-epsilon/Features/", metavar='O',
                        help='location for the output features to be stored')
    args = parser.parse_args()
    output_path=args.output_location+"/TRANS/"
    # ProtTrans T5 encoder, run on GPU when available.
    tokenizer = T5Tokenizer.from_pretrained("Rostlab/prot_t5_xl_uniref50", do_lower_case=False )
    model = T5EncoderModel.from_pretrained("Rostlab/prot_t5_xl_uniref50")
    device =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): this handle is opened and closed but never written to.
    f = open("Index.txt", "a")
    model = model.to(device)
    model = model.eval()
    #model=None
    #tokenizer=None
    CASP_DIR=['CASP9','CASP10','CASP11','CASP12','CASP13','CASP14']
    for p1 in CASP_DIR:
        decoy_loc=glob.glob(args.decoy_location+p1+"/decoys/*/*")
        for i in decoy_loc:
            # NOTE(review): the bare except below hides all failures
            # (parse errors, I/O, CUDA) behind a single "No" print.
            try:
                flag=1
                target_name=(i.split("/")[-2])
                decoy_name=(i.split("/")[-1]).split(".")[0]
                req_output_name=output_path+"Trans_"+str(target_name)+"_"+str(decoy_name)
                transformer_features=transformer_prep(i,model,tokenizer,device)
                #print(transformer_features)
                np.save(req_output_name,transformer_features)
                print(f"Done {req_output_name}")
                #break
            except:
                print("No")
        #break
    f.close()
"noreply@github.com"
] | soumyadip1997.noreply@github.com |
c12c05a686344fc864f389e20514f16676a6f4b6 | de4e1332950d37707620c54a9613258c1dd9489c | /donghae/6주차/BOJ_2231.py | 71d0e943eacb4344e6752ef02d7569bf22d28dea | [] | no_license | PnuLikeLion9th/Summer_algorithm | 8fe74066b9673fb891b7205f75f808a04c7fe750 | dcfcb6325854b3b4c529451d5c6b162298b53bc1 | refs/heads/master | 2023-07-10T13:57:05.511432 | 2021-08-15T07:50:00 | 2021-08-15T07:50:00 | 378,679,514 | 3 | 10 | null | 2021-08-15T07:50:01 | 2021-06-20T15:32:18 | Python | UTF-8 | Python | false | false | 323 | py | n= int(input())
res = 0
for i in range(0,n+1): # test every candidate generator from 0 up to n
    n_sum = list(map(int, str(i))) # digits of the candidate
    res = i+sum(n_sum) # "decomposition sum": candidate plus its digit sum
    if n == res:
        print(i) # smallest generator found: print it and stop
        break;
    if n == i:
        print(0) # scanned all candidates up to n without a match
| [
"ldonghae320@gmail.com"
] | ldonghae320@gmail.com |
6ef1afb8fd47c9869fbc831c11b4d24aacbf704c | 9062c1b2b1715d4b5b34062dd52b6007fb2ca537 | /tensorflow/python/ops/collective_ops_gpu_test.py | d12d6240cf97e1d80b16ed5dd9f5a36901f73d69 | [
"Apache-2.0"
] | permissive | robotastic/tensorflow | 54c4c7cbcde5e9d374897d5038a96eb5feff16aa | b88f9f60de706dbe78acf9189b9fa04bdc7a6836 | refs/heads/master | 2020-08-30T06:13:07.176029 | 2019-11-05T01:49:44 | 2019-11-05T01:49:44 | 218,283,699 | 2 | 1 | Apache-2.0 | 2019-10-29T12:38:51 | 2019-10-29T12:38:50 | null | UTF-8 | Python | false | false | 12,428 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Collective Operations that require GPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class CollectiveOpGPUTest(test.TestCase):
  """Tests for collective ops (all-reduce, broadcast, all-gather) on GPU.

  Each test carves a single physical GPU into virtual devices so that a
  multi-worker NCCL collective can be exercised on one machine.
  """

  def _configure(self, group_size, set_config_proto_nccl=True):
    """Set environment variables and return `ConfigProto` for NCCL execution."""
    # Configure virtual GPU devices
    virtual_devices = [config_pb2.GPUOptions.Experimental.VirtualDevices(
        memory_limit_mb=([1 << 10] * group_size))]  # 1 GB per virtual GPU
    gpu_options = config_pb2.GPUOptions(
        visible_device_list='0',
        experimental=config_pb2.GPUOptions.Experimental(
            virtual_devices=virtual_devices))
    # Configure NCCL
    os.environ['NCCL_DEBUG'] = 'INFO'
    os.environ['NCCL_LAUNCH_MODE'] = 'PARALLEL'
    experimental = config_pb2.ConfigProto.Experimental()
    if set_config_proto_nccl:
      experimental.collective_nccl = True
    return config_pb2.ConfigProto(gpu_options=gpu_options,
                                  experimental=experimental)

  @test_util.run_deprecated_v1
  def testBasicNcclAllReduce(self):
    """All-reduce (Add then Div) across two virtual GPUs averages the inputs."""
    inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
              [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
    expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
    group_size = len(inputs)
    group_key = 1
    instance_key = 1
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for i in range(group_size):
        with ops.device(devices[i]):
          t = constant_op.constant(inputs[i])
          collectives.append(collective_ops.all_reduce(
              t, group_size, group_key, instance_key, 'Add', 'Div'))
      results = sess.run(collectives)
      for result in results:
        self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testInt32Error(self):
    """NCCL collectives reject int32 tensors on GPU with an InternalError."""
    inputs = [[0, 1], [2, 3]]
    group_size = len(inputs)
    group_key = 1
    instance_key = 50
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for i in range(group_size):
        with ops.device(devices[i]):
          t = constant_op.constant(inputs[i], dtype=dtypes.int32)
          collectives.append(collective_ops.all_reduce(
              t, group_size, group_key, instance_key, 'Add', 'Div'))
      with self.assertRaisesRegexp(
          errors.InternalError,
          'does not support datatype DT_INT32 on DEVICE_GPU'):
        sess.run(collectives)

  @test_util.run_deprecated_v1
  def testFp16Reduce(self):
    """All-reduce works for float16 inputs (looser tolerances)."""
    inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
              [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
    expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
    group_size = len(inputs)
    group_key = 1
    instance_key = 100
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for i in range(group_size):
        with ops.device(devices[i]):
          t = constant_op.constant(inputs[i], dtype=dtypes.float16)
          collectives.append(collective_ops.all_reduce(
              t, group_size, group_key, instance_key, 'Add', 'Div'))
      results = sess.run(collectives)
      # Enumerate explicitly: the original reused the stale loop variable `i`
      # left over from the graph-building loop above, so every log line
      # reported the same (last) entry of `results` instead of the one
      # actually being checked.
      for i, result in enumerate(results):
        logging.info('i {} result {} expected {}'.format(i, result, expected))
        self.assertAllClose(result, expected, rtol=1e-3, atol=1e-3)

  @test_util.run_deprecated_v1
  def testNcclHintAllReduce(self):
    """NCCL can be requested per-op via communication_hint, without the proto flag."""
    inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
              [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
    expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
    group_size = len(inputs)
    group_key = 1
    instance_key = 1
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(
        config=self._configure(group_size,
                               set_config_proto_nccl=False)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for i in range(group_size):
        with ops.device(devices[i]):
          t = constant_op.constant(inputs[i])
          collectives.append(collective_ops.all_reduce(
              t, group_size, group_key, instance_key, 'Add', 'Div',
              communication_hint='nccl'))
      results = sess.run(collectives)
      for result in results:
        self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testBasicNcclBroadcast(self):
    """broadcast_send on one device is received intact on the other."""
    tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
    group_size = 2
    group_key = 1
    instance_key = 1
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      with ops.device(devices[0]):
        t = constant_op.constant(tensor_value)
        collectives.append(collective_ops.broadcast_send(
            t, t.shape, t.dtype, group_size, group_key, instance_key))
      with ops.device(devices[1]):
        t = constant_op.constant(tensor_value)
        collectives.append(collective_ops.broadcast_recv(
            t.shape, t.dtype, group_size, group_key, instance_key))
      results = sess.run(collectives)
      for result in results:
        self.assertAllClose(result, tensor_value, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testNcclBroadcastDoubleRecv(self):
    """A broadcast group with only receivers fails: no source to send."""
    tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
    group_size = 2
    group_key = 1
    instance_key = 1
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for device in devices:
        with ops.device(device):
          t = constant_op.constant(tensor_value)
          collectives.append(collective_ops.broadcast_recv(
              t.shape, t.dtype, group_size, group_key, instance_key))
      with self.assertRaisesRegexp(errors.InternalError, 'found no source'):
        sess.run(collectives)

  @test_util.run_deprecated_v1
  def testNcclBroadcastDoubleSend(self):
    """A broadcast group with two senders fails: the group already has a source."""
    tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
    group_size = 2
    group_key = 1
    instance_key = 1
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for device in devices:
        with ops.device(device):
          t = constant_op.constant(tensor_value)
          collectives.append(collective_ops.broadcast_send(
              t, t.shape, t.dtype, group_size, group_key, instance_key))
      with self.assertRaisesRegexp(errors.InternalError, 'already has source'):
        sess.run(collectives)

  @test_util.run_deprecated_v1
  def testBasicNcclAllGather(self):
    """All-gather concatenates each participant's tensor in device order."""
    inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
              [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
    expected = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1,
                0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]
    group_size = len(inputs)
    group_key = 1
    instance_key = 1
    devices = ['/GPU:{}'.format(i) for i in range(group_size)]

    with self.session(config=self._configure(group_size)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      collectives = []
      for i in range(group_size):
        with ops.device(devices[i]):
          t = constant_op.constant(inputs[i])
          collectives.append(collective_ops.all_gather(t, group_size,
                                                       group_key, instance_key))
      results = sess.run(collectives)
      for result in results:
        self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testCollectiveDeviceMismatch(self):
    """Mixing CPU and GPU members in one collective group is rejected."""
    group_size = 2
    group_key = 10
    instance_key = 20
    t0 = [1, 2, 3, 4]
    t1 = [5, 6, 7, 8]

    with self.session(
        config=self._configure(group_size,
                               set_config_proto_nccl=False)) as sess:
      if not test_util.is_gpu_available(cuda_only=True):
        self.skipTest('No GPU available')
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(t0)
        c0 = collective_ops.all_reduce(in0, group_size, group_key,
                                       instance_key, 'Add', 'Id')
      with ops.device('/GPU:0'):
        in1 = constant_op.constant(t1)
        c1 = collective_ops.all_reduce(in1, group_size, group_key,
                                       instance_key, 'Add', 'Id')
      run_options = config_pb2.RunOptions()
      run_options.experimental.collective_graph_key = 100
      with self.assertRaisesRegexp(errors.InternalError,
                                   'but that group has type'):
        sess.run([c0, c1], options=run_options)

  @test_util.run_v2_only
  def testCollectiveReduceMinMax(self):
    """Eager-mode all-reduce supports Max and Min merge ops over NCCL."""
    gpus = config.list_physical_devices('GPU')
    if len(gpus) != 1:
      self.skipTest('Expected 1 GPU but found {} GPUs'.format(len(gpus)))
    config.set_virtual_device_configuration(gpus[0], [
        context.VirtualDeviceConfiguration(1024),
        context.VirtualDeviceConfiguration(1024)
    ])
    context.ensure_initialized()

    @def_function.function
    def run_all_reduce(group_key, instance_key, merge_op):
      group_size = 2
      t0 = [1., 20., 3., 40., 5.]
      t1 = [10., 2., 30., 4., 50.]
      os.environ['NCCL_DEBUG'] = 'INFO'
      os.environ['NCCL_LAUNCH_MODE'] = 'PARALLEL'
      with ops.device('/GPU:0'):
        in0 = constant_op.constant(t0)
        c0 = collective_ops.all_reduce(
            in0, group_size, group_key, instance_key, merge_op, final_op='Id',
            communication_hint='nccl')
      with ops.device('/GPU:1'):
        in1 = constant_op.constant(t1)
        c1 = collective_ops.all_reduce(
            in1, group_size, group_key, instance_key, merge_op, final_op='Id',
            communication_hint='nccl')
      return c0, c1

    for combination in [('Max', [10., 20., 30., 40., 50.]),
                        ('Min', [1., 2., 3., 4., 5.])]:
      merge_op = combination[0]
      results = run_all_reduce(group_key=10, instance_key=20, merge_op=merge_op)
      expected = combination[1]
      for result in results:
        self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
  test.main()  # Discover and run the test cases defined above.
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
831824ea60627955c3668694fcdecb08741b2688 | 9b62b0d104f4ae9ca7ffd1c330bbc7b9252d6a3c | /sms_sender/urls.py | 340dbf2b5d655d5bcff7d88cca0753a319c77258 | [] | no_license | KONAPAVANKUMAR/sms_sender | efd403dd9881c55298df460c8ce096220e4de2ff | 4e1ac07dfbee466abd6afb29894136d2b023ada8 | refs/heads/main | 2023-08-25T07:40:39.713335 | 2021-10-01T13:03:08 | 2021-10-01T13:03:08 | 412,468,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """sms_sender URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Project-level URL routes: the Django admin plus the smsapp application.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('smsapp.urls'))  # everything else is delegated to smsapp
]
| [
"konapavankumar.pk@gmail.com"
] | konapavankumar.pk@gmail.com |
192f1edf5a7c689278a89613efd7f7460b9516b8 | 1f6a85330596eb86a55e631ce5a0a643e200e977 | /muddery/server/typeclasses/script_room_interval.py | 2048e8b16d3f7555894ca832a36db1eb0acbe74d | [
"BSD-3-Clause"
] | permissive | kwer8080/muddery | ba41765c6245d33978b431ef490f10873ca8615c | 8b712eeb90cfee2d602aad4505a4929528d44afd | refs/heads/master | 2022-12-02T14:27:22.363386 | 2020-08-16T03:51:12 | 2020-08-16T03:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,549 | py | """
Scripts
Scripts are powerful jacks-of-all-trades. They have no in-game
existence and can be used to represent persistent game systems in some
circumstances. Scripts can also have a time component that allows them
to "fire" regularly or a limited number of times.
There is generally no "tree" of Scripts inheriting from each other.
Rather, each script tends to inherit from the base Script class and
just overloads its hooks to have it perform its function.
"""
import time
from evennia.scripts.scripts import DefaultScript
from muddery.server.mappings.event_action_set import EVENT_ACTION_SET
class ScriptRoomInterval(DefaultScript):
    """
    This script triggers an event in a room at intervals.

    It is attached to a character object (self.obj) while that character is
    in self.db.room; each repeat fires the configured event action, and the
    optional "offline" mode catches up on intervals missed while the
    character was away (based on self.db.last_trigger_time).
    """
    def at_script_creation(self):
        # Initialize persistent attributes only if they are not already set,
        # so re-creation does not clobber configured data.
        if not self.attributes.has("room"):
            self.db.room = None
        if not self.attributes.has("event_key"):
            self.db.event_key = ""
        if not self.attributes.has("action"):
            self.db.action = ""
        if not self.attributes.has("begin_message"):
            self.db.begin_message = ""
        if not self.attributes.has("end_message"):
            self.db.end_message = ""
        if not self.attributes.has("offline"):
            self.db.offline = False
        if not self.attributes.has("last_trigger_time"):
            self.db.last_trigger_time = 0
    def set_action(self, room, event_key, action, offline, begin_message, end_message):
        """
        Set action data.

        Args:
            room: (object) the room this interval is bound to; the script
                removes itself when the character leaves this room.
            event_key: (string) event's key.
            action: (string) action's key.
            offline: (bool) whether missed intervals are made up when the
                script restarts (e.g. after the character was offline).
            begin_message: (string) message sent to the character when the
                script starts; may be empty.
            end_message: (string) message sent to the character when the
                script stops; may be empty.
        """
        self.db.room = room
        self.db.event_key = event_key
        self.db.action = action
        self.db.begin_message = begin_message
        self.db.end_message = end_message
        self.db.offline = offline
        self.db.last_trigger_time = 0
    def at_start(self):
        """
        Called every time the script is started.
        """
        # Announce the start to the attached character, if any.
        if self.db.begin_message:
            if self.obj:
                self.obj.msg(self.db.begin_message)
        # Offline catch-up: fire the action once for every full interval that
        # elapsed since the last recorded trigger time.
        if self.db.offline:
            last_time = self.db.last_trigger_time
            if last_time:
                current_time = time.time()
                times = int((current_time - last_time) / self.interval)
                if times > 0:
                    self.db.last_trigger_time = current_time
                    action = EVENT_ACTION_SET.get(self.db.action)
                    # Only actions that implement offline_func support catch-up.
                    if action and hasattr(action, "offline_func"):
                        action.offline_func(self.db.event_key, self.obj, self.db.room, times)
    def at_repeat(self):
        """
        Trigger events.
        """
        if not self.obj.location:
            # The character's location is empty (maybe just login).
            return
        if self.obj.location != self.db.room:
            # The character has left the room.
            self.obj.scripts.delete(self)
            return
        # Do actions.
        if self.db.offline:
            # Record the trigger time so at_start() can compute missed runs.
            self.db.last_trigger_time = time.time()
        # NOTE(review): at_start() uses EVENT_ACTION_SET.get(...) while this
        # method uses EVENT_ACTION_SET.func(...); confirm both accessors
        # exist on the action set and return the intended callables.
        func = EVENT_ACTION_SET.func(self.db.action)
        if func:
            func(self.db.event_key, self.obj, self.db.room)
    def at_stop(self):
        """
        Called every time the script is stopped.
        """
        # Announce the stop to the attached character, if any.
        if self.db.end_message:
            if self.obj:
                self.obj.msg(self.db.end_message)
| [
"luyijun999@gmail.com"
] | luyijun999@gmail.com |
fe5f044c311bdc55c5a7fdfcf1035ade7b9bada9 | 99de45be1c5707fd2c06c5cfef09e1737aaadf53 | /Week 2/Chapter 8/8-13.py | 4bb92b96b6d6a1f09b7c91aee0282acc1cc272cb | [] | no_license | ArnoBali/book_exercises | d3cffb86fdec609499e5974d2491d3c4d5a57136 | b7b17f90fc2cbb369f8eaacd1c3d03db3328cbd8 | refs/heads/master | 2020-04-25T15:18:47.701114 | 2019-02-28T08:36:04 | 2019-02-28T08:36:04 | 172,874,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | '''
8-13. User Profile: Start with a copy of user_profile.py from page 153 .
Build a profile of yourself by calling build_profile(), using your first and last names and three other key-value pairs that describe you .
''' | [
"akoldewee@gmail.com"
] | akoldewee@gmail.com |
4d89d6885d89e3ef8dd81d55898bbf73e87b17df | 4f0d0e5833999fec1a6a7ffb44977148b0453ccd | /singleton.py | 24acbe52d0cf530efcbbe2b616c8a542ebf9459a | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jlisee/filelock | 3eb6d100463183fb11bdaecd416ee057298024ee | 470e0a92b0e77401a217e5cc52f1a8d6c3e409c4 | refs/heads/master | 2016-08-05T11:46:54.529152 | 2014-11-23T23:05:32 | 2014-11-23T23:05:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,936 | py | # Software License:
# Python Software Foundation License Version 2
# See: PSF-LICENSE.txt for the full license.
# Python Imports
import sys
import os
import tempfile
import logging
# Our Imports
import lockfile
class SingleInstance(object):
    """
    If you want to prevent your script from running in parallel just instantiate
    SingleInstance() class. If is there another instance already running it will
    exit the application with the message "Another instance is already running,
    quitting.", returning -1 error code.

    >>> import singleton
    ... me = singleton.SingleInstance()

    This option is very useful if you have scripts executed by crontab at small
    amounts of time.

    Remember that this works by creating a lock file with a filename based on
    the full path to the script file. You can override this with the provided
    program_path option.
    """

    def __init__(self, flavor_id=None, program_path=None, pid=None):
        """Acquire the singleton lock, or exit(-1) if it is already held.

        Args:
            flavor_id: optional discriminator so several "flavors" of the
                same script can each run one instance.
            program_path: path used to derive the lock file name; defaults
                to the running script (sys.argv[0]).
            pid: pid recorded in the pid file; defaults to os.getpid().
        """
        self.lockfile = self.lockfile_path(flavor_id, program_path)
        self.pidpath = None
        self.lock = lockfile.FileLock(self.lockfile)
        self.fd = None
        logger.debug("SingleInstance lockfile: " + self.lockfile)
        try:
            # timeout=0: fail immediately instead of waiting for the holder.
            self.lock.acquire(timeout=0)
        except lockfile.Timeout:
            pass
        if self.lock.is_locked():
            self.fd = self.lock._lock_file_fd
        if self.fd is None:
            logger.error("Another instance is already running, quitting.")
            sys.exit(-1)
        else:
            # Write out the pid file
            if pid is None:
                pid = os.getpid()
            self.pidpath = self.pidfile_path(flavor_id, program_path)
            with open(self.pidpath, 'w+') as pidfile:
                pidfile.write('%d\n' % pid)
            self.initialized = True

    @staticmethod
    def lockfile_path(flavor_id=None, program_path=None):
        """
        Generates a lock file path based on the location of the executable,
        and flavor_id.
        """
        if flavor_id is None:
            flavor_id = ""
        if program_path is None:
            program_path = sys.argv[0]
        program_noext, _ = os.path.splitext(os.path.abspath(program_path))
        # Flatten the absolute path into a single file name placed in the
        # system temp directory, e.g. /tmp/-home-user-script-<flavor>.lock
        basename = program_noext. \
            replace("/", "-"). \
            replace(":", ""). \
            replace("\\", "-") + '-%s' % flavor_id + '.lock'
        return os.path.normpath(tempfile.gettempdir() + '/' + basename)

    @staticmethod
    def pidfile_path(flavor_id=None, program_path=None):
        """
        Get the path to the pid file.
        """
        # Same name as the lock file, with a .pid extension instead.
        lockpath = SingleInstance.lockfile_path(flavor_id, program_path)
        basename, _ = os.path.splitext(lockpath)
        return basename + ".pid"

    @staticmethod
    def get_pid(program_path, flavor_id=""):
        """
        Gets the pid of the given program if it's running, None otherwise.
        """
        lockpath = SingleInstance.pidfile_path(flavor_id=flavor_id,
                                               program_path=program_path)
        pid = None
        if os.path.exists(lockpath):
            c = open(lockpath).read()
            # A complete pid file is a decimal pid followed by a newline.
            if c and len(c) and c.endswith('\n'):
                pid = int(c)
        return pid

    def __del__(self):
        """Best-effort cleanup: remove the pid file and release the lock."""
        # Use getattr(): __del__ can run on a partially constructed instance,
        # e.g. when __init__ exited because another instance held the lock.
        pidpath = getattr(self, 'pidpath', None)
        try:
            # pidpath is None when this instance never won the lock, so there
            # is no pid file of ours to remove.  (The previous version passed
            # None straight to os.path.exists(), raising TypeError here.)
            if pidpath is not None and os.path.exists(pidpath):
                os.remove(pidpath)
        except Exception as e:
            if logger:
                logger.warning(e)
            else:
                print("Unloggable error: %s" % e)
        # Release our lock, but only if we actually hold it; releasing an
        # unlocked FileLock raises.
        lock = getattr(self, 'lock', None)
        try:
            if lock is not None and lock.is_locked():
                lock.release()
        except Exception as e:
            if logger:
                logger.warning(e)
            else:
                print("Unloggable error: %s" % e)
            # The previous version called sys.exit(-1) here; SystemExit raised
            # inside __del__ is swallowed by the interpreter anyway, so it
            # only produced "Exception ignored" noise and has been dropped.
sys.exit(-1)
# Module-level logger; a StreamHandler is attached so warnings are visible
# even when the host application has not configured logging.
logger = logging.getLogger("lockfile.singleton")
logger.addHandler(logging.StreamHandler())
| [
"jlisee@gmail.com"
] | jlisee@gmail.com |
c2d66a86bc8c1bdefdcc8f232bcd6f31506bf870 | 02e1875e7073f91bec47da5e260e39602129fb14 | /strings.py | ba4fe947466f02d8e3fc551874a3dfe92554ea43 | [] | no_license | lipewinning/python | aeee6a270dd69902e91dd14ef5089a157cccbebb | 7d8b383f6fedc0eb09600723955de24500adfcab | refs/heads/master | 2022-08-02T17:00:51.750014 | 2020-05-24T16:01:50 | 2020-05-24T16:01:50 | 266,376,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | hello = 'hello world'
print(hello)
run = "I'm going on a run" #using '
print(run)
print('hello \nworld')
print( len(hello) )
print( hello[0])
print( hello[-3])
print( hello[6:])
print(hello[6:9])
print(hello[:5])
print( hello[::2] )
print(hello[:5] + ' ' + hello[6:])
letter = 'z'
print(letter * 10)
print(hello.replace('l', 'L'))
print( hello.upper())
print( hello.split('o'))
title = 'Hi {0} welcome to our hotel. Have a lovely {1}'
print ( title.format('Sonia', 'Friday'))
title = 'Hi2 {name} welcome to our hotel. Have a lovely {day}'
print ( title.format(name='Sonia', day='Friday')) | [
"felipealvesdesousa@gmail.com"
] | felipealvesdesousa@gmail.com |
cb72bed745489fd0e982e080dff5966200d993e3 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py | e7bb5052b4b5d9571da6b4b40941ddd27288a488 | [
"Apache-2.0",
"EPL-1.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 1,405 | py | # Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
    """Rewrites ``intern(s)`` calls into ``sys.intern(s)``.

    In Python 3, ``intern`` moved from the builtins into the ``sys``
    module; this fixer rewrites the call and ensures ``sys`` is imported.
    """
    BM_compatible = True  # participates in the bottom-matcher optimization
    order = "pre"  # apply during pre-order traversal
    # Matches intern(<obj>) where the argument is not a keyword argument;
    # `after` captures any trailers following the call (indexing, attribute
    # access, chained calls, ...).
    PATTERN = """
    power< 'intern'
           trailer< lpar='('
                    ( not(arglist | argument<any '=' any>) obj=any
                      | obj=arglist<(not argument<any '=' any>) any ','> )
                    rpar=')' >
           after=any*
    >
    """
    def transform(self, node, results):
        """Return a new ``sys.intern(...)`` node replacing *node*."""
        syms = self.syms
        obj = results["obj"].clone()
        if obj.type == syms.arglist:
            newarglist = obj.clone()
        else:
            # Wrap a single argument into an arglist node.
            newarglist = pytree.Node(syms.arglist, [obj.clone()])
        after = results["after"]
        if after:
            after = [n.clone() for n in after]
        # Build sys.intern(<args>) followed by the original trailers.
        new = pytree.Node(syms.power,
                          Attr(Name(u"sys"), Name(u"intern")) +
                          [pytree.Node(syms.trailer,
                                       [results["lpar"].clone(),
                                        newarglist,
                                        results["rpar"].clone()])] + after)
        new.prefix = node.prefix  # preserve original leading whitespace
        touch_import(None, u'sys', node)  # ensure `import sys` is present
        return new
| [
"dmitry.trofimov@jetbrains.com"
] | dmitry.trofimov@jetbrains.com |
ede56eb757eb2fc97eb75d30cbfbbdeecdf49398 | a0c26df97a4f4aede90a65b8a66de06e9b6284fe | /account/urls.py | 4712a3606d0bc263c28b4de9dee277568839602a | [] | no_license | Kelmac/django | fa8444a29cba967fa8ffadacca3c215067a54413 | ec71e4ca18d54ab236f338b553db46ef4e871cea | refs/heads/master | 2021-09-06T16:29:48.151034 | 2018-02-08T14:03:37 | 2018-02-08T14:03:37 | 120,091,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from django.conf.urls import url
from . import views
# URL routes for the account app: auth views plus the dashboard.
# NOTE(review): the auth views are referenced by dotted-path strings
# ('django.contrib.auth.views.login', ...). String view references were
# deprecated in Django 1.8 and removed in 1.10 -- this urlconf presumably
# targets an older Django; confirm the project's version before upgrading.
urlpatterns = [
    # url(r'^login/$', views.user_login, name='login'),
    url(r'^login/$', 'django.contrib.auth.views.login',name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout',name='logout'),
    url(r'^logout-then-login/$', 'django.contrib.auth.views.logout_then_login',name='logout_then_login'),
    url(r'^$',views.dashboard, name='dashboard'),  # custom dashboard view
]
"pteresiak@gmail.com"
] | pteresiak@gmail.com |
c4baca129a1ab492e6a184dae0870b3e22f04a9d | 430f3f80e364e7f800eedb97f26c527be6c1439f | /modules/parser/parser.py | de89fbce6d6013151339e12e1c89a1e374e916a2 | [] | no_license | stas12312/tensor-test-task | 3094dab330b2f7ffa5f96e7ca4aa784eec835098 | de3ee237cf8bfdcae58e7a793c704650f566ad6c | refs/heads/main | 2023-06-26T11:21:28.301665 | 2021-07-21T15:46:07 | 2021-07-21T15:46:07 | 388,165,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | from typing import Union
from bs4 import BeautifulSoup, Tag, NavigableString, PageElement, Comment # noqa
from .converter import Converter
from .strategy import Strategy
class Parser:
    """Finds the useful content of an HTML page and renders it as text.

    The supplied search strategy decides which elements of the parsed page
    matter; the converter turns each selected element into its textual form.
    """

    def __init__(self, body: bytes,
                 strategy: Strategy, converter: Converter):
        self.body = BeautifulSoup(body, 'html.parser')
        self.strategy = strategy
        self.converter = converter
        self.elements = self._collect_useful_elements()

    def _collect_useful_elements(self) -> list[Union[Tag, NavigableString]]:
        """Ask the strategy which elements of the page carry useful content."""
        return self.strategy.get_elements(self.body)

    def get_elemets(self) -> list[Union[Tag, NavigableString]]:
        """Return the elements selected by the strategy.

        (The method name keeps its original spelling for API compatibility.)
        """
        return self.elements

    def get_text(self) -> str:
        """Convert each selected element to text and join the non-empty parts."""
        converted = (self.converter.to_text(element) for element in self.elements)
        return '\n'.join(part for part in converted if part)
| [
"911rush@gmail.com"
] | 911rush@gmail.com |
1b87cae6819e4ab4adf9f153f5d57e83421a7aab | c2252e7cfc871c8fc0d5647253e7e5f0899622e5 | /gleu.py | d30568612324c3c797e3f90f8709442b291eb704 | [] | no_license | X11/DAST | 41ab740ee8e05bb5a7942b1a0945320b37697669 | f924cf0cacfbfc69e735461b41dec92cc3577c69 | refs/heads/master | 2021-04-23T17:14:49.463410 | 2020-04-08T18:55:34 | 2020-04-08T18:55:34 | 249,942,641 | 0 | 0 | null | 2020-03-25T09:57:55 | 2020-03-25T09:57:54 | null | UTF-8 | Python | false | false | 8,698 | py | # -*- coding: utf-8 -*-
# Natural Language Toolkit: GLEU Score
#
# Copyright (C) 2001-2020 NLTK Project
# Authors:
# Contributors: Mike Schuster, Michael Wayne Goodman, Liling Tan
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from collections import Counter
from nltk.util import ngrams, everygrams
from nltk import tokenize
import nltk
# nltk.download('punkt')
import re
def sentence_gleu(references, hypothesis, min_len=1, max_len=4):
    """Compute the sentence-level GLEU (Google-BLEU) score.

    GLEU was introduced for Google's NMT system (Wu et al. 2016,
    https://arxiv.org/pdf/1609.08144v2.pdf). It records every n-gram of
    order ``min_len`` through ``max_len`` in both the hypothesis and the
    reference(s), then returns the minimum of n-gram precision and n-gram
    recall. The score is always between 0 (no matching n-grams) and 1
    (all n-grams match) and is symmetric when hypothesis and reference
    are swapped, which makes it better suited than BLEU as a per-sentence
    reward objective.

    This function is a thin wrapper that delegates to :func:`corpus_gleu`
    with a single-sentence corpus.

        >>> ref = 'the cat is on the mat'.split()
        >>> hyp = 'the the the the the the the'.split()
        >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS
        0.0909...

        >>> ref1 = str('It is a guide to action that ensures that the military '
        ...            'will forever heed Party commands').split()
        >>> hyp1 = str('It is a guide to action which ensures that the military '
        ...            'always obeys the commands of the party').split()
        >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS
        0.4393...

    :param references: a list of reference sentences
    :type references: list(list(str))
    :param hypothesis: a hypothesis sentence
    :type hypothesis: list(str)
    :param min_len: the minimum order of n-gram this function should extract
    :type min_len: int
    :param max_len: the maximum order of n-gram this function should extract
    :type max_len: int
    :return: the sentence level GLEU score
    :rtype: float
    """
    # A corpus of exactly one (references, hypothesis) pair.
    single_reference_corpus = [references]
    single_hypothesis_corpus = [hypothesis]
    return corpus_gleu(single_reference_corpus, single_hypothesis_corpus,
                       min_len=min_len, max_len=max_len)
def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4):
    """
    Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all
    the hypotheses and their respective references.

    Instead of averaging the sentence level GLEU scores (i.e. macro-average
    precision), Wu et al. (2016) sum up the matching tokens and the max of
    hypothesis and reference tokens for each sentence, then compute using the
    aggregate values.

    From Mike Schuster (via email):
        "For the corpus, we just add up the two statistics n_match and
         n_all = max(n_all_output, n_all_target) for all sentences, then
         calculate gleu_score = n_match / n_all, so it is not just a mean of
         the sentence gleu scores (in our case, longer sentences count more,
         which I think makes sense as they are more difficult to translate)."

    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'military', 'always',
    ...         'obeys', 'the', 'commands', 'of', 'the', 'party']
    >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'military', 'will', 'forever',
    ...          'heed', 'Party', 'commands']
    >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'military', 'forces', 'always',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'Party']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'army', 'always', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'party']

    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']

    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
    0.5673...

    The example below show that corpus_gleu() is different from averaging
    sentence_gleu() for hypotheses

    >>> score1 = sentence_gleu([ref1a], hyp1)
    >>> score2 = sentence_gleu([ref2a], hyp2)
    >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
    0.6144...

    :param list_of_references: a list of reference sentences, w.r.t. hypotheses
    :type list_of_references: list(list(list(str)))
    :param hypotheses: a list of hypothesis sentences
    :type hypotheses: list(list(str))
    :param min_len: The minimum order of n-gram this function should extract.
    :type min_len: int
    :param max_len: The maximum order of n-gram this function should extract.
    :type max_len: int
    :return: The corpus-level GLEU score.
    :rtype: float
    """
    # sanity check
    assert len(list_of_references) == len(
        hypotheses
    ), "The number of hypotheses and their reference(s) should be the same"

    # Sum the per-sentence match counts and denominators over the corpus.
    corpus_matches = 0
    corpus_totals = 0
    for references, hypothesis in zip(list_of_references, hypotheses):
        hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
        hyp_total = sum(hyp_ngrams.values())  # true positives + false positives

        # Track the (matches, denominator) pair maximizing matches/denominator,
        # i.e. the reference yielding the highest per-sentence GLEU.
        best = None
        for reference in references:
            ref_ngrams = Counter(everygrams(reference, min_len, max_len))
            ref_total = sum(ref_ngrams.values())  # true positives + false negatives
            matches = sum((ref_ngrams & hyp_ngrams).values())
            # GLEU = min(precision, recall): since both share the numerator,
            # it equals matches / max(hyp_total, ref_total).
            denom = max(hyp_total, ref_total)
            # Cross-multiplied ratio comparison keeps the first maximum,
            # matching max() over the candidate list.
            if denom > 0 and (best is None or matches * best[1] > best[0] * denom):
                best = (matches, denom)

        if best is not None:
            corpus_matches += best[0]
            corpus_totals += best[1]

    # Corner case: empty corpus or empty references -- don't divide by zero.
    return corpus_matches / corpus_totals if corpus_totals else 0.0
"ikzelf@protonmail.com"
] | ikzelf@protonmail.com |
aa3457cc12ed40f2852ce66f093c9aa02a053d63 | ed9721a0a3660e1621a88811fe026be1dd7dc92d | /problems/l11-problem4.py | 12431ae734bd5a620b9999b14f64d5ca91355001 | [] | no_license | buyfn/6.00.1x | 413f1559bec58018080701c88a1f014e9376fb14 | c0483bbb19bac433d41180e83a886df3117e3633 | refs/heads/master | 2021-01-16T18:03:05.020938 | 2015-10-30T15:07:51 | 2015-10-30T15:07:51 | 41,844,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | class Coordinate(object):
def __init__(self, x, y):
self.x = x
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
def __str__(self):
return '<' + str(self.getX()) + ',' + str(self.getY()) + '>'
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
return 'Coordinate(' + str(self.x) + ', ' + str(self.y) + ')'
o = Coordinate(0, 0)  # example instance at the origin
| [
"buyfng@gmail.com"
] | buyfng@gmail.com |
068a260875e758d0ed56acb09ea6b0c0307e8367 | de2069916279ed8e618cba3f268c0552049aeff3 | /Parser.py | 23cd9d3bb2d5c61d54ba0675f0df9231713bc316 | [] | no_license | rpbeltran/Cantor | f2f135d497c0e4448bd5743ca3783135d737dc0c | ced4fe1e4e282364244818f31ef70bab7112ff4a | refs/heads/master | 2021-09-10T14:25:16.594841 | 2018-03-27T21:32:45 | 2018-03-27T21:32:45 | 116,281,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,796 | py |
from Tokenizer import CTokenizer
import ply.yacc as yacc
tokenizer = CTokenizer()
tokens = CTokenizer.tokens
# ------------------------
# - - - Structures - - -
# ------------------------
class Name_Engine:
    """Hierarchical symbol table: bindings organized into nested namespaces."""

    def __init__(self, label):
        self.namespace = label    # this namespace's own name
        self.subspaces = {}       # child Name_Engine objects, keyed by name
        self.definitions = {}     # bindings local to this namespace
        self.scope = []           # current namespace path (list of names)
        self.usings = [[]]        # namespaces pulled in via `using`

    def using(self, namespace):
        """Record an additional namespace to consult during lookups."""
        self.usings.append(namespace)

    def hang(self, namespace):
        """Create a fresh, empty namespace at the given path."""
        parent = self
        for part in namespace[:-1]:
            parent = parent.subspaces[part]
        parent.subspaces[namespace[-1]] = Name_Engine(namespace[-1])

    def store(self, key, value, namespace=None):
        """Bind key -> value in the namespace at `namespace` (default: current scope)."""
        path = self.scope if namespace is None else namespace
        target = self
        for part in path:
            target = target.subspaces[part]
        target.definitions[key] = value

    def retrieve(self, key, namespace=None):
        """Look `key` up; a binding in the addressed namespace shadows a global one.

        Returns None when the key is bound in neither place.
        """
        path = self.scope if namespace is None else namespace
        found = self.definitions.get(key)  # global binding, may be shadowed below
        target = self
        for part in path:
            target = target.subspaces[part]
        if key in target.definitions:
            found = target.definitions[key]
        return found
class Registry_Entry:
    """One registered value: sequential id, dedup hash key, and the payload."""

    def __init__(self, hash, data):
        self.id = 0        # assigned later by Registry.insert
        self.hash = hash   # deduplication key
        self.data = data   # stored payload
class Registry:
    """Interning table: each distinct value is stored once under a stable id."""

    def __init__(self):
        self.hash_table = {}   # hash key -> Registry_Entry
        self.id_table = []     # id -> Registry_Entry

    def insert(self, entry):
        """Intern `entry`; an existing entry with the same hash is reused."""
        existing = self.hash_table.get(entry.hash)
        if existing is not None:
            return existing.id
        entry.id = len(self.hash_table)
        self.hash_table[entry.hash] = entry
        self.id_table.append(entry)
        return entry.id

    def register(self, data):
        """Compute a hash key for `data` (ints and C_Set supported) and intern it."""
        if isinstance(data, int):
            key = 'u%s' % data
        elif isinstance(data, C_Set):
            key = r'{' + str(data.data)[1:-1] + '}'
        else:
            key = ''  # NOTE(review): unhashed fallback — other types all collide on ''
        return self.insert(Registry_Entry(key, data))

    def retrieve_raw(self, addr):
        """Return the entry stored under id `addr`."""
        return self.id_table[addr]

    def show(self):
        """Dump the whole table to stdout for debugging."""
        print("\nTable - %s" % len(self.id_table))
        for entry in self.id_table:
            print("%s : %s : %s" % (entry.id, entry.hash, entry.data))
registry = Registry()  # module-wide value-interning table shared by the parser actions
class C_Set:
    """An ordered, duplicate-free collection (elements are registry ids)."""

    def __init__(self, data=None):
        # Bug fix: the original default `data=[]` was a shared mutable
        # default, so every `C_Set()` aliased the same list and `add`
        # mutated it for all such instances.
        self.data = [] if data is None else data

    def union(self, s):
        """Elements of self, followed by elements of s not already present."""
        return C_Set(self.data + [x for x in s.data if x not in self.data])

    def intersection(self, s):
        """Elements present in both sets, in self's order."""
        return C_Set([x for x in self.data if x in s.data])

    def complement(self, s):
        """Set difference: elements of self that are not in s."""
        return C_Set([x for x in self.data if x not in s.data])

    def powerset(self):
        # NOTE(review): placeholder — always returns the empty set. Kept
        # as-is because p_set currently relies on this behavior; a real
        # implementation would enumerate all subsets.
        return C_Set([])

    def add(self, expression):
        """Append `expression` unless it is already a member."""
        if expression not in self.data:
            self.data.append(expression)

    def addAll(self, expressions):
        """Append every expression not already a member (in given order)."""
        self.data += [x for x in expressions if x not in self.data]

    def cardinality(self):
        """Number of elements."""
        return len(self.data)
class C_Function:
    """Placeholder for a function value awaiting resolution (evaluation is a stub)."""

    def __init__(self, param):
        self.param = param  # the unresolved identifier/parameter

    def resolve(self, params):
        # Not implemented yet.
        pass
# -----------------------
# - - - Variables - - -
# -----------------------
names = Name_Engine('global')  # root symbol table used by all parser actions
# ------------------------
# - - - Precedence - - -
# ------------------------
# Operator precedence/associativity table consumed by ply.yacc; entries are
# ordered from lowest to highest precedence. UMINUS is a virtual token used
# by the `%prec` override in p_urelemental.
precedence = (
    ('left','PLUS','MINUS','UNION','INTERSECTION'),
    ('left','TIMES','DIVIDE'),
    ('right','POWERSET'),
    ('nonassoc','CARDINALITY', 'UMINUS')
)
# -----------------------------
# - - - Parser Patterns - - -
# -----------------------------
def p_block_body(p):
    '''body :
            | block body
            | definition body
            | expression body
            | comment body
    '''
    # Collect the non-None results of the body's items into a flat list.
    collected = []
    if len(p) > 1:
        if p[1] is not None:
            collected.append(p[1])
        collected.extend(p[2])
    p[0] = collected
def p_block(p):
    'block : block_open body block_close'
    tag = p[1]
    if tag == "query":
        # Print the value of the first expression collected in the body.
        print(registry.id_table[p[2][0]].data)
    elif tag == "using":
        names.using(p[2][0])
    elif tag == "export":
        pass  # not implemented
    elif tag == "import":
        pass  # not implemented
def p_block_open(p):
    '''block_open : STARTOPENTAG IDENTIFIER IDENTIFIER ENDTAG
                  | STARTOPENTAG IDENTIFIER ENDTAG'''
    p[0] = p[2]
    if p[2] == "namespace":
        # Entering a namespace: extend the current scope and create the subspace.
        names.scope.append(p[3])
        names.hang(names.scope)
    elif p[2] == "pretty_printer":
        pass  # not implemented
    elif p[2] == "replace":
        pass  # not implemented
def p_block_close(p):
    '''block_close : STARTCLOSETAG IDENTIFIER ENDTAG'''
    p[0] = p[2]
    if p[2] == "namespace":
        names.scope.pop()  # leaving the namespace restores the outer scope
    elif p[2] == "pretty_printer":
        pass  # not implemented
# ----------------
# || definition ||
# ----------------
def p_definition_variable(p):
    'definition : IDENTIFIER DENOTES expression'
    # Bind the identifier to the expression's registry id in the current scope.
    names.store(p[1], p[3])
# ----------------
# || expression ||
# ----------------
def p_expression(p):
    '''expression : set
                  | urelemental
                  | IDENTIFIER'''
    if isinstance(p[1], str):
        # Identifier: resolve `a::b::name` first relative to the current
        # scope, then (bug fix) as an absolute path. The original guarded
        # the second lookup with `p[0] != None`, which is never true before
        # p[0] has been assigned, so the absolute-path fallback was dead code.
        parts = p[1].split('::')
        val = names.retrieve(parts[-1], names.scope + parts[:-1])
        if val is None:
            val = names.retrieve(parts[-1], parts[:-1])
        if val is not None:
            p[0] = val
        # else: unresolved identifier — p[0] stays None (see C_Function stub).
    else:
        # Sets and urelementals already carry their registry id.
        p[0] = p[1]
# -----------------
# || urelemental ||
# -----------------
def p_urelemental(p):
    '''urelemental : LPAREN urelemental RPAREN
                   | CARDINALITY expression
                   | expression PLUS expression
                   | expression MINUS expression
                   | expression TIMES expression
                   | expression DIVIDE expression
                   | expression GT expression
                   | expression LT expression
                   | expression GE expression
                   | expression LE expression
                   | expression EQUALS expression
                   | MINUS expression %prec UMINUS
                   | INTEGER
    '''
    urel = None
    if len(p) == 2:
        # INTEGER literal.
        urel = p[1]
    elif p[1] == '#':
        # CARDINALITY expression — the operand is p[2] (a registry id); the
        # original dereferenced p[1] (the '#' token itself) and always crashed.
        urel = registry.id_table[p[2]].data.cardinality()
    elif p[1] == '-':
        # Unary minus — likewise the operand is p[2], not the '-' token.
        operand = registry.id_table[p[2]].data
        assert isinstance(operand, int)
        urel = -operand
    elif p[1] == '(':
        # Parenthesised sub-expression: p[2] is already a registry id; the
        # original called `.data` on the raw int. Re-registering the same
        # value dedupes to the same id.
        urel = registry.id_table[p[2]].data
    else:
        # Binary operator: p[1]/p[3] are the operands' registry ids.
        assert isinstance(p[1], int) and isinstance(p[3], int)
        left = registry.id_table[p[1]].data
        right = registry.id_table[p[3]].data
        op = p[2]
        if op == '+':
            urel = left + right
        elif op == '-':
            urel = left - right
        elif op == '*':
            urel = left * right
        elif op == '/':
            urel = left / right
        elif op == '<':
            urel = 1 if left < right else 0
        elif op == '>':
            urel = 1 if left > right else 0
        elif op == '<=':
            # Bug fix: GE/LE were accepted by the grammar but never
            # evaluated, registering None. Assumes the lexemes are
            # '<=' / '>=' — TODO confirm against CTokenizer.
            urel = 1 if left <= right else 0
        elif op == '>=':
            urel = 1 if left >= right else 0
        elif op[0] == '=':
            urel = 1 if left == right else 0
    p[0] = registry.register(urel)
# ---------
# || set ||
# ---------
def p_set(p):
    '''set : LPAREN set RPAREN
           | expression UNION expression
           | expression INTERSECTION expression
           | expression COMPLEMENT expression
           | POWERSET expression
    '''
    newset = None
    if len(p) == 3:
        # POWERSET expression — dereference the registry id before calling
        # powerset(); the original called .powerset() on the raw int id.
        newset = registry.id_table[p[2]].data.powerset()
    elif p[1] == '(':
        # Parenthesised set: p[2] is a registry id; re-register its value
        # (the registry dedupes, so this yields the same id). The original
        # asserted isinstance(p[2], C_Set), which could never hold.
        newset = registry.id_table[p[2]].data
    else:
        a = registry.id_table[p[1]].data
        b = registry.id_table[p[3]].data
        if p[2] == '∪':
            newset = a.union(b)
        elif p[2] == '∩':
            newset = a.intersection(b)
        elif p[2] == '\\':
            newset = a.complement(b)
    if newset is not None:
        p[0] = registry.register(newset)
def p_emptyset(p):
    'set : EMPTYSET'
    # The empty-set literal registers a fresh (deduplicated) empty C_Set.
    p[0] = registry.register(C_Set([]))
def p_rawset(p):
    '''set : STARTSET ENDSET
           | STARTSET expression setinsidetail ENDSET
    '''
    # Build a set literal: the first element (if any) is p[2]; the rest were
    # collected by setinsidetail into p[3].
    literal = C_Set([])
    if len(p) > 3:
        literal.add(p[2])
        literal.addAll(p[3])
    p[0] = registry.register(literal)
def p_setinsidetail(p):
    '''setinsidetail :
                     | COMMA expression setinsidetail'''
    # Flatten the comma-separated tail of a set literal into one list.
    tail = []
    if len(p) > 1:
        tail.append(p[2])
        tail += p[3]
    p[0] = tail
# -------------
# || comment ||
# -------------
def p_comment(p):
    'comment : COMMENT'
    # Comments produce no value: p[0] stays None and p_block_body filters
    # None entries out of the body list. (The function's return value itself
    # is ignored by ply.)
    return None
# Build the LALR parser tables from the p_* rule functions defined above.
yacc.yacc()

# Minimal interactive REPL: parse each input line, print the parse result,
# then dump the value registry for inspection. Ctrl-D (EOF) exits.
while True:
    try:
        s = input('\n\n\n> ') # use input() on Python 3
    except EOFError:
        break
    output = yacc.parse(s)
    print(output)
    registry.show()
| [
"rp.beltran@yahoo.com"
] | rp.beltran@yahoo.com |
9b462eeafdaa588a48edbed76431aeda39f7bc5f | 6f37c71ac7d4bba77a19761cb080ce7fd4c8904f | /forbuy/migrations/0001_initial.py | 11cf632a5f0789bb11139b57f68fbd7519d504e8 | [] | no_license | Willian-H/houseInfo_vistual | d55abe7f6a5be45b209a95ee04e47890a867f2c7 | f5b8aeddaf3a3312a49cd2f62046ab603adf276c | refs/heads/master | 2023-06-19T22:19:52.736229 | 2021-07-22T10:14:18 | 2021-07-22T10:14:18 | 388,418,063 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # Generated by Django 2.2 on 2021-07-22 02:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the `forbuy` table with its
    # text columns. NOTE: generated code — change the model and create a new
    # migration rather than editing this one in place.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='forbuy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('houseaddress', models.CharField(max_length=255)),
                ('houseprice', models.CharField(max_length=255)),
                ('housearea', models.CharField(max_length=255)),
                ('housedescription', models.CharField(max_length=255)),
                ('linkman', models.CharField(max_length=255)),
                ('contact', models.CharField(max_length=255)),
            ],
        ),
    ]
| [
"3025946783@qq.com"
] | 3025946783@qq.com |
deac4dc9c416a85a1b186d22b702e4fd17d953c0 | 436177bf038f9941f67e351796668700ffd1cef2 | /venv/Lib/site-packages/sklearn/decomposition/_pca.py | 01906b2b0cc5586c35abeabb5c496f37ee7c9cf0 | [] | no_license | python019/matplotlib_simple | 4359d35f174cd2946d96da4d086026661c3d1f9c | 32e9a8e773f9423153d73811f69822f9567e6de4 | refs/heads/main | 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,683 | py | """ Principal Component Analysis.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Michael Eickenberg <michael.eickenberg@inria.fr>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
#
# License: BSD 3 clause
from math import log, sqrt
import numbers
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from ._base import _BasePCA
from ..utils import check_random_state
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import fast_logdet, randomized_svd, svd_flip
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
def _assess_dimension(spectrum, rank, n_samples):
    """Compute the log-likelihood of a rank ``rank`` dataset.

    The dataset is assumed to be embedded in gaussian noise of shape(n,
    dimf) having spectrum ``spectrum``. This implements the method of
    T. P. Minka.

    Parameters
    ----------
    spectrum : ndarray of shape (n_features,)
        Data spectrum.
    rank : int
        Tested rank value. It should be strictly lower than n_features,
        otherwise the method isn't specified (division by zero in equation
        (31) from the paper).
    n_samples : int
        Number of samples.

    Returns
    -------
    ll : float
        The log-likelihood.

    References
    ----------
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
    <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
    """
    n_features = spectrum.shape[0]
    if not 1 <= rank < n_features:
        raise ValueError("the tested rank should be in [1, n_features - 1]")

    eps = 1e-15

    if spectrum[rank - 1] < eps:
        # When the tested rank is associated with a small eigenvalue, there's
        # no point in computing the log-likelihood: it's going to be very
        # small and won't be the max anyway. Also, it can lead to numerical
        # issues below when computing pa, in particular in log((spectrum[i] -
        # spectrum[j]) because this will take the log of something very small.
        return -np.inf

    # pu: normalization term over the Stiefel manifold of rank-dim subspaces.
    pu = -rank * log(2.)
    for i in range(1, rank + 1):
        pu += (gammaln((n_features - i + 1) / 2.) -
               log(np.pi) * (n_features - i + 1) / 2.)

    # pl: likelihood contribution of the retained eigenvalues.
    pl = np.sum(np.log(spectrum[:rank]))
    pl = -pl * n_samples / 2.

    # v: maximum-likelihood estimate of the noise variance (mean of the
    # discarded eigenvalues, floored at eps for numerical safety).
    v = max(eps, np.sum(spectrum[rank:]) / (n_features - rank))
    pv = -np.log(v) * n_samples * (n_features - rank) / 2.

    m = n_features * rank - rank * (rank + 1.) / 2.
    pp = log(2. * np.pi) * (m + rank) / 2.

    # pa: pairwise eigenvalue-gap term; discarded eigenvalues are replaced
    # by the noise variance estimate v.
    pa = 0.
    spectrum_ = spectrum.copy()
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, len(spectrum)):
            pa += log((spectrum[i] - spectrum[j]) *
                      (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)

    ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.

    return ll
def _infer_dimension(spectrum, n_samples):
    """Infers the dimension of a dataset with a given spectrum.

    The returned value will be in [1, n_features - 1].
    """
    # Rank 0 is assigned -inf so the argmax can never select it.
    scores = [-np.inf]
    for rank in range(1, spectrum.shape[0]):
        scores.append(_assess_dimension(spectrum, rank, n_samples))
    return np.asarray(scores).argmax()
class PCA(_BasePCA):
"""Principal component analysis (PCA).
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
It can also use the scipy.sparse.linalg ARPACK implementation of the
truncated SVD.
Notice that this class does not support sparse input. See
:class:`TruncatedSVD` for an alternative with sparse data.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float or 'mle', default=None
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
MLE is used to guess the dimension. Use of ``n_components == 'mle'``
will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
number of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If ``svd_solver == 'arpack'``, the number of components must be
strictly less than the minimum of n_features and n_samples.
Hence, the None case results in::
n_components == min(n_samples, n_features) - 1
copy : bool, default=True
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, default=False
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto'
If auto :
The solver is selected by a default policy based on `X.shape` and
`n_components`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient 'randomized'
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
If full :
run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
If arpack :
run SVD truncated to n_components calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_components < min(X.shape)
If randomized :
run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
tol : float, default=0.0
Tolerance for singular values computed by svd_solver == 'arpack'.
Must be of range [0.0, infinity).
.. versionadded:: 0.18.0
iterated_power : int or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'.
Must be of range [0, infinity).
.. versionadded:: 0.18.0
random_state : int, RandomState instance or None, default=None
Used when the 'arpack' or 'randomized' solvers are used. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18.0
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
``explained_variance_``.
explained_variance_ : ndarray of shape (n_components,)
The amount of variance explained by each of the selected components.
The variance estimation uses `n_samples - 1` degrees of freedom.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : ndarray of shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
.. versionadded:: 0.19
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
n_features_ : int
Number of features in the training data.
n_samples_ : int
Number of samples in the training data.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
See Also
--------
KernelPCA : Kernel Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
IncrementalPCA : Incremental Principal Component Analysis.
References
----------
For n_components == 'mle', this class uses the method from:
`Minka, T. P.. "Automatic choice of dimensionality for PCA".
In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
Implements the probabilistic PCA model from:
`Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
component analysis". Journal of the Royal Statistical Society:
Series B (Statistical Methodology), 61(3), 611-622.
<http://www.miketipping.com/papers/met-mppca.pdf>`_
via the score and score_samples methods.
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions".
SIAM review, 53(2), 217-288.
<https://doi.org/10.1137/090771806>`_
and also
`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
"A randomized algorithm for the decomposition of matrices".
Applied and Computational Harmonic Analysis, 30(1), 47-68
<https://doi.org/10.1016/j.acha.2010.02.003>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(n_components=2)
>>> print(pca.explained_variance_ratio_)
[0.9924... 0.0075...]
>>> print(pca.singular_values_)
[6.30061... 0.54980...]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X)
PCA(n_components=2, svd_solver='full')
>>> print(pca.explained_variance_ratio_)
[0.9924... 0.00755...]
>>> print(pca.singular_values_)
[6.30061... 0.54980...]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(n_components=1, svd_solver='arpack')
>>> print(pca.explained_variance_ratio_)
[0.99244...]
>>> print(pca.singular_values_)
[6.30061...]
"""
    @_deprecate_positional_args
    def __init__(self, n_components=None, *, copy=True, whiten=False,
                 svd_solver='auto', tol=0.0, iterated_power='auto',
                 random_state=None):
        # Hyper-parameters are stored without validation here; validation
        # happens later in `_fit` (see the checks there), which keeps
        # `set_params`/cloning cheap.
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
        self.svd_solver = svd_solver
        self.tol = tol
        self.iterated_power = iterated_power
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fit the model with X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # The SVD factors returned by _fit are only needed by fit_transform.
        self._fit(X)
        return self
    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed values.

        Notes
        -----
        This method returns a Fortran-ordered array. To convert it to a
        C-ordered array, use 'np.ascontiguousarray'.
        """
        U, S, Vt = self._fit(X)
        U = U[:, :self.n_components_]

        if self.whiten:
            # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
            U *= sqrt(X.shape[0] - 1)
        else:
            # X_new = X * V = U * S * Vt * V = U * S
            U *= S[:self.n_components_]

        return U
    def _fit(self, X):
        """Dispatch to the right submethod depending on the chosen solver."""
        # Raise an error for sparse input.
        # This is more informative than the generic one raised by check_array.
        if issparse(X):
            raise TypeError('PCA does not support sparse input. See '
                            'TruncatedSVD for a possible alternative.')

        X = self._validate_data(X, dtype=[np.float64, np.float32],
                                ensure_2d=True, copy=self.copy)

        # Handle n_components==None
        if self.n_components is None:
            if self.svd_solver != 'arpack':
                n_components = min(X.shape)
            else:
                # arpack requires strictly 0 < n_components < min(X.shape)
                n_components = min(X.shape) - 1
        else:
            n_components = self.n_components

        # Handle svd_solver
        self._fit_svd_solver = self.svd_solver
        if self._fit_svd_solver == 'auto':
            # Small problem or n_components == 'mle', just call full PCA
            if max(X.shape) <= 500 or n_components == 'mle':
                self._fit_svd_solver = 'full'
            elif n_components >= 1 and n_components < .8 * min(X.shape):
                self._fit_svd_solver = 'randomized'
            # This is also the case of n_components in (0,1)
            else:
                self._fit_svd_solver = 'full'

        # Call different fits for either full or truncated SVD
        if self._fit_svd_solver == 'full':
            return self._fit_full(X, n_components)
        elif self._fit_svd_solver in ['arpack', 'randomized']:
            return self._fit_truncated(X, n_components, self._fit_svd_solver)
        else:
            raise ValueError("Unrecognized svd_solver='{0}'"
                             "".format(self._fit_svd_solver))
    def _fit_full(self, X, n_components):
        """Fit the model by computing full SVD on X."""
        n_samples, n_features = X.shape

        if n_components == 'mle':
            if n_samples < n_features:
                raise ValueError("n_components='mle' is only supported "
                                 "if n_samples >= n_features")
        elif not 0 <= n_components <= min(n_samples, n_features):
            raise ValueError("n_components=%r must be between 0 and "
                             "min(n_samples, n_features)=%r with "
                             "svd_solver='full'"
                             % (n_components, min(n_samples, n_features)))
        elif n_components >= 1:
            if not isinstance(n_components, numbers.Integral):
                raise ValueError("n_components=%r must be of type int "
                                 "when greater than or equal to 1, "
                                 "was of type=%r"
                                 % (n_components, type(n_components)))

        # Center data (in place on the validated copy made in _fit)
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        U, S, Vt = linalg.svd(X, full_matrices=False)
        # flip eigenvectors' sign to enforce deterministic output
        U, Vt = svd_flip(U, Vt)

        components_ = Vt

        # Get variance explained by singular values
        explained_variance_ = (S ** 2) / (n_samples - 1)
        total_var = explained_variance_.sum()
        explained_variance_ratio_ = explained_variance_ / total_var
        singular_values_ = S.copy()  # Store the singular values.

        # Postprocess the number of components required
        if n_components == 'mle':
            n_components = \
                _infer_dimension(explained_variance_, n_samples)
        elif 0 < n_components < 1.0:
            # number of components for which the cumulated explained
            # variance percentage is superior to the desired threshold
            # side='right' ensures that number of features selected
            # their variance is always greater than n_components float
            # passed. More discussion in issue: #15669
            ratio_cumsum = stable_cumsum(explained_variance_ratio_)
            n_components = np.searchsorted(ratio_cumsum, n_components,
                                           side='right') + 1

        # Compute noise covariance using Probabilistic PCA model
        # The sigma2 maximum likelihood (cf. eq. 12.46)
        if n_components < min(n_features, n_samples):
            self.noise_variance_ = explained_variance_[n_components:].mean()
        else:
            self.noise_variance_ = 0.

        self.n_samples_, self.n_features_ = n_samples, n_features
        self.components_ = components_[:n_components]
        self.n_components_ = n_components
        self.explained_variance_ = explained_variance_[:n_components]
        self.explained_variance_ratio_ = \
            explained_variance_ratio_[:n_components]
        self.singular_values_ = singular_values_[:n_components]

        return U, S, Vt
    def _fit_truncated(self, X, n_components, svd_solver):
        """Fit the model by computing truncated SVD (by ARPACK or randomized)
        on X.
        """
        n_samples, n_features = X.shape

        if isinstance(n_components, str):
            raise ValueError("n_components=%r cannot be a string "
                             "with svd_solver='%s'"
                             % (n_components, svd_solver))
        elif not 1 <= n_components <= min(n_samples, n_features):
            raise ValueError("n_components=%r must be between 1 and "
                             "min(n_samples, n_features)=%r with "
                             "svd_solver='%s'"
                             % (n_components, min(n_samples, n_features),
                                svd_solver))
        elif not isinstance(n_components, numbers.Integral):
            raise ValueError("n_components=%r must be of type int "
                             "when greater than or equal to 1, was of type=%r"
                             % (n_components, type(n_components)))
        elif svd_solver == 'arpack' and n_components == min(n_samples,
                                                            n_features):
            raise ValueError("n_components=%r must be strictly less than "
                             "min(n_samples, n_features)=%r with "
                             "svd_solver='%s'"
                             % (n_components, min(n_samples, n_features),
                                svd_solver))

        random_state = check_random_state(self.random_state)

        # Center data (in place on the validated copy made in _fit)
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        if svd_solver == 'arpack':
            v0 = _init_arpack_v0(min(X.shape), random_state)
            U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            S = S[::-1]
            # flip eigenvectors' sign to enforce deterministic output
            U, Vt = svd_flip(U[:, ::-1], Vt[::-1])

        elif svd_solver == 'randomized':
            # sign flipping is done inside
            U, S, Vt = randomized_svd(X, n_components=n_components,
                                      n_iter=self.iterated_power,
                                      flip_sign=True,
                                      random_state=random_state)

        self.n_samples_, self.n_features_ = n_samples, n_features
        self.components_ = Vt
        self.n_components_ = n_components

        # Get variance explained by singular values
        self.explained_variance_ = (S ** 2) / (n_samples - 1)
        total_var = np.var(X, ddof=1, axis=0)
        self.explained_variance_ratio_ = \
            self.explained_variance_ / total_var.sum()
        self.singular_values_ = S.copy()  # Store the singular values.

        # Noise variance: remaining variance spread over the discarded
        # dimensions (truncated SVD never computes them explicitly).
        if self.n_components_ < min(n_features, n_samples):
            self.noise_variance_ = (total_var.sum() -
                                    self.explained_variance_.sum())
            self.noise_variance_ /= min(n_features, n_samples) - n_components
        else:
            self.noise_variance_ = 0.

        return U, S, Vt
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : Ignored
Returns
-------
ll : float
Average log-likelihood of the samples under the current model.
"""
return np.mean(self.score_samples(X))
    def _more_tags(self):
        # Tell scikit-learn's estimator-tag system that this estimator keeps
        # the floating-point dtype of its input (no silent upcast to float64).
        return {'preserves_dtype': [np.float64, np.float32]}
| [
"82611064+python019@users.noreply.github.com"
] | 82611064+python019@users.noreply.github.com |
daa30f428287b6beaea3a731e63905cbc2ce21b6 | dd5e1cb7aa3cdc9bd0d56437b1019e7f630e3aa3 | /Python/Curso de Python/Dir e Help.py | ce390a9dad319702f922d893ba86f240ab6fb9ff | [] | no_license | Renan-S/How-I-Program | c1f25e81a5364bef2567e05fee83f4395b50a2ae | d6025b9794f3adaf34825ee3bbdf019d77a462dd | refs/heads/master | 2020-08-22T17:43:29.727720 | 2020-02-14T18:20:28 | 2020-02-14T18:20:28 | 216,450,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | """
dir - Apresenta todos os atributos/propriedades e funções/métodos disponíveis para determinado tipo de dado ou variável
help - Apresenta a documentação/como utilizar os atributos/propriedades e funções/métodos disponíveis para determinado tipo de dado ou variável
"""
print(dir('Nigga'))  # list every attribute/method available on a str instance
help('Nigga'.lower)  # show the documentation of a given attribute/method
print('NIGGA'.lower())  # lower case
print('nigga'.upper())  # upper case
print('renan santos cavalcante'.title())  # upper-case the first letter of each word
print('HOMEM MACACO'.lower().title())  # methods can be chained
num = 4
print(num.__add__(36))  # the dunder method behind the + operator
print(num+36)  # equivalent to the __add__ call above
help(num.__add__)
"noreply@github.com"
] | Renan-S.noreply@github.com |
cdd4522df9b400c1f869ee63223f4b046e646df0 | b4d1b640b36bf68e0cf1a354be882f1b8cd5d8ef | /infoCollecter/infoCollecter/middlewares.py | 54773c9cf56863f01d73c5bfbd17fe223fccdb3d | [] | no_license | liyingxuan89/enterpriseInfo | d5965f74f82b5db274492c5073f6cbdc700866ff | 047c52c4b71e06ea34c16052bae5e6086562659f | refs/heads/master | 2020-03-25T01:37:18.131581 | 2018-08-28T15:06:27 | 2018-08-28T15:06:27 | 143,247,073 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,611 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class InfocollecterSpiderMiddleware(object):
    """Spider middleware for the infoCollecter project.

    Every hook keeps scrapy's default behaviour: responses, produced
    items/requests and start requests all pass through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so we can log when a spider starts.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Called for each response before it reaches the spider.
        # Returning None lets processing continue normally.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results the spider produced; forward them as-is.
        for item_or_request in result:
            yield item_or_request

    def process_spider_exception(self, response, exception, spider):
        # Called when the spider (or another middleware) raises.
        # Returning None lets scrapy's default exception handling run.
        return None

    def process_start_requests(self, start_requests, spider):
        # Called with the spider's start requests; forward them unchanged.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class InfocollecterDownloaderMiddleware(object):
    # Downloader middleware for the infoCollecter project.
    # Not all methods need to be defined; when a method is missing, scrapy
    # acts as if the downloader middleware does not modify the passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so we can log when a spider starts.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must either:
        # - return None: continue processing this request (current behaviour)
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object (current behaviour: pass-through)
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception (current behaviour)
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"8199714@qq.com"
] | 8199714@qq.com |
8e8fc9760cf997901255dbed108d815c0d7153d5 | 9d8e4c88bdca341bc32a40ead1f9549b066e29a3 | /containers/inference/index.py | cfb74fb9257dfc300abad0a808c60c6c5e5f0990 | [] | no_license | sergioarmgpl/mlops-argo-k3s | daaaa4b5d689e759e75258c1161d0e883facf562 | 17f12ec3a10393f2522693e1d77827d22bae1500 | refs/heads/master | 2023-03-29T18:36:44.621824 | 2021-04-03T23:14:26 | 2021-04-03T23:14:26 | 332,510,367 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | from google.cloud import storage
import pandas as pd
import os
import requests
import urllib3
#Ignore SSL Warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    # Upload a local file to a Google Cloud Storage bucket.
    # Credentials come from the service-account key file 'argok3s.json'
    # in the working directory.
    storage_client = storage.Client.from_service_account_json('argok3s.json')
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_filename(source_file_name)
def download_blob(bucket_name, source_blob_name, destination_file_name):
    # Download an object from a Google Cloud Storage bucket to a local file.
    # Uses the same service-account key file as upload_blob.
    storage_client = storage.Client.from_service_account_json('argok3s.json')
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name)
def predict(zone, p1, p2, timeout=30):
    """Call the model-serving endpoint and return the integer prediction.

    Parameters: zone, p1, p2 -- feature values, coerced to int before the
    request; timeout -- seconds to wait for the HTTP call (new keyword with
    a default, so existing positional callers are unaffected).

    ROBUSTNESS FIX: the original request had no timeout, so a hung endpoint
    would block the batch job forever.
    """
    # SECURITY NOTE: verify=False disables TLS certificate verification
    # (endpoint presumably uses a self-signed certificate); consider shipping
    # the CA bundle and enabling verification instead.
    url = 'https://' + os.environ["endpoint"] + '/' + os.environ["appname"] + '/predict'
    r = requests.post(url,
                      json={'data': [int(zone), int(p1), int(p2)]},
                      verify=False,
                      timeout=timeout)
    return int(r.json()["prediction"])
# --- Batch inference driver ---------------------------------------------
# Downloads the pre-processed scores CSV, runs one prediction per row via
# the HTTP endpoint, and uploads the annotated CSV back to the same bucket.
bucket = os.environ["BUCKET"]
print("Downloading CSV...",end="")
# Bucket name is taken from the BUCKET environment variable above.
download_blob(bucket,"scores_processed.csv","scores_processed.csv")
print("done")
print("Reading Data...",end="")
df = pd.read_csv('scores_processed.csv',sep=';', delimiter=None, header='infer')
print("done")
print("Doing inference...",end="")
# One HTTP prediction call per row (columns ZONE, P1, P2).
df["FINAL_prediction"] = df.apply(lambda x: predict(x["ZONE"],x["P1"],x["P2"]), axis=1)
df.to_csv("inference.csv",index=False,sep=';')
upload_blob(bucket,"inference.csv","inference.csv")
print("done")
print("Showing inference")
print(df.head(5))
print("done")
| [
"sergioarm.gpl@gmail.com"
] | sergioarm.gpl@gmail.com |
5c7276944bdc8248c6f71658827a21c3b55926ff | c212044cc7da47d1ed6b1b82ea6a4180875b39ab | /Homework-2/hw2.starter code/french_count.py | fd2b093b6d1f9604990a9ad07155ce9631a7867b | [] | no_license | TATimo/544NLP-project | bf57e9378a1592498f0cbcc650987eb1bb520160 | 1b6d223ba24a1119b62eff7c6c86afcf72451e1a | refs/heads/master | 2021-05-08T17:36:08.263324 | 2018-01-30T03:32:57 | 2018-01-30T03:32:57 | 119,476,565 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,359 | py | import sys
from fst import FST
from fsmutils import composewords
kFRENCH_TRANS = {0: "zero", 1: "un", 2: "deux", 3: "trois", 4:
"quatre", 5: "cinq", 6: "six", 7: "sept", 8: "huit",
9: "neuf", 10: "dix", 11: "onze", 12: "douze", 13:
"treize", 14: "quatorze", 15: "quinze", 16: "seize",
20: "vingt", 30: "trente", 40: "quarante", 50:
"cinquante", 60: "soixante", 100: "cent"}
kFRENCH_AND = 'et'
def prepare_input(integer):
    """Render an integer in [0, 1000) as a list of digit characters.

    Non-zero values are zero-padded to three digits (e.g. 47 -> ['0','4','7']);
    zero is represented by the single sentinel character 'z'.
    """
    assert isinstance(integer, int) and 0 <= integer < 1000, \
        "Integer out of bounds"
    if integer == 0:
        return list('z')
    return list("%03i" % integer)
## Modify this func to achieve 0 situation
def french_count():
    """Build an FST that transduces a 3-character digit string (or 'z') into
    French number words.

    State layout (from the arcs below): 'start' consumes the hundreds digit
    into states '0'-'9'; the tens digit moves into states '10'-'19'; the
    units digit moves into final states '20'-'29'.  'z' is the final state
    for the whole-input zero case.  Words are emitted on the arcs.
    """
    f = FST('french')
    f.add_state('start')
    f.add_state('z')
    for i in range(30):
        f.add_state(str(i))
    f.initial_state = ('start')
    for i in range(20,30):
        f.set_final(str(i))
    f.set_final('z')
    # Special case: the single input symbol 'z' maps to "zero".
    f.add_arc('start', 'z', ['z'], [kFRENCH_TRANS[0]])
    for i in range(10):
        f.add_arc('start', str(i), [str(i)], [])
        for j in range(10,20):
            # Hundreds digit: 0 emits nothing, 1 emits "cent",
            # 2-9 emit "<digit> cent".
            if i is 0:
                f.add_arc(str(i), str(j), [str(j-10)], [])
            elif i is 1:
                f.add_arc(str(i), str(j), [str(j-10)], [kFRENCH_TRANS[100]])
            elif i in range(2,10):
                f.add_arc(str(i), str(j), [str(j-10)], [kFRENCH_TRANS[i],kFRENCH_TRANS[100]])
    for i in range(10,20):
        for j in range(20,30):
            # Tens digit (i-10) combined with units digit (j-20), covering
            # the French irregulars: teens, "et un", 70s (soixante-dix),
            # 80s (quatre-vingt) and 90s (quatre-vingt-dix).
            if i is 10:
                if j!= 20: f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[j-20]])
                else: f.add_arc(str(i), str(j), [str(j-20)], [])
            elif i is 11 and j in range(20,27):
                f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[j-10]])
            elif i is 11 and j in range(27,30):
                f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[10],kFRENCH_TRANS[j-20]])
            elif i in range(12,17):
                if j is 20:
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[int(i%10)*10]])
                elif j is 21:
                    # "et un": e.g. vingt et un, trente et un, ...
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[int(i%10)*10],kFRENCH_AND,kFRENCH_TRANS[1]])
                else:
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[int(i%10)*10],kFRENCH_TRANS[j-20]])
            elif i is 17:
                if j is 20:
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[60],kFRENCH_TRANS[10]])
                elif j is 21:
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[60],kFRENCH_AND,kFRENCH_TRANS[11]])
                elif j in range(22,27):
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[60],kFRENCH_TRANS[j-10]])
                elif j in range(27,30):
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[60],kFRENCH_TRANS[10],kFRENCH_TRANS[j-20]])
            elif i is 18:
                if j is 20:
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[4],kFRENCH_TRANS[20]])
                elif j in range(21,30):
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[4],kFRENCH_TRANS[20],kFRENCH_TRANS[j-20]])
            elif i is 19:
                if j in range(20,27):
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[4],kFRENCH_TRANS[20],kFRENCH_TRANS[j-10]])
                elif j in range(27,30):
                    f.add_arc(str(i), str(j), [str(j-20)], [kFRENCH_TRANS[4],kFRENCH_TRANS[20],kFRENCH_TRANS[10],kFRENCH_TRANS[j-20]])
    return f
if __name__ == '__main__':
string_input = raw_input()
user_input = int(string_input)
## f = french_count()
## file = open('french_numbers.txt', 'r')
## lines = file.read().split('\n')
## for line in lines:
## token = line.split(':')
## try: output = " ".join(f.transduce(prepare_input(int(token[0].strip()))))
## except: print "error_input: " + token[0].strip()
## if output != token[1].strip().lower():
## print token[0].strip() + " incorrect, program output " + output + ", expect " + token[1].strip()
if string_input:
print user_input, '-->',
print " ".join(f.transduce(prepare_input(user_input)))
| [
"noreply@github.com"
] | TATimo.noreply@github.com |
6f64803b680f530118f50d12f840345200374827 | 001ca88155c90447ae3564bb51c503500d4fdcdd | /apps/christmas/migrations/0001_initial.py | 2f33cc812b526ca9d65d097c3b32136603943187 | [] | no_license | andre23arruda/cartas-de-natal | b7d5766b2806814dc7aaed1315b0d51d4aa53582 | b704b28137256e9c52a7d716e462334928c9d2bd | refs/heads/main | 2023-04-28T04:33:28.139797 | 2021-05-14T04:56:05 | 2021-05-14T04:56:05 | 367,122,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | # Generated by Django 3.1.4 on 2021-05-13 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Letter`` table for christmas letters."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Letter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # NOTE(review): auto_now/auto_now_add look swapped here --
                # created_at normally uses auto_now_add (set once on insert)
                # and updated_at uses auto_now (refreshed on every save).
                # Confirm against the model definition before changing.
                ('created_at', models.DateField(auto_now=True)),
                ('updated_at', models.DateField(auto_now_add=True)),
                ('title', models.CharField(max_length=100)),
                ('message', models.TextField()),
            ],
        ),
    ]
| [
"andre23arruda@gmail.com"
] | andre23arruda@gmail.com |
656edae8b743f0b4d8b9b4f62d5f3411137c9016 | 6993d7b0ddf752b59d89674ec1b1758855ccef57 | /show_config.py | deca24c6d086950b953ef8fca9b0bad81350ea80 | [] | no_license | jrglasgow/timelapse | 0691e41cf1e1d87c3a6dd4bd3dde02a4b87a6332 | 3fb1b6886fc3ce3cc53372beae5d072b01f3421e | refs/heads/master | 2021-01-24T06:05:56.289618 | 2015-04-15T19:31:59 | 2015-04-15T19:31:59 | 32,296,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,014 | py | #! /usr/bin/env python
import gphoto2 as gp
context = gp.Context()
camera = gp.Camera()
camera.init(context)
text = camera.get_summary(context)
#print('Summary')
#print('=======')
#print(str(text))
#camera.exit(context)
config = camera.get_config(context)
def tl_gphoto2_type(this_type):
    """Map a gphoto2 widget-type constant to a readable '<name> <value>' string.

    Unrecognised constants are rendered as 'unknown <value>'.
    """
    type_names = {
        gp.GP_WIDGET_WINDOW: "gp.GP_WIDGET_WINDOW",
        gp.GP_WIDGET_SECTION: "gp.GP_WIDGET_SECTION",
        gp.GP_WIDGET_TEXT: "gp.GP_WIDGET_TEXT",
        gp.GP_WIDGET_RANGE: "gp.GP_WIDGET_RANGE",
        gp.GP_WIDGET_TOGGLE: "gp.GP_WIDGET_TOGGLE",
        gp.GP_WIDGET_RADIO: "gp.GP_WIDGET_RADIO",
        gp.GP_WIDGET_MENU: "gp.GP_WIDGET_MENU",
        gp.GP_WIDGET_BUTTON: "gp.GP_WIDGET_BUTTON",
        gp.GP_WIDGET_DATE: "gp.GP_WIDGET_DATE",
    }
    return "%s %s" % (type_names.get(this_type, "unknown"), this_type)
def tl_show_config_options(config, spacing):
this_type = config.get_type()
if this_type == gp.GP_WIDGET_WINDOW:
print "%sgp.GP_WIDGET_WINDOW %s" % (spacing, this_type)
elif this_type == gp.GP_WIDGET_SECTION:
print "%sgp.GP_WIDGET_SECTION %s" % (spacing, this_type)
elif this_type == gp.GP_WIDGET_TEXT:
#print "%sgp.GP_WIDGET_TEXT %s" % (spacing, this_type)
pass
elif this_type == gp.GP_WIDGET_RANGE:
print "%sgp.GP_WIDGET_RANGE %s" % (spacing, this_type)
elif this_type == gp.GP_WIDGET_TOGGLE:
#print "%sgp.GP_WIDGET_TOGGLE %s" % (spacing, this_type)
pass
elif this_type == gp.GP_WIDGET_RADIO:
#print "%sgp.GP_WIDGET_RADIO %s" % (spacing, this_type)
for i in range(0, config.count_choices() - 1):
print "%s%s: %s" % (spacing, i, config.get_choice(i))
pass
elif this_type == gp.GP_WIDGET_MENU:
print "%sgp.GP_WIDGET_MENU %s" % (spacing, this_type)
elif this_type == gp.GP_WIDGET_BUTTON:
print "%sgp.GP_WIDGET_BUTTON %s" % (spacing, this_type)
elif this_type == gp.GP_WIDGET_DATE:
#print "%sgp.GP_WIDGET_DATE %s" % (spacing, this_type)
pass
else:
print "unknown %s" % (this_type)
pass
def tl_show_config(config, config_path='', spacing=''):
config_path = "%s.%s" % (config_path, config.get_name())
#print "%sLabel: %s (%s) Type: %s" % (spacing, str(config.get_label()), config_path, str(config.get_type()))
#print "config.count_children(): %s" % config.count_children()
new_spacing = "%s " % (spacing)
if config.count_children() > 0:
print "%s%s" % (spacing, config.get_name())
for i in range(0, config.count_children() - 1):
tl_show_config(config.get_child(i), config_path, new_spacing)
else:
print "%s%s (%s): %s" % (spacing, config.get_name(), tl_gphoto2_type(config.get_type()), config.get_value())
tl_show_config_options(config, new_spacing)
#new_spacing = "%s " % (spacing)
#print "config.get_child(i).get_type(): %s" % config.get_child(i).get_type()
#print "config.get_child(i).count_choices(): %s" % config.get_child(i).count_choices()
#if config.count_choices() > 0:
# print "config.count_choices() > 0"
#print "config.count_choices(): %s" % config.count_choices()
#for i in range(0, config.count_choices() - 1):
# print "%s%s: %s" % (new_spacing, i, config.get_choice(i))
pass
if __name__ == '__main__':
    tl_show_config(config)
    # Examples of driving the camera config by hand (kept for reference):
    #config.get_child_by_name('capturesettings').get_child_by_name('shutterspeed').get_value()
    #config.get_child_by_name('capturesettings').get_child_by_name('shutterspeed').get_choice(52)
    #config.get_child_by_name('capturesettings').get_child_by_name('shutterspeed').set_value(config.get_child_by_name('capturesettings').get_child_by_name('shutterspeed').get_choice(52))
    #camera.set_config(config, context)
    #file = camera.capture(gp.GP_CAPTURE_IMAGE, context)
| [
"james@jrglasgow.com"
] | james@jrglasgow.com |
616042fd1ec2e56ac3a0c9702803669b7b868276 | a8bd64e730a4633e4f678f0833efb9ae8160f58d | /python/MITx/6.00.1x_Introduction_to_ComputerSCience_and_Programming_using_Python/power function.py | 577f3b6f3c8a4d21efe4bb2459d1232ca7f022c2 | [] | no_license | basantech89/code_monk | ddc9c4fe598c3f5d9cebd772bff86d9f8880c97c | f80cfaec8212201443454018be408f4b263ebfee | refs/heads/master | 2023-07-23T15:36:20.531102 | 2023-07-18T05:50:34 | 2023-07-18T05:50:34 | 143,033,881 | 1 | 0 | null | 2022-12-30T16:38:21 | 2018-07-31T15:35:09 | Java | UTF-8 | Python | false | false | 291 | py | x = int(raw_input("Type any integer: "))
p = int(raw_input("Type an integer power: "))
result = 1
def iterativePower(x,p):
result = 1
for turn in range(p):
print "iteration: "+str(turn)+" current result "+str(result)
result *= x
return result
z=iterativePower(3,5)
print z
| [
"basantech89@gmail.com"
] | basantech89@gmail.com |
8b1fa50ca360ec5a538377d6f93566e574a2295b | 6efa6748c749f4b2d77a9ddc83038f4616f73972 | /leetcode_2.py | 07569518e95e80291e4f9fcabac397a052fa978f | [] | no_license | IvanYerkinov/SPD1.4 | 040015a523fc6e55be815cdccd7a55d37c5b1240 | 1a843c892fcff25714f3fa3edd31840be411376b | refs/heads/master | 2021-05-24T13:48:34.540049 | 2020-04-19T21:12:47 | 2020-04-19T21:12:47 | 253,590,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | # Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
# Hard problem
class Solution:
    """LeetCode 32 -- Longest Valid Parentheses."""

    def longestValidParentheses(self, s: str) -> int:
        """Return the length of the longest well-formed '()' substring of s.

        Stack-of-indices approach, O(n) time / O(n) space: the bottom of the
        stack is the index just before the current valid stretch; above it
        are the indices of unmatched '(' characters.

        BUG FIX: the previous implementation only counted runs of adjacent
        "()" pairs, so nested input gave wrong answers (e.g. "(())" -> 0
        instead of 4), and a run ending at the end of the string was never
        compared against the maximum.
        """
        stack = [-1]  # sentinel: index before the current valid stretch
        longest = 0
        for i, ch in enumerate(s):
            if ch == '(':
                stack.append(i)
            else:
                stack.pop()
                if not stack:
                    # Unmatched ')': becomes the new left boundary.
                    stack.append(i)
                else:
                    longest = max(longest, i - stack[-1])
        return longest
if __name__ == "__main__":
    # Quick manual check against a sample input string.
    sample = "((())()()(()((()()()((()))()()((()))()"
    print(Solution().longestValidParentheses(sample))
| [
"darkblackvoid@yahoo.com"
] | darkblackvoid@yahoo.com |
bd52bb1039bba3f6e62021f5e1b5035e90a422c1 | 7bc0075367290ff06565991e19033b13f0604f96 | /Mundo 2/aula13/desafio047.py | 531922ef5ea3d9c949fd2497d363dc2cbe2bf5db | [] | no_license | iamtheluiz/curso_em_video_python | 298acd90e36473fbf797ba7bf85d729d0ca28407 | aa4247b7d206771f9c9b08ad5d8585c3813ddaff | refs/heads/master | 2020-04-12T16:17:51.672662 | 2019-01-22T00:10:41 | 2019-01-22T00:10:41 | 162,608,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # imports
print("""
|******************|
| Desafio047 |
|******************|
""")
print("Números Pares de 1 até 50")
# Generate the even numbers 2..50 directly and join them with ", ",
# instead of accumulating into a string one item at a time.
result = ", ".join(str(n) for n in range(2, 51, 2))
print(result)
| [
"iamtheluiz.dev@gmail.com"
] | iamtheluiz.dev@gmail.com |
5291f471b2d5e46a05cd5e2ec8fd990b3acf7711 | 33114a0f96406008da69adac757b271229fb81bf | /__init__.py | 5488e89bfa7b2ba3c29c0da45814f981069162df | [] | no_license | ROB-Seismology/simpledb | 9f1eaf3ad4cd2367a03b5e79931a18959e9a370d | 4993dd472d1cb37023751ffca80e4dde7a6ad7fc | refs/heads/master | 2021-06-24T12:13:34.309067 | 2020-10-20T10:30:34 | 2020-10-20T10:30:34 | 90,835,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | """
Module providing basic read-write access to SQL databases.
Currently supports MySQL, PostgreSQL and SQLite/SpatiaLite databases.
Author: Kris Vanneste, Royal Observatory of Belgium
"""
from __future__ import absolute_import, division, print_function, unicode_literals

## Reloading mechanism: 'reloading' only exists on a second import of this
## module, so the NameError distinguishes first import from reload.
try:
	reloading
except NameError:
	## Module is imported for the first time
	reloading = False
else:
	## Module is reloaded
	reloading = True
	try:
		## Python 3: reload() moved into importlib
		from importlib import reload
	except ImportError:
		## Python 2: reload() is a builtin
		pass

## Import submodules

## base
if not reloading:
	from . import base
else:
	reload(base)
from .base import (SQLDB, SQLRecord, build_sql_query)

## sqlite, depends on base
if not reloading:
	from . import sqlite
else:
	reload(sqlite)
from .sqlite import (SQLiteDB, query_sqlite_db, query_sqlite_db_generic)

## Public API starts with the always-available backends
__all__ = base.__all__ + sqlite.__all__

## mysql, depends on base; only exposed when the MySQL driver is installed
if not reloading:
	from . import mysql
else:
	reload(mysql)
if mysql.HAS_MYSQL:
	from .mysql import (MySQLDB, query_mysql_db, query_mysql_db_generic)
	__all__ += mysql.__all__

## postgres, depends on base; only exposed when the PostgreSQL driver is installed
if not reloading:
	from . import postgres
else:
	reload(postgres)
if postgres.HAS_POSTGRES:
	from .postgres import (PgSQLDB, query_pgsql_db, query_pgsql_db_generic)
	__all__ += postgres.__all__
| [
"kris.vanneste@oma.be"
] | kris.vanneste@oma.be |
8626563455dc17cda7a8a62f924deeded4bea943 | f083496832666105ae2369df3c6c7943dfcb9eb6 | /src/transformers/models/auto/auto_factory.py | 0d82184be57882677ad60997b568afd6e3022ead | [
"Apache-2.0"
] | permissive | seujung/transformers | b54fe19e581d414b932ca79d94260471f90b8b49 | ef74d5aaebd9b3ccf0623c38c9929f503d869a50 | refs/heads/master | 2023-06-04T15:57:11.681334 | 2021-06-25T02:27:52 | 2021-06-25T02:27:52 | 294,680,346 | 1 | 0 | Apache-2.0 | 2020-09-11T11:44:46 | 2020-09-11T11:44:45 | null | UTF-8 | Python | false | false | 28,249 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory function to build auto-model classes."""
import types
from ...configuration_utils import PretrainedConfig
from ...deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from ...file_utils import copy_func
from ...utils import logging
from .configuration_auto import AutoConfig, replace_list_option_in_docstrings
logger = logging.get_logger(__name__)
CLASS_DOCSTRING = """
This is a generic model class that will be instantiated as one of the model classes of the library when created
with the :meth:`~transformers.BaseAutoModelClass.from_pretrained` class method or the
:meth:`~transformers.BaseAutoModelClass.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
FROM_CONFIG_DOCSTRING = """
Instantiates one of the model classes of the library from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.BaseAutoModelClass.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('checkpoint_placeholder')
>>> model = BaseAutoModelClass.from_config(config)
"""
FROM_PRETRAINED_TORCH_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
The model is set in evaluation mode by default using ``model.eval()`` (so for instance, dropout modules are
deactivated). To train the model, you should first set it back in training mode with ``model.train()``
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder')
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_pretrained('./tf_model/shortcut_placeholder_tf_model_config.json')
>>> model = BaseAutoModelClass.from_pretrained('./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
FROM_PRETRAINED_TF_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model in a
TensorFlow model using the provided conversion scripts and loading the TensorFlow model
afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder')
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained('./pt_model/shortcut_placeholder_pt_model_config.json')
>>> model = BaseAutoModelClass.from_pretrained('./pt_model/shortcut_placeholder_pytorch_model.bin', from_pt=True, config=config)
"""
FROM_PRETRAINED_FLAX_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model in a
TensorFlow model using the provided conversion scripts and loading the TensorFlow model
afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder')
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained('./pt_model/shortcut_placeholder_pt_model_config.json')
>>> model = BaseAutoModelClass.from_pretrained('./pt_model/shortcut_placeholder_pytorch_model.bin', from_pt=True, config=config)
"""
def _get_model_class(config, model_mapping):
supported_models = model_mapping[type(config)]
if not isinstance(supported_models, (list, tuple)):
return supported_models
name_to_model = {model.__name__: model for model in supported_models}
architectures = getattr(config, "architectures", [])
for arch in architectures:
if arch in name_to_model:
return name_to_model[arch]
elif f"TF{arch}" in name_to_model:
return name_to_model[f"TF{arch}"]
elif f"Flax{arch}" in name_to_model:
return name_to_model[f"Flax{arch}"]
# If not architecture is set in the config or match the supported models, the first element of the tuple is the
# defaults.
return supported_models[0]
class _BaseAutoModelClass:
    # Base class for auto models.
    # Generated subclasses (see `auto_class_factory`) override this with the
    # config-class -> model-class mapping used for dispatch.
    _model_mapping = None
    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden: auto classes are pure dispatchers.
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
            f"`{self.__class__.__name__}.from_config(config)` methods."
        )
    # NOTE: `from_config` and `from_pretrained` take `cls` but are deliberately
    # *not* decorated here; `auto_class_factory` copies them (to give each
    # generated class its own docstring) and registers the copies via
    # `classmethod(...)`.
    def from_config(cls, config, **kwargs):
        # Dispatch on the exact config class (no subclass lookup).
        if type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            if is_deepspeed_zero3_enabled():
                import deepspeed
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                # this immediately partitions the model across all gpus, to avoid the overhead in time
                # and memory copying it on CPU or each GPU first
                with deepspeed.zero.Init(config=deepspeed_config()):
                    return model_class(config, **kwargs)
            else:
                return model_class(config, **kwargs)
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        # Resolve a config first (unless the caller supplied one), then delegate
        # to the concrete model class's own `from_pretrained`.
        config = kwargs.pop("config", None)
        # Flag consumed downstream — presumably marks the call as coming from an
        # auto class; TODO confirm against the config/model loading code.
        kwargs["_from_auto"] = True
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )
def insert_head_doc(docstring, head_doc=""):
    """Specialize a docstring template for a given model head.

    When ``head_doc`` is non-empty, the generic "model classes" phrase is
    annotated with the head description; otherwise it becomes the "base model
    classes" phrase.
    """
    target_phrase = "one of the model classes of the library "
    if head_doc:
        replacement = f"one of the model classes of the library (with a {head_doc} head) "
    else:
        replacement = "one of the base model classes of the library "
    return docstring.replace(target_phrase, replacement)
def auto_class_factory(name, model_mapping, checkpoint_for_example="bert-base-cased", head_doc=""):
    """Build a concrete auto-model class (e.g. ``AutoModel``) under ``name``.

    The generated class inherits from `_BaseAutoModelClass`, carries the given
    model mapping, and receives framework-specific docstrings for its
    ``from_config`` and ``from_pretrained`` classmethods.
    """
    # Subclass the base auto class under the requested name and attach its mapping.
    generated = types.new_class(name, (_BaseAutoModelClass,))
    generated._model_mapping = model_mapping
    generated.__doc__ = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc).replace("BaseAutoModelClass", name)
    # `from_config` is copied so this generated class can carry its own
    # specialized docstring, then re-registered as a classmethod.
    config_method = copy_func(_BaseAutoModelClass.from_config)
    config_doc = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
    config_doc = config_doc.replace("BaseAutoModelClass", name)
    config_doc = config_doc.replace("checkpoint_placeholder", checkpoint_for_example)
    config_method.__doc__ = config_doc
    config_method = replace_list_option_in_docstrings(model_mapping, use_model_types=False)(config_method)
    generated.from_config = classmethod(config_method)
    # Pick the docstring template matching the framework encoded in the class name.
    if name.startswith("TF"):
        template = FROM_PRETRAINED_TF_DOCSTRING
    elif name.startswith("Flax"):
        template = FROM_PRETRAINED_FLAX_DOCSTRING
    else:
        template = FROM_PRETRAINED_TORCH_DOCSTRING
    # Same copy-and-register dance for `from_pretrained`.
    pretrained_method = copy_func(_BaseAutoModelClass.from_pretrained)
    pretrained_doc = insert_head_doc(template, head_doc=head_doc)
    pretrained_doc = pretrained_doc.replace("BaseAutoModelClass", name)
    pretrained_doc = pretrained_doc.replace("checkpoint_placeholder", checkpoint_for_example)
    # e.g. "dbmdz/bert-base-german-cased" -> "bert" for the example file names.
    shortcut_name = checkpoint_for_example.split("/")[-1].split("-")[0]
    pretrained_doc = pretrained_doc.replace("shortcut_placeholder", shortcut_name)
    pretrained_method.__doc__ = pretrained_doc
    pretrained_method = replace_list_option_in_docstrings(model_mapping)(pretrained_method)
    generated.from_pretrained = classmethod(pretrained_method)
    return generated
def get_values(model_mapping):
    """Flatten a model mapping's values into a single list.

    List/tuple entries are expanded in place; every other entry is appended
    as-is. Ordering follows the mapping's iteration order.
    """
    flattened = []
    for entry in model_mapping.values():
        if isinstance(entry, (list, tuple)):
            flattened.extend(entry)
        else:
            flattened.append(entry)
    return flattened
| [
"noreply@github.com"
] | seujung.noreply@github.com |
4016b3761cfd5ec4682849313798fe876b99d853 | 21300b2723f6a161753cc286a4662d1d65111d2a | /src/offline/terms_filter.py | 19717b72a46f5b4e914c98729e9f198c75743566 | [] | no_license | fagan2888/linkedin-tags | 275d85b7dbccae5f650573ecb99ea1017c719321 | f6a955079355022e8500be797cd16175bd6353b3 | refs/heads/master | 2021-06-19T06:08:02.179016 | 2017-07-04T15:40:22 | 2017-07-04T15:40:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | from directories import Directories
from operator import itemgetter
from os import listdir
class Terms:
    """Accumulates the union of all shortlisted terms from the term files."""

    def __init__(self):
        # Set of every term read so far (one term per line in the shortlists).
        self.terms = set()

    @staticmethod
    def enlist_termfiles():
        """Return the data files found in the terms folder."""
        all_files = listdir(Directories.TERMS_FOLDER)
        return Directories.only_data(all_files)

    def include_shortlist(self, shortlist):
        """Add every (stripped) line of one shortlist file to ``self.terms``."""
        target_dir = Directories.TERMS_FOLDER
        # Renamed the handle from `input` to avoid shadowing the builtin.
        with open(target_dir + shortlist) as infile:
            for line in infile:
                self.terms.add(line.strip())

    def include_all_shortlists(self):
        """Load every shortlist file in the terms folder."""
        for termfile in Terms.enlist_termfiles():
            self.include_shortlist(termfile)
class Filterer:
    """Filters one combined-results document down to shortlisted terms.

    ``corpus`` maps term -> ratio for every parsed line whose term appears in
    the supplied shortlist.
    """

    def __init__(self, document):
        # File name (within the combined-results folder) this instance filters.
        self.document = document
        # term -> ratio, filled by `load_corpus`.
        self.corpus = {}

    @staticmethod
    def enlist_combined():
        """Return the data files found in the combined-results folder."""
        all_files = listdir(Directories.RESULTS_FOLDER + Directories.COMBINED_SUBFOLDER)
        return Directories.only_data(all_files)

    def process(self, line, terms_list):
        """Parse one ``"term,ratio"`` line into the corpus.

        Return values are kept identical to the original for compatibility:
        None for malformed lines, False when the term is not shortlisted, and
        None (implicit) after a successful add.
        """
        term_ratio = line.split(',')
        if len(term_ratio) != 2:
            return None
        term = term_ratio[0]
        if term not in terms_list.terms:
            return False
        self.corpus[term] = float(term_ratio[1])

    def load_corpus(self, terms_list):
        """Read the document (skipping its header row) into ``self.corpus``."""
        target_dir = Directories.RESULTS_FOLDER + Directories.COMBINED_SUBFOLDER
        # Renamed the handle from `input` to avoid shadowing the builtin; the
        # unused `result =` binding was dropped.
        with open(target_dir + self.document) as infile:
            infile.readline()  # skip header row
            for line in infile:
                self.process(line, terms_list)

    def save_filtered(self):
        """Write the corpus to the filtered folder, sorted by ratio descending."""
        target_dir = Directories.FILTERED_FOLDER
        Directories.create_directory(target_dir)
        with open(target_dir + self.document, 'w') as output:
            output.write(Directories.HEADER_ROW)
            ranklist = sorted(self.corpus.items(), key=itemgetter(1), reverse=True)
            for term, ratio in ranklist:
                output.write(term + ',' + str(ratio) + '\n')
# Script entry point: load every shortlist, then filter each combined-results
# document against it and write the ranked output.
if __name__ == '__main__':
    all_terms = Terms()
    all_terms.include_all_shortlists()
    documents = Filterer.enlist_combined()
    for document in documents:
        print('filtering results from ' + document)
        filterer = Filterer(document)
        filterer.load_corpus(all_terms)
        filterer.save_filtered()
| [
"samuelfzhang@gmail.com"
] | samuelfzhang@gmail.com |
a0a3ea768d64f88cd14469cd9615137023869813 | aabb5bff92930439c37535589b827d44a8e4e66b | /AcmeExplorer/urls.py | cebbc838e5e71d66e857a4296210ab394f1f496f | [] | no_license | Dobloq/Django | c8d029bf905ced30f4ac573a42ab4732f012c8a0 | f5262e5892547eef4ff05155b62dfffee342cb94 | refs/heads/master | 2021-08-02T07:29:25.554978 | 2021-07-23T23:17:00 | 2021-07-23T23:17:00 | 132,141,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,963 | py | """DP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from AcmeExplorer.views import *
app_name = "AcmeExplorer"
# URL table for the AcmeExplorer app. Views and class-based views come from
# the star import of AcmeExplorer.views above.
urlpatterns = [
    # Landing page and authentication
    path('', home, name="home"),
    path('logout/', Logout.as_view(), name="logout"),
    path('login/', Login.as_view(), name="login"),
    # path('ranger/nonauth/create', createRanger, name="createRanger"),
    ################################# Legal Text #########################################
    path('legalText/create/', LegalTextCreate.as_view(), name="legalTextCreate"),
    path('legalText/<int:pk>/edit/', LegalTextUpdate.as_view(), name="legalTextUpdate"),
    path('legalText/<int:pk>/delete/', LegalTextDelete.as_view(), name="legalTextDelete"),
    path('legalText/<int:pk>/', LegalTextDisplay.as_view(), name="legalTextDisplay"),
    path('legalText/', LegalTextList.as_view(), name="legalTextList"),
    ################################# Social Identities #########################################
    path('socialIdentities/<int:user_pk>/', SocialIdentitiesList.as_view(), name="socialIdentitiesUserList"),
    path('socialIdentities/', SocialIdentitiesList.as_view(), name="socialIdentitiesList"),
    path('socialIdentities/user/<int:pk>', SocialIdentitiesDisplay.as_view(), name="socialIdentitiesDisplay"),
    path('socialIdentities/user/<int:pk>/edit', SocialIdentitiesUpdate.as_view(), name="socialIdentitiesUpdate"),
    path('socialIdentities/user/<int:pk>/delete', SocialIdentitiesDelete.as_view(), name="socialIdentitiesDelete"),
    path('socialIdentities/create', socialIdentitiesCreate, name="socialIdentitiesCreate"),
    ################################# Folder #########################################
    path('folder/create', folderCreate, name="folderCreate"),
    path('folder/<int:pk>', FolderDisplay.as_view(), name="folderDisplay"),
    path('folder/', FolderList.as_view(), name="folderList"),
    path('folder/<int:pk>/edit', folderUpdate, name="folderUpdate"),
    path('folder/<int:pk>/delete', FolderDelete.as_view(), name="folderDelete"),
    ################################## Message ##############################################
    path('message/create', messageCreate, name="messageCreate"),
    path('message/<int:pk>', MessageDisplay.as_view(), name="messageDisplay"),
    path('message/', MessageList.as_view(), name="messageList"),
    path('message/<int:pk>/delete', MessageDelete.as_view(), name="messageDelete"),
    path('message/broadcast', messageBroadcast, name="messageBroadcast"),
    ################################## Contact ##############################################
    path('contact/create', contactCreate, name="contactCreate"),
    path('contact/<int:pk>', ContactDisplay.as_view(), name="contactDisplay"),
    path('contact/', ContactList.as_view(), name="contactList"),
    path('contact/<int:pk>/edit', contactUpdate, name="contactUpdate"),
    path('contact/<int:pk>/delete', ContactDelete.as_view(), name="contactDelete"),
    ################################# Category #########################################
    path('category/create/', CategoryCreate.as_view(), name="categoryCreate"),
    path('category/<int:pk>/edit/', CategoryUpdate.as_view(), name="categoryUpdate"),
    path('category/<int:pk>/delete/', CategoryDelete.as_view(), name="categoryDelete"),
    path('category/<int:pk>/', CategoryDisplay.as_view(), name="categoryDisplay"),
    path('category/', CategoryList.as_view(), name="categoryList"),
    ################################# Curriculum #####################################
    path('curriculum/create', curriculumCreate, name="curriculumCreate"),
    path('curriculum/', CurriculumList.as_view(), name="curriculumList"),
    path('curriculum/ranger/<int:pk>', CurriculumRangerList.as_view(), name="curriculumRangerList"),
    path('curriculum/<int:pk>', CurriculumDisplay.as_view(), name="curriculumDisplay"),
    path('curriculum/<int:pk>/delete', CurriculumDelete.as_view(), name="curriculumDelete"),
    ################################# Actor creation #################################
    #path('actor/create/', ActorCreate.as_view(), name="actorCreate"),
    path('ranger/create/', RangerCreate.as_view(), name="rangerCreate"),
    path('explorer/create/', ExplorerCreate.as_view(), name="explorerCreate"),
    path('admin/create/', AdminCreate.as_view(), name="adminCreate"),
]
| [
"dobloq@gmail.com"
] | dobloq@gmail.com |
ced93fedbfb6a93b2af0345ded4072f4e6ddfd54 | 9470c82b2bb848c18750cfc2593304cefa9ece08 | /geomloss/__init__.py | bd098a464a6b8d0441635e2d398fbb9952f654e0 | [
"MIT"
] | permissive | arthurmensch/geomloss | 450e8f2e43a4dc77f637d0ee97d69526a482407e | 9188d051302cbc4a9a7c2224cfab8ed4a31b23b7 | refs/heads/master | 2020-06-15T09:43:29.327790 | 2019-05-05T16:54:10 | 2019-05-05T16:54:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import sys, os.path
__version__ = '0.2.1'
from .samples_loss import SamplesLoss
__all__ = sorted(["SamplesLoss"]) | [
"jean.feydy@gmail.com"
] | jean.feydy@gmail.com |
7f788150cb65d8a9dd0618a8bae8840a7efe7aac | b788f1f8bfa8949177e28dd4be436572162c418b | /regular expression.py | 5622c6603c31b7c63a0a789938965fc66832786f | [] | no_license | KaziMotiour/pyhton-OOP | bc9506f3afe7686a7451de9a5448c759f3cdcbac | 8e85cbe31809a11293fb90d6e39b2d0293cff9b5 | refs/heads/master | 2022-02-18T09:40:05.274955 | 2019-09-02T17:46:53 | 2019-09-02T17:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | import re
def multi_find(text_patterns,phrase):
for pat in test_patterns:
print("Search for pattern {}".format(pat))
print(re.findall(pat,phrase))
print('\n')
patterns = ['Team1', 'Team2']
text = "This is a starting! with Team1, 1234567, not to others arr arrr"
# for pattern in patterns:
# print("I'm searching for: "+pattern)
# if re.search(pattern,text):
# #if pattern in text:
# print("Match")
# else:
# print("Not Match")
# match = re.search('Team1',text)
# print(match.start())
# textSplit = re.split('with', text)
# print(textSplit)
# print(re.findall('a', text))
# test_patterns = ['ar*']
# test_patterns = ['ar+']
# test_patterns = ['ar{2}']
# test_patterns = ['ar{1,2}']
# test_patterns = ['[^!>?]+']
# test_patterns = ['[a-z]+'] # show all the lowercase in text
# test_patterns = ['[A-Z]+'] # show all the uppercase in text
# test_patterns = [r'\d'] # show all the number in text
# test_patterns = [r'\d+'] # show all the number in text
# test_patterns = [r'\D+'] # show all the text except number in text
test_patterns = [r'\w+'] # show all the text alpha numeric in text
multi_find(test_patterns,text) | [
"kmatiour30@gmail.com"
] | kmatiour30@gmail.com |
f19bfda0103aa1651fbc00ef6c04a78931717828 | 7830408f95e24bf0f7ea7a7c2b3f52fb98ec83a7 | /assignment7_sum_of_n_numbers.py | adb3860371053d517159dfe7dcdd1fd336be1a92 | [] | no_license | gsaikarthik/python-assignment7 | d76b78f435a104030a255ddfbf7ed1cdead6f7fc | 214bc32d8dca00ed08b52a738471fcd4c7cc713f | refs/heads/master | 2022-11-05T14:45:08.833102 | 2020-06-25T14:39:02 | 2020-06-25T14:39:02 | 274,939,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | num=int(input("Enter a number "))
sum = 0
i = 1
while i <= num:
sum = sum + i
i = i+1
print("The sum of natural number is ",sum) | [
"noreply@github.com"
] | gsaikarthik.noreply@github.com |
43de0f25b05156487048885ff2232634c02ff850 | afd333532543f7e650ef3796e5b6b929fb997524 | /Python/Activities/Activity17.py | 8a45371ac505020b40421157950bb85519ebade2 | [] | no_license | ShaliniSS93/FST-M1 | d2434fea9f1c7222249c034a4d43803792d29a41 | 20785a1df0e55523ddba965c1914757eb28b6637 | refs/heads/main | 2023-08-28T00:38:32.751616 | 2021-10-23T17:18:10 | 2021-10-23T17:18:10 | 408,321,526 | 0 | 0 | null | 2021-09-20T05:30:42 | 2021-09-20T05:30:41 | null | UTF-8 | Python | false | false | 503 | py |
# Import pandas
import pandas
# Create a Dictionary that will hold our data
data = {
"Usernames": ["admin", "Charles", "Deku"],
"Passwords": ["password", "Charl13", "AllMight"]
}
# Create a DataFrame using that data
dataframe = pandas.DataFrame(data)
# Print the DataFrame
print(dataframe)
"""
Write the DataFrame to a CSV file
To avoid writing the index numbers to the file as well
the index property is set to false
"""
dataframe.to_csv("sample.csv", index=False) | [
"noreply@github.com"
] | ShaliniSS93.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.