index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
16,300 | 0d09823014dce7cd11b4513e09c5e879beeef766 | import redis
def init_client(host='192.168.204.128', port=6379, db=0):
    """Create and return a Redis client.

    The parameters default to the original hard-coded server, so all
    existing callers keep working; new callers may target another
    host/port/db without editing this module.
    """
    return redis.StrictRedis(host=host, port=port, db=db)
def set_key(key, value):
    """Store *value* under *key* with no expiry."""
    init_client().set(key, value)
def set_key_expire(key, value, time):
    """Store *value* under *key*; the third positional argument of
    StrictRedis.set is the ``ex`` (seconds) TTL, so the key expires
    after *time* seconds."""
    conn = init_client()
    conn.set(key, value, time)
def get_key(key):
    """Return the value stored at *key* (as returned by redis-py)."""
    conn = init_client()
    return conn.get(key)
if __name__ == "__main__":
set_key("foo1", "far")
set_key("foo2", "far")
set_key("foo3", "far")
|
16,301 | d0e8af8ff44af8268d76a472ff6595762a6ceb86 | from data_structures import DataStructures
import inspect
class Interface:
    """Thin facade over DataStructures used to introspect its members."""

    def __init__(self):
        # Instance is created here but not used by MethodsCount below;
        # presumably kept for other (unseen) methods — TODO confirm.
        self.objDataStructures = DataStructures()
        print("initialized Interface")

    def MethodsCount(self, chapter):
        # NOTE(review): despite the name, this returns the full
        # (name, member) list from inspect.getmembers, not a count.
        # Returns None for any chapter other than '1'.
        if chapter == '1':
            return inspect.getmembers(DataStructures)
|
16,302 | b225968f85322e50052feb56b9742b3271f58e32 | import FWCore.ParameterSet.Config as cms
from RecoLocalCalo.HcalRecProducers.HBHEIsolatedNoiseReflagger_cfi import *
# Default HBHE local-reconstruction task plus its legacy Sequence wrapper.
hcalGlobalRecoTask = cms.Task(hbhereco)
hcalGlobalRecoSequence = cms.Sequence(hcalGlobalRecoTask)
#--- for Run 3 and later
from Configuration.Eras.Modifier_run3_HB_cff import run3_HB
from RecoLocalCalo.HcalRecProducers.HBHEPhase1Reconstructor_cfi import hbheprereco as _phase1_hbheprereco
# Run 3 swaps the Phase-1 reconstructor in for hbhereco.
run3_HB.toReplaceWith(hbhereco, _phase1_hbheprereco)
#--- for Run 3 on GPU
from Configuration.ProcessModifiers.gpu_cff import gpu
from RecoLocalCalo.HcalRecProducers.hcalCPURecHitsProducer_cfi import hcalCPURecHitsProducer as _hcalCPURecHitsProducer
# With the gpu modifier, hbhereco becomes a CPU-side converter that reads
# the GPU producer's rechits; output labels are cleared on the clone.
gpu.toReplaceWith(hbhereco, _hcalCPURecHitsProducer.clone(
    recHitsM0LabelIn = "hbheRecHitProducerGPU",
    recHitsM0LabelOut = "",
    recHitsLegacyLabelOut = ""
))
|
16,303 | d73a8ed4540f665de2dfd74bb0bc13d7aaa5ef43 | from __future__ import absolute_import, print_function, division
import warnings
import numpy as np
from matplotlib.collections import LineCollection
import astropy.units as u
# from . import math_util
# from . import wcs_util
# from . import angle_util as au
# from .ticks import tick_positions, default_spacing
# from .decorators import auto_refresh
# import math_util
# import wcs_util
# import angle_util as au
# from ticks import tick_positions, default_spacing
from .decorators import auto_refresh
class Grid(object):
    """Grid-line controls for a WCSAxes-backed plot.

    Thin wrapper over ``self.ax.coords[...].grid(...)`` exposing spacing,
    color, alpha, linewidth and linestyle setters for an axis pair.
    """

    # @auto_refresh
    def __init__(self, parent, x, y):
        # Save axes and wcs information
        self.ax = parent.ax
        self._wcs = parent._wcs
        self._figure = parent._figure
        # Indices of the longitudinal (x) and latitudinal (y) WCS axes.
        self.x = x
        self.y = y
        self.x_unit = self._wcs.wcs.cunit[self.x]
        self.y_unit = self._wcs.wcs.cunit[self.y]
        self.grid_type = parent.grid_type
        # Save plotting parameters (required for @auto_refresh)
        self._parameters = parent._parameters
        # Initialize grid container
        # self._grid = None
        # self._active = False
        # Set defaults
        # self.x_auto_spacing = True
        # self.y_auto_spacing = True
        # self.default_color = 'white'
        # self.default_alpha = 0.5
        # Set grid event handler
        # TODO: Ask Tom about grid event handlers for WCSAxes..
        # self.ax.callbacks.connect('xlim_changed', self._update_norefresh)
        # self.ax.callbacks.connect('ylim_changed', self._update_norefresh)

    # @auto_refresh
    # def _remove(self):
    #     self._grid.remove()

    # @auto_refresh
    def set_xspacing(self, xspacing):
        '''
        Set the grid line spacing in the longitudinal direction

        Parameters
        ----------
        xspacing : { float, str }
            The spacing in the longitudinal direction. To set the spacing
            to be the same as the ticks, set this to 'tick'
        '''
        # TODO: Assumes unit is degrees
        if xspacing != 'tick':
            # Spacing is attached to the tick system, scaled by the axis unit.
            self.ax.coords[self.x].set_ticks(spacing=xspacing * self.x_unit)
        else:
            self.ax.coords[self.x].grid(grid_type=self.grid_type)

    # @auto_refresh
    def set_yspacing(self, yspacing):
        '''
        Set the grid line spacing in the latitudinal direction

        Parameters
        ----------
        yspacing : { float, str }
            The spacing in the latitudinal direction. To set the spacing
            to be the same as the ticks, set this to 'tick'
        '''
        if yspacing != 'tick':
            self.ax.coords[self.y].set_ticks(spacing=yspacing * self.y_unit)
        else:
            self.ax.coords[self.y].grid(grid_type=self.grid_type)

    # @auto_refresh
    def set_color(self, color):
        '''
        Set the color of the grid lines

        Parameters
        ----------
        color : str
            The color of the grid lines
        '''
        self.ax.coords.grid(color=color, grid_type=self.grid_type)

    # @auto_refresh
    def set_alpha(self, alpha):
        '''
        Set the alpha (transparency) of the grid lines

        Parameters
        ----------
        alpha : float
            The alpha value of the grid. This should be a floating
            point value between 0 and 1, where 0 is completely
            transparent, and 1 is completely opaque.
        '''
        self.ax.coords.grid(alpha=alpha, grid_type=self.grid_type)

    # @auto_refresh
    def set_linewidth(self, linewidth):
        '''Set the width of the grid lines (in points).'''
        self.ax.coords.grid(linewidth=linewidth, grid_type=self.grid_type)

    # @auto_refresh
    def set_linestyle(self, linestyle):
        '''Set the matplotlib line style of the grid lines.'''
        self.ax.coords.grid(linestyle=linestyle, grid_type=self.grid_type)

    # @auto_refresh
    def show(self):
        '''Draw the coordinate grid on the axes.'''
        self.ax.grid(grid_type=self.grid_type)

    # @auto_refresh
    def hide(self):
        '''Hide the coordinate grid.'''
        # TODO: Doesn't work..
        self.ax.grid(draw_grid=False, grid_type=self.grid_type)
|
16,304 | 4d7f9e387281b05167f0869486f56016516cfbc0 | from templates.utils import settings, templater
template = """
country_event = {{
id = dmm_mod_clear.{count}
hide_window = yes
is_triggered_only = yes
trigger = {{
has_global_flag = dmm_mod_{count}
}}
immediate = {{
remove_global_flag = dmm_mod_{count}
remove_global_flag = dmm_mod_{count}_disabled
}}
}}"""
def process(publish_dir):
    """Render one clear-event per mod slot and write the events file."""
    events = [template.format(count=idx)
              for idx in range(1, settings.total + 1)]
    templater.process_file(
        publish_dir + "/events/dmm_mod_clear.txt", events=events)
|
16,305 | 415bb1338253331da6df6fcf9a683bddf4e4fb61 | """
Populate the operons table based on information found
in ./SampleFiles/OperonSet.txt
"""
import pymysql
import subprocess
import getpass

# open sql connection
sqlconn = pymysql.connect(host='bm185s-mysql.ucsd.edu', user='kkchau',
                          passwd=getpass.getpass("Input password: "), db='kkchau_db')

# populate operons table just from OperonSet.txt:
# non-comment lines, keeping only operons marked Strong or Confirmed.
operons_lines = subprocess.check_output(['grep', "^[^#]", './SampleFiles/OperonSet.txt']
                                        ).splitlines()
operons_lines = [line.decode('ascii') for line in operons_lines]
for line in operons_lines:
    line = line.strip().split()
    # only strong or confirmed operons (evidence level is the last column)
    if 'Strong' in line[-1] or 'Confirmed' in line[-1]:
        genes = line[5].strip().split(',')
        # for each gene member of the operon
        for g in genes:
            # work with gene synonyms in case the gene is not in the genes table
            all_syns = [g]
            with sqlconn.cursor() as cur:
                # Parameterized queries instead of string formatting:
                # avoids SQL injection and quoting bugs on odd gene names.
                cur.execute("SELECT gene_id FROM gene_synonyms WHERE synonym=%s", (g,))
                g_id = cur.fetchone()
                if g_id:
                    g_id = g_id[0]
                    cur.execute("SELECT synonym FROM gene_synonyms WHERE gene_id=%s", (g_id,))
                    g_syns = cur.fetchall()
                    all_syns.extend(synonym[0] for synonym in g_syns)
            # for each gene and its synonyms
            for s in all_syns:
                with sqlconn.cursor() as cur:
                    # get the gene_id
                    cur.execute("SELECT gene_id FROM genes WHERE name=%s", (s,))
                    result = cur.fetchone()
                    # stop trying synonyms if the gene is not found
                    if not result:
                        break
                    # insert gene with operon name and confidence level
                    cur.execute("INSERT INTO operons VALUES (%s, %s, %s)",
                                (result[0], line[0], line[-1]))
sqlconn.commit()

# populate operons table just from TUSet.txt
tu_lines = subprocess.check_output(["grep", "^[^#]", "./SampleFiles/TUSet.txt"]).splitlines()
# BUG FIX: the original comprehension iterated over the undefined name
# ``tu_line`` (NameError); it must iterate ``tu_lines``.
tu_lines = [line.decode('ascii').strip().split() for line in tu_lines]
for line in tu_lines:
    # BUG FIX: the original used ``or`` here, which skipped every line
    # missing *either* keyword (i.e. kept only lines containing both);
    # the intent — matching the OperonSet loop above — is to skip lines
    # that are neither Strong nor Confirmed.
    if 'Strong' not in line[-1] and 'Confirmed' not in line[-1]:
        continue
sqlconn.close()
|
16,306 | a1ce5a995b1ec0691fec0ce1a863c11797cac594 | import numpy as np
def kneeThresholding(_Y):
    """
    Perform knee (or elbow) thresholding.

    To determine the number of clusters:
    1. Order the input.
    2. Plot the input (x-axis: input index, y-axis: input)
    3. Compute the line crossing the points marked by the first and last
       input of the previous plot.
    4. Compute the distances of all the points of the previous plot
       to the line computed in step 3.
    5. Detect the point with the largest distance (knee detection).
    6. The index of the point corresponds to the expected threshold.

    _Y: 1-D array-like of (ordered) values.
    RETURN: Number of clusters (estimated, 1-based index of the knee)
    """
    # Accept plain lists as well as ndarrays; force float math so the
    # slope below is never an integer division.
    _Y = np.asarray(_Y, dtype=float)
    n = _Y.shape[0]
    if n < 2:
        # Fewer than two points define no chord (the original divided by
        # zero here); a single point is trivially one cluster.
        return 1
    # Chord through first and last point: y = l*x + b
    l = (_Y[-1] - _Y[0]) / (n - 1)
    b = _Y[-1] - l * (n - 1)
    # Vectorized perpendicular (Euclidean) distances of every point to
    # the chord — same formula as the original per-point loop.
    x = np.arange(n)
    d = np.abs(l * x - _Y + b) / np.sqrt(l * l + 1.0)
    # The point of maximum distance is the knee; +1 converts the 0-based
    # index into the estimated cluster count.
    return int(np.argmax(d) + 1)
16,307 | b5268f033d7c168d5c61d5646883a22f8b55626a | import threading
import time
import gremlin
# Decorator factory binding the handlers below to the physical
# "T.16000M" stick (device id 72331530) while in the "Default" mode.
t16000 = gremlin.input_devices.JoystickDecorator(
    "T.16000M",
    72331530,
    "Default"
)
# Maximum duration of a "short" press
g_timeout = 0.3
# Seconds to hold the button pressed
g_hold_time = 0.1
# Seconds to wait between releasing and pressing the button again
g_pause_time = 0.1
# Timer object triggering the "long press"
g_timer = None
# Flag indicating if the pulse action is running or should be stopped
g_is_running = True
# Assumption same key for both short and long press, but long press will start
# to pulse the key instead of just holding it down.
def pulse(vjoy, btn_id):
    """Runs the long press macro repeatedly with a pause in between executions.

    :param vjoy the vJoy proxy object
    :param btn_id the id of the button to press and release in the pulse
    """
    global g_is_running
    g_is_running = True
    # Toggle the virtual button until a release handler clears the flag.
    while g_is_running:
        vjoy[1].button(btn_id).is_pressed = True
        time.sleep(g_hold_time)
        vjoy[1].button(btn_id).is_pressed = False
        time.sleep(g_pause_time)
@t16000.button(1)
def button1(event, vjoy):
    """Short press taps vJoy button 5; holding past g_timeout pulses it."""
    # Id of the button to press or pulse
    btn_id = 5
    global g_is_running, g_timer
    if event.is_pressed:
        # Press the button
        vjoy[1].button(btn_id).is_pressed = True
        # Start a timer so we can transition to pulsing when the button
        # is held down long enough
        g_timer = threading.Timer(g_timeout, lambda: pulse(vjoy, btn_id))
        g_timer.start()
    else:
        # Terminate the pulsing if needed and remove the timer if needed
        g_is_running = False
        if g_timer:
            g_timer.cancel()
        # Ensure the button is released
        vjoy[1].button(btn_id).is_pressed = False
@t16000.button(2)
def button2(event, vjoy):
    """Same press/pulse behavior as button1, mapped to vJoy button 6."""
    btn_id = 6
    global g_is_running, g_timer
    if event.is_pressed:
        vjoy[1].button(btn_id).is_pressed = True
        # Timer promotes a held press to pulsing after g_timeout seconds.
        g_timer = threading.Timer(g_timeout, lambda: pulse(vjoy, btn_id))
        g_timer.start()
    else:
        g_is_running = False
        if g_timer:
            g_timer.cancel()
        vjoy[1].button(btn_id).is_pressed = False
@t16000.button(4)
def button4(event, joy, vjoy):
    """Press/pulse vJoy button 6, but only while physical button 3 is held.

    NOTE(review): shares btn_id 6 with button2 — confirm that is intended.
    """
    btn_id = 6
    global g_is_running, g_timer
    # Chorded input: requires physical button 3 held when this press arrives.
    if event.is_pressed and joy[1].button(3).is_pressed:
        vjoy[1].button(btn_id).is_pressed = True
        g_timer = threading.Timer(g_timeout, lambda: pulse(vjoy, btn_id))
        g_timer.start()
    else:
        g_is_running = False
        if g_timer:
            g_timer.cancel()
        vjoy[1].button(btn_id).is_pressed = False
|
16,308 | 4209dfa47bc996bb304fc0354e14bfe74d597982 | # Generated by Django 3.0 on 2019-12-17 13:29
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0): adds the RentedProducts model."""

    dependencies = [
        ('products', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='RentedProducts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userFullName', models.CharField(max_length=75)),
                ('email', models.CharField(max_length=75)),
                ('address', models.TextField(max_length=200)),
                # Defaults to the day the row is created.
                ('date', models.DateField(default=datetime.date.today)),
                # Deleting a Product cascades to its rental records.
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Product')),
            ],
        ),
    ]
|
16,309 | 2373bf67a42cfe0eb567197577be11b730446eea | from __future__ import annotations
from abc import abstractmethod
from uuid import uuid4
from django.conf import settings
from django.core.validators import RegexValidator
from django.db import models
from django_extensions.db.models import TimeStampedModel
from dandiapi.api.storage import (
get_embargo_storage,
get_embargo_storage_prefix,
get_storage,
get_storage_prefix,
)
from .asset import AssetBlob, EmbargoedAssetBlob
from .dandiset import Dandiset
class BaseUpload(TimeStampedModel):
    """Abstract base model for a multipart object-store upload.

    Concrete subclasses contribute the ``blob`` file field, the
    ``dandiset`` relation, and the ``object_key`` layout.
    """

    # S3-style multipart ETag: 32 hex chars, optionally "-<part count>".
    ETAG_REGEX = r'[0-9a-f]{32}(-[1-9][0-9]*)?'

    class Meta:
        indexes = [models.Index(fields=['etag'])]
        abstract = True

    # This is the key used to generate the object key, and the primary identifier for the upload.
    upload_id = models.UUIDField(unique=True, default=uuid4, db_index=True)
    # Client-claimed ETag; nullable until/unless provided.
    etag = models.CharField(
        null=True,
        blank=True,
        max_length=40,
        validators=[RegexValidator(f'^{ETAG_REGEX}$')],
        db_index=True,
    )
    # This is the identifier the object store assigns to the multipart upload
    multipart_upload_id = models.CharField(max_length=128, unique=True, db_index=True)
    # Client-claimed size in bytes (compare with actual_size()).
    size = models.PositiveBigIntegerField()

    @staticmethod
    @abstractmethod
    def object_key(upload_id, *, dandiset: Dandiset):  # noqa: N805
        """Return the object-store key for this upload (subclass-specific)."""
        pass

    @classmethod
    def initialize_multipart_upload(cls, etag, size, dandiset: Dandiset):
        """Create an unsaved upload row and start the multipart upload.

        Returns ``(upload, payload)`` where ``payload`` carries the
        ``upload_id`` and the part descriptions the client must send.
        """
        upload_id = uuid4()
        object_key = cls.object_key(upload_id, dandiset=dandiset)
        multipart_initialization = cls.blob.field.storage.multipart_manager.initialize_upload(
            object_key, size
        )
        # The returned instance is NOT saved; callers persist it.
        upload = cls(
            upload_id=upload_id,
            blob=object_key,
            etag=etag,
            size=size,
            dandiset=dandiset,
            multipart_upload_id=multipart_initialization.upload_id,
        )
        return upload, {'upload_id': upload.upload_id, 'parts': multipart_initialization.parts}

    def object_key_exists(self):
        """True if the blob's key currently exists in the object store."""
        return self.blob.field.storage.exists(self.blob.name)

    def actual_size(self):
        """Size reported by the object store (vs. the client-claimed ``size``)."""
        return self.blob.field.storage.size(self.blob.name)

    def actual_etag(self):
        """ETag computed by the object store for the stored blob."""
        return self.blob.storage.etag_from_blob_name(self.blob.name)
class Upload(BaseUpload):
    """A public (non-embargoed) multipart upload."""

    blob = models.FileField(blank=True, storage=get_storage, upload_to=get_storage_prefix)
    dandiset = models.ForeignKey(Dandiset, related_name='uploads', on_delete=models.CASCADE)

    @staticmethod
    def object_key(upload_id, *, dandiset: Dandiset | None = None):
        """Key layout: <bucket prefix>blobs/<uuid[0:3]>/<uuid[3:6]>/<uuid>.

        ``dandiset`` is accepted for signature compatibility but unused.
        """
        upload_id = str(upload_id)
        return (
            f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
            f'blobs/{upload_id[0:3]}/{upload_id[3:6]}/{upload_id}'
        )

    def to_asset_blob(self) -> AssetBlob:
        """Convert this upload into an AssetBlob."""
        # Unsaved AssetBlob sharing this upload's id, file, etag and size.
        return AssetBlob(
            blob_id=self.upload_id,
            blob=self.blob,
            etag=self.etag,
            size=self.size,
        )
class EmbargoedUpload(BaseUpload):
    """A multipart upload stored in the embargoed bucket, keyed per dandiset."""

    blob = models.FileField(
        blank=True, storage=get_embargo_storage, upload_to=get_embargo_storage_prefix
    )
    dandiset = models.ForeignKey(
        Dandiset, related_name='embargoed_uploads', on_delete=models.CASCADE
    )

    @staticmethod
    def object_key(upload_id, *, dandiset: Dandiset):
        """Key layout: <embargo prefix><dandiset>/blobs/<uuid[0:3]>/<uuid[3:6]>/<uuid>."""
        upload_id = str(upload_id)
        return (
            f'{settings.DANDI_DANDISETS_EMBARGO_BUCKET_PREFIX}'
            f'{dandiset.identifier}/'
            f'blobs/{upload_id[0:3]}/{upload_id[3:6]}/{upload_id}'
        )

    def to_embargoed_asset_blob(self) -> EmbargoedAssetBlob:
        """Convert this upload into an AssetBlob."""
        # Unlike Upload.to_asset_blob, the embargoed blob keeps its dandiset.
        return EmbargoedAssetBlob(
            blob_id=self.upload_id,
            blob=self.blob,
            etag=self.etag,
            size=self.size,
            dandiset=self.dandiset,
        )
|
class Solution:
    def longestIncreasingPath(self, matrix: list[list[int]]) -> int:
        """Return the length of the longest strictly increasing 4-directional
        path in *matrix* (LeetCode 329).

        BUG FIX: the original annotated the parameter with ``List[List[int]]``
        without importing ``typing.List``, raising NameError when the class
        is defined; builtin generics are used instead.

        Memoized DFS: f[x][y] caches the longest path through (x, y), so each
        cell is solved once — O(n*m) time and space.
        """
        if not matrix or not matrix[0]:
            return 0
        g = matrix
        n, m = len(g), len(g[0])
        # f[x][y] == -1 means "not yet computed".
        f = [[-1] * m for _ in range(n)]
        dx = (-1, 0, 1, 0)
        dy = (0, -1, 0, 1)

        def dp(x, y):
            # Longest path ending at (x, y), built from smaller neighbors.
            if f[x][y] != -1:
                return f[x][y]
            f[x][y] = 1
            for i in range(4):
                a, b = x + dx[i], y + dy[i]
                if 0 <= a < n and 0 <= b < m and g[a][b] < g[x][y]:
                    f[x][y] = max(f[x][y], dp(a, b) + 1)
            return f[x][y]

        return max(dp(i, j) for i in range(n) for j in range(m))
|
16,311 | c246e68d3fac9d5609a8b3f9da138471c84bde30 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): SECRET_KEY is empty — Django will not start without one;
# presumably it is filled in locally or via deployment. Confirm.
SECRET_KEY = ''

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            #os.path.join(os.path.dirname(__file__), 'webapp').replace('\\', '/')
            # TODO: Make this generic
            # NOTE(review): absolute per-developer Windows paths; both are
            # listed so the project runs on either machine.
            'C:/Users/Hannah/Documents/git_repos/dynamic_bng/app/app/web-app',
            'C:/Users/hanspe/Documents/git_repos/dynamic_bng/app/app/web-app',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# DRF left on defaults; the commented block documents the pagination /
# parser / renderer options that were considered.
REST_FRAMEWORK = {
    # 'PAGINATE_BY': 10,
    # 'PAGINATE_BY_PARAM': 'page_size',
    # 'MAX_PAGINATE_BY': 100,
    # # DRF v3.1+
    # 'DEFAULT_PAGINATION_CLASS':
    #     'rest_framework_ember.pagination.PageNumberPagination',
    # # older than DRF v3.1
    # 'DEFAULT_PAGINATION_SERIALIZER_CLASS':
    #     'rest_framework_ember.pagination.PaginationSerializer',
    # 'DEFAULT_PARSER_CLASSES': (
    #     'rest_framework_ember.parsers.JSONParser',
    #     'rest_framework.parsers.FormParser',
    #     'rest_framework.parsers.MultiPartParser'
    # ),
    # 'DEFAULT_RENDERER_CLASSES': (
    #     'rest_framework_ember.renderers.JSONRenderer',
    #     'rest_framework.renderers.BrowsableAPIRenderer',
    # ),
    # 'DEFAULT_PERMISSION_CLASSES': [
    #     'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    # ]
}
# REST_EMBER_FORMAT_KEYS = True
# REST_EMBER_PLURALIZE_KEYS = True

WSGI_APPLICATION = 'app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# TODO: Revisit this
# Enable cookie-based sessions
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# TODO: Make this production-ready
# STATIC_ROOT='C:/Users/Hannah/Documents/git_repos/dynamic_bng/app/app/static/'
STATIC_URL = '/static/'
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
STATICFILES_DIRS = (
    os.path.join(SITE_ROOT, 'static/'),
)
|
16,312 | 1260431c408bc73ad7bfe7d639e4b13135120c74 | import collections
import nltk
import wordcount as wordcount
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import confusion_matrix, classification_report
from nltk.stem.snowball import SnowballStemmer
from collections import Counter
import pymongo
# Load the data: a random sample of 1000 reviews plus all restaurants.
client = pymongo.MongoClient('localhost', 27017)
db = client['yelp']
data = db["reviews"].find()
yelp_reviews = pd.DataFrame(list(data)).sample(1000)
data2 = db["restaurants"].find()
yelp_restaurant = pd.DataFrame(list(data2))

# Merge the two frames into one; drop the restaurant-level "stars" column
# so only the review-level "stars" survives the join.
yelp_restaurant = yelp_restaurant.drop("stars", axis=1)
yelp = pd.merge(yelp_reviews, yelp_restaurant, on="business_id", how='inner')
stemmer = SnowballStemmer("english")
yelp.shape
# NOTE(review): named text_length but measures the 'neighborhood' column,
# not 'text' — confirm whether 'text' was intended.
yelp['text_length'] = yelp['neighborhood'].apply(len)
yelp.head()
yelp.info()
yelp.describe()
''''#plt.show()
g = sns.FacetGrid(data=yelp, col='stars')
g.map(plt.hist, 'text_length', bins=150)
#plt.show()
sns.boxplot(x="stars", y="text_length", data=yelp)
#plt.show()
stars = yelp.groupby('stars').mean()
sns.heatmap(data=stars.corr(), annot=True)
#plt.show()'''

# Keep only the extreme ratings: 1 star (negative) and 5 stars (positive).
yelp_class = yelp[(yelp['stars'] == 1) | (yelp['stars'] == 5)]
positive = 0
negative = 0
for n in yelp_class['stars']:
    if n == 5:
        positive += 1
    elif n == 1:
        negative += 1
sizes = [positive, negative]
labels = ['positive', 'negative']
plt.pie(sizes, labels=labels, colors=['red', 'blue'], shadow=True, )
plt.title("positive and negative crowd ")
#plt.show()

# BUG FIX: the original rebound the *name* ``Counter`` to a Counter
# instance (``Counter = Counter(...)``), shadowing the imported class for
# the rest of the module; use a distinct variable instead.
text_counter = Counter(yelp_class['text'])
df = pd.DataFrame(columns=[yelp_class['text'], text_counter.most_common()])
df.head(10)
''''Prepei na ginei merge me to dataframe kai na mpei sta feature gia na prosthesoume ena akomi '''
# Features: concatenation of review text, address and city per row.
X = yelp_class[["text", 'address', 'city']].astype(str).sum(axis=1)
y = yelp_class['stars']
import string

def text_process(text):
    """Strip punctuation, then drop English stop words; return word list."""
    nopunc = ''.join(char for char in text if char not in string.punctuation)
    return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]

print("arxise tifidf")
bow_transformer = TfidfVectorizer(analyzer=text_process).fit(X)
print("telow vectorizer")
len(bow_transformer.vocabulary_)
X = bow_transformer.transform(X)
print("telos tifidf vectorizer")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
print("split ok")

# Naive Bayes baseline.
nb = MultinomialNB()
nb.fit(X_train, y_train)
preds = nb.predict(X_test)
print(confusion_matrix(y_test, preds))
print('\n')
print(classification_report(y_test, preds))

print("desicion treeeeeee")
classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

print("RandomForest modelll")
rf = RandomForestRegressor(n_estimators=1000, random_state=42)
rf.fit(X_train, y_train)
# NOTE(review): this re-scores the *decision tree*, not ``rf`` — kept
# as-is because RandomForestRegressor yields continuous predictions that
# confusion_matrix cannot score; the section likely wants
# RandomForestClassifier. TODO confirm and fix.
y_pred = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

print("lenear regression")
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, y_train)
# NOTE(review): same issue — the metrics below re-score the decision tree
# while the scatter plot uses the linear model's predictions.
y_pred = classifier.predict(X_test)
predictions = lm.predict(X_test)
plt.scatter(y_test, predictions)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
plt.xlabel('Y Test')
plt.ylabel('Predicted Y')
|
16,313 | 1196d182a98a67ee8064c7932fb3b0adf3e41d92 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import actionlib
import tf_conversions
import math
from sensor_msgs.msg import Joy
from move_base_msgs.msg import MoveBaseActionGoal, MoveBaseActionFeedback, MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import Pose
import actionlib_msgs

############# node initialization ####################
rospy.init_node('auto_navigation')
# Blocks until the move_base action server is available.
client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
client.wait_for_server()

def send_to_goal(goal_pose, absolute = True):
    """Send *goal_pose* to move_base (Python 2 file).

    absolute=True interprets the pose in the 'map' frame, otherwise in
    the robot-relative 'base_link' frame.
    """
    global client
    #ori = tf_conversions.transformations.quaternion_from_euler(0, 0, math.pi)
    goal = MoveBaseGoal()
    goal.target_pose.pose = goal_pose
    if absolute:
        goal.target_pose.header.frame_id = 'map'
    else:
        goal.target_pose.header.frame_id = 'base_link'
    goal.target_pose.header.stamp = rospy.Time.now()
    print goal
    client.send_goal(goal, done_cb, active_cb, feedback_cb)

def done_cb(state, result):
    # Completion callback: report how the goal finished.
    if client.get_state() == actionlib_msgs.msg.GoalStatus.SUCCEEDED:
        #rospy.loginfo("Successfully reached goal")
        print "Successfully reached goal"
    elif client.get_state() == actionlib_msgs.msg.GoalStatus.PREEMPTED:
        #rospy.loginfo("Goal preempted by user!")
        print "Goal preempted by user!"
    else:
        #rospy.logerror("Other execution status of move_base actionclient")
        print "Other execution status of move_base actionclient"
        print client.get_state()

def active_cb():
    # Goal became active; nothing to do.
    pass

def feedback_cb(feedback):
    # Periodic feedback from move_base; unused.
    pass

def joy_cb(msg):
    """Cancel the current goal when joystick button index 7 is pressed."""
    global client
    if msg.buttons[7]:
        print '------cancelling goal-------'
        client.cancel_goal()

#### definition of publisher/subscriber and services ###
rospy.Subscriber('/joy', Joy, joy_cb)

############# main program #############################
# Single absolute goal at (0, -1, 0) in the map frame, yaw = -pi/2.
goal_pose = Pose()
goal_pose.position.x = 0
goal_pose.position.y = -1
goal_pose.position.z = 0
ori = tf_conversions.transformations.quaternion_from_euler(0, 0, -math.pi/2)
goal_pose.orientation.x = ori[0]
goal_pose.orientation.y = ori[1]
goal_pose.orientation.z = ori[2]
goal_pose.orientation.w = ori[3]
send_to_goal(goal_pose, True)

#--------------endless loop till shut down -------------#
rate = rospy.Rate(10)
while not rospy.is_shutdown():
    #print 'running'
    rate.sleep()
|
16,314 | 31da7d83d0424717fa7e75ac6dae28b295b24086 | import ipaddress
# Demo of the stdlib ipaddress module: parse an address and a network,
# do address arithmetic, and enumerate the network.
# BUG FIX: the original's final statement carried a stray trailing token
# after ``print(n)``, which is a syntax error.
ip = '192.168.0.1'
rede = '192.168.0.0/24'
endereco = ipaddress.ip_address(ip)
network = ipaddress.ip_network(rede)
print(endereco)
print(network)
# Address arithmetic: 1500 addresses past .0.1 -> 192.168.5.221
print(endereco + 1500)
print("#" * 60)
# Enumerate every address in the /24 (256 of them).
for n in network:
    print(n)
class StopWordRemover(object):
    """Removes stop words (as defined by *dictionary*) from text.

    ``dictionary`` must expose ``contains(word) -> bool``.
    """

    def __init__(self, dictionary):
        self.dictionary = dictionary

    def get_dictionary(self):
        return self.dictionary

    def remove(self, text):
        """Remove stop words.

        BUG FIX: the original removed elements from ``words`` while
        iterating over it, which skips the element following each
        removal — consecutive stop words were left in the output.
        Building a filtered list avoids the mutation-during-iteration.
        """
        words = text.split(' ')
        kept = [word for word in words if not self.dictionary.contains(word)]
        return ' '.join(kept)
|
16,316 | 651358028ce154dfae12d639d33e3b49bf148e83 | # Generated by Django 3.1.6 on 2021-05-24 04:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.6): adds Tip; removes donation/booking-fee
    models and two ``Submit`` fields."""

    dependencies = [
        ('MyWeb', '0006_auto_20210517_1304'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tip',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('AmountofDonation', models.IntegerField(null=True)),
                # M2M to Booking; name suggests it tracks which customers tipped.
                ('costumerId', models.ManyToManyField(to='MyWeb.Booking')),
            ],
        ),
        migrations.RemoveField(
            model_name='donatetoanimals',
            name='costumerId',
        ),
        migrations.RemoveField(
            model_name='bookingdetails',
            name='Submit',
        ),
        migrations.RemoveField(
            model_name='ratings',
            name='Submit',
        ),
        migrations.DeleteModel(
            name='BookingFee',
        ),
        migrations.DeleteModel(
            name='DonateToAnimals',
        ),
    ]
|
16,317 | 4d4677833e2be093bc09fef2c86330a88e9e69f3 | # coding=utf-8
import os
import glob
import csv
import time
from urllib.parse import urljoin, urlencode, quote_plus
import spider
import utils
if __name__ == "__main__":
csv_file = open("gametree.csv", "w", encoding="utf-8")
csv_writer = csv.writer(csv_file)
csv_writer.writerow(
["gamename", "gametype", "follower_count", "topic_count", "tiebaurl"])
total_count = 0
time_start = time.time()
for infile in glob.glob("./htmlcache/*/*.html"):
folder, fullname = os.path.split(infile)
parentfolder, subfolder = os.path.split(folder)
filename, extension = os.path.splitext(fullname)
try:
with open(infile, 'r', encoding="utf-8") as f:
game_tieba_html = f.read()
followers, topics = utils.get_pop_of_tieba(game_tieba_html)
tieba_url = spider.TIEBA_DETAIL_URL.format(
quote_plus(filename))
csv_writer.writerow(
[filename, subfolder, str(followers), str(topics), tieba_url])
total_count += 1
print('parsed ', total_count, ' tieba: ', filename, ", type: ", subfolder,
", followers: ", str(followers), ", topics: ", str(
topics), ", remaining: ",
str((time.time() - time_start)/(total_count+1)
* (60000-total_count)/60) + "min")
if total_count % 20 == 0:
csv_file.flush()
except Exception as e:
print(e)
continue
csv_file.close()
|
16,318 | 90cdcfbd5b87010288ea4f7ca3e5333bfa3a53f1 | """
173. Binary Search Tree Iterator
"""
class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class BSTIterator:
    """In-order iterator over a BST using an explicit stack.

    The stack holds the chain of not-yet-visited ancestors; its top is
    always the next smallest node, so next() is amortized O(1).
    """

    def __init__(self, root: TreeNode):
        self.stack = []
        self._push_left_chain(root)

    def _push_left_chain(self, node):
        # Descend along left children, stacking every node passed.
        while node:
            self.stack.append(node)
            node = node.left

    def next(self) -> int:
        """
        @return the next smallest number
        """
        node = self.stack.pop()
        self._push_left_chain(node.right)
        return node.val

    def hasNext(self) -> bool:
        """
        @return whether we have a next smallest number
        """
        return bool(self.stack)
|
16,319 | 42894b20a30101258c0c73f702b75506575bf3c4 | from DFTtoolbox.elk import postproc
import os
# Parameters =======================================
run_task = [1, 2, 3, 4, 5, 6]
wkdir = os.path.dirname(os.path.realpath(__file__))
# Raw strings: '\G' in a plain literal is an invalid escape sequence
# (DeprecationWarning on modern Python); the rendered text is unchanged.
klabel = [r'$\Gamma$', 'X', 'W', 'K', r'$\Gamma$', 'L', 'U', 'W', 'L', 'K']
Ebound = [-5, 5]
state_grp = [['1:1/1/a/a'], ['2:2/2/a/a']]
# Main =============================================
print(wkdir)
pp = postproc(wkdir)
for task in run_task:
    # BUG FIX: the original compared ints with ``is`` (identity), which
    # relies on CPython's small-int caching and raises a SyntaxWarning on
    # Python 3.8+; value equality (==) is the correct comparison.
    if task == 1:
        pp.band_read()
    elif task == 2:
        pp.band_plot(klabel, Ebound)
    elif task == 3:
        pp.fatband_read()
    elif task == 4:
        pp.fatband_plot(state_grp, klabel, Ebound)
    elif task == 5:
        pp.pdos_read()
    elif task == 6:
        pp.pdos_plot(state_grp, Ebound)
16,320 | fef24d3cb5fe9460bd107d3a9ca0b5dcdc137454 | import plugins.qq_namelist
import plugins.show_group_id
import plugins.for_fun
import plugins.alipay_redpack
import plugins.recall
import plugins._002_water_meter_control
import plugins._000_admins
import plugins._001_group_invite
import plugins._1002_command
import plugins._1001_filter_old_message
import plugins._1005_drive_mode
import plugins._1006_water_meter_filter
import plugins._1100_text_forward
import plugins._1101_other_qq_group_events
|
class Solution(object):
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int
        """
        # Two-pointer in-place run-length encoding: ``read`` scans runs of
        # equal characters, ``write`` marks where their encoding goes.
        write = 0
        read = 0
        while read < len(chars):
            ch = chars[read]
            run = 0
            while read < len(chars) and chars[read] == ch:
                read += 1
                run += 1
            # A run of one is written as the bare character (no count).
            encoded = ch if run == 1 else ch + str(run)
            chars[write:write + len(encoded)] = encoded
            write += len(encoded)
        return write
# Quick manual check (Python 2 print statement, matching the file).
S=Solution()
print S.compress(["a","a","b"])
|
16,322 | 6a00e49197df9648edf21f0628baaa45afd03a4a | import scrapy
class ShiyanlouGithubSpider(scrapy.Spider):
    """Crawl the first four pages of shiyanlou's GitHub repository list."""

    name = "shiyanlou_github"

    @property
    def start_urls(self):
        # Pages 1-4 of the account's "repositories" tab.
        url_tmpl = "https://github.com/shiyanlou?page={}&tab=repositories"
        return (url_tmpl.format(i) for i in range(1, 5))

    def parse(self, response):
        """Yield the repo name and last-update timestamp for each entry."""
        for repos in response.css('li.col-12'):
            yield {
                # BUG FIX: the regex was a non-raw string; '\w' in a plain
                # literal is an invalid escape sequence (DeprecationWarning
                # on modern Python). A raw string keeps the pattern intact.
                "name": repos.xpath('.//a[contains(@itemprop, "name codeRepository")]/text()').re_first(r'[^\w]*(\w*)'),
                "update_time": repos.xpath('.//relative-time/@datetime').extract_first()
            }
|
16,323 | a4364dd327d3ad1b3c5179d031ec2b1ba4a1fd1a | # Inspired by http://nbviewer.jupyter.org/github/BVLC/caffe/blob/master/examples/00-classification.ipynb
import numpy as np
import os
import caffe
import dlib
import glob
from skimage import io
import scipy.misc
proto = 'age.prototxt'
model = 'dex_imdb_wiki.caffemodel'
image = 'test.JPG'
cur_path = os.path.dirname(os.path.realpath(image))
if cur_path[-1] == '/': cur_path = cur_path[:-1]
caffe.set_mode_cpu()
print("Starting Basic Classifier")
net = caffe.Classifier(proto, model, caffe.TEST)
print("Setting up transform")
mu = np.array([111.47710628057908, 94.57594005191616, 84.77318938524172]) # IMDB-Wiki Mean
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension
transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR
print("Getting faces")
face_path = ''
faces = glob.glob(os.path.join(face_path + "*.jpg")) + glob.glob(os.path.join(face_path + "*.jpeg"))
# set the size of the input (we can skip this if we're happy
# with the default; we can also change it later, e.g., for different batch sizes)
net.blobs['data'].reshape(len(faces), # batch size
3, # 3-channel (BGR) images
224, 224) # image size is 227x227
print("Preprocessing Image")
predictor_path = "shape_predictor_68_face_landmarks.dat" # https://github.com/davisking/dlib
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
def score_face(file):
    """Detect the first face in *file* and write its crop alongside the original.

    Returns True when a face was found and a ``*_cropped.*`` image was saved,
    False when no face was detected.
    """
    print("Getting Face for: {}".format(file))
    img = io.imread(file)
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    # Only the first detection is used: the loop breaks immediately,
    # leaving ``d`` bound to dets[0] (guarded below by len(dets) > 0).
    for k, d in enumerate(dets):
        break
    if len(dets) > 0:
        margin = 0.4
        arr = [d.left(), d.right(), d.top(), d.bottom()]
        asize = [arr[1] - arr[0], arr[3] - arr[2]]
        top = int(arr[2]-(asize[1]*margin))
        bottom = int(arr[3]+(asize[1]*margin))
        left = int(arr[0]-(asize[0]*margin))
        right = int(arr[1]+(asize[0]*margin))
        # NOTE(review): these four clamps undo the margin entirely — e.g.
        # ``top`` is always < arr[2] after subtracting a positive margin, so
        # it is reset to the raw detection edge (likewise for the other three
        # sides).  The crop therefore equals the detector's box; confirm
        # whether clamping to the image bounds (0 / img.shape) was intended.
        if top < arr[2]: top = arr[2]
        if bottom > arr[3]: bottom = arr[3]
        if left < arr[0]: left = arr[0]
        if right > arr[1]: right = arr[1]
        image_top = img[top:bottom]
        image_left = np.array([a[left:right] for a in image_top])
        # NOTE(review): the second replace uses 'jpeg' without a leading dot,
        # unlike the '.jpeg' used by the caller when re-loading the crop —
        # verify the produced and expected filenames actually match.
        scipy.misc.toimage(image_left, cmin=0.0, cmax=255.0).save(file.replace('.jpg','_cropped.jpg').replace('jpeg','_cropped.jpeg'))
        return True
    else:
        return False
# Crop each face, feed the crops through the network, and print one
# predicted age per input file (0 for files where no face was found).
nums = []
for i in range(len(faces)):
    # NOTE(review): cur_path has its trailing '/' stripped above, so
    # ``cur_path + faces[i]`` concatenates directory and filename without a
    # separator — confirm these paths resolve as intended.
    check = score_face(cur_path + faces[i])
    if check:
        image = caffe.io.load_image(cur_path + faces[i].replace('.jpg','_cropped.jpg').replace('.jpeg','_cropped.jpeg'))
        transformed_image = transformer.preprocess('data', image)
        os.remove(faces[i].replace('.jpg','_cropped.jpg').replace('.jpeg','_cropped.jpeg'))
        net.blobs['data'].data[i] = transformed_image
        nums.append(i)
output = net.forward()
results = []
# Files with no detected face keep a placeholder prediction of 0.
for i in range(len(faces)):
    if i in nums:
        results.append(output["prob"][i].argmax())
    else:
        results.append(0)
# Bugfix: py2-only ``print`` statement -> call form; dict(zip(...)) builds
# the same mapping as the original pair-list comprehension.
print(dict(zip(faces, results)))
16,324 | 044b6cc70a2492c5e374219e3d0268cd76215431 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module containing swap strategies for blocks of commuting gates.
Swap routing is, in general, a hard problem. However, this problem is much simpler if
the gates commute. Many variational algorithms such as QAOA are built with blocks of
commuting gates. Transpiling such circuits with a general purpose SWAP router typically
yields sub optimal results or is costly to run. This module introduces a framework to
transpile blocks of commuting gates by applying layers of a predefined swap strategy.
Further details can also be found here: https://arxiv.org/abs/2202.03459.
"""
from .swap_strategy import SwapStrategy
from .pauli_2q_evolution_commutation import FindCommutingPauliEvolutions
from .commuting_2q_gate_router import Commuting2qGateRouter
|
16,325 | f2670a169957ce6e5c81009f24555c9bfb37ef17 | # Generated by Django 3.1 on 2020-10-22 02:07
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds ``Productor.correo`` and tightens
    validation on existing ``viveros`` fields. Do not hand-edit operations."""

    dependencies = [
        ('viveros', '0006_remove_productocontrol_fecha_aplicado'),
    ]

    operations = [
        # New e-mail field with a placeholder default for existing rows.
        migrations.AddField(
            model_name='productor',
            name='correo',
            field=models.EmailField(default='correo@ejemplo.com', max_length=254),
        ),
        # Restrict the control-product value to non-negative integers.
        migrations.AlterField(
            model_name='productocontrol',
            name='valor',
            field=models.PositiveIntegerField(help_text='Valor del producto de control'),
        ),
        # Enforce a 3-letter minimum on both surname fields.
        migrations.AlterField(
            model_name='productor',
            name='apellido_1',
            field=models.CharField(max_length=20, validators=[django.core.validators.MinLengthValidator(3, 'El primer apellido debe tener al menos 3 letras')]),
        ),
        migrations.AlterField(
            model_name='productor',
            name='apellido_2',
            field=models.CharField(max_length=20, validators=[django.core.validators.MinLengthValidator(3, 'El segundo apellido debe tener al menos 3 letras')]),
        ),
    ]
|
16,326 | c0ebcc55405059b3043d2dee634f1d3c0fa1e07a | from flask import Flask, render_template
from flask_socketio import SocketIO
import time
import temp
import random
import json
from threading import Thread, Event
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
thread = Thread()
thread_stop_event = Event()
class RandomThread(Thread):
    """Worker thread that broadcasts a random 5-digit number over Socket.IO
    (namespace ``/test``) every 5 seconds until ``thread_stop_event`` is set."""

    def __init__(self):
        self.delay = 1  # NOTE(review): unused — the loop sleeps 5s; confirm intent
        super(RandomThread, self).__init__()

    def randomNumberGenerator(self):
        """
        Generate a random number every 1 second and emit to a socketio instance (broadcast)
        Ideally to be run in a separate thread?
        """
        # infinite loop of magical random numbers
        print("Making random numbers")
        # is_set() is the modern spelling; the camelCase isSet() alias is
        # deprecated and dropped in recent Python versions.
        while not thread_stop_event.is_set():
            number = random.randint(10000, 99999)
            print(number)
            socketio.emit('newQrCode', str(number), namespace='/test')
            time.sleep(5)

    def run(self):
        self.randomNumberGenerator()
@app.route('/')
def sessions():
    """Return the full race list as a JSON string."""
    return (str(json.dumps({"arr":temp.readRace()})))


@app.route('/getRace', methods=['GET'])
def getRace():
    """GET variant of '/': return the full race list as a JSON string."""
    # Bugfix: the original attached the decorator to nothing
    # (``@app.route(...):`` with no ``def``), which is a SyntaxError —
    # a view function is required.
    return (str(json.dumps({"arr":temp.readRace()})))


@app.route('/getRaceDetail', methods=['POST'])
def getRaceDetail():
    """POST endpoint that receives a race selector in the request body."""
    from flask import request  # local import: 'request' is not imported at module top
    # NOTE(review): the original read ``request.get("data")``, which is not a
    # Flask API; reading the JSON body is the closest working equivalent —
    # confirm the client's actual payload shape.
    race = (request.get_json(silent=True) or {}).get("data")
    return (str(json.dumps({"arr":temp.readRace()})))
def messageReceived(methods=('GET', 'POST')):
    """Socket.IO acknowledgement callback; just logs receipt.

    The ``methods`` argument is never read; an immutable tuple replaces the
    original mutable list default (shared-mutable-default pitfall).
    """
    print('message was received!!!')
@socketio.on('connect', namespace='/test')
def test_connect():
    """On client connect, start the broadcaster thread if it is not running."""
    # need visibility of the global thread object
    global thread
    print('Client connected')
    # Start the random number generator thread only if the thread has not
    # been started before.  Bugfix: is_alive() replaces isAlive(), which was
    # removed in Python 3.9 and raised AttributeError there.
    if not thread.is_alive():
        print("Starting Thread")
        thread = RandomThread()
        thread.start()
@socketio.on('disconnect', namespace='/test')
def test_disconnect():
print('Client disconnected')
temp.dropDB()
temp.insert()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
|
16,327 | 972e0ffbd85663af4089feb682f8e0f5537fcffc | """test stt module."""
import random
import string
try: # py3
from unittest import mock
except ImportError: # py2
import mock
import pytest
skip_because_stt_removed = pytest.mark.skip(reason='stt module removed.')
def get_random_string(exclude_list=None):
    """Return a random 10-character lowercase ASCII string.

    Re-draws until the result is not contained in *exclude_list*.
    (The original docstring mentioned genders — a copy-paste leftover.)
    """
    exclude_list = [] if exclude_list is None else exclude_list
    length = 10
    # Bugfix: string.ascii_lowercase exists on both Python 2 and 3;
    # string.lowercase was Python-2-only and raised AttributeError on 3.
    result = ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
    while result in exclude_list:
        result = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(length)
        )
    return result
# taken from http://stackoverflow.com/a/8658332
# mocking the import so mock_profile could be loaded.
# Store original __import__
orig_import = __import__
mock_name = get_random_string()
default_profile_data = {
'va_name': mock.Mock(),
'name': mock_name,
'stt': mock.Mock()
}
mock_profile = mock.Mock()
mock_profile.data = default_profile_data
mock_tts = mock.Mock()
# mock_import side effect
def import_mock(name, *args):
"""import mock side effect."""
names = ('profile_populator', 'profile', 'actions_db', 'pyvona')
for n in names:
if name == 'profile':
return mock_profile
elif n == name:
return mock.Mock()
return orig_import(name, *args)
@skip_because_stt_removed
def test_run():
"""test run."""
with mock.patch('__builtin__.__import__', side_effect=import_mock), \
mock.patch('melissa.stt.sr') as mock_sr, \
mock.patch('melissa.stt.tts') as mock_tts:
from melissa.stt import stt
stt()
mock_tts.assert_not_called()
mock_sr.Recognizer.assert_called_once_with()
@skip_because_stt_removed
def test_run_google_stt_with_error():
"""test run with google stt with error.
when this test run,
an error will be raised when trying to print the speech_text.
"""
profile_data = default_profile_data
profile_data['stt'] = 'google'
mock_profile.data = profile_data
with mock.patch('__builtin__.__import__', side_effect=import_mock),\
mock.patch('melissa.stt.sr') as mock_sr, \
mock.patch('melissa.stt.tts'):
from melissa.stt import stt
with pytest.raises(TypeError):
stt()
assert len(mock_sr.mock_calls) == 8
assert mock_sr.Microphone.call_count == 1
mock_mic_enter = mock_sr.Microphone().__enter__()
mock_recognizer_listen = mock_sr.Recognizer().listen()
sr_calls = [
mock.call.Recognizer(),
mock.call.Microphone(),
mock.call.Microphone().__enter__(),
mock.call.Recognizer().listen(mock_mic_enter),
mock.call.Microphone().__exit__(None, None, None),
mock.call.Recognizer()
.recognize_google(mock_recognizer_listen),
mock.call.Recognizer()
.recognize_google().lower(),
mock.call.Recognizer()
.recognize_google().lower().replace("'", ''),
]
for call in sr_calls:
assert call in mock_sr.mock_calls
@skip_because_stt_removed
def test_run_google_stt():
"""test run with google stt.
At the first run, it will run normally.
On the next run an error will be raised to stop the loop.
"""
profile_data = default_profile_data
profile_data['stt'] = 'google'
va_name = get_random_string()
profile_data['va_name'] = va_name
mock_profile.data = profile_data
random_audio_text = get_random_string()
with mock.patch('__builtin__.__import__', side_effect=import_mock),\
mock.patch('melissa.stt.sr') as mock_sr, \
mock.patch('melissa.stt.tts'):
from melissa.stt import stt
raised_error = KeyboardInterrupt
mock_sr.Recognizer.return_value.listen.side_effect = [
mock.Mock(), raised_error()]
mock_sr.Recognizer.return_value.recognize_google.return_value = \
random_audio_text
stt()
@skip_because_stt_removed
def test_run_sphinx_stt():
"""test run with sphinx stt.
At the first run, it will run normally.
On the next run an error will be raised to stop the loop.
"""
profile_data = default_profile_data
profile_data['stt'] = 'sphinx'
mock_flag_modeldir = mock_flag_hmm = mock.Mock()
mock_flag_lm = mock_flag_dic = mock.Mock()
profile_data['pocketsphinx'] = {
'modeldir': mock_flag_modeldir,
'hmm': mock_flag_hmm,
'lm': mock_flag_lm,
'dic': mock_flag_dic,
}
mock_profile.data = profile_data
mock_open = mock.mock_open()
with mock.patch('__builtin__.__import__', side_effect=import_mock),\
mock.patch('melissa.stt.sr') as mock_sr, \
mock.patch('melissa.stt.Decoder') as mock_decoder, \
mock.patch('melissa.stt.open', mock_open, create=True), \
mock.patch('melissa.stt.tts'):
from melissa.stt import stt
raised_error = ValueError
mock_audio = mock.Mock()
mock_sr.Recognizer.return_value.listen.side_effect = [
mock_audio, raised_error()]
stt()
mock_audio.get_wav_data.assert_called_once_with()
assert len(mock_sr.mock_calls) == 5
assert len(mock_open.mock_calls) == 7
mock_open_data = [
mock.call('recording.wav', 'wb'),
mock.call().__enter__(),
mock.call().write(mock_audio.get_wav_data()),
mock.call().__exit__(None, None, None),
mock.call('recording.wav', 'rb'),
mock.call().seek(44),
mock.call().read()
]
for item in mock_open_data:
assert item in mock_open.mock_calls
mock_decoder_config = mock_decoder.default_profile_data()
mock_decoder_data = [
mock.call.default_config(),
mock.call.default_config().set_string('-hmm', mock_flag_hmm),
mock.call.default_config().set_string('-lm', mock_flag_lm),
mock.call.default_config().set_string('-dict', mock_flag_dic),
mock.call.default_config().set_string('-logfn', '/dev/null'),
mock.call(mock_decoder_config()),
mock.call().start_utt(),
mock.call().process_raw('', False, True),
mock.call().end_utt(),
mock.call().hyp(),
# mock.call().hyp().hypstr.__radd__().__add__("'"),
# mock.call().hyp().hypstr.__radd__().__add__().__str__(),
mock.call().hyp().hypstr.lower(),
mock.call().hyp().hypstr.lower().replace("'", ''),
]
for item in mock_decoder_data:
assert item in mock_decoder.mock_calls
@skip_because_stt_removed
def test_run_keyboard_stt():
"""test run with keyboard stt.
At the first run, it will run normally.
On the next run an error will be raised to stop the loop.
"""
profile_data = default_profile_data
profile_data['stt'] = 'keyboard'
with mock.patch('__builtin__.__import__', side_effect=import_mock),\
mock.patch('melissa.stt.raw_input') as mock_input, \
mock.patch('melissa.stt.tts'):
from melissa.stt import stt
mock_text = mock.Mock()
raised_error = ValueError
mock_input.side_effect = [mock_text, raised_error()]
stt()
assert mock_input.call_count == 1
assert mock.call('Write something: ') in mock_input.mock_calls
@skip_because_stt_removed
def test_run_telegram_stt_wrong_token():
"""test run with telegram stt with wrong token."""
profile_data = default_profile_data
profile_data['stt'] = 'telegram'
wrong_token = 'xxxx'
profile_data['telegram_token'] = wrong_token
with mock.patch('__builtin__.__import__', side_effect=import_mock),\
mock.patch('melissa.stt.tts') as mock_tts:
from melissa.stt import stt
with pytest.raises(SystemExit):
stt()
mock_tts_call = (
'Please enter a Telegram token or configure a different STT'
' in the profile.json file.')
mock_tts.assert_called_with(mock_tts_call)
assert mock_tts.call_count == 1
@skip_because_stt_removed
def test_run_telegram_stt():
"""test run with telegram stt with wrong token."""
profile_data = default_profile_data
profile_data['stt'] = 'telegram'
mock_token = mock.Mock()
profile_data['telegram_token'] = mock_token
with mock.patch('__builtin__.__import__', side_effect=import_mock),\
mock.patch('melissa.stt.tts'), \
mock.patch('melissa.stt.time') as mock_time, \
mock.patch('melissa.stt.telepot') as mock_telepot:
raised_error = KeyboardInterrupt
mock_time.sleep.side_effect = [mock.Mock(), raised_error()]
from melissa.stt import stt
with pytest.raises(raised_error):
stt()
assert len(mock_telepot.mock_calls) == 2
mock_telepot.Bot.assert_called_with(mock_token)
assert mock_telepot.Bot.return_value.notifyOnMessage.called
assert mock_time.sleep.call_count == 2
mock_time.sleep.called_with(10)
|
16,328 | b8aafefdced260881554266d3ad1748b0a02f086 | # -*- coding: utf-8 -*-
# Original Source by ssml-builder(https://github.com/Reverseblade/ssml-builder)
import re
class Speech:
    """Builder for Kakao-flavoured SSML strings.

    Each mutator appends a tagged fragment to ``self.speech`` and returns
    ``self`` so calls can be chained; pass ``is_nested=True`` to get the
    fragment back as a plain string (for embedding inside another tag)
    instead of appending it.
    """

    # interpret-as values usable on their own:
    #   spell-out      - read an English word letter by letter
    #   digits         - read a number digit by digit
    #   telephone      - read as a phone number
    #   kakao:none     - protect the literal form during kakao:effect tone
    #                    processing and adjust attached particles naturally
    #   kakao:score    - read as a score (e.g. 3:1 -> "3 to 1")
    #   kakao:vocative - apply Korean vocative-particle handling
    # interpret-as values that REQUIRE a format attribute:
    #   date           - read as a date
    #   time           - read as a time
    #   kakao:number   - numeric expression, native-Korean or Sino-Korean style
    VALID_INTERPRET_AS = ( 'spell-out', 'digits', 'telephone',
                           'kakao:none', 'kakao:score', 'kakao:vocative',
                           'date', 'time', 'kakao:number' )

    VALID_PROSODY_ATTRIBUTES = {
        # rate: slow=0.9 / medium=1.0 (default) / fast=1.1
        # (within the SSML standard's +50% / -33.3% bounds)
        'rate': ('slow', 'medium', 'fast'),
        # volume: soft=0.7 / medium=1.0 (default) / loud=1.4
        # (within the SSML standard's +4.08dB upper bound)
        'volume': ('soft', 'medium', 'loud')
    }

    # WOMAN_READ_CALM     - calm female narration (default)
    # MAN_READ_CALM       - calm male narration
    # WOMAN_DIALOG_BRIGHT - bright female dialogue
    # MAN_DIALOG_BRIGHT   - bright male dialogue
    VALID_VOICE_NAMES = ('WOMAN_READ_CALM', 'MAN_READ_CALM', 'WOMAN_DIALOG_BRIGHT', 'MAN_DIALOG_BRIGHT')

    def __init__(self):
        self.speech = ""

    def speak(self):
        """Return the accumulated speech wrapped in the <speak> root tag."""
        return '<speak>{}</speak>'.format(self.speech)

    def add_text(self, value):
        """Append raw, untagged text."""
        self.speech += value
        return self

    def _emit(self, ssml, is_nested):
        # Shared tail for every tag builder: hand the fragment back when
        # nesting, otherwise append it and allow chaining.
        if is_nested:
            return ssml
        self.speech += ssml
        return self

    def say_as(self, value, interpret_as, is_nested=False):
        """<say-as>: read *value* according to *interpret_as*.

        :raises ValueError: when *interpret_as* is not a known value.
        """
        if interpret_as not in self.VALID_INTERPRET_AS:
            raise ValueError('The interpret-as provided to say_as is not valid')
        ssml = '<say-as interpret-as="{interpret_as}">' \
               '{value}</say-as>'.format(interpret_as=interpret_as, value=value)
        return self._emit(ssml, is_nested)

    def prosody(self, value, rate='medium', volume='medium', is_nested=False):
        """<prosody>: speak *value* at the given *rate* and *volume*.

        *rate* may also be a percentage string such as ``"50%"``.

        :raises ValueError: when *rate* or *volume* is not a valid value.
        """
        if rate not in self.VALID_PROSODY_ATTRIBUTES['rate']:
            if re.match(r'^\d+%$', rate) is None:
                raise ValueError('The rate provided to prosody is not valid')
        if volume not in self.VALID_PROSODY_ATTRIBUTES['volume']:
            raise ValueError('The volume provided to prosody is not valid')
        ssml = '<prosody rate="{rate}" volume="{volume}">' \
               '{value}</prosody>'.format(rate=rate, volume=volume, value=value)
        return self._emit(ssml, is_nested)

    def sub(self, value, alias, is_nested=False):
        """<sub>: display *value* but speak *alias*."""
        ssml = '<sub alias="{}">{}</sub>'.format(alias, value)
        return self._emit(ssml, is_nested)

    def voice(self, value, name, is_nested=False):
        """<voice>: speak *value* with the named voice.

        :raises ValueError: when *name* is not in ``VALID_VOICE_NAMES``.
        """
        if name not in self.VALID_VOICE_NAMES:
            raise ValueError('The name provided to voice is not valid')
        ssml = '<voice name="{}">{}</voice>'.format(name, value)
        # Fix: the original re-formatted the tag a second time when appending
        # instead of reusing ``ssml`` — same output, but the duplicated
        # template could silently drift from the nested one.
        return self._emit(ssml, is_nested)

    def pause(self, time, is_nested=False):
        """<break>: insert a pause of the given duration (e.g. ``"500ms"``)."""
        ssml = '<break time="{}"/>'.format(time)
        return self._emit(ssml, is_nested)

    # kakao:effect tone=friendly
    def friendly(self, value, is_nested=False):
        """kakao:effect: render *value* in a friendly tone."""
        ssml = '<kakao:effect tone="friendly">{}</kakao:effect>'.format(value)
        return self._emit(ssml, is_nested)

    def audio(self, src, is_nested=False):
        """<audio>: embed the audio file at *src*."""
        ssml = '<audio src="{}" />'.format(src)
        return self._emit(ssml, is_nested)

    def escape(self):
        """
        escapes any special characters that will cause SSML to be invalid
        """
        # TODO: never implemented (same in the original); callers currently
        # must pre-escape '&', '<' and '>' themselves.
        pass
|
16,329 | 7b0675d7340de0a5630139932c19ad00eb4ff5d1 | # Generated by Django 2.2.5 on 2019-09-13 11:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('perfis', '0002_auto_20190913_1141'),
('publicacoes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='comentario',
options={'verbose_name': 'Comentário', 'verbose_name_plural': 'Comentários'},
),
migrations.AddField(
model_name='tweet',
name='likes',
field=models.ManyToManyField(related_name='tweets_liked', to='perfis.Perfil'),
),
migrations.AlterField(
model_name='tweet',
name='autor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tweets', to='perfis.Perfil'),
),
]
|
16,330 | cc474d3c5ad3552a4e8ea405318ecf71e4f43380 | # coding: utf-8
from common.constDefine import *
from protocol import game_pb2
from utils.eventUtils import EventUtils
from utils.timerUtils import TimerUtils
class Judge:
    """Game referee for a werewolf room.

    Subscribes phase callbacks to this room's event channels, collects
    night-phase actions, and drives the start of a game.
    """
    room = None       # owning room instance
    timer = None      # timer handle (not yet used)
    event = None      # {index: event-handle} registry filled by register_vent()
    kill_info = None  # werewolf kill submissions gathered during the night

    def __init__(self, room):
        self.room = room
        self.kill_info = []
        # Bugfix: ``event`` stayed None while register_vent() assigns into it
        # with ``self.event[len(self.event)] = ...``, which raised TypeError.
        self.event = {}

    def register_vent(self):
        """Subscribe the phase callbacks to this room's event channels."""
        # NOTE(review): name looks like a typo for register_event(); kept
        # as-is for backward compatibility with existing callers.
        self.event[len(self.event)] = EventUtils().register(GAME_EVENT_WEREWOLF_KILL.format(self.room.room_id), self.event_cb_werewolf)
        self.event[len(self.event)] = EventUtils().register(GAME_EVENT_SEER_CHECK.format(self.room.room_id), self.event_cb_seer)
        self.event[len(self.event)] = EventUtils().register(GAME_EVENT_WITCH_DRUG.format(self.room.room_id), self.event_cb_witch)
        self.event[len(self.event)] = EventUtils().register(GAME_EVENT_NORMAL_VOTE.format(self.room.room_id), self.event_cb_normal)

    def event_cb_normal(self, info):
        """Daytime vote callback; ignored outside the vote phase."""
        if self.room.status != ROOM_STATUS_VOTE:
            return
        if info and info.flag:
            pass  # TODO: handle a confirmed vote
        else:
            pass  # TODO: handle an abstention / invalid vote

    def event_cb_werewolf(self, info):
        """Werewolf kill callback; records the target during the wolf phase."""
        if self.room.status != ROOM_STATUS_WEREWOLF:
            return
        self.kill_info.append(info)

    def event_cb_seer(self, info):
        """Seer check callback; ignored outside the seer phase (logic TODO)."""
        if self.room.status != ROOM_STATUS_SEER:
            return

    def event_cb_witch(self, info):
        """Witch potion callback; ignored outside the witch phase (logic TODO)."""
        if self.room.status != ROOM_STATUS_WITCH:
            return

    def start(self):
        """Kick off a game: announce the start, deal roles, hook up events."""
        # Notify every player that the game has started.
        self.room.sendMsgToAllUsers(SERVER_SEND_GAME_START, game_pb2.startGame())
        # Assign roles.
        self.room.allotRole()
        # Bugfix: the original called self.registerEvent(), which does not
        # exist (AttributeError at runtime); the defined method is
        # register_vent().
        self.register_vent()
|
16,331 | 4013bd3bebdfd8c7c947e161a9038109f8e9c6e1 | # pylint: disable=W5101
"""
Assuntos são importantes para cruzar os segmentos atendidos por um evento e,
por sua vez, saber quais os assuntos de interesses das pessoas participantes
de eventos.
"""
from django.db import models
class Subject(models.Model):
""" Modelo para as 'Assuntos' """
name = models.CharField(max_length=255, verbose_name='nome')
active = models.BooleanField(default=True, verbose_name='ativo')
description = models.TextField(
blank=True,
null=True,
verbose_name='descrição'
)
class Meta:
verbose_name = 'Assunto'
verbose_name_plural = 'Assuntos'
ordering = ['name']
def __str__(self):
return self.name
|
16,332 | 6ec5c732c04a6c136ed9be71ed4274af16dd5dd5 | import numpy as nump
from matplotlib import pyplot as graphic
import seaborn as plate
#------------------------------------------------------------
A = 2
B = 1
u_0 = lambda x, y: nump.arctan(nump.cos(nump.pi * x / A))
u_0_part = lambda x, y: nump.sin(2 * nump.pi * x / A) * nump.sin(nump.pi * y / B)
T = 4
n_x = 100
n_y = 100
n_t = 5000
#------------------------------------------------------------
δ_x = A / n_x
δ_y = B / n_y
δ_t = T / n_t
Cond = δ_t / δ_x + δ_t / δ_y
if Cond > 1:
print('---------------------------------------------------')
print('ERROR!!! The condition is not satisfied')
print('---------------------------------------------------')
#------------------------------------------------------------
x_values = nump.linspace(-A / 2, A / 2, n_x)
y_values = nump.linspace(-B / 2, B / 2, n_y)
mas = nump.zeros((n_t, n_x, n_y))
for i in range(n_x):
for j in range(n_y):
mas[0, i, j] = u_0(x_values[i], y_values[j])
for i in range(1, n_x - 1):
for j in range(1, n_y - 1):
mas[1, i, j] = (u_0(x_values[i], y_values[j]) + u_0_part(x_values[i], y_values[j]) * δ_t + δ_t ** 2 / (2 * δ_x ** 2) * (mas[0, i + 1, j] - 2 * mas[0, i, j] + mas[0, i - 1, j]) + δ_t ** 2 / (2 * δ_y ** 2) * (mas[0, i, j + 1] - 2 * mas[0, i, j] + mas[0, i, j - 1]))
mas[1, 1: -1, 0] = mas[1, 1: -1, 1]
mas[1, 1: -1, -1] = mas[1, 1: -1, -2]
for t in range(1, n_t - 1):
mas[t + 1, 1: -1, 1: -1] = (2 * mas[t, 1: -1, 1: -1] - mas[t - 1, 1: -1, 1: -1] + δ_t ** 2 / δ_x ** 2 * (mas[t, : -2, 1: -1]- 2 * mas[t, 1: -1, 1: -1] + mas[t, 2:, 1: -1]) + δ_t ** 2 / δ_y ** 2 * (mas[t, 1: -1, : -2] - 2 * mas[t, 1: -1, 1: -1] + mas[t, 1: -1, 2:]))
mas[t + 1, 1: -1, 0] = mas[t + 1, 1: -1, 1]
mas[t + 1, 1: -1, -1] = mas[t + 1, 1: -1, -2]
#------------------------------------------------------------
for i in range(0, len(mas), 300):
plate.heatmap(mas[i].T, cmap='BuPu', vmin=mas.min(), vmax=mas.max())
graphic.show() |
16,333 | cf56126976718b82ae69d340980bc61a03e09e3a | import sys
sys.setrecursionlimit(1000000000)
input = sys.stdin.readline
n,m=map(int,input().split())
a=sorted([tuple(map(int,input().split())) for _ in range(m)])
i=0
cnt=0
k=n
while i<m:
k=a[i][1]
if a[i][0]<k:
while i<m and a[i][0]<k:
k=min(k,a[i][1])
i+=1
if i<m and a[i][0]>=k:
cnt+=1
else:
cnt+=1
i+=1
print(cnt) |
16,334 | 5de09ee1dc0c0392363497980f23672014687d1e | ii = [('BailJD2.py', 1), ('LyttELD.py', 1), ('MartHRW.py', 1)] |
16,335 | f702956352d6867d788437bbb00c42de8ed3c0df | print('--------Crawling Started--------')
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import requests
def SubwayInfoCrawling():
# crawling option = headless
options = webdriver.ChromeOptions()
options.add_argument('headless')
# Use chromedriver from selenium <https://www.selenium.dev/>
# You can choose other driver if you wish.
driver = webdriver.Chrome("./chromedriver.exe", options=options)
driver.get('http://www.seoulmetro.co.kr/kr/cyberStation.do')
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
mapinfo = soup.find('div', 'mapInfo')
lines = mapinfo.find_all('li')
output = pd.DataFrame()
for i in range(len(lines)):
st_line = lines[i].span.text
st_list = lines[i].div.text.split(',')
for j in range(len(st_list)):
st_name = st_list[j].strip()
unit = pd.DataFrame({'st_name':[st_name],
'st_line':[st_line]})
output = pd.concat([output,unit], axis=0)
output = output.reset_index(drop=True)
driver.close()
return output
# Save crawled data as a dataframe object
st_info = SubwayInfoCrawling()
print(st_info['st_line'].unique())
# Substitute line name with formal expression
line_dict = {
'분당':'분당선',
'신분당':'신분당선',
'경의중앙':'경의중앙선',
'용인경전철':'에버라인',
'우이신설경전철':'우이신설선',
'김포':'김포골드라인'
}
st_info['st_line'] = st_info['st_line'].replace(line_dict)
# Substitute station name with formal expression
st_info.loc[st_info['st_name']=='4·19민주묘지','st_name'] = '4.19민주묘지'
st_info.loc[st_info['st_name']=='사우(김포시청)','st_name'] = '사우'
st_info['st_name'] = st_info['st_name'].apply(lambda x: x if x[-1]=='역' else x + '역')
# My own Kakao Developer API Key. Use for Geocoding.
kakao_api_key = pd.read_csv('../../kakao_api.csv')
# Geocode
headers = {'Authorization':f"{kakao_api_key['rest_api'][0]}"}
def Geocoding(st_name, st_line):
url = f'https://dapi.kakao.com/v2/local/search/keyword.json?query={st_name} {st_line}'
response = requests.get(url, headers=headers)
lat = response.json()['documents'][0]['y']
lng = response.json()['documents'][0]['x']
return [lat,lng]
st_info['coordinates'] = st_info.apply(lambda x: Geocoding(x['st_name'], x['st_line']), axis=1)
st_info['lat'] = st_info['coordinates'].apply(lambda x: x[0])
st_info['lng'] = st_info['coordinates'].apply(lambda x: x[1])
st_info = st_info.drop(columns='coordinates')
# Save as a csv file
st_info.to_csv('../subway_location_info.csv', index=False)
print('--------Crawling Finished--------') |
16,336 | 612cccd85a736fe56678474cbbe2312eaef0a1f2 | """
Utilizando Lambdas
- Conhecidas por expressões lambdas, ou simplesmente lambdas, são funções sem nome, ou seja, funções anônimas
Sintaxe - lambda x1, x2, x3, ..., xn: <expressão>
dois pontos separam os parâmetros da expressão a ser executada
"""
# Modo básico de expressão lambda --------------------------------------------------------------------------------------
nome_completo = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title()
print(nome_completo(' renato ', ' alberto '))
# Obs: Quantidade de argumentos deve ser a mesma quantidade de parâmetros
# Modo mais interessante de expressão lambda ---------------------------------------------------------------------------
autores = ['Arthur C. Clarke', 'Douglas Adams', 'Walter Lippmann', 'Arthur M. Schlesinger', 'Joseph Wood Krutch',
'Buckminster Fuller', 'Omar Bradley', 'Reece Elizabeth', 'John Lasseter', 'Sydney J. Harris']
# cada nome passa pelo parâmetro da expressão lambda, é transformado em 'último sobrenome' e utilizado na classificação
autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower())
print(autores)
# Função Quadrática - f(x) = a * x ** 2 + b * x + c --------------------------------------------------------------------
def funcao_quadratica(a, b, c):
    """Return f(x) = a*x**2 + b*x + c as a closure over the coefficients."""
    def avaliar(x):
        return a * x ** 2 + b * x + c
    return avaliar
teste = funcao_quadratica(2, 3, -5) # retorna a expressão lambda que recebe x como parâmetro
print(teste(0)) # executa passado o argumento x
print(teste(1)) # executa passado o argumento x
print(teste(2)) # executa passado o argumento x
|
16,337 | ddcb2cf8d661bbc82268cef7f68dd58929c16bda | try:
from unittest import skipIf
except ImportError:
from django.utils.unittest import skipIf
try:
from django.conf.urls.defaults import url
except ImportError:
from django.conf.urls import url
try:
from django.conf.urls.defaults import include
except ImportError:
from django.conf.urls import include
|
16,338 | 587f5913e7284c79e12171ca26e3b36e21e6265e | from django.apps import AppConfig
class BiografiConfig(AppConfig):
name = 'Biografi'
verbose_name = 'Biografia'
|
16,339 | bac290550f9a52b2b5a0e58fdc201dfee2f022d1 | """
created by Nagaj at 22/04/2021
"""
from constants import SHORT_TEXT, ALL_CHARS_ARE_NUMBERS
class ShortLenError(Exception):
message = SHORT_TEXT
class TextAsNumber(Exception):
message = ALL_CHARS_ARE_NUMBERS
|
16,340 | f2079d98dab6bf74f677788cba5610d3dfdde074 | import Dijkstra_with_DLL as dijk
import GUI as gui
def run(station1, station2, time):
"""Function to run the Dijkstra algorithm and also format the output in a way that can be displayed in the
interface """
route = dijk.Dijkstra(station1, station2)
path_results = route.find_path(dijk.builder(time))
path = path_results[0]
total_time = path_results[1]
lines = dijk.find_lines(path_results[0],time)
line_ID = lines[0]
time_taken = lines[1]
# Headers fot the Table
records = [('Station', 'Line', 'Travel Time to Next Station(in mins)')]
# Table Data
for x, y, z in zip(path, line_ID, time_taken):
records.append((x, y, z))
if path_results == "Route Not Possible":
gui.route_not_possible()
else:
gui.results(records, total_time)
|
16,341 | 4e214a87d565d0948bcd82a5115d5f8da95f8584 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
created by gjwei on 9/22/17
"""
import numpy as np
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def identity(z):
return z
def binary_step(z):
return np.where(z < 0, 0, 1)
def logistic(z):
return 1.0 / (1 + np.exp(-z))
def tanh(z):
return np.tanh(z)
def arctan(z):
return np.arctan(z)
def softsign(z):
return z / (1.0 + np.abs(z))
def relu(z):
    """Rectified linear unit: elementwise max(0, z).

    Bugfix: the original used ``np.maximum(1, z)``, which clamps at 1 and is
    not ReLU — every input below 1 (including all negatives) was mapped to 1.
    """
    return np.maximum(0, z)
def leakyRelu(z):
# return np.where(z < 0, 0.01 * z, z)
return np.maximum(0.01 * z, z)
def pRelu(z, alpha):
return np.where(z < 0, alpha * z, z)
def RLeakyRelu(z, alpha):
return np.where(z < 0, alpha * z, z)
def exponential_linear_unit(z, alpha):
return np.where(z < 0, alpha * (np.exp(z) - 1), z)
def scaled_exponential_Lu(z):
alpha = 1.67326
lambda_ = 1.0507
return np.where(z < 0, lambda_ * alpha * (np.exp(z) - 1), lambda_ * z)
def adaptive_piecewise_linear(z):
pass
def softPlus(x):
    """Softplus activation: log(1 + exp(x)).

    Bugfix: the original returned ``np.log1p(x)`` = log(1 + x), which is not
    softplus and is undefined for x <= -1.  ``np.logaddexp(0, x)`` computes
    log(exp(0) + exp(x)) = log(1 + exp(x)) without overflowing for large x.
    """
    return np.logaddexp(0, x)
def bent_identity(x):
return ((np.sqrt(x ** 2 + 1) - 1) / 2) + x
def softExp(x, a):
return np.where(a < 0, -1 * (np.log(1 - a * (x + a))) / a, np.where(a == 0, x, np.exp(a * x) - 1) / a) + a
def sinusoid(x):
return np.sin(x)
def sinc(x):
return np.where(x == 0, 1, np.sin(x) / x)
def gaussian(x):
return np.exp(- x ** 2)
|
16,342 | 6d43881e60ab221a5a9e9deedd264e50437995ce | #!/usr/bin/env python3
#encoding: windows-1252
import brickpi3
import math
import time
# Global
BP = brickpi3.BrickPi3()
# Constants
WHEEL_DIAMETER = 8.2 # CM
GEAR_RATIO = 12 / 20 # 12t:20t
WHEEL_TRACK_WIDTH = 15.00 # CM (nice round number, what are the chances!)
LEFT_MOTOR = BP.PORT_B
RIGHT_MOTOR = BP.PORT_C
def mowing():
print("mowing")
def turnLeft():
print("left")
def turnRight():
print("right")
def quitting():
print("quitting")
quit()
currentState = 0
states = {
0: mowing,
1: turnLeft,
2: turnRight,
3: quitting
}
if __name__ == "__main__":
func = states.get(currentState)
func()
|
16,343 | 6a8578bc01abd1cb603764c05c8d2bbaa3ff7aa1 | import os
import re
from pathlib import Path
import cv2
import numpy as np
import requests
from upres.utils.environment import env
class Image:
    """A PNG image on disk, loadable as a numpy array padded so its
    dimensions are divisible by ``scaling``."""

    def __init__(self, path: Path, greyscale: bool, scaling: int):
        # Path to the .png file on disk.
        self.path = path
        self.greyscale = greyscale
        # cv2 read flag chosen once so get_array() is consistent.
        self.read_type = cv2.IMREAD_GRAYSCALE if greyscale else cv2.IMREAD_COLOR
        self.scaling = scaling

    @property
    def name(self):
        # Image name = the final path component without the .png suffix.
        # NOTE(review): assumes '/' separators and a .png extension.
        return re.search(r"/([\w]*).png", str(self.path)).group(1)

    def get_array(self, scale=1):
        """
        Loads image and adds padding as needed to accommodate the scaling without fractions.
        """
        array = cv2.imread(str(self.path), self.read_type)

        # resize original image so it can be scaled without fractions:
        # grow each dimension up to the next multiple of self.scaling.
        x_extra = array.shape[0] % self.scaling
        y_extra = array.shape[1] % self.scaling

        x_extra = self.scaling - x_extra if x_extra != 0 else x_extra
        y_extra = self.scaling - y_extra if y_extra != 0 else y_extra

        # cv2.resize takes (width, height), i.e. (cols, rows).
        padded_array = cv2.resize(
            array, (int(array.shape[1] + y_extra), int(array.shape[0] + x_extra))
        )

        # scale image
        resized_array = cv2.resize(
            padded_array,
            (int(padded_array.shape[1] * scale), int(padded_array.shape[0] * scale)),
        )

        # cv2 reads in array as BGR, tensorboard shows as RGB:
        # swap channels 0 and 2 for color images.
        if not self.greyscale:
            x = np.copy(resized_array)
            resized_array[:, :, 0] = x[:, :, 2]
            resized_array[:, :, 2] = x[:, :, 0]

        # cv2.imshow('image',array)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        # Greyscale reads are 2-D; add an explicit channel axis.
        if self.greyscale:
            resized_array = np.expand_dims(resized_array, 2)
        return resized_array
def download_images(urls):
    """Download each image URL to ``env.units/<name>.png`` and print the
    resulting array shape.

    The name is taken from the file component of the URL. Bug fix: the
    regex previously matched ``.jpg`` URLs only, so the ``.png`` building
    URLs made ``re.search`` return None and raised AttributeError.
    """
    for url in urls:
        unit = re.search(r"([A-Za-z_][\w]*)\.(?:jpg|png)", url).group(1)
        file_name = str(env.units / f"{unit}.png")
        with open(file_name, "wb+") as f:
            f.write(requests.get(url).content)
        print(Image(file_name, False, 1).get_array().shape)
def download_unit_images():
    """Download the unit images (Blizzard CDN .jpg sources)."""
    image_urls = [
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/UXAGLN2RQ8001492473801945.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/MOM5V4N13UVI1492476408599.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/MBKXXJZ6YCVV1492476407508.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/TA17UJIR1TV31492476406352.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/CRSJ1ZI8GNQA1492476407623.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/CMQOBEWCGCUE1492476407691.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/5VKFTLLMYUUH1492476407602.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/HJIMRH145ATT1492476406520.jpg",
        "https://bnetcmsus-a.akamaihd.net/cms/gallery/0AAPN0GIQ0I91492476406344.jpg",
        # Alternative liquipedia sources kept for reference:
        # "https://liquipedia.net/commons/images/9/9d/Marine.png",
        # "https://liquipedia.net/commons/images/8/8a/Firebat.png",
        # "https://liquipedia.net/commons/images/2/26/Medic.png",
        # "https://liquipedia.net/commons/images/f/f7/Scv.png",
        # "https://liquipedia.net/commons/images/a/ab/Ghost.png",
    ]
    download_images(image_urls)
def download_building_images():
    """Download the building thumbnails (liquipedia .png sources).

    NOTE(review): download_images extracts the file name with a
    ``.jpg``-only regex; these ``.png`` URLs will not match — verify the
    regex before using this function.
    """
    image_urls = [
        "https://liquipedia.net/commons/images/thumb/1/13/%24Academy.png/600px-%24Academy.png",
        "https://liquipedia.net/commons/images/thumb/d/dd/%24Armory.png/600px-%24Armory.png",
        "https://liquipedia.net/commons/images/thumb/d/df/%24Barracks.png/600px-%24Barracks.png",
        "https://liquipedia.net/commons/images/thumb/e/e9/%24Bunker.png/600px-%24Bunker.png",
        "https://liquipedia.net/commons/images/thumb/d/dc/%24Command_Center.png/600px-%24Command_Center.png",
        "https://liquipedia.net/commons/images/thumb/1/1f/%24Comsat_Station.png/600px-%24Comsat_Station.png",
        "https://liquipedia.net/commons/images/thumb/2/2b/%24Control_Tower.png/600px-%24Control_Tower.png",
        "https://liquipedia.net/commons/images/thumb/0/04/%24Covert_Ops.png/600px-%24Covert_Ops.png",
        "https://liquipedia.net/commons/images/thumb/4/41/%24Engineering_Bay.png/600px-%24Engineering_Bay.png",
        "https://liquipedia.net/commons/images/thumb/3/36/%24Factory.png/600px-%24Factory.png",
        "https://liquipedia.net/commons/images/thumb/0/0b/%24Machine_Shop.png/600px-%24Machine_Shop.png",
        "https://liquipedia.net/commons/images/thumb/4/41/%24Missile_Turret.png/600px-%24Missile_Turret.png",
        "https://liquipedia.net/commons/images/thumb/e/ed/%24Nuclear_Silo.png/600px-%24Nuclear_Silo.png",
        "https://liquipedia.net/commons/images/thumb/7/7a/%24Physics_Lab.png/600px-%24Physics_Lab.png",
        "https://liquipedia.net/commons/images/thumb/c/ce/%24Refinery.png/600px-%24Refinery.png",
        "https://liquipedia.net/commons/images/thumb/2/25/%24Science_Facility.png/600px-%24Science_Facility.png",
        "https://liquipedia.net/commons/images/thumb/2/24/%24Starport.png/600px-%24Starport.png",
        "https://liquipedia.net/commons/images/thumb/c/c7/%24Supply_Depot.png/600px-%24Supply_Depot.png",
    ]
    download_images(image_urls)
|
16,344 | 55a1f718eeb0b0d37ab67ab65255c64ec15e572d | import GenerateData.calc_breast_tmap as calc_breast
import GenerateData.draw_axe as draw_axe
import matplotlib.pyplot as plt
def create(left_up_t_values, right_up_t_values, left_dawn_t_values, right_dawn_t_values):
    """Build a 2x2 figure of breast temperature maps and return the Figure.

    The 'up' maps fill the top row and the 'dawn' maps the bottom row,
    with the right breast in the left column. All four panels share one
    temperature range so their colors are comparable.
    """
    (l_deep_table, l_deep_min, l_deep_max) = calc_breast.calc_t_map(t_values=left_up_t_values, breast_side='left')
    (r_deep_table, r_deep_min, r_deep_max) = calc_breast.calc_t_map(t_values=right_up_t_values, breast_side='right')
    (l_skin_table, l_skin_min, l_skin_max) = calc_breast.calc_t_map(t_values=left_dawn_t_values, breast_side='left')
    (r_skin_table, r_skin_min, r_skin_max) = calc_breast.calc_t_map(t_values=right_dawn_t_values, breast_side='right')
    # Shared color scale across all four maps.
    _min = min(l_deep_min, r_deep_min, l_skin_min, r_skin_min)
    _max = max(l_deep_max, r_deep_max, l_skin_max, r_skin_max)
    fig, subplots = plt.subplots(nrows=2, ncols=2, figsize=(4, 4))
    # fig.axes is row-major: [0]=top-left, [1]=top-right, [2]=bottom-left, [3]=bottom-right.
    draw_axe.set_axes_breast(temperature_table=r_deep_table, min_t=_min, max_t=_max, axes=fig.axes[0])
    draw_axe.set_axes_breast(temperature_table=l_deep_table, min_t=_min, max_t=_max, axes=fig.axes[1])
    draw_axe.set_axes_breast(temperature_table=r_skin_table, min_t=_min, max_t=_max, axes=fig.axes[2])
    draw_axe.set_axes_breast(temperature_table=l_skin_table, min_t=_min, max_t=_max, axes=fig.axes[3])
    return fig
|
16,345 | 796183bf2116d19a2c65403b47a56828ab9f0a8c | from django.apps import AppConfig
class PollutionappConfig(AppConfig):
    """Django application configuration for the PollutionApp app."""
    name = 'PollutionApp'
|
# 13 entries: month m maps to ZODIACS[m-1] (early month) or ZODIACS[m]
# (late month); the duplicated first/last entry handles December wrap.
ZODIACS = ["Козирог", "Водолей", "Риби", "Овен", "Телец", "Близнаци",
           "Рак", "Лъв", "Дева", "Везни", "Скорпион", "Стрелец", "Козирог"]
# Last day of the month that still belongs to the earlier sign.
SPLIT = [19, 18, 20, 20, 20, 20, 21, 22, 22, 22, 21, 21]


def what_is_my_sign(day, month):
    """Return the zodiac sign (in Bulgarian) for the given day and month."""
    index = month - 1 if day <= SPLIT[month - 1] else month
    return ZODIACS[index]
|
16,347 | 5c3991c4dbb4bb7a58a6cd4fb7574687dfd92d69 | import unittest
from random import *
import string
from user import User
from credential import Credential
class TestUser(unittest.TestCase):
    '''
    Test class that defines test cases for the user class behaviours.

    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    '''

    def setUp(self):
        '''
        Set up method to run before each test cases.
        '''
        self.new_user = User("Beyonce","Knowles","@bey","bknowles@carter.com","#####")

    def tearDown(self):
        '''
        tearDown method that does clean up after each test case has run.
        '''
        # NOTE(review): this only assigns a local variable and has no
        # effect; presumably it was meant to reset a shared credential
        # list — confirm against the user module.
        credential_list = []

    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_user.first_name,"Beyonce")
        self.assertEqual(self.new_user.last_name,"Knowles")
        self.assertEqual(self.new_user.username,"@bey")
        self.assertEqual(self.new_user.email,"bknowles@carter.com")
        self.assertEqual(self.new_user.password,"#####")

    def test_create_credential(self):
        '''
        test_create_credential tests that a new credential can be created
        '''
        facebook_credential = User.create_credential("facebook","Beyonce","Knowles","@beybey","beybey@hack","password")
        self.assertTrue(facebook_credential)

    def test_save_credential(self):
        '''
        test_save_credentials test case to test if the credential object is saved into
        the credential list
        '''
        facebook_credential = User.create_credential("facebook","Beyonce","Knowles","@beybey","beybey@hack","password")
        user_credential_list = []
        User.save_credential(facebook_credential,user_credential_list)
        self.assertEqual(len(user_credential_list),1)

    def test_display_credentials(self):
        '''
        test_display_credential test case to test if all credentials will be returned
        '''
        facebook_credential = User.create_credential("facebook","Beyonce","Knowles","@beybey","beybey@hack","password")
        user_credential_list = []
        User.save_credential(facebook_credential,user_credential_list)
        self.assertEqual(User.display_credentials(user_credential_list),("facebook","Beyonce","Knowles","@beybey","beybey@hack","password"))

    def test_delete_credentials(self):
        '''
        test_delete_credentials test case to test that a saved credential can be removed
        '''
        facebook_credential = User.create_credential("facebook","Beyonce","Knowles","@beybey","beybey@hack","password")
        user_credential_list = []
        User.save_credential(facebook_credential,user_credential_list)
        new_list = User.del_credential("facebook",user_credential_list)
        self.assertEqual(len(new_list),0)
|
16,348 | 9075391a4fe75da4ffa78b0cb788a418d093b51c | #202 ac
def nextSquareSum(x):
    """Return the sum of the squares of the decimal digits of x (x >= 0).

    Bug fix: the original used ``x /= 10``, which is float division under
    Python 3 and corrupted the digit extraction; integer arithmetic via
    divmod is used instead.
    """
    total = 0
    while x > 0:
        x, digit = divmod(x, 10)
        total += digit * digit
    return total
class Solution(object):
    def isHappy(self, n):
        """Return True if n is a happy number (repeated digit-square sums
        reach 1 rather than entering a cycle).

        :type n: int
        :rtype: bool
        """
        seen = set()  # values already visited; a repeat means a cycle
        current = n
        while current != 1:
            if current in seen:
                return False
            seen.add(current)
            current = nextSquareSum(current)
        return True
|
16,349 | 88bf25a34c54ff9a4a62fd7213044d22c6ead530 | #!/usr/bin/env python3
"""
This script extracts docstrings from modules and generates the appropriate
files. It is intended to be run by the instructor before distributing the
starter code.
It really has no reason to exist (pytest supports running from the modules
directly) other than:
1. Students tend to be silly sometimes and remove their own docstrings (or,
type above them!) and loose their unit tests
2. I can grade your answers against the extracted docstrings
"""
import slyther.types
import slyther.parser
import slyther.interpreter
import slyther.builtins
import slyther.evaluator
def trim(docstring):
    """
    Normalize indentation on a docstring, per PEP-257.
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines.
    lines = docstring.expandtabs().splitlines()
    # Common indentation of the continuation lines (first line is special
    # and does not participate; blank lines are ignored).
    margin = min(
        (len(line) - len(line.lstrip()) for line in lines[1:] if line),
        default=0,
    )
    cleaned = [lines[0].strip()]
    cleaned.extend(line[margin:].rstrip() for line in lines[1:])
    # Drop trailing, then leading, blank lines.
    while cleaned and not cleaned[-1]:
        cleaned.pop()
    while cleaned and not cleaned[0]:
        cleaned.pop(0)
    return '\n'.join(cleaned) + '\n'
deliverables = {
'd1': [
slyther.types.ConsCell,
slyther.types.ConsCell.__eq__,
slyther.types.ConsCell.__repr__,
slyther.types.ConsList,
slyther.types.ConsList.__init__,
slyther.types.ConsList.from_iterable,
slyther.types.ConsList.__getitem__,
slyther.types.ConsList.cells,
slyther.types.ConsList.__len__,
slyther.types.ConsList.__contains__,
slyther.types.ConsList.__reversed__,
slyther.types.ConsList.__eq__,
slyther.types.ConsList.__repr__,
slyther.types.SExpression,
slyther.types.cons,
slyther.types.LexicalVarStorage,
slyther.types.LexicalVarStorage.fork,
slyther.types.LexicalVarStorage.put,
slyther.types.LexicalVarStorage.__getitem__,
],
'd2': [
slyther.parser.lex,
slyther.parser.parse,
slyther.parser.parse_strlit,
slyther.parser,
],
'd3': [
slyther.evaluator,
slyther.evaluator.lisp_eval,
slyther.types.UserFunction,
slyther.types.UserFunction.__init__,
slyther.types.UserFunction.__call__,
slyther.types.UserFunction.__repr__,
slyther.builtins,
slyther.builtins.add,
slyther.builtins.sub,
slyther.builtins.mul,
slyther.builtins.div,
slyther.builtins.floordiv,
slyther.builtins.list_,
slyther.builtins.car,
slyther.builtins.cdr,
],
'd4': [
slyther.builtins.define,
slyther.builtins.lambda_func,
slyther.builtins.let,
slyther.builtins.if_expr,
slyther.builtins.cond,
slyther.builtins.and_,
slyther.builtins.or_,
slyther.builtins.setbang,
slyther.builtins.eval_,
slyther.builtins.parse_string,
],
}
def fullname(obj):
    """Return the dotted name of *obj*, module-qualified when it has a
    __qualname__ (functions, classes), else its bare __name__ (modules)."""
    qualname = getattr(obj, '__qualname__', None)
    if qualname is not None:
        return obj.__module__ + '.' + qualname
    return obj.__name__
if __name__ == '__main__':
    # For every documented deliverable item, write its (normalized)
    # docstring to tests/<deliverable>/test__docstring_<dotted.name>.txt,
    # prefixed with a doctest-style import of the item's module.
    for d, items in deliverables.items():
        for item in items:
            if hasattr(item, "__doc__") and item.__doc__:
                # '/' can appear in names (e.g. division operators) and is
                # not a valid file-name character here.
                path = "tests/{}/test__docstring_{}.txt".format(
                    d, fullname(item).replace('/', 'div'))
                # Modules have __name__ but no __module__.
                mod = getattr(item, "__module__", getattr(item, "__name__"))
                # Bug fix: use a context manager so the file is closed
                # even if a write raises.
                with open(path, "w") as f:
                    f.write('>>> from {} import *\n\n'.format(mod))
                    f.write(trim(item.__doc__))
|
16,350 | 9825c5a1e43e85876f8cef50e6a8ce75eb747bd3 | from math import ceil
from django.shortcuts import render
from django.http import HttpResponse
from .models import Product, Women, Men, Watch, Kids, Other, Shoes
from .models import Contact
from .models import Order
def index(request):
    """Render the home page with products grouped by category.

    Each entry carries the queryset, an index range for the template, and
    the number of carousel slides (4 products per slide, rounded up).
    """
    allProd = []
    catprod = Product.objects.values('category')
    cats = {item['category'] for item in catprod}
    for cat in cats:
        product = Product.objects.filter(category=cat)
        n = len(product)
        # Simplified from n//4 + ceil(n/4 - n//4), which is just ceil(n/4).
        nSlide = ceil(n / 4)
        allProd.append([product, range(1, n), nSlide])
    params = {'allProd': allProd}
    return render(request, 'shop/index.html', params)
def contact(request):
    """Show the contact page; on POST, persist the submitted message."""
    if request.method == "POST":
        form = request.POST
        entry = Contact(
            name=form.get('name', ''),
            email=form.get('email', ''),
            phone=form.get('phone', ''),
            address=form.get('address'),
            textarea=form.get('textarea'),
        )
        entry.save()
    return render(request, 'shop/contact.html')
def productView(request, myid):
    """Render the detail page for the product whose product_id is myid."""
    matches = Product.objects.filter(product_id=myid)
    return render(request, 'shop/productView.html', {'product': matches[0]})
def cart(request):
    """Render the shopping-cart page."""
    return render(request, 'shop/cart.html')


def tracker(request):
    """Render the order-tracker page."""
    return render(request, 'shop/tracker.html')


def about(request):
    """Render the about page."""
    return render(request, 'shop/about.html')
def checkout(request):
    """On POST, create an Order from the submitted form and render a
    thank-you message with the new order id; on GET just render the form."""
    if request.method == "POST":
        items_json = request.POST.get('itemsJson', '')
        name = request.POST.get('name', '')
        email = request.POST.get('email', '')
        phone = request.POST.get('phone', '')
        # Address line 2 is appended to line 1 with a space separator.
        address = request.POST.get('address', ) + " " + request.POST.get('address2', '')
        city = request.POST.get('city', )
        state = request.POST.get('state', )
        zip_code = request.POST.get('zip_code', )
        order = Order(items_json=items_json, name=name, email=email, address=address, city=city,
                      state=state, zip_code=zip_code, phone=phone)
        order.save()
        thank = True
        # order_id is available after save().
        id = order.order_id
        return render(request, 'shop/checkout.html', {'thank': thank, 'id': id})
    return render(request, 'shop/checkout.html')
def category(request, womenid):
    """Render the category page for one women's product.

    NOTE(review): despite the generic name, this looks up the Women model
    by women_id only.
    """
    ladiesproduct = Women.objects.filter(women_id=womenid)
    param = {'ladiesproduct': ladiesproduct[0]}
    return render(request, 'shop/category.html', param)
def women(request):
    """Render the women's page with products grouped by category."""
    categories = {row['category'] for row in Women.objects.values('category')}
    grouped = []
    for category in categories:
        items = Women.objects.filter(category=category)
        grouped.append([items, range(1, len(items))])
    return render(request, 'shop/women.html', {'allwomenProd': grouped})
def womenview(request, wid):
    """Render the women's template for the single product with women_id == wid.

    NOTE(review): this renders 'shop/women.html' with a 'womenproduct'
    context while women() renders the same template with 'allwomenProd' —
    verify the template handles both.
    """
    womenproduct = Women.objects.filter(women_id=wid)
    param = {'womenproduct': womenproduct[0]}
    return render(request, 'shop/women.html', param)
def men(request):
    """Render the men's page with products grouped by category."""
    categories = {row['category'] for row in Men.objects.values('category')}
    grouped = []
    for category in categories:
        items = Men.objects.filter(category=category)
        grouped.append([items, range(1, len(items))])
    return render(request, 'shop/men.html', {'allmenProd': grouped})
def kid(request):
    """Render the kids' page with products grouped by category."""
    categories = {row['category'] for row in Kids.objects.values('category')}
    grouped = []
    for category in categories:
        items = Kids.objects.filter(category=category)
        grouped.append([items, range(1, len(items))])
    return render(request, 'shop/kid.html', {'allkidProd': grouped})
def other(request):
    """Render the 'other products' page grouped by category.

    Bug fix: the grouped products were computed but never passed to the
    template; render() now receives the params context.
    """
    allotherProd = []
    otherprod = Other.objects.values('category')
    cats = {item['category'] for item in otherprod}
    for cat in cats:
        otherproduct = Other.objects.filter(category=cat)
        n = len(otherproduct)
        allotherProd.append([otherproduct, range(1, n)])
    params = {'allotherProd': allotherProd}
    return render(request, 'shop/other.html', params)
def shoe(request):
    """Render the shoes page grouped by category.

    Bug fix: the grouped products were computed but never passed to the
    template; render() now receives the params context.
    """
    allshoeProd = []
    shoeprod = Shoes.objects.values('category')
    cats = {item['category'] for item in shoeprod}
    for cat in cats:
        shoeproduct = Shoes.objects.filter(category=cat)
        n = len(shoeproduct)
        allshoeProd.append([shoeproduct, range(1, n)])
    params = {'allshoeProd': allshoeProd}
    return render(request, 'shop/shoe.html', params)
def watch(request):
    """Render the watches page grouped by category.

    Bug fix: the grouped products were computed but never passed to the
    template; render() now receives the params context.
    """
    allwatchProd = []
    watchprod = Watch.objects.values('category')
    cats = {item['category'] for item in watchprod}
    for cat in cats:
        watchproduct = Watch.objects.filter(category=cat)
        n = len(watchproduct)
        allwatchProd.append([watchproduct, range(1, n)])
    params = {'allwatchProd': allwatchProd}
    return render(request, 'shop/watch.html', params)
|
16,351 | 7c2fa0e70d84c247699538158ab78391e6a2c25a | import pytest
from galois_field.GFp import GFp
from galois_field.core.ElementInGFp import ElementInGFp
@pytest.mark.parametrize('p', [(5), (2), (123456791)])
def test_GFp_init(p):
    """GFp stores the characteristic p it was constructed with."""
    gf = GFp(p)
    assert gf.p == p


@pytest.mark.parametrize('p, expected', [
    (5, True),
    (2, True),
    (123456791, True),
    (6, False),
    (4, False),
    (123456789, False)])
def test_GFp_is_valid(p, expected):
    """is_valid() is True exactly when p is prime."""
    gf = GFp(p)
    assert gf.is_valid() == expected


@pytest.mark.parametrize('p, expected', [
    (5, 'GF(5)'),
    (2, 'GF(2)')
])
def test_GFp_str(p, expected):
    """str(GFp(p)) renders as 'GF(p)'."""
    gf = GFp(p)
    assert str(gf) == expected


@pytest.mark.parametrize('integer, p', [
    (1, 5),
    (2, 11),
    (3, 123456791)
])
def test_GFp_elm(integer, p):
    """elm() wraps an integer as an ElementInGFp of this field."""
    gf = GFp(p)
    result = gf.elm(integer)
    assert isinstance(result, ElementInGFp)
@pytest.mark.parametrize('p, expected_contain', [
(5, [2, 3]),
(31, [3, 11, 12, 13, 17, 21, 22, 24]),
(499, [7, 10, 11, 15, 17, 19, 23, 28, 35, 40, 41, 42, 44, 50, 53, 58, 60,
61, 63, 65, 66, 68, 71, 75, 76, 79, 85, 86, 87, 89, 90, 92, 94, 95,
98, 99, 102, 112, 113, 114, 129, 135, 138, 141, 146, 147, 153, 157,
160, 163, 164, 168, 171, 173, 176, 179, 182, 185, 193, 200, 202, 205,
206, 207, 210, 212, 214, 217, 218, 219, 223, 229, 232, 238, 240, 241,
242, 244, 246, 252, 260, 262, 264, 266, 271, 272, 273, 274, 275, 278,
284, 286, 295, 300, 301, 302, 303, 304, 309, 310, 311, 315, 316, 318,
319, 321, 325, 327, 329, 340, 341, 344, 347, 348, 349, 356, 357, 362,
363, 366, 367, 368, 369, 373, 376, 377, 378, 379, 380, 383, 390, 392,
393, 394, 396, 398, 399, 408, 411, 415, 417, 419, 426, 429, 430, 442,
443, 448, 450, 452, 453, 454, 456, 461, 465, 466, 469, 470, 474, 477,
478, 479, 485, 494]),
])
def test_GFp_random_primitive_root(p, expected_contain):
    """random_primitive_elm() returns an ElementInGFp whose value is one
    of the known primitive roots of GF(p); sampled several times because
    the choice is random."""
    LOOP_NUM = 5
    gfp = GFp(p)
    for _ in range(LOOP_NUM):
        result = gfp.random_primitive_elm()
        assert isinstance(result, ElementInGFp)
        assert result.value in expected_contain
|
16,352 | 70bcb26014e3ba899d064cc8325c59ddf78e10d2 | from typing import Iterable, List
from collector.core.models.user import User
from collector.core.models.photo import Photo
from collector.core.models.image import Image
from collector.core.http.request import Request
from collector.core.collector import Collector
class UnsplashCollector(Collector):
    """Collector that fetches the Unsplash 'wallpapers' topic feed via its
    internal (napi) JSON endpoint and yields Photo models."""

    name = "unsplash"

    def start_requests(self):
        """Yield the initial Request(s) for the wallpapers topic listing."""
        urls = [
            "https://unsplash.com/napi/topics/wallpapers/photos?page=1&per_page=50",
        ]
        for url in urls:
            self.logger.info(f"Yielding request {url}")
            yield Request(url=url)

    def parse(self, response) -> Iterable[Photo]:
        """Map each JSON photo record in the response to a Photo model."""
        photos = response.json()
        for photo in photos:
            photo_id = photo["id"]
            user = photo["user"]
            profile_image = user["profile_image"]
            yield Photo(
                photo_id=photo_id,
                # The feed has no separate title field; the alt text is
                # reused for both title and description.
                title=photo["alt_description"],
                description=photo["alt_description"],
                photo_image=Image(
                    raw=photo["urls"]["raw"],
                    large=photo["urls"]["full"],
                    regular=photo["urls"].get("regular"),
                    small=photo["urls"]["small"],
                ),
                likes=photo["likes"],
                user=User(
                    user_id=user["id"],
                    username=user["username"],
                    first_name=user["first_name"],
                    last_name=user["last_name"],
                    portfolio_url=user["portfolio_url"],
                    bio=user["bio"],
                    profile_image=Image(
                        large=profile_image["large"],
                        regular=profile_image.get("regular"),
                        small=profile_image["small"],
                    ),
                ),
                instagram_username=user["instagram_username"],
                total_likes=user["total_likes"],
                total_photos=user["total_photos"],
                # NOTE(review): 'for_hite' looks like a typo for
                # 'for_hire'; confirm against the Photo model before
                # renaming.
                for_hite=user["for_hire"],
            )
|
16,353 | 4a25629cc7e5e6a8371be0b48f430398b97f3a88 | from setuptools import setup
import app
# Package metadata; the version is sourced from app.__version__ so it is
# defined in exactly one place.
setup(
    name='Example Flask API',
    version=app.__version__,
    packages=['app'],
)
|
16,354 | 1cfc124e082457f2a82dbe037d8fe08400119fe3 | '''
Created on 22 de mar de 2018
@author: I844141
'''
from lexer.TokenModule import Token
import re
# Pseudo-token names used by the lexer: whitespace and comments are
# matched so they can be skipped rather than emitted as tokens.
WHITESPACE = 'whitespace'
BEGIN_COMMENT_BLOCK = 'BEGIN_COMMENT_BLOCK'
END_COMMENT_BLOCK = 'END_COMMENT_BLOCK'
COMMENT_LINE = 'COMMENT_LINE'
class Lexer(object):
    '''
    Lexical Analyzer for language C
    '''

    def __init__(self, text):
        """Build the combined token regex.

        :param text: iterable of source lines to tokenize.
        """
        self.text = text
        self.tokens = []
        #self.keywords = ['void', 'return', '#include',
        #                 'if', 'else', 'do', 'while', 'switch', 'case', 'break', 'for',
        #                 'new', 'NewArray', 'ReadLine', 'ReadInteger']
        # Reserved words of the target language (Decaf-like, per the list).
        self.keywords = ['def', 'if', 'else', 'while', 'return', 'break', 'continue', 'true', 'false',
                         'for', 'callout', 'class', 'interface', 'extends', 'implements', 'new', 'this', 'string',
                         'float', 'double', 'null']
        self.variable_types = ['int', 'void', 'bool']
        # Order matters: re matches alternatives left to right, so e.g.
        # identifiers are tried before operators and HEX before NUM.
        token_specification = [(WHITESPACE, r'\s+'),
                               (Token.TYPE_STRING_LITERAL, r'".*"'),
                               (Token.TYPE_ID, r'[A-Za-z][A-Za-z0-9_]*'),
                               #(BEGIN_COMMENT_BLOCK, r'\/\*'),
                               #(END_COMMENT_BLOCK, r'\*\/'),
                               (COMMENT_LINE, r'\/\/.*'),
                               #(Token.TYPE_ARIT_OP, r'\+|-|\*|/'),
                               #(Token.TYPE_FLOAT, r'\d+\.\d+'),
                               (Token.TYPE_HEX, r'0x[0-9a-fA-F]*'),
                               (Token.TYPE_NUM, r'\d+(\.\d+)?'),
                               (Token.TYPE_PAREN_L, r'\('),
                               (Token.TYPE_PAREN_R, r'\)'),
                               (Token.TYPE_BRACE_L, r'\{'),
                               (Token.TYPE_BRACE_R, r'\}'),
                               (Token.TYPE_BRACKET_L, r'\['),
                               (Token.TYPE_BRACKET_R, r'\]'),
                               (Token.TYPE_UNARY_OP, r'!'),
                               (Token.TYPE_BINARY_OP, r'\*|/|%|\+|-|<|<=|>=|>|==|!=|&&|\|\|'),
                               (Token.TYPE_EQUAL, r'='),
                               (Token.TYPE_COMMA, r','),
                               (Token.TYPE_SEMICOLON, r';')
                               #(Token.TYPE_RELATIONAL_OP, r'<|<=|==|!=|>=|>'),
                               #(Token.TYPE_LOGICAL_OP, r'\|\||&&'),
                               #(Token.TYPE_INCLUDE, r'((?i)\#include).*'),
                               ]
        # One alternation of named groups; the winning group name is the
        # token kind (mo.lastgroup).
        self.tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)

    def evaluate(self):
        """Tokenize self.text line by line, appending Token objects to
        self.tokens; whitespace and line comments are skipped."""
        line_num = 1
        #is_comment = False
        for line in self.text:
            for mo in re.finditer(self.tok_regex, line):
                kind = mo.lastgroup
                value = mo.group(kind)
                #if (kind == BEGIN_COMMENT_BLOCK):
                #    is_comment = True
                if (kind != WHITESPACE and kind != COMMENT_LINE): # and is_comment == False):
                    # Reclassify identifiers that are reserved words or
                    # built-in type names.
                    if (kind == Token.TYPE_ID):
                        if (value in self.keywords):
                            kind = Token.TYPE_RESERVED_WORD
                        elif (value in self.variable_types):
                            kind = Token.TYPE_RESERVED_VAR_TYPE
                    self.tokens.append(Token(kind, value, line_num, mo.start()))
                #if (kind == END_COMMENT_BLOCK):
                #    is_comment = False
            line_num += 1

    def printTokens(self):
        """Print every collected token, one per line."""
        for t in self.tokens:
            print(t)
if __name__ == "__main__":
    # Read the sample program, strip line endings, lex it and dump tokens.
    with open('entrada.txt','r') as f:
        text = f.readlines()
    text = [l.strip() for l in text]
    lexer = Lexer(text)
    lexer.evaluate()
    lexer.printTokens()
|
16,355 | 75871b8c673dc4f77e4cc42a4563a422106d211f | # This code is intended for the MagicLight Wifi LED Lightbulb
# http://www.amazon.com/MagicLight%C2%AE-WiFi-LED-Light-Bulb/dp/B00SIDVZSW
# as seen here.
import socket
import binascii
# Bulb address on the local network.
IP = "192.168.2.37"
PORT = 5577

mode = "31" # "default" mode
# Constant trailer bytes sent with every color packet (see setColor).
magicBytes = "00f00f"

# Single TCP socket reused by connect()/setColor().
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect():
    """Open the TCP connection to the bulb and print the outcome.

    Bug fix: a bare ``except:`` also swallowed KeyboardInterrupt and
    programming errors; only socket-level errors (OSError) are caught
    now, and the reason is included in the failure message.
    """
    try:
        s.connect((IP, PORT))
        print ("Connected to " + IP + ":" + str(PORT))
    except OSError as exc:
        print ("Failed to connect: " + str(exc))
# adjust the calibration to fit your bulb. Mine was *very* blue w/ white light.
def calibrate(color):
    """Scale the (r, g, b) channels to compensate for the bulb's
    blue-heavy white and return the adjusted tuple."""
    r, g, b = color
    return (int(r * 1), int(g * 0.95), int(b * 0.6))
# sends a (r,g,b) color to the lightbulb
def setColor(color):
    """Send an (r, g, b) color (0-255 per channel) to the bulb over the
    already-connected module socket."""
    print("Sending color: " + str(color))
    color = calibrate(color)
    # the structure of the packets sent to the light are
    # pattern, red, green, blue, "00f00f", some 1 byte checksum?
    # I have yet to establish a pattern between the colors and the checksum
    # but since it's only 255 bytes, I don't think it's too big a deal.
    # NOTE(review): range(255) tries checksum bytes 0-254 only; 0xff is
    # never attempted.
    for x in range(255):
        message = mode + format(color[0], "02x") + format(color[1], "02x") + format(color[2], "02x") + magicBytes + format(x, "02x")
        s.send(bytes.fromhex(message))
|
16,356 | ea2d8a03a5b7f9100d5c3f3f8d099803272f55fb | zoo = 'python', 'elephant', 'penguin'
print('Number of animals in the zoo is', len(zoo))
newzoo = 'monkey', 'camel', zoo
print('Number of cages in the new zoo is', len(newzoo))
print('All animals in new zoo are', newzoo)
print('Animals brought from old zoo are', newzoo[2])
print('Last animals brought from old zoo is', newzoo[2][2])
print('Number of animals in the new zoo is', len(newzoo)-1 + len(zoo))
# 案例
# 我会推荐你总是使用括号
# 来指明元祖的开始与结束
# 尽管括号是一个可选选项
# 明了胜过晦涩,显式优于隐式
zoo = ('python', 'elephant', 'penguin')
print('Number of animals in the zoo is', len(zoo))
new_zoo = 'monkey', 'camel', zoo
print('Number of cages in the new zoo is', len(new_zoo))
print('All animals in new zoo are', new_zoo)
print('Animals brought from old zoo are', new_zoo[2])
print('Last animals brought from old zoo is', new_zoo[2][2])
print('Number of animals in the new zoo is', len(new_zoo)-1 + len(newzoo[2]))
|
16,357 | e3960aca73ac0a7da6f4237f651bad7a4b549ed0 | from django.db import models
# Create your models here.
class Todo(models.Model):
    """A single to-do item."""
    # Short task description, shown as the item's string form.
    task=models.CharField(max_length=20)
    # Completion flag; new items start incomplete.
    isComplete=models.BooleanField(default=False)

    def __str__(self):
        return (self.task)
16,358 | fe954d96b24b95f458c96ba05d924e18c4febdb2 | class Solution:
def canJump(self, nums: list[int]) -> bool:
if len(nums) == 1:
return True
if nums[0] == 0:
return False
index = 0
steps = nums[index]
target = len(nums)-1
explored = []
stack = list(range(1,steps+1)) #store index
while len(stack) > 0:
index = stack.pop()
if index >= target:
return True
explored.append(index)
steps = nums[index]
new = [ x for x in range(index+1, index+steps+1) if x not in explored ]
stack.extend(new)
return False
s = Solution()
print(s.canJump([1,1,3]))
print(s.canJump([1,0,3]))
print(s.canJump([1,2,3]))
print(s.canJump([1,0,3]))
print(s.canJump([2,3,1,1,4]))
print(s.canJump([3,2,1,0,4]))
print(s.canJump([0]))
print(s.canJump([10]))
print(s.canJump([1,1,1,1,1,1,1,0]))
print(s.canJump([1,1,1,1,1,1,0,1]))
print(s.canJump([2,0,6,9,8,4,5,0,8,9,1,2,9,6,8,8,0,6,3,1,2,2,1,2,6,5,3,1,2,2,6,4,2,4,3,0,0,0,3,8,2,4,0,1,2,0,1,4,6,5,8,0,7,9,3,4,6,6,5,8,9,3,4,3,7,0,4,9,0,9,8,4,3,0,7,7,1,9,1,9,4,9,0,1,9,5,7,7,1,5,8,2,8,2,6,8,2,2,7,5,1,7,9,6]))
|
16,359 | f36224664a87694cc0d57b9147217974f4e10290 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test_ipu import IPUOpTest
import paddle
import paddle.nn.functional as F
import paddle.static
class TestBase(IPUOpTest):
    """Base IPU activation-op test: feeds a random tensor through a single
    op in every supported execution mode and checks the results.
    Subclasses override set_test_op() (and optionally set_data_feed)."""

    def setUp(self):
        self.set_atol()
        self.set_test_op()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    def set_test_op(self):
        # Default op under test; subclasses replace op/op_attrs.
        self.op = F.elu
        self.op_attrs = {}

    def set_data_feed(self):
        # The same random tensor is fed as fp32 and fp16 variants.
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}
        self.feed_list = list(self.feed_fp32.keys())

    def set_feed_attr(self):
        # Derive feed metadata (shape/name/dtype) from the fp32 feed.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

    @IPUOpTest.static_graph
    def build_model(self):
        # Static-graph program: one data input -> op -> fetch.
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
        )
        out = self.op(x, **self.op_attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        # Build and run once per non-skipped execution mode, then compare.
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()
class TestHardTanhCase0(TestBase):
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]) * 30
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
self.feed_list = list(self.feed_fp32.keys())
def set_test_op(self):
self.op = paddle.nn.functional.hardtanh
self.op_attrs = {}
class TestHardTanhCase1(TestHardTanhCase0):
def set_test_op(self):
self.op = paddle.nn.functional.hardtanh
self.op_attrs = {"min": 0.1, 'max': 10.0}
class TestEluCase1(TestBase):
def set_test_op(self):
self.op = F.elu
self.op_attrs = {"alpha": 0.3}
class TestHardShrinkCase0(TestBase):
def set_test_op(self):
self.op = F.hardshrink
self.op_attrs = {}
class TestHardSigmoidCase0(TestBase):
def set_test_op(self):
self.op = F.hardsigmoid
self.op_attrs = {}
class TestHardSigmoidCase1(TestBase):
def set_test_op(self):
self.op = F.hardsigmoid
self.op_attrs = {
'slope': 0.2,
'offset': 0.33,
}
class TestHardSwishCase0(TestBase):
def set_test_op(self):
self.op = F.hardswish
self.op_attrs = {}
class TestLeakyReluCase0(TestBase):
def set_test_op(self):
self.op = F.leaky_relu
self.op_attrs = {}
class TestLeakyReluCase1(TestBase):
def set_test_op(self):
self.op = F.leaky_relu
self.op_attrs = {'negative_slope': 0.2333}
class TestLog10Case0(TestBase):
def set_test_op(self):
self.op = paddle.log10
self.op_attrs = {}
class TestLog1pCase0(TestBase):
def set_test_op(self):
self.op = paddle.log1p
self.op_attrs = {}
class TestLog2Case0(TestBase):
def set_test_op(self):
self.op = paddle.log2
self.op_attrs = {}
class TestLogSigmoidCase0(TestBase):
def set_test_op(self):
self.op = F.log_sigmoid
self.op_attrs = {}
class TestLogSoftmaxCase0(TestBase):
def set_test_op(self):
self.op = F.log_softmax
self.op_attrs = {}
class TestMishCase0(TestBase):
def set_test_op(self):
self.op = F.mish
self.op_attrs = {}
class TestRelu6Case0(TestBase):
def set_test_op(self):
self.op = F.relu6
self.op_attrs = {}
class TestRsqrtCase0(TestBase):
def set_test_op(self):
self.op = paddle.rsqrt
self.op_attrs = {}
class TestSeluCase0(TestBase):
def set_test_op(self):
self.op = F.selu
self.op_attrs = {}
class TestSiluCase0(TestBase):
def set_test_op(self):
self.op = F.silu
self.op_attrs = {}
class TestSoftShrinkCase0(TestBase):
def set_test_op(self):
self.op = F.softshrink
self.op_attrs = {}
class TestSoftShrinkCase1(TestBase):
def set_test_op(self):
self.op = F.softshrink
self.op_attrs = {'threshold': 0.2333}
class TestSquareCase0(TestBase):
def set_test_op(self):
self.op = paddle.square
self.op_attrs = {}
class TestSwishCase0(TestBase):
def set_test_op(self):
self.op = F.swish
self.op_attrs = {}
class TestTanhShrinkCase0(TestBase):
def set_atol(self):
super().set_atol()
self.atol = 1e-7
def set_test_op(self):
self.op = F.tanhshrink
self.op_attrs = {}
class TestThresholdedReluCase0(TestBase):
def set_test_op(self):
self.op = F.thresholded_relu
self.op_attrs = {}
class TestThresholdedReluCase1(TestBase):
def set_test_op(self):
self.op = F.thresholded_relu
self.op_attrs = {'threshold': 0.2333}
if __name__ == "__main__":
unittest.main()
|
16,360 | 2fea33feb328d518187252b11cb07626e06e5ce1 | import sys
import os
import time
import re
import json
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
try: # Python 3.x
from urllib.parse import quote as urlencode
from urllib.request import urlretrieve
except ImportError: # Python 2.x
from urllib import pathname2url as urlencode
from urllib import urlretrieve
try: # Python 3.x
import http.client as httplib
except ImportError: # Python 2.x
import httplib
from astropy.table import Table
from matplotlib.patches import Polygon
import pprint
pp = pprint.PrettyPrinter(indent=4)
def mastQuery(request):
    """Perform a MAST query.

    Parameters
    ----------
    request (dictionary): The MAST request json object

    Returns head,content where head is the response HTTP headers, and content
    is the returned data, decoded as UTF-8 text."""
    server='mast.stsci.edu'
    # Grab Python Version (reported in the User-agent header below)
    version = ".".join(map(str, sys.version_info[:3]))
    # Create Http Header Variables
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain",
               "User-agent":"python-requests/"+version}
    # Encoding the request as a json string, then URL-quoting it for the form body
    requestString = json.dumps(request)
    requestString = urlencode(requestString)
    # opening the https connection
    conn = httplib.HTTPSConnection(server)
    # Making the query
    conn.request("POST", "/api/v0/invoke", "request="+requestString, headers)
    # Getting the response
    resp = conn.getresponse()
    head = resp.getheaders()
    content = resp.read().decode('utf-8')
    # Close the https connection
    conn.close()
    return head,content
def parse_footprint(s_region):
    """Parse a MAST ``s_region`` string ("POLYGON ra1 dec1 ra2 dec2 ...")
    into an (N, 2) float array of [ra, dec] vertices.

    RA values (column 0) are wrapped into [0, 360). When the token count is
    odd, one extra leading token (e.g. a frame name) precedes the coordinate
    pairs and is dropped.
    """
    reg = s_region.split()[1:]        # drop the leading shape keyword
    if len(reg) % 2:
        reg = reg[1:]                 # drop the extra non-coordinate token
    # Bug fix: reshape(-1, 2) infers the row count as an int. The original
    # reshape(len(reg)/2, 2) passed a float under Python 3 and raised.
    reg = np.array(reg).astype(float).reshape(-1, 2)
    reg[:, 0] = reg[:, 0] % 360.
    return reg
def plot_footprints(s_regions,ax=None):
    """Draw each s_region footprint as an unfilled polygon outline on *ax*
    (a new figure/axes is created when ax is None), one rainbow color per
    region, then show the figure."""
    if not ax:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    colors = cm.rainbow(np.linspace(0, 1, len(s_regions)))
    for i, s_reg in enumerate(s_regions):
        reg = parse_footprint(s_reg)
        # fill=False means only the outline is drawn in the chosen color
        rect = Polygon(reg, alpha=.1, closed=True, fill=False, color=colors[i])
        ax.add_patch(rect)
    plt.show()
if __name__ == '__main__':
    # 1) Resolve the object name to (RA, Dec) with the MAST name resolver.
    objectOfInterest = 'GD153'
    resolverRequest = {'service':'Mast.Name.Lookup',
                       'params':{'input':objectOfInterest,
                                 'format':'json'},
                       }
    headers,resolvedObjectString = mastQuery(resolverRequest)
    resolvedObject = json.loads(resolvedObjectString)
    objRa = resolvedObject['resolvedCoordinate'][0]['ra']
    objDec = resolvedObject['resolvedCoordinate'][0]['decl']
    # 2) Cone search (0.3 deg) for HST WFC3/UVIS images around that position.
    mashupRequest = {
        "service":"Mast.Caom.Filtered.Position",
        "format":"json",
        "params":{
            "columns":"*",
            "filters":[
                {"paramName":"dataproduct_type",
                 "values":["image"]},
                {"paramName":"project",
                 "values":["HST"]},
                {"paramName":"instrument_name",
                 "values":["WFC3/UVIS"]}
            ],
            "obstype":"all",
            "position":"{}, {}, 0.3".format(objRa, objDec)
        }}
    headers,mastDataString = mastQuery(mashupRequest)
    mastData = json.loads(mastDataString)
    # print(mastData.keys())
    print("Query status:",mastData['status'])
    # pp.pprint(mastData['fields'])
    data = mastData['data']
    # pp.pprint(data[0])
    print 'N: {}'.format(len(mastData['data']))
    df = pd.read_json(json.dumps(data))
    # print df['proposal_id']
    # Keep only named targets whose products are fully calibrated.
    df = df.loc[(df['target_name'] != 'NONE') & (df['calib_level'] > 2)]
    # print set(df.loc[df['target_name'] == 'NONE']['obs_title'])
    print len(df)
    # 3) Plot observation centers and overlay each footprint near the target.
    ax = df.plot('s_ra','s_dec',kind='scatter',alpha=.3)
    plt.xlim(195,194)
    plt.ylim(21.8,22.2)
    plot_footprints(df['s_region'].values, ax)
    # print df.columns
    print df['obs_id']
|
16,361 | 590f8b6458d11642f67d99ddc283fad98b9954df | # File transfer server side
'''
Program for server multithreading
Allows massive transfer from a specific folder in the server side to another folder in the client
'''
import socket
import os, sys
import threading
import time
#import SocketServer
# Main variables and constants
port = 8080
host = " 10.182.179.221" # $$$ Enter the local IP address of your computer $$$.  NOTE(review): leading space looks accidental; mainServer.__init__ overwrites host with gethostname() anyway
fpath = "G:\\Distributed Systems\\multithreading\\server\\" # $$$Enter the path to the folder where the server folder is placed on your computer$$$.
bsize = 1024  # transfer chunk size in bytes
class mainServer(object):
    """Owns the listening socket: binds, accepts clients, and hands each
    accepted connection to a new ServerThread. Constructing an instance
    starts serving immediately (runServer blocks forever)."""
    def __init__(self,host,port):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # NOTE(review): the host argument is discarded and replaced by the local hostname
        host = socket.gethostname()
        self.sock.bind((host,port))
        self.sock.listen(5)
        print ('Server Ready...')
        self.runServer()  # blocks forever, so __init__ never returns
        self.delete()  # NOTE(review): unreachable, and no delete() method is defined
    def runServer(self):
        # loop to get a connection
        while True:
            print ("waiting for connections...")
            (conn, (ip,port)) = self.sock.accept()
            conn.settimeout(360)
            threading.Thread(target = ServerThread, args = (conn,ip,port)).start()
            time.sleep(40)
            '''if input() == "q":
                break'''
class ServerThread(object):
    """Per-connection handler: offers interactive delete/rename of files in
    the server folder, then (optionally) streams every file in fpath to the
    client in bsize chunks. NOTE(review): indentation below was reconstructed
    from a whitespace-stripped source -- verify the nesting against the
    original file."""
    def __init__(self,conn,ip,port):
        self.ip = ip
        self.port = port
        print ('{0}-{1}:{2}'.format("New client connected",str(ip),str(port)))
        #self.runThread(conn,'{0}:{1}'.format(str(ip),str(port)))
        c=input("do you want to delete files from server Press Y or N\n") #Delete Operation
        if(c=='Y'):
            # List the folder contents so the operator can pick a file.
            for root, dirs, files in os.walk(fpath):
                for file in files:
                    print("File = %s" % file)
            delfile= input("enter the name of file you want to delete along with extension\n")
            # NOTE(review): this hard-coded path differs from fpath above -- confirm which folder is intended.
            os.remove("G:\\File transfer\\multithreading\\server\\"+delfile) #$$$Enter the path to the folder where the server folder is placed on your computer$$$.
            print("file deleted\n")
        else:
            print("file not deleted")
        x=input("Do you want to rename your file?!! Press Y or N\n") #Rename Operation
        if(x=='Y'):
            for root, dirs, files in os.walk(fpath):
                for file in files:
                    print("File = %s" % file)
            prevname=input("Enter the name of the file you want to rename from above ?!!\n")
            newname=input("Enter the new name of the file along with extension!!\n ")
            os.rename("G:\\File transfer\\multithreading\\server\\"+prevname,"G:\\File transfer\\multithreading\\server\\"+newname) #$$$Enter the path to the folder where the server folder is placed on your computer$$$.
            print("file renamed")
        else:
            print("not renamed")
        self.runThread(conn,'{0}:{1}'.format(str(ip),str(port)))
    def runThread(self,conn,addr):
        z=input("Do you want to dowanload? Press Y or N\n") #Download Operation
        if(z=='Y'):
            flcount=0
            # NOTE(review): this loop never exits -- the folder is re-sent every 10 seconds.
            while True:
                dirs = os.listdir(fpath)
                time.sleep(10)
                for fl in dirs:
                    msg = '{0}{1}'.format("Sending file: ",fl)
                    conn.send(msg.encode())
                    if "ok" in conn.recv(bsize).decode(): # client ready to receive
                        selfl = '{0}{1}'.format(fpath,fl)
                        f = open(selfl,'rb')
                        payload = f.read(bsize)
                        # Chunked send: wait for the client's "ok" before reading the next chunk.
                        while (payload):
                            conn.send(payload)
                            print('\n........\n')
                            if "ok" in conn.recv(bsize).decode():
                                payload = f.read(bsize)
                        conn.send("eof".encode())
                        f.close()
        else:
            print("Download cancelled\n")
# Entry point: constructing the server binds, listens, and blocks in runServer().
srv = mainServer(host,int(port))
|
16,362 | 1fddc86c367058e26d1189c31a1691f22293c960 | import string
# Build a de-duplicated, punctuation-stripped word list from the lyrics file,
# one word per output line.
with open("MGKmodlyrics.txt", "r") as MGKreader:
    with open ("MGKdicLyrics.txt", "w") as MGKwriter:
        # NOTE(review): splitting on " " only -- words separated by newlines stay fused together.
        text = set(MGKreader.read().split(" "))
        for word in text:
            if word == "" or word =="\n":
                continue
            for punc in string.punctuation:
                word = word.replace(punc,"")
            MGKwriter.write(word+"\n")
class Visitor:
    """Plain record holding one visitor's identity and contact details."""

    def __init__(self, _id, name, address, phone_number):
        self.id = _id
        self.name = name
        self.address = address
        self.phone_number = phone_number

    @property
    def serialize(self):
        """Dict view of this visitor (note the camelCase 'phoneNumber' key)."""
        return dict(
            id=self.id,
            name=self.name,
            address=self.address,
            phoneNumber=self.phone_number,
        )
|
16,364 | 0a0a77ce910f5b9135debfbf14a6d602cf19b7e7 |
#lab 6-1: print every value that occurs at least 3 times in L
from collections import Counter
L = [1,1,7,7,7,4,4,4,2,1,5,5,9,11,3,'a','x',9,8,'b','b','z','b']
counts = Counter(L)
for key in counts.keys():
    #(==)------------------
    #iterating "for key in counts:" would likewise yield only the keys
    if counts[key] >= 3:
        print(key)
#lab 6-2: evaluate e**pi + 10
import math
y = math.exp(math.pi) + 10
print(y)
#lab 6-3: count all (first, second, third, forth) combinations
from itertools import product
first = ['A','E','I','O','U']
second = ['A','B','C','D','E','a','b','c','d','e']
third = [2,3]
forth = [1,3,5]
combinations = list(product(first,second,third,forth))
len(combinations)  # NOTE(review): bare expression -- its value is discarded outside a REPL
#lab 6-4
def superviesd_learning():
    """Return a one-line Korean description of supervised learning."""
    # NOTE(review): "superviesd" misspells "supervised"; kept to avoid breaking callers.
    return '지도학습은 정답을 아는 학습 데이터로부터 하나의 함수를 추론하는 기계학습의 한 방법'
def unsuperviesed_learning():
    """Return a one-line Korean description of unsupervised learning."""
    # NOTE(review): "unsuperviesed" misspells "unsupervised"; kept to avoid breaking callers.
    return '비지도학습은 정답을 모르는 데이터로부터 숨겨진 관계를 설명하는 함수를 추론하는 기계학습의 한 방법'
|
16,365 | 501f36426d38be5077b2cca968a65c4620866c67 | import requests
import json
import time
# Harvest GitHub repositories created 2020-02-11..26 (language:python),
# issuing one Search API query per creation minute to stay under the
# API's per-query result cap; results are dumped to one JSON file per day.
total_count = 0
for day in range(11, 27):
    items = []
    total_count = 0
    # Zero-pad the day into an ISO date string.
    if len(str(day)) == 1:
        date = '2020-02-' + '0' + str(day)
    else:
        date = '2020-02-' + str(day)
    print('Processing date ', date)
    for hour in range(24):
        time_str = ''
        if hour < 10:
            hour_str = '0' + str(hour)
        else:
            hour_str = str(hour)
        print('Processing hour ', hour)
        for minute in range(60):
            if minute < 10:
                time_str = hour_str + ':' + '0' + str(minute)
            else:
                time_str = hour_str + ':' + str(minute)
            # NOTE(review): hard-coded credentials committed to source -- rotate them
            # and load from environment/config instead.
            r = requests.get('https://api.github.com/search/repositories?q=+language:python+created:'+date+'T'+time_str+'&page:1', auth=('annyl', 'jyfteh-poPcym-pynhy4'))
            time.sleep(2)  # crude rate limiting between API calls
            result = r.text
            # NOTE(review): rewriting JSON literals and eval()-ing the body executes
            # whatever the server returns -- r.json() / json.loads would be safe and simpler.
            result = result.replace('false', 'False')
            result = result.replace('null', 'None')
            result = result.replace('true', 'True')
            result_eval = eval(result)
            # At most 30 items come back per page, so cap the tally accordingly.
            if result_eval["total_count"] > 30:
                total_count += 30
                items.extend(result_eval["items"])
            else:
                total_count += result_eval["total_count"]
                items.extend(result_eval["items"])
    print(total_count)
    with open(date+'.txt', 'w') as outfile:
        json.dump({date:items}, outfile)
|
16,366 | 21a8ad7fce6472249ea03e4ff0b5c5cd3eadda87 | #!/usr/bin/python3
def simple_delete(adict, key=""):
    """Delete *key* from *adict* if present and return the dictionary.

    Bug fix: the original ``return adict.del(key)`` is a SyntaxError --
    ``del`` is a statement keyword, not a method. ``dict.pop`` with a
    default deletes without raising when the key is absent.
    """
    adict.pop(key, None)
    return adict
|
16,367 | 34281d116e0964a9444adca156c46455c15ae2ac | import classes
def cadastroMateria(lista, nome, codigo):
    # Append a new Materia built from (nome, codigo) to the registry list in place.
    lista.append(classes.Materia(nome, codigo))
def cadastroProfessor(lista, nome, departamento):
    # Append a new Professor built from (nome, departamento) to the registry list in place.
    lista.append(classes.Professor(nome, departamento))
def cadastroAluno(lista, nome, dre, periodo):
    # Append a new Aluno built from (nome, dre, periodo) to the registry list in place.
    lista.append(classes.Aluno(nome, dre, periodo))
def cadastroTurma(lista, nome):
    # Append a new Turma built from nome to the registry list in place.
    lista.append(classes.Turma(nome))
def DesignaProfessor(lista, nome_turma, nome_professor):
    """Assign *nome_professor* (via DesProfessor) to every turma in *lista*
    whose name equals *nome_turma*."""
    for turma in lista:
        if turma.nome == nome_turma:
            turma.DesProfessor(nome_professor)
def ProcuraLista(lista, nome):
    """Return the first element of *lista* whose .nome equals *nome*, or None."""
    return next((item for item in lista if item.nome == nome), None)
def printLista(lista):
    """Print every element of *lista*, each surrounded by blank lines."""
    for entry in lista:
        print('')
        print(entry)
        print('')
def VerificaInteiro(numero):
    """Return True when *numero* parses as a float, else False.

    Despite the name ("integer check"), any float-parseable string passes.
    """
    try:
        float(numero)
    except ValueError:
        return False
    return True
def VerificaVazio(string):
    """Return True when *string* is the empty string, False otherwise."""
    return string == ''
|
16,368 | f79f12b6de1fa82661c1dd926b9ab5c7a3299db0 | import getopt
import hashlib
import sys
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from os import curdir, sep
import subprocess
#from subprocess import subprocess,Popen,PIPE
import os
import urllib2
#import try_listen as t
from lrucache import LRUCache
class RequestHandler(BaseHTTPRequestHandler):
    """Replica-server handler: serves wiki pages out of a local LRU file
    cache, fetching misses from the origin server and caching them.
    Class attributes port/origin/lrucache are injected in __main__ below."""
    #Overriding do_GET method to serve contents from local cache
    #and fetch contents from origin and store them here.
    def do_GET(self):
        print "self.port", self.port
        print "self.origin", self.origin
        print "------------LRU CACHE CAPACTIY-----------"
        print self.lrucache.max_capacity
        print "------------LRU CACHE size in GET-----------"
        print self.lrucache.size
        print "------------LRU CACHE-----------"
        print self.lrucache.cache
        print "-----------------------"
        # Any of these paths is treated as the wiki home page.
        homePages = ["","wiki","wiki/","wiki/Main_Page","wiki/Main_Page/"]
        pathAndFilename = self.path.split("/",1)[1]
        #If path is found in the replica
        if pathAndFilename in homePages:
            pathAndFilename = "wiki/Main_Page"
        # Cache keys flatten "/" to "." so they are valid file names.
        serverPathAndFilename = pathAndFilename.replace("/",".")
        try:
            # Special:Random must never be answered from cache.
            if serverPathAndFilename in self.lrucache.cache and self.path != "/wiki/Special:Random":
                print "-------before cache method-----------"
                self.get_from_cache(serverPathAndFilename)
            else:
                print "----------GET FROM ORIGIN--------------"
                print "self.path-------",self.path
                response = urllib2.urlopen("http://"+self.origin+":8080" + self.path)
                # The origin may redirect; re-derive the cache key from the final URL.
                serverPathAndFilename = response.geturl().split("/",3)[-1].replace("/",".")
                if serverPathAndFilename in self.lrucache.cache:
                    print "-------before cache method-----------"
                    self.get_from_cache(serverPathAndFilename)
                else:
                    print "-------before origin method-----------"
                    self.get_from_origin(response, serverPathAndFilename)
        except:
            # NOTE(review): bare except maps every failure (including coding bugs) to a 404.
            self.send_error(404,'File Not Found: %s' % self.path)
            return
    #Method to get data from cache and respond
    def get_from_cache(self, serverPathAndFilename):
        print "------------IN GET FROM CACHE------------"
        filepath = os.path.join(os.getcwd(),"cache",serverPathAndFilename)
        f = open(filepath)
        self.send_response(200)
        self.send_header('Content-type','')
        self.end_headers()
        self.wfile.write(f.read())
        f.close()
        # Refresh this entry's recency in the LRU bookkeeping.
        self.lrucache.update_cache(serverPathAndFilename)
    #Get contents from the origin server
    def get_from_origin(self, response, serverPathAndFilename):
        print "-----------IN GET FROM ORIGIN-------------"
        data = response.read()
        self.send_response(200)
        self.send_header('Content-type','')
        self.end_headers()
        self.wfile.write(data)
        # Persist the body locally and record it (with its size) in the LRU index.
        filepath = os.path.join(os.getcwd(),"cache",serverPathAndFilename)
        open(filepath, 'w').write(data)
        self.lrucache.add_file_to_cache(serverPathAndFilename, len(data))
# Method to parse the input and get port and origin
def parse_input(arg):
    """Extract (port, origin) from argv-style input given as -p/-o flags.

    Missing flags leave the defaults (0, ''); an unexpected flag pair
    terminates the program with a usage message.
    """
    port = 0
    origin = ''
    opts, _ = getopt.getopt(arg[1:], 'p:o:')
    for flag, value in opts:
        if flag == '-p':        # listening port
            port = int(value)
        elif flag == '-o':      # origin host name
            origin = value
        else:
            sys.exit("Please enter inputs as: ./httpserver -p <port> -o <origin>")
    return port, origin
#Replica Server starts here by initializing cache and starting the HTTPserver
if __name__ == '__main__':
    [port, origin] = parse_input(sys.argv)
    print port
    print origin
    serverAddress = ('', port)
    # Injected as class attributes so every RequestHandler instance sees them.
    RequestHandler.port = port
    RequestHandler.origin = origin
    cache_max_capacity = 9.9 * 1024 * 1024  # just under 10 MB
    RequestHandler.lrucache = LRUCache(cache_max_capacity)
    RequestHandler.lrucache.create_cache_directory("cache")
    RequestHandler.lrucache.read_files_from_cache("cache")  # warm the index from disk
    httpServer = HTTPServer(serverAddress, RequestHandler)
    print "httpServer has been created"
    httpServer.serve_forever()
|
16,369 | e2ff8eafeed253bcc264141a6c27a80b403e1ff1 | # linked list
# stack
# queue
# D queue
# Demo "linked list" backed by a Python list, driven by a console menu.
ll = []
ll.append("A")
ll.append("B")
ll.append("C")
print(f"The link list is {ll}")
choice = 0
while choice < 5:
    print('Linked list operations')
    print('1 to add the elements')
    print('2 to remove the elements')
    print('3 to replace the elements')
    print('4 Search for the element')
    print('5 Exist the program')
    choice = int(input('Your choice'))
    if choice == 1:
        ele = input('Enter the element')
        pos = int(input('Enter the position'))
        ll.insert(pos, ele)
    elif choice == 2:
        try:
            ele = input('Enter the element to removed')
            ll.remove(ele) # error
        except ValueError:
            print('Element not in the list')
    elif choice == 3:
        # Replace = drop the element at pos, then insert the new value there.
        ele = input('Enter the element')
        pos = int(input('Enter the position'))
        ll.pop(pos)
        ll.insert(pos, ele)
    elif choice == 4:
        try:
            ele = input('please Enter the element to be searched')
            pos = ll.index(ele)
            print(pos)
        except ValueError:
            print('Element not in the list')
    else:
        break
print(f'The upated list is {ll}')
# Stack
# Pop
#
# pusp
#
# peep
# empty
# LIFO
class Stack:
    """LIFO stack backed by a Python list (the top is the end of the list).

    Bug fixes relative to the original:
    - ``_init_`` was misspelled, so ``__init__`` never ran and ``self.st``
      never existed (every operation raised AttributeError).
    - ``push``/``pop``/``peep`` guarded on ``isempty()`` with inverted
      logic -- pushing onto an empty stack did nothing.
    - the class-level ``isEmpty`` flag shadowed real state and made
      ``display`` report "blank" forever after the first empty check.
    """

    def __init__(self):
        self.st = []

    def isempty(self):
        """Return True when the stack holds no elements."""
        return self.st == []

    def pop(self):
        """Remove and return the top element, or None when empty."""
        if self.isempty():
            return None
        return self.st.pop()

    def push(self, element):
        """Place *element* on top of the stack."""
        self.st.append(element)

    def peep(self):
        """Return the top element without removing it, or None when empty."""
        if self.isempty():
            return None
        return self.st[-1]

    def display(self):
        """Print the stack contents, or a notice when it is empty."""
        if self.isempty():
            print('List is blank')
        else:
            print(self.st)
# Console menu exercising the Stack class above.
stt = Stack()
choice = 0
while choice < 5:
    print('Linked list operations')  # NOTE(review): header says "Linked list" but this menu drives the stack
    print('1 to remove the elements')
    print('2 to push the elements')
    print('3 to view upper element without deleting')
    print('4 display')
    choice = int(input('Your choice'))
    if choice == 1:
        print(stt.pop())
    elif choice == 2:
        ele = input('Please Enter the element')
        stt.push(ele)
        stt.display()
    elif choice == 3:
        print(stt.peep())
    elif choice == 4:
        stt.display()
    else:
        break
16,370 | 03283967bae56ba0f2d81348d0c35bc48b38c1ba | #!/usr/bin/env python3.3
# -*- coding: utf-8 -*-
#
# Interface for the database connection
#
# Copyright (c) 2014-2015 Pieter-Jan Moreels
#
# Software is free software released under the "Original BSD license"
import os
import sys
_runPath = os.path.dirname(os.path.realpath(__file__))
# Assemble the database interface by concatenating source fragments that live
# next to this file, in a fixed order (Head -> CRUD sections -> Tail), then
# executing the combined source in this module's namespace.
code = (open(os.path.join(_runPath, "Head.py"), "r").read())
code += (open(os.path.join(_runPath, "Add.py"), "r").read())
code += (open(os.path.join(_runPath, "Get-IDs.py"), "r").read())
code += (open(os.path.join(_runPath, "Get-Names.py"), "r").read())
code += (open(os.path.join(_runPath, "Get-Objects.py"), "r").read())
code += (open(os.path.join(_runPath, "Get-Dicts.py"), "r").read())
code += (open(os.path.join(_runPath, "Update.py"), "r").read())
code += (open(os.path.join(_runPath, "Calculations.py"), "r").read())
code += (open(os.path.join(_runPath, "Tail.py"), "r").read())
# NOTE(review): exec() of file contents -- acceptable only because these
# fragments ship with the package; never point this at untrusted paths.
exec(code)
|
16,371 | 91012de02406063ed93d953e4225e7b5ba8bb872 | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category, db
class TriviaTestCase(unittest.TestCase):
    """Integration tests for the trivia API, run against the trivia_test DB."""

    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "trivia_test"
        self.database_path = "postgres://{}/{}".format('localhost:5432', self.database_name)
        setup_db(self.app, self.database_path)

        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()

        # Payload reused by the create/read/delete tests below.
        self.new_Question = {
            'question': 'What is the tallest building',
            'answer': 'burjdubai',
            'category': '4',
            'difficulty': '2'
        }

    def tearDown(self):
        """Executed after each test."""
        pass

    def test_getsimilarquestions(self):
        """Search endpoint responds 200 with a non-negative total."""
        res = self.client().get('/questions/search')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertGreaterEqual(data[0]['totalQuestions'], 0)

    def test_getallquestions(self):
        """Listing endpoint responds 200 with a non-negative total."""
        res = self.client().get('/questions')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertGreaterEqual(data[0]['totalquestions'], 0)

    def test_post_new_question(self):
        """Creating a question succeeds and the row is queryable afterwards."""
        res = self.client().post('/question/create', data=self.new_Question)
        self.assertEqual(res.status_code, 200)
        question = db.session.query(Question).filter_by(answer='burjdubai')
        # filter query should not return null
        global filter_id
        filter_id = question[0].id
        self.assertIsNotNone(question)

    def test_getspecificquestion(self):
        """Fetching a question by id returns the created question/answer pair."""
        question = db.session.query(Question).filter_by(answer='burjdubai')
        filter_id = question[0].id
        # Bug fix: filter_id is an int, so the original "'/question/' + filter_id"
        # raised TypeError at runtime; convert to str before concatenating.
        url = '/question/' + str(filter_id)
        print(url)
        res = self.client().post(url)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data[0]['question'], 'What is the tallest building')
        self.assertEqual(data[0]['answer'], 'burjdubai')

    def test_deletequestion(self):
        """Deleting the question created above responds 200."""
        # Create entry --> executed in a previous test (ordering dependency).
        question = db.session.query(Question).filter_by(answer='burjdubai')
        filter_id = question[0].id
        res = self.client().delete('/question/delete', data={'id': filter_id})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)

    def test_getquestionscategory(self):
        """Quiz endpoint returns a question/answer pair."""
        res = self.client().post('/play/quiz')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertIsNotNone(data[0]['question'])
        self.assertIsNotNone(data[0]['answer'])

    def test_playquestion(self):
        """Quiz endpoint returns a question/answer pair."""
        res = self.client().post('/play/quiz')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertIsNotNone(data[0]['question'])
        self.assertIsNotNone(data[0]['answer'])
# Make the tests conveniently executable
if __name__ == "__main__":
    unittest.main()
|
16,372 | 7d6af6b2d71c0273703bfe63ca5dec1eb350fb63 | import jieba
import random
import glob
def load_stop_words(path):
    """Read a UTF-8 stop-word file (one word per line) into a list of
    stripped strings; undecodable bytes are ignored."""
    stop_words = []
    with open(path, 'r', encoding='utf8', errors='ignore') as handle:
        for raw in handle:
            stop_words.append(raw.strip())
    return stop_words
def load_content(path):
    """Concatenate the stripped lines of a GBK-encoded file into one string;
    undecodable bytes are ignored."""
    with open(path, 'r', encoding='gbk', errors='ignore') as handle:
        return ''.join(raw.strip() for raw in handle)
def get_TF(words, topK=10):
    """Return the *topK* most frequent (word, count) pairs of *words*,
    highest count first.

    Bug fix: the original iterated the module-level ``split_words`` instead
    of the ``words`` parameter, so the argument was silently ignored (and
    the function raised NameError when called before ``__main__`` ran).
    """
    tf_dic = {}
    for w in words:
        tf_dic[w] = tf_dic.get(w, 0) + 1
    return sorted(tf_dic.items(), key=lambda x: x[1], reverse=True)[:topK]
if __name__ == '__main__':
    # Corpus: every article in one sogou-news category directory.
    files = glob.glob('D:/ML/BookSourceCode/learning-nlp/chapter-3/data/news/C000013/*.txt')
    corpus = [load_content(x) for x in files]
    stop_words = load_stop_words('D:/ML/BookSourceCode/learning-nlp/chapter-3/data/stop_words.utf8')
    split_words = list(jieba.cut(corpus[0]))  # segment only the first article
    # Drop stop words before counting
    split_words = [w for w in split_words if w not in stop_words]
    print('分词效果:' + '/ '.join(split_words))
    # Show the 10 most frequent words
    top10_words = get_TF(split_words, topK=10)
    print(top10_words)
|
16,373 | e0822c1a108b3b79ece344abcabc3cf6735de9d0 | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.system.check import check
from collections import namedtuple
class ingest_method_field(namedtuple('ingest_method_field', 'key, optional')):
    """Immutable descriptor of one ingest-method field: its key name and
    whether the field is optional (defaults to required)."""
    def __new__(clazz, key, optional = False):
        # Validate up front; the check helpers raise on wrong argument types.
        check.check_string(key)
        check.check_bool(optional)
        # Delegate to the namedtuple base to construct the tuple itself.
        return clazz.__bases__[0].__new__(clazz, key, optional)
|
16,374 | 73f941284d31a668d6002299c80e140492ccea0e | import distutils.version
import logging
import os
import sys
from gym import error
from gym.configuration import logger_setup, undo_logger_setup
from gym.utils import reraise
from gym.version import VERSION as __version__
logger = logging.getLogger(__name__)
# Do this before importing any other gym modules, as most of them import some
# dependencies themselves.
def sanity_check_dependencies():
    """Warn (without failing) when numpy/requests are older than gym's
    minimum supported versions; importing six here also verifies it is
    installed."""
    import numpy
    import requests
    import six
    if distutils.version.LooseVersion(numpy.__version__) < distutils.version.LooseVersion('1.10.4'):
        logger.warn("You have 'numpy' version %s installed, but 'gym' requires at least 1.10.4. HINT: upgrade via 'pip install -U numpy'.", numpy.__version__)
    if distutils.version.LooseVersion(requests.__version__) < distutils.version.LooseVersion('2.0'):
        logger.warn("You have 'requests' version %s installed, but 'gym' requires at least 2.0. HINT: upgrade via 'pip install -U requests'.", requests.__version__)
sanity_check_dependencies()
from gym.core import Env, Space, Wrapper, ObservationWrapper, ActionWrapper, RewardWrapper
from gym.benchmarks import benchmark_spec
from gym.envs import make, spec
from gym.scoreboard.api import upload
from gym import wrappers
__all__ = ["Env", "Space", "Wrapper", "make", "spec", "upload", "wrappers"]
|
16,375 | 3295229dd011a2df7d60a159994aa4d52af6d059 | import arcpy
import os
import sys
import shutil
# Bulk-load GeoCover V2 map deliveries into the project geodatabase, one
# feature class at a time, then delete each processed delivery folder.
projectdir="D:/CCW20"
targetgeodatabase=projectdir+"/GIS/"+"geocover.gdb"
targetworkspace=targetgeodatabase+"/GC_ROCK_BODIES"
# Feature-class names inside the GC_ROCK_BODIES dataset.
bedrock="GC_BEDROCK"
exploit_PLG="GC_EXPLOIT_GEOMAT_PLG"
exploit_PT="GC_EXPLOIT_GEOMAT_PT"
fossils="GC_FOSSILS"
linearobjects="GC_LINEAR_OBJECTS"
pointobjects="GC_POINT_OBJECTS"
surfaces="GC_SURFACES"
unco="GC_UNCO_DESPOSIT"
#list all directories
# NOTE(review): importdir is used below but never assigned in this file -- this
# raises NameError as written; it presumably should point at the delivery folder.
os.chdir(importdir)
V2maplist=[]
for f in os.listdir(importdir):
    if "-V2-" in f and ".zip" not in f:
        V2maplist.append(f)
#load data into target geodatabase
for geocoverdir in V2maplist:
    print geocoverdir
    if "Data" in os.listdir(importdir+"/"+geocoverdir):
        if "FGDB" in os.listdir(importdir+"/"+geocoverdir+"/"+"Data"):
            os.chdir(importdir + "/" + geocoverdir+"/"+"Data/FGDB/de")
            for gd in os.listdir(importdir + "/" + geocoverdir+"/"+"Data/FGDB/de"):
                if ".gdb" in gd:
                    sourcegeodatabase=importdir + "/" + geocoverdir+"/"+"Data/FGDB/de/"+gd
                    #load data ("TEST" = abort the append on schema mismatch)
                    #arcpy.LoadData_production(targetgeodatabase, sourcegeodatabase+"/GC_ROCK_BODIES", targetgeodatabase+"/GC_ROCK_BODIES")
                    arcpy.Append_management([sourcegeodatabase+"/GC_ROCK_BODIES/"+bedrock], targetworkspace+"/"+bedrock, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + exploit_PLG],targetworkspace + "/" + exploit_PLG, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + exploit_PT], targetworkspace + "/" + exploit_PT, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + fossils],targetworkspace + "/" + fossils, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + linearobjects],targetworkspace + "/" + linearobjects, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + pointobjects], targetworkspace + "/" + pointobjects, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + surfaces],targetworkspace + "/" + surfaces, "TEST")
                    arcpy.Append_management([sourcegeodatabase + "/GC_ROCK_BODIES/" + unco],targetworkspace + "/" + unco, "TEST")
                    # Clean up the delivery folder once its data is appended.
                    shutil.rmtree(importdir + "/" + geocoverdir, ignore_errors=True)
                    print geocoverdir+" loaded ..."
print "done .."
16,376 | 60e2cdaee577a426d31c0601fafbbf16b5e45879 | # -*- coding: utf-8 -*-
"""
@author: Brandon Langley
bplangl
CPSC 4820
Project 4
"""
#import pandas as pd
import os.path
def main():
    """Interactive naive-Bayes spam classifier: trains on a labelled file,
    then scores a labelled test file and prints confusion-matrix metrics.
    Input line format: a leading 0/1 label digit followed by the message."""
    spam =0
    ham=0
    counted=dict()
    #trainig data
    fName="GEASTrain.txt"
    fName=input("Please Enter the file name of the Training Data: ")
    while os.path.exists(fName) is not True:
        print("\n\nInvalid File Name")
        fName=input("Please Enter the file name of the Training Data: ")
    f=open(fName,"r")
    #stopWords-a list of words which are universal in both spam and non spam email and there for unhelpful in classification and will be ignored
    fstop="StopWords.txt"
    fstop=input("Please Enter the file name of the Stop Words: ")
    while os.path.exists(fstop) is not True:
        print("\n\nInvalid File Name")
        fstop=input("Please Enter the file name of the Stop Words: ")
    fs=open(fstop,"r")
    stopWords = fs.read().splitlines()
    #print(stopWords)
    # Training pass: tally per-word ham/spam document counts.
    line=f.readline()
    while line!="":
        isSpam=int(line[:1])
        if isSpam==1:
            spam=spam+1
        else :
            ham=ham+1
        line=cleanText(line[1:])
        words=line.split()
        words=set(words)
        counted=countedWords(words,isSpam,counted,stopWords)
        line =f.readline()
    # Laplace-style smoothing (k = 0.1) turns counts into rates per class.
    pl=percentList(.1,counted,spam,ham)
    total=spam+ham
    testSpam=0
    testHam=0
    tp=0
    tn=0
    fp=0
    fn=0
    #test file with known data to run against the trained algorithm to determine effectiveness of the classifier
    fTest="GEASTest.txt"
    fTest=input("Please Enter the file name of the Test Data: ")
    while os.path.exists(fTest) is not True:
        print("\n\nInvalid File Name")
        fTest=input("Please Enter the file name of the Test Data: ")
    ft=open(fTest,"r")
    sl=ft.readline()
    while sl !="":
        isSpam=int(sl[:1])
        if isSpam==1:
            testSpam=testSpam+1
        else :
            testHam=testHam+1
        # NOTE(review): unlike training, the label digit is NOT stripped here
        # (cleanText(sl) vs cleanText(line[1:]) above) -- confirm intent.
        sl=cleanText(sl)
        sl=sl.split()
        sl=set(sl)
        spamProb=probability(sl,pl,1)
        #print("spam: ",spamProb)
        hamProb=probability(sl,pl,0)
        #print("ham: ",hamProb)
        # Bayes posterior P(spam | words) with class priors spam/total, ham/total.
        prediction=(spamProb*(spam/total))/(spamProb*(spam/total) + hamProb*(ham/total))
        #print("prediction",prediction,"\n\n")
        if prediction >= .5:
            prediction=1
        else:
            prediction=0
        if prediction==isSpam and prediction==1:
            tp+=1
        elif prediction==isSpam and prediction==0:
            tn+=1
        elif prediction!=isSpam and prediction==1:
            fp+=1
        elif prediction!=spam and prediction==0:
            # NOTE(review): "prediction!=spam" compares against the spam COUNT;
            # this almost certainly should be "prediction!=isSpam".
            fn+=1
        sl=ft.readline()
    # NOTE(review): precision/recall/f1 divide by tp+fp / tp+fn -- these raise
    # ZeroDivisionError when the classifier predicts only one class.
    accuracy=(tp+tn)/(tp+tn+fp+fn)
    precision=tp/(tp+fp)
    recall=tp/(tp+fn)
    f1=2*((1)/((1/precision)+(1/recall)))
    print("\n\nSpam in test file: ",testSpam)
    print("Ham in test file: ",testHam,"\n\n")
    print("True Positive: \t",tp)
    print("True Negative: \t",tn)
    print("False Positive:\t",fp)
    print("False Negative:\t",fn,"\n\n")
    print("Accuracy: %.5f"%(accuracy))
    print("Precision: %.5f"%(precision))
    print("Recall: %.5f"%(recall))
    print("F1: %.5f"%(f1))
    fs.close()
    f.close()
#naive bayes probability algorithm
#calcultes the probability that each word is spam
def probability(sl, pl, i):
    """Naive-Bayes likelihood of the word set *sl* under class *i*.

    *pl* maps word -> [ham_rate, spam_rate]; i selects the class column
    (0 = ham, 1 = spam). Words present in *sl* contribute their rate,
    absent words contribute (1 - rate).
    """
    prob = 1
    for word, rates in pl.items():
        rate = rates[i]
        prob = prob * (rate if word in sl else (1 - rate))
    return prob
#clean the input of punctuation/unimportant chars
def cleanText(line):
    """Lowercase and strip *line*, then blank out a fixed set of
    punctuation characters (each becomes a single space)."""
    cleaned = line.lower().strip()
    for ch in """[]!.,"-!_@;':#$%^&*()+/?""":
        cleaned = cleaned.replace(ch, " ")
    return cleaned
#count the words and store them in the respective dict() spam/ham
def countedWords(text, is_spam, counted, stopWords):
    """Update *counted* in place and return it.

    Each entry maps word -> [ham_count, spam_count]; *is_spam* == 1 bumps
    the spam column, anything else the ham column. Words in *stopWords*
    are skipped entirely.
    """
    column = 1 if is_spam == 1 else 0
    for word in text:
        if word in stopWords:
            continue
        if word in counted:
            counted[word][column] += 1
        else:
            counted[word] = [1 - column, column]
    return counted
#the relative pecrent of each word in spam and ham email
def percentList(k, theCount, spams, hams):
    """Convert raw [ham, spam] counts into k-smoothed class-conditional
    rates, in place, and return the mapping."""
    for word, counts in theCount.items():
        counts[0] = (counts[0] + k) / (2 * k + hams)
        counts[1] = (counts[1] + k) / (2 * k + spams)
    return theCount
# Script entry point.
if __name__== "__main__":
    main()
|
# Merge the local protein-frequency file with four neighbors' files into one
# table keyed by the two-character protein code; entries absent from a file
# are recorded as the literal string 'None'.
me = 'protein_frequency_num.txt'
neighbor1 = '0724_protein_frequency_num.txt'
neighbor2 = '2390_protein_frequency_num.txt'
neighbor3 = '2987_protein_frequency_num.txt'
neighbor4 = '8971_protein_frequency_num.txt'
## attention!! this file cannot be run twice -- it would clobber the existing output file!
DNA_table = 'protein_table.txt'

# One output column per source file, in this fixed order.
sources = [me, neighbor1, neighbor2, neighbor3, neighbor4]
table_dic = {}
for col, fname in enumerate(sources):
    with open(fname, 'r') as fh:
        for line in fh:
            key = line[:2]          # two-character protein code
            value = line[6:-1]      # frequency field with trailing newline dropped
            table_dic.setdefault(key, ['None'] * len(sources))[col] = value

print(table_dic)
sorted_key = sorted(table_dic.keys())
print('in total:', len(sorted_key))

# Emit one space-separated row per key, in sorted key order.
rows = ''
for key in sorted_key:
    rows += key + ' ' + ' '.join(table_dic[key]) + '\n'

# Open for writing only once the data is ready, so a failure while reading
# the inputs no longer truncates an existing table (the original opened the
# output in 'w' mode before reading anything).
with open(DNA_table, 'w') as table_file:
    table_file.write(rows)
|
16,378 | e9bcb586d78c5d7ea592f6e398b59d2de11a54b9 | from app.models.instagram import InstagramScraper
from app.models.dbmodel import MongoDBModel
from app.models import config
from app.tasks import worker
from time import sleep
# Module-level singletons shared by every task in this worker process.
mongo = MongoDBModel(config.DB_NAME, config.MONGODB_URI)
ig = InstagramScraper()
@worker.task(name='instagram_scraper.ig_profile')
def ig_profile(username):
    """Celery task: scrape one profile and store it in the profile collection."""
    profile = ig.getProfileInfo(username)
    mongo.insertByOne(config.IG_PROFILE_COLL, profile)
    return 'scraping instagram profile: {}, succeeded.'.format(username)
@worker.task(name='instagram_scraper.ig_user_post')
def ig_user_post(username):
    """Celery task: scrape a user's posts and store each one individually."""
    posts = ig.getProfilePost(username)
    for post in posts:
        mongo.insertByOne(config.IG_PROFILE_POST_COLL, post)
    return 'scraping instagram user post: {}, succeeded.'.format(username)
@worker.task(name='instagram_scraper.ig_hashtag_post')
def ig_hashtag_post(hashtag):
    """Celery task: scrape a hashtag's posts, attach each post's comments,
    and store the enriched posts."""
    posts = ig.getHashtagPost(hashtag)
    sleep(10)  # NOTE(review): presumably crude rate limiting before the comment fetches -- confirm
    for post in posts:
        comments = ig.getPostComments(post['shortcode'])
        post['comments'] = comments
        mongo.insertByOne(config.IG_HASHTAG_POST_COLL, post)
    return 'scraping instagram hashtag post: {}, succeeded.'.format(hashtag)
|
16,379 | 7e30ed8d054d3f4fcc6d7c6f9a91ecbe4350441b | """add active to appear_records
Revision ID: 9f7ce1a0a181
Revises: 507273553149
Create Date: 2020-01-13 10:36:31.040976
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic to order this migration in the chain.
revision = '9f7ce1a0a181'
down_revision = '507273553149'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable smallint column `active` to appear_records."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('appear_records', sa.Column('active', sa.SmallInteger(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade` by dropping the ``active`` column again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('appear_records', 'active')
    # ### end Alembic commands ###
|
16,380 | fc2d7eddfdeecf446289dd93f91df2caff38c6d6 | __author__ = 'hooda'
def intersection(line1, line2):
    """Return [x, y] where two lines cross.

    Each line is a sequence [slope, intercept, ...].  The pair is sorted
    first (the formula is symmetric, so this does not change the result).

    Raises:
        ValueError: for parallel (equal-slope) lines.  The original printed
            "INVALID" and then crashed with ZeroDivisionError; the debug
            print of every intersection has also been removed.
    """
    [line1, line2] = sorted([line1, line2])
    if line1[0] == line2[0]:
        raise ValueError("INVALID: parallel lines have no unique intersection")
    m1, c1, m2, c2 = line1[0], line1[1], line2[0], line2[1]
    x = (c2 - c1) / (m1 - m2)
    y = (m2 * c1 - m1 * c2) / (m2 - m1)
    return [x, y]
def visible(lines):
    """Divide-and-conquer upper envelope.

    Given slope-sorted, slope-unique lines, returns a list of regions
    [x_lo, x_hi, line] covering (-inf, +inf), where `line` is the highest
    line over that interval.

    Fix: `mid` used `/`, which yields a float under Python 3 and makes the
    slice `lines[0:mid]` raise TypeError; now uses floor division.
    """
    if len(lines) == 1:
        return [[float("-inf"), float("inf"), lines[0]]]
    if len(lines) == 2:
        line1, line2 = lines[0], lines[1]
        point = intersection(line1, line2)
        return [[float("-inf"), point[0], line1], [point[0], float("inf"), line2]]
    mid = len(lines) // 2  # was `/`: float index under Python 3
    struct1 = visible(lines[0:mid])
    struct2 = visible(lines[mid:])
    return combine(struct1, struct2)
def combine(struct1, struct2):
    """Merge two envelope structures into one.

    `struct1` must come from the lower-slope half and `struct2` from the
    higher-slope half, so they cross exactly once: struct2 is below on the
    far left and above on the far right.  The merged envelope is struct1's
    regions up to the crossing point, then struct2's regions after it.
    """
    # print("combining''''''''''''''''''''''''''''''''''''''")
    # print(struct1)
    # print(struct2)
    # IDEA : We assume that struct1 is from the lower slope half, struct2 from the higher slop half.
    # Now, we merge the intersection points in struct1 and struct2. We get something like:
    # p11, p12, p21, p13, p22 etc. some random order. The insight is the point of intersection we're looking for
    # Must lie between consec. p's. (Or straight up outside the range).
    # So we sort of pick each interleaving, find the corresponding lines
    # in that region, and check if their intersection is also in that region.
    # Another approach is to notice that as we approach form -infinity, struct2 must be lower,
    # and as we approach +infinity, struct2 must be higer.
    # The point of intersection is where this flip happens. This is also a reasonable approach,
    # but the corner casses etc. need to be considered.
    # Unsaid here is the assumption that there is one and only one point of intersection.
    # I can't come up with a definite proof, but it seems reasonable nonetheless.
    # The flippy approach.
    # Struct1 is required by intergalactic law to be low-slope struct.
    # if the infinity lines intersect at x < x10 and x20, we are done. Similarly for x > x1n and x2n.
    # Crossing of the two leftmost (-inf) lines and the two rightmost (+inf) lines.
    infx = intersection(struct1[0][2], struct2[0][2])[0]
    # print("infx", infx)
    inf2x = intersection(struct1[-1][2], struct2[-1][2])[0]
    # print("inf2x", inf2x)
    if infx <= min(struct1[0][1], struct2[0][1]):
        # Crossing happens before either structure's first breakpoint.
        final = [[float("-inf"), infx, struct1[0][2]], [infx, struct2[0][1], struct2[0][2]]] + struct2[1:]
    elif inf2x >= max(struct1[-1][0], struct2[-1][0]):
        # Crossing happens after both structures' last breakpoints.
        final = struct1[0:-1] + [[struct1[-1][0], inf2x, struct1[-1][2]], [inf2x, float("inf"), struct2[-1][2]]]
    # Otherwise we truncate the structs to finite lengths. Find the intersection using flipping.
    else:
        minx = min(struct1[0][1], struct2[0][1])
        maxx = max(struct1[-1][0], struct2[-1][0])
        struct1a = confine(struct1, minx, maxx)
        struct2a = confine(struct2, minx, maxx)
        intersectionx = struct_intersection(struct1a, struct2a)
        # Splice the envelopes at the crossing x.
        pos1 = getindex(intersectionx, struct1)
        pos2 = getindex(intersectionx, struct2)
        final1 = struct1[0:pos1] + [[struct1[pos1][0], intersectionx, struct1[pos1][2]]]
        final2 = [[intersectionx, struct2[pos2][1], struct2[pos2][2]]] + struct2[pos2 + 1:]
        final = final1 + final2
    flag = False  # flip to True for verbose debugging output
    if flag:
        print("=1=1=1=11=1=1=1=1=1=1=1=1=1=1=1=1=1=1=1")
        print(struct1, struct2)
        print("seem to have combined into")
        print(final)
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    return final
def confine(struct, x1, x2):
    """Clip the first and last regions of *struct* to the window [x1, x2].

    Returns a fresh outer list; region entries that are not clipped are
    shared with the input.  Regions whose boundary coincides exactly with
    x1/x2 are dropped.
    """
    regions = list(struct)
    first = regions[0]
    if first[1] > x1:
        regions[0] = [x1, first[1], first[2]]
    elif first[1] == x1:
        regions = regions[1:]
    last = regions[-1]
    if last[0] < x2:
        regions[-1] = [last[0], x2, last[2]]
    elif last[0] == x2:
        regions = regions[:-1]
    return regions
def struct_intersection(struct1, struct2):
    """x-coordinate where the two envelopes cross, located via flip search."""
    region1 = binary_flip_search(struct1, struct2)
    region2 = binary_flip_search(struct2, struct1)
    return intersection(struct1[region1][2], struct2[region2][2])[0]
def binary_flip_search(struct, cand):
    """Binary search for the region of *struct* that straddles envelope *cand*.

    Returns the index of the region for which higher() == 0, i.e. whose
    endpoints lie on opposite sides of *cand*.

    Fixes:
      * `is 0` compared identity, relying on CPython's small-int cache; now `== 0`.
      * `mid = len(struct) / 2` is a float under Python 3 and breaks indexing;
        now floor division.
      * The single-region failure path printed an error and fell through to
        an IndexError; it now raises ValueError.
    """
    if len(struct) == 1:
        if higher(struct[0], cand) == 0:
            return 0
        raise ValueError(
            "Flipping didn't happen in: {} {}".format(struct, cand))
    mid = len(struct) // 2
    higher1 = higher(struct[0], cand)
    highern = higher(struct[-1], cand)
    higher_mid = higher(struct[mid], cand)
    if higher1 == 0:
        return 0
    if highern == 0:
        return len(struct) - 1
    if higher_mid == 0:
        return mid
    if higher1 == higher_mid:
        # First half is entirely on one side: the flip lies right of mid.
        return mid + 1 + binary_flip_search(struct[mid + 1:-1], cand)
    # Otherwise the flip lies strictly between the first region and mid.
    return 1 + binary_flip_search(struct[1:mid], cand)
def higher(region, cand):
    """Compare one region of an envelope against another envelope *cand*.

    Returns +1 when the region's line is above *cand* at both region
    endpoints, -1 when below at both, and 0 when the sides differ (a flip).
    """
    left_point = [region[0], gety(region[0], region[2])]
    right_point = [region[1], gety(region[1], region[2])]
    above_left = high(left_point, cand)
    above_right = high(right_point, cand)
    if above_left and above_right:
        return 1
    if not above_left and not above_right:
        return -1
    return 0
def high(point, struct):
    """True when *point* = [x, y] lies on or above envelope *struct* at x."""
    envelope_line = struct[getindex(point[0], struct)][2]
    return point[1] >= gety(point[0], envelope_line)
def getindex(x, struct):
    """Binary search: index of the region [lo, hi, line] of *struct* containing x.

    Returns the string "Out of range of struct." when x falls outside a
    single-region struct (sentinel preserved from the original for
    compatibility with existing callers).

    Fix: `mid = len(struct) / 2` produced a float index under Python 3;
    now floor division.
    """
    if len(struct) == 1:
        if struct[0][0] <= x <= struct[0][1]:
            return 0
        return "Out of range of struct."
    mid = len(struct) // 2
    if struct[mid][0] <= x <= struct[mid][1]:
        return mid
    if x < struct[mid][0]:
        return getindex(x, struct[0:mid])
    return mid + 1 + getindex(x, struct[mid + 1:])
def gety(x, line):
    """Evaluate line = [slope, intercept, ...] at x."""
    slope, intercept = line[0], line[1]
    return slope * x + intercept
def reader(infile):
    """Parse the problem input file.

    Format: first line is a count N, followed by N lines "slope:intercept".
    Returns [[slope, intercept, line_number], ...] with 1-based line numbers.

    Fix: the file handle was opened and never closed (the parameter was even
    rebound to the handle); it is now closed deterministically via `with`.
    """
    linelist = []
    with open(infile) as fh:
        lines = fh.readlines()
    for i in range(1, int(lines[0]) + 1):
        slope, intercept = lines[i].split(":")
        linelist.append([float(slope), float(intercept), i])
    return linelist
def writer(outfile, struct):
    """Write the sorted, de-duplicated ids of visible lines to *outfile*.

    Each region of *struct* is [lo, hi, [slope, intercept, id]]; the ids are
    collected, deduplicated, sorted, and serialized as a comma-separated
    string with no spaces (quotes stripped).  Returns the written string.
    """
    visible_ids = sorted({region[2][2] for region in struct})
    serialized = str(visible_ids)[1:-1].replace("'", "").replace(' ', '')
    with open(outfile, "w") as out:
        out.write(serialized)
    return serialized
def clean(lines):
    """Collapse runs of equal slopes in a sorted line list.

    For each run of lines sharing a slope, only the last entry survives;
    since the input is sorted, that is the one with the largest intercept,
    i.e. the only one that can appear on the upper envelope.
    """
    if len(lines) < 2:
        return lines
    deduped = []
    for entry in lines:
        if deduped and deduped[-1][0] == entry[0]:
            deduped[-1] = entry  # same slope: later entry supersedes
        else:
            deduped.append(entry)
    return deduped
def runner(inf, outf):
    """End-to-end driver: read lines from *inf*, dedupe slopes, compute the
    upper envelope, and write the visible ids to *outf*.

    Prints and returns the comma-separated id string.
    """
    candidates = sorted(reader(inf))
    candidates = clean(candidates)
    envelope = visible(candidates)
    result = writer(outf, envelope)
    print(result)
    return result
# Default input/output paths used by the module-level test helpers below.
infile = "input.txt"
outfile = "output.txt"
def superbrute(lines):
    """O(n^2) reference: every line that brute() reports as visible."""
    visibles = [candidate for candidate in lines if brute(lines, candidate)]
    print(visibles)
    return visibles
def brute(lines, mine):
    """Reference check: is line *mine* on the upper envelope of *lines*?

    A line is visible iff at some intersection point with another line no
    other line lies strictly above it.

    Fixes: removed the unreachable duplicate `return ivisible` and the
    per-iteration debug prints that flooded stdout.
    """
    xs = [intersection(other, mine)[0] for other in lines if other != mine]
    for x in xs:
        my_y = gety(x, mine)
        if all(gety(x, other) <= my_y for other in lines if other != mine):
            return True
    return False
import random
def generate(n):
    """Write *n* random lines to input.txt and return them.

    File format: first line is n, then one "slope:intercept" per line.
    Returns [[slope, intercept, 1-based id], ...].

    Fix: the output file handle was never closed; now closed via `with`.
    """
    mylines = []
    for i in range(1, n + 1):
        m = float(random.uniform(-100000, 100000))
        c = float(random.uniform(-100000, 100000))
        mylines += [[m, c, i]]
    with open('input.txt', 'w') as f:
        f.write(str(n) + '\n')
        for line in mylines:
            f.write(str(line[0]) + ':' + str(line[1]) + '\n')
    return mylines
def supertest(n):
    """Compare the divide-and-conquer answer against the brute-force one.

    NOTE(review): currently broken — the setup that defined `lines` and
    `sure` is commented out below, so `visible(lines)` and `for line in
    sure` raise NameError at runtime.  Re-enable the commented lines (or
    delete this helper) before use.
    """
    # lines = generate(n)
    # lines.sort()
    # lines = clean(lines)
    # for line in lines:
    # print(line)
    # print("Doing Brute Forces")
    # sure = superbrute(lines)
    print("doing ninja speed mode")
    maybe = visible(lines)
    writer(outfile, maybe)
    surelines = []
    for line in sure:
        surelines += [line[2]]
    s = str((sorted(surelines)))
    s = s[1:-1].replace(' ', '')
    print(s)
def infitest():
    """Interactive fuzz loop: generate n random lines and compare the fast
    divide-and-conquer result against the O(n^2) brute-force result.

    Runs forever; intended for manual testing only.

    Fix: `raw_input` is Python-2 only and raises NameError on Python 3;
    replaced with `input` (for integer entries the behaviour matches).
    """
    # print('lol')
    # return
    while True:
        i = int(input('What now?'))
        lines = generate(i)
        print(sorted(lines))
        maybe = runner('input.txt', 'output.txt')
        sure = superbrute(lines)
        surelines = []
        for line in sure:
            surelines += [line[2]]
        s = str((sorted(surelines)))
        s = s[1:-1].replace(' ', '')
        print('sure', s)
        print('maybe', maybe)
# runner('input.txt','output.txt')
# infitest()
# TODO make etc. files for script based checking.
|
16,381 | 024b045fd2ed8a7c3c38a08ea3cede920f58e061 | import json
import boto3
from botocore.exceptions import ClientError
dynamodb = boto3.resource('dynamodb')
def lambda_handler(event, context):
    """API Gateway proxy entry point for the stock-alarm service.

    Routes:
      /shareprice          -> current price, alarm threshold and history
      /updatealarm         -> set price_alarm for the symbol
      /updatealarm/turnoff -> reset price_alarm to the string 'null'

    Returns a proxy-integration response dict.  Fix: on failure the
    original returned the bare string 'Error', which API Gateway surfaces
    as a 502 Bad Gateway; a well-formed 500 response is returned instead.
    """
    res = None  # unknown paths fall through and answer 200 with message null
    try:
        symbol = event['queryStringParameters']['symbol']
        if event['path'] == '/shareprice':
            [price, price_alarm] = get_price(symbol)
            hData = get_h_data(symbol)
            res = {'price': str(price), 'price_alarm': str(price_alarm), 'hData': hData}
        elif event['path'] == '/updatealarm':
            res = update_alarm(symbol, event['queryStringParameters']['price'])
        elif event['path'] == '/updatealarm/turnoff':
            res = turnoff_alarm(symbol)
    except Exception as e:
        print(e)  # surfaced in CloudWatch logs
        return {
            'statusCode': 500,
            'headers': {'Access-Control-Allow-Origin': '*'},
            'body': json.dumps({'message': 'Error'}),
        }
    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Origin': '*',
        },
        'body': json.dumps({
            'message': res,
        })
    }
def get_price(symbol):
    """Fetch [price, price_alarm] for *symbol* from the Stock_prices table.

    Fix: the item is fetched once — the original issued two identical
    DynamoDB get_item round trips for the same key.
    """
    table = dynamodb.Table('Stock_prices')
    item = table.get_item(Key={'symbol': symbol})['Item']
    return [item['price'], item['price_alarm']]
def get_h_data(symbol):
    """Return the parsed historical-data JSON blob for *symbol*."""
    table = dynamodb.Table('Historical_data')
    raw = table.get_item(Key={'symbol': symbol})['Item']['hData']
    return json.loads(raw)
def update_alarm(symbol, price):
    """Set price_alarm for *symbol* and return the DynamoDB update response."""
    table = dynamodb.Table('Stock_prices')
    return table.update_item(
        Key={'symbol': symbol},
        UpdateExpression='set price_alarm = :p',
        ExpressionAttributeValues={':p': price},
        ReturnValues='UPDATED_NEW',
    )
def turnoff_alarm(symbol):
    """Disable the alarm by storing the literal string 'null' as price_alarm."""
    table = dynamodb.Table('Stock_prices')
    return table.update_item(
        Key={'symbol': symbol},
        UpdateExpression='set price_alarm = :p',
        ExpressionAttributeValues={':p': 'null'},
        ReturnValues='UPDATED_NEW',
    )
return response |
16,382 | 8cd4a9c28d87f25b24b3ea33d601b192d38c367a | from __future__ import division
import unittest
from chainer import testing
from chainer import training
@testing.parameterize(
# single iteration
{
'iter_per_epoch': 2, 'schedule': (2, 'iteration'), 'resume': 3,
'expected': [False, True, False, False, False, False, False]},
# multiple iteration
{
'iter_per_epoch': 2, 'schedule': ([2, 4], 'iteration'), 'resume': 3,
'expected': [False, True, False, True, False, False, False]},
# single epoch
{
'iter_per_epoch': 3, 'schedule': (1, 'epoch'), 'resume': 3,
'expected': [False, False, True, False, False, False, False]},
# multiple epoch
{
'iter_per_epoch': 3, 'schedule': ([1, 2], 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, True, False]},
# single fractional epoch
{
'iter_per_epoch': 2, 'schedule': (1.5, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, False, False]},
# multiple fractional epoch
{
'iter_per_epoch': 2, 'schedule': ([1.5, 2.5], 'epoch'), 'resume': 4,
'expected': [False, False, True, False, True, False, False]},
# single unaligned epoch
{
'iter_per_epoch': 2.5, 'schedule': (1, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, False, False]},
# multiple unaligned epoch
{
'iter_per_epoch': 2.5, 'schedule': ([1, 2], 'epoch'), 'resume': 4,
'expected': [False, False, True, False, True, False, False]},
# single tiny epoch
{
'iter_per_epoch': 0.5, 'schedule': (1, 'epoch'), 'resume': 4,
'expected': [True, False, False, False, False, False, False]},
# multiple tiny epoch
{
'iter_per_epoch': 0.5, 'schedule': ([1, 2], 'epoch'), 'resume': 4,
'expected': [True, False, False, False, False, False, False]},
)
class TestTrigger(unittest.TestCase):
    """Parameterized tests for ManualScheduleTrigger.

    The @testing.parameterize decorator above supplies: iter_per_epoch,
    schedule (points + unit passed to the trigger), resume (iteration at
    which the trigger is recreated), and expected (per-iteration firings).
    """
    def test_trigger(self):
        """The trigger fires exactly at the scheduled points on a fresh run."""
        trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        for expected in self.expected:
            trainer.updater.update()
            self.assertEqual(trigger(trainer), expected)
    def test_resumed_trigger(self):
        """A trigger recreated at iteration `self.resume` must continue to
        match the expected firing pattern for the remaining iterations."""
        trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        for expected in self.expected[:self.resume]:
            trainer.updater.update()
            self.assertEqual(trigger(trainer), expected)
        # Recreate the trigger mid-run, simulating a resume from snapshot.
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        for expected in self.expected[self.resume:]:
            trainer.updater.update()
            self.assertEqual(trigger(trainer), expected)
# Register this module's tests with Chainer's test runner.
testing.run_module(__name__, __file__)
|
16,383 | 83260230430108481e83e85469f0b4465f567cda | # Generated by Django 3.0 on 2019-12-20 01:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine ScrapeResult.value as a 255-char CharField."""
    dependencies = [
        ('registration', '0003_auto_20191220_0104'),
    ]
    operations = [
        migrations.AlterField(
            model_name='scraperesult',
            name='value',
            field=models.CharField(max_length=255, verbose_name='採取値'),
        ),
    ]
|
16,384 | 0b025f7129ed9d5f4866ba7a26bd0c7b12fd9d42 | from django.db import models
from datetime import datetime
class Contact(models.Model):
    """A contact-form submission (listed in the admin as "Emails")."""
    name = models.CharField(max_length=50)
    email = models.EmailField(max_length=100)
    phone = models.CharField(max_length=15, blank=True)  # optional
    message = models.TextField()
    # NOTE(review): datetime.now is naive; if USE_TZ is enabled consider
    # timezone.now / auto_now_add — confirm project settings.
    date = models.DateTimeField(default=datetime.now)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name_plural = "Emails"
        ordering = ('date',)
|
16,385 | 3301b8d9d1e0c40f105eecca0eb15a9b0c031d8b | import itertools
from itertools import chain, combinations
def all_subsets(ss):
    """Yield every subset of *ss* as a tuple, from () up to the full set."""
    return chain.from_iterable(
        combinations(ss, size) for size in range(len(ss) + 1))
def func(F):
    """For every non-empty subset of the fragments in F, list the elements
    shared by all fragments of that subset.

    Returns [(fragment_indices, shared_elements_as_ints), ...] in subset
    enumeration order; the order inside each element list follows set
    iteration order.
    """
    parts = f2p(F)
    n = len(parts)
    results = []
    index_singletons = [[i] for i in range(n)]
    subsets = list(all_subsets(index_singletons))
    subsets.pop(0)  # discard the empty subset
    # Elements are compared as strings, matching the original behaviour.
    element_sets = [{str(element) for element in part[1]} for part in parts]
    for subset in subsets:
        members = [singleton[0] for singleton in subset]
        chosen = [element_sets[singleton[0]] for singleton in subset]
        shared = element_sets[subset[0][0]].intersection(*map(set, chosen))
        results.append((members, list(map(int, shared))))
    return results
def f2p(F):
    """Pair every fragment with its index: [[0, F[0]], [1, F[1]], ...]."""
    return [[index, fragment] for index, fragment in enumerate(F)]
# def funk(F,pdbdata):
# final = []
# for i in range(len(F)):
# final.append([])
# for i in pdbdata[0]:
# for j in F:
# if i in j :
# return final
def get_subsets(fullset):
    """All subsets of *fullset* as lists, via bitmask enumeration.

    Subset i contains element k exactly when bit k of i is set, so the
    result starts with [] and ends with the full set.
    """
    items = list(fullset)
    return [[item for bit, item in enumerate(items) if mask & (1 << bit)]
            for mask in range(2 ** len(items))]
|
16,386 | 3b41235974b6661630512b9c5cc84c4aa210bc83 | """Defines environment dynamics for paraphrase generation task as RL problem"""
import torch
import numpy as np
import random
import os
import config
import data
import model_evaluation
import supervised_model as sm
from train_ESIM import load_ESIM_model
# Module-level shorthands for shared configuration and dataset singletons.
DEVICE = config.DEVICE
MAX_LENGTH = config.MAX_LENGTH
SOS_token = config.SOS_token  # start-of-sentence token id
EOS_token = config.EOS_token  # end-of-sentence token id
train_pairs = data.TRAIN_PAIRS
val_pairs = data.VAL_PAIRS
test_pairs = data.TEST_PAIRS
vocab_index = data.VOCAB_INDEX  # exposes n_words and index2word (used below)
#%%
class ParaPhraseeEnvironment(object):
    """Define the paraphrase generation task in the style of OpenAI Gym"""
    def __init__(self, source_sentence, target_sentence, supervised_encoder,
                 reward_function, similarity_model_name, sentence_pairs):
        self.name = 'ParaPhrasee'
        self.source_sentence = source_sentence # Stored as string
        self.target_sentence = target_sentence # Stored as string
        self.predicted_words = []
        self.reward_function = reward_function # String ex. BLEU
        self.similarity_model_name = similarity_model_name
        self.ESIM_model_name = 'ESIM_noisy_3'
        # Load every reward/evaluation model and its normalisation artifacts
        # in one call to model_evaluation.init_eval_models.
        self.similarity_model, self.fluency_model, self.ESIM_model, \
        self.logr_model, self.std_scaler, \
        self.similarity_dist, self.fluency_dist, self.ESIM_dist = model_evaluation.init_eval_models(
            reward_function=self.reward_function, similarity_model_name=self.similarity_model_name,
            ESIM_model_name=self.ESIM_model_name)
        self.sentence_pairs = sentence_pairs
        self.supervised_encoder = supervised_encoder
        # Episode bookkeeping: done flag, accumulated reward, discount factor.
        self.max_length = MAX_LENGTH
        self.max_steps = self.max_length
        self.done = 0
        self.ep_reward = 0
        self.gamma = 0.999
        self.changing_input = True  # reset() draws a fresh random pair when True
        # Initial state: SOS action tensor plus the encoder context of the pair.
        self.action_tensor = torch.tensor([[SOS_token]], device=DEVICE)
        self.encoder_outputs = torch.zeros(MAX_LENGTH, supervised_encoder.hidden_size, device=DEVICE)
        self.context, _, _ = sm.embed_input_sentence([self.source_sentence, self.target_sentence], supervised_encoder,
                                                     max_length=self.max_length)
        self.state = (self.action_tensor, self.context)
        self.action_space = vocab_index.n_words  # one action per vocabulary word
    def pred_sentence(self):
        """Returns the sentence prediction from the environment"""
        output_sentence = ' '.join(self.predicted_words)
        return output_sentence
    def supervised_baseline(self, supervised_decoder):
        """Returns the supervised model prediction for the same sentence for comparative purposes"""
        supervised_decoder_pred, _, baseline_reward = sm.validationMetricPerformance(
            input_pairs=[(self.source_sentence, self.target_sentence)], encoder=self.supervised_encoder,
            decoder=supervised_decoder, similarity_model=self.similarity_model, fluency_model=self.fluency_model,
            ESIM_model=self.ESIM_model, logr_model=self.logr_model, std_scaler=self.std_scaler,
            similarity_dist=self.similarity_dist, fluency_dist=self.fluency_dist, ESIM_dist=self.ESIM_dist,
            vocab_index=vocab_index, verbose=False, metric=self.reward_function)
        # Presumably a list of (source, prediction) pairs — keep the text;
        # TODO confirm against sm.validationMetricPerformance.
        supervised_decoder_pred = supervised_decoder_pred[0][1]
        return supervised_decoder_pred, np.around(baseline_reward, 3)
    def step(self, action, decoder_hidden):
        """Key function which represents the transition dynamics.
        given an action (word choice) this returns the updated state FROM THE AGENT
        is effectively the decoder
        All this is effectively doing is checking if the episode is over and returning the
        appropirate reward, else updating the state based on the decoder outputs"""
        # Check whether episode is over
        if (action == EOS_token) or (len(self.predicted_words)>= self.max_length):
            self.state = action, decoder_hidden
            # Terminal step: score the whole predicted sentence once.
            RL_model_reward = model_evaluation.performance_metrics(
                target_sentence=self.target_sentence, pred_sentence=self.pred_sentence(),
                similarity_model=self.similarity_model, fluency_model=self.fluency_model, ESIM_model=self.ESIM_model,
                logr_model=self.logr_model, std_scaler=self.std_scaler,
                similarity_dist=self.similarity_dist, fluency_dist=self.fluency_dist, ESIM_dist=self.ESIM_dist,
                vocab_index=vocab_index, metric=self.reward_function)
            # Calculate relative reward
            self.ep_reward = np.around(RL_model_reward, 3)
            self.done = 1
        else:
            self.state = action, decoder_hidden
            # Add word to pred words
            self.predicted_words.append(vocab_index.index2word[action.item()])
        return self.state, self.ep_reward, self.done, None
    def reset(self):
        """Resets the environment to a random initial state through picking a random sentence from the
        sentence input pairs"""
        if self.changing_input:
            sentence_pair = random.choice(self.sentence_pairs)
            self.source_sentence = sentence_pair[0] # Stored as string
            self.target_sentence = sentence_pair[1] # Stored as string
        self.predicted_words = []
        self.ep_reward = 0
        self.done = 0
        # Rebuild the initial (SOS, context) state for the new pair.
        self.action_tensor = torch.tensor([[SOS_token]], device=DEVICE)
        self.encoder_outputs = torch.zeros(MAX_LENGTH, self.supervised_encoder.hidden_size, device=DEVICE)
        self.context, _, _ = sm.embed_input_sentence([self.source_sentence, self.target_sentence],
                                                     self.supervised_encoder, max_length=self.max_length)
        self.state = (self.action_tensor, self.context)
        return self.state
|
16,387 | 8e9336000948c83e86c755e1c52b6e01284f698f | from .transaction import SimpleTransaction, AllowedTransaction
from .account import Account
from .event import EventLightweightPush |
16,388 | 7f3e640dc1d3294a67e4a5d22c7c71176f130873 | # https://atcoder.jp/contests/agc029/tasks/agc029_a
# A - Irreversible operation
import sys
sys.setrecursionlimit(10 ** 7)
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
    """AGC029 A: minimum number of swaps = for each 'W', the count of 'B's
    to its left (once no move is possible the stones read WWW...BBB)."""
    stones = input()
    blacks_seen = 0
    moves = 0
    for stone in stones:
        if stone == "B":
            blacks_seen += 1
        elif stone == "W":
            moves += blacks_seen
    print(moves)
if __name__ == '__main__':
resolve()
|
16,389 | b35f5689e7c9649434c224777731fddf51b22861 | import pytest
import sys
import threading
import traceback
try:
from queue import Queue
except:
from Queue import Queue
from .util import run_test, destroy_window
def url_load():
    """Open a window on example.org, then load google.org from a worker thread.

    Exits with code 0 when load_url succeeds, 1 on failure (the worker also
    fails the pytest run via pytest.fail).  Results travel back through the
    queue `q`; `destroy_event` signals the teardown helper.
    """
    import webview
    def _change_url(webview):
        # Runs on a background thread; closes over q and destroy_event below.
        try:
            webview.load_url('https://www.google.org')
            q.put(0)
        except Exception as e:
            q.put(1)
            pytest.fail('Exception occured:\n{0}'.format(traceback.format_exc()))
        destroy_event.set()
    q = Queue()
    t = threading.Thread(target=_change_url, args=(webview,))
    t.start()
    # destroy_window is a project helper; presumably returns the event that,
    # once set, triggers window destruction — confirm in .util.
    destroy_event = destroy_window(webview)
    webview.create_window('URL change test', 'https://www.example.org')
    exitcode = q.get()
    sys.exit(exitcode)
def test_url_load():
    """pytest entry point: run the url_load scenario through the harness."""
    run_test(url_load)
|
16,390 | c0056f40b6bf02ef8255f37e02310fa741ee3367 | from datetime import datetime
from backtesting.strategies.trend.SuperTrendStrategy import SuperTrendStrategy
from backtesting.tests.Test import Test
class SuperTrendTest(Test):
    """Backtest harness wiring SuperTrendStrategy into the shared Test runner.

    All strategy parameters are read from the 'SuperTrend' section of
    self.config (populated by the Test base class).
    """
    def __init__(self):
        self.strategies = [SuperTrendStrategy]
        super().__init__(self.__class__.__name__, self.strategies)
    def get_atr_period(self):
        # ATR look-back window for the indicator.
        return self.config['SuperTrend']['atr_period']
    def get_atr_multiplier(self):
        # Band-width multiplier applied to the ATR.
        return self.config['SuperTrend']['atr_multiplier']
    def get_to_date(self):
        """End date of the backtest: "now" means today, otherwise a
        'Mon-DD-YYYY' string (e.g. 'Jan-13-2020')."""
        d = self.config['SuperTrend']['to_date']
        if d == "now":
            return datetime.now().date()
        else:
            return datetime.strptime(d, '%b-%d-%Y').date()
    def get_max_candles(self):
        return self.config['SuperTrend']['max_candles']
    def add_strategy(self, cerebro, strategy, symbol):
        """Register *strategy* on *cerebro* with the configured parameters."""
        cerebro.addstrategy(strategy, ticker=symbol,
                            atr_period=self.get_atr_period(), atr_multiplier=self.get_atr_multiplier(),
                            order_pct=self.get_order_percent())
|
16,391 | 34e6c90a9da668618280e8d5654af966c6393ce8 | #opencv3.3
#https://www.pyimagesearch.com/2017/09/11/object-detection-with-deep-learning-and-opencv/
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse as arg
import imutils
import time
import cv2
# --- CLI, model and camera setup --------------------------------------------
ap = arg.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True, help="caffe'deploy'prototxt file")
# BUG FIX: was '-model' (single dash); argparse then stores the value under
# dest 'm', so args['model'] below raised KeyError.
ap.add_argument('-m', "--model", required=True, help="Caffe pre-trained model")
ap.add_argument('-c', "--confidence", type=float, default=0.2, help="过滤弱检测的最小概率")
args = vars(ap.parse_args())
# NOTE(review): 'horse' appears twice — confirm the intended class list.
CLASSES = ['background', 'horse', 'horse', 'person', 'bicycle']
COLARS = np.random.uniform(0, 255, size=(len(CLASSES), 3))  # one colour per class
print('[INFO] loading model...')
# BUG FIX: key was misspelled ('prototext') and 'ars' was a typo for 'args'.
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args['model'])
print('[INFO] starting video stream')
vs = VideoStream(src=1).start()
time.sleep(2.0)  # let the camera warm up
fps = FPS().start()
def draw():
    """Collect bounding boxes of detections above the confidence threshold.

    Reads the module-level `detections`, `args`, `w` and `h` set by the
    capture loop below.  NOTE(review): the computed box (and `idx`) is
    currently unused — presumably cv2.rectangle/putText calls were meant
    to follow; confirm intended behaviour.
    """
    for i in np.arange(0, detections.shape[2]):  # BUG FIX: was `detections.shae`
        confidence = detections[0, 0, i, 2]
        if confidence > args["confidence"]:
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
# Main capture loop: grab a frame, resize it, build a 300x300 blob and run
# the detector.  Runs until interrupted.
while True:
    frame=vs.read()
    frame=imutils.resize(frame,width=400)
    (h,w)=frame.shape[:2]
    # 0.007843 ~= 1/127.5 scale and 127.5 mean — presumably the
    # MobileNet-SSD preprocessing convention; confirm against the model.
    blob=cv2.dnn.blobFromImage(frame,0.007843,(300,300),127.5)
    net.setInput(blob)
    detections=net.forward()
"""
tensorflow 对mobilenet重新训练
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/ml/blogs/road-not-road/data/ \
--learning_rate=0.0001 \
--testing_percentage=20 \
--validation_percentage=20 \
--train_batch_size=32 \
--validation_batch_size=-1 \
--flip_left_right True \
--random_scale=30 \
--random_brightness=30 \
--eval_step_interval=100 \
--how_many_training_steps=600 \
--architecture mobilenet_1.0_224
https://github.com/marc-antimodular/ofxOpenCV-MobileNetSDD-Example
https://github.com/opencv/opencv/blob/master/samples/dnn/ssd_mobilenet_object_detection.cpp
http://blog.csdn.net/wfei101/article/details/78310226 mobile理解
Tensoflow mobilenet 分类
http://blog.csdn.net/u010302327/article/details/78248394 下载voc 数据集
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md 预训练模型
http://www.image-net.org/challenges/LSVRC/2012/ imagenet
https://github.com/Zehaos/MobileNet 非官方mobilenet
https://github.com/chuanqi305/MobileNet-SSd caffe版本 有预训练模型
https://github.com/weiliu89/caffe/tree/ssd 使用非MobelNet 训练的ssd
https://github.com/JianGoForIt/YellowFin momentum SGD 优化器
https://github.com/matvi/ObjectDetectionMobileNetSSD caffe 版本
http://blog.csdn.net/xxiaozr/article/details/77073164 ssd理解
https://github.com/phongnhhn92/Remote-Marker-based-Tracking-using-MobileNet-SSD 物联网应用
https://developforai.com/ 案例
Densnet
-------------
Densely Connected Convolutional Networks》当选 CVPR 2017 最佳论文,
Torch implementation: https://github.com/liuzhuang13/DenseNet/tree/master/models
PyTorch implementation: https://github.com/gpleiss/efficient_densenet_pytorch
MxNet implementation: https://github.com/taineleau/efficient_densenet_mxnet
Caffe implementation: https://github.com/Tongcheng/DN_CaffeScript
与苹果的首篇公开论文《Learning From Simulated and Unsupervised Images through Adversarial Training》共获这一殊荣。
CVPR 2017 的一篇 workshop 文章 《The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation》 (https://arxiv.org/abs/1611.09326) 表明,基于 DenseNet 的全卷积网络(FCN)模型在不需要预训练的情况下甚至可以达到比其他预训练方法更高的精度,并且比达到相同效果的其他方法的模型要小 10 倍。
https://github.com/Queequeg92/SE-Net-CIFAR se-net 代码
https://github.com/szq0214/DSOD Densnet+ssd
https://github.com/songrotek/Deep-Learning-Papers-Reading-Roadmap 机器学习各领域早期论文
-------------------
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android/
https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/inception5h.py
https://machinelearningflashcards.com/ tick
https://chrisalbon.com/
https://www.kdnuggets.com/2017/09/neural-networks-tic-tac-toe-keras.html
keras例子
https://chrisalbon.com/ 博客
https://github.com/keras-team/keras/tree/master/examples
https://www.kdnuggets.com/tutorials/index.html 英文学习资料
"""
#这里的网络架构和论文中插图中的网络架构是相一致的。
#对了,忘了说了,这里使用的keras版本是1.2.2,等源码读完之后,我自己改一个2.0.6版本上传到github上面。
#可别直接粘贴复制,里面有些中文的解释,不一定可行的。
#defint input shape
input_shape = (300,300,3)
#defint the number of classes
num_classes = 21
#Here the network is wrapped in to a dictory because it more easy to make some operations.
net = {}
# Block 1
input_tensor = Input(shape=input_shape)
#defint the image hight and wight
img_size = (input_shape[1], input_shape[0])
net['input'] = input_tensor
net['conv1_1'] = Convolution2D(64, 3, 3,
activation='relu',
border_mode='same',
name='conv1_1')(net['input'])
net['conv1_2'] = Convolution2D(64, 3, 3,
activation='relu',
border_mode='same',
name='conv1_2')(net['conv1_1'])
net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool1')(net['conv1_2'])
# Block 2
net['conv2_1'] = Convolution2D(128, 3, 3,
activation='relu',
border_mode='same',
name='conv2_1')(net['pool1'])
net['conv2_2'] = Convolution2D(128, 3, 3,
activation='relu',
border_mode='same',
name='conv2_2')(net['conv2_1'])
net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool2')(net['conv2_2'])
# Block 3
net['conv3_1'] = Convolution2D(256, 3, 3,
activation='relu',
border_mode='same',
name='conv3_1')(net['pool2'])
net['conv3_2'] = Convolution2D(256, 3, 3,
activation='relu',
border_mode='same',
name='conv3_2')(net['conv3_1'])
net['conv3_3'] = Convolution2D(256, 3, 3,
activation='relu',
border_mode='same',
name='conv3_3')(net['conv3_2'])
net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool3')(net['conv3_3'])
# Block 4
net['conv4_1'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv4_1')(net['pool3'])
net['conv4_2'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv4_2')(net['conv4_1'])
#the first layer be operated
net['conv4_3'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv4_3')(net['conv4_2'])
net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool4')(net['conv4_3'])
# Block 5
net['conv5_1'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv5_1')(net['pool4'])
net['conv5_2'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv5_2')(net['conv5_1'])
net['conv5_3'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv5_3')(net['conv5_2'])
net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), border_mode='same',
name='pool5')(net['conv5_3'])
#here is the FC6 in the orginal VGG16 Network,There move to Atrous Convolution for the reason i don't know.
# FC6
net['fc6'] = AtrousConvolution2D(1024, 3, 3, atrous_rate=(6, 6),
activation='relu', border_mode='same',
name='fc6')(net['pool5'])
#the second layer to be operated
# FC7
net['fc7'] = Convolution2D(1024, 1, 1, activation='relu',
border_mode='same', name='fc7')(net['fc6'])
# x = Dropout(0.5, name='drop7')(x)
# Block 6
net['conv6_1'] = Convolution2D(256, 1, 1, activation='relu',
border_mode='same',
name='conv6_1')(net['fc7'])
#the third layer to be opreated
net['conv6_2'] = Convolution2D(512, 3, 3, subsample=(2, 2),
activation='relu', border_mode='same',
name='conv6_2')(net['conv6_1'])
# Block 7
net['conv7_1'] = Convolution2D(128, 1, 1, activation='relu',
border_mode='same',
name='conv7_1')(net['conv6_2'])
net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
#the forth layer to be operated
net['conv7_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
activation='relu', border_mode='valid',
name='conv7_2')(net['conv7_2'])
# Block 8
net['conv8_1'] = Convolution2D(128, 1, 1, activation='relu',
border_mode='same',
name='conv8_1')(net['conv7_2'])
#the fifth layer to be operated
net['conv8_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
activation='relu', border_mode='same',
name='conv8_2')(net['conv8_1'])
# the last layer to be operated
# Last Pool
net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
# Prediction from conv4_3
# net['conv4_3']._shape = (?, 38, 38, 512)
# 算了还是说中文吧,这个层是用来对输入数据进行正则化的层,有参数需要学习,输出的数据形式和输入输入形式是一致的。
net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
num_priors = 3
#here is *4 because the box need 4 number to define,here is only predice the box coordinate
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
net['conv4_3_norm_mbox_loc'] = x
flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
#the box coordinate is finished now it will perdice the classes
name = 'conv4_3_norm_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
# here is start predict the classes
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv4_3_norm'])
net['conv4_3_norm_mbox_conf'] = x
flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
#这里是用来对conv4_3层的feature map生成论文中所说的default box,对没错,就是直接使用Feature map来进行default box的生成
#当然这里要指定一些参数,这些参数是需要好好斟酌的。
priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv4_3_norm_mbox_priorbox')
net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
#好了,到这里第一个层的操作就完成了,下面其他层的操作都是相类似的啦。
# Prediction from fc7
num_priors = 6
net['fc7_mbox_loc'] = Convolution2D(num_priors * 4, 3, 3,
border_mode='same',
name='fc7_mbox_loc')(net['fc7'])
flatten = Flatten(name='fc7_mbox_loc_flat')
net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
name = 'fc7_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
net['fc7_mbox_conf'] = Convolution2D(num_priors * num_classes, 3, 3,
border_mode='same',
name=name)(net['fc7'])
flatten = Flatten(name='fc7_mbox_conf_flat')
net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='fc7_mbox_priorbox')
net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
# Prediction from conv6_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv6_2_mbox_loc')(net['conv6_2'])
net['conv6_2_mbox_loc'] = x
flatten = Flatten(name='conv6_2_mbox_loc_flat')
net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
name = 'conv6_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv6_2'])
net['conv6_2_mbox_conf'] = x
flatten = Flatten(name='conv6_2_mbox_conf_flat')
net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv6_2_mbox_priorbox')
net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
# Prediction from conv7_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv7_2_mbox_loc')(net['conv7_2'])
net['conv7_2_mbox_loc'] = x
flatten = Flatten(name='conv7_2_mbox_loc_flat')
net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
name = 'conv7_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv7_2'])
net['conv7_2_mbox_conf'] = x
flatten = Flatten(name='conv7_2_mbox_conf_flat')
net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv7_2_mbox_priorbox')
net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])
# Prediction from conv8_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv8_2_mbox_loc')(net['conv8_2'])
net['conv8_2_mbox_loc'] = x
flatten = Flatten(name='conv8_2_mbox_loc_flat')
net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
name = 'conv8_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv8_2'])
net['conv8_2_mbox_conf'] = x
flatten = Flatten(name='conv8_2_mbox_conf_flat')
net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
priorbox = PriorBox(img_size, 222.0, max_size=276.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv8_2_mbox_priorbox')
net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])
# Prediction from pool6
num_priors = 6
x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])
net['pool6_mbox_loc_flat'] = x
name = 'pool6_mbox_conf_flat'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Dense(num_priors * num_classes, name=name)(net['pool6'])
net['pool6_mbox_conf_flat'] = x
priorbox = PriorBox(img_size, 276.0, max_size=330.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='pool6_mbox_priorbox')
#由于这里的维数不对,因此要修改Feature map层对应的维数信息
if K.image_dim_ordering() == 'tf':
target_shape = (1, 1, 256)
else:
target_shape = (256, 1, 1)
net['pool6_reshaped'] = Reshape(target_shape,
name='pool6_reshaped')(net['pool6'])
net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])
#好啦,到这里位置,所有的信息都已经生成了,下一步就是根据这些信息来进行训练或者是预测了。
# Gather all predictions
net['mbox_loc'] = merge([net['conv4_3_norm_mbox_loc_flat'],
net['fc7_mbox_loc_flat'],
net['conv6_2_mbox_loc_flat'],
net['conv7_2_mbox_loc_flat'],
net['conv8_2_mbox_loc_flat'],
net['pool6_mbox_loc_flat']],
mode='concat', concat_axis=1, name='mbox_loc')
net['mbox_conf'] = merge([net['conv4_3_norm_mbox_conf_flat'],
net['fc7_mbox_conf_flat'],
net['conv6_2_mbox_conf_flat'],
net['conv7_2_mbox_conf_flat'],
net['conv8_2_mbox_conf_flat'],
net['pool6_mbox_conf_flat']],
mode='concat', concat_axis=1, name='mbox_conf')
net['mbox_priorbox'] = merge([net['conv4_3_norm_mbox_priorbox'],
net['fc7_mbox_priorbox'],
net['conv6_2_mbox_priorbox'],
net['conv7_2_mbox_priorbox'],
net['conv8_2_mbox_priorbox'],
net['pool6_mbox_priorbox']],
mode='concat', concat_axis=1,
name='mbox_priorbox')
if hasattr(net['mbox_loc'], '_keras_shape'):
num_boxes = net['mbox_loc']._keras_shape[-1] // 4
elif hasattr(net['mbox_loc'], 'int_shape'):
num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
net['mbox_loc'] = Reshape((num_boxes, 4),
name='mbox_loc_final')(net['mbox_loc'])
net['mbox_conf'] = Reshape((num_boxes, num_classes),
name='mbox_conf_logits')(net['mbox_conf'])
net['mbox_conf'] = Activation('softmax',
name='mbox_conf_final')(net['mbox_conf'])
net['predictions'] = merge([net['mbox_loc'],
net['mbox_conf'],
net['mbox_priorbox']],
mode='concat', concat_axis=2,
name='predictions')
model = Model(net['input'], net['predictions'])
|
16,392 | c8372a5e1aca095a5ba2f84a157c604e7ad197fc | import jaccardIndex
def calculate(base,term):
    # Jaccard distance is the complement of the Jaccard similarity index.
    return 1.0 - jaccardIndex.calculate(base,term) |
16,393 | 2cc3fbc31ed4a3a7b8b18ab3fbc98e3ab58b9892 | import numpy as np
from scipy.ndimage import morphological_gradient
def _is_iterable(x):
try:
iter(x)
except TypeError:
return False
else:
return True
def _norm_along_last_axis(x):
"""Compute the norm of x along the last axis.
"""
return np.sqrt(np.sum(np.square(x), axis=x.ndim - 1))
def _compute_set_distances(nonzeros_1, nonzeros_2):
"""Compute all surface distances from one set to the other.
"""
distances = np.zeros(len(nonzeros_1))
for i, _ in enumerate(distances):
distances[i] = np.min(
_norm_along_last_axis(nonzeros_1[i].reshape(1, -1) - nonzeros_2)
)
return distances
def compute_surface_distances(mask1, mask2, voxel_dimensions=1):
    """Return the sorted distances from every surface point of *mask1* to the
    surface of *mask2*.

    Arguments
    ---------
    mask1 : np.ndarray
        Boolean mask whose surface points the distances are measured from.
    mask2 : np.ndarray
        Boolean mask whose surface the distances are measured to.
    voxel_dimensions : iterable or numeric
        Physical voxel size; pass an iterable (one entry per image axis)
        for anisotropic voxels.
    """
    # Surfaces are extracted as the morphological gradient of each mask,
    # with a 3-wide structuring element on every axis.
    kernel = tuple(3 for _ in mask1.shape)
    surface1 = morphological_gradient(mask1.astype(int), size=kernel)
    surface2 = morphological_gradient(mask2.astype(int), size=kernel)

    # Broadcast a scalar voxel size to every axis.
    if not _is_iterable(voxel_dimensions):
        voxel_dimensions = [voxel_dimensions] * len(mask1.shape)
    scale = np.array(voxel_dimensions).reshape(1, -1)

    # Convert voxel indices to physical coordinates before measuring.
    points1 = np.array(np.nonzero(surface1)).T * scale
    points2 = np.array(np.nonzero(surface2)).T * scale
    return np.sort(_compute_set_distances(points1, points2))
def compute_labelled_surface_distances(
    labelled_1, labelled_2, num_labels_1, num_labels_2, voxel_dimensions=1
):
    """Compute surface distances per connected component, both ways.

    Each label of one labelled mask is measured against the *whole* other
    mask; returns the two lists of (sorted) distance arrays.
    """
    mask1 = labelled_1 != 0
    mask2 = labelled_2 != 0
    surface_distance_label_1 = [
        compute_surface_distances(labelled_1 == label, mask2, voxel_dimensions)
        for label in range(1, num_labels_1 + 1)
    ]
    surface_distance_label_2 = [
        compute_surface_distances(labelled_2 == label, mask1, voxel_dimensions)
        for label in range(1, num_labels_2 + 1)
    ]
    return surface_distance_label_1, surface_distance_label_2
def compute_object_percentile_surface_distances(
    labelled_surface_distances_1, labelled_surface_distances_2, percentile
):
    """Return the *percentile* surface distance of every connected component.

    One value is computed per distance array in each input list; the two
    results come back as numpy arrays.
    """
    percentiles_1 = [
        np.percentile(distances, percentile)
        for distances in labelled_surface_distances_1
    ]
    percentiles_2 = [
        np.percentile(distances, percentile)
        for distances in labelled_surface_distances_2
    ]
    return np.array(percentiles_1), np.array(percentiles_2)
def compute_overall_percentile_surface_distances(
    labelled_surface_distances_1, labelled_surface_distances_2, percentile
):
    """Return the *percentile* surface distance over all components pooled
    together, separately for each direction.
    """
    pooled_1 = np.concatenate(labelled_surface_distances_1)
    pooled_2 = np.concatenate(labelled_surface_distances_2)
    return np.percentile(pooled_1, percentile), np.percentile(pooled_2, percentile)
def compute_object_average_surface_distances(labelled_surface_distances_1, labelled_surface_distances_2):
    """Return the mean surface distance of every connected component.

    One mean per distance array in each input list; the two results come
    back as numpy arrays.
    """
    means_1 = [np.mean(distances) for distances in labelled_surface_distances_1]
    means_2 = [np.mean(distances) for distances in labelled_surface_distances_2]
    return (
        np.array(means_1),
        np.array(means_2),
    )
def compute_overall_average_surface_distances(labelled_surface_distances_1, labelled_surface_distances_2):
    """Return the mean surface distance over all components pooled together,
    separately for each direction.
    """
    pooled_1 = np.concatenate(labelled_surface_distances_1)
    pooled_2 = np.concatenate(labelled_surface_distances_2)
    return np.mean(pooled_1), np.mean(pooled_2)
|
16,394 | 226fcbdc43073f610624da574227762cfc85f363 | # *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# *****************************************************************************
"""
Modified from https://github.com/keithito/tacotron
"""
from typing import List
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
# Character inventory for the model's input alphabet.
_pad = '_'  # padding token, placed first so it gets ID 0
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
# Full symbol table; ordering matters because a symbol's position is its ID.
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters)
# Reverse lookup: symbol -> integer ID.
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
def text_to_sequence(sent: str) -> List[int]:
    r'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    Args:
        sent (str): The input sentence to convert to a sequence.

    Returns:
        List of integers corresponding to the symbols in the sentence.
    '''
    # Normalisation pipeline: ASCII fold -> lower-case -> spelled-out numbers.
    normalized = normalize_numbers(unidecode(sent).lower())
    # Expand common abbreviations ("dr." -> "doctor", ...).
    for pattern, expansion in _abbreviations:
        normalized = pattern.sub(expansion, normalized)
    # Collapse runs of whitespace into single spaces.
    normalized = _whitespace_re.sub(' ', normalized)
    # Characters outside the symbol table are silently dropped.
    return [_symbol_to_id[ch] for ch in normalized if ch in _symbol_to_id]
|
16,395 | 654e857cf6c608fa6bc0ac22c57a794f41f8705e | import pickle
import time
from random import *
from tkinter import *
# Possible outcomes of an encounter.
caughtt=["caught","missed"]
# Starter inventory: backpack[i] was caught with ballcaught[i] (parallel lists).
backpack=["Bulbasaur", "Charmander"]
ballcaught=["Poke Ball", "Poke Ball"]
# NOTE(review): both loops reuse the name `i` and only sleep, delaying the
# program by 100 * 0.5 s before the single encounter below — confirm intended.
for i in range(0,10):
    for i in range(0,10):
        time.sleep(0.5)
balls=["Great Ball","Master Ball","Poke Ball","Ultra Ball"]
things=["Solgaleo", "Lunala", "Cosmog", "Charizard", "Charmeleon","Charmander","Venusaur","Ivysaur", "Bulbasaur", "Blastoise","Wartortle", "Squirtle","Grimer","Muk","Machamp","Machop", "Machoke"]
# Pick one random encounter: a Pokemon, a ball, and an outcome.
found=choice(things)
ball=choice(balls)
caught=choice(caughtt)
print('You found a wild %s' % found)
print('You %s it with a %s' % (caught,ball))
if caught=='caught' :
    # Record the catch in the parallel lists.
    backpack.append(found)
    ballcaught.append(ball)
# Show the full inventory.
for x in range(0,len(backpack)):
    print('%s was caught by an %s' % (backpack[x], ballcaught[x]))
# Data to persist: the two parallel lists together.
pq=[backpack, ballcaught]
# Minimal Tk window holding the Save button defined below.
tk = Tk()
def q():
    """Persist the caught-Pokemon lists (``pq``) to the 'pokemonopener' file."""
    # Use a context manager so the handle is flushed and closed; the
    # original opened the file and never closed it, risking a truncated save.
    with open('pokemonopener', 'wb') as save_file:
        pickle.dump(pq, save_file)
# "Save" button that pickles the inventory when clicked.
btnq = Button(tk,text = "Save",command=q)
btnq.pack()
|
16,396 | 479708aeb5b98179fd7769677445b9376d400ac3 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 14:36:04 2019
@author: vnandanw
"""
#import relevant libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
#Read the data set from file
FlatData = pd.read_csv('Insurance.csv')
#Separate features and labels from the data set
# NOTE(review): X takes all columns but the last, while y takes column 1 —
# this only agrees when the CSV has exactly two columns (Age, Premium).
# Confirm against the file, otherwise y should be iloc[:, -1].
X = FlatData.iloc[:,:-1].values
y = FlatData.iloc[:,1].values
#Create train and test data
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Fit a random forest on the full data set (no train/test split here).
from sklearn.ensemble import RandomForestRegressor
RFRegression = RandomForestRegressor(n_estimators = 10, random_state = 0)
RFRegression.fit(X,y)
#Plot the data on the graph
plt.scatter(X,
            y,
            color='green')
plt.plot(X, RFRegression.predict(X), color = 'red')
# NOTE(review): title says "Polynomial" but the model is a random forest.
plt.title('Insurance Premium - Polynomial')
plt.xlabel('Age')
plt.ylabel('Premium')
plt.show()
# Predict the premium for a single 40-year-old.
val = [[40]]
predictLinear = RFRegression.predict(val)
|
16,397 | 07d4c13a04d32e6ea847ba7ace31306cd8b48527 | from PIL import Image,ImageDraw
def addTransparency(img, factor = 0.7 ):
    """Blend *img* against a fully transparent canvas.

    *factor* is the opacity kept from the original image
    (0.0 = fully transparent, 1.0 = unchanged).
    """
    # Adjust the transparency level of the image.
    img = img.convert('RGBA')
    img_blender = Image.new('RGBA', img.size, (0,0,0,0))
    img = Image.blend(img_blender, img, factor)
    return img
def circle_corner(img, radii):
    """
    Round the four corners of an image.
    :param img: source image.
    :param radii: corner radius, e.g. 30.
    :return: a new image with rounded corners.
    """
    # Draw a filled circle (later split into the 4 corner quarters).
    circle = Image.new('L', (radii * 2, radii * 2), 0)  # black canvas
    draw = ImageDraw.Draw(circle)
    draw.ellipse((0, 0, radii * 2, radii * 2), fill=255)  # white filled circle
    # Source image.
    img = img.convert("RGBA")
    w, h = img.size
    # Build an alpha mask and paste a circle quarter into each corner.
    alpha = Image.new('L', img.size, 255)
    alpha.paste(circle.crop((0, 0, radii, radii)), (0, 0))  # top-left
    alpha.paste(circle.crop((radii, 0, radii * 2, radii)), (w - radii, 0))  # top-right
    alpha.paste(circle.crop((radii, radii, radii * 2, radii * 2)), (w - radii, h - radii))  # bottom-right
    alpha.paste(circle.crop((0, radii, radii, radii * 2)), (0, h - radii))  # bottom-left
    # alpha.show()
    img.putalpha(alpha)  # white mask areas stay visible, black areas become transparent
    return img
# 打开图片
# imagesfile = "../../../html5+css3/images/16.jpg"
# imagesfile = "../../../../../bia/demo1.jpg"
# im = Image.open(imagesfile)
# box = (0,0,728,525)
# im2 = Image.new("RGB",())
# im = im.crop(box)
# im1.paste(im,(100,100))
# print(im.size)
# im1.show()
# 新建图片
# im1 = Image.new("RGBA",(800,300),(136,136,136))
# im1 = addTransparency(im1, factor = 0.1)
# print(im1.mode)
# 获取图片打印格式
# print(im1.format)
# im2 = Image.blend(im,im1,1)
# im1.show()
# 保存文件
# im.save("../../../../../bia/demo.jpg","JPEG")
#
# Demo / scratch code: open an image, load a font, show the image.
im1 = Image.open("../../../../../bia/demo.jpg")
# im1 = im1.crop((0,0,1920,1056))
# im2 = Image.new("RGB",(1488,572),"#A9A9A9")
# im2 = circle_corner(im2, radii=50)
# im2 = addTransparency(im2,0.4)
# im1.paste(im2,(218,243))
# im1 = im1.resize((1080,300))
# im1 = im1.draft("RGB", (200,200))
#
# Bug fix: only Image and ImageDraw are imported at the top of this file, so
# the truetype() call below raised NameError — import ImageFont explicitly.
from PIL import ImageFont
fnt=ImageFont.truetype("c:/Windows/Fonts/Tahoma.ttf", 40)
print(im1.size)
im1.show()
exit() |
16,398 | 5f24ab42b4f80d32e4b4059d471f5c3a462ab283 | import importlib
import inspect
import logging
import sys
from pathlib import Path
from unittest.mock import Mock
import pytest
from aio_wx_widgets import widgets
logging.basicConfig(level=logging.DEBUG)
_LOGGER = logging.getLogger(__name__)
IMAGE_PATH = Path(__file__).parent
def load_module(module_file: Path):
    """Load and import a module based on the location of the *.py file.

    The module is registered in ``sys.modules`` under its file stem and
    returned; any failure is logged and re-raised.
    """
    try:
        # Bug fix: ``importlib.util`` is a submodule that a bare
        # ``import importlib`` does not guarantee to bind; import it
        # explicitly so ``importlib.util`` cannot raise AttributeError.
        import importlib.util

        name = module_file.stem
        spec = importlib.util.spec_from_file_location(name, module_file)
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        spec.loader.exec_module(module)
        return module
    except Exception as err:
        _LOGGER.exception(err)
        raise
def get_all_modules(package):
    """Yield every module loaded from the *.py files next to *package*'s source."""
    package_dir = Path(inspect.getabsfile(package)).parent
    for module_file in package_dir.glob("*.py"):
        print(f"loading module {module_file}")
        yield load_module(module_file)
def get_all_classes_defined_in_module(module):
    """Yield (name, class) pairs for classes whose home module is *module*.

    Classes merely imported into the module's namespace are skipped.
    """
    for name, cls in inspect.getmembers(module, inspect.isclass):
        if cls.__module__ == module.__name__:
            yield name, cls
def get_all_widget_classes(package):
    """Yield (name, class, kwargs) for every public widget class in *package*.

    *kwargs* holds the minimal constructor arguments a specific widget
    needs; classes without an entry are constructed with no arguments.
    """
    # Lazy factories so every yielded entry gets fresh Mock instances.
    kwargs_factories = {
        "AioButton": lambda: {"label": "a label", "callback": Mock()},
        "Group": lambda: {"label": "grouplabel"},
        "Entry": lambda: {"binding": Mock()},
        "Select": lambda: {"choices": [Mock()]},
        "CheckBox": lambda: {"label": "checkbox_label", "binding": Mock()},
        "LabelledItem": lambda: {"label_text": "labeltext", "item": Mock()},
        "Image": lambda: {"image": Path(IMAGE_PATH / "phoenix_main.png")},
    }
    for module in get_all_modules(package):
        for name, cls in get_all_classes_defined_in_module(module):
            if name.startswith("_"):
                continue
            factory = kwargs_factories.get(name)
            yield name, cls, factory() if factory is not None else {}
# Instantiate every public widget class once with its minimal constructor
# arguments; the parametrization list is built at collection time, which
# imports every module in aio_wx_widgets.widgets.
@pytest.mark.parametrize(
    "widget_class,kwargs",
    [
        pytest.param(_cls[1], _cls[2], id=_cls[0])
        for _cls in get_all_widget_classes(widgets)
    ],
)
def test_init(widget_class, kwargs, wx_app):
    # wx_app is a fixture (defined elsewhere) providing a running wx App.
    widget_class(**kwargs)
|
16,399 | b75bbbc8e7c031267b2d55b14e75048c294c52df | import numpy as np
import sys
import math
from scipy.spatial.distance import euclidean
from lib import fastdtw
from os import listdir
from os.path import isfile, join
from plotData import PlotData
class ReviewClassifier(object):
    """Loads per-meal temperature curves for a recipe and (work in progress)
    classifies meals as failed based on reviews.
    """
    def __init__(self, recipe_name, max_line):
        # recipe_name selects the temperature-data file to load; max_line
        # caps how many meals are read from it.
        self.recipe_name = recipe_name
        path = "data/temperatureData/" + str(self.recipe_name)
        self.failed_meal_list = {}
        self.meal_list = {}
        self.initial_meal_list(path, max_line)
    def initial_meal_list(self, path, max_line):
        """Parse the temperature file into self.meal_list.

        Each line appears to look like "<meal_id> {t0,t1,...}", with one
        temperature sample every 10 time units — TODO confirm format.
        """
        with open(path) as file:
            i = 0
            for line in file:
                if len(self.meal_list) > max_line:
                    break
                meal_id = line.split()[0]
                # NOTE(review): str.strip(meal_id) strips any *characters*
                # of meal_id from both ends, not the prefix — this can eat
                # digits of the first/last temperature. Probably meant
                # line[len(meal_id):]; confirm against the data.
                line = line.strip(meal_id)
                line = line.strip(" {")
                line = line.strip("}\n")
                self.meal_list[meal_id] = {}
                self.meal_list[meal_id]["data"] = []
                i = 0
                for temperature in line.split(','):
                    # Store (time, temperature) pairs, 10 units apart.
                    self.meal_list[meal_id]["data"].append([i,round(float(temperature),2)])
                    i += 10
                self.meal_list[meal_id]["error"] = 0
                self.meal_list[meal_id]["stddev"] = 0
                self.meal_list[meal_id]["is_cooked"] = None
    def zero_results_in_meal_list(self):
        """Reset the computed statistics for every loaded meal."""
        for meal_id in self.meal_list:
            self.meal_list[meal_id]["error"] = 0
            self.meal_list[meal_id]["stddev"] = 0
    def list_failed_meal_id(self, reviews_file):
        # NOTE(review): currently only prints each review line and returns
        # None, while the caller expects a list of failed meal ids — TODO
        # implement the actual extraction.
        with open(reviews_file) as file:
            i = 0
            for line in file:
                print(line)
    def generate_failed_list_use_review(self, reviews_file):
        """Derive the failed-meal list from the reviews file (incomplete)."""
        failed_meal_id_list = self.list_failed_meal_id(reviews_file)
        # Prints None until list_failed_meal_id returns a value.
        print(failed_meal_id_list)
# Entry point: recipe name and the meal cap come from the command line.
classifier = ReviewClassifier(recipe_name = sys.argv[1], max_line = int(sys.argv[2]))
classifier.generate_failed_list_use_review(reviews_file = "data/reviews.csv")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.