index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,700 | de78d2e3db546d3bd4a505e850923e2cb4a9c601 | """
Home Work 6
Ankit Khandelwal
Exercise 2
15863
"""
from math import pi, sin
import matplotlib.pyplot as plt
import numpy as np
# --- Physical and numerical parameters ---
D = 0.1       # thermal diffusivity (m^2 / day)
A = 10        # mean surface temperature (deg C)
B = 12        # amplitude of the annual surface swing (deg C)
tau = 365     # period of the surface oscillation (days)


def T_surf(t):
    """Surface temperature (deg C) on day t: sinusoid of period tau about A."""
    return A + B * sin(2 * pi * t / tau)


L = 20        # domain depth (m)
N = 40        # number of grid points
a = L / N     # grid spacing (m)
h = 1         # time step (days)

# Initial profile: driven surface node, 10 C interior, fixed 11 C at the bottom.
profile = np.empty(N, float)
profile[0] = T_surf(0)
profile[1:N - 1] = 10
profile[N - 1] = 11
depths = np.linspace(0, 20, N)

coeff = h * D / a ** 2  # FTCS prefactor; 0.4 here, below the 0.5 stability bound
# Days (during year 10) at which a profile snapshot is plotted.
plot_days = (91 + 9 * 365 / h, 182 + 9 * 365 / h,
             273 + 9 * 365 / h, 364 + 9 * 365 / h)

for day in range(10 * 365):
    updated = np.empty(N, float)
    updated[0] = T_surf(day)
    # Vectorised forward-time centred-space update of the interior points.
    updated[1:N - 1] = profile[1:N - 1] + coeff * (
        profile[2:] + profile[:N - 2] - 2 * profile[1:N - 1])
    updated[N - 1] = 11
    profile = updated
    if day in plot_days:
        plt.plot(depths, profile, label='{0:.2f} years.'.format((day / 365)))

plt.legend()
plt.xlabel('Depth (in m)')
plt.ylabel('Temperature (in C)')
plt.title('Temperature Profile of Earth\'s Crust')
plt.show()
|
996,701 | 42b183ce3ba9b8d6f8d2719d75866f8d7a7106f2 | from transformers import ElectraTokenizer, pipeline
from model import ElectraForSequenceClassification
from pprint import pprint
# Korean movie-review sentiment analysis with a fine-tuned KoELECTRA checkpoint.
MODEL_NAME = "monologg/koelectra-small-finetuned-sentiment"

tokenizer = ElectraTokenizer.from_pretrained(MODEL_NAME)
model = ElectraForSequenceClassification.from_pretrained(MODEL_NAME)

# Hugging Face inference pipeline wired to the project-local model class.
nsmc = pipeline(
    "sentiment-analysis",
    model=model,
    tokenizer=tokenizer
)

texts = [
    "이 영화는 미쳤다. 넷플릭스가 일상화된 시대에 극장이 존재해야하는 이유를 증명해준다.",
    "촬영감독의 영혼까지 갈아넣은 마스터피스",
    "보면서 화가날수있습니다.",
    "아니 그래서 무슨말이 하고싶은거야 ㅋㅋㅋ"
]
pprint(nsmc(texts))
|
996,702 | f8d426629e6217aaf52c8ef15ba68c62b53ba9b6 | import numpy as np
import pytest
import scipy.stats
from pyextremes.tests import KolmogorovSmirnov
class TestKolmogorovSmirnov:
    """Unit tests for pyextremes' KolmogorovSmirnov goodness-of-fit wrapper."""

    def test_init_errors(self):
        # A `distribution` that is not a name or rv_continuous must be rejected.
        with pytest.raises(TypeError, match=r"invalid type.*rv_continuous"):
            KolmogorovSmirnov(
                extremes=[1, 2, 3],
                distribution=1,
                fit_parameters={"a": 1},
            )

    @pytest.mark.parametrize(
        "distribution,fit_parameters",
        [
            ["genextreme", {"c": 0.3, "loc": 10, "scale": 2}],
            [scipy.stats.genpareto, {"c": 0.3, "loc": 10, "scale": 2}],
        ],
    )
    def test_init(self, distribution, fit_parameters):
        # The class accepts either a scipy distribution name or the object itself.
        reference_dist = (
            getattr(scipy.stats, distribution)
            if isinstance(distribution, str)
            else distribution
        )

        # Draw extremes from the exact distribution being tested so the
        # null hypothesis should not be rejected.
        np.random.seed(12345)
        extremes = reference_dist.rvs(size=100, **fit_parameters)
        scipy_kstest = scipy.stats.kstest(
            rvs=extremes,
            cdf=lambda x: reference_dist.cdf(x, **fit_parameters),
        )

        kstest = KolmogorovSmirnov(
            extremes=extremes,
            distribution=distribution,
            fit_parameters=fit_parameters,
            significance_level=0.05,
        )
        assert kstest.name == "Kolmogorov-Smirnov"
        assert reference_dist.name in kstest.null_hypothesis
        assert reference_dist.name in kstest.alternative_hypothesis
        assert kstest.success
        # Statistic/p-value must agree with scipy's reference implementation,
        # and the critical value with the one-sided KS distribution quantile.
        assert np.isclose(kstest.test_statistic, scipy_kstest.statistic)
        assert np.isclose(kstest.pvalue, scipy_kstest.pvalue)
        assert np.isclose(
            kstest.critical_value,
            scipy.stats.ksone.ppf(1 - 0.05 / 2, len(extremes)),
        )

        # A deliberately wrong fit (standard normal) must fail the test.
        kstest = KolmogorovSmirnov(
            extremes=extremes,
            distribution=scipy.stats.norm,
            fit_parameters={"loc": 0, "scale": 1},
            significance_level=0.05,
        )
        assert not kstest.success
|
class LoanRaw:
    """Plain record holding one row of the raw `loan` table, field for field."""

    def __init__(self, loan_id, account_id, date, amount, duration, payments, status):
        self.loan_id = loan_id        # primary key of the loan
        self.account_id = account_id  # owning account's id
        self.date = date              # date the loan was granted
        self.amount = amount          # total loaned amount
        self.duration = duration      # loan duration
        self.payments = payments      # periodic payment amount
        self.status = status          # repayment status code

    def __repr__(self):
        # Debug-friendly representation showing every stored field.
        return (
            "LoanRaw(loan_id={!r}, account_id={!r}, date={!r}, amount={!r}, "
            "duration={!r}, payments={!r}, status={!r})".format(
                self.loan_id, self.account_id, self.date, self.amount,
                self.duration, self.payments, self.status)
        )
996,704 | 2a00826353773768339c74c3ac71279e394691fe | from pathlib import Path
import argparse
class colors:
    # ANSI escape sequences for colouring terminal output.
    WARNING = '\033[93m'  # yellow: non-fatal problems (e.g. a file we skip)
    # FAIL = '\033[91m'
    ENDLINE = '\033[0m'  # reset all attributes back to the terminal default
def _parse_args():
    """Build the CLI, parse sys.argv, validate the flag combination, and return the namespace.

    Exits with status 1 (via _validate_args) when the path/flag combination
    is inconsistent.
    """
    parser = argparse.ArgumentParser(
        prog='word_replacer',
        # Fixed typo ("recusively") and the unbalanced parenthesis in the help text.
        description='Change one word with another in a text file (works also with directory recursively)',
    )
    parser.add_argument('path',
                        metavar='path',
                        type=Path,
                        help='path to file or folder')
    parser.add_argument('find',
                        metavar='find-word',
                        type=str,
                        help='the word/phrase that will be modified')
    parser.add_argument('replace',
                        metavar='replace-word',
                        type=str,
                        help='the replace word/phrase')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Execute the script verbosely')
    parser.add_argument('-d',
                        '--directory',
                        action='store_true',
                        help='Modify files in a directory')
    parser.add_argument('-r',
                        '--recursive',
                        action='store_true',
                        help='Search all subdirectories recursively. Requires "-d"')
    args = parser.parse_args()
    _validate_args(args)
    return args
def _validate_args(args: argparse.Namespace):
if not args.path.exists():
print('Path does not exist')
elif args.directory and not args.path.is_dir():
print('Not a directory path. Remove the "-d" flag to work with a single file')
elif not args.directory and args.path.is_dir():
print('Not a file path. Use the "-d" flag to work with a directory')
elif args.recursive and not args.directory:
print('Recursive mode cannot be used when working with a single file. '
'Use the "-d" flag to work with a directory')
else:
return
exit(1)
def _replace_in_file(file: Path, target: str, replacement: str, verbose: bool):
if verbose:
print(f"modifying file \t {file}")
try:
initial_text = file.read_text()
replacement_text = initial_text.replace(target, replacement)
file.write_text(replacement_text)
except (UnicodeDecodeError, PermissionError) as err:
if verbose:
print(f"{colors.WARNING}failed to modify file {file}:\n{err}, skipping{colors.ENDLINE}")
return
def main():
    """Entry point: resolve the list of target files and apply the replacement."""
    args = _parse_args()
    if args.directory:
        # '**/' makes glob descend into subdirectories when -r was given.
        pattern = '**/*.*' if args.recursive else '*.*'
        targets = args.path.glob(pattern)
    else:
        targets = [args.path]
    for path in targets:
        if path.is_dir():
            continue  # glob can match directories with dotted names
        _replace_in_file(
            path,
            target=args.find,
            replacement=args.replace,
            verbose=args.verbose,
        )


if __name__ == "__main__":
    main()
996,705 | 48274cc3e66b8421994083738fea4e660a0919a7 | from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
import matplotlib.pyplot as plt
import string
import nltk
from stockjson import *
import datetime
nltk.download('vader_lexicon')
# RUN EVERY DAY
# CHANGE DATETIME CONSTANTS IN Weigh_Tweets() AND Parse_Stock_Prices()
def Weigh_Tweets(clean_tweets):
    """Append today's follower-weighted average tweet sentiment to the history CSV.

    Reads the running history from weighed_tweet_scores.csv (first column is
    the stale on-disk index and is dropped), computes the follower-weighted
    mean of the 'compound' scores for either the morning (< 12:05) or the
    afternoon run, appends one row, writes the file back, and returns the
    updated history DataFrame.

    clean_tweets must have columns 'time' (datetime64), 'follower_count'
    and 'compound'.
    """
    seven_day_sentiment = pd.read_csv("weighed_tweet_scores.csv")
    seven_day_sentiment.drop(seven_day_sentiment.columns[[0]], axis=1, inplace=True)

    current = datetime.datetime.now()
    morning_today = current.replace(hour=0, minute=0)
    afternoon_today = current.replace(hour=13, minute=0)
    cutoff = datetime.time(12, 5, 0)  # tweets before 12:05 count as "morning"

    times = clean_tweets['time'].dt.time.tolist()
    followers = clean_tweets['follower_count'].values.tolist()
    compound_scores = clean_tweets['compound'].values.tolist()

    # Single pass: accumulate follower totals and follower-weighted sentiment
    # for each half of the day (the original looped over the data twice).
    morning_total_followers = 0
    afternoon_total_followers = 0
    total_morning_sentiment = 0
    total_afternoon_sentiment = 0
    for tweet_time, n_followers, score in zip(times, followers, compound_scores):
        if tweet_time < cutoff:
            morning_total_followers += n_followers
            total_morning_sentiment += score * n_followers
        else:
            afternoon_total_followers += n_followers
            total_afternoon_sentiment += score * n_followers

    # No morning activity means this is the afternoon run, and vice versa.
    if morning_total_followers == 0:
        average = total_afternoon_sentiment / afternoon_total_followers
        today = pd.DataFrame([[afternoon_today, average]],
                             columns=['Date', 'Average Sentiment'])
    else:
        average = total_morning_sentiment / morning_total_followers
        today = pd.DataFrame([[morning_today, average]],
                             columns=['Date', 'Average Sentiment'])

    # DataFrame.append was removed in pandas 2.0 -- use pd.concat instead.
    seven_day_sentiment = pd.concat([seven_day_sentiment, today])
    seven_day_sentiment.to_csv('weighed_tweet_scores.csv')
    return seven_day_sentiment
def Parse_Stock_Prices(stock_prices):
    """Append today's morning/afternoon average close prices to seven_day_prices.csv.

    stock_prices must have columns 'date' (datetime64) and '4. close'.
    Prices before noon are averaged into the morning row (timestamped 00:00),
    the rest into the afternoon row (timestamped 12:00). Writes the updated
    history back to disk and returns it.
    """
    seven_day_prices = pd.read_csv("seven_day_prices.csv")
    # First CSV column is the stale on-disk index -- drop it.
    seven_day_prices.drop(seven_day_prices.columns[[0]], axis=1, inplace=True)

    noon = datetime.time(12, 0, 0)
    times = stock_prices['date'].dt.time.tolist()
    prices = stock_prices['4. close'].values.tolist()

    # Single pass: count samples and sum prices for each half of the day
    # (the original looped over the data twice).
    morning_count = 0
    afternoon_count = 0
    morning_price = 0
    afternoon_price = 0
    for sample_time, price in zip(times, prices):
        if sample_time < noon:
            morning_count += 1
            morning_price += price
        else:
            afternoon_count += 1
            afternoon_price += price

    # Guard against a session with no samples on one side of noon; the
    # original divided unconditionally and raised ZeroDivisionError.
    morning_price = morning_price / morning_count if morning_count else 0
    afternoon_price = afternoon_price / afternoon_count if afternoon_count else 0

    now = datetime.datetime.now()
    morning_today = now.replace(hour=0, minute=0)
    afternoon_today = now.replace(hour=12, minute=0)
    today = pd.DataFrame(
        [[morning_today, morning_price], [afternoon_today, afternoon_price]],
        columns=['date', 'Average Stock Price'])

    # DataFrame.append was removed in pandas 2.0 -- use pd.concat instead.
    seven_day_prices = pd.concat([seven_day_prices, today], ignore_index=True)
    seven_day_prices.to_csv('seven_day_prices.csv')
    return seven_day_prices
# --- Score cleaned tweets with VADER and persist the per-tweet sentiment ---
cleaned_tweets = pd.read_csv("tweet_data/cleaned_hashtag_tweet.csv")
vader = SentimentIntensityAnalyzer()
tweet_sentiment = pd.DataFrame()
tweet_sentiment['tweet'] = cleaned_tweets['text']
# polarity_scores returns a dict of neg/neu/pos/compound per tweet.
scores = cleaned_tweets['text'].apply(vader.polarity_scores).tolist()
scores_df = pd.DataFrame(scores)
tweet_sentiment["follower_count"] = cleaned_tweets["follower_count"]
tweet_sentiment["neg"] = scores_df["neg"]
tweet_sentiment['neu'] = scores_df['neu']
tweet_sentiment["pos"] = scores_df["pos"]
tweet_sentiment["compound"] = scores_df["compound"]
tweet_sentiment["time"] = cleaned_tweets['date'].values.astype('datetime64[ns]')
tweet_sentiment.to_csv(index=False, path_or_buf="tweet_data/hashtag_tweet_sentiment.csv")
# Fold today's weighted average into the rolling history CSV.
weighed_tweets = Weigh_Tweets(tweet_sentiment)
times2 = pd.to_datetime(weighed_tweets['Date'], errors='coerce')
print(times2)
# --- Fetch and aggregate stock prices (plot_single_stock comes from stockjson) ---
stock_prices, symbol = plot_single_stock()
stock_prices.to_csv("google_stocks.csv")
stock_prices['date'] = (stock_prices['date']).values.astype(dtype='datetime64[ms]')
stock_prices = Parse_Stock_Prices(stock_prices)
# Re-read the history so the plotted data matches exactly what was persisted.
stock_prices = pd.read_csv('seven_day_prices.csv')
stock_prices.drop(stock_prices.columns[[0]], axis=1, inplace=True)
stock_prices.to_csv('seven_day_prices.csv')
times = pd.to_datetime(stock_prices['date'], errors='coerce')
# print(stock_prices
stock_name = 'GOOGL'
# --- Dual-axis plot: price (left, red) vs sentiment (right, blue) ---
fig,ax = plt.subplots()
# # make a plot
ax.plot(times, stock_prices['Average Stock Price'], color="red", marker="o")
# set x-axis label
ax.set_xlabel("date",fontsize=14)
# set y-axis label
ax.set_ylabel("stock price", color="red",fontsize=14)
ax2=ax.twinx()
# make a plot with different y-axis using second axis object
ax2.plot(times2, weighed_tweets["Average Sentiment"],color="blue",marker="o")
ax2.set_ylabel("Average Sentiment", color="blue", fontsize=14)
plt.savefig('price_vs_sentiment.png')
def check_grade(percent: float):
    """Map a percentage score to a letter grade.

    Bands are inclusive at the lower bound; anything above 100 or
    below 33 earns an 'F', exactly as in the original ladder.
    """
    if percent > 100:
        return 'F'
    bands = ((90, 'A+'), (80, 'A'), (70, 'B+'), (60, 'B'),
             (50, 'C'), (40, 'D'), (33, 'E'))
    for floor, grade in bands:
        if percent >= floor:
            return grade
    return 'F'
996,707 | 030cc9320b807993a684e04ad2516b9ae8fa5e58 | from api import *
@app.after_request
def after_request(response):
    """Attach fully permissive CORS headers to every outgoing response."""
    for header in ("Access-Control-Allow-Origin",
                   "Access-Control-Allow-Methods",
                   "Access-Control-Allow-Headers"):
        response.headers.add(header, "*")
    return response
if __name__ == '__main__':
    # Bind the Pony ORM database with the Flask app's PONY settings and
    # generate the entity-to-table mapping before serving on all interfaces.
    db.bind(**app.config['PONY'])
    db.generate_mapping()
    app.run(host='0.0.0.0')
|
996,708 | 0be7c32aaa9f2e0dad31b56238b5e95b2805cbca | #For decoded t.wnry file from sample: ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Hash import SHA
import sys
import struct
import binascii
import hashlib
def decode_rsa(privkey, data):
    """RSA-decrypt *data* with the DER-encoded private key *privkey*.

    The ciphertext is stored byte-reversed on disk, hence data[::-1].
    On a padding failure PKCS1_v1_5 returns the random sentinel instead
    of raising, so callers get 16 random bytes rather than an exception.
    """
    cipher = PKCS1_v1_5.new(RSA.importKey(privkey))
    sentinel = Random.new().read(16)
    return cipher.decrypt(data[::-1], sentinel)
if __name__ == "__main__":
data = open(sys.argv[1],'rb').read()
privkey = open('privkey.der').read()
hdr = data[:8]
data = data[8:]
size = struct.unpack_from('<I', data)[0]
data = data[4:]
blob1 = data[:size]
data = data[size:]
(id, size) = struct.unpack_from('<IQ', data)
data = data[12:]
blob2 = data[:size]
data = data[size:]
if data != '':
print("More data found!")
key = decode_rsa(privkey, blob1)
aes = AES.new(key, AES.MODE_CBC, '\x00'*16)
decoded = aes.decrypt(blob2)
sha256 = hashlib.sha256(decoded).hexdigest()
open(sha256, 'wb').write(decoded)
print("Wrote decoded file to: "+sha256)
|
996,709 | df5bebcd19371c155bbf1a8625fb4cef01c42c24 | from django.shortcuts import render, redirect
from models import Category, NewsList
# Create your views here.
def index(request, pk=1):
    """Render index.html with all nav categories and the news of category *pk*."""
    navs = Category.objects.all()
    navs_count = navs.count()
    news_list = NewsList.objects.filter(category_id=pk)
    # Explicit context instead of locals() -- same names the template saw before.
    context = {
        'request': request,
        'pk': pk,
        'navs': navs,
        'navs_count': navs_count,
        'news_list': news_list,
    }
    return render(request, 'index.html', context)
def index_redirect(request):
    """Permanently redirect the bare URL to the first nav category.

    Fix: the original passed `parament=True`, a typo for `permanent`.
    Django's resolve_url fallback silently swallowed the unknown kwarg,
    so the redirect was issued as a temporary 302 instead of the
    intended permanent 301.
    """
    return redirect('nav/1/', permanent=True)
def news(request, pk):
    """Render one news item (by primary key) together with its category."""
    news_list = NewsList.objects.get(pk=pk)
    news_category = Category.objects.get(id=news_list.category_id_id)
    # Explicit context instead of locals() -- same names the template saw before.
    context = {
        'request': request,
        'pk': pk,
        'news_list': news_list,
        'news_category': news_category,
    }
    return render(request, 'news.html', context)
|
996,710 | db5a394ae6fd2051eedab2803c4b6860d87de863 | from urlparse import urlparse
from urllib import urlencode
import json
from flask import (
Blueprint,
render_template,
make_response,
abort,
request,
redirect
)
import copy
import random
from flask_cdn import url_for
from synchro import const
landing_pages = Blueprint('landing_pages', __name__)

# Routing table for landing_page(): product slug -> endpoint slug -> render info.
# Every leaf must carry a 'template' (asserted in landing_page) and may carry
# 'template_vars', which are deep-copied and passed as **kwargs to render_template.
endpoint_info_dict = {
    'gold': {
        # Base/Fallback Gold LP (never link directly to this version)
        'v0': {
            'template': 'landing_pages/gold-capsules/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold Original LP for Organic
        'original-turmeric-supplement': {
            'template': 'landing_pages/gold-og/v1-0/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        },
        # Gold Lemon Ginger LP for Organic
        'lemon-ginger-turmeric-supplement': {
            'template': 'landing_pages/gold-og/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold LP for Paid (Facebook/Instagram)
        'fb': {
            'template': 'landing_pages/gold-og/v1-0-paid-a/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold LP for Paid (Adwords)
        'gg': {
            'template': 'landing_pages/gold-og/v1-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold LP for Retargeting (Facebook/Instagram)
        'r': {
            'template': 'landing_pages/gold-og/v1-0-paid-r/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold Capsules LP for Organic
        'turmeric-capsules': {
            'template': 'landing_pages/gold-capsules/v1-0/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        },
        # Gold Capsules LP with Susan Autoplay
        'turmeric-capsules-susan': {
            'template': 'landing_pages/gold-capsules/susan/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold Capsules LP for Paid (Facebook/Instagram)
        'turmeric-capsules-fb': {
            'template': 'landing_pages/gold-capsules/v1-0-paid-a/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold Capsules LP for Retargeting (Facebook/Instagram)
        'turmeric-capsules-r': {
            'template': 'landing_pages/gold-capsules/v1-0-paid-r/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Gold Customers Reviews
        'reviews': {
            'template': 'landing_pages/reviews/gold-elixir/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        }
    },
    'genesis': {
        # Genesis LP for Organic
        'v0': {
            'template': 'landing_pages/genesis/v2-0/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        },
        # Genesis LP for Paid (Facebook/Instagram)
        'fb': {
            'template': 'landing_pages/genesis/v2-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Genesis LP for Paid (Adwords)
        'gg': {
            'template': 'landing_pages/genesis/v2-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        }
    },
    'ketomanna': {
        # Ketomanna LP for Organic
        'v0': {
            'template': 'landing_pages/ketomanna/v1-0/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        },
        # Ketomanna LP for Paid (Facebook/Instagram)
        'fb': {
            'template': 'landing_pages/ketomanna/v1-0-paid-a/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Ketomanna LP for Paid (Adwords)
        'gg': {
            'template': 'landing_pages/ketomanna/v1-0-paid-a/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Ketomanna LP for Retargeting (Facebook/Instagram)
        'r': {
            'template': 'landing_pages/ketomanna/v1-0-paid-r/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        }
    },
    'ketoplan': {
        # Ketoplan LP for Organic
        'v0': {
            'template': 'landing_pages/keto-bundle/v1-0/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        },
        # Ketoplan LP for Paid (Facebook/Instagram)
        'fb': {
            'template': 'landing_pages/keto-bundle/v1-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Ketoplan LP for Paid (Adwords)
        'gg': {
            'template': 'landing_pages/keto-bundle/v1-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        }
    },
    'keto-cleanse': {
        # Keto Cleanse LP for Organic
        'v0': {
            'template': 'landing_pages/keto-cleanse/v1-0/0-index.html',
            'template_vars': {
                'is_variant': False,
            }
        },
        # Keto Cleanse LP for Paid (Facebook/Instagram)
        'fb': {
            'template': 'landing_pages/keto-cleanse/v1-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Keto Cleanse LP for Paid (Adwords)
        'gg': {
            'template': 'landing_pages/keto-cleanse/v1-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Keto Cleanse Program Home Page for Direct
        'home': {
            'template': 'landing_pages/keto-cleanse/directory/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Keto Cleanse Program Week 1 Page for Direct
        'week-1': {
            'template': 'landing_pages/keto-cleanse/week-1/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Keto Cleanse Program Week 2 Page for Direct
        'week-2': {
            'template': 'landing_pages/keto-cleanse/week-2/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        # Keto Cleanse Program Week 3 Page for Direct
        'week-3': {
            'template': 'landing_pages/keto-cleanse/week-3/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        }
    },
    ## This subpath to be phased out once new subdomain is live
    'keto': {
        'v0': {
            'template': 'landing_pages/keto-bundle/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        'keto-cleanse-program': {
            'template': 'landing_pages/keto-cleanse/v1-0-paid/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        'keto-cleanse-program-home': {
            'template': 'landing_pages/keto-cleanse/directory/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        'keto-cleanse-program-1': {
            'template': 'landing_pages/keto-cleanse/week-1/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        'keto-cleanse-program-2': {
            'template': 'landing_pages/keto-cleanse/week-2/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        },
        'keto-cleanse-program-3': {
            'template': 'landing_pages/keto-cleanse/week-3/v1-0/0-index.html',
            'template_vars': {
                'is_variant': True,
            }
        }
    }
    ## Experiments
}
@landing_pages.route('/', defaults={'page': 'none', 'version': 'v0', 'prod_category': None})
@landing_pages.route('/<page>/', defaults={'version': 'v0', 'prod_category': None})
@landing_pages.route('/<page>/<version>', defaults={'prod_category': None})
@landing_pages.route('/<prod_category>/<page>/<version>')
def landing_page(page, version, prod_category):
    """Resolve and render the landing page for (prod_category, page, version).

    Handles production subdomain routing, https upgrades, health-checker
    pings, query-parameter overrides of page/version, the fake
    "participant_id" cookie dance on the keto-cleanse program pages, and
    returning-customer tracking via the synchro_purchase_tracking cookie.
    """
    parsed_url = urlparse(request.url_root)
    host = parsed_url[1].split(':')[0]  # Don't care about port, if it's in the netloc
    subdomain = host.split('.')[0]
    # Only set page to the parsed 'subdomain' in case it's actually letters.
    # in other words if this is a health-check request going directly to an instance's ephemeral ip
    # let's not set the page to 3 numerical digits.
    if const.kENVIRONMENT == 'production' and subdomain.isalpha():
        # For addresses like: gold.besynchro.com/1, or keto.besynchro.com/ketomanna/keto-chocolate-fudge
        # we have to do a little shifting around of values, because in each URL the subdomain is serving
        # as either the <page> or the <prod_category>, while the URI values are themselves serving as
        # different values.
        # Only do this whole page/version/category switcharoo for the old subdomains... live.besynchro.com
        # should work basically exactly like the dev site.
        if subdomain != 'live':
            # Fix: these comparisons used `is not`/`is` against string literals,
            # which tests object identity and only worked by the accident of
            # CPython string interning (SyntaxWarning on Python 3.8+).
            if page != 'none' and version != 'v0':
                prod_category = subdomain
            elif page != 'none':
                version = page
                page = subdomain
            else:
                page = subdomain
    else:
        if page == 'none':
            return redirect('https://besynchro.com', code=301)
    ## Redirect to https if this isn't a health-checker request
    if request.headers.get('X-Forwarded-Proto', '').lower() != "https":
        full_url = request.url
        ssl_url = full_url.replace('http://', 'https://')
        return redirect(ssl_url, code=301)
    # Page and version can also be passed in as GET vars, for URL-formatting reasons
    if 'p' in request.args:
        page = request.args['p']
    # Use the query param to set the version iff there is not a URL route doing the same.
    # ie, we want the URL route to take precedent over the query param... this is to make VWO work better.
    if 'v' in request.args and version == 'v0':
        version = request.args['v']
    # Make sure instances respond correctly to health checker pings
    if page == 'none':
        return ('', 200)
    # Fail if we don't have a valid page, and default to v0 if the version is invalid.
    # Fix: unknown pages now produce a 404 instead of an AssertionError/500,
    # and the check still runs when Python is started with -O.
    noindex = False
    variant_default = False
    if page not in endpoint_info_dict:
        abort(404)
    if version not in endpoint_info_dict[page]:
        version = 'v0'
        variant_default = True
        noindex = True
    # Assert that each version entry in the info_dict contains a template to render.
    # A failed assertion should happen only during development, so this helps ensure developer consistency.
    assert 'template' in endpoint_info_dict[page][version]
    # If there is a dict of template vars, let's grab it and pass it as kwargs to render_template
    template_vars = {}
    if 'template_vars' in endpoint_info_dict[page][version]:
        template_vars = copy.deepcopy(endpoint_info_dict[page][version]['template_vars'])
    if variant_default:
        template_vars['is_variant'] = noindex
    ## This is just some random stuff to make our keto-cleanse-program page appear to be tracking
    ## users and assigning them a "participant_id". We just cookie them and make sure to tack the saved
    ## participant_id onto the URL they accessed... easy.
    if 'keto-cleanse-program-' in version or 'home' in version or 'week-1' in version or 'week-2' in version or 'week-3' in version:
        url_participant_id = request.args.get('participant_id')
        if not url_participant_id:
            current_query_string = urlencode(request.args)
            fake_participant_id = request.cookies.get('participant_id')
            if fake_participant_id:
                query_string = "participant_id=%s&%s" % (fake_participant_id, current_query_string)
                return redirect('%s?%s' % (request.base_url, query_string), code=302)
            else:
                fake_participant_id = random.randint(1, 500000)
                query_string = "participant_id=%s&%s" % (fake_participant_id, current_query_string)
                resp = make_response(redirect('%s?%s' % (request.base_url, query_string), code=302))
                resp.set_cookie('participant_id', str(fake_participant_id))
                return resp
    ## Do some cookie magic so we can detect returning customers and offer them a discounted deal...
    previous_activity = request.cookies.get('synchro_purchase_tracking')
    try:
        previous_activity = json.loads(previous_activity)
    except (TypeError, ValueError):
        # Fix: was a bare `except:`. TypeError covers a missing cookie (None),
        # ValueError covers malformed JSON; nothing else should be swallowed.
        previous_activity = {}
    resp = make_response(render_template(
        endpoint_info_dict[page][version]['template'],
        kENV=const.kENVIRONMENT,
        page=page,
        returning=previous_activity.get(page, {}).get('purchased', False),
        **template_vars
    ))
    if page not in previous_activity:
        previous_activity[page] = {
            'visited': True,
            'purchased': False
        }
        resp.set_cookie('synchro_purchase_tracking', json.dumps(previous_activity), domain=".besynchro.com")
    return resp
|
996,711 | 9a6ea61a8040267276e282cd8dc08d7f220c0fd9 | from tensorflow.keras.datasets import cifar100
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, LSTM
from tensorflow.keras.layers import MaxPooling2D, Flatten
import matplotlib.pyplot as plt
import numpy as np
# Load CIFAR-100, keep the first 10 test images aside for a prediction demo.
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_predict=x_test[:10, :, :, :]
# Flatten 32x32x3 images to vectors and scale pixel values into [0, 1].
x_train=x_train.reshape(50000, 32*32*3).astype('float32')/255.
x_test=x_test.reshape(10000, 32*32*3).astype('float32')/255.
x_predict=x_predict.reshape(10, 32*32*3).astype('float32')/255.
# One-hot encode the 100 class labels.
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
# 2. Fully-connected model (no convolutions despite the Conv2D import above).
model=Sequential()
model.add(Dense(1000, activation='relu', input_shape=(32*32*3,)))
model.add(Dense(2000, activation='relu'))
model.add(Dense(3000, activation='relu'))
model.add(Dense(800, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(100, activation='softmax'))
#(2). The output layer of a multi-class classifier uses the softmax activation.
model.summary()
#3. Compile and train
#(3). Multi-class classification must use the categorical_crossentropy loss; accuracy can then be tracked as a metric.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
# Stop early once the training loss has not improved for 10 epochs.
es=EarlyStopping(monitor='loss', patience=10, mode='auto')
# to_hist=TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)
model.fit(x_train, y_train, epochs=1000, batch_size=32, verbose=1, validation_split=0.2, callbacks=[es])
#4. Evaluate and predict
loss, accuracy=model.evaluate(x_test, y_test, batch_size=32)
print('loss : ', loss)
print('accuracy : ', accuracy)
# argmax converts softmax probabilities / one-hot rows back to class indices.
y_predict=model.predict(x_predict)
y_predict=np.argmax(y_predict, axis=1)
y_actually=np.argmax(y_test[:10, :], axis=1)
print('실제값 : ', y_actually)
print('예측값 : ', y_predict)
'''
cifar100 DNN
loss :
acc :
실제값 :
예측값 :
'''
|
996,712 | dfbe5282051357d1c1e7333a341b70131a15f8ad | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SYS6016
# NLP - LemonMonster
# Jiangxue Han, Jing Sun, Luke Kang, Runhao Zhao
# CNN_Class.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
class Cnn:
    """1-D convolutional binary classifier built on the TensorFlow 1.x graph API."""

    def buildCnn(self,x_train,x_test,y_train,y_test):
        """Build, train (40 epochs) and evaluate the network.

        x_* are 2-D feature matrices (samples x features); y_* are binary
        labels, reshaped below into column vectors for the sigmoid loss.
        Prints train/test accuracy once per epoch.
        """
        y_train = y_train.reshape(-1,1)
        y_test = y_test.reshape(-1,1)
        # Start from a clean graph so repeated calls don't accumulate ops.
        tf.reset_default_graph()
        n_inputs = x_train.shape[1]
        n_outputs = 1
        # NOTE(review): n_hidden1..4 are defined but never used -- the model
        # below has a single conv layer feeding the output layer.
        n_hidden1 = 500
        n_hidden2 = 300
        n_hidden3 = 100
        n_hidden4 = 50
        batch_size = 64
        X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
        y = tf.placeholder(tf.float32, shape=(None,n_outputs), name="y")
        #regularizer
        regularizer = tf.contrib.layers.l1_l2_regularizer()
        with tf.name_scope("dnn"):
            # Add a trailing channel dimension: (batch, n_inputs, 1) for conv1d.
            s = tf.shape(X)[0]
            X1 = tf.reshape(X,shape=[s,n_inputs,1])
            #the first layer
            conv1 = tf.layers.conv1d(X1, filters=4, kernel_size=10,strides=2,name="hidden1",activation=tf.nn.relu,activity_regularizer=regularizer)
            fc1 = tf.contrib.layers.flatten(conv1)
            logits = tf.layers.dense(fc1, n_outputs, name="outputs")
        with tf.name_scope("loss"):
            #entropy
            xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
            #loss function
            loss = tf.reduce_mean(xentropy, name="loss")
            loss_summary = tf.summary.scalar('log_loss', loss)
        lr = 0.001
        with tf.name_scope("train"):
            optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999,\
                epsilon=1e-5)
            training_op = optimizer.minimize(loss)
        with tf.name_scope("eval"):
            # Round the sigmoid output to get a hard 0/1 prediction.
            predicted = tf.sigmoid(logits)
            correct_pred = tf.equal(tf.round(predicted), y)
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        init = tf.global_variables_initializer()
        p = 1
        n_epochs = 40
        writer = tf.summary.FileWriter('./graphs/train_cnn', tf.get_default_graph())
        test_writer = tf.summary.FileWriter('./graphs/test_cnn', tf.get_default_graph())
        saver = tf.train.Saver()
        with tf.Session() as sess:
            init.run()
            for epoch in range(n_epochs):
                #batch
                for i in range(x_train.shape[0] // batch_size):
                    X_batch = x_train[i*batch_size:(i+1)*batch_size]
                    y_batch = y_train[i*batch_size:(i+1)*batch_size]
                    sess.run(training_op, feed_dict = {X:X_batch, y:y_batch})
                # Full-dataset accuracy/loss after each epoch (train and test).
                m,n = sess.run([accuracy,loss],feed_dict = {X:x_train, y:y_train})
                m1,n1 = sess.run([accuracy,loss],feed_dict = {X:x_test, y:y_test})
                print(epoch, "Train accuracy:", m, "Val accuracy:", m1)
env FLASK_APP=~/Desktop/ds5559/Project/polo2/app/app.py
env POLO_PROJ=~/Desktop/ds5559/Project/my_project/myproject |
996,713 | 4a61b6e82c9239eec796b0c646fbdec738fad1be | from django.core.management.base import BaseCommand, CommandError
from apps.tasks.models import TaskManager, Task
from faker import Factory
import random
class Command(BaseCommand):
    help = 'Generate fake data for tasks app'

    def handle(self, *args, **options):
        """Fake data."""
        fake = Factory.create('pt_BR')
        MIN_TASKS, MAX_TASKS = 3, 37
        TITLE_CHARS, DESCRIPTION_CHARS = 20, 200
        statuses = ['TODO', 'DOING', 'BLOCKED', 'DONE']
        for manager in TaskManager.objects.all():
            for _ in range(random.randint(MIN_TASKS, MAX_TASKS)):
                # Never push a manager past the task cap.
                if manager.tasks.all().count() >= MAX_TASKS:
                    print("maximum number of tasks reached")
                    break
                # Generate fields in a fixed order so the fake/random streams
                # are consumed deterministically per iteration.
                title = fake.text(max_nb_chars=TITLE_CHARS, ext_word_list=None)
                description = fake.text(max_nb_chars=DESCRIPTION_CHARS, ext_word_list=None)
                expected_date = fake.date_this_year(before_today=True, after_today=True)
                responsible = random.choice(manager.vigilantes.all())
                status = random.choice(statuses)
                task = Task(
                    title=title,
                    description=description,
                    status=status,
                    expected_date=expected_date,
                    responsible=responsible,
                    task_manager=manager,
                    owner=manager.owner,
                )
                task.save()
                print("Task {} saved!".format(title))
        print("All tasks generated with success!")
996,714 | 15b499e3e8a34b052cde913fc571298c93a4c944 | # This tells matplotlib not to try opening a new window for each plot.
%matplotlib inline
# NOTE(review): the line above is an IPython magic -- this cell only runs
# inside a Jupyter notebook, not as a plain Python script.
import pandas as pd
import numpy as np
import copy
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
# load original gansta versions of the data
# DO NOT OVERWRITE THESE
clicks_train_og = pd.read_csv("../input/clicks_train.csv")
clicks_test_og = pd.read_csv("../input/clicks_test.csv")
promoted_content_og = pd.read_csv("../input/promoted_content.csv")
doc_cats_og = pd.read_csv("../input/documents_categories.csv")
doc_ents_og = pd.read_csv("../input/documents_entities.csv")
doc_meta_og = pd.read_csv("../input/documents_meta.csv")
doc_topics_og = pd.read_csv("../input/documents_topics.csv")
events_og = pd.read_csv("../input/events.csv")
page_views_og = pd.read_csv("../input/page_views_sample.csv")
# FOR TESTING WITH MINI DATASET
# Restrict to documents present in both the page-views sample and the ads.
doc_ids = set(page_views_og['document_id']) & set(promoted_content_og['document_id'])
# pull in the content that is in both page_views and promoted_content
events = events_og[events_og['document_id'].isin(doc_ids)]
clicks_train = clicks_train_og[clicks_train_og['display_id'].isin(events['display_id'])]
clicks_test = clicks_test_og[clicks_test_og['display_id'].isin(events['display_id'])]
events = events[events['display_id'].isin(clicks_train['display_id'])]
# Filter every auxiliary table down to the surviving ads/documents/events.
promoted_content = promoted_content_og[promoted_content_og['ad_id'].isin(clicks_train['ad_id'])]
doc_cats = doc_cats_og[doc_cats_og['document_id'].isin(promoted_content['document_id'])]
doc_ents = doc_ents_og[doc_ents_og['document_id'].isin(promoted_content['document_id'])]
doc_meta = doc_meta_og[doc_meta_og['document_id'].isin(promoted_content['document_id'])]
doc_topics = doc_topics_og[doc_topics_og['document_id'].isin(promoted_content['document_id'])]
page_views = page_views_og[page_views_og['document_id'].isin(events['document_id'])]
# # FOR FULL DATASET ON AWS
# display_sample = np.random.choice(clicks_train_og["display_id"].unique(), 10000) # change this if too many rows
# clicks_train = clicks_train_og[clicks_train_og["display_id"].isin(display_sample)]
# # select 4000 random display id's and grab all rows in click_train with that display
# # every display has multiple ads and only 1 ad in every display is clicked
# promoted_content = promoted_content_og[promoted_content_og["ad_id"].isin(clicks_train["ad_id"])]
# # same ad can show up in multiple displays, so length of unique ads < length of unique displays
# doc_cats = doc_cats_og[doc_cats_og["document_id"].isin(promoted_content["document_id"])]
# doc_ents = doc_ents_og[doc_ents_og["document_id"].isin(promoted_content["document_id"])]
# doc_meta = doc_meta_og[doc_meta_og["document_id"].isin(promoted_content["document_id"])]
# doc_topics = doc_topics_og[doc_topics_og["document_id"].isin(promoted_content["document_id"])]
# events = events_og[events_og["display_id"].isin(clicks_train_og["display_id"])]
# page_views = page_views_og[page_views_og["document_id"].isin(promoted_content["document_id"])]
# # platform & traffic source need to be either all integers or all strings (right now its mixed)
# Merging information aout the displays to master dataset
data = clicks_train.merge(events, on='display_id', how='left')
# joins information about the display that the user saw
# each display has a unique user id, doc id, and timestamp
# events has the information about the display (who the user is, which site (document_id) it was on, when it was seen, from where, etc.)
# Identifying which documents the ads refer to (aka destination documents)
data = data.merge(promoted_content, on='ad_id', how='left')
data.head()
# Gather/bin data about the documents the ads refer to
sparsetop = doc_topics.pivot(index='document_id', columns='topic_id', values='confidence_level')
sparsetop.columns = ['top_' + str(col) for col in sparsetop.columns]
sparsecat = doc_cats.pivot(index='document_id', columns='category_id', values='confidence_level')
sparsecat.columns = ['cat_' + str(col) for col in sparsecat.columns]
sparse = sparsetop.join(sparsecat, how='outer')
sparse.fillna(0, inplace=True)
sparse.reset_index(level=0, inplace=True)
sparse.head()
print(len(sparse['document_id'].unique()), len(data['document_id_y'].unique()))
data = data.merge(sparse, left_on='document_id_y', right_on='document_id', how='left')
data.head()
# Adding meta data about the advertiser and campaign successes.
# groupby().mean() computes each group's click-through rate in a single pass,
# replacing the original one-boolean-scan-per-unique-value list comprehensions
# (which were O(unique_ids * rows)); the resulting dicts are identical.
advr_success = data.groupby('advertiser_id')['clicked'].mean().to_dict()
camp_success = data.groupby('campaign_id')['clicked'].mean().to_dict()
data['campaign_perc'] = data['campaign_id'].map(camp_success)
data['advertiser_perc'] = data['advertiser_id'].map(advr_success)
data.head()
# Per-document view counts from the page-views sample; value_counts() is a
# single pass and yields the same {document_id: count} mapping as before.
doc_view_freq = page_views['document_id'].value_counts().to_dict()
data['docx_view_freq'] = data['document_id_x'].map(doc_view_freq)
data.head()
# Splitting dataset into data and labels
labels = data['clicked']
labels = labels.values.reshape(-1, 1)  # column vector for the classifiers
del data['clicked']
# BUG FIX: the original used py2 `print expr` statements, which are a
# SyntaxError on the Python 3.5 the author says this runs on (see comment
# above) -- converted to print() calls.
print('Labels length:', len(labels))
print('data length:', data.shape)
# Making training and test sets: 70/30 split, index hoisted so all four
# slices agree even if len(data) changes later.
split = int(.7 * len(data))
train_data = data[:split]
test_data = data[split:]
train_labels = labels[:split]
test_labels = labels[split:]
print('training label shape:', train_labels.shape)
print('training data shape:', train_data.shape)
print('test label shape:', test_labels.shape)
print('test data shape:', test_data.shape)
996,715 | 4cfffb9ccfc0c90e4975642ff6cbeca88910f877 | import pandas as pd
from pandas.io import html
df = pd.read_csv('Resources/cities.csv')
datahtml = df.to_html()
print(html) |
996,716 | b6503c2ba7d4de0521dcfe8416db6dfbc557cd49 | #from django.db import models
from djongo import models
from django.forms import ModelForm
from django import forms
# Create your models here.
class CategoriesApp(models.Model):
    """Djongo model describing one product category."""

    # External identifier; uniqueness enforced at the DB level.
    categoryID = models.CharField(max_length=250, unique=True)
    categoryName = models.CharField(max_length=250)
    description = models.TextField()
    # Stored as a path/URL string rather than an ImageField.
    picture = models.CharField(max_length=250)
class CategoryForm(ModelForm):
    """ModelForm exposing every CategoriesApp field for create/edit views."""

    class Meta:
        model = CategoriesApp
        fields = ['categoryID', 'categoryName', 'description', 'picture']
class UploadCategory(forms.Form):
    """Plain upload form: category identifiers plus a picture file."""

    categoryID = forms.CharField(max_length=250)
    categoryName = forms.CharField(max_length=250)
    filename = forms.FileField(label=" ", label_suffix="+")

    def sanitizefile(self):
        """Return (file, extension) for a .jpg upload, or (0, 0) otherwise."""
        uploaded = self.cleaned_data['filename']
        extension = uploaded.name.split('.')[-1].lower()
        if extension in ["jpg"]:
            return uploaded, extension
        return 0, 0
|
996,717 | 2e34867ac8263ce9cb9cd33d3d86741cf72f2405 | 1 '''
2 Starting in the top left corner of a 2x2 grid, there are 6 routes (without backtracking) to the bottom right corner.
3
4 How many routes are there through a 20x20 grid?
5 '''
6
def fact(n):
    """Return n! computed iteratively (fact(0) == 1)."""
    f = 1
    for x in range(1, n + 1):
        f = f * x
    return f

# Routes through a 20x20 grid = C(40, 20).  Pasted gutter line numbers made
# the original invalid Python; also modernized py2 xrange/print, and use
# floor division so the result stays an exact integer on Python 3.
print(fact(40) // fact(20) // fact(20))
996,718 | 6e44ae827debc827569aac3884a85d9466d70903 | import tensorflow as tf
import numpy as np
import RL
from RL.common.utils import tf_inputs, TfRunningStats, conv_net, auto_conv_dense_net, dense_net, tf_training_step, tf_scale, tf_clip, need_conv_net
class SACModel:
def __init__(self, context: RL.Context, name: str, num_actors=1, num_critics=1, num_valuefns=1, reuse=tf.AUTO_REUSE):
self.name = name
self.context = context
self.state_space = self.context.env.observation_space
self.action_space = self.context.env.action_space
self.num_actors = num_actors
self.num_critics = num_critics
self.num_valuefns = num_valuefns
self.check_assertions()
with tf.variable_scope(name, reuse=reuse):
# running stats:
if self.context.normalize_observations:
self._states_running_stats = TfRunningStats(list(self.state_space.shape), "running_stats/states")
if self.context.normalize_actions:
self._actions_running_stats = TfRunningStats(list(self.action_space.shape), "running_stats/actions")
# placeholders:
self._states_placeholder, states_input = tf_inputs([None] + list(self.state_space.shape), self.state_space.dtype, "states", cast_to_dtype=tf.float32)
self._actions_placeholder, actions_input = tf_inputs([None] + list(self.action_space.shape), self.action_space.dtype, "actions_input", cast_to_dtype=tf.float32)
self._actions_noise_placholder, actions_noise_input = tf_inputs([None] + list(self.action_space.shape), tf.float32, "actions_noise_input")
self._actor_loss_coeffs_placeholder, actor_loss_coeffs_input = tf_inputs([self.num_critics], tf.float32, "actor_loss_coefficients")
self._actor_loss_alpha_placholder, actor_loss_alpha_input = tf_inputs(None, tf.float32, "actor_loss_alpha_coefficient")
self._critics_loss_coeffs_placeholder, critics_loss_coeffs_input = tf_inputs([self.num_critics], tf.float32, "critics_loss_coefficients")
self._critics_targets_placeholder, critics_targets_input = tf_inputs([self.num_critics, None], tf.float32, "critics_targets")
self._valuefns_loss_coeffs_placeholder, valuefns_loss_coeffs_input = tf_inputs([self.num_valuefns], tf.float32, "valuefns_loss_coefficients")
self._valuefns_targets_placeholder, valuefns_targets_input = tf_inputs([self.num_valuefns, None], tf.float32, "valuefns_targets")
# normalized inputs:
states_input_normalized = self._states_running_stats.normalize(states_input, "states_input_normalize") if self.context.normalize_observations else states_input
actions_input_normalized = self._actions_running_stats.normalize(actions_input, "actions_input_normalize") if self.context.normalize_actions else actions_input
# critics:
self._critics = [self.tf_critic(states_input_normalized, actions_input_normalized, "critic{0}".format(i)) for i in range(self.num_critics)]
self._critics_loss = self.tf_critics_loss(critics_loss_coeffs_input, self._critics, critics_targets_input, "critics_loss")
# value functions:
self._valuefns = [self.tf_value_fn(states_input_normalized, "valuefn{0}".format(i)) for i in range(self.num_valuefns)]
self._valuefns_loss = self.tf_valuefns_loss(valuefns_loss_coeffs_input, self._valuefns, valuefns_targets_input, "valuefns_loss")
if self.num_actors:
# actor
self._actor_means, self._actor_logstds, self._actor_actions, self._actor_logpis = self.tf_actor(states_input_normalized, actions_noise_input, 'actor')
# actor normalized
actor_actions_normalized = self._actions_running_stats.normalize(self._actor_actions, "actor_actions_normalize") if self.context.normalize_actions else self._actor_actions
# actor-critics
self._actor_critics = [self.tf_critic(states_input_normalized, actor_actions_normalized, "critic{0}".format(i), reuse=True) for i in range(self.num_critics)]
# actor-loss:
self._actor_loss = self.tf_actor_loss(actor_loss_coeffs_input, actor_loss_alpha_input, self._actor_critics, self._actor_logpis, "actor_loss")
def check_assertions(self):
if not hasattr(self.state_space, 'dtype'):
self.state_space.dtype = np.float32
if not hasattr(self.action_space, 'dtype'):
self.action_space.dtype = np.float32
assert hasattr(self.state_space, 'shape')
assert hasattr(self.action_space, 'shape') and hasattr(self.action_space, 'low') and hasattr(self.action_space, 'high')
assert len(self.action_space.shape) == 1
assert self.num_actors <= 1, "There can be atmost 1 actor"
    def tf_actor_activation_fn(self, means, logstds, noise, name):
        """Squash a reparameterized Gaussian sample through tanh.

        Returns (tanh(means), tanh(sample), log-prob of the squashed sample),
        applying the tanh change-of-variables correction to the log-density.
        """
        with tf.variable_scope(name):
            # gaussian actions
            stds = tf.exp(logstds)
            x = means + noise * stds  # reparameterization trick: sample = mu + eps * sigma
            logpi_per_dimension = -tf.square((x - means) / (stds + 1e-8)) - logstds - np.log(np.sqrt(2 * np.pi))  # log of gaussian per dimension
            logpi = tf.reduce_sum(logpi_per_dimension, axis=-1)  # overall pi is product of pi per dimension, so overall log_pi is sum of log_pi per dimension
            # tanh
            means = tf.nn.tanh(means)
            y = tf.nn.tanh(x)
            # derivative of tanh is 1 - tanh^2; clipped to [0, 1] for numerical safety
            dy_dx = tf_clip(1 - tf.square(y), 0, 1)
            log_dy_dx = tf.log(dy_dx + 1e-6)
            logpi = logpi - tf.reduce_sum(log_dy_dx, axis=-1)  # by change of variables: overall log probability after activations = logpi - component wise sum of log derivatives of the activation
            return means, y, logpi
def tf_actions_scale(self, means, actions, name):
'''Does not change logpi'''
with tf.variable_scope(name):
actions = tf_scale(actions, -1, 1, self.action_space.low, self.action_space.high, "scale_actions")
means = tf_scale(means, -1, 1, self.action_space.low, self.action_space.high, "scale_mean")
# dy_dx = ((self.action_space.high - self.action_space.low) / 2).astype(np.float32)
# log_dy_dx = tf.log(dy_dx)
# overall log derivative:
# by change of variables: overall log probability after activations = logpi - component wise sum of log derivatives of the activation
# logpi = logpi - tf.reduce_sum(log_dy_dx, axis=-1)
return means, actions
def tf_actor(self, states, actions_noise, name, reuse=tf.AUTO_REUSE):
with tf.variable_scope(name, reuse=reuse):
outp = auto_conv_dense_net(need_conv_net(self.context.env.observation_space), states, self.context.convs, self.context.hidden_layers, self.context.activation_fn, 2 * self.action_space.shape[0], lambda x: x, "conv_dense", layer_norm=self.context.layer_norm, output_kernel_initializer=self.context.output_kernel_initializer, reuse=reuse)
means = outp[:, 0:self.action_space.shape[0]]
# logstds = tf.tanh(outp[:, self.action_space.shape[0]:])
# logstds = tf_scale(logstds, -1, 1, self.context.logstd_min, self.context.logstd_max, 'scale_logstd')
logstds = tf_clip(outp[:, self.action_space.shape[0]:], self.context.logstd_min, self.context.logstd_max)
means, actions, logpi = self.tf_actor_activation_fn(means, logstds, actions_noise, 'gaussian_tanh')
means, actions = self.tf_actions_scale(means, actions, "scale")
return means, logstds, actions, logpi
def tf_critic(self, states, actions, name, reuse=tf.AUTO_REUSE):
with tf.variable_scope(name, reuse=reuse):
if need_conv_net(self.context.env.observation_space):
states = conv_net(states, self.context.convs, self.context.activation_fn, 'conv', reuse=reuse)
states_actions = tf.concat(values=[states, actions], axis=-1)
return dense_net(states_actions, self.context.hidden_layers, self.context.activation_fn, 1, lambda x: x, "dense", layer_norm=self.context.layer_norm, output_kernel_initializer=self.context.output_kernel_initializer, reuse=reuse)[:, 0]
def tf_value_fn(self, states, name, reuse=tf.AUTO_REUSE):
with tf.variable_scope(name, reuse=reuse):
return auto_conv_dense_net(need_conv_net(self.context.env.observation_space), states, self.context.convs, self.context.hidden_layers, self.context.activation_fn, 1, lambda x: x, "conv_dense", layer_norm=self.context.layer_norm, output_kernel_initializer=self.context.output_kernel_initializer, reuse=reuse)[:, 0]
def tf_actor_loss(self, actor_loss_coeffs, actor_loss_alpha, actor_critics, actor_logpis, name):
with tf.variable_scope(name):
loss = 0
loss = sum([-actor_loss_coeffs[i] * actor_critics[i] for i in range(self.num_critics)]) + actor_loss_alpha * actor_logpis
loss = tf.reduce_mean(loss)
return loss
def tf_critics_loss(self, critics_loss_coeffs, critics, critics_targets, name):
'''assumes the first axis is critic_id'''
with tf.variable_scope(name):
loss = sum([critics_loss_coeffs[i] * tf.losses.mean_squared_error(critics[i], critics_targets[i]) for i in range(self.num_critics)])
return loss
def tf_valuefns_loss(self, valuefns_loss_coeffs, valuefns, valuefns_targets, name):
'''assumes the first axis is valuefn_id'''
with tf.variable_scope(name):
loss = sum([valuefns_loss_coeffs[i] * tf.losses.mean_squared_error(valuefns[i], valuefns_targets[i]) for i in range(self.num_valuefns)])
return loss
def get_vars(self, *scopes):
if len(scopes) == 0:
scopes = ['']
return sum([tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='{0}/{1}'.format(self.name, scope)) for scope in scopes], [])
def get_trainable_vars(self, *scopes):
if len(scopes) == 0:
scopes = ['']
return sum([tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='{0}/{1}'.format(self.name, scope)) for scope in scopes], [])
def get_perturbable_vars(self, *scopes):
return list(filter(lambda var: not('LayerNorm' in var.name or 'batch_norm' in var.name or 'running_stats' in var.name), self.get_vars(*scopes)))
    def sample_actions_noise(self, batch_size, sigma=1):
        """Draw standard-normal noise scaled by sigma, shape [batch_size, *action_shape].

        sigma may be a scalar, or a per-sample sequence of length batch_size
        (reshaped to a column so it broadcasts across action dimensions).
        """
        if hasattr(sigma, '__len__'):
            sigma = np.asarray(sigma)
            sigma = np.reshape(sigma, [batch_size, 1])
        return sigma * np.random.standard_normal(size=[batch_size] + list(self.action_space.shape))
def actions(self, states, noise=None):
actions = self._actor_actions
if noise is None:
actions = self._actor_means
noise = np.zeros([len(states), self.context.env.action_space.shape[0]])
return self.context.session.run(actions, {
self._states_placeholder: states,
self._actions_noise_placholder: noise
})
def actions_means_logstds_logpis(self, states, noise=None):
actions = self._actor_actions
if noise is None:
actions = self._actor_means
noise = np.zeros([len(states), self.context.env.action_space.shape[0]])
return self.context.session.run([actions, self._actor_means, self._actor_logstds, self._actor_logpis], {
self._states_placeholder: states,
self._actions_noise_placholder: noise
})
def Q(self, critic_ids, states, actions):
'''returns the list of state-action values per critic specified in critic_ids'''
return self.context.session.run([self._critics[i] for i in critic_ids], {self._states_placeholder: states, self._actions_placeholder: actions})
def V(self, valuefn_ids, states):
'''returns the list of state values per valuefn specified in valuefn_ids'''
return self.context.session.run([self._valuefns[i] for i in valuefn_ids], {self._states_placeholder: states})
def setup_training(self, name, reuse=tf.AUTO_REUSE):
with tf.variable_scope(name, reuse=reuse):
# running stats update
if self.context.normalize_observations:
self._update_states_running_stats = self._states_running_stats.update(self._states_placeholder[0], "update_states_running_stats")
if self.context.normalize_actions:
self._update_actions_running_stats = self._actions_running_stats.update(self._actions_placeholder[0], "update_actions_running_stats")
# actor training
if self.num_actors:
actor_trainable_vars = self.get_trainable_vars('actor')
actor_optimizer = tf.train.AdamOptimizer(self.context.actor_learning_rate, epsilon=self.context.adam_epsilon)
assert len(actor_trainable_vars) > 0, "No vars to train in actor"
self._actor_train_step = tf_training_step(self._actor_loss, actor_trainable_vars, actor_optimizer, self.context.actor_l2_reg, self.context.clip_gradients, "actor_train_step")
# critics training
if self.num_critics:
critics_trainable_vars = self.get_trainable_vars(*('critic{0}'.format(i) for i in range(self.num_critics)))
critics_optimizer = tf.train.AdamOptimizer(self.context.learning_rate, epsilon=self.context.adam_epsilon)
assert len(critics_trainable_vars) > 0, "No vars to train in critics"
self._critics_train_step = tf_training_step(self._critics_loss, critics_trainable_vars, critics_optimizer, self.context.l2_reg, self.context.clip_gradients, "critics_train_step")
# valuefns training
if self.num_valuefns:
valuefns_trainable_vars = self.get_trainable_vars(*('valuefn{0}'.format(i) for i in range(self.num_valuefns)))
valuefns_optimizer = tf.train.AdamOptimizer(self.context.learning_rate, epsilon=self.context.adam_epsilon)
assert len(valuefns_trainable_vars) > 0, "No vars to train in valuefns"
self._valuefns_train_step = tf_training_step(self._valuefns_loss, valuefns_trainable_vars, valuefns_optimizer, self.context.l2_reg, self.context.clip_gradients, "valuefns_train_step")
def update_states_running_stats(self, states):
for i in range(len(states)):
self.context.session.run(self._update_states_running_stats, {self._states_placeholder: [states[i]]})
def update_actions_running_stats(self, actions):
for i in range(len(actions)):
self.context.session.run(self._update_actions_running_stats, {self._actions_placeholder: [actions[i]]})
def train_actor(self, states, noise, critic_ids, loss_coeffs, alpha):
'''train the actor to optimize the critics specified by critic_ids weighted by loss_coeffs and optimize entropy weighted by alpha'''
loss_coeffs_all_ids = [0] * self.num_critics
actor_critics = []
for i, coeff in zip(critic_ids, loss_coeffs):
loss_coeffs_all_ids[i] = coeff
actor_critics.append(self._actor_critics[i])
_, loss, actor_critics, logstds, logpis = self.context.session.run([self._actor_train_step, self._actor_loss, actor_critics, self._actor_logstds, self._actor_logpis], {
self._states_placeholder: states,
self._actions_noise_placholder: noise,
self._actor_loss_coeffs_placeholder: loss_coeffs_all_ids,
self._actor_loss_alpha_placholder: alpha
})
return loss, actor_critics, logstds, logpis
def train_critics(self, states, actions, critic_ids, targets, loss_coeffs):
'''jointly train the critics of given ids with given loss coeffs. targets is expected to be a list of targets per critic to train. i.e. of shape [len(critic_ids), len(states)]'''
targets_all_ids = np.zeros([self.num_critics, len(states)])
loss_coeffs_all_ids = [0] * self.num_critics
for i, tar, coeff in zip(critic_ids, targets, loss_coeffs):
loss_coeffs_all_ids[i] = coeff
targets_all_ids[i] = tar
_, critics_loss = self.context.session.run([self._critics_train_step, self._critics_loss], feed_dict={
self._states_placeholder: states,
self._actions_placeholder: actions,
self._critics_targets_placeholder: targets_all_ids,
self._critics_loss_coeffs_placeholder: loss_coeffs_all_ids
})
return critics_loss
def train_valuefns(self, states, valuefn_ids, targets, loss_coeffs):
'''jointly train the valuefns of given ids with given loss coeffs. targets is expected to be a list of targets per valuefn to train i.e. of shape [len(valuefn_ids), len(states)]'''
targets_all_ids = np.zeros([self.num_valuefns, len(states)])
loss_coeffs_all_ids = [0] * self.num_valuefns
for i, tar, coeff in zip(valuefn_ids, targets, loss_coeffs):
loss_coeffs_all_ids[i] = coeff
targets_all_ids[i] = tar
_, valuefns_loss = self.context.session.run([self._valuefns_train_step, self._valuefns_loss], feed_dict={
self._states_placeholder: states,
self._valuefns_targets_placeholder: targets_all_ids,
self._valuefns_loss_coeffs_placeholder: loss_coeffs_all_ids
})
return valuefns_loss
|
996,719 | 6caf57492e0a60fbf5e05de4253ad5d9f078f952 | """
Author: Konnie Detoudom
Description: Coding with Files Class 2 (CSV Exercise)
"""
import csv
# -----------
# Problem 2
# -----------
# Prompt for the CSV to tally; expected columns: name, thin mints,
# peanut butter, shortbread.
file_to_read = input("Enter name of your 'csv_file': ")
thin_mints = 0
peanut_butter = 0
shortbread = 0
try:
    with open(file_to_read, 'r') as csv_file:
        data = csv.reader(csv_file)
        # Skip the header row directly instead of the enumerate/flag dance;
        # also drops the dead `list_row` and never-used `header` variables.
        next(data, None)
        for row in data:
            thin_mints += int(row[1])
            peanut_butter += int(row[2])
            shortbread += int(row[3])
    print(f'There were {thin_mints} Thin Mints, {peanut_butter} Peanut Butter Cookies, and {shortbread} Shortbread Cookies sold.')
except FileNotFoundError:
    print('Something went wrong. That file does not exist.')
|
996,720 | f01f37414c6b21e0f652e1253dbd1a1b5907df9c | from http.server import HTTPServer, SimpleHTTPRequestHandler
import socket
hostname = socket.gethostname()
class MyHandler(SimpleHTTPRequestHandler):
    """Serves fixed zun/doko strings, or a hostname-derived answer."""

    def do_GET(self):
        """Answer every GET with a small UTF-8 body chosen by path."""
        uri = self.path
        if uri == "/zun":
            body = b"zun"
        elif uri == "/doko":
            body = b"doko"
        elif uri == "/kiyoshi":
            body = b"__kiyoshi__"
        else:
            body = judge_reponce(hostname).encode('utf-8')
        self.send_response(200)
        # BUG FIX: headers must be emitted *before* end_headers(); the
        # original called end_headers() first, so these two headers were
        # written after the blank line and leaked into the response body.
        self.send_header('Content-type', 'text/html; charset=utf-8')
        self.send_header('Content-length', str(len(body)))
        self.end_headers()
        self.wfile.write(body)
def judge_reponce(cid):
    """Map an id string (parsed as hex) onto the zun/doko/kiyoshi cycle.

    Returns "**kiyoshi**" when the value is 0 mod 5, "doko" when 1 mod 5,
    and "zun" otherwise.  Raises ValueError if `cid` is not valid hex.
    """
    # BUG FIX: the original ignored its `cid` parameter and read the global
    # `hostname` instead, leaving the parameter dead.  The only caller passes
    # `hostname`, so in-file behavior is unchanged.
    intid = int(cid, 16)
    if intid % 5 == 0:
        return "**kiyoshi**"
    elif intid % 5 == 1:
        return "doko"
    else:
        return "zun"
# Listen on all interfaces so the service is reachable from outside the host.
host = '0.0.0.0'
port = 8000
httpd = HTTPServer((host, port), MyHandler)
print('serving at port', port)
httpd.serve_forever()  # blocks forever; interrupt to stop
996,721 | 7875aafb180168e09251515b73d795103fbfb825 | #!/usr/bin/python
#Simple script that is called by Cron at the scheduled time. Script sends mqtt message that tells the sunrise script to begin the sunrise now.
import paho.mqtt.publish as publish
publish.single("home/bedroom/sunrise/scheduler", payload="GO", hostname="bunker", qos=1) |
996,722 | 136a8748bfecf7ab45b5b2b3f573fe485f5dc03e | from models.DeepQ.Model import DeepQModel
from models.PolicyGradient.AC import ActorCritic
from Params import Params
def load_model():
    """Instantiate the model selected by Params.model.

    Returns:
        DeepQModel when Params.model == 'DeepQ',
        ActorCritic when Params.model == 'ActorCritic'.

    Raises:
        ValueError: for any other Params.model value.  The original silently
            fell through and returned None, deferring the failure to the
            caller's first attribute access.
    """
    if Params.model == 'DeepQ':
        return DeepQModel()
    elif Params.model == 'ActorCritic':
        return ActorCritic()
    raise ValueError("Unknown model: {!r}".format(Params.model))
|
996,723 | 91910ad3e330da47d4e8ef7aab4b103c644c2f2b | from __future__ import with_statement
import unittest
from _utils import throwing
from utils.crypto import Crypto, CryptoException
class CryptoTest(unittest.TestCase):
    """Round-trip and failure-mode tests for utils.crypto.Crypto."""

    def testEncDec(self):
        """enc followed by dec restores the original string."""
        crypto = Crypto('some secret key_')
        original = 'foo'
        restored = crypto.dec(crypto.enc(original))
        assert original == restored

    def testEncidDecid(self):
        """encid/decid round-trips an integer id."""
        crypto = Crypto('some secret key_')
        original = 1
        restored = crypto.decid(crypto.encid(original))
        assert original == restored

    def testEncidBad(self):
        """encid rejects a non-integer payload."""
        crypto = Crypto('some secret key_')
        with throwing(CryptoException):
            crypto.encid('foo')

    def testDecidBad(self):
        """decid rejects ciphertext that does not decode to an integer."""
        crypto = Crypto('some secret key_')
        with throwing(CryptoException):
            crypto.decid(crypto.enc('foo'))
if __name__ == '__main__':
unittest.main()
|
996,724 | 5ce434a0f966e3ae3fa89679774ed99c030bfc05 | import sys
from contraxo import clargs
from contraxo import simulator
from contraxo import script
def main(args = sys.argv[1:]):
    """Entry point: parse CLI options, load the script, run it on a Simulator.

    NOTE(review): the default binds sys.argv[1:] once at import time, not per
    call -- acceptable for a CLI entry point, but pass `args` explicitly when
    invoking from tests.
    """
    opts = clargs.parse_args(args)
    sim = simulator.Simulator()
    scr = script.Script.load(opts.SOURCE)
    scr.execute(sim)
|
996,725 | a710382fee5dba16ed8ca3716beeb7d363670c75 | ## multioutput_face_completion
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rc("font", size=12)
from sklearn import preprocessing
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
#自己加入
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
# loading face dataset
data, targets= fetch_olivetti_faces(return_X_y=True)
# First 30 subjects train; the remaining subjects are held out entirely.
train =data[targets < 30]
test = data[targets >= 30]
# Test on a subset of people
n_faces=5
rng= check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
# upper half of each face (model input)
X_train = train[:, :(n_pixels+1)//2]
# lower half of each face (regression target)
y_train =train[:, n_pixels//2:]
X_test = test[:, :(n_pixels+1)//2]
y_test= test[:, n_pixels//2:]
#fit estimators
ESTIMATORS={
"Extra trees":ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0 ),
"Knn":KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
#額外regression
"Decision tree" : DecisionTreeRegressor(max_depth=10, random_state=0),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] =estimator.predict(X_test)
#繪製完整臉譜
image_shape=(64,64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle('Face completion with multi-output estimators', size=12)
for i in range(n_faces):
true_face= np.hstack((X_test[i], y_test[i]))
if i:
sub= plt.subplot(n_faces, n_cols, i * n_cols +1)
else:
sub= plt.subplot(n_faces, n_cols, i * n_cols +1, title='True face')
sub.axis('off')
sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation='nearest')
for j, esti in enumerate(sorted(ESTIMATORS)):
completed_face= np.hstack((X_test[i], y_test_predict[esti][i]))
if i:
sub= plt.subplot(n_faces, n_cols, i* n_cols+2+j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=esti)
sub.axis('off')
sub.imshow(completed_face.reshape(image_shape),cmap=plt.cm.gray, interpolation='nearest')
plt.show()
|
996,726 | 7423e7d1f64950a9894e0829e50b22945fbe5135 | from .models import RecruitmentInfo
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class UserCreateForm(UserCreationForm):
    """Signup form backed by Django's built-in User model.

    NOTE(review): UserCreationForm already renders password1/password2
    confirmation fields; listing 'password' in Meta.fields looks suspect --
    the documented convention is fields = ('username',).  Confirm against
    the Django auth docs before changing.
    """

    class Meta:
        model = User
        fields = ('username', 'password')
class FormRecruitmentInfo(forms.ModelForm):
    """ModelForm collecting a full recruitment application, CV file included."""

    class Meta :
        model = RecruitmentInfo
        fields = ['name','email','phone','full_address','name_of_university','graduation_year','cgpa','experience_in_months','current_work_place_name',
        'applying_in','expected_salary','field_buzz_reference','github_project_url','cv_file']
996,727 | a986f125a4d43f3e1babeed409e60dd4f6c821f3 | import logging
import sys
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
def test_collection_get():
    """Indexing a collection by int returns the feature with that id."""
    with fiona.open('tests/data/coutwildrnp.shp') as src:
        result = src[5]
        assert result['id'] == '5'
def test_collection_slice():
    """Slicing a collection yields a plain list of features."""
    with fiona.open('tests/data/coutwildrnp.shp') as src:
        results = src[:5]
        assert isinstance(results, list)
        assert len(results) == 5
        assert results[4]['id'] == '4'
def test_collection_iterator_slice():
    """items(stop) iterates (key, feature) pairs up to the stop index."""
    with fiona.open('tests/data/coutwildrnp.shp') as src:
        results = list(src.items(5))
        assert len(results) == 5
        k, v = results[4]
        assert k == 4
        assert v['id'] == '4'
def test_collection_iterator_next():
    """items(start, None) begins iteration at the start index."""
    with fiona.open('tests/data/coutwildrnp.shp') as src:
        k, v = next(src.items(5, None))
        assert k == 5
        assert v['id'] == '5'
def test_collection_iterator_items_slice():
    """items(start, stop, step) should honor Python slice semantics,
    including negative indices and negative steps."""
    with fiona.open('tests/data/coutwildrnp.shp') as src:
        l = len(src)
        items = list(src.items(0, 5))
        assert len(items) == 5
        items = list(src.items(1, 5))
        assert len(items) == 4
        # Negative start counts from the end of the collection.
        items = list(src.items(-5, None))
        assert len(items) == 5
        items = list(src.items(-5, -1))
        assert len(items) == 4
        items = list(src.items(0, None))
        assert len(items) == l
        items = list(src.items(5, None))
        assert len(items) == (l - 5)
        # Negative steps walk backwards; stop=None means down to record 0.
        items = list(src.items(5, None, -1))
        assert len(items) == 6
        items = list(src.items(5, None, -2))
        assert len(items) == 3
        items = list(src.items(4, None, -2))
        assert len(items) == 3
        items = list(src.items(-1, -5, -1))
        assert len(items) == 4
        items = list(src.items(-5, None, -1))
        assert len(items) == (l - 5 + 1)
def test_collection_iterator_keys_next():
    """keys(start, None) yields record indices starting at start."""
    with fiona.open('tests/data/coutwildrnp.shp') as src:
        k = next(src.keys(5, None))
        assert k == 5
|
996,728 | 2264c840ac7330898f0791189068849b7b8ed39a | ############################################################################
############################################################################
# exercise1_solution.py
#
# September 2020, baclark@msu.edu
# Example solution for the first exercise in bootcamp 2020
#
# Loops over an ARA data file, computes the SNR (|peak|/RMS) of channel 0
# for every calibration-pulser event, and histograms the results with
# matplotlib (no ROOT classes are used after the waveform is extracted).
############################################################################
import ROOT
import numpy as np
import matplotlib.pyplot as plt
import os

# load the AraRoot event library so the Ara* classes below are available
ROOT.gSystem.Load(os.environ.get('ARA_UTIL_INSTALL_DIR')+"/lib/libAraEvent.so")

# open a data file
file = ROOT.TFile.Open("/data/wipac/ARA/2014/unblinded/L1/ARA02/0101/run002823/event002823.root")

# load the event tree for this file
eventTree = file.Get("eventTree")

# point the tree's "event" branch at our raw data object
rawEvent = ROOT.RawAtriStationEvent()
eventTree.SetBranchAddress("event", ROOT.AddressOf(rawEvent))

# get the number of entries in this file
num_events = eventTree.GetEntries()
print('total events:', num_events)

list_of_snrs = []
# loop over the events
for event in range(num_events):
    eventTree.GetEntry(event)

    # only calibration-pulser events are of interest here
    if rawEvent.isCalpulserEvent() == 0:
        continue

    # calibrate the raw event and grab the channel-0 waveform
    usefulEvent = ROOT.UsefulAtriStationEvent(rawEvent, ROOT.AraCalType.kLatestCalib)
    graph = usefulEvent.getGraphFromRFChan(0)

    # copy the voltages into a numpy array so the rest of the analysis
    # no longer depends on ROOT
    volts = np.asarray([graph.GetY()[sample] for sample in range(graph.GetN())])

    # SNR = |peak voltage| / RMS of the waveform
    rms = np.std(volts)
    peak = np.max(np.abs(volts))
    list_of_snrs.append(peak / rms)
    del usefulEvent

list_of_snrs = np.asarray(list_of_snrs)

# now we make a histogram of the per-event SNRs
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax.hist(list_of_snrs, bins=np.arange(0, 10, 0.5))
# fix: the histogrammed quantity (SNR) belongs on the x-axis and the bin
# counts on the y-axis -- the two labels were swapped in the original
ax.set_xlabel('SNR')
ax.set_ylabel('Number of Events')
ax.set_title('Histogram of Ch 0 SNRs')
fig.savefig('hist_of_snr_py.png', bbox_inches='tight', dpi=300)
|
996,729 | 1b90d1c0b5b983e46d499c048b98232e8b20d6e9 | # -*- coding: utf-8 -*-
import telebot, requests
token = '265784305:AAEyapE7qBFPvObRUIvhWyIMSoIpbv6JvRE'
bot = telebot.TeleBot(token)
@bot.message_handler(content_types=["text"])
def main(message):
    """Route an incoming text message to one of four features:

    - "p24": Privat24 currency exchange rates
    - text containing no letters: arithmetic calculator
    - otherwise: weather lookup, falling back to translation when the
      weather service reports an error for the text
    """
    # Exchange rates with Privat24
    if "p24" == message.text.lower():
        p24_url = "https://api.privatbank.ua/p24api/pubinfo?json&exchange&coursid=3"
        r = requests.get(p24_url)
        response = r.json()
        msg = "Exchange: buy / sale\n"
        for row in response:
            msg += "{0}: {2} {1} / {3} {1}\n".format(row["ccy"], row["base_ccy"], row["buy"], row["sale"])
        bot.send_message(message.chat.id, msg)
    # Calculator
    elif not sum(map(lambda x: int(x.isalpha()), message.text)):
        # SECURITY: eval() of untrusted chat input is dangerous even with the
        # "no letters" guard above (e.g. "9**9**9" can hang the bot).
        # Consider ast.literal_eval or a real expression parser instead.
        msg = str(eval(message.text))
        bot.send_message(message.chat.id, msg)
    # Weather
    else:
        r = requests.post('https://genesis-mihailselezniov.c9users.io/weather', json={"city_name":message.text})
        response = r.json()
        if not 'error' in response:
            city_name = response['info']["weather"]["day"]['title']
            day_part = response['info']["weather"]["day"]["day_part"][0]
            temperature = day_part["temperature"]["#text"]
            dampness = day_part["dampness"]
            pressure = day_part["pressure"]
            wind_speed = day_part["wind_speed"]
            # user-facing Russian weather report (runtime string, left as-is)
            msg = u"{}\n{}°\nВлажность: {}%\nДавление: {} мм\nСкорость ветра: {} м/сек".format(
                city_name, temperature, dampness, pressure, wind_speed)
            bot.send_message(message.chat.id, msg)
        # Translation suggestions
        else:
            msg = message.text
            # guess translation direction by counting Cyrillic vs Latin letters
            ru, en = 0, 0
            for i in msg:
                if u'а'<=i<=u'я' or u'А'<=i<=u'Я':
                    ru += 1
                elif u'a'<=i<=u'z' or u'A'<=i<=u'Z':
                    en += 1
            if ru > en:
                lang="ru-en"
            else:
                lang="en-ru"
            key = "trnsl.1.1.20160725T230300Z.5c6a542dc27e3a7d.3baed373aed1a3a990ad18f8e652adea67d14f72"
            url_translate_yandex = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
            r = requests.post(url_translate_yandex, data = {'key':key, 'text':msg, 'lang':lang})
            r_json = r.json()
            if r_json["code"] == 200:
                translate_msg = r_json["text"][0]
                bot.send_message(message.chat.id, translate_msg)
if __name__ == '__main__':
bot.polling(none_stop=True) |
996,730 | 79c33af819d54e291e033edffc9a0d6462e2c2a5 | import random
print('Welcome to Blackjack!')
know_to_play = input('Do you know how to play blackjack? (yes/no)').lower()
if know_to_play == 'yes':
print("Awesome, then let's get started!")
elif know_to_play == 'no':
print('I will deal you two cards at random at the start of the game. The objective of the game is to get the sum of your cards as close to 21 as possible without going over. Once you have both of your cards, you have the option to either "hit" or you can "stand". When you hit, you get another card at random, so if you have a high sum with the first two cards, it could be risky to hit as you could go over 21. When you stand, you pass up your turn without receiving another card. If you receive an ACE, you can choose to either count it as one point or eleven (11) points, depending on how many points you have already. So, if your two first cards add up to 12 and you hit and get an ACE, you should choose to have the ACE count as one point because if it was eleven (11) points, you would be over 21, losing the game. After you go, the dealer then reveals their cards, and if their two cards are 17 or more, he/she has to stand. If the dealer has less than 17, he/she picks cards until the sum is over 17. The person to get closest to 21 without going over wins.')
J = 10
Q = 10
K = 10
card_list = [2, 3, 4, 5, 6, 7, 8, 9, 10, J, Q, K, 'A'] * 4
random.shuffle(card_list)
your_cards = []
dealer_cards = []
your_cards_sum = 0
dealer_cards_sum = 0
game_over = False
player_win = False
dealer_win = False
player_blackjack = False
dealer_blackjack = False
player_bust = False
dealer_bust = False
user_input = ''
def add_new_card():
    # Deal one random card from the deck to the player and remove it from the
    # deck so it cannot be dealt twice.  Aces are NOT added to the running sum
    # here; their value (1 or 11) is chosen later in handle_player_ace().
    global your_cards_sum
    your_cards.append(random.choice(card_list))
    # list.remove drops the first card of equal value, which is equivalent
    # since equal-valued cards are interchangeable
    card_list.remove(your_cards[-1])
    if your_cards[-1] != 'A':
        your_cards_sum += your_cards[-1]
def handle_player_ace():
    # Ask the player whether a drawn ace counts as 1 or 11 and fold that
    # value into the running sum.  The 'A' entry in the hand is replaced by
    # its numeric value so it is not converted again on a later call.
    # NOTE(review): only the first ace found is converted per call; a second
    # ace is handled on a subsequent call -- confirm this is intended.
    global your_cards_sum
    if 'A' in your_cards:
        ace_spot = your_cards.index('A')
        ace_input = input("Do you want the ace to count as one or eleven? (one/eleven): ").lower()
        if ace_input == 'one':
            your_cards[ace_spot] = 1
            your_cards_sum += 1
            print(f'Your cards are {your_cards} with a sum of {your_cards_sum}')
        if ace_input == 'eleven':
            your_cards[ace_spot] = 11
            your_cards_sum += 11
            print(f'Your cards are {your_cards} with a sum of {your_cards_sum}')
def add_dealer_card():
    # Deal one random card from the deck to the dealer and remove it from the
    # deck.  As with the player, an ace's value (1 or 11) is resolved later
    # by the dealer logic in the main script, not here.
    global dealer_cards_sum
    dealer_cards.append(random.choice(card_list))
    card_list.remove(dealer_cards[-1])
    if dealer_cards[-1] != 'A':
        dealer_cards_sum += dealer_cards[-1]
def player_blackjack_check():
    """Return True when the player's hand totals exactly 21 (blackjack).

    Also sets the module-level ``game_over`` and ``player_blackjack`` flags.
    The original version assigned them as *locals*, so the flags were
    silently dropped and the main loop never saw them; ``global`` fixes that.
    Returns False (rather than implicitly None) when the hand is not 21 --
    callers only truth-test the result, so this is backward compatible.
    """
    global game_over, player_blackjack
    if your_cards_sum == 21:
        game_over = True
        player_blackjack = True
        return True
    return False
add_new_card()
add_dealer_card()
add_new_card()
add_dealer_card()
print(f"Your starting cards are {your_cards}, and the dealer's first card is {dealer_cards[0]}")
if ((dealer_cards[0] == 10 and dealer_cards[1] == 'A') or (dealer_cards[0] == 'A' and dealer_cards[1] == 10)) and ((your_cards[0] == 10 and your_cards[1] == 'A') or (your_cards[0] == 'A' and your_cards[1] == 10)):
print("How lucky/unlucky, both you and the dealer got blackjacks! No one wins!")
game_over = True
elif (dealer_cards[0] == 10 and dealer_cards[1] == 'A') or (dealer_cards[0] == 'A' and dealer_cards[1] == 10):
dealer_blackjack = True
game_over = True
elif (your_cards[0] == 10 and your_cards[1] == 'A') or (your_cards[0] == 'A' and your_cards[1] == 10):
player_blackjack = True
game_over = True
if not game_over:
user_input = input("Press any key to continue, or press 'q' to quit: ")
if user_input == 'q':
game_over = True
while user_input != 'q' and not game_over: # first main loop to control the player hitting or standing
print(f'Your cards are {your_cards} with a sum of {your_cards_sum}')
handle_player_ace()
if player_blackjack_check():
break
hit_or_stand = input(f'You currently have {your_cards_sum} points, do you want to hit or stand? (hit/stand): ').lower()
while hit_or_stand == 'hit':
add_new_card()
print(f"Your new cards are {your_cards} with a sum of {your_cards_sum}")
handle_player_ace()
if your_cards_sum > 21:
game_over = True
player_bust = True
break
if player_blackjack_check():
game_over = True
player_blackjack = True
break
hit_or_stand = input(f'You currently have {your_cards_sum} points, do you want to hit or stand? (hit/stand): ').lower()
if hit_or_stand == 'stand':
print(f"You chose to stand with {your_cards_sum} points.")
break
if not game_over:
if 'A' in dealer_cards:
ace_spot = dealer_cards.index('A')
if dealer_cards_sum <= 10:
dealer_cards_sum += 11
dealer_cards[ace_spot] = 11
else:
dealer_cards_sum += 1
dealer_cards[ace_spot] = 1
print(f"Dealer's cards are {dealer_cards} with a sum of {dealer_cards_sum}")
if dealer_cards_sum < 17:
while dealer_cards_sum < 17:
add_dealer_card()
if 'A' in dealer_cards:
ace_spot = dealer_cards.index('A')
if dealer_cards_sum <= 10:
dealer_cards_sum += 11
dealer_cards[ace_spot] = 11
else:
dealer_cards_sum += 1
dealer_cards[ace_spot] = 1
print(f"Dealer's cards are {dealer_cards} with a sum of {dealer_cards_sum}")
if dealer_cards_sum >= 17 and dealer_cards_sum < 21:
if dealer_cards_sum > your_cards_sum:
dealer_win = True
elif dealer_cards_sum < your_cards_sum:
player_win = True
if dealer_cards_sum == 21:
dealer_blackjack = True
if dealer_cards_sum > 21:
dealer_bust = True
if player_blackjack:
print("Blackjack! You win!")
elif dealer_blackjack:
print("The dealer got a blackjack. You lose!")
elif player_win:
print(f"You beat the dealer by {your_cards_sum - dealer_cards_sum} points!")
elif dealer_win:
print(f"You lost to the dealer by {dealer_cards_sum - your_cards_sum} points!")
elif dealer_bust:
print("You win, the dealer busted!")
elif player_bust:
print("You busted, the dealer wins!")
elif dealer_cards_sum == your_cards_sum:
print("You and the dealer tied! No one wins, but no one loses!")
print('Thanks for playing!')
|
996,731 | 6661023ea6ba47f67c43200d78e631c261b26c91 | from queue import Queue
class TaskQueue(object):
    """Process-wide registry for the two shared work queues.

    Both queues live as class attributes, so the class itself and every
    instance observe the same underlying Queue objects.
    """

    # shared queues: pages still to crawl, and movies scraped from them
    pagesQueue = Queue()
    moviesQueue = Queue()

    def __init__(self):
        pass

    @classmethod
    def getTaskQueue(cls):
        """Return the shared queue of pages to crawl."""
        return cls.pagesQueue

    @classmethod
    def getMovieQueue(cls):
        """Return the shared queue of scraped movies."""
        return cls.moviesQueue
996,732 | 5c81d9a0228533a48b137fbc293c15576f08cb80 | import unittest
from const import const
from request_common import hello_parents_post_url_params
from request_common import hello_parents_get
from request_common import hello_parents_post
from time import sleep
import random
from request_params import Params
from utils.idcard_util import gen_id_card_random
class user(unittest.TestCase):
'''作业模块测试'''
@classmethod
def setUpClass(cls):
params = Params()
params.load_parents_request_params('training.json')
cls.params_dict = params
"""个训列表"""
def test_training_list_success(self):
params = self.params_dict.__getitem__('test_training_list_success')
r = hello_parents_get(const.parents_training_list, params)
sleep(0.5)
self.assertEqual(r.code,200,"test_training_list_success error")
"""个训详情"""
def test_training_info_success(self):
params = self.params_dict.__getitem__('test_training_info_success')
# 删除参数中的id
id = params['id']
# url路径上带有参数,在这里进行拼装
params.pop('id')
r = hello_parents_get(const.parents_training_info+"/"+id, params)
sleep(0.5)
self.assertEqual(r.code,200,"test_training_info_success error")
"""个训反馈列表"""
def test_training_feedbackList(self):
params = self.params_dict.__getitem__('test_training_feedbackList')
# 删除参数中的trainingid
trainingid = params['trainingid']
# url路径上带有参数,在这里进行拼装
params.pop('trainingid')
r = hello_parents_get(const.parents_training_feedbackList+"/"+trainingid, params)
sleep(0.5)
self.assertEqual(r.code,200,"test_training_feedbackList error")
"""个训目标"""
def test_training_targetinfo_success(self):
params = self.params_dict.__getitem__('test_training_targetinfo_success')
r = hello_parents_get(const.parents_training_targetinfo, params)
sleep(0.5)
self.assertEqual(r.code,200,"test_training_targetinfo_success error")
@classmethod
def tearDownClass(cls):
pass |
996,733 | 9e2f79856f7c2294f866524bd2a8f30e7df11624 | from graphene_sqlalchemy_filter import FilterableConnectionField, FilterSet
from .models import Ship
# ALL_OPERATIONS = ['eq', 'ne', 'like', 'ilike', 'is_null', 'in', 'not_in', 'lt', 'lte', 'gt', 'gte', 'range']
OPERATIONS = ['eq', 'range', 'ilike']
class ShipFilter(FilterSet):
class Meta:
model = Ship
fields = {
"id": ['eq'],
"category_id": ['eq'],
"manufacturer_id": ['eq'],
"crew_cap": ['range'],
"size": ['range'],
"travel_range": ['range'],
"price": ['range'],
"ftl": ['eq'],
"used": ['eq'],
"name": ['ilike'],
"category": ['ilike']
}
class MyFilterableConnectionField(FilterableConnectionField):
filters = {Ship: ShipFilter()}
|
996,734 | e14b4d4aa19adcc7f1daef70b6721db72dc3d969 | from django.http import HttpResponse, HttpResponseRedirect
import datetime
from django.shortcuts import render, render_to_response
from django.contrib.auth import authenticate, login, get_user_model
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.core.context_processors import csrf
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
def home(request):
    # Render the home page for authenticated users; anonymous visitors are
    # shown the login page with an explanatory message instead.
    # NOTE(review): request.user.is_authenticated() is called as a method,
    # matching pre-1.10 Django (consistent with the deprecated
    # django.core.context_processors import above) -- confirm the project's
    # Django version before modernising.
    if request.user.is_authenticated():
        return render(request,'home.html', RequestContext(request))
    else:
        return render(request, 'auth.html', {'state': 'You must be signed in to view this page.', 'form' : AuthenticationForm()})
def hello(request):
    # Plain-text smoke-test endpoint.
    return HttpResponse("Hello World")
def index(request):
    # Static landing page.
    return render(request, 'index.html')
def current_datetime(request):
    """Return a minimal HTML page showing the current server time.

    Fixes the original response body, which emitted "</body" (missing '>')
    and therefore produced invalid HTML; also drops the stray trailing
    semicolon on the return statement.
    """
    now = datetime.datetime.now()
    html = "<html><body>It is %s. </body></html>" % now
    return HttpResponse(html)
|
996,735 | f455813517e552a0d59204c05af4e03516e3db72 | /usr/lib64/python3.4/copy.py |
996,736 | 5e887c87c5fbb0b02e201bbfba570d4620228991 | from django.apps import AppConfig
class DataTablesConfig(AppConfig):
    """Django application configuration for the DataTables demo app."""
    # Python dotted path of the application package (used in INSTALLED_APPS)
    name = 'data_tables'
    # human-readable name shown e.g. in the Django admin
    verbose_name = 'DataTables Demo'
996,737 | caea89f72b1163d9b41e7291b1e62336cab6ad71 | #!/usr/bin/python
# coding: utf-8
import sys
import subprocess
import os
from lxml import etree
import time
import logging
#Depuracion
logging.basicConfig(level = logging.DEBUG) #.DEBUG para debug .INFO lo oculta
logger = logging.getLogger('pfinalp1')
#Menu ayuda
ayuda = """Uso: pfinal.py [OPCIÓN...] [MÁQUINA]
crear [Numero_maquinas], Crea el numero de maquinas a utilizar en el escenario, si esta opción no se incluye se crearan solamente 2 maquinas.
arrancar [Maquina_arrancar], Máquinas o maquina que se van a arrancar, si no se pone ningún parametro opcional con el nombre de la máquina se arrancarán todas.
parar [Maquina_parar], Máquinas o maquina que se va a parar, si no se pone ningún parametro opcional con el nombre de la máquina se pararán todas.
balanceador, Lanza el ping para probrar la funcionalidad del balanceador de carga
monitor, Muestra el estado de las máquinas del escenario (Running, Shut Down).
memoria Nombre_Maquina, Lanza el comando top sobre las maquinas virtuales para ver el estado de la memoria.
dominfo Nombre_Maquina, Muestra el estado del dominio seleccionado.
destruir, Para y borra todo el escenario y elimina cualquier archivo dependiente de las maquinas virtuales.
Los argumentos obligatorios no van entre corchetes mientras que los opcionales que van entre corchetes se pueden omitir.
Informe de los errores a a.molinas@alumnos.upm.es o javier.conde.diaz@alumnos.upm.es """
class E(RuntimeError):
    """Error raised for invalid command-line arguments (e.g. server count)."""
    def __init__(self, msg):
        # msg: human-readable description shown to the user by the handler
        self.msg = msg
    def getMsg(self):
        # Return the stored message (read by the except handler in the
        # script's argument-parsing section).
        return self.msg
#Numero de servidores
def leer():
    """Return the number of backend servers stored in the ``servidores`` file.

    The file is written by the main script with a single integer; as in the
    original, the *last* line wins if there are several.  The original built
    the result in a loop and called ``numero.strip()`` without using its
    return value (str.strip returns a new string, it does not mutate), which
    was a no-op; ``int`` tolerates surrounding whitespace anyway.
    """
    with open("servidores", "r") as f1:
        lines = f1.read().splitlines()
    return int(lines[-1])
def guardar(arrancados):
    # Persist the names of the currently running VMs, one per line,
    # overwriting the previous "arrancados" state file; log the new state.
    f2 = open("arrancados","w")
    for x in arrancados:
        f2.write(x + "\n")
    f2.close()
    logger.debug(arrancados)
# Estado servidores (apagado, encendido)
def leerarrancado():
    """Return the list of currently started VM names from ``arrancados``."""
    with open("arrancados", "r") as estado:
        return [linea.strip('\n') for linea in estado]
#Array con el nombre de todas las MV
def obtenerArrayMaquinas():
    # Return the names of every VM in the scenario: the client "c1", the
    # load balancer "lb", then the backend servers "s1".."sN" where N is
    # read from the "servidores" state file.
    num_servidores = leer()
    return ["c1", "lb"] + ["s" + str(n) for n in range(1, num_servidores + 1)]
def crear():
#Creo el fichero donde pongo el estado de las maquinas
f2 = open("arrancados" , "w")
f2.close()
#Creo las imagenes de diferencias
servers = leer() #Numero de servidores
maquinas = obtenerArrayMaquinas() #Array con el nombre de todas las MV
rutaFichero = os.getcwd()
comprimida=os.path.isfile(rutaFichero+"/cdps-vm-base-p3.qcow2.bz2") #Voy a comprobar si la maquina esta comprimida
#Aqui hago las sentencias para la descompresion del archivo
if comprimida == True:
subprocess.call('bunzip2 cdps-vm-base-p3.qcow2.bz2', shell = True)
for x in maquinas:
subprocess.call('qemu-img create -f qcow2 -b cdps-vm-base-p3.qcow2 ' + x + '.qcow2', shell = True)
#Modifico los xml
for x in maquinas:
tree = etree.parse('plantilla-vm-p3.xml')
root = tree.getroot()
nombre = root.find("name")
nombre.text = x
source = root.find("./devices/disk[@type='file'][@device='disk']/source")
source.set("file", rutaFichero + "/" + x + ".qcow2")
bridge = root.find("./devices/interface[@type='bridge']/source")
LAN = "LAN1"
if x == "c1":
LAN = "LAN1"
elif x == "lb":
root[10].insert(3, etree.Element("interface"))
root[10][3].set("type" , "bridge")
root[10][3].insert(0, etree.Element("source"))
root[10][3].insert(1, etree.Element("model"))
root[10][3][0].set("bridge", "LAN2")
root[10][3][1].set("type", "virtio")
LAN = "LAN1"
else:
LAN = "LAN2"
bridge.set("bridge", LAN)
f1 = open(x + '.xml' , "w")
f1.write(etree.tostring(tree))
f1.close()
#Creo los bridges
subprocess.call('sudo brctl addbr LAN1', shell = True)
subprocess.call('sudo brctl addbr LAN2', shell = True)
subprocess.call('sudo ifconfig LAN1 up', shell = True)
subprocess.call('sudo ifconfig LAN2 up', shell = True)
#Creo las MV de forma persistente
for x in maquinas:
subprocess.call('virsh define ' + x + '.xml', shell = True)
#Configuro los archivos de redes, nombre de la maquina y LB como router
#Ips de las interfaces eth0 de las MV y gateways
subprocess.call('mkdir mnt', shell = True)
ip = ["10.0.1.2", "10.0.1.1"]
gateway = ["10.0.1.1", "no tiene"]
for x in range(1, servers + 1):
ip.append("10.0.2." + str(10 + x))
gateway.append("10.0.2.1")
#cambio el contendio de los archivos
for x in range(0, len(maquinas)):
logger.debug(maquinas[x])
subprocess.call("sudo vnx_mount_rootfs -s -r " + maquinas[x] +".qcow2 mnt", shell = True)
f1 = open("mnt/etc/hostname", "w")
f1.write(maquinas[x])
f1.close()
finterface = open("mnt/etc/network/interfaces" , "w")
finterface.write("auto lo" + "\n")
finterface.write("iface lo inet loopback" + "\n")
finterface.write("auto eth0" + "\n")
finterface.write("iface eth0 inet static" + "\n")
finterface.write("address " + ip[x] + "\n")
finterface.write("netmask 255.255.255.0" + "\n")
if maquinas[x] != "lb":
finterface.write("gateway " + gateway[x] + "\n")
finterface.close()
#Configuracion del router cambia un poco, debo anadirle interfaz eth1
else:
finterface.write("auto eth1" + "\n")
finterface.write("iface eth1 inet static" + "\n")
finterface.write("address 10.0.2.1 " + "\n")
finterface.write("netmask 255.255.255.0" + "\n")
finterface.close()
#Configuro el balanceador
fbalancr = open("mnt/etc/rc.local", 'r')
lines = fbalancr.readlines()
fbalancr.close()
fbalancw = open("mnt/etc/rc.local", 'w')
balanceador = "xr -dr --verbose --server tcp:0:8080 "
for x in range(2, len(maquinas)):
balanceador += "--backend " + ip[x] + ":80 "
balanceador += "--web-interface 0:8001"
for line in lines:
if "exit 0" in line:
fbalancw.write("service apache2 stop" + "\n")
fbalancw.write(balanceador + " \n" )
fbalancw.write(line)
else:
fbalancw.write(line)
fbalancw.close()
#Configuro para que se comporte como router ip
f2 = open('mnt/etc/sysctl.conf', 'r')
lines = f2.readlines()
f2.close()
f3 = open('mnt/etc/sysctl.conf', 'w')
for line in lines:
if "net.ipv4.ip_forward" in line:
f3.write("net.ipv4.ip_forward=1")
else:
f3.write(line)
f3.close()
subprocess.call("bash -c \"echo "+maquinas[x]+" > ./mnt/var/www/html/index.html\" ", shell = True)
time.sleep(1) #no deberia de tener que usar el timer !!!!CAMBIAR
subprocess.call('sudo vnx_mount_rootfs -u mnt', shell = True)
subprocess.call('rm -r mnt/', shell = True)
#Configuro el host
subprocess.call('sudo ifconfig LAN1 10.0.1.3/24', shell = True)
subprocess.call('sudo ip route add 10.0.0.0/16 via 10.0.1.1', shell = True)
def arrancar(machines):
arrancados=[]
arrancados = leerarrancado()
servers = leer()
if machines == "todas":
maquinas = obtenerArrayMaquinas()
else:
maquinas = [machines]
if not machines in obtenerArrayMaquinas() :
logger.debug("Lo sentimos, esa maquina no esta en elsecenario")
return
for x in maquinas:
if not x in arrancados:
subprocess.call('virsh start ' + x, shell = True)
#arranco en background --> &
subprocess.call('xterm -e " virsh console ' + x + '" &', shell = True)
arrancados.append(x);
guardar(arrancados)
def parar(machines):
arrancados=[]
arrancados = leerarrancado()
servers = leer()
if machines == "todas":
maquinas = obtenerArrayMaquinas()
else:
maquinas = [machines]
if not machines in obtenerArrayMaquinas() :
logger.debug("Lo sentimos, esa maquina no esta en elsecenario")
return
for x in maquinas:
if x in arrancados:
subprocess.call('virsh shutdown ' + x, shell = True)
arrancados.remove(x);
guardar(arrancados)
def destruir():
arrancados=[]
arrancados = leerarrancado()
servers = leer()
maquinas = obtenerArrayMaquinas()
for x in maquinas:
if x in arrancados:
subprocess.call('virsh destroy ' + x, shell = True)
arrancados.remove(x)
subprocess.call('virsh undefine ' + x, shell = True)
subprocess.call('rm ' + x + '.xml', shell = True)
subprocess.call('rm -f ' + x + '.qcow2', shell = True)
subprocess.call('rm servidores', shell = True)
subprocess.call('rm arrancados', shell = True)
#No se si tengo que descomprimirlo o ya me lo dan
#bunzip2 cdps-vm-base-p3.qcow2.bz2 .
if len(sys.argv) > 1:
orden = sys.argv[1]
if orden == "crear":
servers = 2
if len (sys.argv) > 2:
servers = int(sys.argv[2])
if servers < 1 or servers > 5:
try:
raise E("Error, Numero de servidores entre 1 y 5")
except E, obj:
logger.debug('Msg:'+ obj.getMsg())
sys.exit(1)
else:
f1 = open("servidores" , "w")
f1.write(str(servers))
f1.close()
crear()
elif orden == "arrancar":
if len(sys.argv) == 2:
arrancar("todas")
elif len(sys.argv) == 3:
arrancar(sys.argv[2])
else:
print ayuda
elif orden == "parar":
if len(sys.argv) == 2:
parar("todas")
elif len(sys.argv) == 3:
parar(sys.argv[2])
else:
print ayuda
elif orden == "destruir":
destruir()
elif orden == "monitor":
subprocess.call('xterm -e "watch --interval 5 virsh list ' + '" &', shell = True)
elif orden == "memoria":
if len(sys.argv) == 3:
servers = leer()
maquinas = obtenerArrayMaquinas()
monitorizar= sys.argv[2]
variable = False
iteracion = 0
for x in maquinas:
ip = ["10.0.1.2", "10.0.1.1"]
for x in range(1, servers + 1):
ip.append("10.0.2." + str(10 + x))
for x in maquinas:
variable = ( monitorizar ==x)
if variable:
subprocess.call('xterm -e "watch ssh root@'+ ip[iteracion]+' \'top -b | head -n 20\' '+'"&', shell = True)
break; #para salir del for
iteracion += 1
logger.debug(iteracion)
if ( variable != True):
print(" Lo sentimos, la maquina que ha intentado monitorizar no se encuentra en el escenario")
else:
print ayuda
elif orden == "dominfo":
if len(sys.argv) == 3:
servers = leer()
maquinas = obtenerArrayMaquinas()
monitorizar= sys.argv[2]
variable = False
iteracion = 0
for x in maquinas:
variable = ( monitorizar ==x)
if variable:
subprocess.call('xterm -e "watch virsh dominfo '+ monitorizar+'"&', shell = True)
break; #para salir del for
iteracion += 1
logger.debug(iteracion)
if ( variable != True):
print(" Lo sentimos, la maquina sobre la que ha intentado lanzar este comando no se encuentra en el escenario")
else:
print " Por favor este parametro necesita un parametro OPCIONAL con el nombre de la maquina de la que quieres obtener su info de dominio"
elif orden == "balanceador":
subprocess.call('xterm -e " while true; do curl 10.0.1.1:8080; sleep 0.1; done" &', shell = True)
else:
print ayuda
else:
print ayuda
|
996,738 | ec2f4d407f846f9bb0382ed99310f5f323e16125 | #!/usr/bin/env python
from subprocess import Popen
from std_msgs.msg import String
import rospy
import os
import signal
class bag_record:
    """Toggle rosbag recording of all topics via /seqreg_tpp/record messages.

    The first message starts ``rosbag record -a`` with the bag named after
    the message payload; the next message stops it, and so on.
    """
    def __init__(self):
        # True while a rosbag child process is running
        self.recording = False
        self.recordSub = rospy.Subscriber("/seqreg_tpp/record", String, self.record)
    def record(self,msg):
        # Start recording if idle (msg.data is the bag base name); otherwise
        # stop the current recording.
        if not self.recording:
            self.recording = True
            # os.setsid puts the child in its own process group so the whole
            # group (rosbag plus its recorder children) can be killed later
            self.recorder = Popen('rosbag record -a -O /home/james/Dropbox/NASA/experiment/robust_bags/%s.bag' % msg.data, shell=True, preexec_fn=os.setsid)
        else:
            self.recording = False
            # SIGINT lets rosbag close the bag cleanly; SIGTERM to the group
            # then reaps any remaining children (pgid == child pid via setsid)
            self.recorder.send_signal(signal.SIGINT)
            os.killpg(self.recorder.pid,signal.SIGTERM)
    def shutdown(self):
        # Node shutdown hook: make sure the recorder process group is killed.
        os.killpg(self.recorder.pid, signal.SIGTERM)
def main():
    # Initialise the bag_record ROS node and spin until shutdown, killing
    # any in-progress recording on exit via the shutdown hook.
    rospy.init_node('bag_record', anonymous=True)
    BR = bag_record()
    rospy.on_shutdown(BR.shutdown)
    rospy.spin()
if __name__ == '__main__':
    main()
|
996,739 | b97c14fa89d75cb9556307d97278c2d65e4761bd | import requests
from tabulate import tabulate
import pandas as pd
import time
import datetime
import re
from maks_lib import output_path
print('Execution Started Please wait....')
today = datetime.datetime.now()
start_time = time.time()
#Csv File location to store the scraped data
path = output_path+'Consolidate_Barclays_Data_Mortgage_'+today.strftime('%m_%d_%Y')+'.csv'
Excel_Table = []
jsonHeaders = {"Host":"www.barclays.co.uk","User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0","Accept":"application/json, text/javascript, */*; q=0.01","Accept-Language":"en-US,en;q=0.5","Accept-Encoding":"gzip, deflate, br","Referer":"https://www.barclays.co.uk/mortgages/mortgage-calculator/","Content-Type":"application/json","currentState":"default_current_state","action":"default","X-Requested-With":"XMLHttpRequest","Content-Length":"201","Connection":"keep-alive"}
#Required Fields to scrap the data.
table_headers = ['Bank_Product_Name', 'Min_Loan_Amount', 'Term (Y)', 'Interest_Type', 'Interest', 'APRC', 'Mortgage_Loan_Amt','Mortgage_Down_Payment']
cases = [[90000, 18000], [270000, 54000], [450000, 90000]]
terms = [10, 15, 25, 30]
for case in cases:
for term in terms:
d = {"header": {"flowId": "0"},
"body": {"wantTo": "FTBP",
"estimatedPropertyValue": case[0],
"borrowAmount": case[0]-case[1],
"interestOnlyAmount": 0,
"repaymentAmount": case[0]-case[1],
"ltv": 80,
"totalTerm": term*12,
"purchaseType": "Repayment"
}
}
print(d)
#Getting API passing url and post data through requests module.
r = requests.post('https://www.barclays.co.uk/dss/service/co.uk/mortgages/costcalculator/productservice',json=d, headers=jsonHeaders)
for data in r.json()['body']['mortgages']:
try:
Bank_Product_Name = data['mortgageName']
APRC = data['aprValue']
Interest = data['initialRate']
Balance = data['monthlyRepaymentHolder']['monthlyRepaymentForFixedTerm']
Term_In_Months = data['monthlyRepaymentHolder']['fixedTermPeriod']
MLTV = data['maxLtv']
LTV = 100-data['maxLtv']
Min_Loan_Amount = data['minLoanAmount']
Interest_Type = data['mortgageType'].title()
Interest_Type = Interest_Type if 'fixed' in Interest_Type.lower() else 'Variable'
if int(data['applicationFee'])!=0:
Bank_Product_Name = Bank_Product_Name+' With Fee'
if data["eligibility"]["firstTimeBuyer"]:
if data['maxLtv'] in [80]:
if Min_Loan_Amount == 5000:
check = ['cashback', 'purchase', 'scotland', 'barclays green home ']
found = False
for c in check:
if c in Bank_Product_Name.lower():
found = True
break
if not found:
a = [Bank_Product_Name, Min_Loan_Amount, term, Interest_Type, str(float(Interest))+'%', str(float(APRC))+'%', case[0]-case[1],' '+str(int(LTV))+'%']
Excel_Table.append(a)
except Exception as e:
print(e)
print(tabulate(Excel_Table))
#----------------------------------------Moving Data To CSV File using Pandas---------------------------------------
df = pd.DataFrame(Excel_Table, columns=table_headers)
df['Date'] = ' '+today.strftime('%Y-%m-%d')
df['Bank_Native_Country'] = 'UK'
df['State'] = 'London'
df['Bank_Name'] = 'Barclays'
df['Bank_Local_Currency'] = 'GBP'
df['Bank_Type'] = 'Bank'
df['Bank_Product'] = 'Mortgages'
df['Bank_Product_Type'] = 'Mortgages'
df['Bank_Offer_Feature'] = 'Offline'
# df['Mortgage_Down_Payment'] = '20%'
df['Mortgage_Category'] = 'New Purchase'
df['Mortgage_Reason'] = 'Primary Residence'
df['Mortgage_Pymt_Mode'] = 'Principal + Interest'
df['Bank_Product_Code'] = None
df['Fixed_Rate_Term'] = df['Bank_Product_Name'].apply(lambda x: re.sub('[^0-9]','',re.findall('\d.*year',x,re.IGNORECASE)[0]) if len(re.findall('\d.*year',x,re.IGNORECASE))!=0 else None)
#Arranging columns in required format.
order = ["Date", "Bank_Native_Country", "State", "Bank_Name", "Bank_Local_Currency", "Bank_Type", "Bank_Product", "Bank_Product_Type", "Bank_Product_Name", "Min_Loan_Amount", "Bank_Offer_Feature", "Term (Y)", "Interest_Type", "Interest", "APRC", "Mortgage_Loan_Amt", "Mortgage_Down_Payment", "Mortgage_Category", "Mortgage_Reason", "Mortgage_Pymt_Mode", "Fixed_Rate_Term", "Bank_Product_Code"]
df = df[order]
df.to_csv(path, index=False) #Mobving Data To CSV File.
print('Execution Completed.')
print('Total Execution time is ',time.time()-start_time, 'seconds') |
996,740 | 2e5c99873d49906f1b77f4465fa75ae2616718c1 | import tkinter as Tk
# Tk._test() opens tkinter's built-in smoke-test dialog and blocks in its own
# mainloop; after it is dismissed, a second (empty) root window is shown.
Tk._test()
root = Tk.Tk()
root.mainloop()
|
996,741 | bd9a5fe0a6bc391c1c5c62d82fdc1dda1ad9f6b7 | from tkinter import *
import os
root = Tk()
root.geometry("350x100")
root.configure(background="wheat2")
def _run_analysis():
    """Run both analyzers on the team entered in the text box, then close.

    Security fix: the original built shell command lines by concatenating
    ``entry.get()`` into ``os.system`` strings, so entry text containing
    shell metacharacters would be interpreted by the shell.  Passing an
    argument list to subprocess (shell=False) removes that injection risk.
    """
    import subprocess
    team = entry.get()
    subprocess.call(['python3', 'analyzer.py', team])
    subprocess.call(['python3', 'adv_analyzer.py', team])
    root.destroy()

def runButton():
    """Button callback: run the analyzers on the entered team name."""
    _run_analysis()

def runEnter(self):
    """<Return> key binding callback (ignores the event argument)."""
    _run_analysis()
frame = Frame(root)
frame.pack({"side":"bottom"})
label = Label(root, text = "Enter Team Name (ex. Columbia)", bg = 'wheat2')
entry = Entry(root, highlightbackground='wheat2')
button_1 = Button(root, text = "Run", command = runButton, width = 15, highlightbackground='wheat2')
label.pack({"side":"top"})
button_1.pack({"side":"bottom"})
entry.pack({"side":"top"})
root.bind("<Return>", runEnter)
root.mainloop() |
996,742 | 9e4af4602521b399dd361bbaee861068e83dc641 | import unittest
from cppy.renderer import *
class TestRenderer(unittest.TestCase):
def test_strip_newline(self):
self.assertEqual("bla\nblub", strip_newlines("\nbla\nblub\n"))
self.assertEqual("bla\nblub", strip_newlines(" \nbla\nblub \n "))
self.assertEqual(" bla\n blub", strip_newlines("\n bla\n blub\n"))
def test_change_text_indent(self):
self.assertEqual("bla\nblub", change_text_indent("bla\nblub", 0))
self.assertEqual("bla\nblub", change_text_indent(" bla\n blub", 0))
self.assertEqual("bla\n blub", change_text_indent(" bla\n blub", 0))
self.assertEqual(" bla\n blub", change_text_indent("bla\nblub", 1))
self.assertEqual(" bla\n blub", change_text_indent(" bla\nblub", 2))
def test_apply_string_dict(self):
dic = { "val": "value",
"val2": "other",
"multiline": "for x in range(10):\n print(x)"
}
self.assertEqual("value", apply_string_dict("%(val)s", dic))
self.assertEqual(" value", apply_string_dict(" %(val)s", dic))
self.assertEqual("blub-value-bla", apply_string_dict("blub-%(val)s-bla", dic))
self.assertEqual("blub-value.other-bla", apply_string_dict("blub-%(val)s.%(val2)s-bla", dic))
self.assertEqual("void value::cppy_copy(value* copy)",
apply_string_dict("void %(val)s::cppy_copy(%(val)s* copy)", dic))
self.assertEqual("func() {\n value;\n}",
apply_string_dict("func() {\n %(val)s;\n}", dic) )
self.assertEqual("func() {\n value;\n other;\n}",
apply_string_dict("func() {\n %(val)s;\n %(val2)s;\n}", dic) )
code = """def func(a):
%(val)s = 10
for i in a:
%(val2)s[i] = %(val)s
%(multiline)s
"""
expect = """def func(a):
value = 10
for i in a:
other[i] = value
for x in range(10):
print(x)
"""
self.assertEqual(expect, apply_string_dict(code, dic))
if __name__ == "__main__":
unittest.main() |
996,743 | 8642bf69c1d56f23f18c198d19d84ee5b767f63c | import datetime
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.plot import plot_water_levels
from floodsystem.flood import stations_highest_rel_level
"""Task 2E: plot water level"""
def run():
# Build list of stations
stations = build_station_list()
N=5
update_water_levels(stations)
list_of_5_stations_greatest_level=stations_highest_rel_level(stations , N)
dt=10
for station in list_of_5_stations_greatest_level:
dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))
plot_water_levels(station, dates, levels)
if __name__ == "__main__":
print("*** Task 2E: CUED Part IA Flood Warning System ***")
run()
|
996,744 | 525ddf475ec85da2ee45ad9ee151ee9bb11ed81e | '''
__author__ : kris singh
__date__:
__filename__ : perms.py
__descpription__: generate permutation of a string
__email__: cs15mtech11007@iith.ac.in
'''
def swap(string, index_1, index_2):
    """Exchange two characters and return the result joined into a str.

    :param string: the input as a *list* of characters (mutated in place,
        matching the original behaviour)
    :param index_1: position of the first character to exchange
    :param index_2: position of the second character to exchange
    """
    string[index_1], string[index_2] = string[index_2], string[index_1]
    return "".join(string)
def perms(string):
    """Return a list of all permutations of *string*.

    Improvements over the original: every permutation is returned as a plain
    string (the original mixed strings for the 2-char base case with lists of
    characters otherwise), inputs of length 0 and 1 are handled instead of
    crashing, and the sibling ``swap`` helper is no longer needed. Callers
    that applied ``"".join(...)`` to each element keep working, since joining
    a str returns it unchanged.
    """
    if len(string) <= 1:
        return [string]
    results = []
    for position in range(len(string)):
        # Move the character at *position* to the end, then permute the rest
        # and re-attach that character — same recursion as the original.
        chars = list(string)
        chars[-1], chars[position] = chars[position], chars[-1]
        last = chars[-1]
        for sub in perms("".join(chars[:-1])):
            results.append(sub + last)
    return results
if __name__ == "__main__":
    # Smoke test: all permutations of "123" must be distinct.
    # Converted the Python 2 `print` statements (a SyntaxError on any
    # modern interpreter) to Python 3 print() calls.
    string1 = "123"
    result_list = perms(string1)
    final_list = ["".join(i) for i in result_list]
    if len(set(final_list)) == len(final_list):
        print(final_list, len(final_list))
    else:
        print("error")
|
996,745 | 8f57ce8ce01d6907292343ccfdf8fca8a69ce7db | '''
Наименьший положительный
Выведите значение наименьшего из всех положительных элементов в списке.
Известно, что в списке есть хотя бы один положительный элемент,
а значения всех элементов списка по модулю не превосходят 1000.
Формат ввода
Вводится список чисел. Все числа списка находятся на одной строке.
'''
# Read one line of whitespace-separated integers (|n| <= 1000, at least one
# of them positive, per the problem statement above) and print the smallest
# strictly positive value.
num_list = list(map(int, input().split()))
# min() over a generator replaces the original manual filter + scan loop.
print(min(n for n in num_list if n > 0))
|
996,746 | aadf63b3fff95aee34147749981011eac9add127 |
#calss header
class _HISTOGRAMS():
def __init__(self,):
self.name = "HISTOGRAMS"
self.definitions = histogram
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['histogram']
|
996,747 | 44f09e4c93268d845ac5800fc1b70467945d5c61 | import sys
import attrdict
import numpy as np
import numpy.linalg
from scipy import io, interpolate
from thesis2 import codegen, kalman, mocos, symstats, vme
from thesis2.experiments import base
class SymbolicModel(base.SymbolicModel):
    '''trakSTAR path reconstruction model.'''

    x = ['x', 'y', 'z', 'q0', 'q1', 'q2', 'q3',
         'vx', 'vy', 'vz', 'wx', 'wy', 'wz']
    '''State vector.'''

    y = ['x_meas', 'y_meas', 'z_meas',
         'q1_meas', 'q2_meas', 'q3_meas']
    '''Measurement vector.'''

    p = []
    '''Parameter vector.'''

    s = []
    '''Exogenous signals vector.'''

    c = ['x_meas_std', 'y_meas_std', 'z_meas_std',
         'q1_meas_std', 'q2_meas_std', 'q3_meas_std',
         'w0_std', 'v0_std', 'pos0_std', 'q_renorm_gain']
    '''Constants vector.'''

    def f(self, t, x, p, s, c):
        '''Drift function.'''
        a = attrdict.AttrDict(self.unpack_arguments(t=t, x=x, p=p, s=s, c=c))
        # Feedback term pushing the quaternion back toward unit norm
        # (zero when q is exactly normalized); counters integration drift.
        renorm = a.q_renorm_gain * (1 - a.q0**2 - a.q1**2 - a.q2**2 - a.q3**2)
        # Position integrates velocity; the quaternion integrates the angular
        # rate (presumably body-frame rates — TODO confirm the sign/frame
        # convention); velocity and angular rate are modelled as driftless.
        return [
            a.vx,
            a.vy,
            a.vz,
            -0.5 * (a.q1 * a.wx + a.q2 * a.wy + a.q3 * a.wz) + renorm * a.q0,
            -0.5 * (-a.q0 * a.wx - a.q2 * a.wz + a.q3 * a.wy) + renorm * a.q1,
            -0.5 * (-a.q0 * a.wy + a.q1 * a.wz - a.q3 * a.wx) + renorm * a.q2,
            -0.5 * (-a.q0 * a.wz - a.q1 * a.wy + a.q2 * a.wx) + renorm * a.q3,
            0,
            0,
            0,
            0,
            0,
            0,
        ]

    def meas_mean(self, t, x, p, s, c):
        '''Measurement mean.'''
        # Position plus the quaternion vector part; q0 is not measured here.
        a = attrdict.AttrDict(self.unpack_arguments(t=t, x=x, p=p, s=s, c=c))
        return [a.x, a.y, a.z, a.q1, a.q2, a.q3]

    def meas_cov(self, t, x, p, s, c):
        '''Measurement covariance matrix.'''
        # Channels are treated as independent: diagonal covariance built
        # from the per-channel standard deviations squared.
        a = attrdict.AttrDict(self.unpack_arguments(t=t, x=x, p=p, s=s, c=c))
        stds = [
            a.x_meas_std, a.y_meas_std, a.z_meas_std,
            a.q1_meas_std, a.q2_meas_std, a.q3_meas_std
        ]
        return np.diag(stds) ** 2

    def meas_ll(self, y, t, x, p, s, c):
        '''Measurement log-likelihood.'''
        a = attrdict.AttrDict(
            self.unpack_arguments(t=t, x=x, y=y, p=p, s=s, c=c)
        )
        # Sum of independent Gaussian log-densities, one per channel.
        return (symstats.normal_logpdf(a.x_meas, a.x, a.x_meas_std) +
                symstats.normal_logpdf(a.y_meas, a.y, a.y_meas_std) +
                symstats.normal_logpdf(a.z_meas, a.z, a.z_meas_std) +
                symstats.normal_logpdf(a.q1_meas, a.q1, a.q1_meas_std) +
                symstats.normal_logpdf(a.q2_meas, a.q2, a.q2_meas_std) +
                symstats.normal_logpdf(a.q3_meas, a.q3, a.q3_meas_std))

    def prior_logpdf(self, x, p, c):
        # Zero-mean Gaussian prior on initial angular rate, velocity and
        # position; the initial quaternion gets no prior here.
        a = attrdict.AttrDict(
            self.unpack_arguments(x=x, p=p, c=c)
        )
        return (symstats.normal_logpdf(a.wx, 0, a.w0_std) +
                symstats.normal_logpdf(a.wy, 0, a.w0_std) +
                symstats.normal_logpdf(a.wz, 0, a.w0_std) +
                symstats.normal_logpdf(a.vx, 0, a.v0_std) +
                symstats.normal_logpdf(a.vy, 0, a.v0_std) +
                symstats.normal_logpdf(a.vz, 0, a.v0_std) +
                symstats.normal_logpdf(a.x, 0, a.pos0_std) +
                symstats.normal_logpdf(a.y, 0, a.pos0_std) +
                symstats.normal_logpdf(a.z, 0, a.pos0_std))
def generated_src():
    """Return the Python source code for the generated model class."""
    generator = base.ModelGenerator(SymbolicModel(), 'GeneratedModel')
    return generator.generate()
def print_generated_module():
    """Write the generated model source next to this module.

    Despite the name, nothing is printed: the source is saved as
    generated_corujas.py in this module's directory.
    """
    from os import path
    target = path.join(path.dirname(__file__), 'generated_corujas.py')
    with open(target, 'w') as out:
        out.write(generated_src())
# Import the generated model module; if it has not been written to disk yet,
# generate the source on the fly and exec it into a scratch namespace.
try:
    from generated_corujas import GeneratedModel
except ImportError:
    context = {'__name__': __name__}
    exec(generated_src(), context)
    GeneratedModel = context['GeneratedModel']
def unwrap_quaternion(q):
    """Remove sign flips from a quaternion time series.

    Unit quaternions q and -q encode the same rotation, so a measurement
    stream can jump between the two. Wherever consecutive samples are more
    than 1 apart in Euclidean norm, the remainder of the series is negated
    so it evolves continuously. Jump locations are detected on the *input*
    array, matching the original implementation; the input is not modified.
    """
    continuous = np.array(q)
    step_sizes = np.linalg.norm(np.diff(q, axis=0), axis=1)
    flip_points = set(np.flatnonzero(step_sizes > 1) + 1)
    # A running sign that toggles at every detected jump is equivalent to
    # negating the tail of the array once per jump.
    sign = 1
    for k in range(len(continuous)):
        if k in flip_points:
            sign = -sign
        continuous[k] *= sign
    return continuous
def load_data(filepath, start=None, stop=None):
    """Load a trakSTAR recording from a MATLAB .mat file.

    Returns ``(tmeas, y_dict)``: measurement timestamps and a dict of
    channel arrays, with quaternion sign flips removed and positions shifted
    so the first sample sits at the origin. *start*/*stop* slice the samples.
    """
    range_ = slice(start, stop)
    data = io.loadmat(filepath)
    tmeas = data['time'].flatten()[range_]
    q = data['q'][range_]
    # q and -q are the same rotation; make the series continuous in sign.
    q_unwrapped = unwrap_quaternion(q)
    y_dict = dict(
        x_meas=data['x'].flatten()[range_],
        y_meas=data['y'].flatten()[range_],
        z_meas=data['z'].flatten()[range_],
        q0_meas=q_unwrapped[:, 0],
        q1_meas=q_unwrapped[:, 1],
        q2_meas=q_unwrapped[:, 2],
        q3_meas=q_unwrapped[:, 3],
    )
    # Re-reference positions to the first retained sample.
    y_dict['x_meas'] -= y_dict['x_meas'][0]
    y_dict['y_meas'] -= y_dict['y_meas'][0]
    y_dict['z_meas'] -= y_dict['z_meas'][0]
    return tmeas, y_dict
def spline_fit(tmeas, y_dict, smoothing_factor):
    """Fit a quintic least-squares spline to each measured channel.

    Interior knots are spaced ``smoothing_factor`` measurement periods
    apart, with a two-knot margin kept clear of both ends. Returns a dict
    mapping channel name to its fitted spline (q0_meas included even though
    it is not in the measurement vector).
    """
    knot_spacing = (tmeas[1] - tmeas[0]) * smoothing_factor
    interior_knots = np.arange(tmeas[0] + 2 * knot_spacing,
                               tmeas[-1] - 2 * knot_spacing, knot_spacing)
    channels = SymbolicModel.y + ['q0_meas']
    return {
        name: interpolate.LSQUnivariateSpline(tmeas, y_dict[name],
                                              interior_knots, k=5)
        for name in channels
    }
def given_params():
    """Fixed model constants: renormalization gain, prior standard
    deviations, and the nominal measurement noise per channel."""
    params = {'q_renorm_gain': 1, 'w0_std': 1e-3, 'v0_std': 10, 'pos0_std': 1}
    for axis in 'xyz':
        params[axis + '_meas_std'] = 0.04
    for i in range(4):
        params['q%d_meas_std' % i] = 0.0002
    return params
def param_guess():
    """Initial guesses for the estimated measurement-noise parameters."""
    guess = dict.fromkeys(('x_meas_std', 'y_meas_std', 'z_meas_std'), 0.2)
    guess.update(
        dict.fromkeys(('q1_meas_std', 'q2_meas_std', 'q3_meas_std'), 0.0005))
    return guess
def estim_problem(tmeas, y, model, col_order, meas_subdivide):
    """Assemble the variational estimation problem.

    Each measurement interval is split into *meas_subdivide* pieces and
    discretized with LGL collocation of order *col_order*. Returns the
    problem object together with its fine time grid.
    """
    # Indices of the measurement instants within the fine grid.
    yind = meas_subdivide * np.arange(tmeas.size)
    # NOTE(review): `test` is just the fine estimation grid, not test data.
    test = np.linspace(
        tmeas[0], tmeas[-1], (tmeas.size - 1) * meas_subdivide + 1
    )
    collocation = mocos.LGLCollocation(col_order)
    problem = vme.Problem(model, test, y, yind, collocation, True)
    t_fine = problem.t_fine
    return problem, t_fine
def pack_x_guess(splines, t_fine):
    """Build the initial state-trajectory guess from the spline fits.

    Velocities come from the spline first derivatives; angular rates are
    recovered algebraically from the quaternion and its derivative
    (w = 2 E(q) q_dot, consistent with SymbolicModel.f — TODO confirm the
    frame convention). Returns the packed state array for GeneratedModel.
    """
    q0 = splines['q0_meas'](t_fine)
    q1 = splines['q1_meas'](t_fine)
    q2 = splines['q2_meas'](t_fine)
    q3 = splines['q3_meas'](t_fine)
    # derivatives(t)[1] is the spline's first derivative at t.
    q0_dot = [splines['q0_meas'].derivatives(t)[1] for t in t_fine]
    q1_dot = [splines['q1_meas'].derivatives(t)[1] for t in t_fine]
    q2_dot = [splines['q2_meas'].derivatives(t)[1] for t in t_fine]
    q3_dot = [splines['q3_meas'].derivatives(t)[1] for t in t_fine]
    wx = 2 * (q0 * q1_dot + q3 * q2_dot - q2 * q3_dot - q1 * q0_dot)
    wy = 2 * (-q3 * q1_dot + q0 * q2_dot + q1 * q3_dot - q2 * q0_dot)
    wz = 2 * (q2 * q1_dot - q1 * q2_dot + q0 * q3_dot - q3 * q0_dot)
    x_dict = dict(
        x=splines['x_meas'](t_fine),
        y=splines['y_meas'](t_fine),
        z=splines['z_meas'](t_fine),
        vx=[splines['x_meas'].derivatives(t)[1] for t in t_fine],
        vy=[splines['y_meas'].derivatives(t)[1] for t in t_fine],
        vz=[splines['z_meas'].derivatives(t)[1] for t in t_fine],
        q0=q0, q1=q1, q2=q2, q3=q3, wx=wx, wy=wy, wz=wz
    )
    return GeneratedModel.pack_x(t_fine.shape, **x_dict)
def save_data(tmeas, y_dict, t_fine, xopt, filename):
    """Save the measurements plus the estimated state trajectories to a
    MATLAB file at *filename*."""
    contents = dict(y_dict)
    contents.update(zip(GeneratedModel.xnames, xopt))
    contents['t_meas'] = tmeas
    contents['t'] = t_fine
    io.matlab.mio.savemat(filename, contents)
def main(argv):
    """Estimate the sensor path from the .mat recording named on the
    command line.

    argv: [filepath, [start_sample, [stop_sample]]]
    """
    file_name = str(argv[0])
    start = int(argv[1]) if len(argv) > 1 else None
    stop = int(argv[2]) if len(argv) > 2 else None
    tmeas, y_dict = load_data(file_name, start, stop)
    splines = spline_fit(tmeas, y_dict, 4)
    params = {}
    params.update(given_params())
    params.update(param_guess())  # guesses override the fixed nominal values
    # Process-noise input matrix: noise drives only the last six states
    # (linear velocity and angular rate).
    G = np.zeros((GeneratedModel.nx, 6))
    G[-6:] = np.eye(6) * [50, 50, 50, 2.71, 2.71, 2.71]
    c = GeneratedModel.pack_c(**params)
    p = GeneratedModel.pack_p(**params)
    y = GeneratedModel.pack_y(tmeas.shape, **y_dict)
    model = GeneratedModel(G, c=c, p=p)
    problem, t_fine = estim_problem(tmeas, y, model, 5, 1)
    x_guess = pack_x_guess(splines, t_fine)
    z0 = problem.pack_decision(x_guess, None, p)
    # Noise standard deviations must stay nonnegative; nothing is fixed.
    p_lb = dict(x_meas_std=0, y_meas_std=0, z_meas_std=0,
                q0_meas_std=0, q1_meas_std=0, q2_meas_std=0, q3_meas_std=0)
    p_fix = dict()
    z_bounds = problem.pack_bounds(p_lb=p_lb, p_fix=p_fix)
    # Pin decision columns 3:7 (the quaternion) to the first measured sample.
    z_bounds[:, 3:7] = [y_dict['q0_meas'][0], y_dict['q1_meas'][0],
                        y_dict['q2_meas'][0], y_dict['q3_meas'][0]]
    nlp = problem.nlp(z_bounds)
    nlp.num_option(b'tol', 1e-6)
    nlp.int_option(b'max_iter', 100)
    zopt, solinfo = nlp.solve(z0)
    xopt, dopt, popt = problem.unpack_decision(zopt)
    yopt = model.meas_mean(t_fine, xopt, popt)  # NOTE(review): unused result
    save_data(tmeas, y_dict, t_fine, xopt, file_name + 'meas')
|
996,748 | dddd744c531746a0e01f03a3a91e4e62c1409668 | import random
# Вводим данные для расчёта максимальной суммы элементов с заданным направлением шага (вниз или вниз-вправо)
def manual_enter(size, gen):
    """Build the treasure pyramid as a flat, row-major list of numbers.

    gen == 1: prompt the user for every element (row i holds i + 1 values),
    gen == 2: fill with random integers in [1, 100],
    gen == 3: return the fixed example data from the assignment.
    Any other *gen* yields an empty list, as does size == 0.

    Fixes over the original: the dead ``j += 1`` statement is gone, and the
    gen == 3 constant is no longer rebuilt on every inner-loop iteration.
    """
    if gen == 3:
        if size == 0:
            return []
        # 28 values == rows 1..7 of the size-7 assignment pyramid.
        return [7, 5, 8, 9, 8, 2, 1, 3, 5, 6, 6, 2, 4, 4, 5, 9, 5, 3, 5, 5, 7, 7, 4, 6, 4, 7, 6, 8]
    pyramid = []
    for i in range(size):
        # Row i contains i + 1 elements.
        for _ in range(i + 1):
            if gen == 1:
                pyramid.append(int(input('Введите значения ' + str(i) + ' строки ' + str(i + 1) + ': \n')))
            elif gen == 2:
                pyramid.append(random.randint(1, 100))
    return pyramid
def max_sum_line(manual_enter, size):
    """Print the pyramid and the maximum sum of a top-to-bottom chain.

    *manual_enter* is the flat row-major pyramid (the list is consumed —
    it is emptied while the rows are rebuilt); *size* is the number of rows.
    NOTE(review): this function prints its result and ends with
    ``return print(...)``, which returns None; callers cannot use the value.
    """
    temp = []
    if size == 0:
        print('Пирамида не создана!')
    if size != 0:
        print('\n\n Рассматриваемая пирамида: ')
        # Rebuild the rows (bottom row first, then reversed) for a pretty
        # row-by-row printout; this empties the input list.
        while size != 0:
            temp.append(manual_enter[(len(manual_enter) - size):(len(manual_enter))])
            for i in range(size):
                manual_enter.pop()
            size -= 1
        temp.reverse()
        for element in temp:
            print(element)
        print('\n Максимальная сумма цепочки элементов: ')
        # Core of the computation: fold the pyramid from the bottom up,
        # accumulating candidate chain sums in nested lists.
        z = []
        s = []
        # Seed with the two bottom rows: each entry of s holds the two
        # possible sums (straight down / down-right) for one parent cell.
        x = temp[len(temp) - 1]
        y = temp[len(temp) - 2]
        for i in range(len(y)):
            z.append(y[i] + x[i])
            z.append(y[i] + x[i + 1])
            s.append(z[::])
            z.clear()
        del temp[- 2:]
        y.clear()
        x = temp[len(temp) - 1]
        # Merge adjacent candidate lists so each upper cell sees both of
        # its children's accumulated sums.
        for j in range(len(temp)):
            s[j] = s[j] + s[j + 1]
        del s[- 1:]
        # Repeat row by row toward the apex, extending every candidate sum
        # with the current row's cell values.
        for res in range(len(temp)):
            for k in range(len(temp)):
                n = 0
                while n < len(s[k]):
                    z.append(s[k][n] + x[k])
                    n += 1
                y.append(z[::])
                z.clear()
            s.clear()
            for l in range(len(y) - 1):
                s.append(y[l] + y[l + 1])
            if len(y) > 1:
                y.clear()
                del temp[len(temp) - 1:]
                x = temp[len(temp) - 1]
        # All chain sums through the apex are in y[0]; print the best one.
        return print(max(y[0]))
# Simple menu: choose how the treasure pyramid is filled — manual entry,
# random generation, or the fixed data from the assignment statement.
size = 0
gen = int(input(
    '\n1) Ручной ввод значений \n2) Автоматический ввод значений \n3) Ввод заданных значений \n\nВведите выбранный номер пункта из предложенных: \n '))
if gen == 1 or gen == 2:
    size = int(input('Введите размер пирамиды: \n'))
if gen == 3:
    # The assignment's fixed pyramid always has 7 rows.
    size = 7
if gen < 1 or gen > 3:
    print('Введённый номер не корректен.')
# With an invalid choice size stays 0 and max_sum_line reports an error.
max_sum_line(manual_enter(size, gen), size)
|
996,749 | 1c1459739cfd478f5da3038937c62f600006325b | import requests
# NOTE(review): this script asks two call-testing services to ring the
# entered number, formatted as "(XXX) XXX-XX-XX" from its first ten digits.
# Triggering test calls to an arbitrary number is easy to abuse — confirm
# the intended use before maintaining or extending this.
_phone = input("Enter number: ")
a = requests.get("https://avtobzvon.ru/request/makeTestCall",
                 params={"to": "("+ _phone[0] + "" + _phone[1] + "" + _phone[2] +") "+ _phone[3] + ""+_phone[4] +""+_phone[5]+"-"+_phone[6]+""+_phone[7]+"-"+_phone[8]+""+_phone[9]+""}
                 )
print("Answer 1: ")
print(a.text)
# Second service receives the identically formatted number via POST.
c = requests.post('https://autodozvon.ru/test/makeTestCall',
                  params={"to": "("+ _phone[0] + "" + _phone[1] + "" + _phone[2] +") "+ _phone[3] + ""+_phone[4] +""+_phone[5]+"-"+_phone[6]+""+_phone[7]+"-"+_phone[8]+""+_phone[9]+""}
                  )
print("Answer 2: ")
print(c.text)
|
996,750 | bfcdb68682c53a49e7d4987e2e16620f3f4b5696 | import sys
import os
import json
import zipfile
import requests
"""Self-update / maintenance CLI.

Usage:
    script u|update [-version <tag>]        download and unpack a release
    script clean all|log|heartbeat_table    reset local state
"""
arguments = sys.argv[1:]  # fixed: no longer mutates sys.argv itself

# Version manifest listing all published releases.
url = "https://raw.githubusercontent.com/UntriexTv/test_directory/main/ver.json"

if len(arguments) == 0:
    sys.exit()
command = arguments[0]

if command in ["u", "update"]:
    try:
        server_version = json.loads(requests.get(url).text)
    except Exception as error:
        print(f"CAN'T DOWNLOAD VERSION LIST. ERROR: {error}")
        sys.exit()
    # Local install metadata; fall back to a recovery stub when the file is
    # missing or corrupt. BUG FIX: the original only loaded this in the
    # auto-select branch, so an explicit `-version` run crashed with
    # NameError at the final status message.
    try:
        with open("version.json", "r", encoding='utf-8') as f:
            version = json.load(f)
    except (OSError, ValueError):  # was a bare `except:` — narrowed
        version = {"id": 0, "version": "recovery"}
    version_download = None
    if "-version" in arguments:
        # Explicit version requested on the command line.
        try:
            version_download = arguments[arguments.index("-version") + 1]
        except IndexError:
            print("Version argument is empty.")
            sys.exit()
        if version_download not in server_version:
            print("Version not found.")
            sys.exit()
    else:
        # Pick the newest server release with an id above the local one.
        for name, data in server_version.items():
            if data["id"] > version["id"]:
                version_download = name
    if version_download is None:
        # BUG FIX: the original crashed with NameError when no newer
        # version existed; report and exit cleanly instead.
        print("No newer version available.")
        sys.exit()
    try:
        # BUG FIX: the original opened this file with mode "wb" *and*
        # encoding='utf-8', which raises ValueError — every download
        # failed. Binary files take no encoding argument.
        with open("update.zip", "wb") as save:
            save.write(requests.get(
                f"https://github.com/UntriexTv/test_directory/releases/download/{version_download}/update.zip"
            ).content)
    except Exception as error:
        print(f"FAILED TO DOWNLOAD UPDATE. ERROR: {error}")
        sys.exit()
    with zipfile.ZipFile("update.zip", "r") as zip_ref:
        zip_ref.extractall("")
    os.remove("update.zip")
    print("SUCCESS")
    print(f"""Update from version {version["version"]} to {version_download} was sucesfull""")

if command == "clean":
    # BUG FIX: a bare `clean` used to raise IndexError; it is now a no-op.
    target = arguments[1] if len(arguments) > 1 else ""
    if target in ("all", "log"):
        open("log.txt", "w").close()  # truncate the log file
    if target in ("all", "heartbeat_table"):
        # Empty every per-line heartbeat list while keeping the keys.
        with open("settings.json", "r", encoding='utf-8') as file:
            settings = json.load(file)
        for line in settings["heartbeat_table"]:
            settings["heartbeat_table"][line] = []
        with open("settings.json", "w", encoding='utf-8') as file:
            json.dump(settings, file, indent=2)
|
996,751 | 68c22bc889589c8b921510926764c0f57582030f | from bs4 import BeautifulSoup
import requests
import re
import random
request_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
search_url = "https://www.last10k.com/Search?q="
symbols = ["PIH", "TURN", "FCCY", "SRCE", "ABIL", "ANCX", "ACNB", "AGFS", "AGFSW", "ABTX", "AMR", "AMRWW", "AMBC", "AMBCW", "ATAX", "AMNB", "ANAT", "AMRB", "ABCB", "AMSF", "ASRV", "ASRVP", "ATLO", "AFSI", "ANCB", "ANDA", "ANDAR", "ANDAU", "ANDAW", "ACGL", "ACGLO", "ACGLP", "AGII", "AGIIL", "AROW", "ASFI", "ATAC", "ATACR", "ATACU", "AAME", "ACBI", "ACFC", "ATLC", "AFH", "AFHBL", "AUBN", "BWINA", "BWINB", "BANF", "BANFP", "BCTF", "BOCH", "BMRC", "BMLP", "BKSC", "BOTJ", "OZRK", "BFIN", "BWFG", "BANR", "DFVL", "DFVS", "DLBL", "DLBS", "DTUL", "DTUS", "DTYL", "DTYS", "FLAT", "STPP", "TAPR", "BHAC", "BHACR", "BHACU", "BHACW", "BYBK", "BCBP", "BSF", "BNCL", "BGCP", "BRPA", "BRPAR", "BRPAU", "BRPAW", "BCAC", "BCACR", "BCACU", "BCACW", "BRAC", "BRACR", "BRACU", "BRACW", "HAWK", "BCOR", "BHBK", "BOFI", "BOFIL", "BOKF", "BOKFL", "BOMN", "BPFH", "BPFHP", "BPFHW", "BDGE", "BHF", "BYFC", "BPY", "BRKL", "BMTC", "BLMT", "CFFI", "CATC", "CAC", "CCBG", "CFFN", "CSTR", "CARO", "CART", "CARV", "CATY", "CATYW", "CBFV", "CBOE", "CBTX", "CSFL", "CFBK", "CVCY", "CNBKA", "CHFN", "CHFC", "CHMG", "CCCR", "JRJC", "HGSH", "CLDC", "CINF", "CZNC", "CZWI", "CZFC", "CIZN", "CHCO", "CIVB", "CIVBP", "CSBK", "CMSS", "CMSSR", "CMSSU", "CMSSW", "CME", "CCNE", "CWAY", "COBZ", "CVLY", "CIGI", "CBAN", "COLB", "CBSH", "CBSHP", "ESXB", "CFBI", "CTBI", "CWBC", "CNFR", "CNOB", "CNAC", "CNACR", "CNACU", "CNACW", "CPSS", "CRVL", "ICBK", "COWN", "COWNZ", "PMTS", "CACC", "DGLD", "DSLV", "GLDI", "SLVO", "TVIX", "TVIZ", "UGLD", "USLV", "USOI", "VIIX", "VIIZ", "XIV", "ZIV", "CRESY", "CVBF", "DHIL", "DCOM", "DNBF", "DGICA", "DGICB", "LYL", "DOTA", "DOTAR", "DOTAU", "DOTAW", "ETFC", "EBMT", "EGBN", "EFBI", "EWBC", "EHTH", "ELEC", "ELECU", "ELECW", "ESBK", "EMCI", "EMCF", "ECPG", "ESGR", "ENFC", "EBTC", "EFSC", "EQFN", "EQBK", "ERIE", "ESQ", "ESSA", "EEFT", "FANH", "FMAO", "FFKT", "FMNB", "FBSS", "FSAC", "FSACU", "FSACW", "FNHC", "FFBW", "FDBC", "LION", "FITB", "FITBI", "FNGN", "FISI", "FNTE", "FNTEU", "FNTEW", "FBNC", 
"FNLC", "BUSE", "FBIZ", "FCAP", "FCNCA", "FCBC", "FCCO", "FBNK", "FDEF", "FFBC", "FFBCW", "FFIN", "THFF", "FFNW", "FFWM", "FGBI", "FHB", "INBK", "INBKL", "FIBK", "FRME", "FMBH", "FMBI", "FNWB", "FSFG", "FUNC", "FUSB", "FSV", "FFIC", "FNBG", "FRPH", "FSBW", "FSBC", "FULT", "GABC", "GBCI", "GLBZ", "GBLI", "GBLIL", "GBLIZ", "GPAQU", "GSHT", "GSHTU", "GSHTW", "GOV", "GOVNI", "GSBC", "GNBC", "GCBC", "GLRE", "GRIF", "GGAL", "GTYH", "GTYHU", "GTYHW", "GBNK", "GNTY", "GFED", "GWGH", "HALL", "HBK", "HLNE", "HBHC", "HBHCL", "HAFC", "HONE", "HWBK", "HYAC", "HYACU", "HYACW", "HIIQ", "HTLF", "HNNA", "HTBK", "HFWA", "HX", "HMNF", "HBCP", "HOMB", "HFBL", "HMST", "HMTA", "HTBI", "HOPE", "HFBC", "HBNC", "HBMD", "HBAN", "HBANN", "HBANO", "HBANP", "HVBC", "IAM", "IAMXR", "IAMXW", "IBKC", "IBKCO", "IBKCP", "ICCH", "IROQ", "ILG", "INDB", "IBCP", "IBTX", "INDU", "INDUU", "INDUW", "IPCC", "IBKR", "IBOC", "INTL", "ISTR", "ISBC", "ITIC", "JXSB", "JRVR", "JTPY", "KAAC", "KAACU", "KAACW", "KBLM", "KBLMR", "KBLMU", "KBLMW", "KRNY", "KFFB", "KINS", "KNSL", "LSBK", "LBAI", "LKFN", "LCA", "LCAHU", "LCAHW", "LARK", "LCNB", "LTXB", "LACQ", "LACQU", "LACQW", "TREE", "LX", "LOB", "LIVE", "LMFA", "LMFAW", "LPLA", "LBC", "MBTF", "MACQ", "MACQW", "MIII", "MIIIU", "MIIIW", "MCBC", "MFNC", "MGYR", "MHLD", "MSFG", "MLVF", "MKTX", "MRLN", "MPAC", "MPACU", "MPACW", "MBFI", "MBFIO", "MFIN", "MFINL", "MELR", "MBWM", "MBIN", "EBSB", "CASH", "MPB", "MBCN", "MSBI", "MOFG", "MMAC", "MMDM", "MMDMR", "MMDMU", "MMDMW", "MORN", "MSBF", "MTECU", "MUDSU", "MFSF", "MVBF", "NDAQ", "NKSH", "NCOM", "NESR", "NESRW", "NGHC", "NGHCN", "NGHCO", "NGHCP", "NGHCZ", "NHLD", "NHLDW", "NSEC", "NWLI", "JSM", "NAVI", "NBTB", "NEBUU", "UEPS", "NYMTP", "NMRK", "NODK", "NICK", "NCBS", "NMIH", "NBN", "NTRS", "NTRSP", "NFBK", "NRIM", "NWBI", "NWFL", "OVLY", "OCFC", "OFED", "OVBC", "OLBK", "ONB", "OPOF", "OSBC", "OSBCP", "OBAS", "OPHC", "ORIT", "ORRF", "OSPR", "OSPRU", "OSPRW", "OTTW", "OXBR", "OXBRW", "PMBC", "PPBI", "PACW", "PKBK", 
"PBHC", "PNBK", "PYDS", "PBBI", "PCSB", "PDLB", "PGC", "PWOD", "WRLS", "WRLSR", "WRLSU", "WRLSW", "PEBO", "PEBK", "PFIS", "PBCT", "PBCTP", "PUB", "PICO", "PNFP", "EAGLU", "PLBC", "PBSK", "BPOP", "BPOPM", "BPOPN", "PBIB", "PRAA", "PFBI", "PFG", "PVBC", "PROV", "PBIP", "QCRH", "RNDB", "RBB", "RDFN", "RBNC", "RNST", "RBCAA", "FRBK", "RVSB", "STBA", "SCAC", "SCACU", "SCACW", "SAFT", "SAL", "SASR", "SBFG", "SBFGP", "SBCF", "SNFCA", "SEIC", "SLCT", "SIGI", "STNL", "STNLU", "STNLW", "SFBS", "SVBI", "SHBI", "SIFI", "SIEB", "BSRR", "SBNY", "SBNYW", "SAMG", "SFNC", "SLM", "SLMBP", "SMBK", "SFBC", "SSB", "SFST", "SMBC", "SONA", "SBSI", "SSLJ", "STFC", "STBZ", "STLR", "STLRU", "STLRW", "SBT", "SSFN", "SYBT", "SMMF", "SBBX", "SIVB", "TROW", "AMTD", "TBNK", "TCBI", "TCBIL", "TCBIP", "TCBIW", "TFSL", "TBBK", "CG", "TCGP", "TCFC", "FBMS", "FLIC", "NAVG", "TIL", "TSBK", "TIPT", "TMSR", "TMSRW", "TCBK", "TSC", "TBK", "TRST", "TRMK", "TRCB", "GROW", "UMBF", "UMPQ", "UNAM", "UBSH", "UNB", "UBCP", "UBOH", "UBSI", "UCBA", "UCBI", "UCFC", "UBNK", "UFCS", "UIHC", "UBFO", "UNTY", "UVSP", "VALU", "VEAC", "VEACU", "VEACW", "VBTX", "VCTR", "VBFC", "VIRT", "VRTS", "VRTSP", "WAFD", "WAFDW", "WASH", "WSBF", "WCFB", "WEBK", "WSBC", "WTBA", "WABC", "WNEB", "WLTW", "WINS", "WTFC", "WTFCM", "WTFCW", "WETF", "WMIH", "WRLD", "WSFS", "WVFC", "YIN", "ZAIS", "ZION", "ZIONW", "ZIONZ"]
# random.shuffle(symbols)
# print(symbols)
def run():
    """Search the first ten ticker symbols on last10k.com and download
    their 10-K filings for 2012."""
    for symbol in symbols[:10]:
        r = requests.get(search_url + symbol, headers = request_headers)
        content = r.text
        soup = BeautifulSoup(content, 'lxml')
        # The 10-K report links live under this element id on the page.
        reports = soup.select('#reportLinksList10K')
        download(reports, '2012')
def download(reports, year):
    """Download every filing in *reports* whose list entry mentions *year*.

    The CIK, accession number, and filename needed to build the document
    URL are read from data attributes on each <li> element; each document
    is saved to the current directory.
    """
    for report in reports:
        infos = report.find_all('li')
        for info in infos:
            if repr(info.get_text()).find(year) > 0:
                data_cik = info['data-cik']
                data_accessionno = info['data-accessionno'].replace('-', '')
                data_filename = info['data-filename'] + '.doc'
                # e.g. https://www.last10k.com/sec-filings/1591890/000138713117001422/pih-10k_123116.htm.doc
                download_url = "https://www.last10k.com/sec-filings/" + data_cik + '/' + data_accessionno + '/' + data_filename
                print(download_url)
                d = requests.get(download_url, headers = request_headers)
                # Strip ".htm" so "x-10k.htm.doc" is saved as "x-10k.doc".
                with open(data_filename.replace('.htm', ''), "wb") as f:
                    f.write(d.content)
if __name__ == '__main__':
    # Kick off the scrape when executed as a script.
    run()
|
996,752 | 1e6fc008f30f27f07817de338124fafa48c6393f | #
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
import lightgbm as lgb
from sklearn.preprocessing import LabelBinarizer
#import seaborn as sns
import scipy.interpolate
import scipy.integrate
from datetime import datetime
from sklearn.model_selection import train_test_split
#from sklearn.model_selection import KFold
from sklearn import model_selection
from PortoSeguro.env import setEnv
from PortoSeguro.gini import gini_xgb
from PortoSeguro.gini import gini_lgb
from PortoSeguro.DataModelClass import DataModelClass
import gc
|
996,753 | d213ad734c815be4ae56c36216f1f97484a2be72 | import datetime
# NOTE: `usage` appears unused but is kept — unseen code may rely on it.
from smtpd import usage

from django.contrib.auth.context_processors import auth
from django.contrib import auth  # deliberate re-bind: the views use django.contrib.auth
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.mail import mail_admins
from django.db.models import F
from django.shortcuts import render, redirect, HttpResponse, get_object_or_404, reverse, get_list_or_404

from djangobin.forms import SnippetForm, ContactForm, LoginForm
from djangobin.models import Language, Snippet, Tag
from djangobin.utils import paginate_result
def index(request):
    """Paste form: create a new snippet, then redirect to its detail page."""
    if request.method=='POST':
        f=SnippetForm(request.POST)
        if f.is_valid():
            # SnippetForm.save takes the request (presumably to attach the
            # current user — confirm in djangobin.forms).
            snippet=f.save(request)
            return redirect(reverse('djangobin:snippet_detail', args=[snippet.slug]))
    else:
        f=SnippetForm()
    # Invalid POSTs fall through and re-render the bound form with errors.
    return render(request, 'djangobin/index.html',{'form':f})
def snippet_detail(request, snippet_slug):
    """Display a snippet and count the view.

    BUG FIX: the original read-modify-write (`snippet.hits += 1; save()`)
    loses counts under concurrent requests and rewrites every field. An
    F() expression increments atomically in the database instead.
    """
    snippet = get_object_or_404(Snippet, slug=snippet_slug)
    Snippet.objects.filter(pk=snippet.pk).update(hits=F('hits') + 1)
    snippet.hits += 1  # keep the in-memory copy consistent for the template
    return render(request, 'djangobin/snippet_detail.html', {'snippet': snippet})
def download_snippet(request, snippet_slug):
    """Serve the snippet's original source as a file download named
    <slug><language extension>."""
    snippet=get_object_or_404(Snippet, slug=snippet_slug)
    file_extension=snippet.language.file_extension
    filename=snippet.slug+file_extension
    res=HttpResponse(snippet.original_code)
    # Attachment disposition forces a download instead of inline display.
    res['content-disposition']='attachment; filename='+filename+';'
    return res
def raw_snippet(request, snippet_slug):
    """Return the snippet source verbatim with its language's MIME type."""
    snippet = get_object_or_404(Snippet, slug=snippet_slug)
    mime_type = snippet.language.mime
    return HttpResponse(snippet.original_code, content_type=mime_type)
def trending_snippets(request, language_slug=''):
    """List public snippets ordered by hit count, optionally filtered to
    one language; paginated five per page."""
    lang=None
    snippets=Snippet.objects
    if language_slug:
        snippets=snippets.filter(language__slug=language_slug)
        lang=get_object_or_404(Language, slug=language_slug)
    snippets=snippets.all()
    # 404 when there is nothing public to show at all.
    snippet_list=get_list_or_404(snippets.filter(exposure='public').order_by('-hits'))
    snippets=paginate_result(request, snippet_list, 5)
    return render(request, 'djangobin/trending.html',{'snippets':snippets, 'lang':lang})
def tag_list(request, tag):
    """List the snippets carrying *tag*, paginated five per page
    (404 when the tag does not exist or has no snippets)."""
    t=get_object_or_404(Tag, name=tag)
    snippet_list=get_list_or_404(t.snippet_set)
    snippets=paginate_result(request, snippet_list, 5)
    return render(request, 'djangobin/tag_list.html', {'snippets':snippets, 'tag':t})
def profile(request, username):
    """Placeholder profile page until real user profiles exist."""
    body = f'<p> Profile page for #{username}</p>'
    return HttpResponse(body)
def contact(request):
    """Feedback form: mail the site admins and confirm with a flash message."""
    if request.method=='POST':
        f=ContactForm(request.POST)
        if f.is_valid():
            name=f.cleaned_data['name']
            subject="You have a new feedback from {}:<{}>".format(name, f.cleaned_data['email'])
            # Resolve the purpose code to its human-readable label.
            message="Purpose: {}\n\nDate: {}\n\nMessage:\n\n {}".format(
                dict(f.purpose_choices).get(f.cleaned_data['purpose']),
                datetime.datetime.now(),
                f.cleaned_data['message']
            )
            mail_admins(subject, message)
            messages.add_message(request, messages.INFO, 'Thanks for submitting your feedback')
            # Redirect after POST so a refresh does not resubmit the form.
            return redirect('djangobin:contact')
    else:
        f=ContactForm()
    return render(request, 'djangobin/contact.html', {'form':f})
def login(request):
    """Log a user in by email + password.

    The email is mapped to a username first because Django's
    authenticate() works on usernames.
    """
    if request.user.is_authenticated:
        return redirect('djangobin:profile', username=request.user.username)
    if request.method=='POST':
        f=LoginForm(request.POST)
        if f.is_valid():
            user=User.objects.filter(email=f.cleaned_data['email'])
            if user:
                user=auth.authenticate(
                    username=user[0].username,
                    password=f.cleaned_data['password'],
                )
                if user:
                    auth.login(request, user)
                    # Honour ?next=... when present, else go to the index.
                    return redirect(request.GET.get('next') or 'djangobin:index')
            # Deliberately vague: does not reveal whether the email exists.
            messages.add_message(request, messages.INFO, 'Invalid email/password.')
            return redirect('djangobin:login')
    else:
        f=LoginForm()
    return render(request, 'djangobin/login.html', {'form':f})
@login_required
def logout(request):
    """End the current session and show the goodbye page."""
    auth.logout(request)
    return render(request, 'djangobin/logout.html')
@login_required
def user_details(request):
    """Show the logged-in user's account details."""
    user=get_object_or_404(User, id=request.user.id)
    return render(request, 'djangobin/user_details.html', {'user':user})
def signup(request):
    """Register a new account with Django's built-in UserCreationForm."""
    if request.user.is_authenticated:
        return redirect('djangobin:profile', username=request.user.username)
    if request.method=='POST':
        f=UserCreationForm(request.POST)
        if f.is_valid():
            f.save()
            messages.success(request, 'Account created successfully')
            # NOTE(review): every other redirect in this file is namespaced
            # ('djangobin:...'); a bare 'signup' may raise NoReverseMatch —
            # confirm against the URLconf.
            return redirect('signup')
    else:
        f=UserCreationForm()
    return render(request, 'djangobin/signup.html', {'form':f})
996,754 | 3045e439a7f350345d7c621d664021d9b0c1bf6f | from __future__ import print_function
import json
import requests
import luigi
import datetime
import re
from slack import *
## WIP - conversations have to be sampled by date, topic or something else
class GenerateChatterbotCorpusFromSlackChannel(luigi.Task):
    """Turn one day of Slack channel history into a ChatterBot corpus file.

    The whole day's messages become a single conversation (no topic or
    time segmentation yet — see the WIP note above).
    """
    channel_name = luigi.Parameter()
    date = luigi.DateParameter(default=datetime.date.today())

    def requires(self):
        # Needs the raw channel history dump for the same channel and day.
        return DownloadSlackChannelHistory(date=self.date, channel_name=self.channel_name)

    def run(self):
        corpus = { "conversations": [] }
        with self.input().open('r') as infile:
            # History lines are tab-separated; [-1:][0] takes the last
            # field, i.e. the message text.
            messages = [line.strip().split('\t')[-1:][0] for line in infile]
        # Reverse the stored order (presumably newest-first from the
        # download — confirm against DownloadSlackChannelHistory).
        messages.reverse()
        corpus["conversations"] = [messages]
        with self.output().open('w') as outfile:
            json.dump(corpus, outfile, sort_keys=True, indent=4, separators=(',', ': '))

    def output(self):
        # One corpus file per channel per day.
        return luigi.LocalTarget(self.date.strftime('data/Chatterbot/Corpus/' + self.channel_name + '_%Y-%m-%d.json'))
|
996,755 | dde93b5db42e1266ba13dce1231ff6f10e4f8919 | # Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import nose.tools
import numpy.testing
import toyplot.format
@given(u'an instance of toyplot.format.DefaultFormatter')
def step_impl(context):
    # Shared fixture: later steps read the formatter off the behave context.
    context.formatter = toyplot.format.DefaultFormatter()
@then(
    u'formatting strings with the toyplot.format.DefaultFormatter should produce valid output')
def step_impl(context):
    # format() returns a (prefix, separator, suffix) triple; a plain string
    # passes through whole as the prefix.
    prefix, separator, suffix = context.formatter.format("1")
    nose.tools.assert_equal(prefix, "1")
    nose.tools.assert_equal(separator, "")
    nose.tools.assert_equal(suffix, "")
@then(
    u'formatting integers with the toyplot.format.DefaultFormatter should produce valid output')
def step_impl(context):
    # Integers also land entirely in the prefix, with no separator/suffix.
    prefix, separator, suffix = context.formatter.format(1)
    nose.tools.assert_equal(prefix, "1")
    nose.tools.assert_equal(separator, "")
    nose.tools.assert_equal(suffix, "")
@given(u'an instance of toyplot.format.FloatFormatter')
def step_impl(context):
    # Shared fixture for the float-formatting steps below.
    context.formatter = toyplot.format.FloatFormatter()
@then(
    u'formatting floats with the toyplot.format.FloatFormatter should produce valid output')
def step_impl(context):
    # Floats split at the decimal point into (prefix, ".", suffix).
    # (Removed an unused `column = numpy.arange(4) + 0.1` local left over
    # from an earlier version of this step.)
    prefix, separator, suffix = context.formatter.format(4.1)
    nose.tools.assert_equal(prefix, "4")
    nose.tools.assert_equal(separator, ".")
    nose.tools.assert_equal(suffix, "1")
@then(
    u'formatting integers with the toyplot.format.FloatFormatter should produce valid output')
def step_impl(context):
    # Integral values get no decimal separator or fractional suffix.
    prefix, separator, suffix = context.formatter.format(1)
    nose.tools.assert_equal(prefix, "1")
    nose.tools.assert_equal(separator, "")
    nose.tools.assert_equal(suffix, "")
@given(u'an instance of toyplot.format.UnitFormatter')
def step_impl(context):
    # Shared fixture for the unit-formatting steps below.
    context.formatter = toyplot.format.UnitFormatter()
@then(
    u'formatting inch units with the toyplot.format.UnitFormatter should produce valid output')
def step_impl(context):
    # Unit formatting adds a fourth element: the abbreviated unit string.
    val = context.formatter.format(12.2, "inches")
    prefix, separator, suffix, units = val
    nose.tools.assert_equal(prefix, "12")
    nose.tools.assert_equal(separator, ".")
    nose.tools.assert_equal(suffix, "2")
    nose.tools.assert_equal(units, "in")
@then(
    u'formatting point units with the toyplot.format.UnitFormatter should produce valid output')
def step_impl(context):
    # "points" abbreviates to "pt".
    val = context.formatter.format(5.1, "points")
    prefix, separator, suffix, units = val
    nose.tools.assert_equal(prefix, "5")
    nose.tools.assert_equal(separator, ".")
    nose.tools.assert_equal(suffix, "1")
    nose.tools.assert_equal(units, "pt")
@given(u'an instance of toyplot.format.CurrencyFormatter')
def step_impl(context):
    # Default to Canadian dollars; some steps below swap in other currencies.
    context.formatter = toyplot.format.CurrencyFormatter(curr="cad")
@then(
    u'formatting Canadian currency with the toyplot.format.CurrencyFormatter should produce valid output')
def step_impl(context):
    # format() returns (currency symbol, integer part, decimal point, cents).
    codes, prefix, dp, suffix = context.formatter.format(100.00)
    nose.tools.assert_equal(codes, "$")
    nose.tools.assert_equal(prefix, "100")
    nose.tools.assert_equal(dp, ".")
    nose.tools.assert_equal(suffix, "00")
@then(
    u'formatting European currency with the toyplot.format.CurrencyFormatter should produce valid output')
def step_impl(context):
    # Replaces the context formatter with a euro-configured instance;
    # thousands are grouped with commas in the integer part.
    context.formatter = toyplot.format.CurrencyFormatter(curr="eur")
    val = context.formatter.format(9000.56)
    codes, prefix, dp, suffix = val
    nose.tools.assert_equal(codes, "€")
    nose.tools.assert_equal(prefix, "9,000")
    nose.tools.assert_equal(dp, ".")
    nose.tools.assert_equal(suffix, "56")
@then(
    u'formatting British currency with the toyplot.format.CurrencyFormatter should produce valid output')
def step_impl(context):
    # Replaces the context formatter with a pounds-sterling instance;
    # note .5 is rendered as two-digit pence ("50").
    context.formatter = toyplot.format.CurrencyFormatter(curr="gbp")
    val = context.formatter.format(23423410.5)
    codes, prefix, dp, suffix = val
    nose.tools.assert_equal(codes, "£")
    nose.tools.assert_equal(prefix, "23,423,410")
    nose.tools.assert_equal(dp, ".")
    nose.tools.assert_equal(suffix, "50")
|
996,756 | 366610851e66fdef07ab80dcd7ee22969df0eaa4 | # Create your models here.
from django.db import models
class serverconnection(models.Model):
    """A survey/form definition stored on the server.

    NOTE(review): the class and field names do not follow Django
    conventions; renaming would require a migration, so they are
    documented as-is.
    """
    PersonID=models.CharField(max_length=255)  # stored as text here
    Nameofsurvey=models.CharField(max_length=255)
    FormContent=models.CharField(max_length=255)
    Formattrndvalues=models.CharField(max_length=255)
class responsemodel(models.Model):
    """A submitted response: who answered which form, and the values."""
    # NOTE(review): PersonID is an integer here but a CharField on
    # serverconnection — confirm which type is intended.
    PersonID=models.IntegerField()
    Formid=models.IntegerField()
    Nameofsurvey=models.CharField(max_length=255)
    Formattrndvalues=models.CharField(max_length=255)
class registeration(models.Model):
    """An account registration record (class name typo kept: renaming
    would require a migration).

    NOTE(review): Password is a plain CharField, i.e. stored in cleartext —
    consider Django's auth password hashing instead.
    """
    Position=models.CharField(max_length=255)
    Email=models.CharField(max_length=255)
    Password=models.CharField(max_length=255)
996,757 | a526e1ad8d0b90d9a6dbf04ac7ce3489c2c6af8a | import os.path as osp
import mmcv
from mmcv.runner.utils import obj_from_dict
from mmcv.runner.hooks import (Hook, LrUpdaterHook, CheckpointHook, IterTimerHook,
OptimizerHook, lr_updater)
from mmcv_custom import lr_updater as lr_updater_custom
from mmcv.runner.checkpoint import save_checkpoint
import torch
from .parameters import parameters
class Runner(mmcv.runner.Runner):
    """A training helper for PyTorch.

    Custom version of mmcv runner, overwrite init_optimizer method
    """

    def init_optimizer(self, optimizer):
        """Init the optimizer.

        Args:
            optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
                optimizer object or a dict used for constructing the optimizer.

        Returns:
            :obj:`~torch.optim.Optimizer`: An optimizer object.

        Examples:
            >>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
            >>> type(runner.init_optimizer(optimizer))
            <class 'torch.optim.sgd.SGD'>
        """
        if isinstance(optimizer, dict):
            # Unlike stock mmcv, parameter groups come from the custom
            # parameters() helper (e.g. per-layer learning rates).
            # NOTE(review): `optimizer.lr` attribute access on a dict relies on
            # this being an mmcv/addict Config dict, not a plain dict — confirm.
            optimizer = obj_from_dict(
                optimizer, torch.optim, dict(params=parameters(self.model, optimizer.lr)))
        elif not isinstance(optimizer, torch.optim.Optimizer):
            raise TypeError(
                'optimizer must be either an Optimizer object or a dict, '
                'but got {}'.format(type(optimizer)))
        return optimizer

    def resume(self, checkpoint, resume_optimizer=True,
               map_location='default'):
        # Restore model weights, epoch/iter counters and (optionally) the
        # optimizer state from a checkpoint file or link.
        if map_location == 'default':
            # Map tensors onto the GPU this process currently owns.
            device_id = torch.cuda.current_device()
            checkpoint = self.load_checkpoint(
                checkpoint,
                map_location=lambda storage, loc: storage.cuda(device_id))
        else:
            checkpoint = self.load_checkpoint(
                checkpoint, map_location=map_location)
        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        if 'optimizer' in checkpoint and resume_optimizer:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)

    def auto_resume(self):
        # Resume from the 'latest.pth' symlink in the work dir when present.
        linkname = osp.join(self.work_dir, 'latest.pth')
        if osp.exists(linkname):
            self.logger.info('latest checkpoint found')
            self.resume(linkname)

    def register_lr_hooks(self, lr_config):
        # Same as mmcv's version, but also searches mmcv_custom.lr_updater for
        # hook classes, so custom LR policies can be named in configs.
        if isinstance(lr_config, LrUpdaterHook):
            self.register_hook(lr_config)
        elif isinstance(lr_config, dict):
            assert 'policy' in lr_config
            # from .hooks import lr_updater
            # 'step' -> 'StepLrUpdaterHook', etc.
            hook_name = lr_config['policy'].title() + 'LrUpdaterHook'
            if hasattr(lr_updater, hook_name):
                hook_cls = getattr(lr_updater, hook_name)
            elif hasattr(lr_updater_custom, hook_name):
                hook_cls = getattr(lr_updater_custom, hook_name)
            else:
                raise ValueError('"{}" does not exist'.format(hook_name))
            self.register_hook(hook_cls(**lr_config))
        else:
            raise TypeError('"lr_config" must be either a LrUpdaterHook object'
                            ' or dict, not {}'.format(type(lr_config)))

    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl='epoch_{}.pth',
                        save_optimizer=True,
                        meta=None):
        # Write model (and optionally optimizer) state; epoch is stored 1-based
        # so that resuming continues with the *next* epoch.
        if meta is None:
            meta = dict(epoch=self.epoch + 1, iter=self.iter)
        else:
            meta.update(epoch=self.epoch + 1, iter=self.iter)
        filename = filename_tmpl.format(self.epoch + 1)
        filepath = osp.join(out_dir, filename)
        optimizer = self.optimizer if save_optimizer else None
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
|
996,758 | 16f4be5cba9708a248097f3c1972bd1754a060e4 |
from PyQt4 import QtCore, QtGui
from ui_MainWindow import Ui_MainWindow
class MainWindow(QtGui.QMainWindow):
    """Top-level application window that wires up the Designer-generated UI.

    Bug fixes versus the original:
    - ``QtGui.QMainwindow`` -> ``QtGui.QMainWindow`` (the misspelled name
      raises AttributeError at class-creation time),
    - ``selfself`` -> ``self`` (the method had no ``self`` binding),
    - the Qt base-class ``__init__`` was never called, so the widget was
      never initialised,
    - ``ui_MainWindow`` -> ``Ui_MainWindow`` (the class actually imported).
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
|
996,759 | 8e152c01717b81d27be36dfe0be20eb63e7b1ec1 | from flask import Blueprint
# Blueprint holding the app's primary routes and error handlers.
main = Blueprint('main', __name__)
# Imported at the bottom on purpose: views/errors import `main` back from this
# module, so importing them before `main` exists would be a circular import.
from app.main import views, errors
|
996,760 | b51da3ed35bcff6d743982b241da6c0892be0952 | # Elad Dolev / TomeRater Project
# ------------------------------
#
# A good practice — one I already follow in my QA work, where I write scripts to
# maintain my VMs and environments — would be to keep each class in a separate file.
# However, I kept getting errors when splitting this project up, so I decided it
# was not worth the debugging effort here.
class Book(object):
    """A rateable book identified by title and ISBN, with an optional price."""

    def __init__(self, title, isbn, price=0.0):
        self.title = title
        self.isbn = isbn
        self.ratings = []
        self.price = price  # My addition

    def __hash__(self):
        # Hash on the identity fields only, mirroring __eq__.
        return hash((self.title, self.isbn))

    def get_title(self):
        """Return the book's title."""
        return self.title

    def get_isbn(self):
        """Return the book's ISBN."""
        return self.isbn

    def set_isbn(self, new_ISBN):
        """Replace the ISBN and report the change."""
        self.isbn = new_ISBN
        print("The ISBN of the book {} has been updated to {}".format(self.title, self.isbn))

    def get_price(self):
        """Return the book's price."""
        return self.price

    def add_rating(self, rating):
        """Record a rating; only values strictly between 0 and 4 are kept."""
        if not rating:
            # None (and the falsy 0) are silently ignored, as before.
            return
        if 0 < rating < 4:
            self.ratings.append(rating)
        else:
            print("Invalid Rating")

    def __eq__(self, other_book):
        # Two books are the same when both title and ISBN match.
        return self.title == other_book.title and self.isbn == other_book.isbn

    def __repr__(self):
        return self.title

    def get_average_rating(self):
        """Mean of recorded ratings, or 0 when the book has none."""
        if not self.ratings:
            return 0
        return sum(self.ratings) / len(self.ratings)
class User(object):
    """A reader identified by name and email, tracking read books and ratings."""

    def __init__(self, name, email):
        self.name = name
        self.email = email
        # Maps book -> rating (None when the book was read but not rated).
        self.books = {}

    # Return email of this user
    def get_email(self):
        """Return this user's email address."""
        return self.email

    # Change email of this user
    def change_email(self, address):
        """Update the email address and report the change."""
        self.email = address
        print("The user {} email has been updated to {}".format(self.name, self.email))

    # Representation of the user
    def __repr__(self):
        return ("The user: {}, with email: {}, has {} books read".format(self.name, self.email, len(self.books)))

    # Test if equal is same as another email user
    def __eq__(self, other_user):
        """Users are equal when both name and email match."""
        return self.name == other_user.name and self.email == other_user.email

    # Method to call when user has read a book, Rating is Optional
    def read_book(self, book, rating=None):
        """Record that this user read *book*, optionally with a rating."""
        self.books[book] = rating

    # Method to get Average Rating of Books read and rated by User
    def get_average_rating(self):
        """Average of this user's (truthy) ratings; 0 when nothing was rated.

        Bug fix: the original divided unconditionally, raising
        ZeroDivisionError for a user with no rated books —
        Book.get_average_rating already guards the equivalent case.
        """
        rated = [rtg for rtg in self.books.values() if rtg]
        if not rated:
            return 0
        return sum(rated) / len(rated)
class Fiction(Book):
    """A fiction book with an author.

    Bug fix: the constructor accepted ``price`` but never forwarded it to
    ``Book.__init__`` (unlike NonFiction), so every Fiction silently kept the
    default price of 0.0. The argument is now passed through.
    """

    def __init__(self, title, author, isbn, price):
        super().__init__(title, isbn, price)
        self.author = author

    def get_author(self):
        """Return the author's name."""
        return self.author

    def __repr__(self):
        return ("{} by {}".format(self.title, self.author))
class NonFiction(Book):
    """A non-fiction manual described by subject and difficulty level."""

    def __init__(self, title, subject, level, isbn, price):
        super().__init__(title, isbn, price)
        self.subject = subject
        self.level = level

    def get_subject(self):
        """Return the manual's subject."""
        return self.subject

    def get_level(self):
        """Return the manual's difficulty level."""
        return self.level

    def __repr__(self):
        parts = (self.title, self.level, self.subject)
        return "{}, a {} manual on {} for ${price}".format(*parts, price=self.price)
class TomeRater(object):
    """Registry tying Users (by email) to the Books they have read and rated.

    Bug fixes versus the original:
    - ``__eq__`` referenced the undefined name ``other_raters`` (NameError),
    - ``create_book`` dropped its ``price`` argument, so every plain Book was
      created with the default price 0.0,
    - in ``add_user``, ``valid_ext`` was only assigned inside the ``try``
      block, so a failure there made the later ``elif valid_ext == False``
      raise NameError.
    """

    def validate_email(self, email):
        """Loose email check: contains '@' and ends in a known 4-char TLD.

        NOTE(review): this list accepts '.de' while add_user's own check only
        accepts com/edu/org — the inconsistency is preserved as-is.
        """
        if "@" in email and email[-4:] in [".com", ".edu", ".org", ".de"]:
            return True
        else:
            return False

    def __init__(self):
        # email -> User
        self.users = {}
        # Book -> number of times it has been read
        self.books = {}

    def __repr__(self):
        return "TomeRater {} and {}".format(self.users, self.books)

    def __str__(self):
        return "in TomeRater users are {} and books are {}".format(self.users, self.books)

    def __eq__(self, other_rater):
        # Bug fix: the original compared against the undefined 'other_raters'.
        return self.users == other_rater.users and self.books == other_rater.books

    def create_book(self, title, isbn, price):
        """Create a plain Book. Bug fix: price is now forwarded."""
        return Book(title, isbn, price)

    def create_novel(self, title, author, isbn, price):
        """Create a Fiction book."""
        return Fiction(title, author, isbn, price)

    def create_non_fiction(self, title, subject, level, isbn, price):
        """Create a NonFiction book."""
        return NonFiction(title, subject, level, isbn, price)

    def add_book_to_user(self, book, email, rating=None):
        """Record that the user with *email* read *book*, with optional rating."""
        user = self.users.get(email, None)
        if user:
            user.read_book(book, rating)
            # First reader of this book initialises its read counter.
            if book not in self.books:
                self.books[book] = 0
            self.books[book] += 1
            book.add_rating(rating)
        else:
            print("No user with email " + email)

    def add_user(self, name, email, user_books=None):
        """Register a new user after validating the email; optionally record books."""
        new_user = User(name, email)
        # Workaround: only when all checks pass is proceed set to True.
        proceed = False
        # Extension check via split('.') against the allowed list below.
        valid_email_extensions = ["com", "edu", "org"]
        # Bug fix: initialise before the try block so a failure inside it
        # cannot leave the name undefined.
        valid_ext = False
        try:
            ext = (email.split(".")[-1])
            for item in valid_email_extensions:
                if ext == item:
                    valid_ext = True
        except Exception:
            print("Something is wrong with Email Format.")
        if self.users.get(email):
            print ("The User with email address {} already exists.\n".format(email))
        elif "@" not in email:
            print("Missing @: Email address {} is not valid. Try again. \n".format(email))
        elif valid_ext == False:
            print("User typed: {} - Email must end in .com, .edu or .org.\n".format(email))
        else:
            proceed = True
        if proceed == True:
            self.users.update({email: new_user})
            print("User {} with email {} added successfully!\n".format(name, email))
            if user_books is not None:
                for book in user_books:
                    self.add_book_to_user(book, email, rating=None)

    def get_Investment_of_user(self, user_email):
        """Total value (sum of prices) of all books read by one user."""
        worth = 0
        user = self.users[user_email]
        for book in user.books:
            worth += book.price
        return "Total Value owned by user: {0}: ${1:.2f}".format(user_email, worth)

    def print_catalog(self):
        """Print every known book."""
        for item in self.books:
            print(item)

    def print_users(self):
        """Print every registered user."""
        for user in self.users.values():
            print(user)

    def most_read_book(self):
        """Print and return the book with the highest read count (None if empty)."""
        mostread = None
        highest_read_count = 0
        for book, value in self.books.items():
            if value > highest_read_count:
                mostread = book
                highest_read_count = value
        print("The most read book is: {} with {} reads!".format(mostread, highest_read_count))
        return mostread

    def highest_rated_book(self):
        """Return the book with the best average rating (None if empty)."""
        high_rtg = 0
        high_rtd_book = None
        for book in self.books:
            bookavgrtg = book.get_average_rating()
            if bookavgrtg > high_rtg:
                high_rtg = bookavgrtg
                high_rtd_book = book
        return high_rtd_book

    def most_positive_user(self):
        """Return the user with the best average rating (None if empty)."""
        high_rtg = 0
        posit_user = None
        for user in self.users.values():
            useravgrtg = user.get_average_rating()
            if useravgrtg > high_rtg:
                high_rtg = useravgrtg
                posit_user = user
        return posit_user

    def spacing(self):  # Just simple line spacing, used in populate.py
        print("")
        print("")
|
996,761 | 709d2529a645ac95b66c7afe34f94b96a52bceb5 | import urllib.request
import sys
from bs4 import BeautifulSoup
def scrapeSite(url):
    """Download *url* and return the page parsed as a BeautifulSoup tree."""
    page = urllib.request.urlopen(url)
    return BeautifulSoup(page, "html.parser")
def getCitations(data):
    """Return the citation count from a parsed article page, or 0 when absent."""
    node = data.find(id='citations-count-number')
    return 0 if node is None else node.contents[0]
def getDownloads(data):
    """Return the download/view count from a parsed article page, or 0 when absent."""
    node = data.find(class_="article-metrics__views")
    return 0 if node is None else node.contents[0]
def getKeywords(data):
    """Extract the keyword list embedded in the page's third <script> tag."""
    keyRes = data.findAll('script')[2]
    if keyRes is None:
        keywords = "None"
    else :
        contents = keyRes.contents[0]
        # str.find returns -1 when 'Keywords' is absent, so startIndex == 7
        # (-1 + len('Keywords')) doubles as the "not found" sentinel below.
        startIndex = contents.find('Keywords') + len('Keywords')
        if (startIndex == 7):
            keywords = "None"
        else:
            endIndex = contents.find('Country')
            # NOTE(review): the +3/-1 offsets and the trailing 2-char trim
            # assume a fixed layout such as Keywords":"...","Country in the
            # script's JSON — confirm against the live page markup.
            keywords = contents[startIndex+3:endIndex-1]
            keywords = keywords.rstrip()
            keywords = keywords[0:len(keywords)-2]
    return keywords
def getJournalData(journalLink):
    """Fetch a journal page and return its impact factor (1.0 when not listed).

    Bug fix: the original used ``label is "Impact Factor"``. ``is`` compares
    object identity, and a string built at runtime from page content is never
    the same object as a literal, so the branch could never match and the real
    impact factor was always discarded in favour of the 1.0 fallback.
    """
    data = scrapeSite(journalLink)
    imprData = (data.find(class_="ListStack ListStack--float")).findAll('span')
    label = str(imprData[0].contents[0])
    if label == "Impact Factor":
        impactRes = float(imprData[1].contents[0])
    else:
        impactRes = 1.0
    return impactRes
|
996,762 | a1674cc2220b7ee90d5c3e6cba8994e5c3cbc00e | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 12:49:18 2020
@author: Win
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
import matplotlib.gridspec as gridspec
import itertools
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
warnings.filterwarnings("ignore")
# Import the dataset (Excel workbook)
xls = pd.ExcelFile("./Dataset_modified.xlsx")
# Map each sheet name to its DataFrame (first two rows are headers/skipped)
sheet_to_df_map = {}
for sheet_name in xls.sheet_names:
    sheet_to_df_map[sheet_name] = xls.parse(sheet_name, skiprows=2, index_col=None)
print ('dolazi do skipa')
sheet_to_df_map['rel.vlaznost'].fillna(0, inplace = True ) # replace NaN with zeros
# take every third year of the data
print ('dolazi do zamjene nan-a sa nulama')
# NOTE(review): X spans iloc columns 2:15 (i.e. 2..14) while Y is iloc column
# 14, so the last feature column is also the target — this looks like label
# leakage; confirm the intended column split.
X = sheet_to_df_map['rel.vlaznost'].iloc[:, 2:15].values
print ('dolazi do pakovanja X-a')
# take the target column
Y = sheet_to_df_map['rel.vlaznost'].iloc[:, 14].values
print ('dolazi pakovanja Y-a')
for num in X:
    print(num)
print("KRAJ IKSA")
for num in Y:
    print(num)
print("KRAJ IPSILONA")
#Xmultiplied = []
#Ymultiplied = []
#for num in X:
#    Xmultiplied.append( num * 10 )
#for iks in Xmultiplied:
#    print(iks)
#    print("KRAJ REDA")
#for num in Y:
#    Ymultiplied.append (num * 10 )
#for iks in Ymultiplied:
#    print(iks)
#    print("KRAJ REDA IPSILON")
# The four base classifiers compared below.
clf1 = LogisticRegression(random_state = 1)
clf2 = RandomForestClassifier(random_state = 1)
clf3 = GaussianNB()
clf4 = MultinomialNB()
print ('2-fold cross validation: \n')
labels = ['Logistic regression', 'Random forest', 'Gausian Naive Bayes', 'Multinomial Naive Bayes']
print ('prolazi labele')
for clf, label in zip([clf1, clf2, clf3, clf4], labels):
    print ('upada u petlju')
    scores = model_selection.cross_val_score(clf, X, Y, cv = 2, scoring = 'accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
# std standard deviation along the specified axis
# hard-voting ensemble (majority vote); MultinomialNB is deliberately excluded
voting_clf_hard = VotingClassifier(estimators = [(labels[0], clf1),
                                                 (labels[1], clf2),
                                                 (labels[2],clf3)], voting = 'hard')
# soft-voting ensemble (averaged predicted probabilities)
voting_clf_soft = VotingClassifier(estimators = [(labels[0], clf1),
                                                 (labels[1], clf2),
                                                 (labels[2],clf3)], voting = 'soft')
labels_new = ['Logistic Regression', 'Random forest', 'Naive bayes', 'Voting Classifier Hard', 'Voting Classifier Soft']
for (clf, label) in zip([clf1,clf2,clf3, voting_clf_hard,voting_clf_soft], labels_new):
    scores = model_selection.cross_val_score(clf, X, Y, cv = 2, scoring = 'accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
# Fit each base classifier on the full data for the comparison plot below.
clf1.fit(X, Y)
clf2.fit(X, Y)
clf3.fit(X, Y)
clf4.fit(X, Y)
# Plot the first 20 samples' predictions for each classifier.
XT = X[:20]
print("stize do prije plott-a")
plt.figure()
print("stize posle figure")
plt.plot(clf1.predict(XT), 'gd' , label = 'LogisticRegression')
plt.plot(clf2.predict(XT), 'b^' , label = 'RandomForest')
plt.plot(clf3.predict(XT), 'ys' , label = 'GausianNB')
plt.plot(clf4.predict(XT), 'r*' , label = 'MultinomialNB')
plt.tick_params(axis='x', which='both', bottom=False, top=False,
                labelbottom=False)
plt.ylabel('predicted')
plt.xlabel('training samples')
plt.legend(loc="best")
plt.title('Poredjivanje individualnih predikcija sa prosjekom')
plt.show()
plt.show() |
996,763 | 139e83208185a8ca1104d767202cf454d3816062 | from PIL import Image, ImageFilter
from os import walk
# Directory holding the source images.
mypath = "C:\\Users\\visva\\Desktop\\hand maching\\handPos5"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
for i in f:
    # Fix: reuse mypath instead of repeating the hard-coded directory literal,
    # so the two paths cannot drift apart.
    image = Image.open(mypath + "\\" + i)
    # 'L' = 8-bit grayscale.
    gray = image.convert('L')
    imageQuality = 5
    # Fix: imageQuality is already an int, so the redundant int() cast is
    # dropped. Note the output is written to the current working directory
    # under the original filename, not back into mypath.
    gray.save(i, quality = imageQuality)
996,764 | 5ae066359b0dcc40255f5eb33408efcb40a87795 | from time import time
from django.http import HttpResponseRedirect
from django.utils.deprecation import MiddlewareMixin
from django.core.urlresolvers import reverse
import re
from users.models import UserModel, TicketModel
# class UserMiddleware(MiddlewareMixin):
#
# def process_request(self, request):
#
# path = request.path
# ticket = request.COOKIES.get('ticket')
# if (path == '/mine/mine/' or path == '/quickbuy/market/') and not request.user.id and not ticket:
# return None
#
# re_patterns = (r'^/user/*', r'^/homepage/*', r'^/quickbuy/market/.+')
#
# for patt in re_patterns:
# if re.match(patt, path):
# return None
#
# if not ticket:
# return HttpResponseRedirect(reverse('user:login', kwargs={'r_path': path}))
#
# tm = TicketModel.objects.filter(ticket=ticket)
# if tm:
# if tm.first().create_time + 3600 < int(time()):
# return HttpResponseRedirect(reverse('user:login', kwargs={'r_path': path}))
# else:
# return HttpResponseRedirect(reverse('user:login', kwargs={'r_path': path}))
#
# request.user = tm.first().user
class UserMiddleware(MiddlewareMixin):
    """Attach the logged-in user to the request based on a 'ticket' cookie."""
    def process_request(self, request):
        # path is read but unused — left over from the stricter, commented-out
        # variant above that redirected to login.
        path = request.path
        ticket = request.COOKIES.get('ticket')
        if not ticket:
            pass
        else:
            tm = TicketModel.objects.filter(ticket=ticket)
            if tm:
                # Tickets expire after one hour; an expired or unknown ticket
                # is silently ignored (request.user is simply left untouched).
                if tm.first().create_time + 3600 < int(time()):
                    pass
                else:
                    request.user = tm[0].user
|
996,765 | c9b9a0e14af74c648545477b5f1c0995a8d97b79 | """
Utility functions for various tasks.
"""
from warnings import warn
from json import JSONEncoder
from typing import List
from math import inf
import numpy as np
_user_element_to_Z = {}
_user_Z_to_element = {}
def inject_user_definition(element: str, Z: int):
    """Allow user-defined element. The definition
    will override the default ones from the periodic table.

    Example:

    >>> import flare.utils
    >>> import flare.utils.element_coder as ec
    >>> ec.inject_user_definition('C1', 6)
    >>> ec.inject_user_definition('C2', 7)
    >>> ec.inject_user_definition('H1', 1)
    >>> ec.inject_user_definition('H2', 2)

    This block should be executed before any other
    flare modules are imported. And user has to
    be very careful to not let Z overlap with other
    elements in the system

    :param element: string symbol of the element
    :type element: str
    :param Z: corresponding Z
    :type Z: int
    """
    # Registered in both directions so element_to_Z and Z_to_element agree.
    _user_element_to_Z[element] = Z
    _user_Z_to_element[Z] = element
# Dictionary mapping elements to their atomic number (Z)
_element_to_Z = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
"Na": 11,
"Mg": 12,
"Al": 13,
"Si": 14,
"P": 15,
"S": 16,
"Cl": 17,
"Ar": 18,
"K": 19,
"Ca": 20,
"Sc": 21,
"Ti": 22,
"V": 23,
"Cr": 24,
"Mn": 25,
"Fe": 26,
"Co": 27,
"Ni": 28,
"Cu": 29,
"Zn": 30,
"Ga": 31,
"Ge": 32,
"As": 33,
"Se": 34,
"Br": 35,
"Kr": 36,
"Rb": 37,
"Sr": 38,
"Y": 39,
"Zr": 40,
"Nb": 41,
"Mo": 42,
"Tc": 43,
"Ru": 44,
"Rh": 45,
"Pd": 46,
"Ag": 47,
"Cd": 48,
"In": 49,
"Sn": 50,
"Sb": 51,
"Te": 52,
"I": 53,
"Xe": 54,
"Cs": 55,
"Ba": 56,
"La": 57,
"Ce": 58,
"Pr": 59,
"Nd": 60,
"Pm": 61,
"Sm": 62,
"Eu": 63,
"Gd": 64,
"Tb": 65,
"Dy": 66,
"Ho": 67,
"Er": 68,
"Tm": 69,
"Yb": 70,
"Lu": 71,
"Hf": 72,
"Ta": 73,
"W": 74,
"Re": 75,
"Os": 76,
"Ir": 77,
"Pt": 78,
"Au": 79,
"Hg": 80,
"Tl": 81,
"Pb": 82,
"Bi": 83,
"Po": 84,
"At": 85,
"Rn": 86,
"Fr": 87,
"Ra": 88,
"Ac": 89,
"Th": 90,
"Pa": 91,
"U": 92,
"Np": 93,
"Pu": 94,
"Am": 95,
"Cm": 96,
"Bk": 97,
"Cf": 98,
"Es": 99,
"Fm": 100,
"Md": 101,
"No": 102,
"Lr": 103,
"Rf": 104,
"Db": 105,
"Sg": 106,
"Bh": 107,
"Hs": 108,
"Mt": 109,
"Ds": 110,
"Rg": 111,
"Cn": 112,
"Nh": 113,
"Fl": 114,
"Mc": 115,
"Lv": 116,
"Ts": 117,
"Og": 118,
}
# Define inverse mapping
_Z_to_element = {z: elt for elt, z in _element_to_Z.items()}
def element_to_Z(element: str) -> int:
    """
    Returns the atomic number Z associated with an elements 1-2 letter name.
    Returns the same integer if an integer is passed in.

    :param element: element symbol (e.g. "C"), or an int / numeric string Z
    :return: atomic number as an int (0 when the symbol is unknown)
    """
    # User-injected definitions take precedence over the periodic table.
    if element in _user_element_to_Z:
        return _user_element_to_Z[element]
    # If already integer, do nothing
    if isinstance(element, (int, np.integer)):
        return element
    if type(element).__module__ == "numpy" and np.issubdtype(type(element), np.integer):
        return element
    # If a string-casted integer, do nothing
    if isinstance(element, str) and element.isnumeric():
        return int(element)
    # Check that a valid element was passed in then return
    if _element_to_Z.get(element, None) is None:
        # Bug fix: the original wrote {{element}} inside an f-string, which
        # renders the literal text "{element}" instead of the offending symbol.
        warn(
            f"Element as specified not found in list of element-Z mappings. "
            f"If you would like to specify a custom element, use an integer"
            f" of your choosing instead. Setting element {element} to intege"
            f"r 0"
        )
    return _element_to_Z.get(element, 0)
class NumpyEncoder(JSONEncoder):
    """
    Special json encoder for numpy types for serialization

    use as

    json.loads(... cls = NumpyEncoder)

    or:

    json.dumps(... cls = NumpyEncoder)

    Thanks to StackOverflow users karlB and fnunnari, who contributed this from:
    `https://stackoverflow.com/a/47626762`
    """

    def default(self, obj):
        """Convert numpy scalars/arrays into built-in types JSON understands."""
        # Fix: match on numpy's abstract scalar base classes instead of
        # enumerating concrete aliases. This covers every type the original
        # tuple listed and keeps working on NumPy 2.0, where removed aliases
        # such as np.float_ made the old tuple raise AttributeError as soon as
        # default() ran.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else falls through to the base class (which raises TypeError).
        return JSONEncoder.default(self, obj)
def Z_to_element(Z: int) -> str:
    """
    Maps atomic numbers Z to element name, e.g. 1->"H".

    :param Z: Atomic number corresponding to element.
    :return: One or two-letter name of element.
    """
    # User-injected definitions take precedence over the periodic table.
    if Z in _user_Z_to_element:
        return _user_Z_to_element[Z]
    # Check proper formatting
    if isinstance(Z, str):
        # Accept numeric strings ("6"); reject everything else.
        if Z.isnumeric():
            Z = int(Z)
        else:
            raise ValueError("Input Z is not a number. It should be an integer")
    return _Z_to_element[Z]
_Z_to_mass = {
1: 1.0079,
2: 4.0026,
3: 6.941,
4: 9.0122,
5: 10.811,
6: 12.0107,
7: 14.0067,
8: 15.9994,
9: 18.9984,
10: 20.1797,
11: 22.9897,
12: 24.305,
13: 26.9815,
14: 28.0855,
15: 30.9738,
16: 32.065,
17: 35.453,
19: 39.0983,
18: 39.948,
20: 40.078,
21: 44.9559,
22: 47.867,
23: 50.9415,
24: 51.9961,
25: 54.938,
26: 55.845,
28: 58.6934,
27: 58.9332,
29: 63.546,
30: 65.39,
31: 69.723,
32: 72.64,
33: 74.9216,
34: 78.96,
35: 79.904,
36: 83.8,
37: 85.4678,
38: 87.62,
39: 88.9059,
40: 91.224,
41: 92.9064,
42: 95.94,
43: 98,
44: 101.07,
45: 102.9055,
46: 106.42,
47: 107.8682,
48: 112.411,
49: 114.818,
50: 118.71,
51: 121.76,
53: 126.9045,
52: 127.6,
54: 131.293,
55: 132.9055,
56: 137.327,
57: 138.9055,
58: 140.116,
59: 140.9077,
60: 144.24,
61: 145,
62: 150.36,
63: 151.964,
64: 157.25,
65: 158.9253,
66: 162.5,
67: 164.9303,
68: 167.259,
69: 168.9342,
70: 173.04,
71: 174.967,
72: 178.49,
73: 180.9479,
74: 183.84,
75: 186.207,
76: 190.23,
77: 192.217,
78: 195.078,
79: 196.9665,
80: 200.59,
81: 204.3833,
82: 207.2,
83: 208.9804,
84: 209,
85: 210,
86: 222,
87: 223,
88: 226,
89: 227,
91: 231.0359,
90: 232.0381,
93: 237,
92: 238.0289,
95: 243,
94: 244,
96: 247,
97: 247,
98: 251,
99: 252,
100: 257,
101: 258,
102: 259,
104: 261,
103: 262,
105: 262,
107: 264,
106: 266,
109: 268,
111: 272,
108: 277,
}
|
996,766 | 003baea96dbce6430bb5dd04f28c0f2a2d8cfaad | # 20.1 1부터 100까지 숫자 출력하기 ------------------------------------------------------------
# FizzBuzz는 매우 간단한 프로그래밍 문제이며 규칙은 다음과 같습니다.
# 1에서 100까지 출력
# 3의 배수는 Fizz 출력
# 5의 배수는 Buzz 출력
# 3과 5의 공배수는 FizzBuzz 출력
for i in range(1, 101):
if i % 15 == 0:
print('FizzBuzz')
elif i % 3 == 0:
print('Fizz')
elif i % 5 == 0:
print('Buzz')
else:
print(i)
# 코드 단축하기 ***
for i in range(1, 101):
print('Fizz' * (i % 3 == 0) + 'Buzz' * (i % 5 == 0) or i)
# 문자열 곱셈과 덧셈을 이용하여 print 안에서 처리
# 'Fizz' * True
# 20.7 Exercise: multiples of 2 and 11, and their common multiples -------------------------
for i in range(1, 101):
    if i % 22 == 0:
        print('FizzBuzz')
    elif i % 2 == 0:
        print('Fizz')
    elif i % 11 == 0:
        print('Buzz')
    else:
        print(i)
# Simplified version
for i in range(1, 101):
    # Bug fix: the original used bitwise '&' — 'i & 2 == 0' parses as
    # 'i & (2 == 0)' == 'i & 0' == 0, so 'Fizz' was never printed and the
    # output disagreed with the if/elif version above. '%' restores parity.
    print('Fizz' * (i % 2 == 0) + 'Buzz' * (i % 11 == 0) or i)
# 20.8 Graded exercise ----------------------------------------------------------------------
# Two integers are read from standard input
# (the first is in 1..1000, the second in 10..1000, and the
# first is always smaller than the second).
# Print the numbers from the first to the second, printing
# 'Fizz' for multiples of 5, 'Buzz' for multiples of 7, and
# 'FizzBuzz' for common multiples of 5 and 7 (input() must show no prompt string).
start, stop = map(int, input().split())
for i in range(start, stop+1):
    if i % 35 == 0:
        print('FizzBuzz')
    elif i % 5 == 0:
        print('Fizz')
    elif i % 7 ==0:
        print('Buzz')
    else:
        print(i)
# Simplified version
for i in range(start, stop+1):
    print('Fizz' * (i % 5 == 0) + 'Buzz' * (i % 7 == 0) or i)
996,767 | 363e5b39dacfc0c1a8ba5df3b063722846f5fd99 | # -*- coding: utf-8 -*-
'''
Created on 20170107
@author: yangweijia
'''
from base import Page
import time
class Element(Page):
    """Small page-object helper wrapping common WebDriver actions."""
    def go_to_url(self,url):
        # Navigate and give the page a fixed 2 s to settle (no explicit waits).
        self.driver.get(url)
        time.sleep(2)
    def max_wind(self):
        # Maximise the browser window.
        self.driver.maximize_window()
        time.sleep(1)
996,768 | 8633d9811248b0ec06fe7ee3315ed88c5c001111 | import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
get_ipython().run_line_magic('matplotlib', 'inline')
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes=load_dataset()
# Normalise pixel values into [0, 1].
X_train=X_train_orig/255
X_test=X_test_orig/255
# Labels arrive as row vectors; transpose to shape (m, 1).
Y_train=Y_train_orig.T
Y_test=Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
def model(input_shape):
    """Build the baseline CNN: ZeroPad -> Conv(32, 7x7) -> BN -> ReLU -> MaxPool -> FC(sigmoid).

    Fix: this function was a line-for-line duplicate of HappyModel below; it
    now delegates to it so there is a single architecture definition to
    maintain. The returned Keras Model is unchanged.
    """
    return HappyModel(input_shape)
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Arguments:
    input_shape -- shape of the images of the dataset (height, width, channels)

    Returns:
    model -- a Model() instance in Keras
    """
    # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
    X_input=Input(input_shape)
    # Zero-Padding: pads the border of X_input with zeroes
    X=ZeroPadding2D((3,3))(X_input)
    # CONV -> BN -> RELU Block applied to X
    X=Conv2D(32,(7,7),strides=(1,1),name="conv0")(X)
    # axis=3: normalise over the channels dimension (channels_last format).
    X=BatchNormalization(axis=3,name="bn0")(X)
    X=Activation("relu")(X)
    #maxpool
    X=MaxPooling2D((2,2),name="max_pool")(X)
    # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
    X=Flatten()(X)
    # Single sigmoid unit: binary happy/not-happy classification.
    X=Dense(1,activation="sigmoid",name="fc")(X)
    # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
    model=Model(inputs=X_input,outputs=X,name="HappyModel")
    return model
# Build, compile and train the model, then evaluate on the held-out test set.
happyModel=HappyModel(X_train.shape[1:])
happyModel.compile("adam","binary_crossentropy",metrics=["accuracy"])
happyModel.fit(X_train,Y_train,batch_size=50,epochs=40)
preds=happyModel.evaluate(X_test,Y_test,batch_size=32,verbose=1,sample_weight=None) # Verbosity mode. 0 = silent, 1=progress bar.
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# Run a single custom image through the trained model.
img_path = 'images/Test_1.jpg'
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
# Add the batch dimension expected by predict().
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
|
996,769 | 7c01992883e745c761df6e7fe402f61cb8ca3942 | # audio-offset-finder
#
# Copyright (c) 2014-22 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import os
import types
from importlib.machinery import ModuleSpec, SourceFileLoader
from importlib.util import spec_from_loader, module_from_spec
from unittest.mock import patch
from io import StringIO
import tempfile
# Function to import code from a file
def import_from_source(name: str, file_path: str) -> types.ModuleType:
    """Load and execute a Python module directly from *file_path* under *name*."""
    loader = SourceFileLoader(name, file_path)
    spec = spec_from_loader(loader.name, loader)
    mod = module_from_spec(spec)
    loader.exec_module(mod)
    return mod
# The CLI under test lives in bin/ with no .py extension, so it cannot be
# imported normally; resolve its path relative to this test file and load it
# via import_from_source above.
script_path: str = os.path.abspath(
    os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "..",
        "bin",
        "audio-offset-finder",
    )
)
tool: types.ModuleType = import_from_source("audio-offset-finder", script_path)
def test_reorder_correlations():
    """reorder_correlations should rotate the array halves (even and odd lengths)."""
    input_array1 = np.array([0, 1, 2, 3])
    np.testing.assert_array_equal(tool.reorder_correlations(input_array1), np.array([2, 3, 0, 1]))
    # Odd length: the larger half moves to the front.
    input_array2 = np.array([0, 1, 2, 3, 4])
    np.testing.assert_array_equal(tool.reorder_correlations(input_array2), np.array([2, 3, 4, 0, 1]))
def test_tool():
    """End-to-end CLI run: offset found, plot written, and bad args exit non-zero."""
    temp_dir = tempfile.TemporaryDirectory()
    plot_file_path = os.path.join(temp_dir.name, "zzz.png")
    args1 = (
        "--find-offset-of tests/audio/timbl_2.mp3 --within tests/audio/timbl_1.mp3 --resolution 160 " "--trim 35 --save-plot "
    ) + plot_file_path
    # Capture stdout so the human-readable report can be asserted on.
    with patch("sys.stdout", new=StringIO()) as fakeStdout:
        tool.main(args1.split())
        output = fakeStdout.getvalue().strip()
    assert output, "audio_offset_finder did not produce any output"
    assert "Offset: 12.26" in output
    assert "score: 28" in output  # Different FFmpeg versions can slightly alter this value, so don't be too strict
    assert os.path.isfile(plot_file_path), "audio_offset_finder did not create a plot file"
    temp_dir.cleanup()
    # Missing --within must exit with a non-zero status.
    args2 = "--find-offset-of tests/audio/timbl_2.mp3"
    with pytest.raises(SystemExit) as error:
        tool.main(args2.split())
    assert error.type == SystemExit
    assert error.value.code > 0, "missing 'within' file"
    # Missing --find-offset-of must exit with a non-zero status.
    args3 = "--within tests/audio/timbl_1.mp3"
    with pytest.raises(SystemExit) as error:
        tool.main(args3.split())
    assert error.type == SystemExit
    assert error.value.code > 0, "missing 'offset-of' file"
def test_json():
    """--json mode must emit a two-key object with offset and score."""
    import json

    args = "--find-offset-of tests/audio/timbl_2.mp3 --within tests/audio/timbl_1.mp3 --resolution 160 " "--trim 35 --json"
    with patch("sys.stdout", new=StringIO()) as fakeStdout:
        tool.main(args.split())
        output = fakeStdout.getvalue().strip()
    json_array = json.loads(output)
    assert len(json_array) == 2
    assert pytest.approx(json_array["time_offset"]) == 12.26
    # Relative tolerance: the score varies slightly across FFmpeg versions.
    assert pytest.approx(json_array["standard_score"], rel=1e-2) == 28.99
|
996,770 | cb9daff6c5281b49231ba7a044ef801818242ed9 | from django.urls import path
from news.views import NewsTemplateView
# No routes wired yet; NewsTemplateView is imported above but unused until a
# path() entry is added here.
urlpatterns = [
]
|
996,771 | b0e3ef491666dfe0fa09c890042da41b18178840 | import datetime
print("datetime library imported from:", datetime.__file__)
import ipaddress
print("ipaddress library imported from:", ipaddress.__file__)
import sys
from pprint import pprint
print("Python's sys.path:")
pprint(sys.path)
|
996,772 | 08b2f51f266a8e50aa1e488d478f53afbf04079d | from pokemons.core.dao import GenericDAO
from pokemon.models import Stat
class StatDAO(GenericDAO):
    """Data-access object for Stat records; CRUD behaviour comes from GenericDAO."""
    def __init__(self):
        super().__init__()
        # NOTE(review): GenericDAO presumably dispatches its queries on
        # self.model — confirm against pokemons.core.dao.
        self.model = Stat
|
996,773 | 0f380b4a55dce29484856ab5cafd5f4bb7142766 | import re
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Post, Tag, Comment
class AuthorSerializer(serializers.ModelSerializer):
    """Read-only representation of a post/comment author."""
    avatar_url = serializers.SerializerMethodField("avatar_url_field")
    def avatar_url_field(self, author):
        # Absolute avatar URLs pass through untouched.
        if re.match(r"^https?://", author.avatar_url):
            return author.avatar_url
        # Relative URLs are made absolute using the current request.
        if "request" in self.context:
            scheme = self.context["request"].scheme
            host = self.context["request"].get_host()
            return scheme + "://" + host + author.avatar_url
        # NOTE(review): with no request in the context, a relative URL falls
        # through and serialises as null — confirm that is intended.
    class Meta:
        model = get_user_model()
        fields = ["username", "name", "avatar_url"]
class PostSerializer(serializers.ModelSerializer):
    """Serializes a Post, including its author and whether the requesting
    user has liked it."""
    author = AuthorSerializer(read_only=True)
    is_like = serializers.SerializerMethodField("is_like_field")
    def is_like_field(self, post):
        # True when the requesting user appears in the post's like set;
        # False when serialized without a request in context.
        if "request" in self.context:
            user = self.context["request"].user
            return post.like_user_set.filter(pk=user.pk).exists()
        return False
    class Meta:
        model = Post
        fields = ['id', 'author', 'created_at', 'photo', 'caption', 'location', 'tag_set', 'is_like']
class CommentSerializer(serializers.ModelSerializer):
    """Serializes a Comment together with its author."""
    author = AuthorSerializer(read_only=True)
    class Meta:
        model = Comment
        fields = ["id", "author", "message", "created_at"]
|
996,774 | b4ae3806f2730f11766fdedab7935c6b92023468 | import io
import os
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
if __name__ == '__main__':
    # Credentials for the Google Cloud client; must point at a service
    # account JSON key file.
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'REPLACE WITH YOUR GCP SERVICE ACCOUNT KEY'
    client = speech.SpeechClient()
    # Transcribe audio_flac/1.flac .. audio_flac/30.flac one by one.
    for i in range(1, 31):
        filename = os.path.join("audio_flac", "%d.flac" % i)
        with io.open(filename, 'rb') as audio_file:
            content = audio_file.read()
        audio = types.RecognitionAudio(content=content)
        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
            sample_rate_hertz=16000,
            language_code='en-US'
        )
        print("===Results for %d.flac===" % i)
        # NOTE(review): positional recognize(config, audio) plus the
        # enums/types modules imported above match the pre-1.0
        # google-cloud-speech API; newer releases removed both. Confirm the
        # pinned library version.
        response = client.recognize(config, audio)
        for result in response.results:
            print('Transcript: {}'.format(result.alternatives[0].transcript))
|
996,775 | 809699400c0ac663dc2e648da138bad312c3d11a | """
926. Flip String to Monotone Increasing
https://leetcode.com/problems/flip-string-to-monotone-increasing/
"""
class Solution:
    def minFlipsMonoIncr(self, S: str) -> int:
        """Return the minimum number of bit flips making S monotone increasing.

        Greedy single pass: count the '1's seen so far; every '0' appearing
        after at least one '1' needs a flip, but the flip count never needs
        to exceed the number of '1's seen (flipping all of those '1's to '0'
        is always an alternative).
        """
        if not S:
            return 0
        seen_ones = 0
        flips = 0
        for bit in S:
            if bit == '1':
                seen_ones += 1
            elif seen_ones:
                # A misplaced '0'; cap by the cost of flipping the ones instead.
                flips = min(flips + 1, seen_ones)
        return flips
"""
left[i] : number of min flips -> s[0] ~ s[i] all 0
right[i] : number of min flips -> s[i] ~ s[n-1] all 1
res = min(left[i-1] + right[i], left[n-1], right[0])
if s[i] == '1':
left[i] = left[i-1] + 1
if s[i] == '0':
right[i] = right[i+1] + 1
"""
class SolutionDP:
    def minFlipsMonoIncr(self, S: str) -> int:
        """Return the minimum number of bit flips making S monotone increasing.

        DP over prefixes:
          dp0[i] -- min flips so S[:i] is valid and ends in '0' (all zeros)
          dp1[i] -- min flips so S[:i] is valid and ends in '1'
        Transitions: dp0[i] = dp0[i-1] (+1 if S[i] == '1');
                     dp1[i] = min(dp0[i-1], dp1[i-1]) (+1 if S[i] == '0').
        """
        dp0 = [0]  # cnts to make S[:i+1] valid and ending with '0' after flip
        dp1 = [0]  # cnts to make S[:i+1] valid and ending with '1' after flip
        for c in S:
            # BUG FIX: was `c is '0'` / `c is '1'` -- identity comparison on
            # string literals only works via CPython interning and raises
            # SyntaxWarning since 3.8; use equality.
            if c == '0':
                dp0.append(dp0[-1])
                # dp0[-2] is dp0 for the previous prefix (dp0 was just extended).
                dp1.append(min(dp0[-2], dp1[-1]) + 1)
            elif c == '1':
                dp0.append(dp0[-1] + 1)
                dp1.append(min(dp0[-2], dp1[-1]))
        return min(dp0[-1], dp1[-1])
|
996,776 | b59c9b27ec2b4d826b754444c39daee83dca0b2c | nums = [
[1, 2, 3],
[4, 5, 6],
]
# Flatten a list of lists (sum with an empty-list start value; fine for
# small inputs, itertools.chain is preferable for large ones).
joined = sum(nums, [])
print(joined)
# Remove duplicates from a list (not optimal performance-wise; note it
# also does not preserve the original order).
unique = [1, 2, 3, 3, 5, 5]
unique = list(set(unique))
print(unique)
# Swap two variables without a temporary.
a, b = 10, 20
print(f'{a} {b}')
a, b = b, a
print(f'{a} {b}')
# Most frequent element in a list.
total = [1, 2, 3, 3, 5, 5, 5]
print(
    max(
        set(total),
        key=total.count
    )
)
# Print the list unpacked, comma-separated, with no trailing newline text.
print(*total, end='', sep=',')
|
996,777 | 809264e068ed0487858c977e22fa92c89fd29d63 | from abc import ABCMeta, abstractmethod, abstractproperty
class Ferramenta(metaclass = ABCMeta):
    """Abstract base class for a power tool.

    Declares name/voltage/price properties whose *setters* are abstract, so
    every concrete subclass must provide its own storage/validation.
    NOTE(review): the getters here are plain `pass` bodies and therefore
    return None unless a subclass redefines the whole property -- confirm
    subclasses override the properties, not only the setters.
    """
    @property
    def nome(self):
        # Tool name.
        pass
    @nome.setter
    @abstractmethod
    def nome(self,valor):
        pass
    @property
    def tensao(self):
        # Operating voltage.
        pass
    @tensao.setter
    @abstractmethod
    def tensao(self,valor):
        pass
    @property
    def preco(self):
        # Price in BRL.
        pass
    @preco.setter
    @abstractmethod
    def preco(self,valor):
        pass
    def getInformacoes(self):
        """Print the tool's name, voltage and price (Portuguese labels)."""
        print(f"Nome: {self.nome}")
        print(f"Tensão: {self.tensao}")
        print(f"Preço: R$ {self.preco}")
    @abstractmethod
    def Cadastrar(self):
        """Register the tool (must be implemented by subclasses)."""
        pass
996,778 | a7dda714bca408b2aa9fa6959cc669a8aa4afc3f | from django.db import models
from django_countries.fields import CountryField
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
    """Custom user model that authenticates by e-mail instead of username."""
    email = models.EmailField(max_length=255, unique=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)  # Instructor
    # Email will be treated as unique identifier instead of 'username'
    USERNAME_FIELD = 'email'
    # Still collected (e.g. by createsuperuser) even though login is by email.
    REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
    def __str__(self):
        return f'{self.first_name} {self.last_name}'
class Profile(models.Model):
    """Extra per-user data attached one-to-one to CustomUser."""
    user = models.OneToOneField(CustomUser,
                                on_delete=models.CASCADE)
    date_of_birth = models.DateField(blank=True, null=True)
    country = CountryField(blank_label='Where are you from?', default='IE')
    # NOTE(review): in strftime, %M is the minute, not the month (%m), so
    # uploads are bucketed by upload minute -- confirm this is intended.
    photo = models.ImageField(upload_to='profile_pic/%Y/%M/%d', blank=True)
    bio = models.CharField(max_length=200, default='')  # A short description
    def __str__(self):
        return f'Profile of {self.user.username}'
|
996,779 | 44deaccbbe9a222ae75149cba92e345033e40fb7 | DRIVER = None
base_url = 'https://demo.opencart.com/'
# Named entry points of the OpenCart demo shop used by the tests.
urls = {
    'main_page': f'{base_url}',
    'login_page': f'{base_url}index.php?route=account/login',
    'admin_login_page': f'{base_url}admin',
    'catalog_page': f'{base_url}index.php?route=product/category&path=20',
    'product_page': f'{base_url}index.php?route=product/product&path=57&product_id=49'
}
|
996,780 | f8c8969d14fb7de0702a29dffbbcba1d6f882847 | # import requests
# import requests.auth
# from config import *
# import json
#
# client_auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
# post_data = {"grant_type": "password", "username": username, "password": password}
# headers = {"User-Agent": "DictionaryBot/0.1 by thewhiteone3"}
# response = requests.post("https://www.reddit.com/api/v1/access_token", auth=client_auth, data=post_data, headers=headers)
# response.json()
# res = json.loads(response.text)
# token = res['access_token']
# print(token)
import praw
from config import *
from praw.models import MoreComments
# Authenticated Reddit client; credentials come from config.py (star-imported above).
r = praw.Reddit(client_id=client_id,
                client_secret=client_secret,
                password=password,
                user_agent=user_agent,
                username=username)
# print(reddit.user.me())
# Print score and title for the top 100 posts of r/science.
posts = r.subreddit('science').top(limit=100)
for x in posts:
    print(x.score, x.title)
# submission = r.submission(id='3g1jfi')
#
# for top_level_comment in submission.comments:
# if isinstance(top_level_comment, MoreComments):
# continue
# print(top_level_comment.body) |
996,781 | 8ceef62f07aaadc6b97b912aec3577c1faacb3cb | from rest_framework import serializers
from item.models import Item
from category.serializers import CategorySerializer
from user.serializers import UserDetailSerializer
from datetime import datetime
class ItemDetailSerializer(serializers.ModelSerializer):
    """Full item representation with nested category and user objects."""
    category = CategorySerializer()
    user = UserDetailSerializer()
    created_date = serializers.SerializerMethodField()
    class Meta:
        model = Item
        fields = ['id', 'title', 'description',
                  'category', 'user', 'created_date']
    def get_created_date(self, obj):
        # Render as e.g. "Jan 01 2020 12:00:00".
        return datetime.strftime(obj.created_date, "%b %d %Y %H:%M:%S")
class ItemSerializer(serializers.ModelSerializer):
    """Compact item representation (IDs for user/category, formatted date)."""
    created_date = serializers.SerializerMethodField()
    class Meta:
        model = Item
        fields = ['title', 'user', 'category', 'created_date']
    def get_created_date(self, obj):
        # Same display format as ItemDetailSerializer.
        return datetime.strftime(obj.created_date, "%b %d %Y %H:%M:%S")
|
996,782 | b7de3c955d73ef19aabc22751328a49871b7f42c | import tkinter as tk
window = tk.Tk()
window.title('My radio')
window.geometry('400x300')
# Shared value bound to all the radio buttons below.
var1 = tk.StringVar()
l = tk.Label(window,bg='yellow',width=20,height=2,text='Empty')
l.pack()
def radio_command():
    # Reflect the currently selected option in the label.
    l.config(text='you have selected ' + var1.get())
    return
r1 = tk.Radiobutton(window,text='Optiion A',variable=var1,value='A',command=radio_command)
r1.pack()
r2 = tk.Radiobutton(window,text='Optiion B',variable=var1,value='B',command=radio_command)
r2.pack()
r3 = tk.Radiobutton(window,text='Optiion C',variable=var1,value='C',command=radio_command)
r3.pack()
# indicatoron=0 renders these two as push buttons instead of round indicators.
tk.Radiobutton(window,text='Optiion D',variable=var1,value='D',indicatoron=0,command=radio_command).pack()
tk.Radiobutton(window,text='Optiion E',variable=var1,value='E',indicatoron=0,command=radio_command).pack()
window.mainloop() |
996,783 | 449da5bccdc59e0d5300f60417006efea07967a2 | import ctypes
import pandas
import logging
import numpy as np
import array
logger = logging.getLogger(__name__)
def show_error_window(error_message, window_name=u"Ошибка"):
    """Show a blocking Windows MessageBox with the given text and title
    (default title is Russian for "Error"); Windows-only (ctypes.windll)."""
    ctypes.windll.user32.MessageBoxW(
        0, error_message, window_name, 0)
def write_to_excel(collection):
    """Dump `collection` (rows of 6 fields) to ./ppt_excel_template.xlsx.

    Column headers are Russian (position, designation, name, quantity,
    unit volume m3, note).  On failure -- typically the workbook still open
    in Excel -- a message box is shown and the error logged; nothing raised.
    """
    df = pandas.DataFrame(np.array(collection), columns=[
        "Поз.", "Обозначение", "Наименование", "Кол.", "Объем ед. м3", "Примечание"])
    try:
        df.to_excel('./ppt_excel_template.xlsx',
                    sheet_name='Расскладка', index=False)
    except Exception:
        show_error_window(
            u"Ошибка при записи данных в Excel, возможно вы забыли закрыть рабочий файл.")
        logger.debug(
            "Ошибка при записи данных в Excel, возможно вы забыли закрыть рабочий файл.")
def get_selected(doc, text="Выберете объект"):
    """Prompt the user in AutoCAD and return an on-screen selection set.

    Reuses the named selection set "SS1", deleting a stale one first
    (the delete fails harmlessly when it does not exist yet).
    """
    doc.Utility.prompt(text)
    try:
        doc.SelectionSets.Item("SS1").Delete()
    except Exception:
        logger.debug('Delete selection failed')
    selected = doc.SelectionSets.Add("SS1")
    selected.SelectOnScreen()
    return selected
def get_coordinates_of_item(coordinates_tuple):
    """Compute the axis-aligned bounding box of a flat coordinate sequence.

    `coordinates_tuple` is a flat sequence (x0, y0, x1, y1, ...) -- even
    indices are X values, odd indices are Y values.

    Returns a dict with keys "min_x", "max_x", "min_y", "max_y".
    Raises ValueError for an empty sequence (min/max of empty input), as
    the original index loop did.
    """
    # Stride slicing replaces the manual even/odd index loop.
    x_values = coordinates_tuple[0::2]
    y_values = coordinates_tuple[1::2]
    return {
        "min_x": min(x_values),
        "max_x": max(x_values),
        "min_y": min(y_values),
        "max_y": max(y_values),
    }
def add_name_item_to_model(doc, coordinates_tuple, name, text_height=3):
    """Place `name` as a text label at the centre of an item's bounding box.

    The insertion point is shifted by half the text height so the label
    appears visually centred; the box comes from get_coordinates_of_item().
    Side effect: adds a Text entity to the document's model space.
    """
    coordinates = get_coordinates_of_item(coordinates_tuple)
    x = ((coordinates["min_x"] + coordinates["max_x"]) / 2) - (text_height / 2)
    y = ((coordinates["min_y"] + coordinates["max_y"]) / 2) - (text_height / 2)
    # COM API expects a packed array of doubles (x, y, z).
    insertion_point = array.array('d', [x, y, 0.0])
    doc.ModelSpace.AddText(name, insertion_point, text_height)
|
996,784 | 78df22d812d8430bfb1148f6d43f531b1f0ad2f3 | h,w = map(int,input().split())
# Grid DP: minimum number of '.' -> '#' transitions along any monotone
# (right/down) path from (0,0) to (h-1,w-1); h and w are read above.
s = [input() for _ in range(h)]
dp = [[float('inf')]*w for i in range(h)]
# Entering the start cell costs 1 if it is already '#'.
dp[0][0] = 1 if s[0][0]=='#' else 0
for x in range(h):
    for y in range(w):
        if x+1<h:
            a = 1 if s[x][y]=='.' and s[x+1][y]=='#' else 0
            dp[x+1][y] = min(dp[x][y]+a,dp[x+1][y])
        if y+1<w:
            a = 1 if s[x][y]=='.' and s[x][y+1]=='#' else 0
            dp[x][y+1] = min(dp[x][y]+a,dp[x][y+1])
print(dp[h-1][w-1])
|
996,785 | ad0913464d3675576187cc7ba9c73306d9b26711 | class NestedIterator(object):
    def __init__(self, nestedList):
        """Eagerly flatten the nested list so iteration is simple indexing."""
        self.array = self.flatten(nestedList)
        self.i = 0
        self.length = len(self.array)
def flatten(self, nestedList):
ans = []
for item in nestedList:
if item.isInteger():
ans.append(item.getInteger())
else:
ans += self.flatten(item.getList())
return ans
    def next(self):
        """
        :rtype: int
        """
        # Return the current element and advance the cursor.
        num = self.array[self.i]
        self.i += 1
        return num
def hasNext(self):
"""
:rtype: bool
"""
return self.i < self.length |
996,786 | 97246c274dd4de098fee5ddc9f00cd699bd9cf87 | from typing import Tuple
from .optimizer import _params_t, Optimizer
class Rprop(Optimizer):
    """Type stub for the Rprop (resilient backpropagation) optimizer."""
    def __init__(self, params: _params_t, lr: float=..., etas: Tuple[float, float]=..., step_sizes: Tuple[float, float]=...) -> None: ...
|
996,787 | 33d0c749d983fd4eb1120c59d25f86d6806535a1 | from django.shortcuts import render, get_object_or_404
from korpa.forms import KorpaDodajProizvodForma
from .models import Proizvod, Kategorija
from .preporuceno import Preporucivac
def proizvodi_lista(request, kategorija_slug=None):
    """List available products, optionally filtered by a category slug.

    The slug is matched against the category translation for the request's
    language (django-parler style `translations__` lookups).
    """
    kategorija = None
    kategorije = Kategorija.objects.all()
    proizvodi = Proizvod.objects.filter(dostupno=True)
    if kategorija_slug:
        jezik = request.LANGUAGE_CODE
        kategorija = get_object_or_404(Kategorija,
                                       translations__language_code=jezik,
                                       translations__slug=kategorija_slug)
        proizvodi = proizvodi.filter(kategorija=kategorija)
    return render(request,
                  'prodavnica/proizvod/lista.html', {
                      'kategorija': kategorija,
                      'kategorije': kategorije,
                      'proizvodi': proizvodi})
def proizvod_detaljno(request, id, proizvod_slug):
    """Product detail page with an add-to-cart form and 4 recommendations."""
    jezik = request.LANGUAGE_CODE
    proizvod = get_object_or_404(Proizvod,
                                 dostupno=True,
                                 id=id,
                                 translations__language_code=jezik,
                                 translations__slug=proizvod_slug)
    korpa_proizvod_forma = KorpaDodajProizvodForma()
    # Recommendation engine: 4 products related to the one being viewed.
    p = Preporucivac()
    preporuceni_proizvodi = p.preporuci_proizvode_za([proizvod], 4)
    return render(request,
                  'prodavnica/proizvod/detaljno.html',
                  {'proizvod': proizvod,
                   'korpa_proizvod_forma': korpa_proizvod_forma,
                   'preporuceni_proizvodi': preporuceni_proizvodi})
|
996,788 | 940e03f625fa48e183dd937ab2f4f36a962f69da | ##############################################################################
# Import some libraries
##############################################################################
import random
import os
import glob
import copy
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as io
import pylab as pl
import scipy.optimize as opt
import scipy.misc
from scipy.interpolate import interp1d
###############################################################################
# Define some functions
###############################################################################
# Generate holograms with first two parameters to optimise - Λ and φ
def Holo_tilt(Λ, φ, Hol_δy, Hol_δx, ϕ_min, ϕ_max):
    """Build a tilted-phase sub-hologram.

    Λ is the grating period, φ the in-plane rotation angle and
    (Hol_δy, Hol_δx) the hologram size in pixels.

    Returns (Z, Z_mod): the unwrapped tilt plane and the same plane wrapped
    modulo the phase range and rescaled into [ϕ_min, ϕ_max].
    """
    # mgrid yields row indices (Y) first, column indices (X) second --
    # same layout as meshgrid(arange(δx), arange(δy)).
    Y, X = np.mgrid[0:Hol_δy, 0:Hol_δx]
    θ = np.arctan((ϕ_max - ϕ_min) / Λ)
    Z = np.tan(θ) * (X * np.cos(φ) + Y * np.sin(φ))
    # Wrap into one phase period; the tiny offset keeps the top edge from
    # folding back to zero.
    Z_mod = np.mod(Z, ϕ_max - ϕ_min - 0.00000001)
    Z_mod = Z_mod * (ϕ_max - ϕ_min) / (np.max(Z_mod)) + ϕ_min
    return (Z, Z_mod)
# Add sub hologram Z_mod to larger hologram (initially set to 0s)
def Add_Holo(Hol_cy, Hol_cx, Z_mod, LCOSy, LCOSx):
    """Embed sub-hologram Z_mod into a zeroed (LCOSy, LCOSx) canvas,
    centred on pixel (Hol_cy, Hol_cx).

    Returns the full hologram array (float64).  Z_mod must fit inside the
    canvas at the requested centre.
    """
    Holo_f = np.zeros((LCOSy, LCOSx))
    (Hol_dy, Hol_dx) = np.shape(Z_mod)
    # BUG FIX: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin int truncates identically here.
    y1 = int(Hol_cy - np.floor(Hol_dy / 2))
    y2 = int(Hol_cy + np.ceil(Hol_dy / 2))
    x1 = int(Hol_cx - np.floor(Hol_dx / 2))
    x2 = int(Hol_cx + np.ceil(Hol_dx / 2))
    Holo_f[y1:y2, x1:x2] = Z_mod
    return Holo_f
# Defining the functional form of grayscale to phase (g(ϕ))
def Phase(x, A, B):
    """Grayscale-to-phase model: sin^2(A * (1 - exp(-B * x)))."""
    saturation = 1 - np.exp(-B * x)
    return np.square(np.sin(A * saturation))
# Use g(ϕ) defined in 'Phase' to fit experimentally obtained phaseramps
def Fit_phase():
    """Fit the grayscale->phase response from a measured phase-ramp file.

    Loads the first '*Phaseramp.mat' in a hard-coded directory, converts
    the measured power (dB) to normalised linear units, and least-squares
    fits the Phase() model on 25 interpolated sample points.

    Returns (ϕ_A, ϕ_B, ϕ_g): the two fit parameters and the resulting
    phase curve evaluated on grayscale values 0..254.

    NOTE(review): os.chdir() changes the process working directory as a
    side effect, and a failed fit only prints -- popt is then undefined and
    the next line raises NameError.  Confirm both are acceptable.
    """
    p1 = r"C:\Users\Philip\Documents\Python\Local Repo\Curve fitting"
    os.chdir(p1)
    files = glob.glob('*Phaseramp.mat')
    phaseramp = io.loadmat(files[0])
    y_dB = phaseramp['P4'].ravel()
    # dB -> linear power, normalised to its maximum.
    y_lin = np.power(10, y_dB / 10) / np.max(np.power(10, y_dB / 10))
    x0 = np.linspace(0, 255, len(y_dB))
    x1 = np.linspace(0, 255, 25)
    x3 = range(255)
    f1 = interp1d(x0, y_lin)
    initial_guess = (15, 1 / 800)
    try:
        popt, pcov = opt.curve_fit(Phase, x1, f1(
            x1), p0=initial_guess, bounds=([0, -np.inf], [np.inf, np.inf]))
    except RuntimeError:
        print("Error - curve_fit failed")
    ϕ_A = popt[0]
    ϕ_B = popt[1]
    # Fitted phase curve over the grayscale range (units of pi/2).
    ϕ_g = (2 / np.pi) * np.abs(ϕ_A) * (1 - np.exp(-ϕ_B * x3))
    return (ϕ_A, ϕ_B, ϕ_g)
# Use the fitting results from 'Fit_phase' & linear mapping
# to remap hologram Z_mod
def Remap_phase(Z_mod, g_ϕ):
    """Apply the grayscale mapping g_ϕ to each row of Z_mod.

    Returns a new array; the input is left untouched.
    """
    remapped = copy.copy(Z_mod)
    for row, values in enumerate(Z_mod):
        remapped[row, :] = g_ϕ(values)
    return (remapped)
# Save bmp file
def Save_holo(Hologram, Path):
    """Write the hologram array to an image file at Path.

    NOTE(review): scipy.misc.imsave was deprecated in SciPy 1.0 and removed
    in 1.2, so this needs an old SciPy (with Pillow installed) -- confirm
    the pinned version or migrate to imageio.imwrite.
    """
    scipy.misc.imsave(Path, Hologram)
# Overshoot mapping
def Overshoot_phase(Z_mod1, g_OSlw, g_OSup, g_min, g_max):
    """Overshoot mapping: values above g_OSup snap to g_max, values at or
    below g_OSlw snap to g_min, everything in between is left unchanged.

    Works on a copy; the input array is not modified.
    """
    clipped = copy.copy(Z_mod1)
    clipped[Z_mod1 > g_OSup] = g_max
    clipped[Z_mod1 <= g_OSlw] = g_min
    return clipped
###############################################################################
# Calculate phase map
###############################################################################
# Phase mapping details (ϕ)
(ϕ_A, ϕ_B, ϕ_g) = Fit_phase()
ϕ_min = 0
ϕ_max = max(ϕ_g)
print(ϕ_max)
ϕ_rng = (ϕ_min, ϕ_max)
g_ϕ = interp1d(ϕ_g, range(255))
###############################################################################
# Specify parameters
###############################################################################
# LCOS size (# pixels in x & y)
LCOS_δy = 100
LCOS_δx = 100
# Subhologram size and location
Hol_δx = 50
Hol_δy = 50
Hol_cx = int(LCOS_δx / 2)
Hol_cy = int(LCOS_δy / 2)
# Phase (ϕ) upper and lower limits
ϕ_uplim = ϕ_max
ϕ_lwlim = ϕ_min
# Overshooting thresholds
g_OSup = g_ϕ(2)
g_OSlw = g_ϕ(0.2)
# g_OSup = g_ϕ(ϕ_max/2)
# g_OSlw = g_ϕ(ϕ_max/2)
g_min = 0
g_max = 255
# Grating metrics (specify Λ (period) and φ (rotation angle))
Λ = 7.5
φ = 0.25 * np.pi
###############################################################################
# Construct some stuff for the code
###############################################################################
LCOS_δyx = (LCOS_δy, LCOS_δx)
Hol_δyx = (Hol_δy, Hol_δx)
Hol_cyx = (Hol_cy, Hol_cx)
ϕ_lims = (ϕ_lwlim, ϕ_uplim)
# Define holo params, matching Holo_tilt(Λ, φ, Hol_δy, Hol_δx, ϕ_min, ϕ_max).
# BUG FIX: this previously unpacked `φ_lims` (phi U+03C6), a name that is
# never defined -- the tuple built above is `ϕ_lims` (phi symbol U+03D5) --
# so the line raised NameError at runtime.
Holo_params = (Λ, φ, *Hol_δyx, *ϕ_lims)
###############################################################################
# Run some of the functions defined above
###############################################################################
# Calculate sub hologram (Holo_s)
Holo_s = Holo_tilt(*Holo_params)
Z = Holo_s[0]
Zϕ_mod = Holo_s[1]
# Remap phase with non linear ϕ map
Zg_mod1 = Remap_phase(Zϕ_mod, g_ϕ)
# Use overshooting
Zg_mod2 = Overshoot_phase(Zg_mod1, g_OSlw, g_OSup, g_min, g_max)
# Calculate full holograms (Holo_fN)
Holo_f1 = Add_Holo(*Hol_cyx, Zg_mod1, *LCOS_δyx)
Holo_f2 = Add_Holo(*Hol_cyx, Zg_mod2, *LCOS_δyx)
# Set output holograms (Z_out, Holo_out)
Z_out = Zg_mod2
Holo_out = Holo_f2
###############################################################################
# Save output
###############################################################################
Save_holo(Holo_out, 'Name.bmp')
###############################################################################
# Plotting
###############################################################################
cmap = plt.get_cmap('gray')
###############################################################################
# # Plot Z_mod
# pl.figure('Z_mod')
# im0 = plt.imshow(Z_mod, cmap)
###############################################################################
# pl.figure('g vs x')
# pl.plot(range(Hol_δy), g2_ϕ(Z_mod[:, 0]), ':b.')
# pl.plot(range(Hol_δy), g1_ϕ(Z_mod[:, 0]), ':r.')
# pl.title('g vs x position')
###############################################################################
# pl.figure('g vs ϕ')
# pl.plot(g2_ϕ(Z_mod[:, 0]), ':b.')
# pl.plot(g1_ϕ(Z_mod[:, 0]), ':r.')
# pl.title('g vs ϕ position')
###############################################################################
# pl.figure('Full Phase')
# im0 = plt.imshow(Holo_f0, cmap)
# pl.title('Λ = %s, ϕ = %sπ' % (Λ, φ / np.pi))
# plt.colorbar()
# plt.clim(ϕ_min, ϕ_max)
# print(np.max(Holo_f0))
###############################################################################
# Full Holograms
pl.figure('Full Hologram1')
im0 = plt.imshow(Holo_f2, cmap)
pl.title('Λ = %s, ϕ = %sπ' % (Λ, φ / np.pi))
plt.colorbar()
plt.clim(0, 255)
plt.ylabel('LCOS y axis')
plt.xlabel('LCOS x axis')
###############################################################################
# Phase Profile
# pl.figure('Phase Profile')
# plt.plot(range(LCOS_δy), ϕ_lwlim *
# np.ones(np.shape(Holo_f0[:, Hol_cx])), 'xkcd:green')
# plt.plot(range(LCOS_δy), ϕ_uplim *
# np.ones(np.shape(Holo_f0[:, Hol_cx])), 'xkcd:green')
# plt.plot(range(LCOS_δy), Holo_f0[:, Hol_cx], '.:',
# color='xkcd:light green', mfc='xkcd:dark green')
# pl.title('Λ = %s, ϕ = %sπ' % (Λ, φ / np.pi))
# plt.ylim([ϕ_min, ϕ_max])
# plt.grid()
###############################################################################
# Hologram profiles (x/y)
pl.figure('Hologram Profiles')
plt.plot(range(LCOS_δx), g_ϕ(ϕ_lwlim) *
np.ones(np.shape(Holo_out[:, Hol_cx])), '--', color='xkcd:light blue')
plt.plot(range(LCOS_δx), g_ϕ(ϕ_uplim) *
np.ones(np.shape(Holo_out[:, Hol_cx])), '--', color='xkcd:light blue')
plt.plot(range(LCOS_δx), Holo_out[:, Hol_cx], '.-',
color='xkcd:blue', mfc='xkcd:dark blue', label='Horizontal profile')
plt.plot(range(LCOS_δy), g_ϕ(ϕ_lwlim) *
np.ones(np.shape(Holo_out[Hol_cy, :])), ':', color='xkcd:light red')
plt.plot(range(LCOS_δy), g_ϕ(ϕ_uplim) *
np.ones(np.shape(Holo_out[Hol_cy, :])), ':', color='xkcd:light red')
plt.plot(range(LCOS_δy), Holo_out[Hol_cy, :], '.-',
color='xkcd:red', mfc='xkcd:dark red', label='Vertical profile')
pl.title('Λ = %s, ϕ = %sπ' % (Λ, φ / np.pi))
plt.ylim([0, 255])
plt.ylabel('Greyscale value [0:255]')
plt.xlabel('LCOS y axis')
plt.legend()
plt.grid()
###############################################################################
# pl.figure('ϕ mapping')
# pl.plot(range(255), ϕ_g, color='xkcd:red', label='From exp. φ mapping')
# pl.plot(g2_ϕ(ϕ_lwlim) * np.ones(100),
# np.linspace(ϕ_min, ϕ_lwlim, 100), ':', color='xkcd:light red')
# pl.plot(g2_ϕ(ϕ_uplim) * np.ones(100),
# np.linspace(ϕ_min, ϕ_uplim, 100), ':', color='xkcd:light red')
# pl.plot(range(255), np.linspace(ϕ_min, ϕ_max, 255),
# color='xkcd:blue', label='Linear mapping')
# pl.plot(g1_ϕ(ϕ_lwlim) * np.ones(100),
# np.linspace(ϕ_min, ϕ_lwlim, 100), ':', color='xkcd:light blue')
# pl.plot(g1_ϕ(ϕ_uplim) * np.ones(100),
# np.linspace(ϕ_min, ϕ_uplim, 100), ':', color='xkcd:light blue')
# pl.plot(range(255), ϕ_uplim * np.ones(255), color='xkcd:black')
# pl.plot(range(255), ϕ_lwlim * np.ones(255), color='xkcd:black')
# plt.legend()
# plt.ylabel('Phase (φ)')
# plt.xlabel('Greyscale value [0:255]')
plt.show()
|
996,789 | 8c5171da59a907aad43a0fd101425b4d7d684be5 | a_string = "bob"
len(a_string)
print("con" + "cat")
con_cat = "".join(["con", "cat"])
print("a;a;a;".split(";"))
url = "http://www.bob.com"
scheme, _, address = url.partition("://")
"Replace {0} with {1}".format("this", 1)
"With {named} {fields}".format(named="much", fields="better")
import math
print("Math stuff: {math.pi}".format(math=math)) # N.B. Passing objects to format
print("Math stuff: {math.pi:.3F}".format(math=math)) # N.B. Formating
|
996,790 | 9b47273f70aa55de295bce35a16f70f8aedf489c | import pygame, os, json
from config.config import colours
from sprites.text import Text
from states.state import State
class Game_over(State):
    """Game-over screen shown after a run.

    Counts the run's coins/kills/depth up on screen, computes a score
    (20 per coin + 40 per kill + 1 per depth unit), persists the highscore
    in config/scores.json and waits for Enter to restart.
    """
    def __init__(self, game, player_coins, player_kills, player_height):
        """Initialize the menu class."""
        super().__init__(game)
        # Load animation frames; the frame duration is encoded in the file
        # name as "<frame>_<duration>.<ext>".
        all_animations = {}
        for animations in os.listdir(self.game.animation_directory):
            all_animations[animations] = []
            for frames in os.listdir(os.path.join(self.game.animation_directory, animations)):
                img = pygame.image.load(os.path.join(self.game.animation_directory, animations, frames)).convert()
                img.set_colorkey((0,0,0))
                duration = frames.split("_")[-1].split(".")[0]
                all_animations[animations].append([img, int(duration)])
        self.all_sounds = {}
        for sound in os.listdir(self.game.sound_directory):
            self.all_sounds[sound.split(".")[0]] = pygame.mixer.Sound(os.path.join(self.game.sound_directory, sound))
        # On-screen counters start at 0 and tick up towards the run totals.
        self.player_coins, self.player_kills, self.player_height = 0, 0, 0
        self.total_player_coins, self.total_player_kills, self.total_player_height = player_coins, player_kills, player_height
        self.score_timer = 0
        self.score = 20*self.total_player_coins + 40*self.total_player_kills + self.total_player_height
        # Load the stored highscore; show this run's score in yellow when it
        # beats the record, then write the (possibly updated) record back.
        with open(os.path.join("config", "scores.json"), "r") as scores_json_file:
            scores = json.load(scores_json_file)
            if int(scores["highscore"]) < self.score:
                scores["highscore"] = self.score
                self.score_colour = colours["yellow"]
            else:
                self.score_colour = colours["white"]
            self.highscore = int(scores["highscore"])
            scores_json_file.close()
        with open(os.path.join("config", "scores.json"), "w") as scores_json_file:
            json.dump(scores, scores_json_file)
            scores_json_file.close()
        self.load_sprites()
        # Frame counter driving the blinking restart prompt.
        self.text_loop = 30
    def load_sprites(self):
        """Create all text sprites used by this screen."""
        self.all_sprites = pygame.sprite.Group()
        self.game_over_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "GAME OVER", colours["red"], False, self.game.GAME_WIDTH *.5, 80-40, True)
        self.coins_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "Coins - " + str(self.player_coins), colours["white"], False, self.game.GAME_WIDTH *.5, 140-45, True)
        self.kills_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "Enemies - " + str(self.player_kills), colours["white"], False, self.game.GAME_WIDTH *.5, 165-45, True)
        self.height_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "Depth - " + str(int(self.player_height)), colours["white"], False, self.game.GAME_WIDTH *.5, 190-45, True)
        self.score_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "YOUR SCORE - " + str(int(self.score)), colours["blue"], False, self.game.GAME_WIDTH *.5, 240-40, True)
        self.highscore_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "HIGHSCORE - " + str(int(self.highscore)), colours["yellow"], False, self.game.GAME_WIDTH *.5, 50, True)
        self.restart_txt = Text(self.game.game_canvas, os.path.join(self.game.font_directory,"alphbeta.ttf"), 22, "> Press Enter to Restart <", colours["red"], False, self.game.GAME_WIDTH *.5, 180, True)
    def update(self):
        """Update the menu state."""
        self.game.check_inputs()
        if self.game.actions[pygame.K_RETURN]:
            self.game.restart()
    def render(self):
        """Render the menu state."""
        # NOTE(review): indentation was reconstructed from a whitespace-
        # stripped dump; the == checks are read as siblings of the range
        # checks (so each tick sound actually plays) -- confirm against the
        # original file.
        self.game.game_canvas.fill(colours["black"])
        self.score_timer += 1
        # Frames 121-399: animate the three counters; a sound marks the
        # start of each one.
        if self.score_timer > 120 and self.score_timer < 400:
            if self.total_player_coins > self.player_coins: self.player_coins += 0.5
            self.coins_txt.update(self.game.game_canvas, content = "Coins - " + str(int(self.player_coins)))
        if self.score_timer == 120:
            pygame.mixer.find_channel(True).play(self.all_sounds["thing"])
        if self.score_timer > 200 and self.score_timer < 400:
            if self.total_player_kills > self.player_kills: self.player_kills += 0.2
            self.kills_txt.update(self.game.game_canvas, content = "Enemies - " + str(int(self.player_kills)))
        if self.score_timer == 200:
            pygame.mixer.find_channel(True).play(self.all_sounds["thing"])
        if self.score_timer > 280 and self.score_timer < 400:
            if self.total_player_height > self.player_height: self.player_height += 10
            self.height_txt.update(self.game.game_canvas, content = "Depth - " + str(int(self.player_height)))
        if self.score_timer == 280:
            pygame.mixer.find_channel(True).play(self.all_sounds["thing"])
        # From frame 400 on: show the final score and blink the restart
        # prompt every 30 frames.
        if self.score_timer >= 400:
            self.highscore_txt.update(self.game.game_canvas)
            self.score_txt.update(self.game.game_canvas, content = "YOUR SCORE - " + str(self.score), colour = self.score_colour, y = 120)
            self.text_loop -= 1
            if self.text_loop < 0:
                self.restart_txt.update(content = ">Press ENTER To Restart<")
                if self.text_loop < -30: self.text_loop = 30
            else:
                self.restart_txt.update(content = "> Press ENTER To Restart <")
        else:
            self.game_over_txt.update(self.game.game_canvas)
            self.score_txt.update(self.game.game_canvas, "YOUR SCORE - " + str(int(20*self.player_coins + 40*self.player_kills + self.player_height)))
|
996,791 | d9c368c45c5527adf254575fa886bad23013aec7 | # coding=utf-8
import base64
import sys
import os
import time
from decimal import Decimal
sys.path.append('../')
from multiprocessing import Process, Manager
from api.Util import tps_to_file, progressbar, get_test_data_from_file
from api.TopApi import TopApi
import conftest
def tps_transfer_out(senders, receivers, node_index, pid):
    """Worker process: endlessly verify balances and fire 1-TOP transfers.

    For each sender it (1) checks the balance against the expected
    post-transfer value, (2) refreshes the account's last transaction hash,
    and (3) issues the next transfer to the paired receiver.  Any failing
    account is replaced from the shared `sender_pool`.  Shared counters in
    `tps_list` are [0] = transfer attempts, [1] = balance checks passed.
    Python 2 code (xrange, str.encode('hex')).
    """
    top = TopApi(node_index)
    log = conftest.get_logger(os.path.basename(__file__), pid)
    test_transfer_amount = 1
    while True:
        # get sender balance
        for q in xrange(len(senders)):
            sender = senders[q]
            balance_rsp = top.account_balance(sender['account'])
            balance_rsp_dict = balance_rsp.json()
            if balance_rsp_dict['Result'] != 1:
                log.error('pre_genesis_balance result not 1:' + str(sender['account']))
                senders[q] = sender_pool.pop()
                # TODO: failures still need to be counted in the stats
                log.error('get balance err, account: %s' % (sender['account']))
                continue
            balance = balance_rsp_dict['balance']
            if balance == 0:
                log.error('account power off!')
                senders[q] = sender_pool.pop()
                continue
            nt = time.time()
            # Previous transfer of 1 confirmed when old balance == new + 1.
            if sender['balance'] == float(balance) + 1:
                tps_list[1] += 1
            else:
                log.error('post err, hash: %s, pre :%d post: %d, pt :%d nt :%d' %
                          (sender['last_hash'].encode('hex'), sender['balance'], float(balance), sender['stamp'], nt))
            # update sender last hash
            rsp = top.get_account_last_hash(sender['account'])
            if rsp.json()['Result'] != 1:
                log.error('can\'t get last hash :' + str(sender['account']))
                senders[q] = sender_pool.pop()
                # TODO: failures still need to be counted in the stats
                log.error('get hash err, account: %s' % (sender['account']))
                continue
            sender_last_hash = base64.b64decode(rsp.json()['last_digest'])
            sender['last_hash'] = sender_last_hash
            sender['balance'] = float(balance)
            sender['stamp'] = nt
            # make deal
            rsp, new_sender = top.top_transfer_out(sender, receivers[q], test_transfer_amount)
            tps_list[0] += 1
            if rsp.json()['Result'] != 1:
                log.error('top_transfer_out fail:' + str(sender['account']))
                senders[q] = sender_pool.pop()
                # TODO: failures still need to be counted in the stats
                log.error('transfer err, hash: %s' % (sender['last_hash'].encode('hex')))
                continue
def printer():
    """Reporter process: once a minute, print and append to a dated file the
    cumulative totals and the TPS over the last interval.

    Reads the shared `tps_list` ([attempts, passes]) populated by the
    transfer workers; runs forever.
    """
    pre_hits = 0
    current_date = time.strftime('%Y-%m-%d-%H-%M_%S', time.localtime(time.time()))
    tps_file = conftest.result_dir + 'tps_out_' + current_date + '.txt'
    # Start with a fresh results file for this run.
    if os.path.exists(tps_file):
        os.remove(tps_file)
    while True:
        if len(tps_list) == 0:
            time.sleep(10)
            continue
        # Seed the baseline on the first populated iteration.
        if pre_hits == 0:
            pre_hits = tps_list[0]
        time.sleep(60)
        pas = tps_list[1]
        post_hits = tps_list[0]
        # TPS over the 60-second window, rounded to 2 decimals.
        incr = round(Decimal(post_hits - pre_hits) / Decimal(60), 2)
        if incr > 0:
            progressbar('all: %s | p: %s | f: %s | tps: %s '
                        % (str(post_hits), str(pas), str(post_hits - pas), str(incr)))
            tps_to_file(tps_file,
                        '%s all: %s | p: %s | f: %s | tps: %s'
                        % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), str(post_hits), str(pas),
                           str(post_hits - pas), str(incr)))
        pre_hits = post_hits
def per_group(count):
    """Pop `count` sender and receiver accounts off the shared pools and
    return them as two parallel lists (senders, receivers)."""
    p_s = []
    p_r = []
    for u in xrange(count):
        p_s.append(sender_pool.pop())
        p_r.append(receiver_pool.pop())
    return p_s, p_r
if __name__ == '__main__':
    # CLI: <worker process count> <accounts per worker> <target node index>
    process_count = int(sys.argv[1])
    task_count = int(sys.argv[2])
    node_send = int(sys.argv[3])
    pl = []
    # Manager-backed lists are shared across all worker processes.
    mgr = Manager()
    sender_pool = mgr.list()
    receiver_pool = mgr.list()
    tps_list = mgr.list()
    tps_list.append(0)  # [0]: total transfer attempts
    tps_list.append(0)  # [1]: balance checks passed
    sender_pool.extend(get_test_data_from_file('sender_account.txt'))
    receiver_pool.extend(get_test_data_from_file('receiver_account.txt'))
    # One transfer worker per group, plus one reporter process.
    for i in xrange(process_count):
        per_s, per_r = per_group(task_count)
        p = Process(target=tps_transfer_out, args=(per_s, per_r, node_send, i))
        p.start()
        pl.append(p)
    p = Process(target=printer, args=())
    p.start()
    pl.append(p)
    [p.join() for p in pl]
|
996,792 | 2a4104d45a53f3a9d22ad9f40765b3e73fcd5eac | '''
AESquantparams needed for Auger quant on smooth-differentiated peaks
This file contains AES peak centers and positions of desired background regions for typical Auger semi-quantitative analyses.
Input ROIs are cut and pasted from the excel spreadsheet named
EDXS_quant_parameters.xls.
Code:
'''
# Negative-excursion (derivative) AES peak centers by element.
# Values are presumably energies in eV -- TODO confirm against the
# EDXS_quant_parameters.xls spreadsheet mentioned above.
Si1AESpeak =94
SAESpeak =154
ClAESpeak =186
CAESpeak =276
CaAESpeak =296
OAESpeak =513
Fe1AESpeak =600
Fe2AESpeak =654
FeAESpeak =707
MgAESpeak =1185
AlAESpeak =1390
SiAESpeak =1610
NAESpeak =387
TiAESpeak =389
Ti2AESpeak =422
NaAESpeak =966
CsAESpeak =573
Cs2AESpeak =561
# Fe3 is main peak, Fe2 at 93% and Fe1 at 70%
# Corresponding positive peak positions
Si1pospeak =79
Spospeak =147
Clpospeak =179
Cpospeak =252
Capospeak =288
Opospeak =505
Fe1pospeak =582
Fe2pospeak =642
Fepospeak =697
Mgpospeak =1180
Alpospeak =1384
Sipospeak =1600
Npospeak =374
Tipospeak =379
Ti2pospeak =415
Napospeak =957 # guesstimate
Cspospeak =566
Cs2pospeak =552
# Search width for finding Auger peaks (typically varies by element)
Si1searchwidth =6
Ssearchwidth =6
Clsearchwidth =6
Csearchwidth =6
Casearchwidth =6
Osearchwidth =6
Fe1searchwidth =6
Fe2searchwidth =6
Fesearchwidth =6
Mgsearchwidth =6
Alsearchwidth =6
Sisearchwidth =8
Nsearchwidth =6
Tisearchwidth =6
Ti2searchwidth =6
Nasearchwidth =6
Cssearchwidth =6
Cs2searchwidth =6
#Auger background checks for S7D7
# Each background region has a trio: negative peak center, positive peak
# position, and the search width used when locating it.
b121AESpeak =121
b121pospeak =111
b121searchwidth =9
b205AESpeak =205
b205pospeak =195
b205searchwidth =9
b320AESpeak =320
b320pospeak =310
b320searchwidth =9
b345AESpeak =345
b345pospeak =335
b345searchwidth =9
b450AESpeak =450
b450pospeak =440
b450searchwidth =9
b540AESpeak =540
b540pospeak =530
b540searchwidth =9
b565AESpeak =565
b565pospeak =555
b565searchwidth =9
b760AESpeak =760
b760pospeak =750
b760searchwidth =9
b1115AESpeak =1115
b1115pospeak =1105
b1115searchwidth =9
# NOTE(review): name says 1220 but the value is 1215 (pospeak is 1205) --
# confirm whether the value or the name is the intended one.
b1220AESpeak =1215
b1220pospeak =1205
b1220searchwidth =9
b1305AESpeak =1305
b1305pospeak =1295
b1305searchwidth =9
b1415AESpeak =1415
b1415pospeak =1405
b1415searchwidth =9
b1550AESpeak =1550
b1550pospeak =1540
b1550searchwidth =9
b1660AESpeak =1660
b1660pospeak =1650
b1660searchwidth =9
# compare peak-to-peak width with usual value
Si1peakwidth =15
Speakwidth =6.6
Clpeakwidth =7
Cpeakwidth =24.1
Capeakwidth =7
Opeakwidth =7.6
Fe1peakwidth =18.3
Fe2peakwidth =12.6
Fepeakwidth =9.9
Mgpeakwidth =6.8
Alpeakwidth =7
Sipeakwidth =10
Npeakwidth =13
Tipeakwidth =10
Ti2peakwidth =7
Napeakwidth =9 # guesstimate
Cspeakwidth =7
Cs2peakwidth =9
|
996,793 | e166885e6d7226e8351a77c271bffd2559b5a700 | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from .forms import MusicianForm, AlbumForm
from .models import Musician, Album
# Create your views here.
# Function definition syntax reminder: def function_name(parameters):
def index(request):
    """Render the list of all musicians."""
    # .all() returns a QuerySet, which the template can iterate like a list.
    context = {
        'musicians': Musician.objects.all(),
    }
    return render(request, 'musicians/index.html', context)
def create(request):
    """Show an empty MusicianForm (GET) or create a Musician from it (POST).

    On an invalid POST the bound form (with errors) is re-rendered.
    FIX: removed a leftover debug statement (print(form)) that dumped the
    rendered form HTML to the server console on every GET.
    """
    if request.method == "POST":
        # Bind the form to the submitted data.
        form = MusicianForm(request.POST)
        if form.is_valid():
            # ModelForm.save() creates the row (Meta.model = Musician).
            musician = form.save()
            # redirect('app_name:path_name')
            return redirect('musicians:index')
    else:
        # Unbound form: renders empty input fields for a new Musician.
        form = MusicianForm()
    context = {
        'form' : form
    }
    return render(request, 'musicians/create.html', context)
# View signature: function_name(request, <variable captured from the URL>)
def detail(request, musician_pk):
    """Show one musician, an empty album form, and the musician's albums."""
    # Model.objects.get(field=value) -- raises DoesNotExist on a bad pk.
    musician = Musician.objects.get(pk=musician_pk)
    context = {
        'musician': musician,
        'album_form': AlbumForm(),
        'albums': musician.album_set.all(),
    }
    return render(request, 'musicians/detail.html', context)
def update(request, musician_pk):
    """Edit an existing musician; GET pre-fills the form, POST saves it."""
    musician = Musician.objects.get(pk=musician_pk)
    if request.method == 'POST':
        # Bind submitted data to the existing instance so save() updates it.
        form = MusicianForm(request.POST, instance=musician)
        if form.is_valid():
            musician = form.save()
            return redirect('musicians:detail', musician.pk)
    else:
        # Pre-populate the form with the musician's current values.
        form = MusicianForm(instance=musician)
    return render(request, 'musicians/create.html', {'form': form})
def delete(request, musician_pk):
    """Delete a musician and return to the list page.

    FIX: uses get_object_or_404 (consistent with album_create/album_delete)
    so a bad pk yields a 404 instead of an uncaught DoesNotExist (500).
    NOTE(review): consider adding @require_POST like album_delete -- a GET
    link that deletes data is unsafe; left unchanged to avoid breaking
    existing links.
    """
    musician = get_object_or_404(Musician, pk=musician_pk)
    musician.delete()
    return redirect('musicians:index')
@require_POST
def album_create(request, musician_pk):
    """Create an album for the given musician from the posted AlbumForm.

    BUG FIX: the invalid-form branch previously called
    redirect('musicians:detail', context) with a dict as the URL argument,
    which raises NoReverseMatch at runtime. Both branches now redirect to
    the musician's detail page (form errors are not round-tripped, matching
    the apparent intent of the original redirect).
    """
    musician = get_object_or_404(Musician, pk=musician_pk)
    album_form = AlbumForm(request.POST)
    if album_form.is_valid():
        # commit=False: attach the owning musician before hitting the DB.
        album = album_form.save(commit=False)
        album.musician = musician
        album.save()
    return redirect('musicians:detail', musician.pk)
@require_POST
def album_delete(request, musician_pk, album_pk):
    """Delete a single album, then return to its musician's detail page."""
    target = get_object_or_404(Album, pk=album_pk)
    target.delete()
    return redirect('musicians:detail', musician_pk)
class Game:
    """A game record: two players, a board, lifecycle dates and a status."""

    def __init__(self, *, player_one=None, player_two=None, board=None, game_data=None, start_date=None, end_date=None,
                 status=None, name=None, _id=None):
        """All fields are keyword-only and default to None."""
        self.player_one = player_one    # object exposing ._id, or None
        self.player_two = player_two
        self.board = board
        self.game_data = game_data      # arbitrary per-game payload
        self.start_date = start_date    # datetime or None
        self.end_date = end_date
        self.status = status
        self.name = name
        self._id = _id                  # storage id; serialized only when truthy

    def to_json(self):
        """Return a plain-dict representation of the game.

        Linked objects collapse to their _id, datetimes to
        'YYYY-MM-DD HH:MM:SS' strings; '_id' is present only when set.
        """
        def _ref(obj):
            # Related objects are stored by id only.
            return obj._id if obj else None

        def _stamp(dt):
            return f'{dt:%Y-%m-%d %H:%M:%S}' if dt else None

        payload = {
            'player_one': _ref(self.player_one),
            'player_two': _ref(self.player_two),
            'board': _ref(self.board),
            'game_data': self.game_data,
            'start_date': _stamp(self.start_date),
            'end_date': _stamp(self.end_date),
            'status': self.status,
            'name': self.name,
        }
        if self._id:
            payload['_id'] = self._id
        return payload
|
996,795 | 85b07fac429cb772aa6ff76b8152575b81b0c15c | """
Ashley Piccone
Euler Problem #17: Number letter counts
"""
import numpy as np
# Letter counts use British usage: "and" after the hundreds, no spaces
# or hyphens (Project Euler #17).
ones = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
        'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
        'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen',
        'nineteen']
tens = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy',
        'eighty', 'ninety']


def num_length(n):
    """Return the number of letters in n written out in English words."""
    n = int(n)
    if n < 100:
        if n < 20:
            count = len(ones[n])
            if n == 0:
                # Mirrors the original's n == 0 adjustment (never reached
                # for the 1..1000 range summed below).
                count -= 4
            return count
        # e.g. 42 -> len('forty') + len('two')
        return len(tens[n // 10 - 2]) + len(ones[n % 10])
    count = 0
    thousands = n // 1000
    hundreds = (n // 100) % 10
    remainder = n % 100
    if n > 999:
        count += len('thousand') + num_length(thousands)
    if hundreds:
        count += len('hundred') + len(ones[hundreds])
    if remainder:
        count += len('and') + num_length(remainder)
    return count


tot = 0
for number in range(1, 1001):
    tot += num_length(number)
print(tot)
|
class Bike(object):
    """A ridable bike that tracks total distance on an odometer."""

    def __init__(self, price, max_speed):
        self.price = price
        self.max_speed = max_speed
        self.miles = 0  # odometer; changed by ride()/reverse()

    def displayinfo(self):
        """Print price, max speed and total miles; return self for chaining."""
        print("""Price: {price},
        Max Speed: {max_speed}
        Total Miles: {miles}""".format(price=self.price,
                                       max_speed=self.max_speed,
                                       miles=self.miles))
        return self

    def ride(self):
        """Print 'Riding' and add 10 miles; return self for chaining."""
        print("Riding")
        self.miles = self.miles + 10
        return self

    def reverse(self):
        """Print 'Reversing' and subtract 5 miles; return self for chaining."""
        print("Reversing")
        self.miles = self.miles - 5
        return self
# my_bike = Bike(100,10)
# my_bike.displayinfo().ride().ride().reverse().ride()
class Car(object):
    """A car with price, speed, fuel and mileage; tax bracket set by price."""

    def __init__(self, price, speed, fuel, mileage):
        self.price = price
        self.speed = speed
        self.fuel = fuel
        self.mileage = mileage
        # Luxury bracket: strictly above 10000 is taxed at 15%, else 12%.
        self.tax = 0.15 if price > 10000 else 0.12

    def display_all(self):
        """Print every attribute, then return self for chaining."""
        print("Price: " + str(self.price))
        print("Speed: " + str(self.speed) + "mph")
        print("Fuel: " + self.fuel)
        print("Mileage: " + str(self.mileage) + "mpg")
        print("Tax: " + str(self.tax))
        return self
#
# car1 = Car(20000,35,"Full",105)
# car1.display_all()
class Product(object):
    """A store item with a price, name, weight, brand and sale status."""

    def __init__(self, price, item_name, weight, brand):
        self.price = price
        self.item_name = item_name
        self.weight = weight
        self.brand = brand
        self.status = "for sale"  # newly created items are up for sale

    def sell(self):
        """Mark the product as sold; return self for chaining."""
        self.status = "sold"
        return self

    def add_tax(self, tax):
        """Return the price with *tax* (a fraction, e.g. 0.08) added."""
        tax_amount = self.price * tax
        return self.price + tax_amount

    def return_item(self, reason):
        """Process a return; return self for chaining.

        * 'defective'  -> status 'defective', price zeroed
        * 'unopened'   -> back on sale unchanged
        * anything else-> treated as opened: status 'used', 20% discount
        """
        if reason == "defective":
            # Unsellable: flag it and write the price off.
            self.status = "defective"
            self.price = 0
        elif reason == "unopened":
            self.status = "for sale"
        else:
            self.status = "used"
            self.price -= .20 * self.price
        return self
# Sample inventory (name, price, weight, brand) used by the commented-out
# Store demo further down this file.
cups = Product(10,"cups",1,"GE")
plates = Product(15,"plates",2,"NA")
knives = Product(2,"knives",1,"BBB")
class Store(object):
    """A store holding Product-like objects, plus an owner and a location."""

    def __init__(self):
        self.products = []   # stocked Product instances
        self.location = ""
        self.owner = ""

    def set_owner(self, owner):
        """Set the store owner; return self for chaining."""
        self.owner = owner
        return self

    def set_location(self, location):
        """Set the store location; return self for chaining.

        FIX: previously returned None, unlike every other mutator here,
        which silently broke chains such as store.set_location(x).set_owner(y).
        """
        self.location = location
        return self

    def add_product(self, product):
        """Append *product* to the inventory; return self."""
        self.products.append(product)
        return self

    def remove_product(self, product):
        """Remove the first matching *product*; return self.

        Raises ValueError when the product is not stocked (same behavior
        as the original index()+del pair, via list.remove).
        """
        self.products.remove(product)
        return self

    def inventory(self):
        """Print name/price/status for every stocked product."""
        for product in self.products:
            print("Name: {x}, Price: {y}, Status: {z}".format(
                x=product.item_name, y=product.price, z=product.status
            ))
# our_store = Store()
# our_store.add_product(cups).add_product(knives).add_product(plates)
# our_store.set_owner("corey c")
# our_store.set_owner("washington dc")
# our_store.inventory()
|
996,797 | a25d6e8cd2e875d4e758fc076575b476fb044e44 | __author__ = 'danielkershaw'
from pymongo import MongoClient
from datetime import datetime
import logging
class DB:
    """MongoDB-backed store for mined reddit threads and comments.

    Collections used (database ``reddit``):
      * threads         -- current front-page threads with their index
      * comments        -- all mined comments (unique index on 'id')
      * mq              -- thread ids currently queued for comment mining
      * his_threads     -- historic threads already mined (unique on 'id')
      * stream_threads  -- threads picked up from the live stream

    NOTE(review): get_from_history / get_from_channel /
    add_to_historic_queue reference self.channel and self.historic_channel,
    which are never assigned anywhere in this class -- they look copied
    from a RabbitMQ-backed variant and will raise AttributeError if
    called. Confirm before relying on them.
    """
    def __init__(self, connection_string):
        # Open the client and bind the collection handles used below.
        self.client = MongoClient(connection_string)
        self.db = self.client.reddit
        # Top 100 threads on the reddit front page and their indexes.
        self.threads = self.db.threads
        # Store for all the comments that are mined.
        self.comments = self.db.comments
        self.comments.create_index('id', unique=True)
        # List of what is currently in the messaging queue for comment mining.
        self.mq = self.db.mq
        # List of historic threads already mined.
        self.his_threads = self.db.his_threads
        self.his_threads.create_index('id', unique=True)
        # List of threads collected from the live stream.
        self.stream_threads = self.db.stream_threads
        self.logger = logging.getLogger('Reddit Database')
        self.logger.info('Starting Reddit Database')
    def get_from_history(self, thread_id):
        # NOTE(review): self.historic_channel / self.channel are never set
        # in __init__ -- this will raise AttributeError (see class note).
        # Consumes up to 21 bodies from the 'historic_threads' queue,
        # acking each, then cancels the consumer.
        tmp = []
        for method_frame, properties, body in self.historic_channel.consume('historic_threads'):
            tmp.append(body)
            self.channel.basic_ack(method_frame.delivery_tag)
            if len(tmp) > 20:
                break;
        requeued_messages = self.channel.cancel()
        return tmp
    def get_from_channel(self, thread_id):
        # Same caveat as get_from_history: self.channel is never assigned.
        tmp = []
        for method_frame, properties, body in self.channel.consume('comments'):
            tmp.append(body)
            self.channel.basic_ack(method_frame.delivery_tag)
            if len(tmp) > 20:
                break;
        requeued_messages = self.channel.cancel()
        return tmp
    def insert_thread(self, index, value):
        """Insert a front-page thread dict at position *index*, stamping it
        with the mining time; skipped when the id is already queued."""
        try:
            value["index"] = index
            value["mined_at"] = datetime.now().strftime("%c")
            if not self.is_in_mq(value["id"]):
                self.threads.insert(value)
        except Exception as x:
            self.logger.error(x)
    def insert_histroic_thread(self, value):
        """Upsert a historic thread and queue its id, unless already queued.
        (Name typo 'histroic' kept -- callers elsewhere depend on it.)"""
        try:
            if not self.is_in_mq(value["id"]):
                self.add_to_queue(value["id"])
                self.his_threads.update({'id': value["id"]}, value, upsert=True)
        except Exception as x:
            self.logger.error(x)
    def insert_stream_thread(self, value):
        """Upsert a streamed thread if its id was not queued yet.

        Returns the previous queued state (False means it was newly queued),
        or False when any error occurred.
        """
        try:
            tmp = self.is_in_mq(value["id"])
            if tmp == False:
                self.add_to_queue(value["id"])
                tmptmp = self.stream_threads.update({'id': value["id"]}, value, upsert=True)
            return tmp
        except Exception as x:
            self.logger.error(x)
            return False
    def add_to_queue(self, thread_id):
        """Put *thread_id* into the mining queue (delegates to in_queue)."""
        try:
            t = "{0}".format(thread_id)
            self.in_queue(thread_id)
        except Exception as x:
            self.logger.error(x)
    def get_queue(self):
        """Return a cursor over every queued id (None on error)."""
        try:
            return self.mq.find()
        except Exception as x:
            self.logger.error(x)
    def add_to_historic_queue(self, thread_id):
        """Publish *thread_id* to the 'historic_threads' queue, retrying once
        after re-initializing the channel on failure (see class NOTE)."""
        try:
            self.historic_channel.basic_publish(exchange='',
                                                routing_key='historic_threads',
                                                body=thread_id)
        except Exception as x:
            self._init_RadbbitMQhis()
            self.add_to_historic_queue(thread_id)
            self.logger.error(x)
    def insert_comment(self, value):
        """Upsert a mined comment keyed by its 'id' field."""
        try:
            self.comments.update({'id': value['id']}, value, upsert=True)
        except Exception as x:
            self.logger.error(x)
    def is_in_mq(self, thread_id):
        """Return True when *thread_id* is in the mining queue (None on error)."""
        try:
            if self.mq.find_one({'id': thread_id}) != None:
                return True
            else:
                return False
        except Exception as x:
            self.logger.error(x)
    def remove_from_queue(self, thread_id):
        """Drop *thread_id* from the mining queue."""
        try:
            self.mq.remove({'id': thread_id})
        except Exception as x:
            self.logger.error(x)
    def in_queue(self, thread_id):
        """Upsert *thread_id* into the mining queue collection."""
        try:
            self.mq.update({'id': thread_id}, {'id': thread_id}, upsert=True)
        except Exception as x:
            self.logger.error(x)
|
996,798 | 9a4008877c0f27b7cd949092c0776ec7c89a036c | import numpy as np
from matplotlib import pyplot as plt
import cv2
def dft2D(f):
    """2-D DFT of *f*, built from 1-D FFTs: rows first, then columns."""
    result = np.array(f, dtype=complex)   # complex working copy
    for row in range(result.shape[0]):
        result[row] = np.fft.fft(result[row])
    for col in range(result.shape[1]):
        result[:, col] = np.fft.fft(result[:, col])
    return result
def idft2D(F):
    """Inverse 2-D DFT via the conjugation identity:
    IDFT(F) = conj(DFT(conj(F))) / (M * N)."""
    size = F.shape[0] * F.shape[1]
    transformed = dft2D(F.conjugate()) / size
    return transformed.conjugate()
# Map the data linearly onto [0, 255]
def map2Uint8(f):
    """Linearly rescale an array so its values span [0, 255].

    FIXES over the original:
      * works on a float copy, so the caller's array is no longer mutated
        in place and integer inputs no longer fail on in-place true division;
      * a constant input returns all zeros instead of dividing by zero.
    Returns a float array (not actually uint8, matching the original).
    """
    out = np.asarray(f, dtype=float).copy()
    out -= out.min()          # shift the minimum to 0
    peak = out.max()
    if peak > 0:              # guard: constant input would divide by zero
        out /= peak
    out *= 255
    return out
# Center the spectrum (move the DC component to the middle)
def centered_spectrum(F):
    """Swap diagonal quadrants of *F* so the DC term moves to the center.

    Matches the original element-by-element loop exactly: for odd
    dimensions the middle row/column is left at zero (unlike fftshift).
    """
    rows, cols = F.shape
    half_r, half_c = rows // 2, cols // 2
    shifted = np.zeros_like(F)
    shifted[:half_r, :half_c] = F[half_r:2 * half_r, half_c:2 * half_c]  # UL <- LR
    shifted[half_r:2 * half_r, half_c:2 * half_c] = F[:half_r, :half_c]  # LR <- UL
    shifted[:half_r, half_c:2 * half_c] = F[half_r:2 * half_r, :half_c]  # UR <- LL
    shifted[half_r:2 * half_r, :half_c] = F[:half_r, half_c:2 * half_c]  # LL <- UR
    return shifted
if __name__ == "__main__":
    # Part 1: round-trip a grayscale image through dft2D/idft2D and
    # compare against numpy's reference FFT implementation.
    f = cv2.imread('rose512.tif', cv2.IMREAD_GRAYSCALE)
    f = f / 255.
    F = dft2D(f)
    g = idft2D(F)
    npF = np.fft.fft2(f)
    npf = np.fft.ifft2(npF)
    # (Printed labels are user-facing strings and are left as-is:
    # "difference between forward transform and numpy's" / "...inverse...".)
    print('正变换与numpy中函数的差:')
    print(F - npF)
    print('逆变换与numpy中函数的差:')
    print(f - npf)
    # Display the reconstruction error image (should be near-black).
    plt.imshow(f * 255 - map2Uint8(np.abs(g)), cmap='gray', vmin=0, vmax=255)
    plt.axis('off')
    plt.show()
    # Part 2: spectrum of a small white rectangle on a black background.
    f = np.zeros((512, 512), dtype=np.uint8)
    f[226:285, 251:260] = 1
    F = dft2D(f)  # Fourier transform
    fShift = centered_spectrum(F)  # move the DC component to the center
    result = np.log(1 + np.abs(fShift))  # log scaling for display
    # plt.imshow() maps a numeric array directly to a grayscale image
    plt.subplot(221), plt.imshow(
        f * 255, cmap='gray'), plt.title('original'), plt.axis('off')
    plt.subplot(222), plt.imshow(np.abs(F), cmap='gray'), plt.title(
        'spectrum'), plt.axis('off')
    plt.subplot(223), plt.imshow(np.abs(fShift), cmap='gray'), plt.title(
        'centered spectrum'), plt.axis('off')
    plt.subplot(224), plt.imshow(result, cmap='gray'), plt.title(
        'log transform'), plt.axis('off')
    plt.show()
|
996,799 | 24f7ac16b4fb9c32e18508d4ea54c3e25e23d77e | # 递归函数调用
# Define the factorial function
def fact(n):
    """Return n! computed recursively.

    FIXES: removed the unused `sum = 1` (which shadowed the builtin), and
    widened the base case to n <= 1 so fact(0) == 1 instead of recursing
    forever. fact(n) for n >= 1 is unchanged.
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)
# 当直接通过python调用该脚本的时候, __name__ == __main__, 作为模块调用的时候则不会
if __name__ == '__main__':
print(fact); |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.