index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,800 | c8a08f01db7e9602b0d74765c66593979883e92d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from Api import settings
from status.managers import StatusManager
from status.utils import upload_update_image
class Status(models.Model):
    """A user status post: optional text content and/or image."""
    user = models.ForeignKey(User)
    content = models.TextField(blank=True, null=True)
    image = models.ImageField(upload_to=upload_update_image, blank=True, null=True)
    timestamp = models.DateTimeField(auto_now_add=True)  # set once, at creation
    # Bug fix: was auto_now_add=True, which froze "updated" at creation time.
    # auto_now=True refreshes it on every save(), matching the field's name.
    updated = models.DateTimeField(auto_now=True)
    objects = StatusManager()

    def __str__(self):
        # Truncated preview of the text, or a placeholder for image-only posts.
        return str(self.content)[:50] if self.content else '---- ----'

    class Meta:
        verbose_name = 'Status post'
        verbose_name_plural = "Status posts"
n = int(input('Enter the number of integer you want to insert in the list::'))
lis = []
for i in range(n):
    lis.append(int(input('Enter the element for the sort in assending order::')))
l = len(lis)
# Selection-style in-place sort, ASCENDING.
# Bug fix: the original compared with "<" (and so sorted in descending
# order) even though the prompt promises ascending; ">" is correct.
# The useless alias "t = lis" (a reference, not a copy) was removed.
for i in range(l):
    for j in range(i, l):
        if lis[i] > lis[j]:
            lis[i], lis[j] = lis[j], lis[i]
print(lis)
|
998,802 | a4229421245aa5dfe4e7a20804292a67c3d53599 | '''
Created on 6 feb 2012
@author: Batch
'''
import xbmc, xbmcaddon, xbmcgui, xbmcplugin
from common import notification, get_url, regex_get_all, regex_from_to, create_directory, write_to_file, read_from_file, clean_file_name
from datetime import date, timedelta
import urllib, os, sys, re
import shutil
from furk import FurkAPI
from mediahandler import play, download, download_and_play, set_resolved_url
from meta import TheTVDBInfo, set_movie_meta, download_movie_meta, set_tv_show_meta, download_tv_show_meta, meta_exist
from threading import Thread
# --- Addon identity, working paths and fixed IMDB query parameters ----
ADDON = xbmcaddon.Addon(id='plugin.video.whatthefurk')
# All writable state lives under the addon's profile data directory.
DATA_PATH = os.path.join(xbmc.translatePath('special://profile/addon_data/plugin.video.whatthefurk'), '')
CACHE_PATH = create_directory(DATA_PATH, "cache")
COOKIE_JAR = os.path.join(DATA_PATH, "cookiejar.lwp")
# NOTE(review): "subsciption" is misspelled, but it is the on-disk file
# name -- renaming it would orphan existing users' subscription lists.
SUBSCRIPTION_FILE = os.path.join(DATA_PATH, "subsciption.list")
SEARCH_FILE = os.path.join(DATA_PATH, "search.list")
DOWNLOAD_PATH = create_directory(DATA_PATH, "download")
META_PATH = create_directory(DATA_PATH, "meta")
FURK_FILTER = 'cached'  # restrict Furk searches to already-cached files
# IMDB mobile search endpoint plus its fixed query parameters.
IMDB_TITLE_SEARCH = "http://m.imdb.com/search/title?"
COUNT = "100" #Max 100
HAS = "asin-dvd-us" #To only show movies released on DVD
PRODUCTION_STATUS = "released"
SORT = "user_rating" #alpha/user_rating/num_votes/year/release_date_us/boxoffice_gross_us/moviemeter,desc
VIEW = "simple"
# --- Settings read once at startup ------------------------------------
# The range filters are "from,to" strings for the IMDB search.
# Bug fix: the original wrote  "%s,%s" (a, b)  -- missing the % operator --
# which always raised TypeError and silently fell into the except default,
# so the configured ranges were never applied.
if ADDON.getSetting('release_date') == "true":
    try:
        from_date = ADDON.getSetting('release_date_from')
        to_date = ADDON.getSetting('release_date_to')
        RELEASE_DATE = "%s,%s" % (from_date, to_date)
    except:
        RELEASE_DATE = ","
else:
    RELEASE_DATE = ","
if ADDON.getSetting('user_rating') == "true":
    try:
        rating_min = ADDON.getSetting('user_rating_min')
        rating_max = ADDON.getSetting('user_rating_max')
        USER_RATING = "%s,%s" % (rating_min, rating_max)
    except:
        USER_RATING = ","
else:
    USER_RATING = ","
if ADDON.getSetting('number_of_votes') == "true":
    try:
        votes_min = ADDON.getSetting('number_of_votes_min')
        votes_max = ADDON.getSetting('number_of_votes_max')
        NUM_VOTES = "%s,%s" % (votes_min, votes_max)
    except:
        NUM_VOTES = "10000,"
else:
    NUM_VOTES = "10000,"
# The setting stores "pages - 1"; each IMDB page holds 100 results.
IMDB_RESULTS = (int(ADDON.getSetting('number_of_results')) + 1) * 100
# Boolean feature toggles (the verbose if/else ladders collapsed).
FURK_ACCOUNT = ADDON.getSetting('furk_account') == "true"
LIBRARY_MODE = ADDON.getSetting('library_mode') == "true"
UNICODE_INDICATORS = ADDON.getSetting('use_unicode_indicators') == "true"
DOWNLOAD_META = ADDON.getSetting('download_meta') == "true"
if ADDON.getSetting('movies_custom_directory') == "true":
    MOVIES_PATH = ADDON.getSetting('movies_directory')
else:
    MOVIES_PATH = create_directory(DATA_PATH, "movies")
if ADDON.getSetting('tv_shows_custom_directory') == "true":
    TV_SHOWS_PATH = ADDON.getSetting('tv_shows_directory')
else:
    TV_SHOWS_PATH = create_directory(DATA_PATH, "tv shows")
FIRST_TIME_STARTUP = ADDON.getSetting('first_time_startup') == "true"
PLAY_MODE = 'stream'
FURK = FurkAPI(COOKIE_JAR)
def login_at_furk():
    """Log in to Furk.net with the credentials stored in the settings.

    Returns True on success, False when no account is configured or the
    login failed.  Bug fix: the failure branch previously fell off the
    end of the function and returned None implicitly; now it returns
    False explicitly after showing the error dialog (callers only test
    truthiness, so this is backward compatible).
    """
    if not FURK_ACCOUNT:
        return False
    furk_user = ADDON.getSetting('furk_user')
    furk_pass = ADDON.getSetting('furk_pass')
    if FURK.login(furk_user, furk_pass):
        return True
    dialog = xbmcgui.Dialog()
    dialog.ok("Login failed", "The addon failed to login at Furk.net.", "Make sure you have confirmed your email and your", "login information is entered correctly in addon-settings")
    return False
def download_meta_zip():
    """Let the user pick a metadata image package, then download and extract it.

    Downloads the chosen images zip plus a fixed data zip into
    DOWNLOAD_PATH and extracts both into META_PATH.  Stores the chosen
    quality level in the 'meta_quality' setting.
    """
    # Index 0 = "don't download"; indexes 1-4 map to quality levels.
    # NOTE(review): index 4 ("maximum quality") points at medium.zip --
    # looks like a wrong URL, but the correct one is unknown; confirm.
    menu_data = ["",
                 "http://wtf.gosub.dk/low.zip",
                 "http://wtf.gosub.dk/medium.zip",
                 "http://wtf.gosub.dk/high.zip",
                 "http://wtf.gosub.dk/medium.zip"]
    menu_texts = ["Don't download",
                  "Download low quality images [123MB]",
                  "Download mid quality images [210MB]",
                  "Download high quality images [508MB]",
                  "Download maximum quality images [722MB]"]
    data_url = "http://wtf.gosub.dk/data-338438.zip"
    dialog = xbmcgui.Dialog()
    menu_id = dialog.select('Select file', menu_texts)
    if menu_id < 1:
        # Cancelled (-1) or "Don't download" (0).
        return
    ADDON.setSetting('meta_quality', value=str(menu_id + 1))
    try:
        pDialog = xbmcgui.DialogProgress()
        pDialog.create('Searching for files')
        meta_url = menu_data[menu_id]
        xbmc.log("[What the Furk] Downloading meta...")
        meta_path = os.path.join(DOWNLOAD_PATH, "meta.zip")
        download(meta_url, meta_path, pDialog)
        xbmc.log("[What the Furk] Extracting meta...")
        xbmc.executebuiltin("XBMC.Extract(%s , %s)" % (meta_path, META_PATH))
        xbmc.log("[What the Furk] ...done!")
        data_path = os.path.join(DOWNLOAD_PATH, "data.zip")
        download(data_url, data_path, pDialog)
        xbmc.executebuiltin("XBMC.Extract(%s , %s)" % (data_path, META_PATH))
        xbmc.log("[What the Furk] All done!")
    except:
        # Any network/extract failure ends with a single error dialog.
        dialog.ok("Setup meta data", "Unable to reach the host server.")
def register_account():
    """Interactively register a new Furk.net account.

    Prompts for username, password and e-mail via the XBMC keyboard,
    submits them to the Furk API and stores the credentials in the
    addon settings on success.  Returns True on success, False when any
    prompt is cancelled; recurses to retry when Furk reports errors.
    """
    keyboard = xbmc.Keyboard('', 'Username')
    keyboard.doModal()
    username = None
    if keyboard.isConfirmed():
        username = keyboard.getText()
    if username == None:
        return False
    password = None
    keyboard = xbmc.Keyboard('', 'Password')
    keyboard.doModal()
    if keyboard.isConfirmed():
        password = keyboard.getText()
    if password == None:
        return False
    email = None
    keyboard = xbmc.Keyboard('', 'E-mail')
    keyboard.doModal()
    if keyboard.isConfirmed():
        email = keyboard.getText()
    if email == None:
        return False
    dialog = xbmcgui.Dialog()
    # reg() takes the password twice (password + confirmation).
    response = FURK.reg(username, password, password, email)
    if response['status'] == 'ok':
        ADDON.setSetting('furk_user', value=username)
        ADDON.setSetting('furk_pass', value=password)
        dialog.ok("Registration", "Registration formula completed.", "In order to complete the registration you need to", "click the confirmation link sent to your email.")
        return True
    else:
        # Show each server-side validation error, then retry from scratch.
        errors = response['errors']
        for key in errors.keys():
            dialog.ok("Registration error", "%s: %s" % (key, errors[key]))
        return register_account()
def get_subscriptions():
    """Rebuild .strm library files for every entry in the subscription file.

    Each line is "name<TAB>value": value starting with 'tt' is a TV-show
    IMDB id, anything else is a menu mode whose items are expanded into
    movie .strm files.  Failures are logged, never raised.
    """
    try:
        content = read_from_file(SUBSCRIPTION_FILE)
        lines = content.split('\n')
        for line in lines:
            data = line.split('\t')
            if len(data) == 2:
                if data[1].startswith('tt'):
                    # TV-show subscription: second field is the IMDB id.
                    tv_show_name = data[0]
                    tv_show_imdb = data[1]
                    tv_show_mode = "strm tv show dialog"
                    create_tv_show_strm_files(tv_show_name, tv_show_imdb, tv_show_mode, TV_SHOWS_PATH)
                else:
                    # Menu subscription: second field is a menu mode.
                    # Bug fix: "name" was previously never assigned here, so this
                    # branch always died with a NameError that the bare except
                    # swallowed -- menu subscriptions silently did nothing.
                    name = data[0]
                    mode = data[1]
                    items = get_menu_items(name, mode, "", "")
                    for (url, li, isFolder) in items:
                        paramstring = url.replace(sys.argv[0], '')
                        params = get_params(paramstring)
                        movie_name = urllib.unquote_plus(params["name"])
                        # NOTE(review): movie_data mirrors params["name"], not
                        # params["data"] -- looks suspicious but kept as-is;
                        # confirm against create_strm_file/create_url usage.
                        movie_data = urllib.unquote_plus(params["name"])
                        movie_imdb = urllib.unquote_plus(params["imdb_id"])
                        movie_mode = "strm movie dialog"
                        create_strm_file(movie_name, movie_data, movie_imdb, movie_mode, MOVIES_PATH)
    except:
        xbmc.log("[What the Furk] Failed to fetch subscription")
def subscription_index(name, mode):
    """Return the line index of (name, mode) in the subscription file, or -1."""
    wanted = '%s\t%s' % (name, mode)
    try:
        entries = read_from_file(SUBSCRIPTION_FILE).split('\n')
        return entries.index(wanted)
    except:
        return -1  # not subscribed (or file missing/unreadable)
def subscribe(name, mode):
    """Append (name, mode) to the subscription file unless already present."""
    if subscription_index(name, mode) < 0:
        entry = '%s\t%s\n' % (name, mode)
        write_to_file(SUBSCRIPTION_FILE, entry, append=True)
def unsubscribe(name, mode):
    """Remove (name, mode) from the subscription file, if subscribed."""
    index = subscription_index(name, mode)
    if index < 0:
        return
    lines = read_from_file(SUBSCRIPTION_FILE).split('\n')
    lines.pop(index)
    # Rewrite the file keeping only the remaining non-empty entries.
    remaining = ''.join('%s\n' % entry for entry in lines if len(entry) > 0)
    write_to_file(SUBSCRIPTION_FILE, remaining)
def find_search_query(query):
    """Return the line index of query in the search-history file, or -1."""
    try:
        history = read_from_file(SEARCH_FILE).split('\n')
        return history.index(query)
    except:
        return -1  # not found (or file missing/unreadable)
def add_search_query(query):
    """Prepend query to the search-history file unless already recorded."""
    if find_search_query(query) >= 0:
        return
    content = read_from_file(SEARCH_FILE) if os.path.isfile(SEARCH_FILE) else ""
    # Newest query first, followed by every existing non-empty entry.
    entries = [query] + [ln for ln in content.split('\n') if len(ln) > 0]
    write_to_file(SEARCH_FILE, ''.join('%s\n' % e for e in entries))
def remove_search_query(query):
    """Delete query from the search-history file, if recorded."""
    index = find_search_query(query)
    if index < 0:
        return
    lines = read_from_file(SEARCH_FILE).split('\n')
    lines.pop(index)
    # Rewrite the file keeping only the remaining non-empty entries.
    remaining = ''.join('%s\n' % entry for entry in lines if len(entry) > 0)
    write_to_file(SEARCH_FILE, remaining)
def create_strm_file(name, data, imdb_id, mode, dir_path):
    """Write a .strm file in dir_path containing a plugin callback URL.

    The .strm content is the plugin URL that re-invokes this addon with
    the given mode/data/imdb_id.  Failures are logged, never raised.
    """
    try:
        strm_string = create_url(name, mode, data=data, imdb_id=imdb_id)
        filename = clean_file_name("%s.strm" % name)
        path = os.path.join(dir_path, filename)
        # Bug fix: use a context manager so the handle is closed even if
        # write() raises (the original leaked it in that case).
        with open(path, 'w') as stream_file:
            stream_file.write(strm_string)
    except:
        xbmc.log("[What the Furk] Error while creating strm file for : " + name)
def create_tv_show_strm_files(name, imdb_id, mode, dir_path):
    """Create one .strm file per already-aired episode of a TV show.

    Looks the show up on TheTVDB and writes
    dir_path/<name>/<season>/<[SxxEyy] episode>.strm for every regular
    (season > 0) episode whose air date lies in the past.
    """
    info = TheTVDBInfo(imdb_id)
    episodes = info.episodes()
    tv_show_path = create_directory(dir_path, name)
    for episode in episodes:
        first_aired = episode.FirstAired()
        if len(first_aired) > 0:
            # FirstAired is "YYYY-MM-DD"; only keep episodes already aired.
            d = first_aired.split('-')
            episode_date = date(int(d[0]), int(d[1]), int(d[2]))
            if date.today() > episode_date:
                season_number = int(episode.SeasonNumber())
                if season_number > 0:  # season 0 = specials, skipped
                    episode_number = int(episode.EpisodeNumber())
                    episode_name = episode.EpisodeName()
                    display = "[S%.2dE%.2d] %s" % (season_number, episode_number, episode_name)
                    # "<|>"-separated payload decoded later by episode_dialog().
                    data = '%s<|>%s<|>%d<|>%d' % (name, episode_name, season_number, episode_number)
                    season_path = create_directory(tv_show_path, str(season_number))
                    create_strm_file(display, data, imdb_id, mode, season_path)
def remove_strm_file(name, dir_path):
    """Delete name's .strm file from dir_path; log (never raise) on failure."""
    try:
        strm_name = "%s.strm" % (clean_file_name(name, use_blanks=False))
        os.remove(os.path.join(dir_path, strm_name))
    except:
        xbmc.log("[What the Furk] Was unable to remove movie: %s" % (name))
def remove_tv_show_strm_files(name, dir_path):
    """Delete a show's whole .strm directory tree; log (never raise) on failure."""
    try:
        shutil.rmtree(os.path.join(dir_path, name))
    except:
        xbmc.log("[What the Furk] Was unable to remove TV show: %s" % (name))
def check_sources_xml(path):
    """Return True if path is already referenced in XBMC's sources.xml.

    Returns False when the file cannot be read or the path is absent.
    """
    try:
        source_path = os.path.join(xbmc.translatePath('special://profile/'), 'sources.xml')
        # Bug fix: use a context manager so the handle is closed even if
        # read() raises (the original leaked it in that case).
        with open(source_path, 'r') as f:
            content = f.read()
        # Backslashes are doubled so Windows paths survive the regex search.
        # NOTE(review): other regex metacharacters in the path are NOT
        # escaped; re.escape() would be stricter but changes matching.
        path = str(path).replace('\\', '\\\\')
        if re.search(path, content):
            return True
    except:
        xbmc.log("[What the Furk] Could not find sources.xml!")
    return False
def setup_sources():
    """Insert the addon's movie and TV-show folders into XBMC's sources.xml.

    Rewrites the <video> section, appending a <source> entry for
    MOVIES_PATH and/or TV_SHOWS_PATH when not already listed, then tells
    the user to restart XBMC and set the content type of the sources.
    Failures are logged, never raised.
    """
    xbmc.log("[What the Furk] Trying to add source paths...")
    source_path = os.path.join(xbmc.translatePath('special://profile/'), 'sources.xml')
    try:
        f = open(source_path, 'r')
        content = f.read()
        f.close()
        # Split the document just before </video>: group(1) is everything up
        # to the last existing video source, group(2) is the document tail.
        r = re.search("(?i)(<sources>[\S\s]+?<video>[\S\s]+?>)\s+?(</video>[\S\s]+?</sources>)", content)
        new_content = r.group(1)
        if not check_sources_xml(MOVIES_PATH):
            new_content += '<source><name>Movies (What the Furk)</name><path pathversion="1">'
            new_content += MOVIES_PATH
            new_content += '</path></source>'
        if not check_sources_xml(TV_SHOWS_PATH):
            new_content += '<source><name>TV Shows (What the Furk)</name><path pathversion="1">'
            new_content += TV_SHOWS_PATH
            new_content += '</path></source>'
        new_content += r.group(2)
        f = open(source_path, 'w')
        f.write(new_content)
        f.close()
        dialog = xbmcgui.Dialog()
        dialog.ok("Source folders added", "To complete the setup:", " 1) Restart XBMC.", " 2) Set the content type of added sources.")
        #if dialog.yesno("Restart now?", "Do you want to restart XBMC now?"):
        #xbmc.restart()
    except:
        xbmc.log("[What the Furk] Could not edit sources.xml")
#Scrape
def search_imdb(params):
    """Collect paginated IMDB search results (100 per page) up to IMDB_RESULTS."""
    movies = []
    for start in range(0, IMDB_RESULTS, 100):
        page = title_search(params, str(start))
        movies.extend(get_imdb_search_result(page))
    return movies
def title_search(params, start="1"):
    """Fetch one page of the IMDB mobile title search as raw HTML."""
    params["count"] = COUNT
    params["has"] = HAS
    params["view"] = VIEW
    params["num_votes"] = NUM_VOTES
    params["user_rating"] = USER_RATING
    params["start"] = start
    query = ''.join("%s=%s&" % (key, params[key]) for key in params)
    # Cached for a day; the cache must be cleared for filter changes to apply.
    return get_url(IMDB_TITLE_SEARCH + query, cache=CACHE_PATH, cache_time=86400)
def get_imdb_search_result(body):
    """Scrape one IMDB search-result page into a list of movie dicts."""
    movies = []
    for row in regex_get_all(body, '<tr class=', '</tr>'):
        cells = regex_get_all(row, '<td', '</td>')
        movies.append({
            'imdb_id': regex_from_to(cells[1], '/title/', '/'),
            'name': regex_from_to(cells[1], '/">', '</a>'),
            'year': regex_from_to(cells[1], '<span class="year_type">\(', '\)'),
            'rating': regex_from_to(cells[2], '<b>', '</b>'),
            'votes': regex_from_to(cells[3], '\n', '\n'),
        })
    return movies
def scrape_xspf(body):
    """Parse an XSPF playlist body into [{'name': ..., 'location': ...}, ...]."""
    return [{'name': regex_from_to(track, '<title>', '</title>'),
             'location': regex_from_to(track, '<location>', '</location>')}
            for track in regex_get_all(body, '<track>', '</track>')]
def execute_video(name, url, list_item, strm=False):
    """Start playback of url according to the global PLAY_MODE.

    strm=True means the call originated from a library .strm file, which
    must resolve through the plugin handle instead of playing directly.
    """
    streaming = (PLAY_MODE == 'stream')
    if streaming and strm:
        set_resolved_url(int(sys.argv[1]), name, url)
    elif streaming:
        play(name, url, list_item)
    elif PLAY_MODE == 'download and play' and strm:
        download_and_play(name, url, play=True, handle=int(sys.argv[1]))
    elif PLAY_MODE == 'download and play':
        download_and_play(name, url, play=True)
def get_items_in_dir(path):
    """List subdirectory names and .strm basenames (recursively) under path."""
    items = []
    for _dirpath, dirnames, filenames in os.walk(path):
        items.extend(dirnames)
        items.extend(fn[:-5] for fn in filenames if fn.endswith(".strm"))
    return items
def exist_in_dir(name, path, isMovie=False):
    """True if an entry called name (name.strm for movies) exists in path."""
    target = "%s.strm" % name if isMovie else name
    for entry in os.listdir(path):
        if entry == target:
            return True
    return False
#Menu
def setup():
    """One-time first-run wizard: account, metadata package and library sources.

    Runs only while the 'first_time_startup' setting is true; clears the
    flag at the end so it never runs again.
    """
    if FIRST_TIME_STARTUP:
        dialog = xbmcgui.Dialog()
        if not FURK_ACCOUNT:
            # yesno(): True = "Create new account", False = "Use existing account".
            if dialog.yesno("Setup account", "This addon requires a Furk.net account.", "What do you want to do?", '', "Use existing account", "Create new account"):
                if not register_account():
                    dialog.ok("Setup account", "Account registation aborted.")
                    dialog.ok("Missing information", "You need to write down your Furk.net", "login information in the addon-settings.")
                    ADDON.openSettings()
            else:
                dialog.ok("Missing information", "You need to write down your Furk.net", "login information in the addon-settings.")
                ADDON.openSettings()
        if dialog.yesno("Setup metadata", "This addon supports the use of metadata,", "this data can be pre-downloaded.", "Do you want to download a metadata package?"):
            download_meta_zip()
        if dialog.yesno("Setup metadata", "This addon can download metadata while you", "are browsing movie and TV show categories.", "Do you want to activate this feature?"):
            ADDON.setSetting('download_meta', value='true')
        else:
            ADDON.setSetting('download_meta', value='false')
        if not check_sources_xml(MOVIES_PATH) or not check_sources_xml(TV_SHOWS_PATH):
            if dialog.yesno("Setup folder", "The directories used are not listed as video sources.", "Do you want to add them to sources.xml now?"):
                setup_sources()
        ADDON.setSetting('first_time_startup', value='false')
def main_menu():
    """Build the top-level directory: category entries plus optional Subscriptions."""
    entries = [
        ('All movies', 'all movies menu', True),
        ('Movies by genre', 'movie genres menu', False),
        ('New movies', 'new movies menu', True),
        ('All TV shows', 'all tv shows menu', False),
        ('TV shows by genre', 'tv show genres menu', False),
        ('Active TV shows', 'active tv shows menu', False),
        ('Search', 'search menu', False),
    ]
    items = [create_item_tuple(label, menu_mode, isSubscribable=sub)
             for (label, menu_mode, sub) in entries]
    if LIBRARY_MODE and SUBSCRIPTION_FILE:
        items.append(create_item_tuple('Subscriptions', 'subscription menu'))
    return items
def movies_all_menu():
    """IMDB search for all released movies/documentaries, default sort."""
    query = {"release_date": RELEASE_DATE,
             "sort": SORT,
             "title_type": "feature,documentary",
             "production_status": PRODUCTION_STATUS}
    return create_movie_items(search_imdb(query))
def movies_genres_menu():
    """One subscribable folder item per supported movie genre."""
    genres = ['Action', 'Adventure', 'Animation', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Family',
              'Fantasy', 'History', 'Horror', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
    return [create_item_tuple(genre, 'movie genre menu', isSubscribable=True)
            for genre in genres]
def movies_genre_menu(genre):
    """IMDB search for released movies/documentaries of one genre."""
    query = {"release_date": RELEASE_DATE,
             "sort": SORT,
             "title_type": "feature,documentary",
             "genres": genre,
             "production_status": PRODUCTION_STATUS}
    return create_movie_items(search_imdb(query))
def movies_new_menu():
    """Movies released within the last year, newest first."""
    cutoff = date.today() - timedelta(days=365)
    query = {"release_date": "%s," % cutoff,
             "sort": "release_date_us,desc",
             "title_type": "feature,documentary",
             "production_status": PRODUCTION_STATUS}
    return create_movie_items(search_imdb(query))
def tv_shows_all_menu():
    """IMDB search for all released TV series, default sort."""
    query = {"release_date": RELEASE_DATE,
             "sort": SORT,
             "title_type": "tv_series",
             "production_status": PRODUCTION_STATUS}
    return create_tv_show_items(search_imdb(query))
def tv_shows_genres_menu():
    """One subscribable folder item per supported TV-show genre."""
    genres = ['Action', 'Adventure', 'Animation', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Family',
              'Fantasy', 'History', 'Horror', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
    return [create_item_tuple(genre, 'tv show genre menu', isSubscribable=True)
            for genre in genres]
def tv_shows_genre_menu(genre):
    """IMDB search for released TV series of one genre."""
    query = {"release_date": RELEASE_DATE,
             "sort": SORT,
             "title_type": "tv_series",
             "genres": genre,
             "production_status": PRODUCTION_STATUS}
    return create_tv_show_items(search_imdb(query))
def tv_shows_active_menu():
    """IMDB search for TV series that are still in production."""
    query = {"production_status": "active",
             "sort": SORT,
             "title_type": "tv_series"}
    return create_tv_show_items(search_imdb(query))
def tv_shows_seasons_menu(name, imdb_id):
    # Stub: season browsing is not implemented; only prints an empty line
    # (Python 2 print statement) and implicitly returns None, so the
    # "seasons menu" mode yields no directory items.
    print ""
def tv_shows_episodes_menu(name, imdb_id):
    """Build directory items for every already-aired episode of a TV show.

    name is expected to look like "Show Name (year)"; the trailing
    "(...)" part is stripped before building the episode payloads.
    """
    items = []
    info = TheTVDBInfo(imdb_id)
    episodes = info.episodes()
    # Strip the " (year)" suffix: text before '(' minus the trailing space.
    name = name.split('(')[0][:-1]
    for episode in episodes:
        first_aired = episode.FirstAired()
        if len(first_aired) > 0:
            # FirstAired is "YYYY-MM-DD"; only keep episodes already aired.
            d = first_aired.split('-')
            episode_date = date(int(d[0]), int(d[1]), int(d[2]))
            if date.today() > episode_date:
                season_number = int(episode.SeasonNumber())
                if season_number > 0:  # season 0 = specials, skipped
                    episode_number = int(episode.EpisodeNumber())
                    episode_name = episode.EpisodeName()
                    cleaned_name = clean_file_name(episode_name, use_blanks=False)
                    display = "(S%.2dE%.2d) %s" % (season_number, episode_number, cleaned_name)
                    # "<|>"-separated payload decoded later by episode_dialog().
                    data = "%s<|>%s<|>%d<|>%d" % (name, episode_name, season_number, episode_number)
                    (url, li, isFolder) = create_item_tuple(display, 'episode dialog', data=data, imdb_id=imdb_id)
                    li = set_tv_show_meta(li, imdb_id, META_PATH)
                    li.setInfo('video', {'title': display})
                    items.append((url, li, isFolder)) #items.append((url, li, False))
    return items
def subscription_menu():
    """List subscribed items; activating an entry unsubscribes it."""
    items = []
    for entry in read_from_file(SUBSCRIPTION_FILE).split('\n'):
        if len(entry) < 3:
            break  # stop at the first blank/short line (end of list)
        fields = entry.split('\t')
        label = '%s [%s]' % (fields[0], fields[1])
        items.append(create_item_tuple(label, 'unsubscribe', data=fields[1], isFolder=False))
    return items
def search_menu():
    """'New search' entry followed by the saved search history."""
    items = [create_item_tuple('@Search...', 'manual search')]
    if os.path.isfile(SEARCH_FILE):
        for query in read_from_file(SEARCH_FILE).split('\n'):
            items.append(create_item_tuple(query, 'manual search', data=query))
    return items
def manual_search(query):
    """Prompt for a search string (pre-filled unless query is the '@' placeholder)."""
    initial = '' if query.startswith('@') else query
    keyboard = xbmc.Keyboard(initial, 'Search')
    keyboard.doModal()
    if not keyboard.isConfirmed():
        return
    text = keyboard.getText()
    if len(text) > 0:
        add_search_query(text)
        movie_dialog(text)
def episode_dialog(data, imdb_id=None, strm=False):
    """Search Furk for a TV episode, let the user pick a file and play it.

    data is the "<|>"-separated payload produced by the episode menus:
    show name, episode name, season number, episode number.  When strm
    is True the call came from a library .strm file, so cancellation or
    failure must resolve to a dummy item instead of simply returning.
    """
    dialog = xbmcgui.Dialog()
    # yesno(): True = "Regular search" (also scans season/show playlists).
    open_playlists = dialog.yesno("Seach alternatives", "What search routine should be done?",
                                  "Regular search: Slow, but finds all results.",
                                  "Fast search: Fast but finds less files.",
                                  "Fast search", "Regular search")
    data = data.split('<|>')
    tv_show_name = data[0]
    episode_name = data[1]
    season_number = int(data[2])
    episode_number = int(data[3])
    season_episode = "s%.2de%.2d" % (season_number, episode_number)
    season_episode2 = "%d%.2d" % (season_number, episode_number)  # e.g. "102"
    tv_show_season = "%s season" % (tv_show_name)
    tv_show_episode = "%s %s" % (tv_show_name, season_episode)
    # A track matches when its name contains any of these (normalized) strings.
    track_filter = [episode_name, season_episode, season_episode2]
    files = []
    files.extend(search(tv_show_episode, limit='25'))
    if open_playlists:
        # Regular search: also look through whole-season and whole-show results.
        files.extend(search(tv_show_season, limit='10'))
        files.extend(search(tv_show_name, limit='10'))
    files = remove_list_duplicates(files)
    pDialog = xbmcgui.DialogProgress()
    pDialog.create('Searching for files')
    tracks = []
    count = 0
    for f in files:
        count = count + 1
        percent = int(float(count * 100) / len(files))
        text = "%s files found" % len(tracks)
        pDialog.update(percent, text)
        if pDialog.iscanceled():
            # User aborted: close the progress dialog and bail out.
            pDialog.close()
            if strm:
                set_resolved_to_dummy()
            return
        if f.av_result == "ok" and f.type == "video":
            new_tracks = filter_playlist_tracks(get_playlist_tracks(f, open_playlists=open_playlists), track_filter)
            tracks.extend(new_tracks)
    pDialog.close()
    (url, name) = track_dialog(tracks)
    if not url or not name:
        # Nothing found or nothing chosen.
        if strm:
            set_resolved_to_dummy()
        return
    li = xbmcgui.ListItem(clean_file_name(episode_name))
    li = set_tv_show_meta(li, imdb_id, META_PATH)
    execute_video(name, url, li, strm)
def movie_dialog(data, imdb_id=None, strm=False):
    """Search Furk for a movie, let the user pick a file and play it.

    data is the display name used as the search query.  When strm is
    True the call came from a library .strm file, so a failed selection
    resolves to a dummy item instead of simply returning.
    """
    dialog = xbmcgui.Dialog()
    # yesno(): True = "Regular search" (also opens playlist files).
    open_playlists = dialog.yesno("Seach alternatives", "What search routine should be done?",
                                  "Regular search: Slow, but finds all results.",
                                  "Fast search: Fast but finds less files.",
                                  "Fast search", "Regular search")
    files = search(data, limit='25')
    pDialog = xbmcgui.DialogProgress()
    pDialog.create('Searching for files')
    tracks = []
    count = 0
    for f in files:
        count = count + 1
        percent = int(float(count * 100) / len(files))
        text = "%s files found" % len(tracks)
        pDialog.update(percent, text)
        if f.type == "video":
            new_tracks = get_playlist_tracks(f, open_playlists=open_playlists)
            tracks.extend(new_tracks)
    (url, name) = track_dialog(tracks)
    if not url or not name:
        # Nothing found or nothing chosen.
        if strm:
            set_resolved_to_dummy()
        return
    li = xbmcgui.ListItem(clean_file_name(data))
    li = set_movie_meta(li, imdb_id, META_PATH)
    execute_video(name, url, li, strm)
def set_resolved_to_dummy():
    """Resolve the plugin call to a bundled dummy file so XBMC shows no error."""
    dummy_path = os.path.join(ADDON.getAddonInfo('path'), 'dummy.wma')
    placeholder = xbmcgui.ListItem('Dummy data to avoid error message', path=dummy_path)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, placeholder)
def track_dialog(tracks):
    """Show a pick-list of tracks (VOB entries hidden).

    Returns (url, name) of the chosen track, or (None, None) when no
    tracks are available or the user cancels.
    """
    menu_texts = []
    menu_data = []
    for track in tracks:
        track_name = track['name']
        if track_name.find('.VOB') < 0:
            menu_texts.append(track_name)
            menu_data.append(track['location'])
    dialog = xbmcgui.Dialog()
    if len(menu_data) == 0:
        dialog = xbmcgui.Dialog()
        dialog.ok("No files found", "The search was not able to find any files.")
        return (None, None)
    choice = dialog.select('Select file', menu_texts)
    if choice < 0:
        return (None, None)
    return (menu_data[choice], menu_texts[choice])
def search(query, limit=25):
    """Query Furk for cached files matching query (str, or list of strs).

    Returns [] when login fails; results whose query was auto-corrected
    by Furk (query_changed set) are discarded.
    """
    query = clean_file_name(query)
    query = query.replace('\'', ' ')
    if not login_at_furk():
        return []
    files = []
    if type(query).__name__ == 'list':
        # Several alternative queries: merge the results of each.
        for alternative in query:
            result = FURK.search(alternative, filter=FURK_FILTER, limit=limit)
            if result.query_changed == None:
                files.extend(result.files)
    else:
        result = FURK.search(query, filter=FURK_FILTER, limit=limit)
        if result.query_changed == None:
            files = result.files
    return files
def remove_list_duplicates(list_to_check):
    """Return list_to_check with duplicates removed (first occurrence kept).

    Bug fix: the original used  map(dict.__setitem__, ...)  which is lazy
    in Python 3 (so nothing was ever inserted and an empty list came
    back) and, even on Python 2, returned the keys in arbitrary dict
    order.  This version is eager and preserves the input order.
    """
    seen = set()
    unique = []
    for item in list_to_check:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def filter_playlist_tracks(tracks, track_filters):
    """Return the tracks whose normalized name contains any normalized filter.

    track_filters may be a single string or a list of strings.
    """
    if type(track_filters).__name__ != 'list':
        track_filters = [track_filters]
    normalized = [make_string_comparable(f) for f in track_filters]
    matched = []
    for track in tracks:
        track_name = make_string_comparable(track['name'])
        if any(track_name.find(f) >= 0 for f in normalized):
            matched.append(track)
    return matched
def make_string_comparable(s):
    """Lower-case s and strip every non-alphanumeric character."""
    return ''.join(ch for ch in s.lower() if ch.isalnum())
def get_playlist_tracks(playlist_file, open_playlists=False):
    """Extract playable tracks from a Furk file object.

    Direct .avi/.mkv files become a single track; otherwise, when
    open_playlists is True, the file's XSPF playlist is fetched and
    scraped.  Any error yields an empty list (best-effort).
    """
    tracks = []
    try:
        file_name = playlist_file.name
        if file_name.endswith('.avi') or file_name.endswith('.mkv'):
            tracks = [{'name': file_name, 'location': playlist_file.url_dl}]
        elif open_playlists:
            playlist_url = playlist_file.url_pls
            playlist = get_url(playlist_url)
            tracks = scrape_xspf(playlist)
    except:
        pass  # best-effort: unreadable entries are skipped silently
    return tracks
def create_movie_tuple(name, imdb_id):
    """Movie item: toggles a .strm in library mode, otherwise opens the search dialog."""
    action = 'toggle movie strm' if LIBRARY_MODE else 'movie dialog'
    return create_item_tuple(name, action, data=name, isFolder=False, isMovieItem=True, imdb_id=imdb_id)
def create_tv_show_tuple(name, imdb_id):
    """TV-show item: toggles .strm files in library mode, otherwise opens episodes."""
    if LIBRARY_MODE:
        return create_item_tuple(name, 'toggle tv show strms', data=name, isFolder=False, isTVShowItem=True, isSubscribable=True, imdb_id=imdb_id)
    return create_item_tuple(name, 'episodes menu', data=name, isTVShowItem=True, isSubscribable=True, imdb_id=imdb_id)
def create_item_tuple(name, mode, data="", imdb_id="", isFolder=True, isSubscribable=False, isMovieItem=False, isTVShowItem=False):
    """Build the (url, list_item, isFolder) tuple for one directory entry."""
    url = create_url(name, mode, data, imdb_id)
    li = create_list_item(name, mode, isSubscribable=isSubscribable, isMovieItem=isMovieItem, isTVShowItem=isTVShowItem, imdb_id=imdb_id)
    if imdb_id != "":
        # Attach cached metadata when an IMDB id is available.
        if isMovieItem:
            li = set_movie_meta(li, imdb_id, META_PATH)
        if isTVShowItem:
            li = set_tv_show_meta(li, imdb_id, META_PATH)
    return (url, li, isFolder)
def create_url(name, mode, data="", imdb_id=""):
    """Build a plugin callback URL carrying name/data/mode/imdb_id."""
    quoted_name = urllib.quote(str(name))
    quoted_data = urllib.quote(str(data))
    return sys.argv[0] + '?name=%s&data=%s&mode=%s&imdb_id=%s' % (quoted_name, quoted_data, str(mode), imdb_id)
def create_list_item(name, mode, isSubscribable=False, isMovieItem=False, isTVShowItem=False, imdb_id=''):
    """Build the xbmcgui.ListItem for a menu entry.

    Adds an Info context-menu entry for movie/TV items; in library mode
    a star (or "(A)") prefix marks items already present in the local
    .strm folders; subscribable items get Subscribe/Unsubscribe context
    entries and a heart (or "(S)") prefix when subscribed.
    """
    contextMenuItems = []
    prefix = " "
    if isMovieItem:
        contextMenuItems.append(('Movie Information', 'XBMC.Action(Info)'))
    if isTVShowItem:
        contextMenuItems.append(('TV Show information', 'XBMC.Action(Info)'))
    if LIBRARY_MODE:
        if isMovieItem:
            c_name = clean_file_name(name)
            if exist_in_dir(c_name, MOVIES_PATH, isMovie=True):
                if UNICODE_INDICATORS:
                    prefix = u'\u2605'  # black star
                else:
                    prefix = "(A)"
        if isTVShowItem:
            # Strip the " (year)" suffix before checking the library folder.
            c_name = clean_file_name(name.split('(')[0][:-1])
            if exist_in_dir(c_name, TV_SHOWS_PATH):
                if UNICODE_INDICATORS:
                    prefix = u'\u2605'
                else:
                    prefix = "(A)"
    # Subscriptions key on the imdb_id for shows, on the menu mode otherwise.
    # NOTE(review): sub_path is computed but never used in this function.
    if isTVShowItem:
        sub_data = imdb_id
        sub_path = TV_SHOWS_PATH
    else:
        sub_data = mode
        sub_path = MOVIES_PATH
    if isSubscribable:
        if subscription_index(name, sub_data) < 0:
            subscribe_url = sys.argv[0] + '?name=%s&data=%s&mode=subscribe' % (urllib.quote(name), sub_data)
            contextMenuItems.append(('Subscribe', 'XBMC.RunPlugin(%s)' % subscribe_url))
        else:
            if UNICODE_INDICATORS:
                prefix = u'\u2665'  # black heart
            else:
                prefix = "(S)"
            unsubscribe_url = sys.argv[0] + '?name=%s&data=%s&mode=unsubscribe' % (urllib.quote(name), sub_data)
            contextMenuItems.append(('Unsubscribe', 'XBMC.RunPlugin(%s)' % unsubscribe_url))
    li = xbmcgui.ListItem(prefix + clean_file_name(name, use_blanks=False))
    li.addContextMenuItems(contextMenuItems)
    return li
def create_movie_items(movies):
    """Build item tuples for movies; also collect imdb ids lacking cached meta.

    Returns (items, missing_meta).
    """
    items = []
    missing_meta = []
    for movie in movies:
        display = "%s (%s)" % (movie['name'], movie['year'])
        movie_id = movie['imdb_id']
        items.append(create_movie_tuple(display, movie_id))
        if not meta_exist(movie_id, META_PATH):
            missing_meta.append(movie_id)
    return items, missing_meta
def create_tv_show_items(tv_shows):
    """Build item tuples for TV shows; also collect imdb ids lacking cached meta.

    Returns (items, missing_meta).
    """
    items = []
    missing_meta = []
    for show in tv_shows:
        display = "%s (%s)" % (show['name'], show['year'])
        show_id = show['imdb_id']
        items.append(create_tv_show_tuple(display, show_id))
        if not meta_exist(show_id, META_PATH):
            missing_meta.append(show_id)
    return items, missing_meta
def scan_library():
    """Start an XBMC video-library scan unless one is already in progress."""
    if xbmc.getCondVisibility('Library.IsScanningVideo') == False:
        #if ADDON.getSetting('update_video') == 'true':
        xbmc.executebuiltin('UpdateLibrary(video)')
def clean_library():
    """Ask XBMC to clean stale entries from the video library."""
    #if xbmc.getCondVisibility('Library.IsScanningVideo') == False:
    #if ADDON.getSetting('update_video') == 'true':
    xbmc.executebuiltin('CleanLibrary(video)')
def get_missing_meta(missing_meta, type):
    """Kick off a background download of missing metadata.

    type is 'movies' or 'tv shows'.  Does nothing unless the
    'download_meta' setting is enabled and there is something to fetch.
    """
    if len(missing_meta) > 0 and DOWNLOAD_META:
        xbmc.log("[What the Furk] Downloading missing %s meta data for %d files..." % (type, len(missing_meta)))
        dlThread = DownloadThread(missing_meta, type)
        dlThread.start()
        # NOTE(review): logged right after start() -- the thread is still
        # running, so "complete" here really only means "started".
        xbmc.log("[What the Furk] ...meta download complete!")
class DownloadThread(Thread):
    """Background worker that fetches missing movie / TV-show metadata."""

    def __init__(self, missing_meta, meta_type):
        # meta_type is 'movies' or 'tv shows'.
        Thread.__init__(self)
        self.missing_meta = missing_meta
        self.type = meta_type

    def run(self):
        downloader = None
        if self.type == 'movies':
            downloader = download_movie_meta
        elif self.type == 'tv shows':
            downloader = download_tv_show_meta
        if downloader is not None:
            for imdb_id in self.missing_meta:
                downloader(imdb_id, META_PATH)
        # Refresh the directory so new artwork/labels become visible.
        xbmc.executebuiltin("Container.Refresh")
def get_all_meta():
    """Pre-download metadata for every browsable movie and TV-show menu.

    Walks each menu builder, collects the imdb ids it reports as missing
    metadata, and downloads that metadata synchronously.
    Bug fix: the "new movies" block previously called
    download_tv_show_meta for its (movie) ids; it now correctly calls
    download_movie_meta.
    """
    genres = ['Action', 'Adventure', 'Animation', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Family',
              'Fantasy', 'History', 'Horror', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
    xbmc.log("[What the Furk] Downloading movies_all_menu...")
    items, missing_meta = movies_all_menu()
    for imdb_id in missing_meta:
        download_movie_meta(imdb_id, META_PATH)
    xbmc.log("[What the Furk] ...complete!")
    for genre in genres:
        xbmc.log("[What the Furk] Downloading movies_genre_menu %s..." % genre)
        items, missing_meta = movies_genre_menu(str(genre).lower())
        for imdb_id in missing_meta:
            download_movie_meta(imdb_id, META_PATH)
        xbmc.log("[What the Furk] ...complete!")
    xbmc.log("[What the Furk] Downloading movies_new_menu...")
    items, missing_meta = movies_new_menu()
    for imdb_id in missing_meta:
        download_movie_meta(imdb_id, META_PATH)  # was download_tv_show_meta (bug)
    xbmc.log("[What the Furk] ...complete!")
    xbmc.log("[What the Furk] Downloading tv_shows_all_menu...")
    items, missing_meta = tv_shows_all_menu()
    for imdb_id in missing_meta:
        download_tv_show_meta(imdb_id, META_PATH)
    xbmc.log("[What the Furk] ...complete!")
    for genre in genres:
        xbmc.log("[What the Furk] Downloading tv_shows_genre_menu %s..." % genre)
        items, missing_meta = tv_shows_genre_menu(str(genre).lower())
        for imdb_id in missing_meta:
            download_tv_show_meta(imdb_id, META_PATH)
        xbmc.log("[What the Furk] ...complete!")
    xbmc.log("[What the Furk] Downloading tv_shows_active_menu...")
    items, missing_meta = tv_shows_active_menu()
    for imdb_id in missing_meta:
        download_tv_show_meta(imdb_id, META_PATH)
    xbmc.log("[What the Furk] ...complete!")
    xbmc.log("[What the Furk] META DOWNLOAD COMPLETE!")
def get_menu_items(name, mode, data, imdb_id):
    """Build the directory items for the requested menu *mode*.

    Listing menus that also report missing IMDB metadata trigger a
    background fetch via get_missing_meta before their items are returned.
    Unknown modes yield an empty list.
    """
    simple_menus = {
        "main menu": lambda: main_menu(),
        "movie genres menu": lambda: movies_genres_menu(),
        "tv show genres menu": lambda: tv_shows_genres_menu(),
        "episodes menu": lambda: tv_shows_episodes_menu(name, imdb_id),
        "seasons menu": lambda: tv_shows_seasons_menu(name, imdb_id),
        "subscription menu": lambda: subscription_menu(),
        "search menu": lambda: search_menu(),
    }
    if mode in simple_menus:
        return simple_menus[mode]()
    # Menus below return (items, missing_meta); kick off a metadata fetch
    # for anything not cached yet.
    meta_menus = {
        "all movies menu": (lambda: movies_all_menu(), 'movies'),
        "movie genre menu": (lambda: movies_genre_menu(str(name).lower()), 'movies'),
        "new movies menu": (lambda: movies_new_menu(), 'movies'),
        "all tv shows menu": (lambda: tv_shows_all_menu(), 'tv shows'),
        "tv show genre menu": (lambda: tv_shows_genre_menu(str(name).lower()), 'tv shows'),
        "active tv shows menu": (lambda: tv_shows_active_menu(), 'tv shows'),
    }
    if mode in meta_menus:
        build, meta_type = meta_menus[mode]
        items, missing_meta = build()
        get_missing_meta(missing_meta, meta_type)
        return items
    return []
#Other
def get_params(paramstring):
    """Parse a plugin query string (e.g. "?a=1&b=2") into a str->str dict.

    Pieces without exactly one '=' separator are silently dropped, and a
    string shorter than two characters yields an empty dict.
    """
    if len(paramstring) < 2:
        return {}
    pieces = paramstring.replace('?', '').split('&')
    return dict(
        piece.split('=') for piece in pieces if len(piece.split('=')) == 2
    )
# Entry point: sys.argv[2] carries the plugin invocation query string.
params = get_params(sys.argv[2])
try:
    name = urllib.unquote_plus(params["name"])
except:
    name = ""
try:
    data = urllib.unquote_plus(params["data"])
except:
    data = ""
try:
    imdb_id = urllib.unquote_plus(params["imdb_id"])
except:
    imdb_id = ""
try:
    mode = urllib.unquote_plus(params["mode"])
except:
    mode = "main menu"
xbmc.log("[What the Furk] mode=%s name=%s data=%s imdb_id=%s" % (mode, name, data, imdb_id))
# Dispatch: any "* menu" mode renders a directory listing; the remaining
# modes perform an action (dialogs, playback, subscriptions, .strm toggles).
if mode.endswith('menu'):
    items = get_menu_items(name, mode, data, imdb_id)
    xbmcplugin.addDirectoryItems(int(sys.argv[1]), items, len(items))
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
    setup()
elif mode == "episode dialog":
    episode_dialog(data, imdb_id)
elif mode == "movie dialog":
    movie_dialog(data, imdb_id)
elif mode == "strm movie dialog":
    movie_dialog(data, imdb_id, strm=True)
elif mode == "strm tv show dialog":
    episode_dialog(data, imdb_id, strm=True)
elif mode == "play":
    execute_video(name, imdb_id)
elif mode == "manual search":
    manual_search(data)
    xbmc.executebuiltin("Container.Refresh")
elif mode == "subscribe":
    subscribe(name, data)
    xbmc.executebuiltin("Container.Refresh")
elif mode == "unsubscribe":
    # The list label may carry a "[...]" suffix; strip it to get the bare name.
    if name.find('[') >= 0:
        name = name.split('[')[0][:-1]
    unsubscribe(name, data)
    xbmc.executebuiltin("Container.Refresh")
elif mode == "get subscriptions":
    get_subscriptions()
elif mode == "toggle movie strm":
    # Add or remove a library .strm stub for this movie.
    name = clean_file_name(name)
    if exist_in_dir(name, MOVIES_PATH, isMovie=True):
        remove_strm_file(data, MOVIES_PATH)
        #clean_library()
    else:
        create_strm_file(name, data, imdb_id, "strm movie dialog", MOVIES_PATH)
        scan_library()
    xbmc.executebuiltin("Container.Refresh")
elif mode == "toggle tv show strms":
    # The show label has a trailing " (...)" part; keep only the title.
    data = clean_file_name(data.split('(')[0][:-1])
    if exist_in_dir(data, TV_SHOWS_PATH):
        remove_tv_show_strm_files(data, TV_SHOWS_PATH)
        #clean_library()
    else:
        create_tv_show_strm_files(data, imdb_id, "strm tv show dialog", TV_SHOWS_PATH)
        scan_library()
    xbmc.executebuiltin("Container.Refresh")
#Sort by ...
#TODO:
#max search saves
#torrent mode
#download and play
|
998,803 | 152e5cd0252f23ab940555fa42178d0f9c6f0eaf | from threading import Thread
import usb
import usb.core
import usb.util
import time
import os
import sys
import struct
from pocketsphinx import LiveSpeech, get_model_path
# Global pocketsphinx decoder; created lazily by setup_live_speech().
live_speech=None
# ReSpeaker mic array (vendor 0x2886, product 0x0018).
# NOTE(review): usb.core.find returns None when the device is absent, so
# later dev.ctrl_transfer calls would fail — confirm the device is attached.
dev = usb.core.find(idVendor=0x2886,idProduct=0x0018)
file_path = os.path.abspath(__file__)
model_path = get_model_path()
# Define path
hotword_dic_path = file_path.replace(
    'ros2_function/module_angular.py', '/dictionary/hey_ducker.dict')
hotword_gram_path = file_path.replace(
    'ros2_function/module_angular.py', '/dictionary/hey_ducker.gram')
# PARAMETERS for sound localization: name -> (id, offset, type, max, min,
# access mode, description...), read via USB vendor control transfers.
PARAMETERS = {
    'DOAANGLE': (21, 0, 'int', 359, 0, 'ro', 'DOA angle. Current value. Orientation depends on build configuration.'),
    'SPEECHDETECTED': (19, 22, 'int', 1, 0, 'ro', 'Speech detection status.', '0 = false (no speech detected)',
                       '1 = true (speech detected)')
}
# USB control-transfer timeout (milliseconds).
TIMEOUT = 100000
# Find angular
# Find angular
def angular():
    """Block until the hotword is recognised, then return the DOA angle.

    Rebuilds the global LiveSpeech decoder with the hotword grammar, waits
    for the mic array to report speech, and returns the direction-of-arrival
    angle (degrees) captured at the first non-blank recognition.
    """
    global live_speech
    setup_live_speech(
        False,
        hotword_dic_path,
        hotword_gram_path,
        1e-20)
    while True:
        counter = 0
        if read('SPEECHDETECTED') == 1:
            # Iterating live_speech blocks and yields recognised segments.
            for kw in live_speech:
                angular = direction()
                if kw != ' ':
                    counter += 1
                    print(str(counter) + ':' + str(angular), flush=True)
                    # NOTE(review): returns on the first non-blank hit —
                    # original indentation was ambiguous; confirm nesting.
                    return angular
def read(param_name):
    """Read one ReSpeaker tuning parameter over a USB control transfer.

    param_name: key into PARAMETERS. Returns the decoded value (int for
    'int'-typed parameters, float otherwise), or None for unknown names.
    """
    try:
        data = PARAMETERS[param_name]
    except KeyError:
        return
    # Bit 7 = read request; bit 6 distinguishes int from fixed-point params.
    cmd = 0x80 | data[1]
    if data[2] == 'int':
        cmd |= 0x40
    id = data[0]
    length = 8
    response = dev.ctrl_transfer(
        usb.util.CTRL_IN | usb.util.CTRL_TYPE_VENDOR | usb.util.CTRL_RECIPIENT_DEVICE,
        0, cmd, id, length, TIMEOUT)
    # BUGFIX: array.tostring() was removed in Python 3.9; tobytes() is the
    # supported equivalent for converting the usb response buffer.
    response = struct.unpack(b'ii', response.tobytes())
    if data[2] == 'int':
        result = response[0]
    else:
        # Fixed-point value: mantissa * 2^exponent.
        result = response[0] * (2. ** response[1])
    return result
def direction():
    """Return the current direction-of-arrival angle in degrees (0-359)."""
    return read('DOAANGLE')
# Setup livespeech
def setup_live_speech(lm, dict_path, jsgf_path, kws_threshold):
global live_speech
live_speech = LiveSpeech(lm=lm,
hmm=os.path.join(model_path, 'en-us'),
dic=dict_path,
jsgf=jsgf_path,
kws_threshold=kws_threshold)
if __name__ == '__main__':
    # Run one hotword-then-angle cycle when executed directly.
    angular()
|
998,804 | c0a4d362f87c9e2e07fa730cb7a173f511d7d3ab | import torch
import visdom
# Create a visdom client on the default port (8097); the env name ("first")
# partitions the visualisation workspace into separate views.
vis = visdom.Visdom(env='first')
# vis exposes text(), line(), image(), ...; the win argument names the pane.
vis.text('first visdom', win='text1')
# append=True adds to the pane instead of overwriting the previous text.
vis.text('hello pytorch', win='text1',append=True)
# Plot the curve y = -x^2 + 20x + 1; opts configures title, axis labels, etc.
for i in range(20):
    vis.line(X=torch.FloatTensor([i]), Y=torch.FloatTensor([-i**2+20*i+1]),
             opts={'title': 'y=-x^2+20x+1'}, win='loss', update='append')
# Show one random image.
vis.image(torch.randn(3,256,256), win='random_image')
998,805 | f3fa44e95985408cdc9b06c8f2a622bc2dae0ee3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by fengyuwusong at 2018/1/19
__author__ = 'fengyuwusong' |
998,806 | a963c5439202edd12612e43ba50a70ba7627d0fb | import itertools as it
from conference_scheduler.resources import Shape, Constraint
from conference_scheduler.lp_problem import utils as lpu
def _schedule_all_events(events, slots, X, summation_type=None):
    """Yield one constraint per event forcing it into exactly one slot."""
    shape = Shape(len(events), len(slots))
    add_up = lpu.summation_functions[summation_type]
    label = 'Event either not scheduled or scheduled multiple times'
    for event in range(shape.events):
        slot_terms = (X[event, slot] for slot in range(shape.slots))
        yield Constraint(f'{label} - event: {event}', add_up(slot_terms) == 1)
def _max_one_event_per_slot(events, slots, X, summation_type=None):
    """Yield one constraint per slot allowing at most one event in it."""
    shape = Shape(len(events), len(slots))
    add_up = lpu.summation_functions[summation_type]
    label = 'Slot with multiple events scheduled'
    for slot in range(shape.slots):
        occupancy = add_up(X[(event, slot)] for event in range(shape.events))
        yield Constraint(f'{label} - slot: {slot}', occupancy <= 1)
def _events_in_session_share_a_tag(events, slots, X, summation_type=None):
    """
    Constraint that ensures that if an event has a tag and
    is in a given session then it must
    share at least one tag with every other event in that session.
    """
    session_array, tag_array = lpu.session_array(slots), lpu.tag_array(events)
    summation = lpu.summation_functions[summation_type]
    label = 'Dissimilar events schedule in same session'
    event_indices = range(len(tag_array))
    session_indices = range(len(session_array))
    for session in session_indices:
        # NOTE: rebinds the `slots` parameter to this session's slot indices;
        # the original argument is not used past this point.
        slots = lpu._slots_in_session(session, session_array)
        for slot, event in it.product(slots, event_indices):
            if len(events[event].tags) > 0:
                # Events sharing no tag with `event` cannot co-occupy the session.
                other_events = lpu._events_with_diff_tag(event, tag_array)
                for other_slot, other_event in it.product(slots, other_events):
                    if other_slot != slot and other_event != event:
                        # If they have different tags they cannot be scheduled
                        # together
                        yield Constraint(
                            f'{label} - event: {event}, slot: {slot}',
                            summation(
                                (
                                    X[(event, slot)],
                                    X[(other_event, other_slot)]
                                )
                            ) <= 1
                        )
def _events_available_in_scheduled_slot(events, slots, X, **kwargs):
    """
    Constraint that ensures that an event is scheduled in slots for which it is
    available.
    """
    availability = lpu.slot_availability_array(slots=slots, events=events)
    label = 'Event scheduled when not available'
    for event_idx, slot_flags in enumerate(availability):
        for slot_idx, available in enumerate(slot_flags):
            if available == 0:
                # Pin X to zero wherever the event cannot take the slot.
                yield Constraint(
                    f'{label} - event: {event_idx}, slot: {slot_idx}',
                    X[event_idx, slot_idx] <= available
                )
def _events_available_during_other_events(
    events, slots, X, summation_type=None
):
    """
    Constraint that ensures that an event is not scheduled at the same time as
    another event for which it is unavailable.
    """
    summation = lpu.summation_functions[summation_type]
    event_availability_array = lpu.event_availability_array(events)
    label = 'Event clashes with another event'
    # For every pair of time-overlapping slots, forbid co-scheduling an
    # event with any event it is marked unavailable against.
    for slot1, slot2 in lpu.concurrent_slots(slots):
        for row, event in enumerate(event_availability_array):
            if events[row].unavailability:
                for col, availability in enumerate(event):
                    if availability == 0:
                        yield Constraint(
                            f'{label} - event: {row} and event: {col}',
                            summation(
                                (X[row, slot1], X[col, slot2])
                            ) <= 1 + availability
                        )
def all_constraints(events, slots, X, summation_type=None):
    """Chain every constraint generator over the same problem data."""
    generators = (
        _schedule_all_events,
        _max_one_event_per_slot,
        _events_in_session_share_a_tag,
        _events_available_in_scheduled_slot,
        _events_available_during_other_events,
    )
    for generator in generators:
        yield from generator(
            events=events,
            slots=slots,
            X=X,
            summation_type=summation_type,
        )
998,807 | fd00224fa7816e7718ac592c2ccd2cdaaa55ed24 | # encoding: utf-8
from django.test import TestCase
from django_nose.tools import *
from wpkit.wp import content
from .. import Differ
BALANCE_TESTS = (
(
'<p> <p>Test 0</p>',
False,
'<p> </p><p>Test 0</p>'
),
(
'<p>Test 1 <i>abc <b>nested tags</b> and </p>\n<p>Unbalanced <a>tags</a>',
False,
'<p>Test 1 <i>abc <b>nested tags</b> and </i></p>\n<p>Unbalanced <a>tags</a></p>'
),
(
'<p>Test 2 <i>abc <b>nested </i>tags</b> and </p>\n<p>Unbalanced <a>tags</a>',
False,
'<p>Test 2 <i>abc <b>nested </b></i>tags and </p>\n<p>Unbalanced <a>tags</a></p>'
),
(
'<p>Test 2 <i>abc <b>nested </i>tags <br /></b> and </p>\n<p>Unbalanced <a>tags</a>',
False,
'<p>Test 2 <i>abc <b>nested </b></i>tags <br /> and </p>\n<p>Unbalanced <a>tags</a></p>'
),
(
'<p>Test 2 <i>abc <b>nested </i>tags <br></b> and </p>\n<p>Unbalanced <a>tags</a>',
False,
'<p>Test 2 <i>abc <b>nested </b></i>tags <br/> and </p>\n<p>Unbalanced <a>tags</a></p>'
),
(
'''<p>Test 1-2 <i>abc's <b>"nested tags"</b> -- and </p>\n<p>Unbalanced <a>tags</a>''',
True,
u'<p>Test 1-2 <i>abc’s <b>“nested tags”</b> — and </i></p>\n<p>Unbalanced <a>tags</a></p>'
),
(
'''<p>Test 1-2 <i>abc's <b>"nested tags"</b> -- and </p>\n<pre>Unbalanced <a>tags'</a>''',
True,
u'<p>Test 1-2 <i>abc’s <b>“nested tags”</b> — and </i></p>\n<pre>Unbalanced <a>tags\'</a></pre>'
),
(
u'''<img class="post" src="/wp-content/blogs.dir/1/eurostar.jpg" align="left" width="240" height="180" />Vi sarà capitato di salire su un Eurostar la mattina ancora insonnoliti, o a fine pomeriggio stanchi dopo una giornata di lavoro in trasferta, e conquistato l'agognato posto vicino al finestrino rannicchiarvi e cascare in un sonno profondo. E vi sarà anche capitato di svegliarvi di soprassalto per un rumore sferragliante qualche centimetro accanto alle vostre orecchie.''',
True,
u'''<img class="post" src="/wp-content/blogs.dir/1/eurostar.jpg" align="left" width="240" height="180" />Vi sarà capitato di salire su un Eurostar la mattina ancora insonnoliti, o a fine pomeriggio stanchi dopo una giornata di lavoro in trasferta, e conquistato l’agognato posto vicino al finestrino rannicchiarvi e cascare in un sonno profondo. E vi sarà anche capitato di svegliarvi di soprassalto per un rumore sferragliante qualche centimetro accanto alle vostre orecchie.'''
)
# ...
)
TXTR_BLOCK_TESTS = (
(
'''WP's a "great" app, PHP rocks, and code is poetry...''',
u'WP’s a “great” app, PHP rocks, and code is poetry…'
),
(
u'''Vi sarà capitato di salire su un Eurostar la mattina ancora insonnoliti, o a fine pomeriggio stanchi dopo una giornata di lavoro in trasferta, e conquistato l'agognato posto vicino al finestrino rannicchiarvi e cascare in un sonno profondo. E vi sarà anche capitato di svegliarvi di soprassalto per un rumore sferragliante qualche centimetro accanto alle vostre orecchie.''',
u'''Vi sarà capitato di salire su un Eurostar la mattina ancora insonnoliti, o a fine pomeriggio stanchi dopo una giornata di lavoro in trasferta, e conquistato l’agognato posto vicino al finestrino rannicchiarvi e cascare in un sonno profondo. E vi sarà anche capitato di svegliarvi di soprassalto per un rumore sferragliante qualche centimetro accanto alle vostre orecchie.'''
),
(
'This comment <!--abc--> should not be converted, nor should this one <!-- ab c -->',
'This comment <!--abc--> should not be converted, nor should this one <!-- ab c -->'
),
)
AUTOP_TESTS = (
(
'''Test 1.\nSecond line\n\n<br />Third line.''',
'''<p>Test 1.<br />\nSecond line</p>\n<p>Third line.</p>\n'''
),
(
'''Test 2.\n\n<pre>Second line</pre>\n\nThird line.''',
'''<p>Test 2.</p>\n<pre>Second line</pre>\n<p>Third line.</p>\n'''
),
)
class TestContent(TestCase):
    """Table-driven checks for wpkit.wp.content text transformations."""

    def _verify(self, result, expected):
        # Raise with a rich Differ payload rather than assertEqual so the
        # failure output shows exactly where the strings diverge.
        if result != expected:
            raise ValueError(Differ(expected=expected, result=result))

    def test_balance_tags(self):
        for source, texturize, expected in BALANCE_TESTS:
            self._verify(content.balance_tags(source, texturize), expected)

    def test_texturize_block(self):
        for source, expected in TXTR_BLOCK_TESTS:
            self._verify(content.texturize_block(source), expected)

    def test_autop(self):
        for source, expected in AUTOP_TESTS:
            self._verify(content.autop(source), expected)
|
998,808 | 10aa2f23139ab31db29c82131a909cab2358924f | from django.views.generic import ListView, DetailView, UpdateView
from django.views.generic.edit import CreateView
import django_tables2 as tables
from django_tables2 import RequestConfig, SingleTableView
from core.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from orders.forms import CustomerForm, CustomerDetailReadOnlyForm
from orders.tables import OrderTable
from customers.models import Customer
from .tables import CustomersTable
class CustomerUpdateView(LoginRequiredMixin, UpdateView):
    # Edit an existing Customer through CustomerForm; login required.
    model = Customer
    context_object_name = "customer"
    template_name = "customers/customer_update.html"
    form_class = CustomerForm
class CustomerCreateView(LoginRequiredMixin, CreateView):
    # Create a new Customer; shares the update template and form.
    model = Customer
    context_object_name = "customer"
    template_name = "customers/customer_update.html"
    form_class = CustomerForm
class CustomerDetailView(LoginRequiredMixin, DetailView):
    # Read-only customer page with an embedded table of the customer's orders.
    model = Customer
    context_object_name = "customer"
    template_name = "customers/customer_detail.html"

    def get_context_data(self, **kwargs):
        """Add a read-only detail form and a sortable orders table to the context."""
        context = super(CustomerDetailView, self).get_context_data(**kwargs)
        context['customer_details_form'] = CustomerDetailReadOnlyForm(instance = context['object'])
        # All orders belonging to this customer, rendered via django-tables2;
        # RequestConfig wires up sorting/pagination from the request.
        customer_orders = context['object'].order_set.all()
        customer_orders_table = OrderTable(customer_orders)
        RequestConfig(self.request).configure(customer_orders_table)
        context['customer_orders_table'] = customer_orders_table
        return context
class CustomerTableView(LoginRequiredMixin, SingleTableView):
    # Paginated/sortable listing of all customers.
    model = Customer
    table_class = CustomersTable
    template_name = "customers/customer_table.html"
|
998,809 | 53a933ab636b13350cf861c38c778995289ac8ca | """Robot parameters"""
import numpy as np
import farms_pylog as pylog
class RobotParameters(dict):
    """Robot parameters.

    Dict-backed container for the salamander CPG network parameters:
    oscillator frequencies, coupling weights, phase biases, amplitude
    convergence rates and nominal amplitudes, all derived from a
    simulation-parameter object.  Attribute access aliases item access.

    NOTE(review): the literal indices below (slices [:20] / [20:], 24x24
    matrices, rows 1-20 and 20-24) assume 10 body joints (20 body
    oscillators) plus 4 limb oscillators — confirm against
    parameters.n_body_joints / n_legs_joints before reusing with other
    morphologies.
    """
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

    def __init__(self, parameters):
        super(RobotParameters, self).__init__()
        # Initialise parameters
        self.n_body_joints = parameters.n_body_joints
        self.n_legs_joints = parameters.n_legs_joints
        self.n_joints = self.n_body_joints + self.n_legs_joints
        # Two oscillators (left/right) per body joint, one per limb joint.
        self.n_oscillators_body = 2*self.n_body_joints
        self.n_oscillators_legs = self.n_legs_joints
        self.n_oscillators = self.n_oscillators_body + self.n_oscillators_legs
        self.freqs = np.zeros(self.n_oscillators)
        self.coupling_weights = np.zeros([
            self.n_oscillators,
            self.n_oscillators
        ])
        self.phase_bias = np.zeros([self.n_oscillators, self.n_oscillators])
        self.rates = np.zeros(self.n_oscillators)
        self.nominal_amplitudes = np.zeros(self.n_oscillators)
        self.update(parameters)

    def update(self, parameters):
        """Update network from parameters"""
        self.set_frequencies(parameters)  # f_i
        self.set_coupling_weights(parameters)  # w_ij
        self.set_phase_bias(parameters)  # theta_i
        self.set_amplitudes_rate(parameters)  # a_i
        self.set_nominal_amplitudes(parameters)  # R_i

    def set_frequencies(self, parameters):
        """Set frequencies.

        Piecewise-linear drive-to-frequency mapping: body and limb
        oscillators saturate to 0 outside their respective drive ranges.
        """
        freqs = np.ones(self.n_oscillators)
        body_freq = 0
        limbs_freq = 0
        d_low = 1.0
        d_high_body = 5.0
        d_high_limbs = 3.0
        if parameters.drive >= d_low and parameters.drive <= d_high_body:
            body_freq = 0.2 * parameters.drive + 0.3
        if parameters.drive >= d_low and parameters.drive <= d_high_limbs:
            limbs_freq = 0.2 * parameters.drive
        # First 20 entries: body oscillators; remainder: limb oscillators.
        freqs[:20] = body_freq
        freqs[20:] = limbs_freq
        self.freqs = freqs

    def set_coupling_weights(self, parameters):
        """Set coupling weights (24x24, body chain + limb ring + limb-to-body)."""
        weights_matrix = np.zeros([24,24])
        #upwards in body CPG
        np.fill_diagonal(weights_matrix[1:20], 10)
        #downwards in body CPG
        np.fill_diagonal(weights_matrix[:,1:20], 10)
        #the oscillators 10 and 11 are not coupled in either direction
        weights_matrix[9,10] = 0
        weights_matrix[10,9] = 0
        #from right to left in body CPG
        np.fill_diagonal(weights_matrix[10:20], 10)
        #from left to right in body CPG
        np.fill_diagonal(weights_matrix[:,10:20], 10)
        #whithin the limb CPG
        weights_matrix[20,21:23] = 10
        weights_matrix[21:23,20] = 10
        weights_matrix[23,21:23] = 10
        weights_matrix[21:23,23] = 10
        #from limb to body CPG
        weights_matrix[1:5,20] = 30
        weights_matrix[11:15,21] = 30
        weights_matrix[5:10,22] = 30
        weights_matrix[15:20,23] = 30
        self.coupling_weights = weights_matrix

    def set_phase_bias(self, parameters):
        """Set phase bias (same topology as the coupling weights)."""
        bias_matrix = np.zeros([24,24])
        #upwards in body CPG
        np.fill_diagonal(bias_matrix[1:20], parameters.phase_lag)
        #downwards in body CPG
        np.fill_diagonal(bias_matrix[:,1:20], -parameters.phase_lag)
        #the oscillators 10 and 11 are not coupled in either direction
        bias_matrix[9,10] = 0
        bias_matrix[10,9] = 0
        #from right to left in body CPG
        np.fill_diagonal(bias_matrix[10:20], np.pi)
        #from left to right in body CPG
        np.fill_diagonal(bias_matrix[:,10:20], np.pi)
        #whithin the limb CPG
        bias_matrix[20,21:23] = np.pi
        bias_matrix[21:23,20] = np.pi
        bias_matrix[23,21:23] = np.pi
        bias_matrix[21:23,23] = np.pi
        #from limb to body CPG
        bias_matrix[1:5,20] = np.pi
        bias_matrix[11:15,21] = np.pi
        bias_matrix[5:10,22] = np.pi
        bias_matrix[15:20,23] = np.pi
        self.phase_bias = bias_matrix

    def set_amplitudes_rate(self, parameters):
        """Set amplitude rates.

        NOTE(review): rates are set to n_oscillators (=24) for every
        oscillator — presumably a convergence-rate constant; confirm the
        intended value.
        """
        self.rates = self.n_oscillators * np.ones(self.n_oscillators)

    def set_nominal_amplitudes(self, parameters):
        """Set nominal amplitudes (same saturating drive mapping as frequencies)."""
        nominal_amplitudes = np.zeros(self.n_oscillators)
        body_amp = 0
        limbs_amp = 0
        d_low = 1.0
        d_high_body = 5.0
        d_high_limbs = 3.0
        if parameters.drive >= d_low and parameters.drive <= d_high_body:
            body_amp = 0.065 * parameters.drive + 0.196
        if parameters.drive >= d_low and parameters.drive <= d_high_limbs:
            limbs_amp = 0.131 * parameters.drive + 0.131
        nominal_amplitudes[:20] = body_amp
        nominal_amplitudes[20:] = limbs_amp
        self.nominal_amplitudes = nominal_amplitudes
|
998,810 | 52dcede9522b7eb556d900d5f78fd77f3126fb82 | # -*- coding: utf-8 -*-
import logging
import re
import threading
from zigbee_hub.telegesis.prompt_parsers import PROMPT_PARSERS
class IncomingMessage(object):
    """Parsed incoming-data ("RX:") line from the Telegesis module.

    Line format:
    RX:<EUI64>,<NWK addr>,<profileID>,<destEP>,<srcEP>,<clusterID>,<length>:<payload>
    (some firmware configurations omit the EUI64 field).
    """

    @staticmethod
    def is_incoming_message(message):
        """
        :param message: the message
        :type message: unicode
        """
        return message.startswith("RX:")

    def __init__(self, message):
        """
        < EUI64 >, < NWK addr >, < profileID >, < destinationEndpoint >, < SourceEndpoint >, < clusterID >, < length >: < payload >
        :param message: the message
        :type message: unicode
        """
        # BUGFIX: the original used message.lstrip("RX:"), which strips any
        # leading run of the characters 'R', 'X' and ':' and can therefore
        # eat the start of the first field; slice off the literal prefix.
        parts = message[len("RX:"):].split(",")
        if len(parts) == 7:
            self.eui64, self.mwk_addr, self.profile_id, self.destination_endpoint, self.source_endpoint, self.cluster_id, payload = parts
        else:
            self.mwk_addr, self.profile_id, self.destination_endpoint, self.source_endpoint, self.cluster_id, payload = parts
            self.eui64 = None
        # payload field is "<length>:<hex data>".
        self.length = int(payload.split(":")[0])
        self.payload = payload.split(":")[1]
class CommandError(Exception):
    """Raised when the module reports an "ERROR:<code>" line."""

    def __init__(self, error_line):
        # The numeric code follows the first colon.
        code_text = error_line.split(":")[1]
        self.code = int(code_text)

    def get_code(self):
        """Return the numeric Telegesis error code."""
        return self.code
class CommandResponse(object):
    """Accumulates the lines of one command response with a read cursor."""

    def __init__(self):
        # index tracks the most recently read position; -1 = nothing read yet.
        self.lines = []
        self.index = -1

    def add_line(self, line):
        """Append one raw response line."""
        self.lines.append(line)

    def has_data(self):
        """True once at least one line has been collected."""
        return bool(self.lines)

    def get(self, index):
        """Read the line at *index*, moving the cursor there."""
        self.index = index
        return self.lines[index]

    def next(self):
        """Advance the cursor and read the line it lands on."""
        self.index += 1
        return self.lines[self.index]
class SerialReader(threading.Thread):
    """Background thread draining the serial link to the Telegesis module.

    Complete command responses (terminated by an "OK" line) are put on
    at_queue, incoming radio packets ("RX:" lines) on incoming_queue, and
    prompt lines are dispatched to their registered parser.
    """

    def __init__(self, serial_conn, at_queue, incoming_queue):
        """
        :param serial_conn:
        :type serial_conn: Serial
        """
        super(SerialReader, self).__init__()
        self.serial_conn = serial_conn
        self.at_queue = at_queue
        self.incoming_queue = incoming_queue
        self._stop = False

    def run(self):
        # Prompt lines look like "NAME:payload".
        # NOTE(review): this regex also matches "RX:..." lines, so the
        # IncomingMessage branch below is reachable only if PROMPT_PARSERS
        # routing handles/excludes "RX" — verify against prompt_parsers.
        pattern = re.compile(r"^[A-Za-z]+:")
        response = CommandResponse()
        while True:
            if self._stop:
                self.serial_conn.close()
                return
            line = self.serial_conn.readline().rstrip()
            if line == "OK":
                # "OK" terminates a response; hand it over and start a new one.
                self.at_queue.put(response)
                response = CommandResponse()
            elif pattern.match(line):
                self.parse_prompt(line)
            elif IncomingMessage.is_incoming_message(line):
                self.incoming_queue.put(IncomingMessage(line))
            elif line.startswith("ERROR"):
                # NOTE(review): raising here terminates the reader thread;
                # confirm callers expect the link to go silent after ERROR.
                raise CommandError(line)
            elif len(line) > 0:
                response.add_line(line)

    def stop(self):
        # Ask the reader loop to close the port and exit.
        self._stop = True

    @staticmethod
    def parse_prompt(prompt):
        """Dispatch a "NAME:payload" prompt line to its registered parser."""
        prompt_command, prompt_payload = prompt.split(":", 1)
        parser = PROMPT_PARSERS.get(prompt_command)
        if parser is None:
            logging.warn("Unsupported prompt: {:s}".format(prompt_command))
            return
        parser(payload=prompt_payload)
|
998,811 | 191673563e7bcf777afb9bc34d5b5a3197e1d40d | # -*- coding: utf-8 -*-
import scrapy
import re
import json
from tv_spider.spiders.store import client, db
import tv_spider.const.video_source as video_source
import tv_spider.const.tv_status as tv_status
import tv_spider.const.video_status as video_status
class IqiyiSpider(scrapy.Spider):
name = 'iqiyi'
custom_settings = {
'ITEM_PIPELINES': {'tv_spider.pipelines.TvSpiderPipeline': 300},
'DOWNLOAD_DELAY': 0.01
}
allowed_domains = ['iqiyi.com']
def start_requests(self):
self.tv_num = 0
self.client = client
self.db = db
self.page_num = 1
self.page_size = 20
self.cat_name = '电视剧'
yield scrapy.Request('http://search.video.iqiyi.com/o?pageNum=%s&pageSize=%s&mode=11&ctgName=%s&type=list&if=html5&pos=1&access_play_control_platform=15&site=iqiyi' % (self.page_num, self.page_size, self.cat_name), self.parse_tv_list, meta={'handle_httpstatus_all': True})
def parse_tv_list(self, response):
result = json.loads(response.text)
code = result.get('code')
docinfos = result.get('data').get('docinfos')
if code == 'A00000' and len(docinfos):
for o in docinfos:
self.tv_num = self.tv_num + 1
albumDocInfo = o.get('albumDocInfo')
# 名称
name = albumDocInfo.get('albumTitle')
# 别名
alias = albumDocInfo.get('albumAlias').split(';') if albumDocInfo.get('albumAlias') else []
albumLink = albumDocInfo.get('albumLink')
# 视频id
album_id = re.match(ur'http:\/\/www\.iqiyi\.com\/(\S+)\.html', albumLink).group(1)
# 缩略图
thumb_src = albumDocInfo.get('albumHImage')
# 发布日期
releaseDate = albumDocInfo.get('releaseDate')
publish_date = '-'.join([releaseDate[0:4], releaseDate[4:6], releaseDate[6:8]])
# 分数
score = albumDocInfo.get('score')
# 演职员
actors = albumDocInfo.get('star').split(';') if albumDocInfo.get('star') else []
actors_detail = map(lambda x: {
'name': x.get('name'),
'avatar': x.get('image_url'),
'role': None
}, albumDocInfo.get('video_lib_meta').get('actor', []))
# 导演
director = ','.join(albumDocInfo.get('director').split(';')) if albumDocInfo.get('director') else ''
# 地区
region = albumDocInfo.get('region', '')
# 类型
types = albumDocInfo.get('video_lib_meta').get('category', [])
# 播放数
play_count = albumDocInfo.get('playCount')
# 详情
desc = albumDocInfo.get('video_lib_meta').get('description', '')
# 当前分集
current_part = albumDocInfo.get('newest_item_number')
# 总分集
part_count = albumDocInfo.get('itemTotalNumber')
# 更新提醒
update_notify_desc = albumDocInfo.get('stragyTime')
# 电视剧状态
status = tv_status.COMPLETED if current_part == part_count else tv_status.UPDATING
# 是否VIP才能看
is_vip = True if albumDocInfo.get('isPurchase') else False
# 分集剧情需要id
iqiyi_album_id = albumDocInfo.get('albumId')
# 分集剧情,http://mixer.video.iqiyi.com/jp/mixin/videos/avlist?albumId=213681201&page=1&size=100
tv = {
'name': name,
'resource': {
'has_crawl_detail': True,
'source': video_source.IQIYI,
'album_id': album_id,
'alias': alias,
'publish_date': publish_date,
'folder': thumb_src,
'score': score,
'actors': actors,
'actors_detail': actors_detail,
'director': director,
'region': region,
'types': types,
'play_count': play_count,
'desc': desc,
'current_part': current_part,
'part_count': part_count,
'update_notify_desc': update_notify_desc,
'status': status,
'is_vip': is_vip
},
'videos': []
}
video_page_num = 1
video_page_size = 50
yield scrapy.Request('http://cache.video.iqiyi.com/jp/avlist/%s/%s/%s/' % (iqiyi_album_id, video_page_num, video_page_size), self.parse_tv_parts, meta={'tv': tv, 'video_page_num': video_page_num, 'video_page_size': video_page_size})
if self.page_num * self.page_size < result.get('data').get('result_num'):
self.page_num = self.page_num + 1
yield scrapy.Request('http://search.video.iqiyi.com/o?pageNum=%s&pageSize=%s&mode=11&ctgName=%s&type=list&if=html5&pos=1&access_play_control_platform=15&site=iqiyi' % (self.page_num, self.page_size, self.cat_name), self.parse_tv_list)
def parse_tv_parts(self, response):
tv = response.meta.get('tv')
video_page_num = response.meta.get('video_page_num')
video_page_size = response.meta.get('video_page_size')
videos = tv.get('videos')
result = json.loads(re.match(r'var\stvInfoJs=(\{.*\})', response.text).group(1))
if result.get('code') == 'A00000':
for item in result.get('data').get('vlist'):
thumb = item.get('vpic')
if thumb:
thumb = thumb[0:-4] + '_320_180' + thumb[-4::]
video = {
'album_id': tv.get('resource').get('album_id'),
'source': tv.get('resource').get('source'),
'sequence': item.get('pd'),
'video_id': re.match(r'http:\/\/www\.iqiyi\.com\/(\S+)\.html', item.get('vurl')).group(1),
'thumb': thumb,
'duration': item.get('timeLength'),
'status': video_status.PREVIEW if item.get('type') == '0' else (video_status.VIP if item.get('purType') == 2 else video_status.FREE),
'brief': item.get('vt') or item.get('shortTitle'),
'desc': item.get('desc')
}
videos.append(video)
if video_page_num * video_page_size < result.get('allNum'):
video_page_num = video_page_num + 1
yield scrapy.Request('http://cache.video.iqiyi.com/jp/avlist/%s/%s/%s/' % (iqiyi_album_id, video_page_num, video_page_size), self.parse_tv_parts, meta={'tv': tv, 'video_page_num': video_page_num, 'video_page_size': video_page_size})
yield tv
def closed(self, reason):
self.client.close()
self.logger.info('spider closed because %s,tv number %s', reason, self.tv_num)
|
998,812 | 33f4027e4ebb7dc24283d1009dd723c83476793d | import torch
from torch import nn
import torch.nn.functional as F
from bbox.bbox import box_decode
class BCEFocalLoss(torch.nn.Module):
    """Binary focal loss on raw logits (Lin et al., RetinaNet).

    gamma: focusing exponent down-weighting easy examples.
    alpha: positive-class weight.
    reduction: 'elementwise_mean', 'sum', or anything else for no reduction.
    """

    def __init__(self, gamma=2, alpha=0.25, reduction='elementwise_mean'):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, _input, target):
        eps = 1e-6
        pt = torch.sigmoid(_input)
        alpha = self.alpha
        # Positive term pulls pt toward 1 where target==1, negative term
        # pushes pt toward 0 where target==0; both are focally modulated.
        positive = -alpha * (1 - pt) ** self.gamma * target * torch.log(pt + eps)
        negative = -(1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(1 - pt + eps)
        loss = positive + negative
        if self.reduction == 'elementwise_mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        return loss
class ATSSLoss(nn.Module):
    """Combined ATSS detection loss: focal classification, GIoU regression
    (centerness-weighted) and BCE centerness, each normalised by the
    number of positive anchors."""

    def __init__(self, num_classes):
        super(ATSSLoss, self).__init__()
        self.cls_loss_func = BCEFocalLoss(reduction='sum')
        self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
        self.num_classes = num_classes

    def GIoULoss(self, pred, target, anchor, weight=None):
        """Generalised-IoU loss between decoded predicted and target boxes.

        pred/target are box regression deltas, decoded against *anchor*
        via box_decode; weight (optional) scales per-box losses (here the
        centerness targets). Returns a summed loss.
        """
        pred_boxes = box_decode(pred.view(-1, 4), anchor.view(-1, 4))
        pred_x1 = pred_boxes[:, 0]
        pred_y1 = pred_boxes[:, 1]
        pred_x2 = pred_boxes[:, 2]
        pred_y2 = pred_boxes[:, 3]
        # Clamp degenerate boxes so x2>=x1 and y2>=y1.
        pred_x2 = torch.max(pred_x1, pred_x2)
        pred_y2 = torch.max(pred_y1, pred_y2)
        pred_area = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)
        gt_boxes = box_decode(target.view(-1, 4), anchor.view(-1, 4))
        target_x1 = gt_boxes[:, 0]
        target_y1 = gt_boxes[:, 1]
        target_x2 = gt_boxes[:, 2]
        target_y2 = gt_boxes[:, 3]
        target_area = (target_x2 - target_x1) * (target_y2 - target_y1)
        # Intersection rectangle (zero area when boxes do not overlap).
        x1_intersect = torch.max(pred_x1, target_x1)
        y1_intersect = torch.max(pred_y1, target_y1)
        x2_intersect = torch.min(pred_x2, target_x2)
        y2_intersect = torch.min(pred_y2, target_y2)
        area_intersect = torch.zeros(pred_x1.size()).to(pred)
        mask = (y2_intersect > y1_intersect) * (x2_intersect > x1_intersect)
        area_intersect[mask] = (x2_intersect[mask] - x1_intersect[mask]) * (y2_intersect[mask] - y1_intersect[mask])
        # Smallest enclosing rectangle for the GIoU penalty term.
        x1_enclosing = torch.min(pred_x1, target_x1)
        y1_enclosing = torch.min(pred_y1, target_y1)
        x2_enclosing = torch.max(pred_x2, target_x2)
        y2_enclosing = torch.max(pred_y2, target_y2)
        area_enclosing = (x2_enclosing - x1_enclosing) * (y2_enclosing - y1_enclosing) + 1e-7
        area_union = pred_area + target_area - area_intersect + 1e-7
        ious = area_intersect / area_union
        gious = ious - (area_enclosing - area_union) / area_enclosing
        losses = 1 - gious
        if weight is not None and weight.sum() > 0:
            return (losses * weight).sum()
        else:
            assert losses.numel() != 0
            return losses.sum()

    def forward(self, preds, targets, anchors):
        """Compute the loss dict from flattened predictions and targets.

        preds: (logits, bbox_reg, centerness); targets: (cls, reg,
        centerness) targets; anchors: per-level anchor boxes, expanded to
        the batch. Anchors with cls target <= -1 are treated as negatives.
        """
        logits, bbox_reg, centerness = preds
        batch_size = logits.shape[0]
        # print('pred', logits.shape, bbox_reg.shape)
        cls_targets, reg_targets, centerness_targets = targets
        # print('targets', cls_targets.shape, reg_targets.shape)
        logits_flatten = logits.reshape(-1, self.num_classes)
        bbox_reg_flatten = bbox_reg.reshape(-1, 4)
        centerness_flatten = centerness.reshape(-1)
        cls_targets_flatten = cls_targets.reshape(-1)
        reg_targets_flatten = reg_targets.reshape(-1, 4)
        centerness_targets_flatten = centerness_targets.reshape(-1)
        # print('cls_targets_flatten', cls_targets_flatten)
        is_positive = (cls_targets_flatten > -1)
        pos_inds = torch.nonzero(is_positive, as_tuple=False).squeeze(1)
        positive_num = pos_inds.numel()
        # check predict
        # idx = pos_inds[:min(positive_num, 10)]
        # print('before', pos_inds, pos_inds.numel())
        # print('before backward predict cls score', logits_flatten[idx, :].sigmoid())
        # print('positive_num', positive_num, pos_inds, pos_inds.shape)
        # print('is_positive', is_positive, is_positive.shape)
        # Negatives get an all-zero classification target.
        cls_targets_flatten[is_positive == False] = 0
        if self.num_classes > 1:
            cls_targets_flatten = F.one_hot(cls_targets_flatten, self.num_classes).squeeze(dim=1)
            cls_targets_flatten[is_positive == False, :] = 0
        elif self.num_classes == 1:
            cls_targets_flatten[is_positive == True] = 1
            cls_targets_flatten = cls_targets_flatten.unsqueeze(1)
        # print('cls_targets', cls_targets[:, 0].sum(), cls_targets[:, 1].sum())
        # print(logits_flatten.shape, cls_targets_flatten.shape)
        cls_loss = self.cls_loss_func(logits_flatten, cls_targets_flatten) / max(positive_num, 1.0)
        # cls_loss = self.cls_loss_func(logits_flatten, cls_targets_flatten) / logits_flatten.shape[0]
        # Regression and centerness losses only consider positive anchors.
        bbox_reg_flatten = bbox_reg_flatten[pos_inds]
        reg_targets_flatten = reg_targets_flatten[pos_inds]
        centerness_flatten = centerness_flatten[pos_inds]
        centerness_targets_flatten = centerness_targets_flatten[pos_inds]
        anchors = torch.cat(anchors).expand(batch_size, -1, 4).reshape(-1, 4)[pos_inds]
        # print(centerness_flatten[:10], centerness_targets_flatten[:10])
        # print(anchors)
        # print('positive_num', positive_num, centerness_flatten.shape)
        if positive_num > 0:
            # centerness loss
            # print(bbox_reg_flatten.type(), reg_targets_flatten.type())
            reg_loss = self.GIoULoss(bbox_reg_flatten, reg_targets_flatten, anchors, weight=centerness_targets_flatten) / positive_num
            centerness_loss = self.centerness_loss_func(centerness_flatten, centerness_targets_flatten) / positive_num
        else:
            # No positives in the batch: emit zero-valued losses that still
            # participate in autograd.
            print('no positive samples')
            reg_loss = bbox_reg_flatten.sum()
            centerness_loss = centerness_flatten.sum()
        return {"loss_cls": cls_loss, "loss_reg": reg_loss, "loss_centerness": centerness_loss, 'is_positive':is_positive}
|
998,813 | c38830f15293e78d763b083860b9c6d42a8bf989 | # -*- coding: utf-8 -*-
import datetime
import math
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from data.data_api import get_trip, get_name, is_suspended, unsuspend_trip, suspend_trip, remove_passenger, get_time, \
remove_trip, get_slots, new_trip, get_new_passengers
from routing.filters import separate_callback_data, create_callback_data as ccd
from util import common
from util.common import dir_name
def trips_handler(bot, update):
    """Handler for callbacks routed from "TRIPS", the trips submenu of /me.

    The callback payload is split with separate_callback_data(); data[1] is
    the sub-action, the remaining fields depend on the action.  All branches
    end by editing the originating message in place.
    """
    data = separate_callback_data(update.callback_query.data)
    action = data[1]
    chat_id = str(update.callback_query.message.chat_id)
    #
    # "New trip" button.
    # All requests from this button are routed to the add_trip() method
    # defined later in this file.
    #
    if action == "ADD":
        keyboard = [
            [InlineKeyboardButton("🎒 per Povo", callback_data=ccd("ADD_TRIP", "DAY", "Salita")),
             InlineKeyboardButton("🏡 per il NEST", callback_data=ccd("ADD_TRIP", "DAY", "Discesa"))],
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ME", "TRIPS"))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Vuoi aggiungere un viaggio verso il NEST o Povo? Ricorda che puoi aggiungere"
                                   " un solo viaggio al giorno per direzione. Eventuali viaggi già presenti"
                                   " verranno sovrascritti, passeggeri compresi.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    #
    # EDIT_TRIP is fired when the button of an existing trip is pressed.
    # From here the user can edit passengers and time, suspend the trip,
    # or delete it.  The keyboard depends on the trip's suspended state and
    # on whether the exam session (sessione) is active.
    #
    elif action == "EDIT_TRIP":  # Fired on the button of an existing trip
        direction, day = data[2:4]
        trip = get_trip(direction, day, chat_id)
        if trip["Suspended"]:
            text_string = " - 🚫 Sospeso"
            keyboard = [[InlineKeyboardButton("✔ Annullare la sospensione",
                                              callback_data=ccd("TRIPS", "SUS_TRIP", direction, day))]]
        elif not common.is_sessione():
            text_string = ""
            keyboard = [
                [InlineKeyboardButton("🕓 Modificare l'ora",
                                      callback_data=ccd("TRIPS", "EDIT_TRIP_HOUR", direction, day))],
                [InlineKeyboardButton("👥 Modificare i passeggeri",
                                      callback_data=ccd("TRIPS", "EDIT_PASS", direction, day))],
                [InlineKeyboardButton("🚫 Sospendere il viaggio",
                                      callback_data=ccd("TRIPS", "SUS_TRIP", direction, day))]
            ]
        else:
            # During the exam session suspension is not offered.
            text_string = ""
            keyboard = [
                [InlineKeyboardButton("🕓 Modificare l'ora",
                                      callback_data=ccd("TRIPS", "EDIT_TRIP_HOUR", direction, day))],
                [InlineKeyboardButton("👥 Modificare i passeggeri",
                                      callback_data=ccd("TRIPS", "EDIT_PASS", direction, day))]
            ]
        keyboard += [
            [InlineKeyboardButton("❌ Cancellare il viaggio",
                                  callback_data=ccd("TRIPS", "REMOVE_TRIP", direction, day))],
            [InlineKeyboardButton("↩ Tornare indietro", callback_data=ccd("ME", "TRIPS"))],
            [InlineKeyboardButton("🔚 Uscire", callback_data=ccd("EXIT"))]
        ]
        # Markdown mention links for each passenger group.
        temporary_passengers = ", ".join(f"[{get_name(user)}](tg://user?id={user})"
                                         for user in trip['Temporary'])
        permanent_passengers = ", ".join(f"[{get_name(user)}](tg://user?id={user})"
                                         for user in trip['Permanent'])
        suspended_passengers = ", ".join(f"[{get_name(user)}](tg://user?id={user})"
                                         for user in trip['SuspendedUsers'])
        if common.is_sessione():
            # Number of days to add to today to reach the trip's weekday.
            delta = common.days.index(day) + len(common.days) - datetime.datetime.today().weekday()
            shown_day = f"{day} {datetime.datetime.today().day + delta}"
        else:
            shown_day = day
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text=f"Viaggio selezionato: {text_string}"
                                   f"\n\n🗓 {shown_day}"
                                   f"\n{common.dir_name(direction)}"
                                   f"\n🕓 {trip['Time']}"
                                   f"\n👥 (_temporanei_) {temporary_passengers}"
                                   f"\n👥 (_permanenti_) {permanent_passengers}"
                                   f"\n👥 (_sospesi_) {suspended_passengers}"
                                   f"\n\nCosa vuoi fare?",
                              reply_markup=InlineKeyboardMarkup(keyboard),
                              parse_mode="Markdown")
    #
    # SUS_TRIP = SUSPEND_TRIP. Temporarily suspends a trip (for one week),
    # hiding it from users and blocking present and future bookings.  The
    # suspension is lifted at 02:00 of the day after the suspended trip,
    # together with the handling in night.py.  The code detects whether the
    # trip is already suspended and adapts layout and actions accordingly.
    #
    elif action == "SUS_TRIP":  # Suspend/unsuspend confirmation prompt
        direction, day = data[2:4]
        keyboard = [
            [InlineKeyboardButton("✔ Sì", callback_data=ccd("TRIPS", "CO_SUS_TRIP", direction, day)),
             InlineKeyboardButton("❌ No", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))]
        ]
        if is_suspended(direction, day, chat_id):
            message = "Vuoi annullare la sospensione di questo viaggio?"
        else:
            message = "La sospensione di un viaggio è valida per una sola volta e " \
                      "comporta la sospensione di accreditamenti e prenotazioni " \
                      "fino al giorno successivo al viaggio. Sei sicuro di voler " \
                      "sospendere questo viaggio?"
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text=message, reply_markup=InlineKeyboardMarkup(keyboard))
    # CO_SUS_TRIP = CONFIRM_SUSPEND_TRIP
    # Confirmation step: toggles the suspended state and notifies passengers.
    elif action == "CO_SUS_TRIP":
        direction, day = data[2:4]
        if is_suspended(direction, day, chat_id):
            unsuspend_trip(direction, day, chat_id)
            message = "Il viaggio è ora operativo."
        else:
            suspend_trip(direction, day, chat_id)
            message = "Viaggio sospeso con successo."
        keyboard = [
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text=message,
                              reply_markup=InlineKeyboardMarkup(keyboard))
        # Notify every passenger of the changed suspension state.
        alert_suspension(bot, direction, day, chat_id)
    #
    # The following three branches implement "Edit the time" (from EDIT_TRIP).
    # They necessarily run in sequence.  Be careful with changes: the
    # Telegram API limits callback payloads to 64 bytes.
    #
    # EDIT_TRIP_HOUR
    # Fired when choosing the hour while editing a trip's time.
    elif action == "EDIT_TRIP_HOUR":
        direction, day = data[2:4]
        keyboard = [
            [InlineKeyboardButton(str(i).zfill(2), callback_data=ccd("TRIPS", "EDIT_TRIP_MIN", direction, day, i))
             for i in range(7, 14, 1)],
            [InlineKeyboardButton(str(i), callback_data=ccd("TRIPS", "EDIT_TRIP_MIN", direction, day, i))
             for i in range(14, 21, 1)],
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Scegli l'ora di partenza del viaggio.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # EDIT_TRIP_MINUTES
    # Fired when choosing the minutes while editing a trip's time.
    elif action == "EDIT_TRIP_MIN":
        direction, day, hour = data[2:5]
        keyboard = [
            [InlineKeyboardButton(str(i).zfill(2), callback_data=ccd("TRIPS", "CO_EDIT_TRIP", direction, day, hour, i))
             for i in range(0, 30, 5)],
            [InlineKeyboardButton(str(i), callback_data=ccd("TRIPS", "CO_EDIT_TRIP", direction, day, hour, i))
             for i in range(30, 60, 5)],
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Scegli i minuti di partenza del viaggio.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # CO_EDIT_TRIP = CONFIRM_EDIT_TRIP
    # Confirms the freshly edited time and notifies all booked passengers.
    elif action == "CO_EDIT_TRIP":
        direction, day, hour, minute = data[2:6]
        trip = get_trip(direction, str(day), chat_id)
        # NOTE(review): this mutates the trip dict in place; presumably
        # get_trip returns a live reference that persists — confirm.
        time = trip["Time"] = f"{hour.zfill(2)}:{minute.zfill(2)}"
        keyboard = [
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ME", "TRIPS"))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        for user_group in trip["Permanent"], trip["Temporary"]:
            for user in user_group:
                bot.send_message(chat_id=user,
                                 text=f"[{get_name(chat_id)}](tg://user?id={chat_id})"
                                      f" ha spostato l'orario del viaggio di "
                                      f"{day} {common.dir_name(direction)} alle {time}.",
                                 parse_mode="Markdown")
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text=f"Nuovo orario di partenza:\n{day} alle "
                                   f"{time} {common.dir_name(direction)}",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    #
    # The following commands edit a trip's passenger list and remove the
    # trip itself.  The command to add new passengers is at the bottom of
    # this file.
    #
    # EDIT_PASS - Fired once the button of the person to book is pressed
    elif action == "EDIT_PASS":
        direction, day = data[2:4]
        trip = get_trip(direction, day, chat_id)
        permanent_users = trip["Permanent"]
        temporary_users = trip["Temporary"]
        suspended_users = trip["SuspendedUsers"]
        # One button per booked person, grouped by Permanent / Temporary /
        # Suspended booking type.
        user_lines = [[InlineKeyboardButton(f"{get_name(user)} - Permanente",
                                            callback_data=ccd("TRIPS", "REMOVE_PASS", direction, day, user,
                                                              "Permanent"))] for user in permanent_users] \
                     + [[InlineKeyboardButton(f"{get_name(user)} - Temporaneo",
                                              callback_data=ccd("TRIPS", "REMOVE_PASS", direction, day, user,
                                                                "Temporary"))]
                        for user in temporary_users] \
                     + [[InlineKeyboardButton(f"{get_name(user)} - Permanente (SOSPESO)",
                                              callback_data=ccd("TRIPS", "REMOVE_PASS", direction, day, user,
                                                                "SuspendedUsers"))]
                        for user in suspended_users]
        keyboard = user_lines + [
            [InlineKeyboardButton("➕ Nuovo passeggero", callback_data=ccd("ADD_PASS", "SELECT", direction, day, "0"))],
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Clicca su un passeggero per rimuoverlo"
                                   " dal tuo viaggio, oppure aggiungine uno"
                                   " manualmente.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # REMOVE_PASS - Fired after pressing the button of one of the trip's users
    elif action == "REMOVE_PASS":
        direction, day, user, mode = data[2:6]
        keyboard = [
            [InlineKeyboardButton("✔ Sì", callback_data=ccd("TRIPS", "CO_RE_PA", direction, day, user, mode)),
             InlineKeyboardButton("❌ No", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Sei sicuro di voler rimuovere questo passeggero?",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # CO_RE_PA = CONFIRM_REMOVE_PASSENGER
    # Fired on a positive answer to the previous command.
    elif action == "CO_RE_PA":
        direction, day, user, mode = data[2:6]
        remove_passenger(direction, day, chat_id, mode, user)
        keyboard = [
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ME", "TRIPS"))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Passeggero rimosso con successo.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
        # Tell the removed passenger which trip they were dropped from.
        bot.send_message(chat_id=user,
                         text=f"Sei stato rimosso dal seguente viaggio: "
                              f"\n\n🚗 [{get_name(chat_id)}](tg://user?id={chat_id})"
                              f"\n🗓 {day}"
                              f"\n🕓 {get_time(direction, day, chat_id)}"
                              f"\n{common.dir_name(direction)}",
                         parse_mode="Markdown")
    # Fired when "Remove trip" is pressed in the trip view
    elif action == "REMOVE_TRIP":
        direction, day = data[2:4]
        keyboard = [
            [InlineKeyboardButton("✔ Sì", callback_data=ccd("TRIPS", "CO_RE_TR", direction, day)),
             InlineKeyboardButton("❌ No", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Sei sicuro di voler cancellare questo viaggio?",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # CO_RE_TR = CONFIRM_REMOVE_TRIP
    # Fired on a positive answer to the previous command.
    elif action == "CO_RE_TR":
        direction, day = data[2:4]
        remove_trip(direction, day, chat_id)
        keyboard = [
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ME", "TRIPS"))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Viaggio cancellato con successo.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
def add_passenger(bot, update):
    """
    Fired when a button with the name of a passenger to add is pressed.
    This method can only be reached from the trips submenu.
    Potential passengers are listed over several pages to avoid endless
    messages.  Each page has a button that opens it immediately; every page
    holds PAGE_SIZE people (constant defined in util/common.py).
    """
    chat_id = str(update.callback_query.message.chat_id)
    data = separate_callback_data(update.callback_query.data)
    action = data[1]
    # Fired from the trip view to add a passenger
    if action == "SELECT":
        direction, day, page = data[2:5]
        keyboard = []
        page = int(page)
        users = get_new_passengers(chat_id)
        # One button per candidate on the current page; stop early when the
        # last (partial) page runs out of users.
        for index in range(common.PAGE_SIZE * page, common.PAGE_SIZE * (page + 1), 1):
            try:
                name, id = users[index]
                keyboard.append([InlineKeyboardButton(
                    name, callback_data=ccd("ADD_PASS", "MODE", direction, day, id))])
            except IndexError:
                break
        # One button per page, since the full list is too large; the current
        # page is marked with a check symbol.
        page_buttons = []
        for index in range(0, int(math.ceil(len(users) / common.PAGE_SIZE)), 1):
            if index == page:
                text = "☑"
            else:
                text = str(index + 1)
            page_buttons.append(InlineKeyboardButton(
                text, callback_data=ccd("ADD_PASS", "SELECT", direction, day, index))
            )
        keyboard.append(page_buttons)
        keyboard.append([InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "EDIT_PASS", direction, day))])
        keyboard.append([InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))])
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Seleziona un passeggero da aggiungere al tuo viaggio.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # Fired once the button of the person to book is pressed; during the exam
    # session only temporary bookings are offered.
    elif action == "MODE":
        direction, day, user = data[2:5]
        if common.is_sessione():
            keyboard = [
                [InlineKeyboardButton("🔂 Temporanea", callback_data=ccd("ADD_PASS", "CONFIRM",
                                                                        direction, day, user, "Temporary"))],
                [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ADD_PASS", "SELECT", direction, day, "0"))],
                [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
            ]
        else:
            keyboard = [
                [InlineKeyboardButton("🔂 Temporanea", callback_data=ccd("ADD_PASS", "CONFIRM",
                                                                        direction, day, user, "Temporary"))],
                [InlineKeyboardButton("🔁 Permanente", callback_data=ccd("ADD_PASS", "CONFIRM",
                                                                        direction, day, user, "Permanent"))],
                [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ADD_PASS", "SELECT", direction, day, "0"))],
                [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
            ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Scegli la modalità di prenotazione.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    # Fired once the person and the booking mode have both been chosen:
    # validate (duplicate booking, free seats) and register the passenger.
    elif action == "CONFIRM":
        direction, day, user, mode = data[2:6]
        keyboard = [
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "EDIT_TRIP", direction, day))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        trip = get_trip(direction, day, chat_id)
        occupied_slots = len(trip["Permanent"]) + len(trip["Temporary"])
        total_slots = get_slots(chat_id)
        if user in trip["Temporary"] or user in trip["Permanent"]:
            bot.edit_message_text(chat_id=chat_id,
                                  message_id=update.callback_query.message.message_id,
                                  text="Questa persona si è già prenotata in questo viaggio!",
                                  reply_markup=InlineKeyboardMarkup(keyboard))
        elif occupied_slots >= total_slots:
            bot.edit_message_text(chat_id=chat_id,
                                  message_id=update.callback_query.message.message_id,
                                  text="Temo che il tuo amico dovrà andare a piedi, i posti sono finiti.",
                                  reply_markup=InlineKeyboardMarkup(keyboard))
        else:
            # NOTE(review): appends to the dict returned by get_trip —
            # presumably a live reference that persists; confirm.
            trip[mode].append(str(user))
            bot.send_message(chat_id=user,
                             text=f"[{get_name(chat_id)}](tg://user?id={chat_id})"
                                  f" ha effettuato una nuova prenotazione a tuo nome nel suo viaggio: "
                                  f"\n\n🗓 {day}"
                                  f"\n🕓 {trip['Time']}"
                                  f"\n{common.dir_name(direction)}"
                                  f"{common.mode_name(mode)}",
                             parse_mode="Markdown")
            bot.edit_message_text(chat_id=chat_id,
                                  message_id=update.callback_query.message.message_id,
                                  reply_markup=InlineKeyboardMarkup(keyboard),
                                  text="Prenotazione completata. Dati del viaggio:"
                                       f"\n\n👤 {str(get_name(user))}"
                                       f"\n🗓 {day}"
                                       f"\n🕓 {trip['Time']}"
                                       f"\n{common.dir_name(direction)}"
                                       f"\n{common.mode_name(mode)}")
#
# Questo metodo viene chiamato da trips_handler().
# Da questo metodo è possibile inserire per intero un nuovo viaggio di un autista.
#
def add_trip(bot, update):
    """Wizard to insert a driver's new trip, reached from trips_handler().

    Steps run in order: DAY -> HOUR -> MINUTE -> CONFIRM, each passing its
    accumulated choices forward through the callback data.
    """
    data = separate_callback_data(update.callback_query.data)
    chat_id = str(update.callback_query.message.chat_id)
    mode = data[1]
    #
    # Day selection step.
    # Incoming data: "ADD_TRIP", "DAY", direction
    #
    if mode == "DAY":
        direction = data[2:][0]
        keyboard = []
        today_number = datetime.datetime.today().weekday()
        # One button per upcoming weekday, skipping weekends.
        for item in range(len(common.days)):
            day = common.day_to_string((item + today_number) % len(common.days))
            if day == "Sabato" or day == "Domenica":  # Saturday, Sunday
                continue
            keyboard.append(InlineKeyboardButton(f"{day[:2]} {item + datetime.datetime.today().day}",
                                                 callback_data=ccd("ADD_TRIP", "HOUR", day, direction)))
        keyboard = [keyboard,
                    [InlineKeyboardButton("↩ Indietro", callback_data=ccd("TRIPS", "ADD"))],
                    [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Scegli il giorno del viaggio.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    #
    # Hour selection step.
    # Incoming data: "ADD_TRIP", "HOUR", day, direction
    #
    elif mode == "HOUR":
        day, direction = data[2:]
        keyboard = [
            [InlineKeyboardButton(str(i).zfill(2), callback_data=ccd("ADD_TRIP", "MINUTE", str(i), day, direction))
             for i in range(7, 14, 1)],
            [InlineKeyboardButton(str(i), callback_data=ccd("ADD_TRIP", "MINUTE", str(i), day, direction))
             for i in range(14, 21, 1)],
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ADD_TRIP", "DAY", direction))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Scegli l'ora di partenza del viaggio.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    #
    # Minute selection step.
    # Incoming data: "ADD_TRIP", "HOUR", hour, day, direction
    #
    elif mode == "MINUTE":
        hour, day, direction = data[2:]
        keyboard = [
            [InlineKeyboardButton(str(i).zfill(2),
                                  callback_data=ccd("ADD_TRIP", "CONFIRM", str(i), hour, day, direction))
             for i in range(0, 30, 5)],
            [InlineKeyboardButton(str(i), callback_data=ccd("ADD_TRIP", "CONFIRM", str(i), hour, day, direction))
             for i in range(30, 60, 5)],
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ADD_TRIP", "HOUR", day, direction))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text="Scegli i minuti di partenza del viaggio.",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    #
    # Final confirmation step: persist the trip and report it.
    # Incoming data: "ADD_TRIP", "CONFIRM", minute, hour, day, direction
    #
    elif mode == "CONFIRM":
        minute, hour, day, direction = data[2:]
        time = f"{hour.zfill(2)}:{minute.zfill(2)}"
        keyboard = [
            [InlineKeyboardButton("↩ Indietro", callback_data=ccd("ME", "TRIPS"))],
            [InlineKeyboardButton("🔚 Esci", callback_data=ccd("EXIT"))]
        ]
        new_trip(direction, day, chat_id, time)
        user_text = f"Viaggio aggiunto con successo:" \
                    f"\n\n{common.dir_name(direction)}" \
                    f"\n🗓 {day}" \
                    f"\n🕓 {time}"
        bot.edit_message_text(chat_id=chat_id,
                              message_id=update.callback_query.message.message_id,
                              text=user_text,
                              reply_markup=InlineKeyboardMarkup(keyboard))
def alert_suspension(bot, direction, day, driver):
    """Notify every passenger of *driver*'s trip about a suspension change.

    If the trip is now suspended, permanent passengers learn they will not
    be charged and temporary ones that their booking rolls to next week;
    otherwise everyone is told the suspension was lifted.
    """
    trip = get_trip(direction, day, driver)
    driver_name = f"[{get_name(driver)}](tg://user?id={driver})"
    permanent_users = trip["Permanent"]
    temporary_users = trip["Temporary"]
    if trip["Suspended"]:
        for user in permanent_users:
            bot.send_message(chat_id=user,
                             text=f"Attenzione! {driver_name} ha sospeso il viaggio di {day}"
                                  f" {dir_name(direction)}. Non verrai addebitato per questa volta.",
                             parse_mode="Markdown")
        for user in temporary_users:
            bot.send_message(chat_id=user,
                             text=f"Attenzione! {driver_name} ha sospeso il viaggio di {day}"
                                  f" {dir_name(direction)}."
                                  f" La tua prenotazione scalerà alla settimana successiva.",
                             parse_mode="Markdown")
    else:
        for user in (permanent_users + temporary_users):
            bot.send_message(chat_id=user,
                             text=f"Attenzione! {driver_name} ha annullato la sospensione del viaggio di {day}"
                                  f" {dir_name(direction)}.",
                             parse_mode="Markdown")
|
998,814 | 48c6ce078d49dbd825c59859508459ed6dda9253 | import numpy as np
from getTrainingData import getTrainingData
def normalizeData():
    """Return the training data with selected columns scaled to unit L2 norm.

    Columns 3, 8, 10 and 17 are each divided by their Euclidean norm;
    all other columns are left untouched.
    """
    trainingData = getTrainingData()
    # A single loop replaces the four duplicated statements of the original
    # implementation (the unused `size` local was dropped).
    for col in (3, 8, 10, 17):
        trainingData[:, col] = trainingData[:, col] / np.linalg.norm(trainingData[:, col])
    return trainingData
|
998,815 | c411d6485f108a82c1103184cc8824b84b11a866 | '''
Unittests for VertexCovering.py
January 2021 Jakub Kazimierski
'''
import unittest
import VertexCovering
class test_VertexCovering(unittest.TestCase):
    """Unit tests for VertexCovering.py."""

    # region Unittests
    def test_ExpectedOutput(self):
        """The sample graph with candidate cover (C,B) must be accepted."""
        graph_spec = ["(A,B,C,D)", "(A-B,A-D,B-D,A-C)", "(C,B)"]
        verdict = VertexCovering.VertexCovering(graph_spec)
        self.assertEqual(verdict, "yes")
    # endregion
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
    '''
    Main method for test cases.
    '''
    unittest.main()
998,816 | 2e865da8dd537bc2e4f863db95a70d58849b9608 | from multiprocessing import Process
import threading
import os,signal, time, sys, random, sysv_ipc,datetime
Quantite_energie = 0
def Weather():
    """Weather process: print the current module-global temperature."""
    print("weather affiche la temperature: ",temperature)
def transaction(lock,semaphore_thread):
    """Worker thread: consume one message from MARKET_QUEUE and settle it.

    The payload is "<homeNumber>,<quantity>"; the sysv message *type* encodes
    the direction (1 = buy a house's surplus, 2 = sell to a house).  The
    shared Quantite_energie counter is updated under *lock*, and
    *semaphore_thread* is released at the end so Market can spawn a new
    worker.
    """
    message,messageType = MARKET_QUEUE.receive()
    data = message.decode().split(',')
    global Quantite_energie
    #print(data)
    qtt=data[1]
    homeNum=data[0]
    # Defensive trim: if the payload carries a stray tuple parenthesis,
    # drop the trailing character of the quantity field.
    if ')'in data[0] or '(' in data[0]:
        print("mmodif taille")
        qttlen=len(qtt)-1
    else:
        qttlen=len(qtt)
    print("taille",qttlen)
    qttDemande = int(qtt[0:qttlen])
    #print(homeNum," ",qttDemande)
    #homeNumber = int(data[0])
    if messageType==2:  # type 2: sell energy to the house
        with lock:
            Quantite_energie=Quantite_energie-qttDemande
        print(qttDemande," vendue a : ",homeNum)
    if messageType==1:  # type 1: buy the house's surplus
        with lock:
            Quantite_energie=Quantite_energie+qttDemande
        print(qttDemande," achete a : ",homeNum)
    print("quantité d'energie ayant transité par market",Quantite_energie)
    semaphore_thread.release()
def Market():
    """Market main loop: start Weather once, then serve transactions forever.

    At most 3 transaction threads run concurrently; the bound is enforced by
    the semaphore, which each worker releases when it finishes.
    """
    Process(target=Weather,args=()).start()
    semaphore_thread=threading.Semaphore(3)
    lock=threading.Lock()
    while True:
        semaphore_thread.acquire()
        trans=threading.Thread(target=transaction,args=(lock,semaphore_thread))
        trans.start()
def Houses():
    """Spawn NB_HOME Home processes with random consumption/production factors.

    NOTE(review): join() inside the loop makes the homes run one after the
    other rather than in parallel — confirm whether that is intended.
    """
    for i in range(NB_HOME):
        A,B = (random.randint(0,10) for x in range(0,2))
        home = Process(target=Home,args=(i,A,B,))
        home.start()
        home.join()
def Home(homeNumber, A, B):
    """Simulate one house: derive consumption/production from the global
    temperature and report any imbalance to the market."""
    consumption = temperature * A
    production = temperature * B
    if production > consumption:
        Surproduction(homeNumber, consumption, production)
    elif consumption > production:
        Surconsommation(homeNumber, consumption, production)
def Surproduction(homeNumber, homeConso, homeProd):
    """Announce an energy surplus and offer it on the market (message type 1)."""
    print("Surprod", homeNumber)
    surplus = homeProd - homeConso
    print("surproduction de ", surplus, " par ", homeNumber)
    payload = f"{homeNumber},{surplus}"
    MARKET_QUEUE.send(payload.encode(), type=1)
    surplus = 0
    # TODO: verify the print inside the consumer thread
def Surconsommation(homeNumber, homeConso, homeProd):
    """Announce an energy deficit and request it from the market (message type 2)."""
    print("Surconso", homeNumber)
    deficit = homeConso - homeProd
    print("surconsommation de ", deficit, " par ", homeNumber)
    payload = f"{homeNumber},{deficit}"
    MARKET_QUEUE.send(payload.encode(), type=2)
    deficit = 0
    # TODO: verify the print inside the consumer thread
if __name__ == '__main__':
    # Simulation parameters and shared SysV message queues (keys 1000/1100).
    NB_HOME = 5
    temperature = 20
    HOME_QUEUE = sysv_ipc.MessageQueue(1000,sysv_ipc.IPC_CREAT)
    MARKET_QUEUE = sysv_ipc.MessageQueue(1100,sysv_ipc.IPC_CREAT)
    # Start the market consumer and the house producers as processes.
    Process(target=Market,args=()).start()
    Process(target=Houses,args=()).start()
998,817 | 6c4a4bff4a319fef0d627965b9a86f958730a7be | from EmpiricalDistribution import EmpiricalDistribution
from Unigram import Unigram
from Bigram import Bigram
brown_clean = './corpora/clean/brown_clean.txt'
def head(n, prob_table):
    """Print up to the first *n* key/value pairs of *prob_table*, then a
    blank line.  (With n <= 0 the whole table is printed, matching the
    original behavior.)"""
    for shown, key in enumerate(prob_table, start=1):
        print(key, prob_table[key])
        if shown == n:
            break
    print()
def run(model, corpus, preview_len):
    """Instantiate *model* on *corpus*, train it and print one generated
    sentence followed by a blank line."""
    instance = model(corpus)
    instance.train()
    #head(preview_len, instance.prob_table)
    #head(preview_len, instance.count_table)
    print(instance.generate_sentence())
    print()
def run_empirical():
    """Train and sample from the empirical-distribution model on Brown."""
    run(EmpiricalDistribution, brown_clean, 10)
def run_unigram():
    """Train and sample from the unigram model on Brown."""
    run(Unigram, brown_clean, 10)
def run_bigram():
    """Train and sample from the bigram model on Brown."""
    run(Bigram, brown_clean, 10)
# Script entry point: only the bigram demo is enabled by default.
if __name__ == '__main__':
    # run_empirical()
    # run_unigram()
    run_bigram()
998,818 | 0609f0e4597de9817e0acd0751e094fe7f78074c | import re
import os
def getNum(string):
    """Return every run of consecutive digits in *string* as a list of strings.

    The pattern is now a raw string: '\\d+' without the r-prefix relies on an
    invalid escape sequence, which modern Python flags as a warning.
    """
    pattern = re.compile(r'\d+')
    return pattern.findall(string)
def getLineFromFile(readfile):
    """For each line of *readfile*, print one "insert" statement per number
    after the first, keyed by the line's first number.

    The file is now opened with a context manager: the original left the
    handle open (resource leak).
    """
    with open(readfile) as rf:
        lines = rf.readlines()
    for line in lines:
        nums_list = getNum(line)
        # First number is the key; each following number yields a statement.
        for num in nums_list[1:]:
            sql = "insert " + nums_list[0] + "-- " + num
            print(sql)

# Script entry: dump the generated statements for the file named "txt".
getLineFromFile("txt")
|
998,819 | 93ca3f29935222a0be3ad3eac0e736fa3bee3be3 | from threading import Thread
import time
from decimal import Decimal
import requests, json
class Strategy(Thread):
    """Worker thread that averages a batch of prices and trades against a
    local pricing service.

    Fixes over the original (which was Python-2-only and would not even
    parse under Python 3): print statements and `except E, e` syntax
    modernized; `Decimal(1.1)` (inexact float-constructed Decimal) replaced
    by `Decimal("1.1")`; responses read via `r.text` so the quote
    replacement works on `str` rather than `bytes`.
    """

    def __init__(self, queue, batch_size):
        Thread.__init__(self)
        self.queue = queue          # source of price batches
        self.batch_size = batch_size

    def run(self):
        """Fetch one batch, post its average price, poll whether it sold,
        and print the adjusted price (+10% if sold, -10% otherwise)."""
        batch = self.queue.get(self.batch_size)
        if not batch:
            # no data available
            return
        try:
            avg = sum(batch) / len(batch)
        except ZeroDivisionError:
            return
        try:
            r = requests.post('http://127.0.0.1:27015', data={'price': avg})
            if r.status_code == 200:
                # Service replies with single-quoted pseudo-JSON; normalize it.
                req_id = json.loads(r.text.replace("'", '"'))['id']
                time.sleep(3)
                r = requests.get('http://127.0.0.1:27015/?id=%s' % req_id)
                if r.status_code == 200:
                    sold = json.loads(r.text.replace("'", '"'))['sold']
                    if sold:
                        print(avg * Decimal("1.1"))
                    else:
                        print(avg * Decimal("0.9"))
        except Exception:
            # Deliberate best-effort: network/parse failures are ignored,
            # as in the original implementation.
            pass
|
998,820 | 9b4ed7623e687a3a18feee5f07a12f8b400b2100 |
from tests import FieldElementTest
from helper import run
# Execute each FieldElement arithmetic test case through the shared runner.
run(FieldElementTest("test_ne"))
run(FieldElementTest("test_sub"))
run(FieldElementTest("test_mul"))
run(FieldElementTest("test_div"))
|
# Read an integer and report its parity (Dutch prompt/labels preserved).
n = int(input("Vul een getal in: \n"))
getal = "even" if n % 2 == 0 else "oneven"
print(getal)
# Read two integers and print the result of each basic operation,
# one "a <op> b = result" line per operator.
a = int(input())
b = int(input())
for symbol, value in (
    ("+", a + b),
    ("-", a - b),
    ("*", a * b),
    ("/", a / b),
    ("**", a ** b),
):
    print(a, symbol, b, "=", value)
998,823 | a265a583ef19d1743e419177c8ac2ccc1d63a303 | from django.urls import path
from .views import VentaView, ProductoView
# URL routes must be an ordered list, not a set literal: Django resolves
# patterns in order, and a set makes the matching order nondeterministic.
urlpatterns = [
    path('venta/', VentaView.as_view(), name="Realizar una venta"),
    path('producto/', ProductoView.as_view()),
]
def transform(value, subject_number):
    """One step of the modular handshake: value * subject_number mod 20201227."""
    return value * subject_number % 20201227
public_keys = [9717666, 20089533]

# Recover the loop size of the first device by repeatedly transforming 1
# with subject number 7 until its public key appears.
loop_size = 0
key = 1
while key != public_keys[0]:
    key = transform(key, 7)
    loop_size += 1

# Apply the second public key that many times to obtain the shared key.
key = 1
for _ in range(loop_size):
    key = transform(key, public_keys[1])
print(key)
def shifteslayout(i, p, *rows):
    """Register *rows* as the ES shift layout named *p* in layout dict *i*,
    under the '00 Shift/EcalPreshower/' prefix."""
    i["00 Shift/EcalPreshower/" + p] = DQMItem(layout=rows)
shifteslayout(dqmitems, "01-IntegritySummary",
[{ 'path': "EcalPreshower/ESIntegrityClient/ES Integrity Summary 1 Z 1 P 1", 'description': "ES+ Front Integrity Summary 1 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> <br/> <table width=100%> <tr><td>1 - not used<td>2 - fiber problem<td>3 - OK<td>4 - FED problem<td><tr>5 - KCHIP problem<td>6 - ES counters are not synced with GT counters (see ESRawDataTask) <td> 7 - more than one problem<td>8 - SLink CRC error</table> " },
{ 'path': "EcalPreshower/ESIntegrityClient/ES Integrity Summary 1 Z -1 P 1", 'description': "ES- Front Integrity Summary 1 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> <br/> <table width=100%> <tr><td>1 - not used<td>2 - fiber problem<td>3 - OK<td>4 - FED problem<td><tr>5 - KCHIP problem<td>6 - ES counters are not synced with GT counters (see ESRawDataTask) <td> 7 - more than one problem<td>8 - SLink CRC error</table> " }],
[{ 'path': "EcalPreshower/ESIntegrityClient/ES Integrity Summary 1 Z 1 P 2", 'description': "ES+ Rear Integrity Summary 1 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> <br/> <table width=100%> <tr><td>1 - not used<td>2 - fiber problem<td>3 - OK<td>4 - FED problem<td><tr>5 - KCHIP problem<td>6 - ES counters are not synced with GT counters (see ESRawDataTask) <td> 7 - more than one problem<td>8 - SLink CRC error</table> " },
{ 'path': "EcalPreshower/ESIntegrityClient/ES Integrity Summary 1 Z -1 P 2", 'description': "ES- Rear Integrity Summary 1 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> <br/> <table width=100%> <tr><td>1 - not used<td>2 - fiber problem<td>3 - OK<td>4 - FED problem<td><tr>5 - KCHIP problem<td>6 - ES counters are not synced with GT counters (see ESRawDataTask) <td> 7 - more than one problem<td>8 - SLink CRC error</table> " }])
shifteslayout(dqmitems, "02-GoodRechitOccupancySummary",
[{ 'path': "EcalPreshower/ESOccupancyTask/ES Occupancy with selected hits Z 1 P 1", 'description': "ES Occupancy for ES+F. The colors in each segment represent the average number of strips per sensor per event that have hits with a pulse shape consistent with a real signal. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " },
{ 'path': "EcalPreshower/ESOccupancyTask/ES Occupancy with selected hits Z -1 P 1", 'description': "ES Occupancy for ES-F. The colors in each segment represent the average number of strips per sensor per event that have hits with a pulse shape consistent with a real signal. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " }],
[{ 'path': "EcalPreshower/ESOccupancyTask/ES Occupancy with selected hits Z 1 P 2", 'description': "ES Occupancy for ES+R. The colors in each segment represent the average number of strips per sensor per event that have hits with a pulse shape consistent with a real signal. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " },
{ 'path': "EcalPreshower/ESOccupancyTask/ES Occupancy with selected hits Z -1 P 2", 'description': "ES Occupancy for ES-R. The colors in each segment represent the average number of strips per sensor per event that have hits with a pulse shape consistent with a real signal. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " }])
shifteslayout(dqmitems, "03-GoodRechitEnergySummary",
[{ 'path': "EcalPreshower/ESOccupancyTask/ES RecHit Energy with selected hits Z 1 P 1", 'description': "Energy spectrum with selected hits with a pulse shape consistent with a real signal for ES+F. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " },
{ 'path': "EcalPreshower/ESOccupancyTask/ES RecHit Energy with selected hits Z -1 P 1", 'description': "Energy spectrum with selected hits with a pulse shape consistent with a real signal for ES-F. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " }],
[{ 'path': "EcalPreshower/ESOccupancyTask/ES RecHit Energy with selected hits Z 1 P 2", 'description': "Energy spectrum with selected hits with a pulse shape consistent with a real signal for ES+R. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " },
{ 'path': "EcalPreshower/ESOccupancyTask/ES RecHit Energy with selected hits Z -1 P 2", 'description': "Energy spectrum with selected hits with a pulse shape consistent with a real signal for ES-R. A good reference run is 251643 for collision and 253299 for cosmic. The detailed description can be found in <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " }])
shifteslayout(dqmitems, "04-ESTimingTaskSummary-EcalPreshower",
[{ 'path': "EcalPreshower/ESTimingTask/ES Timing Z 1 P 1", 'description': "ES Timing Z 1 P 1 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " },
{ 'path': "EcalPreshower/ESTimingTask/ES Timing Z -1 P 1", 'description': "ES Timing Z -1 P 1 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " }],
[{ 'path': "EcalPreshower/ESTimingTask/ES Timing Z 1 P 2", 'description': "ES Timing Z 1 P 2 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " },
{ 'path': "EcalPreshower/ESTimingTask/ES Timing Z -1 P 2", 'description': "ES Timing Z -1 P 2 - <a href=https://twiki.cern.ch/twiki/bin/view/CMS/DQMShiftPreshower>DQMShiftPreshower</a> " }])
shifteslayout(dqmitems, "05-ESGain-EcalPreshower",
[{ 'path': "EcalPreshower/ESIntegrityTask/ES Gain used for data taking", 'description': "ES Gain configuration in the front-end electronics"}])
shifteslayout(dqmitems, "06-ES-Fiber-Error-Code",
[{ 'path': "EcalPreshower/ESIntegrityTask/ES Fiber Error Code", 'description': "Error codes for each ES integrity error; the number of entries is the number of events with an error."}])
|
998,826 | f3a7efabc9ce5ad23cd6f7eb256b800fa9047ac6 | """ read the LHD summary csv file and put in in a dictionary of arrays
where the index is the shot number. This may require adding a "0" shot
(apparently not as of Feb 2013.
Where possible, integers and reals
are converted, and the strings are reduced to the minimum length. (Note - this
will cause errors if longer strings are added afterwards.
"""
from pyfusion.utils import read_csv_data
import numpy as np
import re
# Optional second pass: merge extra columns (e.g. GAMMA) from another
# summary csv that were missing from the big file.  Disabled by default.
hack_merge_another_file=False
print('reading..')
lhd=read_csv_data.read_csv_data('LHD_Summary_Long.csv',header=3)
print('{k} keys, {n} entries read'.format(n=len(lhd['nShotnumber']),
                                          k=len(lhd.keys())))
# this is hacked in because I missed GAMMA and another in my big file
if hack_merge_another_file:
    lhd2 = read_csv_data.read_csv_data('/home/bdb112/datamining/lhd_summary_data.csv',header=3)
    ksh='nShotnumber'
    # both files must describe exactly the same shot list, row for row
    ws2 = np.where(lhd[ksh] != lhd2[ksh])[0]
    if len(ws2) != 0: raise LookupError('{n} mismatched shots'.format(n=len(ws2)))
    # if we already have the key, give this one a different name -otherwise same
    for k in lhd2.keys():
        if k in lhd.keys(): lhd[k+'1']=lhd2[k]
        else: lhd[k]=lhd2[k]
def nansort(arr):
    """Return *arr* sorted ascending with NaN entries removed.

    Fix: the original called the bare name ``isnan``, which is undefined in
    this module (only ``numpy`` is imported, as ``np``) and raised NameError.
    """
    return np.sort(arr[~np.isnan(arr)])
def nanargsort(arr):
    """Return the argsort of the NaN-filtered copy of *arr*.

    Note: the returned indices refer to positions within the *filtered*
    array, not the original one (same as the original implementation).
    Fix: the bare name ``isnan`` was undefined (NameError); use ``np.isnan``.
    """
    return np.argsort(arr[~np.isnan(arr)])
if __name__ == "__main__":
    """ Do it simply, not necessarily efficiently
    (after wasting 3 hours doing it efficiently)
    First delete all records with blank shot numbers by copying to tmp
    Then convert shot to int, and reorder everything to shot order
    Then create the final shot array, indexed by shot (must be equal or bigger len)
    The target address in the final array is just the shot coulum (sht) in the tmp
    Then for each column, find non blanks (wnn)
    Prepare a target arrlen array of the right type, with nan entries (or -1, '')
    depost them target[sht[wnn]] = col[wnn]
    Finally, the shot column in the final array (Shot) should be == arange(maxshot+1)
    """
    LHD = {}           # final dict of per-column arrays, indexed by shot number
    tmp = {}           # intermediate dict: blank-shot rows dropped, shot-ordered
    sh = 90091         # only used in the failure message below
    err=0              # number of columns reverted to strings
    str_summary=[]     # human-readable log of those reversions
    wnotnull = np.where(lhd['nShotnumber'] != '')[0] # cautiously convert to int
    shots_tmp = lhd['nShotnumber'][wnotnull].astype(np.int32)
    # need unique here, are there are 2 shot 100's ! (what does this mean?)
    shots_test,ws = np.unique(shots_tmp, return_index=1)
    # reorder the strings in a new dict, in shot number order.
    for k in lhd.keys(): tmp.update({k: lhd[k][ws]})
    # now prepare the final shot array
    arrlen = np.max(shots_tmp)+1 # need a spot for all shots including 0
    shots = np.zeros(arrlen, dtype=np.int32) -1 # initialise to shot=-1
    shots[shots_tmp] = shots_tmp
    LHD.update({'Shot': shots})
    for k in tmp.keys():
        as_str_in_order = tmp[k]
        # now look for '' in other cols
        wcolnotnull = np.where(as_str_in_order != '')[0]
        chk_range = min(10, len(wcolnotnull))
        # get a lot of values, in case the first choice is not representative
        values = '_'.join([as_str_in_order[wcolnotnull[i]].strip()
                           for i in range(chk_range)])
        # choose a dtype by inspecting the sampled values: all-digits ->
        # int32 (unless a decimal part is found), numeric-looking -> float32,
        # anything else stays a string
        if re.match('^[_0-9]*$',values):
            dt = 'int32'
            arr = -np.ones(arrlen).astype(dt)
            wdecimal = np.where(
                np.remainder(as_str_in_order[wcolnotnull].astype(float),1)!=0)[0]
            if len(wdecimal)>0:
                print('reverting {k} to float based on {eg}'
                      .format(k=k, eg=as_str_in_order[wcolnotnull[wdecimal[0]]]))
                dt = 'float32'
                arr = np.nan + np.ones(arrlen).astype(dt)
        elif re.match('^[_+-.0-9eE]*$',values):
            dt = 'float32'
            arr = np.nan + np.ones(arrlen).astype(dt)
        else:
            # NOTE(review): '==' here looks like a typo for '=', so dt keeps
            # its value from the previous loop iteration; the astype below
            # then fails and the except clause rescues the column as a
            # string, so the net result happens to be correct.  Fixing the
            # typo would bypass the except path (and its strip/compress
            # step) — confirm before changing.
            dt == 'str'
            #arr = np.empty(arrlen,dtype='|S256') # need to initialise empty
            arr = np.array(arrlen*[''],dtype='|S256')
        try: # the conversion may go wrong - protect
            arr[shots_tmp[ws[wcolnotnull]]] = \
                as_str_in_order[wcolnotnull].astype(np.dtype(dt))
        except Exception as details:
            err += 1
            print('Failed on {k} (type was based on "{v}" for shot {sh}, {d} {a}'
                  .format(k=k, d=details, a=details.args, v = values, sh=sh))
            arr = np.array(arrlen*[''],dtype='|S256')
            #arr = np.empty(arrlen,dtype='|S256')
            #arr = np.array(arrlen*[''])
            arr[shots_tmp[ws[wcolnotnull]]] = as_str_in_order[wcolnotnull]
            # compress, but beware assignments in the future.
            arr=np.array([s.strip() for s in arr])
            str_summary.append('{k}: {oldty}-> {dty}'
                               .format(k=k, dty=arr.dtype,
                                       oldty=as_str_in_order.dtype))
            print('revert {k} to a string, type {dty}'.format(k=k, dty=arr.dtype))
        LHD.update({k: arr}) # add the new entry
    print('{err} string reversions/compressions'.format(err=err))
    print('{s}'.format(s=str_summary))
    # sanity checks: every column must be full length, and the Shot column
    # must equal its own index wherever it is set
    for k in lhd.keys():
        if len(LHD[k]) != arrlen: print('conversion error on {k}'
                                        .format(k=k))
    wset = np.where(LHD['Shot'] != -1)[0]
    werr = np.where(LHD['Shot'][wset] != np.arange(arrlen)[wset])[0]
    if len(werr) > 0: raise LookupError('shot numbers mixed up')
    # NOTE(review): raw_input is Python 2 only — this script predates py3
    if 'y' in raw_input('save ? ').lower():
        fn = 'LHD_summary_new'
        print('saving as {n}..'.format(n=fn))
        np.savez_compressed(fn,LHD=LHD)
    else:
        print('not saved')
"""
reverting time_nlmax to float based on 0.511
reverting ECHToshiba3Power to float based on 0.132
reverting time_ipmax to float based on 0.486
reverting NBI2Power to float based on 586.68
Failed on ECHCoordinatorComment (type was based on "---_---_---_---_---_---_---_---_---_---" for shot 90091, could not convert string to float: ---
revert ECHCoordinatorComment to a string, type |S256
6 string reversions/compressions
['Status: |S1-> |S1', 'GasType: |S255-> |S8', 'CoordinatorComment: |S256-> |S171', 'LIDStatus: |S3-> |S3', 'ExperimentTheme: |S77-> |S77', 'ECHCoordinatorComment: |S255-> |S256']
"""
|
998,827 | af152d3b1784137a28144aea26fb38c9f1b2d007 | #### Implement doc2vec ####
import gensim
import pandas as pd
from sklearn.model_selection import train_test_split
pd.set_option('display.max_colwidth', 100)
# Read in data, clean it, and then split into train and test sets
messages = pd.read_csv('data/spam.csv', encoding='latin-1')
messages = messages.drop(labels = ['Unnamed: 2','Unnamed: 3','Unnamed: 4'], axis=1)
messages.columns = ['label','text']
# simple_preprocess lowercases, strips punctuation and tokenizes each message
messages['text_clean'] = messages['text'].apply(lambda x: gensim.utils.simple_preprocess(x))
messages.head()
X_train, X_test, y_train, y_test = train_test_split(messages['text_clean'], messages['label'],test_size=.2)
# Create tagged document objects to prepare to train the model
# (each training message is tagged with its positional index)
tagged_docs = [gensim.models.doc2vec.TaggedDocument(v, [i]) for i, v in enumerate(X_train)]
# Look at what a tagged document looks like
tagged_docs[0]
# Train a basic doc2vec model
d2v_model = gensim.models.Doc2Vec(tagged_docs, vector_size = 100, window=5, min_count =2)
d2v_model.infer_vector(['i','am','learning','nlp'])
# Prepare these vectors to be used in a machine learning model
# NOTE(review): each inferred vector is wrapped in an extra one-element list
# here, producing a list of [vector] rather than a flat list of vectors —
# confirm downstream code expects that nesting.
vectors = [[d2v_model.infer_vector(words)] for words in X_test]
vectors[0]
998,828 | 1141a2849817bb86a3964d03b8a24d43f6acc488 | from app.model.tables import Movie
from app.model.tables import db
from app.model.serializer import SerialMovie
from sqlalchemy import func
serial_movie = SerialMovie()
serial_movie_many = SerialMovie(many=True)
def check_movie(name: str, year: int) -> bool:
    """Return True when a movie with this exact name and year already exists."""
    existing = Movie.query.filter_by(name=name, year=year).first()
    return existing is not None
def create_movie(movie: dict):
    """Persist a new movie; duplicates (same name and year) are rejected.

    Returns a (payload, status) tuple: 400 on duplicate, 201 on success.
    """
    if check_movie(name=movie["name"], year=movie["year"]):
        return {"message": "Movie exists."}, 400
    new_movie = Movie(**movie)
    db.session.add(new_movie)
    db.session.commit()
    db.session.flush()
    return {"message": "Movie created", "movie": serial_movie.dump(new_movie)}, 201
def select_all_movies() -> list:
    """Return every movie, serialized, under the "movies" key."""
    return {"movies": serial_movie_many.dump(Movie.query.all())}
def select_movie_by_name(name: str) -> dict:
    """Case-insensitively look up a single movie by name.

    Fix: the column was referenced as ``Movie.nome`` (Portuguese), but the
    model field used everywhere else in this module (``check_movie``,
    ``create_movie``) is ``name``, so this query raised AttributeError.
    """
    movie = Movie.query.filter(func.lower(Movie.name) == func.lower(name)).first()
    return serial_movie.dump(movie)
def select_movie_by_id(id: int) -> dict:
    """Serialize the movie with the given primary key (empty when absent)."""
    found = Movie.query.filter_by(id=id).first()
    return serial_movie.dump(found)
def select_movie_by_year(year: str) -> list:
    """Return all movies released in *year* under the "movies" key.

    Fix: the filter used ``ano=year`` (Portuguese), but the model field used
    by the rest of this module is ``year`` — the old keyword referenced a
    nonexistent attribute.
    """
    movies = Movie.query.filter_by(year=year).all()
    return {"movies": serial_movie_many.dump(movies)}
def select_all_unrated_movies() -> list:
    """List every movie that has not been rated yet (rate is NULL)."""
    unrated = Movie.query.filter_by(rate=None).all()
    return {"movies": serial_movie_many.dump(unrated)}
def select_all_unrated_movies_filter(movies: list) -> dict:
    """Return the first unrated movie whose name is in *movies*.

    Despite the plural name, this intentionally keeps the original
    ``.first()`` semantics and returns a single serialized movie
    (an empty result when nothing matches).
    """
    # .is_(None) is the idiomatic SQLAlchemy NULL test; `== None` works via
    # operator overloading but trips linters and reads like a bug.
    match = Movie.query.filter(Movie.name.in_(movies), Movie.rate.is_(None)).first()
    return serial_movie.dump(match)
def update_rate_movie_by_id(id: int, rate: float):
    """Set the rate of a movie exactly once.

    Returns a (payload, 400) tuple when no matching unrated movie exists;
    otherwise a plain payload dict.  NOTE(review): the success path has no
    explicit status code, unlike the other mutators in this module —
    confirm callers rely on the implicit 200.
    """
    # Filtering on rate=None means "exists AND not yet rated"; a missing id
    # and an already-rated movie are indistinguishable to the caller.
    movie = Movie.query.filter_by(id=id, rate=None).first()
    if not movie:
        return {"message": "The movie has already been rated."}, 400
    db.session.query(Movie).filter_by(id=id).update({"rate": rate})
    db.session.commit()
    db.session.flush()
    return {"message": "Movie updated.", "movie": serial_movie.dump(movie)}
def delete_movie_by_id(id):
    """Delete a movie by primary key; 400 payload when it does not exist."""
    target = Movie.query.filter_by(id=id).first()
    if not target:
        return {"message": "Movie not exists."}, 400
    db.session.delete(target)
    db.session.commit()
    return {"message": "Movie deleted."}
|
998,829 | 9c2c520c4db6b05238396231b5175b0f2c3d7655 | import tensorflow as tf
import numpy as np
from stable_baselines.common.callbacks import BaseCallback
from collections.abc import Iterable
import logging
import os
from framework.FileManager import FileManager
class EpisodeStatsLogger:
    """Writes per-episode statistics to a TensorBoard summary writer.

    Scalar entries are emitted as simple summaries; iterable entries
    additionally get a histogram plus aggregate scalars (mean/min/sum, and
    std for rewards).
    """
    def __init__(self, tb_writer):
        # tb_writer: a tf.summary.FileWriter-like object with add_summary/flush
        self.writer = tb_writer

    def create_hist(self, values):
        """Build a tf.HistogramProto from *values* using numpy bucketing."""
        counts, bin_edges = np.histogram(values)#, bins=30)
        values = np.array(values)
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values ** 2))
        # HistogramProto stores only the upper edge of each bucket, so drop
        # the leftmost edge returned by np.histogram.
        bin_edges = bin_edges[1:]
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        return hist

    def write(self, stats, step):
        """Emit one summary per entry of *stats* at global step *step*."""
        values = []
        # if len(stats['rewards']) == 0:
        #     return True
        for k, val in stats.items():
            if k == 'rewards':
                # rewards get their own tag namespace and an extra std scalar
                assert len(val) > 0
                values.append(tf.Summary.Value(tag='reward/' + k, histo=self.create_hist(val)))
                values.append(tf.Summary.Value(tag='reward/' + k + '_mean', simple_value=float(np.mean(val))))
                values.append(tf.Summary.Value(tag='reward/' + k + '_std', simple_value=float(np.std(val))))
                values.append(tf.Summary.Value(tag='reward/' + k + '_sum', simple_value=float(np.sum(val))))
                continue
            if isinstance(val, Iterable):
                assert len(val) > 0, k
                values.append(tf.Summary.Value(tag='stats/' + k, histo=self.create_hist(val)))
                values.append(tf.Summary.Value(tag="stats/" + k + '_mean', simple_value=float(np.mean(val))))
                values.append(tf.Summary.Value(tag="stats/" + k + '_min', simple_value=float(np.min(val))))
                values.append(tf.Summary.Value(tag="stats/" + k + '_sum', simple_value=float(np.sum(val))))
            else:
                # scalar entries must already be plain floats keyed by strings
                assert type(val) == float
                assert type(k) == str
                values.append(tf.Summary.Value(tag="stats/" + k, simple_value=val))
        summary = tf.Summary(value=values)
        self.writer.add_summary(summary, step)
        self.writer.flush()
class TensorboardCallback(BaseCallback):
    """
    Custom callback for plotting additional values in tensorboard.

    On every rollout end, fetches the latest finished-episode stats from the
    (vectorized) environment and writes them via EpisodeStatsLogger, logging
    each episode at most once.
    """

    def __init__(self, verbose=0):
        self.is_tb_set = False  # kept for compatibility; not read below
        super(TensorboardCallback, self).__init__(verbose)
        # ids of episodes whose stats have already been written
        self.episodes_recorded = set()

    def _on_rollout_end(self) -> None:
        """
        This event is triggered before updating the policy.
        Might not be ther end of an episode
        Retrieve epoch data from environment and plot it
        """
        w = EpisodeStatsLogger(self.locals['writer'])
        stats = self.model.get_env().env_method("get_episode_info")[-1]
        # -1 because env is vectorized, and we take result only from the last env in the vector (can be any)
        if stats == None:
            # on first rollouts it might happen that none of the environments have finished any of episodes
            # but we don't allow this
            raise Exception("n_steps is too small, first rollout completed without termination")
        # self.rollout_calls would be a wrong iteration id here, because there might be
        # many rollout calls before an episode is finished in one of the environments
        episode_id = self.model.get_env().env_method("get_resets")[-1]
        # assert that solver is updated by a new episode (although several might have passed
        assert episode_id not in self.episodes_recorded
        w.write(stats, len(self.episodes_recorded))
        self.episodes_recorded.add(episode_id)
        return True
class TestingCallback(BaseCallback):
    """
    Runs periodic evaluation episodes during training and, optionally,
    saves a rendered frame of every training step under the solver's
    log directory.
    """

    def __init__(self, solver, verbose=0, eval_freq=1, draw=True, draw_freq=1):
        super(TestingCallback, self).__init__(verbose)
        self.eval_freq = eval_freq  # evaluate every eval_freq rollouts (0 disables)
        self.solver = solver        # object exposing run_tests() and log_dir
        self.draw = draw            # render env frames on every step
        self.draw_freq = draw_freq  # draw during evaluation every draw_freq rollouts
        self.rollout_calls = 1
        self.verbose = verbose
        self.step_it = 0            # step counter within the current rollout

    def _on_training_start(self) -> None:
        """
        This method is called before the first rollout starts.
        """
        if self.eval_freq > 0:
            self.solver.run_tests(0, draw=self.draw, verbose=self.verbose)

    def _on_step(self) -> bool:
        if self.draw: # assuming each step updates time (works only with taxiEnvBatch)
            figs = self.model.get_env().env_method("render")
            fig_dir = os.path.join(self.solver.log_dir, str(self.rollout_calls))
            FileManager.create_path(fig_dir)
            for i in range(len(figs)):
                fig = figs[i]
                # NOTE(review): 'papertype' and 'frameon' were removed from
                # Matplotlib's savefig in 3.x — confirm the pinned version.
                fig.savefig(os.path.join(fig_dir, "env{}_it{}_fig.png".format(i, self.step_it)), dpi=None, facecolor='w', edgecolor='w',
                            orientation='portrait', papertype=None, format=None,
                            transparent=False, bbox_inches=None, pad_inches=0.1,
                            frameon=None, metadata=None)
            self.step_it += 1

    def _on_rollout_end(self) -> bool:
        if self.eval_freq > 0 and self.rollout_calls % self.eval_freq == 0:
            if_draw = self.draw and self.rollout_calls % self.draw_freq == 0
            self.solver.run_tests(self.rollout_calls // self.eval_freq, draw=if_draw, verbose=self.verbose)
        self.rollout_calls += 1
        self.step_it = 0
        return True
class RobustCallback(BaseCallback):
    """
    Binary-searches the largest per-driver income bound c such that a test
    episode still earns at least c * n_drivers * (1 - nu).

    NOTE(review): the search is currently disabled — all calls in
    _on_rollout_end are commented out, so the callback only counts rollouts.
    """

    def __init__(self, solver, nu, epsilon, gamma, cmin, cmax, verbose=0):
        super(RobustCallback, self).__init__(verbose)
        self.solver = solver
        self.nu = nu            # allowed fractional shortfall of the bound
        self.epsilon = epsilon  # bisection stopping tolerance on c
        self.rollout_calls = 1
        self.gamma = gamma      # relaxation factor when moving a bracket
        self.cmin = cmin        # initial lower bracket for c
        self.cmax = cmax        # initial upper bracket for c
        self.call = 0
        self.last_min = []

    def find_c(self):
        """Bisection search for the feasible income bound c in [cmin, cmax].

        Each probe runs one test episode with the candidate bound and checks
        whether the bounded income beats the robustness threshold.  All
        probes are recorded into the solver's log.
        """
        logging.info("Finding c")
        cmin = self.cmin
        cmax = self.cmax
        c = (cmax + cmin) / 2
        steps_log = []
        while abs((cmax + cmin)/2 - cmin) > self.epsilon:
            self.solver.test_env.set_income_bound(c)
            stats = self.solver.run_test_episode(0, draw=False, debug=False)
            reward = np.sum(stats['driver_income_bounded'])
            robust_threshold = c * self.solver.test_env.n_drivers * (1 - self.nu)
            possible = reward > robust_threshold
            steps_log.append((c, reward, c * self.solver.test_env.n_drivers, reward - robust_threshold))
            if possible:
                # feasible: raise the lower bracket (softened by gamma)
                cmin = cmin + (c - cmin) * self.gamma
                c = (cmax + cmin) / 2
            else:
                # infeasible: lower the upper bracket (softened by gamma)
                cmax = cmax - (cmax - c) * self.gamma
                c = (cmax + cmin) / 2
        logging.info("Finishing with final c={}".format(c))
        steps_log = sorted(steps_log)
        self.solver.log['step_log_{}'.format(self.rollout_calls)] = np.array(steps_log, dtype=float).tolist()
        return c

    # def _on_training_start(self):
    #     self.training_env.env_method("auto_update_income_bound")

    def _on_rollout_end(self) -> bool:
        # if self.rollout_calls % 3 == 0:
        #     c = self.find_c()
        #     self.training_env.env_method("set_income_bound", c)
        # self.training_env.env_method("auto_update_income_bound") # note that there are SEVERAL envs! each might have its own bound!
        self.rollout_calls += 1
998,830 | 8953e3f1f9ac4fcbbfd9d5ae3647b6db1fd74b74 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
__all__ = ("from_iter",)
from collections.abc import Iterable
from awkward_cpp.lib import _ext
import awkward as ak
from awkward._dispatch import high_level_function
from awkward._nplikes.numpylike import NumpyMetadata
np = NumpyMetadata.instance()
@high_level_function()
def from_iter(
iterable,
*,
allow_record=True,
highlevel=True,
behavior=None,
initial=1024,
resize=8,
):
"""
Args:
iterable (Python iterable): Data to convert into an Awkward Array.
allow_record (bool): If True, the outermost element may be a record
(returning #ak.Record or #ak.record.Record type, depending on
`highlevel`); if False, the outermost element must be an array.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.contents.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
initial (int): Initial size (in bytes) of buffers used by the `ak::ArrayBuilder`.
resize (float): Resize multiplier for buffers used by the `ak::ArrayBuilder`;
should be strictly greater than 1.
Converts Python data into an Awkward Array.
Any heterogeneous and deeply nested Python data can be converted, but the output
will never have regular-typed array lengths. Internally, this function uses
`ak::ArrayBuilder` (see the high-level #ak.ArrayBuilder documentation for a
more complete description).
The following Python types are supported.
* bool, including `np.bool_`: converted into #ak.contents.NumpyArray.
* int, including `np.integer`: converted into #ak.contents.NumpyArray.
* float, including `np.floating`: converted into #ak.contents.NumpyArray.
* bytes: converted into #ak.contents.ListOffsetArray with parameter
`"__array__"` equal to `"bytestring"` (unencoded bytes).
* str: converted into #ak.contents.ListOffsetArray with parameter
`"__array__"` equal to `"string"` (UTF-8 encoded string).
* tuple: converted into #ak.contents.RecordArray without field names
(i.e. homogeneously typed, uniform sized tuples).
* dict: converted into #ak.contents.RecordArray with field names
(i.e. homogeneously typed records with the same sets of fields).
* iterable, including np.ndarray: converted into
#ak.contents.ListOffsetArray.
See also #ak.to_list.
"""
return _impl(iterable, highlevel, behavior, allow_record, initial, resize)
def _impl(iterable, highlevel, behavior, allow_record, initial, resize):
    # Reject non-iterables up front with a clearer error than the builder's.
    if not isinstance(iterable, Iterable):
        raise TypeError(
            f"cannot produce an array from a non-iterable object ({type(iterable)!r})"
        )
    # A single dict is a record, not an array: wrap it in a length-1 list,
    # convert, and unwrap the lone element (only permitted via allow_record).
    if isinstance(iterable, dict):
        if allow_record:
            return _impl(
                [iterable],
                highlevel,
                behavior,
                False,
                initial,
                resize,
            )[0]
        else:
            raise ValueError(
                "cannot produce an array from a single dict (that would be a record)"
            )

    # Ensure that tuples are treated as iterables, not records
    if isinstance(iterable, tuple):
        iterable = list(iterable)

    # Accumulate the data with the C++ ArrayBuilder, then round-trip through
    # the buffers protocol to obtain a concrete layout.
    builder = _ext.ArrayBuilder(initial=initial, resize=resize)
    builder.fromiter(iterable)

    formstr, length, buffers = builder.to_buffers()
    form = ak.forms.from_json(formstr)

    # NOTE(review): the trailing [0] indexes the from_buffers result —
    # confirm this is intended here rather than returning the array itself.
    return ak.operations.ak_from_buffers._impl(
        form,
        length,
        buffers,
        buffer_key="{form_key}-{attribute}",
        backend="cpu",
        byteorder=ak._util.native_byteorder,
        highlevel=highlevel,
        behavior=behavior,
        simplify=True,
    )[0]
|
998,831 | 2484a0fdecd662a5765423d21409685f18cb6619 | class Validator():
def month_is_valid(self, month):
if(month is None):
return False
if(int(month) not in range(1, 13)):
return False
return True
def quarter_is_valid(self, quarter):
if(int(quarter) in range(1, 5)):
return True
return False
|
998,832 | f633a2f299d28cae99c20313246a96a8af6587b2 | """
只使用10个桶。(实际上是20个,可以优化。)
第一遍排序将nums中的数放到个位对应的桶中去。 每个桶是一个数组,可以装多个数据。
第二遍排序将桶中的数放到十位对应的桶(另一个桶)中去。 以此类推。
* 每次从桶中取数据都要从0号桶取到9号桶。 且桶中数据得按顺序读取。*
正确性验证:
第一遍时,每个桶中数字的个位数是一样的。
第二遍时,假如2号桶中有2个数字,则2个数字的十位一样,下面数字的个位数较小。(因为上一轮中个位数小的在小号的桶,因此会先取到它,放到当前桶)
以此类推,第N遍排序后,每个桶里数字后N位肯定是有顺序的。 所以最终是有顺序的。
复杂度:
排序的次数是最大值的位数D。 比如1234就要排序4遍(个位/十位/百位/千位)
每次排序从10个桶中遍历N个数。 因此时间复杂度为 D*O(N) 空间复杂度为 2*O(N)
"""
def get_ith_digit(number, i):
    """Return the i-th decimal digit of *number*, where i=1 is the units digit.

    Fix: use floor division so the function behaves identically under
    Python 2 (where ``/=`` on ints happened to floor) and Python 3 (where
    ``/=`` yields floats and corrupts the modulo result).
    """
    while i > 1:
        number //= 10
        i -= 1
    return number % 10
def get_level_of_number(number):
    """Return the count of decimal digits in *number* (0 for 0).

    Fix: floor division (``//=``) keeps the loop on integers under both
    Python 2 and Python 3; plain ``/=`` never reaches 0 on py3 floats.
    """
    n = 0
    while number > 0:
        n += 1
        number //= 10
    return n
assert get_ith_digit(543, 2) == 4
assert get_level_of_number(345600) == 6
def bucket_sort(nums, case_id):
    """LSD radix sort of non-negative integers using 10 digit buckets.

    *case_id* is unused but kept for interface compatibility with the test
    driver below.  Returns a new sorted list.  Runs in O(D*N) time and
    O(N) extra space, where D is the digit count of the largest value.

    Fix: an empty input previously crashed on ``max([])``; it now returns [].
    """
    if not nums:
        return []
    bucket_num = 10
    # pass 1: distribute by the units digit
    buckets = [[] for _ in range(bucket_num)]
    for num in nums:
        buckets[get_ith_digit(num, 1)].append(num)
    # passes 2..D: redistribute by the next digit while reading buckets in
    # order — the stable passes keep lower digits sorted, so after D passes
    # the concatenation is fully ordered.
    dims = get_level_of_number(max(nums))
    for i in range(2, dims + 1):
        next_buckets = [[] for _ in range(bucket_num)]
        for bucket in buckets:
            for num in bucket:
                next_buckets[get_ith_digit(num, i)].append(num)
        buckets = next_buckets
    return [num for bucket in buckets for num in bucket]
# Test inputs for bucket_sort: each entry is a list of non-negative ints.
# (The old "[[nums, target, expect]]" note described a different layout.)
TestCase = [ [1, 2, 3, 4, 5],
             [3, 2, 1, 4, 99, 22, 56, 11, 89],
             [4880, 3, 212, 10, 20413, 3110, 0, 1011, 999],
           ]
def check_sort(nums):
    """Return True when *nums* is in non-decreasing order."""
    return all(nums[i] <= nums[i + 1] for i in range(len(nums) - 1))
# Smoke-test driver: sort each case and verify the result is ordered.
# (Python 2 script — note the print *statement* in the failure branch.)
for i in range(len(TestCase)):
    testcase = TestCase[i]
    nums = testcase
    ret = bucket_sort(nums, i)
    if not check_sort(ret):
        print 'Test Case ', i, ' failed.', ret
|
998,833 | 47a5c0965d6ec3a13609a971b494ee357dc6b16a | <<<<<<< HEAD
class Solution:
    # NOTE: List comes from typing (provided by the LeetCode runtime).
    def singleNumber(self, nums: List[int]) -> int:
        """Return the element appearing exactly once when every other
        element appears exactly twice.

        Fix: this file contained an unresolved git merge conflict whose two
        branches were byte-identical copies of the same class; the conflict
        is resolved to a single copy.

        Classic XOR solution (kept for reference):
          a ^ a == 0, a ^ 0 == a, and XOR is commutative, so folding XOR
          over nums leaves the unique element.

        Implemented variant: 2 * sum(set(nums)) - sum(nums).  Doubling the
        sum of distinct values counts every element twice, so subtracting
        the raw sum (where the unique value appears once) leaves exactly
        the unique element.
        """
        # a = 0
        # for n in nums:
        #     a = a ^ n
        # return a
        distinct_sum = sum(set(nums))
        return 2 * distinct_sum - sum(nums)
998,834 | 3dff13b69fc49196686b2c26bd435d1d5992e2d0 | import os
def directory_choice():
    """Print the cwd, then prompt until the user names one of its
    subdirectories; return the chosen name.

    Prompts and messages are unchanged.  Fixes: removed the unreachable
    ``break`` after ``return``, the dead ``pass``, and commented-out cruft;
    the subdirectory list is built with a comprehension.
    """
    print(os.getcwd())
    subdirs = [entry for entry in os.listdir(os.getcwd()) if os.path.isdir(entry)]
    print()
    while True:
        choice = input("Please select a directory: ")
        if choice in subdirs:
            print("Yes")
            return choice
        print("that is not valid")
def print_files():
    """Prompt for a subdirectory of the cwd, chdir into it, and return the
    list of plain files inside it.

    NOTE(review): despite the name and the original comment ("print all the
    files"), this function prints nothing but prompt feedback and *returns*
    the file list; it also permanently changes the process cwd — confirm
    callers expect both.  Fixes: removed the unreachable ``break`` after
    ``return``, the dead ``pass``, and commented-out cruft.
    """
    subdirs = [entry for entry in os.listdir(os.getcwd()) if os.path.isdir(entry)]
    print()
    while True:
        choice = input("Please select a directory: ")
        if choice in subdirs:
            os.chdir(os.path.join(os.getcwd(), choice))
            return [entry for entry in os.listdir(os.getcwd()) if os.path.isfile(entry)]
        print("that is not valid")
#main section
#call directory_choice
#change to that directory
#call print_files
|
998,835 | 6a76844ab901cb35144f512cb4fb034b24d2f319 | # Convolution Neural Network (CNN)
# Importing the libraries
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution: 32 3x3 filters over 64x64 RGB inputs
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection (single sigmoid unit -> binary classification)
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
# Compiling CNN
classifier.compile(
    optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Image Augmentation to reduce overfitting (keras.io)
train_datagen = ImageDataGenerator(
    rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
    'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary')
test_set = test_datagen.flow_from_directory(
    'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary')
# Fitting the CNN to the images
# NOTE(review): steps_per_epoch is normally dataset_size // batch_size;
# 8000 steps at batch_size=32 replays ~256k images per epoch — confirm this
# matches the intent (the classic tutorial has 8000 training images).
classifier.fit_generator(training_set, steps_per_epoch=8000,
                         epochs=25, validation_data=test_set, validation_steps=2000)
998,836 | b3b8fd25bb87579398dbce2bd119ea7ab5374f1e | """
This module contains the functions used to create Dash layout elements.
"""
from datetime import datetime
from zoneinfo import ZoneInfo
import polars as pl
from dash import dcc, html
from dash.development.base_component import Component
from feffery_antd_components.AntdTree import AntdTree
from src.data.data_extract import (
get_departement_geographical_data,
get_waste_code_hierarchical_nomenclature,
)
from src.data.data_processing import (
get_recovered_and_eliminated_quantity_processed_by_week_series,
get_weekly_waste_quantity_processed_by_operation_code_df,
)
from src.data.datasets import ALL_BORDEREAUX_DATA, DEPARTEMENTS_GEOGRAPHICAL_DATA
from src.pages.advanced_statistics.utils import format_filter
from src.pages.figures_factory import create_weekly_quantity_processed_figure
from src.pages.utils import add_callout
def create_filters_selects_elements() -> html.Div:
"""
Returns a `html.Div` object containing the filters for selecting departments and waste codes.
Returns
-------
html.Div
A `html.Div` object containing the filters.
"""
geographical_data = DEPARTEMENTS_GEOGRAPHICAL_DATA
waste_nomenclature = get_waste_code_hierarchical_nomenclature()
geographical_data = geographical_data.to_dict(as_series=False)
options = [
{"value": a, "label": b}
for a, b in zip(
geographical_data["code_departement"], geographical_data["libelle"]
)
]
options.insert(0, {"value": "all", "label": "France entière"})
departements_dropdown = html.Div(
[
html.Label(
["Sélectionner un département :"],
className="fr-label",
htmlFor="departement-select",
),
dcc.Dropdown(
options=options,
placeholder="Rechercher un département...",
id="departement-select",
value="all",
clearable=False,
),
],
className="fr-select-group",
id="departement-select-group",
)
waste_select = html.Div(
[
html.Button(
["Filtrer par code déchet"],
id="waste-select-modal-button",
className="fr-btn",
**{"data-fr-opened": False, "aria-controls": "fr-modal-1"},
),
html.Dialog(
html.Div(
html.Div(
html.Div(
html.Div(
[
html.Div(
html.Button(
"Fermer",
className="fr-link--close fr-link",
title="Fermer la fenêtre de sélection des filtres sur les codes déchets",
**{
"aria-controls": "fr-modal-1",
},
),
className="fr-modal__header",
),
html.Div(
[
html.H1(
[
html.Span(
className="fr-fi-arrow-right-line fr-fi--lg"
),
"Filtrer par code déchets :",
],
id="fr-modal-title-modal-1",
className="fr-modal__title",
),
AntdTree(
id="waste-select",
className="waste-select",
treeData=waste_nomenclature,
# multiple=True,
checkable=True,
selectable=False,
defaultCheckedKeys=["all"],
defaultExpandedKeys=["all"],
),
],
className="fr-modal__content",
),
],
className="fr-modal__body",
),
className="fr-col-12 fr-col-md-8",
),
className="fr-grid-row fr-grid-row--center",
),
className="fr-container fr-container--fluid fr-container-md",
),
id="fr-modal-1",
className="fr-modal",
role="dialog",
**{"aria-labelledby": "fr-modal-title-modal-1"},
),
],
id="waste-select-group",
)
selects_div = html.Div(
[departements_dropdown, waste_select], className="selects-container"
)
return selects_div
def create_filtered_waste_processed_figure(
departement_filter: str, waste_codes_filter: dict[str, list[str]]
) -> list[Component]:
"""
Create a plot of the quantity of hazardous waste processed and tracked by week. The data is, if needed, filtered by departement
and waste codes.
Parameters:
-----------
departement_filter : str
The code of the departement to filter the data by. If "all" is passed, all departements will be included in the
plot.
waste_codes_filter : dict with "checked" and "half checked" keys
The dictionary that contains the waste codes checked or half-checked on UI that will be used for filtering.
Returns:
--------
list
A list of dash elements that are ready to be rendered.
Example:
--------
>>> create_filtered_waste_processed_figure("75", {"checked": ["01 01","01 01 01*",,"19", "19 01", "19 01 01"], "half_checked": ["01"]})
[html.H4("Quantité de déchets dangereux tracés et traités par semaine - Paris"),
dcc.Graph(figure=...)]
"""
geographical_data = DEPARTEMENTS_GEOGRAPHICAL_DATA
bs_data = ALL_BORDEREAUX_DATA
departement_filter_str = ""
bs_data_filtered = bs_data
if (departement_filter is not None) and (departement_filter != "all"):
departement_filter_str = (
"- "
+ geographical_data.filter(
pl.col("code_departement") == departement_filter
)["libelle"].item()
)
bs_data_filtered = bs_data_filtered.filter(
pl.col("destination_departement") == departement_filter
)
waste_filter_formatted = format_filter(pl.col("waste_code"), waste_codes_filter)
if waste_filter_formatted is not None:
bs_data_filtered = bs_data_filtered.filter(waste_filter_formatted)
date_interval = (
datetime(2022, 1, 3, tzinfo=ZoneInfo("Europe/Paris")),
datetime.now(tz=ZoneInfo("Europe/Paris")),
)
bs_data_filtered_grouped = get_weekly_waste_quantity_processed_by_operation_code_df(
bs_data_filtered,
date_interval,
)
(
df_recovered,
df_eliminated,
) = get_recovered_and_eliminated_quantity_processed_by_week_series(
bs_data_filtered_grouped
)
fig = create_weekly_quantity_processed_figure(
df_recovered, df_eliminated, date_interval
)
elements = [
html.H4(
f"Quantité de déchets dangereux tracés et traités par semaine {departement_filter_str}"
),
dcc.Graph(figure=fig),
]
return elements
def create_input_output_elements(
departement_filter: str, waste_codes_filter: dict[str, list[str]]
) -> list[Component]:
"""
Create input/output elements for a Dash application.
Parameters
----------
departement_filter : str
The filter to apply on the departement data. If set to 'all', no filter is applied.
waste_codes_filter : list[str]
The list of waste codes to filter the bordereaux data by.
Returns
-------
list
A list of Dash elements, each containing the quantity of incoming, outgoing, or locally processed
dangerous waste in a specific departement.
If no departemenent filter is provided (departement_filter is None or "all"), then nothing is returned.
"""
geographical_data = get_departement_geographical_data()
bs_data = ALL_BORDEREAUX_DATA
departement_filter_str = ""
date_interval = (
datetime(2022, 1, 3, tzinfo=ZoneInfo("Europe/Paris")),
datetime.now(tz=ZoneInfo("Europe/Paris")),
)
bs_data = bs_data.filter(
pl.col("processed_at").is_between(*date_interval, closed="left")
& pl.col("processing_operation")
.is_in(
[
"D9",
"D13",
"D14",
"D15",
"R12",
"R13",
]
)
.is_not()
& pl.col("status").is_in(["PROCESSED", "FOLLOWED_WITH_PNTTD"])
)
if (departement_filter is not None) and (departement_filter != "all"):
departement_filter_str = geographical_data.filter(
pl.col("code_departement") == departement_filter
)["libelle"].item()
bs_data_processed_incoming_filtered = bs_data.filter(
(pl.col("destination_departement") == departement_filter)
& (pl.col("emitter_departement") != departement_filter)
)
bs_data_processed_outgoing_filtered = bs_data.filter(
(pl.col("emitter_departement") == departement_filter)
& (pl.col("destination_departement") != departement_filter)
)
bs_data_processed_locally_filtered = bs_data.filter(
(pl.col("destination_departement") == departement_filter)
& (pl.col("emitter_departement") == departement_filter)
)
elements = [
html.H4(f"Flux de déchet du département - {departement_filter_str}"),
]
else:
return [
html.H4("Flux de déchet du département"),
html.Div(
"Veuillez sélectionner un département pour afficher les données",
id="departement-figure-no-data",
),
]
waste_filter_formatted = format_filter(pl.col("waste_code"), waste_codes_filter)
if waste_filter_formatted is not None:
bs_data_processed_incoming_filtered = (
bs_data_processed_incoming_filtered.filter(waste_filter_formatted)
)
bs_data_processed_outgoing_filtered = (
bs_data_processed_outgoing_filtered.filter(waste_filter_formatted)
)
bs_data_processed_locally_filtered = bs_data_processed_locally_filtered.filter(
waste_filter_formatted
)
bs_data_processed_incoming_quantity = (
bs_data_processed_incoming_filtered.select("quantity").sum().item()
)
bs_data_processed_outgoing_quantity = (
bs_data_processed_outgoing_filtered.select("quantity").sum().item()
)
bs_data_processed_locally_quantity = (
bs_data_processed_locally_filtered.select("quantity").sum().item()
)
elements.extend(
[
html.Div(
[
add_callout(
number=bs_data_processed_locally_quantity,
text="tonnes de déchets dangereux tracés et traités à l’intérieur du département",
),
add_callout(
number=bs_data_processed_incoming_quantity,
text="tonnes de déchets entrantes traités à l’intérieur du département",
),
add_callout(
number=bs_data_processed_outgoing_quantity,
text="tonnes de déchets sortantes traités à l’extérieur du département",
),
],
id="total-processed-figures",
className="row",
),
]
)
return elements # type: ignore
|
998,837 | 8a40ad24088a8307988dfe88a96c295312309089 | import os
from PIL import Image, ImageDraw
from weighted_quick_union import WeightedQuickUnion
class WeightedQuickUnionImage(WeightedQuickUnion):
    """Weighted quick-union that renders each union step as a JPEG image.

    Images are written to an ``out/`` directory next to this file, one per
    union, so the evolution of the forest can be inspected frame by frame.
    """
    def __init__(self, n):
        super(WeightedQuickUnionImage, self).__init__(n)
        # Canvas geometry: the top 3/4 of the image draws the forest,
        # the bottom strip draws the flat id[] array.
        self.w = 400
        self.h = 400
        self.division = self.w * 3 / 4
        self.bx = 20                          # left/right margin in pixels
        self.xs = (self.w - self.bx*2) / n    # horizontal step per node
        self.xw = int(self.xs) - 1            # NOTE(review): unused in this class — confirm before removing
        self.ys = self.division/n             # vertical step per depth level
        # Current depth of each node, used when drawing
        self.depth = [1]*n
        self.out_path = os.path.dirname(os.path.abspath(__file__))
        self.out_path = os.path.join(self.out_path, 'out')
        if not os.path.exists(self.out_path):
            os.makedirs(self.out_path)
        self.step = 1                         # frame counter for output filenames
    def union(self, p, q):
        """Union p and q as in the parent class, then render the new state."""
        pRoot = self.find(p)
        qRoot = self.find(q)
        if pRoot == qRoot:
            return
        t = pRoot
        # Attach the smaller tree's root under the larger tree's root
        if self.sz[pRoot] < self.sz[qRoot]:
            self.id[pRoot] = qRoot
            self.sz[qRoot] += self.sz[pRoot]
        else:
            self.id[qRoot] = pRoot
            self.sz[pRoot] += self.sz[qRoot]
            t = qRoot
        self.set_depth()
        # t is the root that was absorbed; draw_list highlights it in red.
        self.draw_list(p, q, t)
        self.count -= 1
    def set_depth(self):
        '''
        Recompute every node's depth by walking each chain up to its root.
        '''
        n = len(self.id)
        for i in range(n):
            self.depth[i] = 1
            p = i
            while p != self.id[p]:
                p = self.id[p]
                self.depth[i] += 1
    def draw_list(self, p=-1, q=-1, pRoot=-1):
        """Render the current id[] array and forest to a numbered JPEG.

        p/q are the nodes just unioned (connected with a cyan line);
        pRoot is the absorbed root, drawn with a red parent link.
        """
        img = Image.new('RGB', (self.w, self.h), (255, 255, 255))
        draw = ImageDraw.Draw(img)
        draw.line((0, self.division, self.w, self.division), (0, 0, 255))
        if p != -1 and q != -1:
            x1 = self.bx + self.xs*p
            y_connect = self.division + 20
            x2 = self.bx + self.xs*q
            draw.line((x1, y_connect, x2, y_connect), (0, 255, 255))
        x = self.bx
        for n, v in enumerate(self.id):
            # Bottom strip: node index, linked up to the union line for p/q.
            y = self.h - 12
            draw.text((x, y), str(n), (0, 0, 0))
            if n == p or n == q:
                draw.line((x, y, x, y_connect), (0, 255, 255))
            # Forest area: node at its depth, with a line to its parent id.
            y = self.ys * self.depth[n]
            draw.text((x, y), str(n), (0, 0, 0))
            x_id = self.bx + self.xs*v
            y_id = y - self.ys
            draw.text((x_id, y_id), str(v), (0, 0, 0))
            color = (0, 0, 0)
            if n == pRoot:
                color = (255, 0, 0)
            draw.line((x, y, x_id, y_id+12), color)
            x += self.xs
        name = str(self.step) + '.jpg'
        img.save(os.path.join(self.out_path, name), 'jpeg')
        self.step += 1
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        # No input file: run the built-in demo sequence.
        n = 10
        uf_image = WeightedQuickUnionImage(n)
        # Worst case for weighted quick-union (builds a maximally deep tree)
        d_worst = [[0, 1], [2, 3], [4, 5], [6, 7], [0, 2], [4, 6], [0, 4]]
        for p, q in d_worst:
            if uf_image.connected(p, q):
                continue
            uf_image.union(p, q)
        print(uf_image.get_count())
    else:
        # Input file format: first line is n, then one "p q" pair per line.
        with open(sys.argv[1], 'r') as f:
            n = int(f.readline())
            uf = WeightedQuickUnionImage(n)
            for line in f.readlines():
                p, q = [int(x) for x in line.split()]
                if uf.connected(p, q):
                    continue
                uf.union(p, q)
            print(uf.get_count())
|
998,838 | a5561e0c9c5a1ce8d95feec86d9f2df8912b06eb | from string import digits, ascii_letters
# Base-62 alphabet; a character's index in this string is its digit value.
_chars = ascii_letters + digits
def to_char(table):
    """Map a sequence of base-62 digit values to their alphabet characters."""
    return ''.join(_chars[digit] for digit in table)
def to_base62(url_id):
    """Encode a non-negative integer as a base-62 string.

    The result is left-padded with 'a' (the zero digit) to at least
    4 characters, matching the original padding loop.
    """
    table = []
    while url_id > 0:
        # Bug fix: the original used int(url_id / 62), which goes through
        # float division and silently loses precision for very large ids.
        # divmod uses exact integer floor division.
        url_id, mod = divmod(url_id, 62)
        table.append(mod)
    table.reverse()
    # rjust(4, 'a') is equivalent to prepending 'a' until length 4.
    return to_char(table).rjust(4, 'a')
def from_base62(url_str):
    """Decode a base-62 string (as produced by to_base62) to an integer."""
    value = 0
    for ch in url_str:
        value = value * 62 + _chars.index(ch)
    return value
998,839 | 2dd997908da046a2944f3e51bb194c52023c6f95 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat 11 March 11:40:23 2017
@author: Professor Junbin Gao
Copyright: Professsor Junbin Gao, The University of Sydney Business School
March 2017
"""
import numpy as np
import matplotlib.pyplot as plt
# Load the two-column data file: column 0 = time index, column 1 = price.
data = np.loadtxt('ibmclose.txt')
times, x = data[:,0], data[:,1]
plt.plot(times, x)
plt.ylabel('Closing Prices')
plt.xlabel('Times')
plt.title('IBM Stock Prices')
# Please run the first section above, then the second section below
# More Features to be added
plt.grid()
# Then run the following section
N = times.size # number of elements in times
# Move to a tighter x-axis (drop the default margins)
plt.xlim((1,N))
998,840 | 216485bb3f0fcf454dfea0c3182d83da3a4b83a9 |
def ask_number_columns():
    """Prompt for the board size (rows and columns) and return the raw input."""
    # input()'s prompt writes the same bytes to stdout as the original print.
    return input("Digite número de linhas e colunas\n")
def ask_pos():
    """Prompt for a 1-based row and column; return them 0-based, concatenated."""
    row = input("Digite linha\r\n")
    col = input("Digite coluna\r\n")
    return "{}{}".format(int(row) - 1, int(col) - 1)
def print_board(no_rows_columns, number_bomb_map):
    """Print the square board; revealed cells show their bomb count, others 'A'.

    Cell keys in number_bomb_map are "<col><row>" strings, matching the
    original x-then-y key construction.
    """
    for row in range(no_rows_columns):
        for col in range(no_rows_columns):
            key = str(col) + str(row)
            if key in number_bomb_map:
                print(number_bomb_map[key], end="\t")
            else:
                print("A", end="\t")
        print("")
def initialize(s, number_bomb_map):
    """Reset game state *s*, build a 5x5 board, and print it.

    NOTE(review): the 5x5 size is hard-coded here — confirm callers expect it.
    """
    s.reset_bombs()
    s.initialize_board(5, 5)
    print_board(5, number_bomb_map)
998,841 | 9df4134ebb66bea97b678c5f895d730c9038e1fc | import requests
import re
# Fetch the jokes page and extract each joke's text from its <div> wrapper.
url = 'http://www.budejie.com/text/'
html = requests.get(url)
print(html.status_code)
content = html.text
duanzi = re.findall(r'<div class="j-r-list-c-desc">\s+(.*)\s+</div>', content)
# NOTE(review): file is closed manually; a 'with' block would survive exceptions.
f = open('duan.txt', 'w')
for each in duanzi:
    if '<br />' in each:
        # Replace HTML line breaks with real newlines before saving.
        new_each = re.sub(r'<br />', '\n',each)
        print(new_each)
        f.write(new_each)
    else:
        print(each)
        f.write(each)
f.close()
|
998,842 | 4c79925689731831ba0b967edd52abd5737aacb8 | # -*- coding: utf-8 -*-
from .abstract_tag import AbstractTag
class ExperienceTag(AbstractTag):
    """Tag subtype for experiences; all behavior is inherited from AbstractTag."""
    pass
|
998,843 | 79c529e66fa9f2ed291a724f23a910cd4713a951 | n = int(input())
i = 0
j = ""
for i in range(0,n):
j+=(n-i)*"*"
j+=2*i*"o"
j+=(n-i)*"*"
print(j)
j=""
|
998,844 | 4e3aa71fee75a2abfc9c209091ad0b20afb09b5f | from django.conf import settings
def common(request):
    """Django context processor: expose the base template name to templates.

    Falls back to 'base.html' when YUMMY_BASE_TEMPLATE is not configured.
    """
    return {
        'BASE_TEMPLATE': getattr(settings, 'YUMMY_BASE_TEMPLATE', 'base.html')
    }
|
998,845 | a68154062e71519aa1ecf4514ac468238cca3491 | from flask import Flask, render_template, redirect, url_for, request
app = Flask(__name__)
# @app.route("/")
# def index():
# return "Hello world"
@app.route("/", methods=['GET', 'POST'])#renders the login page
def login():
    """Validate posted credentials; on success redirect to the 'index' endpoint.

    NOTE(review): no 'index' endpoint is defined in this file (the index view
    is commented out) — confirm whether 'mainpage' was intended.
    """
    error = None
    if request.method == 'POST':
        if request.form['username'] != 'admin' or request.form['password'] != 'admin':
            # Bug fix: message was misspelled ("Invailid").
            error = 'Invalid credentials. Please try again.'
        else:
            # Bug fix: url_ is undefined (NameError on successful login);
            # url_for is the imported Flask helper.
            return redirect(url_for('index'))
    return render_template('login.html', error=error)
if __name__ == '__main__':
    # NOTE(review): this guard sits above several @app.route definitions;
    # when run directly, app.run() starts serving before those routes are
    # registered — confirm whether they should move above this guard.
    app.run(debug=True)
@app.route("/register")#renders the the register page
def register():
    """Render the registration page."""
    error = None
    return render_template('Register.html', error=error)
@app.route("/mainpage")#renders the the home page
def mainpage():
    """Render the home page."""
    error = None
    return render_template('mainpage.html', error=error)
@app.route("/aboutgame")#renders the the about game page
def aboutgame():
    """Render the about-game page."""
    error = None
    return render_template('aboutgame.html', error=error)
@app.route("/aboutus")#renders the the about us page
def aboutus():
    """Render the about-us page."""
    error = None
    return render_template('aboutus.html', error=error)
@app.route("/score")#renders the the about us page
def score():
    """Render the score page."""
    error = None
    return render_template('score.html', error=error)
|
998,846 | e2632d3c4c8f204dc3720d8e4d3573cd96c5754d | import csv
import re
def at_removal(sentence):
    """Remove every whitespace-separated token containing '@' (mentions).

    Tokens are re-joined with single spaces, so runs of whitespace in the
    input are normalized — same behavior as the original loop.
    """
    # A filtering comprehension replaces the pop-while-iterating loop, which
    # was quadratic and maintained an unused 'length' counter; re.search('@')
    # on a token is equivalent to the '@' in token substring test.
    return ' '.join(token for token in sentence.split() if '@' not in token)
def hashtag_removal(sentence):
    """Remove every whitespace-separated token containing '#' (hashtags).

    Tokens are re-joined with single spaces, so runs of whitespace in the
    input are normalized — same behavior as the original loop.
    """
    # Comprehension replaces the quadratic pop-while-iterating loop and the
    # unused 'length' counter; matches re.search('#', token) semantics.
    return ' '.join(token for token in sentence.split() if '#' not in token)
def url_removal(sentence):
    """Remove every whitespace-separated token containing 'http' (URLs).

    Tokens are re-joined with single spaces, so runs of whitespace in the
    input are normalized — same behavior as the original loop.
    """
    # Comprehension replaces the quadratic pop-while-iterating loop and the
    # unused 'length' counter; matches re.search('http', token) semantics.
    return ' '.join(token for token in sentence.split() if 'http' not in token)
|
998,847 | a8311d5e4d378da99b91686728f9d72fc41293a8 | import double_or_nothing_util as dnUtil
import sys
import math
class BotPlayer:
    """Card-counting bot for the double-or-nothing higher/lower card game.

    state layout: [base card value, revealed next value, counts table, lost?]
    where counts maps each card value to (higher, lower, tie) counts of
    remaining cards relative to that value.
    """
    def __init__(self):
        self.resetBot()
    def resetBot(self):
        """Set up the bot for the beginning state of the game"""
        self.state = [None, None, None, False]
        self.score = 0
        counts = {}
        for value in dnUtil.valuesList:
            counts[value] = self.calcDefaultCount(value)
        self.state[2] = counts
    def calcDefaultCount(self, value):
        """Returns the default number of cards higher/lower than the
        input value.
        """
        idx = dnUtil.valuesList.index(value)
        # 4 suits per value in a full deck.
        higher = (len(dnUtil.valuesList) - idx - 1) * 4
        lower = idx * 4
        tie = 4
        return (higher, lower, tie)
    def updateBotCounts(self, nextCard):
        """Update the card count given the next card has been drawn"""
        nextVal = dnUtil.getValue(nextCard)
        state = self.getState()
        counts = self.getCounts(state)
        newCount = counts.copy()
        for value in dnUtil.valuesList:
            # A value with no ties left has all four of its cards drawn.
            if counts[value][2] == 0:
                continue
            update = self.updateCount(value, nextVal, counts[value])
            newCount[value] = update
        self.setCounts(newCount)
    def updateCount(self, cardVal, nextVal, counts):
        """Update the number of cards higher/lower than the value given
        the next card has been drawn."""
        higher, lower, tie = counts
        comp = dnUtil.compareValue(cardVal, nextVal)
        if comp == 0:
            tie -= 1
        elif comp < 0:
            higher -= 1
        else:
            lower -= 1
        return (higher, lower, tie)
    def expectancyPcnt(self, p):
        """Simplified trade expectancy formula."""
        return (2 * p) - 1
    def getAction(self, game, state):
        """Choose an action given the current state.

        NOTE(review): the first branch returns a bare Action while the later
        branches return an (Action, winrate) tuple — confirm callers handle
        both shapes.
        """
        if self.getUnknownCard(state) is None:
            # First guess: pick whichever direction has more cards left.
            baseVal = self.getBase(state)
            counts = self.getCounts(state)[baseVal]
            if counts[0] > counts[1]:
                return dnUtil.Action.Higher
            elif counts[0] < counts[1]:
                return dnUtil.Action.Lower
            else:
                # Tie: flip a fair coin. Bug fix: random() returns a float in
                # [0, 1), so the old comparison "> 50" was never true and the
                # bot always guessed Lower on ties.
                if dnUtil.random.random() > 0.5:
                    return dnUtil.Action.Higher
                else:
                    return dnUtil.Action.Lower
        elif self.getLoseBool(state):
            return dnUtil.Action.Exit
        else:
            # Compare the best guess's win rate against the game's risk
            # threshold using the expectancy formula.
            nextVal = self.getUnknownCard(state)
            nextCounts = self.getCounts(state)[nextVal]
            deckSize = sum(nextCounts)
            high, low, tie = nextCounts
            if high > low:
                winrate = high / deckSize
            elif high < low:
                winrate = low / deckSize
            else:
                winrate = 0.5
            risk = game.getRisk()
            if self.expectancyPcnt(winrate) > self.expectancyPcnt(risk / 100):
                return dnUtil.Action.Continue, winrate * 100
            else:
                return dnUtil.Action.Exit, winrate * 100
    # --- simple accessors over the state list ---
    def getState(self):
        return self.state
    def setState(self, state):
        self.state = state
    def getScore(self):
        return self.score
    def setScore(self, score):
        self.score = score
    def getBase(self, state):
        return state[0]
    def setBase(self, card):
        self.state[0] = dnUtil.getValue(card)
    def getUnknownCard(self, state):
        return state[1]
    def setUnknownCard(self, card):
        if card is not None:
            self.state[1] = dnUtil.getValue(card)
        else:
            self.state[1] = None
    def getCounts(self, state):
        return state[2]
    def setCounts(self, counts):
        self.state[2] = counts
    def setLoseBool(self, lose):
        self.state[3] = lose
    def getLoseBool(self, state):
        return state[3]
|
998,848 | a13835bdaab1331ff5d47f79ef5b72d5dab31c78 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# Load the Pima diabetes dataset and inspect its basic statistics.
dataset = pd.read_csv('diabetes.csv')
print(dataset.head(5))
print(dataset.shape)
print(dataset.describe())
#check if there are correlations, i.e. redundant features
corr = dataset.corr() #data frame correlation function
fig, ax = plt.subplots(figsize=(13,13))
ax.matshow(corr) #color code the rectangles by correlation value
plt.xticks(range(len(corr.columns)), corr.columns) #draw x tick marks
plt.yticks(range(len(corr.columns)), corr.columns) #draw y tick marks
plt.show()
#separate columns into features and labels
features = dataset.drop(['Outcome'], axis=1)
labels = dataset['Outcome']
# 75/25 train/test split, then fit a default k-nearest-neighbors classifier.
features_train, feature_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.25)
classifier = KNeighborsClassifier()
classifier.fit(features_train, labels_train)
pred = classifier.predict(feature_test)
accuracy = accuracy_score(labels_test, pred)
print('Accuracy: {}'.format(accuracy))
998,849 | da4e6ed5da47c4ebc0463f2139541fe6003ec2ed | import os
import porter
# Python 2 script: builds a simple inverted index (term/document frequencies
# and posting lists) over a newsgroup corpus.
# initializing punctuation marks list and stop words list
PUNCTUATION_MARKS = [',', '.', '<', '>', '|', ':', '(', ')', '/', '_', '\\', '?', '-', '!', '#', '%', '^', '&', '*', '_', '+', '~']
# NOTE(review): filename 'STOP_WORDSs.txt' looks like a typo — confirm it exists.
STOP_WORDS = open('STOP_WORDSs.txt', "r").read().split('\n')
# cleaning data by removing punctuation marks
for root, dirs, files in os.walk("./comp.os.ms-windows.misc"):
    for input_file in files:
        output_file = 'cleaned_data/' + input_file
        input_file = 'comp.os.ms-windows.misc/' + input_file
        f = open(input_file, "r")
        f_new = open(output_file, "w")
        # Copy character by character, dropping punctuation.
        for characters in f.read():
            if characters not in PUNCTUATION_MARKS:
                f_new.write(characters)
        f.close()
        f_new.close()
# stemming and removal of stop words
DOCUMENTS = []
UNIQUE_WORDS = set()
for root, dirs, files in os.walk("./cleaned_data"):
    for input_file in files:
        output_file = input_file
        input_file = 'cleaned_data/' + input_file
        DOCUMENTS.append(output_file)
        f = open(input_file, "r")
        f_new = open("stemmed_data/" + output_file, "w")
        for line in f.readlines():
            words = line.split()
            for word in words:
                word = word.lower()
                if word.isalpha() == False:
                    continue
                if word not in STOP_WORDS:
                    f_new.write(porter.stem(word) + " ")
                    UNIQUE_WORDS.add(porter.stem(word))
            f_new.write("\n")
        f.close()
        f_new.close()
# creating index list with document frequency and term frequency
DOCUMENT_FREQUENCY = dict()
TERM_FREQUENCY = dict()
# initializing TERM_FREQUENCY matrix
for term in UNIQUE_WORDS:
    TERM_FREQUENCY[term] = dict()
    for root, dirs, files in os.walk("./stemmed_data"):
        for input_file in files:
            TERM_FREQUENCY[term][input_file] = 0
# building TERM_FREQUENCY matrix
for root, dirs, files in os.walk("./stemmed_data"):
    for input_file in files:
        f_new = open("stemmed_data/" + input_file, "r")
        for line in f_new.readlines():
            words = line.split()
            for word in words:
                word = word.lower()
                if word.isalpha() == False:
                    continue
                if word not in STOP_WORDS:
                    word = porter.stem(word)
                    if word in UNIQUE_WORDS:
                        TERM_FREQUENCY[word][input_file] += 1
        f_new.close()
POSTING_LIST = dict()
# building document frequency
for word in UNIQUE_WORDS:
    DOCUMENT_FREQUENCY[word] = 0
    POSTING_LIST[word] = []
    for input_file in TERM_FREQUENCY[word].keys():
        if TERM_FREQUENCY[word][input_file] > 0:
            POSTING_LIST[word].append(input_file)
            DOCUMENT_FREQUENCY[word] += 1
# example
print 'Document frequency of term "Made":'
print DOCUMENT_FREQUENCY['made']
print 'Term frequency of term "Made":'
print TERM_FREQUENCY['made']
print 'Posting List of term "Made":'
print POSTING_LIST['made']
998,850 | a17315d2fa42a84558e5588305a8720b575c3b91 | """
First compute the prefix sums: first[m] is the sum of the first m numbers.
Then the sum of any subarray nums[i:k] is simply first[k] - first[i].
So we just need to count those where first[k] - first[i] is in [lower,upper].
To find those pairs, I use mergesort with embedded counting. The pairs in the left half and the pairs in the right half get counted in the recursive calls. We just need to also count the pairs that use both halves.
For each left in first[lo:mid] I find all right in first[mid:hi] so that right - left lies in [lower, upper]. Because the halves are sorted, these fitting right values are a subarray first[i:j]. With increasing left we must also increase right, meaning we must leave out first[i] if it's too small and we must include first[j] if it's small enough.
Besides the counting, I also need to actually merge the halves for the sorting. I let sorted do that, which uses Timsort and takes linear time to recognize and merge the already sorted halves.
"""
class Solution(object):
    """Count range sums in [lower, upper] via merge sort over prefix sums."""
    def countRangeSum(self, nums, lower, upper):
        """
        :type nums: List[int]
        :type lower: int
        :type upper: int
        :rtype: int
        """
        # sums[k] is the sum of nums[:k]; any subarray sum is a difference
        # of two prefix sums.
        sums = [0]
        for num in nums:
            sums.append(sums[-1] + num)
        return self.mergeSort(sums, 0, len(sums), lower, upper)
    def mergeSort(self, sums, l, r, lower, upper):
        """Count qualifying (i, k) pairs within sums[l:r] and sort that slice."""
        if r - l <= 1:
            return 0
        # Bug fix: '/' is true division on Python 3 and would produce a
        # float slice index (TypeError); '//' is correct on both 2 and 3.
        mid = (l + r) // 2
        count = self.mergeSort(sums, l, mid, lower, upper) + self.mergeSort(sums, mid, r, lower, upper)
        # For each left prefix, the matching right prefixes form a window
        # [i, j) that only moves forward because both halves are sorted.
        i, j = mid, mid
        for left in sums[l:mid]:
            while i < r and sums[i] - left < lower:
                i += 1
            while j < r and sums[j] - left <= upper:
                j += 1
            count += j - i
        # Timsort merges the two already-sorted halves in linear time.
        sums[l:r] = sorted(sums[l:r])
        return count
998,851 | d9c3fea7d3586b07aa95904be48a8e6babf82c12 | """Syntax highlighting."""
from __future__ import annotations
import itertools
import logging
import time
import tkinter
from tkinter.font import Font
from typing import Any, Callable, Iterator
from pygments import styles, token
from pygments.lexer import Lexer, LexerMeta, RegexLexer
from pygments.lexers import MarkdownLexer
from porcupine import get_tab_manager, settings, tabs, textutils, utils
def _list_all_token_types(tokentype: Any) -> Iterator[Any]:
yield tokentype
for sub in map(_list_all_token_types, tokentype.subtypes):
yield from sub
# Names of every pygments token type; used as tkinter text-tag names.
all_token_tags = set(map(str, _list_all_token_types(token.Token)))
log = logging.getLogger(__name__)
ROOT_STATE_MARK_PREFIX = "highlight_root_"
# Infinite supply of unique mark names for root-state positions.
root_mark_names = (ROOT_STATE_MARK_PREFIX + str(n) for n in itertools.count())
class Highlighter:
    """Applies pygments-based syntax highlighting to a tkinter text widget.

    Tkinter marks (named with ROOT_STATE_MARK_PREFIX) record positions where
    the lexer is known to be in its root state, so highlighting can restart
    mid-file instead of re-lexing from the top.
    """
    def __init__(self, text: tkinter.Text) -> None:
        self.textwidget = text
        self._lexer: Lexer | None = None
        # the tags use fonts from here
        self._fonts: dict[tuple[bool, bool], Font] = {}
        for bold in (True, False):
            for italic in (True, False):
                # the fonts will be updated later, see below
                self._fonts[(bold, italic)] = Font(
                    weight=("bold" if bold else "normal"), slant=("italic" if italic else "roman")
                )
        self.textwidget.bind("<<SettingChanged:font_family>>", self._font_changed, add=True)
        self.textwidget.bind("<<SettingChanged:font_size>>", self._font_changed, add=True)
        self.textwidget.bind("<<SettingChanged:pygments_style>>", self._style_changed, add=True)
        self._font_changed()
        self._style_changed()
    def _font_changed(self, junk: object = None) -> None:
        """Sync the four cached fonts with the user's fixed font settings."""
        font_updates = dict(Font(name="TkFixedFont", exists=True).actual())
        del font_updates["weight"]  # ignore boldness
        del font_updates["slant"]  # ignore italicness
        for (bold, italic), font in self._fonts.items():
            # fonts don't have an update() method
            for key, value in font_updates.items():
                font[key] = value
    def _style_changed(self, junk: object = None) -> None:
        """Reconfigure every token tag's colors from the pygments style."""
        # http://pygments.org/docs/formatterdevelopment/#styles
        # all styles seem to yield all token types when iterated over,
        # so we should always end up with the same tags configured
        style = styles.get_style_by_name(settings.get("pygments_style", str))
        for tokentype, infodict in style:
            # this doesn't use underline and border
            # i don't like random underlines in my code and i don't know
            # how to implement the border with tkinter
            self.textwidget.tag_config(
                str(tokentype),
                font=self._fonts[(infodict["bold"], infodict["italic"])],
                # empty string resets foreground
                foreground=("" if infodict["color"] is None else "#" + infodict["color"]),
                background=("" if infodict["bgcolor"] is None else "#" + infodict["bgcolor"]),
            )
            # make sure that the selection tag takes precedence over our
            # token tag
            self.textwidget.tag_lower(str(tokentype), "sel")
    # yields marks backwards, from end to start
    def _get_root_marks(self, start: str = "1.0", end: str = "end") -> Iterator[str]:
        mark = None
        while True:
            # When stepping backwards, end seems to be excluded. We want to include it.
            mark = self.textwidget.mark_previous(mark or f"{end} + 1 char")
            if mark is None or self.textwidget.compare(mark, "<", start):
                break
            if mark.startswith(ROOT_STATE_MARK_PREFIX):
                yield mark
    def _index_is_marked(self, index: str) -> bool:
        """Return whether a root-state mark exists exactly at *index*."""
        try:
            next(self._get_root_marks(index, index))
        except StopIteration:
            return False
        return True
    def _detect_root_state(self, generator: Any, end_location: str) -> bool:
        """Return whether the lexer is safely restartable at *end_location*."""
        assert self._lexer is not None
        # below code buggy for markdown
        if isinstance(self._lexer, MarkdownLexer):
            return False
        # Only for subclasses of RegexLexer that don't override get_tokens_unprocessed
        # TODO: support ExtendedRegexLexer's context thing
        if type(self._lexer).get_tokens_unprocessed == RegexLexer.get_tokens_unprocessed:
            # Use local variables inside the generator (ugly hack)
            local_vars = generator.gi_frame.f_locals
            # If new_state variable is not None, it will be used to change
            # state after the yielding, and this is not a suitable place for
            # restarting the highlighting later.
            return (
                local_vars["statestack"] == ["root"] and local_vars.get("new_state", None) is None
            )
        # Start of line (column zero) and not indentation or blank line
        return end_location.endswith(".0") and bool(self.textwidget.get(end_location).strip())
    def highlight_range(self, last_possible_start: str, first_possible_end: str = "end") -> None:
        """Re-highlight from the nearest root-state mark before
        last_possible_start until first_possible_end (clamped to the view).
        """
        start_time = time.perf_counter()
        assert self._lexer is not None
        start = self.textwidget.index(next(self._get_root_marks(end=last_possible_start), "1.0"))
        lineno, column = map(int, start.split("."))
        end_of_view = self.textwidget.index("@0,10000")
        if self.textwidget.compare(first_possible_end, ">", end_of_view):
            first_possible_end = end_of_view
        tag_locations: dict[str, list[str]] = {}
        mark_locations = [start]
        # The one time where tk's magic trailing newline is helpful! See #436.
        generator = self._lexer.get_tokens_unprocessed(self.textwidget.get(start, "end"))
        for position, tokentype, text in generator:
            token_start = f"{lineno}.{column}"
            newline_count = text.count("\n")
            if newline_count != 0:
                lineno += newline_count
                column = len(text.rsplit("\n", 1)[-1])
            else:
                column += len(text)
            token_end = f"{lineno}.{column}"
            tag_locations.setdefault(str(tokentype), []).extend([token_start, token_end])
            # We place marks where highlighting may begin.
            # You can't start highlighting anywhere, such as inside a multiline string or comment.
            # The tokenizer is at root state when tokenizing starts.
            # So it has to be in root state for placing a mark.
            if self._detect_root_state(generator, token_end):
                if lineno >= int(mark_locations[-1].split(".")[0]) + 10:
                    mark_locations.append(token_end)
                if self.textwidget.compare(
                    f"{lineno}.{column}", ">=", first_possible_end
                ) and self._index_is_marked(token_end):
                    break
            if self.textwidget.compare(token_end, ">", end_of_view):
                break
        end = f"{lineno}.{column}"
        for tag in all_token_tags:
            self.textwidget.tag_remove(tag, start, end)
        for tag, places in tag_locations.items():
            self.textwidget.tag_add(tag, *places)
        marks_to_unset = []
        for mark in self._get_root_marks(start, end):
            try:
                mark_locations.remove(self.textwidget.index(mark))
            except ValueError:
                marks_to_unset.append(mark)
        self.textwidget.mark_unset(*marks_to_unset)
        for mark_index in mark_locations:
            self.textwidget.mark_set(next(root_mark_names), mark_index)
        mark_count = len(list(self._get_root_marks("1.0", "end")))
        log.debug(
            f"Highlighted between {start} and {end} in"
            f" {round((time.perf_counter() - start_time)*1000)}ms. Root state marks:"
            f" {len(marks_to_unset)} deleted, {len(mark_locations)} added, {mark_count} total"
        )
    def highlight_visible(self, junk: object = None) -> None:
        """Re-highlight the part of the file currently on screen."""
        self.highlight_range(self.textwidget.index("@0,0"))
    def set_lexer(self, lexer: Lexer) -> None:
        """Switch to a new lexer, discarding all root-state marks."""
        self.textwidget.mark_unset(*self._get_root_marks("1.0", "end"))
        self._lexer = lexer
        self.highlight_visible()
    def on_change(self, event: utils.EventWithData) -> None:
        """Handle a <<ContentChanged>> event by re-highlighting."""
        change_list = event.data_class(textutils.Changes).change_list
        if len(change_list) == 1:
            [change] = change_list
            if len(change.new_text) <= 1:
                # Optimization for typical key strokes (but not for reloading entire file):
                # only highlight the area that might have changed
                self.highlight_range(f"{change.start[0]}.0", f"{change.end[0]}.0 lineend")
                return
        self.highlight_visible()
# When scrolling, don't highlight too often. Makes scrolling smoother.
def debounce(
    any_widget: tkinter.Misc, function: Callable[[], None], ms_between_calls_min: int
) -> Callable[[], None]:
    """Return a wrapper that calls *function* at most once per interval.

    The first call runs immediately; calls arriving during the cooldown are
    coalesced into a single deferred run via the widget's after() timer.
    """
    timeout_scheduled = False
    running_requested = False
    def timeout_callback() -> None:
        # Runs when the cooldown expires: fire the deferred call if one was
        # requested, otherwise stop rescheduling.
        nonlocal timeout_scheduled, running_requested
        assert timeout_scheduled
        if running_requested:
            function()
            any_widget.after(ms_between_calls_min, timeout_callback)
            running_requested = False
        else:
            timeout_scheduled = False
    def request_running() -> None:
        nonlocal timeout_scheduled, running_requested
        if timeout_scheduled:
            running_requested = True
        else:
            assert not running_requested
            function()
            any_widget.after(ms_between_calls_min, timeout_callback)
            timeout_scheduled = True
    return request_running
def on_new_filetab(tab: tabs.FileTab) -> None:
    """Attach a Highlighter to a newly created file tab."""
    # needed because pygments_lexer might change
    def on_lexer_changed(junk: object = None) -> None:
        highlighter.set_lexer(tab.settings.get("pygments_lexer", LexerMeta)())
    highlighter = Highlighter(tab.textwidget)
    tab.bind("<<TabSettingChanged:pygments_lexer>>", on_lexer_changed, add=True)
    on_lexer_changed()
    utils.bind_with_data(tab.textwidget, "<<ContentChanged>>", highlighter.on_change, add=True)
    # Debounced so fast scrolling doesn't re-highlight on every pixel.
    utils.add_scroll_command(
        tab.textwidget, "yscrollcommand", debounce(tab, highlighter.highlight_visible, 100)
    )
    highlighter.highlight_visible()
def setup() -> None:
    """Plugin entry point: highlight every file tab as it is created."""
    get_tab_manager().add_filetab_callback(on_new_filetab)
|
998,852 | 27b69fd17c93942c1287c6c7fa856fefd34df7b2 | from django.db import models
class Pessoas(models.Model):
    """Django model for a person with basic profile data and an optional photo."""
    nome = models.CharField(max_length=30)      # first name
    sobrenome = models.CharField(max_length=30) # last name
    idade = models.IntegerField()               # age
    salario = models.DecimalField(max_digits=10, decimal_places=2)  # salary
    bio = models.TextField()
    foto = models.ImageField(upload_to='fotos_pessoas', null=True, blank=True)
    def __str__(self):
        # Display as "first last" in the admin and shells.
        return self.nome + ' ' + self.sobrenome
"""class Produto(models.Model):
descricao = models.CharField(max_length=100)
preco = models.DecimalField(max_digits=5, decimal_places=2)
def __str__(self):
return self.descricao
class Venda(models.Model):
numero = models.CharField(max_length=7)
valor = models.DecimalField(max_digits=5, decimal_places=2)
desconto = models.DecimalField(max_digits=5, decimal_places=2)
impostos = models.DecimalField(max_digits=5, decimal_places=2)
pessoa = models.ForeignKey(Pessoas, null=True, blank=True, on_delete=models.PROTECT)
produtos = models.ManyToManyField(Produto, blank=True)
def __str__(self):
return self.numero""" |
998,853 | 59c189fdea0a40f71699339bb7d006fb20f715c9 | def get_node_outputs(node_path):
"""
Get all node outputs and return it
"""
item = ix.get_item(node_path)
obj_array = ix.api.OfItemArray(1)
obj_array[0] = item
item_outputs = ix.api.OfItemVector()
ix.application.get_factory().get_items_outputs(obj_array, item_outputs, False)
node_outputs = []
for item_ in range(item_outputs.get_count()):
for i in range(item_outputs[item_].get_attribute_count()):
attr= item_outputs[item_].get_attribute(i)
if attr.get_texture():
if str(attr.get_texture()) == item.get_full_name():
#attrs[attr] = target_node.get_full_name()
node_outputs.append(attr)
return node_outputs
def get_node_inputs(node_path):
    """Return the attributes of the node at *node_path* that have a texture.

    Mirrors get_node_outputs for the input side of the node.
    """
    item = ix.get_item(node_path)
    # Cleanup: the original built an OfItemArray and OfItemVector here but
    # never used them; only the attribute scan below is needed.
    node_inputs = []
    for i in range(item.get_attribute_count()):
        attr = item.get_attribute(i)
        if attr.get_texture():
            node_inputs.append(attr)
    return node_inputs
# Example usage (Python 2 print statements).
print get_node_inputs("project://material_node")
print get_node_outputs("project://texture_node")
|
998,854 | dd0d0eae86003d6f0d9bbcc1d17fd8bc48e5af3a | import os
import re
import time
import sys
#save similarities or differences to a file
def saveFile(log):
    """Write each entry of *log* to similarLogs.txt, one entry per line.

    Uses a context manager so the file is closed even if a write fails
    (the original closed it manually).
    """
    with open("similarLogs.txt", "w") as out:
        for line in log:
            out.write(line)
            out.write("\n")
#Open files and write them to a list
def openFile(Filename):
    """Read a log file, drop noisy entries, and strip timestamp prefixes.

    Returns the remaining lines as a list. Lines that are routine
    root-logger "Visiting" INFO entries or mechanize "JS retrieve" INFO
    entries are skipped.
    """
    log = []
    with open(Filename, "r") as f:
        print("Opening log file...:" + Filename)
        for line in f:
            # Bug fix: the original conditions were written as
            # '"root" and "INFO" and "Visiting" in line', which only tests
            # the LAST operand ('"root"' and '"INFO"' are truthy constants).
            # Each substring must be tested against the line individually.
            if "root" in line and "INFO" in line and "Visiting" in line:
                continue
            if "mechanize" in line and "INFO" in line and "JS retrieve" in line:
                continue
            # Strip the leading "YYYY-MM-DD HH:MM:SS, x, y, , " prefix.
            log.append(re.sub(r'(^\d*-\d*-\d*\s\d*:\d*:\d*,\s\w*,\s\w*,\s,\s)', '', line))
    print("Log file opened.")
    return log
# --- Script entry: find lines common to two log files given on the command line ---
start = time.time()
# Bug fix: sys.argv[0] is the path of this script itself; the two log files
# to compare arrive as argv[1] and argv[2].
f1 = str(sys.argv[1])
f2 = str(sys.argv[2])
log1 = openFile(f1)
log2 = openFile(f2)
logSimilar = []
noOfSimilarities = 0
print("Checking for similarities...")
# Set membership is O(1) per line instead of scanning the whole list.
log2_lines = set(log2)
for line in log1:
    if line in log2_lines:
        logSimilar.append(line)
        noOfSimilarities = noOfSimilarities + 1
        print(line)
saveFile(logSimilar)
end = time.time()
timeTaken = end - start
print("Time elapsed: " + str(timeTaken))
998,855 | df56a5e7c77a303a85b69a757bc9dcb380792310 | """Math functions for calculator."""
def add(numbers):
    """Return the sum of all numbers in the input sequence."""
    # Builtin sum() replaces the manual accumulation loop; sum([]) == 0
    # matches the original's behavior on empty input.
    return sum(numbers)
def subtract(numbers):
    """Return the first number minus each of the remaining numbers in turn."""
    result = numbers[0]
    for value in numbers[1:]:
        result -= value
    return result
def multiply(numbers):
    """Return the product of all numbers in the input sequence (1 if empty)."""
    result = 1
    for value in numbers:
        result *= value
    return result
def divide(numbers):
    """Divide the first number by each remaining number; true division."""
    result = numbers[0]
    for value in numbers[1:]:
        result /= value
    return result
def square(numbers):
    """Return the square of the first input (only one argument is expected)."""
    base = numbers[0]
    return base ** 2
def cube(numbers):
    """Return the cube of the first input (only one argument is expected)."""
    base = numbers[0]
    return base ** 3
def power(numbers):
    """Raise the first number to each remaining number, left to right."""
    result = numbers[0]
    for exponent in numbers[1:]:
        result **= exponent
    return result
def mod(numbers):
    """Reduce the first number modulo each remaining number, left to right."""
    result = numbers[0]
    for divisor in numbers[1:]:
        result %= divisor
    return result
|
998,856 | 3537d78049e8c5a78c57b4e86a118ea59c717d99 | import media
import movie_trailer
# Build media.Movie instances (title, storyline, poster URL, trailer URL,
# director, year) and render them all with the movie_trailer page generator.
dwaraka=media.Movie("Dwaraka","Making a thief change into a god man",
"http://www.telugumirchi.com/en/wp-content/uploads/2016/08/dwaraka-movie-poster.jpg",
"https://www.youtube.com/watch?v=DDWPxs8zTzg",
"srinivasa ravindra",
2017)
#print(toy_story.storyline)
katamraudu=media.Movie("Katamrayudu",
"It is Beautuful love song",
"http://thehansindia.com/assets/7574_2535_pspk_katamarayudu_new_poster.jpg",
"https://www.youtube.com/watch?v=zggQw2wb60M",
"kishore kumar pardasani",
2017)
sailajareddy_alludu=media.Movie("sailajareddyalludu",
"stale story of egos",
"http://www.atlantawishesh.com/media/k2/galleries/67589/Sailaja-Reddy-Alludu-08.jpg",
"https://www.youtube.com/watch?v=4djA8T6K4iQ",
"maruti",
2018)
marakata_mani=media.Movie("Marakatamani",
"A film about ancient diamond",
"https://4.bp.blogspot.com/-SCcfrZULKfQ/WUPqfJeXl_I/AAAAAAAEvUw/5J-NZdPKJfI7SYRHFNqzmgQ0Uo1E_t6mACLcBGAs/s1600/marakathamani%2B....jpg",
"https://www.youtube.com/watch?v=_e3IMX7JIIc",
"A.R.K.Sarvanan",
2017)
rx_100=media.Movie("Rx100",
"RX 100 is a story of love, lust and passion marred by excessive and unnecessary gore",
"https://www.filmibeat.com/ph-big/2018/07/lip-lock-scenes-from-rx-100_153128190270.jpg",
"https://www.youtube.com/watch?v=pffgnV5meN4",
"Ajay Bhupathi",
2018)
sivam=media.Movie("Sivam",
"Dragged out love story by Ram Pothineni",
"https://www.filmibeat.com/ph-big/2015/10/shivam_144384768660.jpg",
"https://www.youtube.com/watch?v=QydZny84h3E",
"Ravi Kishore",
2015)
angel=media.Movie("Angel",
"The story revolves around two guys who make use of different ways to earn money and enjoy life,but their life changes when they accidentally get a statue, which is an angel from heaven.",
"http://www.idlebrain.com/images5/poster-angel6.jpg",
"https://www.youtube.com/watch?v=pHlnKdHiZ2Y",
"Palani",
2017)
chalo=media.Movie("Chalo",
"Chalo” is Naga Shaurya latest love story and Kannada heroine Rashmika debut film in Telugu",
"https://www.apherald.com/ImageStore/images/movies/movies-wallpapers/Hero-Naga-Shourya-Chalo-Telugu-Movie-Latest-Stills14.jpg",
"https://www.youtube.com/watch?v=6_BxEjvWsqs",
"Venky Kudumula",
2018)
#print(katamraudu.storyline)
#katamraudu.show_trailer()
# Generate and open the HTML page listing every movie above.
movies=[dwaraka,katamraudu,sailajareddy_alludu,marakata_mani,rx_100,sivam,angel,chalo]
movie_trailer.open_movies_page(movies)
#print(media.Movie.VALID_RATINGS)
print(media.Movie.__doc__)
998,857 | 541a47c6c88a97c3f3e81cc0e359c4f50f822772 | #-*- coding:utf-8 -*-
'''
绘制L-System曲线
根据设定的迭代规则(字符串替换),得到曲线字符串
然后逐步绘制
'''
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.animation as animation
'''
曲线规则:
start 开始迭代的字符串
reps 替换规则
level 迭代次数
rotate 旋转角度
actions 动作规则
left 左旋
right 右旋
forward 前进
[ 当前点压栈
] 出栈
'''
'''
# gosper
rules = {
"start": "A",
"reps": {"A": "A-B--B+A++AA+B-", "B": "+A-BB--B-A++A+B"},
"level": 4,
"rotate": np.pi / 3,
"actions": {"+": "left", "-": "right", "A": "forward", "B": "forward"}
}
'''
'''
# Sierpinski arrowhead
rules = {
"start": "A",
"reps": {"A": "B-A-B", "B": "A+B+A"},
"level": 8,
"rotate": np.pi / 3,
"actions": {"+": "left", "-": "right", "A": "forward", "B": "forward"}
}
'''
'''
# Hilbert
rules = {
"start": "A",
"reps": {"A": "-BF+AFA+FB-", "B": "+AF-BFB-FA+"},
"level": 5,
"rotate": np.pi / 2,
"actions": {"+": "left", "-": "right", "F": "forward"}
}
'''
'''
# Dragon
rules = {
"start": "FY",
"reps": {"X": "X+YF+", "Y": "-FX-Y"},
"level": 15,
"rotate": np.pi / 2,
"actions": {"+": "left", "-": "right", "F": "forward"}
}
'''
'''
rules = {
"start": "F-F-F-F-F",
"reps": {"F": "F-F++F+F-F-F"},
"level": 3,
"rotate": 72 * np.pi / 180,
"actions": {"+": "left", "-": "right", "F": "forward"}
}
'''
'''
rules = {
"start": "F",
"reps": {"F": "FF[-F++F][+F--F]++F--F"},
"level": 4,
"rotate": 25 * np.pi / 180,
"actions": {"+": "left", "-": "right", "F": "forward", "X" : "forward", "[" : "push", "]" : "pop"}
}
'''
'''
rules = {
"start": "F",
"reps": {"F": "+F[-F-X+]++F[+F-X-F]-X-F", "X" : "X-F+X"},
"level": 4,
"rotate": 15 * np.pi / 180,
"actions": {"+": "left", "-": "right", "F": "forward", "X" : "forward", "[" : "push", "]" : "pop"}
}
'''
'''
# sier_qua
rules = {
"start": "L--F--L--F",
"reps": {"L": "+R-F-R+", "R" : "-L+F+L-"},
"level": 10,
"rotate": 45 * np.pi / 180,
"actions": {"+": "left", "-": "right", "F": "forward", "[" : "push", "]" : "pop"}
}
'''
'''
# 36-72-72
rules = {
"start": "Q",
"reps": {"F" : "",
"P" : "--FR++++FS--FU",
"Q" : "FT++FR----FS++",
"R" : "++FP----FQ++FT",
"S" : "FU--FP++++FQ--",
"T" : "+FU--FP+",
"U" : "-FQ++FT-"},
"level": 7,
"rotate": 36 * np.pi / 180,
"actions": {"+": "left", "-": "right", "F": "forward", "[" : "push", "]" : "pop"}
}
'''
'''
# koch
rules = {
"start": "F--F--F",
"reps": {"F" : "F+F--F+F"},
"level": 8,
"rotate": 60 * np.pi / 180,
"actions": {"+": "left", "-": "right", "F": "forward", "[" : "push", "]" : "pop"}
}
'''
# 32段岛屿
rules = {
"start": "A-A-A-A",
"reps": {"A" : "-A+A-A-A+A+AA-A+A+AA+A-A-AA+AA-AA+A+A-AA-A-A+AA-A-A+A+A-A+"},
"level": 2,
"rotate": 90 * np.pi / 180,
"actions": {"+": "left", "-": "right", "A": "forward", "[" : "push", "]" : "pop"}
}
# 解析字符串
def parse_string(level, grammar=None):
    """Expand the L-system start symbol by applying the rewrite rules
    *level* times.

    Args:
        level: number of rewriting iterations; 0 returns the start string.
        grammar: optional rule dict with "start" and "reps" keys.
            Defaults to the module-level ``rules`` (backward compatible).

    Returns:
        The fully expanded rule string.
    """
    g = rules if grammar is None else grammar
    if level == 0:
        return g["start"]
    # Recursively expand one level less, then rewrite each symbol;
    # symbols without a replacement rule are kept as-is.
    sub_str = parse_string(level - 1, g)
    return "".join(g["reps"].get(c, c) for c in sub_str)
def generate_points(start_points, start_angle, string, grammar=None):
    """Interpret an L-system string as turtle graphics and return the polyline.

    Args:
        start_points: list of [x, y] seed points; drawing continues from the last.
        start_angle: initial heading in radians.
        string: expanded rule string to interpret.
        grammar: optional rule dict providing "actions" and "rotate";
            defaults to the module-level ``rules`` (backward compatible).

    Returns:
        The accumulated list of [x, y] points (seed points included).
    """
    g = rules if grammar is None else grammar
    points = [p for p in start_points]
    point = start_points[-1]
    angle = start_angle
    stack = []  # saved (point, angle) states for '[' / ']'
    for c in string:
        # Single dispatch on the configured action; characters without an
        # action entry are silently ignored (matches the original try/pass).
        action = g["actions"].get(c)
        if action == "left":
            angle += g["rotate"]
        elif action == "right":
            angle -= g["rotate"]
        elif action == "push":
            stack.append((point, angle))
        elif action == "pop":
            point, angle = stack.pop()
        elif action == "forward":
            x, y = point
            points.append([x + np.cos(angle), y + np.sin(angle)])  # unit step
            point = points[-1]
    return points
def main():
    """Expand the configured L-system and plot the resulting curve."""
    expanded = parse_string(level=rules["level"])
    curve = generate_points(start_points=[[0.0, 0.0]], start_angle=0.0, string=expanded)
    xs, ys = zip(*curve)
    plt.figure(figsize=(8.0, 8.0))
    plt.plot(xs, ys)
    plt.show()
main() |
998,858 | 37df11ac0354d68d9546f1e63d33bf829bf3bbc8 | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--interactive", action="store_true")
args = parser.parse_args()
import trajoptpy
import openravepy as rave
import numpy as np
import json
from trajoptpy.check_traj import traj_is_safe
if rave.__version__ < "0.9":
raise Exception("this example only works with openrave >= 0.9 due to recent jacobian bugfix")
def position_base_request(robot, link_name, xyz_targ, quat_targ):
    """Build a trajopt JSON request that places `link_name` at the target pose
    (xyz_targ position, quat_targ wxyz orientation) in a single step, using a
    collision cost and a randomized initial guess over the active DOFs.

    Returns the request as a plain dict (serialized to JSON by the caller).
    """
    request = {
        # BEGIN basic_info
        "basic_info" : {
            "n_steps" : 1,
            "manip" : "active",
            "start_fixed" : False
        },
        # END basic_info
        "costs" : [
        {
            "type" : "collision",
            "params" : {"coeffs" : [10],"dist_pen" : [0.025]}
        }
        ],
        "constraints" : [
        {
            "type" : "pose",
            "name" : "final_pose",
            "params" : {
                "pos_coeffs" : [1,1,1],
                "rot_coeffs" : [1,1,1],
                "xyz" : list(xyz_targ),
                "wxyz" : list(quat_targ),
                "link" : link_name,
            },
        }
        ],
        "init_info" : {
        }
    }
    # BEGIN random_init
    # randomly select joint values with uniform distribution over joint limits
    lower,upper = robot.GetActiveDOFLimits()
    lower = np.clip(lower, -np.pi, np.pi) # continuous joints have huge ranges, so we clip them to [-pi, pi]
    upper = np.clip(upper, -np.pi, np.pi) # to avoid poor numerical conditioning
    rands = np.random.rand(len(lower))
    dofvals_init = lower*rands + upper*(1-rands)
    # we'll treat the base pose specially, choosing a random angle and then setting a reasonable
    # position based on this angle
    # NOTE(review): the last three active DOFs are assumed to be base x, y, rotation —
    # consistent with the SetActiveDOFs(... DOFAffine.X + Y + RotationAxis) call below.
    angle_init = np.random.rand() * 2*np.pi
    x_init = xyz_targ[0] - .5*np.cos(angle_init)
    y_init = xyz_targ[1] - .5*np.sin(angle_init)
    dofvals_init[-3:] = [x_init, y_init, angle_init]
    # END random_init
    # Seed the optimizer with the random configuration as a one-step trajectory.
    request["init_info"]["type"] = "given_traj"
    request["init_info"]["data"] = [dofvals_init.tolist()]
    return request
def check_result(result, robot):
    """Verify an optimization result: the trajectory must be collision-free
    and every constraint violation must be within `abstol`.

    Returns True when both checks pass, False otherwise (reasons printed).
    """
    print "checking trajectory for safety and constraint satisfaction..."
    success = True
    if not traj_is_safe(result.GetTraj(), robot):
        success = False
        print "trajectory has a collision!"
    abstol = 1e-3  # absolute tolerance on constraint violation
    for (name, val) in result.GetConstraints():
        if (val > abstol):
            success = False
            print "constraint %s wasn't satisfied (%.2e > %.2e)"%(name, val, abstol)
    return success
### Parameters ###
ENV_FILE = "data/pr2test1.env.xml"
XYZ_TARGET = [.5,0,.9]
QUAT_TARGET = [1,0,0,0]
LINK_NAME = "r_gripper_tool_frame"
##################
### Env setup ####
# Load the test scene and grab its (single) robot.
env = rave.Environment()
env.StopSimulation()
env.Load(ENV_FILE)
robot = env.GetRobots()[0]
robot.SetDOFValues([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
robot.SetTransform(rave.matrixFromPose([1, 0, 0, 0, -3.4, -1.4, 0.05]))
# BEGIN set_active
# Active DOFs: right arm + torso lift, plus planar base motion (x, y, yaw).
robot.SetActiveDOFs(np.r_[robot.GetManipulator("rightarm").GetArmIndices(),
                          robot.GetJoint("torso_lift_joint").GetDOFIndex()],
                    rave.DOFAffine.X + rave.DOFAffine.Y + rave.DOFAffine.RotationAxis, [0,0,1])
# END set_active
##################
# Retry with a fresh random initialization until the optimizer produces a
# collision-free, constraint-satisfying solution (at most 100 attempts).
success = False
for i_try in xrange(100):
    request = position_base_request(robot, LINK_NAME, XYZ_TARGET, QUAT_TARGET)
    s = json.dumps(request)
    trajoptpy.SetInteractive(args.interactive)
    prob = trajoptpy.ConstructProblem(s, env)
    result = trajoptpy.OptimizeProblem(prob)
    if check_result(result, robot):
        success = True
        break
if success:
    print "succeeded on try %i"%(i_try)
    print result
else:
    print "failed to find a valid solution :("
|
998,859 | fa1f6c3a88350960be3d56ebdf312ae3b4bda20a | import numpy as np
# import matplotlib.pyplot as plt
from scipy import interpolate
class Extrapolator:
    """Densify a sparse 2-D vector field.

    Given sample positions (x, y) carrying two scalar channels (z1, z2),
    produce both channels on every cell of a regular grid.  The field is
    pinned to zero along the grid border so linear interpolation is
    defined everywhere inside the grid.
    """
    def __init__(self):
        return

    def extrapolate(self, x, y, z1, z2, out_size):
        """Interpolate z1/z2 samples located at (x, y) onto an out_size grid.

        x, y: 1-D sequences of sample column / row coordinates.
        z1, z2: 1-D sequences of sample values (e.g. flow components).
        out_size: (height, width) of the output grid.
        Returns (z1_out, z2_out), each with shape out_size.
        """
        h, w = out_size[0], out_size[1]
        # Zero-valued anchor points along the whole border, assembled in
        # top / left / right / bottom order (corners appear exactly once).
        border_x = np.concatenate((
            np.arange(0, w),              # top edge
            np.zeros(h - 1, dtype=int),   # left edge
            np.full(h - 1, w - 1),        # right edge
            np.arange(1, w - 1),          # bottom edge (corners excluded)
        ))
        border_y = np.concatenate((
            np.zeros(w, dtype=int),
            np.arange(1, h),
            np.arange(1, h),
            np.full(w - 2, h - 1),
        ))
        n_border = border_x.shape[0]
        x_all = np.concatenate((x, border_x))
        y_all = np.concatenate((y, border_y))
        z1_all = np.concatenate((z1, np.zeros(n_border, dtype=float)))
        z2_all = np.concatenate((z2, np.zeros(n_border, dtype=float)))
        # Linear interpolation of the scattered samples over the full grid.
        sample_points = np.column_stack((y_all, x_all))
        grid_y, grid_x = np.mgrid[0:h, 0:w]
        z1_out = interpolate.griddata(sample_points, z1_all, (grid_y, grid_x), method="linear")
        z2_out = interpolate.griddata(sample_points, z2_all, (grid_y, grid_x), method="linear")
        return z1_out, z2_out
# if __name__ == '__main__':
# img_size = [100, 200]
# e = Extrapolator()
# # Generate a random sparse vector field
# n_samples = 60
# max_val = 7
# row = np.random.randint(img_size[0], size=[n_samples])
# col = np.random.randint(img_size[1], size=[n_samples])
# data_x = np.random.rand(n_samples) * max_val - (max_val / 2)
# data_y = np.random.rand(n_samples) * max_val - (max_val / 2)
# # Plot the sparse vector field
# x_orig = np.full(img_size, np.nan)
# y_orig = np.full(img_size, np.nan)
# x_orig[row, col] = data_x
# y_orig[row, col] = data_y
# fig = plt.figure(figsize=(20, 10))
# plt.subplot(1, 2, 1)
# plt.title('Sparse vector field')
# e.plot_vector_field(x_orig, y_orig, show_plot=False)
# # Generate a dense vector field from the sparse one
# x, y = e.extrapolate(col, row, data_x, data_y, img_size)
# # Plot the dense vector field
# plt.subplot(1, 2, 2)
# plt.title('Dense vector field')
# e.plot_vector_field(x, y, step=5)
# # You might want to add more test cases with non-random data, to make sure everything works as expected
|
998,860 | 0eb77a35566f7c229e336e93e22cab2a2bba5bed | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 09:43:11 2020
@author: Utilisateur
"""
from autograd import grad
from lik_functions import ord_loglik_j, binom_loglik_j, categ_loglik_j
###########################################################################
# Binary/count gradient
###########################################################################
def bin_grad_j(lambda_bin_j, y_bin_j, zM, k, ps_y, p_z_ys, nj_bin_j):
    ''' Gradient (w.r.t. lambda_bin_j) of the expected log-likelihood of one
    binary/count variable, obtained by autograd-differentiating binom_loglik_j.

    lambda_bin_j ( (r + 1) 1darray): Coefficients of the binomial distributions in the GLLVM layer
    y_bin_j (numobs 1darray): The subset containing only the binary/count variables in the dataset
    zM (M x r x k ndarray): M Monte Carlo copies of z for each component k1 of the mixture
    k (int): The number of components of the mixture
    ps_y (numobs x k ndarray): p(s_i = k1 | y_i) for all k1 in [1,k] and i in [1,numobs]
    p_z_ys (M x numobs x k ndarray): p(z_i | y_i, s_i = k) for all m in [1,M], k1 in [1,k] and i in [1,numobs]
    nj_bin_j (int): The number of possible values/maximum values of the jth binary/count variable
    --------------------------------------------------------------
    returns (1darray): the gradient evaluated at lambda_bin_j
    '''
    return grad(binom_loglik_j)(lambda_bin_j, y_bin_j, zM, k, ps_y, p_z_ys, nj_bin_j)
###########################################################################
# Ordinal gradient
###########################################################################
def ord_grad_j(lambda_ord_j, y_oh, zM, k, ps_y, p_z_ys, nj_ord_j):
    ''' Gradient (w.r.t. lambda_ord_j) of the expected log-likelihood of one
    ordinal variable, obtained by autograd-differentiating ord_loglik_j.

    lambda_ord_j ( (nj_ord_j + r - 1) 1darray): Coefficients of the ordinal distributions in the GLLVM layer
    y_oh (numobs 1darray): The subset containing only the ordinal variables in the dataset (one-hot)
    zM (M x r x k ndarray): M Monte Carlo copies of z for each component k1 of the mixture
    k (int): The number of components of the mixture
    ps_y (numobs x k ndarray): p(s_i = k1 | y_i) for all k1 in [1,k] and i in [1,numobs]
    p_z_ys (M x numobs x k ndarray): p(z_i | y_i, s_i = k) for all m in [1,M], k1 in [1,k] and i in [1,numobs]
    nj_ord_j (int): The number of possible values of the jth ordinal variable
    --------------------------------------------------------------
    returns (1darray): the gradient evaluated at lambda_ord_j
    '''
    return grad(ord_loglik_j)(lambda_ord_j, y_oh, zM, k, ps_y, p_z_ys, nj_ord_j)
###########################################################################
# Categorical gradient
###########################################################################
def categ_grad_j(lambda_categ_j, y_categ_j, zM, k, ps_y, p_z_ys, nj_categ_j):
    ''' Gradient (w.r.t. lambda_categ_j) of the expected log-likelihood of one
    categorical variable, obtained by autograd-differentiating categ_loglik_j.

    lambda_categ_j (nj_categ x (r + 1) ndarray): Coefficients of the categorical distributions in the GLLVM layer
    y_categ_j (numobs 1darray): The jth categorical variable in the dataset
    zM (M x r x k ndarray): M Monte Carlo copies of z for each component k1 of the mixture
    k (int): The number of components of the mixture
    ps_y (numobs x k ndarray): p(s_i = k1 | y_i) for all k1 in [1,k] and i in [1,numobs]
    p_z_ys (M x numobs x k ndarray): p(z_i | y_i, s_i = k) for all m in [1,M], k1 in [1,k] and i in [1,numobs]
    nj_categ_j (int): The number of possible values of the jth categorical variable
    --------------------------------------------------------------
    returns (ndarray): the gradient evaluated at lambda_categ_j
    '''
    return grad(categ_loglik_j)(lambda_categ_j, y_categ_j, zM, k, ps_y, p_z_ys, nj_categ_j)
|
998,861 | 16759af55432f707454cfa9990846ca07325f7f9 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
featurizer for lite gem
"""
import numpy as np
import paddle
import pgl
from pahelix.utils.compound_tools import new_smiles_to_graph_data
__all__ = [
'LiteGEMTransformFn',
'LiteGEMCollateFn',
]
class LiteGEMTransformFn(object):
    """Convert a raw {smiles, label} record into a graph-feature dict for LiteGEM."""
    def __init__(self, config):
        # config supplies the atom/bond feature name lists used in __call__.
        self.config = config
    def __call__(self, raw_data):
        """
        Gen features according to raw data and return a single graph data.
        Args:
            raw_data: dict with a 'smiles' string and (optionally) a 'label';
                the smiles is converted to a mol by rdkit, then to graph data.
        Returns:
            dict with num_nodes / nfeat / efeat / edges / label / smiles,
            or None when the smiles cannot be featurized.
        """
        smiles = raw_data['smiles']
        # Unlabeled records (e.g. at inference time) get a dummy zero label.
        label = np.array([0]) if 'label' not in raw_data else raw_data['label']
        feature_dict = new_smiles_to_graph_data(smiles)
        if feature_dict is None:
            return None
        feature_dict["label"] = label
        new_graph = {}
        new_graph["num_nodes"] = len(feature_dict['atomic_num'])
        new_graph["nfeat"] = {key: feature_dict[key] for key in self.config.atom_names + self.config.atom_float_names}
        new_graph["efeat"] = {key: feature_dict[key] for key in self.config.bond_names}
        new_graph["edges"] = feature_dict['edges']
        new_graph["label"] = feature_dict["label"]
        # Fix: keep the source smiles in the output — LiteGEMCollateFn (below in
        # this file) reads gdata['smiles'] and would otherwise hit a KeyError.
        new_graph["smiles"] = smiles
        return new_graph
class LiteGEMCollateFn(object):
    """Collate a batch of featurized molecules into one batched pgl.Graph
    plus a float32 label tensor.

    (Original note: "CollateFn for attribute mask model of pretrain gnns".)
    """
    def __init__(self):
        pass
    def __call__(self, batch):
        """Batch items exposing get_feature() -> graph dict into model inputs.

        Returns {'graph': batched pgl.Graph tensor, 'labels': float32 tensor}.
        """
        graph_list = []
        labels = []
        smiles_list = []
        #for gdata in batch_data:
        for admet_d_item in batch:
            # Each batch item wraps its graph dict behind get_feature().
            gdata = admet_d_item.get_feature()
            g = pgl.Graph(edges=gdata['edges'],
                          num_nodes=gdata['num_nodes'],
                          node_feat=gdata['nfeat'],
                          edge_feat=gdata['efeat'])
            graph_list.append(g)
            labels.append(gdata['label'])
            # NOTE(review): smiles_list is collected but never returned —
            # presumably kept for debugging; confirm before removing.
            smiles_list.append(gdata['smiles'])
        # Labels are cast to float32 (regression / BCE-style heads).
        labels = paddle.to_tensor(np.array(labels, dtype="float32"))
        g = pgl.Graph.batch(graph_list).tensor()
        return {'graph': g, 'labels': labels}
|
998,862 | ae3fcd0efb4a137c07b3df9993b41639d6490d24 | # -*- coding: utf-8 -*-
#############################################################################
# #
# EIDEAnalog library (excerpt). #
# #
# Librería EIDEAnalog (extracto). Anexo III. Uso conjunto con EIDEGraphics #
# Ver Anexo III en EIDEAnalog_ASI_SE_HIZO.pdf #
# para más información (https://github.com/Clave-EIDEAnalog/DOCS) #
# #
# Copyright (c) 2020. Clave Ingenieros S.L.; #
# vicente.fombellida@claveingenieros.es #
# #
#############################################################################
import EIDEAnalog
# Import and instance EIDEGraphics
import facade as EGClient
# Graphics client polled once per second with the latest sensor readings.
EGUser = EGClient.EIDE.EIDEGraphics(1) # 1 sec. interval
instancedSensors = []
currentValues = [0, 0, 0, 0]  # one slot per instanced sensor, in append order
# Instance buses and sensors. Add them to list.
myBus = EIDEAnalog.ADS1115(0x48)
NTC_china = EIDEAnalog.sensor('ADS1115_NTC', myBus, 1, name='NTC_placa')
T_Exterior = EIDEAnalog.sensor('LM35', myBus, 2, name='TE_LM35')
instancedSensors.append(NTC_china)
instancedSensors.append(T_Exterior)
myOtherBus = EIDEAnalog.oneWire(19)
DS18B20_1 = EIDEAnalog.sensor('DS18B20', myOtherBus, 1, name='oW1')
DS18B20_2 = EIDEAnalog.sensor('DS18B20', myOtherBus, 2, name='oW2')
instancedSensors.append(DS18B20_1)
instancedSensors.append(DS18B20_2)
# Main acquisition loop: trigger a single-shot conversion per sensor,
# busy-wait for it to complete, then push all values to the graphics client.
while True:
    for counter, i in enumerate(instancedSensors):
        i.sensorBus.setChannelGain(i)
        i.sensorBus.singleShot()
        while not(i.sensorBus.ready()):
            pass
        currentValues[counter] = i.readout(i.sensorBus.readConversion())
    EGUser.EIDEGLoop(currentValues)
# NOTE(review): unreachable — the while True loop above never breaks, so
# EIDEAnalog.quit() is never called; confirm intended shutdown path.
EIDEAnalog.quit()
|
998,863 | 956c9cc5abd5b77f8e532b584f17d0375c65be29 | '''
@Author: your name
@Date: 2020-06-08 15:28:39
@LastEditTime: 2020-06-08 18:52:10
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/String/28.Implement_strStr.py
'''
# Implement strStr().
# Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
''' Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
'''
class Solution:
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack,
        or -1 if needle is not part of haystack (0 for an empty needle).

        Two-pointer scan with backtracking: on a mismatch, rewind i to the
        character just after where the current attempt started and reset j.
        O(n*m) worst case, O(1) extra space.
        """
        if len(needle) == 0:
            return 0
        i = j = 0
        while i < len(haystack):
            # Fix: the original re-tested `i < len(haystack)` inside the loop
            # body, which is always true here — the guard was redundant.
            if haystack[i] == needle[j]:
                j += 1  # extend the current candidate match
            else:
                i -= j  # back up to one past this attempt's start
                j = 0
            i += 1
            if len(needle) == j:
                return i - j
        return -1
# ----------------------------------------------------------------
class Solution:
    def strStr(self, haystack, needle):
        """Sliding-window search: compare the len(needle)-wide slice at every
        start position; return the first match, -1 if none (0 for empty needle)."""
        if not needle:
            return 0
        width = len(needle)
        start = 0
        while start < len(haystack):
            if haystack[start:start + width] == needle:
                return start
            start += 1
        return -1
# -----------------------------------------------------------------
class Solution:
    def strStr(self, haystack, needle):
        """Delegate to str.find; an empty needle matches at index 0."""
        return 0 if not needle else haystack.find(needle)
998,864 | a2fac703505da20c75337ad76565a3c53f8d6f77 | __author__ = 'Lunzhy'
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager
from Submissions.IWCE2014 import *
## 5000, 10000, 15000 points
# Timing breakdown (seconds) per mesh size; "others" is whatever remains of
# the total after Poisson and drift-diffusion solve time.
# NOTE(review): 'possoin' looks like a typo for 'poisson' — kept as-is since
# renaming is cosmetic; the y-axis label 'Excution' is a runtime string and
# is left untouched here.
possoin_time = [515, 1367, 2025]
dd_time = [662, 2427, 4785]
total_time = [1294, 4030, 7151]
others_time = [total_time[i] - possoin_time[i] - dd_time[i] for i in range(0, 3)]
ind = np.arange(0.2, 1.6, 0.5)  # left edge of each bar group
width = 0.3
fig = plt.figure()
ax = fig.add_axes([0.13, 0.13, 0.8, 0.8])
ax.set_xlim(0, 1.7)
# Stacked bars: others at the bottom, then Poisson, then transport (DD).
b1 = ax.bar(ind, others_time, width, color='r', linewidth=0)
b2 = ax.bar(ind, possoin_time, width, bottom=others_time, color=r'#E6DACE', linewidth=0)
b3 = ax.bar(ind, dd_time, width, bottom=[others_time[i] + possoin_time[i] for i in range(0, 3)], color='b', linewidth=0)
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(['5000', '10000', '15000'])
legend = ax.legend(['Others', 'Poisson', 'Transport'], loc='upper left', prop={'size': 32})
ax.set_xlabel('Number of Vertices (#)')
ax.set_ylabel('Excution Time Elapsed (s)')
ax.set_yticks([0, 2000, 4000, 6000, 8000])
ax.set_yticklabels(['0', '2k', '4k', '6k', '8k'])
### boders
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(2)
### legend
legend_font = font_manager.FontProperties(family='times new roman', style='normal',
                                          size=26, weight='normal', stretch='normal')
for item in (legend.get_texts()):
    item.set_fontproperties(legend_font)
legend.set_frame_on(False)
### axis label
ticks_font = font_manager.FontProperties(family='times new roman', style='normal',
                                         size=24, weight='normal', stretch='normal')
labels_font = font_manager.FontProperties(family='times new roman', style='normal',
                                          size=26, weight='normal', stretch='normal')
for item in ([ax.xaxis.label, ax.yaxis.label] ):
    item.set_fontproperties(labels_font)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontproperties(ticks_font)
### axis tick
ax.xaxis.set_tick_params(which='major', width=2, size=0)
ax.yaxis.set_tick_params(which='major', width=2, size=0)
# plt.show()
# Save at high DPI into the shared figure folder (imported from IWCE2014).
fig_path = os.path.join(Save_Fig_Folder, 'timing')
fig.savefig(fig_path, dpi=1020, bbox_inches='tight', pad_inches=0.1)
|
998,865 | 5f903f2764f8a6e68d02eff1b0677e1fc87fad9b | import maya.cmds as mc
def getSGfromShader(shader=None):
    """Return the first shadingEngine connected to *shader*, or None when the
    shader is empty/missing or has no shading group."""
    if not shader or not mc.objExists(shader):
        return None
    connected = mc.listConnections(shader, d=True, et=True, t='shadingEngine')
    return connected[0] if connected else None
def assignObjectListToShader(objList=None, shader=None):
    """
    Assign the shader to the object list
    arguments:
        objList: list of objects or faces
        shader: shader node name; its shading group is resolved via
            getSGfromShader and the objects are force-added to that set
    """
    # assign selection to the shader
    shaderSG = getSGfromShader(shader)
    if objList:
        if shaderSG:
            # forceElement removes members from their previous shading group.
            mc.sets(objList, e=True, forceElement=shaderSG)
        else:
            print 'The provided shader didn\'t returned a shaderSG'
    else:
        print 'Please select one or more objects'
def assignSelectionToShader(shader=None):
    """Assign *shader* to the currently selected objects/faces, if any."""
    selection = mc.ls(sl=True, l=True)
    if selection:
        assignObjectListToShader(selection, shader)
assignSelectionToShader('lambert2') |
998,866 | 1c77d11406255b45dcee25fd294b13cdca97c20d | import os
import logging
import requests
from weather import get_weather, get_radar
from telegram.ext import Updater, CommandHandler
logging.basicConfig(
filename="log.txt",
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
token = os.environ["telegram_bot_token"]
def dog(bot, update):
    """Send a random dog picture (fetched from random.dog) to the chat."""
    payload = requests.get("https://random.dog/woof.json").json()
    bot.send_photo(chat_id=update.message.chat_id, photo=payload["url"])
def jolie(bot, update):
    """Send the bundled jolie image to the chat."""
    with open("./media/jolie.webp", "rb") as photo:
        bot.send_photo(chat_id=update.message.chat_id, photo=photo)
def weather(bot, update):
    """Send today's weather summary (summary, max, min) to the chat."""
    summary, tmax, tmin = get_weather()
    message = f"""Weather Summary for today: {summary},
    Max: {tmax},
    Min: {tmin}
    """
    bot.send_message(chat_id=update.message.chat_id, text=message)
def radar(bot, update):
    """Send the current rain-radar image to the chat."""
    path = get_radar()
    with open(path, "rb") as photo:
        bot.send_photo(chat_id=update.message.chat_id, photo=photo)
def main():
    """Register the bot's command handlers and poll until interrupted."""
    updater = Updater(token)
    dispatcher = updater.dispatcher
    # Same registration order as before: dog, jolie, weather, buien.
    for command, callback in (("dog", dog), ("jolie", jolie),
                              ("weather", weather), ("buien", radar)):
        dispatcher.add_handler(CommandHandler(command, callback))
    updater.start_polling()
    updater.idle()
if __name__ == "__main__":
main()
|
998,867 | 3b2a4730cb306d91e8f1cedc25bd176465ba30b6 | monday_temperatures = [9.1, 8.8, 7.5]
print(monday_temperatures)
# append adds one value to the end of the list in place.
monday_temperatures.append(8.1)
print(monday_temperatures)
# clear empties the list in place.
monday_temperatures.clear()
print(monday_temperatures)
monday_temperatures = [9.1, 8.8, 7.5]
print(monday_temperatures)
# index returns the position of the first matching value (raises if absent).
print(monday_temperatures.index(8.8))
# print(monday_temperatures.index(8.8, 2))
# __getitem__ is what the [] subscript syntax calls under the hood,
# so the next two lines print the same element.
print(monday_temperatures.__getitem__(1))
print(monday_temperatures[1])
998,868 | 5646606518b4cb0fbc600d5a3c6ffdab348a946c | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 09:49:17 2020
@author: Claudio Collado
"""
#Ejercicio 4.13
import random
import numpy as np
def error():
    """Draw one measurement error from Normal(mean=0, sigma=0.2)."""
    return random.normalvariate(0, 0.2)
N = 999
# N simulated body-temperature readings: 37.5 plus Gaussian noise.
temp = np.array([float(37.5 + error()) for i in range(N)])
np.save('Temperaturas',temp)
print(temp)
# Maximum
maximo = temp.max()
# Minimum
minimo = temp.min()
# Mean
promedio = temp.mean()
# Median
mediana = np.median(temp)
# Print the summary statistics (user-facing strings kept in Spanish).
print(f'El Maximo de temperatura es {maximo:.4f}.')
print(f'El Minimo de temperatura es {minimo:.4f}.')
print(f'El Promedio de temperatura es {promedio:.4f}.')
print(f'La mediana de temperatura es {mediana:.4f}.')
# Extra: quartiles
primer_cuartil = np.quantile(temp,0.25)
segundo_cuartil = np.quantile(temp,0.5)
tercer_cuartil = np.quantile(temp,0.75)
print(f'El 1° cuartil de temperatura es {primer_cuartil:.4f}.')
print(f'El 2° cuartil de temperatura es {segundo_cuartil:.4f}.')
print(f'El 3° cuartil de temperatura es {tercer_cuartil:.4f}.')
998,869 | c4d5d83231a4f67e44943d2fcd756aa8a8e2e27a | import pickle
from collections import defaultdict  # fix: `defaultdict` was used below without being imported (NameError)

# Dictionary words, one per line.
with open("garbled_email_dictionary.txt") as word_file:
    words = word_file.read().split("\n")

# Map each 5-char prefix with one wildcard position (e.g. "he*lo") to the
# list of words sharing that pattern.
wdict = defaultdict(list)
for w in words:
    print(w)
    for i, c in enumerate(w[:5]):
        letters = list(w[:5])
        letters[i] = "*"
        wdict["".join(letters)].append(w)
ddwdict = wdict
plainwdict = dict(wdict)
# Fix: pickle requires a binary file handle ("wb", not "w"); also close it.
with open("wdict.pkl", "wb") as out_file:
    pickle.dump(plainwdict, out_file)
|
998,870 | 405fe9b05ffd7712c89d75fed6b34f21337da4aa | import gzip
import json
from collections import Counter

# Stream the gzipped JSON-lines review dump and accumulate summary stats:
# per-star counts, unique reviewers, unique items.
with gzip.open('./models/dataset/Digital_Music_5.json.gz', 'rb') as g:
    data = {}
    star_counts = Counter()  # replaces five duplicated if-counters
    items = set()
    users = set()
    keys = ['reviewerID', 'asin', 'overall', 'unixReviewTime']  # hoisted: loop-invariant
    for i, line in enumerate(g):
        record = json.loads(line)
        record['overall'] = int(record['overall'])
        # Keep only the fields we care about, indexed by line number.
        data[i] = {k: record[k] for k in keys}
        # Unique users and items (asin = Amazon product id).
        users.add(data[i]["reviewerID"])
        items.add(data[i]["asin"])
        star_counts[data[i]["overall"]] += 1
    # Output is identical to the original per-star print statements.
    print("Total length of data %d" % len(data))
    print("Total five star ratings %d" % star_counts[5])
    print("Total four star ratings %d" % star_counts[4])
    print("Total three star ratings %d" % star_counts[3])
    print("Total two star ratings %d" % star_counts[2])
    print("Total one star ratings %d" % star_counts[1])
    print("Total number of unique users %d" % len(users))
    print("Total number of unique items %d" % len(items))
|
998,871 | 17b44f215f7d2903d2469e378027b5532e6e650b | from django.db import models as db
from django.utils.importlib import import_module
from django.contrib.contenttypes.fields import GenericRelation
# relation constants
# NOTE(review): values start at 30, presumably to stay clear of the simple
# field-type ids (1-15) defined further down this module — confirm.
FOREIGN_KEY = 30
MANY_TO_MANY = 31
ONE_TO_ONE = 32
GENERIC = 33
# relations choices for type select box
RELATION_CHOICES = (
    (FOREIGN_KEY, 'Foreign Key'),
    (MANY_TO_MANY, 'Many to Many'),
    (ONE_TO_ONE, 'One to One'),
    (GENERIC, 'Generic Relation')
)
# forward lookup to associate relation constant to django field type
RELATED_FIELD_MAP = {
    FOREIGN_KEY: db.ForeignKey,
    MANY_TO_MANY: db.ManyToManyField,
    ONE_TO_ONE: db.OneToOneField,
    GENERIC: GenericRelation
}
# reverse lookup django relation type -> built-in relation constant for data wrappers to create virtual fields
REVERSE_RELATION_MAP = dict(zip(RELATED_FIELD_MAP.values(), RELATED_FIELD_MAP.keys()))
class FieldRegistryData(db.Model):
    """
    Data store to save ids for custom fields so that they always get the same id
    """
    # Display name of the custom field type (used in choice lists).
    name = db.CharField(max_length=64)
    # Dotted module path where the field class lives.
    module = db.CharField(max_length=128)
    # Class name inside `module` implementing the field.
    classname = db.CharField(max_length=64)
    # Reverse accessor name used to reach the field's data from a record.
    accessor = db.CharField(max_length=32)
class FieldRegistry(object):
    """
    Registry for dynamic field types. Generates choices fields for field types and generates mappings between
    django fields and dynamic fields.
    """
    def __init__(self):
        super(FieldRegistry, self).__init__()
        # Cache of field-type id -> imported field class (imports are expensive).
        self._db_field_map = {}
    def __iter__(self):
        # Iterating the registry yields (id, name) choice tuples.
        return iter(self.field_choices)
    @property
    def field_map(self):
        """Map of field-type id -> reverse accessor name."""
        return {field.id: field.accessor for field in FieldRegistryData.objects.all()}
    @property
    def field_choices(self):
        """(id, name) pairs suitable as a Django `choices` argument."""
        try:
            return [(field.id, field.name) for field in FieldRegistryData.objects.all()]
        except Exception:
            # Fix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate. The fallback itself is deliberate best-effort:
            # the table may not exist yet (e.g. during initial migrations).
            return []
    def get_type(self, type):
        """Look up a registry row by its display name."""
        return FieldRegistryData.objects.get(name=type)
    def field_type(self, type):
        """Return (and cache) the field class registered under id *type*."""
        if type not in self._db_field_map:
            field_data = FieldRegistryData.objects.get(id=type)
            try:
                module = import_module(field_data.module)
            except ImportError:
                # Fall back to the project-relative `apps.` prefix.
                module = import_module('apps.{0}'.format(field_data.module))
            field_type = getattr(module, field_data.classname)
            # cache field types since it's an expensive op
            self._db_field_map[type] = field_type
        return self._db_field_map[type]
    def register(self, name, path):
        """Import the field class at dotted *path* and register it as *name*,
        creating (or reusing) its stable FieldRegistryData row."""
        parts = path.split('.')
        module_path = '.'.join(parts[:-1])
        classname = parts[-1]
        try:
            module = import_module(module_path)
        except ImportError:
            module = import_module('apps.{0}'.format(module_path))
        field_type = getattr(module, classname)
        field = field_type._meta.get_field_by_name('field')[0]
        accessor = field.rel.related_name
        field_data, created = FieldRegistryData.objects.get_or_create(name=name, accessor=accessor, module=module_path, classname=classname)
        self._db_field_map[field_data.id] = field_type
field_registry = FieldRegistry()
# Built-in (non-relation) field-type constants.
BIG_INT = 8
BOOLEAN = 6
CHAR = 1
DATE = 11
DATETIME = 13
DECIMAL = 10
EMAIL = 4
FILE = 14
FLOAT = 9
IMAGE = 15
INT = 7
SLUG = 3
TEXT = 2
TIME = 12
URL = 5
# reverse lookup django field type -> built-in type constant for data wrappers to create virtual fields
REVERSE_FIELD_MAP = {
    db.CharField: CHAR,
    db.TextField: TEXT,
    db.SlugField: SLUG,
    db.EmailField: EMAIL,
    db.URLField: URL,
    db.BooleanField: BOOLEAN,
    db.IntegerField: INT,
    db.FloatField: FLOAT,
    db.BigIntegerField: BIG_INT,
    db.DecimalField: DECIMAL,
    db.DateField: DATE,
    db.TimeField: TIME,
    db.DateTimeField: DATETIME,
    db.FileField: FILE,
    db.ImageField: IMAGE
}
class UploadLocation(db.Model):
    """
    Database model to represent locations that file and image fields can upload to
    """
    class Meta:
        verbose_name = 'Upload location'
    def __unicode__(self):
        # Python 2 string representation (this module uses __unicode__ throughout).
        return self.path
    # Relative upload directory for FileField/ImageField targets.
    path = db.CharField(max_length=255, default='files/')
class BaseField(db.Model):
    """
    Abstract database model to represent dynamodel fields and relationships
    """
    # NOTE(review): the docstring says "abstract" but no Meta.abstract = True
    # is set here — confirm whether a concrete BaseField table is intended.
    def __unicode__(self):
        return self.name
    model = db.ForeignKey('Model')
    name = db.CharField(max_length=64)
    verbose_name = db.CharField(max_length=64, blank=True, verbose_name='Caption')
    help_text = db.CharField(max_length=255, blank=True, null=True)
    required = db.BooleanField(default=False)
    unique = db.BooleanField(default=False)
    sort_order = db.IntegerField(blank=True, null=True)
    def _data_equality(self, other):
        """
        Compare data between this record and another
        (Django model equality only checks that the class is the same)

        Raises ValueError when `other` is a different model class;
        returns True only when every concrete field value matches.
        """
        if self.__class__ is not other.__class__:
            raise ValueError('Both classes must be the same')
        for field in self._meta.fields:
            if getattr(self, field.name, None) != getattr(other, field.name, None):
                return False
        return True
    @property
    def specific(self):
        # Downcast to the concrete subtype: a Field row exposes `.field`,
        # otherwise fall back to the `.relation` accessor.
        try:
            return self.field
        except AttributeError:
            return self.relation
class Field(BaseField):
    """
    Database model to represent dynamodel fields
    """
    class Meta:
        verbose_name = 'Field'
        #unique_together = ['model', 'name']

    # Integer code from the field registry (CHAR, INT, ...).
    type = db.IntegerField(choices=field_registry)
    primary_key = db.BooleanField(default=False, verbose_name='PK')
    index = db.BooleanField(default=False)
    # Virtual fields wrap hard-coded models and do no schema management.
    virtual = db.BooleanField(default=False)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """
        Saves the current instance.

        Besides persisting this row, keeps the dynamic model's runtime class
        and the underlying database column in sync (add/alter column).
        """
        # virtual fields (for wrappers around hard coded models) don't do db management
        if self.virtual:
            super(Field, self).save(force_insert, force_update, using, update_fields)
            return
        exists = self.id
        if exists:
            existing = Field.objects.get(pk=self.id)
            # Only touch the schema when the stored data actually changed.
            if not self._data_equality(existing):
                super(Field, self).save(force_insert, force_update, using, update_fields)
                # if the field type has changed we need to delete the old subfield data and create
                # the appropriate new subtype
                if existing.type != self.type:
                    existing.specific.delete()
                    # create subfield data settings
                    field_type = field_registry.field_type(self.type)
                    field = field_type(field=self)
                    field.save()
                existing_field = self.model.model._meta.get_field_by_name(existing.name)[0]
                new_field = self._db_field()
                # make the required changes to the database
                self.model._model_remove_field(existing)
                self.model._model_add_field(self)
                self.model.alter_field(existing_field, new_field)
        else:
            super(Field, self).save(force_insert, force_update, using, update_fields)
            # create subfield data settings
            field_type = field_registry.field_type(self.type)
            field = field_type(field=self)
            field.save()
            self.model.add_field(self)
            # Default the sort order to the primary key and persist it.
            if not self.sort_order:
                self.sort_order = self.id
                self.save()

    def delete(self, using=None):
        """
        Deletes the current instance
        """
        # Drop the column/runtime field before deleting the metadata rows.
        self.model.remove_field(self)
        field_attr = field_registry.field_map[self.type]
        specific = getattr(self, field_attr, None)
        specific.delete()
        super(Field, self).delete(using)

    @property
    def specific(self):
        """
        Return the subfield for this field
        """
        field_attr = field_registry.field_map[self.type]
        return getattr(self, field_attr, None)

    def _db_field(self):
        """
        Internal function to generate generic field attributes
        """
        # Common kwargs shared by every field type; the subfield adds its own.
        return self.specific._db_field({
            'verbose_name': self.verbose_name,
            'help_text': self.help_text,
            'blank': not self.required,
            'null': not self.required,
            'unique': self.unique,
            'primary_key': self.primary_key,
            'db_index': self.index or None,
        })
class Relation(BaseField):
    """
    Database model to represent dynamodel relations
    """
    class Meta:
        verbose_name = 'Relation'
        # unique_together = ['model', 'name']

    type = db.IntegerField(choices=RELATION_CHOICES)
    related_model = db.ForeignKey('Model', related_name='reverse')
    # Name used for the reverse accessor on the related model.
    reverse_name = db.CharField(max_length=64, blank=True)
    # Virtual relations wrap hard-coded models and skip db management.
    virtual = db.BooleanField(default=False)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """
        Saves the current instance.

        Keeps the dynamic model's runtime class and db schema in sync with
        this relation's metadata.
        """
        # handle wrappers around hard coded models separately
        if self.virtual:
            super(Relation, self).save(force_insert, force_update, using, update_fields)
            return
        if not self.reverse_name:
            self.reverse_name = '{0}_{1}'.format(self.model.name, self.name)
        exists = self.id
        if exists:
            existing = Relation.objects.get(pk=self.pk)
            if not self._data_equality(existing):
                super(Relation, self).save(force_insert, force_update, using, update_fields)
                existing_field = self.model.model._meta.get_field_by_name(existing.name)[0]
                new_field = self._db_field()
                self.model._model_remove_field(existing)
                self.model._model_add_field(self)
                self.model.alter_field(existing_field, new_field)
        else:
            super(Relation, self).save(force_insert, force_update, using, update_fields)
            # Generic relations have no db column of their own, so only the
            # runtime model is updated for them.
            if self.type != GENERIC:
                self.model.add_field(self)
            else:
                self.model._model_add_field(self)

    def delete(self, using=None):
        """
        Deletes the current instance
        """
        self.model.remove_field(self)

    def _db_field(self):
        """
        Internal function to generate generic relation attributes
        """
        type = RELATED_FIELD_MAP[self.type]
        attrs = {
            'verbose_name': self.verbose_name,
            'help_text': self.help_text,
            'blank': not self.required,
            'null': not self.required,
            'unique': self.unique,
            'related_name': self.reverse_name
        }
        # special handling for self referential relationships
        if self.related_model.name == self.name:
            return type('self', **attrs)
        else:
            try:
                return type(self.related_model.model, **attrs if self.type != GENERIC else {})
            except ValueError:
                # if related model has not been evaluated by django yet, we need to expose it, but need to be careful
                # of mutually recursive relationships between models creating runtime exceptions, so create a model
                # sans relations, create the relationship then contribute the other models relations
                model = self.related_model._create_deferred_relation_model()
                relation_field = type(self.related_model.name, **attrs if self.type != GENERIC else {})
                self.related_model._contribute_relations(model)
                return relation_field
class ExtendedFieldOption(db.Model):
    """
    Base subfield options and save mechanics. Abstract model.

    Concrete subclasses (CharField, IntegerField, ...) hold the type-specific
    options for one Field row and rebuild the db column when they change.
    """
    class Meta:
        abstract = True

    def __unicode__(self):
        return '{0} : {1}'.format(self.field.model.name, self.field.name)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # First save (no pk yet): nothing to migrate, just persist.
        if not self.id:
            return super(ExtendedFieldOption, self).save(force_insert, force_update, using, update_fields)
        model = self.field.model.model
        existing = Field.objects.get(pk=self.field.pk)
        existing_field = model._meta.get_field_by_name(existing.name)[0]
        super(ExtendedFieldOption, self).save(force_insert, force_update, using, update_fields)
        # Rebuild the runtime field and alter the db column to match.
        self.field.model._model_remove_field(existing)
        self.field.model._model_add_field(self.field)
        new_field = self.field._db_field()
        self.field.model.alter_field(existing_field, new_field)
class CharField(ExtendedFieldOption):
    """
    Database model for character fields on dynamodels
    """
    class Meta:
        verbose_name = 'Character field'

    field = db.OneToOneField('Field', related_name='charfield')
    max_length = db.IntegerField(default=50)
    choices = db.TextField(blank=True, help_text='Enter one choice per line')
    default = db.CharField(max_length=255, blank=True)

    def _db_field(self, attrs):
        # Merge char-specific options into *attrs* and build the Django field.
        if self.choices:
            # One choice per line; each value doubles as its display label.
            choices = self.choices.replace('\r', '').split('\n')
            choices = zip(choices, choices)
            attrs ['choices'] = choices
        attrs.update({
            'max_length': self.max_length,
            'default': self.default or None
        })
        return db.CharField(**attrs)
class TextField(ExtendedFieldOption):
    """
    Database model for text fields on dynamodels
    """
    class Meta:
        verbose_name = 'Text field'

    field = db.OneToOneField('Field', related_name='textfield')
    max_length = db.IntegerField(blank=True, null=True)
    default = db.TextField(blank=True)

    def _db_field(self, attrs):
        # Merge text-specific options into *attrs* and build the Django field.
        attrs.update({
            'max_length': self.max_length or None,
            'default': self.default or None
        })
        return db.TextField(**attrs)
class SlugField(ExtendedFieldOption):
    """
    Database model for slug fields on dynamodels
    """
    class Meta:
        verbose_name = 'Slug field'

    field = db.OneToOneField('Field', related_name='slugfield')
    max_length = db.IntegerField(default=50)
    default = db.CharField(max_length=255, blank=True)
    # Optional source char field the slug is derived from.
    populate_from = db.ForeignKey('CharField', blank=True, null=True, related_name='slugs')

    def _db_field(self, attrs):
        # Merge slug-specific options into *attrs* and build the Django field.
        attrs.update({
            'max_length': self.max_length or None,
            'default': self.default or None
        })
        return db.SlugField(**attrs)
class EmailField(ExtendedFieldOption):
    """
    Database model for email fields on dynamodels
    """
    class Meta:
        verbose_name = 'Email field'

    field = db.OneToOneField('Field', related_name='emailfield')
    max_length = db.IntegerField(default=75)
    default = db.EmailField(blank=True)

    def _db_field(self, attrs):
        # Merge email-specific options into *attrs* and build the Django field.
        attrs.update({
            'max_length': self.max_length or None,
            'default': self.default or None
        })
        return db.EmailField(**attrs)
class UrlField(ExtendedFieldOption):
    """
    Database model for URL fields on dynamodels
    """
    class Meta:
        verbose_name = 'URL field'

    field = db.OneToOneField('Field', related_name='urlfield')
    max_length = db.IntegerField(default=200)
    default = db.URLField(blank=True)

    def _db_field(self, attrs):
        # Merge URL-specific options into *attrs* and build the Django field.
        attrs.update({
            'max_length': self.max_length or None,
            'default': self.default or None
        })
        return db.URLField(**attrs)
class BooleanField(ExtendedFieldOption):
    """
    Database model for boolean fields on dynamodels
    """
    class Meta:
        verbose_name = 'Boolean field'

    field = db.OneToOneField('Field', related_name='booleanfield')
    default = db.BooleanField(default=False)

    def _db_field(self, attrs):
        # Merge boolean-specific options into *attrs* and build the Django field.
        # BUG FIX: the previous ``self.default or None`` collapsed an explicit
        # default of False to None (i.e. "no default"); pass the stored value
        # through unchanged instead.
        attrs.update({
            'default': self.default
        })
        return db.BooleanField(**attrs)
class IntegerField(ExtendedFieldOption):
    """
    Database model for integer fields on dynamodels
    """
    class Meta:
        verbose_name = 'Integer field'

    field = db.OneToOneField('Field', related_name='integerfield')
    default = db.IntegerField(blank=True, null=True)

    def _db_field(self, attrs):
        # Merge integer-specific options into *attrs* and build the Django field.
        # BUG FIX: ``self.default or None`` collapsed an explicit default of 0
        # to None; pass the stored value through unchanged (None when unset).
        attrs.update({
            'default': self.default
        })
        return db.IntegerField(**attrs)
class BigIntegerField(ExtendedFieldOption):
    """
    Database model for big integer fields on dynamodels
    """
    class Meta:
        verbose_name = 'Big integer field'

    field = db.OneToOneField('Field', related_name='bigintegerfield')
    default = db.BigIntegerField(blank=True, null=True)

    def _db_field(self, attrs):
        # Merge big-integer-specific options into *attrs* and build the field.
        # BUG FIX: ``self.default or None`` collapsed an explicit default of 0
        # to None; pass the stored value through unchanged (None when unset).
        attrs.update({
            'default': self.default
        })
        return db.BigIntegerField(**attrs)
class FloatField(ExtendedFieldOption):
    """
    Database model for float fields on dynamodels
    """
    class Meta:
        verbose_name = 'Float field'

    field = db.OneToOneField('Field', related_name='floatfield')
    default = db.FloatField(blank=True, null=True)

    def _db_field(self, attrs):
        # Merge float-specific options into *attrs* and build the Django field.
        # BUG FIX: ``self.default or None`` collapsed an explicit default of 0.0
        # to None; pass the stored value through unchanged (None when unset).
        attrs.update({
            'default': self.default
        })
        return db.FloatField(**attrs)
class DecimalField(ExtendedFieldOption):
    """
    Database model for decimal fields on dynamodels
    """
    class Meta:
        verbose_name = 'Decimal field'

    field = db.OneToOneField('Field', related_name='decimalfield')
    max_digits = db.IntegerField()
    decimal_places = db.IntegerField()
    default = db.DecimalField(max_digits=20, decimal_places=20)

    def _db_field(self, attrs):
        # Merge decimal-specific options into *attrs* and build the Django field.
        # BUG FIX: ``self.default or None`` collapsed an explicit default of
        # Decimal('0') to None; pass the stored value through unchanged.
        attrs.update({
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'default': self.default
        })
        return db.DecimalField(**attrs)
class DateField(ExtendedFieldOption):
    """
    Database model for date fields on dynamodels
    """
    class Meta:
        verbose_name = 'Date field'

    field = db.OneToOneField('Field', related_name='datefield')
    auto_now = db.BooleanField(default=False, help_text='Set this field to current date when record is saved')
    auto_now_add = db.BooleanField(default=False, help_text='Set this field to current date when record is created')
    default = db.DateField(blank=True, null=True)

    def _db_field(self, attrs):
        # Merge date-specific options into *attrs* and build the Django field.
        attrs.update({
            'auto_now': self.auto_now or False,
            'auto_now_add': self.auto_now_add or False,
            'default': self.default or None
        })
        return db.DateField(**attrs)
class TimeField(ExtendedFieldOption):
    """
    Database model for time fields on dynamodels
    """
    class Meta:
        verbose_name = 'Time field'

    field = db.OneToOneField('Field', related_name='timefield')
    default = db.TimeField(blank=True, null=True)

    def _db_field(self, attrs):
        # Merge time-specific options into *attrs* and build the Django field.
        attrs.update({
            'default': self.default or None
        })
        return db.TimeField(**attrs)
class DateTimeField(ExtendedFieldOption):
    """
    Database model for date/time fields on dynamodels
    """
    class Meta:
        verbose_name = 'Date/time field'

    field = db.OneToOneField('Field', related_name='datetimefield')
    auto_now = db.BooleanField(default=False, help_text='Set this field to current date/time when record is saved')
    auto_now_add = db.BooleanField(default=False, help_text='Set this field to current date/time when record is created')
    default = db.DateTimeField(blank=True, null=True)

    def _db_field(self, attrs):
        # Merge datetime-specific options into *attrs* and build the field.
        attrs.update({
            'auto_now': self.auto_now or False,
            'auto_now_add': self.auto_now_add or False,
            'default': self.default or None
        })
        return db.DateTimeField(**attrs)
class FileField(ExtendedFieldOption):
    """
    Database model for file fields on dynamodels
    """
    class Meta:
        verbose_name = 'File field'

    # Upload target; default=1 assumes an initial UploadLocation fixture row.
    field = db.OneToOneField('Field', related_name='filefield')
    upload_to = db.ForeignKey('UploadLocation', related_name='files', default=1)
    max_length = db.IntegerField(default=100)

    def _db_field(self, attrs):
        # Merge file-specific options into *attrs* and build the Django field.
        attrs.update({
            'upload_to': self.upload_to.path,
            'max_length': self.max_length
        })
        return db.FileField(**attrs)
class ImageField(ExtendedFieldOption):
    """
    Database model for image fields on dynamodels
    """
    class Meta:
        verbose_name = 'Image field'

    # Upload target; default=1 assumes an initial UploadLocation fixture row.
    field = db.OneToOneField('Field', related_name='imagefield')
    upload_to = db.ForeignKey('UploadLocation', related_name='images', default=1)
    max_length = db.IntegerField(default=100)

    def _db_field(self, attrs):
        # Merge image-specific options into *attrs* and build the Django field.
        attrs.update({
            'upload_to': self.upload_to.path,
            'max_length': self.max_length
        })
        return db.ImageField(**attrs)
|
998,872 | ae4f7003a7e20a967a24215d52432cfc5ad25ceb | import dash_core_components as dcc
import dash_html_components as html
from data_reader import *
from Sentiment import *
import dash_table
from graphs import *
import base64
def no_with_comma(n):
    """Format *n* with thousands separators, e.g. 1234567 -> '1,234,567'."""
    return f"{n:,}"
def no_to_percentage(n):
    """Format *n* as a percentage string with two decimals, e.g. 12.3456 -> '12.35%'."""
    return f"{n:.2f}%"
# For individual boxes at top like total active cases, recovered etc.
colors = {
    'background': '#282b38',
    #'text': '#7FDBFF'
}

# KPI card style (four cards per row on the Home/Sentiment tabs).
mini_container_style = {
    "border-radius": "10px",
    "background-color": "#f9f9f9",
    "margin-left": "40px",
    "margin-right": "40px",
    "padding": "0%",
    "position": "relative",
    # 'box-shadow': '2px 2px 2px lightgrey',
    "width": "25%",
    "border-style": "solid",
    "border-width": "2px",
    "border-color": "black",
}

# Wider KPI card variant (two cards per row on the Churn Data tab).
tab2_mini_container_style = {
    "border-radius": "10px",
    "background-color": "#f9f9f9",
    "margin-left": "40px",
    "margin-right": "40px",
    "padding": "0%",
    "position": "relative",
    # 'box-shadow': '2px 2px 2px lightgrey',
    "width": "50%",
    "border-style": "solid",
    "border-width": "2px",
    "border-color": "black",
}

# For div containg above boxex
whole_container = {
    "display": "flex",
    "text-align": "center",
}

# Common styling for tab headers (normal and selected states).
tab_style = {"fontWeight": "bold", "font-size": "22px"}
tab_selected_style = {"fontWeight": "bold", "font-size": "22px"}
def home_tab_layout():
    """Layout for the Home tab: four KPI cards (active/total customers, churn
    rate, monthly revenue) followed by five overview graphs (fig, fig2..fig5
    come from the star import of ``graphs``)."""
    return html.Div(
        [
            html.Br(),
            html.Br(),
            html.H1(
                children="Churn Analysis-Dashboard",
                style={"color": "black", "textAlign": "center"},
            ),
            # KPI card row.
            html.Div(
                [
                    html.Div(
                        [
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Active Customer",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(active_customer),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                            # html.Div([html.P("<b>Active Customer</b>"),html.P(no_with_comma(active_customer))],style = mini_container_style),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Total Customer",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(total_customer),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Churn Rate", style={"font-size": "25px"}
                                        )
                                    ),
                                    html.P(
                                        no_to_percentage(churn_rate),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Monthly revenue",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(expected_monthly_income),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                        ],
                        style=whole_container,
                    )
                ],
                className="row",
            ),
            # Graph rows: two side-by-side, one full-width, two side-by-side.
            html.Div(
                children=[
                    html.Div(
                        [
                            html.Div(
                                children=[
                                    dcc.Graph(
                                        id="bar-graph2",
                                        figure=fig5,
                                        style={
                                            "display": "inline-block",
                                            "width": "50%",
                                            "text-align": "center",
                                        },
                                    ),
                                    dcc.Graph(
                                        id="bar-graph3",
                                        figure=fig3,
                                        style={
                                            "display": "inline-block",
                                            "width": "50%",
                                        },
                                    ),
                                ],
                                style={"display": "flex"},
                            )
                        ]
                    ),
                    html.Div(
                        [
                            dcc.Graph(id="bar-graph1", figure=fig),
                        ]
                    ),
                    html.Div(
                        [
                            html.Div(
                                children=[
                                    dcc.Graph(
                                        id="bar-graph4",
                                        figure=fig2,
                                        style={
                                            "display": "inline-block",
                                            "width": "50%",
                                        },
                                    ),
                                    dcc.Graph(
                                        id="bar-graph5",
                                        figure=fig4,
                                        style={
                                            "display": "inline-block",
                                            "width": "50%",
                                        },
                                    ),
                                ],
                                style={"display": "flex"},
                            )
                        ]
                    ),
                ]
            ),
        ]
    )
def churn_data_tab():
    """Layout for the Churn Data tab: two KPI cards (predicted churn count /
    percentage), a dataset-selector dropdown, a column-selector dropdown
    (populated by callback) and an empty table container filled by callback."""
    return html.Div(
        [
            html.Br(),
            html.Br(),
            html.H1(
                children="Churn Data", style={"color": "black", "textAlign": "center"}
            ),
            html.Div(
                [
                    html.Div(
                        [
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Predicted Churn Count in percentage:",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_to_percentage(pred_churn_count_percent),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=tab2_mini_container_style,
                            ),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Predicted Churn Count",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(pred_churn_list_true),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=tab2_mini_container_style,
                            ),
                        ],
                        style=whole_container,
                    )
                ],
                className="row",
            ),
            html.Br(),
            html.H6(
                children=["Select Dataset to be Displayed"],
                style={"text-align": "center", "font-size": "20px", "color": "black", "font-weight":"bold"},
            ),
            # Dataset selector; the chosen value drives a callback that fills
            # both the column dropdown below and the table container.
            html.Div(
                [
                    dcc.Dropdown(
                        id="dropdown-table",
                        options=[
                            {"label": "Train Data", "value": "train"},
                            {"label": "Predicted Results", "value": "pred_res"},
                            {"label": "Test Data", "value": "test"},
                            {"label": "Churn Customers", "value": "churn_cust"},
                            # {"label": "Churn Changes", "value": "churn_changes"},
                        ],
                        value="pred_res",
                    ),
                ]
            ),
            # html.Div(id='dd-output-container'),
            html.Br(),
            html.H6(
                children=["Select Columns to Display In table"],
                style={"text-align": "center", "font-size": "20px", "color": "black", "font-weight":"bold"},
            ),
            # Column multi-select; options are injected by a callback.
            html.Div(
                [
                    dcc.Dropdown(
                        id="dropdown-column-select",
                        options=[],
                        multi=True,
                    )
                ]
            ),
            html.Br(),
            html.Br(),
            html.Div(id="table-div")
            # html.Div([
            #     dash_table.DataTable(
            #         id='table',
            #         columns=[{"name": i, "id": i}
            #                  for i in data_pred.columns],
            #         data=data_pred.to_dict('records'),
            #         page_size=15,
            #         fixed_rows={'headers': True},
            #     )
            # ]),
        ]
    )
def serve_roc_curve():
    """Build a plotly ROC-curve figure for the current model predictions.

    Relies on module-level ``df_churn`` (true labels) and ``preds`` (scores)
    plus ``roc_curve``/``roc_auc_score``/``go`` coming in via star imports.
    """
    fpr, tpr, threshold = roc_curve(df_churn, preds)
    auc_score = roc_auc_score(y_true=df_churn, y_score=preds)
    trace0 = go.Scatter(
        x=fpr, y=tpr, mode="lines", name="Test Data", marker={"color": "#13c6e9"}
    )
    # Dark-themed layout to match the rest of the dashboard.
    layout = go.Layout(
        title=f"ROC Curve (AUC = {auc_score:.3f})",
        xaxis=dict(title="False Positive Rate", gridcolor="#2f3445"),
        yaxis=dict(title="True Positive Rate", gridcolor="#2f3445"),
        legend=dict(x=0, y=1.05, orientation="h"),
        margin=dict(l=100, r=10, t=25, b=40),
        plot_bgcolor="#282b38",
        paper_bgcolor="#282b38",
        font={"color": "#a5b1cd"},
    )
    data0 = [trace0]
    figure = go.Figure(data=data0, layout=layout)
    return figure
def serve_pie_confusion_matrix():
    """Build a plotly pie chart of the confusion matrix (TP/FN/FP/TN).

    Relies on module-level ``df_churn`` (true labels) and ``preds`` (predicted
    labels) plus ``confusion_matrix``/``go`` coming in via star imports.
    """
    matrix = confusion_matrix(y_true=df_churn, y_pred=preds)
    tn, fp, fn, tp = matrix.ravel()
    values = [tp, fn, fp, tn]
    label_text = ["True Positive", "False Negative", "False Positive", "True Negative"]
    labels = ["TP", "FN", "FP", "TN"]
    # CLEANUP: removed the unused `blue`/`red` colour scales that were built
    # from colorlover (`cl`) but never referenced.
    colors = ["#2c3e50", "#bdc3c7", "#f39c12", "#e74c3c", "#7ccc63"]
    trace0 = go.Pie(
        labels=label_text,
        values=values,
        hoverinfo="label+value+percent",
        textinfo="text+value",
        text=labels,
        sort=False,
        marker=dict(colors=colors),
        insidetextfont={"color": "white"},
        rotation=90,
    )
    layout = go.Layout(
        title="Confusion Matrix",
        margin=dict(l=50, r=50, t=100, b=10),
        legend=dict(bgcolor="#282b38", font={"color": "#a5b1cd"}, orientation="h"),
        plot_bgcolor="#282b38",
        paper_bgcolor="#282b38",
        font={"color": "#a5b1cd"},
    )
    data0 = [trace0]
    figure = go.Figure(data=data0, layout=layout)
    return figure
def churn_model_tab_layout():
    """Layout for the Churn Model tab: model selector, predicted-churn bar
    chart, plus the ROC curve and confusion-matrix pie built above."""
    roc_figure = serve_roc_curve()
    confusion_figure = serve_pie_confusion_matrix()
    return html.Div(
        [
            html.Br(),
            html.Br(),
            html.H6(
                children=["Select ML Model"],
                style={"text-align": "center", "font-size": "20px", "color": "black", "font-weight":"bold"},
            ),
            # Only one model is wired up today; the dropdown keeps the UI
            # ready for more.
            dcc.Dropdown(
                id="model-select-dropdown",
                options=[
                    {"label": "xgboost", "value": "xgboost"},
                ],
                value="xgboost",
                clearable=False,
            ),
            html.Br(),
            html.Br(),
            html.Div(
                children = [
                    html.Center(
                        children = [
                            dcc.Graph(
                                id="bar-pred-churn",
                                figure=fig_pred_churn,
                                style={
                                    "display": "inline-block",
                                    "width": "50%",
                                },
                            ),
                        ])
                ]
            ),
            html.Br(),
            html.Br(),
            # ROC curve and confusion matrix side by side.
            html.Div(
                id="graphs-container",
                children=[
                    html.Div(
                        children=[
                            dcc.Loading(
                                className="graph-wrapper",
                                children=dcc.Graph(
                                    id="graph-line-roc-curve", figure=roc_figure
                                ),
                            )
                        ],
                        style={
                            "display": "inline-block",
                            "float": "left",
                            "width": "50%",
                        },
                    ),
                    html.Div(
                        children=[
                            dcc.Loading(
                                className="graph-wrapper",
                                children=dcc.Graph(
                                    id="graph-pie-confusion-matrix",
                                    figure=confusion_figure,
                                ),
                            ),
                        ],
                        style={
                            "display": "inline-block",
                            "float": "left",
                            "width": "50%",
                        },
                    ),
                ],
            ),
        ]
    )
# def image_display_func():
# image_filename = './data/wordcloud-img.png'
# encoded_image = base64.b64encode(open(image_filename, 'rb').read())
# return 'data:image/png;base64,{}'.format(encoded_image)
def sentiment_tab():
    """Layout for the Sentiment Analysis tab: review-count KPI cards,
    sentiment pie/bar/wordcloud graphs and a review-dataset selector whose
    table is filled in by callback."""
    return html.Div(
        [
            html.Br(),
            html.Br(),
            html.H1(
                children="Sentiment Analysis", style={"color": "black", "textAlign": "center"}
            ),
            # KPI card row: total / positive / negative / neutral counts.
            html.Div(
                [
                    html.Div(
                        [
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Review Count",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(review_count),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                            # html.Div([html.P("<b>Active Customer</b>"),html.P(no_with_comma(active_customer))],style = mini_container_style),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Positive Count",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(positive),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Negative Count", style={"font-size": "25px"}
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(negative),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                            html.Div(
                                [
                                    html.P(
                                        html.B(
                                            "Neutral Count",
                                            style={"font-size": "25px"},
                                        )
                                    ),
                                    html.P(
                                        no_with_comma(neutral),
                                        style={"font-size": "22px"},
                                    ),
                                ],
                                style=mini_container_style,
                            ),
                        ],
                        style=whole_container,
                    )
                ],
                className="row",
            ),
            # Sentiment pie chart and horizontal bar chart side by side.
            html.Div(
                children=[
                    html.Div(
                        [
                            html.Div(
                                children=[
                                    dcc.Graph(
                                        id="sg1",
                                        figure=senti_pie,
                                        style={
                                            "display": "inline-block",
                                            "width": "50%",
                                            "text-align": "center",
                                        },
                                    ),
                                    dcc.Graph(
                                        id="sg2",
                                        figure=senti_barh,
                                        style={
                                            "display": "inline-block",
                                            "width": "50%",
                                        },
                                    ),
                                ],
                                style={"display": "flex"},
                            )
                        ]
                    ),
                ]
            ),
            # Full-width word-frequency graph.
            html.Div(
                [
                    html.Div(
                        children=[
                            dcc.Graph(
                                id="sg3",
                                figure=senti_word,
                                style={
                                    "display": "inline-block",
                                    "width": "100%",
                                },
                            ),
                        ],
                        style={"display": "flex"},
                    ),
                ]
            ),
            html.Br(),
            html.H6(
                children=["Select Dataset to be Displayed"],
                style={"text-align": "center", "font-size": "20px", "color": "black", "font-weight":"bold"},
            ),
            # Review-subset selector; a callback renders the matching table
            # into `table-div_r` below.
            html.Div(
                [
                    dcc.Dropdown(
                        id="dropdown-table1",
                        options=[
                            {"label": "Positive Reviews", "value": "positive_r"},
                            {"label": "Negative Reviews", "value": "negative_r"},
                            {"label": "Neutral Reviews", "value": "neutral_r"},
                        ],
                        value="neutral_r",
                    ),
                ]
            ),
            html.Br(),
            html.Br(),
            html.Div(id="table-div_r")
        ]
    )
def tab_layout():
    """Top-level layout: a dcc.Tabs wrapper that assembles the four tab
    layouts defined above (Home, Churn Data, Churn Model, Sentiment)."""
    return html.Div(
        [
            dcc.Tabs(
                id="tabs-example",
                value="tab-1",
                children=[
                    # Home Tab
                    dcc.Tab(
                        label="Home Tab",
                        value="tab-1",
                        style=tab_style,
                        selected_style=tab_selected_style,
                        children=[home_tab_layout()],
                    ),
                    # Churn Predictions tab
                    dcc.Tab(
                        label="Churn Data",
                        value="tab-2",
                        style=tab_style,
                        selected_style=tab_selected_style,
                        children=[churn_data_tab()],
                    ),
                    # Churn Model Tab
                    dcc.Tab(
                        label="Churn Model",
                        value="tab-3",
                        style=tab_style,
                        selected_style=tab_selected_style,
                        children=[churn_model_tab_layout()],
                    ),
                    dcc.Tab(
                        label="Sentiment Analysis",
                        value="tab-4",
                        style=tab_style,
                        selected_style=tab_selected_style,
                        children=[sentiment_tab()],
                    ),
                ],
            ),
        ]
    )
|
998,873 | 67ce968596d9dd06e8112cb6979eea94fde4dd4d | # Source: https://stackoverflow.com/a/26284607/6759699
from numpy.core.numeric import binary_repr, identity, asanyarray, dot
def matrix_power_mod(M, n, mod_val):
    """Return ``M ** n`` with every entry reduced modulo ``mod_val``.

    Implementation shadows numpy's ``matrix_power`` but applies ``% mod_val``
    after every matrix product (square-and-multiply), keeping intermediate
    entries small.

    Parameters
    ----------
    M : array_like
        Square 2-D matrix.
    n : int
        Exponent. ``n < 0`` inverts ``M`` first via a float inverse (kept for
        compatibility with numpy's matrix_power; only meaningful when that
        inverse is integral).
    mod_val : int
        Modulus applied to every entry.

    Raises
    ------
    ValueError
        If ``M`` is not a square 2-D array.
    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if n == 0:
        # BUG FIX: the identity must also be reduced (matters when mod_val == 1).
        return identity(M.shape[0], dtype=M.dtype) % mod_val
    if n < 0:
        from numpy.linalg import inv
        M = inv(M)
        n = -n
    # Square-and-multiply over the binary expansion of n; replaces the manual
    # binary_repr bookkeeping with the standard iterative form.
    base = M % mod_val
    result = None
    while n:
        if n & 1:
            result = base if result is None else dot(result, base) % mod_val
        n >>= 1
        if n:
            base = dot(base, base) % mod_val
    return result % mod_val
|
998,874 | 77b0aa26dd8d77336ae69518ed81c96d4c4f8862 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 17:14:39 2019
@author: jivitesh's PC
"""
import numpy as np
# Small demo of np.arange and nested np.where conditions.
arr3 = np.arange(5)
print(arr3)

# Boolean masks used by the nested np.where below.
# CLEANUP: removed the earlier `condition`/`x`/`y` list assignments that were
# immediately overwritten and never used.
x = np.array([True, False, True, False, True])
y = np.array([False, True, False, False, True])

# 0 where both masks are True, 1 where only x, 2 where only y, 3 where neither.
arr = np.where(x & y, 0, np.where(x, 1, np.where(y, 2, 3)))
print(arr)

# BUG FIX: the trailing `np.loadtxt?` was IPython help syntax and a
# SyntaxError in plain Python; use help() if the docs are wanted:
# help(np.loadtxt)
998,875 | adb64d75ebe0b54476805c8024ae7689c6a7c99d | import os
from flask import Flask, session
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import csv
app = Flask(__name__)

# Check for environment variable
# Fail fast: the import script is useless without a database to write to.
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")

# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Set up database
# Scoped session gives each thread its own SQLAlchemy session.
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
def main():
    """Bulk-load books.csv (isbn,title,author,year rows with a header line)
    into the cs50wbooks table, printing how many rows were inserted."""
    # Context manager closes the file even if an insert fails
    # (the original left the handle open).
    with open("books.csv", newline="") as f:
        reader = csv.reader(f)
        # ignore header in books.csv
        next(reader, None)
        # counter just to keep track of number of books added
        count = 0
        for isbn, title, author, year in reader:
            db.execute(
                "INSERT INTO cs50wbooks (ISBN, TITLE, AUTHOR, YEAR) VALUES (:isbn, :title, :author, :year)",
                {"isbn": isbn, "title": title, "author": author, "year": year})
            db.commit()
            count += 1
    # BUG FIX: the old message lacked a space ("5books added").
    print(f"{count} books added")


if __name__ == "__main__":
    main()
998,876 | 14379b3e9d48e0a610e25ab1ff7b609468ee09b3 | print ("excel to python process")
print ("-----------------------")
import xlrd
wbv=xlrd.open_workbook("info1.xls")
ns=wbv.nsheets
sn=wbv.sheet_names()
print ("Number of sheets in the info1.xls is:",ns)
print ("sheet Names are :",sn)
wsv=wbv.sheet_by_name("Information")
nr=wsv.nrows
nc=wsv.ncols
print ("total number of rows contain data:",nr)
print ("Total number of colums contain data:",nc)
for i in range(nr):
for j in range(nc):
v=wsv.cell(i,j).value
print (v)
|
998,877 | 915de9a96c40deaa5a8846fba8326cb72103b1e8 | from django import urls
from django.urls import path
from . import views
# URL routes for this app; the third entry has no `name`, so it cannot be
# reversed with {% url %} / reverse().
urlpatterns = [
    path('',views.home,name='home'),
    path('m-data',views.m_data,name='mdata'),
    path('stazioni_appaltanti',views.stazioni_appaltanti)
]
|
998,878 | a78fc71a0bc4ecac9bb3a6a7a7421ef9a654325e | import sys
import json
from flask import request
# HACK: hard-coded absolute path so `flask_server` imports resolve when run
# from this machine; should be replaced by proper packaging.
sys.path.extend([r'D:\bmd\bmd_server\src\company\flask_server'])
# from flask_server.bmd_celery.tasks import check_online_total,check_tel_total
from flask import Blueprint
import os

# Blueprint hosting all the /check_data, /check_tel, ... endpoints below.
api = Blueprint("api",__name__)
from flask_server.common import get_log
from flask_server.process_.Reserve_total import check_reserve
from flask_server.process_.result_push import Data_push
from werkzeug.utils import secure_filename
from flask_server.process_.excel_push import read_excel,tel_handler
from flask import current_app
@api.route('/check_data', methods=["GET", "POST"])
def check_online_total():
    """Report push status of stored records (original: 检查数据是否推送).

    Returns a JSON string with code/msg plus total, pushed and not-yet-pushed
    counts and the list of records to push.
    """
    try:
        # Currently unused; kept so existing clients can keep sending it.
        userid = request.form.get("userid")
        start = check_reserve()
        # 1. query push status of every stored record
        sucess_count, To_push, Not_push, push_lt = start.check_online_total()
        # CLEANUP: local was named `dict`, shadowing the builtin.
        payload = {}
        if sucess_count == 0:
            payload["code"] = 200
            payload["msg"] = "暂时没有需要查询推送的数据"
            payload["push_lt"] = []
        else:
            payload["code"] = 200
            payload["msg"] = f'推送状态查询完成,数据总数{sucess_count},已经推送的数据总共有{To_push},还未推送的数据有{Not_push}'
            payload["push_lt"] = push_lt
            payload["sucess_count"] = sucess_count
            payload["To_push"] = To_push
            payload["Not_push"] = Not_push
        return json.dumps(payload)
    except Exception as e:
        # BUG FIX: log and return an explicit error body instead of implicitly
        # returning None (which made Flask raise a TypeError).
        current_app.logger.info(e)
        return json.dumps({"code": 500, "errmsg": "server error"})
@api.route('/check_tel', methods=["GET", "POST"])
def check_tel_total():
    """Report phone-number validity status (original: 检查数据号码状态).

    Returns a JSON string with counts of checked / still-to-clean / pushable
    numbers and the corresponding record lists.
    """
    try:
        userid = request.form.get('userid')
        print(userid)
        start = check_reserve()
        # 2. verify which stored numbers are live (原: 检测数据号码是否是实号)
        sucessount, sucess_lt, totalCount, total_lt, phoneNum = start.check_tel_total()
        # CLEANUP: local was named `dict`, shadowing the builtin.
        payload = {}
        payload["msg"] = f'已经检测{sucessount}条数据,还有{totalCount}条数据需要进行空号清洗,可推送的数据有{phoneNum}条'
        payload["sucessount"] = sucessount
        payload["sucess_lt"] = sucess_lt
        payload["totalCount"] = totalCount
        payload["total_lt"] = total_lt
        return json.dumps(payload)
    except Exception as e:
        # CONSISTENCY FIX: use the app logger like the other views (was a bare
        # print) and return an explicit error body instead of None.
        current_app.logger.info(e)
        return json.dumps({"code": 500, "errmsg": "server error"})
@api.route('/data_push', methods=["GET", "POST"])
def data_push():
    """Push `date_count` records for the given trade/industry.

    Expects form fields: userid, trade, date_count. Returns the handler's
    result dict as JSON, or code 503 on missing parameters.
    """
    try:
        print("进入函数")
        userid = request.form.get("userid")
        liquid_name = request.form.get("trade")
        result_count = request.form.get("date_count")
        # e.g. "http://127.0.0.1:8082/api/data_push?liquid_name=知产&liquid_count=1000"
        if userid and liquid_name and result_count:
            start = Data_push()
            # Strip stray single quotes the client sometimes sends.
            result_count = str(result_count).replace("'", "")
            # CLEANUP: local was named `dict`, shadowing the builtin.
            payload = start.data_handle(liquid_name, result_count)
            payload["code"] = 200
            return json.dumps(payload)
        else:
            payload = {}
            payload["code"] = 503
            payload["errmsg"] = "参数错误"
            return json.dumps(payload)
    except Exception as e:
        # BUG FIX: return an explicit error body instead of implicitly
        # returning None (which made Flask raise a TypeError).
        current_app.logger.info(e)
        return json.dumps({"code": 500, "errmsg": "server error"})
@api.route("/file_update", methods=["GET", "POST"],)
def file_update():
    """Accept Excel uploads for a user, import and de-duplicate them.

    Expects form fields userid / trade and one or more files under 'files'.
    Saves each file to files/<userid>/, then runs the excel/phone importers
    and returns per-step counts as JSON.
    """
    # CLEANUP: response local was named `dict`, shadowing the builtin.
    payload = {}
    if request.method == "POST":
        try:
            print("进入上传函数")
            file = request.files.getlist('files')
            userid = request.form.get('userid')
            trade = request.form.get('trade')
            # Upload root lives next to this module: .../files instead of .../views
            base_path = (os.path.abspath(os.path.dirname(__file__))).replace(r'views', "files")
            if userid and file and trade:
                user_path = base_path + '\{}'.format(userid)
                if not os.path.exists(user_path):
                    os.mkdir(user_path)
                for item in file:
                    # Overwrite a previous upload of the same name.
                    filename = user_path + r'\{}'.format(item.filename)
                    if os.path.exists(filename):
                        os.remove(filename)
                    item.save(filename)
                # Import the spreadsheets, then de-duplicate phone numbers.
                total, sucess, err, data = read_excel(user_id=userid, trade=trade)
                sucess_count = tel_handler(data)
                payload["code"] = 200
                lt = [{"sum": total, "com_succes_num": sucess, "faild_num": err, "phone_sucess_num": sucess_count}]
                payload["list"] = lt
                payload["msg"] = '用户:{}上传的总数据为-{},公司去重后成功存入的数据-{},失败的数据-{},' \
                                 '手机去重后的功存入的数据-{}'.format(userid, total, sucess, err, sucess_count)
                return json.dumps(payload)
            else:
                payload["code"] = 502
                payload["err_msg"] = "请求参数有误"
                return json.dumps(payload)
        except Exception as e:
            payload["code"] = 503
            payload["err_msg"] = "后台程序处理出错"
            current_app.logger.info(e)
            return json.dumps(payload)
    else:
        payload["code"] = 403
        payload["err_msg"] = "请求错误,只能是post请求"
        return json.dumps(payload)
@api.route('/test', methods=["GET", "POST"])
def tests():
    """Debug endpoint: save an uploaded 'file' to a hard-coded local path.

    NOTE(review): the destination path is machine-specific and should be made
    configurable before this leaves development.
    """
    # CLEANUP: response local was named `dict`, shadowing the builtin.
    payload = {}
    try:
        file = request.files.get('file')
        print(file)
        filename = r"C:\Users\Administrator\Documents\Tencent Files\1173638836\FileRecv\test.wav"
        file.save(filename)
        if file:
            payload["code"] = 200
            payload["msg"] = '请求成功'
        else:
            payload["err_msg"] = "文件错误"
        return json.dumps(payload)
    except Exception as e:
        # BUG FIX: log via the app logger and return an error body instead of
        # implicitly returning None after a bare print.
        current_app.logger.info(e)
        return json.dumps({"code": 500, "errmsg": "server error"})
998,879 | 0810f491d0d6fd59635da9a45ac135a73d9e5c62 | import unittest
from unittest.mock import Mock
from parameterized import parameterized, parameterized_class
from module1 import attack_damage
from unittest import mock
class Module1Tests(unittest.TestCase):
    """Demonstrates mock.patch (decorator and context-manager forms), Mock
    attributes/side effects, and parameterized test expansion."""

    @mock.patch("module1.randint", return_value=1, autospec=True)
    def test(self, mocked_randint):
        self.assertEqual(attack_damage(500), 500)

    def test_patch_with_context_manager(self):
        with mock.patch("module1.randint", return_value=1, autospec=True):
            self.assertEqual(attack_damage(500), 500)

    def test2(self):
        # CLEANUP: the local was named `mock`, shadowing the imported module.
        m = Mock()
        m.x = "zmienna x"
        m.funkcja_x = Mock(return_value="wartosc zwrocona przez funkcje")
        m.funckja_rzucajaca_wyjatek = Mock(side_effect=BaseException("Rzucilem wyjatkiem"))
        print(m.x)
        print(m.funkcja_x())
        # BUG FIX: the raising mock used to be called bare, which made this
        # test always error out; assert that it raises instead.
        with self.assertRaises(BaseException):
            m.funckja_rzucajaca_wyjatek()

    @parameterized.expand(["foo", "bar"])
    def test_sequence(self, name):
        print(name)
# `a`, `b`, `expected_sum`, `expected_product` are injected per generated
# class by the decorator, one class per parameter tuple.
@parameterized_class(('a', 'b', 'expected_sum', 'expected_product'), [
    (1, 2, 3, 2),
    (5, 5, 10, 25),
])
class TestMathClass(unittest.TestCase):
    def test_add(self):
        self.assertEqual(self.a + self.b, self.expected_sum)

    def test_multiply(self):
        self.assertEqual(self.a * self.b, self.expected_product)


if __name__ == '__main__':
    unittest.main()
998,880 | a594679614eb2f101dbfab2937c7157f522c5323 | from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog
from Ui_mainui import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
    """Dialog that copies the input text box into the output box when the
    OK button is clicked."""

    def __init__(self, parent=None):
        super(Dialog, self).__init__(parent)
        self.setupUi(self)

    @pyqtSlot()
    def on_okbtn_clicked(self):
        # Auto-connected by setupUi's naming convention
        # (on_<objectName>_<signal>); @pyqtSlot prevents double firing.
        self.outtxt.setText(self.intxt.toPlainText())
if __name__ == "__main__":
    import sys
    from PyQt5 import QtWidgets
    # Standard Qt bootstrap: create the application, show the dialog and
    # hand control to the event loop until the window is closed.
    app = QtWidgets.QApplication(sys.argv)
    dlg = Dialog()
    dlg.show()
    sys.exit(app.exec_())
|
998,881 | c636d43e056d27f7e4c24dd4a456304092d1dec5 | from euler_util import is_palindrome
def euler55():
    """http://projecteuler.net/problem=55

    A Lychrel number is one that never forms a palindrome through the
    reverse-and-add process (for this problem, within 50 iterations).
    Count how many Lychrel numbers there are below ten-thousand.

    (The previous docstring was copy-pasted from problem 5 and described
    the wrong problem.)
    """
    lychrel = []
    for n in range(10000):
        candidate = n
        is_lychrel = True
        # `iteration` replaces the original name `round`, which shadowed
        # the builtin.
        for iteration in range(50):
            candidate += int(str(candidate)[::-1])
            if is_palindrome(candidate):
                is_lychrel = False
                break
        if is_lychrel:
            lychrel.append(n)
    return len(lychrel)
|
998,882 | 9c801855bf78ef458912784f7cbf0215cf120d5d | from __future__ import print_function
import re
import os
import gzip
import time
import matplotlib.pyplot as pl
import matplotlib
from matplotlib.animation import FuncAnimation
import numpy as np
import scipy
from .utils import mystr as _mystr
from .utils import pyname
from collections import namedtuple
from .pydataobj import dataobj
from . import tfsdata
from .survey import rot_mad, get_w_from_angles
from .tablemixin import TableMixIn
def rng(x, a, b):
    """Return True (elementwise for arrays) where a < x < b."""
    above = x > a
    below = x < b
    return above & below
def errors_getmn(self, errorname="b6"):
    """Resonance (m, n) pairs for a multipole error name such as "b6".

    The leading character is the kind ('a'/'b'/...), the remainder the
    integer order; both are forwarded to getmn().
    """
    return getmn(order=int(errorname[1:]), kind=errorname[0])
def getmn(order, kind="b"):
    """Return the list of (m, n) resonance indices of the given order.

    ``kind`` is a string of flags:
        't': all resonances (treated as 'ab')
        'a': skew multipoles (n odd)
        'b': normal multipoles (n even)
        's': sum resonances (m>0, n>0), loss of beam
        'd': difference resonances (m>0, n<0), exchange between planes
    """
    resonances = []
    if "t" in kind:
        kind = "ab"
    for m in range(order + 1):
        n = order - m
        # NOTE: grouping kept exactly as in the original -- the m == 0
        # row matches this branch regardless of the requested kind.
        if ("b" in kind and n % 2 == 0) or m == 0:
            resonances.append((m, n))
            if n > 0:
                resonances.append((m, -n))
        if "a" in kind and n % 2 == 1 and m > 0:
            resonances.append((m, n))
            if n > 0:
                resonances.append((m, -n))
        if "s" in kind and n > 0 and m > 0:
            resonances.append((m, n))
        if "d" in kind and n > 0 and m > 0:
            resonances.append((m, -n))
    return list(set(resonances))
class optics(dataobj, TableMixIn):
_is_s_begin = False
_name_char = 16
_entry_char = 12
_entry_prec = 3
@classmethod
def open(cls, fn):
return cls(tfsdata.open(fn))
def save(self, fn, floatfmt="%20.9f"):
tfsdata.save(self._data, fn, floatfmt)
def __init__(self, data={}, idx=False):
self.update(data)
if hasattr(data, "summary"):
self.header = data.summary
if hasattr(data, "name"):
self.name = np.array([a.split(":")[0] for a in self.name])
self._fdate = 0
def col_names(self):
if "_col_names" in self._data:
return self._data["_col_names"]
else:
return self._data.col_names
def copy(self):
data = {}
for k, v in list(self._data.items()):
if hasattr(v, "copy"):
vv = v.copy()
# elif hasattr(v,'__getitem__'):
# vv=v[:]
else:
vv = v
data[k] = vv
return optics(data)
def reload(self):
if "filename" in self._data:
fdate = os.stat(self.filename).st_ctime
if fdate > self._fdate:
self._data = tfsdata.open(self.filename)
self._fdate = fdate
return True
return False
def get_idx(self, name=None, count=0):
if type(name) is str:
return np.where(self.name == name)[0][count]
else:
return count
def row(self, index):
return {cc: self[cc][index] for cc in self.col_names()()}
def twissdata(self, location, data):
idx = np.where(self.pattern(location))[0][-1]
out = dict(location=location)
for name in data.split():
vec = self.__dict__.get(name)
if vec is None:
out[name] = 0
else:
out[name] = vec[idx]
out["sequence"] = self.header.get("sequence")
return out
def range(self, pat1, pat2):
"""return a mask relative to range"""
try:
id1 = np.where(self.pattern(pat1))[0][-1]
except IndexError:
raise ValueError("%s pattern not found in table" % pat1)
try:
id2 = np.where(self.pattern(pat2))[0][-1]
except IndexError:
raise ValueError("%s pattern not found in table" % pat2)
out = np.zeros(len(self.name), dtype=bool)
if id2 > id1:
out[id1 : id2 + 1] = True
else:
out[id1:] = True
out[: id2 + 1] = True
return out
def plot(
self,
yl="",
yr="",
x="s",
idx=slice(None),
clist="k r b g c m",
lattice=True,
newfig=True,
pre=None,
autoupdate=False,
ip_label=False,
):
out = qdplot(
self,
x=x,
yl=yl,
yr=yr,
idx=idx,
lattice=lattice,
newfig=newfig,
clist=clist,
pre=pre,
)
self._plot = out
if ip_label:
self.set_xaxis_ip()
if autoupdate:
if "wx" in matplotlib.get_backend().lower():
self._plot.wx_autoupdate()
else:
self._plot.ani_autoupdate()
# self._target.append(out)
return self
def plotbeta(self, **nargs):
return self.plot("betx bety", "dx dy", **nargs)
def plotsigma(self, emit=2.5e-6 / 7000 * 0.938, deltap=1.1e-4, **nargs):
self.sigx = np.sqrt(self.betx * emit) * 1000
self.sigy = np.sqrt(self.bety * emit) * 1000
self.sigdx = self.dx * deltap * 1000
self.plot("sigx sigy sigdx", **nargs)
ya, yb = pl.ylim()
pl.twinx()
bmax = max(self.betx.max(), self.bety.max())
rng = list(range(0, int(np.ceil(np.log10(bmax))) + 1))
bval = np.array([n * 10**dd for dd in rng for n in [1, 2, 5]])
bval = bval[bval < bmax]
pl.ylim(ya, yb)
return self
def plotcross(self, **nargs):
return self.plot("x y", "dx dy", **nargs)
def plottune(self, newfig=True, **nargs):
q4x, q3x, q2x, q1x, q0x = scipy.polyfit(self.deltap, self.q1, 4)
q4y, q3y, q2y, q1y, q0y = scipy.polyfit(self.deltap, self.q2, 4)
qx = (self.q1 - self.q1.round())[abs(self.deltap) < 1e-15][0]
qy = (self.q2 - self.q2.round())[abs(self.deltap) < 1e-15][0]
fmt = r"$%s=%4.2f %+4.2f \delta"
fmt += r"%+4.2f \frac{\delta^2}{2\cdot10^{-3}}"
fmt += r"%+4.2f \frac{\delta^3}{6\cdot10^{-6}}$"
fmtx = fmt % ("Q_x", q0x, q1x, q2x * 2e-3, q3x * 6e-6)
fmty = fmt % ("Q_y", q0y, q1y, q2y * 2e-3, q3y * 6e-6)
if newfig:
pl.figure()
pl.title(r"Tune vs $\delta$")
pl.xlabel("$\delta$")
pl.ylabel("Fractional tune")
pl.plot(self.deltap, self.q1 - self.q1.round(), label=fmtx, **nargs)
pl.plot(self.deltap, self.q2 - self.q2.round(), label=fmty, **nargs)
# pl.text(0.0,qx,r"$Q_x$")
# pl.text(0.0,qy,r"$Q_y$")
pl.grid(True)
pl.legend(loc=0)
return self
def plotbetabeat(self, t1, dp="0.0003", **nargs):
pl.title(r"$\rm{Beta beat: 1 - \beta(\delta=%s)/\beta(\delta=0)}$" % dp)
pl.ylabel(r"$\Delta\beta/\beta$")
pl.xlabel(r"$s$")
pl.plot(
self.s, 1 - t1.betx / self.betx, label=r"$\Delta\beta_x/\beta_x$", **nargs
)
pl.plot(
self.s, 1 - t1.bety / self.bety, label=r"$\Delta\beta_y/\beta_y$", **nargs
)
pl.grid(True)
pl.legend()
return self
def plotw(self, lbl="", **nargs):
pl.title(r"Chromatic function: %s" % lbl)
# pl.ylabel(r"$w=(\Delta\beta/\beta)/\delta$")
pl.ylabel(r"$w$")
pl.xlabel(r"$s$")
pl.plot(self.s, self.wx, label=r"$w_x$", **nargs)
pl.plot(self.s, self.wy, label=r"$w_y$", **nargs)
pl.grid(True)
pl.legend()
return self
def plotap(self, ap=None, nlim=30, ref=12.6, newfig=True, eref=None, **nargs):
if ap is None:
apfn = self.filename.replace("twiss", "ap")
ap = optics.open(apfn)
if eref is not None:
ap.s -= ap.s[ap // eref]
self.s -= self.s[self // eref]
self.ss = ap.s
self.n1 = ap.n1
self = self.plot(x="ss", yl="n1", newfig=newfig, **nargs)
p = self._plot
p.figure.gca().plot(self.ss, self.ss * 0 + ref)
p.figure.gca().set_ylim(0, nlim)
p.figure.canvas.draw()
self._plot = p
self.ap = ap
return self
def mk_betamax(self):
self.betxmax = np.zeros_like(self.betx)
self.betymax = np.zeros_like(self.bety)
bufname = ""
for i in range(len(self.k1l)):
k1l = self.k1l[i]
l = self.l[i]
betx = self.betx[i]
alfx = self.alfx[i]
bety = self.bety[i]
alfy = self.alfy[i]
if l > 0: # thick
alfxm = self.alfx[i - 1]
alfym = self.alfy[i - 1]
betxm = self.betx[i - 1]
betym = self.bety[i - 1]
if k1l >= 0: # focus
if alfxm < 0 and alfx > 0:
self.betxmax[i] = betx + alfx**2 / betx / k1l * l
else:
self.betxmax[i] = max(betxm, betx)
self.betymax[i] = max(bety, betym)
else: # defocu
if alfym < 0 and alfy > 0:
self.betymax[i] = bety + alfy**2 / bety / (-k1l) * l
else:
self.betymax[i] = max(bety, betym)
self.betxmax[i] = max(betxm, betx)
elif abs(k1l) > 0: # thin
name = self.name[i].split("..")[0]
if bufname != name:
if bufname != "":
idx = np.where(self.name == bufname)[0][0]
self.betxmax[idx] = max(bufx)
self.betymax[idx] = max(bufy)
bufx = [self.betx[i]]
bufy = [self.bety[i]]
bufname = name
else:
bufx.append(self.betx[i])
bufy.append(self.bety[i])
return self
def maxbety(f):
return f.bety + f.alfy**2 / f.bety / abs(f.k1l / f.l)
def maxbety(f):
return f.bety + f.alfy**2 / f.bety / abs(f.k1l / f.l)
# def chromx(f):
# if not hasattr(f,'k1l'):
# f.k1l=f.k1l
# return -sum(f.k1l*f.betx)/4/pi
# def chromy(f):
# if not hasattr(f,'k1l'):
# f.k1l=f.k1l
# return sum(f.k1l*f.bety)/4/pi
def ndx(self):
return self.dx / np.sqrt(self.betx)
def ndpx(self):
return self.dpx * np.sqrt(self.betx) + self.dx / np.sqrt(self.betx) * self.alfx
def alphac(self):
return sum(self("dx*kn0l")) / sum(self.l)
def gammatr(self):
af = self._alphac()
if af > 0:
return np.sqrt(1 / af)
else:
return -np.sqrt(-1 / af)
def transferMatrix(self, i1=0, i2=-1, plane="x"):
"""Return the transfer matrix from position i1 to position i2
see Y.Lee 2.68 pag 53 for definition
"""
B2 = self.normMat(i2, plane=plane)
B1 = self.normMat(i1, plane=plane)
psi = 2 * np.pi * (self["mu" + plane][i2] - self["mu" + plane][i1])
R = np.array([[np.cos(psi), np.sin(psi)], [-np.sin(psi), np.cos(psi)]])
return np.dot(np.dot(B2, R), np.linang.inv(B1))
def normMat(self, i, plane="x"):
beta = self["bet" + plane][i]
alpha = self["alf" + plane][i]
return np.array(
[[np.sqrt(beta), 0], [-alpha / np.sqrt(beta), 1 / np.sqrt(beta)]]
)
def mk_intkbeta(self, on_sext=True):
self.intkbetx = self.k1l * 0.0
self.intkbety = self.k1l * 0.0
for i in range(len(self.k1l)):
kl = self.k1l[i]
k2l = self.k2l[i]
l = self.l[i]
betx = self.betx[i]
bety = self.bety[i]
intkbetx = intkbety = 0
if abs(kl) > 0:
if l == 0:
intkbetx = kl * betx
intkbety = kl * bety
else:
alfx = self.alfx[i]
alfy = self.alfy[i]
gamx = (1 + alfx**2) / betx
gamy = (1 + alfy**2) / bety
k = kl / l
ak = np.abs(k)
rk = np.sqrt(ak)
rkl = rk * l
crkl = np.cos(rkl)
srkl = -np.sin(rkl)
chrkl = np.cosh(rkl)
shrkl = -np.sinh(rkl)
if k > 0: # backtrack
r11 = crkl
r12 = srkl / rk
r21 = -srkl * rk
r33 = chrkl
r34 = shrkl / rk
r43 = shrkl * rk
else:
r33 = crkl
r34 = srkl / rk
r43 = -srkl * rk
r11 = chrkl
r12 = shrkl / rk
r21 = shrkl * rk
r22 = r11
r44 = r33
betx0 = r11**2 * betx - 2.0 * r11 * r12 * alfx + r12**2 * gamx
bety0 = r33**2 * bety - 2.0 * r33 * r34 * alfy + r34**2 * gamy
alfx0 = (
-r11 * r21 * betx
+ (1.0 + 2.0 * r12 * r21) * alfx
- r12 * r22 * gamx
)
alfy0 = (
-r33 * r43 * bety
+ (1.0 + 2.0 * r34 * r43) * alfy
- r34 * r44 * gamy
)
gamx0 = (1.0 + alfx0**2) / betx0
gamy0 = (1.0 + alfy0**2) / bety0
if k > 0:
ax = 0.5 * (l + 0.5 / rk * np.sin(2.0 * rkl))
bx = 0.25 / ak * (1.0 - np.cos(2.0 * rkl))
cx = 0.5 / ak * (l - 0.5 / rk * np.sin(2.0 * rkl))
ay = 0.5 * (l + 0.5 / rk * np.sinh(2.0 * rkl))
by = -0.25 / ak * (1.0 - np.cosh(2.0 * rkl))
cy = -0.5 / ak * (l - 0.5 / rk * np.sinh(2.0 * rkl))
else:
ay = 0.5 * (l + 0.5 / rk * np.sin(2.0 * rkl))
by = 0.25 / ak * (1.0 - np.cos(2.0 * rkl))
cy = 0.5 / ak * (l - 0.5 / rk * np.sin(2.0 * rkl))
ax = 0.5 * (l + 0.5 / rk * np.sinh(2.0 * rkl))
bx = -0.25 / ak * (1.0 - np.cosh(2.0 * rkl))
cx = -0.5 / ak * (l - 0.5 / rk * np.sinh(2.0 * rkl))
# if (self//'MQX.*L5')[i]:
# print kl,l,k,ak,rk,rkl
# print betx ,bety ,alfx ,alfy ,gamx ,gamy
# print r11,r12,r21,r22,r33,r34,r43,r44
# print betx0,bety0,alfx0,alfy0,gamx0,gamy0
# print ax,bx,cx,ay,by,cy
intkbetx = k * (ax * betx0 - 2.0 * bx * alfx0 + cx * gamx0)
intkbety = -k * (ay * bety0 - 2.0 * by * alfy0 + cy * gamy0)
elif abs(k2l) > 0:
dx = self.dx[i]
dy = self.dy[i]
intkbetx = -k2l * dx * betx
intkbety = k2l * dx * bety
self.intkbetx[i] = intkbetx
self.intkbety[i] = intkbety
return self
def mk_AB(self):
"""Values
qpp1: (k2 D -k1)' beta
qpp2: k2 D''
qpp3: k1 beta'
qpp4: k2 D beta'
"""
betx, bety, dx = self.betx, self.bety, self.dx
self.Bx = betx * self.wx * np.cos(2 * np.pi * self.phix)
self.Ax = betx * self.wx * np.sin(2 * np.pi * self.phix)
self.By = bety * self.wy * np.cos(2 * np.pi * self.phiy)
self.Ay = bety * self.wy * np.sin(2 * np.pi * self.phiy)
k1l = self.k1l
k2l = self.k2l
self.qp1x = 1.0 / 4 / np.pi * np.cumsum(-betx * k1l)
self.qp1y = 1.0 / 4 / np.pi * np.cumsum(bety * k1l)
self.qp2x = 1.0 / 4 / np.pi * np.cumsum(betx * k2l * dx)
self.qp2y = 1.0 / 4 / np.pi * np.cumsum(-bety * k2l * dx)
self.qpx = self.qp1x + self.qp2x
self.qpy = self.qp1y + self.qp2y
self.qpp1x = -2 * self.qpx
self.qpp1y = -2 * self.qpy
self.qpp2x = 1.0 / 2 / np.pi * np.cumsum(k2l * self.ddx * betx)
self.qpp2y = 1.0 / 2 / np.pi * np.cumsum(-k2l * self.ddx * bety)
self.qpp3x = 1.0 / 4 / np.pi * np.cumsum(-k1l * self.Bx)
self.qpp3y = 1.0 / 4 / np.pi * np.cumsum(k1l * self.By)
self.qpp4x = 1.0 / 4 / np.pi * np.cumsum(k2l * dx * self.Bx)
self.qpp4y = 1.0 / 4 / np.pi * np.cumsum(-k2l * dx * self.By)
self.qppx = self.qpp1x + self.qpp2x + self.qpp3x + self.qpp4x
self.qppy = self.qpp1y + self.qpp2y + self.qpp3y + self.qpp4y
qp1x = self.qp1x[-1]
qp2x = self.qp2x[-1]
qpx = self.qpx[-1]
qpp1x = self.qpp1x[-1]
qpp2x = self.qpp2x[-1]
qpp3x = self.qpp3x[-1]
qpp4x = self.qpp4x[-1]
qppx = self.qppx[-1]
qp1y = self.qp1y[-1]
qp2y = self.qp2y[-1]
qpy = self.qpy[-1]
qpp1y = self.qpp1y[-1]
qpp2y = self.qpp2y[-1]
qpp3y = self.qpp3y[-1]
qpp4y = self.qpp4y[-1]
qppy = self.qppy[-1]
print("Qx' = %10g%+10g = %10g" % (qp1x, qp2x, qpx))
print("Qy' = %10g%+10g = %10g" % (qp1y, qp2y, qpy))
print("Qx''= %10g%+10g%+10g%+10g = %10g" % (qpp1x, qpp2x, qpp3x, qpp4x, qppx))
print("Qy''= %10g%+10g%+10g%+10g = %10g" % (qpp1y, qpp2y, qpp3y, qpp4y, qppy))
self.qp1x = qp1x
self.qp2x = qp2x
self.qpx = qpx
self.qpp1x = qpp1x
self.qpp2x = qpp2x
self.qpp3x = qpp3x
self.qpp4x = qpp4x
self.qppx = qppx
self.qp1y = qp1y
self.qp2y = qp2y
self.qpy = qpy
self.qpp1y = qpp1y
self.qpp2y = qpp2y
self.qpp3y = qpp3y
self.qpp4y = qpp4y
self.qppy = qppy
return self
def interp(self, snew, namenew=None, sname="s"):
"Interpolate with piecewise linear all columns using a new s coordinate"
for cname in self.col_names():
if cname != sname and np.isreal(self[cname][0]):
self[cname] = np.interp(snew, self[sname], self[cname])
self[sname] = snew
self.name = namenew
def _first_idx(self, name):
if type(name) is str:
name = np.where(self.name == name)[0][0]
return name
def _iter_columns(self):
ln = len(self.name)
for k, v in list(self._data.items()):
if hasattr(v, "__len__") and len(v) == ln:
yield k,v
def cycle(self, name, reorder=True):
idx = self._first_idx(name)
for vn in ["s", "mux", "muy", "phix", "phiy"]:
if vn in self:
v = self[vn]
vm = v[-1]
v -= v[idx]
if reorder:
v[:idx] += vm
if reorder:
for vn,v in self._iter_columns():
v = self[vn]
self[vn] = np.concatenate([v[idx:], v[:idx]])
if hasattr(self, "ap"):
self.ap.cycle(idx, reorder=reorder)
return self
def center(self, ref):
idx = np.where(self // ref)[0][0]
if self.header["type"] in ["TWISS", "APERTURE"]:
for vn in ["s", "mux", "muy", "phix", "phiy"]:
if vn in self:
v = self[vn]
v -= v[idx]
elif self.header["type"] == "SURVEY":
theta0 = self.theta[idx]
c0 = np.cos(theta0)
s0 = np.sin(theta0)
x0 = self.x[idx]
y0 = self.y[idx]
z0 = self.z[idx]
xx = self.x - x0
yy = self.y - y0
zz = self.z - z0
xxx = xx * c0 - zz * s0
zzz = xx * s0 + zz * c0
self.x = xxx
self.z = zzz
self.s -= self.s[idx]
return self
def select(self, a, b, shift=True):
a = self._first_idx(a)
b = self._first_idx(b)
data = {}
ln = len(self.name)
for k, v in list(self._data.items()):
if hasattr(v, "__len__") and len(v) == ln:
vv = v[a : b + 1]
elif hasattr(v, "copy"):
vv = v.copy()
# elif hasattr(v,'__getitem__'):
# vv=v[:]
else:
vv = v
data[k] = vv
if shift:
for vn in ["s", "mux", "muy", "phix", "phiy"]:
if vn in data:
data[vn] -= data[vn][0]
return optics(data)
def append(self, t):
data = {}
for k, v in list(self._data.items()):
if k in self.col_names():
data[k] = np.concatenate([v, t[k]])
else:
data[k] = v
return optics(data)
def resize(self, nn):
data = {}
for k, v in list(self._data.items()):
if k in self.col_names():
data[k] = np.zeros(nn, dtype=v.dtype)
else:
data[k] = v
return optics(data)
def errors_add(self, error_table):
"""Add error columns"""
klist = []
for k, val in list(error_table.items()):
if k.startswith("k") and sum(abs(val)) > 0:
klist.append([k, val])
self[k] = self.get(k, np.zeros(len(self.name)))
for idxerror, name in enumerate(error_table["name"]):
idxself = np.where(self.name == name)[0]
for k, val in klist:
self[k][idxself] += val[idxerror]
return self
def drvterm(t, m=0, n=0, p=0, q=0):
dv = t.betx ** (abs(m) / 2.0) * t.bety ** (abs(n) / 2.0)
dv = dv * np.exp(+2j * np.pi * ((m - 2 * p) * t.mux + (n - 2 * q) * t.muy))
return dv
def errors_kvector(self, i, maxorder=10):
rng = list(range(maxorder))
kn, ks = [], []
for n in rng:
kname = "k%dl" % n
if kname in self:
kn.append(self[kname][i])
else:
kn.append(0.0)
kname = "k%dsl" % n
if kname in self:
ks.append(self[kname][i])
else:
ks.append(0.0)
return kn, ks
def errors_ktob(self, maxorder=6):
nelem = len(self.name)
rng = list(range(maxorder))
xx = self.x
yy = self.y
for n in rng:
self["b%d" % (n + 1)] = np.zeros(nelem)
self["a%d" % (n + 1)] = np.zeros(nelem)
for i in range(nelem):
kn, ks = self.errors_kvector(i, maxorder)
x = xx[i]
y = yy[i]
cn = k2b(kn, ks, x, y)
for ib, b in enumerate(cn):
self["b%d" % (ib + 1)][i] = b.real
self["a%d" % (ib + 1)][i] = b.imag
return self
def errors_detuning(self, ex, ey, xs, ys, order):
nelem = len(self.name)
Jx = ex / 2 * xs**2
Jy = ey / 2 * ys**2
Dq = [Dq2, Dq4, Dq6, Dq8, Dq10, Dq12, Dq14, Dq16][order / 2 - 1]
betx = self.betx
bety = self.bety
bnn = self["b%d" % order]
dqxx = np.zeros(nelem)
dqyy = np.zeros(nelem)
for i in range(nelem):
Bx = betx[i]
By = bety[i]
bn = bnn[i]
dqx, dqy = Dq(bn, Bx, Jx, By, Jy)
dqxx[i] = dqx
dqyy[i] = dqy
# print bn,Bx,Jx,By,Jy,dqx,dqx
self["DQx%d" % order] = dqxx
self["DQy%d" % order] = dqyy
return self
def errors_footprint(
self,
ex=3.75e-6 / 450 * 0.938,
ey=3.75e-6 / 450 * 0.938,
nsigma=12,
nangles=7,
orders=[4, 6],
wp=(0.28, 0.31),
label="footprint",
):
x, y = mk_grid(nsigma, nangles)
tunx = []
tuny = []
for order in orders:
if order % 2 == 1 or order < 2 or order > 8:
print("Order supported are 2,4,6,8,10,12,14,16")
for xs, ys in zip(x, y):
dqx = wp[0]
dqy = wp[1]
for order in orders:
self.errors_detuning(ex, ey, xs, ys, order)
dqx += sum(self["DQx%d" % order])
dqy += sum(self["DQy%d" % order])
tunx.append(dqx)
tuny.append(dqy)
return Footprint(x, y, tunx, tuny, nsigma, nangles)
def set_xaxis_ip(self):
idx = self // "IP.$"
sl = self.s[idx]
ns = self.name[idx]
ax = pl.gca()
ax.set_xticks(sl)
ax.set_xticklabels(ns)
def idx_from_namelist(self, namelist):
iilist = 0
currname = namelist[iilist]
out = []
for ii, name in enumerate(self.name):
if name == currname:
iilist += 1
if iilist < len(namelist):
currname = namelist[iilist]
out.append(ii)
return out
def cox(self, elem):
el = np.where(self // elem)[0][0]
mu0 = self.mux[el]
bet0 = self.betx[el]
pq0 = np.pi * self.header["q1"]
return (
0.5
/ np.sin(pq0)
* np.sqrt(bet0 * self.betx)
* np.cos(2 * np.pi * abs(mu0 - self.mux) - pq0)
)
def coy(self, elem):
el = np.where(self // elem)[0][0]
mu0 = self.muy[el]
bet0 = self.bety[el]
pq0 = np.pi * self.header["q2"]
return (
0.5
/ np.sin(pq0)
* np.sqrt(bet0 * self.bety)
* np.cos(2 * np.pi * abs(mu0 - self.muy) - pq0)
)
def get_rotmat(self, i):
return rot_mad(self.theta[i], self.phi[i], self.psi[i])
def get_pos(self, i):
return np.array([self.x[i], self.y[i], self.z[i]])
def _mylbl(d, x):
return d.get(x, r"$%s$" % x)
class qdplot(object):
lglabel = {
"betx": r"$\beta_x$",
"bety": r"$\beta_y$",
"dx": r"$D_x$",
"dy": r"$D_y$",
"mux": r"$\mu_x$",
"muy": r"$\mu_y$",
"Ax": "$A_x$",
"Ay": "$A_y$",
"Bx": "$B_x$",
"By": "$B_y$",
"wx": "$w_x$",
"wy": "$w_y$",
"sigx": r"$\sigma_x=\sqrt{\beta_x \epsilon}$",
"sigy": r"$\sigma_y=\sqrt{\beta_y \epsilon}$",
"sigdx": r"$\sigma_{D_x}=D_x \delta$",
"n1": r"Aperture [$\sigma$]",
}
axlabel = {
"s": r"$s [m]$",
"ss": r"$s [m]$",
"betx": r"$\beta [m]$",
"bety": r"$\beta [m]$",
"mux": r"$\mu/(2 \pi)$",
"muy": r"$\mu/(2 \pi)$",
"dx": r"$D [m]$",
"dy": r"$D [m]$",
"x": r"$co [m]$",
"y": r"$co [m]$",
"sigx": r"$\sigma$ [mm]",
"sigy": r"$\sigma$ [mm]",
"sigdx": r"$\sigma$ [mm]",
"n1": r"Aperture [$\sigma$]",
}
autoupdate = []
def ani_autoupdate(self):
from matplotlib.animation import FuncAnimation
self._ani = FuncAnimation(self.figure, self.update, blit=False, interval=1000)
def ani_stopupdate(self):
del self._ani
@classmethod
def on_updated(cls, fun):
cls.on_update = fun
def __init__(
self,
t,
x="",
yl="",
yr="",
idx=slice(None),
clist="k r b g c m",
lattice=None,
newfig=True,
pre=None,
):
yl, yr, clist = list(map(str.split, (yl, yr, clist)))
# timeit('Init',True)
self.color = {}
self.left = None
self.right = None
self.lattice = None
self.pre = None
self.t, self.x, self.yl, self.yr, self.idx, self.clist = (
t,
x,
yl,
yr,
idx,
clist,
)
for i in self.yl + self.yr:
self.color[i] = self.clist.pop(0)
self.clist.append(self.color[i])
if newfig is True:
self.figure = pl.figure()
elif newfig is False:
self.figure = pl.gcf()
self.figure.clf()
else:
self.figure = newfig
self.figure.clf()
if lattice:
self.lattice = self._new_axes()
# self.lattice.set_autoscale_on(False)
self.lattice.yaxis.set_visible(False)
if yl:
self.left = self._new_axes()
# self.left.set_autoscale_on(False)
self.left.yaxis.set_label_position("left")
self.left.yaxis.set_ticks_position("left")
if yr:
self.right = self._new_axes()
# self.right.set_autoscale_on(False)
self.right.yaxis.set_label_position("right")
self.right.yaxis.set_ticks_position("right")
# timeit('Setup')
self.run()
if lattice is not None:
self.lattice.set_autoscale_on(False)
if yl:
self.left.set_autoscale_on(False)
if yr:
self.right.set_autoscale_on(False)
# timeit('Update')
def _new_axes(self):
if self.figure.axes:
ax = self.figure.axes[-1]
out = self.figure.add_axes(ax.get_position(), sharex=ax, frameon=False)
else:
# adjust plot dimensions
out = self.figure.add_axes([0.17, 0.12, 0.6, 0.8])
return out
def __repr__(self):
return object.__repr__(self)
def _trig(self):
print("optics trig")
self.run()
def update(self, *args):
if hasattr(self.t, "reload"):
if self.t.reload():
self.run()
return self
return False
# def _wx_callback(self,*args):
# self.update()
# wx.WakeUpIdle()
#
# def autoupdate(self):
# if pl.rcParams['backend']=='WXAgg':
# wx.EVT_IDLE.Bind(wx.GetApp(),wx.ID_ANY,wx.ID_ANY,self._wx_callback)
# return self
#
# def stop_update(self):
# if pl.rcParams['backend']=='WXAgg':
# wx.EVT_IDLE.Unbind(wx.GetApp(),wx.ID_ANY,wx.ID_ANY,self._callback)
#
# def __del__(self):
# if hasattr(self,'_callback'):
# self.stop_update()
def run(self):
# print 'optics run'
self.ont = self.t
self.xaxis = getattr(self.ont, self.x)
is_ion = pl.isinteractive()
pl.interactive(False)
self.lines = []
self.legends = []
# self.figure.lines=[]
# self.figure.patches=[]
# self.figure.texts=[]
# self.figure.images = []
self.figure.legends = []
if self.lattice:
self.lattice.clear()
self._lattice(["k0l", "kn0l", "angle"], "#a0ffa0", "Bend h")
self._lattice(["ks0l"], "#ffa0a0", "Bend v")
self._lattice(["kn1l", "k1l"], "#a0a0ff", "Quad")
self._lattice(["hkick"], "#e0a0e0", "Kick h")
self._lattice(["vkick"], "#a0e0e0", "Kick v")
self._lattice(["kn2l", "k2l"], "#e0e0a0", "Sext")
if self.left:
self.left.clear()
for i in self.yl:
self._column(i, self.left, self.color[i])
if self.right:
self.right.clear()
for i in self.yr:
self._column(i, self.right, self.color[i])
ca = self.figure.gca()
ca.set_xlabel(_mylbl(self.axlabel, self.x))
ca.set_xlim(min(self.xaxis[self.idx]), max(self.xaxis[self.idx]))
self.figure.legend(self.lines, self.legends, "upper right")
ca.grid(True)
# self.figure.canvas.mpl_connect('button_release_event',self.button_press)
self.figure.canvas.mpl_connect("pick_event", self.pick)
pl.interactive(is_ion)
self.figure.canvas.draw()
if hasattr(self, "on_run"):
self.on_run(self)
def pick(self, event):
pos = np.array([event.mouseevent.x, event.mouseevent.y])
name = event.artist.elemname
prop = event.artist.elemprop
value = event.artist.elemvalue
print("\n %s.%s=%s" % (name, prop, value), end=" ")
# def button_press(self,mouseevent):
# rel=np.array([mouseevent.x,mouseevent.y])
# dx,dy=self.pickpos/rel
# print 'release'
# self.t[self.pickname][self.pickprop]*=dy
# self.t.track()
# self.update()
def _lattice(self, names, color, lbl):
# timeit('start lattice %s' % names,1)
vd = 0
sp = self.lattice
s = self.ont.s
l = self.ont.l
for i in names:
myvd = self.ont.__dict__.get(i, None)
if myvd is not None:
vdname = i
vd = myvd[self.idx] + vd
if np.any(vd != 0):
m = np.abs(vd).max()
if m > 1e-10:
c = np.where(abs(vd) > m * 1e-4)[0]
if len(c) > 0:
if np.all(l[c] > 0):
vd[c] = vd[c] / l[c]
m = abs(vd[c]).max()
vd[c] /= m
if self.ont._is_s_begin:
plt = self.lattice.bar(
s[c] + l[c] / 2, vd[c], l[c], picker=True
) # changed
else:
plt = self.lattice.bar(
s[c] - l[c] / 2, vd[c], l[c], picker=True
) # changed
pl.setp(plt, facecolor=color, edgecolor=color)
if plt:
self.lines.append(plt[0])
self.legends.append(lbl)
row_names = self.ont.name
for r, i in zip(plt, c):
r.elemname = row_names[i]
r.elemprop = vdname
r.elemvalue = getattr(self.ont, vdname)[i]
self.lattice.set_ylim(-1.5, 1.5)
# timeit('end lattice')
def _column(self, name, sp, color):
fig, s = self.figure, self.xaxis
y = self.ont(name)[self.idx]
(bxp,) = sp.plot(s, y, color, label=_mylbl(self.lglabel, name))
sp.set_ylabel(_mylbl(self.axlabel, name))
self.lines.append(bxp)
self.legends.append(_mylbl(self.lglabel, name))
sp.autoscale_view()
def savefig(self, name):
self.figure.savefig(name)
return self
class Footprint(object):
    """Tune footprint: initial amplitudes (x, y), in units of sigma on a
    polar grid (see mk_grid), together with the tunes (tunx, tuny)
    reached by each grid point."""

    # class Footprint(ObjDebug):
    def __init__(self, x, y, tunx, tuny, nsigma, nangles, label="detuning"):
        """Store grid geometry, amplitudes and tunes (as numpy arrays)."""
        self.nsigma = nsigma
        self.nangles = nangles
        self.x = np.array(x)
        self.y = np.array(y)
        self.tunx = np.array(tunx)
        self.tuny = np.array(tuny)
        self.label = label.replace("_", " ")

    def plot_grid(self, nsigma=None, lw=1):
        """Draw the amplitude grid in (x, y) space as black lines."""
        if nsigma is None:
            nsigma = self.nsigma
        nangles = self.nangles
        ranges = self.mkranges(nsigma)
        for i in ranges:
            if hasattr(i, "step"):
                # old-style and/or conditional: thicker line for the
                # constant-angle rays (those slices have step == nangles)
                lw = i.step == nangles and i.start / 2.0 or 1
            pl.plot(self.x[i], self.y[i], "-k", lw=lw)

    def mkranges(self, nsigma=None):
        """Index selections covering, in order: origin-point pairs, the
        constant-angle rays, and the constant-sigma arcs of the grid.

        NOTE(review): the first loop pairs index 0 with i (not i + 1),
        so the first entry degenerates to [0, 0] -- confirm intended.
        """
        if nsigma is None:
            nsigma = self.nsigma
        nangles = self.nangles
        ranges = []
        for i in range(nangles):
            ranges.append([0, i])
        for i in range(nangles):
            ranges.append(slice(1 + i, nangles * nsigma + 1, nangles))
        for i in range(nsigma):
            ranges.append(slice(1 + nangles * i, 1 + nangles * (i + 1)))
        return ranges

    def plot_footprint(
        t, nsigma=None, wp=(0.28, 0.31), spread=0.01, label=None, color=None
    ):
        """Plot the footprint in tune space around working point ``wp``.

        Note: uses ``t`` rather than ``self`` as the instance argument.
        """
        ranges = t.mkranges(nsigma)
        lw = 1
        out = []
        lbl = True
        if label is None:
            label = t.label
        if color is None:
            color = "k"
        for i in ranges:
            if lbl:
                # attach the legend label only to the first segment
                p = pl.plot(t.tunx[i], t.tuny[i], "-%s" % color, lw=lw, label=label)
                lbl = False
            else:
                p = pl.plot(t.tunx[i], t.tuny[i], "-%s" % color, lw=lw)
            out.append(p[0])
        pl.ylabel("$Q_y$")
        pl.xlabel("$Q_x$")
        pl.grid(True)
        qx, qy = wp
        pl.xlim(qx - spread, qx + spread)
        pl.ylim(qy - spread, qy + spread)
        return out

    def triangulate(t):
        """Plot a Delaunay triangulation of the footprint in tune space.

        NOTE(review): ``matplotlib.delaunay`` was removed from matplotlib
        years ago -- this method will fail on modern installations.
        """
        tr = matplotlib.delaunay.triangulate.Triangulation(t.tunx, t.tuny)
        for i in tr.triangle_nodes:
            pl.plot(t.tunx[i], t.tuny[i])

    def reshape(self):
        """Return tunes reshaped to [sigma, angles] (origin point dropped)."""
        qx = self.tunx[1:].reshape(self.nsigma, self.nangles)
        qy = self.tuny[1:].reshape(self.nsigma, self.nangles)
        return qx, qy
class FootTrack(Footprint):
    """Footprint read from a tracking (dynap) TFS output file instead of
    being computed analytically."""

    def __init__(self, dynapfn, nangles=7, nsigma=12, label="dynap"):
        # Deliberately does NOT call Footprint.__init__: the tunes come
        # straight from the TFS table columns.
        self.label = label.replace("_", " ")
        t = tfsdata.open(dynapfn)
        self.tunx = t["tunx"]
        self.tuny = t["tuny"]
        self.tx = t["x"]
        self.ty = t["y"]
        self.nangles = nangles
        self.nsigma = nsigma
        # self.t=t
def mk_grid(nsigma, nangles):
    """Build a polar grid of initial amplitudes in units of sigma.

    Starts with a near-origin point (small, small), then for each radius
    n = 1..nsigma places nangles points from the horizontal to the
    vertical axis; the extreme angles are clipped to a small offset so
    no point lies exactly on an axis.

    Returns (x, y) as numpy arrays of length nsigma * nangles + 1.
    """
    small = 0.05
    big = np.sqrt(1.0 - small**2)
    xs = [small]
    ys = [small]
    for n in range(1, nsigma + 1):
        for m in range(nangles):
            if m == 0:
                px, py = n * big, n * small
            elif m == nangles - 1:
                px, py = n * small, n * big
            else:
                angle = 90.0 / (nangles - 1) * m * np.pi / 180
                px, py = n * np.cos(angle), n * np.sin(angle)
            xs.append(px)
            ys.append(py)
    return np.array(xs), np.array(ys)
def Dq2(b2, Bx, Jx, By, Jy):
    """(dQx, dQy) tune shift from a normal quadrupole, b2 = k1l.

    Jx, Jy are accepted only to keep the common Dq* signature; a
    quadrupole's tune shift is independent of amplitude.
    """
    denom = 4.0 * np.pi
    return b2 * Bx / denom, -b2 * By / denom
def Dq4(b4, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal octupole, b4 = k3l/6."""
    px = Bx * Jx - 2 * By * Jy
    py = By * Jy - 2 * Bx * Jx
    denom = 8.0 * np.pi
    return b4 * 3 * Bx * px / denom, b4 * 3 * By * py / denom
def Dq6(b6, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal 12-pole, b6 = k5l/120."""
    px = Bx**2 * Jx**2 - 6 * Bx * By * Jx * Jy + 3 * By**2 * Jy**2
    py = 3 * Bx**2 * Jx**2 - 6 * Bx * By * Jx * Jy + By**2 * Jy**2
    denom = 8.0 * np.pi
    return b6 * 5 * Bx * px / denom, -b6 * 5 * By * py / denom
def Dq8(b8, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal 16-pole, b8 = k7l/5040."""
    px = (Bx**3 * Jx**3 - 12 * Bx**2 * By * Jx**2 * Jy
          + 18 * Bx * By**2 * Jx * Jy**2 - 4 * By**3 * Jy**3)
    py = (-4 * Bx**3 * Jx**3 + 18 * Bx**2 * By * Jx**2 * Jy
          - 12 * Bx * By**2 * Jx * Jy**2 + By**3 * Jy**3)
    denom = 32.0 * np.pi
    return b8 * 35 * Bx * px / denom, b8 * 35 * By * py / denom
def Dq10(b10, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal 20-pole, b10 = k9l/362880."""
    px = (Bx**4 * Jx**4 - 20 * Bx**3 * By * Jx**3 * Jy
          + 60 * Bx**2 * By**2 * Jx**2 * Jy**2
          - 40 * Bx * By**3 * Jx * Jy**3 + 5 * By**4 * Jy**4)
    py = (5 * Bx**4 * Jx**4 - 40 * Bx**3 * By * Jx**3 * Jy
          + 60 * Bx**2 * By**2 * Jx**2 * Jy**2
          - 20 * Bx * By**3 * Jx * Jy**3 + By**4 * Jy**4)
    denom = 32.0 * np.pi
    return b10 * 63 * Bx * px / denom, -b10 * 63 * By * py / denom
def Dq12(b12, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal 24-pole, b12 = k11l/39916800."""
    px = (Bx**5 * Jx**5 - 30 * Bx**4 * By * Jx**4 * Jy
          + 150 * Bx**3 * By**2 * Jx**3 * Jy**2
          - 200 * Bx**2 * By**3 * Jx**2 * Jy**3
          + 75 * Bx * By**4 * Jx * Jy**4 - 6 * By**5 * Jy**5)
    py = (-6 * Bx**5 * Jx**5 + 75 * Bx**4 * By * Jx**4 * Jy
          - 200 * Bx**3 * By**2 * Jx**3 * Jy**2
          + 150 * Bx**2 * By**3 * Jx**2 * Jy**3
          - 30 * Bx * By**4 * Jx * Jy**4 + By**5 * Jy**5)
    denom = 64.0 * np.pi
    return b12 * 231 * Bx * px / denom, b12 * 231 * By * py / denom
def Dq14(b14, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal 28-pole, b14 = k13l/6227020800."""
    px = (Bx**6 * Jx**6 - 42 * Bx**5 * By * Jx**5 * Jy
          + 315 * Bx**4 * By**2 * Jx**4 * Jy**2
          - 700 * Bx**3 * By**3 * Jx**3 * Jy**3
          + 525 * Bx**2 * By**4 * Jx**2 * Jy**4
          - 126 * Bx * By**5 * Jx * Jy**5 + 7 * By**6 * Jy**6)
    py = (7 * Bx**6 * Jx**6 - 126 * Bx**5 * By * Jx**5 * Jy
          + 525 * Bx**4 * By**2 * Jx**4 * Jy**2
          - 700 * Bx**3 * By**3 * Jx**3 * Jy**3
          + 315 * Bx**2 * By**4 * Jx**2 * Jy**4
          - 42 * Bx * By**5 * Jx * Jy**5 + By**6 * Jy**6)
    denom = 64.0 * np.pi
    return b14 * 429 * Bx * px / denom, -b14 * 429 * By * py / denom
def Dq16(b16, Bx, Jx, By, Jy):
    """(dQx, dQy) amplitude detuning from a normal 32-pole, b16 = k15l/1307674368000."""
    px = (Bx**7 * Jx**7 - 56 * Bx**6 * By * Jx**6 * Jy
          + 588 * Bx**5 * By**2 * Jx**5 * Jy**2
          - 1960 * Bx**4 * By**3 * Jx**4 * Jy**3
          + 2450 * Bx**3 * By**4 * Jx**3 * Jy**4
          - 1176 * Bx**2 * By**5 * Jx**2 * Jy**5
          + 196 * Bx * By**6 * Jx * Jy**6 - 8 * By**7 * Jy**7)
    py = (-8 * Bx**7 * Jx**7 + 196 * Bx**6 * By * Jx**6 * Jy
          - 1176 * Bx**5 * By**2 * Jx**5 * Jy**2
          + 2450 * Bx**4 * By**3 * Jx**4 * Jy**3
          - 1960 * Bx**3 * By**4 * Jx**3 * Jy**4
          + 588 * Bx**2 * By**5 * Jx**2 * Jy**5
          - 56 * Bx * By**6 * Jx * Jy**6 + By**7 * Jy**7)
    denom = 512.0 * np.pi
    return b16 * 6435 * Bx * px / denom, b16 * 6435 * By * py / denom
def nchoosek(n, k):
    """Binomial coefficient C(n, k) via Pascal-triangle accumulation."""
    row = [1] * (k + 1)
    for _ in range(1, n - k + 1):
        for i in range(1, k + 1):
            row[i] += row[i - 1]
    return row[k]
def factorial(n):
    """Return n! (1 for n <= 1, matching the original loop behaviour)."""
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def FeedDown(bn, an, x, y, i):
    """Feed-down of a multipole expansion at transverse offset (x, y).

    ``bn``/``an`` are the normal/skew coefficient lists (harmonic k is
    stored at index k - 1, as the ``cn[k - 1]`` lookup below shows).
    Returns the (real, imag) parts of the effective i-th harmonic seen
    on the displaced axis.
    """
    # Pack normal + skew parts into complex coefficients c_k = b_k + i*a_k.
    cn = [b + 1j * a for b, a in zip(bn, an)]
    z = x + 1j * y
    n = len(cn)
    # Each higher harmonic k >= i contributes C(k, i) * c_k * z^(k-i).
    fd = sum([nchoosek(k, i) * cn[k - 1] * z ** (k - i) for k in range(i, n + 1)])
    return fd.real, fd.imag
def k2b(kn, ks, x=0, y=0):
    """Convert integrated strengths to multipole coefficients.

    ``kn``/``ks`` are the normal/skew integrated strengths ordered by
    multipole index; they are first normalized by n! into coefficients
    c_k = b_k + i*a_k, then feed-down from the transverse offset (x, y)
    is folded in.  Returns the list of complex coefficients, one per
    output harmonic i = 1..n.
    """
    bn = [k / float(factorial(n)) for n, k in enumerate(kn)]
    an = [k / float(factorial(n)) for n, k in enumerate(ks)]
    z = complex(x, y)
    cn = [complex(b, a) for b, a in zip(bn, an)]
    n = len(cn)
    # (removed dead local `zn`, which was computed and never used)
    # For each output harmonic i, sum the feed-down of all harmonics
    # k >= i: C(k, i) * c_k * z^(k-i).
    fn = [
        sum(nchoosek(k, i) * cn[k - 1] * z ** (k - i) for k in range(i, n + 1))
        for i in range(1, n + 1)
    ]
    return fn
def twiss2map(bet1, alf1, bet2, alf2, mu):
    """Build the 2x2 transfer matrix between two points given their
    Twiss parameters (beta, alpha) and the phase advance mu (in turns).
    """
    phase = 2 * np.pi * mu
    c = np.cos(phase)
    s = np.sin(phase)
    sqrt_b1b2 = np.sqrt(bet1 * bet2)
    ratio = np.sqrt(bet1 / bet2)
    m11 = (c + alf1 * s) / ratio
    m12 = sqrt_b1b2 * s
    m21 = ((alf1 - alf2) * c - (1 + alf1 * alf2) * s) / sqrt_b1b2
    m22 = ratio * (c - alf2 * s)
    return [[m11, m12], [m21, m22]]
mycolors = ["r", "c", "g", "m", "b"]  # single-character color codes
|
998,883 | 408c5867835dc29639a7c3a177a1e12bbd275bf1 | #!/usr/bin/env python
import queue
# Find the `size`-th largest element of `alist` using a min-heap that
# keeps only the `size` largest values seen so far.
q = queue.PriorityQueue()
alist = [5, 2, -2, 7, 0, 1, 4]  # -2 0 1 2 4 5 7
size = 3
# Seed the heap with the first `size` elements.
for value in alist[:size]:
    q.put(value)
# For each remaining element, evict the heap minimum if the element is
# larger; afterwards the heap root is the size-th largest value.
for value in alist[size:]:
    smallest = q.get()
    q.put(max(smallest, value))
print(q.get())
|
998,884 | 27b3e3859dd92bc246ee0de747c996ee8adbebb6 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Optional
import torch
from merlin_standard_lib import Schema
from merlin_standard_lib.utils.doc_utils import docstring_parameter
from ..tabular.base import (
TABULAR_MODULE_PARAMS_DOCSTRING,
FilterFeatures,
TabularAggregationType,
TabularTransformationType,
)
from .base import InputBlock
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
class ContinuousFeatures(InputBlock):
    """Input block for continuous features.

    Selects the configured continuous features from the input dict and
    gives each one a trailing singleton dimension.

    Parameters
    ----------
    features: List[str]
        List of continuous features to include in this module.
    {tabular_module_parameters}
    """

    def __init__(
        self,
        features: List[str],
        pre: Optional[TabularTransformationType] = None,
        post: Optional[TabularTransformationType] = None,
        aggregation: Optional[TabularAggregationType] = None,
        schema: Optional[Schema] = None,
        **kwargs
    ):
        super().__init__(aggregation=aggregation, pre=pre, post=post, schema=schema)
        self.filter_features = FilterFeatures(features)

    @classmethod
    def from_features(cls, features, **kwargs):
        """Alternate constructor building the block from a feature list."""
        return cls(features, **kwargs)

    def forward(self, inputs, **kwargs):
        # Keep only the configured features, then add a trailing axis so
        # each scalar feature becomes shape (..., 1).
        selected = self.filter_features(inputs)
        return {name: values.unsqueeze(-1) for name, values in selected.items()}

    def forward_output_size(self, input_sizes):
        # Mirror forward(): every filtered size gains a trailing 1.
        sizes = self.filter_features.forward_output_size(input_sizes)
        return {name: torch.Size(list(size) + [1]) for name, size in sizes.items()}
|
998,885 | 76b6c25f6c0ca6754da728ef50831a839b00385d | from single_layer import single_neutron
import numpy as np
import logging
logger = logging.getLogger(__name__)
class NeuralNetwork:
    """Skeleton of a neural network of a given shape.

    Only the layer count is recorded so far; forward and backward
    propagation are placeholders to be implemented.
    """

    def __init__(self, number_of_layers_neuron_dictionary):
        # One entry per layer in the supplied layer specification.
        self.number_of_layers = len(number_of_layers_neuron_dictionary)

    def forward_pass(self):
        """Placeholder for the forward propagation step."""
        pass

    def backward_pass(self):
        """Placeholder for the backward propagation step."""
        pass
|
998,886 | 53287824244fc056bdbf92c21618673cf3685f4c | # Guess that number game
import sys
from random import randint
count = 0
ranNum = randint(1, 20)
# Words for the number of tries remaining. Renamed from `list`, which
# shadowed the builtin of the same name.
tries_left = ['Four', 'Three', 'Two', 'One', 'Zero']

print('******* Guess a number between 1 and 20. You have 5 tries *****\n\n')
while count < 5:
    guess = int(input('Guess a number: '))
    count += 1
    if guess < ranNum:
        print('\t\t**** Too low! ', end='')
    elif guess > ranNum:
        print('\t\t**** Too high! ', end='')
    elif guess == ranNum:
        # Correct guess: report the try count and stop the game.
        print('\nYou got it! You solved it in %s %s.'
              % (str(count), 'try' if count == 1 else 'tries'))
        sys.exit()
    print('%s tries left. ****' % tries_left[count - 1])
print('Game over.')
|
998,887 | 45f28232068e75a52044aa7dac9002a10a977f35 | #
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains high-level API functions.
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.ir.graph import Graph
def import_onnx(onnx_model: "onnx.ModelProto") -> Graph:
    """
    Import an onnx-graphsurgeon Graph from the provided ONNX model.

    Args:
        onnx_model (onnx.ModelProto): The ONNX model.

    Returns:
        Graph: A corresponding onnx-graphsurgeon Graph.
    """
    # Imported lazily so this module does not hard-depend on the importer.
    from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter

    opset = OnnxImporter.get_opset(onnx_model)
    return OnnxImporter.import_graph(onnx_model.graph, opset=opset)
def export_onnx(graph: Graph, do_type_check=True, **kwargs) -> "onnx.ModelProto":
    """
    Exports an onnx-graphsurgeon Graph to an ONNX model.

    Args:
        graph (Graph): The graph to export

    Optional Args:
        do_type_check (bool): Whether to check that input and output tensors have data types defined, and fail if not.
        **kwargs: Additional arguments to onnx.helper.make_model

    Returns:
        onnx.ModelProto: A corresponding ONNX model.
    """
    import onnx

    from onnx_graphsurgeon.exporters.onnx_exporter import OnnxExporter

    onnx_graph = OnnxExporter.export_graph(graph, do_type_check=do_type_check)
    # Default the opset imports to the graph's own opset unless the caller
    # supplied their own.
    kwargs.setdefault("opset_imports", [onnx.helper.make_opsetid("", graph.opset)])
    return onnx.helper.make_model(onnx_graph, **kwargs)
|
998,888 | 74c0989ac0a0779fb83f6326ce977d45a09e8fc1 | import sys
import numpy as np
from sklearn import linear_model # 回归模型
import matplotlib.pyplot as plt # 可视化
import sklearn.metrics as sm # 预测评估
# prepare data for y=f(x)
# The dataset file path comes from the first CLI argument; each line of
# the file is expected to be "x,y".
filename = sys.argv[1]
X = []
Y = []
with open(filename,'r') as f:
    for line in f.readlines():
        xt,yt = [float(i) for i in line.split(',')]
        X.append(xt)
        Y.append(yt)
# seperate dataset to training and testing
# 80/20 split taken in file order (no shuffling).
num_training = int(0.8 * len(X))
num_test = len(X) - num_training
# training dataset
# Reshape X to a column vector (n, 1) as scikit-learn expects 2-D inputs.
X_train = np.array(X[:num_training]).reshape((num_training,1))
Y_train = np.array(Y[:num_training])
# testing dataset
X_test = np.array(X[num_training:]).reshape((num_test,1))
Y_test = np.array(Y[num_training:])
''' 1.1
# training the model : linear regressor
# create linear regressor object
linear_regressor = linear_model.LinearRegression()
linear_regressor.fit(X_train,Y_train)
# 可视化训练数据
y_train_pred = linear_regressor.predict(X_train)
plt.figure()
plt.scatter(X_train,Y_train,color='green')
plt.plot(X_train,y_train_pred,color='red',linewidth=4)
plt.title("Training Data")
plt.show()
# 可视化测试数据
y_test_pred = linear_regressor.predict(X_test)
plt.scatter(X_test,Y_test,color='green')
plt.plot(X_test,y_test_pred,color='yellow',linewidth=4)
plt.title('Test data')
plt.show()
# 预测评估:平均误差,平均方差,误差中值,解释性方差评分,R2评分
# 一个好的模型表现:平均方差低,而解释性方差评分高
print("\nMean absolute error =",round(sm.mean_absolute_error(Y_test,y_test_pred),2))
print("\nMean squared error =",round(sm.mean_squared_error(Y_test,y_test_pred),2))
print("\nMedian absolute error =",round(sm.median_absolute_error(Y_test,y_test_pred),2))
print("\nExplained variance score =",round(sm.explained_variance_score(Y_test,y_test_pred),2))
print("\nR2 score =",round(sm.r2_score(Y_test,y_test_pred),2))
'''
''' 1.2
# 模型持久化归档
import pickle
achive_model_file = 'saved_model.pkl'
with open('saved_model.pkl','wb') as f:
pickle.dump(linear_regressor,f)
with open('saved_model.pkl','rb') as f:
model_linear=pickle.load(f)
y_test_pred_new = model_linear.predict(X_test)
print("\nNew mean absolute error =",round(sm.mean_absolute_error(Y_test,y_test_pred_new),2))
'''
''' 1.3
# Ridge Regressor
ridge_regressor = linear_model.Ridge(alpha=0.01,fit_intercept=True,max_iter=10000)
ridge_regressor.fit(X_train,Y_train)
y_test_pred_ridge = ridge_regressor.predict(X_test)
plt.figure()
plt.scatter(X_test,Y_test,color='black')
plt.plot(X_test,y_test_pred_ridge,color='red',linewidth=4)
plt.title("Ridge Regressor")
plt.show()
print("\nMean absolute error =",round(sm.mean_absolute_error(Y_test,y_test_pred_ridge),2))
print("\nMean squared error =",round(sm.mean_squared_error(Y_test,y_test_pred_ridge),2))
print("\nMedian absolute error =",round(sm.median_absolute_error(Y_test,y_test_pred_ridge),2))
print("\nExplained variance score =",round(sm.explained_variance_score(Y_test,y_test_pred_ridge),2))
print("\nR2 score =",round(sm.r2_score(Y_test,y_test_pred_ridge),2))
''' |
998,889 | c28b937060b60583c8e90ed6f3b54c19280f3544 | lst = list()
print(type(lst))
# Collect five "ok" strings. NOTE: list.append returns None, so the
# original `lst = lst.append(a)` rebound lst to None and crashed with
# AttributeError on the second iteration — never rebind to append's
# return value.
for i in range(0, 5):
    a = "ok"
    lst.append(a)
print(lst)
|
998,890 | 1be66317fd59d0e7bdadaeb7339061494d85ceec | import json
import os
import urllib
from datetime import datetime
import re
from urllib.request import urlopen
import youtube_dl
from bs4 import BeautifulSoup
from gtts import gTTS
from newspaper import Article
gamingStreamIdList =["feed/http://feeds.abcnews.com/abcnews/topstories"]
# gamingStreamIdList =["feed/http://feeds.gawker.com/kotaku/vip"]
# "feed/http://www.gamespot.com/rss/game_updates.php",
# "feed/http://feeds.ign.com/ign/games-all",
# "feed/http://n4g.com/rss/news?channel=&sort=latest",
# "feed/http://www.polygon.com/rss/index.xml"
# "feed/http://feeds.feedburner.com/Techcrunch",
# "feed/http://www.engadget.com/rss-full.xml",
# "feed/http://feeds.wired.com/wired/index",
# "feed/http://feeds.mashable.com/Mashable",
# "feed/http://feeds.arstechnica.com/arstechnica/index/",
# "feed/http://sethgodin.typepad.com/seths_blog/atom.xml",
# "feed/http://feeds2.feedburner.com/businessinsider",
# "feed/http://feeds.feedburner.com/entrepreneur/latest",
# "feed/http://feeds.harvardbusiness.org/harvardbusiness/",
# "feed/http://feeds.feedburner.com/fastcompany/headlines",
# "feed/http://rss.cnn.com/rss/cnn_topstories.rss",
# "feed/http://www.nytimes.com/services/xml/rss/nyt/HomePage.xml",
# "feed/http://www.npr.org/rss/rss.php?id=1001",
# "feed/http://feeds.abcnews.com/abcnews/topstories",
# "feed/http://newsrss.bbc.co.uk/rss/sportonline_world_edition/front_page/rss.xml",
# "feed/http://www.skysports.com/rss/0,20514,11095,00.xml",
# "feed/http://www.skysports.com/rss/0,20514,11095,00.xml",
# "feed/http://newsrss.bbc.co.uk/rss/sportonline_uk_edition/football/rss.xml",
# "feed/http://www.nfl.com/rss/rsslanding?searchString=home",
# "feed/http://www.nba.com/rss/nba_rss.xml",
# "feed/http://rss.news.yahoo.com/rss/celebrity",
# "feed/http://rss.people.com/web/people/rss/topheadlines/index.xml",
# "feed/http://wonderwall.msn.com/rss/all.xml"]
ApiUrl = "http://cloud.feedly.com"
Mixes = "/v3/mixes/contents?"
# Request to get a mix of the best article for a stream, return a JSON data stream
def mixRequest(streamId, count, hours, backfill, locale):
    """Fetch a mix of the best articles for a Feedly stream.

    All parameters are passed as strings and concatenated into the query
    string of the /v3/mixes/contents endpoint.

    Returns the raw JSON response body decoded as UTF-8 text; the caller
    is responsible for parsing it. (The original also parsed the JSON
    here and threw the result away — that redundant parse is removed.)
    """
    request_url = (
        ApiUrl + Mixes
        + 'streamId=' + streamId
        + '&count=' + count
        + '&hours=' + hours
        + '&backfill=' + backfill
        + '&locale=' + locale
    )
    response = urlopen(request_url)
    return response.read().decode('utf8')
def containsYoutubeVideo(url):
    """Return the YouTube video URLs embedded in the page at `url`.

    Scans <iframe> tags for a direct YouTube `src` attribute and for
    `data-recommend-id="youtube://..."` references. Returns an empty
    list when the page cannot be fetched.
    """
    found_urls = []
    try:
        html = urlopen(url)
    except:
        # Unreachable page: report "no videos" rather than crashing.
        return found_urls
    soup = BeautifulSoup(html, "html.parser")
    # BUG FIX: the original appended some results to an undefined global
    # `youtubeUrls` (NameError at runtime) while returning the local
    # `ytubeUrls`; everything is now collected in one local list.
    for iframe in soup.find_all('iframe'):
        if ' src' in str(iframe):
            if 'youtube' in str(iframe.attrs['src']):
                found_urls.append(str(iframe.attrs['src']))
        if 'data-recommend-id' in str(iframe):
            video_id = str(iframe.attrs['data-recommend-id']).replace('youtube://', '')
            found_urls.append('https://www.youtube.com/watch?v=' + video_id)
    return found_urls
# Fetch every configured stream and collect article URLs plus their
# lead-image URLs.
urls = []
print(gamingStreamIdList.__len__())
i = 0
topimageurls = []
for streamid in gamingStreamIdList:
    print(i)
    i += 1
    command = 'http://cloud.feedly.com/v3/mixes/contents?streamId=' + streamid + '&count=20&hours=2&backfill=1&locale=en'
    snippetFilePath = 'polygon.json'
    mixesJsonData = mixRequest(streamid, '50', '2', '1', 'en')
    j = json.loads(mixesJsonData)
    for item in j['items']:
        # BUG FIX: check the item (not the whole response `j`) for an
        # 'alternate' link before dereferencing item['alternate'][0].
        if item.get('alternate'):
            urls.append(item['alternate'][0]['href'])
        if item.get('visual'):
            topimageurls.append(item['visual']['url'])
# downloadpath = os.getcwd()+'/'+str(datetime.now().timestamp())
# os.makedirs(downloadpath)
# for topimageurl in topimageurls:
# topimageurlsplit = topimageurl.split('/')
# imagefilepath = downloadpath+ '/' + topimageurlsplit[topimageurlsplit.__len__()-1]
# try:
# urllib.request.urlretrieve(topimageurl,imagefilepath)
# except:
# continue
# Per-article scrape: look for embedded YouTube videos and download the
# short ones with youtube_dl.
urlsWithYoutubeVideo=[]
youtubeUrls=[]
i=0
# Scratch metadata record (title/description/social tags); only filled in
# by the commented-out experiments below.
data = {}
data["title"] = ""
data["description"] = None
data["favicon"] = None
data["twitter"] = {}
youtubeDownloader = youtube_dl.YoutubeDL({'nocheckcertificate': True, 'max-filesize' : '250m', 'f' : str(22).encode('utf-8')})
for url in urls:
    i+=1
    print(url)
    html = urlopen(url)
    soup = BeautifulSoup(html, "html.parser")
    href_tags = soup.find_all('iframe')
    print (url)
    # <iframe data-recommend-id="youtube://ID"> style embeds.
    if soup.findAll('iframe', attrs={'data-recommend-id': re.compile("^youtube")}):
        for tag in soup.findAll('iframe', attrs={'data-recommend-id': re.compile("^youtube")}):
            if 'data-recommend-id' in tag.attrs:
                id_youtube = str(tag['data-recommend-id']).replace('youtube://','')
                print(' https://www.youtube.com/watch?v='+id_youtube)
                # First fetch metadata only (download=False) to check length.
                try:
                    infos = youtubeDownloader.extract_info('https://www.youtube.com/watch?v=' + id_youtube,False)
                except:
                    continue
                print ('Video duration :' + str(infos['duration']/60) + ' min')
                # Only download videos shorter than 10 minutes (600 s).
                if infos['duration']<600:
                    try:
                        youtubeDownloader.extract_info('https://www.youtube.com/watch?v=' + id_youtube, True)
                    except:
                        continue
                    print ('https://www.youtube.com/watch?v='+id_youtube+' downloading')
    # <iframe class="youtube..."> embeds: print the base src URL only.
    if soup.findAll('iframe', attrs={'class': re.compile("^youtube")}):
        for tag in soup.findAll('iframe', attrs={'class': re.compile("^youtube")}):
            if 'src' in tag.attrs:
                id_youtube = str(tag['src']).split('?')[0]
                print(id_youtube)
    # Plain <video src="http..."> elements.
    if soup.findAll('video', attrs={'src': re.compile("^http")}):
        for tag in soup.findAll('video', attrs={'src': re.compile("^http")}):
            if 'src' in tag.attrs:
                print(' ' + str(tag['src']))
# if soup.findAll('a', attrs={'href' : re.compile("^http")}):
# for tag in soup.findAll('a', attrs={'href' : re.compile("^http")}):
# print(str(tag['href']))
#
# if soup.findAll('div', attrs={'class': re.compile("^player")}):
# for tag in soup.findAll('div', attrs={'class': re.compile("^player")}):
# if 'data-config' in tag.attrs:
# print(' ' + str(tag['data-config']))
# if soup.findAll('meta', attrs={'name': re.compile("^twitter")}):
# for tag in soup.findAll('meta', attrs={'name': re.compile("^twitter")}):
# tag_type = tag['name']
# if 'content' in tag.attrs:
# data["twitter"][tag_type] = tag['content']
# if tag_type == "twitter:description" and data["description"] is None:
# data["description"] = tag["content"]
# # print(data)
#
# if soup.findAll('img', attrs={'src': re.compile("^http")}):
# for tag in soup.findAll('img', attrs={'class': re.compile("^Natural")}):
# if 'data_srcset' in tag.attrs:
# print(tag['data_srcset'])
#
# if soup.findAll('iframe', attrs={'id': re.compile("^twitter")}):
# for tag in soup.findAll('iframe', attrs={'id': re.compile("^twitter")}):
# if 'id' in tag.attrs:
# print(tag['id'])
#
# if soup.findAll('img', attrs={'src': re.compile("^")}):
# for tag in soup.findAll('img', attrs={'src': re.compile("^")}):
# if 'data-webm-src' in tag.attrs:
# print(tag['data-webm-src'])
# for iframe in href_tags:
# print(iframe)
# iframeChildren = iframe.findChildren()
# for iframechild in iframeChildren:
# print(iframechild)
# article = Article(url,keep_article_html=True)
# article.download()
# # print(article.html)
# article.parse()
# with open(str(i)+'.html','w') as article_html:
# article_html.write('<a href='+url+'> link</a>')
# article_html.write(article.html)
# # article_html.write(html)
# print('Title : '+ article.title)
#
# article.nlp()
# print('Keywords : ')
# for keyword in article.keywords :
# print(keyword)
# print('Text : ' + article.summary)
# print('Image links :')
#
# for imageurl in article.images:
# print (imageurl)
# print('Movie links :' )
# for videourl in article.movies :
# print (videourl)
# print('\n\n\n')
#
# for url in urls:
# tubeUrls = containsYoutubeVideo(url)
# if tubeUrls:
# print('url containing youtube video : '+url)
# urlsWithYoutubeVideo.append(url)
# youtubeUrls.append(tubeUrls)
#
# print (urlsWithYoutubeVideo)
|
998,891 | ee851f2ffaf21d781df8beb587a9e0343daf41ac | from agent import Agent
from planet import Planet
from rule import Rule
def main(args):
    """Run the living-game simulation until max age or extinction."""
    population = [Agent(p=0.2) for _ in range(10)]
    rule = Rule(planet=Planet(Resources=2000, N=10),
                agents=population, viz=args.viz)
    for age in range(1, args.max_age):
        rule.tick()
        # Report population at the configured interval.
        if age % args.print_freq == 0:
            print('age {}, {} living!'.format(age, len(rule.agents)))
        # Everyone died: stop early.
        if not rule.agents:
            break
def parse_args():
    """Parse the command-line options for the simulation."""
    import argparse

    parser = argparse.ArgumentParser(description='Living Game')
    parser.add_argument('--print-freq', default=10,
                        type=int, help='print frequency')
    # NOTE(review): any CLI value arrives as a truthy string, so
    # '--viz False' still enables visualization — confirm whether a
    # store_true/store_false flag was intended.
    parser.add_argument('--viz', default=True,
                        help='Visualization game progress')
    parser.add_argument('--max-age', default=10000,
                        type=int, metavar='N', help='max age')
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
main(args)
|
998,892 | 16581ef16eec8cc60d937c33969af4a4c51b3d48 | class Solution:
def containsNearbyDuplicate(self, nums, k):
dic={}
for index,n in enumerate(nums):
if n not in dic:
dic[n]=[]
dic[n].append(index)
for n in dic:
list=dic[n]
for i in range(len(list)-1):
if list[i+1]-list[i]<=k:
return True
return False
s = Solution()
# Regression cases: positional args for containsNearbyDuplicate and the
# expected result.
test = [
    {"input": [[1, 2, 3, 1], 3], "output": True},
    {"input": [[1, 0, 1, 1], 1], "output": True},
]
for t in test:
    r = s.containsNearbyDuplicate(t['input'][0], t['input'][1])
    if r != t['output']:
        print("error:" + str(t) + " out:" + str(r))
# (A stray duplicated call whose result was discarded has been removed.)
998,893 | aa2c20509dcaa5178b6a2bdbe7aaee468f1b45ca | from django.shortcuts import render
from rest_framework import generics, permissions
from user.models import Profile
from django.contrib.auth.models import User
from user.serializers import ProfileSerializer, UserSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def api_root(request, format=None):
    """Browsable API entry point listing the top-level collections."""
    return Response({
        'users': reverse('user-list', request=request, format=format),
        'profiles': reverse('profile-list', request=request, format=format)
    })
class UserList(generics.ListAPIView):
    """Read-only list of all users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class ProfileList(generics.ListCreateAPIView):
    """List all profiles or create a new one."""
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    # permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def perform_create(self, serializer):
        # Attach the requesting user as the profile owner on create.
        serializer.save(owner=self.request.user)
class ProfileDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single profile."""
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    # permission_classes = [permissions.IsAuthenticatedOrReadOnly]
|
998,894 | 652fe7dbff05ebc3699f4827e4521fec27288a1d | from setuptools import setup, find_packages
# Minimal package definition: include every package found under the root.
setup(name='Test', packages=find_packages())
|
998,895 | 8fe12c843c2e0ac72dedbd28a5d770ac3d814712 | from django.db import models
from django.utils import timezone
# Create your models here.
class Sales(models.Model):
    """A time-limited discount offer ("oferta")."""

    # Discount percentage applied while the offer is active.
    descuento = models.IntegerField(verbose_name="Descuento")
    # Offer validity window; both default to "now" at creation time.
    fecha_inicio = models.DateTimeField(verbose_name="Fecha de inicio", default=timezone.now)
    fecha_fin = models.DateTimeField(verbose_name="Fecha fin", default=timezone.now)

    class Meta:
        verbose_name = "oferta"
        verbose_name_plural = "ofertas"
        ordering = ['id']

    def __str__(self):
        # Shown in the admin, e.g. "15%".
        return str(self.descuento) + "%"
998,896 | 5539c27a2727647d2f1d72841e4b6cc473c7a686 | import os
from os import path

# List /tmp entries whose names contain a dash.
for entry in os.listdir('/tmp'):
    if '-' in entry:
        print(entry)

# Group file paths under /tmp by size (the first step of a duplicate-file
# finder). NOTE(review): the original fragment was Python 2 and not
# runnable (`print a` on an undefined name, `continue` outside a loop,
# undefined `filepath`, and printing the None result of append); this
# reconstructs the apparent intent — confirm against the source project.
d_size = {}
for entry in os.listdir('/tmp'):
    filepath = path.join('/tmp', entry)
    try:
        st = os.stat(filepath)
    except OSError:
        # Entry vanished or is unreadable; skip it.
        continue
    print(filepath)
    d_size.setdefault(st.st_size, []).append(filepath)
998,897 | a1c51b56b70e767be8b8ced737f0c0561727ee25 | # Due to some modifications of the Python interface in the latest Caffe version,
# in order to run the code in this file please replace
#
# if ms != self.inputs[in_][1:]:
# raise ValueError('Mean shape incompatible with input shape.')
#
# in caffe_root/python/caffe/io.py line 253-254, with
#
# if ms != self.inputs[in_][1:]:
# print(self.inputs[in_])
# in_shape = self.inputs[in_][1:]
# m_min, m_max = mean.min(), mean.max()
# normal_mean = (mean - m_min) / (m_max - m_min)
# mean = resize_image(normal_mean.transpose((1,2,0)),
# in_shape[1:]).transpose((2,0,1)) * \
# (m_max - m_min) + m_min
#
# Reference: http://stackoverflow.com/questions/28692209/using-gpu-despite-setting-cpu-only-yielding-unexpected-keyword-argument
import numpy as np
import matplotlib.pyplot as plt
# NOTE: the original contained the IPython magic '%matplotlib inline'
# here, which is a SyntaxError in a plain .py file; re-add it only when
# running inside a notebook.
caffe_root = '/home/mshduan/Programs/caffe-master/'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
files_path = './'
def get_mean_image(path):
    """Load a Caffe .binaryproto mean file as a (3, 256, 256) array."""
    proto_obj = caffe.io.caffe_pb2.BlobProto()
    # Use a context manager so the file handle is always closed (the
    # original opened the file and never closed it).
    with open(path, 'rb') as proto_file:
        proto_obj.ParseFromString(proto_file.read())
    means = np.asarray(proto_obj.data)
    return means.reshape(3, 256, 256)
# Paths to the fine-tuned Places205 model definition and weights.
MODEL_FILE = files_path + 'finetuned_places205CNN_deploy.prototxt'
PRETRAINED = files_path + 'places205CNN_det_finetune_iter_3000.caffemodel'
mean = get_mean_image(files_path + 'places205CNN_mean.binaryproto')
# channel_swap (2,1,0) reorders channels (RGB -> BGR for Caffe models);
# raw_scale 255 maps loaded [0,1] pixels back to [0,255].
net = caffe.Classifier(MODEL_FILE, PRETRAINED,mean=mean, channel_swap = (2, 1, 0),raw_scale = 255, image_dims=(256, 256))
caffe.set_mode_cpu()
import os
# test the fine-tuned model on roadworks images
imagePath = "/home/mshduan/Programs/caffe-master/examples/construction_cnn/test/bau/"
pred_list=[]
for image in sorted(os.listdir(imagePath)):
    input_image = caffe.io.load_image(imagePath+image)
    prediction = net.predict([input_image],oversample=True)
    pred_list.append(prediction[0].argmax())
# number of false positives predictions
# NOTE(review): Python 2 idiom — under Python 3, filter() returns an
# iterator and len() would raise TypeError; confirm intended interpreter.
len(filter(lambda x: x==0,pred_list))
# test the fine-tuned model on non-roadworks images
imagePath = "/home/mshduan/Programs/caffe-master/examples/construction_cnn/test/nothing/"
pred_list_2=[]
for image in sorted(os.listdir(imagePath)):
    input_image = caffe.io.load_image(imagePath+image)
    prediction = net.predict([input_image],oversample=True)
    pred_list_2.append(prediction[0].argmax())
###########################################
# copy the incorrectly classified images
###########################################
import shutil
import os
srcpath = '/home/mshduan/Programs/caffe-master/examples/construction_cnn/test/bau/'
dst = '/home/mshduan/Desktop/cons_cnn/wrong_bau/'
count = 0
# Roadworks images predicted as class 0 (misclassified) are copied out.
for image in sorted(os.listdir(srcpath)):
    if pred_list[count] == 0:
        shutil.copy(srcpath+image, dst)
    count += 1
srcpath = '/home/mshduan/Programs/caffe-master/examples/construction_cnn/test/nothing/'
dst = '/home/mshduan/Desktop/cons_cnn/wrong_noting/'
count = 0
# Non-roadworks images predicted as class 1 (misclassified) are copied out.
for image in sorted(os.listdir(srcpath)):
    if pred_list_2[count] == 1:
        shutil.copy(srcpath+image, dst)
    count += 1
|
998,898 | ad71159d1e6af3910669f1b7b7bd4ede6916483b | from library.views.type.type_create import *
from library.views.type.type_update import *
from library.views.type.type_delete import *
|
998,899 | b919d081c2e4f64bf6a731fd0d017ad764f40de9 | #!/bin/python3
import sys
# case가 'MMMMMMMMM' , 'MMMMMMMMMMMMMMMMM' 이런거일때 결국 n제곱..
def solution(members, maleFans, totalCase):
    """Count alignments with no male member facing a male fan.

    members: 'M'/'F' string describing the group.
    maleFans: indices of the male fans in the fan string.
    totalCase: number of possible alignments, len(fans) - len(members) + 1.
    """
    # An alignment index fails if any male member lines up with a male
    # fan. Collect the distinct failing indices in a set (the original
    # used a dict mapping index -> 1 and summed the values).
    fail_cases = set()
    for member_idx, member in enumerate(members):
        if member == 'F':
            continue
        for fan_idx in maleFans:
            case_idx = fan_idx - member_idx
            if 0 <= case_idx < totalCase:
                fail_cases.add(case_idx)
    return totalCase - len(fail_cases)
if __name__ == '__main__':
    # First line of stdin: number of test cases. Each case is two lines:
    # the member string, then the fan string.
    C = int(sys.stdin.readline())
    for _ in range(C):
        members = sys.stdin.readline().rstrip()
        fans = sys.stdin.readline().rstrip()
        # Pre-collect indices of male fans; female fans never clash.
        maleFans = []
        for idx in range(len(fans)):
            fan = fans[idx]
            if fan == 'M':
                maleFans.append(idx)
        totalCase = len(fans)-len(members)+1
        print(solution(members, maleFans, totalCase))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.