id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1
value | extension stringclasses 14
values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12
values | repo_extraction_date stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9,300 | classes.py | rembo10_headphones/headphones/classes.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
#######################################
# Stolen from Sick-Beard's classes.py #
#######################################
import urllib.request, urllib.parse, urllib.error
from .common import USER_AGENT
class HeadphonesURLopener(urllib.request.FancyURLopener):
    # Identify all requests with the Headphones user agent instead of the
    # default urllib one.
    # NOTE(review): FancyURLopener has been deprecated for a long time and is
    # removed in recent Python versions — consider urllib.request.build_opener.
    version = USER_AGENT
class AuthURLOpener(HeadphonesURLopener):
    """
    URLOpener class that supports http auth without needing interactive password entry.
    If the provided username/password don't work it simply fails.
    user: username to use for HTTP auth
    pw: password to use for HTTP auth
    """

    def __init__(self, user, pw):
        self.username = user
        self.password = pw
        # Tracks whether the credentials have already been offered once
        self.numTries = 0
        # Initialize the underlying opener
        urllib.request.FancyURLopener.__init__(self)

    def prompt_user_passwd(self, host, realm):
        """
        Override this function and instead of prompting just give the
        username/password that were provided when the class was instantiated.
        """
        # Any retry means the credentials were rejected: return blanks,
        # which cancels the request instead of looping forever.
        if self.numTries != 0:
            return ('', '')
        # First attempt: hand over the stored credentials.
        self.numTries = 1
        return (self.username, self.password)

    # this is pretty much just a hack for convenience
    def openit(self, url):
        # Reset the retry counter so each open() gets one fresh auth attempt
        self.numTries = 0
        return HeadphonesURLopener.open(self, url)
class SearchResult:
    """
    Represents a search result from an indexer.
    """

    def __init__(self):
        # Provider object that produced this result (-1 until assigned)
        self.provider = -1
        # URL to the NZB/torrent file
        self.url = ""
        # used by some providers to store extra info associated with the result
        self.extraInfo = []
        # quality of the release
        self.quality = -1
        # release name
        self.name = ""

    def __str__(self):
        if self.provider is None:
            return "Invalid provider, unable to print self"
        pieces = [self.provider.name + " @ " + self.url, "Extra Info:"]
        pieces.extend(" " + extra for extra in self.extraInfo)
        return "\n".join(pieces) + "\n"
class NZBSearchResult(SearchResult):
    """
    Regular NZB result with an URL to the NZB
    """
    # Type tag used to dispatch the result to the right downloader
    resultType = "nzb"
class NZBDataSearchResult(SearchResult):
    """
    NZB result where the actual NZB XML data is stored in the extraInfo
    """
    # Type tag used to dispatch the result to the right downloader
    resultType = "nzbdata"
class TorrentSearchResult(SearchResult):
    """
    Torrent result with an URL to the torrent
    """
    # Type tag used to dispatch the result to the right downloader
    resultType = "torrent"
class Proper:
    """
    Container for a 'proper' release candidate (name, URL and date), with
    placeholder metadata fields filled in later by the caller.
    """

    def __init__(self, name, url, date):
        self.name = name
        self.url = url
        self.date = date
        # Metadata below is unknown at construction time
        self.provider = None
        self.quality = -1
        self.tvdbid = -1
        self.season = -1
        self.episode = -1

    def __str__(self):
        return (f"{self.date} {self.name} {self.season}x{self.episode}"
                f" of {self.tvdbid}")
| 3,782 | Python | .py | 101 | 31.09901 | 87 | 0.639506 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,301 | searcher.py | rembo10_headphones/headphones/searcher.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# NZBGet support added by CurlyMo <curlymoo1@gmail.com> as a part of XBian - XBMC on the Raspberry Pi
import os
import re
import string
import random
import urllib.request, urllib.parse, urllib.error
import datetime
import subprocess
import unicodedata
import urllib.parse
from base64 import b16encode, b32decode
from hashlib import sha1
from bencode import encode as bencode
from bencode import decode as bdecode
from pygazelle import api as gazelleapi
from pygazelle import encoding as gazelleencoding
from pygazelle import format as gazelleformat
from pygazelle import release_type as gazellerelease_type
from unidecode import unidecode
import headphones
from headphones.common import USER_AGENT
from headphones.helpers import (
bytes_to_mb,
has_token,
piratesize,
replace_all,
replace_illegal_chars,
sab_replace_dots,
sab_replace_spaces,
sab_sanitize_foldername,
split_string
)
from headphones.types import Result
from headphones import logger, db, classes, sab, nzbget, request
from headphones import (
bandcamp,
deluge,
notifiers,
qbittorrent,
rutracker,
soulseek,
transmission,
utorrent
)
# Magnet to torrent services, for Black hole. Stolen from CouchPotato.
TORRENT_TO_MAGNET_SERVICES = [
'https://itorrents.org/torrent/%s.torrent',
'https://cache.torrentgalaxy.org/get/%s',
'https://www.seedpeer.me/torrent/%s'
]
# Persistent Orpheus.network API object
orpheusobj = None
ruobj = None
# Persistent RED API object
redobj = None
def fix_url(s, charset="utf-8"):
    """
    Return *s* as a properly formatted and percent-encoded URL.

    The path and query components are encoded separately; scheme, netloc
    and fragment are passed through unchanged.
    """
    parts = urllib.parse.urlsplit(s)
    encoded_path = urllib.parse.quote(parts.path, '/%')
    encoded_query = urllib.parse.quote_plus(parts.query, ':&=')
    return urllib.parse.urlunsplit(
        (parts.scheme, parts.netloc, encoded_path, encoded_query, parts.fragment))
def torrent_to_file(target_file, data):
    """
    Write torrent data to file, and change permissions accordingly. Will return
    None in case of a write error. If changing permissions fails, it will
    continue anyway and still return True.
    """
    # Write data to file
    try:
        with open(target_file, "wb") as fp:
            fp.write(data)
    except IOError as e:
        logger.error(
            f"Could not write `{target_file}`: {str(e)}"
        )
        return

    # Try to change permissions
    if headphones.CONFIG.FILE_PERMISSIONS_ENABLED:
        try:
            # FILE_PERMISSIONS is stored as an octal string, e.g. "0644"
            os.chmod(target_file, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
        except OSError as e:
            logger.warn(f"Could not change permissions for `{target_file}`: {e}")
    else:
        # Fix: the closing backtick was missing from this log message
        logger.debug(
            f"Not changing file permissions for `{target_file}`, since it is disabled")

    # Done
    return True
def read_torrent_name(torrent_file, default_name=None):
    """
    Read the torrent file and return the torrent name. If the torrent name
    cannot be determined, it will return the `default_name`.
    """
    # Open file
    try:
        with open(torrent_file, "rb") as fp:
            torrent_info = bdecode(fp.read())
    except IOError:
        logger.error("Unable to open torrent file: %s", torrent_file)
        return

    # Read dictionary
    if torrent_info:
        try:
            return torrent_info["info"]["name"]
        except KeyError as e:
            # Fix: `e` was previously unbound in this handler (it was only
            # bound by the IOError handler above, and Python 3 deletes the
            # exception variable after its block), causing a NameError
            # instead of the intended warning.
            if default_name:
                logger.warning("Couldn't get name from torrent file: %s. "
                               "Defaulting to '%s'", e, default_name)
            else:
                logger.warning("Couldn't get name from torrent file: %s. No "
                               "default given", e)

    # Return default
    return default_name
def calculate_torrent_hash(link, data=None):
    """
    Calculate the torrent hash from a magnet link or data. Raises a ValueError
    when it cannot create a torrent hash given the input data.

    Returns the hash as an uppercase 40-character hex string.
    """
    if link.startswith("magnet:"):
        torrent_hash = re.findall(r"urn:btih:([\w]{32,40})", link)[0]
        if len(torrent_hash) == 32:
            # 32-char hashes are base32-encoded: convert to canonical hex.
            # casefold=True accepts lowercase base32 (the regex matches it,
            # but b32decode would otherwise raise). decode() fixes the
            # previous bytes/str mismatch: b16encode returns bytes on
            # Python 3, while the other branches return str.
            torrent_hash = b16encode(
                b32decode(torrent_hash, casefold=True)).decode("ascii").lower()
    elif data:
        info = bdecode(data)[b"info"]
        torrent_hash = sha1(bencode(info)).hexdigest()
    else:
        raise ValueError("Cannot calculate torrent hash without magnet link "
                         "or data")
    return torrent_hash.upper()
def get_seed_ratio(provider):
    """
    Return the seed ratio (as a float) for the specified provider if
    applicable. Defaults to None in case of an error or when the provider
    has no configured ratio.
    """
    # Fix: initialize up front. Previously, a Torznab provider whose host
    # matched none of the configured extra torznabs left seed_ratio unbound
    # and raised UnboundLocalError below.
    seed_ratio = None

    if provider == 'rutracker.org':
        seed_ratio = headphones.CONFIG.RUTRACKER_RATIO
    elif provider == 'Orpheus.network':
        seed_ratio = headphones.CONFIG.ORPHEUS_RATIO
    elif provider == 'Redacted':
        seed_ratio = headphones.CONFIG.REDACTED_RATIO
    elif provider == 'The Pirate Bay':
        seed_ratio = headphones.CONFIG.PIRATEBAY_RATIO
    elif provider.startswith("Torznab"):
        # Torznab providers are encoded as "Torznab|<name>|<host>"
        host = provider.split('|')[2]
        if host == headphones.CONFIG.TORZNAB_HOST:
            seed_ratio = headphones.CONFIG.TORZNAB_RATIO
        else:
            for torznab in headphones.CONFIG.get_extra_torznabs():
                if host == torznab[0]:
                    seed_ratio = torznab[2]
                    break

    if seed_ratio is not None:
        try:
            seed_ratio = float(seed_ratio)
        except ValueError:
            logger.warn("Could not get seed ratio for %s" % provider)

    return seed_ratio
def get_provider_name(provider):
    """
    Return a human-readable name for the given provider identifier.

    Torznab providers ("Torznab|<name>|<host>") become "Torznab <name>";
    plain URLs are reduced to everything after the scheme; anything else
    is returned unchanged.
    """
    if provider.startswith("Torznab"):
        return "Torznab " + provider.split("|")[1]
    if provider.startswith(("http://", "https://")):
        return provider.split("//")[1]
    return provider
def pirate_bay_get_magnet(info_hash, torrent_name):
    """
    Build a magnet URI for a Pirate Bay result from its info hash and
    display name, appending a fixed list of public trackers.
    """
    tracker_urls = (
        "udp://tracker.coppersurfer.tk:6969/announce",
        "udp://9.rarbg.me:2850/announce",
        "udp://9.rarbg.to:2920/announce",
        "udp://tracker.opentrackr.org:1337",
        "udp://tracker.internetwarriors.net:1337/announce",
        "udp://tracker.leechers-paradise.org:6969/announce",
        "udp://tracker.pirateparty.gr:6969/announce",
        "udp://tracker.cyberia.is:6969/announce",
    )
    encoded_name = urllib.parse.quote(torrent_name, safe="")
    tracker_params = "".join(
        "&tr=" + urllib.parse.quote(tracker, safe="") for tracker in tracker_urls)
    return f"magnet:?xt=urn:btih:{info_hash}&dn={encoded_name}{tracker_params}"
def searchforalbum(albumid=None, new=False, losslessOnly=False,
                   choose_specific_download=False):
    """
    Entry point for album searching.

    albumid: search only this album; when None, search every album whose
        status is "Wanted" or "Wanted Lossless".
    new: passed through to the search pipeline (marks the search as new so
        already-snatched URLs are skipped downstream).
    losslessOnly: restrict the search to lossless releases.
    choose_specific_download: return the raw result list for manual
        selection instead of automatically snatching the best result.
    """
    logger.info('Searching for wanted albums')
    myDB = db.DBConnection()

    if not albumid:
        results = myDB.select(
            'SELECT * from albums WHERE Status="Wanted" OR Status="Wanted Lossless"')

        for album in results:
            # Cannot build a search term without both title and artist
            if not album['AlbumTitle'] or not album['ArtistName']:
                logger.warn('Skipping release %s. No title available', album['AlbumID'])
                continue

            if headphones.CONFIG.WAIT_UNTIL_RELEASE_DATE and album['ReleaseDate']:
                release_date = strptime_musicbrainz(album['ReleaseDate'])
                if not release_date:
                    logger.warn("No valid date for: %s. Skipping automatic search" %
                                album['AlbumTitle'])
                    continue
                elif release_date > datetime.datetime.today():
                    # Not out yet: wait for the release date to pass
                    logger.info("Skipping: %s. Waiting for release date of: %s" % (
                        album['AlbumTitle'], album['ReleaseDate']))
                    continue

            new = True

            # NOTE(review): once one album in the loop sets this flag it stays
            # set for all subsequent albums — confirm that is intended.
            if album['Status'] == "Wanted Lossless":
                losslessOnly = True

            logger.info('Searching for "%s - %s" since it is marked as wanted' % (
                album['ArtistName'], album['AlbumTitle']))
            do_sorted_search(album, new, losslessOnly)

    elif albumid and choose_specific_download:
        # Manual search: collect and return all results for the user to pick
        album = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
        logger.info('Searching for "%s - %s"' % (album['ArtistName'], album['AlbumTitle']))
        results = do_sorted_search(album, new, losslessOnly, choose_specific_download=True)
        return results

    else:
        album = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
        logger.info('Searching for "%s - %s" since it was marked as wanted' % (
            album['ArtistName'], album['AlbumTitle']))
        do_sorted_search(album, new, losslessOnly)

    logger.info('Search for wanted albums complete')
def strptime_musicbrainz(date_str):
    """
    Release date as returned by Musicbrainz may contain the full date (Year-Month-Day)
    but it may as well be just Year-Month or even just the year.

    Args:
        date_str: the date as a string (ex: "2003-05-01", "2003-03", "2003")

    Returns:
        The more accurate datetime object we can create or None if parse failed
    """
    acceptable_formats = ('%Y-%m-%d', '%Y-%m', '%Y')
    for date_format in acceptable_formats:
        try:
            return datetime.datetime.strptime(date_str, date_format)
        except (ValueError, TypeError):
            # Fix: was a bare `except`, which would also swallow
            # KeyboardInterrupt/SystemExit. ValueError covers format
            # mismatches, TypeError covers a non-string date_str (e.g. None).
            pass
    return None
def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
    """
    Query the configured providers for one album, filter and sort the
    results, and hand the best one to the downloader.

    Provider-group order is controlled by headphones.CONFIG.PREFER_TORRENTS:
    0 = NZBs first, 1 = torrents first, 2 = Soulseek first; any other value
    queries all groups and merges the results. Each fallback group is only
    tried when the previous one produced no results.

    When `choose_specific_download` is True the merged, unfiltered result
    list is returned instead of snatching anything.
    """
    # A provider group is only queried when it is configured; NZBs also
    # require a configured downloader to be of any use.
    NZB_PROVIDERS = (headphones.CONFIG.HEADPHONES_INDEXER or
                     headphones.CONFIG.NEWZNAB or
                     headphones.CONFIG.NZBSORG or
                     headphones.CONFIG.OMGWTFNZBS)
    NZB_DOWNLOADERS = (headphones.CONFIG.SAB_HOST or
                       headphones.CONFIG.BLACKHOLE_DIR or
                       headphones.CONFIG.NZBGET_HOST)
    TORRENT_PROVIDERS = (headphones.CONFIG.TORZNAB or
                         headphones.CONFIG.PIRATEBAY or
                         headphones.CONFIG.RUTRACKER or
                         headphones.CONFIG.ORPHEUS or
                         headphones.CONFIG.REDACTED)
    BANDCAMP = 1 if (headphones.CONFIG.BANDCAMP and
                     headphones.CONFIG.BANDCAMP_DIR) else 0
    SOULSEEK = 1 if (headphones.CONFIG.SOULSEEK and
                     headphones.CONFIG.SOULSEEK_API_URL and
                     headphones.CONFIG.SOULSEEK_API_KEY and
                     headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR and
                     headphones.CONFIG.SOULSEEK_INCOMPLETE_DOWNLOAD_DIR) else 0

    results = []
    myDB = db.DBConnection()
    # Total duration of the album's tracks; used downstream for
    # bitrate-based size filtering (None when no tracks are known).
    albumlength = myDB.select('SELECT sum(TrackDuration) from tracks WHERE AlbumID=?',
                              [album['AlbumID']])[0][0]

    # NZBs
    if headphones.CONFIG.PREFER_TORRENTS == 0 and not choose_specific_download:

        if NZB_PROVIDERS and NZB_DOWNLOADERS:
            results = searchNZB(album, new, losslessOnly, albumlength)

        if not results and TORRENT_PROVIDERS:
            results = searchTorrent(album, new, losslessOnly, albumlength)

        if not results and BANDCAMP:
            results = searchBandcamp(album, new, albumlength)

        if not results and SOULSEEK:
            results = searchSoulseek(album, new, losslessOnly, albumlength)

    # Torrents
    elif headphones.CONFIG.PREFER_TORRENTS == 1 and not choose_specific_download:

        if TORRENT_PROVIDERS:
            results = searchTorrent(album, new, losslessOnly, albumlength)

        if not results and NZB_PROVIDERS and NZB_DOWNLOADERS:
            results = searchNZB(album, new, losslessOnly, albumlength)

        if not results and BANDCAMP:
            results = searchBandcamp(album, new, albumlength)

        if not results and SOULSEEK:
            results = searchSoulseek(album, new, losslessOnly, albumlength)

    # Soulseek
    elif headphones.CONFIG.PREFER_TORRENTS == 2 and not choose_specific_download:

        results = searchSoulseek(album, new, losslessOnly, albumlength)

        if not results and NZB_PROVIDERS and NZB_DOWNLOADERS:
            results = searchNZB(album, new, losslessOnly, albumlength)

        if not results and TORRENT_PROVIDERS:
            results = searchTorrent(album, new, losslessOnly, albumlength)

        if not results and BANDCAMP:
            results = searchBandcamp(album, new, albumlength)

    else:
        # No Preference
        nzb_results = []
        torrent_results = []
        bandcamp_results = []
        soulseek_results = []

        if NZB_PROVIDERS and NZB_DOWNLOADERS:
            nzb_results = searchNZB(album, new, losslessOnly,
                                    albumlength, choose_specific_download)

        if TORRENT_PROVIDERS:
            torrent_results = searchTorrent(album, new, losslessOnly,
                                            albumlength, choose_specific_download)

        if BANDCAMP:
            bandcamp_results = searchBandcamp(album, new, albumlength)

        # TODO: get this working
        # if SOULSEEK:
        #     soulseek_results = searchSoulseek(album, new, losslessOnly,
        #                                       albumlength, choose_specific_download)

        results = nzb_results + torrent_results + bandcamp_results + soulseek_results

    if choose_specific_download:
        return results

    # Filter all results that do not comply
    results = [result for result in results if result.matches]

    # Sort the remaining results
    sorted_search_results = sort_search_results(results, album, new, albumlength)

    if not sorted_search_results:
        return

    logger.info(
        "Making sure we can download the best result: "
        f"{sorted_search_results[0].title} from {get_provider_name(sorted_search_results[0].provider)}"
    )
    (data, result) = preprocess(sorted_search_results)

    if data and result:
        send_to_downloader(data, result, album)
def more_filtering(results, album, albumlength, new):
    """
    Apply size-window and already-snatched filtering to search results.

    A target size window is derived from the album length and the configured
    bitrate limits; results outside the window are dropped (lossless results
    over the limit may be kept when PREFERRED_BITRATE_ALLOW_LOSSLESS is set).
    When `new` is True, results whose URL already appears in the snatched
    table are dropped as well. Returns the filtered list.
    """
    low_size_limit = None
    high_size_limit = None
    allow_lossless = False
    myDB = db.DBConnection()

    # Lossless - ignore results if target size outside bitrate range
    if headphones.CONFIG.PREFERRED_QUALITY == 3 and albumlength and (
            headphones.CONFIG.LOSSLESS_BITRATE_FROM or headphones.CONFIG.LOSSLESS_BITRATE_TO):
        # albumlength appears to be in milliseconds; kbps * 128 gives
        # bytes per second — TODO(review) confirm the unit.
        if headphones.CONFIG.LOSSLESS_BITRATE_FROM:
            low_size_limit = albumlength / 1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_FROM) * 128
        if headphones.CONFIG.LOSSLESS_BITRATE_TO:
            high_size_limit = albumlength / 1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_TO) * 128

    # Preferred Bitrate - ignore results if target size outside % buffer
    elif headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE:
        logger.debug('Target bitrate: %s kbps' % headphones.CONFIG.PREFERRED_BITRATE)
        if albumlength:
            targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
            logger.info('Target size: %s' % bytes_to_mb(targetsize))
            if headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER:
                low_size_limit = targetsize - targetsize * int(
                    headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER) / 100
            if headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER:
                high_size_limit = targetsize + targetsize * int(
                    headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER) / 100
                if headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
                    allow_lossless = True

    newlist = []

    for result in results:
        if low_size_limit and result.size < low_size_limit:
            logger.info(
                f"{result.title} from {get_provider_name(result.provider)} is too small for this album. "
                f"(Size: {bytes_to_mb(result.size)}, MinSize: {bytes_to_mb(low_size_limit)})"
            )
            continue

        if high_size_limit and result.size > high_size_limit:
            logger.info(
                f"{result.title} from {get_provider_name(result.provider)} is too large for this album. "
                f"(Size: {bytes_to_mb(result.size)}, MaxSize: {bytes_to_mb(high_size_limit)})"
            )
            # Keep lossless results if there are no good lossy matches
            if not (allow_lossless and 'flac' in result.title.lower()):
                continue

        if new:
            # Skip anything we have snatched before (matched on URL)
            alreadydownloaded = myDB.select(
                "SELECT * from snatched WHERE URL=?", [result.url]
            )
            if len(alreadydownloaded):
                logger.info(
                    f"{result.title} has already been downloaded from "
                    f"{get_provider_name(result.provider)}. Skipping."
                )
                continue

        newlist.append(result)

    return newlist
def sort_by_priority_then_size(rs):
    """
    Given (result, priority) pairs, return the results ordered best-first:
    matching results before non-matching, then higher priority, then
    larger size.
    """
    ordered = sorted(
        rs,
        key=lambda pair: (pair[0].matches, pair[1], pair[0].size),
        reverse=True
    )
    return [result for result, _ in ordered]
def sort_search_results(resultlist, album, new, albumlength):
    """
    Order search results best-first.

    Results containing configured "preferred words" get a priority boost.
    In preferred-bitrate mode (PREFERRED_QUALITY == 2) lossy results are
    ranked by closeness to the target size computed from `albumlength`;
    lossless results are used as a fallback when allowed. In all other
    modes results are sorted by match flag, priority, then size.
    Returns the ordered result list, or None when nothing is usable.
    """
    if new and not len(resultlist):
        logger.info(
            'No more results found for: %s - %s' % (album['ArtistName'], album['AlbumTitle']))
        return None

    # Add a priority if it has any of the preferred words
    results_with_priority = []

    preferred_words = split_string(headphones.CONFIG.PREFERRED_WORDS)

    for result in resultlist:
        priority = 0
        for word in preferred_words:
            # Earlier words in the preferred list score higher.
            # NOTE(review): this is an exact match against the whole title or
            # provider string, not a substring test — confirm intended.
            if word.lower() in [result.title.lower(), result.provider.lower()]:
                priority += len(preferred_words) - preferred_words.index(word)

        results_with_priority.append((result, priority))

    if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE:
        try:
            targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128

            if not targetsize:
                logger.info(
                    f"No track information for {album['ArtistName']} - "
                    f"{album['AlbumTitle']}. Defaulting to highest quality"
                )
                return sort_by_priority_then_size(results_with_priority)
            else:
                lossy_results_with_delta = []
                lossless_results = []

                for result, priority in results_with_priority:
                    # Add lossless results to the "flac list" which we can use if there are no good lossy matches
                    if 'flac' in result.title.lower():
                        lossless_results.append((result, priority))
                    else:
                        # delta = distance from the ideal size for the target bitrate
                        delta = abs(targetsize - result.size)
                        lossy_results_with_delta.append((result, priority, delta))

                if len(lossy_results_with_delta):
                    # Sort: matches first, then priority, then smallest delta
                    return list(map(lambda x: x[0],
                                    sorted(
                                        lossy_results_with_delta,
                                        key=lambda x: (-x[0].matches, -x[1], x[2])
                                    )
                                    ))

                if (
                    not len(lossy_results_with_delta)
                    and len(lossless_results)
                    and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS
                ):
                    logger.info(
                        "Since there were no appropriate lossy matches "
                        "(and at least one lossless match), going to use "
                        "lossless instead"
                    )
                    return sort_by_priority_then_size(lossless_results)

        except Exception:
            # Most likely albumlength is None (no track info in the DB)
            logger.exception('Unhandled exception')
            logger.info(
                f"No track information for {album['ArtistName']} - "
                f"{album['AlbumTitle']}. Defaulting to highest quality"
            )

            return sort_by_priority_then_size(results_with_priority)
    else:
        return sort_by_priority_then_size(results_with_priority)

    # Reached only when bitrate mode found neither lossy nor usable lossless
    logger.info(
        f"No appropriate matches found for {album['ArtistName']} - "
        f"{album['AlbumTitle']}"
    )
    return None
def get_year_from_release_date(release_date):
    """
    Return the 4-character year prefix of a release date string, or an
    empty string when the date is missing (e.g. None).
    """
    try:
        return release_date[:4]
    except TypeError:
        return ''
def searchBandcamp(album, new=False, albumlength=None):
    # Thin wrapper so Bandcamp matches the other search* provider signatures;
    # `new` and `albumlength` are accepted but not used by bandcamp.search.
    return bandcamp.search(album)
def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
              choose_specific_download=False):
    """
    Search all configured NZB providers (Headphones Indexer, Newznab hosts,
    nzbs.org, omgwtfnzbs) for the given album.

    Builds a cleaned search term from the artist/album (or uses the album's
    stored SearchTerm), queries each enabled provider, collects results as
    Result tuples, then verifies and size-filters them. Returns the list of
    surviving results.
    """
    reldate = album['ReleaseDate']

    year = get_year_from_release_date(reldate)

    # Characters that tend to break indexer searches are stripped/replaced
    replacements = {
        '...': '',
        ' & ': ' ',
        ' = ': ' ',
        '?': '',
        '$': 's',
        ' + ': ' ',
        '"': '',
        ',': '',
        '*': '',
        '.': '',
        ':': ''
    }

    cleanalbum = unidecode(replace_all(album['AlbumTitle'], replacements)).strip()
    cleanartist = unidecode(replace_all(album['ArtistName'], replacements)).strip()

    # Use the provided search term if available, otherwise build a search term
    if album['SearchTerm']:
        term = album['SearchTerm']

    elif album['Type'] == 'part of':
        term = cleanalbum + " " + year

    else:
        # FLAC usually doesn't have a year for some reason so leave it out.
        # Various Artist albums might be listed as VA, so I'll leave that out too
        # Only use the year if the term could return a bunch of different albums, i.e. self-titled albums
        if album['ArtistName'] in album['AlbumTitle'] or len(album['ArtistName']) < 4 or len(
                album['AlbumTitle']) < 4:
            term = cleanartist + ' ' + cleanalbum + ' ' + year
        elif album['ArtistName'] == 'Various Artists':
            term = cleanalbum + ' ' + year
        else:
            term = cleanartist + ' ' + cleanalbum

    # Replace bad characters in the term
    term = re.sub(r'[\.\-\/]', r' ', term)
    artistterm = re.sub(r'[\.\-\/]', r' ', cleanartist)

    # If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
    if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
        allow_lossless = True
    else:
        allow_lossless = False

    logger.debug("Using search term: %s" % term)

    resultlist = []

    if headphones.CONFIG.HEADPHONES_INDEXER:
        provider = "headphones"

        # Newznab categories: 3040 = lossless audio, 3010 = mp3, 3030 = audiobook
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
            categories = "3040"
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
            categories = "3040,3010"
        else:
            categories = "3010"

        if album['Type'] == 'Other':
            logger.info("Album type is audiobook/spokenword. Using audiobook category")
            categories = "3030"

        # Request results
        logger.info('Searching Headphones Indexer with search term: %s' % term)

        headers = {'User-Agent': USER_AGENT}
        params = {
            "t": "search",
            "cat": categories,
            "apikey": '964d601959918a578a670984bdee9357',
            "maxage": headphones.CONFIG.USENET_RETENTION,
            "q": term
        }

        data = request.request_feed(
            url="https://indexer.codeshy.com/api",
            params=params, headers=headers,
            auth=(headphones.CONFIG.HPUSER, headphones.CONFIG.HPPASS)
        )

        # Process feed
        if data:
            if not len(data.entries):
                logger.info("No results found from %s for %s" % ('Headphones Index', term))
            else:
                for item in data.entries:
                    try:
                        url = item.link
                        title = item.title
                        size = int(item.links[1]['length'])

                        resultlist.append(Result(title, size, url, provider, 'nzb', True))
                        logger.info('Found %s. Size: %s' % (title, bytes_to_mb(size)))
                    except Exception as e:
                        logger.error("An unknown error occurred trying to parse the feed: %s" % e)

    if headphones.CONFIG.NEWZNAB:
        provider = "newznab"
        newznab_hosts = []

        if headphones.CONFIG.NEWZNAB_HOST and headphones.CONFIG.NEWZNAB_ENABLED:
            newznab_hosts.append((headphones.CONFIG.NEWZNAB_HOST, headphones.CONFIG.NEWZNAB_APIKEY,
                                  headphones.CONFIG.NEWZNAB_ENABLED))

        for newznab_host in headphones.CONFIG.get_extra_newznabs():
            # Extra hosts are (host, apikey, enabled) tuples; enabled may be
            # stored as the string '1' or the int 1
            if newznab_host[2] == '1' or newznab_host[2] == 1:
                newznab_hosts.append(newznab_host)

        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
            categories = "3040"
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
            categories = "3040,3010"
        else:
            categories = "3010"

        if album['Type'] == 'Other':
            categories = "3030"
            logger.info("Album type is audiobook/spokenword. Using audiobook category")

        for newznab_host in newznab_hosts:

            provider = newznab_host[0]

            # Add a little mod for kere.ws
            # NOTE(review): `categories` is mutated here and not reset per
            # host, so the extra kere.ws categories leak into subsequent
            # hosts in this loop — confirm intended.
            if newznab_host[0] == "https://kere.ws":
                if categories == "3040":
                    categories = categories + ",4070"
                elif categories == "3040,3010":
                    categories = categories + ",4070,4010"
                elif categories == "3010":
                    categories = categories + ",4010"
                else:
                    categories = categories + ",4050"

            # Request results
            logger.info('Parsing results from %s using search term: %s' % (newznab_host[0], term))

            headers = {'User-Agent': USER_AGENT}
            params = {
                "t": "search",
                "apikey": newznab_host[1],
                "cat": categories,
                "maxage": headphones.CONFIG.USENET_RETENTION,
                "q": term
            }

            data = request.request_feed(
                url=newznab_host[0] + '/api?',
                params=params, headers=headers
            )

            # Process feed
            if data:
                if not len(data.entries):
                    logger.info("No results found from %s for %s", newznab_host[0], term)
                else:
                    for item in data.entries:
                        try:
                            url = item.link
                            title = item.title
                            size = int(item.links[1]['length'])
                            # Require every word of the search term in the title
                            if all(word.lower() in title.lower() for word in term.split()):
                                logger.info(
                                    'Found %s. Size: %s' % (title, bytes_to_mb(size)))
                                resultlist.append(Result(title, size, url, provider, 'nzb', True))
                            else:
                                logger.info('Skipping %s, not all search term words found' % title)

                        except Exception as e:
                            logger.exception(
                                "An unknown error occurred trying to parse the feed: %s" % e)

    if headphones.CONFIG.NZBSORG:
        provider = "nzbsorg"
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
            categories = "3040"
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
            categories = "3040,3010"
        else:
            categories = "3010"

        if album['Type'] == 'Other':
            categories = "3030"
            logger.info("Album type is audiobook/spokenword. Using audiobook category")

        headers = {'User-Agent': USER_AGENT}
        params = {
            "t": "search",
            "apikey": headphones.CONFIG.NZBSORG_HASH,
            "cat": categories,
            "maxage": headphones.CONFIG.USENET_RETENTION,
            "q": term
        }

        data = request.request_feed(
            url='https://beta.nzbs.org/api',
            params=params, headers=headers,
            timeout=5
        )

        logger.info('Parsing results from nzbs.org using search term: %s' % term)

        # Process feed
        if data:
            if not len(data.entries):
                logger.info("No results found from nzbs.org for %s" % term)
            else:
                for item in data.entries:
                    try:
                        url = item.link
                        title = item.title
                        size = int(item.links[1]['length'])

                        resultlist.append(Result(title, size, url, provider, 'nzb', True))
                        logger.info('Found %s. Size: %s' % (title, bytes_to_mb(size)))
                    except Exception as e:
                        logger.exception("Unhandled exception while parsing feed")

    if headphones.CONFIG.OMGWTFNZBS:
        provider = "omgwtfnzbs"

        # omgwtfnzbs uses its own category scheme: 22 = lossless, 7 = mp3,
        # 29 = audiobook
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
            categories = "22"
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
            categories = "22,7"
        else:
            categories = "7"

        if album['Type'] == 'Other':
            categories = "29"
            logger.info("Album type is audiobook/spokenword. Searching all music categories")

        # Request results
        logger.info('Parsing results from omgwtfnzbs using search term: %s' % term)

        headers = {'User-Agent': USER_AGENT}
        params = {
            "user": headphones.CONFIG.OMGWTFNZBS_UID,
            "api": headphones.CONFIG.OMGWTFNZBS_APIKEY,
            "catid": categories,
            "retention": headphones.CONFIG.USENET_RETENTION,
            "search": term
        }

        data = request.request_json(
            url='https://api.omgwtfnzbs.me/json/',
            params=params, headers=headers
        )

        # Parse response
        if data:
            if 'notice' in data:
                logger.info("No results returned from omgwtfnzbs: %s" % data['notice'])
            else:
                for item in data:
                    try:
                        url = item['getnzb']
                        title = item['release']
                        size = int(item['sizebytes'])

                        resultlist.append(Result(title, size, url, provider, 'nzb', True))
                        logger.info('Found %s. Size: %s', title, bytes_to_mb(size))
                    except Exception as e:
                        logger.exception("Unhandled exception")

    # attempt to verify that this isn't a substring result
    # when looking for "Foo - Foo" we don't want "Foobar"
    # this should be less of an issue when it isn't a self-titled album so we'll only check vs artist
    #
    # Also will filter flac & remix albums if not specifically looking for it
    # This code also checks the ignored words and required words
    results = [result for result in resultlist if
               verifyresult(result.title, artistterm, term, losslessOnly)]

    # Additional filtering for size etc
    if results and not choose_specific_download:
        results = more_filtering(results, album, albumlength, new)

    return results
def send_to_downloader(data, result, album):
    """Hand a chosen search result off to the configured downloader.

    Dispatches on ``result.kind``:
      - 'nzb': send to NZBget (NZB_DOWNLOADER == 1), SABnzbd (== 0) or write
        the .nzb into the blackhole directory (anything else).
      - 'bandcamp' / 'soulseek': delegate to the respective helper modules.
      - otherwise (torrent): blackhole file/magnet handling, or hand off to
        Transmission / Deluge / uTorrent / qBittorrent depending on
        TORRENT_DOWNLOADER.

    On success the album row is marked "Snatched", one (or two, when a seed
    ratio applies) rows are inserted into the `snatched` table, and every
    enabled notifier with its *_ONSNATCH flag set is fired.

    data: raw NZB/torrent payload (bytes) already fetched for the result.
    result: the selected search Result.
    album: album DB row; accessed both by key and by position below.

    Returns None; returns early (without DB updates or notifications) on any
    downloader failure.
    """
    logger.info(
        f"Found best result from {get_provider_name(result.provider)}: <a href=\"{result.url}\">"
        f"{result.title}</a> - {bytes_to_mb(result.size)}"
    )
    # Get rid of any dodgy chars here so we can prevent sab from renaming our downloads
    kind = result.kind
    seed_ratio = None
    torrentid = None
    if kind == 'nzb':
        folder_name = sab_sanitize_foldername(result.title)
        if headphones.CONFIG.NZB_DOWNLOADER == 1:
            # NZBget
            nzb = classes.NZBDataSearchResult()
            nzb.extraInfo.append(data)
            nzb.name = folder_name
            if not nzbget.sendNZB(nzb):
                return
        elif headphones.CONFIG.NZB_DOWNLOADER == 0:
            # SABnzbd
            nzb = classes.NZBDataSearchResult()
            nzb.extraInfo.append(data)
            nzb.name = folder_name
            if not sab.sendNZB(nzb):
                return
            # If we sent the file to sab, we can check how it was renamed and insert that into the snatched table
            (replace_spaces, replace_dots) = sab.checkConfig()
            if replace_dots:
                folder_name = sab_replace_dots(folder_name)
            if replace_spaces:
                folder_name = sab_replace_spaces(folder_name)
        else:
            # NZB blackhole directory: write the raw payload to disk,
            # honoring the configured umask.
            nzb_name = folder_name + '.nzb'
            download_path = os.path.join(headphones.CONFIG.BLACKHOLE_DIR, nzb_name)
            try:
                prev = os.umask(headphones.UMASK)
                with open(download_path, 'wb') as fp:
                    fp.write(data)
                os.umask(prev)
                logger.info('File saved to: %s', nzb_name)
            except Exception as e:
                logger.error('Couldn\'t write NZB file: %s', e)
                return
    elif kind == 'bandcamp':
        folder_name = bandcamp.download(album, result)
        logger.info("Setting folder_name to: {}".format(folder_name))
    elif kind == 'soulseek':
        try:
            soulseek.download(user=result.user, filelist=result.files)
            # folder_name encodes the remote user so post-processing can find it
            folder_name = '{' + result.user + '}' + result.folder
            logger.info(f"Soulseek folder name: {result.folder}")
        except Exception as e:
            logger.error(f"Soulseek error, check server logs: {e}")
            return
    else:
        # Torrent path: default folder name from artist/album/year; most
        # clients below overwrite it with the client-reported torrent name.
        folder_name = '%s - %s [%s]' % (
            unidecode(album['ArtistName']).replace('/', '_'),
            unidecode(album['AlbumTitle']).replace('/', '_'),
            get_year_from_release_date(album['ReleaseDate']))
        # Blackhole
        if headphones.CONFIG.TORRENT_DOWNLOADER == 0:
            # Get torrent name from .torrent, this is usually used by the torrent client as the folder name
            torrent_name = replace_illegal_chars(folder_name) + '.torrent'
            download_path = os.path.join(headphones.CONFIG.TORRENTBLACKHOLE_DIR, torrent_name)
            if result.url.lower().startswith("magnet:"):
                if headphones.CONFIG.MAGNET_LINKS == 1:
                    # Open magnet with the OS default handler
                    try:
                        if headphones.SYS_PLATFORM == 'win32':
                            os.startfile(result.url)
                        elif headphones.SYS_PLATFORM == 'darwin':
                            subprocess.Popen(["open", result.url], stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
                        else:
                            subprocess.Popen(["xdg-open", result.url], stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
                        # Gonna just take a guess at this..... Is there a better way to find this out?
                        folder_name = result.title
                    except Exception as e:
                        logger.error("Error opening magnet link: %s" % str(e))
                        return
                elif headphones.CONFIG.MAGNET_LINKS == 2:
                    # Convert magnet to .torrent via public cache services.
                    # Procedure adapted from CouchPotato
                    torrent_hash = calculate_torrent_hash(result.url)
                    # Randomize list of services
                    services = TORRENT_TO_MAGNET_SERVICES[:]
                    random.shuffle(services)
                    headers = {'User-Agent': USER_AGENT}
                    for service in services:
                        data = request.request_content(service % torrent_hash, headers=headers)
                        if data:
                            if not torrent_to_file(download_path, data):
                                return
                            # Extract folder name from torrent
                            folder_name = read_torrent_name(
                                download_path,
                                result.title)
                            # Break for loop
                            break
                    else:
                        # No service succeeded (for-else: loop never hit `break`)
                        logger.warning("Unable to convert magnet with hash "
                                       "'%s' into a torrent file.", torrent_hash)
                        return
                elif headphones.CONFIG.MAGNET_LINKS == 3:
                    # Embed magnet directly; no folder name can be derived,
                    # hence the early return before DB/notification code.
                    torrent_to_file(download_path, data)
                    return
                else:
                    logger.error("Cannot save magnet link in blackhole. "
                                 "Please switch your torrent downloader to "
                                 "Transmission, uTorrent or Deluge, or allow Headphones "
                                 "to open or convert magnet links")
                    return
            else:
                if not torrent_to_file(download_path, data):
                    return
                # Extract folder name from torrent
                folder_name = read_torrent_name(download_path, result.title)
                if folder_name:
                    logger.info('Torrent folder name: %s' % folder_name)
        elif headphones.CONFIG.TORRENT_DOWNLOADER == 1:
            logger.info("Sending torrent to Transmission")
            # Add torrent
            if result.provider == 'rutracker.org':
                torrentid = transmission.addTorrent('', data)
            else:
                torrentid = transmission.addTorrent(result.url)
            if not torrentid:
                logger.error("Error sending torrent to Transmission. Are you sure it's running?")
                return
            folder_name = transmission.getName(torrentid)
            if folder_name:
                logger.info('Torrent name: %s' % folder_name)
            else:
                logger.error('Torrent name could not be determined')
                return
            # Set Seed Ratio
            seed_ratio = get_seed_ratio(result.provider)
            if seed_ratio is not None:
                transmission.setSeedRatio(torrentid, seed_ratio)
        elif headphones.CONFIG.TORRENT_DOWNLOADER == 3:  # Deluge
            logger.info("Sending torrent to Deluge")
            try:
                # Add torrent
                if result.provider == 'rutracker.org':
                    torrentid = deluge.addTorrent('', data)
                else:
                    torrentid = deluge.addTorrent(result.url)
                if not torrentid:
                    logger.error("Error sending torrent to Deluge. Are you sure it's running? Maybe the torrent already exists?")
                    return
                # Set Label
                if headphones.CONFIG.DELUGE_LABEL:
                    deluge.setTorrentLabel({'hash': torrentid})
                # Set Seed Ratio
                seed_ratio = get_seed_ratio(result.provider)
                if seed_ratio is not None:
                    deluge.setSeedRatio({'hash': torrentid, 'ratio': seed_ratio})
                # Get folder name from Deluge, it's usually the torrent name
                folder_name = deluge.getTorrentFolder({'hash': torrentid})
                if folder_name:
                    logger.info('Torrent folder name: %s' % folder_name)
                else:
                    logger.error('Torrent folder name could not be determined')
                    return
            except Exception as e:
                logger.error('Error sending torrent to Deluge: %s' % str(e))
        elif headphones.CONFIG.TORRENT_DOWNLOADER == 2:
            logger.info("Sending torrent to uTorrent")
            # Add torrent
            if result.provider == 'rutracker.org':
                ruobj.utorrent_add_file(data)
            else:
                utorrent.addTorrent(result.url)
            # Get hash
            torrentid = calculate_torrent_hash(result.url, data)
            if not torrentid:
                logger.error('Torrent id could not be determined')
                return
            # Get folder
            folder_name = utorrent.getFolder(torrentid)
            if folder_name:
                logger.info('Torrent folder name: %s' % folder_name)
            else:
                logger.error('Torrent folder name could not be determined')
                return
            # Set Label
            if headphones.CONFIG.UTORRENT_LABEL:
                utorrent.labelTorrent(torrentid)
            # Set Seed Ratio
            seed_ratio = get_seed_ratio(result.provider)
            if seed_ratio is not None:
                utorrent.setSeedRatio(torrentid, seed_ratio)
        else:  # if headphones.CONFIG.TORRENT_DOWNLOADER == 4:
            logger.info("Sending torrent to QBiTorrent")
            # Add torrent
            if result.provider == 'rutracker.org':
                if qbittorrent.apiVersion2:
                    qbittorrent.addFile(data)
                else:
                    ruobj.qbittorrent_add_file(data)
            else:
                qbittorrent.addTorrent(result.url)
            # Get hash (qBittorrent reports hashes lowercase)
            torrentid = calculate_torrent_hash(result.url, data)
            torrentid = torrentid.lower()
            if not torrentid:
                logger.error('Torrent id could not be determined')
                return
            # Get name
            folder_name = qbittorrent.getName(torrentid)
            if folder_name:
                logger.info('Torrent name: %s' % folder_name)
            else:
                logger.error('Torrent name could not be determined')
                return
            # Set Seed Ratio
            # Oh my god why is this repeated again for the 100th time
            seed_ratio = get_seed_ratio(result.provider)
            if seed_ratio is not None:
                qbittorrent.setSeedRatio(torrentid, seed_ratio)
    # Record the snatch: flip album status and log the snatched release.
    myDB = db.DBConnection()
    myDB.action('UPDATE albums SET status = "Snatched" WHERE AlbumID=?', [album['AlbumID']])
    myDB.action(
        "INSERT INTO snatched VALUES (?, ?, ?, ?, DATETIME('NOW', 'localtime'), "
        "?, ?, ?, ?)", [
            album['AlbumID'],
            result.title,
            result.size,
            result.url,
            "Snatched",
            folder_name,
            kind,
            torrentid
        ]
    )
    # Additional record for post processing or scheduled job to remove the torrent when finished seeding
    if seed_ratio is not None and seed_ratio != 0 and torrentid:
        myDB.action(
            "INSERT INTO snatched VALUES (?, ?, ?, ?, DATETIME('NOW', 'localtime'), "
            "?, ?, ?, ?)", [
                album['AlbumID'],
                result.title,
                result.size,
                result.url,
                "Seed_Snatched",
                folder_name,
                kind,
                torrentid
            ]
        )
    # notify
    # NOTE(review): album row is accessed positionally here but by key above —
    # presumably columns 1/2/6 are ArtistName/AlbumTitle/ReleaseGroupID; verify schema.
    artist = album[1]
    albumname = album[2]
    rgid = album[6]
    title = artist + ' - ' + albumname
    provider = get_provider_name(result.provider)
    name = folder_name if folder_name else None
    if headphones.CONFIG.GROWL_ENABLED and headphones.CONFIG.GROWL_ONSNATCH:
        logger.info("Sending Growl notification")
        growl = notifiers.GROWL()
        growl.notify(name, "Download started")
    if headphones.CONFIG.PROWL_ENABLED and headphones.CONFIG.PROWL_ONSNATCH:
        logger.info("Sending Prowl notification")
        prowl = notifiers.PROWL()
        prowl.notify(name, "Download started")
    if headphones.CONFIG.PUSHOVER_ENABLED and headphones.CONFIG.PUSHOVER_ONSNATCH:
        logger.info("Sending Pushover notification")
        prowl = notifiers.PUSHOVER()
        prowl.notify(name, "Download started")
    if headphones.CONFIG.PUSHBULLET_ENABLED and headphones.CONFIG.PUSHBULLET_ONSNATCH:
        logger.info("Sending PushBullet notification")
        pushbullet = notifiers.PUSHBULLET()
        pushbullet.notify(name, "Download started")
    if headphones.CONFIG.JOIN_ENABLED and headphones.CONFIG.JOIN_ONSNATCH:
        logger.info("Sending Join notification")
        join = notifiers.JOIN()
        join.notify(name, "Download started")
    if headphones.CONFIG.SLACK_ENABLED and headphones.CONFIG.SLACK_ONSNATCH:
        logger.info("Sending Slack notification")
        slack = notifiers.SLACK()
        slack.notify(name, "Download started")
    if headphones.CONFIG.TELEGRAM_ENABLED and headphones.CONFIG.TELEGRAM_ONSNATCH:
        logger.info("Sending Telegram notification")
        from headphones import cache
        c = cache.Cache()
        album_art = c.get_artwork_from_cache(None, rgid)
        telegram = notifiers.TELEGRAM()
        message = 'Snatched from ' + provider + '. ' + name
        telegram.notify(message, "Snatched: " + title, rgid, image=album_art)
    if headphones.CONFIG.TWITTER_ENABLED and headphones.CONFIG.TWITTER_ONSNATCH:
        logger.info("Twitter notifications temporarily disabled")
        #logger.info("Sending Twitter notification")
        #twitter = notifiers.TwitterNotifier()
        #twitter.notify_snatch(name)
    if headphones.CONFIG.NMA_ENABLED and headphones.CONFIG.NMA_ONSNATCH:
        logger.info("Sending NMA notification")
        nma = notifiers.NMA()
        nma.notify(snatched=name)
    if headphones.CONFIG.PUSHALOT_ENABLED and headphones.CONFIG.PUSHALOT_ONSNATCH:
        logger.info("Sending Pushalot notification")
        pushalot = notifiers.PUSHALOT()
        pushalot.notify(name, "Download started")
    if headphones.CONFIG.OSX_NOTIFY_ENABLED and headphones.CONFIG.OSX_NOTIFY_ONSNATCH:
        from headphones import cache
        c = cache.Cache()
        album_art = c.get_artwork_from_cache(None, rgid)
        logger.info("Sending OS X notification")
        osx_notify = notifiers.OSX_NOTIFY()
        osx_notify.notify(artist,
                          albumname,
                          'Snatched: ' + provider + '. ' + name,
                          image=album_art)
    if headphones.CONFIG.BOXCAR_ENABLED and headphones.CONFIG.BOXCAR_ONSNATCH:
        logger.info("Sending Boxcar2 notification")
        b2msg = 'From ' + provider + '<br></br>' + name
        boxcar = notifiers.BOXCAR()
        boxcar.notify('Headphones snatched: ' + title, b2msg, rgid)
    if headphones.CONFIG.EMAIL_ENABLED and headphones.CONFIG.EMAIL_ONSNATCH:
        logger.info("Sending Email notification")
        email = notifiers.Email()
        message = 'Snatched from ' + provider + '. ' + name
        email.notify("Snatched: " + title, message)
def verifyresult(title, artistterm, term, lossless):
    """Check whether a search result title passes all result filters.

    Filters, in order: remix releases (when the term doesn't ask for one),
    lossless/FLAC releases (when not requested), user-configured ignored and
    required words, "clean"/censored releases, and finally a token check that
    every word of the search term appears in the title — this weeds out
    substring matches, e.g. "Vol III" when we were looking for "Vol II".

    title: the release title from the provider.
    artistterm: cleaned artist term (currently unused; kept for interface
        compatibility with callers).
    term: the search term the result was found with.
    lossless: True when a lossless release is explicitly acceptable.

    Returns True if the result should be kept, False otherwise.
    """
    # Normalise separators so word-level matching works on plain tokens
    title = re.sub(r'[\.\-\/\_]', r' ', title)
    # Filter out remix search results (if we're not looking for it)
    if 'remix' not in term.lower() and 'remix' in title.lower():
        logger.info(
            "Removed %s from results because it's a remix album and we're not looking for a remix album right now.",
            title)
        return False
    # Filter out FLAC if we're not specifically looking for it
    if (headphones.CONFIG.PREFERRED_QUALITY == 0 or headphones.CONFIG.PREFERRED_QUALITY == '0') \
            and 'flac' in title.lower() and not lossless:
        logger.info(
            "Removed %s from results because it's a lossless album and we're not looking for a lossless album right now.",
            title)
        return False
    if headphones.CONFIG.IGNORED_WORDS:
        for each_word in split_string(headphones.CONFIG.IGNORED_WORDS):
            if each_word.lower() in title.lower():
                logger.info("Removed '%s' from results because it contains ignored word: '%s'",
                            title, each_word)
                return False
    if headphones.CONFIG.REQUIRED_WORDS:
        for each_word in split_string(headphones.CONFIG.REQUIRED_WORDS):
            # "A OR B" entries pass if any alternative is present
            if ' OR ' in each_word:
                or_words = split_string(each_word, 'OR')
                if any(word.lower() in title.lower() for word in or_words):
                    continue
                else:
                    logger.info(
                        "Removed '%s' from results because it doesn't contain any of the required words in: '%s'",
                        title, str(or_words))
                    return False
            if each_word.lower() not in title.lower():
                logger.info(
                    "Removed '%s' from results because it doesn't contain required word: '%s'",
                    title, each_word)
                return False
    if headphones.CONFIG.IGNORE_CLEAN_RELEASES:
        for each_word in ['clean', 'edited', 'censored']:
            if each_word.lower() in title.lower() and each_word.lower() not in term.lower():
                logger.info("Removed '%s' from results because it contains clean album word: '%s'",
                            title, each_word)
                return False
    # BUGFIX: the flags were previously passed positionally, landing in
    # re.split's `maxsplit` parameter (re.IGNORECASE | re.UNICODE == 34), so
    # the flags never applied and splitting was silently capped at 34 tokens.
    tokens = re.split(r'\W', term, flags=re.IGNORECASE | re.UNICODE)
    # Every token of the term must appear in the title, allowing for stripped
    # punctuation and leetspeak-style substitutions (e.g. P!nk, Ke$ha).
    for token in tokens:
        if not token:
            continue
        if token == 'Various' or token == 'Artists' or token == 'VA':
            continue
        if not has_token(title, token):
            cleantoken = ''.join(c for c in token if c not in string.punctuation)
            if not has_token(title, cleantoken):
                dic = {'!': 'i', '$': 's'}
                dumbtoken = replace_all(token, dic)
                if not has_token(title, dumbtoken):
                    logger.info(
                        "Removed from results: %s (missing tokens: [%s, %s, %s])",
                        title, token, cleantoken, dumbtoken)
                    return False
    return True
def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
                  choose_specific_download=False):
    """Search all enabled torrent providers for the given album.

    Builds cleaned artist/album search terms (or uses the user-supplied
    SearchTerm), then queries each enabled provider in turn — Torznab hosts,
    rutracker.org, Orpheus.network, Redacted and The Pirate Bay — collecting
    Result objects into one list. Results are then passed through
    verifyresult() and (unless choose_specific_download) more_filtering().

    album: album DB row (keys: ArtistName, AlbumTitle, ReleaseDate, Type,
        SearchTerm, ...).
    new: forwarded to more_filtering().
    losslessOnly: restrict to lossless releases.
    albumlength: forwarded to more_filtering().
    choose_specific_download: when True, skip the size/extra filtering so the
        user can pick manually.

    Returns a (possibly empty) list of Result objects.
    """
    global orpheusobj  # persistent orpheus.network api object to reduce number of login attempts
    global redobj  # persistent redacted api object to reduce number of login attempts
    global ruobj  # and rutracker
    reldate = album['ReleaseDate']
    year = get_year_from_release_date(reldate)
    # MERGE THIS WITH THE TERM CLEANUP FROM searchNZB
    replacements = {
        '...': '',
        ' & ': ' ',
        ' = ': ' ',
        '?': '',
        '$': 's',
        ' + ': ' ',
        '"': '',
        ',': ' ',
        '*': ''
    }
    semi_cleanalbum = replace_all(album['AlbumTitle'], replacements)
    cleanalbum = unidecode(semi_cleanalbum)
    semi_cleanartist = replace_all(album['ArtistName'], replacements)
    cleanartist = unidecode(semi_cleanartist)
    # Use provided term if available, otherwise build our own (this code needs to be cleaned up since a lot
    # of these torrent providers are just using cleanartist/cleanalbum terms
    if album['SearchTerm']:
        term = album['SearchTerm']
    elif album['Type'] == 'part of':
        term = cleanalbum + " " + year
    else:
        # FLAC usually doesn't have a year for some reason so I'll leave it out
        # Various Artist albums might be listed as VA, so I'll leave that out too
        # Only use the year if the term could return a bunch of different albums, i.e. self-titled albums
        if album['ArtistName'] in album['AlbumTitle'] or len(album['ArtistName']) < 4 or len(
                album['AlbumTitle']) < 4:
            term = cleanartist + ' ' + cleanalbum + ' ' + year
        elif album['ArtistName'] == 'Various Artists':
            term = cleanalbum + ' ' + year
        else:
            term = cleanartist + ' ' + cleanalbum
    # Save user search term
    if album['SearchTerm']:
        usersearchterm = term
    else:
        usersearchterm = ''
    semi_clean_artist_term = re.sub(r'[\.\-\/]', r' ', semi_cleanartist)
    semi_clean_album_term = re.sub(r'[\.\-\/]', r' ', semi_cleanalbum)
    # Replace bad characters in the term
    term = re.sub(r'[\.\-\/]', r' ', term)
    artistterm = re.sub(r'[\.\-\/]', r' ', cleanartist)
    albumterm = re.sub(r'[\.\-\/]', r' ', cleanalbum)
    # If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
    if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
        allow_lossless = True
    else:
        allow_lossless = False
    logger.debug("Using search term: %s" % term)
    resultlist = []
    # Providers below compare with strict '<', hence the -1
    minimumseeders = int(headphones.CONFIG.NUMBEROFSEEDERS) - 1
    def set_proxy(proxy_url):
        # Normalise a user-supplied proxy URL: ensure a scheme, strip trailing slash
        if not proxy_url.startswith('http'):
            proxy_url = 'https://' + proxy_url
        if proxy_url.endswith('/'):
            proxy_url = proxy_url[:-1]
        return proxy_url
    # ---- Torznab hosts (incl. Jackett/Prowlarr) ----
    if headphones.CONFIG.TORZNAB:
        provider = "torznab"
        torznab_hosts = []
        if headphones.CONFIG.TORZNAB_HOST and headphones.CONFIG.TORZNAB_ENABLED:
            torznab_hosts.append((headphones.CONFIG.TORZNAB_HOST, headphones.CONFIG.TORZNAB_APIKEY,
                                  headphones.CONFIG.TORZNAB_RATIO, headphones.CONFIG.TORZNAB_ENABLED))
        for torznab_host in headphones.CONFIG.get_extra_torznabs():
            if torznab_host[3] == '1' or torznab_host[3] == 1:
                torznab_hosts.append(torznab_host)
        parent_category = "3000"
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
            categories = "3040"
            maxsize = 10000000000
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
            categories = "3040,3010,3050"
            maxsize = 10000000000
        else:
            categories = "3010,3050"
            maxsize = 300000000
        if album['Type'] == 'Other':
            categories = "3030"
            logger.info("Album type is audiobook/spokenword. Using audiobook category")
        categories = categories + "," + parent_category
        for torznab_host in torznab_hosts:
            provider = torznab_host[0]
            provider_name = torznab_host[0]
            # Format Jackett provider
            if "api/v2.0/indexers" in torznab_host[0]:
                provider_name = provider.split("/indexers/", 1)[1].split('/', 1)[0]
                provider = "Torznab" + '|' + provider_name + '|' + torznab_host[0]
            # Request results
            logger.info('Parsing results from Torznab %s using search term: %s' % (provider_name, term))
            headers = {'User-Agent': USER_AGENT}
            params = {
                "t": "search",
                "apikey": torznab_host[1],
                #"cat": categories,
                "cat": parent_category,  # search using '3000' and filter below
                #"maxage": headphones.CONFIG.USENET_RETENTION,
                "q": term
            }
            data = request.request_soup(
                url=torznab_host[0],
                params=params, headers=headers
            )
            # Process feed
            if data:
                items = data.find_all('item')
                if not items:
                    logger.info("No results found from %s for %s", provider_name, term)
                else:
                    for item in items:
                        try:
                            title = item.title.get_text()
                            url = item.find("link").next_sibling.strip()
                            seeders = int(item.find("torznab:attr", attrs={"name": "seeders"}).get('value'))
                            if item.size:
                                size = int(item.size.string)
                            else:
                                size = int(item.find("torznab:attr", attrs={"name": "size"}).get('value'))
                            category = item.find("torznab:attr", attrs={"name": "category"}).get('value')
                            if category not in categories:
                                logger.info(f"Skipping {title}, size {bytes_to_mb(size)}, incorrect category {category}")
                                continue
                            if all(word.lower() in title.lower() for word in term.split()):
                                if size < maxsize and minimumseeders < seeders:
                                    logger.info('Found %s. Size: %s' % (title, bytes_to_mb(size)))
                                    if item.prowlarrindexer:
                                        provider = "Torznab" + '|' + item.prowlarrindexer.get_text() + '|' + \
                                                   torznab_host[0]
                                    elif item.jackettindexer:
                                        provider = "Torznab" + '|' + item.jackettindexer.get_text() + '|' + \
                                                   torznab_host[0]
                                    resultlist.append(Result(title, size, url, provider, 'torrent', True))
                                else:
                                    logger.info(
                                        '%s is larger than the maxsize or has too little seeders for this category, '
                                        'skipping. (Size: %s, Seeders: %d)',
                                        title, bytes_to_mb(size), seeders)
                            else:
                                logger.info('Skipping %s, not all search term words found' % title)
                        except Exception as e:
                            logger.exception(
                                "An unknown error occurred trying to parse the feed: %s" % e)
    # rutracker.org
    if headphones.CONFIG.RUTRACKER:
        provider = "rutracker.org"
        # Ignore if release date not specified, results too unpredictable
        if not year and not usersearchterm:
            logger.info("Release date not specified, ignoring for rutracker.org")
        else:
            if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
                format = 'lossless'
            elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
                format = 'lossless+mp3'
            else:
                format = 'mp3'
            # Login
            if not ruobj or not ruobj.logged_in():
                ruobj = rutracker.Rutracker()
                if not ruobj.login():
                    ruobj = None
            if ruobj and ruobj.logged_in():
                # build search url
                if not usersearchterm:
                    searchURL = ruobj.searchurl(artistterm, albumterm, year, format)
                else:
                    searchURL = ruobj.searchurl(usersearchterm, ' ', ' ', format)
                # parse results
                rulist = ruobj.search(searchURL)
                if rulist:
                    resultlist.extend(rulist)
    # ---- Orpheus.network (Gazelle) ----
    if headphones.CONFIG.ORPHEUS:
        provider = "Orpheus.network"
        providerurl = "https://orpheus.network/"
        bitrate = None
        bitrate_string = bitrate
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:  # Lossless Only mode
            search_formats = [gazelleformat.FLAC]
            maxsize = 10000000000
        elif headphones.CONFIG.PREFERRED_QUALITY == 2:  # Preferred quality mode
            search_formats = [None]  # should return all
            bitrate = headphones.CONFIG.PREFERRED_BITRATE
            if bitrate:
                # Map a numeric preferred bitrate onto the nearest VBR preset name
                if 225 <= int(bitrate) < 256:
                    bitrate = 'V0'
                elif 200 <= int(bitrate) < 225:
                    bitrate = 'V1'
                elif 175 <= int(bitrate) < 200:
                    bitrate = 'V2'
                for encoding_string in gazelleencoding.ALL_ENCODINGS:
                    if re.search(bitrate, encoding_string, flags=re.I):
                        bitrate_string = encoding_string
                if bitrate_string not in gazelleencoding.ALL_ENCODINGS:
                    logger.info(
                        "Your preferred bitrate is not one of the available Orpheus.network filters, so not using it as a search parameter.")
            maxsize = 10000000000
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:  # Highest quality including lossless
            search_formats = [gazelleformat.FLAC, gazelleformat.MP3]
            maxsize = 10000000000
        else:  # Highest quality excluding lossless
            search_formats = [gazelleformat.MP3]
            maxsize = 300000000
        if not orpheusobj or not orpheusobj.logged_in():
            try:
                logger.info("Attempting to log in to Orpheus.network...")
                orpheusobj = gazelleapi.GazelleAPI(headphones.CONFIG.ORPHEUS_USERNAME,
                                                   headphones.CONFIG.ORPHEUS_PASSWORD,
                                                   headphones.CONFIG.ORPHEUS_URL)
                orpheusobj._login()
            except Exception as e:
                orpheusobj = None
                logger.error("Orpheus.network credentials incorrect or site is down. Error: %s %s" % (
                    e.__class__.__name__, str(e)))
        if orpheusobj and orpheusobj.logged_in():
            logger.info("Searching %s..." % provider)
            all_torrents = []
            album_type = ""
            # Specify release types to filter by
            if album['Type'] == 'Album':
                album_type = [gazellerelease_type.ALBUM]
            if album['Type'] == 'Soundtrack':
                album_type = [gazellerelease_type.SOUNDTRACK]
            if album['Type'] == 'EP':
                album_type = [gazellerelease_type.EP]
            # No musicbrainz match for this type
            # if album['Type'] == 'Anthology':
            #     album_type = [gazellerelease_type.ANTHOLOGY]
            if album['Type'] == 'Compilation':
                album_type = [gazellerelease_type.COMPILATION]
            if album['Type'] == 'DJ-mix':
                album_type = [gazellerelease_type.DJ_MIX]
            if album['Type'] == 'Single':
                album_type = [gazellerelease_type.SINGLE]
            if album['Type'] == 'Live':
                album_type = [gazellerelease_type.LIVE_ALBUM]
            if album['Type'] == 'Remix':
                album_type = [gazellerelease_type.REMIX]
            if album['Type'] == 'Bootleg':
                album_type = [gazellerelease_type.BOOTLEG]
            if album['Type'] == 'Interview':
                album_type = [gazellerelease_type.INTERVIEW]
            if album['Type'] == 'Mixtape/Street':
                album_type = [gazellerelease_type.MIXTAPE]
            if album['Type'] == 'Other':
                album_type = [gazellerelease_type.UNKNOWN]
            for search_format in search_formats:
                if usersearchterm:
                    all_torrents.extend(
                        orpheusobj.search_torrents(searchstr=usersearchterm, format=search_format,
                                                   encoding=bitrate_string, releasetype=album_type)['results'])
                else:
                    all_torrents.extend(orpheusobj.search_torrents(artistname=semi_clean_artist_term,
                                                                   groupname=semi_clean_album_term,
                                                                   format=search_format,
                                                                   encoding=bitrate_string,
                                                                   releasetype=album_type)['results'])
            # filter on format, size, and num seeders
            logger.info("Filtering torrents by format, maximum size, and minimum seeders...")
            match_torrents = [t for t in all_torrents if
                              t.size <= maxsize and t.seeders >= minimumseeders]
            logger.info(
                "Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
            # sort by times d/l'd
            if not len(match_torrents):
                logger.info("No results found from %s for %s after filtering" % (provider, term))
            elif len(match_torrents) > 1:
                logger.info("Found %d matching releases from %s for %s - %s after filtering" %
                            (len(match_torrents), provider, artistterm, albumterm))
                logger.info('Sorting torrents by number of seeders...')
                match_torrents.sort(key=lambda x: int(x.seeders), reverse=True)
                if gazelleformat.MP3 in search_formats:
                    logger.info('Sorting torrents by seeders...')
                    match_torrents.sort(key=lambda x: int(x.seeders), reverse=True)
                if search_formats and None not in search_formats:
                    match_torrents.sort(
                        key=lambda x: int(search_formats.index(x.format)))  # prefer lossless
                # if bitrate:
                #     match_torrents.sort(key=lambda x: re.match("mp3", x.getTorrentDetails(), flags=re.I), reverse=True)
                #     match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
                logger.info(
                    "New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
            for torrent in match_torrents:
                if not torrent.file_path:
                    torrent.group.update_group_data()  # will load the file_path for the individual torrents
                resultlist.append(
                    Result(
                        torrent.file_path,
                        torrent.size,
                        orpheusobj.generate_torrent_link(torrent.id),
                        provider,
                        'torrent',
                        True
                    )
                )
    # Redacted - Using same logic as What.CD as it's also Gazelle, so should really make this into something reusable
    if headphones.CONFIG.REDACTED:
        provider = "Redacted"
        providerurl = "https://redacted.ch"
        bitrate = None
        bitrate_string = bitrate
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:  # Lossless Only mode
            search_formats = [gazelleformat.FLAC]
            maxsize = 10000000000
        elif headphones.CONFIG.PREFERRED_QUALITY == 2:  # Preferred quality mode
            search_formats = [None]  # should return all
            bitrate = headphones.CONFIG.PREFERRED_BITRATE
            if bitrate:
                # Map a numeric preferred bitrate onto the nearest VBR preset name
                if 225 <= int(bitrate) < 256:
                    bitrate = 'V0'
                elif 200 <= int(bitrate) < 225:
                    bitrate = 'V1'
                elif 175 <= int(bitrate) < 200:
                    bitrate = 'V2'
                for encoding_string in gazelleencoding.ALL_ENCODINGS:
                    if re.search(bitrate, encoding_string, flags=re.I):
                        bitrate_string = encoding_string
                if bitrate_string not in gazelleencoding.ALL_ENCODINGS:
                    logger.info(
                        "Your preferred bitrate is not one of the available RED filters, so not using it as a search parameter.")
            maxsize = 10000000000
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:  # Highest quality including lossless
            search_formats = [gazelleformat.FLAC, gazelleformat.MP3]
            maxsize = 10000000000
        else:  # Highest quality excluding lossless
            search_formats = [gazelleformat.MP3]
            maxsize = 300000000
        if not redobj or not redobj.logged_in():
            try:
                logger.info("Attempting to log in to Redacted...")
                redobj = gazelleapi.GazelleAPI(headphones.CONFIG.REDACTED_USERNAME,
                                               headphones.CONFIG.REDACTED_PASSWORD,
                                               providerurl)
                redobj._login()
            except Exception as e:
                redobj = None
                logger.error("Redacted credentials incorrect or site is down. Error: %s %s" % (
                    e.__class__.__name__, str(e)))
        if redobj and redobj.logged_in():
            logger.info("Searching %s..." % provider)
            all_torrents = []
            for search_format in search_formats:
                if usersearchterm:
                    all_torrents.extend(
                        redobj.search_torrents(searchstr=usersearchterm, format=search_format,
                                               encoding=bitrate_string)['results'])
                else:
                    all_torrents.extend(redobj.search_torrents(artistname=semi_clean_artist_term,
                                                               groupname=semi_clean_album_term,
                                                               format=search_format,
                                                               encoding=bitrate_string)['results'])
            # filter on format, size, and num seeders
            logger.info("Filtering torrents by format, maximum size, and minimum seeders...")
            match_torrents = [t for t in all_torrents if
                              t.size <= maxsize and t.seeders >= minimumseeders]
            logger.info(
                "Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
            # sort by times d/l'd
            if not len(match_torrents):
                logger.info("No results found from %s for %s after filtering" % (provider, term))
            elif len(match_torrents) > 1:
                logger.info("Found %d matching releases from %s for %s - %s after filtering" %
                            (len(match_torrents), provider, artistterm, albumterm))
                logger.info(
                    "Sorting torrents by times snatched and preferred bitrate %s..." % bitrate_string)
                match_torrents.sort(key=lambda x: int(x.snatched), reverse=True)
                if gazelleformat.MP3 in search_formats:
                    # sort by size after rounding to nearest 10MB...hacky, but will favor highest quality
                    match_torrents.sort(key=lambda x: int(10 * round(x.size / 1024. / 1024. / 10.)),
                                        reverse=True)
                if search_formats and None not in search_formats:
                    match_torrents.sort(
                        key=lambda x: int(search_formats.index(x.format)))  # prefer lossless
                # if bitrate:
                #     match_torrents.sort(key=lambda x: re.match("mp3", x.getTorrentDetails(), flags=re.I), reverse=True)
                #     match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
                logger.info(
                    "New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
            for torrent in match_torrents:
                if not torrent.file_path:
                    torrent.group.update_group_data()  # will load the file_path for the individual torrents
                use_token = headphones.CONFIG.REDACTED_USE_FLTOKEN and torrent.can_use_token
                resultlist.append(
                    Result(
                        torrent.file_path,
                        torrent.size,
                        redobj.generate_torrent_link(torrent.id, use_token),
                        provider,
                        'torrent',
                        True
                    )
                )
    # PIRATE BAY
    # 09/08/2024 - thepiratebay.org no longer working, switch to apibay.org as default
    # Pirate Bay
    if (headphones.CONFIG.PIRATEBAY):
        logger.info(f"Searching The Pirate Bay using term: {term}")
        provider = "The Pirate Bay"
        tpb_term = term.replace("!", "").replace("'", " ").replace(" ", "%20")
        # Pick category for torrents
        if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
            category = '104'  # FLAC
            maxsize = 10000000000
        elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
            category = '100'  # General audio category
            maxsize = 10000000000
        else:
            category = '101'  # MP3 only
            maxsize = 300000000
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
        # Use proxy if specified
        if headphones.CONFIG.PIRATEBAY_PROXY_URL and "apibay.org" not in headphones.CONFIG.PIRATEBAY_PROXY_URL:
            apibay = False
            providerurl = fix_url(set_proxy(headphones.CONFIG.PIRATEBAY_PROXY_URL))
            providerurl = providerurl + "/search/" + tpb_term + "/0/7/"  # 7 is sort by seeders
            data = request.request_soup(url=providerurl + category, headers=headers)
            rows = []
            if data:
                rows = data.select('table tbody tr')[1:]
                if not rows:
                    rows = data.select('table tr')[1:]
        else:
            # Use apibay
            apibay = True
            rows = request.request_json(f"http://apibay.org/q.php?q={term}&cat={category}", headers=headers)
        for item in rows:
            # apibay
            if apibay:
                title = item["name"]
                if title == "No results returned":
                    rows = None
                    break
                size = int(item["size"])
                seeders = int(item["seeders"])
                url = pirate_bay_get_magnet(item["info_hash"], item["name"])
            else:
                # proxy
                try:
                    # proxy format 1
                    columns = item.find_all('td')
                    description = columns[1].text.strip().split('\n\n')
                    title = description[0]
                    url = columns[3].select('a[href^="magnet"]')[0]['href']
                    formatted_size = columns[4].text.replace('\xa0', ' ')
                    size = piratesize(formatted_size)
                    seeders = int(columns[5].text)
                except:
                    # proxy format 2
                    try:
                        title = ''.join(item.find("a", {"class": "detLink"}))
                        seeders = int(''.join(item.find("td", {"align": "right"})))
                        url = item.findAll("a")[3]["href"]
                        formatted_size = re.search('Size (.*),', str(item)).group(1).replace('\xa0', ' ')
                        size = piratesize(formatted_size)
                    except Exception as e:
                        logger.error("Cannot parse results with this proxy, leave setting blank for default apibay.org "
                                     f"or try a different proxy. Error: {e}")
                        break
            if size < maxsize and minimumseeders < seeders and url is not None:
                match = True
                logger.info(f"Found {title}. Size: {bytes_to_mb(size)}")
            else:
                match = False
                logger.info(f"{title} is larger than the maxsize or has too little seeders for this category, skipping."
                            f" (Size: {bytes_to_mb(size)}, Seeders: {seeders})")
            resultlist.append(Result(title, size, url, provider, "torrent", match))
        if not rows:
            logger.info(f"No valid results found from The Pirate Bay using term: {term}")
    # attempt to verify that this isn't a substring result
    # when looking for "Foo - Foo" we don't want "Foobar"
    # this should be less of an issue when it isn't a self-titled album so we'll only check vs artist
    results = [result for result in resultlist if verifyresult(result.title, artistterm, term, losslessOnly)]
    # Additional filtering for size etc
    if results and not choose_specific_download:
        results = more_filtering(results, album, albumlength, new)
    return results
def searchSoulseek(album, new=False, losslessOnly=False, albumlength=None,
                   choose_specific_download=False):
    """Search Soulseek for an album.

    Returns the (possibly filtered) result list, or None when the search
    itself raises. Some parameters (e.g. albumlength when
    choose_specific_download is set) are intentionally unused.
    """
    # Sequences stripped or replaced in artist/album names before searching.
    scrub = {
        '...': '',
        ' & ': ' ',
        ' = ': ' ',
        '?': '',
        '$': '',
        ' + ': ' ',
        '"': '',
        ',': '',
        '*': '',
        '.': '',
        ':': ''
    }

    num_tracks = get_album_track_count(album['AlbumID'])
    year = get_year_from_release_date(album['ReleaseDate'])
    cleanalbum = unidecode(replace_all(album['AlbumTitle'], scrub)).strip()
    cleanartist = unidecode(replace_all(album['ArtistName'], scrub)).strip()

    quality = headphones.CONFIG.PREFERRED_QUALITY

    # Preferred bitrate + high-limit buffer + "allow lossless" means we accept
    # both lossy and lossless results.
    allow_lossless = bool(
        quality == 2
        and headphones.CONFIG.PREFERRED_BITRATE
        and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER
        and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS
    )

    if quality == 3:
        losslessOnly = True
    elif quality == 1:
        allow_lossless = True

    term = album['SearchTerm'] or ''

    try:
        found = soulseek.search(artist=cleanartist, album=cleanalbum, year=year,
                                losslessOnly=losslessOnly,
                                allow_lossless=allow_lossless,
                                num_tracks=num_tracks, user_search_term=term)
        if not found:
            logger.info("No valid results found from Soulseek")

        # Guard against substring matches ("Foo - Foo" vs "Foobar").
        kept = [r for r in found if verifyresult(r.title, cleanartist, term, losslessOnly)]

        # Additional filtering for size etc
        if kept and not choose_specific_download:
            kept = more_filtering(kept, album, albumlength, new)

        return kept
    except Exception as e:
        logger.error(f"Soulseek error, check server logs: {e}")
        return None
def get_album_track_count(album_id):
    """Return the number of tracks stored in the DB for the given album id."""
    conn = db.DBConnection()
    rows = conn.select('SELECT COUNT(*) as count FROM tracks WHERE AlbumID=?', [album_id])
    return rows[0]['count']
# THIS IS KIND OF A MESS AND PROBABLY NEEDS TO BE CLEANED UP
def preprocess(resultlist):
    """Fetch or derive the download payload for a search result.

    Returns a ``(data, result)`` tuple where ``data`` is the raw torrent file
    contents, a minimal bencoded magnet wrapper, or ``True`` when the
    downloader can consume the result/URL as-is. ``result`` may be a rewritten
    copy (e.g. after following a Torznab redirect).

    NOTE(review): every branch below returns during the first iteration, so
    only the first entry of resultlist is ever processed — presumably
    intentional, but confirm against callers.
    """
    for result in resultlist:
        headers = {'User-Agent': USER_AGENT}

        if result.kind == 'soulseek':
            # Soulseek results need no payload; the downloader handles them.
            return True, result

        if result.kind == 'torrent':

            # rutracker always needs the torrent data
            if result.provider == 'rutracker.org':
                return ruobj.get_torrent_data(result.url), result

            # Torznab sometimes redirects
            if result.provider.startswith("Torznab") or 'torznab' in result.provider.lower():
                r = request.request_response(url=result.url, headers=headers, allow_redirects=False)
                if r:
                    link = r.headers.get('Location')
                    if link and link != result.url:
                        if link.startswith('magnet:'):
                            # Redirected to a magnet: rewrite the result and wrap
                            # the URI in a minimal bencoded dict so a hash can be
                            # derived from it later.
                            result = Result(
                                result.title,
                                result.size,
                                link,
                                result.provider,
                                "magnet",
                                result.matches
                            )
                            return "d10:magnet-uri%d:%se" % (len(link), link), result
                        else:
                            # Plain redirect: keep the kind, swap in the new URL.
                            result = Result(
                                result.title,
                                result.size,
                                link,
                                result.provider,
                                result.kind,
                                result.matches
                            )
                            return True, result
                    else:
                        # No redirect: the response body is the torrent file.
                        return r.content, result

            # Get out of here if we're using Transmission or Deluge
            # if not a magnet link still need the .torrent to generate hash... uTorrent support labeling
            if headphones.CONFIG.TORRENT_DOWNLOADER in [1, 3]:
                return True, result

            # Get out of here if it's a magnet link
            if result.url.lower().startswith("magnet:"):
                return True, result

            # Download the torrent file
            if result.provider in ["The Pirate Bay"]:
                # TPB rejects unknown clients; spoof a desktop browser UA.
                headers = {
                    'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
                        AppleWebKit/537.36 (KHTML, like Gecko) \
                        Chrome/41.0.2243.2 Safari/537.36'
                }
            return request.request_content(url=result.url, headers=headers), result

        elif result.kind == 'magnet':
            # Wrap the magnet URI in a minimal bencoded dict (see above).
            magnet_link = result.url
            return "d10:magnet-uri%d:%se" % (len(magnet_link), magnet_link), result

        elif result.kind == 'bandcamp':
            return True, result

        else:
            # NZB (or other HTTP) result.
            if result.provider == 'headphones':
                # The Headphones indexer requires HTTP basic auth.
                return request.request_content(
                    url=result.url,
                    headers=headers,
                    auth=(headphones.CONFIG.HPUSER, headphones.CONFIG.HPPASS)
                ), result
            else:
                return request.request_content(url=result.url, headers=headers), result
| 83,699 | Python | .py | 1,708 | 35.241218 | 195 | 0.563367 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,302 | softchroot.py | rembo10_headphones/headphones/softchroot.py | import os
from headphones.exceptions import SoftChrootError
class SoftChroot(object):
    """Soft chroot for the web UI.

    Translates between real filesystem paths and the paths shown to the
    client. IMPORTANT: call these methods only from modules that generate
    data for the client UI; avoid unnecessary usage.
    """

    # Class-level defaults: an instance built without a path stays disabled.
    enabled = False
    chroot = None

    def __init__(self, path):
        # An empty/blank path means soft-chrooting is turned off entirely.
        if not path:
            return
        path = path.strip()
        if not path:
            return

        if not os.path.exists(path) or not os.path.isdir(path):
            raise SoftChrootError('No such directory: %s' % path)

        # Normalize to exactly one trailing separator.
        self.chroot = path.rstrip(os.path.sep) + os.path.sep
        self.enabled = True

    def isEnabled(self):
        """Return True when a chroot directory is configured."""
        return self.enabled

    def getRoot(self):
        """Return the chroot directory (with trailing separator), or None."""
        return self.chroot

    def apply(self, path):
        """Map a real path to its chroot-relative form for the UI."""
        if not self.enabled or not path:
            return path
        if not path.strip():
            return path
        # Paths outside the chroot collapse to the root separator.
        if path.startswith(self.chroot):
            return os.path.sep + path[len(self.chroot):]
        return os.path.sep

    def revoke(self, path):
        """Map a chroot-relative UI path back to a real path."""
        if not self.enabled or not path:
            return path
        stripped = path.strip()
        if not stripped:
            return path
        if stripped[0] == os.path.sep:
            stripped = stripped[1:]
        return self.chroot + stripped
| 1,490 | Python | .py | 50 | 20.44 | 126 | 0.554302 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,303 | logger.py | rembo10_headphones/headphones/logger.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from logging import handlers
import multiprocessing
import contextlib
import threading
import traceback
import logging
import errno
import sys
import os
from headphones import helpers
from logutils.queue import QueueHandler, QueueListener
import headphones
# These settings are for file logging only
FILENAME = "headphones.log"  # log file name inside the configured log dir
MAX_SIZE = 1000000  # 1 MB before the file rotates
MAX_FILES = 5  # number of rotated backups kept by RotatingFileHandler

# Headphones logger (shared by the whole application)
logger = logging.getLogger("headphones")

# Global queue for multiprocessing logging; created lazily by listener(),
# set to False when shared memory is unavailable (see listener()).
queue = None
class LogListHandler(logging.Handler):
    """Logging handler that feeds records into the in-memory Web UI log list."""

    def emit(self, record):
        # Convert newlines so the message renders correctly as HTML.
        text = self.format(record).replace("\n", "<br />")
        entry = (helpers.now(), text, record.levelname, record.threadName)
        # Newest entries go first.
        headphones.LOG_LIST.insert(0, entry)
@contextlib.contextmanager
def listener():
    """
    Wrapper that creates a QueueListener, starts it and automatically stops it.
    To be used in a with statement in the main process, for multiprocessing.

    Lazily creates the module-level queue; if shared memory cannot be mapped
    (EACCES on /dev/shm), the queue is set to False and multiprocess logging
    is disabled for the rest of the run.
    """
    global queue

    # Initialize queue if not already done
    if queue is None:
        try:
            queue = multiprocessing.Queue()
        except OSError as e:
            queue = False

            # Some machines don't have access to /dev/shm. See
            # http://stackoverflow.com/questions/2009278 for more information.
            if e.errno == errno.EACCES:
                # Fixed message: the original concatenated "any"+"logging"
                # without a space and said "worker processed".
                logger.warning("Multiprocess logging disabled, because "
                               "current user cannot map shared memory. You won't see any "
                               "logging generated by the worker processes.")

    # Multiprocess logging may be disabled.
    if not queue:
        yield
    else:
        queue_listener = QueueListener(queue, *logger.handlers)

        try:
            queue_listener.start()
            yield
        finally:
            queue_listener.stop()
def initMultiprocessing():
    """
    Replace all handlers with a single QueueHandler on top. Only call this
    inside a multiprocessing worker process, since it reconfigures the logger
    completely.
    """
    # Multiprocess logging may be disabled (queue is False) -- nothing to do.
    if not queue:
        return

    # Strip every existing handler; the queue handler becomes the only one.
    for existing in list(logger.handlers):
        logger.removeHandler(existing)

    qh = QueueHandler(queue)
    qh.setLevel(logging.DEBUG)
    logger.addHandler(qh)

    # Name the thread after the worker process so log records are readable.
    threading.current_thread().name = multiprocessing.current_process().name
def initLogger(console=False, log_dir=False, verbose=False):
    """
    Setup logging for Headphones. It uses the logger instance with the name
    'headphones'. Three log handlers are added:

    * RotatingFileHandler: for the file headphones.log
    * LogListHandler: for Web UI
    * StreamHandler: for console (if console)

    Console logging is only enabled if console is set to True. This method can
    be invoked multiple times, during different stages of Headphones.

    Args:
        console (bool): attach a StreamHandler writing to the console.
        log_dir (str | bool): directory for the rotating headphones.log,
            or False to skip file logging.
        verbose (bool): log at DEBUG level instead of INFO.
    """
    # Close and remove old handlers. This is required to reinit the loggers
    # at runtime
    for handler in logger.handlers[:]:
        # Just make sure it is cleaned up.
        if isinstance(handler, handlers.RotatingFileHandler):
            handler.close()
        elif isinstance(handler, logging.StreamHandler):
            handler.flush()

        logger.removeHandler(handler)

    # Base level; individual handlers below are all set to DEBUG and rely on
    # this logger-level filter.
    logger.propagate = False
    logger.setLevel(logging.DEBUG if verbose else logging.INFO)

    # Add list logger (feeds the Web UI log viewer)
    loglist_handler = LogListHandler()
    loglist_handler.setLevel(logging.DEBUG)
    logger.addHandler(loglist_handler)

    # Setup file logger
    if log_dir:
        filename = os.path.join(log_dir, FILENAME)

        file_formatter = logging.Formatter(
            '%(asctime)s - %(levelname)-7s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
        file_handler = handlers.RotatingFileHandler(filename, maxBytes=MAX_SIZE,
                                                    backupCount=MAX_FILES,
                                                    encoding='utf8')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(file_formatter)

        logger.addHandler(file_handler)

    # Setup console logger
    if console:
        console_formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(console_formatter)
        console_handler.setLevel(logging.DEBUG)

        logger.addHandler(console_handler)

    # Install exception hooks
    initHooks()
def initHooks(global_exceptions=True, thread_exceptions=True, pass_original=True):
    """
    This method installs exception catching mechanisms. Any exception caught
    will pass through the exception hook, and will be logged to the logger as
    an error. Additionally, a traceback is provided.

    This is very useful for crashing threads and any other bugs, that may not
    be exposed when running as daemon.

    The default exception hook is still considered, if pass_original is True.

    NOTE(review): initLogger() calls this on every invocation, so repeated
    calls re-wrap threading.Thread.__init__ around the previous wrapper --
    confirm this chaining is intended.
    """

    def excepthook(*exception_info):
        # We should always catch this to prevent loops!
        try:
            message = "".join(traceback.format_exception(*exception_info))
            logger.error("Uncaught exception: %s", message)
        except:
            pass

        # Original excepthook
        if pass_original:
            sys.__excepthook__(*exception_info)

    # Global exception hook
    if global_exceptions:
        sys.excepthook = excepthook

    # Thread exception hook
    if thread_exceptions:
        old_init = threading.Thread.__init__

        def new_init(self, *args, **kwargs):
            old_init(self, *args, **kwargs)
            old_run = self.run

            def new_run(*args, **kwargs):
                try:
                    old_run(*args, **kwargs)
                except (KeyboardInterrupt, SystemExit):
                    # Let deliberate shutdowns propagate untouched.
                    raise
                except:
                    excepthook(*sys.exc_info())

            self.run = new_run

        # Monkey patch the run() by monkey patching the __init__ method
        threading.Thread.__init__ = new_init
# Expose logger methods at module level for convenience
# (headphones.logger.info(...) etc.).
info = logger.info
# logging.Logger.warn is a deprecated alias of warning; keep the public
# `warn` name but bind it to warning() to avoid the DeprecationWarning.
warn = logger.warning
error = logger.error
debug = logger.debug
warning = logger.warning
exception = logger.exception
| 7,230 | Python | .py | 180 | 32.861111 | 100 | 0.674003 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,304 | deluge.py | rembo10_headphones/headphones/deluge.py | # -*- coding: utf-8 -*-
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# Parts of this file are a part of SickRage.
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
# Adapted for Headphones by <noamgit@gmail.com>
# URL: https://github.com/noam09
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from headphones import logger
import time
import re
import os
import json
import headphones
import requests
from base64 import b64encode
import traceback
# Module-level session state shared by every function below.
delugeweb_auth = {}  # session cookies from the Deluge web UI (set by _get_auth)
delugeweb_url = ''  # full JSON-RPC endpoint, e.g. http://host:8112/json (set by _get_auth)
deluge_verify_cert = False  # False, or a CA cert path used to verify HTTPS
scrub_logs = True  # when True, _scrubber() redacts secrets from log output
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
def _scrubber(text):
    """Redact sensitive details (URL parameter values, host:port, session
    cookies, local Windows user paths) from *text* before logging.

    Scrubbing is skipped entirely when the module flag ``scrub_logs`` is False;
    on any internal failure the text is returned as-is.
    """
    if not scrub_logs:
        return text
    try:
        # URL parameter values
        text = re.sub(r'=[0-9a-zA-Z]*', r'=REMOVED', text)
        # Local host with port
        text = re.sub(r'\:\/\/.*\:[0-9]*', r'://REMOVED:', text)
        # Session cookie
        text = re.sub(r"_session_id'\: '.*'", r"_session_id': 'REMOVED'", text)
        # Local Windows user path: keep drive + user dir + last component.
        if text.lower().startswith('c:\\users\\'):
            parts = text.split('\\')
            text = '\\'.join([parts[0], parts[1], '.....', parts[-1]])
    except Exception as e:
        logger.debug('Deluge: Scrubber failed: %s' % str(e))
    return text
def _extract_torrent_name(torrentfile, link):
    """Best-effort torrent name: parse it from the bencoded metainfo, falling
    back to the file-name portion of *link* (minus any .torrent suffix)."""
    try:
        logger.debug('Deluge: Getting torrent name length')
        name_length = int(re.findall(r'name([0-9]*)\:.*?\:', str(torrentfile))[0])
        logger.debug('Deluge: Getting torrent name')
        name = re.findall(r'name[0-9]*\:(.*?)\:', str(torrentfile))[0][:name_length]
    except Exception:
        logger.debug('Deluge: Could not get torrent name, getting file name')
        # get last part of link/path (name only)
        name = link.split('\\')[-1].split('/')[-1]
        # remove '.torrent' suffix
        if name[-len('.torrent'):] == '.torrent':
            name = name[:-len('.torrent')]
    return name


def addTorrent(link, data=None, name=None):
    """Send a magnet URI, a torrent URL, or a local .torrent file/data to
    Deluge.

    Args:
        link: magnet URI, http(s) URL, or local .torrent path.
        data: optional raw .torrent contents (used instead of reading link).
        name: optional torrent name; derived from the metainfo when omitted.

    Returns the torrent hash/id on success, False on failure, None when an
    unexpected exception was logged.
    """
    try:
        # Authenticate anyway
        logger.debug('Deluge: addTorrent Authentication')
        _get_auth()

        result = {}
        retid = False

        url_orpheus = ['https://orpheus.network/', 'http://orpheus.network/']

        if link.lower().startswith('magnet:'):
            logger.debug('Deluge: Got a magnet link: %s' % _scrubber(link))
            result = {'type': 'magnet',
                      'url': link}
            retid = _add_torrent_magnet(result)

        elif link.lower().startswith('http://') or link.lower().startswith('https://'):
            logger.debug('Deluge: Got a URL: %s' % _scrubber(link))
            if link.lower().startswith(tuple(url_orpheus)):
                # Orpheus rejects browser user agents.
                logger.debug('Deluge: Using different User-Agent for this site')
                user_agent = 'Headphones'
            else:
                user_agent = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'
            get_headers = {'User-Agent': user_agent}
            torrentfile = ''
            logger.debug('Deluge: Trying to download (GET)')
            try:
                r = requests.get(link, headers=get_headers)
                if r.status_code == 200:
                    logger.debug('Deluge: 200 OK')
                    # .text will ruin the encoding for some torrents
                    torrentfile = r.content
                else:
                    logger.debug('Deluge: Trying to GET %s returned status %d' % (_scrubber(link), r.status_code))
                    return False
            except Exception as e:
                logger.debug('Deluge: Download failed: %s' % str(e))

            # A bencoded torrent starts with its announce entry.
            if 'announce' not in str(torrentfile)[:40]:
                logger.debug('Deluge: Contents of %s doesn\'t look like a torrent file' % _scrubber(link))
                return False

            if not name:
                name = _extract_torrent_name(torrentfile, link)
            try:
                logger.debug('Deluge: Sending Deluge torrent with name %s and content [%s...]' % (name, str(torrentfile)[:40]))
            except:
                logger.debug('Deluge: Sending Deluge torrent with problematic name and some content')

            result = {'type': 'torrent',
                      'name': name,
                      'content': torrentfile}
            retid = _add_torrent_file(result)

        elif not (link.lower().startswith('http://') or link.lower().startswith('https://')):
            # Local .torrent path (or raw data supplied by the caller).
            if data:
                logger.debug('Deluge: Getting .torrent data')
                torrentfile = data
            else:
                logger.debug('Deluge: Getting .torrent file')
                with open(link, 'rb') as f:
                    torrentfile = f.read()
            if not name:
                name = _extract_torrent_name(torrentfile, link)
            try:
                logger.debug('Deluge: Sending Deluge torrent with name %s and content [%s...]' % (name, str(torrentfile)[:40]))
            except UnicodeDecodeError:
                logger.debug('Deluge: Sending Deluge torrent with name %s and content [%s...]' % (name.decode('utf-8'), str(torrentfile)[:40]))

            result = {'type': 'torrent',
                      'name': name,
                      'content': torrentfile}
            retid = _add_torrent_file(result)

        else:
            logger.error('Deluge: Unknown file type: %s' % link)

        if retid:
            logger.info('Deluge: Torrent sent to Deluge successfully  (%s)' % retid)
            return retid
        else:
            logger.info('Deluge: Returned status %s' % retid)
            return False

    except Exception as e:
        logger.error(str(e))
        formatted_lines = traceback.format_exc().splitlines()
        logger.error('; '.join(formatted_lines))
def getTorrentFolder(result):
    """Wait for Deluge to start the torrent, then return its name.

    Polls web.get_torrent_status until some data has been transferred (up to
    10 polls, 5 seconds apart) so Deluge has created the download folder,
    then stores 'save_path' and 'name' on *result* and returns the name
    (implicitly None on any failure).
    """
    logger.debug('Deluge: Get torrent folder name')
    if not any(delugeweb_auth):
        _get_auth()

    try:
        post_data = json.dumps({"method": "web.get_torrent_status",
                                "params": [
                                    result['hash'],
                                    ["total_done"]
                                ],
                                "id": 21})

        response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        result['total_done'] = json.loads(response.text)['result']['total_done']

        # Poll until the torrent has actually transferred something
        # (bounded: 10 tries x 5s sleep, ~50 seconds max).
        tries = 0
        while result['total_done'] == 0 and tries < 10:
            tries += 1
            time.sleep(5)
            response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
            result['total_done'] = json.loads(response.text)['result']['total_done']

        post_data = json.dumps({"method": "web.get_torrent_status",
                                "params": [
                                    result['hash'],
                                    [
                                        "name",
                                        "save_path",
                                        "total_size",
                                        "num_files",
                                        "message",
                                        "tracker",
                                        "comment"
                                    ]
                                ],
                                "id": 23})

        response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)

        result['save_path'] = json.loads(response.text)['result']['save_path']
        result['name'] = json.loads(response.text)['result']['name']

        return json.loads(response.text)['result']['name']
    except Exception as e:
        logger.debug('Deluge: Could not get torrent folder name: %s' % str(e))
def removeTorrent(torrentid, remove_data=False):
    """Remove a finished torrent from Deluge.

    Refuses to remove torrents that are still queued/seeding/downloading/
    checking/in-error. Returns the core.remove_torrent result, False when the
    torrent is not finished (or its state is unavailable), None on error.
    """
    logger.debug('Deluge: Remove torrent %s' % torrentid)
    if not any(delugeweb_auth):
        _get_auth()
    try:
        logger.debug('Deluge: Checking if torrent %s finished seeding' % str(torrentid))
        status_query = json.dumps({"method": "web.get_torrent_status",
                                   "params": [torrentid, ["name", "ratio", "state"]],
                                   "id": 26})
        response = requests.post(delugeweb_url, data=status_query.encode('utf-8'),
                                 cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        try:
            state = json.loads(response.text)['result']['state']
        except KeyError:
            logger.debug('Deluge: "state" KeyError when trying to remove torrent %s' % str(torrentid))
            return False

        still_active = ("queued", "seeding", "downloading", "checking", "error")
        if state.lower() in still_active:
            logger.debug('Deluge: Torrent %s is either queued or seeding, not removing yet' % str(torrentid))
            return False

        logger.debug('Deluge: Removing torrent %s' % str(torrentid))
        remove_call = json.dumps({"method": "core.remove_torrent",
                                  "params": [torrentid, remove_data],
                                  "id": 25})
        response = requests.post(delugeweb_url, data=remove_call.encode('utf-8'),
                                 cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        return json.loads(response.text)['result']
    except Exception as e:
        logger.error('Deluge: Removing torrent failed: %s' % str(e))
        logger.error('; '.join(traceback.format_exc().splitlines()))
        return None
def _get_auth():
    """Authenticate against the Deluge web UI and ensure it is connected to a
    daemon.

    Side effects: (re)sets the module globals delugeweb_auth (session
    cookies), delugeweb_url (JSON-RPC endpoint) and deluge_verify_cert.
    Returns the auth.login result on success, None on any failure.
    """
    logger.debug('Deluge: Authenticating...')
    global delugeweb_auth, delugeweb_url, deluge_verify_cert
    delugeweb_auth = {}

    delugeweb_host = headphones.CONFIG.DELUGE_HOST
    delugeweb_cert = headphones.CONFIG.DELUGE_CERT
    delugeweb_password = headphones.CONFIG.DELUGE_PASSWORD
    if len(delugeweb_password) > 0:
        # Only first/last character are logged; the rest is masked.
        logger.debug('Deluge: Using password %s******%s' % (delugeweb_password[0], delugeweb_password[-1]))

    if not delugeweb_host.startswith('http'):
        delugeweb_host = 'http://%s' % delugeweb_host

    if delugeweb_cert is None or delugeweb_cert.strip() == '':
        deluge_verify_cert = False
        logger.debug('Deluge: FYI no SSL certificate configured')
    else:
        # A configured certificate implies HTTPS.
        deluge_verify_cert = delugeweb_cert
        delugeweb_host = delugeweb_host.replace('http:', 'https:')
        logger.debug('Deluge: Using certificate %s, host is now %s' % (_scrubber(deluge_verify_cert), _scrubber(delugeweb_host)))

    if delugeweb_host.endswith('/'):
        delugeweb_host = delugeweb_host[:-1]

    delugeweb_url = delugeweb_host + '/json'

    post_data = json.dumps({"method": "auth.login",
                            "params": [delugeweb_password],
                            "id": 1})
    try:
        response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
    except requests.ConnectionError:
        # Plain HTTP failed -- retry once over HTTPS before giving up.
        try:
            logger.debug('Deluge: Connection failed, let\'s try HTTPS just in case')
            response = requests.post(delugeweb_url.replace('http:', 'https:'), data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
            # If the previous line didn't fail, change delugeweb_url for the rest of this session
            logger.error('Deluge: Switching to HTTPS, but certificate won\'t be verified because NO CERTIFICATE WAS CONFIGURED!')
            delugeweb_url = delugeweb_url.replace('http:', 'https:')
        except Exception as e:
            logger.error('Deluge: Authentication failed: %s' % str(e))
            formatted_lines = traceback.format_exc().splitlines()
            logger.error('; '.join(formatted_lines))
            return None
    except Exception as e:
        logger.error('Deluge: Authentication failed: %s' % str(e))
        formatted_lines = traceback.format_exc().splitlines()
        logger.error('; '.join(formatted_lines))
        return None

    auth = json.loads(response.text)["result"]
    auth_error = json.loads(response.text)["error"]
    logger.debug('Deluge: Authentication result: %s, Error: %s' % (auth, auth_error))
    delugeweb_auth = response.cookies
    logger.debug('Deluge: Authentication cookies: %s' % _scrubber(str(delugeweb_auth.get_dict())))

    post_data = json.dumps({"method": "web.connected",
                            "params": [],
                            "id": 10})
    try:
        response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
    except Exception as e:
        logger.error('Deluge: Authentication failed: %s' % str(e))
        formatted_lines = traceback.format_exc().splitlines()
        logger.error('; '.join(formatted_lines))
        return None

    connected = json.loads(response.text)['result']
    connected_error = json.loads(response.text)['error']
    logger.debug('Deluge: Connection result: %s, Error: %s' % (connected, connected_error))

    if not connected:
        # Web UI is not attached to a daemon yet: pick the first known host,
        # connect to it, then re-check.
        post_data = json.dumps({"method": "web.get_hosts",
                                "params": [],
                                "id": 11})
        try:
            response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
        except Exception as e:
            logger.error('Deluge: Authentication failed: %s' % str(e))
            formatted_lines = traceback.format_exc().splitlines()
            logger.error('; '.join(formatted_lines))
            return None

        delugeweb_hosts = json.loads(response.text)['result']

        # Check if delugeweb_hosts is None before checking its length
        if not delugeweb_hosts or len(delugeweb_hosts) == 0:
            logger.error('Deluge: WebUI does not contain daemons')
            return None

        post_data = json.dumps({"method": "web.connect",
                                "params": [delugeweb_hosts[0][0]],
                                "id": 11})
        try:
            response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
        except Exception as e:
            logger.error('Deluge: Authentication failed: %s' % str(e))
            formatted_lines = traceback.format_exc().splitlines()
            logger.error('; '.join(formatted_lines))
            return None

        post_data = json.dumps({"method": "web.connected",
                                "params": [],
                                "id": 10})
        try:
            response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
        except Exception as e:
            logger.error('Deluge: Authentication failed: %s' % str(e))
            formatted_lines = traceback.format_exc().splitlines()
            logger.error('; '.join(formatted_lines))
            return None

        connected = json.loads(response.text)['result']

        if not connected:
            logger.error('Deluge: WebUI could not connect to daemon')
            return None

    return auth
def _add_torrent_magnet(result):
    """Send a magnet URI (result['url']) to the Deluge daemon.

    Stores the new torrent hash in result['hash'] and returns it;
    returns False on failure.
    """
    logger.debug('Deluge: Adding magnet')
    if not any(delugeweb_auth):
        _get_auth()
    try:
        payload = json.dumps({"method": "core.add_torrent_magnet",
                              "params": [result['url'], {}],
                              "id": 2})
        response = requests.post(delugeweb_url, data=payload.encode('utf-8'),
                                 cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        reply = json.loads(response.text)
        result['hash'] = reply['result']
        logger.debug('Deluge: Response was %s' % str(reply))
        return reply['result']
    except Exception as e:
        logger.error('Deluge: Adding torrent magnet failed: %s' % str(e))
        logger.error('; '.join(traceback.format_exc().splitlines()))
        return False
def _add_torrent_url(result):
    """Ask the Deluge web UI to download a torrent from result['url'].

    Stores the server-side temp path in result['location'] and returns it;
    returns False on failure.
    """
    logger.debug('Deluge: Adding URL')
    if not any(delugeweb_auth):
        _get_auth()
    try:
        payload = json.dumps({"method": "web.download_torrent_from_url",
                              "params": [result['url'], {}],
                              "id": 32})
        response = requests.post(delugeweb_url, data=payload.encode('utf-8'),
                                 cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        reply = json.loads(response.text)
        result['location'] = reply['result']
        logger.debug('Deluge: Response was %s' % str(reply))
        return reply['result']
    except Exception as e:
        logger.error('Deluge: Adding torrent URL failed: %s' % str(e))
        logger.error('; '.join(traceback.format_exc().splitlines()))
        return False
def _add_torrent_file(result):
    """Upload a .torrent (result['name'] + result['content']) to Deluge via
    core.add_torrent_file.

    Download/move/paused options come from the Headphones config. Stores the
    new torrent hash in result['hash'] and returns it; returns False on error.
    """
    logger.debug('Deluge: Adding file')
    options = {}

    if headphones.CONFIG.DELUGE_DOWNLOAD_DIRECTORY:
        options['download_location'] = headphones.CONFIG.DELUGE_DOWNLOAD_DIRECTORY

    if headphones.CONFIG.DELUGE_DONE_DIRECTORY or headphones.CONFIG.DOWNLOAD_TORRENT_DIR:
        options['move_completed'] = 1
        if headphones.CONFIG.DELUGE_DONE_DIRECTORY:
            options['move_completed_path'] = headphones.CONFIG.DELUGE_DONE_DIRECTORY
        else:
            options['move_completed_path'] = headphones.CONFIG.DOWNLOAD_TORRENT_DIR

    if headphones.CONFIG.DELUGE_PAUSED:
        options['add_paused'] = headphones.CONFIG.DELUGE_PAUSED

    if not any(delugeweb_auth):
        _get_auth()

    try:
        # content is torrent file contents that needs to be encoded to base64
        # NOTE(review): when content is bytes (e.g. requests .content),
        # .encode('utf8') raises AttributeError, which lands in the generic
        # except below (returning False) rather than the UnicodeDecodeError
        # fallback -- confirm which content types actually reach here.
        post_data = json.dumps({"method": "core.add_torrent_file",
                                "params": [result['name'] + '.torrent',
                                           b64encode(result['content'].encode('utf8')),
                                           options],
                                "id": 2})
        response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        result['hash'] = json.loads(response.text)['result']
        logger.debug('Deluge: Response was %s' % str(json.loads(response.text)))
        return json.loads(response.text)['result']
    except UnicodeDecodeError:
        try:
            # content is torrent file contents that needs to be encoded to base64
            # this time let's try leaving the encoding as is
            logger.debug('Deluge: There was a decoding issue, let\'s try again')
            post_data = json.dumps({"method": "core.add_torrent_file",
                                    "params": [result['name'].decode('utf8') + '.torrent',
                                               b64encode(result['content']),
                                               options],
                                    "id": 22})
            response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
            result['hash'] = json.loads(response.text)['result']
            logger.debug('Deluge: Response was %s' % str(json.loads(response.text)))
            return json.loads(response.text)['result']
        except Exception as e:
            logger.error('Deluge: Adding torrent file failed after decode: %s' % str(e))
            formatted_lines = traceback.format_exc().splitlines()
            logger.error('; '.join(formatted_lines))
            return False
    except Exception as e:
        logger.error('Deluge: Adding torrent file failed: %s' % str(e))
        formatted_lines = traceback.format_exc().splitlines()
        logger.error('; '.join(formatted_lines))
        return False
def setTorrentLabel(result):
    """Apply the configured Deluge label to the torrent in result['hash'].

    Creates the label first when the Label plugin doesn't know it yet.
    Returns a truthy value on success, False when the Label plugin is
    missing; spaces in the configured label are replaced by underscores.
    """
    logger.debug('Deluge: Setting label')
    label = headphones.CONFIG.DELUGE_LABEL

    if not any(delugeweb_auth):
        _get_auth()

    if ' ' in label:
        logger.error('Deluge: Invalid label. Label can\'t contain spaces - replacing with underscores')
        label = label.replace(' ', '_')
    if label:
        # check if label already exists and create it if not
        post_data = json.dumps({"method": 'label.get_labels',
                                "params": [],
                                "id": 3})
        response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                 verify=deluge_verify_cert, headers=headers)
        labels = json.loads(response.text)['result']

        if labels is not None:
            if label not in labels:
                try:
                    logger.debug('Deluge: %s label doesn\'t exist in Deluge, let\'s add it' % label)
                    post_data = json.dumps({"method": 'label.add',
                                            "params": [label],
                                            "id": 4})
                    response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                             verify=deluge_verify_cert, headers=headers)
                    logger.debug('Deluge: %s label added to Deluge' % label)
                except Exception as e:
                    logger.error('Deluge: Setting label failed: %s' % str(e))
                    formatted_lines = traceback.format_exc().splitlines()
                    logger.error('; '.join(formatted_lines))

            # add label to torrent
            post_data = json.dumps({"method": 'label.set_torrent',
                                    "params": [result['hash'], label],
                                    "id": 5})
            response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
                                     verify=deluge_verify_cert, headers=headers)
            logger.debug('Deluge: %s label added to torrent' % label)
        else:
            # A None result means the Label plugin is not loaded.
            logger.debug('Deluge: Label plugin not detected')
            return False

        # True when the last label call reported no error.
        return not json.loads(response.text)['error']
def setSeedRatio(result):
    """Tell Deluge to stop the torrent in *result* at its seed ratio.

    Issues two core RPC calls: enable stop-at-ratio for the torrent, then
    set the ratio value.  Returns True when there is no ratio to set, the
    inverted error flag from Deluge otherwise, and None on failure.
    """
    logger.debug('Deluge: Setting seed ratio')
    if not any(delugeweb_auth):
        _get_auth()

    ratio = result['ratio'] if result['ratio'] else None
    if not ratio:
        # No ratio requested for this torrent; nothing to configure.
        return True

    try:
        stop_at_ratio = json.dumps({"method": "core.set_torrent_stop_at_ratio",
                                    "params": [result['hash'], True],
                                    "id": 5})
        requests.post(delugeweb_url, data=stop_at_ratio.encode('utf-8'),
                      cookies=delugeweb_auth, verify=deluge_verify_cert,
                      headers=headers)

        set_ratio = json.dumps({"method": "core.set_torrent_stop_ratio",
                                "params": [result['hash'], float(ratio)],
                                "id": 6})
        reply = requests.post(delugeweb_url, data=set_ratio.encode('utf-8'),
                              cookies=delugeweb_auth, verify=deluge_verify_cert,
                              headers=headers)
        return not json.loads(reply.text)['error']
    except Exception as e:
        logger.error('Deluge: Setting seed ratio failed: %s' % str(e))
        formatted_lines = traceback.format_exc().splitlines()
        logger.error('; '.join(formatted_lines))
        return None
| 27,190 | Python | .py | 523 | 38.441683 | 143 | 0.561774 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,305 | updater.py | rembo10_headphones/headphones/updater.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import logger, db, importer
def dbUpdate(forcefull=False):
    """Refresh metadata for every Active/Loading artist, oldest first.

    forcefull is passed straight through to importer.addArtisttoDB to force
    a complete re-import instead of an incremental one.
    """
    conn = db.DBConnection()
    active = conn.select(
        'SELECT ArtistID, ArtistName from artists WHERE Status="Active" or Status="Loading" order by LastUpdated ASC')

    logger.info('Starting update for %i active artists', len(active))

    for row in active:
        importer.addArtisttoDB(artistid=row[0], extrasonly=False, forcefull=forcefull)

    logger.info('Active artist update complete')
| 1,218 | Python | .py | 24 | 47.666667 | 118 | 0.758418 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,306 | torrentfinished.py | rembo10_headphones/headphones/torrentfinished.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import db, utorrent, transmission, deluge, qbittorrent, logger
import headphones
def checkTorrentFinished():
    """
    Remove Torrent + data if Post Processed and finished Seeding
    """
    logger.info("Checking if any torrents have finished seeding and can be removed")

    # Map TORRENT_DOWNLOADER setting to the matching client's remover;
    # anything else falls back to qBittorrent.
    removers = {
        1: transmission.removeTorrent,
        2: utorrent.removeTorrent,
        3: deluge.removeTorrent,
    }
    remove = removers.get(headphones.CONFIG.TORRENT_DOWNLOADER,
                          qbittorrent.removeTorrent)

    conn = db.DBConnection()
    for row in conn.select('SELECT * from snatched WHERE Status="Seed_Processed"'):
        # Second argument True => also delete the downloaded data
        if remove(row['TorrentHash'], True):
            conn.action('DELETE from snatched WHERE status = "Seed_Processed" and AlbumID=?',
                        [row['AlbumID']])

    logger.info("Checking finished torrents completed")
| 1,875 | Python | .py | 39 | 42.205128 | 93 | 0.712486 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,307 | webserve.py | rembo10_headphones/headphones/webserve.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# NZBGet support added by CurlyMo <curlymoo1@gmail.com> as a part of XBian - XBMC on the Raspberry Pi
import json
import os
import random
import re
import secrets
import sys
import threading
import time
from collections import OrderedDict
from dataclasses import asdict
from html import escape as html_escape
from operator import itemgetter
from urllib import parse
import cherrypy
from mako import exceptions
from mako.lookup import TemplateLookup
import headphones
from headphones import (
crier,
db,
importer,
lastfm,
librarysync,
logger,
mb,
notifiers,
searcher,
)
from headphones.helpers import (
checked,
clean_name,
have_pct_have_total,
pattern_substitute,
radio,
replace_illegal_chars,
today,
)
from headphones.types import Result
def serve_template(templatename, **kwargs):
    """Render a Mako template from the configured interface directory.

    On any rendering error, return Mako's HTML error page instead of
    propagating the exception to cherrypy.
    """
    interface_dir = os.path.join(str(headphones.PROG_DIR), 'data/interfaces/')
    template_dir = os.path.join(str(interface_dir), headphones.CONFIG.INTERFACE)

    _hplookup = TemplateLookup(directories=[template_dir])

    try:
        template = _hplookup.get_template(templatename)
        return template.render(**kwargs)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Only genuine errors should hit the error page.
        return exceptions.html_error_template().render()
class WebInterface(object):
    @cherrypy.expose
    def index(self):
        """Root endpoint: redirect to the home page."""
        raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def home(self):
myDB = db.DBConnection()
artists = myDB.select('SELECT * from artists order by ArtistSortName COLLATE NOCASE')
return serve_template(templatename="index.html", title="Home", artists=artists)
    @cherrypy.expose
    def threads(self):
        """Debug endpoint; crier.cry() presumably logs thread state — confirm
        in headphones.crier.  Redirects home afterwards."""
        crier.cry()
        raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def artistPage(self, ArtistID):
myDB = db.DBConnection()
artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [ArtistID]).fetchone()
# Don't redirect to the artist page until it has the bare minimum info inserted
# Redirect to the home page if we still can't get it after 5 seconds
retry = 0
while not artist and retry < 5:
time.sleep(1)
artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [ArtistID]).fetchone()
retry += 1
if not artist:
raise cherrypy.HTTPRedirect("home")
albums = myDB.select('SELECT * from albums WHERE ArtistID=? order by ReleaseDate DESC',
[ArtistID])
# Serve the extras up as a dict to make things easier for new templates (append new extras to the end)
extras_list = headphones.POSSIBLE_EXTRAS
if artist['Extras']:
artist_extras = list(map(int, artist['Extras'].split(',')))
else:
artist_extras = []
extras_dict = OrderedDict()
i = 1
for extra in extras_list:
if i in artist_extras:
extras_dict[extra] = "checked"
else:
extras_dict[extra] = ""
i += 1
return serve_template(templatename="artist.html", title=artist['ArtistName'], artist=artist,
albums=albums, extras=extras_dict)
@cherrypy.expose
def albumPage(self, AlbumID):
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
retry = 0
while retry < 5:
if not album:
time.sleep(1)
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
retry += 1
else:
break
if not album:
raise cherrypy.HTTPRedirect("home")
tracks = myDB.select(
'SELECT * from tracks WHERE AlbumID=? ORDER BY CAST(TrackNumber AS INTEGER)', [AlbumID])
description = myDB.action('SELECT * from descriptions WHERE ReleaseGroupID=?',
[AlbumID]).fetchone()
if not album['ArtistName']:
title = ' - '
else:
title = album['ArtistName'] + ' - '
if not album['AlbumTitle']:
title = title + ""
else:
title = title + album['AlbumTitle']
return serve_template(templatename="album.html", title=title, album=album, tracks=tracks,
description=description)
@cherrypy.expose
def search(self, name, type):
if len(name) == 0:
raise cherrypy.HTTPRedirect("home")
if type == 'artist':
searchresults = mb.findArtist(name, limit=100)
elif type == 'album':
searchresults = mb.findRelease(name, limit=100)
else:
searchresults = mb.findSeries(name, limit=100)
return serve_template(templatename="searchresults.html",
title='Search Results for: "' + html_escape(name) + '"',
searchresults=searchresults, name=html_escape(name), type=type)
    @cherrypy.expose
    def addArtist(self, artistid):
        """Add an artist by MusicBrainz id, then redirect to its page.

        The import runs on a background thread; join(1) gives it a one
        second head start so the artist row usually exists on redirect.
        """
        thread = threading.Thread(target=importer.addArtisttoDB, args=[artistid])
        thread.start()
        thread.join(1)
        raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % artistid)
    @cherrypy.expose
    def addSeries(self, seriesid):
        """Add a MusicBrainz series (imported via the artist pipeline with
        type "series"), then redirect to its page."""
        thread = threading.Thread(target=importer.addArtisttoDB,
                                  args=[seriesid, False, False, "series"])
        thread.start()
        thread.join(1)
        raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % seriesid)
@cherrypy.expose
def getExtras(self, ArtistID, newstyle=False, **kwargs):
# if calling this function without the newstyle, they're using the old format
# which doesn't separate extras, so we'll grab all of them
#
# If they are, we need to convert kwargs to string format
if not newstyle:
extras = "1,2,3,4,5,6,7,8,9,10,11,12,13,14"
else:
temp_extras_list = []
i = 1
for extra in headphones.POSSIBLE_EXTRAS:
if extra in kwargs:
temp_extras_list.append(i)
i += 1
extras = ','.join(str(n) for n in temp_extras_list)
myDB = db.DBConnection()
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'IncludeExtras': 1,
'Extras': extras}
myDB.upsert("artists", newValueDict, controlValueDict)
thread = threading.Thread(target=importer.addArtisttoDB, args=[ArtistID, True, False])
thread.start()
thread.join(1)
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
    @cherrypy.expose
    def removeExtras(self, ArtistID, ArtistName):
        """Turn off extras for an artist and purge unwanted extra releases.

        Deletes every release group of this artist that is still "Skipped"
        and is not of plain "Album" type, together with its tracks,
        releases and cached artwork, then re-finalizes the artist row.
        """
        myDB = db.DBConnection()
        controlValueDict = {'ArtistID': ArtistID}
        newValueDict = {'IncludeExtras': 0}
        myDB.upsert("artists", newValueDict, controlValueDict)
        extraalbums = myDB.select(
            'SELECT AlbumID from albums WHERE ArtistID=? AND Status="Skipped" AND Type!="Album"',
            [ArtistID])
        for album in extraalbums:
            myDB.action('DELETE from tracks WHERE ArtistID=? AND AlbumID=?',
                        [ArtistID, album['AlbumID']])
            myDB.action('DELETE from albums WHERE ArtistID=? AND AlbumID=?',
                        [ArtistID, album['AlbumID']])
            myDB.action('DELETE from allalbums WHERE ArtistID=? AND AlbumID=?',
                        [ArtistID, album['AlbumID']])
            myDB.action('DELETE from alltracks WHERE ArtistID=? AND AlbumID=?',
                        [ArtistID, album['AlbumID']])
            myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [album['AlbumID']])
            # Local import, presumably to dodge a circular import at module
            # load time — confirm before hoisting to the top of the file.
            from headphones import cache
            c = cache.Cache()
            c.remove_from_cache(AlbumID=album['AlbumID'])
        importer.finalize_update(ArtistID, ArtistName)
        raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@cherrypy.expose
def pauseArtist(self, ArtistID):
logger.info("Pausing artist: " + ArtistID)
myDB = db.DBConnection()
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'Status': 'Paused'}
myDB.upsert("artists", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@cherrypy.expose
def resumeArtist(self, ArtistID):
logger.info("Resuming artist: " + ArtistID)
myDB = db.DBConnection()
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'Status': 'Active'}
myDB.upsert("artists", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
    def removeArtist(self, ArtistID):
        """Delete every trace of an artist and blacklist the id.

        Removes the artist row plus all dependent rows (albums, tracks,
        releases, have-matches, descriptions) and cached artwork, then
        inserts the ArtistID into the blacklist table so library scans
        won't silently re-add it.  Helper; not exposed to cherrypy.
        """
        myDB = db.DBConnection()
        namecheck = myDB.select('SELECT ArtistName from artists where ArtistID=?', [ArtistID])
        for name in namecheck:
            artistname = name['ArtistName']
        # NOTE(review): if the artist row is already gone, `artistname`
        # below is unbound (NameError) — confirm callers always pass an
        # existing id.
        try:
            logger.info("Deleting all traces of artist: " + artistname)
        except TypeError:
            # ArtistName can be NULL; concatenating None raises TypeError
            logger.info("Deleting all traces of artist: null")
        myDB.action('DELETE from artists WHERE ArtistID=?', [ArtistID])
        # Local import, presumably to dodge a circular import at module
        # load time — confirm before hoisting to the top of the file.
        from headphones import cache
        c = cache.Cache()
        # Collect release-group ids from both album tables before purging
        rgids = myDB.select(
            'SELECT AlbumID FROM albums WHERE ArtistID=? UNION SELECT AlbumID FROM allalbums WHERE ArtistID=?',
            [ArtistID, ArtistID])
        for rgid in rgids:
            albumid = rgid['AlbumID']
            myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [albumid])
            myDB.action('DELETE from have WHERE Matched=?', [albumid])
            c.remove_from_cache(AlbumID=albumid)
            myDB.action('DELETE from descriptions WHERE ReleaseGroupID=?', [albumid])
        myDB.action('DELETE from albums WHERE ArtistID=?', [ArtistID])
        myDB.action('DELETE from tracks WHERE ArtistID=?', [ArtistID])
        myDB.action('DELETE from allalbums WHERE ArtistID=?', [ArtistID])
        myDB.action('DELETE from alltracks WHERE ArtistID=?', [ArtistID])
        myDB.action('DELETE from have WHERE ArtistName=?', [artistname])
        c.remove_from_cache(ArtistID=ArtistID)
        myDB.action('DELETE from descriptions WHERE ArtistID=?', [ArtistID])
        myDB.action('INSERT OR REPLACE into blacklist VALUES (?)', [ArtistID])
    @cherrypy.expose
    def deleteArtist(self, ArtistID):
        """Web endpoint: remove an artist entirely, then go home."""
        self.removeArtist(ArtistID)
        raise cherrypy.HTTPRedirect("home")
    @cherrypy.expose
    def scanArtist(self, ArtistID):
        """Run a library scan restricted to one artist's folder(s).

        Derives the on-disk artist folder name from the configured folder
        format, then starts a background libraryScan for that folder in
        each configured music/destination directory.
        """
        myDB = db.DBConnection()
        # NOTE(review): assumes the artist row exists — [0][0] raises
        # IndexError otherwise.
        artist_name = myDB.select('SELECT DISTINCT ArtistName FROM artists WHERE ArtistID=?', [ArtistID])[0][0]

        logger.info("Scanning artist: %s", artist_name)

        full_folder_format = headphones.CONFIG.FOLDER_FORMAT
        # Keep only the leading portion of the format up to the artist
        # token, e.g. "$First/$Artist/$Album" -> "$First/$Artist".
        # NOTE(review): findall(...)[0] raises IndexError when the format
        # contains no artist token.
        folder_format = re.findall(r'(.*?[Aa]rtist?)\.*', full_folder_format)[0]
        # Only folder layouts whose top level is the artist can be scanned
        acceptable_formats = ["$artist", "$sortartist", "$first/$artist", "$first/$sortartist"]

        if not folder_format.lower() in acceptable_formats:
            logger.info("Can't determine the artist folder from the configured folder_format. Not scanning")
            return

        # Format the folder to match the settings
        artist = artist_name.replace('/', '_')

        if headphones.CONFIG.FILE_UNDERSCORES:
            artist = artist.replace(' ', '_')

        if artist.startswith('The '):
            sortname = artist[4:] + ", The"
        else:
            sortname = artist

        # Numeric leading characters are grouped under a shared "0-9" bucket
        if sortname[0].isdigit():
            firstchar = '0-9'
        else:
            firstchar = sortname[0]

        values = {'$Artist': artist,
                  '$SortArtist': sortname,
                  '$First': firstchar.upper(),
                  '$artist': artist.lower(),
                  '$sortartist': sortname.lower(),
                  '$first': firstchar.lower(),
                  }

        folder = pattern_substitute(folder_format.strip(), values, normalize=True)
        folder = replace_illegal_chars(folder, type="folder")
        # Neutralize dot-prefixed path components (hidden dirs / Windows issues)
        folder = folder.replace('./', '_/').replace('/.', '/_')

        if folder.endswith('.'):
            folder = folder[:-1] + '_'

        if folder.startswith('.'):
            folder = '_' + folder[1:]

        dirs = []
        if headphones.CONFIG.MUSIC_DIR:
            dirs.append(headphones.CONFIG.MUSIC_DIR)
        if headphones.CONFIG.DESTINATION_DIR:
            dirs.append(headphones.CONFIG.DESTINATION_DIR)
        if headphones.CONFIG.LOSSLESS_DESTINATION_DIR:
            dirs.append(headphones.CONFIG.LOSSLESS_DESTINATION_DIR)

        # De-duplicate in case the same path is configured more than once
        dirs = set(dirs)

        try:
            for dir in dirs:
                artistfolder = os.path.join(dir, folder)
                # NOTE(review): isdir is called on encoded bytes; presumably
                # a Python 2 leftover — confirm it behaves on this platform.
                if not os.path.isdir(artistfolder.encode(headphones.SYS_ENCODING)):
                    logger.debug("Cannot find directory: " + artistfolder)
                    continue
                threading.Thread(target=librarysync.libraryScan,
                                 kwargs={"dir": artistfolder, "artistScan": True, "ArtistID": ArtistID,
                                         "ArtistName": artist_name}).start()
        except Exception as e:
            logger.error('Unable to complete the scan: %s' % e)
        raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@cherrypy.expose
def deleteEmptyArtists(self):
logger.info("Deleting all empty artists")
myDB = db.DBConnection()
emptyArtistIDs = [row['ArtistID'] for row in
myDB.select("SELECT ArtistID FROM artists WHERE LatestAlbum IS NULL")]
for ArtistID in emptyArtistIDs:
self.removeArtist(ArtistID)
    @cherrypy.expose
    def refreshArtist(self, ArtistID):
        """Force-refresh a single artist's metadata on a background thread,
        then redirect to its page."""
        thread = threading.Thread(target=importer.addArtisttoDB, args=[ArtistID, False, True])
        thread.start()
        thread.join(1)
        raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@cherrypy.expose
def markAlbums(self, ArtistID=None, action=None, **args):
myDB = db.DBConnection()
if action == 'WantedNew' or action == 'WantedLossless':
newaction = 'Wanted'
else:
newaction = action
for mbid in args:
logger.info("Marking %s as %s" % (mbid, newaction))
controlValueDict = {'AlbumID': mbid}
newValueDict = {'Status': newaction}
myDB.upsert("albums", newValueDict, controlValueDict)
if action == 'Wanted':
searcher.searchforalbum(mbid, new=False)
if action == 'WantedNew':
searcher.searchforalbum(mbid, new=True)
if action == 'WantedLossless':
searcher.searchforalbum(mbid, lossless=True)
if ArtistID:
ArtistIDT = ArtistID
else:
ArtistIDT = myDB.action('SELECT ArtistID FROM albums WHERE AlbumID=?', [mbid]).fetchone()[0]
myDB.action(
'UPDATE artists SET TotalTracks=(SELECT COUNT(*) FROM tracks WHERE ArtistID = ? AND AlbumTitle IN (SELECT AlbumTitle FROM albums WHERE Status != "Ignored")) WHERE ArtistID = ?',
[ArtistIDT, ArtistIDT])
if ArtistID:
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
else:
raise cherrypy.HTTPRedirect("upcoming")
@cherrypy.expose
def addArtists(self, action=None, **args):
if action == "add":
threading.Thread(target=importer.artistlist_to_mbids, args=[args, True]).start()
if action == "ignore":
myDB = db.DBConnection()
for artist in args:
myDB.action('DELETE FROM newartists WHERE ArtistName=?',
[artist.decode(headphones.SYS_ENCODING, 'replace')])
myDB.action('UPDATE have SET Matched="Ignored" WHERE ArtistName=?',
[artist.decode(headphones.SYS_ENCODING, 'replace')])
logger.info("Artist %s removed from new artist list and set to ignored" % artist)
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def queueAlbum(self, AlbumID, ArtistID=None, new=False, redirect=None, lossless=False):
logger.info("Marking album: " + AlbumID + " as wanted...")
myDB = db.DBConnection()
controlValueDict = {'AlbumID': AlbumID}
if lossless:
newValueDict = {'Status': 'Wanted Lossless'}
logger.info("...lossless only!")
else:
newValueDict = {'Status': 'Wanted'}
myDB.upsert("albums", newValueDict, controlValueDict)
searcher.searchforalbum(AlbumID, new)
if ArtistID:
redirect = "artistPage?ArtistID=%s" % ArtistID
raise cherrypy.HTTPRedirect(redirect)
@cherrypy.expose
@cherrypy.tools.json_out()
def choose_specific_download(self, AlbumID):
results = searcher.searchforalbum(AlbumID, choose_specific_download=True) or []
return list(map(asdict, results))
@cherrypy.expose
@cherrypy.tools.json_out()
def download_specific_release(self, AlbumID, title, size, url, provider, kind, **kwargs):
# Handle situations where the torrent url contains arguments that are parsed
if kwargs:
url = parse.quote(url, safe=":?/=&") + '&' + parse.urlencode(kwargs)
try:
result = [Result(title, int(size), url, provider, kind, True)]
except ValueError:
result = [Result(title, float(size), url, provider, kind, True)]
logger.info("Making sure we can download the chosen result")
data, result = searcher.preprocess(result)
if data and result:
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
searcher.send_to_downloader(data, result, album)
return {'result': 'success'}
else:
return {'result': 'failure'}
@cherrypy.expose
def unqueueAlbum(self, AlbumID, ArtistID):
logger.info("Marking album: " + AlbumID + "as skipped...")
myDB = db.DBConnection()
controlValueDict = {'AlbumID': AlbumID}
newValueDict = {'Status': 'Skipped'}
myDB.upsert("albums", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@cherrypy.expose
def deleteAlbum(self, AlbumID, ArtistID=None):
logger.info("Deleting all traces of album: " + AlbumID)
myDB = db.DBConnection()
myDB.action('DELETE from have WHERE Matched=?', [AlbumID])
album = myDB.action('SELECT ArtistID, ArtistName, AlbumTitle from albums where AlbumID=?',
[AlbumID]).fetchone()
if album:
ArtistID = album['ArtistID']
myDB.action('DELETE from have WHERE ArtistName=? AND AlbumTitle=?',
[album['ArtistName'], album['AlbumTitle']])
myDB.action('DELETE from albums WHERE AlbumID=?', [AlbumID])
myDB.action('DELETE from tracks WHERE AlbumID=?', [AlbumID])
myDB.action('DELETE from allalbums WHERE AlbumID=?', [AlbumID])
myDB.action('DELETE from alltracks WHERE AlbumID=?', [AlbumID])
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [AlbumID])
myDB.action('DELETE from descriptions WHERE ReleaseGroupID=?', [AlbumID])
from headphones import cache
c = cache.Cache()
c.remove_from_cache(AlbumID=AlbumID)
if ArtistID:
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
else:
raise cherrypy.HTTPRedirect("home")
    @cherrypy.expose
    def switchAlbum(self, AlbumID, ReleaseID):
        """
        Take the values from allalbums/alltracks (based on the ReleaseID) and
        swap it into the album & track tables
        """
        # Local import, presumably to dodge a circular import at module
        # load time — confirm before hoisting to the top of the file.
        from headphones import albumswitcher
        albumswitcher.switch(AlbumID, ReleaseID)
        raise cherrypy.HTTPRedirect("albumPage?AlbumID=%s" % AlbumID)
@cherrypy.expose
def editSearchTerm(self, AlbumID, SearchTerm):
logger.info("Updating search term for albumid: " + AlbumID)
myDB = db.DBConnection()
controlValueDict = {'AlbumID': AlbumID}
newValueDict = {'SearchTerm': SearchTerm}
myDB.upsert("albums", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("albumPage?AlbumID=%s" % AlbumID)
@cherrypy.expose
def upcoming(self):
myDB = db.DBConnection()
upcoming = myDB.select(
"SELECT * from albums WHERE ReleaseDate > date('now') order by ReleaseDate ASC")
wanted = myDB.select("SELECT * from albums WHERE Status='Wanted' order by ReleaseDate ASC")
return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming,
wanted=wanted)
@cherrypy.expose
def manage(self):
myDB = db.DBConnection()
emptyArtists = myDB.select("SELECT * FROM artists WHERE LatestAlbum IS NULL")
return serve_template(templatename="manage.html", title="Manage", emptyArtists=emptyArtists)
@cherrypy.expose
def manageArtists(self):
myDB = db.DBConnection()
artists = myDB.select('SELECT * from artists order by ArtistSortName COLLATE NOCASE')
return serve_template(templatename="manageartists.html", title="Manage Artists",
artists=artists)
@cherrypy.expose
def manageAlbums(self, Status=None):
myDB = db.DBConnection()
if Status == "Upcoming":
albums = myDB.select("SELECT * from albums WHERE ReleaseDate > date('now')")
elif Status:
albums = myDB.select('SELECT * from albums WHERE Status=?', [Status])
else:
albums = myDB.select('SELECT * from albums')
return serve_template(templatename="managealbums.html", title="Manage Albums",
albums=albums)
@cherrypy.expose
def manageNew(self):
myDB = db.DBConnection()
newartists = myDB.select('SELECT * from newartists')
return serve_template(templatename="managenew.html", title="Manage New Artists",
newartists=newartists)
    @cherrypy.expose
    def manageUnmatched(self):
        """Render the list of have-files that failed to match any album.

        Builds (artist, album) pairs from failed have-rows, drops any pair
        that already exists in the albums table (compared via clean_name),
        and serves the rest.
        """
        myDB = db.DBConnection()
        have_album_dictionary = []
        headphones_album_dictionary = []
        have_albums = myDB.select(
            'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName from have WHERE Matched = "Failed" GROUP BY AlbumTitle ORDER BY ArtistName')
        for albums in have_albums:
            # Have to skip over manually matched tracks
            if albums['ArtistName'] and albums['AlbumTitle'] and albums['TrackTitle']:
                original_clean = clean_name(
                    albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
                # else:
                # original_clean = None
                # A CleanName equal to the recomputed value means the row was
                # never manually renamed, so it is genuinely unmatched
                if original_clean == albums['CleanName']:
                    have_dict = {'ArtistName': albums['ArtistName'],
                                 'AlbumTitle': albums['AlbumTitle']}
                    have_album_dictionary.append(have_dict)
        headphones_albums = myDB.select(
            'SELECT ArtistName, AlbumTitle from albums ORDER BY ArtistName')
        for albums in headphones_albums:
            if albums['ArtistName'] and albums['AlbumTitle']:
                headphones_dict = {'ArtistName': albums['ArtistName'],
                                   'AlbumTitle': albums['AlbumTitle']}
                headphones_album_dictionary.append(headphones_dict)
        # unmatchedalbums = [f for f in have_album_dictionary if f not in [x for x in headphones_album_dictionary]]
        # Set of normalized (artist, album) pairs already known to headphones
        check = set(
            [(clean_name(d['ArtistName']).lower(),
              clean_name(d['AlbumTitle']).lower()) for d in
             headphones_album_dictionary])
        unmatchedalbums = [d for d in have_album_dictionary if (
            clean_name(d['ArtistName']).lower(),
            clean_name(d['AlbumTitle']).lower()) not in check]
        return serve_template(templatename="manageunmatched.html", title="Manage Unmatched Items",
                              unmatchedalbums=unmatchedalbums)
@cherrypy.expose
def markUnmatched(self, action=None, existing_artist=None, existing_album=None, new_artist=None,
new_album=None):
myDB = db.DBConnection()
if action == "ignoreArtist":
artist = existing_artist
myDB.action(
'UPDATE have SET Matched="Ignored" WHERE ArtistName=? AND Matched = "Failed"',
[artist])
elif action == "ignoreAlbum":
artist = existing_artist
album = existing_album
myDB.action(
'UPDATE have SET Matched="Ignored" WHERE ArtistName=? AND AlbumTitle=? AND Matched = "Failed"',
(artist, album))
elif action == "matchArtist":
existing_artist_clean = clean_name(existing_artist).lower()
new_artist_clean = clean_name(new_artist).lower()
if new_artist_clean != existing_artist_clean:
have_tracks = myDB.action(
'SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=?',
[existing_artist])
update_count = 0
artist_id = None
for entry in have_tracks:
old_clean_filename = entry['CleanName']
if old_clean_filename.startswith(existing_artist_clean):
new_clean_filename = old_clean_filename.replace(existing_artist_clean,
new_artist_clean, 1)
myDB.action(
'UPDATE have SET CleanName=? WHERE ArtistName=? AND CleanName=?',
[new_clean_filename, existing_artist, old_clean_filename])
# Attempt to match tracks with new CleanName
match_alltracks = myDB.action(
'SELECT CleanName FROM alltracks WHERE CleanName = ?',
[new_clean_filename]).fetchone()
if match_alltracks:
myDB.action(
'UPDATE alltracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ?',
[entry['Location'], entry['BitRate'], entry['Format'], new_clean_filename])
match_tracks = myDB.action(
'SELECT ArtistID, CleanName, AlbumID FROM tracks WHERE CleanName = ?',
[new_clean_filename]).fetchone()
if match_tracks:
myDB.action(
'UPDATE tracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ?',
[entry['Location'], entry['BitRate'], entry['Format'], new_clean_filename])
myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?',
[new_clean_filename])
update_count += 1
artist_id = match_tracks['Artist_ID']
logger.info("Manual matching yielded %s new matches for Artist: %s" % (update_count, new_artist))
if artist_id:
librarysync.update_album_status(ArtistID=artist_id)
else:
logger.info(
"Artist %s already named appropriately; nothing to modify" % existing_artist)
elif action == "matchAlbum":
existing_artist_clean = clean_name(existing_artist).lower()
new_artist_clean = clean_name(new_artist).lower()
existing_album_clean = clean_name(existing_album).lower()
new_album_clean = clean_name(new_album).lower()
existing_clean_string = existing_artist_clean + " " + existing_album_clean
new_clean_string = new_artist_clean + " " + new_album_clean
if existing_clean_string != new_clean_string:
have_tracks = myDB.action(
'SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=? AND AlbumTitle=?',
(existing_artist, existing_album))
update_count = 0
for entry in have_tracks:
old_clean_filename = entry['CleanName']
if old_clean_filename.startswith(existing_clean_string):
new_clean_filename = old_clean_filename.replace(existing_clean_string,
new_clean_string, 1)
myDB.action(
'UPDATE have SET CleanName=? WHERE ArtistName=? AND AlbumTitle=? AND CleanName=?',
[new_clean_filename, existing_artist, existing_album,
old_clean_filename])
# Attempt to match tracks with new CleanName
match_alltracks = myDB.action(
'SELECT CleanName FROM alltracks WHERE CleanName = ?',
[new_clean_filename]).fetchone()
if match_alltracks:
myDB.action(
'UPDATE alltracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ?',
[entry['Location'], entry['BitRate'], entry['Format'], new_clean_filename])
match_tracks = myDB.action(
'SELECT CleanName, AlbumID FROM tracks WHERE CleanName = ?',
[new_clean_filename]).fetchone()
if match_tracks:
myDB.action(
'UPDATE tracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ?',
[entry['Location'], entry['BitRate'], entry['Format'], new_clean_filename])
myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?',
[new_clean_filename])
album_id = match_tracks['AlbumID']
update_count += 1
logger.info("Manual matching yielded %s new matches for Artist: %s / Album: %s" % (
update_count, new_artist, new_album))
if update_count > 0:
librarysync.update_album_status(album_id)
else:
logger.info(
"Artist %s / Album %s already named appropriately; nothing to modify" % (
existing_artist, existing_album))
    @cherrypy.expose
    def manageManual(self):
        """Render have-rows that were manually matched or explicitly ignored.

        A row counts as "Matched" when flagged Manual or when its stored
        CleanName differs from the recomputed one (i.e. it was renamed).
        """
        myDB = db.DBConnection()
        manual_albums = []
        manualalbums = myDB.select(
            'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have')
        for albums in manualalbums:
            if albums['ArtistName'] and albums['AlbumTitle'] and albums['TrackTitle']:
                original_clean = clean_name(
                    albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
                if albums['Matched'] == "Ignored" or albums['Matched'] == "Manual" or albums[
                        'CleanName'] != original_clean:
                    if albums['Matched'] == "Ignored":
                        album_status = "Ignored"
                    elif albums['Matched'] == "Manual" or albums['CleanName'] != original_clean:
                        album_status = "Matched"
                    manual_dict = {'ArtistName': albums['ArtistName'],
                                   'AlbumTitle': albums['AlbumTitle'], 'AlbumStatus': album_status}
                    # De-duplicate: many tracks map to the same album entry
                    if manual_dict not in manual_albums:
                        manual_albums.append(manual_dict)
        manual_albums_sorted = sorted(manual_albums, key=itemgetter('ArtistName', 'AlbumTitle'))
        return serve_template(templatename="managemanual.html", title="Manage Manual Items",
                              manualalbums=manual_albums_sorted)
    @cherrypy.expose
    def markManual(self, action=None, existing_artist=None, existing_album=None):
        """Undo manual matches / un-ignore entries from the Manage Manual page.

        action is one of:
          * "unignoreArtist"/"unignoreAlbum" — flip Ignored back to Failed.
          * "unmatchArtist"/"unmatchAlbum" — restore the recomputed CleanName
            on renamed have-rows, clear location/bitrate/format from the
            tracks they had been matched to, and refresh album statuses.
        """
        myDB = db.DBConnection()
        if action == "unignoreArtist":
            artist = existing_artist
            myDB.action('UPDATE have SET Matched="Failed" WHERE ArtistName=? AND Matched="Ignored"',
                        [artist])
            logger.info("Artist: %s successfully restored to unmatched list" % artist)
        elif action == "unignoreAlbum":
            artist = existing_artist
            album = existing_album
            myDB.action(
                'UPDATE have SET Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND Matched="Ignored"',
                (artist, album))
            logger.info("Album: %s successfully restored to unmatched list" % album)
        elif action == "unmatchArtist":
            artist = existing_artist
            update_clean = myDB.select(
                'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=?',
                [artist])
            update_count = 0
            for tracks in update_clean:
                # Recompute what the CleanName would be without manual edits
                original_clean = clean_name(
                    tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks[
                        'TrackTitle']).lower()
                album = tracks['AlbumTitle']
                track_title = tracks['TrackTitle']
                # A differing CleanName means this row was manually renamed
                if tracks['CleanName'] != original_clean:
                    artist_id_check = myDB.action('SELECT ArtistID FROM tracks WHERE CleanName = ?',
                                                  [tracks['CleanName']]).fetchone()
                    if artist_id_check:
                        artist_id = artist_id_check[0]
                        # Detach the file info from the previously matched track
                        myDB.action(
                            'UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName = ?',
                            [None, None, None, tracks['CleanName']])
                        myDB.action(
                            'UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName = ?',
                            [None, None, None, tracks['CleanName']])
                    myDB.action(
                        'UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?',
                        (original_clean, artist, album, track_title))
                    update_count += 1
            # NOTE(review): artist_id is only bound if some renamed row also
            # had a matching tracks entry; a renamed row with no track match
            # still bumps update_count — confirm this can't NameError here.
            if update_count > 0:
                librarysync.update_album_status(ArtistID=artist_id)
            logger.info("Artist: %s successfully restored to unmatched list" % artist)
        elif action == "unmatchAlbum":
            artist = existing_artist
            album = existing_album
            update_clean = myDB.select(
                'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched FROM have WHERE ArtistName=? AND AlbumTitle=?',
                (artist, album))
            update_count = 0
            for tracks in update_clean:
                original_clean = clean_name(
                    tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks[
                        'TrackTitle']).lower()
                track_title = tracks['TrackTitle']
                if tracks['CleanName'] != original_clean:
                    album_id_check = myDB.action('SELECT AlbumID FROM tracks WHERE CleanName = ?',
                                                 [tracks['CleanName']]).fetchone()
                    if album_id_check:
                        album_id = album_id_check[0]
                        myDB.action(
                            'UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName = ?',
                            [None, None, None, tracks['CleanName']])
                        myDB.action(
                            'UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName = ?',
                            [None, None, None, tracks['CleanName']])
                    myDB.action(
                        'UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?',
                        (original_clean, artist, album, track_title))
                    update_count += 1
            # NOTE(review): same caveat as above for album_id being unbound
            if update_count > 0:
                librarysync.update_album_status(album_id)
            logger.info("Album: %s successfully restored to unmatched list" % album)
@cherrypy.expose
def markArtists(self, action=None, **args):
myDB = db.DBConnection()
artistsToAdd = []
for ArtistID in args:
if action == 'delete':
self.removeArtist(ArtistID)
elif action == 'pause':
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'Status': 'Paused'}
myDB.upsert("artists", newValueDict, controlValueDict)
elif action == 'resume':
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'Status': 'Active'}
myDB.upsert("artists", newValueDict, controlValueDict)
else:
artistsToAdd.append(ArtistID)
if len(artistsToAdd) > 0:
logger.debug("Refreshing artists: %s" % artistsToAdd)
threading.Thread(target=importer.addArtistIDListToDB, args=[artistsToAdd]).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def importLastFM(self, username):
headphones.CONFIG.LASTFM_USERNAME = username
headphones.CONFIG.write()
threading.Thread(target=lastfm.getArtists).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def importLastFMTag(self, tag, limit):
threading.Thread(target=lastfm.getTagTopArtists, args=(tag, limit)).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def importItunes(self, path):
headphones.CONFIG.PATH_TO_XML = path
headphones.CONFIG.write()
thread = threading.Thread(target=importer.itunesImport, args=[path])
thread.start()
thread.join(10)
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def musicScan(self, path, scan=0, redirect=None, autoadd=0, libraryscan=0):
headphones.CONFIG.LIBRARYSCAN = libraryscan
headphones.CONFIG.AUTO_ADD_ARTISTS = autoadd
try:
params = {}
headphones.CONFIG.MUSIC_DIR = path
headphones.CONFIG.write()
except Exception as e:
logger.warn("Cannot save scan directory to config: %s", e)
if scan:
params = {"dir": path}
if scan:
try:
threading.Thread(target=librarysync.libraryScan, kwargs=params).start()
except Exception as e:
logger.error('Unable to complete the scan: %s' % e)
if redirect:
raise cherrypy.HTTPRedirect(redirect)
else:
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def forceUpdate(self):
from headphones import updater
threading.Thread(target=updater.dbUpdate, args=[False]).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def forceFullUpdate(self):
from headphones import updater
threading.Thread(target=updater.dbUpdate, args=[True]).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def forceSearch(self):
from headphones import searcher
threading.Thread(target=searcher.searchforalbum).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def forcePostProcess(self, dir=None, album_dir=None, keep_original_folder=False):
from headphones import postprocessor
threading.Thread(target=postprocessor.forcePostProcess,
kwargs={'dir': dir, 'album_dir': album_dir,
'keep_original_folder': keep_original_folder == 'True'}).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def checkGithub(self):
from headphones import versioncheck
versioncheck.checkGithub()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def history(self):
myDB = db.DBConnection()
history = myDB.select(
'''SELECT AlbumID, Title, Size, URL, DateAdded, Status, Kind, ifnull(FolderName, '?') FolderName FROM snatched WHERE Status NOT LIKE "Seed%" ORDER BY DateAdded DESC''')
return serve_template(templatename="history.html", title="History", history=history)
@cherrypy.expose
def logs(self):
return serve_template(templatename="logs.html", title="Log", lineList=headphones.LOG_LIST)
@cherrypy.expose
def clearLogs(self):
headphones.LOG_LIST = []
logger.info("Web logs cleared")
raise cherrypy.HTTPRedirect("logs")
@cherrypy.expose
def toggleVerbose(self):
headphones.VERBOSE = not headphones.VERBOSE
logger.initLogger(console=not headphones.QUIET,
log_dir=headphones.CONFIG.LOG_DIR, verbose=headphones.VERBOSE)
logger.info("Verbose toggled, set to %s", headphones.VERBOSE)
logger.debug("If you read this message, debug logging is available")
raise cherrypy.HTTPRedirect("logs")
@cherrypy.expose
@cherrypy.tools.json_out()
def getLog(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc",
sSearch="", **kwargs):
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
filtered = []
if sSearch == "":
filtered = headphones.LOG_LIST[::]
else:
filtered = [row for row in headphones.LOG_LIST for column in row if
sSearch.lower() in column.lower()]
sortcolumn = 0
if iSortCol_0 == '1':
sortcolumn = 2
elif iSortCol_0 == '2':
sortcolumn = 1
filtered.sort(key=lambda x: x[sortcolumn], reverse=sSortDir_0 == "desc")
rows = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = [[row[0], row[2], row[1]] for row in rows]
return {
'iTotalDisplayRecords': len(filtered),
'iTotalRecords': len(headphones.LOG_LIST),
'aaData': rows,
}
@cherrypy.expose
@cherrypy.tools.json_out()
def getArtists_json(self, iDisplayStart=0, iDisplayLength=100, sSearch="", iSortCol_0='0',
sSortDir_0='asc', **kwargs):
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
filtered = []
totalcount = 0
myDB = db.DBConnection()
sortcolumn = 'ArtistSortName'
sortbyhavepercent = False
if iSortCol_0 == '2':
sortcolumn = 'Status'
elif iSortCol_0 == '3':
sortcolumn = 'ReleaseDate'
elif iSortCol_0 == '4':
sortbyhavepercent = True
if sSearch == "":
query = 'SELECT * from artists order by %s COLLATE NOCASE %s' % (sortcolumn, sSortDir_0)
filtered = myDB.select(query)
totalcount = len(filtered)
else:
query = 'SELECT * from artists WHERE ArtistSortName LIKE "%' + sSearch + '%" OR LatestAlbum LIKE "%' + sSearch + '%"' + 'ORDER BY %s COLLATE NOCASE %s' % (
sortcolumn, sSortDir_0)
filtered = myDB.select(query)
totalcount = myDB.select('SELECT COUNT(*) from artists')[0][0]
if sortbyhavepercent:
filtered.sort(key=have_pct_have_total, reverse=sSortDir_0 == "asc")
# can't figure out how to change the datatables default sorting order when its using an ajax datasource so ill
# just reverse it here and the first click on the "Latest Album" header will sort by descending release date
if sortcolumn == 'ReleaseDate':
filtered.reverse()
artists = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = []
for artist in artists:
row = {"ArtistID": artist['ArtistID'],
"ArtistName": artist["ArtistName"],
"ArtistSortName": artist["ArtistSortName"],
"Status": artist["Status"],
"TotalTracks": artist["TotalTracks"],
"HaveTracks": artist["HaveTracks"],
"LatestAlbum": "",
"ReleaseDate": "",
"ReleaseInFuture": "False",
"AlbumID": "",
}
if not row['HaveTracks']:
row['HaveTracks'] = 0
if artist['ReleaseDate'] and artist['LatestAlbum']:
row['ReleaseDate'] = artist['ReleaseDate']
row['LatestAlbum'] = artist['LatestAlbum']
row['AlbumID'] = artist['AlbumID']
if artist['ReleaseDate'] > today():
row['ReleaseInFuture'] = "True"
elif artist['LatestAlbum']:
row['ReleaseDate'] = ''
row['LatestAlbum'] = artist['LatestAlbum']
row['AlbumID'] = artist['AlbumID']
rows.append(row)
data = {'iTotalDisplayRecords': len(filtered),
'iTotalRecords': totalcount,
'aaData': rows,
}
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def getAlbumsByArtist_json(self, artist=None):
myDB = db.DBConnection()
data = {}
counter = 0
album_list = myDB.select("SELECT AlbumTitle from albums WHERE ArtistName=?", [artist])
for album in album_list:
data[counter] = album['AlbumTitle']
counter += 1
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def getArtistjson(self, ArtistID, **kwargs):
myDB = db.DBConnection()
artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [ArtistID]).fetchone()
return {
'ArtistName': artist['ArtistName'],
'Status': artist['Status']
}
@cherrypy.expose
@cherrypy.tools.json_out()
def getAlbumjson(self, AlbumID, **kwargs):
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
return {
'AlbumTitle': album['AlbumTitle'],
'ArtistName': album['ArtistName'],
'Status': album['Status']
}
@cherrypy.expose
def clearhistory(self, type=None, date_added=None, title=None):
myDB = db.DBConnection()
if type:
if type == 'all':
logger.info("Clearing all history")
myDB.action('DELETE from snatched WHERE Status NOT LIKE "Seed%"')
else:
logger.info("Clearing history where status is %s" % type)
myDB.action('DELETE from snatched WHERE Status=?', [type])
else:
logger.info("Deleting '%s' from history" % title)
myDB.action(
'DELETE from snatched WHERE Status NOT LIKE "Seed%" AND Title=? AND DateAdded=?',
[title, date_added])
raise cherrypy.HTTPRedirect("history")
@cherrypy.expose
def generateAPI(self):
apikey = secrets.token_hex(nbytes=16)
logger.info("New API generated")
return apikey
@cherrypy.expose
def forceScan(self, keepmatched=None):
myDB = db.DBConnection()
#########################################
# NEED TO MOVE THIS INTO A SEPARATE FUNCTION BEFORE RELEASE
myDB.select('DELETE from Have')
logger.info('Removed all entries in local library database')
myDB.select('UPDATE alltracks SET Location=NULL, BitRate=NULL, Format=NULL')
myDB.select('UPDATE tracks SET Location=NULL, BitRate=NULL, Format=NULL')
logger.info('All tracks in library unmatched')
myDB.action('UPDATE artists SET HaveTracks=NULL')
logger.info('Reset track counts for all artists')
myDB.action(
'UPDATE albums SET Status="Skipped" WHERE Status="Skipped" OR Status="Downloaded"')
logger.info('Marking all unwanted albums as Skipped')
try:
threading.Thread(target=librarysync.libraryScan).start()
except Exception as e:
logger.error('Unable to complete the scan: %s' % e)
raise cherrypy.HTTPRedirect("home")
    @cherrypy.expose
    def config(self):
        """Render the settings page.

        Builds one flat dict of template variables straight from
        headphones.CONFIG -- checked()/radio() convert values into HTML
        attribute strings -- then applies the SoftChroot to path options and
        expands CONFIG.EXTRAS (a comma-separated list of 1-based indices)
        into per-extra checkbox state for the template.
        """
        # Available UI themes are the subdirectories of data/interfaces/.
        interface_dir = os.path.join(headphones.PROG_DIR, 'data/interfaces/')
        interface_list = [name for name in os.listdir(interface_dir) if
                          os.path.isdir(os.path.join(interface_dir, name))]
        config = {
            "http_host": headphones.CONFIG.HTTP_HOST,
            "http_username": headphones.CONFIG.HTTP_USERNAME,
            "http_port": headphones.CONFIG.HTTP_PORT,
            "http_password": headphones.CONFIG.HTTP_PASSWORD,
            "launch_browser": checked(headphones.CONFIG.LAUNCH_BROWSER),
            "enable_https": checked(headphones.CONFIG.ENABLE_HTTPS),
            "https_cert": headphones.CONFIG.HTTPS_CERT,
            "https_key": headphones.CONFIG.HTTPS_KEY,
            "api_enabled": checked(headphones.CONFIG.API_ENABLED),
            "api_key": headphones.CONFIG.API_KEY,
            "download_scan_interval": headphones.CONFIG.DOWNLOAD_SCAN_INTERVAL,
            "update_db_interval": headphones.CONFIG.UPDATE_DB_INTERVAL,
            "mb_ignore_age": headphones.CONFIG.MB_IGNORE_AGE,
            "mb_ignore_age_missing": checked(headphones.CONFIG.MB_IGNORE_AGE_MISSING),
            "search_interval": headphones.CONFIG.SEARCH_INTERVAL,
            "libraryscan_interval": headphones.CONFIG.LIBRARYSCAN_INTERVAL,
            "sab_host": headphones.CONFIG.SAB_HOST,
            "sab_username": headphones.CONFIG.SAB_USERNAME,
            "sab_apikey": headphones.CONFIG.SAB_APIKEY,
            "sab_password": headphones.CONFIG.SAB_PASSWORD,
            "sab_category": headphones.CONFIG.SAB_CATEGORY,
            "nzbget_host": headphones.CONFIG.NZBGET_HOST,
            "nzbget_username": headphones.CONFIG.NZBGET_USERNAME,
            "nzbget_password": headphones.CONFIG.NZBGET_PASSWORD,
            "nzbget_category": headphones.CONFIG.NZBGET_CATEGORY,
            "nzbget_priority": headphones.CONFIG.NZBGET_PRIORITY,
            "qbittorrent_host": headphones.CONFIG.QBITTORRENT_HOST,
            "qbittorrent_username": headphones.CONFIG.QBITTORRENT_USERNAME,
            "qbittorrent_password": headphones.CONFIG.QBITTORRENT_PASSWORD,
            "qbittorrent_label": headphones.CONFIG.QBITTORRENT_LABEL,
            "transmission_host": headphones.CONFIG.TRANSMISSION_HOST,
            "transmission_username": headphones.CONFIG.TRANSMISSION_USERNAME,
            "transmission_password": headphones.CONFIG.TRANSMISSION_PASSWORD,
            "deluge_host": headphones.CONFIG.DELUGE_HOST,
            "deluge_cert": headphones.CONFIG.DELUGE_CERT,
            "deluge_password": headphones.CONFIG.DELUGE_PASSWORD,
            "deluge_label": headphones.CONFIG.DELUGE_LABEL,
            "deluge_done_directory": headphones.CONFIG.DELUGE_DONE_DIRECTORY,
            "deluge_download_directory": headphones.CONFIG.DELUGE_DOWNLOAD_DIRECTORY,
            "deluge_paused": checked(headphones.CONFIG.DELUGE_PAUSED),
            "utorrent_host": headphones.CONFIG.UTORRENT_HOST,
            "utorrent_username": headphones.CONFIG.UTORRENT_USERNAME,
            "utorrent_password": headphones.CONFIG.UTORRENT_PASSWORD,
            "utorrent_label": headphones.CONFIG.UTORRENT_LABEL,
            "nzb_downloader_sabnzbd": radio(headphones.CONFIG.NZB_DOWNLOADER, 0),
            "nzb_downloader_nzbget": radio(headphones.CONFIG.NZB_DOWNLOADER, 1),
            "nzb_downloader_blackhole": radio(headphones.CONFIG.NZB_DOWNLOADER, 2),
            "torrent_downloader_blackhole": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 0),
            "torrent_downloader_transmission": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 1),
            "torrent_downloader_utorrent": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 2),
            "torrent_downloader_deluge": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 3),
            "torrent_downloader_qbittorrent": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 4),
            "download_dir": headphones.CONFIG.DOWNLOAD_DIR,
            "soulseek_download_dir": headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR,
            "soulseek_incomplete_download_dir": headphones.CONFIG.SOULSEEK_INCOMPLETE_DOWNLOAD_DIR,
            "use_blackhole": checked(headphones.CONFIG.BLACKHOLE),
            "blackhole_dir": headphones.CONFIG.BLACKHOLE_DIR,
            "usenet_retention": headphones.CONFIG.USENET_RETENTION,
            "headphones_indexer": checked(headphones.CONFIG.HEADPHONES_INDEXER),
            "use_newznab": checked(headphones.CONFIG.NEWZNAB),
            "newznab_host": headphones.CONFIG.NEWZNAB_HOST,
            "newznab_apikey": headphones.CONFIG.NEWZNAB_APIKEY,
            "newznab_enabled": checked(headphones.CONFIG.NEWZNAB_ENABLED),
            "extra_newznabs": headphones.CONFIG.get_extra_newznabs(),
            "use_torznab": checked(headphones.CONFIG.TORZNAB),
            "torznab_host": headphones.CONFIG.TORZNAB_HOST,
            "torznab_apikey": headphones.CONFIG.TORZNAB_APIKEY,
            "torznab_ratio": headphones.CONFIG.TORZNAB_RATIO,
            "torznab_enabled": checked(headphones.CONFIG.TORZNAB_ENABLED),
            "extra_torznabs": headphones.CONFIG.get_extra_torznabs(),
            "use_nzbsorg": checked(headphones.CONFIG.NZBSORG),
            "nzbsorg_uid": headphones.CONFIG.NZBSORG_UID,
            "nzbsorg_hash": headphones.CONFIG.NZBSORG_HASH,
            "use_omgwtfnzbs": checked(headphones.CONFIG.OMGWTFNZBS),
            "omgwtfnzbs_uid": headphones.CONFIG.OMGWTFNZBS_UID,
            "omgwtfnzbs_apikey": headphones.CONFIG.OMGWTFNZBS_APIKEY,
            "preferred_words": headphones.CONFIG.PREFERRED_WORDS,
            "ignored_words": headphones.CONFIG.IGNORED_WORDS,
            "required_words": headphones.CONFIG.REQUIRED_WORDS,
            "ignore_clean_releases": checked(headphones.CONFIG.IGNORE_CLEAN_RELEASES),
            "torrentblackhole_dir": headphones.CONFIG.TORRENTBLACKHOLE_DIR,
            "download_torrent_dir": headphones.CONFIG.DOWNLOAD_TORRENT_DIR,
            "numberofseeders": headphones.CONFIG.NUMBEROFSEEDERS,
            "use_piratebay": checked(headphones.CONFIG.PIRATEBAY),
            "piratebay_proxy_url": headphones.CONFIG.PIRATEBAY_PROXY_URL,
            "piratebay_ratio": headphones.CONFIG.PIRATEBAY_RATIO,
            "use_rutracker": checked(headphones.CONFIG.RUTRACKER),
            "rutracker_user": headphones.CONFIG.RUTRACKER_USER,
            "rutracker_password": headphones.CONFIG.RUTRACKER_PASSWORD,
            "rutracker_ratio": headphones.CONFIG.RUTRACKER_RATIO,
            "rutracker_cookie": headphones.CONFIG.RUTRACKER_COOKIE,
            "use_orpheus": checked(headphones.CONFIG.ORPHEUS),
            "orpheus_username": headphones.CONFIG.ORPHEUS_USERNAME,
            "orpheus_password": headphones.CONFIG.ORPHEUS_PASSWORD,
            "orpheus_ratio": headphones.CONFIG.ORPHEUS_RATIO,
            "orpheus_url": headphones.CONFIG.ORPHEUS_URL,
            "use_redacted": checked(headphones.CONFIG.REDACTED),
            "redacted_username": headphones.CONFIG.REDACTED_USERNAME,
            "redacted_password": headphones.CONFIG.REDACTED_PASSWORD,
            "redacted_ratio": headphones.CONFIG.REDACTED_RATIO,
            "redacted_use_fltoken": checked(headphones.CONFIG.REDACTED_USE_FLTOKEN),
            "pref_qual_0": radio(headphones.CONFIG.PREFERRED_QUALITY, 0),
            "pref_qual_1": radio(headphones.CONFIG.PREFERRED_QUALITY, 1),
            "pref_qual_2": radio(headphones.CONFIG.PREFERRED_QUALITY, 2),
            "pref_qual_3": radio(headphones.CONFIG.PREFERRED_QUALITY, 3),
            "preferred_bitrate": headphones.CONFIG.PREFERRED_BITRATE,
            "preferred_bitrate_high": headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER,
            "preferred_bitrate_low": headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER,
            "preferred_bitrate_allow_lossless": checked(
                headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS),
            "detect_bitrate": checked(headphones.CONFIG.DETECT_BITRATE),
            "lossless_bitrate_from": headphones.CONFIG.LOSSLESS_BITRATE_FROM,
            "lossless_bitrate_to": headphones.CONFIG.LOSSLESS_BITRATE_TO,
            "freeze_db": checked(headphones.CONFIG.FREEZE_DB),
            "cue_split": checked(headphones.CONFIG.CUE_SPLIT),
            "cue_split_flac_path": headphones.CONFIG.CUE_SPLIT_FLAC_PATH,
            "cue_split_shntool_path": headphones.CONFIG.CUE_SPLIT_SHNTOOL_PATH,
            "move_files": checked(headphones.CONFIG.MOVE_FILES),
            "rename_files": checked(headphones.CONFIG.RENAME_FILES),
            "rename_single_disc_ignore": checked(headphones.CONFIG.RENAME_SINGLE_DISC_IGNORE),
            "correct_metadata": checked(headphones.CONFIG.CORRECT_METADATA),
            "cleanup_files": checked(headphones.CONFIG.CLEANUP_FILES),
            "keep_nfo": checked(headphones.CONFIG.KEEP_NFO),
            "add_album_art": checked(headphones.CONFIG.ADD_ALBUM_ART),
            "album_art_format": headphones.CONFIG.ALBUM_ART_FORMAT,
            "album_art_min_width": headphones.CONFIG.ALBUM_ART_MIN_WIDTH,
            "album_art_max_width": headphones.CONFIG.ALBUM_ART_MAX_WIDTH,
            "embed_album_art": checked(headphones.CONFIG.EMBED_ALBUM_ART),
            "embed_lyrics": checked(headphones.CONFIG.EMBED_LYRICS),
            "replace_existing_folders": checked(headphones.CONFIG.REPLACE_EXISTING_FOLDERS),
            "keep_original_folder": checked(headphones.CONFIG.KEEP_ORIGINAL_FOLDER),
            "destination_dir": headphones.CONFIG.DESTINATION_DIR,
            "lossless_destination_dir": headphones.CONFIG.LOSSLESS_DESTINATION_DIR,
            "folder_format": headphones.CONFIG.FOLDER_FORMAT,
            "file_format": headphones.CONFIG.FILE_FORMAT,
            "file_underscores": checked(headphones.CONFIG.FILE_UNDERSCORES),
            "include_extras": checked(headphones.CONFIG.INCLUDE_EXTRAS),
            "official_releases_only": checked(headphones.CONFIG.OFFICIAL_RELEASES_ONLY),
            "wait_until_release_date": checked(headphones.CONFIG.WAIT_UNTIL_RELEASE_DATE),
            "autowant_upcoming": checked(headphones.CONFIG.AUTOWANT_UPCOMING),
            "autowant_all": checked(headphones.CONFIG.AUTOWANT_ALL),
            "autowant_manually_added": checked(headphones.CONFIG.AUTOWANT_MANUALLY_ADDED),
            "do_not_process_unmatched": checked(headphones.CONFIG.DO_NOT_PROCESS_UNMATCHED),
            "keep_torrent_files": checked(headphones.CONFIG.KEEP_TORRENT_FILES),
            "prefer_torrents_0": radio(headphones.CONFIG.PREFER_TORRENTS, 0),
            "prefer_torrents_1": radio(headphones.CONFIG.PREFER_TORRENTS, 1),
            "prefer_torrents_2": radio(headphones.CONFIG.PREFER_TORRENTS, 2),
            "prefer_torrents_3": radio(headphones.CONFIG.PREFER_TORRENTS, 3),
            "magnet_links_0": radio(headphones.CONFIG.MAGNET_LINKS, 0),
            "magnet_links_1": radio(headphones.CONFIG.MAGNET_LINKS, 1),
            "magnet_links_2": radio(headphones.CONFIG.MAGNET_LINKS, 2),
            "magnet_links_3": radio(headphones.CONFIG.MAGNET_LINKS, 3),
            "log_dir": headphones.CONFIG.LOG_DIR,
            "cache_dir": headphones.CONFIG.CACHE_DIR,
            "keep_torrent_files_dir": headphones.CONFIG.KEEP_TORRENT_FILES_DIR,
            "interface_list": interface_list,
            "music_encoder": checked(headphones.CONFIG.MUSIC_ENCODER),
            "encoder": headphones.CONFIG.ENCODER,
            "xldprofile": headphones.CONFIG.XLDPROFILE,
            "bitrate": int(headphones.CONFIG.BITRATE),
            "encoder_path": headphones.CONFIG.ENCODER_PATH,
            "advancedencoder": headphones.CONFIG.ADVANCEDENCODER,
            "encoderoutputformat": headphones.CONFIG.ENCODEROUTPUTFORMAT,
            "samplingfrequency": headphones.CONFIG.SAMPLINGFREQUENCY,
            "encodervbrcbr": headphones.CONFIG.ENCODERVBRCBR,
            "encoderquality": headphones.CONFIG.ENCODERQUALITY,
            "encoderlossless": checked(headphones.CONFIG.ENCODERLOSSLESS),
            "encoder_multicore": checked(headphones.CONFIG.ENCODER_MULTICORE),
            "encoder_multicore_count": int(headphones.CONFIG.ENCODER_MULTICORE_COUNT),
            "delete_lossless_files": checked(headphones.CONFIG.DELETE_LOSSLESS_FILES),
            "growl_enabled": checked(headphones.CONFIG.GROWL_ENABLED),
            "growl_onsnatch": checked(headphones.CONFIG.GROWL_ONSNATCH),
            "growl_host": headphones.CONFIG.GROWL_HOST,
            "growl_password": headphones.CONFIG.GROWL_PASSWORD,
            "prowl_enabled": checked(headphones.CONFIG.PROWL_ENABLED),
            "prowl_onsnatch": checked(headphones.CONFIG.PROWL_ONSNATCH),
            "prowl_keys": headphones.CONFIG.PROWL_KEYS,
            "prowl_priority": headphones.CONFIG.PROWL_PRIORITY,
            "xbmc_enabled": checked(headphones.CONFIG.XBMC_ENABLED),
            "xbmc_host": headphones.CONFIG.XBMC_HOST,
            "xbmc_username": headphones.CONFIG.XBMC_USERNAME,
            "xbmc_password": headphones.CONFIG.XBMC_PASSWORD,
            "xbmc_update": checked(headphones.CONFIG.XBMC_UPDATE),
            "xbmc_notify": checked(headphones.CONFIG.XBMC_NOTIFY),
            "lms_enabled": checked(headphones.CONFIG.LMS_ENABLED),
            "lms_host": headphones.CONFIG.LMS_HOST,
            "plex_enabled": checked(headphones.CONFIG.PLEX_ENABLED),
            "plex_server_host": headphones.CONFIG.PLEX_SERVER_HOST,
            "plex_client_host": headphones.CONFIG.PLEX_CLIENT_HOST,
            "plex_username": headphones.CONFIG.PLEX_USERNAME,
            "plex_password": headphones.CONFIG.PLEX_PASSWORD,
            "plex_token": headphones.CONFIG.PLEX_TOKEN,
            "plex_update": checked(headphones.CONFIG.PLEX_UPDATE),
            "plex_notify": checked(headphones.CONFIG.PLEX_NOTIFY),
            "nma_enabled": checked(headphones.CONFIG.NMA_ENABLED),
            "nma_apikey": headphones.CONFIG.NMA_APIKEY,
            "nma_priority": int(headphones.CONFIG.NMA_PRIORITY),
            "nma_onsnatch": checked(headphones.CONFIG.NMA_ONSNATCH),
            "pushalot_enabled": checked(headphones.CONFIG.PUSHALOT_ENABLED),
            "pushalot_apikey": headphones.CONFIG.PUSHALOT_APIKEY,
            "pushalot_onsnatch": checked(headphones.CONFIG.PUSHALOT_ONSNATCH),
            "synoindex_enabled": checked(headphones.CONFIG.SYNOINDEX_ENABLED),
            "pushover_enabled": checked(headphones.CONFIG.PUSHOVER_ENABLED),
            "pushover_onsnatch": checked(headphones.CONFIG.PUSHOVER_ONSNATCH),
            "pushover_keys": headphones.CONFIG.PUSHOVER_KEYS,
            "pushover_apitoken": headphones.CONFIG.PUSHOVER_APITOKEN,
            "pushover_priority": headphones.CONFIG.PUSHOVER_PRIORITY,
            "pushbullet_enabled": checked(headphones.CONFIG.PUSHBULLET_ENABLED),
            "pushbullet_onsnatch": checked(headphones.CONFIG.PUSHBULLET_ONSNATCH),
            "pushbullet_apikey": headphones.CONFIG.PUSHBULLET_APIKEY,
            "pushbullet_deviceid": headphones.CONFIG.PUSHBULLET_DEVICEID,
            "telegram_enabled": checked(headphones.CONFIG.TELEGRAM_ENABLED),
            "telegram_onsnatch": checked(headphones.CONFIG.TELEGRAM_ONSNATCH),
            "telegram_token": headphones.CONFIG.TELEGRAM_TOKEN,
            "telegram_userid": headphones.CONFIG.TELEGRAM_USERID,
            "subsonic_enabled": checked(headphones.CONFIG.SUBSONIC_ENABLED),
            "subsonic_host": headphones.CONFIG.SUBSONIC_HOST,
            "subsonic_username": headphones.CONFIG.SUBSONIC_USERNAME,
            "subsonic_password": headphones.CONFIG.SUBSONIC_PASSWORD,
            "twitter_enabled": checked(headphones.CONFIG.TWITTER_ENABLED),
            "twitter_onsnatch": checked(headphones.CONFIG.TWITTER_ONSNATCH),
            "osx_notify_enabled": checked(headphones.CONFIG.OSX_NOTIFY_ENABLED),
            "osx_notify_onsnatch": checked(headphones.CONFIG.OSX_NOTIFY_ONSNATCH),
            "osx_notify_app": headphones.CONFIG.OSX_NOTIFY_APP,
            "boxcar_enabled": checked(headphones.CONFIG.BOXCAR_ENABLED),
            "boxcar_onsnatch": checked(headphones.CONFIG.BOXCAR_ONSNATCH),
            "boxcar_token": headphones.CONFIG.BOXCAR_TOKEN,
            "mirrorlist": headphones.MIRRORLIST,
            "mirror": headphones.CONFIG.MIRROR,
            "customhost": headphones.CONFIG.CUSTOMHOST,
            "customport": headphones.CONFIG.CUSTOMPORT,
            "customsleep": headphones.CONFIG.CUSTOMSLEEP,
            "customauth": checked(headphones.CONFIG.CUSTOMAUTH),
            "customuser": headphones.CONFIG.CUSTOMUSER,
            "custompass": headphones.CONFIG.CUSTOMPASS,
            "hpuser": headphones.CONFIG.HPUSER,
            "hppass": headphones.CONFIG.HPPASS,
            "lastfm_apikey": headphones.CONFIG.LASTFM_APIKEY,
            "songkick_enabled": checked(headphones.CONFIG.SONGKICK_ENABLED),
            "songkick_apikey": headphones.CONFIG.SONGKICK_APIKEY,
            "songkick_location": headphones.CONFIG.SONGKICK_LOCATION,
            "songkick_filter_enabled": checked(headphones.CONFIG.SONGKICK_FILTER_ENABLED),
            "cache_sizemb": headphones.CONFIG.CACHE_SIZEMB,
            "file_permissions": headphones.CONFIG.FILE_PERMISSIONS,
            "folder_permissions": headphones.CONFIG.FOLDER_PERMISSIONS,
            "mpc_enabled": checked(headphones.CONFIG.MPC_ENABLED),
            "email_enabled": checked(headphones.CONFIG.EMAIL_ENABLED),
            "email_from": headphones.CONFIG.EMAIL_FROM,
            "email_to": headphones.CONFIG.EMAIL_TO,
            "email_smtp_server": headphones.CONFIG.EMAIL_SMTP_SERVER,
            "email_smtp_user": headphones.CONFIG.EMAIL_SMTP_USER,
            "email_smtp_password": headphones.CONFIG.EMAIL_SMTP_PASSWORD,
            "email_smtp_port": int(headphones.CONFIG.EMAIL_SMTP_PORT),
            "email_ssl": checked(headphones.CONFIG.EMAIL_SSL),
            "email_tls": checked(headphones.CONFIG.EMAIL_TLS),
            "email_onsnatch": checked(headphones.CONFIG.EMAIL_ONSNATCH),
            "idtag": checked(headphones.CONFIG.IDTAG),
            "slack_enabled": checked(headphones.CONFIG.SLACK_ENABLED),
            "slack_url": headphones.CONFIG.SLACK_URL,
            "slack_channel": headphones.CONFIG.SLACK_CHANNEL,
            "slack_emoji": headphones.CONFIG.SLACK_EMOJI,
            "slack_onsnatch": checked(headphones.CONFIG.SLACK_ONSNATCH),
            "join_enabled": checked(headphones.CONFIG.JOIN_ENABLED),
            "join_onsnatch": checked(headphones.CONFIG.JOIN_ONSNATCH),
            "join_apikey": headphones.CONFIG.JOIN_APIKEY,
            "join_deviceid": headphones.CONFIG.JOIN_DEVICEID,
            "use_bandcamp": checked(headphones.CONFIG.BANDCAMP),
            "bandcamp_dir": headphones.CONFIG.BANDCAMP_DIR,
            'soulseek_api_url': headphones.CONFIG.SOULSEEK_API_URL,
            'soulseek_api_key': headphones.CONFIG.SOULSEEK_API_KEY,
            'use_soulseek': checked(headphones.CONFIG.SOULSEEK)
        }
        for k, v in config.items():
            if isinstance(v, headphones.config.path):
                # need to apply SoftChroot to paths:
                nv = headphones.SOFT_CHROOT.apply(v)
                if v != nv:
                    config[k] = headphones.config.path(nv)
        # Need to convert EXTRAS to a dictionary we can pass to the config:
        # it'll come in as a string like 2,5,6,8
        extra_munges = {
            "dj-mix": "dj_mix",
            "mixtape/street": "mixtape_street"
        }
        # Munge names that would be illegal as template/form identifiers.
        extras_list = [extra_munges.get(x, x) for x in headphones.POSSIBLE_EXTRAS]
        if headphones.CONFIG.EXTRAS:
            extras = list(map(int, headphones.CONFIG.EXTRAS.split(',')))
        else:
            extras = []
        # Map each possible extra to "checked"/"" by its 1-based position.
        extras_dict = OrderedDict()
        i = 1
        for extra in extras_list:
            if i in extras:
                extras_dict[extra] = "checked"
            else:
                extras_dict[extra] = ""
            i += 1
        config["extras"] = extras_dict
        return serve_template(templatename="config.html", title="Settings", config=config)
    @cherrypy.expose
    def configUpdate(self, **kwargs):
        """Persist settings submitted by the config form.

        Normalizes the raw form kwargs (unchecked checkboxes, ``use_``
        prefixes, SoftChroot paths, numbered extra newznab/torznab rows,
        the EXTRAS checkbox set), writes the config, and reconfigures the
        scheduler and MusicBrainz connection before redirecting back.
        """
        # Handle the variable config options. Note - keys with False values aren't getting passed
        checked_configs = [
            "launch_browser", "enable_https", "api_enabled", "use_blackhole", "headphones_indexer",
            "use_newznab", "newznab_enabled", "use_torznab", "torznab_enabled",
            "use_nzbsorg", "use_omgwtfnzbs", "use_piratebay", "use_rutracker",
            "use_orpheus", "use_redacted", "redacted_use_fltoken", "preferred_bitrate_allow_lossless",
            "detect_bitrate", "ignore_clean_releases", "freeze_db", "cue_split", "move_files",
            "rename_files", "rename_single_disc_ignore", "correct_metadata", "cleanup_files",
            "keep_nfo", "add_album_art", "embed_album_art", "embed_lyrics",
            "replace_existing_folders", "keep_original_folder", "file_underscores",
            "include_extras", "official_releases_only",
            "wait_until_release_date", "autowant_upcoming", "autowant_all",
            "autowant_manually_added", "do_not_process_unmatched", "keep_torrent_files",
            "music_encoder", "mb_ignore_age_missing",
            "encoderlossless", "encoder_multicore", "delete_lossless_files", "growl_enabled",
            "growl_onsnatch", "prowl_enabled",
            "prowl_onsnatch", "xbmc_enabled", "xbmc_update", "xbmc_notify", "lms_enabled",
            "plex_enabled", "plex_update", "plex_notify",
            "nma_enabled", "nma_onsnatch", "pushalot_enabled", "pushalot_onsnatch",
            "synoindex_enabled", "pushover_enabled",
            "pushover_onsnatch", "pushbullet_enabled", "pushbullet_onsnatch", "subsonic_enabled",
            "twitter_enabled", "twitter_onsnatch",
            "telegram_enabled", "telegram_onsnatch",
            "osx_notify_enabled", "osx_notify_onsnatch", "boxcar_enabled", "boxcar_onsnatch",
            "songkick_enabled", "songkick_filter_enabled",
            "mpc_enabled", "email_enabled", "email_ssl", "email_tls", "email_onsnatch",
            "customauth", "idtag", "deluge_paused",
            "join_enabled", "join_onsnatch", "use_bandcamp", "use_soulseek"
        ]
        for checked_config in checked_configs:
            if checked_config not in kwargs:
                # checked items should be zero or one. if they were not sent then the item was not checked
                kwargs[checked_config] = 0
        for plain_config, use_config in [(x[4:], x) for x in kwargs if x.startswith('use_')]:
            # the use prefix is fairly nice in the html, but does not match the actual config
            kwargs[plain_config] = kwargs[use_config]
            del kwargs[use_config]
        for k, v in kwargs.items():
            # TODO : HUGE crutch. It is all because there is no way to deal with options...
            try:
                _conf = headphones.CONFIG._define(k)
            except KeyError:
                continue
            conftype = _conf[1]
            # Path-typed options come in chrooted; store the real path.
            if conftype is headphones.config.path:
                nv = headphones.SOFT_CHROOT.revoke(v)
                if nv != v:
                    kwargs[k] = nv
        # Check if encoderoutputformat is set multiple times
        # NOTE(review): this assumes the form may submit the field more than
        # once (a list); the last submitted value wins -- confirm against the
        # config template.
        if len(kwargs['encoderoutputformat'][-1]) > 1:
            kwargs['encoderoutputformat'] = kwargs['encoderoutputformat'][-1]
        else:
            kwargs['encoderoutputformat'] = kwargs['encoderoutputformat'][0]
        # Collect numbered newznab rows (newznab_hostN/newznab_apiN/...) into
        # (host, api, enabled) tuples and strip them from kwargs.
        extra_newznabs = []
        for kwarg in [x for x in kwargs if x.startswith('newznab_host')]:
            newznab_host_key = kwarg
            newznab_number = kwarg[12:]
            if len(newznab_number):
                newznab_api_key = 'newznab_api' + newznab_number
                newznab_enabled_key = 'newznab_enabled' + newznab_number
                newznab_host = kwargs.get(newznab_host_key, '')
                newznab_api = kwargs.get(newznab_api_key, '')
                newznab_enabled = int(kwargs.get(newznab_enabled_key, 0))
                for key in [newznab_host_key, newznab_api_key, newznab_enabled_key]:
                    if key in kwargs:
                        del kwargs[key]
                extra_newznabs.append((newznab_host, newznab_api, newznab_enabled))
        # Same for numbered torznab rows (which also carry a ratio field).
        extra_torznabs = []
        for kwarg in [x for x in kwargs if x.startswith('torznab_host')]:
            torznab_host_key = kwarg
            torznab_number = kwarg[12:]
            if len(torznab_number):
                torznab_api_key = 'torznab_api' + torznab_number
                torznab_enabled_key = 'torznab_enabled' + torznab_number
                torznab_ratio_key = 'torznab_ratio' + torznab_number
                torznab_host = kwargs.get(torznab_host_key, '')
                torznab_api = kwargs.get(torznab_api_key, '')
                torznab_enabled = int(kwargs.get(torznab_enabled_key, 0))
                torznab_ratio = kwargs.get(torznab_ratio_key, '')
                for key in [torznab_host_key, torznab_api_key, torznab_enabled_key, torznab_ratio_key]:
                    if key in kwargs:
                        del kwargs[key]
                extra_torznabs.append((torznab_host, torznab_api, torznab_ratio, torznab_enabled))
        # Convert the extras to list then string. Coming in as 0 or 1 (append new extras to the end)
        temp_extras_list = []
        extra_munges = {
            "dj-mix": "dj_mix",
            "mixtape/street": "mixtape_street"
        }
        expected_extras = [extra_munges.get(x, x) for x in headphones.POSSIBLE_EXTRAS]
        extras_list = [kwargs.get(x, 0) for x in expected_extras]
        i = 1
        for extra in extras_list:
            if extra:
                temp_extras_list.append(i)
            i += 1
        # Remove the per-extra form fields so process_kwargs never sees them.
        for extra in expected_extras:
            temp = '%s_temp' % extra
            if temp in kwargs:
                del kwargs[temp]
            if extra in kwargs:
                del kwargs[extra]
        headphones.CONFIG.EXTRAS = ','.join(str(n) for n in temp_extras_list)
        headphones.CONFIG.clear_extra_newznabs()
        headphones.CONFIG.clear_extra_torznabs()
        headphones.CONFIG.process_kwargs(kwargs)
        for extra_newznab in extra_newznabs:
            headphones.CONFIG.add_extra_newznab(extra_newznab)
        for extra_torznab in extra_torznabs:
            headphones.CONFIG.add_extra_torznab(extra_torznab)
        # Sanity checking
        if headphones.CONFIG.SEARCH_INTERVAL and headphones.CONFIG.SEARCH_INTERVAL < 360:
            logger.info("Search interval too low. Resetting to 6 hour minimum")
            headphones.CONFIG.SEARCH_INTERVAL = 360
        # Write the config
        headphones.CONFIG.write()
        # Reconfigure scheduler
        headphones.initialize_scheduler()
        # Reconfigure musicbrainz database connection with the new values
        mb.startmb()
        raise cherrypy.HTTPRedirect("config")
@cherrypy.expose
def do_state_change(self, signal, title, timer):
headphones.SIGNAL = signal
message = title + '...'
return serve_template(templatename="shutdown.html", title=title,
message=message, timer=timer)
@cherrypy.expose
def shutdown(self):
return self.do_state_change('shutdown', 'Shutting Down', 15)
@cherrypy.expose
def restart(self):
return self.do_state_change('restart', 'Restarting', 30)
@cherrypy.expose
def update(self):
return self.do_state_change('update', 'Updating', 120)
@cherrypy.expose
def extras(self):
myDB = db.DBConnection()
cloudlist = myDB.select('SELECT * from lastfmcloud')
return serve_template(templatename="extras.html", title="Extras", cloudlist=cloudlist)
@cherrypy.expose
def addReleaseById(self, rid, rgid=None):
    """Import a release by ID in a background thread, then redirect.

    Redirects to the album page when a release-group ID is supplied,
    otherwise back to the home page.
    """
    threading.Thread(target=importer.addReleaseById, args=[rid, rgid]).start()
    destination = "albumPage?AlbumID=%s" % rgid if rgid else "home"
    raise cherrypy.HTTPRedirect(destination)
@cherrypy.expose
def updateCloud(self):
    """Refresh the last.fm similar-artists data, then return to Extras."""
    lastfm.getSimilar()
    raise cherrypy.HTTPRedirect("extras")
@cherrypy.expose
def api(self, *args, **kwargs):
    """HTTP entry point for the Headphones API; delegates to headphones.api.Api."""
    from headphones.api import Api
    handler = Api()
    handler.checkParams(*args, **kwargs)
    return handler.fetchData()
@cherrypy.expose
@cherrypy.tools.json_out()
def getInfo(self, ArtistID=None, AlbumID=None):
    """Return cached artist/album info as JSON (serialized by json_out)."""
    from headphones import cache
    info_dict = cache.getInfo(ArtistID, AlbumID)
    return info_dict
@cherrypy.expose
def getArtwork(self, ArtistID=None, AlbumID=None):
    """Return whatever cache.getArtwork resolves for the given IDs."""
    from headphones import cache
    return cache.getArtwork(ArtistID, AlbumID)
@cherrypy.expose
def getThumb(self, ArtistID=None, AlbumID=None):
    """Return whatever cache.getThumb resolves for the given IDs."""
    from headphones import cache
    return cache.getThumb(ArtistID, AlbumID)
# If you just want to get the last.fm image links for an album, make sure
# to pass a releaseid and not a releasegroupid
@cherrypy.expose
@cherrypy.tools.json_out()
def getImageLinks(self, ArtistID=None, AlbumID=None):
    """Return artwork/thumbnail links, falling back to Cover Art Archive.

    last.fm links come from the cache; for albums, any missing entry is
    filled with the corresponding Cover Art Archive URL.
    """
    from headphones import cache
    image_dict = cache.getImageLinks(ArtistID, AlbumID)

    if AlbumID:
        caa_artwork = "https://coverartarchive.org/release/%s/front-500.jpg" % AlbumID
        caa_thumb = "https://coverartarchive.org/release/%s/front-250.jpg" % AlbumID
        if not image_dict:
            image_dict = {'artwork': caa_artwork, 'thumbnail': caa_thumb}
        else:
            if not image_dict['artwork']:
                image_dict['artwork'] = caa_artwork
            if not image_dict['thumbnail']:
                image_dict['thumbnail'] = caa_thumb
    return image_dict
@cherrypy.expose
def twitterStep1(self):
    """Start Twitter OAuth; delegates to TwitterNotifier._get_authorization()."""
    cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
    tweet = notifiers.TwitterNotifier()
    return tweet._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
    """Finish Twitter OAuth by exchanging the user-supplied key/PIN."""
    cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
    result = notifiers.TwitterNotifier()._get_credentials(key)
    logger.info("result: " + str(result))
    if result:
        return "Key verification successful"
    return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
    """Send a test tweet and report whether it worked."""
    cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
    if notifiers.TwitterNotifier().test_notify():
        return "Tweet successful, check your twitter to make sure it worked"
    return "Error sending tweet"
@cherrypy.expose
def osxnotifyregister(self, app):
    """Register an application for OS X notifications and confirm via a test
    notification. Returns the failure message when registration fails."""
    cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
    from osxnotify import registerapp as osxnotify
    result, msg = osxnotify.registerapp(app)
    if not result:
        logger.warn(msg)
        return msg
    notifiers.OSX_NOTIFY().notify('Registered', result, 'Success :-)')
    logger.info(
        'Registered %s, to re-register a different app, delete this app first' % result)
@cherrypy.expose
def testPushover(self):
    """Send a test Pushover notification; returns the result as a string."""
    logger.info("Sending Pushover notification")
    pushover = notifiers.PUSHOVER()
    result = pushover.notify("hooray!", "This is a test")
    return str(result)
@cherrypy.expose
def testPlex(self):
    """Trigger a Plex library update as a connectivity test (no return value)."""
    logger.info("Testing plex update")
    plex = notifiers.Plex()
    plex.update()
@cherrypy.expose
def testPushbullet(self):
    """Send a test Pushbullet notification (no return value)."""
    logger.info("Testing Pushbullet notifications")
    pushbullet = notifiers.PUSHBULLET()
    pushbullet.notify("it works!", "Test message")
@cherrypy.expose
def testTelegram(self):
    """Send a test Telegram notification (no return value)."""
    logger.info("Testing Telegram notifications")
    telegram = notifiers.TELEGRAM()
    telegram.notify("it works!", "lazers pew pew")
@cherrypy.expose
def testJoin(self):
    """Send a test Join notification (no return value)."""
    logger.info("Testing Join notifications")
    join = notifiers.JOIN()
    join.notify("it works!", "Test message")
class Artwork(object):
    """CherryPy sub-tree serving full-size artwork from the image cache,
    with thumbnails exposed through the nested Thumbs handler."""

    @cherrypy.expose
    def index(self):
        return "Artwork"

    @cherrypy.expose
    def default(self, ArtistOrAlbum="", ID=None):
        """Serve art for /artwork/artist/<id> or /artwork/album/<id>."""
        from headphones import cache
        artist_id = ID if ArtistOrAlbum == "artist" else None
        album_id = ID if ArtistOrAlbum == "album" else None

        relpath = cache.getArtwork(artist_id, album_id)
        if relpath:
            # Cached image: serve from the cache dir with a long-lived cache header
            relpath = relpath.replace('cache/', '', 1)
            path = os.path.join(headphones.CONFIG.CACHE_DIR, relpath)
            extension = os.path.splitext(relpath)[1][1::]
            cherrypy.response.headers['Content-type'] = 'image/' + extension
            cherrypy.response.headers['Cache-Control'] = 'max-age=31556926'
        else:
            # Fall back to the bundled placeholder image
            relpath = "data/interfaces/default/images/no-cover-art.png"
            path = os.path.join(os.path.dirname(sys.argv[0]), relpath)
            cherrypy.response.headers['Content-type'] = 'image/png'
            cherrypy.response.headers['Cache-Control'] = 'no-cache'

        with open(os.path.normpath(path), "rb") as fp:
            return fp.read()

    class Thumbs(object):
        """Same as Artwork, but serves the smaller cached thumbnails."""

        @cherrypy.expose
        def index(self):
            return "Here be thumbs"

        @cherrypy.expose
        def default(self, ArtistOrAlbum="", ID=None):
            """Serve thumbnails for /artwork/thumbs/artist/<id> etc."""
            from headphones import cache
            artist_id = ID if ArtistOrAlbum == "artist" else None
            album_id = ID if ArtistOrAlbum == "album" else None

            relpath = cache.getThumb(artist_id, album_id)
            if relpath:
                relpath = relpath.replace('cache/', '', 1)
                path = os.path.join(headphones.CONFIG.CACHE_DIR, relpath)
                extension = os.path.splitext(relpath)[1][1::]
                cherrypy.response.headers['Content-type'] = 'image/' + extension
                cherrypy.response.headers['Cache-Control'] = 'max-age=31556926'
            else:
                relpath = "data/interfaces/default/images/no-cover-artist.png"
                path = os.path.join(os.path.dirname(sys.argv[0]), relpath)
                cherrypy.response.headers['Content-type'] = 'image/png'
                cherrypy.response.headers['Cache-Control'] = 'no-cache'

            with open(os.path.normpath(path), "rb") as fp:
                return fp.read()

    thumbs = Thumbs()


WebInterface.artwork = Artwork()
| 85,048 | Python | .py | 1,622 | 40.056104 | 193 | 0.610277 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,308 | webstart.py | rembo10_headphones/headphones/webstart.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import cherrypy
import headphones
from headphones import logger
from headphones.webserve import WebInterface
from headphones.helpers import create_https_certificates
def initialize(options):
    """Configure and start the CherryPy web server, then block until it stops.

    options: dict of web-server settings (http_port/http_host/http_root,
    http_username/http_password, http_proxy flag, enable_https plus
    https_cert/https_key paths). Exits the process if the port cannot be
    bound.
    """
    # HTTPS stuff stolen from sickbeard
    enable_https = options['enable_https']
    https_cert = options['https_cert']
    https_key = options['https_key']

    if enable_https:
        # If either the HTTPS certificate or key do not exist, try to make
        # self-signed ones.
        if not (https_cert and os.path.exists(https_cert)) or not (
                https_key and os.path.exists(https_key)):
            if not create_https_certificates(https_cert, https_key):
                logger.warn("Unable to create certificate and key. Disabling "
                            "HTTPS")
                enable_https = False

        # NOTE(review): if https_cert/https_key can be None here this check
        # raises TypeError instead of disabling HTTPS -- confirm the config
        # always supplies string paths.
        if not (os.path.exists(https_cert) and os.path.exists(https_key)):
            logger.warn("Disabled HTTPS because of missing certificate and "
                        "key.")
            enable_https = False

    # Global CherryPy engine/server settings
    options_dict = {
        'server.socket_port': options['http_port'],
        'server.socket_host': options['http_host'],
        'server.thread_pool': 10,
        'tools.encode.on': True,
        'tools.encode.encoding': 'utf-8',
        'tools.decode.on': True,
        'log.screen': False,
        'engine.autoreload.on': False,
    }

    if enable_https:
        options_dict['server.ssl_certificate'] = https_cert
        options_dict['server.ssl_private_key'] = https_key
        protocol = "https"
    else:
        protocol = "http"

    logger.info("Starting Headphones web server on %s://%s:%d/", protocol,
                options['http_host'], options['http_port'])
    cherrypy.config.update(options_dict)

    # Per-path application config: static assets are served out of data/
    conf = {
        '/': {
            'tools.staticdir.root': os.path.join(headphones.PROG_DIR, 'data'),
            'tools.proxy.on': options['http_proxy']  # pay attention to X-Forwarded-Proto header
        },
        '/interfaces': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "interfaces"
        },
        '/images': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "images"
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "css"
        },
        '/js': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "js"
        },
        '/favicon.ico': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': os.path.join(os.path.abspath(
                os.curdir), "images" + os.sep + "favicon.ico")
        },
        '/cache': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': headphones.CONFIG.CACHE_DIR
        }
    }

    if options['http_password']:
        # Enable HTTP basic auth for everything except the API endpoint,
        # which handles its own key-based authentication
        logger.info("Web server authentication is enabled, username is '%s'",
                    options['http_username'])

        conf['/'].update({
            'tools.auth_basic.on': True,
            'tools.auth_basic.realm': 'Headphones web server',
            'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict({
                options['http_username']: options['http_password']
            })
        })
        conf['/api'] = {'tools.auth_basic.on': False}

    # Mount the application at the configured root and start serving
    cherrypy.tree.mount(WebInterface(), str(options['http_root']), config=conf)

    try:
        cherrypy.server.start()
    except IOError:
        sys.stderr.write(
            'Failed to start on port: %i. Is something else running?\n' % (options['http_port']))
        sys.exit(1)

    cherrypy.server.wait()
| 4,333 | Python | .py | 108 | 31.574074 | 97 | 0.608551 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,309 | soulseek.py | rembo10_headphones/headphones/soulseek.py | from collections import defaultdict, namedtuple
import os
import time
import slskd_api
import headphones
from headphones import logger
from datetime import datetime, timedelta
Result = namedtuple('Result', ['title', 'size', 'user', 'provider', 'type', 'matches', 'bandwidth', 'hasFreeUploadSlot', 'queueLength', 'files', 'kind', 'url', 'folder'])
def initialize_soulseek_client():
    """Build a slskd API client from the configured URL and API key."""
    return slskd_api.SlskdClient(
        host=headphones.CONFIG.SOULSEEK_API_URL,
        api_key=headphones.CONFIG.SOULSEEK_API_KEY)
# Search logic: run staged searches and process the responses
def search(artist, album, year, num_tracks, losslessOnly, allow_lossless, user_search_term):
    """Search Soulseek for an album, relaxing the query in up to three stages.

    Stage 1 searches with artist/album/year; stage 2 drops the year;
    stage 3 keeps the stage-2 query but relaxes the exact track-count
    requirement. A user-provided search term replaces artist/album/year.
    Returns a list of Result tuples (possibly empty).
    """
    client = initialize_soulseek_client()

    # Override search string with user provided search term if entered
    if user_search_term:
        artist = user_search_term
        album = ''
        year = ''

    # Stage 1: Search with artist, album, year, and num_tracks
    logger.info(f"Searching Soulseek using term: {artist} {album} {year}")
    results = execute_search(client, artist, album, year, losslessOnly, allow_lossless)
    processed_results = process_results(results, losslessOnly, allow_lossless, num_tracks)
    if processed_results or user_search_term or album.lower() == artist.lower():
        return processed_results

    # Stage 2: If Stage 1 fails, search with artist, album, and num_tracks (excluding year)
    logger.info("Soulseek search stage 1 did not meet criteria. Retrying without year...")
    results = execute_search(client, artist, album, None, losslessOnly, allow_lossless)
    processed_results = process_results(results, losslessOnly, allow_lossless, num_tracks)
    if processed_results or artist == "Various Artists":
        return processed_results

    # Stage 3: Final attempt with only artist and album, ignoring the track
    # count. The query is identical to stage 2, so reuse those responses
    # instead of issuing the same network search a second time.
    logger.info("Soulseek search stage 2 did not meet criteria. Final attempt with only artist and album.")
    return process_results(results, losslessOnly, allow_lossless, num_tracks, ignore_track_count=True)
def execute_search(client, artist, album, year, losslessOnly, allow_lossless):
    """Run one Soulseek text search via slskd and block until it completes.

    Appends 'flac' (lossless only) or 'mp3' (lossy only) to the query to
    bias the results by format; returns the raw search responses.
    """
    search_text = f"{artist} {album}"
    if year:
        search_text += f" {year}"
    if losslessOnly:
        search_text += " flac"
    elif not allow_lossless:
        search_text += " mp3"

    # Actual search
    search_response = client.searches.search_text(searchText=search_text, filterResponses=True)
    search_id = search_response.get('id')

    # Wait for search completion and return response
    # NOTE(review): this polls forever if the search never completes --
    # confirm slskd always marks searches complete eventually.
    while not client.searches.state(id=search_id).get('isComplete'):
        time.sleep(2)

    return client.searches.search_responses(id=search_id)
# Process raw search responses into album-level Result tuples
def process_results(results, losslessOnly, allow_lossless, num_tracks, ignore_track_count=False):
    """Group per-file search hits into candidate albums and filter them.

    results: raw slskd search responses (list of per-user dicts).
    losslessOnly/allow_lossless: select which file extensions qualify.
    num_tracks: expected track count; a folder normally must match exactly.
    ignore_track_count: when True, any folder with more than one matching
        file is accepted instead.
    Returns a list of Result namedtuples.
    """
    if losslessOnly:
        valid_extensions = {'.flac'}
    elif allow_lossless:
        valid_extensions = {'.mp3', '.flac'}
    else:
        valid_extensions = {'.mp3'}

    albums = defaultdict(lambda: {'files': [], 'user': None, 'hasFreeUploadSlot': None, 'queueLength': None, 'uploadSpeed': None})

    # Extract info from the api response and combine files at album level
    for result in results:
        user = result.get('username')
        hasFreeUploadSlot = result.get('hasFreeUploadSlot')
        queueLength = result.get('queueLength')
        uploadSpeed = result.get('uploadSpeed')

        # Only handle .mp3 and .flac
        for file in result.get('files', []):
            filename = file.get('filename')
            file_extension = os.path.splitext(filename)[1].lower()
            if file_extension in valid_extensions:
                #album_directory = os.path.dirname(filename)
                # Soulseek filenames use Windows-style '\' separators
                # regardless of the local platform, hence rsplit on '\\'
                album_directory = filename.rsplit('\\', 1)[0]
                albums[album_directory]['files'].append(file)

                # Update metadata only once per album_directory
                if albums[album_directory]['user'] is None:
                    albums[album_directory].update({
                        'user': user,
                        'hasFreeUploadSlot': hasFreeUploadSlot,
                        'queueLength': queueLength,
                        'uploadSpeed': uploadSpeed,
                    })

    # Filter albums based on num_tracks, add bunch of useful info to the compiled album
    final_results = []
    for directory, album_data in albums.items():
        # Precedence note: this reads as
        #   (ignore_track_count and len > 1) or (len == num_tracks)
        if ignore_track_count and len(album_data['files']) > 1 or len(album_data['files']) == num_tracks:
            #album_title = os.path.basename(directory)
            # NOTE(review): assumes the directory path contains at least one
            # backslash; a bare folder name would raise IndexError -- confirm.
            album_title = directory.rsplit('\\', 1)[1]
            total_size = sum(file.get('size', 0) for file in album_data['files'])
            final_results.append(Result(
                title=album_title,
                size=int(total_size),
                user=album_data['user'],
                provider="soulseek",
                type="soulseek",
                matches=True,
                bandwidth=album_data['uploadSpeed'],
                hasFreeUploadSlot=album_data['hasFreeUploadSlot'],
                queueLength=album_data['queueLength'],
                files=album_data['files'],
                kind='soulseek',
                url='http://' + album_data['user'] + album_title,  # URL is needed in other parts of the program.
                #folder=os.path.basename(directory)
                folder = album_title
            ))

    return final_results
def download(user, filelist):
    """Queue the given files for download from a Soulseek user via slskd."""
    initialize_soulseek_client().transfers.enqueue(username=user, files=filelist)
def download_completed():
    """Poll slskd for all active downloads and classify them by album folder.

    Returns (completed_albums, errored_albums): sets of album folder names
    whose files have all succeeded, respectively which contain at least one
    errored file (or one requested more than 24 hours ago). Errored albums
    have their transfers cancelled, and completed/removed transfers are
    cleared from the client.
    """
    client = initialize_soulseek_client()
    all_downloads = client.transfers.get_all_downloads(includeRemoved=False)
    album_completion_tracker = {}  # Tracks completion state of each album's songs
    album_errored_tracker = {}  # Tracks albums with errored downloads

    # Anything older than 24 hours will be canceled
    cutoff_time = datetime.now() - timedelta(hours=24)

    # Identify errored and completed albums
    for download in all_downloads:
        directories = download.get('directories', [])
        for directory in directories:
            # Album name is the last component of the remote (Windows-style) path
            album_part = directory.get('directory', '').split('\\')[-1]
            files = directory.get('files', [])
            for file_data in files:
                state = file_data.get('state', '')
                requested_at_str = file_data.get('requestedAt', '1900-01-01 00:00:00')
                requested_at = parse_datetime(requested_at_str)

                # Initialize or update album entry in trackers
                if album_part not in album_completion_tracker:
                    album_completion_tracker[album_part] = {'total': 0, 'completed': 0, 'errored': 0}
                if album_part not in album_errored_tracker:
                    album_errored_tracker[album_part] = False

                album_completion_tracker[album_part]['total'] += 1

                if 'Completed, Succeeded' in state:
                    album_completion_tracker[album_part]['completed'] += 1
                elif 'Completed, Errored' in state or requested_at < cutoff_time:
                    album_completion_tracker[album_part]['errored'] += 1
                    album_errored_tracker[album_part] = True  # Mark album as having errored downloads

    # Identify errored albums
    errored_albums = {album for album, errored in album_errored_tracker.items() if errored}

    # Cancel downloads for errored albums
    for download in all_downloads:
        directories = download.get('directories', [])
        for directory in directories:
            album_part = directory.get('directory', '').split('\\')[-1]
            files = directory.get('files', [])
            for file_data in files:
                if album_part in errored_albums:
                    # Extract 'id' and 'username' for each file to cancel the download
                    file_id = file_data.get('id', '')
                    username = file_data.get('username', '')
                    success = client.transfers.cancel_download(username, file_id)
                    if not success:
                        logger.debug(f"Soulseek failed to cancel download for file ID: {file_id}")

    # Clear completed/canceled/errored stuff from client downloads
    try:
        client.transfers.remove_completed_downloads()
    except Exception as e:
        logger.debug(f"Soulseek failed to remove completed downloads: {e}")

    # Identify completed albums
    completed_albums = {album for album, counts in album_completion_tracker.items() if counts['total'] == counts['completed']}

    # Return both completed and errored albums
    return completed_albums, errored_albums
def download_completed_album(username, foldername):
    """Check the transfer state of one album folder from a specific user.

    Returns (completed, errored) booleans. Files that errored, or were
    requested more than 24 hours ago, mark the album errored and cause all
    its transfers to be cancelled and removed.

    NOTE(review): if the folder is not found, total_count stays 0 and
    completed is reported True (0 == 0) -- confirm callers expect that.
    """
    client = initialize_soulseek_client()
    downloads = client.transfers.get_downloads(username)

    # Anything older than 24 hours will be canceled
    cutoff_time = datetime.now() - timedelta(hours=24)

    total_count = 0
    completed_count = 0
    errored_count = 0
    file_ids = []

    # Identify errored and completed album
    directories = downloads.get('directories', [])
    for directory in directories:
        # Album name is the last component of the remote (Windows-style) path
        album_part = directory.get('directory', '').split('\\')[-1]
        if album_part == foldername:
            files = directory.get('files', [])
            for file_data in files:
                state = file_data.get('state', '')
                requested_at_str = file_data.get('requestedAt', '1900-01-01 00:00:00')
                requested_at = parse_datetime(requested_at_str)
                total_count += 1
                file_id = file_data.get('id', '')
                file_ids.append(file_id)
                if 'Completed, Succeeded' in state:
                    completed_count += 1
                elif 'Completed, Errored' in state or requested_at < cutoff_time:
                    errored_count += 1
            break

    completed = True if completed_count == total_count else False
    errored = True if errored_count else False

    # Cancel downloads for errored album
    if errored:
        for file_id in file_ids:
            try:
                success = client.transfers.cancel_download(username, file_id, remove=True)
            except Exception as e:
                # NOTE(review): the caught exception is not included in the
                # log message -- consider logging e as well.
                logger.debug(f"Soulseek failed to cancel download for folder with file ID: {foldername} {file_id}")
    return completed, errored
def parse_datetime(datetime_string):
    """Parse a timestamp string from the slskd API into a datetime.

    The API reports fractional seconds with up to seven digits, which
    strptime's %f (max six digits) cannot parse, so the fraction is first
    truncated to microsecond precision. Timestamps without a fractional
    part -- including the '1900-01-01 00:00:00' fallback callers pass,
    which uses a space instead of 'T' -- previously raised ValueError
    because the format string always required '.%f'; they are now parsed
    without the fraction.
    """
    if '.' in datetime_string:
        # Keep the dot plus at most six fractional digits for %f
        datetime_string = datetime_string[:datetime_string.index('.')+7]
        return datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S.%f')
    # No fractional seconds: normalize the separator and parse without %f
    return datetime.strptime(datetime_string.replace(' ', 'T'), '%Y-%m-%dT%H:%M:%S')
9,310 | lyrics.py | rembo10_headphones/headphones/lyrics.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import html.entities
import re
from headphones import logger, request
def getLyrics(artist, song):
    """Fetch lyrics for artist/song from the LyricWiki (lyrics.wikia.com) API.

    Returns the lyrics as plain text, '(Instrumental)' for instrumental
    tracks, or None when nothing could be retrieved.

    NOTE(review): lyrics.wikia.com has been shut down -- confirm whether
    this endpoint is still expected to work.
    """
    params = {"artist": artist.encode('utf-8'),
              "song": song.encode('utf-8'),
              "fmt": 'xml'
              }

    url = 'https://lyrics.wikia.com/api.php'

    data = request.request_minidom(url, params=params)
    if not data:
        return

    # The API response contains a <url> element pointing to the lyrics page
    url = data.getElementsByTagName("url")
    if url:
        lyricsurl = url[0].firstChild.nodeValue
    else:
        logger.info('No lyrics found for %s - %s' % (artist, song))
        return

    lyricspage = request.request_content(lyricsurl)
    if not lyricspage:
        logger.warn('Error fetching lyrics from: %s' % lyricsurl)
        return

    # Lyrics are embedded in <div class='lyricbox'>; instrumental tracks
    # link to the Instrumental category instead.
    m = re.compile('''<div class='lyricbox'><div class='rtMatcher'>.*?</div>(.*?)<!--''').search(
        lyricspage)
    if not m:
        m = re.compile(
            '''<div class='lyricbox'><span style="padding:1em"><a href="/Category:Instrumental" title="Instrumental">''').search(
            lyricspage)
        if m:
            return '(Instrumental)'
        else:
            logger.warn('Cannot find lyrics on: %s' % lyricsurl)
            return

    # Decode HTML entities, turn <br /> into newlines, strip remaining tags
    lyrics = convert_html_entities(m.group(1)).replace('<br />', '\n')
    lyrics = re.sub('<.*?>', '', lyrics)
    return lyrics
def convert_html_entities(s):
    """Decode HTML character references in *s*.

    Handles decimal (&#65;), hexadecimal (&#x41; / &#X41;) and named
    (&quot;) references. Unknown named entities are left untouched, and
    literal ampersands encoded as &amp; are decoded last so they cannot be
    re-interpreted as the start of another entity. Returns the decoded
    string.
    """
    # Numeric character references, decimal and hexadecimal.
    # Raw strings avoid the invalid-escape warning the old "&#\d+;"
    # pattern triggers on modern Python.
    for hit in set(re.findall(r"&#[xX]?[0-9a-fA-F]+;", s)):
        name = hit[2:-1]
        try:
            if name[0] in ('x', 'X'):
                codepoint = int(name[1:], 16)
            else:
                # Non-decimal garbage like &#ff; raises ValueError and is
                # left untouched, matching the previous behaviour
                codepoint = int(name)
            s = s.replace(hit, chr(codepoint))
        except ValueError:
            pass

    # Named references; &amp; is deliberately deferred to the very end
    amp = "&amp;"
    for hit in set(re.findall(r"&\w+;", s)):
        if hit == amp:
            continue
        name = hit[1:-1]
        if name in html.entities.name2codepoint:
            s = s.replace(hit, chr(html.entities.name2codepoint[name]))

    s = s.replace(amp, "&")
    return s
| 2,658 | Python | .py | 72 | 29.986111 | 129 | 0.615804 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,311 | exceptions.py | rembo10_headphones/headphones/exceptions.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
class HeadphonesException(Exception):
    """
    Generic Headphones Exception - should never be thrown, only subclassed.

    Root of the application's exception hierarchy.
    """
class NewzbinAPIThrottled(HeadphonesException):
    """
    Raised when Newzbin has throttled us, deal with it.
    """
class SoftChrootError(HeadphonesException):
    """
    Fatal errors in the SoftChroot module.
    """
    pass
| 1,025 | Python | .py | 27 | 35.259259 | 74 | 0.753024 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,312 | rutracker.py | rembo10_headphones/headphones/rutracker.py | #!/usr/bin/env python
import urllib.request, urllib.parse, urllib.error
import time
from urllib.parse import urlparse
import re
import requests as requests
# from requests.auth import HTTPDigestAuth
from bs4 import BeautifulSoup
import headphones
from headphones import logger
from headphones.types import Result
class Rutracker(object):
    """Minimal client for the rutracker.org torrent tracker.

    Handles cookie-based forum login, album searches, .torrent downloads,
    plus legacy helpers that push torrents straight to uTorrent/qBittorrent.
    """

    def __init__(self):
        # One session for the object's lifetime so the login cookie persists
        self.session = requests.session()
        self.timeout = 60
        self.loggedin = False
        # Maximum accepted torrent size in bytes; set per-format by searchurl()
        self.maxsize = 0
        self.search_referer = 'https://rutracker.org/forum/tracker.php'

    def logged_in(self):
        # True once login() has succeeded for this session
        return self.loggedin

    def still_logged_in(self, html):
        """Return False when the page contains the login form (session expired).

        NOTE(review): search() passes a BeautifulSoup object here, not a
        string -- confirm the substring membership test behaves as intended.
        """
        if not html or "action=\"https://rutracker.org/forum/login.php\">" in html:
            return False
        else:
            return True

    def login(self):
        """
        Logs in user. Returns and stores the resulting logged-in state.
        """
        loginpage = 'https://rutracker.org/forum/login.php'
        post_params = {
            'login_username': headphones.CONFIG.RUTRACKER_USER,
            'login_password': headphones.CONFIG.RUTRACKER_PASSWORD,
            # cp1251-encoded submit button value expected by the forum
            'login': b'\xc2\xf5\xee\xe4'  # '%C2%F5%EE%E4'
        }
        headers = {
            'User-Agent' : 'Headphones'
        }

        logger.info("Attempting to log in to rutracker...")
        try:
            r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, headers=headers)
            # try again, optionally with a user-supplied session cookie
            if not self.has_bb_session_cookie(r):
                time.sleep(10)
                if headphones.CONFIG.RUTRACKER_COOKIE:
                    logger.info("Attempting to log in using predefined cookie...")
                    r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, headers=headers, cookies={'bb_session': headphones.CONFIG.RUTRACKER_COOKIE})
                else:
                    r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, headers=headers)
            if self.has_bb_session_cookie(r):
                self.loggedin = True
                logger.info("Successfully logged in to rutracker")
            else:
                logger.error(
                    "Could not login to rutracker, credentials maybe incorrect, site is down or too many attempts. Try again later")
                self.loggedin = False
            return self.loggedin
        except Exception as e:
            logger.error("Unknown error logging in to rutracker: %s" % e)
            self.loggedin = False
            return self.loggedin

    def has_bb_session_cookie(self, response):
        # Session cookie may be on the final response or on a redirect hop
        if 'bb_session' in list(response.cookies.keys()):
            return True
        # Rutracker randomly send a 302 redirect code, cookie may be present in response history
        # NOTE(review): next() only inspects the FIRST history entry; any()
        # across all hops was probably intended -- confirm.
        return next(('bb_session' in list(r.cookies.keys()) for r in response.history), False)

    def searchurl(self, artist, album, year, format):
        """
        Return the search url for artist/album/year in the given format
        ('lossless', 'lossless+mp3' or lossy). Also sets self.maxsize.
        """
        # Build search url
        searchterm = ''
        if artist != 'Various Artists':
            searchterm = artist
            searchterm = searchterm + ' '
        searchterm = searchterm + album
        searchterm = searchterm + ' '
        searchterm = searchterm + year

        # Map the requested quality to rutracker search keywords and a
        # corresponding maximum torrent size
        if format == 'lossless':
            format = '+lossless||TR24'
            self.maxsize = 10000000000
        elif format == 'lossless+mp3':
            format = '+lossless||TR24||mp3||aac'
            self.maxsize = 10000000000
        else:
            format = '+mp3||aac'
            self.maxsize = 300000000

        # sort by size, descending.
        sort = '&o=7&s=2'

        try:
            searchurl = "%s?nm=%s%s%s" % (self.search_referer, urllib.parse.quote(searchterm), format, sort)
        except:
            searchterm = searchterm.encode('utf-8')
            searchurl = "%s?nm=%s%s%s" % (self.search_referer, urllib.parse.quote(searchterm), format, sort)

        logger.info("Searching rutracker using term: %s", searchterm)
        return searchurl

    def search(self, searchurl):
        """
        Parse the search results and return valid torrent list
        (None on error, possibly empty list otherwise).
        """
        try:
            headers = {
                'Referer': self.search_referer,
                'User-Agent' : 'Headphones'
            }
            r = self.session.get(url=searchurl, headers=headers, timeout=self.timeout)
            soup = BeautifulSoup(r.content, 'html.parser')
            # Debug
            # logger.debug (soup.prettify())

            # Check if still logged in; retry once after re-login
            if not self.still_logged_in(soup):
                self.login()
                r = self.session.get(url=searchurl, timeout=self.timeout)
                soup = BeautifulSoup(r.content, 'html.parser')
                if not self.still_logged_in(soup):
                    logger.error("Error getting rutracker data")
                    return None

            # Process the results table row by row
            rulist = []
            i = soup.find('table', id='tor-tbl')
            if not i:
                logger.info("No valid results found from rutracker")
                return None
            minimumseeders = int(headphones.CONFIG.NUMBEROFSEEDERS) - 1
            for item in zip(i.find_all(class_='hl-tags'), i.find_all(class_='dl-stub'),
                            i.find_all(class_='seedmed')):
                title = item[0].get_text()
                url = item[1].get('href')
                size_formatted = item[1].get_text()[:-2]
                seeds = item[2].get_text()

                # Convert the human-readable size to bytes
                size_parts = size_formatted.split()
                size = float(size_parts[0])
                if size_parts[1] == 'KB':
                    size *= 1024
                if size_parts[1] == 'MB':
                    size *= 1024 ** 2
                if size_parts[1] == 'GB':
                    size *= 1024 ** 3
                if size_parts[1] == 'TB':
                    size *= 1024 ** 4

                if size < self.maxsize and minimumseeders < int(seeds):
                    logger.info('Found %s. Size: %s' % (title, size_formatted))
                    # Torrent topic page
                    # NOTE(review): topicurl is computed but never used
                    torrent_id = dict([part.split('=') for part in urlparse(url)[4].split('&')])[
                        't']
                    topicurl = 'https://rutracker.org/forum/viewtopic.php?t=' + torrent_id
                    rulist.append(Result(title, size, url, 'rutracker.org', 'torrent', True))
                else:
                    logger.info("%s is larger than the maxsize or has too little seeders for this category, "
                                "skipping. (Size: %i bytes, Seeders: %i)" % (title, size, int(seeds)))

            if not rulist:
                logger.info("No valid results found from rutracker")
            return rulist
        except Exception as e:
            logger.error("An unknown error occurred in the rutracker parser: %s" % e)
            return None

    def get_torrent_data(self, url):
        """
        return the .torrent data (bytes), or False on failure
        """
        torrent_id = dict([part.split('=') for part in urlparse(url)[4].split('&')])['t']
        downloadurl = 'https://rutracker.org/forum/dl.php?t=' + torrent_id
        # The bb_dl cookie authorizes the torrent download
        cookie = {'bb_dl': torrent_id}
        try:
            headers = {
                'Referer': url,
                'User-Agent' : 'Headphones'
            }
            r = self.session.post(url=downloadurl, cookies=cookie, headers=headers,
                                  timeout=self.timeout)
            return r.content
        except Exception as e:
            logger.error('Error getting torrent: %s', e)
            return False

    # TODO get this working in utorrent.py
    def utorrent_add_file(self, data):
        """Push raw .torrent data to uTorrent's web UI (token-based API)."""
        host = headphones.CONFIG.UTORRENT_HOST
        if not host.startswith('http'):
            host = 'http://' + host
        if host.endswith('/'):
            host = host[:-1]
        if host.endswith('/gui'):
            host = host[:-4]
        base_url = host

        url = base_url + '/gui/'
        self.session.auth = (headphones.CONFIG.UTORRENT_USERNAME, headphones.CONFIG.UTORRENT_PASSWORD)

        # uTorrent requires fetching a CSRF token before any command
        try:
            r = self.session.get(url + 'token.html')
        except Exception as e:
            logger.error('Error getting token: %s', e)
            return
        if r.status_code == 401:
            logger.debug('Error reaching utorrent')
            return
        regex = re.search(r'.+>([^<]+)</div></html>', r.text)
        if regex is None:
            logger.debug('Error reading token')
            return
        self.session.params = {'token': regex.group(1)}
        files = {'torrent_file': ("", data)}
        try:
            self.session.post(url, params={'action': 'add-file'}, files=files)
        except Exception as e:
            logger.exception('Error adding file to utorrent %s', e)

    # TODO get this working in qbittorrent.py
    def qbittorrent_add_file(self, data):
        """Push raw .torrent data to qBittorrent's legacy web API."""
        host = headphones.CONFIG.QBITTORRENT_HOST
        if not host.startswith('http'):
            host = 'http://' + host
        if host.endswith('/'):
            host = host[:-1]
        if host.endswith('/gui'):
            host = host[:-4]
        base_url = host

        # self.session.auth = HTTPDigestAuth(headphones.CONFIG.QBITTORRENT_USERNAME, headphones.CONFIG.QBITTORRENT_PASSWORD)
        url = base_url + '/login'
        try:
            self.session.post(url, data={'username': headphones.CONFIG.QBITTORRENT_USERNAME,
                                         'password': headphones.CONFIG.QBITTORRENT_PASSWORD})
        except Exception as e:
            logger.exception('Error adding file to qbittorrent %s', e)
            return
        url = base_url + '/command/upload'
        args = {'savepath': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
        if headphones.CONFIG.QBITTORRENT_LABEL:
            args['category'] = headphones.CONFIG.QBITTORRENT_LABEL
        torrent_files = {'torrents': data}
        try:
            self.session.post(url, data=args, files=torrent_files)
        except Exception as e:
            logger.exception('Error adding file to qbittorrent %s', e)
| 10,233 | Python | .py | 231 | 32.073593 | 192 | 0.558478 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,313 | unittestcompat.py | rembo10_headphones/headphones/unittestcompat.py | import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
from unittest2 import TestCase as TC
else:
import unittest
from unittest import TestCase as TC
# Re-exported so test modules can use this module's skip decorator directly
skip = unittest.skip

# When True, the TestCase shims below degrade to no-op assertions (see _d)
_dummy = False

# less than 2.6 ...
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
    _dummy = True
def _d(f):
    """Decorator for assertion shims: on interpreters flagged by _dummy the
    wrapped assertion degrades to a trivially-true assertTrue; otherwise the
    original assertion runs unchanged."""
    def decorate(self, *args, **kw):
        if _dummy:
            return self.assertTrue(True)
        return f(self, *args, **kw)
    return decorate
class TestCase(TC):
    """
    Wrapper for python 2.6 stubs
    """

    def assertIsInstance(self, obj, cls, msg=None):
        # Fall back to a manual isinstance check on old interpreters
        if not _dummy:
            return super(TestCase, self).assertIsInstance(obj, cls, msg)
        tst = isinstance(obj, cls)
        return self.assertTrue(tst, msg)

    @_d
    def assertNotIsInstance(self, *args, **kw):
        return super(TestCase, self).assertNotIsInstance(*args, **kw)

    @_d
    def assertIn(self, *args, **kw):
        return super(TestCase, self).assertIn(*args, **kw)

    @_d
    def assertRegexpMatches(self, *args, **kw):
        # Delegates to the modern assertRegex name
        return super(TestCase, self).assertRegex(*args, **kw)

    # -----------------------------------------------------------
    # NOT DUMMY ASSERTIONS
    # -----------------------------------------------------------
    def assertIsNone(self, val, msg=None):
        if not _dummy:
            return super(TestCase, self).assertIsNone(val, msg)
        tst = val is None
        return super(TestCase, self).assertTrue(tst, msg)

    def assertIsNotNone(self, val, msg=None):
        if not _dummy:
            return super(TestCase, self).assertIsNotNone(val, msg)
        tst = val is not None
        return super(TestCase, self).assertTrue(tst, msg)

    def assertRaises(self, exc, msg=None):
        if not _dummy:
            return super(TestCase, self).assertRaises(exc, msg)
        return TestCase._TestCaseRaiseStub(self, exc, msg=msg)

    def assertRaisesRegexp(self, exc, regex, msg=None):
        # NOTE(review): on modern interpreters this delegates to plain
        # assertRaises and silently ignores `regex` -- confirm intended.
        if not _dummy:
            return super(TestCase, self).assertRaises(exc, msg)
        return TestCase._TestCaseRaiseStub(self, exc, regex=regex, msg=msg)

    class _TestCaseRaiseStub:
        """ Internal stuff for stubbing `assertRaises*` """

        def __init__(self, test_case, exc, regex=None, msg=None):
            self.exc = exc
            self.test_case = test_case
            self.regex = regex
            self.msg = msg

        def __enter__(self):
            return self

        def __exit__(self, tp, value, traceback):
            # Assert that the raised exception type is exactly the expected one
            tst = tp is self.exc
            self.test_case.assertTrue(tst, msg=self.msg)
            self.exception = value
            # TODO: implement self.regex checking
            # True indicates, that exception is handled
            return True
def TestArgs(*parameters):
    """Parameterize a test method: for every tuple in *parameters* a new
    test function named ``method(arg1,arg2,...)`` is injected into the
    caller's local namespace, invoking the original with those args."""
    def tuplify(x):
        # Allow bare values as single-argument parameter sets
        if not isinstance(x, tuple):
            return (x,)
        return x
    def decorator(method, parameters=parameters):
        for parameter in (tuplify(x) for x in parameters):
            def method_for_parameter(self, method=method, parameter=parameter):
                method(self, *parameter)
            args_for_parameter = ",".join(repr(v) for v in parameter)
            name_for_parameter = method.__name__ + "(" + args_for_parameter + ")"
            # Inject the generated test into the caller's frame locals so
            # the test runner discovers one test per parameter set
            frame = sys._getframe(1)  # pylint: disable-msg=W0212
            frame.f_locals[name_for_parameter] = method_for_parameter
            frame.f_locals[name_for_parameter].__doc__ = method.__doc__ + '(' + args_for_parameter + ')'
            method_for_parameter.__name__ = name_for_parameter + '(' + args_for_parameter + ')'
        return None
    return decorator
| 3,681 | Python | .py | 90 | 32.555556 | 104 | 0.59198 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,314 | helpers.py | rembo10_headphones/headphones/helpers.py | # -*- coding: utf-8 -*-
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import shutil
import sys
import tempfile
import time
import unicodedata
from contextlib import contextmanager
from datetime import datetime, date
from fnmatch import fnmatch
from functools import cmp_to_key
from glob import glob
from operator import itemgetter
from beets import logging as beetslogging
from mediafile import MediaFile, FileTypeError, UnreadableFileError
from six import text_type
from unidecode import unidecode
import headphones
# Modified from https://github.com/Verrus/beets-plugin-featInTitle
# Matches "featuring"-style separators in artist names (ft., feat., with, &, vs.)
RE_FEATURING = re.compile(r"[fF]t\.|[fF]eaturing|[fF]eat\.|\b[wW]ith\b|&|vs\.")
# Matches a disc designator embedded in an album title, e.g. "(CD 1)" or "disc2"
RE_CD_ALBUM = re.compile(r"\(?((CD|disc)\s*[0-9]+)\)?", re.I)
# Matches folder names that are ONLY a disc designator, e.g. "CD1" or "disc 2".
# Bug fix: "dics" was a typo for "disc" (cf. RE_CD_ALBUM above), so plain
# "disc N" folders were never recognized.
RE_CD = re.compile(r"^(CD|disc)\s*[0-9]+$", re.I)
def cmp(x, y):
    """
    Replacement for the Python 2 built-in ``cmp`` (removed in Python 3).

    Returns a negative number if x < y, zero if x == y and a positive
    number if x > y. ``None`` sorts before every other value.

    https://portingguide.readthedocs.io/en/latest/comparisons.html#the-cmp-function
    """
    if x is None:
        return 0 if y is None else -1
    if y is None:
        return 1
    return (x > y) - (x < y)
def multikeysort(items, columns):
    """
    Sort a list of dicts by multiple keys.

    ``columns`` is a list of key names; prefix a name with '-' for a
    descending sort on that key. Later columns break ties on earlier ones.

    Bug fix: the comparer used to ``return 0`` as soon as the *first*
    column compared equal, so secondary sort keys were never consulted.
    """
    comparers = [
        ((itemgetter(col[1:].strip()), -1) if col.startswith('-')
         else (itemgetter(col.strip()), 1))
        for col in columns]

    def _cmp(a, b):
        # None sorts before everything else (matches module-level cmp())
        if a is None:
            return 0 if b is None else -1
        if b is None:
            return 1
        return (a > b) - (a < b)

    def comparer(left, right):
        for fn, mult in comparers:
            result = _cmp(fn(left), fn(right))
            if result:
                return mult * result
        # All keys compared equal
        return 0

    return sorted(items, key=cmp_to_key(comparer))
def checked(variable):
    """Return 'Checked' for a truthy value, else '' (HTML checkbox helper)."""
    return 'Checked' if variable else ''
def radio(variable, pos):
    """Return 'Checked' when variable equals pos, else '' (radio-button helper)."""
    return 'Checked' if variable == pos else ''
def latinToAscii(unicrap):
    """
    From couch potato

    Map Latin-1 letters/symbols to plain ASCII via the table below; any
    other character at or above 0x80 is dropped entirely.
    """
    xlate = {
        0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
        0xc6: 'Ae', 0xc7: 'C',
        0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e', 0x39e: 'E',
        0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
        0xd0: 'Th', 0xd1: 'N',
        0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
        0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
        0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
        0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
        0xe6: 'ae', 0xe7: 'c',
        0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
        0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
        0xf0: 'th', 0xf1: 'n',
        0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
        0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
        0xfd: 'y', 0xfe: 'th', 0xff: 'y',
        0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
        0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
        0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
        0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
        0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
        0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
        0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
        0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
        0xd7: '*', 0xf7: '/'
    }
    pieces = []
    for ch in unicrap:
        code = ord(ch)
        if code in xlate:
            pieces.append(xlate[code])
        elif code < 0x80:
            pieces.append(str(ch))
        # characters >= 0x80 with no mapping are silently discarded
    return ''.join(pieces)
def convert_milliseconds(ms):
    """Format a duration given in milliseconds as 'MM:SS' (or 'HH:MM:SS' above one hour)."""
    seconds = ms / 1000
    fmt = "%H:%M:%S" if seconds > 3600 else "%M:%S"
    return time.strftime(fmt, time.gmtime(seconds))
def convert_seconds(s):
    """Format a duration given in seconds as 'MM:SS' (or 'HH:MM:SS' above one hour)."""
    fmt = "%H:%M:%S" if s > 3600 else "%M:%S"
    return time.strftime(fmt, time.gmtime(s))
def today():
    """Return today's date as an ISO-8601 string (YYYY-MM-DD)."""
    return date.today().isoformat()
def now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def is_valid_date(d):
    """Return True when d is non-empty and starts with an ISO date (YYYY-MM-DD)."""
    return bool(d) and bool(re.match(r'\d{4}-\d{2}-\d{2}', d))
def age(d):
    """Return the number of days elapsed since ISO date string d. Requires a valid date."""
    return (date.today() - date.fromisoformat(d)).days
def bytes_to_mb(bytes):
    """Format a byte count (int or numeric string) as a mebibyte string, e.g. '1.5 MB'."""
    return '%.1f MB' % (int(bytes) / 1048576)
def mb_to_bytes(mb_str):
    """Parse a size like '1.5' or '1.5 MB' and return the byte count; None when unparseable."""
    match = re.search(r"^(\d+(?:\.\d+)?)\s?(?:mb)?", mb_str, flags=re.I)
    return int(float(match.group(1)) * 1048576) if match else None
def piratesize(size):
    """Convert a '<number> <unit>' size string (as shown on torrent sites) to bytes.

    Recognizes B, KB/MB/GB (decimal) and KiB/MiB/GiB (binary); any other
    unit yields 0.
    """
    parts = size.split(" ")
    factor = float(parts[0])
    unit = parts[1].upper()
    multipliers = {
        'B': 1,
        'KB': 1000,
        'KIB': 1024,
        'MB': 1000000,
        'MIB': 1048576,
        'GB': 1000000000,
        'GIB': 1073741824,
    }
    multiplier = multipliers.get(unit)
    return factor * multiplier if multiplier else 0
def pattern_substitute(pattern, dic, normalize=False):
    """
    Execute path rendering/substitution based on replacement dictionary
    e.g. pattern = $Artist/$Album
    dic = {Artist: 'My artist', Album: 'My album'}
    returns My artist/My album

    When normalize is True, every replacement value is Unicode-normalized
    first (NFD on macOS to match its filesystem convention, NFC elsewhere).
    """
    from headphones import pathrender
    if not pattern:
        return ''
    if normalize:
        new_dic = {}
        for i, j in dic.items():
            if j is not None:
                try:
                    if sys.platform == 'darwin':
                        # macOS filesystems store decomposed Unicode
                        j = unicodedata.normalize('NFD', j)
                    else:
                        j = unicodedata.normalize('NFC', j)
                except TypeError:
                    # j was a byte string; decode with the system encoding first
                    j = unicodedata.normalize('NFC',
                                              j.decode(headphones.SYS_ENCODING, 'replace'))
            new_dic[i] = j
        dic = new_dic
    # pathrender.render returns a tuple; only the rendered path is needed here
    return pathrender.render(pattern, dic)[0]
def replace_all(text, dic):
    """Apply every old->new substitution from dic to text; falsy text yields ''."""
    if not text:
        return ''
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
def replace_illegal_chars(string, type="file"):
    """Replace characters illegal in file ('file') or folder ('folder') names with '_'."""
    patterns = {
        "file": r"[\?\"*:|<>/]",
        "folder": r"[:\?<>\"|*]",
    }
    pattern = patterns.get(type)
    if pattern:
        string = re.sub(pattern, "_", string)
    return string
# Used by clean_name(): _CN_RE1 replaces runs of non-word characters,
# _CN_RE2 coalesces whitespace/underscore runs.
_CN_RE1 = re.compile(r'[^\w]+', re.UNICODE)
_CN_RE2 = re.compile(r'[\s_]+', re.UNICODE)
_XLATE_GRAPHICAL_AND_DIACRITICAL = {
    # Translation table.
    # Covers the following letters, for which NFD fails because of lack of
    # combining character:
    # ©ª«®²³¹»¼½¾ÆÐØÞßæðøþĐđĦħıIJijĸĿŀŁłŒœŦŧDŽDždžLJLjljNJNjnjǤǥDZDzdzȤȥ. This
    # includes also some graphical symbols which can be easily replaced and
    # usually are written by people who don't have appropriate keyboard layout.
    '©': '(C)', 'ª': 'a.', '«': '<<', '®': '(R)', '²': '2', '³': '3',
    '¹': '1', '»': '>>', '¼': ' 1/4 ', '½': ' 1/2 ', '¾': ' 3/4 ',
    'Æ': 'AE', 'Ð': 'D', 'Ø': 'O', 'Þ': 'Th', 'ß': 'ss', 'æ': 'ae',
    'ð': 'd', 'ø': 'o', 'þ': 'th', 'Đ': 'D', 'đ': 'd', 'Ħ': 'H',
    'ħ': 'h', 'ı': 'i', 'IJ': 'IJ', 'ij': 'ij', 'ĸ': 'q', 'Ŀ': 'L',
    'ŀ': 'l', 'Ł': 'L', 'ł': 'l', 'Œ': 'OE', 'œ': 'oe', 'Ŧ': 'T',
    'ŧ': 't', 'DŽ': 'DZ', 'Dž': 'Dz', 'LJ': 'LJ', 'Lj': 'Lj',
    'lj': 'lj', 'NJ': 'NJ', 'Nj': 'Nj', 'nj': 'nj',
    'Ǥ': 'G', 'ǥ': 'g', 'DZ': 'DZ', 'Dz': 'Dz', 'dz': 'dz',
    'Ȥ': 'Z', 'ȥ': 'z', '№': 'No.',
    'º': 'o.',  # normalize Nº abbrev (popular w/ classical music),
    # this is 'masculine ordering indicator', not degree
}
_XLATE_SPECIAL = {
    # Translation table.
    # Cover additional special characters processing normalization.
    "'": '',  # replace apostrophe with nothing
    "’": '',  # replace musicbrainz style apostrophe with nothing
    '&': ' and ',  # expand & to ' and '
}
_XLATE_MUSICBRAINZ = {
    # Translation table for Musicbrainz.
    "…": '...',  # HORIZONTAL ELLIPSIS (U+2026)
    "’": "'",  # APOSTROPHE (U+0027)
    "‐": "-",  # EN DASH (U+2013)
}
def _translate(s, dictionary):
# type: (basestring,Mapping[basestring,basestring])->basestring
return ''.join(dictionary.get(x, x) for x in s)
# Unicode code-point ranges of combining diacritical marks, used by
# _is_unicode_combining() to strip accents after NFD decomposition.
_COMBINING_RANGES = (
    (0x0300, 0x036f),  # Combining Diacritical Marks
    (0x1ab0, 0x1aff),  # Combining Diacritical Marks Extended
    (0x20d0, 0x20ff),  # Combining Diacritical Marks for Symbols
    (0x1dc0, 0x1dff)  # Combining Diacritical Marks Supplement
)
def _is_unicode_combining(u):
    # type: (unicode)->bool
    """
    Check if input unicode is combining diacritical mark.
    """
    code = ord(u)
    return any(lo <= code <= hi for lo, hi in _COMBINING_RANGES)
def _transliterate(u, xlate):
    # type: (unicode)->unicode
    """
    Transliterate u: decompose (NFD), drop combining diacritics, then map
    the remaining characters through the given translation table.
    """
    decomposed = unicodedata.normalize('NFD', u)
    stripped = ''.join(ch for ch in decomposed if not _is_unicode_combining(ch))
    # at this point output is either unicode, or plain ascii
    return str(_translate(stripped, xlate))
def clean_name(s):
    # type: (basestring)->unicode
    """Normalize a name for fuzzy matching: transliterate diacritics and
    graphical symbols, expand/strip special characters, drop punctuation,
    coalesce whitespace and lowercase.

    :param s: string to clean up, possibly unicode one.
    :return: cleaned-up version of input string.
    """
    # ignore extended chars if someone was dumb enough to pass non-ascii
    # narrow string here, use only unicode for meaningful texts
    u = s if isinstance(s, str) else str(s, 'ascii', 'replace')
    # transliterate via the custom table first — it handles letters that
    # NFD/NFKC cannot decompose (e.g. ß, Ø, Œ)
    u = _transliterate(u, _XLATE_GRAPHICAL_AND_DIACRITICAL)
    # normalize whatever survived transliteration
    u = unicodedata.normalize('NFKC', u)
    # apostrophes removed, '&' expanded to ' and '
    u = _translate(u, _XLATE_SPECIAL)
    # non-alphanumeric runs become spaces, then space/underscore runs coalesce
    u = _CN_RE2.sub(' ', _CN_RE1.sub(' ', u))
    return u.strip().lower()
def clean_musicbrainz_name(s, return_as_string=True):
    # type: (basestring)->unicode
    """Substitute special Musicbrainz characters.

    :param s: string to clean up, probably unicode.
    :param return_as_string: when True the result is UTF-8 encoded.
    :return: cleaned-up version of input string.

    NOTE(review): with return_as_string=True this returns *bytes* on
    Python 3 (str.encode) — looks like a Python 2 leftover; confirm that
    callers actually expect bytes here.
    """
    if not isinstance(s, str):
        # coerce narrow byte strings, replacing anything non-ascii
        u = str(s, 'ascii', 'replace')
    else:
        u = s
    u = _translate(u, _XLATE_MUSICBRAINZ)
    if return_as_string:
        return u.encode('utf-8')
    else:
        return u
def cleanTitle(title):
    """Turn separator characters (.-/_) into spaces, collapse whitespace, Title Case."""
    title = re.sub(r"[\.\-\/\_]", " ", title).lower()
    # Strip out extra whitespace, then title-case the result
    return ' '.join(title.split()).title()
def split_path(f):
    """
    Split a path into components, starting with the drive letter (if any).
    For any path, os.path.join(*split_path(f)) is path-equal to f.
    """
    drive, remainder = os.path.splitdrive(f)
    parts = []
    # Peel off one trailing component per iteration until nothing is left
    while True:
        remainder, tail = os.path.split(remainder)
        if tail:
            parts.append(tail)
            continue
        if remainder:
            # Remaining root such as '/' or '\\'
            parts.append(remainder)
        break
    if drive:
        parts.append(drive)
    parts.reverse()
    return parts
def expand_subfolders(f):
    """
    Try to expand a given folder and search for subfolders containing media
    files. This should work for discographies indexed per album in the same
    root, possibly with folders per CD (if any).
    This algorithm will return nothing if the result is only one folder. In this
    case, normal post processing will be better.
    """
    from headphones import logger
    # Find all folders with media files in them
    media_folders = []
    for root, dirs, files in os.walk(f):
        for file in files:
            extension = os.path.splitext(file)[1].lower()[1:]
            if extension in headphones.MEDIA_FORMATS:
                if root not in media_folders:
                    media_folders.append(root)
    # Stop here if nothing found
    if len(media_folders) == 0:
        return
    # Split into path components
    media_folders = [split_path(media_folder) for media_folder in media_folders]
    # Correct folder endings such as CD1 etc. (drop the trailing disc folder)
    for index, media_folder in enumerate(media_folders):
        if RE_CD.match(media_folder[-1]):
            media_folders[index] = media_folders[index][:-1]
    # Verify the result by computing path depth relative to root.
    # All albums are expected at the same depth; a nonzero difference means
    # the layout is ambiguous and we bail out with advice for the user.
    path_depths = [len(media_folder) for media_folder in media_folders]
    difference = max(path_depths) - min(path_depths)
    if difference > 0:
        logger.info(
            f"Found {len(media_folders)} media folders, but depth difference between lowest and deepest media folder is {difference} (expected zero). If this is a discography or a collection of albums, make sure albums are per folder.")
        # While already failed, advice the user what he could try. We assume the
        # directory may contain separate CD's and maybe some extra's. The
        # structure may look like X albums at same depth, and (one or more)
        # extra folders with a higher depth.
        extra_media_folders = [media_folder[:min(path_depths)] for media_folder in media_folders if
                               len(media_folder) > min(path_depths)]
        extra_media_folders = list(
            set([os.path.join(*media_folder) for media_folder in extra_media_folders]))
        logger.info(
            f"Please look at the following folder(s), since they cause the depth difference: {extra_media_folders}")
        return
    # Convert back to paths and remove duplicates, which may be there after
    # correcting the paths
    media_folders = list(set([os.path.join(*media_folder) for media_folder in media_folders]))
    # Don't return a result if the number of subfolders is one. In this case,
    # this algorithm will not improve processing and will likely interfere
    # with other attempts such as MusicBrainz release group IDs.
    if len(media_folders) == 1:
        logger.debug("Did not expand subfolder, as it resulted in one folder.")
        return
    logger.debug(f"Expanded subfolders in folder: {media_folders}")
    return media_folders
def path_match_patterns(path, patterns):
    """
    Return True when the whole path matches at least one of the glob patterns.
    """
    return any(fnmatch(path, pattern) for pattern in patterns)
def path_filter_patterns(paths, patterns, root=''):
    """
    Remove, in place, every path matching one of the glob patterns and
    return the number of removed entries. Whole paths are matched, so the
    entries should be relative; 'root' is only used for debug output.
    """
    from headphones import logger

    removed = 0
    # Iterate over a snapshot so removal does not disturb the iteration
    for candidate in list(paths):
        if path_match_patterns(candidate, patterns):
            logger.debug(f"Path ignored by pattern: {os.path.join(root, candidate)}")
            removed += 1
            paths.remove(candidate)
    return removed
def extract_data(s):
    """
    Parse 'Artist - Album [Year]' (the headphones default naming) or a
    looser 'Artist - Album' string. Returns an (artist, album, year)
    tuple with None for anything that could not be determined.
    """
    s = s.replace('_', ' ')
    # headphones default format
    match = re.match(r'(?P<name>.*?)\s\-\s(?P<album>.*?)\s[\[\(](?P<year>.*?)[\]\)]',
                     s, re.VERBOSE)
    if match:
        return (match.group("name"), match.group("album"), match.group("year"))
    # Gonna take a guess on this one - might be enough to search on mb
    match = re.match(r"(?P<name>.*?)\s*-\s*(?P<album>[^\[(-]*)", s)
    if match:
        return (match.group("name"), match.group("album"), None)
    return (None, None, None)
def extract_metadata(f):
    """
    Scan all files in the given directory and decide on an artist, album and
    year based on the metadata. A decision is based on the number of different
    artists, albums and years found in the media files.

    Returns an (artist, album, year) tuple, with None members when no
    decision could be made.
    """
    from headphones import logger
    # Walk directory and scan all media files
    results = []
    count = 0
    for root, dirs, files in os.walk(f):
        for file in files:
            # Count the number of potential media files
            extension = os.path.splitext(file)[1].lower()[1:]
            if extension in headphones.MEDIA_FORMATS:
                count += 1
            # Try to read the file info
            try:
                media_file = MediaFile(os.path.join(root, file))
            except (FileTypeError, UnreadableFileError):
                # Probably not a media file
                continue
            # Append metadata to file
            artist = media_file.albumartist or media_file.artist
            album = media_file.album
            year = media_file.year
            if artist and album and year:
                results.append((artist.lower(), album.lower(), year))
    # Verify results
    if len(results) == 0:
        logger.info("No metadata in media files found, ignoring.")
        return (None, None, None)
    # Require that a minimum fraction of the media files carry tags.
    # Bug fix: the old comparison (count < ratio * len(results)) could never
    # trigger, since tagged files are always a subset of all media files.
    count_ratio = 0.75
    if len(results) < (count_ratio * count):
        logger.info(f"Counted {count} media files, but only {len(results)} have tags, ignoring.")
        return (None, None, None)
    # Count distinct values
    artists = list(set([x[0] for x in results]))
    albums = list(set([x[1] for x in results]))
    years = list(set([x[2] for x in results]))
    # Remove things such as CD2 from album names
    if len(albums) > 1:
        new_albums = list(albums)
        # Replace occurences of e.g. CD1
        for index, album in enumerate(new_albums):
            if RE_CD_ALBUM.search(album):
                old_album = new_albums[index]
                new_albums[index] = RE_CD_ALBUM.sub("", album).strip()
                logger.debug(f"Stripped album number identifier: {old_album} -> {new_albums[index]}")
        # Remove duplicates
        new_albums = list(set(new_albums))
        # Safety check: if nothing has merged, then ignore the work. This can
        # happen if only one CD of a multi part CD is processed.
        if len(new_albums) < len(albums):
            albums = new_albums
    # All files have the same metadata, so it's trivial
    if len(artists) == 1 and len(albums) == 1:
        return (artists[0], albums[0], years[0])
    # (Lots of) different artists. Could be a featuring album, so test for this.
    if len(artists) > 1 and len(albums) == 1:
        split_artists = [RE_FEATURING.split(x) for x in artists]
        featurings = [len(split_artist) - 1 for split_artist in split_artists]
        # Bug fix: missing f-prefix meant the placeholder was logged literally
        logger.info(f"Album seem to feature {sum(featurings)} different artists")
        if sum(featurings) > 0:
            # Find the artist of which the least splits have been generated.
            # Ideally, this should be 0, which should be the album artist
            # itself.
            artist = split_artists[featurings.index(min(featurings))][0]
            # Done
            return (artist, albums[0], years[0])
    # Not sure what to do here.
    logger.info(
        f"Found {len(artists)} artists, {len(albums)} albums and "
        f"{len(years)} years in metadata, so ignoring"
    )
    # Bug fix: missing f-prefix meant placeholders were logged literally
    logger.debug(f"Artists: {artists}, Albums: {albums}, Years: {years}")
    return (None, None, None)
def get_downloaded_track_list(albumpath):
    """
    Recursively collect all audio files (by extension) under albumpath.
    """
    tracks = []
    for root, _dirs, files in os.walk(albumpath):
        for name in files:
            extension = os.path.splitext(name)[1].lower()[1:]
            if extension in headphones.MEDIA_FORMATS:
                tracks.append(os.path.join(root, name))
    return tracks
def preserve_torrent_directory(albumpath, forced=False, single=False):
    """
    Copy torrent directory to temp headphones_ directory to keep files for seeding.

    Returns the path of the temporary "headphones" subdirectory on success,
    or None when copying failed or a previous copy is detected (unless
    'forced'). 'single' means albumpath is a single file, not a directory.
    """
    from headphones import logger
    # Create temp dir
    if headphones.CONFIG.KEEP_TORRENT_FILES_DIR:
        tempdir = headphones.CONFIG.KEEP_TORRENT_FILES_DIR
    else:
        tempdir = tempfile.gettempdir()
    logger.info(f"Preparing to copy to a temporary directory for post processing: {albumpath}")
    try:
        file_name = os.path.basename(os.path.normpath(albumpath))
        # The "_@hp@_" marker makes these temp folders recognizable later
        if not single:
            prefix = "headphones_" + file_name + "_@hp@_"
        else:
            prefix = "headphones_" + os.path.splitext(file_name)[0] + "_@hp@_"
        new_folder = tempfile.mkdtemp(prefix=prefix, dir=tempdir)
    except Exception as e:
        logger.error(f"Cannot create temp directory: {tempdir}. Error: {e}")
        return None
    # Attempt to stop multiple temp dirs being created for the same albumpath
    if not forced:
        try:
            workdir = os.path.join(tempdir, prefix)
            # Escape glob metacharacters '[' and ']' before globbing
            workdir = re.sub(r'\[', '[[]', workdir)
            workdir = re.sub(r'(?<!\[)\]', '[]]', workdir)
            if len(glob(workdir + '*/')) >= 3:
                logger.error(
                    "Looks like a temp directory has previously been created "
                    "for this albumpath, not continuing "
                )
                shutil.rmtree(new_folder)
                return None
        except Exception as e:
            logger.warn(
                "Cannot determine if already copied/processed, will copy anyway. "
                f"Warning: {e}"
            )
    # Copy to temp dir
    try:
        subdir = os.path.join(new_folder, "headphones")
        logger.info(f"Copying files to {subdir}")
        if not single:
            shutil.copytree(albumpath, subdir)
        else:
            os.makedirs(subdir)
            shutil.copy(albumpath, subdir)
        # Update the album path with the new location
        return subdir
    except Exception as e:
        logger.warn(
            f"Cannot copy/move files to temp directory: {new_folder}. "
            f"Not continuing. Error: {e}"
        )
        shutil.rmtree(new_folder)
        return None
def cue_split(albumpath, keep_original_folder=False):
    """
    Attempts to check and split audio files by a cue for the given directory.

    Returns the (possibly relocated) album path when a split was performed,
    otherwise None. Splitting only happens when there are at least as many
    .cue files as media files, i.e. the directory holds unsplit images.
    """
    # Walk directory and scan all media files
    count = 0
    cue_count = 0
    cue_dirs = []
    for root, dirs, files in os.walk(albumpath):
        for _file in files:
            extension = os.path.splitext(_file)[1].lower()[1:]
            if extension in headphones.MEDIA_FORMATS:
                count += 1
            elif extension == 'cue':
                cue_count += 1
                if root not in cue_dirs:
                    cue_dirs.append(root)
    # Split cue
    if cue_count and cue_count >= count and cue_dirs:
        # Copy to temp directory
        if keep_original_folder:
            temppath = preserve_torrent_directory(albumpath)
            if temppath:
                # Re-point the cue directories into the temp copy
                cue_dirs = [cue_dir.replace(albumpath, temppath) for cue_dir in cue_dirs]
                albumpath = temppath
            else:
                return None
        from headphones import logger, cuesplit
        logger.info("Attempting to split audio files by cue")
        # cuesplit.split changes the working directory; restore it afterwards
        cwd = os.getcwd()
        for cue_dir in cue_dirs:
            try:
                cuesplit.split(cue_dir)
            except Exception as e:
                os.chdir(cwd)
                logger.warn(f"Cue not split. Error: {e}")
                return None
        os.chdir(cwd)
        return albumpath
    return None
def extract_logline(s):
    """Parse a headphones log line into (timestamp, level, thread, message), or None."""
    # Default log format
    match = re.match(
        r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)',
        s, re.VERBOSE)
    if not match:
        return None
    return (match.group("timestamp"), match.group("level"),
            match.group("thread"), match.group("message"))
def extract_song_data(s):
    """
    Parse 'Artist - Album [Year]' (headphones default) or
    'Artist - Album (Year)' (newzbin) strings.

    Returns an (artist, album, year) tuple, or (None, None, None) when
    neither format matches.
    """
    from headphones import logger
    # headphones default format
    pattern = re.compile(r'(?P<name>.*?)\s\-\s(?P<album>.*?)\s\[(?P<year>.*?)\]', re.VERBOSE)
    match = pattern.match(s)
    if match:
        return (match.group("name"), match.group("album"), match.group("year"))
    logger.info(f"Couldn't parse {s} into a valid default format")
    # newzbin default format
    pattern = re.compile(r'(?P<name>.*?)\s\-\s(?P<album>.*?)\s\((?P<year>\d+?\))', re.VERBOSE)
    match = pattern.match(s)
    if match:
        return (match.group("name"), match.group("album"), match.group("year"))
    logger.info(f"Couldn't parse {s} into a valid Newbin format")
    # Bug fix: this used to return the undefined locals (name, album, year),
    # raising NameError whenever neither format matched.
    return (None, None, None)
def smartMove(src, dest, delete=True):
    """
    Move (or copy, when delete=False) file 'src' into directory 'dest',
    renaming the source to 'name(1).ext' style first when the destination
    name already exists.

    Returns True after a successful copy (delete=False); re-raises on a
    failed move unless the file turns out to have transferred intact.
    """
    from headphones import logger
    source_dir = os.path.dirname(src)
    filename = os.path.basename(src)
    source_path = os.path.join(source_dir, filename)
    dest_path = os.path.join(dest, filename)
    if os.path.isfile(dest_path):
        logger.info(f"Destination file exists: {dest_path}")
        title = os.path.splitext(filename)[0]
        ext = os.path.splitext(filename)[1]
        i = 1
        # Find the first free "title(i).ext" name and rename the source to it
        while True:
            newfile = title + '(' + str(i) + ')' + ext
            if os.path.isfile(os.path.join(dest, newfile)):
                i += 1
            else:
                logger.info(f"Renaming to {newfile}")
                try:
                    os.rename(src, os.path.join(source_dir, newfile))
                    filename = newfile
                    source_path = os.path.join(source_dir, filename)
                    dest_path = os.path.join(dest, filename)
                except Exception as e:
                    logger.warn(f"Error renaming {src}: {e}")
                break
    if delete:
        try:
            logger.info('Moving "%s" to "%s"', source_path, dest_path)
            shutil.move(source_path, dest_path)
        except Exception as e:
            exists = os.path.exists(dest_path)
            if exists and os.path.getsize(source_path) == os.path.getsize(dest_path):
                # Sizes match, assume the transfer completed despite the error
                logger.warn(
                    f"Successfully moved (unknown), but something went wrong: {e}"
                )
                os.unlink(source_path)
            else:
                # remove faultly copied file
                if exists:
                    os.unlink(dest_path)
                raise
    else:
        try:
            logger.info(f"Copying {source_path} to {dest_path}")
            shutil.copy(source_path, dest_path)
            return True
        except Exception as e:
            logger.warn(f"Error copying (unknown): {e}")
def walk_directory(basedir, followlinks=True):
    """
    Enhanced version of 'os.walk' where symlink directores are traversed, but
    with care. In case a folder is already processed, don't traverse it again.

    Yields (root, directories, files) tuples exactly like os.walk.
    """
    from . import logger
    # Add the base path, because symlinks poiting to the basedir should not be
    # traversed again.
    traversed = [os.path.abspath(basedir)]
    def _inner(root, directories, files):
        for directory in directories:
            path = os.path.join(root, directory)
            if followlinks and os.path.islink(path):
                # NOTE(review): os.readlink may return a path relative to the
                # link's own directory; abspath resolves it against the CWD
                # instead — confirm the links seen here are absolute.
                real_path = os.path.abspath(os.readlink(path))
                if real_path in traversed:
                    logger.debug(
                        f"Skipping {path} since it is a symlink to "
                        f"{real_path}, which is already visited."
                    )
                else:
                    traversed.append(real_path)
                    for args in os.walk(real_path):
                        for result in _inner(*args):
                            yield result
        # Pass on actual result
        yield root, directories, files
    # Start traversing
    for args in os.walk(basedir):
        for result in _inner(*args):
            yield result
#########################
# Sab renaming functions #
#########################
# TODO: Grab config values from sab to know when these options are checked. For now we'll just iterate through all combinations
def sab_replace_dots(name):
    """Mimic SABnzbd's 'replace dots' renaming option: dots become spaces."""
    return name.replace('.', ' ')
def sab_replace_spaces(name):
    """Mimic SABnzbd's 'replace spaces' renaming option: spaces become underscores."""
    return name.replace(' ', '_')
def sab_sanitize_foldername(name):
    """ Return foldername with dodgy chars converted to safe ones
    Remove any leading and trailing dot and space characters

    Mirrors SABnzbd's folder sanitization; an empty result becomes 'unknown'.
    """
    CH_ILLEGAL = r'\/<>?*|"'
    CH_LEGAL = r'++{}!@#`'
    FL_ILLEGAL = CH_ILLEGAL + ':\x92"'
    FL_LEGAL = CH_LEGAL + "-''"
    if not name:
        return
    name = unidecode(name)
    # Map each illegal character to its positional replacement. str.find
    # returns the FIRST occurrence, so the duplicate '"' in FL_ILLEGAL
    # resolves to the earlier mapping — do not convert this to a dict.
    sanitized = ''.join(
        FL_LEGAL[FL_ILLEGAL.find(ch)] if ch in FL_ILLEGAL else ch
        for ch in name.strip())
    sanitized = sanitized.strip('. ')
    return sanitized if sanitized else 'unknown'
def split_string(mystring, splitvar=','):
    """Split mystring on splitvar and strip surrounding whitespace from each piece."""
    return [piece.strip() for piece in mystring.split(splitvar)]
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create a pair of self-signed HTTPS certificares and store in them in
    'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed.

    Returns True on success, False when the files could not be written.

    This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).
    """
    from headphones import logger
    from OpenSSL import crypto
    from certgen import createKeyPair, createCertRequest, createCertificate, \
        TYPE_RSA, serial
    # Create the CA Certificate
    cakey = createKeyPair(TYPE_RSA, 2048)
    careq = createCertRequest(cakey, CN="Certificate Authority")
    cacert = createCertificate(careq, (careq, cakey), serial,
                               (0, 60 * 60 * 24 * 365 * 10))  # ten years
    pkey = createKeyPair(TYPE_RSA, 2048)
    req = createCertRequest(pkey, CN="Headphones")
    cert = createCertificate(req, (cacert, cakey), serial,
                             (0, 60 * 60 * 24 * 365 * 10))  # ten years
    # Save the key and certificate to disk.
    # Bug fix: dump_privatekey/dump_certificate return *bytes*, so the files
    # must be opened in binary mode — text mode raised TypeError and no
    # certificate was ever written.
    try:
        with open(ssl_key, "wb") as fp:
            fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
        with open(ssl_cert, "wb") as fp:
            fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    except IOError as e:
        # Bug fix: the f-string was missing the braces around 'e'
        logger.error(f"Error creating SSL key and certificate: {e}")
        return False
    return True
class BeetsLogCapture(beetslogging.Handler):
    # Log handler that records every emitted beets log message (as text)
    # in self.messages for later inspection.
    def __init__(self):
        beetslogging.Handler.__init__(self)
        self.messages = []
    def emit(self, record):
        # Store the raw (unformatted) message; text_type is str on Python 3
        self.messages.append(text_type(record.msg))
@contextmanager
def capture_beets_log(logger='beets'):
    """
    Context manager: attach a BeetsLogCapture handler to the named beets
    logger and yield the (live) list of captured messages. The handler is
    always detached afterwards, even if the body raises.
    """
    capture = BeetsLogCapture()
    log = beetslogging.getLogger(logger)
    log.addHandler(capture)
    try:
        yield capture.messages
    finally:
        log.removeHandler(capture)
def have_pct_have_total(db_artist):
    """Return (fraction of tracks owned, total track count) for a DB artist row.

    Missing/None counts are treated as 0; a zero total yields a 0 fraction.
    """
    have = db_artist['HaveTracks'] or 0
    total = db_artist['TotalTracks'] or 0
    if not total:
        return (0, total)
    return (have / total, total)
def has_token(title, token):
    """Return True when token occurs in title as a standalone word (case-insensitive)."""
    pattern = rf'(?:\W|^)+{token}(?:\W|$)+'
    return bool(re.search(pattern, title, re.IGNORECASE | re.UNICODE))
| 33,155 | Python | .py | 842 | 31.362233 | 236 | 0.595453 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,315 | sab.py | rembo10_headphones/headphones/sab.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
###################################
# Stolen from Sick-Beard's sab.py #
###################################
import http.cookiejar
import headphones
from headphones.common import USER_AGENT
from headphones import logger, helpers, request
def sab_api_call(request_type=None, params=None, **kwargs):
    """
    Perform a SABnzbd API call and return the decoded JSON response, or
    False when SABnzbd could not be reached.

    request_type: 'send_nzb' additionally applies the configured category.
    params: extra query parameters for the request.

    Bug fix: 'params' used to default to a shared mutable dict, so the
    auth/apikey/output values written below leaked into every subsequent
    call; default to None and work on a copy instead.
    """
    params = dict(params) if params is not None else {}
    # Normalize the configured host: ensure a scheme, strip trailing slash
    if not headphones.CONFIG.SAB_HOST.startswith('http'):
        headphones.CONFIG.SAB_HOST = 'http://' + headphones.CONFIG.SAB_HOST
    if headphones.CONFIG.SAB_HOST.endswith('/'):
        headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[
            0:len(headphones.CONFIG.SAB_HOST) - 1]
    url = headphones.CONFIG.SAB_HOST + "/" + "api?"
    if headphones.CONFIG.SAB_USERNAME:
        params['ma_username'] = headphones.CONFIG.SAB_USERNAME
    if headphones.CONFIG.SAB_PASSWORD:
        params['ma_password'] = headphones.CONFIG.SAB_PASSWORD
    if headphones.CONFIG.SAB_APIKEY:
        params['apikey'] = headphones.CONFIG.SAB_APIKEY
    if request_type == 'send_nzb' and headphones.CONFIG.SAB_CATEGORY:
        params['cat'] = headphones.CONFIG.SAB_CATEGORY
    params['output'] = 'json'
    response = request.request_json(url, params=params, **kwargs)
    if not response:
        logger.error("Error connecting to SABnzbd on url: %s" % headphones.CONFIG.SAB_HOST)
        return False
    else:
        logger.debug("Successfully connected to SABnzbd on url: %s" % headphones.CONFIG.SAB_HOST)
        return response
def sendNZB(nzb):
    """
    Submit a search result to SABnzbd: by URL or newzbin ID for 'nzb'
    results, or by uploading the raw data for 'nzbdata' results.

    Returns True when SABnzbd accepted the job, False otherwise.
    """
    params = {}
    # if it's a normal result we just pass SAB the URL
    if nzb.resultType == "nzb":
        # for newzbin results send the ID to sab specifically
        if nzb.provider.getID() == 'newzbin':
            id = nzb.provider.getIDFromURL(nzb.url)
            if not id:
                logger.info("Unable to send NZB to sab, can't find ID in URL " + str(nzb.url))
                return False
            params['mode'] = 'addid'
            params['name'] = id
        else:
            params['mode'] = 'addurl'
            params['name'] = nzb.url
    # if we get a raw data result we want to upload it to SAB
    elif nzb.resultType == "nzbdata":
        nzbdata = nzb.extraInfo[0]
        params['mode'] = 'addfile'
        files = {"nzbfile": (nzb.name + ".nzb", nzbdata)}
        headers = {'User-Agent': USER_AGENT}
    # NOTE(review): any other resultType leaves 'response' undefined below
    # and raises NameError — confirm only 'nzb'/'nzbdata' ever reach here.
    logger.info("Attempting to connect to SABnzbd on url: %s" % headphones.CONFIG.SAB_HOST)
    if nzb.resultType == "nzb":
        response = sab_api_call('send_nzb', params=params)
    elif nzb.resultType == "nzbdata":
        cookies = http.cookiejar.CookieJar()
        response = sab_api_call('send_nzb', params=params, method="post", files=files,
                                cookies=cookies, headers=headers)
    if not response:
        logger.info("No data returned from SABnzbd, NZB not sent")
        return False
    if response['status']:
        logger.info("NZB sent to SABnzbd successfully")
        return True
    else:
        logger.error("Error sending NZB to SABnzbd: %s" % response['error'])
        return False
def checkConfig():
    """Read SABnzbd's 'misc' config section and return its renaming flags.

    Returns a (replace_spaces, replace_dots) tuple, or (0, 0) when the
    configuration could not be fetched.
    """
    config_options = sab_api_call(params={'mode': 'get_config',
                                          'section': 'misc',
                                          })
    if not config_options:
        logger.warn(
            "Unable to read SABnzbd config file - cannot determine renaming "
            "options (might affect auto & forced post processing)")
        return (0, 0)

    misc = config_options['config']['misc']
    return (misc['replace_spaces'], misc['replace_dots'])
| 4,289 | Python | .py | 93 | 38.956989 | 130 | 0.647172 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,316 | common.py | rembo10_headphones/headphones/common.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Aug 1, 2011
@author: Michael
'''
import platform
import operator
import os
import re
from headphones import version
from functools import reduce
# Identify Our Application
# User-Agent string sent with outgoing HTTP requests, built from the
# running Headphones version and the host platform.
USER_AGENT = 'Headphones/-' + version.HEADPHONES_VERSION + ' (' + platform.system() + ' ' + platform.release() + ')'

# Notification Types
NOTIFY_SNATCH = 1
NOTIFY_DOWNLOAD = 2

# Human-readable labels for the notification types above.
notifyStrings = {}
notifyStrings[NOTIFY_SNATCH] = "Started Download"
notifyStrings[NOTIFY_DOWNLOAD] = "Download Finished"

# Release statuses
UNKNOWN = -1  # should never happen
UNAIRED = 1  # releases that haven't dropped yet
SNATCHED = 2  # qualified with quality
WANTED = 3  # releases we don't have but want to get
DOWNLOADED = 4  # qualified with quality
SKIPPED = 5  # releases we don't want
ARCHIVED = 6  # releases that you don't have locally (counts toward download completion stats)
IGNORED = 7  # releases that you don't want included in your download stats
SNATCHED_PROPER = 9  # qualified with quality
class Quality:
    """Audio quality bitmask values and helpers.

    Individual qualities are single bits so they can be OR-ed together
    into a mask.  A (status, quality) pair is packed into one "composite
    status" integer as ``status + 100 * quality`` so both fit in a single
    database column.
    """

    NONE = 0
    B192 = 1 << 1  # 2
    VBR = 1 << 2  # 4
    B256 = 1 << 3  # 8
    B320 = 1 << 4  # 16
    FLAC = 1 << 5  # 32
    # put these bits at the other end of the spectrum, far enough out that
    # they shouldn't interfere
    UNKNOWN = 1 << 15

    # Human-readable name for each quality bit.
    qualityStrings = {NONE: "N/A",
                      UNKNOWN: "Unknown",
                      B192: "MP3 192",
                      VBR: "MP3 VBR",
                      B256: "MP3 256",
                      B320: "MP3 320",
                      FLAC: "Flac"}

    # NOTE: DOWNLOADED/SNATCHED here resolve to the module-level status
    # constants; the class attributes of the same name are only defined
    # later in the class body.
    statusPrefixes = {DOWNLOADED: "Downloaded",
                      SNATCHED: "Snatched"}

    @staticmethod
    def _getStatusStrings(status):
        """Map each composite (status, quality) code to a display string."""
        toReturn = {}
        for x in Quality.qualityStrings:
            toReturn[Quality.compositeStatus(status, x)] = (
                Quality.statusPrefixes[status] + " (" +
                Quality.qualityStrings[x] + ")")
        return toReturn

    @staticmethod
    def combineQualities(anyQualities, bestQualities):
        """Pack two lists of quality bits into one integer.

        "any" qualities occupy the low 16 bits, "best" qualities are
        shifted into the high 16 bits.
        """
        anyQuality = 0
        bestQuality = 0
        if anyQualities:
            anyQuality = reduce(operator.or_, anyQualities)
        if bestQualities:
            bestQuality = reduce(operator.or_, bestQualities)
        return anyQuality | (bestQuality << 16)

    @staticmethod
    def splitQuality(quality):
        """Inverse of combineQualities: return ([any...], [best...])."""
        anyQualities = []
        bestQualities = []
        for curQual in Quality.qualityStrings:
            if curQual & quality:
                anyQualities.append(curQual)
            if curQual << 16 & quality:
                bestQualities.append(curQual)
        return (anyQualities, bestQualities)

    @staticmethod
    def assumeQuality(name):
        """Guess a quality preset from a file name's extension.

        NOTE(review): Quality.MP3 / Quality.LOSSLESS are module-level
        names defined after this class, not class attributes - confirm
        this method is actually exercised before relying on it.
        """
        if name.lower().endswith(".mp3"):
            return Quality.MP3
        elif name.lower().endswith(".flac"):
            return Quality.LOSSLESS
        else:
            return Quality.UNKNOWN

    @staticmethod
    def compositeStatus(status, quality):
        """Pack a status and a quality bit into one integer."""
        return status + 100 * quality

    @staticmethod
    def qualityDownloaded(status):
        """Extract the quality bit from a DOWNLOADED composite status."""
        # Integer division: the composite is built with integer arithmetic
        # and '/' would return a float under Python 3.
        return (status - DOWNLOADED) // 100

    @staticmethod
    def splitCompositeStatus(status):
        """Returns a tuple containing (status, quality)"""
        for x in sorted(Quality.qualityStrings, reverse=True):
            if status > x * 100:
                return (status - x * 100, x)
        return (Quality.NONE, status)

    # Placeholders; replaced with lists of composite codes right after the
    # class definition (they depend on compositeStatus above).
    DOWNLOADED = None
    SNATCHED = None
    SNATCHED_PROPER = None
# Fill in the class-level placeholders: every possible composite status
# code for each of the three statuses, one per quality bit.
Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in list(Quality.qualityStrings.keys())]
Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in list(Quality.qualityStrings.keys())]
Quality.SNATCHED_PROPER = [Quality.compositeStatus(SNATCHED_PROPER, x) for x in
                           list(Quality.qualityStrings.keys())]

# Module-level quality presets: masks combining several quality bits.
MP3 = Quality.combineQualities([Quality.B192, Quality.B256, Quality.B320, Quality.VBR], [])
LOSSLESS = Quality.combineQualities([Quality.FLAC], [])
ANY = Quality.combineQualities(
    [Quality.B192, Quality.B256, Quality.B320, Quality.VBR, Quality.FLAC], [])

qualityPresets = (MP3, LOSSLESS, ANY)
qualityPresetStrings = {MP3: "MP3 (All bitrates 192+)",
                        LOSSLESS: "Lossless (flac)",
                        ANY: "Any"}
| 4,980 | Python | .py | 122 | 33.729508 | 116 | 0.65266 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,317 | music_encoder.py | rembo10_headphones/headphones/music_encoder.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import time
import datetime
import shutil
import subprocess
import multiprocessing
import os
import headphones
from headphones import logger
from mediafile import MediaFile
# xld
from . import getXldProfile
def encode(albumPath):
    """Re-encode the media files under *albumPath* with the configured
    encoder (xld / lame / ffmpeg / libav).

    Files are encoded into a ``temp`` subdirectory of the album folder and
    moved back on success.  Returns the list of final media file paths, or
    None when setup or any encoding job failed.
    """
    use_xld = headphones.CONFIG.ENCODER == 'xld'

    # Return if xld details not found
    if use_xld:
        (xldProfile, xldFormat, xldBitrate) = getXldProfile.getXldProfile(
            headphones.CONFIG.XLDPROFILE)
        if not xldFormat:
            logger.error('Details for xld profile \'%s\' not found, files will not be re-encoded',
                         xldProfile)
            return None
    else:
        xldProfile = None

    tempDirEncode = os.path.join(albumPath, "temp")
    musicFiles = []
    musicFinalFiles = []
    musicTempFiles = []
    encoder = ""

    # Create temporary directory, but remove the old one first.
    try:
        if os.path.exists(tempDirEncode):
            shutil.rmtree(tempDirEncode)
            time.sleep(1)
        os.mkdir(tempDirEncode)
    except Exception as e:
        logger.exception("Unable to create temporary directory")
        return None

    # Collect candidate files and compute their destination names in the
    # temp directory (parallel lists: musicFiles[i] -> musicTempFiles[i]).
    for r, d, f in os.walk(albumPath):
        for music in f:
            if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
                if not use_xld:
                    encoderFormat = headphones.CONFIG.ENCODEROUTPUTFORMAT
                else:
                    xldMusicFile = os.path.join(r, music)
                    xldInfoMusic = MediaFile(xldMusicFile)
                    encoderFormat = xldFormat

                if headphones.CONFIG.ENCODERLOSSLESS:
                    # Only re-encode lossless sources (flac, or for xld any
                    # file over ~400kbps in a different format).
                    ext = os.path.normpath(os.path.splitext(music)[1].lstrip(".")).lower()
                    if not use_xld and ext == 'flac' or use_xld and (
                            ext != xldFormat and (xldInfoMusic.bitrate / 1000 > 400)):
                        musicFiles.append(os.path.join(r, music))
                        musicTemp = os.path.normpath(
                            os.path.splitext(music)[0] + '.' + encoderFormat)
                        musicTempFiles.append(os.path.join(tempDirEncode, musicTemp))
                    else:
                        logger.debug('%s is already encoded', music)
                else:
                    musicFiles.append(os.path.join(r, music))
                    musicTemp = os.path.normpath(os.path.splitext(music)[0] + '.' + encoderFormat)
                    musicTempFiles.append(os.path.join(tempDirEncode, musicTemp))

    # Resolve the encoder binary: explicit path wins, otherwise fall back
    # to a per-platform default for the selected encoder.
    if headphones.CONFIG.ENCODER_PATH:
        encoder = headphones.CONFIG.ENCODER_PATH
    else:
        if use_xld:
            encoder = os.path.join('/Applications', 'xld')
        elif headphones.CONFIG.ENCODER == 'lame':
            if headphones.SYS_PLATFORM == "win32":
                # NEED THE DEFAULT LAME INSTALL ON WIN!
                encoder = "C:/Program Files/lame/lame.exe"
            else:
                encoder = "lame"
        elif headphones.CONFIG.ENCODER == 'ffmpeg':
            if headphones.SYS_PLATFORM == "win32":
                encoder = "C:/Program Files/ffmpeg/bin/ffmpeg.exe"
            else:
                encoder = "ffmpeg"
        elif headphones.CONFIG.ENCODER == 'libav':
            if headphones.SYS_PLATFORM == "win32":
                encoder = "C:/Program Files/libav/bin/avconv.exe"
            else:
                encoder = "avconv"

    i = 0
    encoder_failed = False
    jobs = []

    # Decide per file whether encoding is actually needed; skipped files
    # are blanked out of the parallel lists.
    for music in musicFiles:
        infoMusic = MediaFile(music)
        encode = False

        if use_xld:
            if xldBitrate and (infoMusic.bitrate / 1000 <= xldBitrate):
                logger.info(f"{music} has bitrate <= {xldBitrate}kb, will not be re-encoded")
            else:
                encode = True
        elif headphones.CONFIG.ENCODER == 'lame':
            if not any(
                    music.lower().endswith('.' + x) for x
                    in ["mp3", "wav"]):
                logger.warn('Lame cannot encode %s format for %s, use ffmpeg',
                            os.path.splitext(music)[1], music)
            else:
                if music.lower().endswith('.mp3') and (
                        int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
                    logger.info('%s has bitrate <= %skb, will not be re-encoded', music,
                                headphones.CONFIG.BITRATE)
                else:
                    encode = True
        else:
            if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
                if music.lower().endswith('.ogg'):
                    logger.warn(f"Cannot re-encode .ogg {music}")
                else:
                    encode = True
            else:
                if music.lower().endswith('.' + headphones.CONFIG.ENCODEROUTPUTFORMAT) and (int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
                    logger.info('%s has bitrate <= %skb, will not be re-encoded', music, headphones.CONFIG.BITRATE)
                else:
                    encode = True

        # encode
        if encode:
            job = (encoder, music, musicTempFiles[i], albumPath, xldProfile)
            jobs.append(job)
        else:
            musicFiles[i] = None
            musicTempFiles[i] = None

        i = i + 1

    # Encode music files
    if len(jobs) > 0:
        processes = 1

        # Use multicore if enabled
        if headphones.CONFIG.ENCODER_MULTICORE:
            if headphones.CONFIG.ENCODER_MULTICORE_COUNT == 0:
                processes = multiprocessing.cpu_count()
            else:
                processes = headphones.CONFIG.ENCODER_MULTICORE_COUNT

            logger.debug("Multi-core encoding enabled, spawning %d processes",
                         processes)

        # Use multiprocessing only if it's worth the overhead. and if it is
        # enabled. If not, then use the old fashioned way.
        if processes > 1:
            with logger.listener():
                pool = multiprocessing.Pool(processes=processes)
                results = pool.map_async(command_map, jobs)

                # No new processes will be created, so close it and wait for all
                # processes to finish
                pool.close()
                pool.join()

                # Retrieve the results
                results = results.get()
        else:
            results = list(map(command_map, jobs))

        # The results are either True or False, so determine if one is False
        encoder_failed = not all(results)

    # Drop the entries that were blanked out above.
    musicFiles = [_f for _f in musicFiles if _f]
    musicTempFiles = [_f for _f in musicTempFiles if _f]

    # check all files to be encoded now exist in temp directory
    if not encoder_failed and musicTempFiles:
        for dest in musicTempFiles:
            if not os.path.exists(dest):
                encoder_failed = True
                logger.error("Encoded file '%s' does not exist in the destination temp directory",
                             dest)

    # No errors, move from temp to parent
    if not encoder_failed and musicTempFiles:
        i = 0
        for dest in musicTempFiles:
            if os.path.exists(dest):
                source = musicFiles[i]
                if headphones.CONFIG.DELETE_LOSSLESS_FILES:
                    os.remove(source)
                check_dest = os.path.join(albumPath, os.path.split(dest)[1])
                if os.path.exists(check_dest):
                    os.remove(check_dest)
                try:
                    shutil.move(dest, albumPath)
                except Exception as e:
                    logger.error('Could not move %s to %s: %s', dest, albumPath, e)
                    encoder_failed = True
                    break
            i += 1

    # remove temp directory
    shutil.rmtree(tempDirEncode)

    # Return with error if any encoding errors
    if encoder_failed:
        logger.error(
            "One or more files failed to encode. Ensure you have the latest version of %s installed.",
            headphones.CONFIG.ENCODER)
        return None

    time.sleep(1)
    # Re-scan the album folder for the final set of media files.
    for r, d, f in os.walk(albumPath):
        for music in f:
            if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
                musicFinalFiles.append(os.path.join(r, music))

    if not musicTempFiles:
        logger.info('Encoding for folder \'%s\' is not required', albumPath)

    return musicFinalFiles
def command_map(args):
    """
    Wrapper for the '[multiprocessing.]map()' method: unpack the argument
    tuple into command() and convert exceptions into a False result.
    """
    # Worker processes need their own logging handler set up first.
    in_worker = multiprocessing.current_process().name != "MainProcess"
    if in_worker:
        logger.initMultiprocessing()

    # Run the actual encoding job.
    try:
        result = command(*args)
    except Exception:
        logger.exception("Encoder raised an exception.")
        return False
    return result
def command(encoder, musicSource, musicDest, albumPath, xldProfile):
    """
    Encode a given music file with a certain encoder. Returns True on success,
    or False otherwise.

    The command line is assembled per configured encoder (xld, lame,
    ffmpeg or libav); custom options from ADVANCEDENCODER replace the
    generated ones when set.
    """
    startMusicTime = time.time()
    cmd = []

    if xldProfile:
        # xld writes into a directory, not a file path.
        xldDestDir = os.path.split(musicDest)[0]
        cmd = [encoder]
        cmd.extend([musicSource])
        cmd.extend(['--profile'])
        cmd.extend([xldProfile])
        cmd.extend(['-o'])
        cmd.extend([xldDestDir])

    # Lame
    elif headphones.CONFIG.ENCODER == 'lame':
        cmd = [encoder]
        opts = []
        if not headphones.CONFIG.ADVANCEDENCODER:
            opts.extend(['-h'])
            if headphones.CONFIG.ENCODERVBRCBR == 'cbr':
                opts.extend(['--resample', str(headphones.CONFIG.SAMPLINGFREQUENCY), '-b',
                             str(headphones.CONFIG.BITRATE)])
            elif headphones.CONFIG.ENCODERVBRCBR == 'vbr':
                opts.extend(['-v', str(headphones.CONFIG.ENCODERQUALITY)])
        else:
            # User-supplied options, whitespace-separated.
            advanced = (headphones.CONFIG.ADVANCEDENCODER.split())
            for tok in advanced:
                opts.extend([tok.encode(headphones.SYS_ENCODING)])
        opts.extend([musicSource])
        opts.extend([musicDest])
        cmd.extend(opts)

    # FFmpeg
    elif headphones.CONFIG.ENCODER == 'ffmpeg':
        cmd = [encoder, '-i', musicSource]
        opts = []
        if not headphones.CONFIG.ADVANCEDENCODER:
            if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
                opts.extend(['-acodec', 'libvorbis'])
            if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'm4a':
                opts.extend(['-strict', 'experimental'])
            if headphones.CONFIG.ENCODERVBRCBR == 'cbr':
                opts.extend(['-ar', str(headphones.CONFIG.SAMPLINGFREQUENCY), '-ab',
                             str(headphones.CONFIG.BITRATE) + 'k'])
            elif headphones.CONFIG.ENCODERVBRCBR == 'vbr':
                opts.extend(['-aq', str(headphones.CONFIG.ENCODERQUALITY)])
            # Overwrite output, force stereo, strip any video stream.
            opts.extend(['-y', '-ac', '2', '-vn'])
        else:
            advanced = (headphones.CONFIG.ADVANCEDENCODER.split())
            for tok in advanced:
                opts.extend([tok.encode(headphones.SYS_ENCODING)])
        opts.extend([musicDest])
        cmd.extend(opts)

    # Libav
    elif headphones.CONFIG.ENCODER == "libav":
        cmd = [encoder, '-i', musicSource]
        opts = []
        if not headphones.CONFIG.ADVANCEDENCODER:
            if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
                opts.extend(['-acodec', 'libvorbis'])
            if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'm4a':
                opts.extend(['-strict', 'experimental'])
            if headphones.CONFIG.ENCODERVBRCBR == 'cbr':
                opts.extend(['-ar', str(headphones.CONFIG.SAMPLINGFREQUENCY), '-ab',
                             str(headphones.CONFIG.BITRATE) + 'k'])
            elif headphones.CONFIG.ENCODERVBRCBR == 'vbr':
                opts.extend(['-aq', str(headphones.CONFIG.ENCODERQUALITY)])
            opts.extend(['-y', '-ac', '2', '-vn'])
        else:
            advanced = (headphones.CONFIG.ADVANCEDENCODER.split())
            for tok in advanced:
                opts.extend([tok.encode(headphones.SYS_ENCODING)])
        opts.extend([musicDest])
        cmd.extend(opts)

    # Prevent Windows from opening a terminal window
    startupinfo = None

    if headphones.SYS_PLATFORM == "win32":
        startupinfo = subprocess.STARTUPINFO()
        try:
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        except AttributeError:
            # Older Python on Windows kept the flag on a private module.
            startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW

    # Encode
    logger.info(f"Encoding {musicSource}")
    logger.debug(subprocess.list2cmdline(cmd))

    # NOTE(review): communicate() passes the encoder name on stdin -
    # presumably to satisfy encoders that prompt; confirm this is intended.
    process = subprocess.Popen(cmd, startupinfo=startupinfo,
                               stdin=open(os.devnull, 'rb'), stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, text=True)
    stdout, stderr = process.communicate(headphones.CONFIG.ENCODER)

    # Error if return code not zero
    if process.returncode:
        logger.error(f"Encoding failed for {musicSource}")
        out = stdout or stderr
        # Surface the last two lines of encoder output in the error log.
        outlast2lines = '\n'.join(out.splitlines()[-2:])
        logger.error(f"{headphones.CONFIG.ENCODER} error details: {outlast2lines}")
        out = out.rstrip("\n")
        logger.debug(out)
        encoded = False
    else:
        logger.info(f"{musicSource} encoded in {getTimeEncode(startMusicTime)}")
        encoded = True

    return encoded
def getTimeEncode(start):
    """Return the elapsed time since *start* as a whole-second timedelta."""
    elapsed = int(time.time() - start)
    return datetime.timedelta(seconds=elapsed)
| 14,371 | Python | .py | 329 | 32.300912 | 152 | 0.587592 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,318 | bandcamp.py | rembo10_headphones/headphones/bandcamp.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>
import headphones
import json
import os
import re
from headphones import logger, helpers, metadata, request
from headphones.common import USER_AGENT
from headphones.types import Result
from mediafile import MediaFile, UnreadableFileError
from bs4 import BeautifulSoup
from bs4 import FeatureNotFound
def search(album, albumlength=None, page=1, resultlist=None):
    """Search bandcamp.com for *album* and return a list of Result objects.

    Artist and album names are normalized before comparison; pagination is
    followed recursively, accumulating matches in *resultlist*.
    """
    # Characters stripped/replaced before comparing names.
    dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ',
           '"': '', ',': '', '*': '', '.': '', ':': ''}

    if resultlist is None:
        resultlist = []

    cleanalbum = helpers.latinToAscii(
        helpers.replace_all(album['AlbumTitle'], dic)
    ).strip()
    cleanartist = helpers.latinToAscii(
        helpers.replace_all(album['ArtistName'], dic)
    ).strip()

    headers = {'User-Agent': USER_AGENT}
    params = {
        "page": page,
        "q": cleanalbum,
    }
    logger.info("Looking up https://bandcamp.com/search with {}".format(
        params))
    content = request.request_content(
        url='https://bandcamp.com/search',
        params=params,
        headers=headers
    ).decode('utf8')

    try:
        soup = BeautifulSoup(content, "html5lib")
    except FeatureNotFound:
        # Fall back to the stdlib parser when html5lib is unavailable.
        soup = BeautifulSoup(content, "html.parser")

    for item in soup.find_all("li", class_="searchresult"):
        # 'result_type' instead of 'type' to avoid shadowing the builtin.
        result_type = item.find('div', class_='itemtype').text.strip().lower()
        if result_type == "album":
            data = parse_album(item)
            cleanartist_found = helpers.latinToAscii(data['artist'])
            cleanalbum_found = helpers.latinToAscii(data['album'])
            logger.debug(u"{} - {}".format(data['album'], cleanalbum_found))
            logger.debug("Comparing {} to {}".format(
                cleanalbum, cleanalbum_found))
            if (cleanartist.lower() == cleanartist_found.lower() and
                    cleanalbum.lower() == cleanalbum_found.lower()):
                resultlist.append(Result(
                    data['title'], data['size'], data['url'],
                    'bandcamp', 'bandcamp', True))
        else:
            continue

    # Follow pagination; results accumulate into the shared resultlist.
    if soup.find('a', class_='next'):
        page += 1
        logger.debug("Calling next page ({})".format(page))
        search(album, albumlength=albumlength,
               page=page, resultlist=resultlist)

    return resultlist
def download(album, bestqual):
    """Download *album* from the Bandcamp page at *bestqual.url*.

    The track list is embedded in the page as HTML-entity-escaped JSON;
    each track's 128kbit MP3 stream is downloaded into a per-album
    directory under BANDCAMP_DIR and tagged via MediaFile.  Returns the
    target directory path.
    """
    html = request.request_content(url=bestqual.url).decode('utf-8')

    trackinfo = []
    try:
        # The page escapes quotes as &quot;, so unescape before json.loads.
        trackinfo = json.loads(
            re.search(r"trackinfo&quot;:(\[.*?\]),", html)
            .group(1)
            .replace('&quot;', '"'))
    except ValueError as e:
        logger.warn("Couldn't load json: {}".format(e))

    directory = os.path.join(
        headphones.CONFIG.BANDCAMP_DIR,
        u'{} - {}'.format(
            album['ArtistName'].replace('/', '_'),
            album['AlbumTitle'].replace('/', '_')))
    directory = helpers.latinToAscii(directory)

    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except Exception as e:
            logger.warn("Could not create directory ({})".format(e))

    index = 1
    for track in trackinfo:
        filename = helpers.replace_illegal_chars(
            u'{:02d} - {}.mp3'.format(index, track['title']))
        # Keep paths and tag values as str: the .encode('utf-8') calls this
        # code carried over from Python 2 produced bytes paths/values,
        # which MediaFile mishandles on Python 3.
        fullname = os.path.join(directory, filename)
        logger.debug("Downloading to {}".format(fullname))
        if 'file' in track and track['file'] is not None and 'mp3-128' in track['file']:
            content = request.request_content(track['file']['mp3-128'])
            # Context manager ensures the handle is closed even on error.
            with open(fullname, 'wb') as mp3_file:
                mp3_file.write(content)
            try:
                f = MediaFile(fullname)
                date, year = metadata._date_year(album)
                f.update({
                    'artist': album['ArtistName'],
                    'album': album['AlbumTitle'],
                    'title': track['title'],
                    'track': track['track_num'],
                    'tracktotal': len(trackinfo),
                    'year': year,
                })
                f.save()
            except UnreadableFileError as ex:
                logger.warn("MediaFile couldn't parse: %s (%s)",
                            fullname,
                            str(ex))
        index += 1
    return directory
def parse_album(item):
    """Extract album metadata from one Bandcamp search-result element.

    Returns a dict with 'title', 'artist', 'album', 'url' and an estimated
    'size' in bytes.
    """
    heading = item.find('div', class_='heading')
    album = heading.text.strip()
    artist = item.find('div', class_='subhead').text.strip().replace("by ", "")

    released = item.find('div', class_='released').text.strip().replace(
        "released ", "")
    year = re.search(r"(\d{4})", released).group(1)

    url = item.find('div', class_='heading').find('a')['href'].split("?")[0]

    length = item.find('div', class_='length').text.strip()
    tracks, minutes = length.split(",")
    tracks = tracks.replace(" tracks", "").replace(" track", "").strip()
    minutes = minutes.replace(" minutes", "").strip()

    # bandcamp offers mp3 128b with should be 960KB/minute
    size = int(minutes) * 983040

    return {"title": u'{} - {} [{}]'.format(artist, album, year),
            "artist": artist, "album": album,
            "url": url, "size": size}
| 5,989 | Python | .py | 139 | 34.079137 | 84 | 0.581831 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,319 | metacritic.py | rembo10_headphones/headphones/metacritic.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import json
from headphones import db, helpers, logger, request
def update(artistid, artist_name, release_groups):
    """ Pretty simple and crude function to find the artist page on metacritic,
    then parse that page to get critic & user scores for albums"""

    # First let's modify the artist name to fit the metacritic convention.
    # We could just do a search, then take the top result, but at least this will
    # cut down on api calls. If it's ineffective then we'll switch to search
    replacements = {" & ": " ", ".": ""}
    mc_artist_name = helpers.clean_musicbrainz_name(artist_name, return_as_string=False)
    mc_artist_name = mc_artist_name.replace("'", " ")
    # NOTE(review): the next line rebuilds the name from artist_name and
    # discards the two assignments above - looks like dead code; confirm
    # which normalization is actually intended.
    mc_artist_name = helpers.replace_all(artist_name.lower(), replacements)
    mc_artist_name = mc_artist_name.replace(" ", "-")

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}

    url = "https://www.metacritic.com/person/" + mc_artist_name + "?filter-options=music&sort_options=date&num_items=100"

    res = request.request_soup(url, headers=headers, whitelist_status_code=404)

    rows = None
    try:
        table = res.find("table", class_="credits person_credits")
        rows = table.tbody.find_all('tr')
    # NOTE(review): bare except also hides programming errors; a narrower
    # AttributeError would cover the "element not found" case here.
    except:
        logger.info("Unable to get metacritic scores for: %s" % artist_name)

    myDB = db.DBConnection()
    artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [artistid]).fetchone()

    score_list = []

    # If we couldn't get anything from MetaCritic for whatever reason,
    # let's try to load scores from the db
    if not rows:
        if artist['MetaCritic']:
            score_list = json.loads(artist['MetaCritic'])
        else:
            return
    # If we did get scores, let's update the db with them
    else:
        for row in rows:
            title = row.a.string
            scores = row.find_all("span")
            critic_score = scores[0].string
            user_score = scores[1].string
            score_dict = {'title': title, 'critic_score': critic_score, 'user_score': user_score}
            score_list.append(score_dict)

        # Save scores to the database
        controlValueDict = {"ArtistID": artistid}
        newValueDict = {'MetaCritic': json.dumps(score_list)}
        myDB.upsert("artists", newValueDict, controlValueDict)

    for score in score_list:
        title = score['title']
        # Iterate through the release groups we got passed to see if we can find
        # a match
        for rg in release_groups:
            if rg['title'].lower() == title.lower():
                critic_score = score['critic_score']
                user_score = score['user_score']
                controlValueDict = {"AlbumID": rg['id']}
                newValueDict = {'CriticScore': critic_score, 'UserScore': user_score}
                myDB.upsert("albums", newValueDict, controlValueDict)
| 3,644 | Python | .py | 71 | 44.338028 | 137 | 0.665354 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,320 | albumart.py | rembo10_headphones/headphones/albumart.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import struct
from six.moves.urllib.parse import urlencode
from io import BytesIO
import headphones
from headphones import db, request, logger
def getAlbumArt(albumid):
    """Find album artwork for *albumid*, trying Cover Art Archive, Amazon
    and Last.fm in that order.

    Returns a (artwork_url, artwork_bytes) tuple, or (None, None) when no
    suitable image was found.
    """
    artwork_path = None
    artwork = None

    # CAA
    logger.info("Searching for artwork at CAA")
    artwork_path = 'https://coverartarchive.org/release-group/%s/front' % albumid
    artwork = getartwork(artwork_path)
    if artwork:
        logger.info("Artwork found at CAA")
        return artwork_path, artwork

    # Amazon
    logger.info("Searching for artwork at Amazon")
    myDB = db.DBConnection()
    dbalbum = myDB.action(
        'SELECT ArtistName, AlbumTitle, ReleaseID, AlbumASIN FROM albums WHERE AlbumID=?',
        [albumid]).fetchone()
    if dbalbum['AlbumASIN']:
        artwork_path = 'https://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg' % dbalbum['AlbumASIN']
        artwork = getartwork(artwork_path)
        if artwork:
            logger.info("Artwork found at Amazon")
            return artwork_path, artwork

    # last.fm
    from headphones import lastfm

    logger.info("Searching for artwork at last.fm")
    # Prefer a release-specific lookup by mbid, falling back to
    # artist/album text search.
    if dbalbum['ReleaseID'] != albumid:
        data = lastfm.request_lastfm("album.getinfo", mbid=dbalbum['ReleaseID'])
        if not data:
            data = lastfm.request_lastfm("album.getinfo", artist=dbalbum['ArtistName'],
                                         album=dbalbum['AlbumTitle'])
    else:
        data = lastfm.request_lastfm("album.getinfo", artist=dbalbum['ArtistName'],
                                     album=dbalbum['AlbumTitle'])

    if data:
        try:
            images = data['album']['image']
            # Keep the last 'extralarge' image seen, but stop immediately
            # on a 'mega' image (preferred size).
            for image in images:
                if image['size'] == 'extralarge':
                    artwork_path = image['#text']
                elif image['size'] == 'mega':
                    artwork_path = image['#text']
                    break
        except KeyError:
            artwork_path = None

        if artwork_path:
            artwork = getartwork(artwork_path)
            if artwork:
                logger.info("Artwork found at last.fm")
                return artwork_path, artwork

    logger.info("No suitable album art found.")
    return None, None
def jpeg(bites):
    """Scan a JPEG byte string for its start-of-frame marker and return
    (width, height), or (None, None) when the data is truncated/malformed.
    """
    stream = BytesIO(bites)
    try:
        stream.seek(0)
        segment_size = 2
        marker = 0
        # Walk the segment chain until a start-of-frame (0xC0-0xCF) marker.
        while not 0xc0 <= marker <= 0xcf:
            stream.seek(segment_size, 1)
            current = stream.read(1)
            while ord(current) == 0xff:
                current = stream.read(1)
            marker = ord(current)
            segment_size = struct.unpack('>H', stream.read(2))[0] - 2
        # Skip the sample-precision byte, then read the dimensions.
        stream.seek(1, 1)
        height, width = struct.unpack('>HH', stream.read(4))
        return width, height
    except (struct.error, TypeError):
        # struct.error: short read; TypeError: ord() on empty read at EOF.
        return None, None
def png(bites):
    """Return (width, height) from a PNG byte string, or (None, None).

    Validates the tail of the 8-byte PNG signature before reading the
    IHDR dimensions.
    """
    try:
        (magic_tail,) = struct.unpack('>i', bites[4:8])
        if magic_tail == 0x0d0a1a0a:
            return struct.unpack('>ii', bites[16:24])
        return None, None
    except struct.error:
        return None, None
def get_image_data(bites):
    """Sniff an image header and return (type, width, height).

    *type* is 'jpg' or 'png'; all three values are None when the buffer is
    too short or the format is not recognised.
    """
    # 'img_type' instead of 'type' to avoid shadowing the builtin.
    img_type = None
    width = None
    height = None

    # Too short even for a PNG IHDR; bail out early.
    if len(bites) < 24:
        return None, None, None

    peek = bites[0:2]
    if peek == b'\xff\xd8':
        width, height = jpeg(bites)
        img_type = 'jpg'
    elif peek == b'\x89P':
        width, height = png(bites)
        img_type = 'png'

    return img_type, width, height
def getartwork(artwork_path):
    """Download artwork from *artwork_path*, enforcing the configured
    minimum/maximum image width.

    Returns the image bytes, an empty bytes object, or None when the image
    is smaller than the configured minimum.
    """
    artwork = bytes()
    minwidth = 0
    maxwidth = 0

    if headphones.CONFIG.ALBUM_ART_MIN_WIDTH:
        minwidth = int(headphones.CONFIG.ALBUM_ART_MIN_WIDTH)
    if headphones.CONFIG.ALBUM_ART_MAX_WIDTH:
        maxwidth = int(headphones.CONFIG.ALBUM_ART_MAX_WIDTH)

    resp = request.request_response(artwork_path, timeout=20, stream=True, whitelist_status_code=404)

    if resp:
        img_width = None
        for chunk in resp.iter_content(chunk_size=1024):
            artwork += chunk
            # Sniff dimensions once, as soon as enough header bytes arrived.
            if not img_width and (minwidth or maxwidth):
                img_type, img_width, img_height = get_image_data(artwork)

            # Check min/max
            if img_width and (minwidth or maxwidth):
                if minwidth and img_width < minwidth:
                    logger.info("Artwork is too small. Type: %s. Width: %s. Height: %s",
                                img_type, img_width, img_height)
                    artwork = None
                    break
                elif maxwidth and img_width > maxwidth:
                    # Downsize using proxy service to max width
                    artwork = bytes()
                    url = "https://images.weserv.nl"
                    params = {
                        "url": artwork_path,
                        "w": maxwidth
                    }
                    r = request.request_response(
                        url,
                        params=params,
                        timeout=20,
                        stream=True,
                        whitelist_status_code=404
                    )
                    if r:
                        for chunk in r.iter_content(chunk_size=1024):
                            artwork += chunk
                        r.close()
                        logger.info("Artwork is greater than the maximum width, downsized using proxy service")
                    break
        resp.close()

    return artwork
def getCachedArt(albumid):
    """Return cached artwork bytes for *albumid*.

    The cache may store either a local file path or a remote URL; remote
    artwork is fetched.  Returns None when nothing is cached or the URL
    cannot be fetched.
    """
    from headphones import cache

    c = cache.Cache()
    artwork_path = c.get_artwork_from_cache(AlbumID=albumid)

    if not artwork_path:
        return

    if artwork_path.startswith("http"):
        artwork = request.request_content(artwork_path, timeout=20)
        if not artwork:
            logger.warn("Unable to open url: %s", artwork_path)
            return
        # Previously the downloaded artwork was never returned here and the
        # function fell through to an implicit None.
        return artwork
    else:
        # Artwork is binary image data: open in binary mode ("r" would
        # attempt text decoding and fail on Python 3).
        with open(artwork_path, "rb") as fp:
            return fp.read()
| 6,590 | Python | .py | 171 | 28.187135 | 111 | 0.579656 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,321 | softchroot_test.py | rembo10_headphones/headphones/softchroot_test.py | import os
import mock
from headphones.unittestcompat import TestCase, TestArgs
from headphones.softchroot import SoftChroot
from headphones.exceptions import SoftChrootError
class SoftChrootTest(TestCase):
    # Unit tests for headphones.softchroot.SoftChroot path translation:
    # apply() maps real paths into the chroot view, revoke() maps back.

    def test_create(self):
        """ create headphones.SoftChroot """
        cf = SoftChroot('/tmp/')
        self.assertIsInstance(cf, SoftChroot)
        self.assertTrue(cf.isEnabled())
        self.assertEqual(cf.getRoot(), '/tmp/')

    @TestArgs(
        (None),
        (''),
        (' '),
    )
    def test_create_disabled(self, empty_path):
        """ create DISABLED SoftChroot """
        cf = SoftChroot(empty_path)
        self.assertIsInstance(cf, SoftChroot)
        self.assertFalse(cf.isEnabled())
        self.assertIsNone(cf.getRoot())

    def test_create_on_not_exists_dir(self):
        """ create SoftChroot on non existent dir """
        path = os.path.join('/tmp', 'notexist', 'asdf', '11', '12', 'np', 'itsssss')
        cf = None
        with self.assertRaises(SoftChrootError) as exc:
            cf = SoftChroot(path)
        self.assertIsNone(cf)
        self.assertRegex(str(exc.exception), r'No such directory')
        self.assertRegex(str(exc.exception), path)

    @mock.patch('headphones.softchroot.os', wrap=os, name='OsMock')
    def test_create_on_file(self, os_mock):
        """ create SoftChroot on file, not a directory """
        path = os.path.join('/tmp', 'notexist', 'asdf', '11', '12', 'np', 'itsssss')
        # Pretend everything except *path* is a directory.
        os_mock.path.sep = os.path.sep
        os_mock.path.isdir.side_effect = lambda x: x != path
        cf = None
        with self.assertRaises(SoftChrootError) as exc:
            cf = SoftChroot(path)
        self.assertIsNone(cf)
        self.assertTrue(os_mock.path.isdir.called)
        self.assertRegex(str(exc.exception), r'No such directory')
        self.assertRegex(str(exc.exception), path)

    @TestArgs(
        (None, None),
        ('', ''),
        (' ', ' '),
        ('/tmp/', '/'),
        ('/tmp/asdf', '/asdf'),
    )
    def test_apply(self, p, e):
        """ apply SoftChroot """
        sc = SoftChroot('/tmp/')
        a = sc.apply(p)
        self.assertEqual(a, e)

    @TestArgs(
        ('/'),
        ('/nonch/path/asdf'),
        ('tmp/asdf'),
    )
    def test_apply_out_of_root(self, p):
        """ apply SoftChroot to paths outside of the chroot """
        sc = SoftChroot('/tmp/')
        a = sc.apply(p)
        self.assertEqual(a, '/')

    @TestArgs(
        (None, None),
        ('', ''),
        (' ', ' '),
        ('/', '/tmp/'),
        ('/asdf', '/tmp/asdf'),
        ('/asdf/', '/tmp/asdf/'),
        ('localdir/adf', '/tmp/localdir/adf'),
        ('localdir/adf/', '/tmp/localdir/adf/'),
    )
    def test_revoke(self, p, e):
        """ revoke SoftChroot """
        sc = SoftChroot('/tmp/')
        a = sc.revoke(p)
        self.assertEqual(a, e)

    @TestArgs(
        (None),
        (''),
        (' '),
        ('/tmp'),
        ('/tmp/'),
        ('/tmp/asdf'),
        ('/tmp/localdir/adf'),
        ('localdir/adf'),
        ('localdir/adf/'),
    )
    def test_actions_on_disabled(self, p):
        """ disabled SoftChroot should not change args on apply and revoke """
        sc = SoftChroot(None)
        a = sc.apply(p)
        self.assertEqual(a, p)
        r = sc.revoke(p)
        self.assertEqual(r, p)
| 3,387 | Python | .py | 100 | 25.82 | 84 | 0.546846 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,322 | metadata_test.py | rembo10_headphones/headphones/metadata_test.py | # encoding=utf8
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
"""
Test module for metadata.
"""
import headphones as _h
import headphones.metadata as _md
import headphones.helpers as _hp
from headphones.metadata import MetadataDict
import datetime
from .unittestcompat import TestCase
__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
class _MockMediaFile(object):
def __init__(self, artist, album, year, track, title, label):
self.artist = artist
self.album = album
self.year = year
self.track = track
self.title = title
self.label = label
self.art = 'THIS IS ART BLOB'
@classmethod
def readable_fields(cls):
return 'artist', 'album', 'year', 'track', 'title', 'label', 'art'
class _MockDatabaseRow(object):
def __init__(self, d):
self._dict = dict(d)
def keys(self):
return iter(self._dict.keys())
def __getitem__(self, item):
return self._dict[item]
class MetadataTest(TestCase):
    """
    Tests for metadata module.
    """

    def test_metadata_dict_ci(self):
        """MetadataDict: case-insensitive lookup"""
        expected = 'naïve'
        key_var = '$TitlE'
        m = MetadataDict({key_var.lower(): 'naïve'})
        self.assertFalse('$track' in m)
        self.assertTrue('$tITLe' in m, "cross-case lookup with 'in'")
        self.assertEqual(m[key_var], expected, "cross-case lookup success")
        self.assertEqual(m[key_var.lower()], expected, "same-case lookup "
                                                       "succes")

    def test_metadata_dict_cs(self):
        """MetadataDict: case-preserving lookup"""
        expected_var = 'NaïVe'
        key_var = '$TitlE'
        # Same key in two spellings: each spelling keeps its own value.
        m = MetadataDict({
            key_var.lower(): expected_var.lower(),
            key_var: expected_var
        })
        self.assertFalse('$track' in m)
        self.assertTrue('$tITLe' in m, "cross-case lookup with 'in'")
        self.assertEqual(m[key_var.lower()], expected_var.lower(),
                         "case-preserving lookup lower")
        self.assertEqual(m[key_var], expected_var,
                         "case-preserving lookup variable")

    def test_dict_intersect(self):
        """metadata: check dictionary intersect function validity"""
        d1 = {
            'one': 'one',
            'two': 'two',
            'three': 'zonk'
        }
        d2 = {
            'two': 'two',
            'three': 'three'
        }
        # Only pairs equal in BOTH dicts survive ('three' differs in value).
        expected = {
            'two': 'two'
        }
        self.assertItemsEqual(
            expected, _md._intersect(d1, d2), "check dictionary intersection "
                                              "is common part indeed"
        )
        del d1['two']
        expected = {}
        self.assertItemsEqual(
            expected, _md._intersect(d1, d2), "check intersection empty"
        )

    def test_album_metadata_builder(self):
        """AlbumMetadataBuilder: check validity"""
        mb = _md.AlbumMetadataBuilder()
        f1 = _MockMediaFile('artist', 'album', 2000, 1, 'track1', 'Ant-Zen')
        mb.add_media_file(f1)
        f2 = _MockMediaFile('artist', 'album', 2000, 2, 'track2', 'Ant-Zen')
        mb.add_media_file(f2)
        md = mb.build()
        # Only the album-level (track-invariant) fields should remain;
        # per-track fields (track, title) differ and must be dropped.
        expected = {
            _md.Vars.ARTIST_LOWER: 'artist',
            _md.Vars.ALBUM_LOWER: 'album',
            _md.Vars.YEAR.lower(): 2000,
            '$label': 'Ant-Zen'
        }
        self.assertItemsEqual(
            expected, md, "check AlbumMetadataBuilder validity"
        )

    def test_populate_from_row(self):
        """metadata: check populating metadata from database row"""
        row = _MockDatabaseRow({
            'ArtistName': 'artist',
            'AlbumTitle': 'album',
            'ReleaseDate': datetime.date(2004, 11, 28),
            'Variation': 5,
            'WrongTyped': complex(1, -1)
        })
        md = _md.MetadataDict()
        _md._row_to_dict(row, md)
        # Values are stringified; the complex-typed column is expected to
        # be dropped entirely (no '$WrongTyped' key below).
        expected = {
            '$ArtistName': 'artist',
            '$AlbumTitle': 'album',
            '$ReleaseDate': '2004-11-28',
            '$Variation': '5'
        }
        self.assertItemsEqual(expected, md, "check _row_to_dict() valid")

    def test_album_metadata_with_None(self):
        """metadata: check handling of None metadata values"""
        row = _MockDatabaseRow({
            'ArtistName': 'artist',
            'AlbumTitle': 'Album',
            'Type': None,
            'ReleaseDate': None,
        })
        mb = _md.AlbumMetadataBuilder()
        f1 = _MockMediaFile('artist', None, None, None, None, None)
        mb.add_media_file(f1)
        f2 = _MockMediaFile('artist', None, None, 2, 'track2', None)
        mb.add_media_file(f2)
        md = _md.album_metadata("/music/Artist - Album [2002]", row, mb.build())
        # tests don't undergo normal Headphones init, SYS_ENCODING is not set
        if not _h.SYS_ENCODING:
            _h.SYS_ENCODING = 'UTF-8'
        # The optional '{ [$Year]}' group must vanish when Year is None.
        res = _hp.pattern_substitute(
            "/music/$First/$Artist/$Artist - $Album{ [$Year]}", md, True)
        self.assertEqual(res, "/music/A/artist/artist - Album",
                         "check correct rendering of None via pattern_substitute()")
| 5,883 | Python | .py | 151 | 29.966887 | 84 | 0.582997 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,323 | types.py | rembo10_headphones/headphones/types.py | from dataclasses import dataclass
@dataclass(frozen=True)
class Result:
    """Immutable description of a single search result from a provider."""
    # Release title as reported by the provider.
    title: str
    # Reported download size (presumably bytes -- confirm against callers).
    size: int
    # URL of the downloadable item or its details page.
    url: str
    # Name of the search provider that produced this result.
    provider: str
    # Result kind identifier (e.g. nzb/torrent? -- semantics set by callers, verify).
    kind: str
    # Whether the result matched the search criteria, per the producing search logic.
    matches: bool
| 165 | Python | .py | 9 | 14.555556 | 33 | 0.709677 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,324 | clean_pyc.sh | rembo10_headphones/contrib/clean_pyc.sh | #!/bin/bash
# Display information
echo "This script will remove *.pyc files. These files are generated by Python, but they can cause conflicts after an upgrade. It's safe to remove them, because they will be regenerated."
echo "Press enter to continue, or CTRL + C to quit."
read
# Remove the *.pyc
find "`dirname $0`/.." -type f -name "*.pyc" -exec rm -rf {} \; | 364 | Python | .py | 7 | 50.857143 | 187 | 0.72191 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,325 | sni_test.py | rembo10_headphones/contrib/sni_test.py | #!/usr/bin/env python
import os
import sys
# Ensure that we use the Headphones provided libraries.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../lib"))
import urllib.parse
def can_import(module):
    """Report whether *module* can be imported.

    Returns True when the import succeeds, False on ImportError.
    """
    try:
        __import__(module)
        return True
    except ImportError:
        return False
def check_installation():
    """
    Check if some core modules are available. Info is based on this topic:
    https://github.com/rembo10/headphones/issues/2210.
    """
    # Probe each optional dependency; "no" marks a module that cannot
    # be imported at all.
    requests_version = "no"
    if can_import("requests"):
        import requests
        requests_version = requests.__version__

    openssl_version = "no"
    if can_import("OpenSSL"):
        import OpenSSL
        openssl_version = OpenSSL.__version__

    cryptography_version = "no"
    if can_import("cryptography"):
        import cryptography
        cryptography_version = cryptography.__version__

    pyasn1_version = "no"
    if can_import("pyasn1"):
        import pyasn1
        pyasn1_version = pyasn1.__version__

    # ndg.httpsclient exposes no __version__; its release date serves instead.
    ndg_version = "no"
    if can_import("ndg.httpsclient"):
        from ndg import httpsclient
        ndg_version = httpsclient.__date__

    # Print some system information.
    write = sys.stdout.write
    write("* Checking Python version: %s.%s.%s\n" % sys.version_info[:3])
    write("* Operating system: %s\n" % sys.platform)
    write("* Checking if requests can be imported: %s\n" % requests_version)
    write("* Checking if pyOpenSSL is installed: %s\n" % openssl_version)
    write("* Checking if cryptography is installed: %s\n" % cryptography_version)
    write("* Checking if pyasn1 is installed: %s\n" % pyasn1_version)
    write("* Checking if ndg.httpsclient is installed: %s\n" % ndg_version)
def main():
    """
    Test if the current Headphones installation can connect to SNI-enabled
    servers.

    Returns a shell-style status: 1 for invalid input or a missing
    `requests` module, 0 (or None) otherwise.
    """
    # Read the URL to test (default: a well-known SNI test host).
    if len(sys.argv) == 1:
        url = "https://sni.velox.ch/"
    else:
        url = sys.argv[1]
    # Check if it is a HTTPS website.
    parts = urllib.parse.urlparse(url)
    if parts.scheme.lower() != "https":
        sys.stderr.write(
            "Error: provided URL does not start with https://\n")
        return 1
    # Gather information
    check_installation()
    # Do the request.
    if not can_import("requests"):
        # Bug fix: 'sys.stderr.exit' does not exist (AttributeError) --
        # write the message, then return the error status.
        sys.stderr.write("Error: cannot continue without requests module!\n")
        return 1
    sys.stdout.write("* Performing request: %s\n" % url)
    import requests
    requests.packages.urllib3.disable_warnings()
    try:
        try:
            response = requests.get(url)
        except requests.exceptions.SSLError:
            sys.stdout.write(
                "- Server certificate seems invalid. I will disable "
                "certificate check and try again. You'll see the real "
                "exception if it fails again.\n")
            sys.stdout.write(
                "* Retrying request with certificate verification off.\n")
            # Bug fix: actually disable certificate verification on the
            # retry, as the message above promises.
            response = requests.get(url, verify=False)
    except Exception as e:
        # Bug fix: 'e.message' is Python 2 only; format the exception itself.
        sys.stdout.write(
            "- An error occured while performing the request. The "
            "exception was: %s\n" % e)
        sys.stdout.write(
            "- Consult the Troubleshooting wiki (https://github.com/"
            "rembo10/headphones/wiki/Troubleshooting) before you post an "
            "issue!")
        return 0
    # Verify the response.
    if response.status_code == 200:
        sys.stdout.write("+ Got a valid response. All seems OK!\n")
    else:
        # Bug fix: file.write() takes exactly one string argument -- format
        # the status code in with '%' instead of passing a second argument.
        sys.stdout.write(
            "- Server returned status code %s. Expected a status code 200.\n"
            % response.status_code)
        sys.stdout.write(
            "- However, I was able to communicate to the server!\n")
# Script entry point; the process exit status comes from main().
# Example usage: `python sni_test.py https://example.org`.
if __name__ == "__main__":
    sys.exit(main())
| 4,155 | Python | .py | 117 | 28.08547 | 79 | 0.620449 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,326 | apache-fcgi.conf | rembo10_headphones/lib/cherrypy/scaffold/apache-fcgi.conf | # Apache2 server conf file for using CherryPy with mod_fastcgi.
# This doesn't have to be "C:/", but it has to be a directory somewhere, and
# MUST match the directory used in the FastCgiExternalServer directive, below.
DocumentRoot "C:/"
ServerName 127.0.0.1
Listen 80
LoadModule fastcgi_module modules/mod_fastcgi.dll
LoadModule rewrite_module modules/mod_rewrite.so
Options ExecCGI
SetHandler fastcgi-script
RewriteEngine On
# Send requests for any URI to our fastcgi handler.
RewriteRule ^(.*)$ /fastcgi.pyc [L]
# The FastCgiExternalServer directive defines filename as an external FastCGI application.
# If filename does not begin with a slash (/) then it is assumed to be relative to the ServerRoot.
# The filename does not have to exist in the local filesystem. URIs that Apache resolves to this
# filename will be handled by this external FastCGI application.
FastCgiExternalServer "C:/fastcgi.pyc" -host 127.0.0.1:8088
| 930 | Python | .cgi | 18 | 50.444444 | 98 | 0.805066 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,327 | apache-fcgi.conf | rembo10_headphones/lib/cherrypy/scaffold/apache-fcgi.conf | # Apache2 server conf file for using CherryPy with mod_fastcgi.
# This doesn't have to be "C:/", but it has to be a directory somewhere, and
# MUST match the directory used in the FastCgiExternalServer directive, below.
DocumentRoot "C:/"
ServerName 127.0.0.1
Listen 80
LoadModule fastcgi_module modules/mod_fastcgi.dll
LoadModule rewrite_module modules/mod_rewrite.so
Options ExecCGI
SetHandler fastcgi-script
RewriteEngine On
# Send requests for any URI to our fastcgi handler.
RewriteRule ^(.*)$ /fastcgi.pyc [L]
# The FastCgiExternalServer directive defines filename as an external FastCGI application.
# If filename does not begin with a slash (/) then it is assumed to be relative to the ServerRoot.
# The filename does not have to exist in the local filesystem. URIs that Apache resolves to this
# filename will be handled by this external FastCGI application.
FastCgiExternalServer "C:/fastcgi.pyc" -host 127.0.0.1:8088
| 930 | Python | .fcgi | 18 | 50.444444 | 98 | 0.805066 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,328 | _pytest_plugin.py | rembo10_headphones/lib/cheroot/test/_pytest_plugin.py | """Local pytest plugin.
Contains hooks, which are tightly bound to the Cheroot framework
itself, useless for end-users' app testing.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
pytest_version = tuple(map(int, pytest.__version__.split('.')))
def pytest_load_initial_conftests(early_config, parser, args):
    """Drop unfilterable warning ignores."""
    # The filters below only exist (and only parse) on pytest >= 6.2.
    if pytest_version < (6, 2, 0):
        return
    # pytest>=6.2.0 under Python 3.8:
    # Refs:
    # * https://docs.pytest.org/en/stable/usage.html#unraisable
    # * https://github.com/pytest-dev/pytest/issues/5299
    # Silence unraisable/unhandled-thread warnings produced by server
    # worker threads and leaked sockets during test teardown.
    # NOTE(review): this pokes pytest's private ini cache directly.
    early_config._inicache['filterwarnings'].extend((
        'ignore:Exception in thread CP Server Thread-:'
        'pytest.PytestUnhandledThreadExceptionWarning:_pytest.threadexception',
        'ignore:Exception in thread Thread-:'
        'pytest.PytestUnhandledThreadExceptionWarning:_pytest.threadexception',
        'ignore:Exception ignored in. '
        '<socket.socket fd=-1, family=AddressFamily.AF_INET, '
        'type=SocketKind.SOCK_STREAM, proto=.:'
        'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
        'ignore:Exception ignored in. '
        '<socket.socket fd=-1, family=AddressFamily.AF_INET6, '
        'type=SocketKind.SOCK_STREAM, proto=.:'
        'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
        'ignore:Exception ignored in. '
        '<socket.socket fd=-1, family=AF_INET, '
        'type=SocketKind.SOCK_STREAM, proto=.:'
        'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
        'ignore:Exception ignored in. '
        '<socket.socket fd=-1, family=AF_INET6, '
        'type=SocketKind.SOCK_STREAM, proto=.:'
        'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
        'ignore:Exception ignored in. '
        '<ssl.SSLSocket fd=-1, family=AddressFamily.AF_UNIX, '
        'type=SocketKind.SOCK_STREAM, proto=.:'
        'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
| 2,085 | Python | .pyt | 42 | 42.928571 | 79 | 0.700737 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,329 | metacritic.py | rembo10_headphones/headphones/metacritic.py | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import json
from headphones import db, helpers, logger, request
def update(artistid, artist_name, release_groups):
    """Fetch Metacritic critic/user scores for an artist and store them.

    Scrapes the artist's Metacritic page, caches the parsed scores as JSON
    in the ``artists`` table (``MetaCritic`` column), and copies matching
    scores onto the corresponding rows of the ``albums`` table.

    Args:
        artistid: artist id (primary key in the ``artists`` table).
        artist_name: display name used to build the Metacritic URL.
        release_groups: iterable of dicts with ``title`` and ``id`` keys.
    """
    # First let's modify the artist name to fit the metacritic convention.
    # We could just do a search, then take the top result, but at least this
    # will cut down on api calls. If it's ineffective then we'll switch to
    # search.
    replacements = {" & ": " ", ".": ""}
    mc_artist_name = helpers.clean_musicbrainz_name(artist_name, return_as_string=False)
    mc_artist_name = mc_artist_name.replace("'", " ")
    # Bug fix: previously this restarted from the raw artist name, silently
    # discarding the MusicBrainz cleanup and the apostrophe replacement above.
    mc_artist_name = helpers.replace_all(mc_artist_name.lower(), replacements)
    mc_artist_name = mc_artist_name.replace(" ", "-")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
    url = "https://www.metacritic.com/person/" + mc_artist_name + "?filter-options=music&sort_options=date&num_items=100"
    res = request.request_soup(url, headers=headers, whitelist_status_code=404)
    rows = None
    try:
        table = res.find("table", class_="credits person_credits")
        rows = table.tbody.find_all('tr')
    except Exception:
        # res may be None (404) or the page layout may have changed.
        logger.info("Unable to get metacritic scores for: %s" % artist_name)
    myDB = db.DBConnection()
    artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [artistid]).fetchone()
    score_list = []
    # If we couldn't get anything from MetaCritic for whatever reason,
    # let's try to load scores from the db
    if not rows:
        if artist['MetaCritic']:
            score_list = json.loads(artist['MetaCritic'])
        else:
            return
    # If we did get scores, let's update the db with them
    else:
        for row in rows:
            title = row.a.string
            scores = row.find_all("span")
            critic_score = scores[0].string
            user_score = scores[1].string
            score_dict = {'title': title, 'critic_score': critic_score, 'user_score': user_score}
            score_list.append(score_dict)
        # Save scores to the database
        controlValueDict = {"ArtistID": artistid}
        newValueDict = {'MetaCritic': json.dumps(score_list)}
        myDB.upsert("artists", newValueDict, controlValueDict)
    for score in score_list:
        title = score['title']
        # Robustness: row.a.string (and cached JSON) may yield a None title;
        # skip instead of crashing on .lower().
        if not title:
            continue
        # Iterate through the release groups we got passed to see if we can
        # find a match (case-insensitive title comparison).
        for rg in release_groups:
            if rg['title'].lower() == title.lower():
                critic_score = score['critic_score']
                user_score = score['user_score']
                controlValueDict = {"AlbumID": rg['id']}
                newValueDict = {'CriticScore': critic_score, 'UserScore': user_score}
                myDB.upsert("albums", newValueDict, controlValueDict)
| 3,644 | Python | .tac | 71 | 44.338028 | 137 | 0.665354 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,330 | _cpwsgi_server.py | rembo10_headphones/lib/cherrypy/_cpwsgi_server.py | """WSGI server interface (see PEP 333).
This adds some CP-specific bits to the framework-agnostic cheroot
package.
"""
import sys
import cheroot.wsgi
import cheroot.server
import cherrypy
class CPWSGIHTTPRequest(cheroot.server.HTTPRequest):
    """Wrapper for cheroot.server.HTTPRequest.

    This is a layer which preserves the URI parsing mode that was in
    effect before Cheroot v5.8.0.
    """

    def __init__(self, server, conn):
        """Initialize HTTP request container instance.

        Args:
            server (cheroot.server.HTTPServer):
                web server object receiving this request
            conn (cheroot.server.HTTPConnection):
                HTTP connection object for this request
        """
        # proxy_mode=True restores the pre-5.8.0 URI parsing behavior.
        super(CPWSGIHTTPRequest, self).__init__(
            server, conn, proxy_mode=True
        )
class CPWSGIServer(cheroot.wsgi.Server):
    """Wrapper for cheroot.wsgi.Server.

    cheroot has been designed to not reference CherryPy in any way, so
    that it can be used in other frameworks and applications. Therefore,
    we wrap it here, so we can set our own mount points from
    cherrypy.tree and apply some attributes from config ->
    cherrypy.server -> wsgi.Server.
    """

    # Server version token, e.g. "CherryPy/<ver> Cheroot/<ver>".
    fmt = 'CherryPy/{cherrypy.__version__} {cheroot.wsgi.Server.version}'
    version = fmt.format(**globals())

    def __init__(self, server_adapter=cherrypy.server):
        """Initialize CPWSGIServer instance.

        Args:
            server_adapter (cherrypy._cpserver.Server): ...
        """
        self.server_adapter = server_adapter
        # Fall back to 0 when the adapter leaves the size limits unset.
        self.max_request_header_size = (
            self.server_adapter.max_request_header_size or 0
        )
        self.max_request_body_size = (
            self.server_adapter.max_request_body_size or 0
        )
        # TCP host, UNIX socket file, or neither, in that priority order.
        server_name = (self.server_adapter.socket_host or
                       self.server_adapter.socket_file or
                       None)
        self.wsgi_version = self.server_adapter.wsgi_version
        super(CPWSGIServer, self).__init__(
            server_adapter.bind_addr, cherrypy.tree,
            self.server_adapter.thread_pool,
            server_name,
            max=self.server_adapter.thread_pool_max,
            request_queue_size=self.server_adapter.socket_queue_size,
            timeout=self.server_adapter.socket_timeout,
            shutdown_timeout=self.server_adapter.shutdown_timeout,
            accepted_queue_size=self.server_adapter.accepted_queue_size,
            accepted_queue_timeout=self.server_adapter.accepted_queue_timeout,
            peercreds_enabled=self.server_adapter.peercreds,
            peercreds_resolve_enabled=self.server_adapter.peercreds_resolve,
        )
        # Parse incoming requests with the CP-compatible request class.
        self.ConnectionClass.RequestHandlerClass = CPWSGIHTTPRequest
        self.protocol = self.server_adapter.protocol_version
        self.nodelay = self.server_adapter.nodelay
        # Default SSL implementation differs between Python 2 and 3.
        if sys.version_info >= (3, 0):
            ssl_module = self.server_adapter.ssl_module or 'builtin'
        else:
            ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
        if self.server_adapter.ssl_context:
            adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain,
                self.server_adapter.ssl_ciphers)
            # An explicitly provided SSL context overrides the generated one.
            self.ssl_adapter.context = self.server_adapter.ssl_context
        elif self.server_adapter.ssl_certificate:
            adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain,
                self.server_adapter.ssl_ciphers)
        self.stats['Enabled'] = getattr(
            self.server_adapter, 'statistics', False)

    def error_log(self, msg='', level=20, traceback=False):
        """Write given message to the error log."""
        cherrypy.engine.log(msg, level, traceback)
| 4,190 | Python | .wsgi | 90 | 36.588889 | 78 | 0.652611 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,331 | _cpwsgi.py | rembo10_headphones/lib/cherrypy/_cpwsgi.py | """WSGI interface (see PEP 333 and 3333).
Note that WSGI environ keys and values are 'native strings'; that is,
whatever the type of "" is. For Python 2, that's a byte string; for
Python 3, it's a unicode string. But PEP 3333 says: "even if Python's
str type is actually Unicode "under the hood", the content of native
strings must still be translatable to bytes via the Latin-1 encoding!"
"""
import sys as _sys
import io
import cherrypy as _cherrypy
from cherrypy._cpcompat import ntou
from cherrypy import _cperror
from cherrypy.lib import httputil
from cherrypy.lib import is_closable_iterator
def downgrade_wsgi_ux_to_1x(environ):
    """Return a new environ dict for WSGI 1.x from the given WSGI u.x environ.

    Keys are re-encoded as ISO-8859-1 byte strings; URL-carrying values
    use the environ's declared ``wsgi.url_encoding``, other text values
    are encoded via ISO-8859-1.
    """
    url_encoding = environ[ntou('wsgi.url_encoding')]
    url_keys = (ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING'))

    def downgrade_value(key, value):
        # URL components carry the negotiated URL encoding; any other
        # native string is coerced via Latin-1 per PEP 3333.
        if key in url_keys:
            return value.encode(url_encoding)
        if isinstance(value, str):
            return value.encode('ISO-8859-1')
        return value

    return {
        k.encode('ISO-8859-1'): downgrade_value(k, v)
        for k, v in environ.copy().items()
    }
class VirtualHost(object):
    """Select a different WSGI application based on the Host header.

    This can be useful when running multiple sites within one CP server.
    It allows several domains to point to different applications. For example::

        root = Root()
        RootApp = cherrypy.Application(root)
        Domain2App = cherrypy.Application(root)
        SecureApp = cherrypy.Application(Secure())

        vhost = cherrypy._cpwsgi.VirtualHost(
            RootApp,
            domains={
                'www.domain2.example': Domain2App,
                'www.domain2.example:443': SecureApp,
            },
        )

        cherrypy.tree.graft(vhost)
    """

    default = None
    """Required.

    The default WSGI application.
    """

    use_x_forwarded_host = True
    """If True (the default), any "X-Forwarded-Host"
    request header will be used instead of the "Host" header. This
    is commonly added by HTTP servers (such as Apache) when proxying."""

    domains = {}
    """A dict of {host header value: application} pairs.

    The incoming "Host" request header is looked up in this dict, and,
    if a match is found, the corresponding WSGI application will be
    called instead of the default. Note that you often need separate
    entries for "example.com" and "www.example.com". In addition, "Host"
    headers may contain the port number.
    """

    def __init__(self, default, domains=None, use_x_forwarded_host=True):
        self.default = default
        self.domains = domains or {}
        self.use_x_forwarded_host = use_x_forwarded_host

    def __call__(self, environ, start_response):
        # Determine the effective host; X-Forwarded-Host wins when enabled.
        host = environ.get('HTTP_HOST', '')
        if self.use_x_forwarded_host:
            host = environ.get('HTTP_X_FORWARDED_HOST', host)
        # Dispatch to the per-domain app, falling back to the default.
        app = self.domains.get(host)
        if app is None:
            app = self.default
        return app(environ, start_response)
class InternalRedirector(object):
    """WSGI middleware that handles raised cherrypy.InternalRedirect."""

    def __init__(self, nextapp, recursive=False):
        # nextapp: the wrapped WSGI application.
        # recursive: when False, revisiting an already-seen URI raises
        # RuntimeError instead of looping forever.
        self.nextapp = nextapp
        self.recursive = recursive

    def __call__(self, environ, start_response):
        # URIs already served during this request, used for loop detection.
        redirections = []
        while True:
            environ = environ.copy()
            try:
                return self.nextapp(environ, start_response)
            except _cherrypy.InternalRedirect:
                ir = _sys.exc_info()[1]
                sn = environ.get('SCRIPT_NAME', '')
                path = environ.get('PATH_INFO', '')
                qs = environ.get('QUERY_STRING', '')
                # Add the *previous* path_info + qs to redirections.
                old_uri = sn + path
                if qs:
                    old_uri += '?' + qs
                redirections.append(old_uri)
                if not self.recursive:
                    # Check to see if the new URI has been redirected to
                    # already
                    new_uri = sn + ir.path
                    if ir.query_string:
                        new_uri += '?' + ir.query_string
                    if new_uri in redirections:
                        ir.request.close()
                        tmpl = (
                            'InternalRedirector visited the same URL twice: %r'
                        )
                        raise RuntimeError(tmpl % new_uri)
                # Munge the environment and try again.
                # The redirected request is always re-dispatched as a
                # bodyless GET carrying the redirect target.
                environ['REQUEST_METHOD'] = 'GET'
                environ['PATH_INFO'] = ir.path
                environ['QUERY_STRING'] = ir.query_string
                environ['wsgi.input'] = io.BytesIO()
                environ['CONTENT_LENGTH'] = '0'
                environ['cherrypy.previous_request'] = ir.request
class ExceptionTrapper(object):
    """WSGI middleware that traps exceptions raised by the wrapped app.

    Exceptions listed in ``throws`` propagate untouched; all trapping
    work is delegated to the ``_TrappedResponse`` wrapper.
    """

    def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
        self.nextapp = nextapp
        self.throws = throws

    def __call__(self, environ, start_response):
        return _TrappedResponse(self.nextapp, environ, start_response,
                                self.throws)
class _TrappedResponse(object):
    """Iterable WSGI response wrapper that converts unexpected exceptions
    into a bare CherryPy error response, both while producing and while
    iterating the wrapped response."""

    response = iter([])

    def __init__(self, nextapp, environ, start_response, throws):
        self.nextapp = nextapp
        self.environ = environ
        self.start_response = start_response
        # Exception types that must propagate untrapped.
        self.throws = throws
        self.started_response = False
        self.response = self.trap(
            self.nextapp, self.environ, self.start_response,
        )
        self.iter_response = iter(self.response)

    def __iter__(self):
        # Once iteration starts, errors can no longer replace the headers.
        self.started_response = True
        return self

    def __next__(self):
        return self.trap(next, self.iter_response)

    def close(self):
        if hasattr(self.response, 'close'):
            self.response.close()

    def trap(self, func, *args, **kwargs):
        """Call func(*args, **kwargs), converting unexpected exceptions
        into a bare error response."""
        try:
            return func(*args, **kwargs)
        except self.throws:
            raise
        except StopIteration:
            raise
        except Exception:
            tb = _cperror.format_exc()
            _cherrypy.log(tb, severity=40)
            if not _cherrypy.request.show_tracebacks:
                tb = ''
            s, h, b = _cperror.bare_error(tb)
            if True:
                # What fun.
                # bare_error() yields bytes; PEP 3333 requires Latin-1
                # "native string" status and headers on Python 3.
                s = s.decode('ISO-8859-1')
                h = [
                    (k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
                    for k, v in h
                ]
            if self.started_response:
                # Empty our iterable (so future calls raise StopIteration)
                self.iter_response = iter([])
            else:
                self.iter_response = iter(b)
            try:
                self.start_response(s, h, _sys.exc_info())
            except Exception:
                # "The application must not trap any exceptions raised by
                # start_response, if it called start_response with exc_info.
                # Instead, it should allow such exceptions to propagate
                # back to the server or gateway."
                # But we still log and call close() to clean up ourselves.
                _cherrypy.log(traceback=True, severity=40)
                raise
            if self.started_response:
                return b''.join(b)
            else:
                return b
# WSGI-to-CP Adapter #
class AppResponse(object):
    """WSGI response iterable for CherryPy applications."""

    def __init__(self, environ, start_response, cpapp):
        self.cpapp = cpapp
        try:
            self.environ = environ
            # Run the CherryPy request machinery; the resulting response is
            # left on cherrypy.serving.response.
            self.run()
            r = _cherrypy.serving.response
            outstatus = r.output_status
            if not isinstance(outstatus, bytes):
                raise TypeError('response.output_status is not a byte string.')
            outheaders = []
            for k, v in r.header_list:
                if not isinstance(k, bytes):
                    tmpl = 'response.header_list key %r is not a byte string.'
                    raise TypeError(tmpl % k)
                if not isinstance(v, bytes):
                    tmpl = (
                        'response.header_list value %r is not a byte string.'
                    )
                    raise TypeError(tmpl % v)
                outheaders.append((k, v))
            if True:
                # According to PEP 3333, when using Python 3, the response
                # status and headers must be bytes masquerading as unicode;
                # that is, they must be of type "str" but are restricted to
                # code points in the "latin-1" set.
                outstatus = outstatus.decode('ISO-8859-1')
                outheaders = [
                    (k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
                    for k, v in outheaders
                ]
            self.iter_response = iter(r.body)
            self.write = start_response(outstatus, outheaders)
        except BaseException:
            # Ensure serving state is released even when setup fails.
            self.close()
            raise

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.iter_response)

    def close(self):
        """Close and de-reference the current request and response.

        (Core)
        """
        streaming = _cherrypy.serving.response.stream
        self.cpapp.release_serving()
        # We avoid the expense of examining the iterator to see if it's
        # closable unless we are streaming the response, as that's the
        # only situation where we are going to have an iterator which
        # may not have been exhausted yet.
        if streaming and is_closable_iterator(self.iter_response):
            iter_close = self.iter_response.close
            try:
                iter_close()
            except Exception:
                _cherrypy.log(traceback=True, severity=40)

    def run(self):
        """Create a Request object using environ."""
        env = self.environ.get
        local = httputil.Host(
            '',
            int(env('SERVER_PORT', 80) or -1),
            env('SERVER_NAME', ''),
        )
        remote = httputil.Host(
            env('REMOTE_ADDR', ''),
            int(env('REMOTE_PORT', -1) or -1),
            env('REMOTE_HOST', ''),
        )
        scheme = env('wsgi.url_scheme')
        sproto = env('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.1')
        request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
        # LOGON_USER is served by IIS, and is the name of the
        # user after having been mapped to a local account.
        # Both IIS and Apache set REMOTE_USER, when possible.
        request.login = env('LOGON_USER') or env('REMOTE_USER') or None
        request.multithread = self.environ['wsgi.multithread']
        request.multiprocess = self.environ['wsgi.multiprocess']
        request.wsgi_environ = self.environ
        request.prev = env('cherrypy.previous_request', None)
        meth = self.environ['REQUEST_METHOD']
        path = httputil.urljoin(
            self.environ.get('SCRIPT_NAME', ''),
            self.environ.get('PATH_INFO', ''),
        )
        qs = self.environ.get('QUERY_STRING', '')
        # Transcode path/qs when app config requests a different URI
        # encoding than the one the WSGI server used.
        path, qs = self.recode_path_qs(path, qs) or (path, qs)
        rproto = self.environ.get('SERVER_PROTOCOL')
        headers = self.translate_headers(self.environ)
        rfile = self.environ['wsgi.input']
        request.run(meth, path, qs, rproto, headers, rfile)

    # CGI environ keys whose HTTP header name cannot be derived mechanically.
    headerNames = {
        'HTTP_CGI_AUTHORIZATION': 'Authorization',
        'CONTENT_LENGTH': 'Content-Length',
        'CONTENT_TYPE': 'Content-Type',
        'REMOTE_HOST': 'Remote-Host',
        'REMOTE_ADDR': 'Remote-Addr',
    }

    def recode_path_qs(self, path, qs):
        """Return (path, qs) re-decoded per app config, or None when no
        transcoding is needed or it fails."""
        # This isn't perfect; if the given PATH_INFO is in the
        # wrong encoding, it may fail to match the appropriate config
        # section URI. But meh.
        old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1')
        new_enc = self.cpapp.find_config(
            self.environ.get('PATH_INFO', ''),
            'request.uri_encoding', 'utf-8',
        )
        if new_enc.lower() == old_enc.lower():
            return
        # Even though the path and qs are unicode, the WSGI server
        # is required by PEP 3333 to coerce them to ISO-8859-1
        # masquerading as unicode. So we have to encode back to
        # bytes and then decode again using the "correct" encoding.
        try:
            return (
                path.encode(old_enc).decode(new_enc),
                qs.encode(old_enc).decode(new_enc),
            )
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Just pass them through without transcoding and hope.
            pass

    def translate_headers(self, environ):
        """Translate CGI-environ header names to HTTP header names."""
        for cgiName in environ:
            # We assume all incoming header keys are uppercase already.
            if cgiName in self.headerNames:
                yield self.headerNames[cgiName], environ[cgiName]
            elif cgiName[:5] == 'HTTP_':
                # Hackish attempt at recovering original header names.
                translatedHeader = cgiName[5:].replace('_', '-')
                yield translatedHeader, environ[cgiName]
class CPWSGIApp(object):
    """A WSGI application object for a CherryPy Application."""

    pipeline = [
        ('ExceptionTrapper', ExceptionTrapper),
        ('InternalRedirector', InternalRedirector),
    ]
    """A list of (name, wsgiapp) pairs.

    Each 'wsgiapp' MUST be a constructor that takes an initial,
    positional 'nextapp' argument, plus optional keyword arguments, and
    returns a WSGI application (that takes environ and start_response
    arguments). The 'name' can be any you choose, and will correspond to
    keys in self.config.
    """

    head = None
    """Memoized, fully-nested WSGI pipeline.

    Rather than nest all apps in the pipeline on each call, it's only
    done the first time, and the result is memoized into self.head. Set
    this to None again if you change self.pipeline after calling self."""

    config = {}
    """A dict whose keys match names listed in the pipeline.

    Each value is a further dict which will be passed to the
    corresponding named WSGI callable (from the pipeline) as keyword
    arguments.
    """

    response_class = AppResponse
    """The class to instantiate and return as the next app in the WSGI chain.
    """

    def __init__(self, cpapp, pipeline=None):
        self.cpapp = cpapp
        # Copy the class-level defaults so per-instance mutation (e.g. via
        # namespace_handler) never leaks into other instances.
        self.pipeline = self.pipeline[:]
        if pipeline:
            self.pipeline.extend(pipeline)
        self.config = self.config.copy()

    def tail(self, environ, start_response):
        """WSGI application callable for the actual CherryPy application.

        You probably shouldn't call this; call self.__call__ instead, so
        that any WSGI middleware in self.pipeline can run first.
        """
        return self.response_class(environ, start_response, self.cpapp)

    def __call__(self, environ, start_response):
        head = self.head
        if head is None:
            # Create and nest the WSGI apps in our pipeline (in reverse order).
            # Then memoize the result in self.head.
            head = self.tail
            # Loop variable renamed from `callable` to `factory` so it no
            # longer shadows the built-in of the same name.
            for name, factory in reversed(self.pipeline):
                conf = self.config.get(name, {})
                head = factory(head, **conf)
            self.head = head
        return head(environ, start_response)

    def namespace_handler(self, k, v):
        """Config handler for the 'wsgi' namespace."""
        if k == 'pipeline':
            # Note this allows multiple 'wsgi.pipeline' config entries
            # (but each entry will be processed in a 'random' order).
            # It should also allow developers to set default middleware
            # in code (passed to self.__init__) that deployers can add to
            # (but not remove) via config.
            self.pipeline.extend(v)
        elif k == 'response_class':
            self.response_class = v
        else:
            name, arg = k.split('.', 1)
            bucket = self.config.setdefault(name, {})
            bucket[arg] = v
| 16,438 | Python | .wsgi | 379 | 32.598945 | 79 | 0.584455 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,332 | test_wsgi.py | rembo10_headphones/lib/cheroot/test/test_wsgi.py | """Test wsgi."""
from concurrent.futures.thread import ThreadPoolExecutor
from traceback import print_tb
import pytest
import portend
import requests
from requests_toolbelt.sessions import BaseUrlSession as Session
from jaraco.context import ExceptionTrap
from cheroot import wsgi
from cheroot._compat import IS_MACOS, IS_WINDOWS
IS_SLOW_ENV = IS_MACOS or IS_WINDOWS
@pytest.fixture
def simple_wsgi_server():
    """Serve a trivial 'Hello world!' WSGI app on a free local port.

    Yields ``locals()`` so tests can reach ``url``, ``server`` and
    ``thread`` by name; do not rename locals here, they are part of the
    fixture's contract.
    """
    port = portend.find_available_local_port()
    def app(_environ, start_response):
        # Minimal WSGI app: always answer 200 with a plain-text body.
        status = '200 OK'
        response_headers = [('Content-type', 'text/plain')]
        start_response(status, response_headers)
        return [b'Hello world!']
    host = '::'
    addr = host, port
    server = wsgi.Server(addr, app, timeout=600 if IS_SLOW_ENV else 20)
    # pylint: disable=possibly-unused-variable
    url = 'http://localhost:{port}/'.format(**locals())
    # pylint: disable=possibly-unused-variable
    with server._run_in_thread() as thread:
        yield locals()
def test_connection_keepalive(simple_wsgi_server):
    """Hammer one pooled keep-alive connection and expect zero failures."""
    session = Session(base_url=simple_wsgi_server['url'])
    adapter = requests.adapters.HTTPAdapter(
        pool_connections=1, pool_maxsize=1000,
    )
    session.mount('http://', adapter)

    def do_request():
        # Report whether this particular request hit a connection error.
        with ExceptionTrap(requests.exceptions.ConnectionError) as trap:
            resp = session.get('info')
            resp.raise_for_status()
        print_tb(trap.tb)
        return bool(trap)

    worker_count = 10 if IS_SLOW_ENV else 50
    request_count = 250 if IS_SLOW_ENV else 1000
    with ThreadPoolExecutor(max_workers=worker_count) as pool:
        futures = [pool.submit(do_request) for _ in range(request_count)]
        failures = sum(future.result() for future in futures)

    assert not failures
def test_gateway_start_response_called_twice(monkeypatch):
    """A second ``Gateway.start_response()`` call must raise RuntimeError."""
    monkeypatch.setattr(wsgi.Gateway, 'get_environ', lambda self: {})
    gateway = wsgi.Gateway(None)
    # Pretend a response has already been started.
    gateway.started_response = True
    expected = '^WSGI start_response called a second time with no exc_info.$'
    with pytest.raises(RuntimeError, match=expected):
        gateway.start_response('200', (), None)
def test_gateway_write_needs_start_response_called_before(monkeypatch):
    """``Gateway.write()`` before ``start_response()`` must raise."""
    monkeypatch.setattr(wsgi.Gateway, 'get_environ', lambda self: {})
    gateway = wsgi.Gateway(None)
    expected = '^WSGI write called before start_response.$'
    with pytest.raises(RuntimeError, match=expected):
        gateway.write(None)  # The actual arg value is unimportant
| 2,758 | Python | .wsgi | 63 | 38.079365 | 76 | 0.698318 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,333 | printcore.py | kliment_Printrun/printcore.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import time
import getopt
import sys
import getopt
from printrun.printcore import printcore
from printrun.utils import setup_logging
from printrun import gcoder
if __name__ == '__main__':
    # Command-line G-code printing front-end for printcore.
    setup_logging(sys.stderr)
    baud = 115200
    loud = False
    statusreport = False
    from printrun.printcore import __version__ as printcore_version
    usage = "Usage:\n"+\
            " printcore [OPTIONS] PORT FILE\n\n"+\
            "Options:\n"+\
            " -b, --baud=BAUD_RATE"+\
            "\t\tSet baud rate value. Default value is 115200\n"+\
            " -s, --statusreport\t\tPrint progress as percentage\n"+\
            " -v, --verbose\t\t\tPrint additional progress information\n"+\
            " -V, --version\t\t\tPrint program's version number and exit\n"+\
            " -h, --help\t\t\tPrint this help message and exit\n"
    try:
        opts, args = getopt.getopt(sys.argv[1:], "b:svVh",
                                   ["baud=", "statusreport", "verbose", "version", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        print(usage)
        sys.exit(2)
    for o, a in opts:
        if o in ('-h', '--help'):
            print(usage)
            sys.exit(0)
        elif o in ('-V', '--version'):
            print("printrun "+printcore_version)
            sys.exit(0)
        elif o in ('-b', '--baud'):
            try:
                baud = int(a)
            except ValueError:
                print("ValueError:")
                print("\tInvalid BAUD_RATE value '%s'" % a)
                print("\tBAUD_RATE must be an integer\n")
                # FIXME: This should output a more appropriate error message when
                # not a good baud rate is passed as an argument
                # i.e: when baud <= 1000 or > 225000
                print(usage)
                sys.exit(2)
        elif o in ('-v', '--verbose'):
            loud = True
        elif o in ('-s', '--statusreport'):
            statusreport = True

    if len(args) <= 1:
        print("Error: Port or gcode file were not specified.\n")
        print(usage)
        sys.exit(2)
    # The previous branch exits, so at this point there are >= 2 args.
    port = args[-2]
    filename = args[-1]
    print("Printing: %s on %s with baudrate %d" % (filename, port, baud))

    p = printcore(port, baud)
    p.loud = loud
    time.sleep(2)
    # Read the whole file with a context manager so the handle is always
    # closed (the previous version leaked the file object).
    with open(filename) as gcode_file:
        gcode = [line.strip() for line in gcode_file]
    gcode = gcoder.LightGCode(gcode)
    p.startprint(gcode)
    try:
        if statusreport:
            p.loud = False
            sys.stdout.write("Progress: 00.0%\r")
            sys.stdout.flush()
        while p.printing:
            time.sleep(1)
            # Guard against an empty queue to avoid ZeroDivisionError.
            if statusreport and len(p.mainqueue):
                progress = 100 * float(p.queueindex) / len(p.mainqueue)
                sys.stdout.write("Progress: %02.1f%%\r" % progress)
                sys.stdout.flush()
    except KeyboardInterrupt:
        # Ctrl-C during a print: fall through to the disconnect below.
        pass
    finally:
        # Always release the serial port; other exceptions now propagate
        # instead of being silently swallowed by a bare `except:`.
        p.disconnect()
| 3,641 | Python | .py | 96 | 29.364583 | 81 | 0.580764 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,334 | setup.py | kliment_Printrun/setup.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import ast
import glob
from setuptools import Extension, find_packages, setup
def get_install_requires():
    """Read requirements.txt and return its lines (trailing newlines kept)."""
    with open('requirements.txt') as requirements:
        return list(requirements)
def get_version():
    """Extract ``__version__`` from printrun/printcore.py.

    Returns the literal value of the first ``__version__ = ...`` line,
    or the string "unknown" when no such line is found.
    """
    with open('printrun/printcore.py', encoding="utf-8") as f:
        # Iterate the file directly instead of materializing readlines().
        for line in f:
            if line.startswith("__version__"):
                # Split on the first '=' only, so the value itself may
                # legally contain an '=' character.
                return ast.literal_eval(line.split("=", 1)[1].strip())
    return "unknown"
def multiglob(*globs):
    """Expand each glob pattern and concatenate all matches into one list."""
    return [match for pattern in globs for match in glob.glob(pattern)]
def get_data_files():
    """Build setup()'s data_files list: icons, desktop entries, appdata,
    pronterface images and compiled locale catalogs."""
    data_files = [
        ('share/pixmaps', multiglob('*.png')),
        ('share/applications', multiglob('*.desktop')),
        ('share/metainfo', multiglob('*.appdata.xml')),
        ('share/pronterface/images', multiglob('images/*.png',
                                               'images/*.svg')),
    ]
    # One entry per locale directory, shipping every compiled .mo catalog.
    data_files.extend(
        (f'share/{locale}', glob.glob(f'{locale}/*.mo'))
        for locale in glob.glob('locale/*/LC_MESSAGES/')
    )
    return data_files
def get_extensions():
    """Return the Cython extension modules to compile."""
    gcoder_ext = Extension(
        name="printrun.gcoder_line",
        sources=["printrun/gcoder_line.pyx"],
    )
    return [gcoder_ext]
# Only the dynamic pieces of the metadata are computed here; static fields
# (name, description, ...) presumably live in setup.cfg — confirm.
setup(
    version=get_version(),
    data_files=get_data_files(),
    packages=find_packages(),
    scripts=["pronsole.py", "pronterface.py", "plater.py", "printcore.py"],
    ext_modules=get_extensions(),
    install_requires=get_install_requires(),
    zip_safe=False,
)
| 2,176 | Python | .py | 58 | 32.017241 | 75 | 0.665557 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,335 | gcodeplater.py | kliment_Printrun/gcodeplater.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import wx
from printrun.gcodeplater import GcodePlater
if __name__ == '__main__':
    # Stand-alone launcher: plate the G-code files named on the command line.
    application = wx.App(False)
    plater_window = GcodePlater(filenames=sys.argv[1:])
    plater_window.Show()
    application.MainLoop()
| 895 | Python | .py | 23 | 37.043478 | 70 | 0.75576 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,336 | pronterface.py | kliment_Printrun/pronterface.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import getopt
try:
import wx # NOQA
if wx.VERSION < (4,):
raise ImportError()
except:
print("wxPython >= 4 is not installed. This program requires wxPython >=4 to run.")
raise
from printrun.pronterface import PronterApp
if __name__ == '__main__':
    # GUI entry point for Pronterface.
    from printrun.printcore import __version__ as printcore_version
    # Force the X11 backend; wx is presumably unhappy under Wayland — confirm.
    os.environ['GDK_BACKEND'] = 'x11'
    usage = "Usage:\n"+\
            " pronterface [OPTIONS] [FILE]\n\n"+\
            "Options:\n"+\
            " -h, --help\t\t\tPrint this help message and exit\n"+\
            " -V, --version\t\t\tPrint program's version number and exit\n"+\
            " -v, --verbose\t\t\tIncrease verbosity\n"+\
            " -a, --autoconnect\t\tAutomatically try to connect to printer on startup\n"+\
            " -c, --conf, --config=CONFIG_FILE\tLoad this file on startup instead of .pronsolerc; you may chain config files, if so settings auto-save will use the last specified file\n"+\
            " -e, --execute=COMMAND\t\tExecutes command after configuration/.pronsolerc is loaded; macros/settings from these commands are not autosaved"
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hVvac:e:", ["help", "version", "verbose", "autoconnect", "conf=", "config=", "execute="])
    except getopt.GetoptError as err:
        print(str(err))
        print(usage)
        sys.exit(2)
    # Only -V/-h are acted on here; the remaining options are presumably
    # re-read from sys.argv inside PronterApp — confirm.
    for o, a in opts:
        if o in ('-V','--version'):
            print("printrun "+printcore_version)
            sys.exit(0)
        elif o in ('-h', '--help'):
            print(usage)
            sys.exit(0)
    app = PronterApp(False)
    try:
        app.MainLoop()
    except KeyboardInterrupt:
        # Ctrl-C in the terminal: exit quietly.
        pass
    del app
| 2,428 | Python | .py | 57 | 36.649123 | 189 | 0.654384 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,337 | calibrateextruder.py | kliment_Printrun/calibrateextruder.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
# Interactive RepRap e axis calibration program
# (C) Nathan Zadoks 2011
s = 300  # Extrusion speed (mm/min)
n = 100  # Default length to extrude
m = 0  # User-entered measured extrusion length
k = 300  # Default amount of steps per mm
port = '/dev/ttyUSB0'  # Default serial port to connect to printer
temp = 210  # Default extrusion temperature
tempmax = 250  # Maximum extrusion temperature
# Floor division: `t` is passed to range() in wait(), which rejects the
# float produced by Python 3 true division.
t = int(n * 60) // s  # Time to wait for extrusion (seconds)
try:
    # Prefer the dummy backend when available (for offline testing).
    from printdummy import printcore
except ImportError:
    from printcore import printcore
import time
import getopt
import sys
import os
def float_input(prompt=''):
    """Prompt repeatedly until the user types a valid float; return it."""
    while True:
        raw = input(prompt)
        try:
            return float(raw)
        except ValueError:
            sys.stderr.write("Not a valid floating-point number.\n")
            sys.stderr.flush()
def wait(t, m=''):
    """Draw a t-column progress bar labelled *m* and spin for ~t seconds."""
    sys.stdout.write(m + '[' + (' ' * t) + ']\r' + m + '[')
    sys.stdout.flush()
    spinner = ['|\b', '/\b', '-\b', '\\\b', '|']
    for _ in range(t):
        for frame in spinner:
            sys.stdout.write(frame)
            sys.stdout.flush()
            time.sleep(1.0 / 5)
    print()
def w(s):
    """Write *s* to stdout immediately, without a trailing newline."""
    print(s, end='', flush=True)
def heatup(p, temp, s = 0):
    """Heat the extruder and block until it reports at least `temp` - 1 degrees.

    p: printcore connection; temp: target temperature in degrees C;
    s: when truthy, suppress the "Heating..." status line (quiet mode).
    """
    curtemp = gettemp(p)
    # M109: set extruder temperature (presumably set-and-wait on the
    # firmware side — confirm against the target firmware).
    p.send_now('M109 S%03d' % temp)
    p.temp = 0
    if not s: w("Heating extruder up..")
    f = False  # True once the one-off initial settling delay has been taken
    while curtemp <= (temp - 1):
        p.send_now('M105')  # request a temperature report
        time.sleep(0.5)
        if not f:
            # Extra pause before the first poll so the firmware can respond.
            time.sleep(1.5)
            f = True
        curtemp = gettemp(p)
        if curtemp: w("\rHeating extruder up.. %3d \xb0C" % curtemp)
    if s: print()
    else: print("\nReady.")
def gettemp(p):
    """Parse the printer log for 'T:' temperature reports.

    Only log lines added since the previous call are scanned (the scan
    position is tracked on the connection as ``p.logl``); the last
    temperature seen is cached as ``p.temp``.

    Returns the most recent integer extruder temperature, or 0 if none
    was ever reported.
    """
    if not hasattr(p, 'logl'):
        p.logl = 0  # index of the first log line not yet scanned
    if not hasattr(p, 'temp'):
        p.temp = 0  # last temperature parsed (0 = none yet)
    for line in p.log[p.logl:]:
        if 'T:' in line:
            try:
                p.temp = int(line.split('T:')[1].split()[0])
            except (ValueError, IndexError):
                # Unparseable report (e.g. a fractional 'T:210.0');
                # show the raw line and keep the previous value.
                print(line)
    p.logl = len(p.log)
    return p.temp
if not os.path.exists(port):
    # Fall back to auto-detection when the default port node is absent.
    port = 0
# Parse options.  NOTE: this text was previously built with
# `str.encode('utf-8') % (...)`, which raises TypeError under Python 3
# (bytes %-formatting does not accept str arguments); it is now kept as
# str.  Renamed from `help` to avoid shadowing the built-in.
help_text = """
%s [ -l DISTANCE ] [ -s STEPS ] [ -t TEMP ] [ -p PORT ]
 -l --length Length of filament to extrude for each calibration step (default: %d mm)
 -s --steps Initial amount of steps to use (default: %d steps)
 -t --temp Extrusion temperature in degrees Celsius (default: %d \xb0C, max %d \xb0C)
 -p --port Serial port the printer is connected to (default: %s)
 -h --help This cruft.
"""[1:-1] % (sys.argv[0], n, k, temp, tempmax, port if port else 'auto')
try:
    opts, args = getopt.getopt(sys.argv[1:], "hl:s:t:p:", ["help", "length=", "steps=", "temp=", "port="])
except getopt.GetoptError as err:
    print(str(err))
    print(help_text)
    sys.exit(2)
for o, a in opts:
    if o in ('-h', '--help'):
        print(help_text)
        sys.exit()
    elif o in ('-l', '--length'):
        n = float(a)
    elif o in ('-s', '--steps'):
        k = int(a)
    elif o in ('-t', '--temp'):
        temp = int(a)
        if temp >= tempmax:
            # Was bytes + str (TypeError) — now plain str formatting.
            print(('%d \xb0C? Are you insane?' % temp) + (" That's over nine thousand!" if temp > 9000 else ''))
            sys.exit(255)
    elif o in ('-p', '--port'):
        port = a
# Recompute the extrusion wait now that -l may have changed n; floor
# division keeps it an int as required by wait()'s range()/str-multiply.
t = int(n * 60) // s
# Show initial parameters
print("Initial parameters")
print("Steps per mm: %3d steps" % k)
print("Length extruded: %3d mm" % n)
print()
print("Serial port: %s" % (port if port else 'auto'))
p = None
try:
    # Connect to printer
    w("Connecting to printer..")
    try:
        p = printcore(port, 115200)
    except:
        # Report the failure, then re-raise so the outer finally runs.
        print('Error.')
        raise
    while not p.online:
        time.sleep(1)
        w('.')
    print(" connected.")
    heatup(p, temp)
    # Calibration loop: extrude, ask the user what actually came out,
    # and scale the steps-per-mm until the two lengths agree.
    # The wait length `t` is derived from n and s at module scope.
    while n != m:
        heatup(p, temp, True)
        p.send_now("G92 E0") # Reset e axis
        p.send_now("G1 E%d F%d" % (n, s)) # Extrude length of filament
        wait(t, 'Extruding.. ')
        m = float_input("How many millimeters of filament were extruded? ")
        if m == 0: continue
        if n != m:
            # Proportional correction: requested/measured ratio.
            k = (n / m) * k
            p.send_now("M92 E%d" % int(round(k))) # Set new step count
            print("Steps per mm: %3d steps" % k) # Tell user
    print('Calibration completed.') # Yay!
except KeyboardInterrupt:
    # Ctrl-C: abort silently; the finally still releases the port.
    pass
finally:
    if p: p.disconnect()
9,338 | pronsole.py | kliment_Printrun/pronsole.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import traceback
import logging
from printrun.pronsole import pronsole
import getopt
if __name__ == "__main__":
from printrun.printcore import __version__ as printcore_version
usage = "Usage:\n"+\
" pronsole [OPTIONS] [FILE]\n\n"+\
"Options:\n"+\
" -h, --help\t\t\tPrint this help message and exit\n"+\
" -V, --version\t\t\tPrint program's version number and exit\n"+\
" -v, --verbose\t\t\tIncrease verbosity\n"+\
" -c, --conf, --config=CONFIG_FILE\tLoad this file on startup instead of .pronsolerc; you may chain config files, if so settings auto-save will use the last specified file\n"+\
" -e, --execute=COMMAND\t\tExecutes command after configuration/.pronsolerc is loaded; macros/settings from these commands are not autosaved"
try:
opts, args = getopt.getopt(sys.argv[1:], "hVvc:e:", ["help", "version", "verbose", "conf=", "config=", "execute="])
except getopt.GetoptError as err:
print(str(err))
print(usage)
sys.exit(2)
for o, a in opts:
if o in ('-V','--version'):
print("printrun "+printcore_version)
sys.exit(0)
elif o in ('-h', '--help'):
print(usage)
sys.exit(0)
interp = pronsole()
interp.parse_cmdline(sys.argv[1:])
try:
interp.cmdloop()
except SystemExit:
interp.p.disconnect()
except:
logging.error(_("Caught an exception, exiting:")
+ "\n" + traceback.format_exc())
interp.p.disconnect()
| 2,295 | Python | .py | 53 | 37 | 189 | 0.649664 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,339 | plater.py | kliment_Printrun/plater.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import wx
import getopt
from printrun.stlplater import StlPlater
if __name__ == '__main__':
    # GUI entry point for the STL plater.
    from printrun.printcore import __version__ as printcore_version
    # Force the X11 backend; wx is presumably unhappy under Wayland — confirm.
    os.environ['GDK_BACKEND'] = 'x11'
    usage = "Usage:\n"+\
            " plater [OPTION]\n"+\
            " plater FILES\n\n"+\
            "Options:\n"+\
            " -V, --version\t\t\tPrint program's version number and exit\n"+\
            " -h, --help\t\t\tPrint this help message and exit\n" \
            " --no-gl\t\t\tUse 2D implementation, that seems unusable"
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hV", ["help", "version", 'no-gl'])
    except getopt.GetoptError as err:
        print(str(err))
        print(usage)
        sys.exit(2)
    for o, a in opts:
        if o in ('-V','--version'):
            print("printrun "+printcore_version)
            sys.exit(0)
        elif o in ('-h', '--help'):
            print(usage)
            sys.exit(0)
    app = wx.App(False)
    # NOTE(review): the raw argv (including any option flags such as
    # --no-gl) is handed to StlPlater as `filenames`; presumably it
    # filters out non-file arguments — confirm.
    main = StlPlater(filenames = sys.argv[1:])
    main.Show()
    app.MainLoop()
| 1,780 | Python | .py | 47 | 32.255319 | 84 | 0.643271 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,340 | mock-printer.py | kliment_Printrun/testtools/mock-printer.py | #!/usr/bin/env python3
# Test network communication without networked 3d printer
# Usage:
# bash1$ ./testtools/mock-printer.py
# bash2$ ./pronterface.py
# Enter localhost:8080 in Port, press Connect, Load file, Print
import socket
# Accept a single client connection and answer like a 3D printer.
with socket.socket() as s:
    s.bind(('127.0.0.1', 8080))
    s.listen(1)
    c, addr = s.accept()
    print(c)
    temp = 0  # fake temperature ramp counter
    try:
        # Greeting line, presumably expected by printcore on connect.
        c.sendall(b'start\n')
        while True:
            msg = c.recv(1024)
            if not msg:
                # Peer closed the connection.
                break
            print(msg)
            if msg == b'M105\n':
                # M105 = temperature report request.
                # c.sendall(('ok T:%d\n'%(20 + temp)).encode('ascii'))
                # test multiple extruders, see #1234
                c.sendall('ok T0:24.06 /34.00 B:23.45 /0.00 T1:44.28 /54 @:0 B@:0 @0:0 @1:0\n'.encode('ascii'))
                # NOTE(review): temp is incremented but unused by the
                # fixed reply above — leftover from the commented variant.
                temp = (temp + 1)%30
            else:
                # Generic ack for any other G/M-code.
                c.sendall(b'ok\n')
    finally:
        c.close()
| 906 | Python | .py | 29 | 23.068966 | 111 | 0.530217 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,341 | lengthy.py | kliment_Printrun/testtools/lengthy.py | #!/usr/bin/python3
#generate many g1 to test serial buffer overflow in run_gcode_script
#run like this:
#in pronsole> run_gcode_script ./testtools/lengthy.py
# Home X, move to 0, then emit 100 G1 moves, each preceded by a blank
# line and a single-space line to stress the G-code sender's parsing.
print('G28 X')
print('G1 X0')
for position in range(100):
    print()
    print(' ')
    print('G1 X', position)
| 258 | Python | .py | 10 | 23.6 | 68 | 0.697581 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,342 | gcodeviewer.py | kliment_Printrun/testtools/gcodeviewer.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import logging
logging.basicConfig(level=logging.INFO)
import wx
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from printrun.gcview import GcodeViewFrame
from printrun import gcoder
app = wx.App(redirect = False)
# Alternative centered-origin platform: [200, 200, 100, -100, -100, 0]
# (the previous version assigned it and immediately overwrote it).
build_dimensions = [200, 200, 100, 0, 0, 0]
frame = GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (800, 800), build_dimensions = build_dimensions)
gcode = gcoder.GCode(open(sys.argv[1]))
print("Gcode loaded")
frame.addfile(gcode)

# First and last actual moves bound the animation; `next` over a
# generator replaces the old index-based scan loops.
first_move = next((gline for gline in gcode.lines if gline.is_move), None)
last_move = next((gline for gline in reversed(gcode.lines) if gline.is_move), None)

nsteps = 20   # number of animation steps sampled across the file
steptime = 50  # milliseconds between steps
lines = [first_move] \
    + [gcode.lines[int(float(i) * (len(gcode.lines) - 1) / nsteps)]
       for i in range(1, nsteps)] + [last_move]
current_line = 0

def setLine():
    """Highlight the next sampled G-code line and re-arm the timer."""
    global current_line
    frame.set_current_gline(lines[current_line])
    current_line = (current_line + 1) % len(lines)
    timer.Start()

timer = wx.CallLater(steptime, setLine)
timer.Start()
frame.Show(True)
app.MainLoop()
app.Destroy()
| 2,014 | Python | .py | 56 | 33.607143 | 154 | 0.72704 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,343 | test_device.py | kliment_Printrun/tests/test_device.py | """Test suite for `printrun/device.py`"""
# How to run the tests (requires Python 3.11+):
# python3 -m unittest discover tests
# Standard libraries:
import socket
import unittest
from unittest import mock
# Third-party libraries:
import serial
# Custom libraries:
# pylint: disable-next=no-name-in-module
from printrun import device
def mock_sttyhup(cls):
    """Patch out Device._disable_ttyhup for the whole test class."""
    # Needed to avoid "stty: /mocked/port: No such file or directory".
    patcher = mock.patch("printrun.device.Device._disable_ttyhup")
    cls.enterClassContext(patcher)
def patch_serial(function, **kwargs):
    """Return a patcher for the named serial.Serial method."""
    target = "serial.Serial.%s" % function
    return mock.patch(target, **kwargs)
def patch_serial_is_open():
    """Patch serial.Serial with a spec whose instances report is_open=True."""
    serial_spec = mock.create_autospec(serial.Serial)
    serial_spec.return_value.is_open = True
    return mock.patch("serial.Serial", serial_spec)
def patch_socket(function, **kwargs):
    """Return a patcher for the named socket.socket method."""
    target = "socket.socket.%s" % function
    return mock.patch(target, **kwargs)
def patch_socketio(function, **kwargs):
    """Return a patcher for the named socket.SocketIO method."""
    target = "socket.SocketIO.%s" % function
    return mock.patch(target, **kwargs)
def setup_serial(test):
    """Connect a Device to a patched serial port.

    Returns (device, mocked serial.Serial.open).
    """
    dev = device.Device()
    # Register cleanup first so the port is released even if connect fails.
    test.addCleanup(dev.disconnect)
    opened_mock = test.enterContext(patch_serial("open"))
    dev.connect("/mocked/port")
    return dev, opened_mock
def setup_socket(test):
    """Connect a Device to a patched network socket.

    Returns (device, mocked socket.socket.connect).
    """
    dev = device.Device()
    # Register cleanup first so the connection is closed even on failure.
    test.addCleanup(dev.disconnect)
    connect_mock = test.enterContext(patch_socket("connect"))
    dev.connect("127.0.0.1:80")
    return dev, connect_mock
class TestInit(unittest.TestCase):
    """Test Device constructor"""

    def test_type_serial(self):
        """Check detecting serial devices"""
        serial_device = device.Device("/any/port")
        with self.subTest("`serial` type is set"):
            # pylint: disable-next=protected-access
            self.assertEqual(serial_device._type, "serial")
        with self.subTest("No flow control is set"):
            self.assertFalse(serial_device.has_flow_control)

    def test_type_socket(self):
        """Check detecting socket devices"""
        socket_device = device.Device("127.0.0.1:80")
        with self.subTest("Check `socket` type is set"):
            # pylint: disable-next=protected-access
            self.assertEqual(socket_device._type, "socket")
        with self.subTest("Check flow control is set"):
            self.assertTrue(socket_device.has_flow_control)

    def test_default_type(self):
        """`serial` type is assigned by default when type unknown"""
        # A URL matching neither scheme falls back to a serial port.
        fallback_device = device.Device("/any/port:")
        # pylint: disable-next=protected-access
        self.assertEqual(fallback_device._type, "serial")
class TestDisconnect(unittest.TestCase):
    """Test disconnect functionality"""

    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)

    def test_silent_on_no_device(self):
        """No error is raised when disconnecting a device not connected"""
        device.Device().disconnect()

    def test_socket_erorr(self):
        """DeviceError is raised if socket fails at disconnect"""
        dev, _ = setup_socket(self)
        broken_close = mock.patch('socket.socket.close', side_effect=socket.error)
        with broken_close:
            with self.assertRaises(device.DeviceError):
                dev.disconnect()

    def test_serial_erorr(self):
        """DeviceError is raised if serial fails at disconnect"""
        dev, _ = setup_serial(self)
        with patch_serial("close", side_effect=serial.SerialException):
            with self.assertRaises(device.DeviceError):
                dev.disconnect()
class TestConnect(unittest.TestCase):
    """Test connect functionality"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def setUp(self):
        # Fresh, unconnected Device per test; always disconnected afterwards.
        self.dev = device.Device()
        self.addCleanup(self.dev.disconnect)
    def _fake_serial_connect(self, port=None, baudrate=None, **kargs):
        """Connect self.dev over a mocked serial port; kargs configure the mock."""
        # Mock a serial connection with optional keyword arguments
        with patch_serial("open", **kargs) as mocked_open:
            self.dev.connect(port=port, baudrate=baudrate)
            mocked_open.assert_called()
    def _fake_socket_connect(self, port=None, **kargs):
        """Connect self.dev over a mocked socket; kargs configure the mock."""
        # Mock a socket connection with optional keyword arguments
        with patch_socket("connect", **kargs) as mocked_connect:
            self.dev.connect(port)
            mocked_connect.assert_called_once()
    def test_error_on_no_device(self):
        """DeviceError is raised when connecting to no port/URL"""
        with self.assertRaises(device.DeviceError):
            self.dev.connect()
        self.assertFalse(self.dev.is_connected)
    def test_erorr_on_bad_port(self):
        """DeviceError is raised when port does not exist"""
        # Serial raises a FileNotFoundError
        with self.assertRaises(device.DeviceError):
            self.dev.connect("/non/existent/port")
        self.assertFalse(self.dev.is_connected)
    def test_call_socket_connect(self):
        """socket.socket.connect is called and `is_connected` is set"""
        self._fake_socket_connect("127.0.0.1:80")
        self.assertTrue(self.dev.is_connected)
    def test_call_serial_open(self):
        """serial.Serial.open is called and `is_connected` is set"""
        with patch_serial_is_open() as mocked_serial:
            self.dev.connect("/mocked/port")
            mocked_serial.return_value.open.assert_called_once()
            self.assertTrue(self.dev.is_connected)
    def test_set_baudrate(self):
        """Successful connection sets `port` and `baudrate`"""
        self._fake_serial_connect("/mocked/port", 250000)
        self.assertTrue(self.dev.port == "/mocked/port")
        self.assertTrue(self.dev.baudrate == 250000)
    def test_set_dtr(self):
        """Test no error raised on setting DTR on connect"""
        self._fake_serial_connect("/mocked/port", dtr=True)
    def test_connect_already_connected(self):
        """Test connecting an already connected device"""
        # Second connect should transparently supersede the first.
        self._fake_serial_connect("/mocked/port")
        self._fake_serial_connect("/mocked/port2")
        self.assertTrue(self.dev.port == "/mocked/port2")
    def test_connect_serial_to_socket(self):
        """Test connecting from a port to a socket"""
        # pylint: disable=protected-access
        self._fake_serial_connect("/mocked/port")
        self.assertEqual(self.dev._type, "serial")
        self._fake_socket_connect("127.0.0.1:80")
        self.assertEqual(self.dev._type, "socket")
    def test_socket_error(self):
        """DeviceError is raised on socket.error on connect"""
        with self.assertRaises(device.DeviceError):
            self._fake_socket_connect("127.0.0.1:80", side_effect=socket.error)
        self.assertFalse(self.dev.is_connected)
class TestReset(unittest.TestCase):
    """Test reset functionality"""
    @classmethod
    def setUpClass(cls):
        # Keep Device from shelling out to stty for the fake ports.
        mock_sttyhup(cls)
    def setUp(self):
        # One device per connection type.
        self.serial_dev, _ = setup_serial(self)
        self.socket_dev, _ = setup_socket(self)
    def test_reset_serial(self):
        # TODO: this simply tests that no errors are raised
        self.serial_dev.reset()
    def test_reset_socket(self):
        # TODO: this simply tests that no errors are raised
        self.socket_dev.reset()
    def test_reset_disconnected(self):
        # TODO: this simply tests that no errors are raised
        # reset() on a never-connected device must be a harmless no-op.
        dev = device.Device("/a/port")
        dev.reset()
class TestReadSerial(unittest.TestCase):
    """Readline behaviour of Device over a serial connection."""
    @classmethod
    def setUpClass(cls):
        # Keep Device from shelling out to stty for the fake port.
        mock_sttyhup(cls)
    def setUp(self):
        self.dev, _ = setup_serial(self)
    def _fake_read(self, **kwargs):
        """Run one readline() against a patched serial.Serial.readline.

        Keyword arguments configure the patched readline (return_value,
        side_effect, ...). Returns whatever Device.readline() returned.
        """
        with patch_serial("readline", **kwargs) as mocked_read:
            result = self.dev.readline()
        mocked_read.assert_called_once()
        return result
    def test_calls_readline(self):
        """serial.Serial.readline is called"""
        self._fake_read()
    def test_read_data(self):
        """Data returned by serial.Serial.readline is passed as is"""
        payload = b"data\n"
        self.assertEqual(self._fake_read(return_value=payload), payload)
    def test_read_serial_exception(self):
        """DeviceError is raised on serial error during reading"""
        with self.assertRaises(device.DeviceError):
            self._fake_read(side_effect=serial.SerialException)
    def test_read_empty(self):
        """READ_EMPTY is returned when there's nothing to read"""
        # Serial.readline() returns b'' (aka `READ_EMPTY`) on timeout
        data = self._fake_read(return_value=b'')
        self.assertEqual(data, device.READ_EMPTY)
    def test_read_disconnected(self):
        """DeviceError is raised when reading from a disconnected device"""
        disconnected = device.Device("/a/port")
        with self.assertRaises(device.DeviceError):
            disconnected.readline()
class TestReadSocket(unittest.TestCase):
    """Readline behaviour of Device over a socket connection."""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def setUp(self):
        self.dev, _ = setup_socket(self)
    def _fake_read(self, **kwargs):
        """Run one readline() against a patched socket.SocketIO.read.

        Keyword arguments configure the patched read. Returns whatever
        Device.readline() returned.
        """
        with patch_socketio("read", **kwargs) as mocked_read:
            result = self.dev.readline()
        mocked_read.assert_called()
        return result
    def test_read_empty(self):
        """READ_EMPTY is returned when there's nothing to read"""
        # A non-blocking socket with no bytes pending makes readinto()
        # return None; the device must stay connected in that case.
        self.assertEqual(self._fake_read(return_value=None), device.READ_EMPTY)
        self.assertTrue(self.dev.is_connected)
    def test_read_eof(self):
        """READ_EOF is returned when connection is terminated"""
        # readinto() returning 0 means the peer shut the connection
        # down; the device must drop its connected state.
        self.assertEqual(self._fake_read(return_value=0), device.READ_EOF)
        self.assertFalse(self.dev.is_connected)
    def test_read_no_endpoint(self):
        """DeviceError is raised when connection is lost"""
        # Reading from a lost connection raises
        # "OSError: [Errno 107] Transport endpoint is not connected".
        with self.assertRaises(device.DeviceError):
            self.dev.readline()
        self.assertFalse(self.dev.is_connected)
    def test_read_data(self):
        """Data returned by socket.socket.read is passed as is"""
        payload = b"data\n"
        with mock.patch('socket.SocketIO.read', return_value=payload):
            self.assertEqual(self.dev.readline(), payload)
class TestWriteSerial(unittest.TestCase):
    """Test write functionality on serial connections"""
    @classmethod
    def setUpClass(cls):
        # Keep Device from shelling out to stty for the fake port.
        mock_sttyhup(cls)
    def _setup_serial_write(self, side_effect=None):
        # Set up a mocked serial with optional side effects for the
        # serial.Serial.write function
        class_mock = mock.create_autospec(serial.Serial)
        instance_mock = class_mock.return_value
        instance_mock.is_open = True
        if side_effect is not None:
            instance_mock.write.side_effect = side_effect
        mocked_serial = self.enterContext(mock.patch("serial.Serial",
                                                     class_mock))
        dev = device.Device()
        # Cleanup is registered before connecting so the device gets
        # disconnected even if connect() itself fails mid-test.
        self.addCleanup(dev.disconnect)
        dev.connect("/mocked/port")
        return dev, mocked_serial
    def test_write_no_device(self):
        """DeviceError is raised when device is not connected"""
        # This test serves for socket connections as well, this functionality
        # is independent of the underlying connection type
        empty_dev = device.Device()
        with self.assertRaises(device.DeviceError):
            empty_dev.write("test")
    def test_calls_serial_write(self):
        """serial.Serial.write is called"""
        dev, mocked_serial = self._setup_serial_write()
        dev.write("test")
        # The payload must be forwarded to the serial port unchanged.
        mocked_serial.return_value.write.assert_called_once_with("test")
    def test_write_serial_error(self):
        """DeviceError is raised on serial error during writing"""
        dev, _ = self._setup_serial_write(serial.SerialException)
        with self.assertRaises(device.DeviceError):
            dev.write("test")
class TestWriteSocket(unittest.TestCase):
    """Write behaviour of Device over a socket connection."""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def setUp(self):
        self.dev, _ = setup_socket(self)
    def _fake_write(self, data, **kwargs):
        """Write `data` through a patched socket.SocketIO.write.

        Keyword arguments configure the patched write (side_effect, ...).
        """
        with patch_socketio("write", **kwargs) as mocked_write:
            self.dev.write(data)
        mocked_write.assert_called_once_with(data)
    def test_calls_socket_write(self):
        """socket.socket.write is called"""
        self._fake_write(b"test")
    def test_write_errors(self):
        """DeviceError is raised on socket errors during writing"""
        # Any error while writing must raise DeviceError and terminate
        # the connection.
        self.assertTrue(self.dev.is_connected)
        for error in (OSError, RuntimeError):
            with self.subTest(error=error):
                with self.assertRaises(device.DeviceError):
                    self._fake_write(b"test", side_effect=error)
                self.assertFalse(self.dev.is_connected)
    def test_not_bytes(self):
        """TypeError is raised if argument is not of bytes type"""
        with self.assertRaises(TypeError):
            self.dev.write("string")
    def test_flush_timeout(self):
        """Silent on socket timeout during flushing"""
        # Current behaviour: socket.timeout during flush is deliberately
        # swallowed.
        with mock.patch('socket.SocketIO.flush', side_effect=socket.timeout):
            self._fake_write(b"test")
| 14,464 | Python | .py | 317 | 37.466877 | 79 | 0.660026 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,344 | test_printcore.py | kliment_Printrun/tests/test_printcore.py | """Test suite for `printrun/printcore.py`.
It also serves as a test for printrun.eventhandler.PrinterEventHandler.
"""
# How to run the tests (requires Python 3.11+):
# python3 -m unittest discover tests
# Standard libraries:
import random
import socket
import time
import unittest
from unittest import mock
# 3rd Party/Custom libraries:
import serial
from printrun import eventhandler
from printrun import gcoder
from printrun import printcore
DEFAULT_ANSWER = 'ok:\n'
CNC_PROCESS_TIME = 0.02  # in s

def slow_printer(*args):
    """Simulate a slow processing printer.

    Sleeps for a random fraction (0-90%) of one CNC processing period,
    then answers with the default "ok" reply as bytes. Any arguments
    are accepted and ignored so it can stand in for mock side effects.
    """
    delay_fraction = random.randint(0, 90) / 100
    time.sleep(CNC_PROCESS_TIME * delay_fraction)
    return DEFAULT_ANSWER.encode()
def wait_printer_cycles(cycles):
    """Block long enough for the fake printer to run `cycles` periods."""
    time.sleep(cycles * CNC_PROCESS_TIME)
def mock_sttyhup(cls):
    """Fake stty control"""
    # Needed to avoid error:
    # "stty: /mocked/port: No such file or directory"
    # The patch lives for the whole TestCase class via enterClassContext.
    cls.enterClassContext(
        mock.patch("printrun.device.Device._disable_ttyhup"))
def mock_serial(test, read_function=slow_printer):
    """Patch serial.Serial with a slow-responding, always-open fake.

    `read_function` supplies each readline() reply. Returns the class
    mock; the patch is undone automatically via test.enterContext.
    """
    serial_cls = mock.create_autospec(serial.Serial)
    fake_port = serial_cls.return_value
    fake_port.readline.side_effect = read_function
    fake_port.is_open = True
    return test.enterContext(mock.patch("serial.Serial", serial_cls))
def mock_socket(test, read_function=slow_printer):
    """Patch socket.socket with a slow-responding fake connection.

    `read_function` supplies each read() reply of the file-like object
    obtained through makefile(). Returns the class mock; the patch is
    undone automatically via test.enterContext.
    """
    socket_cls = mock.create_autospec(socket.socket)
    fake_file = socket_cls.return_value.makefile.return_value
    fake_file.read.side_effect = read_function
    return test.enterContext(mock.patch("socket.socket", socket_cls))
def add_mocked_handler(core):
    """Attach an autospec'd PrinterEventHandler to `core` and return it."""
    handler = mock.create_autospec(spec=eventhandler.PrinterEventHandler)
    core.addEventHandler(handler)
    return handler
def mock_callback(test, core, callback, **kwargs):
    """Fake a callback function of a printcore instance"""
    # Parameters
    # test: unittest.TestCase instance
    # core: printcore.printcore instance
    # callback: string with callback name, e.g. "onlinecb"
    # Extra kwargs (e.g. side_effect) are forwarded to mock.patch.object.
    return test.enterContext(mock.patch.object(core, callback, **kwargs))
def fake_preprintsend(gline, *_ignored):
    """Identity stand-in for a preprintsend callback.

    Echoes its first argument unchanged; extra arguments are ignored.
    """
    return gline
def assert_equal_glines(test, gline_a, gline_b):
    """Check if two gcoder.Line objects represent the same raw line.

    gcoder.Line does not implement comparison, so two lines holding the
    same information still compare unequal; compare their `raw` text
    instead. Uses assertEqual (rather than assertTrue on `==`) so a
    failure reports both raw strings.
    """
    test.assertEqual(gline_a.raw, gline_b.raw)
def subtest_mock(test, msg, mocks, check, *args):
    """Run the same mock assertion on every mock in `mocks`.

    `check` is the name of the assertion method (e.g.
    "assert_called_once"); each invocation runs in its own subTest so
    one failure does not mask the others.
    """
    for mocked in mocks:
        with test.subTest(msg, mock=mocked):
            getattr(mocked, check)(*args)
def setup_serial_core(test):
    """Set up a printcore instance connected to a fake serial port."""
    core = printcore.printcore()
    # Register the cleanup before connecting so the core is torn down
    # even if connect() fails part-way through the test.
    test.addCleanup(core.disconnect)
    mocked_serial = mock_serial(test)
    core.connect("/mocked/port", 1000)
    # Give the connect/read threads a moment to come up.
    wait_printer_cycles(2)
    return core, mocked_serial
def setup_socket_core(test):
    """Set up a printcore instance connected to a fake socket."""
    core = printcore.printcore()
    # Register the cleanup before connecting so the core is torn down
    # even if connect() fails part-way through the test.
    test.addCleanup(core.disconnect)
    mocked_socket = mock_socket(test)
    core.connect("1.2.3.4:56", 1000)
    # Give the connect/read threads a moment to come up.
    wait_printer_cycles(2)
    return core, mocked_socket
def setup_test_command():
    """Build one test command in raw, wire and gcoder.Line form."""
    raw = "Random Command"
    return {
        'raw': raw,
        'parsed': (raw + "\n").encode('ascii'),
        'gline': gcoder.GCode().append(raw, store=False),
    }
def checksum_command(command, lineno=0):
    """Return `command` with line number prefix and checksum suffix.

    The result is encoded for the wire, e.g. "G0 X1" with lineno 3
    becomes b"N3 G0 X1*<checksum>\\n".
    """
    core = printcore.printcore()
    numbered = f"N{lineno} {command}"
    # pylint: disable-next=protected-access
    checksum = core._checksum(numbered)
    return f"{numbered}*{checksum}\n".encode('ascii')
class TestInit(unittest.TestCase):
    """Functional checks for printcore's constructor"""
    def test_handler_on_init(self):
        """Test that the `on_init` event is triggered"""
        handler = mock.Mock(spec=eventhandler.PrinterEventHandler)
        patched = mock.patch('printrun.printcore.PRINTCORE_HANDLER',
                             [handler])
        with patched:
            printcore.printcore()
        handler.on_init.assert_called_once()
class TestConnect(unittest.TestCase):
    """Functional checks for the connect method"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def setUp(self):
        self.core = printcore.printcore()
    def test_connection_events(self):
        """Test events on a successful connection"""
        mock_serial(self)
        mocked_handler = add_mocked_handler(self.core)
        online_cb = mock_callback(self, self.core, "onlinecb")
        self.core.connect("/mocked/port", 1000)
        wait_printer_cycles(2)
        with self.subTest("Check `online` is set"):
            self.assertTrue(self.core.online)
        with self.subTest("Check read and send threads started"):
            # BUG FIX: is_alive is a method; asserting the bound method
            # object is always truthy. Call it to actually check
            # thread liveness.
            self.assertIsNotNone(self.core.read_thread)
            self.assertTrue(self.core.read_thread.is_alive())
            self.assertIsNotNone(self.core.send_thread)
            self.assertTrue(self.core.send_thread.is_alive())
        with self.subTest("Check the `on_connect` event is triggered"):
            mocked_handler.on_connect.assert_called_once()
        subtest_mock(self, "Check triggering the `online` event/callback",
                     (mocked_handler.on_online, online_cb),
                     "assert_called_once")
    def test_calls_socket_connect(self):
        """Test that socket.socket.connect() is called"""
        mocked_socket = mock_socket(self)
        url = ("192.168.1.200", 1234)
        self.core.connect(f"{url[0]}:{url[1]}", 1000)
        wait_printer_cycles(2)
        with self.subTest("Check the socket is opened"):
            mocked_socket.return_value.connect.assert_called_once_with(url)
        with self.subTest("Check underlying file-like resource is opened"):
            mocked_socket.return_value.makefile.assert_called_once()
    def test_calls_serial_open(self):
        """Test that serial.Serial.open() is called"""
        mocked_serial = mock_serial(self)
        self.core.connect("/mocked/port", 1000, 1)
        wait_printer_cycles(2)
        mocked_serial.return_value.open.assert_called_once()
    def test_bad_ports(self):
        """Test that an error is logged if connection fails"""
        # Neither the port nor the host exist, so both connection
        # types must fail and log.
        for port in ("/mocked/port", "1.2.3.4:56"):
            with self.subTest(port=port):
                with self.assertLogs(level="ERROR"):
                    self.core.connect(port, 1000)
    def test_ioerror(self):
        """Test that an error is logged if connection fails with IOError"""
        mocked_serial = mock_serial(self)
        mocked_serial.return_value.open.side_effect = IOError
        with self.assertLogs(level="ERROR"):
            self.core.connect("/mocked/port", 1000)
    def test_no_port(self):
        """Silent on attempting to connect to nothing"""
        self.core.connect()
    def test_already_connected(self):
        """Test that a previous connection is disconnected"""
        mock_serial(self)
        first_port = "/first/port"
        second_port = "/second/port"
        self.core.connect(first_port, 1000)
        wait_printer_cycles(2)
        self.assertEqual(self.core.printer.port, first_port)
        self.core.connect(second_port, 1000)
        wait_printer_cycles(4)
        self.assertEqual(self.core.printer.port, second_port)
    def test_handler_on_error(self):
        """Test that the `error` event and callback are triggered"""
        err_msg = "Not connected to printer."
        mocked_cb = mock_callback(self, self.core, "errorcb")
        mocked_handler = add_mocked_handler(self.core)
        # Sending while offline must report the error.
        self.core.send_now("Random Command")
        subtest_mock(self, "", (mocked_handler.on_error, mocked_cb),
                     "assert_called_once_with", err_msg)
    def tearDown(self):
        self.core.disconnect()
        self.core.eventhandler = []  # empty eventhandler
class TestDisconnect(unittest.TestCase):
    """Functional checks for the disconnect method"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def setUp(self):
        self.core, self.mocked_serial = setup_serial_core(self)
    def test_calls_serial_close(self):
        """Test that serial.Serial.close() is called"""
        self.core.disconnect()
        self.mocked_serial.return_value.close.assert_called()
    def test_calls_socket_close(self):
        """Test that socket.socket.close() is called"""
        core, mocked_socket = setup_socket_core(self)
        core.disconnect()
        # Give the background threads time to wind down.
        wait_printer_cycles(2)
        with self.subTest("Check the socket is closed"):
            mocked_socket.return_value.close.assert_called_once()
        with self.subTest("Check underlying file-like resource is closed"):
            socket_file = mocked_socket.return_value.makefile.return_value
            socket_file.close.assert_called_once()
    def test_disconnection_events(self):
        """Test events on a successful disconnection"""
        mocked_handler = add_mocked_handler(self.core)
        with self.subTest("Check `online` was set before test"):
            self.assertTrue(self.core.online)
        self.core.disconnect()
        with self.subTest("Check `online` is unset"):
            self.assertFalse(self.core.online)
        with self.subTest("Check the `on_disconnect` event is triggered"):
            mocked_handler.on_disconnect.assert_called_once()
        with self.subTest("Check read and send threads were removed"):
            self.assertIsNone(self.core.read_thread)
            self.assertIsNone(self.core.send_thread)
    def test_disconnect_error(self):
        """Test that an error is logged if disconnection fails"""
        with (
            mock.patch.object(self.mocked_serial.return_value, "close",
                              side_effect=serial.SerialException),
            self.assertLogs(level="ERROR")
        ):
            self.core.disconnect()
        # Check that `online` is unset even after an error
        self.assertFalse(self.core.online)
class TestSends(unittest.TestCase):
    """Functional checks for send and send_now methods"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
        cls.command = setup_test_command()['raw']
    def setUp(self):
        self.core, self.mocked_serial = setup_serial_core(self)
    def test_send_now_priqueue(self):
        """send_now() puts the command into `priqueue`"""
        with mock.patch.object(self.core, "priqueue") as queue_mock:
            self.core.send_now(self.command)
        queue_mock.put_nowait.assert_called_once_with(self.command)
    def test_send_priqueue(self):
        """send() puts the command into `priqueue` when not printing"""
        self.assertFalse(self.core.printing)
        with mock.patch.object(self.core, "priqueue") as queue_mock:
            self.core.send(self.command)
        queue_mock.put_nowait.assert_called_once_with(self.command)
    def test_send_mainqueue(self):
        """send() appends the command to `mainqueue` when printing"""
        with mock.patch.object(self.core, "printing", True):
            with mock.patch.object(self.core, "mainqueue") as queue_mock:
                self.assertTrue(self.core.printing)
                self.core.send(self.command)
                queue_mock.append.assert_called_once_with(self.command)
    def test_send_now_not_connected(self):
        """send_now() on an offline printer logs an error"""
        offline_core = printcore.printcore()
        with self.assertLogs(level="ERROR"):
            offline_core.send_now("Random Command")
    def test_send_not_connected(self):
        """send() on an offline printer logs an error"""
        offline_core = printcore.printcore()
        with self.assertLogs(level="ERROR"):
            offline_core.send("Random Command")
class TestPrint(unittest.TestCase):
    """Functional checks for startprint, cancelprint, pause and resume
    methods.

    These tests are timing-based: wait_printer_cycles() paces the
    assertions against the fake printer's simulated processing time.
    """
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
        # print_code parsed_print_code
        # ---------- -----------------
        # "G0 X0" b'N0 G0 X0*97\n'
        # "G0 X1" b'N1 G0 X1*97\n'
        # "G0 X2" b'N2 G0 X2*97\n'
        # "G0 X3" b'N3 G0 X3*97\n'
        # ... ...
        cls.print_layer_count = 10
        tmp = []
        for i in range(cls.print_layer_count):
            tmp = tmp + [
                f"G1 Z{i}", # move to layer (i)
                "G0 X1",
                f"G1 X100 E{i+1}",
            ]
        cls.print_line_count = len(tmp)
        cls.print_code = gcoder.GCode(tmp)
        # Pre-compute the exact bytes that should reach the printer.
        cls.parsed_print_code = []
        for i in range(cls.print_line_count):
            command = cls.print_code.lines[i].raw
            cls.parsed_print_code.append(checksum_command(command, i))
    def setUp(self):
        self.core, self.mocked_serial = setup_serial_core(self)
        self.mocked_handler = add_mocked_handler(self.core)
        self.start_cb = mock_callback(self, self.core, "startcb")
        self.end_cb = mock_callback(self, self.core, "endcb")
    def check_unfinished_print(self):
        """Check the first command was sent but the last wasn't"""
        first_call = mock.call(self.parsed_print_code[0])
        last_call = mock.call(
            self.parsed_print_code[self.print_line_count-1])
        write_calls = self.mocked_serial.return_value.write.mock_calls
        self.assertIn(first_call, write_calls)
        self.assertNotIn(last_call, write_calls)
    def check_finished_print(self):
        """Check that all commands were sent to the printer"""
        for line in self.parsed_print_code:
            self.mocked_serial.return_value.write.assert_any_call(line)
    def test_start_finish(self):
        """Test events from print start to finish"""
        layerchange_cb = mock_callback(self, self.core, "layerchangecb")
        preprintsend_cb = mock_callback(self, self.core, "preprintsendcb",
                                        side_effect=fake_preprintsend)
        printsend_cb = mock_callback(self, self.core, "printsendcb")
        with self.subTest("Check `printing` was unset before test"):
            self.assertFalse(self.core.printing)
        with self.subTest("Check startprint returns True on success"):
            self.assertTrue(self.core.startprint(self.print_code))
        # Wait for the print to commence
        wait_printer_cycles(4)
        with self.subTest("Check `printing` is set"):
            self.assertTrue(self.core.printing)
        subtest_mock(self, "Check triggering `start` event/callback",
                     (self.start_cb, self.mocked_handler.on_start),
                     "assert_called_once_with", False)
        # Let the print finish
        wait_printer_cycles(self.print_line_count*1.5)
        with self.subTest("Check that serial.Serial.write() was called"):
            self.check_finished_print()
        subtest_mock(self, "Check triggering `end` event/callback",
                     (self.end_cb, self.mocked_handler.on_end),
                     "assert_called_once")
        for item in (self.mocked_handler.on_layerchange, layerchange_cb):
            with self.subTest("Check triggering `layerchange` event/callback",
                              mock=item):
                for i in range(1, self.print_layer_count):
                    item.assert_any_call(i)
        with self.subTest("Check triggering `preprintsend` event"):
            event = self.mocked_handler.on_preprintsend
            # Had to use this workaround. See test_handler_on_send
            for i in range(self.print_line_count):
                # Get the arguments from the ith call to event
                call_args = event.call_args_list[i].args
                # Check the arguments of the call are as expected
                assert_equal_glines(self, call_args[0],
                                    self.core.mainqueue.lines[i])
                self.assertEqual(call_args[1], i)
                self.assertEqual(call_args[2], self.core.mainqueue)
        with self.subTest("Check triggering `preprintsend` callback"):
            # Had to use this workaround. See test_handler_on_send
            # The callback also receives the *next* line; the last call
            # gets None as the next line.
            for i in range(self.print_line_count-1):
                # Get the arguments from the ith call to callback
                call_args = preprintsend_cb.call_args_list[i].args
                # Check the arguments of the call are as expected
                assert_equal_glines(self, call_args[0],
                                    self.core.mainqueue.lines[i])
                assert_equal_glines(self, call_args[1],
                                    self.core.mainqueue.lines[i+1])
            i = self.print_line_count - 1
            last_call_args = preprintsend_cb.call_args_list[i].args
            assert_equal_glines(self, last_call_args[0],
                                self.core.mainqueue.lines[i])
            self.assertEqual(last_call_args[1], None)
        for item in (self.mocked_handler.on_printsend, printsend_cb):
            with self.subTest("Check triggering `printsend` event/callback",
                              mock=item):
                # Had to use this workaround. See test_handler_on_send
                for i in range(self.print_line_count):
                    # Get the arguments from the ith call to event
                    call_args = item.call_args_list[i].args
                    assert_equal_glines(self, call_args[0],
                                        self.core.mainqueue.lines[i])
    def test_start_startindex(self):
        """Test that only commands after resume point are sent to the
        printer"""
        # print_code parsed_print_code
        # ---------- -----------------
        # "G0 X0" not sent
        # "G0 X1" not sent
        # "G0 X2" b'N0 G0 X2*99\n'
        # "G0 X3" b'N1 G0 X3*99\n'
        # ... ...
        resume_index = 2 # resume_index < self.print_line_count
        parsed_print_code = []
        lineno = 0
        for i in range(resume_index, self.print_line_count):
            command = self.print_code.lines[i].raw
            parsed_print_code.append(checksum_command(command, lineno))
            lineno += 1
        self.core.startprint(self.print_code, startindex=resume_index)
        wait_printer_cycles(self.print_line_count*1.5)
        for line in parsed_print_code:
            self.mocked_serial.return_value.write.assert_any_call(line)
    def test_start_already_printing(self):
        """Test startprint returns False if already printing"""
        self.core.startprint(self.print_code)
        self.assertFalse(self.core.startprint(self.print_code))
    def test_start_offline(self):
        """Test startprint returns False if not connected"""
        core = printcore.printcore()
        self.assertFalse(core.startprint(self.print_code))
    def test_pause_resume(self):
        """Test events during pausing and resuming a print"""
        self.core.startprint(self.print_code)
        wait_printer_cycles(6)
        with self.subTest("Check `paused` is unset before pausing"):
            self.assertFalse(self.core.paused)
        with self.subTest("Check `printing` is set before pausing"):
            self.assertTrue(self.core.printing)
        # Pause mid print
        self.core.pause()
        wait_printer_cycles(6)
        with self.subTest("Check `paused` is set after pausing"):
            self.assertTrue(self.core.paused)
        with self.subTest("Check `printing` is unset after pausing"):
            self.assertFalse(self.core.printing)
        with self.subTest("Check print didn't finish yet"):
            self.check_unfinished_print()
        subtest_mock(self, "Check triggering `end` event/callback on pause",
                     (self.mocked_handler.on_end, self.end_cb),
                     "assert_called_once")
        # Resume print
        self.core.resume()
        wait_printer_cycles(6)
        with self.subTest("Check `paused` is unset after resuming"):
            self.assertFalse(self.core.paused)
        with self.subTest("Check `printing` is set after resuming"):
            self.assertTrue(self.core.printing)
        subtest_mock(self, "Check `start` event/callback when resuming",
                     (self.start_cb, self.mocked_handler.on_start),
                     "assert_called_with", True)
        # Let the print finish
        wait_printer_cycles(self.print_line_count*1.5)
        with self.subTest("Check that `resume` finishes the print"):
            self.check_finished_print()
    def test_resume_offline(self):
        """Test resume returns False if not connected"""
        core = printcore.printcore()
        self.assertFalse(core.resume())
    def test_pause_offline(self):
        """Test pause returns False if not connected"""
        core = printcore.printcore()
        self.assertFalse(core.pause())
    def test_cancel(self):
        """Test events after canceling a print"""
        # Start a print and cancel it mid-print
        self.core.startprint(self.print_code)
        wait_printer_cycles(self.print_line_count/3)
        self.core.cancelprint()
        wait_printer_cycles(6)
        with self.subTest("Check `printing` is unset"):
            self.assertFalse(self.core.printing)
        with self.subTest("Check `paused` is unset"):
            self.assertFalse(self.core.paused)
        with self.subTest("Check `mainqueue` is deleted"):
            self.assertIsNone(self.core.mainqueue)
        with self.subTest("Check the print is unfinished"):
            self.check_unfinished_print()
        subtest_mock(self, "Test triggering `end` event/callback",
                     (self.mocked_handler.on_end, self.end_cb),
                     "assert_called_once")
    def test_host_command(self):
        """Test calling host-commands"""
        # The ";@pause" host-command must pause the print.
        print_lines = []
        for i in range(2):
            print_lines.append(f"G1 X{i}")
        print_lines.append(';@pause')
        print_code = gcoder.GCode(print_lines)
        self.core.startprint(print_code)
        wait_printer_cycles(len(print_lines)*2)
        self.assertTrue(self.core.paused)
class TestReset(unittest.TestCase):
    """Functional checks for the reset method"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def setUp(self):
        self.core, self.mocked_serial = setup_serial_core(self)
    def test_calls_serial_dtr(self):
        """reset() drives the serial DTR attribute to zero"""
        self.core.reset()
        # The reset is performed by pulling DTR low on the serial port.
        self.assertEqual(self.mocked_serial.return_value.dtr, 0)
class TestSendThread(unittest.TestCase):
    """Functional checks for the sending thread"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
        test_command = setup_test_command()
        cls.command = test_command['raw']
        cls.parsed_command = test_command['parsed']
        cls.parsed_gline = test_command['gline']
    def setUp(self):
        self.core, self.mocked_serial = setup_serial_core(self)
    def test_priority_command(self):
        """Test that commands are sent to the printer from priqueue"""
        self.core.send_now(self.command)
        wait_printer_cycles(2)
        self.mocked_serial.return_value.write.assert_called_with(
            self.parsed_command)
        # assertEqual reports the actual count on failure, unlike
        # assertTrue(x == 0).
        self.assertEqual(self.core.writefailures, 0)
    def test_calls_socket_write(self):
        """Test that socket file resource is written to"""
        core, mocked_socket = setup_socket_core(self)
        socket_file = mocked_socket.return_value.makefile.return_value
        core.send_now(self.command)
        wait_printer_cycles(2)
        socket_file.write.assert_called_with(self.parsed_command)
        # BUG FIX: this previously checked `self.core` (the serial core
        # created in setUp) instead of the socket-connected `core`
        # actually under test.
        self.assertEqual(core.writefailures, 0)
    def test_handler_on_send(self):
        """Test that the `on_send` event is triggered"""
        mocked_handler = add_mocked_handler(self.core)
        mocked_cb = mock_callback(self, self.core, "sendcb")
        self.core.send_now(self.command)
        wait_printer_cycles(2)
        # Ideal code:
        # func = mocked_handler.on_send
        # func.assert_called_once_with(self.command, self.parsed_gline)
        #
        # Had to use a workaround. See `assert_equal_glines`
        for item in (mocked_handler.on_send, mocked_cb):
            with self.subTest("Check triggering `send` event/callback",
                              mock=item):
                self.assertEqual(self.command, item.call_args.args[0])
                assert_equal_glines(self, self.parsed_gline,
                                    item.call_args.args[1])
    def test_write_serial_error(self):
        """Test an error is logged when serial error during writing"""
        with (
            mock.patch.object(self.mocked_serial.return_value, "write",
                              side_effect=serial.SerialException),
            self.assertLogs(level="ERROR")
        ):
            self.core.send(self.command)
            wait_printer_cycles(2)
        self.assertEqual(self.core.writefailures, 1)
    def test_write_socket_error(self):
        """Test an error is logged when socket error during writing"""
        core, mocked_socket = setup_socket_core(self)
        socket_file = mocked_socket.return_value.makefile.return_value
        with (
            mock.patch.object(socket_file, "write",
                              side_effect=socket.error),
            self.assertLogs(level="ERROR")
        ):
            core.send(self.command)
            wait_printer_cycles(2)
        self.assertEqual(core.writefailures, 1)
class TestListenThread(unittest.TestCase):
    """Functional checks for the listening thread"""
    @classmethod
    def setUpClass(cls):
        mock_sttyhup(cls)
    def custom_slow_printer(self):
        """Simulate a slow processing printer"""
        # Unlike the module-level slow_printer, the reply can be changed
        # mid-test through `self.printer_answer`.
        time.sleep(CNC_PROCESS_TIME*random.randint(0, 90)/100)
        return self.printer_answer
    def setUp(self):
        self.printer_answer = DEFAULT_ANSWER.encode()
        self.mocked_serial = mock_serial(self, self.custom_slow_printer)
        self.core = printcore.printcore()
        self.mocked_handler = add_mocked_handler(self.core)
        self.recvcb = mock_callback(self, self.core, "recvcb")
        self.core.connect("/mocked/port", 1000)
        # Give the listen thread time to pick up the first replies.
        wait_printer_cycles(2)
    def test_handler_on_recv(self):
        """Test that the `on_recv` event is triggered"""
        event = self.mocked_handler.on_recv
        cb = self.recvcb
        subtest_mock(self, "", (event, cb), "assert_any_call", DEFAULT_ANSWER)
    def test_handler_on_temp(self):
        """Test that the `on_temp` event is triggered"""
        event = self.mocked_handler.on_temp
        cb = mock_callback(self, self.core, "tempcb")
        # "T:" in the reply marks it as a temperature report.
        answer = f"{DEFAULT_ANSWER} T:"
        self.printer_answer = answer.encode()
        wait_printer_cycles(2)
        subtest_mock(self, "", (event, cb), "assert_any_call", answer)
    def test_read_resend(self):
        """Check resendfrom is set when resend is read"""
        self.printer_answer = "rs N2 Expected checksum 67".encode()
        wait_printer_cycles(2)
        self.assertEqual(self.core.resendfrom, 2)
    def test_read_none(self):
        """Test that an error is logged if None is read"""
        with self.assertLogs(level="ERROR"):
            self.printer_answer = None
            wait_printer_cycles(2)
    def test_read_bad_encoding(self):
        """Check that an error is logged on bad encoding"""
        with self.assertLogs(level="ERROR"):
            # 0xC0 is not a valid UTF-8 byte sequence on its own.
            self.printer_answer = b'\xC0'
            wait_printer_cycles(2)
    def test_read_serial_error(self):
        """Check error is logged when serial error while reading"""
        with (
            self.assertLogs(level="ERROR"),
            mock.patch.object(self.mocked_serial.return_value, "readline",
                              side_effect=serial.SerialException)
        ):
            wait_printer_cycles(2)
    def test_calls_socket_read(self):
        """Test that socket file resource is read"""
        core, mocked_socket = setup_socket_core(self)
        socket_file = mocked_socket.return_value.makefile.return_value
        socket_file.read.assert_called()
    def test_read_socket_error(self):
        """Check error is logged when socket error while reading"""
        # socket.error raised from the read path must be logged too.
        with (
            self.assertLogs(level="ERROR"),
            mock.patch.object(self.mocked_serial.return_value, "readline",
                              side_effect=socket.error)
        ):
            wait_printer_cycles(2)
    def test_read_error(self):
        """Test that an error is logged if 'Error' is read"""
        answer = "Error check."
        with self.assertLogs(level="ERROR"):
            self.printer_answer = answer.encode()
            wait_printer_cycles(2)
    def tearDown(self):
        # Restore a normal reply so teardown reads don't log errors.
        self.printer_answer = DEFAULT_ANSWER.encode()
        wait_printer_cycles(2)
        self.core.disconnect()
| 30,032 | Python | .py | 637 | 37.453689 | 78 | 0.631914 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,345 | device.py | kliment_Printrun/printrun/device.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
# Standard libraries:
import os
import platform
import re
import selectors
import socket
import time
# Third-party libraries
import serial
READ_EMPTY = b''
"""Constant to represent empty or no data"""
READ_EOF = None
"""Constant to represent an end-of-file"""
class Device():
    """Handler for serial and web socket connections.

    Provides the same functions for both so it abstracts what kind of
    connection is being used.

    Parameters
    ----------
    port : str, optional
        Either a device name, such as '/dev/ttyUSB0' or 'COM3', or an URL with
        port, such as '192.168.0.10:80' or 'http://www.example.com:8080'.
    baudrate : int, optional
        Communication speed in bit/s, such as 9600, 115200 or 250000.
        (Default is 9600)
    force_dtr : bool or None, optional
        On serial connections, force the DTR bit to a specific logic level
        (1 or 0) after a successful connection. Not all OS/drivers support
        this functionality. By default it is set to "None" to let the system
        handle it automatically.
    parity_workaround : bool, optional
        On serial connections, enable/disable a workaround on parity
        checking. Not all platforms need to do this parity workaround, and
        some drivers don't support it. By default it is disabled.

    Attributes
    ----------
    is_connected
    has_flow_control
    """

    # Matches a dotted-quad IPv4 address or an RFC 1123 hostname. Raw
    # string (the escapes \. and \- are invalid in a plain literal on
    # modern Python) and compiled once instead of on every _is_url() call.
    _HOST_RE = re.compile(
        r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}"
        r"([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
        r"|^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*"
        r"([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$")

    def __init__(self, port=None, baudrate=9600, force_dtr=None,
                 parity_workaround=False):
        self.port = port
        self.baudrate = baudrate
        self.force_dtr = force_dtr
        self.parity_workaround = parity_workaround

        # Private
        self._device = None          # serial.Serial or socket.socket
        self._is_connected = False   # tracked manually for sockets
        self._hostname = None
        self._socketfile = None
        self._port_number = None
        self._read_buffer = []       # pending chunks for _readline_socket
        self._selector = None
        self._timeout = 0.25
        self._type = None            # 'serial' or 'socket'

        if port is not None:
            self._parse_type()

    def connect(self, port=None, baudrate=None):
        """Establishes the connection to the device.

        Parameters
        ----------
        port : str, optional
            See `port` attribute. Only required if it was not provided
            already.
        baudrate : int, optional
            See `baudrate` attribute. Only required if it was not provided
            already.

        Raises
        ------
        DeviceError
            If an error occurred when attempting to connect.
        """
        if port is not None:
            self.port = port
        if baudrate is not None:
            self.baudrate = baudrate
        if self.port is not None:
            self._parse_type()
            getattr(self, "_connect_" + self._type)()
        else:
            raise DeviceError("No port or URL specified")

    def disconnect(self):
        """Terminates the connection to the device."""
        if self._device is not None:
            getattr(self, "_disconnect_" + self._type)()

    @property
    def is_connected(self):
        """True if connection to peer is alive.

        Warnings
        --------
        Current implementation for socket connections only tracks status of
        the connection but does not actually check it. So, if it is used to
        check the connection before sending data, it might fail to prevent an
        error being raised due to a lost connection.
        """
        if self._device is not None:
            return getattr(self, "_is_connected_" + self._type)()
        return False

    @property
    def has_flow_control(self):
        """True if the device has flow control mechanics."""
        if self._type == 'socket':
            return True
        return False

    def readline(self) -> bytes:
        """Read one line from the device stream.

        Returns
        -------
        bytes
            Array containing the feedback received from the
            device. `READ_EMPTY` will be returned if no data was
            available. `READ_EOF` is returned if connection was terminated at
            the other end.

        Raises
        ------
        DeviceError
            If connected peer is unreachable.
        """
        # TODO: silent fail on no device? return timeout?
        if self._device is not None:
            return getattr(self, "_readline_" + self._type)()
        raise DeviceError("Attempted to read when disconnected")

    def reset(self):
        """Attempt to reset the connection to the device.

        Warnings
        --------
        Current implementation has no effect on socket connections.
        """
        if self._device is not None:
            if self._type == 'serial':
                getattr(self, "_reset_" + self._type)()

    def write(self, data: bytes):
        """Write data to the connected peer.

        Parameters
        ----------
        data: bytes
            The bytes data to be written. This should be of type `bytes` (or
            compatible such as `bytearray` or `memoryview`). Unicode strings
            must be encoded.

        Raises
        ------
        DeviceError
            If connected peer is unreachable.
        TypeError
            If `data` is not of 'bytes' type.
        """
        if self._device is not None:
            getattr(self, "_write_" + self._type)(data)
        else:
            raise DeviceError("Attempted to write when disconnected")

    def _parse_type(self):
        # Guess which type of connection is being used
        if self._is_url(self.port):
            self._type = 'socket'
        else:
            self._type = 'serial'

    def _is_url(self, text):
        # Accepts "host:port" where host matches _HOST_RE and port is a
        # valid TCP port. Parsed host/port are stored as a side effect for
        # the later connection attempt.
        if ':' in text:
            bits = text.split(":")
            if len(bits) == 2:
                self._hostname = bits[0]
                try:
                    self._port_number = int(bits[1])
                    if (self._HOST_RE.match(self._hostname) and
                            1 <= self._port_number <= 65535):
                        return True
                except ValueError:
                    # Port part was not an integer, e.g. "COM3:x"
                    pass
        return False

    # ------------------------------------------------------------------------
    # Serial Functions
    # ------------------------------------------------------------------------
    def _connect_serial(self):
        # Disable HUPCL
        # TODO: Check if still required
        self._disable_ttyhup()
        try:
            # TODO: Check if this trick is still needed
            if self.parity_workaround:
                self._device = serial.Serial(port=self.port,
                                             baudrate=self.baudrate,
                                             timeout=0.25,
                                             parity=serial.PARITY_ODD)
                self._device.close()
                self._device.parity = serial.PARITY_NONE
            else:
                self._device = serial.Serial(baudrate=self.baudrate,
                                             timeout=0.25,
                                             parity=serial.PARITY_NONE)
                self._device.port = self.port

            # TODO: Check if this is still required
            if self.force_dtr is not None:
                self._device.dtr = self.force_dtr

            self._device.open()
        except (serial.SerialException, IOError) as e:
            msg = "Could not connect to serial port '{}'".format(self.port)
            raise DeviceError(msg, e) from e

    def _is_connected_serial(self):
        return self._device.is_open

    def _disconnect_serial(self):
        try:
            self._device.close()
        except serial.SerialException as e:
            msg = "Error on serial disconnection"
            raise DeviceError(msg, e) from e

    def _readline_serial(self):
        try:
            # Serial.readline() returns b'' (aka `READ_EMPTY`) on timeout
            return self._device.readline()
        except (serial.SerialException, OSError) as e:
            msg = f"Unable to read from serial port '{self.port}'"
            raise DeviceError(msg, e) from e

    def _reset_serial(self):
        # Pulse DTR; most controller boards treat this as a reset signal
        self._device.dtr = True
        time.sleep(0.2)
        self._device.dtr = False

    def _write_serial(self, data):
        try:
            self._device.write(data)
        except serial.SerialException as e:
            # FIX: this message was missing the f-prefix, so the literal
            # text "{self.port}" ended up in the error instead of the port.
            msg = f"Unable to write to serial port '{self.port}'"
            raise DeviceError(msg, e) from e

    def _disable_ttyhup(self):
        if platform.system() == "Linux":
            os.system("stty -F %s -hup" % self.port)

    # ------------------------------------------------------------------------
    # Socket Functions
    # ------------------------------------------------------------------------
    def _connect_socket(self):
        self._device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._device.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self._timeout = 0.25
        self._device.settimeout(1.0)
        try:
            self._device.connect((self._hostname, self._port_number))
            # A single read timeout raises OSError for all later reads
            # probably since python 3.5 use non blocking instead
            self._device.settimeout(0)
            self._socketfile = self._device.makefile('rwb', buffering=0)
            self._selector = selectors.DefaultSelector()
            self._selector.register(self._device, selectors.EVENT_READ)
            self._is_connected = True
        except OSError as e:
            self._disconnect_socket()
            msg = "Could not connect to {}:{}".format(self._hostname,
                                                      self._port_number)
            raise DeviceError(msg, e) from e

    def _is_connected_socket(self):
        # TODO: current implementation tracks status of connection but
        # does not actually check it. Ref. is_connected()
        return self._is_connected

    def _disconnect_socket(self):
        self._is_connected = False
        try:
            if self._socketfile is not None:
                self._socketfile.close()
            if self._selector is not None:
                self._selector.unregister(self._device)
                self._selector.close()
                self._selector = None
            self._device.close()
        except OSError as e:
            msg = "Error on socket disconnection"
            raise DeviceError(msg, e) from e

    def _readline_socket(self):
        SYS_AGAIN = None  # python's marker for timeout/no data
        # SYS_EOF = b''  # python's marker for EOF
        try:
            line = self._readline_buf()
            if line:
                return line
            chunk_size = 256
            while True:
                chunk = self._socketfile.read(chunk_size)
                if (chunk is SYS_AGAIN and
                        self._selector.select(self._timeout)):
                    # Data became readable during the wait; retry once
                    chunk = self._socketfile.read(chunk_size)
                if chunk:
                    self._read_buffer.append(chunk)
                    line = self._readline_buf()
                    if line:
                        return line
                elif chunk is SYS_AGAIN:
                    return READ_EMPTY
                else:  # chunk is SYS_EOF
                    # Peer closed: flush whatever is buffered, then report EOF
                    line = b''.join(self._read_buffer)
                    self._read_buffer = []
                    if line:
                        return line
                    self._is_connected = False
                    return READ_EOF
        except OSError as e:
            self._is_connected = False
            msg = ("Unable to read from {}:{}. Connection lost"
                   ).format(self._hostname, self._port_number)
            raise DeviceError(msg, e) from e

    def _readline_buf(self):
        # Try to assemble one complete line from previously buffered chunks.
        # Only the newest chunk can contain the newline, since older ones
        # were already scanned.
        if self._read_buffer:
            chunk = self._read_buffer[-1]
            eol = chunk.find(b'\n')
            if eol >= 0:
                line = b''.join(self._read_buffer[:-1]) + chunk[:(eol + 1)]
                self._read_buffer = []
                if eol + 1 < len(chunk):
                    # Keep the remainder after the newline for the next call
                    self._read_buffer.append(chunk[(eol + 1):])
                return line
        return READ_EMPTY

    def _write_socket(self, data):
        try:
            self._socketfile.write(data)
            try:
                self._socketfile.flush()
            except socket.timeout:
                pass
        except (OSError, RuntimeError) as e:
            self._is_connected = False
            msg = ("Unable to write to {}:{}. Connection lost"
                   ).format(self._hostname, self._port_number)
            raise DeviceError(msg, e) from e
class DeviceError(Exception):
    """Single exception type for every connection failure.

    Wraps serial-level and socket-level errors alike, so callers only need
    to handle one exception class regardless of the connection type.

    Parameters
    ----------
    msg : str
        Human-readable error message.
    cause : Exception, optional
        The lower-level exception that triggered this one, if any.

    Attributes
    ----------
    cause : Exception or None
        The wrapped lower-level exception.
    """

    def __init__(self, msg, cause=None):
        super().__init__(msg)
        # Keep the originating exception around for callers wanting details.
        self.cause = cause
| 14,218 | Python | .py | 354 | 29.276836 | 249 | 0.5505 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,346 | printcore.py | kliment_Printrun/printrun/printcore.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
__version__ = "2.2.0"

import sys
# Hard requirement: bail out early with a clear message on Python 2.
if sys.version_info.major < 3:
    print("You need to run this on Python 3")
    sys.exit(-1)

import threading
from queue import Queue, Empty as QueueEmpty
import time
import logging
import traceback
from functools import wraps, reduce
from collections import deque

from printrun import gcoder
from printrun import device
from .utils import set_utf8_locale, install_locale, decode_utf8

# Best-effort: a missing/unsupported locale must not prevent startup.
try:
    set_utf8_locale()
except:
    pass
install_locale('pronterface')
from printrun.plugins import PRINTCORE_HANDLER
def locked(f):
    """Return *f* wrapped so that concurrent calls are serialised.

    The guarding ``threading.Lock`` is stored on the wrapper as its
    ``lock`` attribute, so it can be inspected (or replaced) from outside.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Look the lock up through the wrapper on every call, so external
        # reassignment of wrapper.lock takes effect.
        with wrapper.lock:
            return f(*args, **kwargs)
    wrapper.lock = threading.Lock()
    return wrapper
# Sentinels translating between printrun's and python's conventions for
# "end of file" and "no data yet" on a read: python file objects return
# b'' at EOF and None on a non-blocking timeout; printrun uses the
# opposite pair.
PR_EOF = None     # printrun's marker for EOF
PR_AGAIN = b''    # printrun's marker for timeout/no data
SYS_EOF = b''     # python's marker for EOF
SYS_AGAIN = None  # python's marker for timeout/no data
class printcore():
    """Core 3D printer host functionality.

    If `port` and `baud` are specified, `connect` is called immediately.

    Parameters
    ----------
    port : str, optional
        Either a device name, such as '/dev/ttyUSB0' or 'COM3', or an URL with
        port, such as '192.168.0.10:80' or 'http://www.example.com:8080'. Only
        required if it was not provided already.
    baud : int, optional
        Communication speed in bit/s, such as 9600, 115200 or 250000. Only
        required if it was not provided already.
    dtr : bool, optional
        On serial connections, enable/disable hardware DTR flow
        control. (Default is None)

    Attributes (WIP)
    ----------
    analyzer : GCode
        A `printrun.gcoder.GCode` object containing all the G-code commands
        sent to the printer.
    baud
    dtr
    event_handler : list of PrinterEventHandler
        Collection of event-handling objects. The relevant method of each
        handler on this list will be triggered at the relevant process
        stage. See `printrun.eventhandler.PrinterEventHandler`.
    mainqueue : GCode
        The main command queue. A `printrun.gcoder.GCode` object containing an
        array of G-code commands. A call to `startprint` will populate this
        list and `printcore` will then gradually send the commands in this
        queue to the printer.
    online : bool
        True if the printer has responded to the initial command and is
        active.
    paused : bool
        True if there is a print currently on pause.
    port
    printing : bool
        True if there is a print currently running.
    priqueue : Queue
        The priority command queue. Commands in this queue will be gradually
        sent to the printer. If there are commands in the `mainqueue` the ones
        in `priqueue` will be sent ahead of them. See `queue.Queue`.
    """

    def __init__(self, port=None, baud=None, dtr=None):
        self.baud = None
        self.dtr = None
        self.port = None
        self.analyzer = gcoder.GCode()
        # Serial instance connected to the printer, should be None when
        # disconnected
        self.printer = None
        # clear to send, enabled after responses
        # FIXME: should probably be changed to a sliding window approach
        self.clear = 0
        # The printer has responded to the initial command and is active
        self.online = False
        # is a print currently running, true if printing, false if paused
        self.printing = False
        self.mainqueue = None
        self.priqueue = Queue(0)
        self.queueindex = 0
        self.lineno = 0
        self.resendfrom = -1
        self.paused = False
        self.sentlines = {}
        self.log = deque(maxlen=10000)
        self.sent = []
        self.writefailures = 0
        self.tempcb = None  # impl (wholeline)
        self.recvcb = None  # impl (wholeline)
        self.sendcb = None  # impl (wholeline)
        self.preprintsendcb = None  # impl (wholeline)
        self.printsendcb = None  # impl (wholeline)
        self.layerchangecb = None  # impl (wholeline)
        self.errorcb = None  # impl (wholeline)
        self.startcb = None  # impl ()
        self.endcb = None  # impl ()
        self.onlinecb = None  # impl ()
        self.loud = False  # emit sent and received lines to terminal
        self.tcp_streaming_mode = False
        self.greetings = ['start', 'Grbl ']
        self.wait = 0  # default wait period for send(), send_now()
        self.read_thread = None
        self.stop_read_thread = False
        self.send_thread = None
        self.stop_send_thread = False
        self.print_thread = None
        self.readline_buf = []
        self.selector = None
        self.event_handler = PRINTCORE_HANDLER
        for handler in self.event_handler:
            try: handler.on_init()
            except: logging.error(traceback.format_exc())
        if port is not None and baud is not None:
            self.connect(port, baud)
        self.xy_feedrate = None
        self.z_feedrate = None

    def addEventHandler(self, handler):
        '''
        Adds an event handler.

        @param handler: The handler to be added.
        '''
        self.event_handler.append(handler)

    def logError(self, error):
        # Fan the error out to all handlers, then to the error callback if
        # one is set, falling back to the standard logger otherwise.
        for handler in self.event_handler:
            try: handler.on_error(error)
            except: logging.error(traceback.format_exc())
        if self.errorcb:
            try: self.errorcb(error)
            except: logging.error(traceback.format_exc())
        else:
            logging.error(error)

    @locked
    def disconnect(self):
        """Disconnects from printer and pauses the print
        """
        if self.printer:
            if self.read_thread:
                self.stop_read_thread = True
                # The read thread itself may call disconnect(); never join
                # the current thread.
                if threading.current_thread() != self.read_thread:
                    self.read_thread.join()
                self.read_thread = None
            if self.print_thread:
                self.printing = False
                self.print_thread.join()
            self._stop_sender()
            try:
                self.printer.disconnect()
            except device.DeviceError:
                self.logError(traceback.format_exc())
        for handler in self.event_handler:
            try: handler.on_disconnect()
            except: logging.error(traceback.format_exc())
        self.printer = None
        self.online = False
        self.printing = False

    @locked
    def connect(self, port=None, baud=None, dtr=None):
        """Set port and baudrate if given, then connect to printer
        """
        if self.printer:
            self.disconnect()
        if port is not None:
            self.port = port
        if baud is not None:
            self.baud = baud
        if dtr is not None:
            self.dtr = dtr
        if self.port is not None and self.baud is not None:
            self.writefailures = 0
            self.printer = device.Device()
            self.printer.force_dtr = self.dtr
            try:
                self.printer.connect(self.port, self.baud)
            except device.DeviceError as e:
                self.logError("Connection error: %s" % e)
                self.printer = None
                return
            for handler in self.event_handler:
                try: handler.on_connect()
                except: logging.error(traceback.format_exc())
            self.stop_read_thread = False
            self.read_thread = threading.Thread(target=self._listen,
                                                name='read thread')
            self.read_thread.start()
            self._start_sender()

    def reset(self):
        """Attempt to reset the connection to the printer.

        Warnings
        --------
        Current implementation resets a serial connection by disabling
        hardware DTR flow control. It has no effect on socket connections.
        """
        self.printer.reset()

    def _readline(self):
        # Read one line from the printer, dispatching it to handlers and
        # callbacks. Returns PR_EOF on EOF, None on read errors, PR_AGAIN
        # (b'') on timeout, or the decoded line.
        try:
            line_bytes = self.printer.readline()
            if line_bytes is device.READ_EOF:
                self.logError("Can't read from printer (disconnected?)." +
                              " line_bytes is None")
                self.stop_read_thread = True
                return PR_EOF

            line = line_bytes.decode('utf-8')

            if len(line) > 1:
                self.log.append(line)
                for handler in self.event_handler:
                    try: handler.on_recv(line)
                    except: logging.error(traceback.format_exc())
                if self.recvcb:
                    try: self.recvcb(line)
                    except: self.logError(traceback.format_exc())
                if self.loud: logging.info("RECV: %s" % line.rstrip())
            return line
        except UnicodeDecodeError:
            msg = ("Got rubbish reply from {0} at baudrate {1}:\n"
                   "Maybe a bad baudrate?").format(self.port, self.baud)
            self.logError(msg)
            return None
        except device.DeviceError as e:
            msg = ("Can't read from printer (disconnected?) {0}"
                   ).format(decode_utf8(str(e)))
            self.logError(msg)
            return None

    def _listen_can_continue(self):
        # The read loop runs while not asked to stop and the link is up
        return (not self.stop_read_thread
                and self.printer
                and self.printer.is_connected)

    def _listen_until_online(self):
        # Poll with M105 until the firmware answers something recognisable
        while not self.online and self._listen_can_continue():
            self._send("M105")
            if self.writefailures >= 4:
                logging.error(_("Aborting connection attempt after 4 failed writes."))
                return
            empty_lines = 0
            while self._listen_can_continue():
                line = self._readline()
                if line is None: break  # connection problem
                # workaround cases where M105 was sent before printer Serial
                # was online an empty line means read timeout was reached,
                # meaning no data was received thus we count those empty lines,
                # and once we have seen 15 in a row, we just break and send a
                # new M105
                # 15 was chosen based on the fact that it gives enough time for
                # Gen7 bootloader to time out, and that the non received M105
                # issues should be quite rare so we can wait for a long time
                # before resending
                if not line:
                    empty_lines += 1
                    if empty_lines == 15: break
                else: empty_lines = 0
                if line.startswith(tuple(self.greetings)) \
                   or line.startswith('ok') or "T:" in line:
                    self.online = True
                    for handler in self.event_handler:
                        try: handler.on_online()
                        except: logging.error(traceback.format_exc())
                    if self.onlinecb:
                        try: self.onlinecb()
                        except: self.logError(traceback.format_exc())
                    return

    def _listen(self):
        """This function acts on messages from the firmware
        """
        self.clear = True
        if not self.printing:
            self._listen_until_online()
        while self._listen_can_continue():
            line = self._readline()
            if line is None:
                logging.debug('_readline() is None, exiting _listen()')
                break
            if line.startswith('DEBUG_'):
                continue
            if line.startswith(tuple(self.greetings)) or line.startswith('ok'):
                self.clear = True
            if line.startswith('ok') and "T:" in line:
                for handler in self.event_handler:
                    try: handler.on_temp(line)
                    except: logging.error(traceback.format_exc())
                if self.tempcb:
                    # callback for temp, status, whatever
                    try: self.tempcb(line)
                    except: self.logError(traceback.format_exc())
            elif line.startswith('Error'):
                self.logError(line)
            # Teststrings for resend parsing       # Firmware exp. result
            # line="rs N2 Expected checksum 67"    # Teacup 2
            if line.lower().startswith("resend") or line.startswith("rs"):
                for haystack in ["N:", "N", ":"]:
                    line = line.replace(haystack, " ")
                linewords = line.split()
                while len(linewords) != 0:
                    try:
                        toresend = int(linewords.pop(0))
                        self.resendfrom = toresend
                        break
                    except ValueError:
                        # Token was not a line number; keep looking.
                        # (Previously a bare except; only int() can raise
                        # here and it raises ValueError.)
                        pass
                self.clear = True
        # Unblock anyone waiting on clear-to-send before the thread exits
        self.clear = True
        logging.debug('Exiting read thread')

    def _start_sender(self):
        self.stop_send_thread = False
        self.send_thread = threading.Thread(target=self._sender,
                                            name='send thread')
        self.send_thread.start()

    def _stop_sender(self):
        if self.send_thread:
            self.stop_send_thread = True
            self.send_thread.join()
            self.send_thread = None

    def _sender(self):
        # Background loop draining the priority queue when not printing
        while not self.stop_send_thread:
            try:
                command = self.priqueue.get(True, 0.1)
            except QueueEmpty:
                continue
            while self.printer and self.printing and not self.clear:
                time.sleep(0.001)
            self._send(command)
            while self.printer and self.printing and not self.clear:
                time.sleep(0.001)

    def _checksum(self, command):
        # Standard RepRap checksum: XOR of all characters in the command
        return reduce(lambda x, y: x ^ y, map(ord, command))

    def startprint(self, gcode, startindex=0):
        """Start a print.

        The `mainqueue` is populated and then commands are gradually sent to
        the printer. Printing starts in a parallel thread, this function
        launches the print and returns immediately, it does not wait/block
        until printing has finished.

        Parameters
        ----------
        gcode : GCode
            A `printrun.gcoder.GCode` object containing the array of G-code
            commands. The print queue `mainqueue` will be replaced with the
            contents of `gcode`.
        startindex : int, default: 0
            The index from the `gcode` array from which the printing will be
            started.

        Returns
        -------
        bool
            True on successful print start, False if already printing or
            offline.
        """
        if self.printing or not self.online or not self.printer:
            return False
        self.queueindex = startindex
        self.mainqueue = gcode
        self.printing = True
        self.lineno = 0
        self.resendfrom = -1
        if not gcode or not gcode.lines:
            return True

        self.clear = False
        self._send("M110 N-1", -1, True)

        resuming = (startindex != 0)
        self.print_thread = threading.Thread(target=self._print,
                                             name='print thread',
                                             kwargs={"resuming": resuming})
        self.print_thread.start()
        return True

    def cancelprint(self):
        """Cancel an ongoing print."""
        self.pause()
        self.paused = False
        self.mainqueue = None
        self.clear = True

    # run a simple script if it exists, no multithreading
    def runSmallScript(self, filename):
        if not filename: return
        try:
            with open(filename) as f:
                for i in f:
                    l = i.replace("\n", "")
                    l = l.partition(';')[0]  # remove comments
                    self.send_now(l)
        except OSError as e:
            # Previously a bare 'except: pass': report why the script could
            # not be read instead of failing completely silently.
            self.logError(_("Could not run script %s: %s") % (filename, e))

    def pause(self):
        """Pauses an ongoing print.

        The current position of the print is saved to be able to go back to it
        when resuming.

        Returns
        -------
        bool
            False if not printing.
        """
        if not self.printing: return False
        self.paused = True
        self.printing = False

        # ';@pause' in the gcode file calls pause from the print thread
        if threading.current_thread() is not self.print_thread:
            try:
                self.print_thread.join()
            except:
                self.logError(traceback.format_exc())

        self.print_thread = None

        # saves the status
        self.pauseX = self.analyzer.abs_x
        self.pauseY = self.analyzer.abs_y
        self.pauseZ = self.analyzer.abs_z
        self.pauseE = self.analyzer.abs_e
        self.pauseF = self.analyzer.current_f
        self.pauseRelative = self.analyzer.relative
        self.pauseRelativeE = self.analyzer.relative_e

    def resume(self):
        """Resumes a paused print.

        `printcore` will first attempt to set the position and conditions it
        had when the print was paused and then resume the print right where it
        was.

        Returns
        -------
        bool
            False if print not paused.
        """
        if not self.paused: return False
        # restores the status
        self.send_now("G90")  # go to absolute coordinates

        xyFeed = '' if self.xy_feedrate is None else ' F' + str(self.xy_feedrate)
        zFeed = '' if self.z_feedrate is None else ' F' + str(self.z_feedrate)

        self.send_now("G1 X%s Y%s%s" % (self.pauseX, self.pauseY, xyFeed))
        self.send_now("G1 Z" + str(self.pauseZ) + zFeed)
        self.send_now("G92 E" + str(self.pauseE))

        # go back to relative if needed
        if self.pauseRelative:
            self.send_now("G91")
        if self.pauseRelativeE:
            self.send_now('M83')
        # reset old feed rate
        self.send_now("G1 F" + str(self.pauseF))

        self.paused = False
        self.printing = True
        self.print_thread = threading.Thread(target=self._print,
                                             name='print thread',
                                             kwargs={"resuming": True})
        self.print_thread.start()

    def send(self, command, wait=0):
        """Adds a command to the main queue.

        If a print is ongoing, `command` is appended at the end of
        `mainqueue`. If not printing, the command is added to the priority
        queue `priqueue`. The `command` is added to a queue and is sent on a
        parallel thread. This function is non-blocking.

        Parameters
        ----------
        command : str
            Command to be sent, e.g. "M105" or "G1 X10 Y10".
        wait
            Ignored. Do not use.
        """
        if self.online:
            if self.printing:
                self.mainqueue.append(command)
            else:
                self.priqueue.put_nowait(command)
        else:
            self.logError(_("Not connected to printer."))

    def send_now(self, command, wait=0):
        """Adds a command to the priority queue.

        Command is appended to `priqueue`. The `command` is added to a queue
        and is sent on a parallel thread. This function is non-blocking.

        Parameters
        ----------
        command : str
            Command to be sent, e.g. "M105" or "G1 X10 Y10".
        wait
            Ignored. Do not use.
        """
        if self.online:
            self.priqueue.put_nowait(command)
        else:
            self.logError(_("Not connected to printer."))

    def _print(self, resuming=False):
        # Print thread body: pump _sendnext() until done, with start/end
        # event notifications. The standalone sender is stopped while
        # printing and restarted afterwards.
        self._stop_sender()
        try:
            for handler in self.event_handler:
                try: handler.on_start(resuming)
                except: logging.error(traceback.format_exc())
            if self.startcb:
                # callback for printing started
                try: self.startcb(resuming)
                except:
                    self.logError(_("Print start callback failed with:") +
                                  "\n" + traceback.format_exc())
            while self.printing and self.printer and self.online:
                self._sendnext()
            self.sentlines = {}
            self.log.clear()
            self.sent = []
            for handler in self.event_handler:
                try: handler.on_end()
                except: logging.error(traceback.format_exc())
            if self.endcb:
                # callback for printing done
                try: self.endcb()
                except:
                    self.logError(_("Print end callback failed with:") +
                                  "\n" + traceback.format_exc())
        except:
            self.logError(_("Print thread died due to the following error:") +
                          "\n" + traceback.format_exc())
        finally:
            self.print_thread = None
            self._start_sender()

    def process_host_command(self, command):
        """only ;@pause command is implemented as a host command in printcore, but hosts are free to reimplement this method"""
        command = command.lstrip()
        if command.startswith(";@pause"):
            self.pause()

    def _sendnext(self):
        # Send the next command: a resend if requested, else a priority
        # command, else the next line of the main queue.
        if not self.printer:
            return
        while self.printer and self.printing and not self.clear:
            time.sleep(0.001)
        # Only wait for oks when using serial connections or when not using tcp
        # in streaming mode
        if not self.printer.has_flow_control or not self.tcp_streaming_mode:
            self.clear = False
        if not (self.printing and self.printer and self.online):
            self.clear = True
            return
        if self.resendfrom < self.lineno and self.resendfrom > -1:
            self._send(self.sentlines[self.resendfrom], self.resendfrom, False)
            self.resendfrom += 1
            return
        self.resendfrom = -1
        if not self.priqueue.empty():
            self._send(self.priqueue.get_nowait())
            self.priqueue.task_done()
            return
        if self.printing and self.mainqueue.has_index(self.queueindex):
            (layer, line) = self.mainqueue.idxs(self.queueindex)
            gline = self.mainqueue.all_layers[layer][line]
            if self.queueindex > 0:
                (prev_layer, prev_line) = self.mainqueue.idxs(self.queueindex - 1)
                if prev_layer != layer:
                    for handler in self.event_handler:
                        try: handler.on_layerchange(layer)
                        except: logging.error(traceback.format_exc())
            if self.layerchangecb and self.queueindex > 0:
                (prev_layer, prev_line) = self.mainqueue.idxs(self.queueindex - 1)
                if prev_layer != layer:
                    try: self.layerchangecb(layer)
                    except: self.logError(traceback.format_exc())
            for handler in self.event_handler:
                try: handler.on_preprintsend(gline, self.queueindex, self.mainqueue)
                except: logging.error(traceback.format_exc())
            if self.preprintsendcb:
                if self.mainqueue.has_index(self.queueindex + 1):
                    (next_layer, next_line) = self.mainqueue.idxs(self.queueindex + 1)
                    next_gline = self.mainqueue.all_layers[next_layer][next_line]
                else:
                    next_gline = None
                gline = self.preprintsendcb(gline, next_gline)
            if gline is None:
                self.queueindex += 1
                self.clear = True
                return
            tline = gline.raw
            if tline.lstrip().startswith(";@"):  # check for host command
                self.process_host_command(tline)
                self.queueindex += 1
                self.clear = True
                return

            # Strip comments
            tline = gcoder.gcode_strip_comment_exp.sub("", tline).strip()
            if tline:
                self._send(tline, self.lineno, True)
                self.lineno += 1
                for handler in self.event_handler:
                    try: handler.on_printsend(gline)
                    except: logging.error(traceback.format_exc())
                if self.printsendcb:
                    try: self.printsendcb(gline)
                    except: self.logError(traceback.format_exc())
            else:
                self.clear = True
            self.queueindex += 1
        else:
            self.printing = False
            self.clear = True
            if not self.paused:
                self.queueindex = 0
                self.lineno = 0
                self._send("M110 N-1", -1, True)

    def _send(self, command, lineno=0, calcchecksum=False):
        # Low-level send: optionally adds line number + checksum, feeds the
        # analyzer and callbacks, then writes to the device.
        # Only add checksums if over serial (tcp does the flow control itself)
        if calcchecksum and not self.printer.has_flow_control:
            prefix = "N" + str(lineno) + " " + command
            command = prefix + "*" + str(self._checksum(prefix))
            if "M110" not in command:
                self.sentlines[lineno] = command
        if self.printer:
            self.sent.append(command)
            # run the command through the analyzer
            gline = None
            try:
                gline = self.analyzer.append(command, store=False)
            except:
                logging.warning(_("Could not analyze command %s:") % command +
                                "\n" + traceback.format_exc())
            if self.loud:
                logging.info("SENT: %s" % command)

            for handler in self.event_handler:
                try: handler.on_send(command, gline)
                except: logging.error(traceback.format_exc())
            if self.sendcb:
                try: self.sendcb(command, gline)
                except: self.logError(traceback.format_exc())
            try:
                self.printer.write((command + "\n").encode('ascii'))
                self.writefailures = 0
            except device.DeviceError as e:
                # FIX: added the missing space between the two concatenated
                # halves of this message.
                self.logError("Can't write to printer (disconnected?) "
                              "{0}".format(e))
                self.writefailures += 1
| 27,023 | Python | .py | 644 | 29.958075 | 127 | 0.563983 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,347 | settings.py | kliment_Printrun/printrun/settings.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
import traceback
import os
import sys
from functools import wraps
import wx
from pathlib import Path
from .utils import parse_build_dimensions, check_rgb_color, check_rgba_color
from .gui.widgets import get_space
def setting_add_tooltip(func):
    """Wrap a widget-factory method so the created widget gets a tooltip.

    The tooltip combines the setting's help text with a description of the
    default value (and how to reset to it). If both are empty, no tooltip
    is set.
    """
    @wraps(func)
    def decorator(self, *args, **kwargs):
        widget = func(self, *args, **kwargs)
        helptxt = self.help or ""
        sep = ""
        if helptxt:
            # Separate help from the default-value text; leave a blank line
            # when the help is already multi-line.
            sep = "\n\n" if "\n" in helptxt else "\n"
        deftxt = ""
        if self.default != "":
            deftxt = _("Default: ")
            resethelp = _("(Control-doubleclick to reset to default value)")
            shown_default = repr(self.default)
            if len(shown_default) > 10:
                deftxt += "\n " + shown_default.strip("'") + "\n" + resethelp
            else:
                deftxt += shown_default + " " + resethelp
        helptxt += sep + deftxt
        if helptxt:
            widget.SetToolTip(helptxt)
        return widget
    return decorator
class Setting:
    """Base class for a single configurable option.

    Holds the option's name, current value, default and UI metadata.
    Subclasses provide the wx widget used to edit it (HiddenSetting,
    wxSetting and friends) by overriding `_set_value`,
    `get_specific_widget` and `update`.
    """

    # Group shown in the options dialog when none is given explicitly.
    DEFAULT_GROUP = "Printer"

    # Hidden settings are persisted but never shown in the UI.
    hidden = False

    def __init__(self, name, default, label = None, help = None, group = None):
        # NOTE: `help` intentionally mirrors the keyword used by callers,
        # although it shadows the builtin of the same name.
        self.name = name
        self.default = default
        self._value = default
        self.label = label
        self.help = help
        self.group = group if group else Setting.DEFAULT_GROUP

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        # Subclasses must override this and rebind `value = property(...)`
        # with their own setter (see HiddenSetting / wxSetting).
        raise NotImplementedError

    value = property(_get_value, _set_value)

    @setting_add_tooltip
    def get_label(self, parent):
        """Create the wx label for this setting (tooltip added by decorator)."""
        widget = wx.StaticText(parent, -1, self.label or self.name)
        # Expose the reset-to-default handler so the UI can bind mouse events
        widget.set_default = self.set_default
        return widget

    @setting_add_tooltip
    def get_widget(self, parent):
        """Create the editing widget (tooltip added by decorator)."""
        return self.get_specific_widget(parent)

    def get_specific_widget(self, parent):
        # Subclass hook: build and return the concrete wx control.
        raise NotImplementedError

    def update(self):
        # Subclass hook: pull the widget's content back into the value.
        raise NotImplementedError

    def validate(self, value):
        # Subclass hook: raise on invalid values; default accepts anything.
        pass

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name
class HiddenSetting(Setting):
    """Setting that is stored/persisted but never shown in the UI."""

    hidden = True

    def _set_value(self, value):
        # No widget to keep in sync: just store the value.
        self._value = value
    value = property(Setting._get_value, _set_value)
class wxSetting(Setting):
    """Base class for settings edited through a single wx control."""

    # The wx control currently editing this setting (None until created)
    widget = None

    def _set_value(self, value):
        self._value = value
        if self.widget:
            # Keep the on-screen control in sync with the new value
            self.widget.SetValue(value)
    value = property(Setting._get_value, _set_value)

    def update(self):
        """Pull the current widget value back into the setting."""
        self.value = self.widget.GetValue()

    def set_default(self, e):
        """Mouse handler: Ctrl-doubleclick resets the widget to the default."""
        if e.CmdDown() and e.ButtonDClick() and self.default != "":
            self.widget.SetValue(self.default)
        else:
            e.Skip()
class StringSetting(wxSetting):
    """Free-form text setting edited in a wx.TextCtrl."""

    def get_specific_widget(self, parent):
        self.widget = wx.TextCtrl(parent, -1, str(self.value))
        return self.widget
def wxColorToStr(color, withAlpha = True):
    """Convert a wx.Colour-like object to a hex color string.

    color: any object with integer ``red``/``green``/``blue`` (and, when
        *withAlpha* is true, ``alpha``) attributes in the 0-255 range.
    withAlpha: append the alpha channel as a fourth hex pair.

    Returns e.g. "#FF100080".  Including alpha seems to be non standard
    in CSS.
    """
    # Renamed from "format" to avoid shadowing the builtin of that name.
    template = '#{0.red:02X}{0.green:02X}{0.blue:02X}' \
               + ('{0.alpha:02X}' if withAlpha else '')
    return template.format(color)
class DirSetting(wxSetting):
    '''Adds a setting type that works similar to the StringSetting but with
    an additional 'Browse' button that opens a directory chooser dialog.'''

    def get_widget(self, parent):
        # Create the text control
        self.text_ctrl = wx.TextCtrl(parent, -1, str(self.value))
        # Create the browse-button control
        button = wx.Button(parent, -1, "Browse")
        button.Bind(wx.EVT_BUTTON, self.on_browse)
        # The "widget" is a sizer holding the text control and the button
        self.widget = wx.BoxSizer(wx.HORIZONTAL)
        self.widget.Add(self.text_ctrl, 1)
        self.widget.Add(button, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, get_space('mini'))
        return self.widget

    def on_browse(self, event = None):
        """Open a directory picker seeded with the current value."""
        directory = self.text_ctrl.GetValue()
        if not os.path.isdir(directory):
            # Fall back to the current working directory
            directory = '.'
        message = _("Choose Directory...")
        dlg = wx.DirDialog(None, message, directory,
                           wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
        dlg.SetMessage(message)
        if dlg.ShowModal() == wx.ID_OK:
            self.text_ctrl.SetValue(dlg.GetPath())
        dlg.Destroy()

    def _set_value(self, value):
        self._value = value
        # NOTE(review): text_ctrl only exists after get_widget() has run;
        # a write before that would raise AttributeError -- confirm callers
        # never set the value earlier.
        if self.text_ctrl:
            self.text_ctrl.SetValue(value)
    value = property(wxSetting._get_value, _set_value)

    def update(self):
        self.value = self.text_ctrl.GetValue()
class ColorSetting(wxSetting):
    """Color setting edited with a wx.ColourPickerCtrl.

    isRGBA selects whether values carry an alpha channel ("#RRGGBBAA")
    or not ("#RRGGBB"); validation matches the chosen form.
    """
    def __init__(self, name, default, label = None, help = None, group = None, isRGBA=True):
        super().__init__(name, default, label, help, group)
        self.isRGBA = isRGBA

    def validate(self, value):
        validate = check_rgba_color if self.isRGBA else check_rgb_color
        validate(value)

    def get_specific_widget(self, parent):
        self.widget = wx.ColourPickerCtrl(parent, colour=wx.Colour(self.value),
                                          style=wx.CLRP_USE_TEXTCTRL)
        # Alias SetColour so the generic wxSetting machinery can call SetValue
        self.widget.SetValue = self.widget.SetColour
        self.widget.LayoutDirection = wx.Layout_RightToLeft
        return self.widget

    def update(self):
        # Serialize the picked colour back to a hex string
        self._value = wxColorToStr(self.widget.Colour, self.isRGBA)
class ComboSetting(wxSetting):
    """Choice setting: a read-only wx.Choice when *choices* is a tuple,
    an editable wx.ComboBox otherwise."""

    def __init__(self, name, default, choices, label = None, help = None,
                 group = None, size = 7 * get_space('settings')):
        # size: Default length is set here, can be overwritten on creation.
        # (The default expression is evaluated once, at import time.)
        super().__init__(name, default, label, help, group)
        self.choices = choices
        self.size = size

    def get_specific_widget(self, parent):
        # A tuple of choices marks the set as closed (no free-form entry)
        readonly = isinstance(self.choices, tuple)
        if readonly:
            # wx.Choice drops its list on click, no need to click down arrow
            self.widget = wx.Choice(parent, -1, choices = self.choices, size = (self.size, -1))
            # Adapt wx.Choice's selection API to the GetValue/SetValue protocol
            self.widget.GetValue = lambda: self.choices[self.widget.Selection]
            self.widget.SetValue = lambda v: self.widget.SetSelection(self.choices.index(v))
            self.widget.SetValue(self.value)
        else:
            self.widget = wx.ComboBox(parent, -1, str(self.value), choices = self.choices,
                                      style = wx.CB_DROPDOWN, size = (self.size, -1))
        return self.widget
class SpinSetting(wxSetting):
    """Integer setting edited in a spinner clamped to [min, max]."""

    def __init__(self, name, default, min, max, label = None,
                 help = None, group = None, increment = 0.1):
        super().__init__(name, default, label, help, group)
        self.min = min
        self.max = max
        self.increment = increment

    def get_specific_widget(self, parent):
        self.widget = wx.SpinCtrlDouble(parent, -1, min = self.min, max = self.max,
                                        size = (4 * get_space('settings'), -1))
        self.widget.SetDigits(0)
        self.widget.SetValue(self.value)
        orig = self.widget.GetValue
        # Wrap GetValue so callers always receive an int (digits are 0)
        self.widget.GetValue = lambda: int(orig())
        return self.widget
def MySpin(parent, digits, *args, **kw):
    """Create a wx.SpinCtrlDouble showing *digits* decimals, widened so
    its largest possible value fits (GTK sizing workaround)."""
    # in GTK 3.[01], spinner is not large enough to fit text
    # Could be a class, but use function to avoid load errors if wx
    # not installed
    # If native wx.SpinCtrlDouble has problems in different platforms
    # try agw
    # from wx.lib.agw.floatspin import FloatSpin
    sp = wx.SpinCtrlDouble(parent, *args, **kw)
    # sp = FloatSpin(parent)
    sp.SetDigits(digits)
    # sp.SetValue(kw['initial'])
    def fitValue(ev):
        # Widest text the control can show, e.g. '%.2f' % sp.Max
        text = '%%.%df' % digits % sp.Max
        # native wx.SpinCtrlDouble does not return good size
        # in GTK 3.0
        tex = sp.GetTextExtent(text)
        tsz = sp.GetSizeFromTextSize(tex.x)
        if sp.MinSize.x < tsz.x:
            # print('fitValue', getattr(sp, 'setting', None), sp.Value, sp.Digits, tsz.x)
            sp.MinSize = tsz
            # sp.Size = tsz
    # sp.Bind(wx.EVT_TEXT, fitValue)
    fitValue(None)
    return sp
class FloatSpinSetting(SpinSetting):
    """SpinSetting variant displaying two decimal places."""

    def get_specific_widget(self, parent):
        self.widget = MySpin(parent, 2, initial = self.value, min = self.min,
                             max = self.max, inc = self.increment,
                             size = (4 * get_space('settings'), -1))
        return self.widget
class BooleanSetting(wxSetting):
    """Boolean setting edited as a wx.CheckBox."""

    def _get_value(self):
        # Stored value may be any truthy type; normalize on read
        return bool(self._value)

    def _set_value(self, value):
        self._value = value
        if self.widget:
            self.widget.SetValue(bool(value))
    value = property(_get_value, _set_value)

    def get_specific_widget(self, parent):
        self.widget = wx.CheckBox(parent, -1)
        self.widget.SetValue(bool(self.value))
        return self.widget
class StaticTextSetting(wxSetting):
    """Pseudo-setting that only renders static text in the options dialog.

    It stores no value: reads always yield "" and writes are ignored.
    """
    def __init__(self, name, label = " ", text = "", help = None, group = None):
        super().__init__(name, "", label, help, group)
        self.text = text

    def update(self):
        pass

    def _get_value(self):
        return ""

    def _set_value(self, value):
        pass

    def get_specific_widget(self, parent):
        self.widget = wx.StaticText(parent, -1, self.text)
        return self.widget
class BuildDimensionsSetting(wxSetting):
    """Composite setting for the nine build-dimension numbers
    (width/depth/height, X/Y/Z offsets, X/Y/Z home positions), edited as
    a 3x3 grid of spinners and serialized to/from a single string."""

    widgets = None

    def _set_value(self, value):
        self._value = value
        if self.widgets:
            self._set_widgets_values(value)
    value = property(wxSetting._get_value, _set_value)

    def _set_widgets_values(self, value):
        # Push each parsed component into its spinner, in field order
        build_dimensions_list = parse_build_dimensions(value)
        for i, widget in enumerate(self.widgets):
            widget.SetValue(build_dimensions_list[i])

    def get_widget(self, parent):
        build_dimensions = parse_build_dimensions(self.value)
        self.widgets = []
        # w: append a spinner for the next dimension value
        def w(val, m, M):
            self.widgets.append(MySpin(parent, 2, initial = val, min = m,
                                       max = M, size = (5 * get_space('settings'), -1)))
        # addlabel: right-aligned caption at a grid position
        def addlabel(name, pos):
            self.widget.Add(wx.StaticText(parent, -1, name), pos = pos,
                            flag = wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.ALIGN_RIGHT,
                            border = get_space('mini'))
        # addwidget: place the most recently created spinner
        def addwidget(*pos):
            self.widget.Add(self.widgets[-1], pos = pos,
                            flag = wx.RIGHT | wx.EXPAND, border = get_space('mini'))
        self.widget = wx.GridBagSizer(vgap = get_space('mini'), hgap = get_space('mini'))
        addlabel(_("Width:"), (0, 0))
        w(build_dimensions[0], 0, 2000)
        addwidget(0, 1)
        addlabel(_("Depth:"), (0, 2))
        w(build_dimensions[1], 0, 2000)
        addwidget(0, 3)
        addlabel(_("Height:"), (0, 4))
        w(build_dimensions[2], 0, 2000)
        addwidget(0, 5)
        addlabel(_("X Offset:"), (1, 0))
        w(build_dimensions[3], -2000, 2000)
        addwidget(1, 1)
        addlabel(_("Y Offset:"), (1, 2))
        w(build_dimensions[4], -2000, 2000)
        addwidget(1, 3)
        addlabel(_("Z Offset:"), (1, 4))
        w(build_dimensions[5], -2000, 2000)
        addwidget(1, 5)
        addlabel(_("X Home Pos.:"), (2, 0))
        w(build_dimensions[6], -2000, 2000)
        addwidget(2, 1)
        addlabel(_("Y Home Pos.:"), (2, 2))
        w(build_dimensions[7], -2000, 2000)
        addwidget(2, 3)
        addlabel(_("Z Home Pos.:"), (2, 4))
        w(build_dimensions[8], -2000, 2000)
        addwidget(2, 5)
        return self.widget

    def update(self):
        # Re-serialize: WxDxH then six signed offsets/home positions
        values = [float(w.GetValue()) for w in self.widgets]
        self.value = "%.02fx%.02fx%.02f%+.02f%+.02f%+.02f%+.02f%+.02f%+.02f" % tuple(values)
class Settings:
    """Container and registry for all application settings.

    Attribute access is proxied: ``settings.port`` returns the value of
    the Setting object stored as ``settings._port``, and assignment
    writes through to it (see __setattr__/__getattr__).  *root* is the
    application object providing change callbacks.
    """

    def __baudrate_list(self):
        # Standard serial baud rates offered in the connection combo box
        return ["2400", "9600", "19200", "38400", "57600", "115200", "250000"]

    def __init__(self, root):
        # defaults here.
        # the initial value determines the type
        self._add(StringSetting("port", "", _("Serial Port:"), _("Port used to communicate with printer")))
        self._add(ComboSetting("baudrate", 115200, self.__baudrate_list(), _("Baud Rate:"), _("Communications Speed")))
        # Fixed missing space between the two concatenated help sentences
        self._add(BooleanSetting("tcp_streaming_mode", False, _("TCP Streaming Mode:"),
                                 _("When using a TCP connection to the printer, the streaming mode will not wait for acks from the printer to send new commands. "
                                   "This will break things such as ETA prediction, but can result in smoother prints.")), root.update_tcp_streaming_mode)
        self._add(BooleanSetting("rpc_server", True, _("RPC Server:"), _("Enable RPC server to allow remotely querying print status")), root.update_rpc_server)
        self._add(BooleanSetting("dtr", True, _("DTR:"), _("Disabling DTR would prevent Arduino (RAMPS) from resetting upon connection"), "Printer"))
        if sys.platform != "win32":
            self._add(StringSetting("devicepath", "", _("Device Name Pattern:"), _("Custom device pattern: for example /dev/3DP_* "), "Printer"))
        self._add(SpinSetting("bedtemp_abs", 110, 0, 400, _("Bed Temperature for ABS:"), _("Heated Build Platform temp for ABS (deg C)"), "Printer"), root.set_temp_preset)
        self._add(SpinSetting("bedtemp_pla", 60, 0, 400, _("Bed Temperature for PLA:"), _("Heated Build Platform temp for PLA (deg C)"), "Printer"), root.set_temp_preset)
        self._add(SpinSetting("temperature_abs", 230, 0, 400, _("Extruder Temperature for ABS:"), _("Extruder temp for ABS (deg C)"), "Printer"), root.set_temp_preset)
        self._add(SpinSetting("temperature_pla", 185, 0, 400, _("Extruder Temperature for PLA:"), _("Extruder temp for PLA (deg C)"), "Printer"), root.set_temp_preset)
        self._add(SpinSetting("xy_feedrate", 3000, 0, 50000, _("X && Y Manual Feedrate:"), _("Feedrate for Control Panel Moves in X and Y (mm/min)"), "Printer"))
        self._add(SpinSetting("z_feedrate", 100, 0, 50000, _("Z Manual Feedrate:"), _("Feedrate for Control Panel Moves in Z (mm/min)"), "Printer"))
        self._add(SpinSetting("e_feedrate", 100, 0, 1000, _("E Manual Feedrate:"), _("Feedrate for Control Panel Moves in Extrusions (mm/min)"), "Printer"))
        # Frozen (bundled) builds ship a slicer alongside the executable
        defaultslicerpath = ""
        if getattr(sys, 'frozen', False):
            if sys.platform == "darwin":
                defaultslicerpath = "/Applications/Slic3r.app/Contents/MacOS/"
            elif sys.platform == "win32":
                defaultslicerpath = ".\\slic3r\\"
        self._add(DirSetting("slicecommandpath", defaultslicerpath, _("Path to Slicer:"), _("Path to slicer"), "External"))
        slicer = 'slic3r-console' if sys.platform == 'win32' else 'slic3r'
        self._add(StringSetting("slicecommand", slicer + ' $s --output $o', _("Slice Command:"), _("Slice command"), "External"))
        self._add(StringSetting("sliceoptscommand", "slic3r", _("Slicer options Command:"), _("Slice settings command"), "External"))
        self._add(StringSetting("start_command", "", _("Start Command:"), _("Executable to run when the print is started"), "External"))
        self._add(StringSetting("final_command", "", _("Final Command:"), _("Executable to run when the print is finished"), "External"))
        self._add(StringSetting("error_command", "", _("Error Command:"), _("Executable to run when an error occurs"), "External"))
        self._add(DirSetting("log_path", str(Path.home()), _("Log Path:"), _("Path to the log file. An empty path will log to the console."), "UI"))

        # Hidden settings: persisted state with no options-dialog entry
        self._add(HiddenSetting("project_offset_x", 0.0))
        self._add(HiddenSetting("project_offset_y", 0.0))
        self._add(HiddenSetting("project_interval", 2.0))
        self._add(HiddenSetting("project_pause", 2.5))
        self._add(HiddenSetting("project_scale", 1.0))
        self._add(HiddenSetting("project_x", 1024))
        self._add(HiddenSetting("project_y", 768))
        self._add(HiddenSetting("project_projected_x", 150.0))
        self._add(HiddenSetting("project_direction", 0))  # 0: Top Down
        self._add(HiddenSetting("project_overshoot", 3.0))
        self._add(HiddenSetting("project_z_axis_rate", 200))
        self._add(HiddenSetting("project_layer", 0.1))
        self._add(HiddenSetting("project_prelift_gcode", ""))
        self._add(HiddenSetting("project_postlift_gcode", ""))
        self._add(HiddenSetting("pause_between_prints", True))
        self._add(HiddenSetting("default_extrusion", 5.0))
        self._add(HiddenSetting("last_extrusion", 5.0))
        self._add(HiddenSetting("total_filament_used", 0.0))
        self._add(HiddenSetting("spool_list", ""))

    # All visible Setting objects, in registration order
    _settings = []

    def __setattr__(self, name, value):
        """Register Setting objects; write plain values through to them."""
        if name.startswith("_"):
            return object.__setattr__(self, name, value)
        if isinstance(value, Setting):
            if not value.hidden:
                self._settings.append(value)
            object.__setattr__(self, "_" + name, value)
        elif hasattr(self, "_" + name):
            getattr(self, "_" + name).value = value
        else:
            # Unknown plain value: auto-create a StringSetting for it
            setattr(self, name, StringSetting(name = name, default = value))
        return None

    def __getattr__(self, name):
        if name.startswith("_"):
            return object.__getattribute__(self, name)
        return getattr(self, "_" + name).value

    def _add(self, setting, callback = None,
             alias = None, autocomplete_list = None):
        """Register *setting* with an optional change callback, alias map
        factory and tab-completion list factory."""
        setattr(self, setting.name, setting)
        if callback:
            setattr(self, "__" + setting.name + "_cb", callback)
        if alias:
            setattr(self, "__" + setting.name + "_alias", alias)
        if autocomplete_list:
            setattr(self, "__" + setting.name + "_list", autocomplete_list)

    def _set(self, key, value):
        """Validate and store setting *key*, firing its change callback.

        Returns the (possibly alias-resolved and type-coerced) value."""
        try:
            value = getattr(self, "__%s_alias" % key)()[value]
        except (KeyError, AttributeError):
            # No alias map for this key, or no alias for this value
            pass
        setting = getattr(self, '_' + key)
        setting.validate(value)
        # Coerce to the current value's type; special-case bool("False")
        t = type(getattr(self, key))
        if t == bool and value == "False":
            value = False
        setattr(self, key, t(value))
        try:
            cb = getattr(self, "__%s_cb" % key, None)
            if cb is not None:
                cb(key, value)
        except Exception:
            # Was a bare "except:": narrowed so KeyboardInterrupt and
            # SystemExit propagate while callback errors are only logged.
            logging.warning((_("Failed to run callback after setting '%s':") % key) +
                            "\n" + traceback.format_exc())
        return value

    def _tabcomplete(self, key):
        """Return tab-completion candidates for setting *key*."""
        try:
            return getattr(self, "__%s_list" % key)()
        except AttributeError:
            pass
        try:
            return list(getattr(self, "__%s_alias" % key)().keys())
        except AttributeError:
            pass
        return []

    def _all_settings(self):
        return self._settings
| 19,566 | Python | .py | 412 | 38.390777 | 171 | 0.60494 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,348 | gcodeplater.py | kliment_Printrun/printrun/gcodeplater.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
import sys
import os
import time
import types
import re
import math
import logging
from printrun import gcoder
from printrun.objectplater import make_plater, PlaterPanel
from printrun.gl.libtatlin import actors
import printrun.gui.viz # NOQA
from printrun import gcview
from .utils import install_locale, get_home_pos
install_locale('pronterface')
# Set up Internationalization using gettext
# searching for installed locales on /usr/share; uses relative folder if not found (windows)
def extrusion_only(gline):
    """Return True when *gline* extrudes without any X/Y/Z coordinate."""
    if gline.e is None:
        return False
    return gline.x is None and gline.y is None and gline.z is None
# Custom method for gcoder.GCode to analyze & output gcode in a single call
def gcoder_write(self, f, line, store = False):
    """Write *line* to file object *f* and feed it to the GCode analyzer
    (*self*) in one step; *store* is forwarded to GCode.append."""
    f.write(line)
    self.append(line, store = store)
# Matches an X or Y coordinate word (optional sign and decimals) so moves
# can be rewritten.  Raw strings keep the "\." escape literal (a plain
# string here triggers an invalid-escape SyntaxWarning on Python 3.12+).
rewrite_exp = re.compile("(%s)" % "|".join([r"X([-+]?[0-9]*\.?[0-9]*)",
                                            r"Y([-+]?[0-9]*\.?[0-9]*)"]))
def rewrite_gline(centeroffset, gline, cosr, sinr):
    """Return *gline*'s raw text with its X/Y words rotated by the angle
    whose cosine/sine are *cosr*/*sinr* around the model center described
    by *centeroffset*.  Lines without X/Y moves are returned unchanged."""
    if gline.is_move and (gline.x is not None or gline.y is not None):
        if gline.relative:
            # Relative moves carry displacements: rotate the delta
            # vector directly, with no center translation.
            xc = yc = 0
            cox = coy = 0
            if gline.x is not None:
                xc = gline.x
            if gline.y is not None:
                yc = gline.y
        else:
            # Absolute moves: translate into the model-centered frame
            xc = gline.current_x + centeroffset[0]
            yc = gline.current_y + centeroffset[1]
            cox = centeroffset[0]
            coy = centeroffset[1]
        # Standard 2D rotation, then translate back out of the frame
        new_x = "X%.04f" % (xc * cosr - yc * sinr - cox)
        new_y = "Y%.04f" % (xc * sinr + yc * cosr - coy)
        new = {"X": new_x, "Y": new_y}
        new_line = rewrite_exp.sub(lambda ax: new[ax.group()[0]], gline.raw)
        # Strip any trailing comment so appended words stay effective
        new_line = new_line.split(";")[0]
        # Rotation can introduce a coordinate on an axis the original
        # line omitted; append it explicitly.
        if gline.x is None:
            new_line += " " + new_x
        if gline.y is None:
            new_line += " " + new_y
        return new_line
    return gline.raw
class GcodePlaterPanel(PlaterPanel):
    """Plater panel operating directly on G-code: loaded models are
    arranged on the platform and merged into a single G-code file."""

    load_wildcard = _("GCODE files") + " (*.gcode;*.GCODE;*.g)|*.gcode;*.gco;*.g"
    save_wildcard = load_wildcard

    def prepare_ui(self, filenames = [], callback = None,
                   parent = None, build_dimensions = None,
                   circular_platform = False,
                   antialias_samples = 0,
                   grid = (1, 10)):
        """Build the 3D viewer, platform actor and window chrome."""
        super().prepare_ui(filenames, callback, parent, build_dimensions, cutting_tool = False)
        viewer = gcview.GcodeViewPanel(self, build_dimensions = self.build_dimensions,
                                       antialias_samples = antialias_samples)
        self.set_viewer(viewer)
        self.platform = actors.Platform(self.build_dimensions,
                                        circular = circular_platform,
                                        grid = grid)
        self.platform_object = gcview.GCObject(self.platform)
        self.Layout()
        self.SetMinClientSize(self.topsizer.CalcMin())
        self.SetTitle(_("G-Code Plate Builder"))

    def get_objects(self):
        # The platform is always drawn first, then the loaded models
        return [self.platform_object] + list(self.models.values())
    objects = property(get_objects)

    def load_file(self, filename):
        """Parse *filename* into a G-code model and add it to the plate."""
        with open(filename, "r") as file:
            gcode = gcoder.GCode(file, get_home_pos(self.build_dimensions))
        model = actors.GcodeModel()
        if gcode.filament_length > 0:
            model.display_travels = False
        # load_data is a generator; drain it until exhausted (None)
        generator = model.load_data(gcode)
        generator_output = next(generator)
        while generator_output is not None:
            generator_output = next(generator)
        obj = gcview.GCObject(model)
        obj.offsets = [self.build_dimensions[3], self.build_dimensions[4], 0]
        obj.gcode = gcode
        obj.dims = [gcode.xmin, gcode.xmax,
                    gcode.ymin, gcode.ymax,
                    gcode.zmin, gcode.zmax]
        # Translation bringing the model's bounding-box center to the origin
        obj.centeroffset = [-(obj.dims[1] + obj.dims[0]) / 2,
                            -(obj.dims[3] + obj.dims[2]) / 2,
                            0]
        self.add_model(filename, obj)
        wx.CallAfter(self.Refresh)

    def done(self, event, cb):
        """Export the merged plate to a temp file and hand its name to *cb*."""
        if not os.path.exists("tempgcode"):
            os.mkdir("tempgcode")
        name = "tempgcode/" + str(int(time.time()) % 10000) + ".gcode"
        self.export_to(name)
        if cb is not None:
            cb(name)
        if self.destroy_on_done:
            self.Destroy()

    # What's hard in there ?
    # 1) [x] finding the order in which the objects are printed
    # 2) [x] handling layers correctly
    # 3) [x] handling E correctly
    # 4) [x] handling position shifts: should we either reset absolute 0 using
    #        G92 or should we rewrite all positions ? => we use G92s
    # 5) [ ] handling the start & end gcode properly ?
    # 6) [x] handling of current tool
    # 7) [x] handling of Z moves for sequential printing (don't lower Z before
    #        reaching the next object print area)
    # 8) [x] handling of absolute/relative status
    # Initial implementation should just print the objects sequentially,
    # but the end goal is to have a clean per-layer merge

    def export_to(self, name):
        return self.export_combined(name)

    def export_combined(self, name):
        """Merge all loaded models layer-by-layer into G-code file *name*."""
        models = list(self.models.values())
        last_real_position = None
        # Sort models by Z max to print smaller objects first
        models.sort(key = lambda x: x.dims[-1])
        alllayers = []
        def add_offset(layer):
            if layer.z is not None:
                return layer.z + model.offsets[2]
            return layer.z
        # Build a global (z, model, layer) schedule across all models
        for (model_i, model) in enumerate(models):
            alllayers += [(add_offset(layer), model_i, layer_i)
                          for (layer_i, layer) in enumerate(model.gcode.all_layers) if add_offset(layer) is not None]
        alllayers.sort()
        # Per-model printing state carried between scheduled layers
        laste = [0] * len(models)
        lasttool = [0] * len(models)
        lastrelative = [False] * len(models)
        with open(name, "w") as f:
            analyzer = gcoder.GCode(None, get_home_pos(self.build_dimensions))
            # Route analyzer.write through gcoder_write: each line is both
            # written to the file and fed to the analyzer.
            analyzer.write = types.MethodType(lambda self, line: gcoder_write(self, f, line), analyzer)
            for (layer_z, model_i, layer_i) in alllayers:
                model = models[model_i]
                layer = model.gcode.all_layers[layer_i]
                r = math.radians(model.rot)
                o = model.offsets
                co = model.centeroffset
                offset_pos = last_real_position if last_real_position is not None else (0, 0, 0)
                analyzer.write("; %f %f %f\n" % offset_pos)
                trans = (- (o[0] + co[0]),
                         - (o[1] + co[1]),
                         - (o[2] + co[2]))
                trans_wpos = (offset_pos[0] + trans[0],
                              offset_pos[1] + trans[1],
                              offset_pos[2] + trans[2])
                analyzer.write("; GCodePlater: Model %d Layer %d at Z = %s\n" % (model_i, layer_i, layer_z))
                # Restore this model's relative/absolute mode and tool
                if lastrelative[model_i]:
                    analyzer.write("G91\n")
                else:
                    analyzer.write("G90\n")
                if analyzer.current_tool != lasttool[model_i]:
                    analyzer.write("T%d\n" % lasttool[model_i])
                # Shift the coordinate system with G92 instead of
                # rewriting every position in the layer
                analyzer.write("G92 X%.5f Y%.5f Z%.5f\n" % trans_wpos)
                analyzer.write("G92 E%.5f\n" % laste[model_i])
                for l in layer:
                    if l.command != "G28" and (l.command != "G92" or extrusion_only(l)):
                        if r == 0:
                            analyzer.write(l.raw + "\n")
                        else:
                            analyzer.write(rewrite_gline(co, l, math.cos(r), math.sin(r)) + "\n")
                # Find the current real position & E
                last_real_position = analyzer.current_pos
                laste[model_i] = analyzer.current_e
                lastrelative[model_i] = analyzer.relative
                lasttool[model_i] = analyzer.current_tool
        logging.info(_("Exported merged G-Codes to %s") % name)

    def export_sequential(self, name):
        '''
        Initial implementation of the gcode exporter,
        which prints objects sequentially. No longer used.
        '''
        models = list(self.models.values())
        last_real_position = None
        # Sort models by Z max to print smaller objects first
        models.sort(key = lambda x: x.dims[-1])
        with open(name, "w") as f:
            for model_i, model in enumerate(models):
                r = math.radians(model.rot)
                o = model.offsets
                co = model.centeroffset
                offset_pos = last_real_position if last_real_position is not None else (0, 0, 0)
                trans = (- (o[0] + co[0]),
                         - (o[1] + co[1]),
                         - (o[2] + co[2]))
                trans_wpos = (offset_pos[0] + trans[0],
                              offset_pos[1] + trans[1],
                              offset_pos[2] + trans[2])
                f.write("; GCodePlater: Model %d\n" % model_i)
                f.write("G90\n")
                f.write("G92 X%.5f Y%.5f Z%.5f E0\n" % trans_wpos)
                f.write("G1 X%.5f Y%.5f" % (-co[0], -co[1]))
                for l in model.gcode:
                    if l.command != "G28" and (l.command != "G92" or extrusion_only(l)):
                        if r == 0:
                            f.write(l.raw + "\n")
                        else:
                            f.write(rewrite_gline(co, l, math.cos(r), math.sin(r)) + "\n")
                # Find the current real position by scanning back for the
                # last move in this model's G-code
                for i in range(len(model.gcode) - 1, -1, -1):
                    gline = model.gcode.lines[i]
                    if gline.is_move:
                        last_real_position = (- trans[0] + gline.current_x,
                                              - trans[1] + gline.current_y,
                                              - trans[2] + gline.current_z)
                        break
        logging.info(_("Exported merged G-Codes to %s") % name)
GcodePlater = make_plater(GcodePlaterPanel)
if __name__ == '__main__':
    app = wx.App(False)
    main = GcodePlater(filenames = sys.argv[1:])
    for fn in main.filenames:
        main.load_file(fn)
    main.filenames = None
    main.autoplate()
    main.export_to("gcodeplate___test.gcode")
    # NOTE(review): this early exit makes the GUI startup below
    # unreachable; it looks like leftover test scaffolding -- confirm
    # whether Show()/MainLoop() should run before removing it.
    raise SystemExit
    main.Show()
    app.MainLoop()
| 11,165 | Python | .py | 240 | 34.6375 | 117 | 0.551734 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,349 | gcoder_line.pyx | kliment_Printrun/printrun/gcoder_line.pyx | #cython: language_level=3
#
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from libc.stdlib cimport malloc, free
from libc.stdint cimport uint8_t, uint32_t
from libc.string cimport strlen, strncpy
cdef char* copy_string(object value):
    """Return a malloc'ed NUL-terminated UTF-8 copy of *value*.

    The caller owns the returned buffer and must free() it
    (see GLine/GLightLine.__dealloc__).
    """
    value = value.encode('utf-8')
    cdef char* orig = value
    str_len = len(orig)
    cdef char* array = <char *>malloc(str_len + 1)
    if array == NULL:
        # malloc failure was previously unchecked and would have caused
        # a NULL-pointer write in strncpy
        raise MemoryError()
    strncpy(array, orig, str_len)
    array[str_len] = 0;
    return array
# Bit positions used in the _status bitfield of GLine/GLightLine to
# record which optional fields have actually been assigned (so getters
# can return None for never-set fields).
cdef enum BitPos:
    pos_raw = 1 << 0
    pos_command = 1 << 1
    pos_is_move = 1 << 2
    pos_x = 1 << 3
    pos_y = 1 << 4
    pos_z = 1 << 5
    pos_e = 1 << 6
    pos_f = 1 << 7
    pos_i = 1 << 8
    pos_j = 1 << 9
    pos_relative = 1 << 10
    pos_relative_e = 1 << 11
    pos_extruding = 1 << 12
    pos_current_x = 1 << 13
    pos_current_y = 1 << 14
    pos_current_z = 1 << 15
    pos_current_tool = 1 << 16
    pos_gcview_end_vertex = 1 << 17
# WARNING: don't use bits 24 to 31 as we store current_tool there
# _status bitfield helpers: test, set and clear a BitPos flag.
cdef inline uint32_t has_var(uint32_t status, uint32_t pos):
    return status & pos

cdef inline uint32_t set_has_var(uint32_t status, uint32_t pos):
    return status | pos

cdef inline uint32_t unset_has_var(uint32_t status, uint32_t pos):
    return status & ~pos
cdef class GLine:
    """Compact representation of a single parsed G-code line.

    Optional fields are plain C floats/strings; the _status bitfield
    records which of them were assigned, so each property getter can
    return None for a field that was never set.  Bits 24-31 of _status
    hold the current tool number.
    """
    cdef char* _raw
    cdef char* _command
    cdef float _x, _y, _z, _e, _f, _i, _j
    cdef float _current_x, _current_y, _current_z
    cdef uint32_t _gcview_end_vertex
    cdef uint32_t _status

    __slots__ = ()

    def __cinit__(self):
        self._status = 0
        self._raw = NULL
        self._command = NULL

    def __init__(self, line):
        self.raw = line

    def __dealloc__(self):
        # Free the malloc'ed copies made by copy_string()
        if self._raw != NULL: free(self._raw)
        if self._command != NULL: free(self._command)

    property x:
        def __get__(self):
            if has_var(self._status, pos_x): return self._x
            else: return None
        def __set__(self, value):
            self._x = value
            self._status = set_has_var(self._status, pos_x)
    property y:
        def __get__(self):
            if has_var(self._status, pos_y): return self._y
            else: return None
        def __set__(self, value):
            self._y = value
            self._status = set_has_var(self._status, pos_y)
    property z:
        def __get__(self):
            if has_var(self._status, pos_z): return self._z
            else: return None
        def __set__(self, value):
            self._z = value
            self._status = set_has_var(self._status, pos_z)
    property e:
        def __get__(self):
            if has_var(self._status, pos_e): return self._e
            else: return None
        def __set__(self, value):
            self._e = value
            self._status = set_has_var(self._status, pos_e)
    property f:
        def __get__(self):
            if has_var(self._status, pos_f): return self._f
            else: return None
        def __set__(self, value):
            self._f = value
            self._status = set_has_var(self._status, pos_f)
    property i:
        def __get__(self):
            if has_var(self._status, pos_i): return self._i
            else: return None
        def __set__(self, value):
            self._i = value
            self._status = set_has_var(self._status, pos_i)
    property j:
        def __get__(self):
            if has_var(self._status, pos_j): return self._j
            else: return None
        def __set__(self, value):
            self._j = value
            self._status = set_has_var(self._status, pos_j)
    property is_move:
        def __get__(self):
            if has_var(self._status, pos_is_move): return True
            else: return False
        def __set__(self, value):
            if value: self._status = set_has_var(self._status, pos_is_move)
            else: self._status = unset_has_var(self._status, pos_is_move)
    property relative:
        def __get__(self):
            if has_var(self._status, pos_relative): return True
            else: return False
        def __set__(self, value):
            if value: self._status = set_has_var(self._status, pos_relative)
            else: self._status = unset_has_var(self._status, pos_relative)
    property relative_e:
        def __get__(self):
            if has_var(self._status, pos_relative_e): return True
            else: return False
        def __set__(self, value):
            if value: self._status = set_has_var(self._status, pos_relative_e)
            else: self._status = unset_has_var(self._status, pos_relative_e)
    property extruding:
        def __get__(self):
            if has_var(self._status, pos_extruding): return True
            else: return False
        def __set__(self, value):
            if value: self._status = set_has_var(self._status, pos_extruding)
            else: self._status = unset_has_var(self._status, pos_extruding)
    property current_x:
        def __get__(self):
            if has_var(self._status, pos_current_x): return self._current_x
            else: return None
        def __set__(self, value):
            self._current_x = value
            self._status = set_has_var(self._status, pos_current_x)
    property current_y:
        def __get__(self):
            if has_var(self._status, pos_current_y): return self._current_y
            else: return None
        def __set__(self, value):
            self._current_y = value
            self._status = set_has_var(self._status, pos_current_y)
    property current_z:
        def __get__(self):
            if has_var(self._status, pos_current_z): return self._current_z
            else: return None
        def __set__(self, value):
            self._current_z = value
            self._status = set_has_var(self._status, pos_current_z)
    property current_tool:
        def __get__(self):
            if has_var(self._status, pos_current_tool): return self._status >> 24
            else: return None
        def __set__(self, value):
            # The tool number lives in the top 8 bits of _status
            self._status = (self._status & ((1 << 24) - 1)) | (value << 24)
            self._status = set_has_var(self._status, pos_current_tool)
    property gcview_end_vertex:
        def __get__(self):
            if has_var(self._status, pos_gcview_end_vertex): return self._gcview_end_vertex
            else: return None
        def __set__(self, value):
            self._gcview_end_vertex = value
            self._status = set_has_var(self._status, pos_gcview_end_vertex)
    property raw:
        def __get__(self):
            if has_var(self._status, pos_raw): return self._raw.decode('utf-8')
            else: return None
        def __set__(self, value):
            # WARNING: memory leak could happen here, as we don't do the following :
            # if self._raw != NULL: free(self._raw)
            self._raw = copy_string(value)
            self._status = set_has_var(self._status, pos_raw)
    property command:
        def __get__(self):
            if has_var(self._status, pos_command): return self._command.decode('utf-8')
            else: return None
        def __set__(self, value):
            # WARNING: memory leak could happen here, as we don't do the following :
            # if self._command != NULL: free(self._command)
            self._command = copy_string(value)
            self._status = set_has_var(self._status, pos_command)
cdef class GLightLine:
    """Lighter variant of GLine storing only raw text, command and the
    is_move flag (used when full per-line analysis is not needed)."""
    cdef char* _raw
    cdef char* _command
    cdef uint8_t _status

    __slots__ = ()

    def __cinit__(self):
        self._status = 0
        self._raw = NULL
        self._command = NULL

    def __init__(self, line):
        self.raw = line

    def __dealloc__(self):
        # Free the malloc'ed copies made by copy_string()
        if self._raw != NULL: free(self._raw)
        if self._command != NULL: free(self._command)

    property raw:
        def __get__(self):
            if has_var(self._status, pos_raw): return self._raw.decode('utf-8')
            else: return None
        def __set__(self, value):
            # WARNING: memory leak could happen here, as we don't do the following :
            # if self._raw != NULL: free(self._raw)
            self._raw = copy_string(value)
            self._status = set_has_var(self._status, pos_raw)
    property command:
        def __get__(self):
            if has_var(self._status, pos_command): return self._command.decode('utf-8')
            else: return None
        def __set__(self, value):
            # WARNING: memory leak could happen here, as we don't do the following :
            # if self._command != NULL: free(self._command)
            self._command = copy_string(value)
            self._status = set_has_var(self._status, pos_command)
    property is_move:
        def __get__(self):
            if has_var(self._status, pos_is_move): return True
            else: return False
        def __set__(self, value):
            if value: self._status = set_has_var(self._status, pos_is_move)
            else: self._status = unset_has_var(self._status, pos_is_move)
9,350 | pronterface.py | kliment_Printrun/printrun/pronterface.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import queue
import sys
import time
import threading
import traceback
import io as StringIO
import subprocess
import glob
import logging
import re
try: import simplejson as json
except ImportError: import json
from . import pronsole
from . import printcore
from printrun.spoolmanager import spoolmanager_gui
from .utils import install_locale, setup_logging, dosify, \
iconfile, configfile, format_time, format_duration, \
hexcolor_to_float, parse_temperature_report, \
prepare_command, check_rgb_color, check_rgba_color, compile_file, \
write_history_to, read_history_from
install_locale('pronterface')
try:
import wx
import wx.adv
if wx.VERSION < (4,):
raise ImportError()
except:
logging.error(_("WX >= 4 is not installed. This program requires WX >= 4 to run."))
raise
from .gui.widgets import SpecialButton, MacroEditor, PronterOptions, ButtonEdit, get_space
# Default main window size; Windows needs a bit more vertical room for its
# widget chrome, so it gets a taller default below.
winsize = (800, 500)
# Module-level index of the currently displayed G-code layer.
layerindex = 0
if os.name == "nt":
    winsize = (800, 530)
# Set to True during shutdown so background threads can bail out early.
pronterface_quitting = False
class PronterfaceQuitException(Exception):
    """Raised to abort in-progress work when Pronterface is quitting."""
    pass
from .gui import MainWindow
from .settings import wxSetting, HiddenSetting, StringSetting, SpinSetting, \
FloatSpinSetting, BooleanSetting, StaticTextSetting, ColorSetting, ComboSetting
from printrun import gcoder
from .pronsole import REPORT_NONE, REPORT_POS, REPORT_TEMP, REPORT_MANUAL
def format_length(mm, fractional=2):
    """Format a length given in millimeters as a human-readable string.

    Picks mm, cm or m depending on magnitude; *fractional* is the number
    of decimal places in the formatted value.
    """
    if mm <= 10:
        value, suffix = mm, 'mm'
    elif mm < 1000:
        value, suffix = mm / 10, 'cm'
    else:
        value, suffix = mm / 1000, 'm'
    return f'{value:.{fractional}f}{suffix}'
class ConsoleOutputHandler:
    """Handle console output. All messages go through the logging submodule. We setup a logging handler to get logged messages and write them to both stdout (unless a log file path is specified, in which case we add another logging handler to write to this file) and the log panel.
    We also redirect stdout and stderr to ourself to catch print messages and al."""
    def __init__(self, target, log_path):
        # Keep references to the real streams so __del__ can restore them
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        # Redirect both streams to this object so plain print() is captured
        sys.stdout = self
        sys.stderr = self
        # Only echo to the original stdout when no log file was requested
        self.print_on_stdout = not log_path
        if log_path:
            setup_logging(self, log_path, reset_handlers = True)
        else:
            setup_logging(sys.stdout, reset_handlers = True)
        # Callable that receives every chunk of output (the GUI log panel)
        self.target = target
    def __del__(self):
        # Restore the original streams when this handler goes away
        sys.stdout = self.stdout
        sys.stderr = self.stderr
    def write(self, data):
        # Forward to the log panel first; never let a GUI error break logging
        try:
            self.target(data)
        except:
            pass
        if self.print_on_stdout:
            self.stdout.write(data)
    def flush(self):
        # Only the real stdout needs flushing; the panel updates immediately
        if self.stdout:
            self.stdout.flush()
class PronterWindow(MainWindow, pronsole.pronsole):
    # Backing attribute for the fgcode property (None when no file is loaded)
    _fgcode = None
    # Timestamp of the last progress update sent to the printer display
    printer_progress_time = time.time()
    def _get_fgcode(self):
        return self._fgcode
    def _set_fgcode(self, value):
        # Assigning a new G-code model invalidates all excluder state
        self._fgcode = value
        self.excluder = None
        self.excluder_e = None
        self.excluder_z_abs = None
        self.excluder_z_rel = None
    fgcode = property(_get_fgcode, _set_fgcode)
    def _get_display_graph(self):
        # Read-through to the user setting controlling the temperature graph
        return self.settings.tempgraph
    display_graph = property(_get_display_graph)
    def _get_display_gauges(self):
        # Read-through to the user setting controlling the temperature gauges
        return self.settings.tempgauges
    display_gauges = property(_get_display_gauges)
    def __init__(self, app, filename = None, size = winsize):
        """Build the main Pronterface window.

        app: the wx.App instance
        filename: optional G-code file to load on startup
        size: initial window size (overridden by the saved size settings)
        """
        pronsole.pronsole.__init__(self)
        self.app = app
        self.window_ready = False
        self.ui_ready = False
        self._add_settings(size)
        self.pauseScript = None # "pause.gcode"
        self.endScript = None # "end.gcode"
        self.filename = filename
        self.capture_skip = {}
        self.capture_skip_newline = False
        self.fgcode = None
        self.excluder = None
        self.slicep = None
        self.current_pos = [0, 0, 0]
        self.paused = False
        self.uploading = False
        self.sentglines = queue.Queue(0)
        # Control-panel buttons: label, command, color, tooltip
        self.cpbuttons = {
            "motorsoff": SpecialButton(_("Motors off"), ("M84"), (250, 250, 250), _("Switch all motors off")),
            "extrude": SpecialButton(_("Extrude"), ("pront_extrude"), (225, 200, 200), _("Advance extruder by set length")),
            "reverse": SpecialButton(_("Reverse"), ("pront_reverse"), (225, 200, 200), _("Reverse extruder by set length")),
        }
        self.custombuttons = []
        self.btndict = {}
        self.filehistory = None
        self.autoconnect = False
        self.autoscrolldisable = False
        # May set autoconnect/custombuttons/etc. from .pronsolerc and argv
        self.parse_cmdline(sys.argv[1:])
        # Copy the configured G-code viewer colors onto the instance as floats
        for field in dir(self.settings):
            if field.startswith("_gcview_color_"):
                cleanname = field[1:]
                color = hexcolor_to_float(getattr(self.settings, cleanname), 4)
                setattr(self, cleanname, list(color))
        # FIXME: We need to initialize the main window after loading the
        # configs to restore the size, but this might have some unforeseen
        # consequences.
        # -- Okai, it seems it breaks things like update_gviz_params ><
        os.putenv("UBUNTU_MENUPROXY", "0")
        size = (self.settings.last_window_width, self.settings.last_window_height)
        MainWindow.__init__(self, None, title = _("Pronterface"), size = size)
        if self.settings.last_window_maximized:
            self.Maximize()
        self.SetIcon(wx.Icon(iconfile("pronterface.png"), wx.BITMAP_TYPE_PNG))
        self.Bind(wx.EVT_SIZE, self.on_resize)
        self.Bind(wx.EVT_MAXIMIZE, self.on_maximize)
        self.window_ready = True
        self.Bind(wx.EVT_CLOSE, self.closewin)
        self.Bind(wx.EVT_CHAR_HOOK, self.on_key)
        # set feedrates in printcore for pause/resume
        self.p.xy_feedrate = self.settings.xy_feedrate
        self.p.z_feedrate = self.settings.z_feedrate
        self.panel.SetBackgroundColour(self.bgcolor)
        customdict = {}
        # Migrate legacy custombtn.txt button definitions into .pronsolerc
        try:
            exec(compile_file(configfile("custombtn.txt")), customdict)
            if len(customdict["btns"]):
                if not len(self.custombuttons):
                    try:
                        self.custombuttons = customdict["btns"]
                        for n in range(len(self.custombuttons)):
                            self.cbutton_save(n, self.custombuttons[n])
                        os.rename("custombtn.txt", "custombtn.old")
                        rco = open("custombtn.txt", "w")
                        rco.write(_("# I moved all your custom buttons into .pronsolerc.\n# Please don't add them here any more.\n# Backup of your old buttons is in custombtn.old\n"))
                        rco.close()
                    except IOError as x:
                        logging.error(str(x))
                else:
                    logging.warning(_("Note!!! You have specified custom buttons in both custombtn.txt and .pronsolerc"))
                    logging.warning(_("Ignoring custombtn.txt. Remove all current buttons to revert to custombtn.txt"))
        except:
            pass
        self.menustrip = wx.MenuBar()
        self.reload_ui()
        # disable all printer controls until we connect to a printer
        self.gui_set_disconnected()
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetStatusText(_("Not connected to printer."))
        # Route logging/print output into the GUI log panel
        self.t = ConsoleOutputHandler(self.catchprint, self.settings.log_path)
        self.stdout = sys.stdout
        self.slicing = False
        self.loading_gcode = False
        self.loading_gcode_message = ""
        self.mini = False
        # Wire printcore callbacks to this window's handlers
        self.p.sendcb = self.sentcb
        self.p.preprintsendcb = self.preprintsendcb
        self.p.printsendcb = self.printsentcb
        self.p.startcb = self.startcb
        self.p.endcb = self.endcb
        self.cur_button = None
        # State saved before a disconnect, used by the Recover feature
        self.predisconnect_mainqueue = None
        self.predisconnect_queueindex = None
        self.predisconnect_layer = None
        self.hsetpoint = 0.0
        self.bsetpoint = 0.0
        if self.autoconnect:
            self.connect()
        if self.filename is not None:
            self.do_load(self.filename)
        if self.settings.monitor:
            self.update_monitor()
# --------------------------------------------------------------
# Main interface handling
# --------------------------------------------------------------
    def reset_ui(self):
        """Reset UI state tracked here on top of MainWindow's own reset."""
        MainWindow.reset_ui(self)
        self.custombuttons_widgets = []
    def reload_ui(self, *args):
        """Tear down and rebuild the whole UI (used when the interface mode
        or other UI-affecting settings change), preserving log contents,
        command history and the stateful widgets across the rebuild."""
        if not self.window_ready: return
        # Suspend monitoring while widgets are being destroyed/recreated
        temp_monitor = self.settings.monitor
        self.settings.monitor = False
        self.update_monitor()
        self.Freeze()
        # If UI is being recreated, delete current one
        if self.ui_ready:
            # Store log console content
            logcontent = self.logbox.GetValue()
            self.menustrip.SetMenus([])
            if len(self.commandbox.history):
                # save current command box history
                if not os.path.exists(self.history_file):
                    if not os.path.exists(self.cache_dir):
                        os.makedirs(self.cache_dir)
                write_history_to(self.history_file, self.commandbox.history)
            # Create a temporary panel to reparent widgets with state we want
            # to retain across UI changes
            temppanel = wx.Panel(self)
            # TODO: add viz widgets to statefulControls
            for control in self.statefulControls:
                control.GetContainingSizer().Detach(control)
                control.Reparent(temppanel)
            self.panel.DestroyChildren()
            self.gwindow.Destroy()
            self.reset_ui()
        # Create UI
        self.create_menu()
        self.update_recent_files("recentfiles", self.settings.recentfiles)
        self.splitterwindow = None
        if self.settings.uimode in (_("Tabbed"), _("Tabbed with platers")):
            self.createTabbedGui()
        else:
            self.createGui(self.settings.uimode == _("Compact"),
                           self.settings.controlsmode == "Mini")
        if self.splitterwindow:
            self.splitterwindow.SetSashPosition(self.settings.last_sash_position)
            def splitter_resize(event):
                self.splitterwindow.UpdateSize()
            self.splitterwindow.Bind(wx.EVT_SIZE, splitter_resize)
            def sash_position_changed(event):
                self.set("last_sash_position", self.splitterwindow.GetSashPosition())
            self.splitterwindow.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, sash_position_changed)
        # Set gcview parameters here as they don't get set when viewers are
        # created
        self.update_gcview_params()
        # Finalize
        if self.p.online:
            self.gui_set_connected()
        if self.ui_ready:
            # Restore the saved log content and drop the holding panel
            self.logbox.SetValue(logcontent)
            temppanel.Destroy()
            self.panel.Layout()
        if self.fgcode:
            self.start_viz_thread()
        self.ui_ready = True
        # Re-enable monitoring now that the new widgets exist
        self.settings.monitor = temp_monitor
        self.commandbox.history = read_history_from(self.history_file)
        self.commandbox.histindex = len(self.commandbox.history)
        self.Thaw()
        if self.settings.monitor:
            self.update_monitor()
    def on_resize(self, event):
        # Defer the actual work: the size isn't final while the event fires
        wx.CallAfter(self.on_resize_real)
        event.Skip()
def on_resize_real(self):
maximized = self.IsMaximized()
self.set("last_window_maximized", maximized)
if not maximized and not self.IsIconized():
size = self.GetSize()
self.set("last_window_width", size[0])
self.set("last_window_height", size[1])
    def on_maximize(self, event):
        # Persist the maximized state so it can be restored on next start
        self.set("last_window_maximized", self.IsMaximized())
        event.Skip()
    def on_exit(self, event):
        # Routes through the EVT_CLOSE handler (closewin) for cleanup
        self.Close()
    def on_settings_change(self, changed_settings):
        # Forward settings changes to the G-code viewer, if one exists
        if self.gviz:
            self.gviz.on_settings_change(changed_settings)
    def on_key(self, event):
        """Global key hook: single-letter focus shortcuts and Alt-mnemonic
        handling for the toolbar buttons on platforms (MSW) where wx only
        processes mnemonics when focus is inside the parent panel."""
        # Ignore plain keystrokes typed into text fields; modified ones pass
        if not isinstance(event.EventObject, (wx.TextCtrl, wx.ComboBox)) \
            or event.HasModifiers():
            ch = chr(event.KeyCode)
            # Focus-jump shortcuts: Bed temp, Hotend temp, Jog, Send, Viewer
            keys = {'B': self.btemp, 'H': self.htemp, 'J': self.xyb, 'S': self.commandbox,
                    'V': self.gviz}
            widget = keys.get(ch)
            # ignore Alt+(S, H), so it can open Settings, Help menu
            if widget and (ch not in 'SH' or not event.AltDown()) \
                and not (event.ControlDown() and ch == 'V'
                         and event.EventObject is self.commandbox):
                widget.SetFocus()
                return
            # On MSWindows button mnemonics are processed only if the
            # focus is in the parent panel
            if event.AltDown() and ch < 'Z':
                in_toolbar = self.toolbarsizer.GetItem(event.EventObject)
                candidates = (self.connectbtn, self.connectbtn_cb_var), \
                    (self.pausebtn, self.pause), \
                    (self.printbtn, self.printfile)
                for ctl, cb in candidates:
                    match = '&' + ch in ctl.Label.upper()
                    handled = in_toolbar and match
                    if handled:
                        break
                    # react to 'P' even for 'Restart', 'Resume'
                    # print('match', match, 'handled', handled, ctl.Label, ctl.Enabled)
                    if (match or ch == 'P' and ctl != self.connectbtn) and ctl.Enabled:
                        # print('call', ch, cb)
                        cb()
                        # react to only 1 of 'P' buttons, prefer Resume
                        return
        event.Skip()
    def closewin(self, e):
        # Intercept window close and route through do_exit for confirmation
        # and cleanup instead of letting wx destroy the window directly
        e.StopPropagation()
        self.do_exit("force")
    def kill(self, e=None):
        """Shut Pronterface down: persist command history and settings,
        confirm if a print is running, disconnect and destroy the windows."""
        if len(self.commandbox.history):
            # save current command box history
            history = self.history_file
            if not os.path.exists(history):
                if not os.path.exists(self.cache_dir):
                    os.makedirs(self.cache_dir)
            write_history_to(history, self.commandbox.history)
        # Give the user a chance to abort quitting mid-print
        if self.p.printing or self.p.paused:
            dlg = wx.MessageDialog(self, _("Print in progress ! Are you really sure you want to quit ?"), _("Exit"), wx.YES_NO | wx.ICON_WARNING)
            if dlg.ShowModal() == wx.ID_NO:
                return
        pronsole.pronsole.kill(self)
        # Signal background code that the application is going away
        global pronterface_quitting
        pronterface_quitting = True
        self.p.recvcb = None
        self.p.disconnect()
        # Persist feedrates only when they were actually edited this session
        if hasattr(self, "feedrates_changed"):
            self.save_in_rc("set xy_feedrate", "set xy_feedrate %d" % self.settings.xy_feedrate)
            self.save_in_rc("set z_feedrate", "set z_feedrate %d" % self.settings.z_feedrate)
            self.save_in_rc("set e_feedrate", "set e_feedrate %d" % self.settings.e_feedrate)
        if self.settings.last_extrusion != self.settings.default_extrusion:
            self.save_in_rc("set last_extrusion", "set last_extrusion %d" % self.settings.last_extrusion)
        if self.excluder:
            self.excluder.close_window()
        wx.CallAfter(self.gwindow.Destroy)
        wx.CallAfter(self.Destroy)
@property
def bgcolor(self):
return (wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWFRAME)
if self.settings.bgcolor == 'auto'
else self.settings.bgcolor)
# --------------------------------------------------------------
# Main interface actions
# --------------------------------------------------------------
def do_monitor(self, l = ""):
if l.strip() == "":
self.set("monitor", not self.settings.monitor)
elif l.strip() == "off":
self.set("monitor", False)
else:
try:
self.monitor_interval = float(l)
self.set("monitor", self.monitor_interval > 0)
except:
self.log(_("Invalid period given."))
if self.settings.monitor:
self.log(_("Monitoring printer."))
else:
self.log(_("Done monitoring."))
def do_pront_extrude(self, l = ""):
if self.p.printing and not self.paused:
self.log(_("Please pause or stop print before extruding."))
return
feed = self.settings.e_feedrate
self.do_extrude_final(self.edist.GetValue(), feed)
def do_pront_reverse(self, l = ""):
if self.p.printing and not self.paused:
self.log(_("Please pause or stop print before reversing."))
return
feed = self.settings.e_feedrate
self.do_extrude_final(- self.edist.GetValue(), feed)
def do_settemp(self, l = ""):
try:
if not isinstance(l, str) or not len(l):
l = str(self.htemp.GetValue().split()[0])
l = l.lower().replace(", ", ".")
for i in self.temps.keys():
l = l.replace(i, self.temps[i])
f = float(l)
if f >= 0:
if self.p.online:
self.p.send_now("M104 S" + l)
self.log(_("Setting hotend temperature to %g degrees Celsius.") % f)
self.sethotendgui(f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0."))
except Exception as x:
self.logError(_("You must enter a temperature. (%s)") % (repr(x),))
def do_bedtemp(self, l = ""):
try:
if not isinstance(l, str) or not len(l):
l = str(self.btemp.GetValue().split()[0])
l = l.lower().replace(", ", ".")
for i in self.bedtemps.keys():
l = l.replace(i, self.bedtemps[i])
f = float(l)
if f >= 0:
if self.p.online:
self.p.send_now("M140 S" + l)
self.log(_("Setting bed temperature to %g degrees Celsius.") % f)
self.setbedgui(f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the bed off entirely, set its temperature to 0."))
except Exception as x:
self.logError(_("You must enter a temperature. (%s)") % (repr(x),))
def do_setspeed(self, l = ""):
try:
if not isinstance(l, str) or not len(l):
l = str(self.speed_slider.GetValue())
else:
l = l.lower()
speed = int(l)
if self.p.online:
self.p.send_now("M220 S" + l)
self.log(_("Setting print speed factor to %d%%.") % speed)
else:
self.logError(_("Printer is not online."))
except Exception as x:
self.logError(_("You must enter a speed. (%s)") % (repr(x),))
def do_setflow(self, l = ""):
try:
if not isinstance(l, str) or not len(l):
l = str(self.flow_slider.GetValue())
else:
l = l.lower()
flow = int(l)
if self.p.online:
self.p.send_now("M221 S" + l)
self.log(_("Setting print flow factor to %d%%.") % flow)
else:
self.logError(_("Printer is not online."))
except Exception as x:
self.logError(_("You must enter a flow. (%s)") % (repr(x),))
    def setbedgui(self, f):
        """Reflect a new bed target temperature *f* in the GUI widgets
        (gauge, graph, buttons, text field); f == 0 means "off"."""
        self.bsetpoint = f
        if self.display_gauges: self.bedtgauge.SetTarget(int(f))
        if self.display_graph: wx.CallAfter(self.graph.SetBedTargetTemperature, int(f))
        if f > 0:
            # Active target: highlight the "set" button, remember the value
            wx.CallAfter(self.btemp.SetValue, str(f))
            self.set("last_bed_temperature", str(f))
            wx.CallAfter(self.setboff.SetBackgroundColour, None)
            wx.CallAfter(self.setboff.SetForegroundColour, None)
            wx.CallAfter(self.setbbtn.SetBackgroundColour, "#FFAA66")
            wx.CallAfter(self.setbbtn.SetForegroundColour, "#660000")
            wx.CallAfter(self.btemp.SetBackgroundColour, "#FFDABB")
        else:
            # Heater off: highlight the "off" button instead
            wx.CallAfter(self.setboff.SetBackgroundColour, "#0044CC")
            wx.CallAfter(self.setboff.SetForegroundColour, "white")
            wx.CallAfter(self.setbbtn.SetBackgroundColour, None)
            wx.CallAfter(self.setbbtn.SetForegroundColour, None)
            wx.CallAfter(self.btemp.SetBackgroundColour, "white")
        wx.CallAfter(self.btemp.Refresh)
    def sethotendgui(self, f):
        """Reflect a new hotend target temperature *f* in the GUI widgets
        (gauge, graph, buttons, text field); f == 0 means "off"."""
        self.hsetpoint = f
        if self.display_gauges: self.hottgauge.SetTarget(int(f))
        if self.display_graph: wx.CallAfter(self.graph.SetExtruder0TargetTemperature, int(f))
        if f > 0:
            # Active target: highlight the "set" button, remember the value
            wx.CallAfter(self.htemp.SetValue, str(f))
            self.set("last_temperature", str(f))
            wx.CallAfter(self.settoff.SetBackgroundColour, None)
            wx.CallAfter(self.settoff.SetForegroundColour, None)
            wx.CallAfter(self.settbtn.SetBackgroundColour, "#FFAA66")
            wx.CallAfter(self.settbtn.SetForegroundColour, "#660000")
            wx.CallAfter(self.htemp.SetBackgroundColour, "#FFDABB")
        else:
            # Heater off: highlight the "off" button instead
            wx.CallAfter(self.settoff.SetBackgroundColour, "#0044CC")
            wx.CallAfter(self.settoff.SetForegroundColour, "white")
            wx.CallAfter(self.settbtn.SetBackgroundColour, None)
            wx.CallAfter(self.settbtn.SetForegroundColour, None)
            wx.CallAfter(self.htemp.SetBackgroundColour, "white")
        wx.CallAfter(self.htemp.Refresh)
def rescanports(self, event = None):
scanned = self.scanserial()
portslist = list(scanned)
if self.settings.port != "" and self.settings.port not in portslist:
portslist.append(self.settings.port)
self.serialport.Clear()
self.serialport.AppendItems(portslist)
if os.path.exists(self.settings.port) or self.settings.port in scanned:
self.serialport.SetValue(self.settings.port)
elif portslist:
self.serialport.SetValue(portslist[0])
def appendCommandHistory(self):
cmd = self.commandbox.Value
hist = self.commandbox.history
append = cmd and (not hist or hist[-1] != cmd)
if append:
self.commandbox.history.append(cmd)
return append
    def cbkey(self, e):
        """Handle Up/Down in the command box to navigate command history.

        histindex == len(history) denotes the "fresh line" below the last
        stored entry; navigating down past it is not allowed.
        """
        dir = {wx.WXK_UP: -1, wx.WXK_DOWN: 1}.get(e.KeyCode)
        if dir:
            if self.commandbox.histindex == len(self.commandbox.history):
                if dir == 1:
                    # do not cycle top => bottom
                    return
                # save unsent command before going back
                self.appendCommandHistory()
            # Clamp the new index into [0, len(history)]
            self.commandbox.histindex = max(0, min(self.commandbox.histindex + dir, len(self.commandbox.history)))
            self.commandbox.Value = (self.commandbox.history[self.commandbox.histindex]
                                     if self.commandbox.histindex < len(self.commandbox.history)
                                     else '')
            self.commandbox.SetInsertionPointEnd()
        else:
            e.Skip()
    def plate(self, e):
        """Open the STL plater window (lazy import keeps startup fast)."""
        from . import stlplater as plater
        self.log(_("Plate function activated"))
        plater.StlPlater(size = (800, 580), callback = self.platecb,
                         parent = self,
                         build_dimensions = self.build_dimensions_list,
                         circular_platform = self.settings.circular_bed,
                         simarrange_path = self.settings.simarrange_path,
                         antialias_samples = int(self.settings.antialias3dsamples)).Show()
    def plate_gcode(self, e):
        """Open the G-code plater window (lazy import keeps startup fast)."""
        from . import gcodeplater as plater
        self.log(_("G-Code plate function activated"))
        plater.GcodePlater(size = (800, 580), callback = self.platecb,
                           parent = self,
                           build_dimensions = self.build_dimensions_list,
                           circular_platform = self.settings.circular_bed,
                           antialias_samples = int(self.settings.antialias3dsamples)).Show()
def platecb(self, name):
self.log(_("Plated %s") % name)
self.loadfile(None, name)
if self.settings.uimode in (_("Tabbed"), _("Tabbed with platers")):
# Switch to page 1 (Status tab)
self.notebook.SetSelection(1)
    def do_editgcode(self, e = None):
        """Open the currently loaded file's raw G-code in the macro editor;
        doneediting() writes the result back when the user finishes."""
        if self.filename is not None:
            MacroEditor(self.filename, [line.raw for line in self.fgcode], self.doneediting, True)
def doneediting(self, gcode):
open(self.filename, "w").write("\n".join(gcode))
wx.CallAfter(self.loadfile, None, self.filename)
def sdmenu(self, e):
obj = e.GetEventObject()
popupmenu = wx.Menu()
item = popupmenu.Append(-1, _("SD Upload"))
if not self.fgcode:
item.Enable(False)
self.Bind(wx.EVT_MENU, self.upload, id = item.GetId())
item = popupmenu.Append(-1, _("SD Print"))
self.Bind(wx.EVT_MENU, self.sdprintfile, id = item.GetId())
self.panel.PopupMenu(popupmenu, obj.GetPosition())
    def htemp_change(self, event):
        # If a hotend target is already active, apply the new value right away
        if self.hsetpoint > 0:
            self.do_settemp("")
        wx.CallAfter(self.htemp.SetInsertionPoint, 0)
    def btemp_change(self, event):
        # If a bed target is already active, apply the new value right away
        if self.bsetpoint > 0:
            self.do_bedtemp("")
        wx.CallAfter(self.btemp.SetInsertionPoint, 0)
    def tool_change(self, event):
        # Switch the active extruder to the one selected in the combo box
        self.do_tool(self.extrudersel.GetValue())
    def show_viz_window(self, event):
        # The standalone G-code viewer only makes sense with a file loaded
        if self.fgcode:
            self.gwindow.Show(True)
            self.gwindow.SetToolTip(wx.ToolTip("Mousewheel zooms the display\nShift / Mousewheel scrolls layers"))
            self.gwindow.Raise()
def setfeeds(self, e):
self.feedrates_changed = True
try:
if self.efeedc is not None:
self.settings._set("e_feedrate", self.efeedc.GetValue())
except:
pass
try:
self.settings._set("z_feedrate", self.zfeedc.GetValue())
except:
pass
try:
self.settings._set("xy_feedrate", self.xyfeedc.GetValue())
except:
pass
try:
self.settings._set("last_extrusion", self.edist.GetValue())
except:
pass
def homeButtonClicked(self, axis):
# When user clicks on the XY control, the Z control no longer gets spacebar/repeat signals
self.zb.clearRepeat()
if axis == "x":
self.onecmd('home X')
elif axis == "y": # upper-right
self.onecmd('home Y')
elif axis == "z":
self.onecmd('home Z')
elif axis == "all":
self.onecmd('home')
elif axis == "center":
center_x = self.build_dimensions_list[0] / 2 + self.build_dimensions_list[3]
center_y = self.build_dimensions_list[1] / 2 + self.build_dimensions_list[4]
feed = self.settings.xy_feedrate
self.onecmd('G0 X%s Y%s F%s' % (center_x, center_y, feed))
else:
return
self.p.send_now('M114')
    def clamped_move_message(self):
        """Log that a manual move was blocked by the clamp-jogging option."""
        self.log(_("Manual move outside of the build volume prevented (see the \"Clamp manual moves\" option)."))
def moveXY(self, x, y):
# When user clicks on the XY control, the Z control no longer gets spacebar/repeat signals
self.zb.clearRepeat()
if x != 0:
if self.settings.clamp_jogging:
new_x = self.current_pos[0] + x
if new_x < self.build_dimensions_list[3] or new_x > self.build_dimensions_list[0] + self.build_dimensions_list[3]:
self.clamped_move_message()
return
self.onecmd('move X %s' % x)
elif y != 0:
if self.settings.clamp_jogging:
new_y = self.current_pos[1] + y
if new_y < self.build_dimensions_list[4] or new_y > self.build_dimensions_list[1] + self.build_dimensions_list[4]:
self.clamped_move_message()
return
self.onecmd('move Y %s' % y)
else:
return
self.p.send_now('M114')
def moveZ(self, z):
if z != 0:
if self.settings.clamp_jogging:
new_z = self.current_pos[2] + z
if new_z < self.build_dimensions_list[5] or new_z > self.build_dimensions_list[2] + self.build_dimensions_list[5]:
self.clamped_move_message()
return
self.onecmd('move Z %s' % z)
self.p.send_now('M114')
# When user clicks on the Z control, the XY control no longer gets spacebar/repeat signals
self.xyb.clearRepeat()
    def spacebarAction(self):
        # Spacebar repeats the last jog action on both movement controls
        self.zb.repeatLast()
        self.xyb.repeatLast()
# --------------------------------------------------------------
# Console handling
# --------------------------------------------------------------
    def catchprint(self, l):
        """Called by the Tee operator to write to the log box"""
        # Skip while the window is frozen (e.g. during reload_ui) to avoid
        # queueing updates against widgets that are being rebuilt
        if not self.IsFrozen():
            wx.CallAfter(self.addtexttolog, l)
def addtexttolog(self, text):
try:
max_length = 20000
current_length = self.logbox.GetLastPosition()
if current_length > max_length:
self.logbox.Remove(0, current_length // 10)
currentCaretPosition = self.logbox.GetInsertionPoint()
currentLengthOfText = self.logbox.GetLastPosition()
if self.autoscrolldisable:
self.logbox.Freeze()
currentSelectionStart, currentSelectionEnd = self.logbox.GetSelection()
self.logbox.SetInsertionPointEnd()
self.logbox.AppendText(text)
self.logbox.SetInsertionPoint(currentCaretPosition)
self.logbox.SetSelection(currentSelectionStart, currentSelectionEnd)
self.logbox.Thaw()
else:
self.logbox.SetInsertionPointEnd()
self.logbox.AppendText(text)
except UnicodeError:
self.log(_("Attempted to write invalid text to console, which could be due to an invalid baudrate"))
except Exception as e:
self.log(_("Unhanded exception: "), repr(e))
    def clear_log(self, e):
        """Empty the output console."""
        self.logbox.Clear()
    def set_verbose_communications(self, e):
        # Mirror the menu checkbox into printcore's verbose flag
        self.p.loud = e.IsChecked()
    def set_autoscrolldisable(self, e):
        # Controls whether addtexttolog keeps the caret/selection in place
        self.autoscrolldisable = e.IsChecked()
def sendline(self, e):
command = self.commandbox.Value
if not len(command):
return
logging.info(">>> " + command)
line = self.precmd(str(command))
self.onecmd(line)
self.appendCommandHistory()
self.commandbox.histindex = len(self.commandbox.history)
self.commandbox.Value = ''
# --------------------------------------------------------------
# Main menu handling & actions
# --------------------------------------------------------------
    def create_menu(self):
        """Create main menu (File / Tools / Advanced / [Slic3r] / Settings /
        Help) and bind all its handlers."""
        # File menu
        m = wx.Menu()
        self.Bind(wx.EVT_MENU, self.loadfile, m.Append(-1, _("&Open...")+"\tCtrl+O", _(" Open file")))
        self.savebtn = m.Append(-1, _("&Save..."), _(" Save file"))
        # Disabled until a file is loaded
        self.savebtn.Enable(False)
        self.Bind(wx.EVT_MENU, self.savefile, self.savebtn)
        self.filehistory = wx.FileHistory(maxFiles = 8, idBase = wx.ID_FILE1)
        recent = wx.Menu()
        self.filehistory.UseMenu(recent)
        self.Bind(wx.EVT_MENU_RANGE, self.load_recent_file,
                  id = wx.ID_FILE1, id2 = wx.ID_FILE9)
        m.Append(wx.ID_ANY, _("&Recent Files"), recent)
        self.Bind(wx.EVT_MENU, self.clear_log, m.Append(-1, _("Clear console")+"\tCtrl+L", _(" Clear output console")))
        self.Bind(wx.EVT_MENU, self.on_exit, m.Append(wx.ID_EXIT, _("E&xit"), _(" Closes the Window")))
        self.menustrip.Append(m, _("&File"))
        # Tools Menu
        m = wx.Menu()
        self.Bind(wx.EVT_MENU, self.do_editgcode, m.Append(-1, _("&Edit..."), _(" Edit open file")))
        self.Bind(wx.EVT_MENU, self.plate, m.Append(-1, _("Plater"), _(" Compose 3D models into a single plate")))
        self.Bind(wx.EVT_MENU, self.plate_gcode, m.Append(-1, _("G-Code Plater"), _(" Compose G-Codes into a single plate")))
        self.Bind(wx.EVT_MENU, self.exclude, m.Append(-1, _("Excluder"), _(" Exclude parts of the bed from being printed")))
        self.Bind(wx.EVT_MENU, self.project, m.Append(-1, _("Projector"), _(" Project slices")))
        self.Bind(wx.EVT_MENU,
                  self.show_spool_manager,
                  m.Append(-1, _("Spool Manager"),
                           _(" Manage different spools of filament")))
        self.menustrip.Append(m, _("&Tools"))
        # Advanced Menu
        m = wx.Menu()
        self.recoverbtn = m.Append(-1, _("Recover"), _(" Recover previous print after a disconnect (homes X, Y, restores Z and E status)"))
        # Monkey-patch Disable so later code can grey this item out
        self.recoverbtn.Disable = lambda *a: self.recoverbtn.Enable(False)
        self.Bind(wx.EVT_MENU, self.recover, self.recoverbtn)
        self.menustrip.Append(m, _("&Advanced"))
        # Slic3r menu is only built when the integration option is enabled
        if self.settings.slic3rintegration:
            m = wx.Menu()
            print_menu = wx.Menu()
            filament_menu = wx.Menu()
            printer_menu = wx.Menu()
            m.AppendSubMenu(print_menu, _("Print &settings"))
            m.AppendSubMenu(filament_menu, _("&Filament"))
            m.AppendSubMenu(printer_menu, _("&Printer"))
            menus = {"print": print_menu,
                     "filament": filament_menu,
                     "printer": printer_menu}
            try:
                self.load_slic3r_configs(menus)
                self.menustrip.Append(m, _("&Slic3r"))
            except IOError:
                self.logError(_("Failed to load Slic3r configuration:") +
                              "\n" + traceback.format_exc())
        # Settings menu
        m = wx.Menu()
        self.macros_menu = wx.Menu()
        m.AppendSubMenu(self.macros_menu, _("&Macros"))
        self.Bind(wx.EVT_MENU, self.new_macro, self.macros_menu.Append(-1, _("<&New...>")))
        self.Bind(wx.EVT_MENU, lambda *e: PronterOptions(self), m.Append(-1, _("&Options"), _(" Options dialog")))
        self.Bind(wx.EVT_MENU, lambda x: threading.Thread(target = lambda: self.do_slice("set")).start(), m.Append(-1, _("Slicing settings"), _(" Adjust slicing settings")))
        mItem = m.AppendCheckItem(-1, _("Debug communications"),
                                  _("Print all G-code sent to and received from the printer."))
        m.Check(mItem.GetId(), self.p.loud)
        self.Bind(wx.EVT_MENU, self.set_verbose_communications, mItem)
        mItem = m.AppendCheckItem(-1, _("Don't autoscroll"),
                                  _("Disables automatic scrolling of the console when new text is added"))
        m.Check(mItem.GetId(), self.autoscrolldisable)
        self.Bind(wx.EVT_MENU, self.set_autoscrolldisable, mItem)
        self.menustrip.Append(m, _("&Settings"))
        self.update_macros_menu()
        self.SetMenuBar(self.menustrip)
        # Help menu is appended after SetMenuBar, matching the original
        # ordering — presumably intentional; wx updates the bar in place
        m = wx.Menu()
        self.Bind(wx.EVT_MENU, self.about,
                  m.Append(-1, _("&About Printrun"), _("Show about dialog")))
        self.menustrip.Append(m, _("&Help"))
    def project(self, event):
        """Start Projector tool"""
        # Imported lazily so the projector module only loads on demand
        from printrun import projectlayer
        projectlayer.SettingsFrame(self, self.p).Show()
def exclude(self, event):
"""Start part excluder tool"""
if not self.fgcode:
wx.CallAfter(self.statusbar.SetStatusText, _("No file loaded. Please use load first."))
return
if not self.excluder:
from .excluder import Excluder
self.excluder = Excluder()
self.excluder.pop_window(self.fgcode, bgcolor = self.bgcolor,
build_dimensions = self.build_dimensions_list)
    def show_spool_manager(self, event):
        """Show Spool Manager Window"""
        # The spool manager tracks filament usage across prints
        spoolmanager_gui.SpoolManagerMainWindow(self, self.spool_manager).Show()
    def about(self, event):
        """Show about dialog"""
        info = wx.adv.AboutDialogInfo()
        info.SetIcon(wx.Icon(iconfile("pronterface.png"), wx.BITMAP_TYPE_PNG))
        info.SetName('Printrun')
        info.SetVersion(printcore.__version__)
        description = _("Printrun is a pure Python 3D printing"
                        " (and other types of CNC) host software.")
        # Show the lifetime filament usage tracked in the settings
        description += "\n\n" + \
                       _("%.02fmm of filament have been extruded during prints") \
                       % self.settings.total_filament_used
        info.SetDescription(description)
        info.SetCopyright('(C) 2011 - 2024')
        info.SetWebSite('https://github.com/kliment/Printrun')
        licence = """\
Printrun is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.

Printrun is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE.  See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
Printrun. If not, see <http://www.gnu.org/licenses/>."""
        info.SetLicence(licence)
        # Project credits: developers, artists and translators
        info.AddDeveloper('Kliment Yanev @kliment (code)')
        info.AddDeveloper('Guillaume Seguin @iXce (code)')
        info.AddDeveloper('@DivingDuck (code)')
        info.AddDeveloper('@volconst (code)')
        info.AddDeveloper('Rock Storm @rockstorm101 (code, packaging)')
        info.AddDeveloper('Miro Hrončok @hroncok (code, packaging)')
        info.AddDeveloper('Rob Gilson @D1plo1d (code)')
        info.AddDeveloper('Gary Hodgson @garyhodgson (code)')
        info.AddDeveloper('Neofelis @neofelis2X (code)')
        info.AddDeveloper('Duane Johnson (code,graphics)')
        info.AddDeveloper('Alessandro Ranellucci @alranel (code)')
        info.AddDeveloper('Travis Howse @tjhowse (code)')
        info.AddDeveloper('edef (code)')
        info.AddDeveloper('Steven Devijver (code)')
        info.AddDeveloper('Christopher Keller (code)')
        info.AddDeveloper('Igor Yeremin (code)')
        info.AddDeveloper('Jeremy Hammett @jezmy (code)')
        info.AddDeveloper('Spencer Bliven (code)')
        info.AddDeveloper('Václav \'ax\' Hůla @AxTheB (code)')
        info.AddDeveloper('Félix Sipma (code)')
        info.AddDeveloper('Maja M. @SparkyCola (code)')
        info.AddDeveloper('Francesco Santini @fsantini (code)')
        info.AddDeveloper('Cristopher Olah @colah (code)')
        info.AddDeveloper('Jeremy Kajikawa (code)')
        info.AddDeveloper('Markus Hitter (code)')
        info.AddDeveloper('SkateBoss (code)')
        info.AddDeveloper('Kaz Walker (code)')
        info.AddDeveloper('Brendan Erwin (documentation)')
        info.AddDeveloper('Elias (code)')
        info.AddDeveloper('Jordan Miller (code)')
        info.AddDeveloper('Mikko Sivulainen (code)')
        info.AddDeveloper('Clarence Risher (code)')
        info.AddDeveloper('Guillaume Revaillot (code)')
        info.AddDeveloper('John Tapsell (code)')
        info.AddDeveloper('Youness Alaoui (code)')
        info.AddDeveloper('@eldir (code)')
        info.AddDeveloper('@hg42 (code)')
        info.AddDeveloper('@jglauche (code, documentation)')
        info.AddDeveloper('Ahmet Cem TURAN @ahmetcemturan (icons, code)')
        info.AddDeveloper('Andrew Dalgleish (code)')
        info.AddDeveloper('Benny (documentation)')
        info.AddDeveloper('Chillance (code)')
        info.AddDeveloper('Ilya Novoselov (code)')
        info.AddDeveloper('Joeri Hendriks (code)')
        info.AddDeveloper('Kevin Cole (code)')
        info.AddDeveloper('pinaise (code)')
        info.AddDeveloper('Dratone (code)')
        info.AddDeveloper('ERoth3 (code)')
        info.AddDeveloper('Erik Zalm (code)')
        info.AddDeveloper('Felipe Corrêa da Silva Sanches (code)')
        info.AddDeveloper('Geordie Bilkey (code)')
        info.AddDeveloper('Ken Aaker (code)')
        info.AddDeveloper('Lawrence (documentation)')
        info.AddDeveloper('Loxgen (code)')
        info.AddDeveloper('Matthias Urlichs (code)')
        info.AddDeveloper('N Oliver (code)')
        info.AddDeveloper('@nexus511 (code)')
        info.AddDeveloper('Sergey Shepelev (code)')
        info.AddDeveloper('Simon Maillard (code)')
        info.AddDeveloper('Vanessa Dannenberg (code)')
        info.AddDeveloper('@beardface (code)')
        info.AddDeveloper('@hurzl (code)')
        info.AddDeveloper('Justin Hawkins @beardface (code)')
        info.AddDeveloper('siorai (documentation)')
        info.AddDeveloper('tobbelobb (code)')
        info.AddDeveloper('5ilver (packaging)')
        info.AddDeveloper('Alexander Hiam (code)')
        info.AddDeveloper('Alexander Zangerl (code)')
        info.AddDeveloper('Cameron Currie (code)')
        info.AddDeveloper('Chris DeLuca (documentation)')
        info.AddDeveloper('Colin Gilgenbach (code)')
        info.AddDeveloper('DanLipsitt (code)')
        info.AddDeveloper('Daniel Holth (code)')
        info.AddDeveloper('Denis B (code)')
        info.AddDeveloper('Erik Jonsson (code)')
        info.AddDeveloper('Felipe Acebes (code)')
        info.AddDeveloper('Florian Gilcher (code)')
        info.AddDeveloper('Henrik Brix Andersen (code)')
        info.AddDeveloper('Jan Wildeboer (documentation)')
        info.AddDeveloper('Javier Rios (code)')
        info.AddDeveloper('Jay Proulx (code)')
        info.AddDeveloper('Jim Morris (code)')
        info.AddDeveloper('Kyle Evans (code)')
        info.AddDeveloper('Lenbok (code)')
        info.AddDeveloper('Lukas Erlacher (code)')
        info.AddDeveloper('Michael Andresen @blddk (code)')
        info.AddDeveloper('NeoTheFox (code)')
        info.AddDeveloper('Nicolas Dandrimont (documentation)')
        info.AddDeveloper('OhmEye (code)')
        info.AddDeveloper('OliverEngineer (code)')
        info.AddDeveloper('Paul Telfort (code)')
        info.AddDeveloper('Sebastian \'Swift Geek\' Grzywna (code)')
        info.AddDeveloper('Senthil (documentation)')
        info.AddDeveloper('Sigma-One (code)')
        info.AddDeveloper('Spacexula (documentation)')
        info.AddDeveloper('Stefan Glatzel (code)')
        info.AddDeveloper('Stefanowicz (code)')
        info.AddDeveloper('Steven (code)')
        info.AddDeveloper('Tyler Hovanec (documentation)')
        info.AddDeveloper('Xabi Xab (code)')
        info.AddDeveloper('Xoan Sampaiño (code)')
        info.AddDeveloper('Yuri D\'Elia (code)')
        info.AddDeveloper('drf5n (code)')
        info.AddDeveloper('evilB (documentation)')
        info.AddDeveloper('fieldOfView (code)')
        info.AddDeveloper('jbh (code)')
        info.AddDeveloper('kludgineer (code)')
        info.AddDeveloper('l4nce0 (code)')
        info.AddDeveloper('palob (code)')
        info.AddDeveloper('russ (code)')
        info.AddArtist('Ahmet Cem TURAN @ahmetcemturan (icons, code)')
        info.AddArtist('Duane Johnson (code,graphics)')
        info.AddTranslator('freddii (German translation)')
        info.AddTranslator('Christian Metzen @metzench (German translation)')
        info.AddTranslator('Cyril Laguilhon-Debat (French translation)')
        info.AddTranslator('@AvagSayan (Armenian translation)')
        info.AddTranslator('Jonathan Marsden (French translation)')
        info.AddTranslator('Ruben Lubbes (NL translation)')
        info.AddTranslator('aboobed (Arabic translation)')
        info.AddTranslator('Alessandro Ranellucci @alranel (Italian translation)')
        wx.adv.AboutBox(info)
# --------------------------------------------------------------
# Settings & command line handling (including update callbacks)
# --------------------------------------------------------------
    def _add_settings(self, size):
        """Register all Pronterface-specific settings.

        size: (width, height) tuple stored as the default window size.
        Each setting's last string argument is its group ("Printer", "UI",
        "Viewer", "Colors", ...); the optional second argument to _add is a
        callback invoked when the value changes.  Registration order here
        determines display order in the options dialog.
        """
        # Printer / hardware behavior
        self.settings._add(BooleanSetting("monitor", True, _("Monitor printer status"), _("Regularly monitor printer temperatures (required to have functional temperature graph or gauges)"), "Printer"), self.update_monitor)
        self.settings._add(StringSetting("simarrange_path", "", _("Simarrange command"), _("Path to the simarrange binary to use in the STL plater"), "External"))
        self.settings._add(BooleanSetting("circular_bed", False, _("Circular build platform"), _("Draw a circular (or oval) build platform instead of a rectangular one"), "Printer"), self.update_bed_viz)
        self.settings._add(SpinSetting("extruders", 0, 1, 5, _("Extruders count"), _("Number of extruders"), "Printer"))
        self.settings._add(BooleanSetting("clamp_jogging", False, _("Clamp manual moves"), _("Prevent manual moves from leaving the specified build dimensions"), "Printer"))
        self.settings._add(BooleanSetting("display_progress_on_printer", False, _("Display progress on printer"), _("Show progress on printers display (sent via M117, might not be supported by all printers)"), "Printer"))
        self.settings._add(SpinSetting("printer_progress_update_interval", 10., 0, 120, _("Printer progress update interval"), _("Interval in which pronterface sends the progress to the printer if enabled, in seconds"), "Printer"))
        self.settings._add(BooleanSetting("cutting_as_extrusion", True, _("Display cutting moves"), _("Show moves where spindle is active as printing moves"), "Printer"))
        # User interface layout and integrations
        self.settings._add(ComboSetting("uimode", _("Standard"), [_("Standard"), _("Compact"), ], _("Interface mode"), _("Standard interface is a one-page, three columns layout with controls/visualization/log\nCompact mode is a one-page, two columns layout with controls + log/visualization"), "UI"), self.reload_ui)
        # self.settings._add(ComboSetting("uimode", _("Standard"), [_("Standard"), _("Compact"), _("Tabbed"), _("Tabbed with platers")], _("Interface mode"), _("Standard interface is a one-page, three columns layout with controls/visualization/log\nCompact mode is a one-page, two columns layout with controls + log/visualization"), "UI"), self.reload_ui)
        self.settings._add(ComboSetting("controlsmode", _("Standard"), (_("Standard"), _("Mini"), ), _("Controls mode"), _("Standard controls include all controls needed for printer setup and calibration, while Mini controls are limited to the ones needed for daily printing"), "UI"), self.reload_ui)
        self.settings._add(BooleanSetting("slic3rintegration", False, _("Enable Slic3r integration"), _("Add a menu to select Slic3r profiles directly from Pronterface"), "UI"), self.reload_ui)
        self.settings._add(BooleanSetting("slic3rupdate", False, _("Update Slic3r default presets"), _("When selecting a profile in Slic3r integration menu, also save it as the default Slic3r preset"), "UI"))
        # Visualization (2D/3D viewer) options
        self.settings._add(ComboSetting("mainviz", "3D", ("2D", "3D", _("None")), _("Main visualization"), _("Select visualization for main window."), "Viewer", 4*get_space('settings')), self.reload_ui)
        self.settings._add(BooleanSetting("viz3d", False, _("Use 3D in GCode viewer window"), _("Use 3D mode instead of 2D layered mode in the visualization window"), "Viewer"), self.reload_ui)
        self.settings._add(StaticTextSetting("separator_3d_viewer", _("3D viewer options"), "", group = "Viewer"))
        self.settings._add(BooleanSetting("light3d", False, _("Use a lighter 3D visualization"), _("Use a lighter visualization with simple lines instead of extruded paths for 3D viewer"), "Viewer"), self.reload_ui)
        self.settings._add(BooleanSetting("perspective", False, _("Use a perspective view instead of orthographic"), _("A perspective view looks more realistic, but is a bit more confusing to navigate"), "Viewer"), self.reload_ui)
        self.settings._add(ComboSetting("antialias3dsamples", "0", ("0", "2", "4", "8"), _("Number of anti-aliasing samples"), _("Amount of anti-aliasing samples used in the 3D viewer"), "Viewer", 4*get_space('settings')), self.reload_ui)
        self.settings._add(BooleanSetting("trackcurrentlayer3d", False, _("Track current layer in main 3D view"), _("Track the currently printing layer in the main 3D visualization"), "Viewer"))
        self.settings._add(FloatSpinSetting("gcview_path_width", 0.4, 0.01, 2, _("Extrusion width for 3D viewer"), _("Width of printed path in 3D viewer"), "Viewer", increment = 0.05), self.update_gcview_params)
        self.settings._add(FloatSpinSetting("gcview_path_height", 0.3, 0.01, 2, _("Layer height for 3D viewer"), _("Height of printed path in 3D viewer"), "Viewer", increment = 0.05), self.update_gcview_params)
        self.settings._add(BooleanSetting("tempgraph", True, _("Display temperature graph"), _("Display time-lapse temperature graph"), "UI"), self.reload_ui)
        self.settings._add(BooleanSetting("tempgauges", False, _("Display temperature gauges"), _("Display graphical gauges for temperatures visualization"), "UI"), self.reload_ui)
        self.settings._add(BooleanSetting("lockbox", False, _("Display interface lock checkbox"), _("Display a checkbox that, when check, locks most of Pronterface"), "UI"), self.reload_ui)
        self.settings._add(BooleanSetting("lockonstart", False, _("Lock interface upon print start"), _("If lock checkbox is enabled, lock the interface when starting a print"), "UI"))
        self.settings._add(BooleanSetting("refreshwhenloading", True, _("Update UI during G-Code load"), _("Regularly update visualization during the load of a G-Code file"), "UI"))
        # Hidden settings: persisted UI state, not shown in the options dialog
        self.settings._add(HiddenSetting("last_window_width", size[0]))
        self.settings._add(HiddenSetting("last_window_height", size[1]))
        self.settings._add(HiddenSetting("last_window_maximized", False))
        self.settings._add(HiddenSetting("last_sash_position", -1))
        self.settings._add(HiddenSetting("last_bed_temperature", 0.0))
        self.settings._add(HiddenSetting("last_file_path", ""))
        self.settings._add(HiddenSetting("last_file_filter", 0))
        self.settings._add(HiddenSetting("last_temperature", 0.0))
        self.settings._add(StaticTextSetting("separator_2d_viewer", _("2D viewer options"), "", group = "Viewer"))
        self.settings._add(FloatSpinSetting("preview_extrusion_width", 0.5, 0, 10, _("Preview extrusion width"), _("Width of Extrusion in Preview"), "Viewer", increment = 0.1), self.update_gviz_params)
        self.settings._add(SpinSetting("preview_grid_step1", 10., 0, 200, _("Fine grid spacing"), _("Fine Grid Spacing"), "Viewer"), self.update_gviz_params)
        self.settings._add(SpinSetting("preview_grid_step2", 50., 0, 200, _("Coarse grid spacing"), _("Coarse Grid Spacing"), "Viewer"), self.update_gviz_params)
        # Colors: general UI, temperature graph, then 3D viewer
        self.settings._add(StaticTextSetting("separator_colors1", _("General"), "", group = "Colors"))
        self.settings._add(ColorSetting("bgcolor", self._preferred_bgcolour_hex(), _("Background color"), _("Pronterface background color"), "Colors", isRGBA=False), self.reload_ui)
        self.settings._add(StaticTextSetting("separator_colors2", _("Temperature Graph"), "", group = "Colors"))
        self.settings._add(ColorSetting("graph_color_background", "#FAFAC7", _("Graph background color"), _("Color of the temperature graph background"), "Colors", isRGBA=False), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_text", "#172C2C", _("Graph text color"), _("Color of the temperature graph text"), "Colors", isRGBA=False), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_grid", "#5A5A5A", _("Graph grid color"), _("Color of the temperature graph grid"), "Colors", isRGBA=False), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_fan", "#00000080", _("Graph fan line color"), _("Color of the temperature graph fan speed line"), "Colors"), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_bedtemp", "#FF000080", _("Graph bed line color"), _("Color of the temperature graph bed temperature line"), "Colors"), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_bedtarget", "#FF780080", _("Graph bed target line color"), _("Color of the temperature graph bed temperature target line"), "Colors"), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_ex0temp", "#009BFF80", _("Graph ex0 line color"), _("Color of the temperature graph extruder 0 temperature line"), "Colors"), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_ex0target", "#0005FF80", _("Graph ex0 target line color"), _("Color of the temperature graph extruder 0 target temperature line"), "Colors"), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_ex1temp", "#37370080", _("Graph ex1 line color color"), _("Color of the temperature graph extruder 1 temperature line"), "Colors"), self.reload_ui)
        self.settings._add(ColorSetting("graph_color_ex1target", "#37370080", _("Graph ex1 target line color"), _("Color of the temperature graph extruder 1 temperature target line"), "Colors"), self.reload_ui)
        self.settings._add(StaticTextSetting("separator_colors3", _("3D Viewer"), "", group = "Colors"))
        self.settings._add(ColorSetting("gcview_color_background", "#FAFAC7FF", _("3D view background color"), _("Color of the 3D view background"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_travel", "#99999999", _("3D view travel moves color"), _("Color of travel moves in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_tool0", "#FF000099", _("3D view print moves color"), _("Color of print moves with tool 0 in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_tool1", "#AC0DFF99", _("3D view tool 1 moves color"), _("Color of print moves with tool 1 in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_tool2", "#FFCE0099", _("3D view tool 2 moves color"), _("Color of print moves with tool 2 in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_tool3", "#FF009F99", _("3D view tool 3 moves color"), _("Color of print moves with tool 3 in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_tool4", "#00FF8F99", _("3D view tool 4 moves color"), _("Color of print moves with tool 4 in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_printed", "#33BF0099", _("3D view printed moves color"), _("Color of printed moves in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_current", "#00E5FFCC", _("3D view current layer moves color"), _("Color of moves in current layer in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(ColorSetting("gcview_color_current_printed", "#196600CC", _("3D view printed current layer moves color"), _("Color of already printed moves from current layer in 3D view"), "Colors"), self.update_gcview_colors)
        self.settings._add(StaticTextSetting("note1", _("Note:"), _("Changing some of these settings might require a restart to get effect"), group = "UI"))
        # recentfiles is stored as a JSON list; hidden from the dialog
        recentfilessetting = StringSetting("recentfiles", "[]")
        recentfilessetting.hidden = True
        self.settings._add(recentfilessetting, self.update_recent_files)
def _preferred_bgcolour_hex(self):
id = wx.SYS_COLOUR_WINDOW \
if platform.system() == 'Windows' \
else wx.SYS_COLOUR_BACKGROUND
sys_bgcolour = wx.SystemSettings.GetColour(id)
return sys_bgcolour.GetAsString(flags=wx.C2S_HTML_SYNTAX)
def add_cmdline_arguments(self, parser):
pronsole.pronsole.add_cmdline_arguments(self, parser)
parser.add_argument('-a', '--autoconnect', help = _("automatically try to connect to printer on startup"), action = "store_true")
def process_cmdline_arguments(self, args):
pronsole.pronsole.process_cmdline_arguments(self, args)
self.autoconnect = args.autoconnect
def update_recent_files(self, param, value):
if self.filehistory is None:
return
recent_files = []
try:
recent_files = json.loads(value)
except:
self.logError(_("Failed to load recent files list:") +
"\n" + traceback.format_exc())
# Clear history
while self.filehistory.GetCount():
self.filehistory.RemoveFileFromHistory(0)
recent_files.reverse()
for f in recent_files:
self.filehistory.AddFileToHistory(f)
def update_gviz_params(self, param, value):
params_map = {"preview_extrusion_width": "extrusion_width",
"preview_grid_step1": "grid",
"preview_grid_step2": "grid"}
if param not in params_map:
return
if not hasattr(self, "gviz"):
# GUI hasn't been loaded yet, ignore this setting
return
trueparam = params_map[param]
if hasattr(self.gviz, trueparam):
gviz = self.gviz
elif hasattr(self.gwindow, "p") and hasattr(self.gwindow.p, trueparam):
gviz = self.gwindow.p
else:
return
if trueparam == "grid":
try:
item = int(param[-1]) # extract list item position
grid = list(gviz.grid)
grid[item - 1] = value
value = tuple(grid)
except:
self.logError(traceback.format_exc())
if hasattr(self.gviz, trueparam):
self.apply_gviz_params(self.gviz, trueparam, value)
if hasattr(self.gwindow, "p") and hasattr(self.gwindow.p, trueparam):
self.apply_gviz_params(self.gwindow.p, trueparam, value)
def apply_gviz_params(self, widget, param, value):
setattr(widget, param, value)
widget.dirty = 1
wx.CallAfter(widget.Refresh)
def update_gcview_colors(self, param, value):
if not self.window_ready:
return
color = hexcolor_to_float(value, 4)
# This is sort of a hack: we copy the color values into the preexisting
# color tuple so that we don't need to update the tuple used by gcview
target_color = getattr(self, param)
for i, v in enumerate(color):
target_color[i] = v
wx.CallAfter(self.Refresh)
def update_build_dimensions(self, param, value):
pronsole.pronsole.update_build_dimensions(self, param, value)
self.update_bed_viz()
def update_bed_viz(self, *args):
"""Update bed visualization when size/type changed"""
if hasattr(self, "gviz") and hasattr(self.gviz, "recreate_platform"):
self.gviz.recreate_platform(self.build_dimensions_list, self.settings.circular_bed,
grid = (self.settings.preview_grid_step1, self.settings.preview_grid_step2))
if hasattr(self, "gwindow") and hasattr(self.gwindow, "recreate_platform"):
self.gwindow.recreate_platform(self.build_dimensions_list, self.settings.circular_bed,
grid = (self.settings.preview_grid_step1, self.settings.preview_grid_step2))
def update_gcview_params(self, *args):
need_reload = False
if hasattr(self, "gviz") and hasattr(self.gviz, "set_gcview_params"):
need_reload |= self.gviz.set_gcview_params(self.settings.gcview_path_width, self.settings.gcview_path_height)
if hasattr(self, "gwindow") and hasattr(self.gwindow, "set_gcview_params"):
need_reload |= self.gwindow.set_gcview_params(self.settings.gcview_path_width, self.settings.gcview_path_height)
if need_reload:
self.start_viz_thread()
def update_monitor(self, *args):
if hasattr(self, "graph") and self.display_graph:
if self.settings.monitor:
wx.CallAfter(self.graph.StartPlotting, 1000)
else:
wx.CallAfter(self.graph.StopPlotting)
# --------------------------------------------------------------
# Statusbar handling
# --------------------------------------------------------------
    def statuschecker_inner(self):
        """One iteration of the GUI status loop.

        Builds the statusbar text for the current print/upload/load state,
        optionally mirrors progress to the printer's LCD via M117, refreshes
        the visualization, delegates temperature monitoring to pronsole, and
        drains the queue of sent G-code lines into the viewer highlight.
        """
        status_string = ""
        if self.sdprinting or self.uploading or self.p.printing:
            secondsremain, secondsestimate, progress = self.get_eta()
            if self.sdprinting or self.uploading:
                if self.uploading:
                    status_string += _("SD upload: %04.2f%% |") % (100 * progress,)
                    status_string += _(" Line# %d of %d lines |") % (self.p.queueindex, len(self.p.mainqueue))
                else:
                    status_string += _("SD printing: %04.2f%% |") % (self.percentdone,)
            elif self.p.printing:
                status_string += _("Printing: %04.2f%% |") % (100 * float(self.p.queueindex) / len(self.p.mainqueue),)
                status_string += _(" Line# %d of %d lines |") % (self.p.queueindex, len(self.p.mainqueue))
            if progress > 0:
                status_string += _(" Est: %s of %s remaining | ") % (format_duration(secondsremain),
                                                                    format_duration(secondsestimate))
            status_string += _(" Z: %.3f mm") % self.curlayer
            # Throttle LCD progress updates to the configured interval
            if self.settings.display_progress_on_printer and time.time() - self.printer_progress_time >= self.settings.printer_progress_update_interval:
                self.printer_progress_time = time.time()
                if self.p.mainqueue is not None:
                    # Don't try to calculate the printer_progress_string with a None value of self.p.mainqueue.
                    # This happens in combination with self.p.queueindex = 0
                    # We pass the calculation and try it next time.
                    printer_progress_string = "M117 " + str(round(100 * float(self.p.queueindex) / len(self.p.mainqueue), 2)) + "% Est " + format_duration(secondsremain)
                    # ":" seems to be some kind of separator for G-code,
                    # so replace it before sending the M117 message
                    self.p.send_now(printer_progress_string.replace(":", "."))
                    if len(printer_progress_string) > 25:
                        logging.info(_("Warning: The print progress message might be too long to be displayed properly"))
                        # 13 chars for up to 99h est.
        elif self.loading_gcode:
            status_string = self.loading_gcode_message
        wx.CallAfter(self.statusbar.SetStatusText, status_string)
        wx.CallAfter(self.gviz.Refresh)
        # Call pronsole's statuschecker inner loop function to handle
        # temperature monitoring and status loop sleep
        pronsole.pronsole.statuschecker_inner(self, self.settings.monitor)
        try:
            while not self.sentglines.empty():
                gc = self.sentglines.get_nowait()
                wx.CallAfter(self.gviz.addgcodehighlight, gc)
                self.sentglines.task_done()
        except queue.Empty:
            pass
def statuschecker(self):
pronsole.pronsole.statuschecker(self)
wx.CallAfter(self.statusbar.SetStatusText, _("Not connected to printer."))
# --------------------------------------------------------------
# Interface lock handling
# --------------------------------------------------------------
def lock(self, event = None, force = None):
if force is not None:
self.locker.SetValue(force)
if self.locker.GetValue():
self.log(_("Locking interface."))
for panel in self.panels:
panel.Disable()
else:
self.log(_("Unlocking interface."))
for panel in self.panels:
panel.Enable()
# --------------------------------------------------------------
# Printer connection handling
# --------------------------------------------------------------
def connectbtn_cb(self, event):
# Implement toggle behavior with a single Bind
# and switched variable, so we have reference to
# the actual callback to use in on_key
self.connectbtn_cb_var()
def connect(self, event = None):
self.log(_("Connecting..."))
port = None
if self.serialport.GetValue():
port = str(self.serialport.GetValue())
else:
scanned = self.scanserial()
if scanned:
port = scanned[0]
baud = 115200
try:
baud = int(self.baud.GetValue())
except:
self.logError(_("Could not parse baud rate: ")
+ "\n" + traceback.format_exc())
if self.paused:
self.p.paused = 0
self.p.printing = 0
wx.CallAfter(self.pausebtn.SetLabel, _("&Pause"))
wx.CallAfter(self.printbtn.SetLabel, _("&Print"))
wx.CallAfter(self.toolbarsizer.Layout)
self.paused = 0
if self.sdprinting:
self.p.send_now("M26 S0")
if not self.connect_to_printer(port, baud, self.settings.dtr):
return
if port != self.settings.port:
self.set("port", port)
if baud != self.settings.baudrate:
self.set("baudrate", str(baud))
if self.predisconnect_mainqueue:
self.recoverbtn.Enable()
def store_predisconnect_state(self):
self.predisconnect_mainqueue = self.p.mainqueue
self.predisconnect_queueindex = self.p.queueindex
self.predisconnect_layer = self.curlayer
    def disconnect(self, event = None):
        """Disconnect from the printer and reset the UI to its offline state.

        Saves the print state first (so it can be recovered on reconnect),
        stops the status thread, flips the connect button back to "Connect",
        and clears any pause state.
        """
        self.log(_("Disconnected."))
        # Save state before tearing the connection down so recover() can work
        if self.p.printing or self.p.paused or self.paused:
            self.store_predisconnect_state()
        self.p.disconnect()
        self.statuscheck = False
        if self.status_thread:
            self.status_thread.join()
        self.status_thread = None

        def toggle():
            # Runs on the UI thread: restore the "disconnected" button state
            self.connectbtn.SetLabel(_("&Connect"))
            self.connectbtn.SetToolTip(wx.ToolTip(_("Connect to the printer")))
            self.connectbtn_cb_var = self.connect
            self.gui_set_disconnected()
        wx.CallAfter(toggle)

        if self.paused:
            self.p.paused = 0
            self.p.printing = 0
            wx.CallAfter(self.pausebtn.SetLabel, _("&Pause"))
            wx.CallAfter(self.printbtn.SetLabel, _("&Print"))
            self.paused = 0
            if self.sdprinting:
                self.p.send_now("M26 S0")

        # Relayout the toolbar to handle new buttons size
        wx.CallAfter(self.toolbarsizer.Layout)
def reset(self, event):
self.log(_("Reset."))
dlg = wx.MessageDialog(self, _("Are you sure you want to reset the printer?"), _("Reset?"), wx.YES | wx.NO)
if dlg.ShowModal() == wx.ID_YES:
self.p.reset()
self.sethotendgui(0)
self.setbedgui(0)
self.p.printing = 0
wx.CallAfter(self.printbtn.SetLabel, _("&Print"))
if self.paused:
self.p.paused = 0
wx.CallAfter(self.pausebtn.SetLabel, _("&Pause"))
self.paused = 0
wx.CallAfter(self.toolbarsizer.Layout)
dlg.Destroy()
# --------------------------------------------------------------
# Print/upload handling
# --------------------------------------------------------------
def on_startprint(self):
wx.CallAfter(self.pausebtn.SetLabel, _("&Pause"))
wx.CallAfter(self.pausebtn.Enable)
wx.CallAfter(self.printbtn.SetLabel, _("Restart"))
wx.CallAfter(self.toolbarsizer.Layout)
def printfile(self, event=None):
self.extra_print_time = 0
if self.paused:
self.p.paused = 0
self.paused = 0
if self.sdprinting:
self.on_startprint()
self.p.send_now("M26 S0")
self.p.send_now("M24")
return
if not self.fgcode:
wx.CallAfter(self.statusbar.SetStatusText, _("No file loaded. Please use load first."))
return
if not self.p.online:
wx.CallAfter(self.statusbar.SetStatusText, _("Not connected to printer."))
return
self.sdprinting = False
self.on_startprint()
self.p.startprint(self.fgcode)
def sdprintfile(self, event):
self.extra_print_time = 0
self.on_startprint()
threading.Thread(target = self.getfiles).start()
def upload(self, event):
if not self.fgcode:
return
if not self.p.online:
return
dlg = wx.TextEntryDialog(self, _("Enter a target filename in 8.3 format:"), _("Pick SD filename"), dosify(self.filename))
if dlg.ShowModal() == wx.ID_OK:
self.p.send_now("M21")
self.p.send_now("M28 " + str(dlg.GetValue()))
self.recvlisteners.append(self.uploadtrigger)
dlg.Destroy()
def uploadtrigger(self, l):
if "Writing to file" in l:
self.uploading = True
self.p.startprint(self.fgcode)
self.p.endcb = self.endupload
self.recvlisteners.remove(self.uploadtrigger)
elif "open failed, File" in l:
self.recvlisteners.remove(self.uploadtrigger)
def endupload(self):
self.p.send_now("M29 ")
wx.CallAfter(self.statusbar.SetStatusText, _("File upload complete"))
time.sleep(0.5)
self.p.clear = True
self.uploading = False
    def pause(self, event = None):
        """Toggle pause/resume of the current print.

        Handles both SD prints (M25/M24) and host-driven prints
        (printcore pause/resume), optionally mirroring the state to the
        printer's LCD via M117.
        """
        if not self.paused:
            self.log(_("Print paused at: %s") % format_time(time.time()))
            if self.settings.display_progress_on_printer:
                printer_progress_string = "M117 PausedInPronterface"
                self.p.send_now(printer_progress_string)
            if self.sdprinting:
                self.p.send_now("M25")
            else:
                if not self.p.printing:
                    # Nothing to pause
                    return
                self.p.pause()
                self.p.runSmallScript(self.pauseScript)
            self.paused = True
            # self.p.runSmallScript(self.pauseScript)
            # Exclude the paused wait from the ETA computation
            self.extra_print_time += int(time.time() - self.starttime)
            wx.CallAfter(self.pausebtn.SetLabel, _("Resume"))
            wx.CallAfter(self.toolbarsizer.Layout)
        else:
            self.log(_("Resuming."))
            if self.settings.display_progress_on_printer:
                printer_progress_string = "M117 Resuming"
                self.p.send_now(printer_progress_string)
            self.paused = False
            if self.sdprinting:
                self.p.send_now("M24")
            else:
                self.p.resume()
            wx.CallAfter(self.pausebtn.SetLabel, _("&Pause"))
            wx.CallAfter(self.toolbarsizer.Layout)
def recover(self, event):
self.extra_print_time = 0
if not self.p.online:
wx.CallAfter(self.statusbar.SetStatusText, _("Not connected to printer."))
return
# Reset Z
self.p.send_now("G92 Z%f" % self.predisconnect_layer)
# Home X and Y
self.p.send_now("G28 X Y")
self.on_startprint()
self.p.startprint(self.predisconnect_mainqueue, self.p.queueindex)
# --------------------------------------------------------------
# File loading handling
# --------------------------------------------------------------
def filesloaded(self):
dlg = wx.SingleChoiceDialog(self, _("Select the file to print"), _("Pick SD file"), self.sdfiles)
if dlg.ShowModal() == wx.ID_OK:
target = dlg.GetStringSelection()
if len(target):
self.recvlisteners.append(self.waitforsdresponse)
self.p.send_now("M23 " + target.lower())
dlg.Destroy()
def getfiles(self):
if not self.p.online:
self.sdfiles = []
return
self.sdlisting = 0
self.sdfiles = []
self.recvlisteners.append(self.listfiles)
self.p.send_now("M21")
self.p.send_now("M20")
def model_to_gcode_filename(self, filename):
suffix = "_export.gcode"
for ext in [".stl", ".obj"]:
filename = filename.replace(ext, suffix)
filename = filename.replace(ext.upper(), suffix)
return filename
def slice_func(self):
try:
output_filename = self.model_to_gcode_filename(self.filename)
pararray = prepare_command(self.settings.slicecommandpath + self.settings.slicecommand,
{"$s": self.filename, "$o": output_filename})
if self.settings.slic3rintegration:
for cat, config in self.slic3r_configs.items():
if config:
fpath = os.path.join(self.slic3r_configpath, cat, config)
pararray += ["--load", fpath]
self.log(_("Running ") + " ".join(pararray))
self.slicep = subprocess.Popen(pararray, stdin=subprocess.DEVNULL, stderr = subprocess.STDOUT, stdout = subprocess.PIPE, universal_newlines = True)
while True:
o = self.slicep.stdout.read(1)
if o == '' and self.slicep.poll() is not None: break
sys.stdout.write(o)
self.slicep.wait()
self.stopsf = 1
except:
self.logError(_("Failed to execute slicing software: ")
+ "\n" + traceback.format_exc())
self.stopsf = 1
def slice_monitor(self):
while not self.stopsf:
try:
wx.CallAfter(self.statusbar.SetStatusText, _("Slicing...")) # +self.cout.getvalue().split("\n")[-1])
except:
pass
time.sleep(0.1)
fn = self.filename
try:
self.load_gcode_async(self.model_to_gcode_filename(self.filename))
except:
self.filename = fn
self.slicing = False
self.slicep = None
self.loadbtn.SetLabel, _("Load file")
def slice(self, filename):
wx.CallAfter(self.loadbtn.SetLabel, _("Cancel"))
wx.CallAfter(self.toolbarsizer.Layout)
self.log(_("Slicing ") + filename)
self.cout = StringIO.StringIO()
self.filename = filename
self.stopsf = 0
self.slicing = True
threading.Thread(target = self.slice_func).start()
threading.Thread(target = self.slice_monitor).start()
def cmdline_filename_callback(self, filename):
# Do nothing when processing a filename from command line, as we'll
# handle it when everything has been prepared
self.filename = filename
def do_load(self, l):
if hasattr(self, 'slicing'):
self.loadfile(None, l)
else:
self._do_load(l)
def load_recent_file(self, event):
fileid = event.GetId() - wx.ID_FILE1
path = self.filehistory.GetHistoryFile(fileid)
self.loadfile(None, filename = path)
    def loadfile(self, event, filename = None):
        """Load a model or G-code file, prompting with a dialog when needed.

        If a slicing run is in progress, acts as a cancel button instead.
        filename: path to load directly; when None a file dialog is shown.
        STL/OBJ files are sent to the slicer, everything else is loaded as
        G-code.  Also maintains the last-path/filter settings and the
        recent-files list.
        """
        if self.slicing and self.slicep is not None:
            # The load button doubles as a cancel button while slicing
            self.slicep.terminate()
            return
        basedir = self.settings.last_file_path
        if not os.path.exists(basedir):
            basedir = "."
            try:
                basedir = os.path.split(self.filename)[0]
            except:
                pass
        dlg = None
        if filename is None:
            dlg = wx.FileDialog(self, _("Open file to print"), basedir, style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
            dlg.SetWildcard(_("OBJ, STL, and GCODE files (*.gcode;*.gco;*.g;*.stl;*.STL;*.obj;*.OBJ)|*.gcode;*.gco;*.g;*.stl;*.STL;*.obj;*.OBJ|GCODE files (*.gcode;*.gco;*.g)|*.gcode;*.gco;*.g|OBJ, STL files (*.stl;*.STL;*.obj;*.OBJ)|*.stl;*.STL;*.obj;*.OBJ|All Files (*.*)|*.*"))
            try:
                dlg.SetFilterIndex(self.settings.last_file_filter)
            except:
                pass
        if filename or dlg.ShowModal() == wx.ID_OK:
            if filename:
                name = filename
            else:
                name = dlg.GetPath()
                self.set("last_file_filter", dlg.GetFilterIndex())
                dlg.Destroy()
            if not os.path.exists(name):
                self.statusbar.SetStatusText(_("File not found!"))
                return
            path = os.path.split(name)[0]
            if path != self.settings.last_file_path:
                self.set("last_file_path", path)
            try:
                # Move/insert this file at the head of the recent-files list,
                # capping the list at five entries
                abspath = os.path.abspath(name)
                recent_files = []
                try:
                    recent_files = json.loads(self.settings.recentfiles)
                except:
                    self.logError(_("Failed to load recent files list:") +
                                  "\n" + traceback.format_exc())
                if abspath in recent_files:
                    recent_files.remove(abspath)
                recent_files.insert(0, abspath)
                if len(recent_files) > 5:
                    recent_files = recent_files[:5]
                self.set("recentfiles", json.dumps(recent_files))
            except:
                self.logError(_("Could not update recent files list:") +
                              "\n" + traceback.format_exc())
            if name.lower().endswith(".stl") or name.lower().endswith(".obj"):
                self.slice(name)
            else:
                self.load_gcode_async(name)
        else:
            dlg.Destroy()
def load_gcode_async(self, filename):
self.filename = filename
gcode = self.pre_gcode_load()
self.log(_("Loading file: %s") % filename)
threading.Thread(target = self.load_gcode_async_thread, args = (gcode,)).start()
def load_gcode_async_thread(self, gcode):
try:
self.load_gcode(self.filename,
layer_callback = self.layer_ready_cb,
gcode = gcode)
except PronterfaceQuitException:
return
except Exception as e:
self.log(str(e))
wx.CallAfter(self.post_gcode_load, False, True)
return
wx.CallAfter(self.post_gcode_load)
def layer_ready_cb(self, gcode, layer):
global pronterface_quitting
if pronterface_quitting:
raise PronterfaceQuitException
if not self.settings.refreshwhenloading:
return
self.viz_last_layer = layer
if time.time() - self.viz_last_yield > 1.0:
time.sleep(0.2)
self.loading_gcode_message = _("Loading %s: %d layers loaded (%d lines)") % (self.filename, layer + 1, len(gcode))
self.viz_last_yield = time.time()
wx.CallAfter(self.statusbar.SetStatusText, self.loading_gcode_message)
def start_viz_thread(self, gcode = None):
threading.Thread(target = self.loadviz, args = (gcode,)).start()
def pre_gcode_load(self):
self.loading_gcode = True
self.loading_gcode_message = _("Loading %s...") % self.filename
if self.settings.mainviz == "None":
gcode = gcoder.LightGCode(deferred = True)
else:
gcode = gcoder.GCode(deferred = True, cutting_as_extrusion = self.settings.cutting_as_extrusion)
self.viz_last_yield = 0
self.viz_last_layer = -1
self.start_viz_thread(gcode)
return gcode
def post_gcode_load(self, print_stats = True, failed=False):
# Must be called in wx.CallAfter for safety
self.loading_gcode = False
if not failed:
self.SetTitle(_("Pronterface - %s") % self.filename)
message = _("Loaded %s, %d lines") % (self.filename, len(self.fgcode),)
self.log(message)
self.statusbar.SetStatusText(message)
self.savebtn.Enable(True)
self.loadbtn.SetLabel(_("Load File"))
self.printbtn.SetLabel(_("&Print"))
self.pausebtn.SetLabel(_("&Pause"))
self.pausebtn.Disable()
self.recoverbtn.Disable()
if not failed and self.p.online:
self.printbtn.Enable()
self.toolbarsizer.Layout()
self.viz_last_layer = None
if print_stats:
self.output_gcode_stats()
def calculate_remaining_filament(self, length, extruder = 0):
"""
float calculate_remaining_filament( float length, int extruder )
Calculate the remaining length of filament for the given extruder if
the given length were to be extruded.
"""
remainder = self.spool_manager.getRemainingFilament(extruder) - length
minimum_warning_length = 1000.0
if remainder < minimum_warning_length:
self.log(_("\nWARNING: Currently loaded spool for extruder ") +
_("%d will likely run out of filament during the print.\n")
% extruder)
return remainder
def output_gcode_stats(self):
gcode = self.fgcode
self.spool_manager.refresh()
self.log(_("%s of filament used in this print") % format_length(gcode.filament_length))
if len(gcode.filament_length_multi) > 1:
for i in enumerate(gcode.filament_length_multi):
if self.spool_manager.getSpoolName(i[0]) is None:
logging.info("- Extruder %d: %0.02fmm" % (i[0], i[1]))
else:
logging.info(("- Extruder %d: %0.02fmm" % (i[0], i[1]) +
" from spool '%s' (%.2fmm will remain)" %
(self.spool_manager.getSpoolName(i[0]),
self.calculate_remaining_filament(i[1], i[0]))))
elif self.spool_manager.getSpoolName(0) is not None:
self.log(
_("Using spool '%s' (%s of filament will remain)") %
(self.spool_manager.getSpoolName(0),
format_length(self.calculate_remaining_filament(
gcode.filament_length, 0))))
self.log(_("The print goes:"))
self.log(_("- from %.2f mm to %.2f mm in X and is %.2f mm wide") % (gcode.xmin, gcode.xmax, gcode.width))
self.log(_("- from %.2f mm to %.2f mm in Y and is %.2f mm deep") % (gcode.ymin, gcode.ymax, gcode.depth))
self.log(_("- from %.2f mm to %.2f mm in Z and is %.2f mm high") % (gcode.zmin, gcode.zmax, gcode.height))
self.log(_("Estimated duration: %d layers, %s") % gcode.estimate_duration())
def loadviz(self, gcode = None):
    """Load G-code into the visualization.

    When `gcode` is given it is assumed to still be loading in GCoder,
    so layers are fed to the viewer progressively; otherwise the
    already-parsed self.fgcode is added in one go.
    """
    try:
        self.gviz.clear()
        self.gwindow.p.clear()
        if gcode is not None:
            generator = self.gviz.addfile_perlayer(gcode, True)
            next_layer = 0
            # Progressive loading of visualization
            # We load layers up to the last one which has been processed in GCoder
            # (self.viz_last_layer)
            # Once the GCode has been entirely loaded, this variable becomes None,
            # indicating that we can do the last generator call to finish the
            # loading of the visualization, which will itself return None.
            # During preloading we verify that the layer we added is the one we
            # expected through the assert call.
            while True:
                global pronterface_quitting
                if pronterface_quitting:
                    return
                max_layer = self.viz_last_layer
                if max_layer is None:
                    break
                start_layer = next_layer
                while next_layer <= max_layer:
                    assert next(generator) == next_layer
                    next_layer += 1
                if next_layer != start_layer:
                    wx.CallAfter(self.gviz.Refresh)
                # Avoid busy-waiting on GCoder's parsing thread
                time.sleep(0.1)
            # Parsing is done: drain whatever layers remain
            generator_output = next(generator)
            while generator_output is not None:
                assert generator_output == next_layer
                next_layer += 1
                generator_output = next(generator)
        else:
            # If GCode is not being loaded asynchronously, it is already
            # loaded, so let's make visualization sequentially
            gcode = self.fgcode
            self.gviz.addfile(gcode)
        wx.CallAfter(self.gviz.Refresh)
        # Load external window sequentially now that everything is ready.
        # We can't really do any better as the 3D viewer might clone the
        # finalized model from the main visualization
        self.gwindow.p.addfile(gcode)
    except:
        logging.error(traceback.format_exc())
        wx.CallAfter(self.gviz.Refresh)
# --------------------------------------------------------------
# File saving handling
# --------------------------------------------------------------
def savefile(self, event):
    """Ask the user for a target path and save the loaded G-code to it."""
    basedir = self.settings.last_file_path
    if not os.path.exists(basedir):
        basedir = "."
        try:
            basedir = os.path.split(self.filename)[0]
        except Exception:
            # self.filename may be unset or None; fall back to "."
            pass
    dlg = wx.FileDialog(self, _("Save as"), basedir, style = wx.FD_SAVE)
    dlg.SetWildcard(_("GCODE files (*.gcode;*.gco;*.g)|*.gcode;*.gco;*.g|All Files (*.*)|*.*"))
    if dlg.ShowModal() == wx.ID_OK:
        name = dlg.GetPath()
        # Use a context manager so the file is flushed and closed even on
        # error (the previous open(...).write(...) leaked the handle)
        with open(name, "w") as f:
            f.write("\n".join(line.raw for line in self.fgcode))
        self.log(_("G-Code successfully saved to %s") % name)
    dlg.Destroy()
# --------------------------------------------------------------
# Printcore callbacks
# --------------------------------------------------------------
def process_host_command(self, command):
    """Override host command handling: ;@pause pauses the GUI print,
    anything else is delegated to the console implementation."""
    stripped = command.lstrip()
    if stripped.startswith(";@pause"):
        self.pause(None)
        return
    pronsole.pronsole.process_host_command(self, stripped)
def startcb(self, resuming = False):
    """Callback on print start; optionally locks the interface."""
    pronsole.pronsole.startcb(self, resuming)
    if self.settings.lockbox and self.settings.lockonstart:
        wx.CallAfter(self.lock, force = True)
def endcb(self):
    """Callback on print end/pause"""
    pronsole.pronsole.endcb(self)
    if self.p.queueindex == 0:
        # Queue fully drained: the print actually finished (not paused),
        # so run the end script and reset the toolbar
        self.p.runSmallScript(self.endScript)
        if self.settings.display_progress_on_printer:
            printer_progress_string = "M117 Finished Print"
            self.p.send_now(printer_progress_string)
        wx.CallAfter(self.pausebtn.Disable)
        wx.CallAfter(self.printbtn.SetLabel, _("&Print"))
        wx.CallAfter(self.toolbarsizer.Layout)
def online(self):
    """Callback when printer goes online"""
    self.log(_("Printer is now online."))
    # GUI updates must happen on the UI thread
    wx.CallAfter(self.online_gui)
def online_gui(self):
    """Callback when printer goes online (graphical bits)"""
    self.connectbtn.SetLabel(_("Dis&connect"))
    self.connectbtn.SetToolTip(wx.ToolTip(_("Disconnect from the printer")))
    self.connectbtn_cb_var = self.disconnect
    if hasattr(self, "extrudersel"):
        # Re-select the tool shown in the extruder selector
        self.do_tool(self.extrudersel.GetValue())
    self.gui_set_connected()
    if self.filename:
        # A file is already loaded, so printing can start now
        self.printbtn.Enable()
    wx.CallAfter(self.toolbarsizer.Layout)
def sentcb(self, line, gline):
    """Callback when a printer gcode has been sent.

    Mirrors temperature, fan and tool changes into the graph/gauges and
    queues moves for the visualization.
    """
    if not gline:
        pass
    elif gline.command in ["M104", "M109"]:
        # Hotend target temperature (set / set-and-wait)
        gline_s = gcoder.S(gline)
        if gline_s is not None:
            temp = gline_s
            if self.display_gauges: wx.CallAfter(self.hottgauge.SetTarget, temp)
            if self.display_graph: wx.CallAfter(self.graph.SetExtruder0TargetTemperature, temp)
    elif gline.command in ["M140", "M190"]:
        # Bed target temperature (set / set-and-wait)
        gline_s = gcoder.S(gline)
        if gline_s is not None:
            temp = gline_s
            if self.display_gauges: wx.CallAfter(self.bedtgauge.SetTarget, temp)
            if self.display_graph: wx.CallAfter(self.graph.SetBedTargetTemperature, temp)
    elif gline.command in ["M106"]:
        # Fan on: default to full power when no S parameter is given
        gline_s = gcoder.S(gline)
        fanpow = 255
        if gline_s is not None:
            fanpow = gline_s
        if self.display_graph: wx.CallAfter(self.graph.SetFanPower, fanpow)
    elif gline.command in ["M107"]:
        # Fan off
        if self.display_graph: wx.CallAfter(self.graph.SetFanPower, 0)
    elif gline.command.startswith("T"):
        # Tool change: track it in the extruder selector
        tool = gline.command[1:]
        if hasattr(self, "extrudersel"): wx.CallAfter(self.extrudersel.SetValue, tool)
    # NOTE(review): a falsy gline still reaches gline.is_move here —
    # confirm callers never pass None
    if gline.is_move:
        self.sentglines.put_nowait(gline)
def is_excluded_move(self, gline):
    """Check whether the given move ends at a position specified as
    excluded in the part excluder; returns False when exclusion is
    inactive or the line is not a move."""
    excluder = self.excluder
    if not gline.is_move or not excluder or not excluder.rectangles:
        return False
    x, y = gline.current_x, gline.current_y
    return any(x0 <= x <= x1 and y0 <= y <= y1
               for x0, y0, x1, y1 in excluder.rectangles)
def preprintsendcb(self, gline, next_gline):
    """Callback when a printer gcode is about to be sent. We use it to
    exclude moves defined by the part excluder tool.

    Returns the gline to send, or None to drop it. While dropping moves
    we track the Z/E state they would have produced so it can be
    restored right before the first non-excluded move.
    """
    if not self.is_excluded_move(gline):
        return gline
    else:
        if gline.z is not None:
            if gline.relative:
                # Accumulate relative Z offsets on top of whichever Z
                # tracking mode is currently active
                if self.excluder_z_abs is not None:
                    self.excluder_z_abs += gline.z
                elif self.excluder_z_rel is not None:
                    self.excluder_z_rel += gline.z
                else:
                    self.excluder_z_rel = gline.z
            else:
                # Absolute Z overrides any accumulated relative offset
                self.excluder_z_rel = None
                self.excluder_z_abs = gline.z
        if gline.e is not None and not gline.relative_e:
            self.excluder_e = gline.e
        # If next move won't be excluded, push the changes we have to do
        if next_gline is not None and not self.is_excluded_move(next_gline):
            if self.excluder_e is not None:
                # Resynchronize the extruder position
                self.p.send_now("G92 E%.5f" % self.excluder_e)
                self.excluder_e = None
            if self.excluder_z_abs is not None:
                # Restore absolute Z, temporarily switching to absolute
                # positioning if the stream is currently relative
                if gline.relative:
                    self.p.send_now("G90")
                self.p.send_now("G1 Z%.5f" % self.excluder_z_abs)
                self.excluder_z_abs = None
                if gline.relative:
                    self.p.send_now("G91")
            if self.excluder_z_rel is not None:
                # Replay accumulated relative Z motion
                if not gline.relative:
                    self.p.send_now("G91")
                self.p.send_now("G1 Z%.5f" % self.excluder_z_rel)
                self.excluder_z_rel = None
                if not gline.relative:
                    self.p.send_now("G90")
        return None
def printsentcb(self, gline):
    """Callback when a print gcode has been sent: highlight the current
    line in both viewers when they support it."""
    if gline.is_move:
        if hasattr(self.gwindow, "set_current_gline"):
            wx.CallAfter(self.gwindow.set_current_gline, gline)
        if hasattr(self.gviz, "set_current_gline"):
            wx.CallAfter(self.gviz.set_current_gline, gline)
def layer_change_cb(self, newlayer):
    """Callback when the printed layer changed"""
    pronsole.pronsole.layer_change_cb(self, newlayer)
    # Follow the print in the viewer unless the 3D view has tracking off
    if self.settings.mainviz != "3D" or self.settings.trackcurrentlayer3d:
        wx.CallAfter(self.gviz.setlayer, newlayer)
def update_tempdisplay(self):
    """Parse the latest temperature report and update graph and gauges."""
    try:
        temps = parse_temperature_report(self.tempreadings)
        for name in 'T', 'T0', 'T1', 'B':
            if name not in temps:
                continue
            current = float(temps[name][0])
            target = float(temps[name][1]) if temps[name][1] else None
            if name == 'T':
                # A plain "T" report refers to the first hotend
                name = 'T0'
            if self.display_graph:
                # Build graph method names such as SetExtruder0Temperature
                # or SetBedTemperature from the report key
                prefix = 'Set' + name.replace('T', 'Extruder').replace('B', 'Bed')
                wx.CallAfter(getattr(self.graph, prefix + 'Temperature'), current)
                if target is not None:
                    wx.CallAfter(getattr(self.graph, prefix + 'TargetTemperature'), target)
            if self.display_gauges:
                if name[0] == 'T':
                    # The hotend gauge only shows the currently selected tool
                    if name[1] == str(self.current_tool):
                        def update(c, t):
                            self.hottgauge.SetValue(c)
                            self.hottgauge.SetTarget(t or 0)
                            self.hottgauge.title = _('Heater%s:') % (str(self.current_tool) if self.settings.extruders > 1 else '')
                        wx.CallAfter(update, current, target)
                else:
                    wx.CallAfter(self.bedtgauge.SetValue, current)
                    if target is not None:
                        wx.CallAfter(self.bedtgauge.SetTarget, target)
    except:
        self.logError(traceback.format_exc())
def update_pos(self):
    """Parse the last M114 position report and update current_pos,
    keeping only the first value seen for each of X, Y and Z."""
    coords = {"X": None, "Y": None, "Z": None}
    for bit in gcoder.m114_exp.findall(self.posreport):
        axis = bit[0]
        if axis in coords and coords[axis] is None:
            coords[axis] = float(bit[1])
    for index, axis in enumerate("XYZ"):
        if coords[axis] is not None:
            self.current_pos[index] = coords[axis]
def recvcb_actions(self, l):
    """Handle "!!" fatal error lines and "// action:<cmd>" commands from
    the printer. Returns True when the line was fully handled here."""
    if l.startswith("!!"):
        # Fatal printer error: pause and surface the message
        if not self.paused:
            wx.CallAfter(self.pause)
        msg = l.split(" ", 1)
        if len(msg) > 1 and not self.p.loud:
            self.log(msg[1] + "\n")
        return True
    elif l.startswith("//"):
        command = l.split(" ", 1)
        if len(command) > 1:
            command = command[1]
            command = command.split(":")
            if len(command) == 2 and command[0] == "action":
                command = command[1]
                self.log(_("Received command %s") % command)
                if command in ["pause", "cancel"]:
                    if not self.paused:
                        wx.CallAfter(self.pause)
                    return True
                elif command == "resume":
                    # pause() toggles, so calling it while paused resumes
                    if self.paused:
                        wx.CallAfter(self.pause)
                    return True
                elif command == "disconnect":
                    wx.CallAfter(self.disconnect)
                    return True
    return False
def recvcb(self, l):
    """Callback for every line received from the printer."""
    l = l.rstrip()
    if not self.recvcb_actions(l):
        report_type = self.recvcb_report(l)
        isreport = report_type != REPORT_NONE
        if report_type & REPORT_POS:
            self.update_pos()
        elif report_type & REPORT_TEMP:
            wx.CallAfter(self.tempdisp.SetLabel, self.tempreadings.strip().replace("ok ", ""))
            self.update_tempdisplay()
        # Log anything that is not ignored, not a bare ack, and not an
        # automatic report (manually requested reports are still shown)
        if not self.lineignorepattern.match(l) and not self.p.loud and (l not in ["ok", "wait"] and (not isreport or report_type & REPORT_MANUAL)):
            self.log(l)
        for listener in self.recvlisteners:
            listener(l)
def listfiles(self, line, ignored = False):
    """Printer-response listener collecting the SD card file listing
    emitted by the firmware between its "Begin"/"End file list" markers."""
    if "Begin file list" in line:
        self.sdlisting = True
    elif "End file list" in line:
        self.sdlisting = False
        self.recvlisteners.remove(self.listfiles)
        wx.CallAfter(self.filesloaded)
    elif self.sdlisting:
        # Strip the trailing size some firmwares append ("NAME.GCO 1234").
        # Raw string fixes the invalid "\d" escape the NOQA was hiding.
        self.sdfiles.append(re.sub(r" \d+$", "", line.strip().lower()))
def waitforsdresponse(self, l):
    """Printer-response listener driving SD printing: reacts to file
    open/select results, print completion and M27 progress reports."""
    if "file.open failed" in l:
        wx.CallAfter(self.statusbar.SetStatusText, _("Opening file failed."))
        self.recvlisteners.remove(self.waitforsdresponse)
        return
    if "File opened" in l:
        wx.CallAfter(self.statusbar.SetStatusText, l)
    if "File selected" in l:
        wx.CallAfter(self.statusbar.SetStatusText, _("Starting print"))
        self.sdprinting = True
        self.p.send_now("M24")
        self.startcb()
        return
    if "Done printing file" in l:
        wx.CallAfter(self.statusbar.SetStatusText, l)
        self.sdprinting = False
        self.recvlisteners.remove(self.waitforsdresponse)
        self.endcb()
        return
    if "SD printing byte" in l:
        # M27 handler: "SD printing byte <done>/<total>"
        try:
            resp = l.split()
            vals = resp[-1].split("/")
            self.percentdone = 100.0 * int(vals[0]) / int(vals[1])
        except (IndexError, ValueError, ZeroDivisionError):
            # Malformed report: keep the previous progress value
            # (was a bare except that also hid real bugs)
            pass
# --------------------------------------------------------------
# Custom buttons handling
# --------------------------------------------------------------
def cbuttons_reload(self):
    """Destroy and rebuild all custom button widgets from
    self.custombuttons."""
    allcbs = getattr(self, "custombuttons_widgets", [])
    for button in allcbs:
        self.cbuttonssizer.Detach(button)
        button.Destroy()
    self.custombuttons_widgets = []
    # The trailing None produces the "+" (add new button) widget
    custombuttons = self.custombuttons[:] + [None]
    for i, btndef in enumerate(custombuttons):
        if btndef is None:
            if i == len(custombuttons) - 1:
                self.newbuttonbutton = b = wx.Button(self.centerpanel, -1, "+", size = (35, 18), style = wx.BU_EXACTFIT)
                b.SetToolTip(wx.ToolTip(_("Click to add new custom button")))
                b.Bind(wx.EVT_BUTTON, self.cbutton_edit)
            else:
                # Empty slot placeholder
                b = wx.StaticText(self.panel, -1, "")
        else:
            b = wx.Button(self.centerpanel, -1, btndef.label, style = wx.BU_EXACTFIT)
            b.SetToolTip(wx.ToolTip(_("Execute command: ") + btndef.command))
            if btndef.background:
                b.SetBackgroundColour(btndef.background)
                if b.GetBackgroundColour().GetLuminance() < 0.5:
                    # Keep the label readable on dark backgrounds
                    b.SetForegroundColour("#ffffff")
        b.custombutton = i
        b.properties = btndef
        if btndef is not None:
            b.Bind(wx.EVT_BUTTON, self.process_button)
            b.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
        self.custombuttons_widgets.append(b)
        if isinstance(self.cbuttonssizer, wx.GridBagSizer):
            # 4 buttons per row in the grid layout
            self.cbuttonssizer.Add(b, pos = (i // 4, i % 4), flag = wx.EXPAND)
        else:
            self.cbuttonssizer.Add(b, flag = wx.EXPAND)
    self.centerpanel.Layout()
    self.centerpanel.GetContainingSizer().Layout()
def help_button(self):
    """Print usage help for the "button" console command."""
    self.log(_('Defines custom button. Usage: button <num> "title" [/c "colour"] command'))
def do_button(self, argstr):
    """Handle the "button" console command:
    button <num> "title" [/c "colour"] command"""
    def nextarg(rest):
        # Pop the next (possibly double-quoted) argument off the string
        rest = rest.lstrip()
        if rest.startswith('"'):
            return rest[1:].split('"', 1)
        else:
            return rest.split(None, 1)
    # try:
    num, argstr = nextarg(argstr)
    num = int(num)
    title, argstr = nextarg(argstr)
    colour = None
    try:
        # Optional "/c <colour>" pair before the command
        c1, c2 = nextarg(argstr)
        if c1 == "/c":
            colour, argstr = nextarg(c2)
    except:
        pass
    command = argstr.strip()
    if num < 0 or num >= 64:
        self.log(_("Custom button number should be between 0 and 63"))
        return
    while num >= len(self.custombuttons):
        self.custombuttons.append(None)
    self.custombuttons[num] = SpecialButton(title, command)
    if colour is not None:
        self.custombuttons[num].background = colour
    if not self.processing_rc:
        # Only rebuild widgets when not replaying the rc file at startup
        self.cbuttons_reload()
def cbutton_save(self, n, bdef, new_n = None):
    """Persist custom button definition `bdef` of slot `n` into the rc
    file (written under slot number `new_n` when renumbering)."""
    if new_n is None: new_n = n
    if bdef is None or bdef == "":
        # Clear the slot
        self.save_in_rc(("button %d" % n), '')
    elif bdef.background:
        colour = bdef.background
        if not isinstance(colour, str):
            # Normalize RGB tuples / wx colour objects to a string form
            if isinstance(colour, tuple) and tuple(map(type, colour)) == (int, int, int):
                colour = (x % 256 for x in colour)
                colour = wx.Colour(*colour).GetAsString(wx.C2S_NAME | wx.C2S_HTML_SYNTAX)
            else:
                colour = wx.Colour(colour).GetAsString(wx.C2S_NAME | wx.C2S_HTML_SYNTAX)
        self.save_in_rc(("button %d" % n), 'button %d "%s" /c "%s" %s' % (new_n, bdef.label, colour, bdef.command))
    else:
        self.save_in_rc(("button %d" % n), 'button %d "%s" %s' % (new_n, bdef.label, bdef.command))
def cbutton_edit(self, e, button = None):
    """Open the button editor dialog, either for an existing button or
    to create a new one (button is None)."""
    bedit = ButtonEdit(self)
    if button is not None:
        n = button.custombutton
        bedit.name.SetValue(button.properties.label)
        bedit.command.SetValue(button.properties.command)
        if button.properties.background:
            colour = button.properties.background
            if not isinstance(colour, str):
                # Normalize RGB tuples / wx colour objects to a string form
                if isinstance(colour, tuple) and tuple(map(type, colour)) == (int, int, int):
                    colour = (x % 256 for x in colour)
                    colour = wx.Colour(*colour).GetAsString(wx.C2S_NAME | wx.C2S_HTML_SYNTAX)
                else:
                    colour = wx.Colour(colour).GetAsString(wx.C2S_NAME | wx.C2S_HTML_SYNTAX)
            bedit.use_colour.SetValue(True)
            bedit.color.Enable()
            bedit.color.SetColour(colour)
    else:
        # New button: append after the last defined slot
        n = len(self.custombuttons)
        while n > 0 and self.custombuttons[n - 1] is None:
            n -= 1
    if bedit.ShowModal() == wx.ID_OK:
        if n == len(self.custombuttons):
            self.custombuttons.append(None)
        self.custombuttons[n] = SpecialButton(bedit.name.GetValue().strip(), bedit.command.GetValue().strip(), custom = True)
        if bedit.use_colour.GetValue():
            self.custombuttons[n].background = bedit.color.GetColour().GetAsString(wx.C2S_CSS_SYNTAX)
        self.cbutton_save(n, self.custombuttons[n])
    wx.CallAfter(bedit.Destroy)
    wx.CallAfter(self.cbuttons_reload)
def cbutton_remove(self, e, button):
    """Remove the given custom button and shift the following ones up,
    re-saving each shifted slot to the rc file."""
    n = button.custombutton
    self.cbutton_save(n, None)
    del self.custombuttons[n]
    for i in range(n, len(self.custombuttons)):
        self.cbutton_save(i, self.custombuttons[i])
    # NOTE(review): the rc entry of the old last slot is not cleared here,
    # so a stale "button" line may remain — confirm against save_in_rc
    wx.CallAfter(self.cbuttons_reload)
def cbutton_order(self, e, button, dir):
    """Swap the given button with its neighbour (dir < 0: left,
    otherwise right) and persist both slots."""
    n = button.custombutton
    if dir < 0:
        n = n - 1
    if n + 1 >= len(self.custombuttons):
        self.custombuttons.append(None)  # pad
    # swap
    self.custombuttons[n], self.custombuttons[n + 1] = self.custombuttons[n + 1], self.custombuttons[n]
    self.cbutton_save(n, self.custombuttons[n])
    self.cbutton_save(n + 1, self.custombuttons[n + 1])
    wx.CallAfter(self.cbuttons_reload)
def editbutton(self, e):
    """Mouse handler for custom buttons: right click (or command event)
    opens the edit popup menu, left-drag reorders buttons by drag & drop."""
    if e.IsCommandEvent() or e.ButtonUp(wx.MOUSE_BTN_RIGHT):
        if e.IsCommandEvent():
            pos = (0, 0)
        else:
            pos = e.GetPosition()
        popupmenu = wx.Menu()
        obj = e.GetEventObject()
        if hasattr(obj, "custombutton"):
            # Existing button: offer edit / move / remove entries
            item = popupmenu.Append(-1, _("Edit custom button '%s'") % e.GetEventObject().GetLabelText())
            self.Bind(wx.EVT_MENU, lambda e, button = e.GetEventObject(): self.cbutton_edit(e, button), item)
            item = popupmenu.Append(-1, _("Move left <<"))
            self.Bind(wx.EVT_MENU, lambda e, button = e.GetEventObject(): self.cbutton_order(e, button, -1), item)
            if obj.custombutton == 0: item.Enable(False)
            item = popupmenu.Append(-1, _("Move right >>"))
            self.Bind(wx.EVT_MENU, lambda e, button = e.GetEventObject(): self.cbutton_order(e, button, 1), item)
            if obj.custombutton == 63: item.Enable(False)
            buttonscount = len(self.custombuttons_widgets) - 2
            if obj.custombutton == buttonscount: item.Enable(False)
            pos = self.panel.ScreenToClient(e.GetEventObject().ClientToScreen(pos))
            item = popupmenu.Append(-1, _("Remove custom button '%s'") % e.GetEventObject().GetLabelText())
            self.Bind(wx.EVT_MENU, lambda e, button = e.GetEventObject(): self.cbutton_remove(e, button), item)
        else:
            item = popupmenu.Append(-1, _("Add custom button"))
            self.Bind(wx.EVT_MENU, self.cbutton_edit, item)
        self.panel.PopupMenu(popupmenu, pos)
    elif e.Dragging() and e.LeftIsDown():
        obj = e.GetEventObject()
        scrpos = obj.ClientToScreen(e.GetPosition())
        if not hasattr(self, "dragpos"):
            # First drag event: remember the anchor position
            self.dragpos = scrpos
            e.Skip()
            return
        else:
            dx, dy = self.dragpos[0] - scrpos[0], self.dragpos[1] - scrpos[1]
            if dx * dx + dy * dy < 30 * 30:  # threshold to detect dragging for jittery mice
                e.Skip()
                return
        if not hasattr(self, "dragging"):
            # init dragging of the custom button
            if hasattr(obj, "custombutton") and (not hasattr(obj, "properties") or obj.properties is not None):
                # Make empty slots visible drop targets
                for b in self.custombuttons_widgets:
                    if not hasattr(b, "properties") or b.properties is None:
                        b.Enable()
                        b.SetLabel("")
                        b.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
                        b.SetForegroundColour("black")
                        b.SetSize(obj.GetSize())
                        if self.toolbarsizer.GetItem(b) is not None:
                            self.toolbarsizer.SetItemMinSize(b, obj.GetSize())
                            self.mainsizer.Layout()
                # Floating proxy button that follows the mouse pointer
                self.dragging = wx.Button(self.panel, -1, obj.GetLabel(), style = wx.BU_EXACTFIT)
                self.dragging.SetBackgroundColour(obj.GetBackgroundColour())
                self.dragging.SetForegroundColour(obj.GetForegroundColour())
                self.dragging.sourcebutton = obj
                self.dragging.Raise()
                self.dragging.Disable()
                self.dragging.SetPosition(self.panel.ScreenToClient(scrpos))
                self.last_drag_dest = obj
                # Save the source's appearance (s_*) so it can be restored
                self.dragging.label = obj.s_label = obj.GetLabel()
                self.dragging.bgc = obj.s_bgc = obj.GetBackgroundColour()
                self.dragging.fgc = obj.s_fgc = obj.GetForegroundColour()
        else:
            # dragging in progress
            self.dragging.SetPosition(self.panel.ScreenToClient(scrpos))
            wx.CallAfter(self.dragging.Refresh)
            dst = None
            src = self.dragging.sourcebutton
            drg = self.dragging
            for b in self.custombuttons_widgets:
                if b.GetScreenRect().Contains(scrpos):
                    dst = b
                    break
            if dst is not self.last_drag_dest:
                # Hover target changed: undo the previous preview swap...
                if self.last_drag_dest is not None:
                    self.last_drag_dest.SetBackgroundColour(self.last_drag_dest.s_bgc)
                    self.last_drag_dest.SetForegroundColour(self.last_drag_dest.s_fgc)
                    self.last_drag_dest.SetLabel(self.last_drag_dest.s_label)
                # ...and preview the swap with the new target
                if dst is not None and dst is not src:
                    dst.s_bgc = dst.GetBackgroundColour()
                    dst.s_fgc = dst.GetForegroundColour()
                    dst.s_label = dst.GetLabel()
                    src.SetBackgroundColour(dst.GetBackgroundColour())
                    src.SetForegroundColour(dst.GetForegroundColour())
                    src.SetLabel(dst.GetLabel())
                    dst.SetBackgroundColour(drg.bgc)
                    dst.SetForegroundColour(drg.fgc)
                    dst.SetLabel(drg.label)
                else:
                    src.SetBackgroundColour(drg.bgc)
                    src.SetForegroundColour(drg.fgc)
                    src.SetLabel(drg.label)
                self.last_drag_dest = dst
    elif hasattr(self, "dragging") and not e.LeftIsDown():
        # dragging finished
        obj = e.GetEventObject()
        scrpos = obj.ClientToScreen(e.GetPosition())
        dst = None
        src = self.dragging.sourcebutton
        drg = self.dragging
        for b in self.custombuttons_widgets:
            if b.GetScreenRect().Contains(scrpos):
                dst = b
                break
        if dst is not None and hasattr(dst, "custombutton"):
            # Commit the swap into the model and the rc file
            src_i = src.custombutton
            dst_i = dst.custombutton
            self.custombuttons[src_i], self.custombuttons[dst_i] = self.custombuttons[dst_i], self.custombuttons[src_i]
            self.cbutton_save(src_i, self.custombuttons[src_i])
            self.cbutton_save(dst_i, self.custombuttons[dst_i])
        # Drop trailing padding entries
        while self.custombuttons[-1] is None:
            del self.custombuttons[-1]
        wx.CallAfter(self.dragging.Destroy)
        del self.dragging
        wx.CallAfter(self.cbuttons_reload)
        del self.last_drag_dest
        del self.dragpos
    else:
        e.Skip()
def process_button(self, e):
    """Run the command attached to a custom button; Ctrl/Alt-click opens
    the edit menu instead."""
    try:
        if hasattr(e.GetEventObject(), "custombutton"):
            if wx.GetKeyState(wx.WXK_CONTROL) or wx.GetKeyState(wx.WXK_ALT):
                return self.editbutton(e)
            self.cur_button = e.GetEventObject().custombutton
        command = e.GetEventObject().properties.command
        command = self.precmd(command)
        self.onecmd(command)
        self.cur_button = None
    except:
        self.log(_("Failed to handle button"))
        self.cur_button = None
        raise
# --------------------------------------------------------------
# Macros handling
# --------------------------------------------------------------
def start_macro(self, macro_name, old_macro_definition = ""):
    """Open the macro editor in GUI mode; delegate to the console
    implementation while the rc file is being replayed."""
    if not self.processing_rc:
        def cb(definition):
            # Editor callback: an empty definition means delete (after
            # confirmation) or cancel
            if len(definition.strip()) == 0:
                if old_macro_definition != "":
                    dialog = wx.MessageDialog(self, _("Do you want to erase the macro?"), style = wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION)
                    if dialog.ShowModal() == wx.ID_YES:
                        self.delete_macro(macro_name)
                        return
                self.log(_("Cancelled."))
                return
            self.cur_macro_name = macro_name
            self.cur_macro_def = definition
            self.end_macro()
        MacroEditor(macro_name, old_macro_definition, cb)
    else:
        pronsole.pronsole.start_macro(self, macro_name, old_macro_definition)
def end_macro(self):
    """Finish macro definition and refresh the macros menu."""
    pronsole.pronsole.end_macro(self)
    self.update_macros_menu()
def delete_macro(self, macro_name):
    """Delete the given macro and refresh the macros menu."""
    pronsole.pronsole.delete_macro(self, macro_name)
    self.update_macros_menu()
def new_macro(self, e = None):
    """Prompt for a macro name and open the macro editor for it.

    Returns the entered name ("" when cancelled).
    """
    dialog = wx.Dialog(self, -1, _("Enter macro name"))
    panel = wx.Panel(dialog)
    textsizer = wx.BoxSizer(wx.HORIZONTAL)
    text = wx.StaticText(panel, -1, _("Macro name:"))
    namectrl = wx.TextCtrl(panel, -1, style = wx.TE_PROCESS_ENTER)
    # Enter in the text field confirms the dialog
    dialog.Bind(wx.EVT_TEXT_ENTER,
                lambda e: dialog.EndModal(wx.ID_OK), namectrl)
    # Layout
    ## Set a minimum size for the name control box
    min_size = namectrl.GetTextExtent('Default Long Macro Name')
    namectrl.SetMinSize(wx.Size(min_size.width, -1))
    ## Group the text and the name control box horizontally
    topsizer = wx.BoxSizer(wx.VERTICAL)
    textsizer.Add(text, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
    textsizer.AddSpacer(get_space('minor'))
    textsizer.Add(namectrl, 1, wx.EXPAND | wx.ALIGN_LEFT)
    panel.SetSizer(textsizer)
    topsizer.Add(panel, 1, wx.ALL, get_space('major'))
    ## Group everything vertically
    topsizer.Add(wx.StaticLine(dialog, -1, style = wx.LI_HORIZONTAL), 0, wx.EXPAND)
    topsizer.Add(dialog.CreateButtonSizer(wx.OK | wx.CANCEL), 0, wx.ALIGN_RIGHT | wx.ALL, get_space('stddlg'))
    dialog.SetSizer(topsizer)
    topsizer.Fit(dialog)
    dialog.CentreOnParent()
    namectrl.SetFocus()
    macro = ""
    if dialog.ShowModal() == wx.ID_OK:
        macro = namectrl.GetValue()
        if macro != "":
            wx.CallAfter(self.edit_macro, macro)
    dialog.Destroy()
    return macro
def edit_macro(self, macro):
    """Validate the macro name and open the editor with its current
    definition (empty for a brand new macro)."""
    if macro == "": return self.new_macro()
    if macro in self.macros:
        old_def = self.macros[macro]
    elif len([chr(c) for c in macro.encode("ascii", "replace") if not chr(c).isalnum() and chr(c) != "_"]):
        # Reject names containing anything but ASCII alphanumerics and "_"
        # (non-ASCII characters become "?" via "replace" and fail too)
        self.log(_("Macro name may contain only ASCII alphanumeric symbols and underscores"))
        return
    elif hasattr(self.__class__, "do_" + macro):
        self.log(_("Name '%s' is being used by built-in command") % macro)
        return
    else:
        old_def = ""
    self.start_macro(macro, old_def)
    return macro
def update_macros_menu(self):
    """Rebuild the macros menu so it matches self.macros."""
    if not hasattr(self, "macros_menu"):
        return  # too early, menu not yet built
    try:
        # Remove every entry after the first (fixed) menu item
        while True:
            item = self.macros_menu.FindItemByPosition(1)
            if item is None: break
            self.macros_menu.DestroyItem(item)
    except:
        pass
    for macro in self.macros.keys():
        # Default argument binds each macro name at definition time
        self.Bind(wx.EVT_MENU, lambda x, m = macro: self.start_macro(m, self.macros[m]), self.macros_menu.Append(-1, macro))
# --------------------------------------------------------------
# Slic3r integration
# --------------------------------------------------------------
def load_slic3r_configs(self, menus):
    """List Slic3r configurations and create menu entries for them."""
    def version_tuple(version):
        # Numeric version comparison. The previous code compared the raw
        # split strings, which sorts e.g. "1.10.0" before "1.3.0".
        # Non-numeric components count as 0.
        parts = []
        for part in version.split("."):
            m = re.match(r"\d+", part)
            parts.append(int(m.group()) if m else 0)
        return tuple(parts)
    # Hack to get correct path for Slic3r config
    orig_appname = self.app.GetAppName()
    self.app.SetAppName("Slic3r")
    configpath = wx.StandardPaths.Get().GetUserDataDir()
    self.slic3r_configpath = configpath
    configfile = os.path.join(configpath, "slic3r.ini")
    if not os.path.exists(configfile):
        # Fall back to Slic3r PE's (Prusa Edition) config location
        self.app.SetAppName("Slic3rPE")
        configpath = wx.StandardPaths.Get().GetUserDataDir()
        self.slic3r_configpath = configpath
        configfile = os.path.join(configpath, "slic3r.ini")
    if not os.path.exists(configfile):
        self.settings.slic3rintegration = False
        return
    self.app.SetAppName(orig_appname)
    config = self.read_slic3r_config(configfile)
    version = config.get("dummy", "version")  # Slic3r version
    self.slic3r_configs = {}
    for cat in menus:
        menu = menus[cat]
        pattern = os.path.join(configpath, cat, "*.ini")
        files = sorted(glob.glob(pattern))
        try:
            preset = config.get("presets", cat)
            # Starting from Slic3r 1.3.0, preset names have no extension
            if version_tuple(version) >= (1, 3, 0):
                preset += ".ini"
            self.slic3r_configs[cat] = preset
        except Exception:
            preset = None
            self.slic3r_configs[cat] = None
        for f in files:
            name = os.path.splitext(os.path.basename(f))[0]
            item = menu.Append(-1, name, f, wx.ITEM_RADIO)
            item.Check(os.path.basename(f) == preset)
            self.Bind(wx.EVT_MENU,
                      lambda event, cat = cat, f = f:
                      self.set_slic3r_config(configfile, cat, f), item)
def read_slic3r_config(self, configfile, parser = None):
    """Helper to read a Slic3r configuration file.

    Slic3r ini files start without a section header, so a synthetic
    "[dummy]" section is prepended on the fly to satisfy configparser.
    Returns the populated RawConfigParser.
    """
    import configparser
    # NOTE(review): the `parser` argument is always replaced; it is kept
    # only for interface compatibility.
    parser = configparser.RawConfigParser()

    class add_header:
        """File wrapper that yields a '[dummy]' header line first."""
        def __init__(self, f):
            self.f = f
            self.header = '[dummy]'

        def readline(self):
            if self.header:
                try: return self.header
                finally: self.header = None
            else:
                return self.f.readline()

        def __iter__(self):
            import itertools
            return itertools.chain([self.header], iter(self.f))
    # Close the file deterministically (it previously relied on the GC)
    with open(configfile) as f:
        parser.read_file(add_header(f), configfile)
    return parser
def set_slic3r_config(self, configfile, cat, file):
    """Set new preset for a given category and optionally write it back
    into Slic3r's own config file."""
    self.slic3r_configs[cat] = file
    if self.settings.slic3rupdate:
        config = self.read_slic3r_config(configfile)
        version = config.get("dummy", "version")  # Slic3r version
        preset = os.path.basename(file)
        def version_tuple(v):
            # Numeric comparison: naive string comparison sorts
            # "1.10.0" before "1.3.0"
            parts = []
            for part in v.split("."):
                m = re.match(r"\d+", part)
                parts.append(int(m.group()) if m else 0)
            return tuple(parts)
        # Starting from Slic3r 1.3.0, preset names have no extension
        if version_tuple(version) >= (1, 3, 0):
            preset = os.path.splitext(preset)[0]
        config.set("presets", cat, preset)
        # Bug fix: this was `StringIO.StringIO()`, a Python 2 leftover
        # that raises NameError under Python 3
        from io import StringIO
        f = StringIO()
        config.write(f)
        # Strip the synthetic section header added by read_slic3r_config
        data = f.getvalue()
        f.close()
        data = data.replace("[dummy]\n", "")
        with open(configfile, "w") as f:
            f.write(data)
class PronterApp(wx.App):
    """wx application wrapper that owns the main Pronterface window."""

    mainwindow = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.SetAppName("Pronterface")
        lang = wx.Locale.GetSystemLanguage()
        # Fall back to English if unable to determine language
        if lang == wx.LANGUAGE_UNKNOWN:
            lang = wx.LANGUAGE_ENGLISH_US
        self.locale = wx.Locale(lang)
        self.mainwindow = PronterWindow(self)
        self.mainwindow.Show()
# (extraction artifact removed: dataset metadata row separating two source
#  files; the following content is printrun/gcview.py from kliment/Printrun)
#!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
import wx
from . import gcoder
from .gl.panel import wxGLPanel
from .gl.trackball import build_rotmatrix
from .gl.libtatlin import actors
from .injectgcode import injector, injector_edit
from pyglet.gl import glPushMatrix, glPopMatrix, \
glTranslatef, glRotatef, glScalef, glMultMatrixd, \
glGetDoublev, GL_MODELVIEW_MATRIX, GLdouble
from .gviz import GvizBaseFrame
from .gui.widgets import get_space
from .utils import imagefile, install_locale, get_home_pos
install_locale('pronterface')
def create_model(light):
    """Return a gcode model actor: the lightweight variant when `light`
    is true, the full one otherwise."""
    return actors.GcodeModelLight() if light else actors.GcodeModel()
def gcode_dims(g):
    """Return ((xmin, xmax, width), (ymin, ymax, depth),
    (zmin, zmax, height)) for the given gcode object."""
    axes = (("xmin", "xmax", "width"),
            ("ymin", "ymax", "depth"),
            ("zmin", "zmax", "height"))
    return tuple(tuple(getattr(g, attr) for attr in axis) for axis in axes)
def set_model_colors(model, root):
    """Copy every gcview_<color_*> attribute found on `root` onto the
    corresponding color_* field of `model`."""
    prefix = "color_"
    for field in dir(model):
        if not field.startswith(prefix):
            continue
        root_fieldname = "gcview_" + field
        if hasattr(root, root_fieldname):
            setattr(model, field, getattr(root, root_fieldname))
def recreate_platform(self, build_dimensions, circular, grid):
    """Rebuild the platform actor (slot 0 of self.objects) after a
    build-dimensions change and refresh the view."""
    self.platform = actors.Platform(build_dimensions, circular = circular, grid = grid)
    self.objects[0].model = self.platform
    wx.CallAfter(self.Refresh)
def set_gcview_params(self, path_width, path_height):
    """Store half path dimensions and propagate them to every loaded
    gcode model; return True when at least one model was updated."""
    self.path_halfwidth = path_width / 2
    self.path_halfheight = path_height / 2
    updated = False
    for obj in self.objects[1:]:
        model = obj.model
        if isinstance(model, actors.GcodeModel):
            model.set_path_size(self.path_halfwidth, self.path_halfheight)
            updated = True
    return updated
# Keyboard bindings for the G-code viewer.
# E selected for Up because is above D
LAYER_UP_KEYS = ord('U'), ord('E'), wx.WXK_UP
LAYER_DOWN_KEYS = ord('D'), wx.WXK_DOWN
# NOTE(review): 388/390 are presumably the numpad +/- key codes — confirm
ZOOM_IN_KEYS = wx.WXK_PAGEDOWN, 388, wx.WXK_RIGHT, ord('+')
ZOOM_OUT_KEYS = wx.WXK_PAGEUP, 390, wx.WXK_LEFT, ord('-')
FIT_KEYS = [ord('F')]
CURRENT_LAYER_KEYS = [ord('C')]
RESET_KEYS = [ord('R')]
class GcodeViewPanel(wxGLPanel):
def __init__(self, parent,
             build_dimensions = (200, 200, 100, 0, 0, 0),
             realparent = None, antialias_samples = 0, perspective=False):
    """G-code viewer panel.

    build_dimensions: presumably (width, depth, height, x off, y off,
    z off) — indices 3/4 are used as X/Y offsets by draw_objects.
    realparent: logical parent when the wx parent is an intermediate
    container.
    """
    if perspective:
        self.orthographic=False
    super().__init__(parent, wx.DefaultPosition,
                     wx.DefaultSize, 0,
                     antialias_samples = antialias_samples)
    self.canvas.Bind(wx.EVT_MOUSE_EVENTS, self.move)
    self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.double)
    # self.canvas.Bind(wx.EVT_KEY_DOWN, self.keypress)
    # in Windows event inspector shows only EVT_CHAR_HOOK events
    self.canvas.Bind(wx.EVT_CHAR_HOOK, self.keypress)
    self.initialized = 0
    self.canvas.Bind(wx.EVT_MOUSEWHEEL, self.wheel)
    self.parent = realparent or parent
    self.initpos = None
    self.build_dimensions = build_dimensions
    self.dist = max(self.build_dimensions[:2])
    # Trackball orientation quaternion (identity to start)
    self.basequat = [0, 0, 0, 1]
    self.mousepos = [0, 0]
def inject(self):
    """Inject G-code at the currently displayed layer."""
    l = self.parent.model.num_layers_to_draw
    # Map the viewer layer index back to the gcode layer key
    filtered = [k for k, v in self.parent.model.layer_idxs_map.items() if v == l]
    if filtered:
        injector(self.parent.model.gcode, l, filtered[0])
    else:
        logging.error(_("Invalid layer for injection"))
def editlayer(self):
    """Open the G-code editor on the currently displayed layer."""
    l = self.parent.model.num_layers_to_draw
    # Map the viewer layer index back to the gcode layer key
    filtered = [k for k, v in self.parent.model.layer_idxs_map.items() if v == l]
    if filtered:
        injector_edit(self.parent.model.gcode, l, filtered[0])
    else:
        logging.error(_("Invalid layer for edition"))
def setlayercb(self, layer):
    """Hook invoked when the displayed layer changes; no-op by default."""
    pass
def OnInitGL(self, *args, **kwargs):
    """Initialize OpenGL, then load any files queued on the parent
    before the GL context existed."""
    super().OnInitGL(*args, **kwargs)
    filenames = getattr(self.parent, 'filenames', None)
    if filenames:
        for filename in filenames:
            self.parent.load_file(filename)
        self.parent.autoplate()
        # Call the parent's load callback if it defines one
        getattr(self.parent, 'loadcb', bool)()
        self.parent.filenames = None
def create_objects(self):
    '''create opengl objects when opengl is initialized'''
    for obj in self.parent.objects:
        if obj.model and obj.model.loaded and not obj.model.initialized:
            obj.model.init()
def update_object_resize(self):
    '''called when the window is resized; only called once opengl is
    initialized'''
    pass
def draw_objects(self):
    '''called in the middle of ondraw after the buffer has been cleared'''
    self.create_objects()

    glPushMatrix()
    # Rotate according to trackball
    glMultMatrixd(build_rotmatrix(self.basequat))
    # Move origin to bottom left of platform
    platformx0 = -self.build_dimensions[3] - self.parent.platform.width / 2
    platformy0 = -self.build_dimensions[4] - self.parent.platform.depth / 2
    glTranslatef(platformx0, platformy0, 0)
    for obj in self.parent.objects:
        if not obj.model \
           or not obj.model.loaded:
            continue
        # Skip (comment out) initialized check, which safely causes empty
        # model during progressive load. This can cause exceptions/garbage
        # render, but seems fine for now
        # May need to lock init() and draw_objects() together
        # if not obj.model.initialized:
        #     continue
        glPushMatrix()
        # Per-object placement: offset, rotation around Z, centering, scale
        glTranslatef(*(obj.offsets))
        glRotatef(obj.rot, 0.0, 0.0, 1.0)
        glTranslatef(*(obj.centeroffset))
        glScalef(*obj.scale)
        obj.model.display()
        glPopMatrix()
    glPopMatrix()
    # ==========================================================================
    # Utils
    # ==========================================================================
    def get_modelview_mat(self, local_transform):
        """Return the current modelview matrix as a (GLdouble * 16) array.

        If local_transform is true, the trackball rotation and platform
        translation are applied first (on a pushed matrix) so the result
        matches the transform used by draw_objects().
        """
        mvmat = (GLdouble * 16)()
        if local_transform:
            glPushMatrix()
            # Rotate according to trackball
            glMultMatrixd(build_rotmatrix(self.basequat))
            # Move origin to bottom left of platform
            platformx0 = -self.build_dimensions[3] - self.parent.platform.width / 2
            platformy0 = -self.build_dimensions[4] - self.parent.platform.depth / 2
            glTranslatef(platformx0, platformy0, 0)
            glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
            glPopMatrix()
        else:
            glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
        return mvmat

    def double(self, event):
        """Forward a double-click event to the parent's optional clickcb hook."""
        getattr(self.parent, 'clickcb', bool)(event)
    def move(self, event):
        """react to mouse actions:
        no mouse: show red mousedrop
        LMB: rotate viewport
        RMB: move viewport
        """
        if event.Entering():
            # Grab keyboard focus so the keypress shortcuts work
            self.canvas.SetFocus()
            event.Skip()
            return
        if event.Dragging():
            if event.LeftIsDown():
                self.handle_rotation(event)
            elif event.RightIsDown():
                self.handle_translation(event)
            self.Refresh(False)
        elif event.LeftUp() or event.RightUp():
            # Drag finished: forget the drag anchor
            self.initpos = None
        event.Skip()
def layerup(self):
if not getattr(self.parent, 'model', False):
return
max_layers = self.parent.model.max_layers
current_layer = self.parent.model.num_layers_to_draw
# accept going up to max_layers + 1
# max_layers means visualizing the last layer differently,
# max_layers + 1 means visualizing all layers with the same color
new_layer = min(max_layers + 1, current_layer + 1)
self.parent.model.num_layers_to_draw = new_layer
self.parent.setlayercb(new_layer)
wx.CallAfter(self.Refresh)
def layerdown(self):
if not getattr(self.parent, 'model', False):
return
current_layer = self.parent.model.num_layers_to_draw
new_layer = max(1, current_layer - 1)
self.parent.model.num_layers_to_draw = new_layer
self.parent.setlayercb(new_layer)
wx.CallAfter(self.Refresh)
    # Timestamp of the last processed wheel event (duplicate-event filter)
    wheelTimestamp = None

    def handle_wheel(self, event):
        """Zoom the viewport, or change layers when Shift is held."""
        if self.wheelTimestamp == event.Timestamp:
            # filter duplicate event delivery in Ubuntu, Debian issue #1110
            return

        self.wheelTimestamp = event.Timestamp

        delta = event.GetWheelRotation()
        factor = 1.05
        if event.ControlDown():
            # Ctrl gives a finer zoom step (or 10-layer jumps with Shift)
            factor = 1.02

        if hasattr(self.parent, "model") and event.ShiftDown():
            if not self.parent.model:
                return
            count = 1 if not event.ControlDown() else 10
            for i in range(count):
                if delta > 0:
                    self.layerup()
                else:
                    self.layerdown()
            return
        x, y = event.GetPosition() * self.GetContentScaleFactor()
        # Zoom around the 3D point currently under the mouse cursor
        x, y, _ = self.mouse_to_3d(x, y)
        if delta > 0:
            self.zoom(factor, (x, y))
        else:
            self.zoom(1 / factor, (x, y))

    def wheel(self, event):
        """react to mouse wheel actions:
        without shift: set max layer
        with shift: zoom viewport
        """
        self.handle_wheel(event)
        wx.CallAfter(self.Refresh)
    def fit(self):
        """Center the view on the model; in orthographic mode also zoom to fit."""
        if not self.parent.model or not self.parent.model.loaded:
            return
        self.canvas.SetCurrent(self.context)
        dims = gcode_dims(self.parent.model.gcode)
        self.reset_mview(1.0)
        # Offset from the model's XY bounding-box center to the bed center
        center_x = (dims[0][0] + dims[0][1]) / 2
        center_y = (dims[1][0] + dims[1][1]) / 2
        center_x = self.build_dimensions[0] / 2 - center_x
        center_y = self.build_dimensions[1] / 2 - center_y
        if self.orthographic:
            ratio = float(self.dist) / max(dims[0][2], dims[1][2])
            glScalef(ratio, ratio, 1)
        glTranslatef(center_x, center_y, 0)
        wx.CallAfter(self.Refresh)
def keypress(self, event):
"""gets keypress events and moves/rotates active shape"""
if event.HasModifiers():
# let alt+c bubble up
event.Skip()
return
step = event.ControlDown() and 1.05 or 1.1
key = event.GetKeyCode()
if key in LAYER_UP_KEYS:
self.layerup()
return # prevent shifting focus to other controls
elif key in LAYER_DOWN_KEYS:
self.layerdown()
return
# x, y, _ = self.mouse_to_3d(self.width / 2, self.height / 2)
elif key in ZOOM_IN_KEYS:
self.zoom_to_center(step)
return
elif key in ZOOM_OUT_KEYS:
self.zoom_to_center(1 / step)
return
elif key in FIT_KEYS:
self.fit()
elif key in CURRENT_LAYER_KEYS:
if not self.parent.model or not self.parent.model.loaded:
return
self.parent.model.only_current = not self.parent.model.only_current
wx.CallAfter(self.Refresh)
elif key in RESET_KEYS:
self.resetview()
event.Skip()
    def resetview(self):
        """Reset camera rotation and zoom to the default view."""
        self.canvas.SetCurrent(self.context)
        self.reset_mview(0.9)
        # Identity quaternion: no rotation
        self.basequat = [0, 0, 0, 1]
        wx.CallAfter(self.Refresh)
class GCObject:
    """Placement state (offsets, rotation, scale) wrapped around one model."""

    def __init__(self, model):
        self.model = model
        self.offsets = [0, 0, 0]
        self.centeroffset = [0, 0, 0]
        self.rot = 0
        self.curlayer = 0.0
        self.scale = [1.0, 1.0, 1.0]
class GcodeViewLoader:
    """Mixin that loads G-code into a renderable model, layer by layer."""

    # Rendered path cross-section half-sizes (presumably mm — TODO confirm)
    path_halfwidth = 0.2
    path_halfheight = 0.15

    def addfile_perlayer(self, gcode = None, showall = False):
        """Generator: load `gcode` yielding after each layer; yields None last."""
        self.model = create_model(self.root.settings.light3d
                                  if self.root else False)
        if isinstance(self.model, actors.GcodeModel):
            self.model.set_path_size(self.path_halfwidth, self.path_halfheight)
        self.objects[-1].model = self.model
        if self.root:
            set_model_colors(self.model, self.root)
        if gcode is not None:
            generator = self.model.load_data(gcode)
            generator_output = next(generator)
            while generator_output is not None:
                yield generator_output
                generator_output = next(generator)
        wx.CallAfter(self.Refresh)
        yield None

    def addfile(self, gcode = None, showall = False):
        """Load `gcode` synchronously by draining addfile_perlayer()."""
        generator = self.addfile_perlayer(gcode, showall)
        while next(generator) is not None:
            continue

    def set_gcview_params(self, path_width, path_height):
        # Delegate to the module-level helper
        return set_gcview_params(self, path_width, path_height)
from printrun.gviz import BaseViz
class GcodeViewMainWrapper(GcodeViewLoader, BaseViz):
    """3D viewer widget exposing a gviz-compatible API.

    Hosts a GcodeViewPanel and forwards unknown attribute lookups to it,
    so callers written against the 2D Gviz widget keep working.
    """

    def __init__(self, parent, build_dimensions, root, circular, antialias_samples, grid, perspective=False):
        self.root = root
        self.glpanel = GcodeViewPanel(parent, realparent = self,
                                      build_dimensions = build_dimensions,
                                      antialias_samples = antialias_samples, perspective=perspective)
        self.glpanel.SetMinSize((150, 150))
        if self.root and hasattr(self.root, "gcview_color_background"):
            self.glpanel.color_background = self.root.gcview_color_background
        self.clickcb = None
        self.widget = self.glpanel
        # Coalesces redraw requests triggered from print progress updates
        self.refresh_timer = wx.CallLater(100, self.Refresh)
        self.p = self  # Hack for backwards compatibility with gviz API
        self.grid = grid
        self.platform = actors.Platform(build_dimensions, circular = circular, grid = grid)
        self.model = None
        self.objects = [GCObject(self.platform), GCObject(None)]

    def __getattr__(self, name):
        # Delegate anything we don't define to the underlying GL panel
        return getattr(self.glpanel, name)

    def on_settings_change(self, changed_settings):
        """Refresh model colors when any gcview color setting changed."""
        if self.model:
            for s in changed_settings:
                if s.name.startswith('gcview_color_'):
                    self.model.update_colors()
                    break

    def set_current_gline(self, gline):
        """Mark the model as printed up to `gline` and schedule a redraw."""
        if gline.is_move and gline.gcview_end_vertex is not None \
           and self.model and self.model.loaded:
            self.model.printed_until = gline.gcview_end_vertex
            if not self.refresh_timer.IsRunning():
                self.refresh_timer.Start()

    def recreate_platform(self, build_dimensions, circular, grid):
        return recreate_platform(self, build_dimensions, circular, grid)

    def setlayer(self, layer):
        """Display the visualized layer matching gcode layer index `layer`."""
        if layer in self.model.layer_idxs_map:
            viz_layer = self.model.layer_idxs_map[layer]
            # Fix: operate on our own model directly.  The previous
            # `self.parent.model` only resolved via __getattr__ delegation
            # through the GL panel and back to this wrapper.
            self.model.num_layers_to_draw = viz_layer
            wx.CallAfter(self.Refresh)

    def clear(self):
        """Drop the current model and redraw an empty scene."""
        self.model = None
        self.objects[-1].model = None
        wx.CallAfter(self.Refresh)
class GcodeViewFrame(GvizBaseFrame, GcodeViewLoader):
    '''A simple class for using OpenGL with wxPython.'''

    def __init__(self, parent, ID, title, build_dimensions, objects = None,
                 pos = wx.DefaultPosition, size = wx.DefaultSize,
                 style = wx.DEFAULT_FRAME_STYLE, root = None, circular = False,
                 antialias_samples = 0,
                 grid = (1, 10), perspective=False):
        GvizBaseFrame.__init__(self, parent, ID, title,
                               pos, size, style)
        self.root = root
        panel, h_sizer = self.create_base_ui()
        # Coalesces redraw requests triggered from print progress updates
        self.refresh_timer = wx.CallLater(100, self.Refresh)
        self.p = self  # Hack for backwards compatibility with gviz API
        self.clonefrom = objects
        self.platform = actors.Platform(build_dimensions, circular = circular, grid = grid)
        self.model = objects[1].model if objects else None
        self.objects = [GCObject(self.platform), GCObject(None)]
        # Insert the extra "Fit to view" tool right after the reset tool (id 3)
        fit_image = wx.Image(imagefile('fit.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        tool_pos = self.toolbar.GetToolPos(3) + 1
        self.toolbar.InsertTool(tool_pos, 10, " " + _("Fit to view"), fit_image,
                                shortHelp = _("Fit to view [F]"), longHelp = _("Fit view to display entire print"))
        self.toolbar.Realize()
        self.glpanel = GcodeViewPanel(panel,
                                      build_dimensions = build_dimensions,
                                      realparent = self,
                                      antialias_samples = antialias_samples, perspective=perspective)
        if self.root and hasattr(self.root, "gcview_color_background"):
            self.glpanel.color_background = self.root.gcview_color_background
        h_sizer.Add(self.glpanel, 1, wx.EXPAND)
        h_sizer.Add(self.layerslider, 0, wx.EXPAND | wx.ALL, get_space('minor'))
        self.glpanel.SetToolTip(_("Click [Mouse-Right] to move the view, click [Mouse-Left] to pan the view, scroll with [Mouse Wheel] to zoom, and scroll with [Shift]+[Mouse Wheel] to change the layer."))
        minsize = self.toolbar.GetEffectiveMinSize().width
        self.SetMinClientSize((minsize, minsize))

        # Toolbar tool ids are wired straight to the GL panel actions
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.zoom_to_center(1 / 1.2), id = 1)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.zoom_to_center(1.2), id = 2)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.resetview(), id = 3)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.layerdown(), id = 4)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.layerup(), id = 5)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.fit(), id = 10)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.inject(), id = 6)
        self.Bind(wx.EVT_TOOL, lambda x: self.glpanel.editlayer(), id = 7)
        self.Bind(wx.EVT_TOOL, lambda x: self.Close(), id = 9)

    def setlayercb(self, layer):
        """Sync the slider and status bar when the panel changes layer."""
        self.layerslider.SetValue(layer)
        self.update_status("")

    def update_status(self, extra):
        """Show current layer number, `extra`, and Z height in the status bar."""
        layer = self.model.num_layers_to_draw
        # Map the visualized layer index back to the gcode layer index
        filtered = [k for k, v in self.model.layer_idxs_map.items() if v == layer]
        if filtered:
            true_layer = filtered[0]
            z = self.model.gcode.all_layers[true_layer].z
            message = _("Layer %d: %s Z = %.03f mm") % (layer, extra, z)
        else:
            message = _("Last layer: Object complete")
        wx.CallAfter(self.SetStatusText, message, 0)

    def process_slider(self, event):
        """Handle layer-slider changes, clamped to [1, max_layers + 1]."""
        if self.model is not None:
            new_layer = self.layerslider.GetValue()
            new_layer = min(self.model.max_layers + 1, new_layer)
            new_layer = max(1, new_layer)
            self.model.num_layers_to_draw = new_layer
            self.update_status("")
            wx.CallAfter(self.Refresh)
        else:
            logging.info(_("G-Code Viewer: Can't process slider. Please wait until model is loaded completely."))

    def set_current_gline(self, gline):
        """Mark the model as printed up to `gline` and schedule a redraw."""
        if gline.is_move and gline.gcview_end_vertex is not None \
           and self.model and self.model.loaded:
            self.model.printed_until = gline.gcview_end_vertex
            if not self.refresh_timer.IsRunning():
                self.refresh_timer.Start()

    def recreate_platform(self, build_dimensions, circular, grid):
        return recreate_platform(self, build_dimensions, circular, grid)

    def addfile(self, gcode = None):
        """Load gcode (or clone prepared objects) and reset the layer slider."""
        if self.clonefrom:
            self.model = self.clonefrom[-1].model.copy()
            self.objects[-1].model = self.model
        else:
            GcodeViewLoader.addfile(self, gcode)
        self.layerslider.SetRange(1, self.model.max_layers + 1)
        self.layerslider.SetValue(self.model.max_layers + 1)
        wx.CallAfter(self.SetStatusText, _("Last layer: Object complete"), 0)
        wx.CallAfter(self.Refresh)

    def clear(self):
        """Drop the current model and redraw an empty scene."""
        self.model = None
        self.objects[-1].model = None
        wx.CallAfter(self.Refresh)
if __name__ == "__main__":
    # Standalone demo: display a gcode file and animate print progress.
    import sys
    app = wx.App(redirect = False)
    build_dimensions = [200, 200, 100, 0, 0, 0]
    title = _("G-Code Viewer")
    frame = GcodeViewFrame(None, wx.ID_ANY, title, size = (400, 400),
                           build_dimensions = build_dimensions)
    gcode = gcoder.GCode(open(sys.argv[1]), get_home_pos(build_dimensions))
    frame.addfile(gcode)

    # The first and last move lines bracket the simulated print
    first_move = None
    for i in range(len(gcode.lines)):
        if gcode.lines[i].is_move:
            first_move = gcode.lines[i]
            break
    last_move = None
    for i in range(len(gcode.lines) - 1, -1, -1):
        if gcode.lines[i].is_move:
            last_move = gcode.lines[i]
            break
    nsteps = 20
    steptime = 500
    # Evenly-spaced sample lines between the first and last move
    lines = [first_move] + [gcode.lines[int(float(i) * (len(gcode.lines) - 1) / nsteps)] for i in range(1, nsteps)] + [last_move]
    current_line = 0

    def setLine():
        # Advance the "printed until" marker one sample per timer tick
        global current_line
        frame.set_current_gline(lines[current_line])
        current_line = (current_line + 1) % len(lines)
        timer.Start()
    timer = wx.CallLater(steptime, setLine)
    timer.Start()

    frame.Show(True)
    app.MainLoop()
    app.Destroy()
| 21,874 | Python | .py | 487 | 35.131417 | 205 | 0.61048 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,352 | gviz.py | kliment_Printrun/printrun/gviz.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from queue import Queue
from collections import deque
import numpy
import wx
import time
from . import gcoder
from .injectgcode import injector, injector_edit
from printrun.gui.viz import BaseViz
from .gui.widgets import get_space
from .utils import imagefile, install_locale, get_home_pos
install_locale('pronterface')
class GvizBaseFrame(wx.Frame):
    """Shared frame scaffolding (toolbar, slider, status bar) for the viewers."""

    def create_base_ui(self):
        """Build status bar, toolbar and layer slider; return (panel, h_sizer)."""
        self.CreateStatusBar(1)
        self.SetStatusText(_("Layer number and Z position show here when you scroll"))

        panel = wx.Panel(self, -1)
        v_sizer = wx.BoxSizer(wx.VERTICAL)
        h_sizer = wx.BoxSizer(wx.HORIZONTAL)

        self.toolbar = wx.ToolBar(panel, -1, style = wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_HORZ_TEXT)
        self.build_toolbar()
        self.toolbar.Realize()
        v_sizer.Add(self.toolbar, 0, wx.EXPAND)
        v_sizer.Add(h_sizer, 1, wx.EXPAND)

        self.layerslider = wx.Slider(panel, style = wx.SL_VERTICAL | wx.SL_AUTOTICKS | wx.SL_LEFT | wx.SL_INVERSE)
        self.layerslider.Bind(wx.EVT_SCROLL, self.process_slider)
        panel.SetSizer(v_sizer)
        return panel, h_sizer

    def build_toolbar(self):
        """Populate the toolbar; the tool ids are bound by the subclasses."""
        self.toolbar.SetMargins(get_space('minor'), get_space('mini'))
        self.toolbar.SetToolPacking(get_space('minor'))
        self.toolbar.AddTool(1, '', wx.Image(imagefile('zoom_out.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Zoom Out [-]"))
        self.toolbar.AddTool(2, '', wx.Image(imagefile('zoom_in.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Zoom In [+]"),)
        self.toolbar.AddTool(3, _("Reset View"), wx.Image(imagefile('fit.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(),
                             shortHelp = _("Reset View [R]"))
        self.toolbar.AddSeparator()
        self.toolbar.AddTool(4, '', wx.Image(imagefile('arrow_down.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Move Down a Layer [D]"))
        self.toolbar.AddTool(5, '', wx.Image(imagefile('arrow_up.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Move Up a Layer [U]"))
        self.toolbar.AddSeparator()
        self.toolbar.AddTool(6, _("Inject"), wx.Image(imagefile('inject.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(),
                             wx.NullBitmap, shortHelp = _("Inject G-Code"), longHelp = _("Insert code at the beginning of this layer"))
        self.toolbar.AddTool(7, _("Edit"), wx.Image(imagefile('edit.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(),
                             wx.NullBitmap, shortHelp = _("Edit Layer"), longHelp = _("Edit the G-Code of this layer"))
        self.toolbar.AddStretchableSpace()
        self.toolbar.AddSeparator()
        self.toolbar.AddTool(9, _("Close"), wx.Image(imagefile('fit.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(),
                             shortHelp = _("Close Window"))

    def setlayercb(self, layer):
        """Sync the slider when the viewer changes layer."""
        self.layerslider.SetValue(layer)

    def process_slider(self, event):
        """Subclasses must handle layer-slider changes."""
        raise NotImplementedError
# Legacy wx menu ids (apparently unused in this module — kept for compatibility)
ID_ABOUT = 101
ID_EXIT = 110
class GvizWindow(GvizBaseFrame):
    """Standalone window hosting a 2D Gviz panel with toolbar and layer slider."""

    def __init__(self, f = None, size = (550, 550), build_dimensions = [200, 200, 100, 0, 0, 0], grid = (10, 50), extrusion_width = 0.5, bgcolor = "#000000"):
        super().__init__(None, title = _("G-Code Viewer"), size = size, style = wx.DEFAULT_FRAME_STYLE)

        panel, h_sizer = self.create_base_ui()

        self.p = Gviz(panel, size = size, build_dimensions = build_dimensions, grid = grid, extrusion_width = extrusion_width, bgcolor = bgcolor, realparent = self)
        h_sizer.Add(self.p, 1, wx.EXPAND)
        h_sizer.Add(self.layerslider, 0, wx.EXPAND | wx.ALL, get_space('minor'))
        # Typo fix in user-facing text: missing space after "[Mouse-Left]"
        self.p.SetToolTip(_("Click [Mouse-Right] to move the view, click [Mouse-Left] to pan the view, scroll with [Mouse Wheel] to zoom, and scroll with [Shift]+[Mouse Wheel] to change the layer."))
        self.Layout()
        # Grow the requested size by the slider and toolbar chrome
        size = (size[0] + self.layerslider.GetEffectiveMinSize().width,
                size[1] + self.toolbar.GetEffectiveMinSize().height)
        minsize = self.toolbar.GetEffectiveMinSize().width
        self.SetMinClientSize((minsize, minsize))
        self.SetClientSize(size)

        # Toolbar actions (ids created in GvizBaseFrame.build_toolbar)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.zoom(-1, -1, 1 / 1.2), id = 1)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.zoom(-1, -1, 1.2), id = 2)
        self.Bind(wx.EVT_TOOL, self.reset_view, id = 3)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.layerdown(), id = 4)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.layerup(), id = 5)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.inject(), id = 6)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.editlayer(), id = 7)
        # NOTE(review): build_toolbar creates no tool with id 8, so this
        # binding is currently never triggered from the toolbar.
        self.Bind(wx.EVT_TOOL, self.reset_selection, id = 8)
        self.Bind(wx.EVT_TOOL, lambda x: self.Close(), id = 9)

        self.initpos = None
        self.p.Bind(wx.EVT_KEY_DOWN, self.key)
        self.Bind(wx.EVT_KEY_DOWN, self.key)
        self.p.Bind(wx.EVT_MOUSEWHEEL, self.zoom)
        self.Bind(wx.EVT_MOUSEWHEEL, self.zoom)
        self.p.Bind(wx.EVT_MOUSE_EVENTS, self.mouse)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.mouse)

        if f:
            gcode = gcoder.GCode(f, get_home_pos(self.p.build_dimensions))
            self.p.addfile(gcode)
        self.CenterOnParent()

    def set_current_gline(self, gline):
        """No-op: this standalone window does not track print progress."""
        return

    def process_slider(self, event):
        """Jump the panel to the layer selected on the slider."""
        self.p.layerindex = self.layerslider.GetValue()
        z = self.p.get_currentz()
        wx.CallAfter(self.SetStatusText, _("Layer %d: Z = %.03f mm") % (self.p.layerindex + 1, z), 0)
        self.p.dirty = True
        wx.CallAfter(self.p.Refresh)

    def reset_view(self, event):
        """Restore default pan and zoom."""
        self.p.translate = [0.0, 0.0]
        self.p.scale = self.p.basescale
        self.p.zoom(0, 0, 1.0)

    def reset_selection(self, event):
        """Clear selection rectangles on the viewer panel.

        Bug fix: wx.Frame has no `parent` attribute, so the previous
        `self.parent.rectangles` raised AttributeError; the panel is self.p.
        NOTE(review): Gviz in this file does not read `rectangles` — confirm
        against callers whether this hook is still needed.
        """
        self.p.rectangles = []
        wx.CallAfter(self.p.Refresh)

    def mouse(self, event):
        """Pan the panel while either mouse button is dragged."""
        if event.ButtonUp(wx.MOUSE_BTN_LEFT) or event.ButtonUp(wx.MOUSE_BTN_RIGHT):
            if self.initpos is not None:
                self.initpos = None
        elif event.Dragging():
            e = event.GetPosition()
            if self.initpos is None:
                # Remember where the drag started and the translation then
                self.initpos = e
                self.basetrans = self.p.translate
            self.p.translate = [self.basetrans[0] + (e[0] - self.initpos[0]),
                                self.basetrans[1] + (e[1] - self.initpos[1])]
            self.p.dirty = True
            wx.CallAfter(self.p.Refresh)
        else:
            event.Skip()

    def key(self, event):
        """Keyboard shortcuts: U/D and arrows change layer, +/- zoom."""
        # Keycode definitions
        kup = [85, 315]  # Up keys
        kdo = [68, 317]  # Down Keys
        kzi = [388, 316, 61]  # Zoom In Keys
        kzo = [390, 314, 45]  # Zoom Out Keys
        x = event.GetKeyCode()
        cx, cy = self.p.translate
        if x in kup:
            self.p.layerup()
        if x in kdo:
            self.p.layerdown()
        if x in kzi:
            self.p.zoom(cx, cy, 1.2)
        if x in kzo:
            self.p.zoom(cx, cy, 1 / 1.2)

    def zoom(self, event):
        """Mouse wheel: zoom, or change layer when Shift is held."""
        z = event.GetWheelRotation()
        if event.ShiftDown():
            if z > 0:
                self.p.layerdown()
            elif z < 0:
                self.p.layerup()
        else:
            if z > 0:
                self.p.zoom(event.GetX(), event.GetY(), 1.2)
            elif z < 0:
                self.p.zoom(event.GetX(), event.GetY(), 1 / 1.2)
class Gviz(wx.Panel, BaseViz):
    """2D G-code visualization panel (top-down view, one layer at a time)."""

    # Mark canvas as dirty when setting showall
    _showall = 0

    def _get_showall(self):
        return self._showall

    def _set_showall(self, showall):
        if showall != self._showall:
            self.dirty = True
            self._showall = showall
    # Property so that changing showall forces a full repaint
    showall = property(_get_showall, _set_showall)

    def __init__(self, parent, size = (200, 200), build_dimensions = [200, 200, 100, 0, 0, 0], grid = (10, 50), extrusion_width = 0.5, bgcolor = "#000000", realparent = None):
        wx.Panel.__init__(self, parent, -1)
        self.widget = self
        # Guard against degenerate (zero/negative) sizes
        size = [max(1.0, x) for x in size]
        ratio = size[0] / size[1]
        self.SetMinSize((150, 150 / ratio))
        self.parent = realparent if realparent else parent
        self.size = size
        self.build_dimensions = build_dimensions
        self.grid = grid
        self.Bind(wx.EVT_PAINT, self.paint)
        self.Bind(wx.EVT_SIZE, self.resize)
        # Highlight geometry already drawn (deques) and still pending (queues)
        self.hilight = deque()
        self.hilightarcs = deque()
        self.hilightqueue = Queue(0)
        self.hilightarcsqueue = Queue(0)
        self.clear()
        self.filament_width = extrusion_width  # set it to 0 to disable scaling lines with zoom
        self.update_basescale()
        self.scale = self.basescale
        penwidth = max(1, int(self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0)))
        self.translate = [0.0, 0.0]
        self.mainpen = wx.Pen(wx.Colour(0, 0, 0), penwidth)
        self.arcpen = wx.Pen(wx.Colour(255, 0, 0), penwidth)
        self.travelpen = wx.Pen(wx.Colour(10, 80, 80), penwidth)
        self.hlpen = wx.Pen(wx.Colour(200, 50, 50), penwidth)
        # Progressively lighter pens used for the layers below the current one
        self.fades = [wx.Pen(wx.Colour(int(250 - 0.6 ** i * 100), int(250 - 0.6 ** i * 100), int(200 - 0.4 ** i * 50)), penwidth) for i in range(6)]
        self.penslist = [self.mainpen, self.arcpen, self.travelpen, self.hlpen] + self.fades
        self.bgcolor = wx.Colour()
        self.bgcolor.Set(bgcolor)
        self.blitmap = wx.Bitmap(self.GetClientSize()[0], self.GetClientSize()[1], -1)
        self.paint_overlay = None
    def inject(self):
        """Inject custom G-code at the beginning of the current layer."""
        layer = self.layers[self.layerindex]
        injector(self.gcode, self.layerindex, layer)

    def editlayer(self):
        """Open an editor on the G-code of the current layer."""
        layer = self.layers[self.layerindex]
        injector_edit(self.gcode, self.layerindex, layer)
def clearhilights(self):
self.hilight.clear()
self.hilightarcs.clear()
while not self.hilightqueue.empty():
self.hilightqueue.get_nowait()
while not self.hilightarcsqueue.empty():
self.hilightarcsqueue.get_nowait()
    def clear(self):
        """Reset all parsed geometry and view state to an empty scene."""
        self.gcode = None
        # Last simulated position: [x, y, z, e, f, i, j]
        self.lastpos = [0, 0, 0, 0, 0, 0, 0]
        self.hilightpos = self.lastpos[:]
        self.lines = {}
        self.pens = {}
        self.arcs = {}
        self.arcpens = {}
        self.layers = {}
        self.layersz = []
        self.clearhilights()
        self.layerindex = 0
        self.showall = 0
        self.dirty = True
        self.partial = False
        self.painted_layers = set()
        wx.CallAfter(self.Refresh)
def get_currentz(self):
z = self.layersz[self.layerindex]
z = 0. if z is None else z
return z
    def layerup(self):
        """Move the view one layer up and update slider and status bar."""
        if self.layerindex + 1 < len(self.layers):
            self.layerindex += 1
            z = self.get_currentz()
            wx.CallAfter(self.parent.SetStatusText, _("Layer %d: Z = %.03f mm") % (self.layerindex + 1, z), 0)
            self.dirty = True
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def layerdown(self):
        """Move the view one layer down and update slider and status bar."""
        if self.layerindex > 0:
            self.layerindex -= 1
            z = self.get_currentz()
            wx.CallAfter(self.parent.SetStatusText, _("Layer %d: Z = %.03f mm") % (self.layerindex + 1, z), 0)
            self.dirty = True
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def setlayer(self, layer):
        """Jump to gcode layer index `layer`, if that layer was parsed."""
        if layer in self.layers:
            self.clearhilights()
            self.layerindex = self.layers[layer]
            self.dirty = True
            self.showall = 0
            wx.CallAfter(self.Refresh)
def update_basescale(self):
self.basescale = 2 * [min(float(self.size[0] - 1) / self.build_dimensions[0],
float(self.size[1] - 1) / self.build_dimensions[1])]
    def resize(self, event):
        """Recompute the base scale on panel resize, preserving the zoom ratio."""
        old_basescale = self.basescale
        width, height = self.GetClientSize()
        if width < 1 or height < 1:
            return
        self.size = (width, height)
        self.update_basescale()
        zoomratio = float(self.basescale[0]) / old_basescale[0]
        # Defer the actual zoom so rapid resize events coalesce
        wx.CallLater(200, self.zoom, 0, 0, zoomratio)

    def zoom(self, x, y, factor):
        """Zoom by `factor` keeping canvas point (x, y) fixed; (-1, -1) = center."""
        if x == -1 and y == -1:
            side = min(self.size)
            x = y = side / 2
        self.scale = [s * factor for s in self.scale]

        self.translate = [x - (x - self.translate[0]) * factor,
                          y - (y - self.translate[1]) * factor]
        # Pen width tracks the zoom so the drawn extrusion width stays true
        penwidth = max(1, int(self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0)))
        for pen in self.penslist:
            pen.SetWidth(penwidth)
        self.dirty = True
        wx.CallAfter(self.Refresh)
def _line_scaler(self, x):
return (int(self.scale[0] * x[0]),
int(self.scale[1] * x[1]),
int(self.scale[0] * x[2]),
int(self.scale[1] * x[3]),)
def _arc_scaler(self, x):
return (int(self.scale[0] * x[0]),
int(self.scale[1] * x[1]),
int(self.scale[0] * x[2]),
int(self.scale[1] * x[3]),
int(self.scale[0] * x[4]),
int(self.scale[1] * x[5]),)
    def _drawlines(self, dc, lines, pens):
        """Draw a batch of lines onto `dc`, scaled to the current zoom."""
        scaled_lines = [self._line_scaler(l) for l in lines]
        dc.DrawLineList(scaled_lines, pens)

    def _drawarcs(self, dc, arcs, pens):
        """Draw a batch of arcs onto `dc`; `pens` is one pen or an array of pens."""
        scaled_arcs = [self._arc_scaler(a) for a in arcs]
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        for i, arc in enumerate(scaled_arcs):
            dc.SetPen(pens[i] if isinstance(pens, numpy.ndarray) else pens)
            dc.DrawArc(*arc)

    def repaint_everything(self):
        """Redraw the whole backing bitmap: grid, layer gauge and geometry."""
        width = self.scale[0] * self.build_dimensions[0]
        height = self.scale[1] * self.build_dimensions[1]
        self.blitmap = wx.Bitmap(int(width) + 1, int(height) + 1, -1)
        dc = wx.MemoryDC()
        dc.SelectObject(self.blitmap)
        dc.SetBackground(wx.Brush((250, 250, 200)))
        dc.Clear()

        # Background grid (minor and major spacing from self.grid)
        dc.SetPen(wx.Pen(wx.Colour(180, 180, 150)))
        for grid_unit in self.grid:
            if grid_unit > 0:
                for x in range(int(self.build_dimensions[0] / grid_unit) + 1):
                    draw_x = self.scale[0] * x * grid_unit
                    dc.DrawLine(int(draw_x), 0, int(draw_x), int(height))
                for y in range(int(self.build_dimensions[1] / grid_unit) + 1):
                    draw_y = self.scale[1] * (self.build_dimensions[1] - y * grid_unit)
                    dc.DrawLine(0, int(draw_y), int(width), int(draw_y))
        dc.SetPen(wx.Pen(wx.Colour(0, 0, 0)))

        if not self.showall:
            # Draw layer gauge
            dc.SetBrush(wx.Brush((43, 144, 255)))
            dc.DrawRectangle(int(width) - 15, 0, 15, int(height))
            dc.SetBrush(wx.Brush((0, 255, 0)))
            if self.layers:
                dc.DrawRectangle(int(width) - 14, int((1.0 - (1.0 * (self.layerindex + 1)) / len(self.layers)) * height), 13, int(height) - 1)

        if self.showall:
            # Draw every layer with its own pens
            for i in range(len(self.layersz)):
                self.painted_layers.add(i)
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            dc.SelectObject(wx.NullBitmap)
            return

        if self.layerindex < len(self.layers) and self.layerindex in self.lines:
            # Up to six layers below the current one, in fading pens
            for layer_i in range(max(0, self.layerindex - 6), self.layerindex):
                self._drawlines(dc, self.lines[layer_i], self.fades[self.layerindex - layer_i - 1])
                self._drawarcs(dc, self.arcs[layer_i], self.fades[self.layerindex - layer_i - 1])
            self._drawlines(dc, self.lines[self.layerindex], self.pens[self.layerindex])
            self._drawarcs(dc, self.arcs[self.layerindex], self.arcpens[self.layerindex])

        self._drawlines(dc, self.hilight, self.hlpen)
        self._drawarcs(dc, self.hilightarcs, self.hlpen)

        self.paint_hilights(dc)

        dc.SelectObject(wx.NullBitmap)

    def repaint_partial(self):
        """In showall mode, draw only the layers added since the last paint."""
        if self.showall:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
            for i in set(range(len(self.layersz))).difference(self.painted_layers):
                self.painted_layers.add(i)
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            dc.SelectObject(wx.NullBitmap)

    def paint_hilights(self, dc = None):
        """Flush queued highlight lines/arcs onto the backing bitmap."""
        if self.hilightqueue.empty() and self.hilightarcsqueue.empty():
            return
        hl = []
        if not dc:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
        while not self.hilightqueue.empty():
            hl.append(self.hilightqueue.get_nowait())
        self._drawlines(dc, hl, self.hlpen)
        hlarcs = []
        while not self.hilightarcsqueue.empty():
            hlarcs.append(self.hilightarcsqueue.get_nowait())
        self._drawarcs(dc, hlarcs, self.hlpen)
        dc.SelectObject(wx.NullBitmap)

    def paint(self, event):
        """EVT_PAINT handler: refresh the backing bitmap as needed, then blit."""
        if self.dirty:
            self.dirty = False
            self.partial = False
            self.repaint_everything()
        elif self.partial:
            self.partial = False
            self.repaint_partial()
        self.paint_hilights()
        dc = wx.PaintDC(self)
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        dc.DrawBitmap(self.blitmap, int(self.translate[0]), int(self.translate[1]))
        if self.paint_overlay:
            self.paint_overlay(dc)
    def addfile_perlayer(self, gcode, showall = False):
        """Generator: parse `gcode` yielding per layer; yields None when done."""
        self.clear()
        self.gcode = gcode
        self.showall = showall
        generator = self.add_parsed_gcodes(gcode)
        generator_output = next(generator)
        while generator_output is not None:
            yield generator_output
            generator_output = next(generator)
        max_layers = len(self.layers)
        if hasattr(self.parent, "layerslider"):
            self.parent.layerslider.SetRange(0, max_layers - 1)
            self.parent.layerslider.SetValue(0)
        yield None

    def addfile(self, gcode = None, showall = False):
        """Parse `gcode` synchronously by draining addfile_perlayer()."""
        generator = self.addfile_perlayer(gcode, showall)
        while next(generator) is not None:
            continue
    def _get_movement(self, start_pos, gline):
        """Takes a start position and a gcode, and returns a 3-uple containing
        (final position, line, arc), with line and arc being None if not
        used"""
        target = start_pos[:]
        # Positions are [x, y, z, e, f, i, j]; i/j (arc center offsets)
        # are per-move, not modal, so they are reset every call
        target[5] = 0.0
        target[6] = 0.0
        if gline.current_x is not None:
            target[0] = gline.current_x
        if gline.current_y is not None:
            target[1] = gline.current_y
        if gline.current_z is not None:
            target[2] = gline.current_z
        if gline.e is not None:
            if gline.relative_e:
                target[3] += gline.e
            else:
                target[3] = gline.e
        if gline.f is not None:
            target[4] = gline.f
        if gline.i is not None:
            target[5] = gline.i
        if gline.j is not None:
            target[6] = gline.j
        if gline.command in ["G2", "G3"]:
            # startpos, endpos, arc center
            arc = [self._x(start_pos[0]), self._y(start_pos[1]),
                   self._x(target[0]), self._y(target[1]),
                   self._x(start_pos[0] + target[5]), self._y(start_pos[1] + target[6])]
            if gline.command == "G2":  # clockwise, reverse endpoints
                arc[0], arc[1], arc[2], arc[3] = arc[2], arc[3], arc[0], arc[1]
            return target, None, arc
        # ["G0", "G1"]:
        line = [self._x(start_pos[0]),
                self._y(start_pos[1]),
                self._x(target[0]),
                self._y(target[1])]
        return target, line, None
def _y(self, y):
return self.build_dimensions[1] - (y - self.build_dimensions[4])
def _x(self, x):
return x - self.build_dimensions[3]
    def add_parsed_gcodes(self, gcode):
        """Generator: convert parsed `gcode` layers into drawable lines/arcs.

        Yields the gcode layer index after each processed layer and None at
        the end.  Layers without any move are skipped; the display is
        refreshed at most every 0.2 s while loading.
        """
        start_time = time.time()
        layer_idx = 0
        while layer_idx < len(gcode.all_layers):
            layer = gcode.all_layers[layer_idx]
            has_move = False
            for gline in layer:
                if gline.is_move:
                    has_move = True
                    break
            if not has_move:
                yield layer_idx
                layer_idx += 1
                continue
            viz_layer = len(self.layers)
            self.lines[viz_layer] = []
            self.pens[viz_layer] = []
            self.arcs[viz_layer] = []
            self.arcpens[viz_layer] = []
            for gline in layer:
                if not gline.is_move:
                    continue
                target, line, arc = self._get_movement(self.lastpos[:], gline)
                if line is not None:
                    self.lines[viz_layer].append(line)
                    # Extruding moves get the main pen, travels the travel pen
                    self.pens[viz_layer].append(self.mainpen if target[3] != self.lastpos[3] or gline.extruding else self.travelpen)
                elif arc is not None:
                    self.arcs[viz_layer].append(arc)
                    self.arcpens[viz_layer].append(self.arcpen)
                self.lastpos = target
            # Transform into a numpy array for memory efficiency
            self.lines[viz_layer] = numpy.asarray(self.lines[viz_layer], dtype = numpy.float32)
            self.pens[viz_layer] = numpy.asarray(self.pens[viz_layer])
            self.arcs[viz_layer] = numpy.asarray(self.arcs[viz_layer], dtype = numpy.float32)
            self.arcpens[viz_layer] = numpy.asarray(self.arcpens[viz_layer])
            # Only add layer to self.layers now to prevent the display of an
            # unfinished layer
            self.layers[layer_idx] = viz_layer
            self.layersz.append(layer.z)
            # Refresh display if more than 0.2s have passed
            if time.time() - start_time > 0.2:
                start_time = time.time()
                self.partial = True
                wx.CallAfter(self.Refresh)
            yield layer_idx
            layer_idx += 1
        self.dirty = True
        wx.CallAfter(self.Refresh)
        yield None

    def addgcodehighlight(self, gline):
        """Queue a move `gline` for highlighted drawing over the layer."""
        if gline.command not in ["G0", "G1", "G2", "G3"]:
            return
        target, line, arc = self._get_movement(self.hilightpos[:], gline)
        if line is not None:
            self.hilight.append(line)
            self.hilightqueue.put_nowait(line)
        elif arc is not None:
            self.hilightarcs.append(arc)
            self.hilightarcsqueue.put_nowait(arc)
        self.hilightpos = target
        wx.CallAfter(self.Refresh)
if __name__ == '__main__':
    # Standalone: open the gcode file given on the command line in a window
    import sys
    app = wx.App(False)
    with open(sys.argv[1], "r") as arg:
        main = GvizWindow(arg)
    main.Show()
    app.MainLoop()
| 23,682 | Python | .py | 511 | 35.837573 | 198 | 0.580919 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,353 | gcoder.py | kliment_Printrun/printrun/gcoder.py | #!/usr/bin/env python3
#
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import math
import datetime
import logging
from array import array
gcode_parsed_args = ["x", "y", "e", "f", "z", "i", "j"]
gcode_parsed_nonargs = 'gtmnd'
to_parse = "".join(gcode_parsed_args) + gcode_parsed_nonargs
# Raw strings: these patterns are full of backslash escapes ("\(", "\."),
# which are invalid escape sequences in plain string literals and trigger
# SyntaxWarning/DeprecationWarning on modern Python.  The compiled regexes
# are byte-for-byte identical to the previous ones.
# gcode_exp captures (letter, number) pairs, skipping (...)/";"/"*" comments.
gcode_exp = re.compile(r"\([^\(\)]*\)|;.*|[/\*].*\n|([%s])\s*([-+]?[0-9]*\.?[0-9]*)" % to_parse)
gcode_strip_comment_exp = re.compile(r"\([^\(\)]*\)|;.*|[/\*].*\n")
# Matches "X:10.00 Y:..." coordinate reports from M114 responses.
m114_exp = re.compile(r"\([^\(\)]*\)|[/\*].*\n|([XYZ]):?([-+]?[0-9]*\.?[0-9]*)")
# Template: %-format with a code letter to capture that word (e.g. "S210").
specific_exp = r"(?:\([^\(\)]*\))|(?:;.*)|(?:[/\*].*\n)|(%s[-+]?[0-9]*\.?[0-9]*)"
move_gcodes = ["G0", "G1", "G2", "G3"]
class PyLine:
    """Pure-Python record for one G-code line.

    Slots are filled in lazily by split()/parse_coordinates(); any slot that
    was never assigned reads as None (see __getattr__), so callers can probe
    e.g. ``line.x`` without guards.
    """
    __slots__ = ('x', 'y', 'z', 'e', 'f', 'i', 'j',
                 'raw', 'command', 'is_move',
                 'relative', 'relative_e',
                 'current_x', 'current_y', 'current_z', 'extruding',
                 'current_tool',
                 'gcview_end_vertex')

    def __init__(self, l):
        # Only the raw text is stored up front.
        self.raw = l

    def __getattr__(self, name):
        # Reached only when a slot was never set: report it as absent
        # (None) instead of raising AttributeError.
        return None
class PyLightLine:
    """Slim pure-Python record for one G-code line: only the raw text and
    the parsed command are stored.  Unset slots read as None."""
    __slots__ = ('raw', 'command')

    def __init__(self, l):
        self.raw = l

    def __getattr__(self, name):
        # Unset slots read as None instead of raising AttributeError.
        return None
# Prefer the optional gcoder_line module's memory-efficient line classes
# (see the warning text below); fall back to the pure-Python PyLine /
# PyLightLine defined above when it is unavailable.
try:
    from . import gcoder_line
    Line = gcoder_line.GLine
    LightLine = gcoder_line.GLightLine
except Exception as e:
    logging.warning("Memory-efficient GCoder implementation unavailable: %s" % e)
    Line = PyLine
    LightLine = PyLightLine
def find_specific_code(line, code):
    """Return the numeric argument of the first *code* word on *line*.raw
    (e.g. code="S" on "M104 S210" gives 210.0), or None when absent.
    Comment regions are skipped by the pattern itself."""
    pattern = specific_exp % code
    for match in re.finditer(pattern, line.raw):
        word = match.group(1)
        # Comment alternatives leave the capture group empty/None.
        if word:
            # Drop the leading code letter, keep the numeric part.
            return float(word[1:])
    return None
def S(line):
    """Return the value of the S word on *line* (e.g. "M104 S210" -> 210.0),
    or None when absent."""
    return find_specific_code(line, "S")
def P(line):
    """Return the value of the P word on *line* (e.g. "G4 P500" -> 500.0),
    or None when absent."""
    return find_specific_code(line, "P")
def split(line):
    """Tokenise *line*.raw into (letter, value) pairs.

    Side effects: sets line.command (e.g. "G1") and line.is_move.  Returns
    the token list, or [line.raw] when the line could not be parsed (in
    which case line.command is the raw text)."""
    split_raw = gcode_exp.findall(line.raw.lower())
    if split_raw and split_raw[0][0] == "n":
        # Drop a leading N<line-number> word (checksummed G-code streams).
        del split_raw[0]
    if not split_raw:
        line.command = line.raw
        line.is_move = False
        logging.warning("raw G-Code line \"%s\" could not be parsed" % line.raw)
        return [line.raw]
    command = split_raw[0]
    # Reassemble e.g. ("g", "1") into "G1".
    line.command = command[0].upper() + command[1]
    line.is_move = line.command in move_gcodes
    return split_raw
def parse_coordinates(line, split_raw, imperial = False, force = False):
    """Store the numeric words from *split_raw* as attributes on *line*
    (e.g. ("x", "10") -> line.x = 10.0), converting inches to mm when
    *imperial* is set.  Non-G commands are skipped unless *force*."""
    # Not a G-line, we don't want to parse its arguments
    if not force and line.command[0] != "G":
        return
    scale = 25.4 if imperial else 1
    for bit in split_raw:
        letter = bit[0]
        # Skip command letters (g/t/m/n/d) and words without a numeric part.
        if letter in gcode_parsed_nonargs or not bit[1]:
            continue
        setattr(line, letter, scale * float(bit[1]))
class Layer(list):
    """A list of G-code lines belonging to a single slice of the print.

    z        -- Z height of the layer (None when unknown)
    duration -- estimated print time of the layer, filled in by GCode
    """
    __slots__ = ("duration", "z")

    def __init__(self, lines, z = None):
        super().__init__(lines)
        self.z = z
        self.duration = 0
class GCode:
    """In-memory model of a G-code program.

    Parsing (see _preprocess) walks the file once, tracking per-line
    position/extrusion/tool state, grouping lines into Layer objects and
    computing bounding box, filament usage and an estimated duration.
    """
    # Class of the per-line records; LightGCode overrides this.
    line_class = Line
    lines = None
    layers = None
    all_layers = None
    layer_idxs = None
    line_idxs = None
    append_layer = None
    append_layer_id = None
    imperial = False
    cutting = False
    relative = False
    relative_e = False
    current_tool = 0
    # Home position: current absolute position counted from machine origin
    home_x = 0
    home_y = 0
    home_z = 0
    # Current position: current absolute position counted from machine origin
    current_x = 0
    current_y = 0
    current_z = 0
    # For E this is the absolute position from machine start
    current_e = 0
    # NOTE(review): the *_multi lists below are mutable class attributes and
    # _preprocess grows them with += -- verify instances are not unintentionally
    # sharing these lists.
    current_e_multi=[0]
    total_e = 0
    total_e_multi=[0]
    max_e = 0
    max_e_multi=[0]
    # Current feedrate
    current_f = 0
    # Offset: current offset between the machine origin and the machine current
    # absolute coordinate system (as shifted by G92s)
    offset_x = 0
    offset_y = 0
    offset_z = 0
    offset_e = 0
    offset_e_multi = [0]
    # Expected behavior:
    # - G28 X => X axis is homed, offset_x <- 0, current_x <- home_x
    # - G92 Xk => X axis does not move, so current_x does not change
    #   and offset_x <- current_x - k,
    # - absolute G1 Xk => X axis moves, current_x <- offset_x + k
    # How to get...
    # current abs X from machine origin: current_x
    # current abs X in machine current coordinate system: current_x - offset_x
    filament_length = None
    filament_length_multi=[0]
    duration = None
    # Bounding box of the print (filled in by _preprocess).
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    zmin = None
    zmax = None
    width = None
    depth = None
    height = None
    est_layer_height = None
    # abs_x is the current absolute X in machine current coordinate system
    # (after the various G92 transformations) and can be used to store the
    # absolute position of the head at a given time
    # Read-only accessors exposing the head position in the *current*
    # (G92-shifted) coordinate system: machine-origin position minus offset.
    def _get_abs_x(self):
        return self.current_x - self.offset_x
    abs_x = property(_get_abs_x)
    def _get_abs_y(self):
        return self.current_y - self.offset_y
    abs_y = property(_get_abs_y)
    def _get_abs_z(self):
        return self.current_z - self.offset_z
    abs_z = property(_get_abs_z)
    def _get_abs_e(self):
        return self.current_e - self.offset_e
    abs_e = property(_get_abs_e)
def _get_abs_e_multi(self,i):
return self.current_e_multi[i] - self.offset_e_multi[i]
abs_e = property(_get_abs_e)
    def _get_abs_pos(self):
        # (X, Y, Z) in the current (G92-shifted) coordinate system.
        return (self.abs_x, self.abs_y, self.abs_z)
    abs_pos = property(_get_abs_pos)
    def _get_current_pos(self):
        # (X, Y, Z) counted from the machine origin.
        return (self.current_x, self.current_y, self.current_z)
    current_pos = property(_get_current_pos)
    def _get_home_pos(self):
        return (self.home_x, self.home_y, self.home_z)
    def _set_home_pos(self, home_pos):
        # A falsy value (e.g. None) leaves the current home position alone.
        if home_pos:
            self.home_x, self.home_y, self.home_z = home_pos
    home_pos = property(_get_home_pos, _set_home_pos)
    def _get_layers_count(self):
        # One layer per distinct Z that produced extrusion (see _preprocess).
        return len(self.all_zs)
    layers_count = property(_get_layers_count)
    def __init__(self, data = None, home_pos = None,
                 layer_callback = None, deferred = False,
                 cutting_as_extrusion = False):
        """Build a GCode model.

        data -- iterable of raw G-code lines (may be None for an empty model)
        home_pos -- (x, y, z) position G28 homes to
        layer_callback -- called as callback(self, layer_idx) per finished layer
        deferred -- when True, parsing is postponed until prepare() is called
        cutting_as_extrusion -- treat spindle-on (M3/M4) moves as "extruding"
        """
        self.cutting_as_extrusion = cutting_as_extrusion
        if not deferred:
            self.prepare(data, home_pos, layer_callback)
def prepare(self, data = None, home_pos = None, layer_callback = None):
self.home_pos = home_pos
if data:
line_class = self.line_class
self.lines = [line_class(l2) for l2 in
(l.strip() for l in data)
if l2]
self._preprocess(build_layers = True,
layer_callback = layer_callback)
else:
self.lines = []
self.append_layer_id = 0
self.append_layer = Layer([])
self.all_layers = [self.append_layer]
self.all_zs = set()
self.layers = {}
self.layer_idxs = array('I', [])
self.line_idxs = array('I', [])
    def has_index(self, i):
        """Return True when global (indexed) line number *i* exists."""
        return i < len(self)
    def __len__(self):
        # Length counts lines that were assigned to layers (the unit both
        # layer_idxs and line_idxs are indexed by), not raw parsed lines.
        return len(self.line_idxs)
    def __iter__(self):
        """Iterate over every parsed Line in file order."""
        return self.lines.__iter__()
    def prepend_to_layer(self, commands, layer_idx):
        """Insert raw *commands* (list of str) at the beginning of layer
        *layer_idx*, updating the flat line list and both index arrays.
        Returns the inserted commands in their original order."""
        # Prepend commands in reverse order
        commands = [c.strip() for c in commands[::-1] if c.strip()]
        layer = self.all_layers[layer_idx]
        # Find start index to append lines
        # and end index to append new indices
        start_index = self.layer_idxs.index(layer_idx)
        for i in range(start_index, len(self.layer_idxs)):
            if self.layer_idxs[i] != layer_idx:
                end_index = i
                break
        else:
            # Layer runs to the end of the arrays.
            end_index = i + 1
        end_line = self.line_idxs[end_index - 1]
        for i, command in enumerate(commands):
            gline = Line(command)
            # Split to get command
            split(gline)
            # Force is_move to False
            gline.is_move = False
            # Insert gline at beginning of layer
            layer.insert(0, gline)
            # Insert gline at beginning of list
            self.lines.insert(start_index, gline)
            # Update indices arrays & global gcodes list
            # NOTE(review): the glines go to the *front* of the layer while
            # their indices are appended after the layer's last index --
            # verify consumers do not rely on matching order here.
            self.layer_idxs.insert(end_index + i, layer_idx)
            self.line_idxs.insert(end_index + i, end_line + i + 1)
        return commands[::-1]
    def rewrite_layer(self, commands, layer_idx):
        """Replace the contents of layer *layer_idx* with raw *commands*
        (list of str), rebuilding the index arrays accordingly.  Returns the
        commands in their original order."""
        # Prepend commands in reverse order
        commands = [c.strip() for c in commands[::-1] if c.strip()]
        layer = self.all_layers[layer_idx]
        # Find start index to append lines
        # and end index to append new indices
        start_index = self.layer_idxs.index(layer_idx)
        for i in range(start_index, len(self.layer_idxs)):
            if self.layer_idxs[i] != layer_idx:
                end_index = i
                break
        else:
            # Layer runs to the end of the arrays.
            end_index = i + 1
        # Splice the new layer's index runs into the flat arrays.
        self.layer_idxs = self.layer_idxs[:start_index] + array('I', len(commands) * [layer_idx]) + self.layer_idxs[end_index:]
        self.line_idxs = self.line_idxs[:start_index] + array('I', range(len(commands))) + self.line_idxs[end_index:]
        del self.lines[start_index:end_index]
        del layer[:]
        for i, command in enumerate(commands):
            gline = Line(command)
            # Split to get command
            split(gline)
            # Force is_move to False
            gline.is_move = False
            # Insert gline at beginning of layer
            layer.insert(0, gline)
            # Insert gline at beginning of list
            self.lines.insert(start_index, gline)
        return commands[::-1]
    def append(self, command, store = True):
        '''Add a G-code command to the list
        Parameters
        ----------
        command : str
            Command to be added, e.g. "G1 X10".
        store : bool, default: True
            If True, `command` is appended to the current list of
            commands. If False, processed command is returned but not
            added to the list.
        Returns
        -------
        Line
            A `printrun.gcoder.Line` object containing the processed
            `command`.
        '''
        command = command.strip()
        if not command:
            # TODO: return None or empty gline? Pylint #R1710
            return
        gline = Line(command)
        # Run the standard preprocessing so position/extrusion state
        # advances exactly as during the initial parse.
        self._preprocess([gline])
        if store:
            # Appended lines all live in the dedicated trailing append_layer.
            self.lines.append(gline)
            self.append_layer.append(gline)
            self.layer_idxs.append(self.append_layer_id)
            self.line_idxs.append(len(self.append_layer)-1)
        return gline
    def _preprocess(self, lines = None, build_layers = False,
                    layer_callback = None):
        """Checks for imperial/relativeness settings and tool changes

        Walks *lines* (default: self.lines), updating per-line state
        (position, extrusion, active tool) and the model's running state.
        When build_layers is True it additionally groups lines into Layer
        objects, computes the bounding box and estimates the duration.
        """
        if not lines:
            lines = self.lines
        # Work on locals for speed inside the hot loop; written back at the end.
        imperial = self.imperial
        relative = self.relative
        relative_e = self.relative_e
        current_tool = self.current_tool
        current_x = self.current_x
        current_y = self.current_y
        current_z = self.current_z
        offset_x = self.offset_x
        offset_y = self.offset_y
        offset_z = self.offset_z
        # Extrusion computation
        current_e = self.current_e
        offset_e = self.offset_e
        total_e = self.total_e
        max_e = self.max_e
        cutting = self.cutting
        current_e_multi = self.current_e_multi[current_tool]
        offset_e_multi = self.offset_e_multi[current_tool]
        total_e_multi = self.total_e_multi[current_tool]
        max_e_multi = self.max_e_multi[current_tool]
        # Store this one out of the build_layers scope for efficiency
        cur_layer_has_extrusion = False
        # Initialize layers and other global computations
        if build_layers:
            # Bounding box computation
            xmin = float("inf")
            ymin = float("inf")
            zmin = 0
            xmax = float("-inf")
            ymax = float("-inf")
            zmax = float("-inf")
            # Also compute extrusion-only values
            xmin_e = float("inf")
            ymin_e = float("inf")
            xmax_e = float("-inf")
            ymax_e = float("-inf")
            # Duration estimation
            # TODO:
            # get device caps from firmware: max speed, acceleration/axis
            # (including extruder)
            # calculate the maximum move duration accounting for above ;)
            lastx = lasty = lastz = None
            laste = lastf = 0
            lastdx = 0
            lastdy = 0
            x = y = e = f = 0.0
            currenttravel = 0.0
            moveduration = 0.0
            totalduration = 0.0
            acceleration = 2000.0  # mm/s^2
            layerbeginduration = 0.0
            # Initialize layers
            all_layers = self.all_layers = []
            all_zs = self.all_zs = set()
            layer_idxs = self.layer_idxs = []
            line_idxs = self.line_idxs = []
            last_layer_z = None
            prev_z = None
            cur_z = None
            cur_lines = []
        def append_lines(lines, isEnd):
            # Flush the accumulated lines into a (possibly new) Layer.
            if not build_layers:
                return
            nonlocal layerbeginduration, last_layer_z
            # Start a new layer only when the previous Z actually produced
            # extrusion at a new height; otherwise keep filling the last one.
            if cur_layer_has_extrusion and prev_z != last_layer_z \
               or not all_layers:
                layer = Layer([], prev_z)
                last_layer_z = prev_z
                finished_layer = len(all_layers)-1 if all_layers else None
                all_layers.append(layer)
                all_zs.add(prev_z)
            else:
                layer = all_layers[-1]
                finished_layer = None
            layer_id = len(all_layers)-1
            layer_line = len(layer)
            for i, ln in enumerate(lines):
                layer.append(ln)
                layer_idxs.append(layer_id)
                line_idxs.append(layer_line+i)
            layer.duration += totalduration - layerbeginduration
            layerbeginduration = totalduration
            if layer_callback:
                # we finish a layer when inserting the next
                if finished_layer is not None:
                    layer_callback(self, finished_layer)
                # notify about end layer, there will not be next
                if isEnd:
                    layer_callback(self, layer_id)
        if self.line_class != Line:
            get_line = lambda l: Line(l.raw)
        else:
            get_line = lambda l: l
        for true_line in lines:
            # # Parse line
            # Use a heavy copy of the light line to preprocess
            line = get_line(true_line)
            split_raw = split(line)
            if line.command:
                # Update properties
                if line.is_move:
                    line.relative = relative
                    line.relative_e = relative_e
                    line.current_tool = current_tool
                elif line.command == "G20":
                    imperial = True
                elif line.command == "G21":
                    imperial = False
                elif line.command == "G90":
                    relative = False
                    relative_e = False
                elif line.command == "G91":
                    relative = True
                    relative_e = True
                elif line.command == "M82":
                    relative_e = False
                elif line.command == "M83":
                    relative_e = True
                elif line.command[0] == "T":
                    try:
                        current_tool = int(line.command[1:])
                    except:
                        pass #handle T? by treating it as no tool change
                    # Grow the per-extruder accumulators to cover the new tool.
                    while current_tool+1 > len(self.current_e_multi):
                        self.current_e_multi+=[0]
                        self.offset_e_multi+=[0]
                        self.total_e_multi+=[0]
                        self.max_e_multi+=[0]
                elif line.command == "M3" or line.command == "M4":
                    cutting = True
                elif line.command == "M5":
                    cutting = False
                current_e_multi = self.current_e_multi[current_tool]
                offset_e_multi = self.offset_e_multi[current_tool]
                total_e_multi = self.total_e_multi[current_tool]
                max_e_multi = self.max_e_multi[current_tool]
                if line.command[0] == "G":
                    parse_coordinates(line, split_raw, imperial)
            # Compute current position
            if line.is_move:
                x = line.x
                y = line.y
                z = line.z
                if line.f is not None:
                    self.current_f = line.f
                if line.relative:
                    x = current_x + (x or 0)
                    y = current_y + (y or 0)
                    z = current_z + (z or 0)
                else:
                    if x is not None: x = x + offset_x
                    if y is not None: y = y + offset_y
                    if z is not None: z = z + offset_z
                if x is not None: current_x = x
                if y is not None: current_y = y
                if z is not None: current_z = z
            elif line.command == "G28":
                # Home: axes named on the line (or all when none are named)
                # snap to the home position and drop their G92 offset.
                home_all = not any([line.x, line.y, line.z])
                if home_all or line.x is not None:
                    offset_x = 0
                    current_x = self.home_x
                if home_all or line.y is not None:
                    offset_y = 0
                    current_y = self.home_y
                if home_all or line.z is not None:
                    offset_z = 0
                    current_z = self.home_z
            elif line.command == "G92":
                # G92 does not move the head; it shifts the coordinate system.
                if line.x is not None: offset_x = current_x - line.x
                if line.y is not None: offset_y = current_y - line.y
                if line.z is not None: offset_z = current_z - line.z
            line.current_x = current_x
            line.current_y = current_y
            line.current_z = current_z
            # # Process extrusion
            if line.e is not None:
                if line.is_move:
                    if line.relative_e:
                        line.extruding = line.e > 0
                        total_e += line.e
                        current_e += line.e
                        total_e_multi += line.e
                        current_e_multi += line.e
                    else:
                        new_e = line.e + offset_e
                        line.extruding = new_e > current_e
                        total_e += new_e - current_e
                        current_e = new_e
                        new_e_multi = line.e + offset_e_multi
                        total_e_multi += new_e_multi - current_e_multi
                        current_e_multi = new_e_multi
                    max_e = max(max_e, total_e)
                    max_e_multi=max(max_e_multi, total_e_multi)
                    cur_layer_has_extrusion |= line.extruding and (line.x is not None or line.y is not None)
                elif line.command == "G92":
                    offset_e = current_e - line.e
                    offset_e_multi = current_e_multi - line.e
            if cutting and self.cutting_as_extrusion:
                # Spindle-on moves are displayed like extruding moves.
                line.extruding = True
            self.current_e_multi[current_tool]=current_e_multi
            self.offset_e_multi[current_tool]=offset_e_multi
            self.max_e_multi[current_tool]=max_e_multi
            self.total_e_multi[current_tool]=total_e_multi
            # # Create layers and perform global computations
            if build_layers:
                # Update bounding box
                if line.is_move:
                    if line.extruding:
                        if line.current_x is not None:
                            # G0 X10 ; G1 X20 E5 results in 10..20 even as G0 is not extruding
                            xmin_e = min(xmin_e, line.current_x, xmin_e if lastx is None else lastx)
                            xmax_e = max(xmax_e, line.current_x, xmax_e if lastx is None else lastx)
                        if line.current_y is not None:
                            ymin_e = min(ymin_e, line.current_y, ymin_e if lasty is None else lasty)
                            ymax_e = max(ymax_e, line.current_y, ymax_e if lasty is None else lasty)
                    if max_e <= 0:
                        if line.current_x is not None:
                            xmin = min(xmin, line.current_x)
                            xmax = max(xmax, line.current_x)
                        if line.current_y is not None:
                            ymin = min(ymin, line.current_y)
                            ymax = max(ymax, line.current_y)
                # Compute duration
                if line.command == "G0" or line.command == "G1":
                    x = line.x if line.x is not None else (lastx or 0)
                    y = line.y if line.y is not None else (lasty or 0)
                    z = line.z if line.z is not None else (lastz or 0)
                    e = line.e if line.e is not None else laste
                    # mm/s vs mm/m => divide by 60
                    f = line.f / 60.0 if line.f is not None else lastf
                    # given last feedrate and current feedrate calculate the
                    # distance needed to achieve current feedrate.
                    # if travel is longer than req'd distance, then subtract
                    # distance to achieve full speed, and add the time it took
                    # to get there.
                    # then calculate the time taken to complete the remaining
                    # distance
                    # FIXME: this code has been proven to be super wrong when 2
                    # subsquent moves are in opposite directions, as requested
                    # speed is constant but printer has to fully decellerate
                    # and reaccelerate
                    # The following code tries to fix it by forcing a full
                    # reacceleration if this move is in the opposite direction
                    # of the previous one
                    dx = x - (lastx or 0)
                    dy = y - (lasty or 0)
                    if dx * lastdx + dy * lastdy <= 0:
                        lastf = 0
                    currenttravel = math.hypot(dx, dy)
                    if currenttravel == 0:
                        # Z-only or E-only move: use that axis' travel instead.
                        if line.z is not None:
                            currenttravel = abs(line.z) if line.relative else abs(line.z - (lastz or 0))
                        elif line.e is not None:
                            currenttravel = abs(line.e) if line.relative_e else abs(line.e - laste)
                    # Feedrate hasn't changed, no acceleration/decceleration planned
                    if f == lastf:
                        moveduration = currenttravel / f if f != 0 else 0.
                    else:
                        # FIXME: review this better
                        # this looks wrong : there's little chance that the feedrate we'll decelerate to is the previous feedrate
                        # shouldn't we instead look at three consecutive moves ?
                        distance = 2 * abs(((lastf + f) * (f - lastf) * 0.5) / acceleration)  # multiply by 2 because we have to accelerate and decelerate
                        if distance <= currenttravel and lastf + f != 0 and f != 0:
                            moveduration = 2 * distance / (lastf + f)  # This is distance / mean(lastf, f)
                            moveduration += (currenttravel - distance) / f
                        else:
                            moveduration = 2 * currenttravel / (lastf + f)  # This is currenttravel / mean(lastf, f)
                            # FIXME: probably a little bit optimistic, but probably a much better estimate than the previous one:
                            # moveduration = math.sqrt(2 * distance / acceleration) # probably buggy : not taking actual travel into account
                    lastdx = dx
                    lastdy = dy
                    totalduration += moveduration
                    lastx = x
                    lasty = y
                    lastz = z
                    laste = e
                    lastf = f
                elif line.command == "G4":
                    # Dwell: P argument is milliseconds.
                    moveduration = P(line)
                    if moveduration:
                        moveduration /= 1000.0
                        totalduration += moveduration
                # FIXME : looks like this needs to be tested with "lift Z on move"
                if line.z is not None:
                    if line.command == "G92":
                        cur_z = line.z
                    elif line.is_move:
                        if line.relative and cur_z is not None:
                            cur_z += line.z
                        else:
                            cur_z = line.z
                # A Z change after extrusion closes the current layer.
                if cur_z != prev_z and cur_layer_has_extrusion:
                    append_lines(cur_lines, False)
                    cur_lines = []
                    cur_layer_has_extrusion = False
            if build_layers:
                cur_lines.append(true_line)
                prev_z = cur_z
        # ## Loop done
        # Store current status
        self.imperial = imperial
        self.relative = relative
        self.relative_e = relative_e
        self.current_tool = current_tool
        self.current_x = current_x
        self.current_y = current_y
        self.current_z = current_z
        self.offset_x = offset_x
        self.offset_y = offset_y
        self.offset_z = offset_z
        self.current_e = current_e
        self.offset_e = offset_e
        self.max_e = max_e
        self.total_e = total_e
        self.current_e_multi[current_tool]=current_e_multi
        self.offset_e_multi[current_tool]=offset_e_multi
        self.max_e_multi[current_tool]=max_e_multi
        self.total_e_multi[current_tool]=total_e_multi
        self.cutting = cutting
        # Finalize layers
        if build_layers:
            if cur_lines:
                append_lines(cur_lines, True)
            self.append_layer_id = len(all_layers)
            self.append_layer = Layer([])
            self.append_layer.duration = 0
            all_layers.append(self.append_layer)
            self.layer_idxs = array('I', layer_idxs)
            self.line_idxs = array('I', line_idxs)
            # Compute bounding box
            all_zs = self.all_zs.union({zmin}).difference({None})
            zmin = min(all_zs)
            zmax = max(all_zs)
            self.filament_length = self.max_e
            while len(self.filament_length_multi)<len(self.max_e_multi):
                self.filament_length_multi+=[0]
            for i in enumerate(self.max_e_multi):
                self.filament_length_multi[i[0]]=i[1]
            # Prefer the extrusion-only bounding box when anything was extruded.
            if self.filament_length > 0:
                self.xmin = xmin_e if not math.isinf(xmin_e) else 0
                self.xmax = xmax_e if not math.isinf(xmax_e) else 0
                self.ymin = ymin_e if not math.isinf(ymin_e) else 0
                self.ymax = ymax_e if not math.isinf(ymax_e) else 0
            else:
                self.xmin = xmin if not math.isinf(xmin) else 0
                self.xmax = xmax if not math.isinf(xmax) else 0
                self.ymin = ymin if not math.isinf(ymin) else 0
                self.ymax = ymax if not math.isinf(ymax) else 0
            self.zmin = zmin if not math.isinf(zmin) else 0
            self.zmax = zmax if not math.isinf(zmax) else 0
            self.width = self.xmax - self.xmin
            self.depth = self.ymax - self.ymin
            self.height = self.zmax - self.zmin
            # Finalize duration
            totaltime = datetime.timedelta(seconds = int(totalduration))
            self.duration = totaltime
    def idxs(self, i):
        """Map global line index *i* to (layer index, line-within-layer)."""
        return self.layer_idxs[i], self.line_idxs[i]
    def estimate_duration(self):
        """Return (number of layers, estimated total print duration)."""
        return self.layers_count, self.duration
class LightGCode(GCode):
    """GCode variant backed by the lightweight line records (which store
    only the raw text and the parsed command)."""
    line_class = LightLine
def main():
    """CLI entry point: parse the G-code file named on the command line and
    print its dimensions, filament usage, layer count and duration."""
    if len(sys.argv) < 2:
        print("usage: %s filename.gcode" % sys.argv[0])
        return
    print("Line object size:", sys.getsizeof(Line("G0 X0")))
    print("Light line object size:", sys.getsizeof(LightLine("G0 X0")))
    # Bug fix: the "rU" mode was removed in Python 3.11 (text mode already
    # provides universal newlines), and the handle was never closed -- use
    # plain text mode inside a context manager.
    with open(sys.argv[1]) as gcode_file:
        gcode = GCode(gcode_file)
    print("Dimensions:")
    xdims = (gcode.xmin, gcode.xmax, gcode.width)
    print("\tX: %0.02f - %0.02f (%0.02f)" % xdims)
    ydims = (gcode.ymin, gcode.ymax, gcode.depth)
    print("\tY: %0.02f - %0.02f (%0.02f)" % ydims)
    zdims = (gcode.zmin, gcode.zmax, gcode.height)
    print("\tZ: %0.02f - %0.02f (%0.02f)" % zdims)
    print("Filament used: %0.02fmm" % gcode.filament_length)
    for i in enumerate(gcode.filament_length_multi):
        print("E%d %0.02fmm" % (i[0], i[1]))
    print("Number of layers: %d" % gcode.layers_count)
    print("Estimated duration: %s" % gcode.estimate_duration()[1])
if __name__ == '__main__':
    main()
| 30,564 | Python | .py | 686 | 30.846939 | 158 | 0.519056 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,354 | excluder.py | kliment_Printrun/printrun/excluder.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from printrun import gviz
from .utils import imagefile, install_locale
install_locale('pronterface')
class ExcluderWindow(gviz.GvizWindow):
    """Gviz-based viewer that lets the user drag rectangles over regions of
    the print to be skipped; the rectangles are stored (in G-code
    coordinates) on the owning Excluder instance."""
    def __init__(self, excluder, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.SetTitle(_("Print Excluder"))
        self.parent = excluder
        # Replace toolbar tools 6 and 7 (presumably layer navigation --
        # confirm against GvizWindow) with a single "Reset Selection" tool:
        # exclusion always applies to all layers.
        tool_pos = self.toolbar.GetToolPos(6)
        self.toolbar.InsertTool(tool_pos, 8, _('Reset Selection'),
                                wx.Image(imagefile('reset.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(),
                                shortHelp = _("Reset Selection"))
        self.toolbar.DeleteTool(6)
        self.toolbar.DeleteTool(7)
        self.toolbar.Realize()
        minsize = self.toolbar.GetEffectiveMinSize().width
        self.SetMinClientSize((minsize, minsize))
        self.p.SetToolTip(
            _("Draw rectangles where print instructions should be ignored.") +
            _("\nExcluder always affects all layers, layer setting is disregarded."))
        # Hook our rectangle overlay into the gviz panel's paint cycle.
        self.p.paint_overlay = self.paint_selection
        self.p.layerup()
        self.CenterOnParent()
    def real_to_gcode(self, x, y):
        """Convert panel-space coordinates to G-code coordinates using the
        build dimensions/offsets (Y axis is flipped)."""
        return (x + self.p.build_dimensions[3],
                self.p.build_dimensions[4] + self.p.build_dimensions[1] - y)
    def gcode_to_real(self, x, y):
        """Inverse of real_to_gcode."""
        return (x - self.p.build_dimensions[3],
                self.p.build_dimensions[1] - (y - self.p.build_dimensions[4]))
    def mouse(self, event):
        """Mouse handler: right-drag pans the view; left-drag draws a new
        exclusion rectangle (appended to parent.rectangles)."""
        if event.ButtonUp(wx.MOUSE_BTN_LEFT) \
           or event.ButtonUp(wx.MOUSE_BTN_RIGHT):
            # End of a drag gesture.
            self.initpos = None
        elif event.Dragging() and event.RightIsDown():
            # Pan: translate relative to where the drag started.
            e = event.GetPosition()
            if not self.initpos or not hasattr(self, "basetrans"):
                self.initpos = e
                self.basetrans = self.p.translate
            self.p.translate = [self.basetrans[0] + (e[0] - self.initpos[0]),
                                self.basetrans[1] + (e[1] - self.initpos[1])]
            self.p.dirty = 1
            wx.CallAfter(self.p.Refresh)
        elif event.Dragging() and event.LeftIsDown():
            # Rectangle selection: convert the pointer to G-code space and
            # keep updating the last rectangle until the button is released.
            x, y = event.GetPosition()
            if not self.initpos:
                self.basetrans = self.p.translate
            x = (x - self.basetrans[0]) / self.p.scale[0]
            y = (y - self.basetrans[1]) / self.p.scale[1]
            x, y = self.real_to_gcode(x, y)
            if not self.initpos:
                self.initpos = (x, y)
                self.parent.rectangles.append((0, 0, 0, 0))
            else:
                pos = (x, y)
                # Normalize so (x0, y0) is always the lower corner.
                x0 = min(self.initpos[0], pos[0])
                y0 = min(self.initpos[1], pos[1])
                x1 = max(self.initpos[0], pos[0])
                y1 = max(self.initpos[1], pos[1])
                self.parent.rectangles[-1] = (x0, y0, x1, y1)
            wx.CallAfter(self.p.Refresh)
        else:
            event.Skip()
    def _line_scaler(self, orig):
        # Arguments:
        #   orig: coordinates of two corners of a rectangle (x0, y0, x1, y1)
        #     in G-code space
        # Returns:
        #   rectangle in panel device coordinates as (x, y, width, height)
        x0, y0 = self.gcode_to_real(orig[0], orig[1])
        x0 = self.p.scale[0] * x0 + self.p.translate[0]
        y0 = self.p.scale[1] * y0 + self.p.translate[1]
        x1, y1 = self.gcode_to_real(orig[2], orig[3])
        x1 = self.p.scale[0] * x1 + self.p.translate[0]
        y1 = self.p.scale[1] * y1 + self.p.translate[1]
        width = max(x0, x1) - min(x0, x1) + 1
        height = max(y0, y1) - min(y0, y1) + 1
        rectangle = (min(x0, x1), min(y0, y1), width, height)
        return tuple(map(int, rectangle))
    def paint_selection(self, dc):
        """Overlay painter: draw every exclusion rectangle semi-transparent."""
        dc = wx.GCDC(dc)
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.DrawRectangleList([self._line_scaler(rect)
                              for rect in self.parent.rectangles],
                             None, wx.Brush((200, 200, 200, 150)))
class Excluder:
    """Holds the user-selected exclusion rectangles (in G-code coordinates)
    and manages the single ExcluderWindow used to edit them."""
    def __init__(self):
        self.rectangles = []
        self.window = None
    def pop_window(self, gcode, *args, **kwargs):
        """Show the excluder window for *gcode*, creating it on first use."""
        if not self.window:
            self.window = ExcluderWindow(self, *args, **kwargs)
            self.window.p.addfile(gcode, True)
            self.window.Bind(wx.EVT_CLOSE, self.close_window)
            self.window.Show()
        else:
            self.window.Show()
            self.window.Raise()
    def close_window(self, event = None):
        """Destroy the window if open (also bound to its EVT_CLOSE)."""
        if self.window:
            self.window.Destroy()
            self.window = None
if __name__ == '__main__':
    import sys
    from . import gcoder
    # GCode's constructor consumes the file eagerly, so the handle can be
    # closed as soon as parsing is done; the old code leaked it.
    with open(sys.argv[1]) as gcode_file:
        gcode = gcoder.GCode(gcode_file)
    app = wx.App(False)
    ex = Excluder()
    ex.pop_window(gcode)
    app.MainLoop()
| 5,405 | Python | .py | 123 | 34.073171 | 103 | 0.583096 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,355 | utils.py | kliment_Printrun/printrun/utils.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import sys
import re
import gettext
import datetime
import subprocess
import shlex
import locale
import logging
from pathlib import Path
DATADIR = os.path.join(sys.prefix, 'share')
def set_utf8_locale():
    """Make sure we read/write all text files in UTF-8"""
    lang, encoding = locale.getlocale()
    if encoding != 'UTF-8':
        # Only LC_CTYPE (character classification/encoding) is changed, so
        # number/date formatting categories keep the user's settings.
        locale.setlocale(locale.LC_CTYPE, (lang, 'UTF-8'))
# Set up Internationalization using gettext
# searching for installed locales on /usr/share; uses relative folder if not
# found (windows)
def install_locale(domain):
    """Install gettext translations for *domain*, making _() a builtin.
    Looks in ./locale first, then the shared locale directory; on macOS the
    gettext.install shortcut is used instead (see issue #1154)."""
    shared_locale_dir = os.path.join(DATADIR, 'locale')
    translation = None
    # NOTE(review): locale.getdefaultlocale() is deprecated since Python
    # 3.11 -- consider locale.getlocale() when the minimum version allows.
    lang = locale.getdefaultlocale()
    osPlatform = platform.system()
    if osPlatform == "Darwin":
        # improvised workaround for macOS crash with gettext.translation, see issue #1154
        if os.path.exists(shared_locale_dir):
            gettext.install(domain, shared_locale_dir)
        else:
            gettext.install(domain, './locale')
    else:
        if os.path.exists('./locale'):
            translation = gettext.translation(domain, './locale',
                                              languages=[lang[0]], fallback= True)
        else:
            translation = gettext.translation(domain, shared_locale_dir,
                                              languages=[lang[0]], fallback= True)
        translation.install()
class LogFormatter(logging.Formatter):
    """Formatter that renders INFO records with a terse format string and
    every other level with a more detailed one.

    format_default -- format used for all levels except INFO
    format_info    -- format used for INFO records
    """
    def __init__(self, format_default, format_info):
        super().__init__(format_info)
        self.format_default = format_default
        self.format_info = format_info
    def format(self, record):
        # Bug fix: on Python 3, logging.Formatter formats through
        # self._style, so assigning self._fmt alone had no effect and every
        # record was rendered with format_info.  Swap the style's format
        # string instead (and keep self._fmt in sync for introspection).
        if record.levelno == logging.INFO:
            self._style._fmt = self.format_info
        else:
            self._style._fmt = self.format_default
        self._fmt = self._style._fmt
        return super().format(record)
def setup_logging(out, filepath = None, reset_handlers = False):
    """Configure the root logger: a stream handler writing to *out*, plus an
    optional file handler when *filepath* is given.  INFO records get a
    terse format, other levels a prefixed one (see LogFormatter)."""
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if reset_handlers:
        logger.handlers = []
    formatter = LogFormatter("[%(levelname)s] %(message)s", "%(message)s")
    logging_handler = logging.StreamHandler(out)
    logging_handler.setFormatter(formatter)
    logger.addHandler(logging_handler)
    if filepath:
        if os.path.isdir(filepath):
            filepath = os.path.join(filepath, "printrun.log")
        else:
            # Fallback for logging path of non console windows applications:
            # Use users home directory in case the file path in printrunconf.ini
            # is not valid or do not exist, see issue #1300
            # NOTE(review): this branch also triggers for any *valid* file
            # path that is not a directory, silently redirecting the log to
            # ~/printrun.log -- confirm that is intended.
            filepath = os.path.join(Path.home(), "printrun.log")
        formatter = LogFormatter("%(asctime)s - [%(levelname)s] %(message)s", "%(asctime)s - %(message)s")
        logging_handler = logging.FileHandler(filepath)
        logging_handler.setFormatter(formatter)
        logger.addHandler(logging_handler)
def iconfile(filename):
    '''
    Get the full path to filename by checking in standard icon locations
    ("pixmaps" directories) or use the frozen executable if applicable
    (See the lookup_file function's documentation for behavior).
    '''
    # py2exe-style frozen builds: the executable path doubles as the icon
    # source (presumably the icon is embedded as a resource -- confirm).
    if hasattr(sys, "frozen") and sys.frozen == "windows_exe":
        return sys.executable
    return pixmapfile(filename)
def imagefile(filename):
    '''
    Get the full path to filename by checking standard image locations,
    those being possible locations of the pronterface "images" directory
    (See the lookup_file function's documentation for behavior).
    '''
    my_local_share = os.path.join(
        os.path.dirname(os.path.dirname(sys.argv[0])),
        "share",
        "pronterface"
    ) # Used by pip install
    # Candidate directories, in priority order.
    image_dirs = [
        os.path.join(DATADIR, 'pronterface', 'images'),
        os.path.join(os.path.dirname(sys.argv[0]), "images"),
        os.path.join(my_local_share, "images"),
        os.path.join(
            getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))),
            "images"
        ), # Check manually since lookup_file checks in frozen but not /images
    ]
    path = lookup_file(filename, image_dirs)
    if path == filename:
        # The file wasn't found in any known location, so use a relative
        # path.
        path = os.path.join("images", filename)
    return path
def lookup_file(filename, prefixes):
    '''
    Resolve *filename* (a bare file name) to a full path.

    The directory of the running script is tried first, then each entry of
    *prefixes* in order; frozen builds additionally search the bundle's
    unpack directory.  The first existing candidate wins; when nothing
    exists the bare *filename* is returned unchanged.  Beware: feeding such
    an unresolved name to e.g. the wx.Image constructor raises a C++-side
    assertion ("invalid image").
    '''
    beside_script = os.path.join(os.path.dirname(sys.argv[0]), filename)
    if os.path.exists(beside_script):
        return beside_script
    if getattr(sys, "frozen", False):
        # PyInstaller unpacks data under _MEIPASS; other freezers leave it
        # next to this module.  (Deliberately mutates the caller's list,
        # matching the original behavior.)
        prefixes += [getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))]
    for search_dir in prefixes:
        candidate = os.path.join(search_dir, filename)
        if os.path.exists(candidate):
            return candidate
    return filename
def pixmapfile(filename):
    '''
    Get the full path to filename by checking in standard icon
    ("pixmaps") directories (See the lookup_file function's
    documentation for behavior).
    '''
    shared_pixmaps_dir = os.path.join(DATADIR, 'pixmaps')
    # Layout produced by "pip install": share/pixmaps next to the scripts dir.
    local_pixmaps_dir = os.path.join(
        os.path.dirname(os.path.dirname(sys.argv[0])),
        "share",
        "pixmaps"
    ) # Used by pip install
    pixmaps_dirs = [shared_pixmaps_dir, local_pixmaps_dir]
    return lookup_file(filename, pixmaps_dirs)
def sharedfile(filename):
    '''
    Get the full path to filename by checking in the shared
    directory (See the lookup_file function's documentation for behavior).
    '''
    # <prefix>/share/pronterface is the installed data directory.
    shared_pronterface_dir = os.path.join(DATADIR, 'pronterface')
    return lookup_file(filename, [shared_pronterface_dir])
def configfile(filename):
    '''
    Get the full path to filename by checking in the
    standard configuration directory (See the lookup_file
    function's documentation for behavior).
    '''
    # Per-user configuration lives under ~/.printrun/.
    return lookup_file(filename, [os.path.expanduser("~/.printrun/"), ])
def decode_utf8(s):
    """Attempt to decode a string, return the string otherwise"""
    return s.decode() if isinstance(s, bytes) else s
def format_time(timestamp):
return datetime.datetime.fromtimestamp(timestamp).strftime("%H:%M:%S")
def format_duration(delta):
    """Format a duration in seconds as [D day[s], ]H:MM:SS (truncated to whole seconds)."""
    whole_seconds = int(delta)
    return str(datetime.timedelta(seconds = whole_seconds))
def prepare_command(command, replaces = None):
    """Split *command* shell-style and substitute placeholders.

    Backslashes are escaped before splitting so that Windows-style
    paths survive shlex.split. The "$python" placeholder always expands
    to the running interpreter when a mapping is given.

    command -- the command line as a single string.
    replaces -- optional mapping of placeholder -> replacement text;
                the caller's mapping is left untouched.

    Returns the command as a list of arguments.
    """
    args = shlex.split(command.replace("\\", "\\\\"))
    if replaces:
        # Copy before inserting "$python" so the caller's dict is not
        # mutated (the previous code wrote into the object passed in).
        substitutions = dict(replaces)
        substitutions["$python"] = sys.executable
        for pattern, rep in substitutions.items():
            args = [bit.replace(pattern, rep) for bit in args]
    return args
def run_command(command, replaces = None, stdout = subprocess.STDOUT,
                stderr = subprocess.STDOUT, blocking = False,
                universal_newlines = False):
    """Run *command* after placeholder substitution.

    blocking -- when True, wait for completion and return the exit
                code; otherwise return the Popen object immediately.
    """
    args = prepare_command(command, replaces)
    if blocking:
        return subprocess.call(args, universal_newlines = universal_newlines)
    return subprocess.Popen(args, stderr = stderr, stdout = stdout,
                            universal_newlines = universal_newlines)
def get_command_output(command, replaces):
    """Run *command* and return everything it printed (stderr folded
    into stdout); blocks until the pipe is exhausted."""
    process = run_command(command, replaces,
                          stdout = subprocess.PIPE, stderr = subprocess.STDOUT,
                          blocking = False, universal_newlines = True)
    return process.stdout.read()
def dosify(name):
    """Shorten a filename to a DOS-style 8-character stem with a .g extension."""
    basename = os.path.split(name)[1]
    stem = basename.split(".")[0]
    return stem[:8] + ".g"
class RemainingTimeEstimator:
    """Estimate remaining print time from per-layer gcode duration data.

    A "drift" factor (actual elapsed time / estimated time for the
    completed layers) corrects the model against reality as the print
    progresses.
    """
    drift = None  # correction factor: actual time / estimated time so far
    gcode = None  # gcode model -- assumed to expose all_layers, layer_idxs and idxs(); TODO confirm
    def __init__(self, gcode):
        self.drift = 1
        self.previous_layers_estimate = 0
        self.current_layer_estimate = 0
        self.current_layer_lines = 0
        self.gcode = gcode
        self.last_idx = -1
        self.last_estimate = None
        # Total estimated duration of all layers not yet printed.
        self.remaining_layers_estimate = sum(layer.duration for layer in gcode.all_layers)
        if len(gcode) > 0:
            self.update_layer(0, 0)
    def update_layer(self, layer, printtime):
        """Switch bookkeeping to *layer* given *printtime* seconds already elapsed."""
        self.previous_layers_estimate += self.current_layer_estimate
        # Only recompute drift once both quantities are meaningful (> 1 s).
        if self.previous_layers_estimate > 1. and printtime > 1.:
            self.drift = printtime / self.previous_layers_estimate
        self.current_layer_estimate = self.gcode.all_layers[layer].duration
        self.current_layer_lines = len(self.gcode.all_layers[layer])
        self.remaining_layers_estimate -= self.current_layer_estimate
        # Invalidate the per-index cache used by __call__.
        self.last_idx = -1
        self.last_estimate = None
    def __call__(self, idx, printtime):
        """Return (remaining, total) time estimates for gcode line *idx*.

        Results are cached per idx; returns (0, 0) when the current
        layer is empty.
        """
        if not self.current_layer_lines:
            return (0, 0)
        if idx == self.last_idx:
            return self.last_estimate
        if idx >= len(self.gcode.layer_idxs):
            return self.last_estimate
        layer, line = self.gcode.idxs(idx)
        # Fraction of the current layer still to be printed.
        layer_progress = (1 - (float(line + 1) / self.current_layer_lines))
        remaining = layer_progress * self.current_layer_estimate + self.remaining_layers_estimate
        estimate = self.drift * remaining
        total = estimate + printtime
        self.last_idx = idx
        self.last_estimate = (estimate, total)
        return self.last_estimate
def parse_build_dimensions(bdim):
    """Parse a build-dimensions specification string.

    *bdim* contains up to nine numbers delimited by almost anything:
    the first three give the build volume (always positive), the next
    three the coordinates of the "southwest" corner of the build
    platform, the last three the homing position. Examples:
    "XXX,YYY" / "XXXxYYY+xxx-yyy" / "XXX,YYY,ZZZ+xxx+yyy-zzz"

    Missing values fall back to [200, 200, 100, 0, 0, 0, 0, 0, 0];
    nonpositive build-volume dimensions are clamped to 1.
    """
    defaults = [200, 200, 100, 0, 0, 0, 0, 0, 0]
    tokens = re.findall(r"([-+]?[0-9]*\.?[0-9]*)", bdim)
    values = []
    for token in tokens:
        # The regex also emits empty strings and bare signs/dots
        # ("-", "+", ".") for stray separators; only keep tokens that
        # float() can actually parse (previously a lone "-" crashed).
        if token and any(c.isdigit() for c in token):
            values.append(float(token))
    if len(values) < len(defaults):
        values += defaults[len(values):]
    for i in range(3):  # build volume must be strictly positive
        if values[i] <= 0:
            values[i] = 1
    return values
def get_home_pos(build_dimensions):
    """Return the (x, y, z) home-position slice, or None when absent."""
    if len(build_dimensions) < 9:
        return None
    return build_dimensions[6:9]
def hexcolor_to_float(color, components):
    """Convert a '#RRGGBB'-style hex color into a tuple of floats in [0, 1].

    components -- number of channels encoded after the '#'; each channel
    may use any fixed digit count (e.g. '#RGB' or '#RRGGBB').
    """
    digits = color[1:]
    ndigits = len(digits) // components
    max_value = 16 ** ndigits - 1
    channels = []
    for start in range(0, len(digits), ndigits):
        raw = int(digits[start:start + ndigits], 16)
        channels.append(round(float(raw) / max_value, 2))
    return tuple(channels)
def check_rgb_color(color):
    """Raise a ValueError (flagged with from_validator) unless *color*
    has a hex body whose length is a multiple of 3 ('#RGB' style)."""
    hex_body = color[1:]
    if len(hex_body) % 3 == 0:
        return
    ex = ValueError(_("Color must be specified as #RGB"))
    ex.from_validator = True
    raise ex
def check_rgba_color(color):
    """Raise a ValueError (flagged with from_validator) unless *color*
    has a hex body whose length is a multiple of 4 ('#RGBA' style)."""
    hex_body = color[1:]
    if len(hex_body) % 4 == 0:
        return
    ex = ValueError(_("Color must be specified as #RGBA"))
    ex.from_validator = True
    raise ex
tempreport_exp = re.compile(r"([TB]\d*):([-+]?\d*\.?\d*)(?: ?\/)?([-+]?\d*\.?\d*)")
def parse_temperature_report(report):
    """Parse an M105-style temperature report into a dict mapping each
    sensor name (e.g. "T", "T0", "B") to a (current, target) string tuple."""
    return {name: (current, target)
            for name, current, target in tempreport_exp.findall(report)}
def compile_file(filename):
    """Read a UTF-8 source file and return its compiled code object."""
    with open(filename, encoding='utf-8') as source:
        return compile(source.read(), filename, 'exec')
def read_history_from(filename):
    """Return the lines of *filename* with trailing whitespace stripped,
    or an empty list when the file does not exist."""
    if not os.path.exists(filename):
        return []
    with open(filename, 'r', encoding='utf-8') as history_file:
        return [line.rstrip() for line in history_file]
def write_history_to(filename, hist):
    """Write each entry of *hist* to *filename*, one entry per line."""
    with open(filename, 'w', encoding='utf-8') as history_file:
        history_file.write(''.join(entry + '\n' for entry in hist))
| 12,521 | Python | .py | 296 | 35.581081 | 106 | 0.658432 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,356 | eventhandler.py | kliment_Printrun/printrun/eventhandler.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
class PrinterEventHandler:
    '''
    Skeleton of an event handler for printer events. Subclass it,
    override the callbacks of interest and attach the instance to a
    printcore object; the matching callback is then triggered for each
    event. All default implementations do nothing.
    '''
    def __init__(self):
        '''
        Constructor.
        '''
        pass
    def on_init(self):
        '''
        Called whenever a new printcore is initialized.
        '''
        pass
    def on_send(self, command, gline):
        '''
        Called on every command sent to the printer.
        @param command: The command to be sent.
        @param gline: The parsed high-level command.
        '''
        pass
    def on_recv(self, line):
        '''
        Called on every line read from the printer.
        @param line: The data that has been read from the printer.
        '''
        pass
    def on_connect(self):
        '''
        Called whenever printcore is connected.
        '''
        pass
    def on_disconnect(self):
        '''
        Called whenever printcore is disconnected.
        '''
        pass
    def on_error(self, error):
        '''
        Called whenever an error occurs.
        @param error: The error that has been triggered.
        '''
        pass
    def on_online(self):
        '''
        Called when the printer comes online.
        '''
        pass
    def on_temp(self, line):
        '''
        Called for temperature, status and similar report lines.
        @param line: Line of data.
        '''
        pass
    def on_start(self, resume):
        '''
        Called when printing is started.
        @param resume: If true, the print is resumed.
        '''
        pass
    def on_end(self):
        '''
        Called when printing ends.
        '''
        pass
    def on_layerchange(self, layer):
        '''
        Called when the current layer changes.
        @param layer: The new layer.
        '''
        pass
    def on_preprintsend(self, gline, index, mainqueue):
        '''
        Called before a printing command is sent.
        @param gline: Line to be sent.
        @param index: Index in the mainqueue.
        @param mainqueue: The main queue of commands.
        '''
        pass
    def on_printsend(self, gline):
        '''
        Called whenever a line is sent to the printer.
        @param gline: The line sent to the printer.
        '''
        pass
| 3,125 | Python | .py | 101 | 22.29703 | 70 | 0.601257 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,357 | rpc.py | kliment_Printrun/printrun/rpc.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from xmlrpc.server import SimpleXMLRPCServer
from threading import Thread
import socket
import logging
from .utils import install_locale, parse_temperature_report
# Install the gettext translation function "_" for this module.
install_locale('pronterface')
# Default TCP port for the XML-RPC control server; ProntRPC.__init__
# scans upward from here when the port is already taken.
RPC_PORT = 7978
class ProntRPC:
    """XML-RPC remote-control server for a pronsole instance.

    Binds a SimpleXMLRPCServer on localhost:RPC_PORT (scanning upward
    when the port is busy) and serves requests on a background thread
    until shutdown() is called.
    """
    server = None
    def __init__(self, pronsole, port = RPC_PORT):
        # Local import: errno is only needed for the bind retry loop.
        import errno
        self.pronsole = pronsole
        used_port = port
        while True:
            try:
                self.server = SimpleXMLRPCServer(("localhost", used_port),
                                                 allow_none = True,
                                                 logRequests = False)
                if used_port != port:
                    logging.warning(_("RPC server bound on non-default port %d") % used_port)
                break
            except socket.error as e:
                # "Address already in use": try the next port. Compare
                # against errno.EADDRINUSE rather than the hard-coded
                # Linux value 98, so the retry also works on macOS (48)
                # and Windows (10048).
                if e.errno == errno.EADDRINUSE:
                    used_port += 1
                    continue
                raise
        self.server.register_function(self.get_status, 'status')
        self.server.register_function(self.set_extruder_temperature, 'settemp')
        self.server.register_function(self.set_bed_temperature, 'setbedtemp')
        self.server.register_function(self.load_file, 'load_file')
        self.server.register_function(self.startprint, 'startprint')
        self.server.register_function(self.pauseprint, 'pauseprint')
        self.server.register_function(self.resumeprint, 'resumeprint')
        self.server.register_function(self.sendhome, 'sendhome')
        self.server.register_function(self.connect, 'connect')
        self.server.register_function(self.disconnect, 'disconnect')
        self.server.register_function(self.send, 'send')
        self.thread = Thread(target = self.run_server)
        self.thread.start()
    def run_server(self):
        """Blocking serve loop; runs on the background thread."""
        self.server.serve_forever()
    def shutdown(self):
        """Stop the serve loop and join the background thread."""
        self.server.shutdown()
        self.thread.join()
    def get_status(self):
        """Return a dict with filename, progress %, ETA, temperatures
        and current layer of the running print (None where unknown)."""
        if self.pronsole.p.printing:
            progress = 100 * float(self.pronsole.p.queueindex) / len(self.pronsole.p.mainqueue)
        elif self.pronsole.sdprinting:
            progress = self.pronsole.percentdone
        else:
            progress = None
        if self.pronsole.p.printing or self.pronsole.sdprinting:
            eta = self.pronsole.get_eta()
        else:
            eta = None
        if self.pronsole.tempreadings:
            temps = parse_temperature_report(self.pronsole.tempreadings)
        else:
            temps = None
        z = self.pronsole.curlayer
        return {"filename": self.pronsole.filename,
                "progress": progress,
                "eta": eta,
                "temps": temps,
                "z": z,
                }
    def set_extruder_temperature(self, targettemp):
        """Send M104; *targettemp* must already be a string."""
        if self.pronsole.p.online:
            self.pronsole.p.send_now("M104 S" + targettemp)
    def set_bed_temperature(self, targettemp):
        """Send M140; *targettemp* must already be a string."""
        if self.pronsole.p.online:
            self.pronsole.p.send_now("M140 S" + targettemp)
    def load_file(self, filename):
        """Load a gcode file into pronsole."""
        self.pronsole.do_load(filename)
    def startprint(self):
        self.pronsole.do_print("")
    def pauseprint(self):
        self.pronsole.do_pause("")
    def resumeprint(self):
        self.pronsole.do_resume("")
    def sendhome(self):
        self.pronsole.do_home("")
    def connect(self):
        self.pronsole.do_connect("")
    def disconnect(self):
        self.pronsole.do_disconnect("")
    def send(self, command):
        """Send a raw gcode line to the printer immediately."""
        self.pronsole.p.send_now(command)
| 4,192 | Python | .py | 101 | 32.247525 | 95 | 0.634535 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,358 | gcoder_heapy_support.patch | kliment_Printrun/printrun/gcoder_heapy_support.patch | --- printrun/gcoder_line.c 2013-06-15 16:08:53.260081109 +0200
+++ printrun/gcoder_line.c 2013-06-15 16:08:57.083439793 +0200
@@ -3945,2 +3945,4 @@ static int __Pyx_InitGlobals(void) {
+#include "gcoder_line_extra.h"
+
#if PY_MAJOR_VERSION < 3
@@ -4032,2 +4034,7 @@ PyMODINIT_FUNC PyInit_gcoder_line(void)
/*--- Execution code ---*/
+ nysets_heapdefs[0].type = &__pyx_type_8printrun_11gcoder_line_GLine;
+ if (PyDict_SetItemString(__pyx_d,
+ "_NyHeapDefs_",
+ PyCObject_FromVoidPtrAndDesc(&nysets_heapdefs, "NyHeapDef[] v1.0", 0)) < 0)
+{__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
| 660 | Python | .py | 13 | 49.153846 | 97 | 0.656299 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,359 | zscaper.py | kliment_Printrun/printrun/zscaper.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from .stltool import stl, genfacet, emitstl
# NOTE(review): a wx.App apparently must exist before the wx.Image calls
# below can be used; it is kept at module level and deleted at the bottom
# of the file -- confirm this is still required.
a = wx.App()
def genscape(data = [[0, 1, 0, 0], [1, 0, 2, 0], [1, 0, 0, 0], [0, 1, 0, 1]],
             pscale = 1.0, bheight = 1.0, zscale = 1.0):
    """Generate a closed stl heightmap ("landscape") from a 2D grid.

    data -- 2D list of height samples (the mutable default is only read,
            never mutated)
    pscale -- horizontal spacing per grid cell
    bheight -- base height added under every sample
    zscale -- vertical scale applied to each sample
    Returns an stl object whose facets form a bottom, side walls and the
    heightmap surface.
    """
    o = stl(None)
    datal = len(data)
    datah = len(data[0])
    # create bottom:
    bmidpoint = (pscale * (datal - 1) / 2.0, pscale * (datah - 1) / 2.0)
    # print range(datal), bmidpoint
    # Bottom triangle fan plus the two walls along the constant-x edges.
    for i in list(zip(range(datal + 1)[:-1], range(datal + 1)[1:]))[:-1]:
        # print (pscale*i[0], pscale*i[1])
        o.facets += [[[0, 0, -1], [[0.0, pscale * i[0], 0.0], [0.0, pscale * i[1], 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [[[0, 0, -1], [[2.0 * bmidpoint[1], pscale * i[1], 0.0], [2.0 * bmidpoint[1], pscale * i[0], 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [genfacet([[0.0, pscale * i[0], data[i[0]][0] * zscale + bheight], [0.0, pscale * i[1], data[i[1]][0] * zscale + bheight], [0.0, pscale * i[1], 0.0]])]
        o.facets += [genfacet([[2.0 * bmidpoint[1], pscale * i[1], data[i[1]][datah - 1] * zscale + bheight], [2.0 * bmidpoint[1], pscale * i[0], data[i[0]][datah - 1] * zscale + bheight], [2.0 * bmidpoint[1], pscale * i[1], 0.0]])]
        o.facets += [genfacet([[0.0, pscale * i[0], data[i[0]][0] * zscale + bheight], [0.0, pscale * i[1], 0.0], [0.0, pscale * i[0], 0.0]])]
        o.facets += [genfacet([[2.0 * bmidpoint[1], pscale * i[1], 0.0], [2.0 * bmidpoint[1], pscale * i[0], data[i[0]][datah - 1] * zscale + bheight], [2.0 * bmidpoint[1], pscale * i[0], 0.0]])]
    # Same construction for the constant-y edges.
    for i in list(zip(range(datah + 1)[: - 1], range(datah + 1)[1:]))[: - 1]:
        # print (pscale * i[0], pscale * i[1])
        o.facets += [[[0, 0, -1], [[pscale * i[1], 0.0, 0.0], [pscale * i[0], 0.0, 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [[[0, 0, -1], [[pscale * i[0], 2.0 * bmidpoint[0], 0.0], [pscale * i[1], 2.0 * bmidpoint[0], 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [genfacet([[pscale * i[1], 0.0, data[0][i[1]] * zscale + bheight], [pscale * i[0], 0.0, data[0][i[0]] * zscale + bheight], [pscale * i[1], 0.0, 0.0]])]
        o.facets += [genfacet([[pscale * i[0], 2.0 * bmidpoint[0], data[datal - 1][i[0]] * zscale + bheight], [pscale * i[1], 2.0 * bmidpoint[0], data[datal - 1][i[1]] * zscale + bheight], [pscale * i[1], 2.0 * bmidpoint[0], 0.0]])]
        o.facets += [genfacet([[pscale * i[1], 0.0, 0.0], [pscale * i[0], 0.0, data[0][i[0]] * zscale + bheight], [pscale * i[0], 0.0, 0.0]])]
        o.facets += [genfacet([[pscale * i[0], 2.0 * bmidpoint[0], data[datal - 1][i[0]] * zscale + bheight], [pscale * i[1], 2.0 * bmidpoint[0], 0.0], [pscale * i[0], 2.0 * bmidpoint[0], 0.0]])]
    # Top surface: two triangles per grid cell.
    for i in range(datah - 1):
        for j in range(datal - 1):
            o.facets += [genfacet([[pscale * i, pscale * j, data[j][i] * zscale + bheight], [pscale * (i + 1), pscale * (j), data[j][i + 1] * zscale + bheight], [pscale * (i + 1), pscale * (j + 1), data[j + 1][i + 1] * zscale + bheight]])]
            o.facets += [genfacet([[pscale * (i), pscale * (j + 1), data[j + 1][i] * zscale + bheight], [pscale * i, pscale * j, data[j][i] * zscale + bheight], [pscale * (i + 1), pscale * (j + 1), data[j + 1][i + 1] * zscale + bheight]])]
            # print o.facets[-1]
    return o
def zimage(name, out):
    """Convert image file *name* into a heightmap STL written to *out*.

    The red channel of each pixel is used as the height sample.
    """
    img = wx.Image(name)
    size = img.GetSize()
    # GetData() returns bytes in Python 3; slicing [::3] keeps the red
    # channel and iterating yields ints directly. The previous
    # map(ord, ...) crashed on Python 3 because ord() rejects ints.
    red_channel = list(img.GetData()[::3])
    print(red_channel)
    data = []
    for i in range(size[0]):
        # NOTE(review): rows are sliced using the second dimension as the
        # stride, which assumes a square image or a deliberate transpose
        # -- confirm against genscape's expected orientation.
        data += [red_channel[i * size[1]:(i + 1) * size[1]]]
    # data = [i[::5] for i in data[::5]]
    emitstl(out, genscape(data, zscale = 0.1).facets, name)
"""
class scapewin(wx.Frame):
def __init__(self, size = (400, 530)):
wx.Frame.__init__(self, None,
title = "Right-click to load an image", size = size)
self.SetIcon(wx.Icon("plater.png", wx.BITMAP_TYPE_PNG))
self.SetClientSize(size)
self.panel = wx.Panel(self, size = size)
"""
if __name__ == '__main__':
"""
app = wx.App(False)
main = scapewin()
main.Show()
app.MainLoop()
"""
zimage("catposthtmap2.jpg", "testobj.stl")
del a
| 4,774 | Python | .py | 75 | 58.12 | 239 | 0.552833 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,360 | projectlayer.py | kliment_Printrun/printrun/projectlayer.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import xml.etree.ElementTree as ET
import wx
import wx.svg
import os
import time
import zipfile
import tempfile
import puremagic
import copy
import re
from collections import OrderedDict
import math
from printrun.gui.widgets import get_space
from .utils import install_locale
# Set up Internationalization using gettext
install_locale('pronterface')
class DisplayFrame(wx.Frame):
    """Projection window for DLP/SLA-style layer printing.

    Owned by SettingsFrame (the control window, passed as *parent*);
    draws each slice (SVG or bitmap) onto a black backing bitmap and
    sequences the show / hide / raise cycle with wx timers.
    """
    def __init__(self, parent, statusbar, title, res = (1024, 768), printer = None, scale = 1.0, offset = (0, 0)):
        super().__init__(parent = parent, title = title, size = res)
        self.printer = printer
        self.control_frame = parent
        self.slicer = 'Bitmap'
        self.dpi = 96
        self.status = statusbar
        self.scale = scale
        self.index = 0
        self.offset = offset
        self.running = False
        self.layer_red = False
        self.startime = 0
        # Closing the DisplayFrame calls the close method of Settingsframe
        self.Bind(wx.EVT_CLOSE, self.control_frame.on_close)
        x_res = self.control_frame.X.GetValue()
        y_res = self.control_frame.Y.GetValue()
        self.size = (x_res, y_res)
        # Backing bitmap, cleared to black before any layer is drawn.
        self.bitmap = wx.Bitmap(*self.size)
        dc = wx.MemoryDC(self.bitmap)
        dc.SetBackground(wx.BLACK_BRUSH)
        dc.Clear()
        dc.SelectObject(wx.NullBitmap)
        panel = wx.Panel(self)
        panel.SetBackgroundColour(wx.BLACK)
        self.bitmap_widget = wx.GenericStaticBitmap(panel, -1, self.bitmap)
        self.bitmap_widget.SetScaleMode(0)
        self.bitmap_widget.Hide()
        sizer = wx.BoxSizer()
        sizer.Add(self.bitmap_widget, wx.ALIGN_LEFT | wx.ALIGN_TOP)
        panel.SetSizer(sizer)
        self.SetDoubleBuffered(True)
        self.CentreOnParent()
    def clear_layer(self):
        """Paint the displayed bitmap fully black and show it."""
        dc = wx.MemoryDC(self.bitmap)
        dc.SetBackground(wx.BLACK_BRUSH)
        dc.Clear()
        dc.SelectObject(wx.NullBitmap)
        self.bitmap_widget.SetBitmap(self.bitmap)
        self.bitmap_widget.Show()
        self.Refresh()
    def resize(self, res = (1024, 768)):
        """Recreate the backing bitmap at pixel resolution *res*."""
        self.bitmap = wx.Bitmap(*res)
        dc = wx.MemoryDC(self.bitmap)
        dc.SetBackground(wx.BLACK_BRUSH)
        dc.Clear()
        dc.SelectObject(wx.NullBitmap)
        self.Refresh()
    def convert_mm_to_px(self, mm_value) -> float:
        """Convert *mm_value* to pixels using the projected-X calibration."""
        resolution_x_px = self.control_frame.X.GetValue()
        projected_x_mm = self.control_frame.projected_X_mm.GetValue()
        return resolution_x_px / projected_x_mm * mm_value
    def draw_layer(self, image = None):
        """Render one layer onto the backing bitmap and show it.

        *image* is an SVG element tree for Slic3r/Skeinforge, a wx.Image
        or a file path for Bitmap/PrusaSlicer, and is ignored for the
        calibration grid.
        """
        dc = wx.MemoryDC(self.bitmap)
        dc.SetBackground(wx.BLACK_BRUSH)
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        if self.slicer in ['Slic3r', 'Skeinforge']:
            if self.layer_red:
                # Recolor white fills to red before rasterizing the SVG.
                image.set('style', 'background-color: black; fill: red;')
                for element in image.findall('{http://www.w3.org/2000/svg}g')[0]:
                    if element.get('fill') == 'white':
                        element.set('fill', 'red')
                    if element.get('style') == 'fill: white':
                        element.set('style', 'fill: red')
            svg_image = wx.svg.SVGimage.CreateFromBytes(ET.tostring(image), units = 'px', dpi = self.dpi)
            self.status(f"Scaled width: {svg_image.width / self.dpi * 25.4 * self.scale:.2f} mm @ {round(svg_image.width * self.scale)} px")
            gc.Translate(self.convert_mm_to_px(self.offset[0]), self.convert_mm_to_px(self.offset[1]))
            gc.Scale(self.scale, self.scale)
            svg_image.RenderToGC(gc)
        elif self.slicer in ('Bitmap', 'PrusaSlicer'):
            if isinstance(image, str):
                image = wx.Image(image)
            if self.layer_red:
                image = image.AdjustChannels(1, 0, 0, 1)
            width, height = image.GetSize()
            if width < height:
                # Rotate portrait slices into landscape orientation.
                image = image.Rotate90(clockwise = False)
            real_width = max(width, height)
            bitmap = gc.CreateBitmapFromImage(image)
            self.status(f"Scaled width: {real_width / self.dpi * 25.4 * self.scale:.2f} mm @ {round(real_width * self.scale)} px")
            gc.Translate(self.convert_mm_to_px(self.offset[0]), self.convert_mm_to_px(self.offset[1]))
            gc.Scale(self.scale, self.scale)
            gc.DrawBitmap(bitmap, 0, 0, image.Width, image.Height)
        elif 'Calibrate' in self.slicer:
            #gc.Translate(self.convert_mm_to_px(self.offset[0]), self.convert_mm_to_px(self.offset[1]))
            #gc.Scale(self.scale, self.scale)
            self.draw_grid(gc)
        else:
            self.status(_("No valid file loaded."))
            return
        dc.SelectObject(wx.NullBitmap)
        self.bitmap_widget.SetBitmap(self.bitmap)
        self.bitmap_widget.Show()
        self.Refresh()
    def draw_grid(self, graphics_context):
        """Draw the red calibration outline and 10 mm grid."""
        gc = graphics_context
        x_res_px = self.control_frame.X.GetValue()
        y_res_px = self.control_frame.Y.GetValue()
        # Draw outline
        path = gc.CreatePath()
        path.AddRectangle(0, 0, x_res_px, y_res_px)
        path.AddCircle(0, 0, 5.0)
        path.AddCircle(0, 0, 14.0)
        solid_pen = gc.CreatePen(wx.GraphicsPenInfo(wx.RED).Width(5.0).Style(wx.PENSTYLE_SOLID))
        gc.SetPen(solid_pen)
        gc.StrokePath(path)
        # Calculate gridlines
        aspectRatio = x_res_px / y_res_px
        projected_x_mm = self.control_frame.projected_X_mm.GetValue()
        projected_y_mm = round(projected_x_mm / aspectRatio, 2)
        px_per_mm = x_res_px / projected_x_mm
        grid_count_x = int(projected_x_mm / 10)
        grid_count_y = int(projected_y_mm / 10)
        # Draw gridlines
        path = gc.CreatePath()
        for y in range(1, grid_count_y + 1):
            for x in range(1, grid_count_x + 1):
                # horizontal line
                path.MoveToPoint(0, int(y * (px_per_mm * 10)))
                path.AddLineToPoint(x_res_px, int(y * (px_per_mm * 10)))
                # vertical line
                path.MoveToPoint(int(x * (px_per_mm * 10)), 0)
                path.AddLineToPoint(int(x * (px_per_mm * 10)), y_res_px)
        thin_pen = gc.CreatePen(wx.GraphicsPenInfo(wx.RED).Width(2.0).Style(wx.PENSTYLE_DOT))
        gc.SetPen(thin_pen)
        gc.StrokePath(path)
    def show_img_delay(self, image):
        """Show *image*, then schedule hide-and-raise after the exposure interval."""
        self.status(_("Showing, Runtime {:.3f} s").format(time.perf_counter() - self.startime))
        self.control_frame.set_current_layer(self.index)
        self.draw_layer(image)
        # AGe 2022-07-31 Python 3.10 and CallLater expects delay in milliseconds as
        # integer value instead of float. Convert float value to int
        timer = wx.CallLater(int(1000 * self.interval), self.hide_pic_and_rise)
        self.control_frame.timers['delay'] = timer
    def rise(self):
        """Move the Z axis by one layer (with overshoot), then schedule the next image."""
        if self.direction == 0:  # 0: Top Down
            self.status(_("Lowering, Runtime {:.3f} s").format(time.perf_counter() - self.startime))
        else:  # self.direction == 1, 1: Bottom Up
            self.status(_("Rising, Runtime {:.3f} s").format(time.perf_counter() - self.startime))
        if self.printer is not None and self.printer.online:
            self.printer.send_now('G91')
            if self.prelift_gcode:
                for line in self.prelift_gcode.split('\n'):
                    if line:
                        self.printer.send_now(line)
            if self.direction == 0:  # 0: Top Down
                self.printer.send_now(f"G1 Z-{self.overshoot:.3f} F{self.z_axis_rate}")
                self.printer.send_now(f"G1 Z{self.overshoot - self.thickness:.3f} F{self.z_axis_rate}")
            else:  # self.direction == 1, 1: Bottom Up
                self.printer.send_now(f"G1 Z{self.overshoot:.3f} F{self.z_axis_rate}")
                self.printer.send_now(f"G1 Z-{self.overshoot - self.thickness:.3f} F{self.z_axis_rate}")
            if self.postlift_gcode:
                for line in self.postlift_gcode.split('\n'):
                    if line:
                        self.printer.send_now(line)
            self.printer.send_now('G90')
        else:
            time.sleep(self.pause)
        # AGe 2022-07-31 Python 3.10 expects delay in milliseconds as
        # integer value instead of float. Convert float value to int
        timer = wx.CallLater(int(1000 * self.pause), self.next_img)
        self.control_frame.timers['rise'] = timer
    def hide_pic(self):
        """Hide the projected image (blackout between layers)."""
        self.status(_("Hiding, Runtime {:.3f} s").format(time.perf_counter() - self.startime))
        self.bitmap_widget.Hide()
    def hide_pic_and_rise(self):
        """Hide the image, then trigger the Z move shortly afterwards."""
        wx.CallAfter(self.hide_pic)
        timer = wx.CallLater(500, self.rise)
        self.control_frame.timers['hide'] = timer
    def next_img(self):
        """Advance to the next layer, or finish the presentation."""
        if not self.running:
            return
        if self.index < len(self.layers):
            self.status(str(self.index))
            wx.CallAfter(self.show_img_delay, self.layers[self.index])
            self.index += 1
        else:
            self.control_frame.stop_present(wx.wxEVT_NULL)
            self.status(_("End"))
            wx.CallAfter(self.bitmap_widget.Hide)
            wx.CallAfter(self.Refresh)
    def present(self,
                layers,
                interval = 0.5,
                pause = 0.2,
                overshoot = 0.0,
                z_axis_rate = 200,
                prelift_gcode = "",
                postlift_gcode = "",
                direction = 0,  # 0: Top Down
                thickness = 0.4,
                scale = 1,
                size = (1024, 768),
                offset = (0, 0),
                layer_red = False):
        """Start presenting *layers*: store all run parameters and kick
        off the show/hide/raise cycle from the first layer."""
        wx.CallAfter(self.bitmap_widget.Hide)
        wx.CallAfter(self.Refresh)
        self.layers = layers
        self.scale = scale
        self.thickness = thickness
        self.size = size
        self.interval = interval
        self.pause = pause
        self.overshoot = overshoot
        self.z_axis_rate = z_axis_rate
        self.prelift_gcode = prelift_gcode
        self.postlift_gcode = postlift_gcode
        self.direction = direction
        self.layer_red = layer_red
        self.offset = offset
        self.index = 0
        self.running = True
        self.next_img()
class SettingsFrame(wx.Dialog):
    def _set_setting(self, name, value):
        # Forward the setting to pronterface when hosted inside it;
        # standalone instances silently drop the value.
        if self.pronterface:
            self.pronterface.set(name, value)
    def _get_setting(self, name, val):
        # Look the setting up on pronterface's settings object, falling
        # back to *val* when standalone or the attribute is missing.
        if self.pronterface:
            try:
                return getattr(self.pronterface.settings, name)
            except AttributeError:
                return val
        else:
            return val
    def __init__(self, parent, printer = None):
        """Build the Layer Projector control dialog.

        parent: the hosting pronterface window; used to read and persist
                the project_* settings.
        printer: printcore object handed to the DisplayFrame so it can
                 send Z-axis G-code between slices.
        """
        super().__init__(parent, title = _("Layer Projector Control"),
                         style = wx.DEFAULT_DIALOG_STYLE | wx.DIALOG_NO_PARENT)
        self.pronterface = parent
        # Becomes a tempfile.TemporaryDirectory once a zip/sl1 is extracted
        self.image_dir = ''
        self.current_filename = ''
        self.slicer = ''
        # Pending wx.CallLater timers, keyed by purpose (e.g. 'first')
        self.timers = {}
        # In wxPython 4.1.0 gtk3 (phoenix) wxWidgets 3.1.4
        # Layout() breaks before Show(), invoke once after Show()
        def fit(ev):
            self.Fit()
            self.Unbind(wx.EVT_ACTIVATE, handler=fit)
        self.Bind(wx.EVT_ACTIVATE, fit)
        self.panel = wx.Panel(self)
        # Controls: load / start / pause / stop
        buttonGroup = wx.StaticBox(self.panel, label = _("Controls"))
        buttonbox = wx.StaticBoxSizer(buttonGroup, wx.HORIZONTAL)
        self.load_button = wx.Button(buttonGroup, -1, _("Load"))
        self.load_button.Bind(wx.EVT_BUTTON, self.load_file)
        self.load_button.SetToolTip(_("Choose a SVG file created from Slic3r or Skeinforge, a PrusaSlicer SL1-file "
                                      "or a zip file of bitmap images (.3dlp.zip)."))
        buttonbox.Add(self.load_button, 1,
                      flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('mini'))
        self.present_button = wx.Button(buttonGroup, -1, _("Start"))
        self.present_button.Bind(wx.EVT_BUTTON, self.start_present)
        self.present_button.SetToolTip(_("Starts the presentation of the slices."))
        buttonbox.Add(self.present_button, 1,
                      flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('mini'))
        self.present_button.Disable()
        self.pause_button = wx.Button(buttonGroup, -1, self.get_btn_label('pause'))
        self.pause_button.Bind(wx.EVT_BUTTON, self.pause_present)
        self.pause_button.SetToolTip(_("Pauses the presentation. Can be resumed afterwards by "
                                       "clicking this button, or restarted by clicking start again."))
        buttonbox.Add(self.pause_button, 1,
                      flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('mini'))
        self.pause_button.Disable()
        self.stop_button = wx.Button(buttonGroup, -1, _("Stop"))
        self.stop_button.Bind(wx.EVT_BUTTON, self.stop_present)
        self.stop_button.SetToolTip(_("Stops presenting the slices."))
        buttonbox.Add(self.stop_button, 1,
                      flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('mini'))
        self.stop_button.Disable()
        # Settings grid (two columns of labelled controls)
        settingsGroup = wx.StaticBox(self.panel, label = _("Settings"))
        fieldboxsizer = wx.StaticBoxSizer(settingsGroup, wx.VERTICAL)
        fieldsizer = wx.GridBagSizer(vgap = get_space('minor'), hgap = get_space('minor'))
        # Left Column
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Layerheight (mm):")), pos = (0, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.thickness = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting("project_layer", 0.1),
                                           inc = 0.01, size = (125, -1))
        self.thickness.SetDigits(3)
        self.thickness.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_thickness)
        self.thickness.SetToolTip(_("The thickness of each slice. Should match the value used to slice the model. "
                                    "SVG files update this value automatically, 3dlp.zip files have to be manually entered."))
        fieldsizer.Add(self.thickness, pos = (0, 1))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Exposure (s):")), pos = (1, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.interval = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting("project_interval", 0.5),
                                          inc = 0.1, size = (125, -1))
        self.interval.SetDigits(2)
        self.interval.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_interval)
        self.interval.SetToolTip(_("How long each slice should be displayed."))
        fieldsizer.Add(self.interval, pos = (1, 1))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Blank (s):")), pos = (2, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.pause = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting("project_pause", 0.5),
                                       inc = 0.1, size = (125, -1))
        self.pause.SetDigits(2)
        self.pause.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_pause)
        self.pause.SetToolTip(_("The pause length between slices. This should take into account any movement of the Z axis, "
                                "plus time to prepare the resin surface (sliding, tilting, sweeping, etc)."))
        fieldsizer.Add(self.pause, pos = (2, 1))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Scale:")), pos = (3, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.scale = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting('project_scale', 1.0),
                                       inc = 0.1, min = 0.05, size = (125, -1))
        self.scale.SetDigits(3)
        self.scale.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_scale)
        self.scale.SetToolTip(_("The additional scaling of each slice."))
        fieldsizer.Add(self.scale, pos = (3, 1))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Direction:")), pos = (4, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.direction = wx.Choice(settingsGroup, -1, choices = [_('Top Down'), _('Bottom Up')], size = (125, -1))
        saved_direction = self._get_setting('project_direction', 0)
        try: # This setting used to be a string, older values need to be replaced with an index
            int(saved_direction)
        except ValueError:
            saved_direction = 1 if saved_direction == "Bottom Up" else 0
            self._set_setting('project_direction', saved_direction)
        self.direction.SetSelection(int(saved_direction))
        self.direction.Bind(wx.EVT_CHOICE, self.update_direction)
        self.direction.SetToolTip(_("The direction the Z axis should move. Top Down is where the projector is above "
                                    "the model, Bottom up is where the projector is below the model."))
        fieldsizer.Add(self.direction, pos = (4, 1), flag = wx.ALIGN_CENTER_VERTICAL)
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Overshoot (mm):")), pos = (5, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.overshoot = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting('project_overshoot', 3.0),
                                           inc = 0.1, min = 0, size = (125, -1))
        self.overshoot.SetDigits(1)
        self.overshoot.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_overshoot)
        self.overshoot.SetToolTip(_("How far the axis should move beyond the next slice position for each slice. "
                                    "For Top Down printers this would dunk the model under the resi and then return. "
                                    "For Bottom Up printers this would raise the base away from the vat and then return."))
        fieldsizer.Add(self.overshoot, pos = (5, 1))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Pre-lift Gcode:")), pos = (6, 0),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.prelift_gcode = wx.TextCtrl(settingsGroup, -1, str(self._get_setting("project_prelift_gcode", "").replace("\\n", '\n')),
                                         size = (-1, 35), style = wx.TE_MULTILINE)
        self.prelift_gcode.SetToolTip(_("Additional gcode to run before raising the Z-axis. "
                                        "Be sure to take into account any additional time needed "
                                        "in the pause value, and be careful what gcode is added!"))
        self.prelift_gcode.Bind(wx.EVT_TEXT, self.update_prelift_gcode)
        fieldsizer.Add(self.prelift_gcode, pos = (6, 1), span = (2, 1), flag = wx.EXPAND)
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Post-lift Gcode:")), pos = (6, 2),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.postlift_gcode = wx.TextCtrl(settingsGroup, -1, str(self._get_setting("project_postlift_gcode", "").replace("\\n", '\n')),
                                          size = (-1, 35), style = wx.TE_MULTILINE)
        self.postlift_gcode.SetToolTip(_("Additional gcode to run after raising the Z-axis. Be sure to take "
                                         "into account any additional time needed in the pause value, "
                                         "and be careful what gcode is added!"))
        self.postlift_gcode.Bind(wx.EVT_TEXT, self.update_postlift_gcode)
        fieldsizer.Add(self.postlift_gcode, pos = (6, 3), span = (2, 1), flag = wx.EXPAND)
        # Right Column
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("X Resolution (px):")), pos = (0, 2), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        projectX = int(math.floor(float(self._get_setting("project_x", 1920))))
        self.X = wx.SpinCtrl(settingsGroup, -1, str(projectX), max = 999999, size = (125, -1))
        self.X.Bind(wx.EVT_SPINCTRL, self.update_resolution)
        self.X.SetToolTip(_("The projector resolution in the X axis."))
        fieldsizer.Add(self.X, pos = (0, 3))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Y Resolution (px):")), pos = (1, 2), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        projectY = int(math.floor(float(self._get_setting("project_y", 1200))))
        self.Y = wx.SpinCtrl(settingsGroup, -1, str(projectY), max = 999999, size = (125, -1))
        self.Y.Bind(wx.EVT_SPINCTRL, self.update_resolution)
        self.Y.SetToolTip(_("The projector resolution in the Y axis."))
        fieldsizer.Add(self.Y, pos = (1, 3))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Offset in X (mm):")), pos = (2, 2),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.offset_X = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting("project_offset_x", 0.0),
                                          inc = 1, size = (125, -1))
        self.offset_X.SetDigits(1)
        self.offset_X.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_offset)
        self.offset_X.SetToolTip(_("How far the slice should be offset from the edge in the X axis."))
        fieldsizer.Add(self.offset_X, pos = (2, 3))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Offset in Y (mm):")), pos = (3, 2),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.offset_Y = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting("project_offset_y", 0.0),
                                          inc = 1, size = (125, -1))
        self.offset_Y.SetDigits(1)
        self.offset_Y.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_offset)
        self.offset_Y.SetToolTip(_("How far the slice should be offset from the edge in the Y axis."))
        fieldsizer.Add(self.offset_Y, pos = (3, 3))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Projected X (mm):")), pos = (4, 2),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.projected_X_mm = wx.SpinCtrlDouble(settingsGroup, -1, initial = self._get_setting("project_projected_x", 100.0),
                                                inc = 0.5, size = (125, -1), min = 1.0, max = 999.9, style = wx.SP_ARROW_KEYS)
        self.projected_X_mm.SetDigits(2)
        self.projected_X_mm.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_projected_Xmm)
        self.projected_X_mm.SetToolTip(_("The actual width of the entire projected image. Use the Calibrate "
                                         "grid to show the full size of the projected image, and measure "
                                         "the width at the same level where the slice will be projected onto the resin."))
        fieldsizer.Add(self.projected_X_mm, pos = (4, 3))
        fieldsizer.Add(wx.StaticText(settingsGroup, -1, _("Z-Axis Speed (mm/min):")), pos = (5, 2),
                       flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.z_axis_rate = wx.SpinCtrl(settingsGroup, -1, str(self._get_setting("project_z_axis_rate", 200)),
                                       max = 9999, size = (125, -1))
        self.z_axis_rate.Bind(wx.EVT_SPINCTRL, self.update_z_axis_rate)
        self.z_axis_rate.SetToolTip(_("Speed of the Z axis in mm/minute. Take into account that "
                                      "slower rates may require a longer pause value."))
        fieldsizer.Add(self.z_axis_rate, pos = (5, 3))
        fieldboxsizer.Add(fieldsizer)
        # Display
        displayGroup = wx.StaticBox(self.panel, -1, _("Display"))
        displayboxsizer = wx.StaticBoxSizer(displayGroup)
        displaysizer = wx.BoxSizer(wx.HORIZONTAL)
        self.fullscreen = wx.CheckBox(displayGroup, -1, _("Fullscreen"))
        self.fullscreen.Bind(wx.EVT_CHECKBOX, self.update_fullscreen)
        self.fullscreen.SetToolTip(_("Toggles the project screen to full size."))
        displaysizer.Add(self.fullscreen, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, get_space('staticbox'))
        displaysizer.AddStretchSpacer(1)
        self.calibrate = wx.CheckBox(displayGroup, -1, _("Calibrate"))
        self.calibrate.Bind(wx.EVT_CHECKBOX, self.show_calibrate)
        self.calibrate.SetToolTip(_("Toggles the calibration grid. Each grid should be 10 mm x 10 mm in size.") +
                                  _(" Use the grid to ensure the projected size is correct. "
                                    "See also the help for the ProjectedX field."))
        displaysizer.Add(self.calibrate, 0, wx.ALIGN_CENTER_VERTICAL)
        displaysizer.AddStretchSpacer(1)
        first_layer_boxer = wx.BoxSizer(wx.HORIZONTAL)
        self.first_layer = wx.CheckBox(displayGroup, -1, _("1st Layer"))
        self.first_layer.Bind(wx.EVT_CHECKBOX, self.show_first_layer)
        self.first_layer.SetToolTip(_("Displays the first layer of the model. Use this to project "
                                      "the first layer for longer so it holds to the base. Note: "
                                      "this value does not affect the first layer when the 'Start' "
                                      "run is started, it should be used manually."))
        first_layer_boxer.Add(self.first_layer, flag = wx.ALIGN_CENTER_VERTICAL)
        first_layer_boxer.Add(wx.StaticText(displayGroup, -1, "(s):"), flag = wx.ALIGN_CENTER_VERTICAL)
        self.show_first_layer_timer = wx.SpinCtrlDouble(displayGroup, -1, initial = -1, min = -1, inc = 1, size = (125, -1))
        self.show_first_layer_timer.SetDigits(1)
        self.show_first_layer_timer.SetToolTip(_("How long to display the first layer for. -1 = unlimited."))
        first_layer_boxer.Add(self.show_first_layer_timer, flag = wx.ALIGN_CENTER_VERTICAL | wx.LEFT, border = get_space('mini'))
        displaysizer.Add(first_layer_boxer, 0, wx.ALIGN_CENTER_VERTICAL)
        displaysizer.AddStretchSpacer(1)
        self.layer_red = wx.CheckBox(displayGroup, -1, _("Red"))
        self.layer_red.Bind(wx.EVT_CHECKBOX, self.show_layer_red)
        self.layer_red.SetToolTip(_("Toggles whether the image should be red. Useful for positioning "
                                    "whilst resin is in the printer as it should not cause a reaction."))
        displaysizer.Add(self.layer_red, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, get_space('staticbox'))
        displayboxsizer.Add(displaysizer, 1, wx.EXPAND)
        # Info
        infoGroup = wx.StaticBox(self.panel, label = _("Info"))
        infosizer = wx.StaticBoxSizer(infoGroup, wx.VERTICAL)
        infofieldsizer = wx.GridBagSizer(vgap = get_space('minor'), hgap = get_space('minor'))
        filelabel = wx.StaticText(infoGroup, -1, _("File:"))
        filelabel.SetToolTip(_("The name of the model currently loaded."))
        infofieldsizer.Add(filelabel, pos = (0, 0), flag = wx.ALIGN_RIGHT)
        self.filename = wx.StaticText(infoGroup, -1, "")
        infofieldsizer.Add(self.filename, pos = (0, 1), flag = wx.EXPAND)
        totallayerslabel = wx.StaticText(infoGroup, -1, _("Total Layers:"))
        totallayerslabel.SetToolTip(_("The total number of layers found in the model."))
        infofieldsizer.Add(totallayerslabel, pos = (1, 0), flag = wx.ALIGN_RIGHT)
        self.total_layers = wx.StaticText(infoGroup, -1)
        infofieldsizer.Add(self.total_layers, pos = (1, 1), flag = wx.EXPAND)
        currentlayerlabel = wx.StaticText(infoGroup, -1, _("Current Layer:"))
        currentlayerlabel.SetToolTip(_("The current layer being displayed."))
        infofieldsizer.Add(currentlayerlabel, pos = (2, 0), flag = wx.ALIGN_RIGHT)
        self.current_layer = wx.StaticText(infoGroup, -1, "0")
        infofieldsizer.Add(self.current_layer, pos = (2, 1), flag = wx.EXPAND)
        estimatedtimelabel = wx.StaticText(infoGroup, -1, _("Estimated Time:"))
        estimatedtimelabel.SetToolTip(_("An estimate of the remaining time until print completion."))
        infofieldsizer.Add(estimatedtimelabel, pos = (3, 0), flag = wx.ALIGN_RIGHT)
        self.estimated_time = wx.StaticText(infoGroup, -1, "")
        infofieldsizer.Add(self.estimated_time, pos = (3, 1), flag = wx.EXPAND)
        statuslabel = wx.StaticText(infoGroup, -1, _("Status:"))
        statuslabel.SetToolTip(_("Latest activity, information and error messages."))
        infofieldsizer.Add(statuslabel, pos=(4, 0), flag = wx.ALIGN_RIGHT)
        self.statusbar = wx.StaticText(infoGroup, -1, "", style = wx.ELLIPSIZE_END)
        infofieldsizer.Add(self.statusbar, pos = (4, 1), flag = wx.EXPAND)
        infofieldsizer.AddGrowableCol(1)
        infosizer.Add(infofieldsizer, 1, wx.EXPAND)
        # Layout
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(buttonbox, flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, border = get_space('minor'))
        vbox.Add(fieldboxsizer, flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('minor'))
        vbox.Add(displayboxsizer, flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('minor'))
        vbox.Add(infosizer, flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = get_space('minor'))
        self.panel.SetSizerAndFit(vbox)
        # Bottom row: Reset / Close
        reset_button = wx.Button(self, -1, label=_("Reset"))
        close_button = wx.Button(self, wx.ID_CLOSE)
        bottom_button_sizer = wx.BoxSizer(wx.HORIZONTAL)
        bottom_button_sizer.Add(reset_button, 0)
        bottom_button_sizer.AddStretchSpacer(1)
        bottom_button_sizer.Add(close_button, 0)
        topsizer = wx.BoxSizer(wx.VERTICAL)
        topsizer.Add(self.panel, wx.EXPAND | wx.BOTTOM, get_space('mini'))
        topsizer.Add(wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL), 0, wx.EXPAND)
        topsizer.Add(bottom_button_sizer, 0, wx.EXPAND | wx.ALL, get_space('stddlg-frame'))
        self.Bind(wx.EVT_BUTTON, self.on_close, id=wx.ID_CLOSE)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        reset_button.Bind(wx.EVT_BUTTON, self.reset_all)
        self.SetSizerAndFit(topsizer)
        self.Fit()
        self.CentreOnParent()
        # res = (self.X.GetValue(), self.Y.GetValue())
        self.display_frame = DisplayFrame(self, statusbar = self.status,
                                          title = _("Layer Projector Display"),
                                          res = (1024, 768),
                                          printer = printer)
        self.display_frame.Centre()
        self.Raise()
        self.display_frame.Show()
        self.Show()
def __del__(self):
self.cleanup_temp()
if self.display_frame:
self.display_frame.Destroy()
    def status(self, message: str) -> None:
        """Show *message* in the dialog's 'Status:' info line."""
        return self.statusbar.SetLabel(message)
    def cleanup_temp(self):
        """Remove the temporary slice-image directory, if one was created."""
        # image_dir starts as '' and only becomes a TemporaryDirectory
        # after a zip/sl1 archive has been extracted.
        if isinstance(self.image_dir, tempfile.TemporaryDirectory):
            self.image_dir.cleanup()
    def set_total_layers(self, total):
        """Show the total layer count and refresh the time estimate."""
        self.total_layers.SetLabel(str(total))
        self.set_estimated_time()
    def set_current_layer(self, index):
        """Show the current layer index and refresh the time estimate."""
        self.current_layer.SetLabel(str(index))
        self.set_estimated_time()
    def display_filename(self, name):
        """Show *name* in the 'File:' info field."""
        self.filename.SetLabel(name)
def set_estimated_time(self):
if not hasattr(self, 'layers'):
return
current_layer = int(self.current_layer.GetLabel())
remaining_layers = len(self.layers[0]) - current_layer
# 0.5 for delay between hide and rise
estimated_time = remaining_layers * (self.interval.GetValue() + self.pause.GetValue() + 0.5)
self.estimated_time.SetLabel(time.strftime("%H:%M:%S", time.gmtime(estimated_time)))
    def parse_svg(self, name):
        """Parse a sliced SVG file into per-layer SVG snippets.

        Detects whether the file came from Slic3r or Skeinforge and wraps
        each layer group in a standalone <svg> element sized like the
        original document.

        Returns (layers, zdiff, slicer): *layers* is the list of SVG
        snippets, *zdiff* the Z step between the last two layers (used as
        the layer height), *slicer* is 'Slic3r', 'Skeinforge' or 'None'.
        """
        et = ET.ElementTree(file = name)
        # Collect namespace declarations; a 'slic3r' prefix identifies
        # Slic3r output.  (The genexp's `_` shadows gettext only locally.)
        namespaces = dict(node for (_, node) in ET.iterparse(name, events=['start-ns']))
        slicer = 'Slic3r' if 'slic3r' in namespaces.keys() else \
            'Skeinforge' if et.getroot().find('{http://www.w3.org/2000/svg}metadata') else 'None'
        zlast = 0
        zdiff = 0
        ol = []
        if slicer == 'Slic3r':
            ET.register_namespace("", "http://www.w3.org/2000/svg")
            # Document size attributes, e.g. '100mm' -> '100'
            height = et.getroot().get('height').replace('m', '')
            width = et.getroot().get('width').replace('m', '')
            self.projected_X_mm.SetValue(float(width))
            self.update_projected_Xmm(wx.wxEVT_NULL)
            for i in et.findall("{http://www.w3.org/2000/svg}g"):
                z = float(i.get('{http://slic3r.org/namespaces/slic3r}z'))
                zdiff = z - zlast
                zlast = z
                # Wrap the layer group in its own complete SVG document
                svgSnippet = ET.Element('{http://www.w3.org/2000/svg}svg')
                svgSnippet.set('height', height + 'mm')
                svgSnippet.set('width', width + 'mm')
                svgSnippet.set('viewBox', '0 0 ' + width + ' ' + height)
                svgSnippet.set('style', 'background-color:black;fill:white;')
                svgSnippet.append(i)
                ol += [svgSnippet]
        elif slicer == 'Skeinforge':
            slice_layers = et.findall("{http://www.w3.org/2000/svg}metadata")[0].findall("{http://www.reprap.org/slice}layers")[0]
            minX = slice_layers.get('minX')
            maxX = slice_layers.get('maxX')
            minY = slice_layers.get('minY')
            maxY = slice_layers.get('maxY')
            height = str(abs(float(minY)) + abs(float(maxY)))
            width = str(abs(float(minX)) + abs(float(maxX)))
            self.projected_X_mm.SetValue(float(width))
            self.update_projected_Xmm(wx.wxEVT_NULL)
            for g in et.findall("{http://www.w3.org/2000/svg}g")[0].findall("{http://www.w3.org/2000/svg}g"):
                g.set('transform', '')
                text_element = g.findall("{http://www.w3.org/2000/svg}text")[0]
                g.remove(text_element)
                path_elements = g.findall("{http://www.w3.org/2000/svg}path")
                for p in path_elements:
                    p.set('transform', 'translate(' + maxX + ',' + maxY + ')')
                    p.set('fill', 'white')
                # Skeinforge encodes the layer Z in the group id after 'z:'
                z = float(g.get('id').split("z:")[-1])
                zdiff = z - zlast
                zlast = z
                svgSnippet = ET.Element('{http://www.w3.org/2000/svg}svg')
                svgSnippet.set('height', height + 'mm')
                svgSnippet.set('width', width + 'mm')
                svgSnippet.set('viewBox', '0 0 ' + width + ' ' + height)
                svgSnippet.set('style', 'background-color:black;fill:white;')
                svgSnippet.append(g)
                ol += [svgSnippet]
        return ol, zdiff, slicer
def parse_3DLP_zip(self, name):
if not zipfile.is_zipfile(name):
self.status(_("{} is not a zip file.").format(os.path.split(name)[1]))
return -1, -1, "None"
accepted_image_types = ['gif', 'tiff', 'jpg', 'jpeg', 'bmp', 'png']
with zipfile.ZipFile(name, 'r') as zipFile:
# Make sure to clean up an exisiting temp dir before creating a new one
if isinstance(self.image_dir, tempfile.TemporaryDirectory):
self.image_dir.cleanup()
self.image_dir = tempfile.TemporaryDirectory()
zipFile.extractall(self.image_dir.name)
ol = []
# Note: the following funky code extracts any numbers from the filenames, matches
# them with the original then sorts them. It allows for filenames of the
# format: abc_1.png, which would be followed by abc_10.png alphabetically.
os.chdir(self.image_dir.name)
vals = [f for f in os.listdir('.') if os.path.isfile(f)]
keys = (int(re.search(r'\d+', p).group()) for p in vals)
imagefilesDict = dict(zip(keys, vals))
imagefilesOrderedDict = OrderedDict(sorted(imagefilesDict.items(), key = lambda t: t[0]))
for f in imagefilesOrderedDict.values():
path = os.path.join(self.image_dir.name, f)
if os.path.isfile(path) and puremagic.what(path) in accepted_image_types:
ol.append(path)
return ol, -1, 'Bitmap'
def parse_sl1(self, name):
if not zipfile.is_zipfile(name):
self.status(_("{} is not a zip file.").format(os.path.split(name)[1]))
return -1, -1, 'None'
accepted_image_types = ('gif', 'tiff', 'jpg', 'jpeg', 'bmp', 'png')
with zipfile.ZipFile(name, 'r') as zippy:
settings = self.load_sl1_config(zippy)
# Make sure to clean up an exisiting temp dir before creating a new one
if isinstance(self.image_dir, tempfile.TemporaryDirectory):
self.image_dir.cleanup()
self.image_dir = tempfile.TemporaryDirectory()
for f in zippy.namelist():
if f.lower().endswith(accepted_image_types) and 'thumbnail' not in f:
zippy.extract(f, self.image_dir.name)
ol = []
for f in sorted(os.listdir(self.image_dir.name)):
path = os.path.join(self.image_dir.name, f)
if os.path.isfile(path) and puremagic.what(path) in accepted_image_types:
ol.append(path)
return ol, -1, 'PrusaSlicer', settings
def load_sl1_config(self, zip_object: zipfile.ZipFile):
files = zip_object.namelist()
settings = {}
if 'prusaslicer.ini' in files:
relevant_keys = ['display_height', 'display_width', 'display_orientation',
'display_pixels_x', 'display_pixels_y',
'display_mirror_x', 'display_mirror_y',
'exposure_time', 'initial_exposure_time',
'layer_height', 'printer_model', 'printer_technology',
'material_colour']
with zip_object.open('prusaslicer.ini', 'r') as lines:
for line in lines:
element = line.decode('UTF-8').rstrip().split(' = ')
if element[0] in relevant_keys:
element[1] = self.cast_type(element[1])
settings[element[0]] = element[1]
return settings
if 'config.ini' in files:
relevant_keys = ['expTime', 'expTimeFirst', 'layerHeight', 'printerModel']
key_names = ['exposure_time', 'initial_exposure_time', 'layer_height', 'printer_model']
with zip_object.open('config.ini', 'r') as lines:
for line in lines:
element = line.decode('UTF-8').rstrip().split(' = ')
if element[0] in relevant_keys:
index = relevant_keys.index(element[0])
element[1] = self.cast_type(element[1])
settings[key_names[index]] = element[1]
return settings
def cast_type(self, var):
'''Automaticly cast int or float from str'''
for caster in (int, float):
try:
return caster(var)
except ValueError:
pass
return var
def apply_sl1_settings(self, layers: list):
thickness = layers[3].get('layer_height')
if thickness is not None:
self.thickness.SetValue(thickness)
self.update_thickness(wx.wxEVT_NULL)
else:
self.status(_("Could not load .sl1 config."))
return False
interval = layers[3].get('exposure_time')
if interval is not None:
self.interval.SetValue(interval)
self.update_interval(wx.wxEVT_NULL)
init_exp = layers[3].get('initial_exposure_time')
if init_exp is not None:
self.show_first_layer_timer.SetValue(init_exp)
x_res = layers[3].get('display_pixels_x')
if x_res is not None:
self.X.SetValue(x_res)
self.update_resolution(wx.wxEVT_NULL)
y_res = layers[3].get('display_pixels_y')
if y_res is not None:
self.Y.SetValue(y_res)
self.update_resolution(wx.wxEVT_NULL)
real_width = layers[3].get('display_width')
if real_width:
self.projected_X_mm.SetValue(real_width)
self.update_projected_Xmm(wx.wxEVT_NULL)
return True
    def load_file(self, event):
        """Ask for a sliced model file, parse it and prime the UI.

        Supported formats: Slic3r/Skeinforge SVG, PrusaSlicer .sl1/.sl1s
        and .3dlp.zip bitmap archives.
        """
        self.reset_loaded_file()
        dlg = wx.FileDialog(self, _("Open file to print"), style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        # On macOS, the wildcard for *.3dlp.zip is not recognised, so it is just *.zip.
        dlg.SetWildcard(_("All supported files") +
                        " (*.svg; *.3dlp.zip; *.sl1; *.sl1s)|*.svg;*.zip;*.sl1;*.sl1s|" +
                        _("Slic3r or Skeinforge SVG files") + " (*.svg)|*.svg|" +
                        _("3DLP Zip files") + " (*.3dlp.zip)|*.zip|" +
                        _("Prusa SL1 files") + " (*.sl1; *.sl1s)|*.sl1;*.sl1s")
        if dlg.ShowModal() == wx.ID_OK:
            name = dlg.GetPath()
            if not os.path.exists(name):
                self.status(_("File not found!"))
                return
            # Dispatch on extension; each parser returns a tuple of
            # (layers, layer_height_or_-1, slicer_name[, settings])
            if name.lower().endswith('.svg'):
                layers = self.parse_svg(name)
            elif name.lower().endswith(('.sl1', '.sl1s')):
                layers = self.parse_sl1(name)
            elif name.lower().endswith('.3dlp.zip'):
                layers = self.parse_3DLP_zip(name)
            else:
                self.status(_("{} is not a sliced svg- or zip-file.").format(os.path.split(name)[1]))
                return
            if layers[2] in ('Slic3r', 'Skeinforge'):
                # The SVG parsers report the detected layer height
                layer_height = round(layers[1], 3)
                self.thickness.SetValue(layer_height)
            elif layers[2] == 'PrusaSlicer':
                if self.apply_sl1_settings(layers):
                    layer_height = float(layers[3]['layer_height'])
                else:
                    layer_height = self.thickness.GetValue()
            elif layers[2] == 'Bitmap':
                # Bitmap archives carry no metadata; trust the UI value
                layer_height = self.thickness.GetValue()
            else:
                self.status(_("{} is not a sliced svg-file or zip-file.").format(os.path.split(name)[1]))
                return
            self.status(_("{} layers found, total height {:.2f} mm").format(len(layers[0]),
                                                                            layer_height * len(layers[0])))
            self.layers = layers
            self.set_total_layers(len(layers[0]))
            self.set_current_layer(0)
            self.current_filename = os.path.basename(name)
            self.display_filename(self.current_filename)
            self.slicer = layers[2]
            self.display_frame.slicer = self.slicer
            self.present_button.Enable()
        dlg.Destroy()
        self.display_frame.Raise()
        self.Raise()
def reset_loaded_file(self):
if hasattr(self, 'layers'):
delattr(self, 'layers')
self.display_filename("")
self.set_total_layers("")
self.set_current_layer(0)
self.estimated_time.SetLabel("")
def show_calibrate(self, event):
if self.calibrate.IsChecked():
self.present_calibrate(event)
else:
if hasattr(self, 'layers'):
self.display_frame.slicer = self.layers[2]
self.display_frame.scale = self.scale.GetValue()
self.display_frame.clear_layer()
def show_first_layer(self, event):
if self.first_layer.IsChecked():
self.present_first_layer(event)
else:
if hasattr(self, 'layers'):
self.display_frame.slicer = self.layers[2]
self.display_frame.scale = self.scale.GetValue()
self.display_frame.clear_layer()
    def show_layer_red(self, event):
        """Propagate the 'Red' checkbox state to the display frame."""
        self.display_frame.layer_red = self.layer_red.IsChecked()
def present_calibrate(self, event):
if self.calibrate.IsChecked():
self.first_layer.SetValue(False)
previous_slicer = self.display_frame.slicer
self.display_frame.slicer = 'Calibrate'
self.display_frame.draw_layer()
self.display_frame.slicer = previous_slicer
self.display_frame.Raise()
self.Raise()
    def present_first_layer(self, event):
        """Project the model's first slice, optionally for a limited time.

        Used to expose the first layer for longer so the print sticks to
        the base; independent of a normal 'Start' run.
        """
        if self.first_layer.GetValue():
            if not hasattr(self, "layers"):
                self.status(_("No model loaded!"))
                self.first_layer.SetValue(False)
                return
            self.display_frame.offset = (self.offset_X.GetValue(), self.offset_Y.GetValue())
            self.display_frame.scale = self.scale.GetValue()
            self.display_frame.slicer = self.layers[2]
            self.display_frame.dpi = self.get_dpi()
            # Pass a copy so the stored slice data is not altered by drawing
            self.display_frame.draw_layer(copy.deepcopy(self.layers[0][0]))
            self.calibrate.SetValue(False)
            self.display_frame.Refresh()
            sfl_timer = self.show_first_layer_timer.GetValue()
            if sfl_timer > 0:
                # Blank the slice again after the requested number of seconds
                def unpresent_first_layer():
                    self.display_frame.clear_layer()
                    self.first_layer.SetValue(False)
                # AGE2023-04-19 Python 3.10 expects delay in milliseconds as
                # integer value instead of float. Convert float value to int
                if 'first' in self.timers and self.timers['first'].IsRunning():
                    self.timers['first'].Stop()
                timer = wx.CallLater(int(sfl_timer * 1000), unpresent_first_layer)
                self.timers['first'] = timer
def update_offset(self, event):
offset_x = self.offset_X.GetValue()
offset_y = self.offset_Y.GetValue()
self.display_frame.offset = (offset_x, offset_y)
self._set_setting('project_offset_x', offset_x)
self._set_setting('project_offset_y', offset_y)
self.refresh_display(event)
    def refresh_display(self, event):
        """Redraw the active preview (calibration grid and/or first layer)."""
        self.present_calibrate(event)
        self.present_first_layer(event)
    def update_thickness(self, event):
        """Persist the layer height and refresh any active preview."""
        self._set_setting('project_layer', self.thickness.GetValue())
        self.refresh_display(event)
    def update_projected_Xmm(self, event):
        """Persist the measured projection width and refresh any preview."""
        self._set_setting('project_projected_x', self.projected_X_mm.GetValue())
        self.refresh_display(event)
def update_scale(self, event):
scale = self.scale.GetValue()
self.display_frame.scale = scale
self._set_setting('project_scale', scale)
self.refresh_display(event)
def update_interval(self, event):
interval = self.interval.GetValue()
self.display_frame.interval = interval
self._set_setting('project_interval', interval)
self.set_estimated_time()
self.refresh_display(event)
def update_pause(self, event):
pause = self.pause.GetValue()
self.display_frame.pause = pause
self._set_setting('project_pause', pause)
self.set_estimated_time()
self.refresh_display(event)
def update_overshoot(self, event):
overshoot = self.overshoot.GetValue()
self.display_frame.overshoot = overshoot
self._set_setting('project_overshoot', overshoot)
self.refresh_display(event)
def update_prelift_gcode(self, event):
prelift_gcode = self.prelift_gcode.GetValue().replace('\n', "\\n")
self.display_frame.prelift_gcode = prelift_gcode
self._set_setting('project_prelift_gcode', prelift_gcode)
def update_postlift_gcode(self, event):
postlift_gcode = self.postlift_gcode.GetValue().replace('\n', "\\n")
self.display_frame.postlift_gcode = postlift_gcode
self._set_setting('project_postlift_gcode', postlift_gcode)
def update_z_axis_rate(self, event):
z_axis_rate = self.z_axis_rate.GetValue()
self.display_frame.z_axis_rate = z_axis_rate
self._set_setting('project_z_axis_rate', z_axis_rate)
def update_direction(self, event):
direction = self.direction.GetSelection()
self.display_frame.direction = direction
self._set_setting('project_direction', direction)
def update_fullscreen(self, event):
if self.fullscreen.GetValue() and not self.display_frame.IsFullScreen():
self.display_frame.ShowFullScreen(True, wx.FULLSCREEN_ALL)
else:
self.display_frame.ShowFullScreen(False)
self.refresh_display(event)
self.Raise()
def update_resolution(self, event):
x = self.X.GetValue()
y = self.Y.GetValue()
self.display_frame.resize((x, y))
self._set_setting('project_x', x)
self._set_setting('project_y', y)
self.refresh_display(event)
def get_dpi(self):
'''Cacluate dots per inch from resolution and projection'''
resolution_x_pixels = self.X.GetValue()
projected_x_mm = self.projected_X_mm.GetValue()
projected_x_inches = projected_x_mm / 25.4
return resolution_x_pixels / projected_x_inches
    def start_present(self, event):
        """Begin presenting the loaded layers on the display frame."""
        if not hasattr(self, "layers"):
            self.status(_("No model loaded!"))
            return
        self.status(_("Starting..."))
        self.pause_button.SetLabel(self.get_btn_label('pause'))
        self.pause_button.Enable()
        self.stop_button.Enable()
        self.set_current_layer(0)
        self.display_frame.Raise()
        if self.fullscreen.GetValue() and not self.display_frame.IsFullScreen():
            self.display_frame.ShowFullScreen(True, wx.FULLSCREEN_ALL)
        self.display_frame.slicer = self.layers[2]
        self.display_frame.dpi = self.get_dpi()
        self.display_frame.startime = time.perf_counter()
        # Hand a copy of the layer list plus all current settings to the
        # display frame, which then runs the exposure/pause timer chain.
        self.display_frame.present(self.layers[0][:],
                                   thickness = self.thickness.GetValue(),
                                   interval = self.interval.GetValue(),
                                   scale = self.scale.GetValue(),
                                   pause = self.pause.GetValue(),
                                   overshoot = self.overshoot.GetValue(),
                                   z_axis_rate = self.z_axis_rate.GetValue(),
                                   prelift_gcode = self.prelift_gcode.GetValue(),
                                   postlift_gcode = self.postlift_gcode.GetValue(),
                                   direction = self.direction.GetSelection(),
                                   size = (self.X.GetValue(), self.Y.GetValue()),
                                   offset = (self.offset_X.GetValue(), self.offset_Y.GetValue()),
                                   layer_red = self.layer_red.IsChecked())
        self.present_button.Disable()
        self.load_button.Disable()
        self.calibrate.SetValue(False)
        self.calibrate.Disable()
        self.Raise()
    def stop_present(self, event):
        """Abort the presentation and restore the idle control states."""
        self.status(_("Stopping..."))
        # The display frame's timer chain checks this flag before each slice
        self.display_frame.running = False
        self.pause_button.SetLabel(self.get_btn_label('pause'))
        self.set_current_layer(0)
        self.present_button.Enable()
        self.load_button.Enable()
        self.calibrate.Enable()
        self.pause_button.Disable()
        self.stop_button.Disable()
        self.status(_("Stop"))
def pause_present(self, event):
if self.pause_button.GetLabel() == self.get_btn_label('pause'):
self.status(self.get_btn_label('pause'))
self.pause_button.SetLabel(self.get_btn_label('continue'))
self.calibrate.Enable()
self.display_frame.running = False
else:
self.status(self.get_btn_label('continue'))
self.pause_button.SetLabel(self.get_btn_label('pause'))
self.calibrate.SetValue(False)
self.calibrate.Disable()
self.display_frame.running = True
self.display_frame.next_img()
    def on_close(self, event):
        """Shut down cleanly: stop presenting, stop timers, drop temp files."""
        self.stop_present(event)
        # Make sure that all running timers are
        # stopped before we destroy the frames
        for timer in self.timers.values():
            if timer.IsRunning():
                timer.Stop()
        self.cleanup_temp()
        if self.display_frame:
            self.display_frame.DestroyLater()
        self.DestroyLater()
def get_btn_label(self, value):
# This method simplifies translation of the button label
if value == 'pause':
return _("Pause")
if value == 'continue':
return _("Continue")
return ValueError(f"No button label for '{value}'")
    def reset_all(self, event):
        """Reset every projector setting to its default after confirmation."""
        # Ask confirmation for deleting
        reset_dialog = wx.MessageDialog(
            self,
            message = _("Are you sure you want to reset all the settings "
                        "to the defaults?\nBe aware that the defaults are "
                        "not guaranteed to work well with your machine."),
            caption = _("Reset Layer Projector Settings"),
            style = wx.YES_NO | wx.ICON_EXCLAMATION)
        if reset_dialog.ShowModal() == wx.ID_YES:
            # Reset all settings
            # Each entry is [widget, default value, update handler that
            # persists the value]
            std_settings = [
                [self.thickness, 0.1, self.update_thickness],
                [self.interval, 2.0, self.update_interval],
                [self.pause, 2.5, self.update_pause],
                [self.scale, 1.0, self.update_scale],
                [self.direction, 0, self.update_direction],
                [self.overshoot, 3.0, self.update_overshoot],
                [self.prelift_gcode, "", self.update_prelift_gcode],
                [self.X, 1024, self.update_resolution],
                [self.Y, 768, self.update_resolution],
                [self.offset_X, 0, self.update_offset],
                [self.offset_Y, 0, self.update_offset],
                [self.projected_X_mm, 100.0, self.update_projected_Xmm],
                [self.z_axis_rate, 200, self.update_z_axis_rate],
                [self.postlift_gcode, "", self.update_postlift_gcode],
                [self.fullscreen, False, self.update_fullscreen],
                [self.calibrate, False, self.show_calibrate],
                [self.first_layer, False, self.show_first_layer],
                [self.show_first_layer_timer, -1.0, self.show_first_layer],
                [self.layer_red, False, self.show_layer_red]
            ]
            for setting in std_settings:
                self.reset_setting(event, setting[0], setting[1], setting[2])
            self.reset_loaded_file()
            self.status(_("Layer Projector settings reset"))
def reset_setting(self, event, name, value, update_function):
try:
# First check if the user actually changed the setting
if not value == name.GetValue():
# If so, set it back and invoke the update_function to save the value
name.SetValue(value)
update_function(event)
except AttributeError:
if not value == name.GetSelection():
name.SetSelection(value)
update_function(event)
if __name__ == "__main__":
    # Allow running this module standalone to test the projector UI
    a = wx.App()
    SettingsFrame(None)
    a.MainLoop()
| 56,092 | Python | .py | 1,022 | 42.353229 | 145 | 0.589843 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,361 | objectplater.py | kliment_Printrun/printrun/objectplater.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import types
import wx
from .gui.widgets import get_space
from .utils import install_locale, iconfile
install_locale('pronterface')
def patch_method(obj, method, replacement):
    """Monkey-patch *method* on instance *obj* with *replacement*.

    The replacement is bound to *obj* and receives the original bound
    method as an extra ``orig_handler`` keyword argument so it can
    delegate to the previous behaviour.
    """
    original = getattr(obj, method)

    def dispatcher(*args, **kwargs):
        kwargs['orig_handler'] = original
        return replacement(*args, **kwargs)

    setattr(obj, method, types.MethodType(dispatcher, obj))
class PlaterPanel(wx.Panel):
    def __init__(self, **kwargs):
        """Embeddable plater panel; kwargs are forwarded to prepare_ui."""
        # Panels embedded in another window must not destroy themselves
        # when plating is done (the Plater dialog subclass does).
        self.destroy_on_done = False
        parent = kwargs.get("parent", None)
        super().__init__(parent = parent)
        self.prepare_ui(**kwargs)
    def prepare_ui(self, filenames = [], callback = None, parent = None, build_dimensions = None, cutting_tool = True):
        """Build the generic plater UI: model list, tool buttons, viewer grid.

        cutting_tool additionally creates the six axis toggle buttons used
        by the STL plater. When callback is given, OK/Cancel buttons are
        added and OK invokes self.done(event, callback).
        NOTE(review): the mutable default filenames=[] is shared between
        calls; safe only as long as it is never mutated here.
        """
        self.filenames = filenames
        self.cut_axis_buttons = []
        menu_sizer = self.menu_sizer = wx.BoxSizer(wx.VERTICAL)
        list_sizer = wx.StaticBoxSizer(wx.VERTICAL, self, label = "Models")
        # Load button
        loadbutton = wx.Button(self, label = _("+ Add Model"))
        loadbutton.Bind(wx.EVT_BUTTON, self.load)
        list_sizer.Add(loadbutton, 0, wx.EXPAND | wx.BOTTOM, get_space('mini'))
        # Model list
        self.l = wx.ListBox(self)
        list_sizer.Add(self.l, 1, wx.EXPAND | wx.BOTTOM, get_space('mini'))
        # Auto arrange button
        autobutton = wx.Button(self, label = _("Auto Arrange"))
        autobutton.Bind(wx.EVT_BUTTON, self.autoplate)
        list_sizer.Add(autobutton, 0, wx.EXPAND | wx.BOTTOM, get_space('mini'))
        h_sizer = wx.BoxSizer(wx.HORIZONTAL)
        # Clear button
        clearbutton = wx.Button(self, label = _("Clear All"))
        clearbutton.Bind(wx.EVT_BUTTON, self.clear)
        h_sizer.Add(clearbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
        # Export button
        exportbutton = wx.Button(self, label = _("Export"))
        exportbutton.Bind(wx.EVT_BUTTON, self.export)
        h_sizer.Add(exportbutton, 1, wx.EXPAND)
        list_sizer.Add(h_sizer, 0, wx.EXPAND)
        selection_sizer = wx.StaticBoxSizer(wx.VERTICAL, self, label = "Selection")
        # Snap to Z = 0 button
        snapbutton = wx.Button(self, label = _("Snap to Zero"))
        snapbutton.Bind(wx.EVT_BUTTON, self.snap)
        h2_sizer = wx.BoxSizer(wx.HORIZONTAL)
        h2_sizer.Add(snapbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
        # Put at center button
        centerbutton = wx.Button(self, label = _("Put at Center"))
        centerbutton.Bind(wx.EVT_BUTTON, self.center)
        h2_sizer.Add(centerbutton, 1, wx.EXPAND)
        selection_sizer.Add(h2_sizer, 0, wx.EXPAND | wx.BOTTOM, get_space('mini'))
        # Delete button
        deletebutton = wx.Button(self, label = _("Delete"))
        deletebutton.Bind(wx.EVT_BUTTON, self.delete)
        selection_sizer.Add(deletebutton, 0, wx.EXPAND | wx.ALL, get_space('none'))
        menu_sizer.Add(list_sizer, 1, wx.EXPAND | wx.ALL, get_space('minor'))
        menu_sizer.Add(selection_sizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, get_space('minor'))
        # Kept so enable_buttons() can grey out all tools at once
        self.menu_buttons = [autobutton, clearbutton, exportbutton, snapbutton, centerbutton, deletebutton]
        if cutting_tool:
            # Insert Cutting tool (only for STL Plater)
            cut_sizer = wx.StaticBoxSizer(wx.VERTICAL, self, label = _("Cutting Tool"))
            # Prepare buttons for all cut axis
            axis_sizer = self.axis_sizer = wx.BoxSizer(wx.HORIZONTAL)
            cutxplusbutton = wx.ToggleButton(self, label = _("+X"), style = wx.BU_EXACTFIT)
            cutxplusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "x", 1))
            axis_sizer.Add(cutxplusbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
            cutyplusbutton = wx.ToggleButton(self, label = _("+Y"), style = wx.BU_EXACTFIT)
            cutyplusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "y", 1))
            axis_sizer.Add(cutyplusbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
            cutzplusbutton = wx.ToggleButton(self, label = _("+Z"), style = wx.BU_EXACTFIT)
            cutzplusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "z", 1))
            axis_sizer.Add(cutzplusbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
            cutxminusbutton = wx.ToggleButton(self, label = _("-X"), style = wx.BU_EXACTFIT)
            cutxminusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "x", -1))
            axis_sizer.Add(cutxminusbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
            cutyminusbutton = wx.ToggleButton(self, label = _("-Y"), style = wx.BU_EXACTFIT)
            cutyminusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "y", -1))
            axis_sizer.Add(cutyminusbutton, 1, wx.EXPAND | wx.RIGHT, get_space('mini'))
            cutzminusbutton = wx.ToggleButton(self, label = _("-Z"), style = wx.BU_EXACTFIT)
            cutzminusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "z", -1))
            axis_sizer.Add(cutzminusbutton, 1, flag = wx.EXPAND)
            self.cut_axis_buttons = [cutxplusbutton, cutyplusbutton, cutzplusbutton,
                                     cutxminusbutton, cutyminusbutton, cutzminusbutton]
            cut_sizer.Add(wx.StaticText(self, -1, _("Choose axis to cut along:")), 0, wx.BOTTOM, get_space('mini'))
            cut_sizer.Add(axis_sizer, 0, wx.EXPAND, wx.BOTTOM, get_space('minor'))
            cut_sizer.Add(wx.StaticText(self, -1, _("Doubleclick to set the cutting plane.")), 0, wx.TOP | wx.BOTTOM, get_space('mini'))
            # Process cut button
            self.cut_processbutton = wx.Button(self, label = _("Process Cut"))
            self.cut_processbutton.Bind(wx.EVT_BUTTON, lambda event: self.cut_confirm(event))
            cut_sizer.Add(self.cut_processbutton, 0, flag = wx.EXPAND)
            menu_sizer.Add(cut_sizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, get_space('minor'))
            self.enable_cut_button(False)
        self.enable_buttons(False) # While no file is loaded all buttons are disabled
        self.basedir = "."
        self.models = {}
        self.topsizer = wx.GridBagSizer(vgap = 0, hgap = 0)
        self.topsizer.Add(menu_sizer, pos = (0, 0), span = (1, 1), flag = wx.EXPAND)
        self.topsizer.Add(wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL), pos = (1, 0), span = (1, 2), flag = wx.EXPAND)
        if callback is not None:
            self.topsizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL), pos = (2, 0), span = (1, 2),
                              flag = wx.ALIGN_RIGHT | wx.ALL, border = get_space('stddlg'))
            self.Bind(wx.EVT_BUTTON, lambda e: self.done(e, callback), id=wx.ID_OK)
            self.Bind(wx.EVT_BUTTON, lambda e: self.Destroy(), id=wx.ID_CANCEL)
        self.topsizer.AddGrowableRow(0)
        self.topsizer.AddGrowableCol(1)
        self.SetSizer(self.topsizer)
        self.build_dimensions = build_dimensions or [200, 200, 100, 0, 0, 0]
    def set_viewer(self, viewer):
        """Install the model viewer and hook plater-specific mouse handling.

        Shift+drag moves the selected model and shift+wheel rotates it;
        all other events fall through to the viewer's original handlers.
        """
        # Patch handle_rotation on the fly
        if hasattr(viewer, "handle_rotation"):
            def handle_rotation(self, event, orig_handler):
                if self.initpos is None:
                    self.initpos = event.GetPosition()
                else:
                    if event.ShiftDown():
                        # Translate the model by the dragged distance,
                        # converted from screen to 3D coordinates
                        p1 = self.initpos
                        p2 = event.GetPosition()
                        x1, y1, _ = self.mouse_to_3d(p1[0], p1[1])
                        x2, y2, _ = self.mouse_to_3d(p2[0], p2[1])
                        self.parent.move_shape((x2 - x1, y2 - y1))
                        self.initpos = p2
                    else:
                        orig_handler(event)
            patch_method(viewer, "handle_rotation", handle_rotation)
        # Patch handle_wheel on the fly
        if hasattr(viewer, "handle_wheel"):
            def handle_wheel(self, event, orig_handler):
                if event.ShiftDown():
                    # 5-degree rotation steps following wheel direction
                    angle = 10
                    if event.GetWheelRotation() < 0:
                        angle = -angle
                    self.parent.rotate_shape(angle / 2)
                else:
                    orig_handler(event)
            patch_method(viewer, "handle_wheel", handle_wheel)
        self.s = viewer
        self.s.SetMinSize((150, 150))
        self.topsizer.Add(self.s, pos = (0, 1), span = (1, 1), flag = wx.EXPAND)
def move_shape(self, delta):
"""moves shape (selected in l, which is list ListBox of shapes)
by an offset specified in tuple delta.
Positive numbers move to (rigt, down)"""
name = self.l.GetSelection()
if name == wx.NOT_FOUND:
return False
name = self.l.GetString(name)
model = self.models[name]
model.offsets = [model.offsets[0] + delta[0],
model.offsets[1] + delta[1],
model.offsets[2]
]
return True
def rotate_shape(self, angle):
"""rotates active shape
positive angle is clockwise
"""
name = self.l.GetSelection()
if name == wx.NOT_FOUND:
return False
name = self.l.GetString(name)
model = self.models[name]
model.rot += angle
return True
def autoplate(self, event = None):
logging.info(_("Autoplating"))
separation = 2
try:
from printrun import packer
p = packer.Packer()
for i, model in self.models.items():
width = abs(model.dims[0] - model.dims[1])
height = abs(model.dims[2] - model.dims[3])
p.add_rect(width, height, data = i)
centerx = self.build_dimensions[0] / 2 + self.build_dimensions[3]
centery = self.build_dimensions[1] / 2 + self.build_dimensions[4]
rects = p.pack(padding = separation,
center = packer.Vector2(centerx, centery))
for rect in rects:
i = rect.data
position = rect.center()
self.models[i].offsets[0] = position.x
self.models[i].offsets[1] = position.y
except ImportError:
bedsize = self.build_dimensions[0:3]
cursor = [0, 0, 0]
newrow = 0
max = [0, 0]
for i, model in self.models.items():
model.offsets[2] = -1.0 * model.dims[4]
x = abs(model.dims[0] - model.dims[1])
y = abs(model.dims[2] - model.dims[3])
centre = [x / 2, y / 2]
centreoffset = [model.dims[0] + centre[0],
model.dims[2] + centre[1]]
if (cursor[0] + x + separation) >= bedsize[0]:
cursor[0] = 0
cursor[1] += newrow + separation
newrow = 0
if (newrow == 0) or (newrow < y):
newrow = y
# To the person who works out why the offsets are applied
# differently here:
# Good job, it confused the hell out of me.
model.offsets[0] = cursor[0] + centre[0] - centreoffset[0]
model.offsets[1] = cursor[1] + centre[1] - centreoffset[1]
if (max[0] == 0) or (max[0] < (cursor[0] + x)):
max[0] = cursor[0] + x
if (max[1] == 0) or (max[1] < (cursor[1] + x)):
max[1] = cursor[1] + x
cursor[0] += x + separation
if (cursor[1] + y) >= bedsize[1]:
logging.info(_("Bed full, sorry sir :("))
self.Refresh()
return
centerx = self.build_dimensions[0] / 2 + self.build_dimensions[3]
centery = self.build_dimensions[1] / 2 + self.build_dimensions[4]
centreoffset = [centerx - max[0] / 2, centery - max[1] / 2]
for i, model in self.models.items():
model.offsets[0] += centreoffset[0]
model.offsets[1] += centreoffset[1]
self.Refresh()
def clear(self, event):
result = wx.MessageBox(_('Are you sure you want to clear the grid? All unsaved changes will be lost.'),
_('Clear the grid?'),
wx.YES_NO | wx.ICON_QUESTION)
if result == 2:
self.models = {}
self.l.Clear()
self.enable_buttons(False)
if self.cut_axis_buttons:
self.enable_cut_button(False)
self.Refresh()
def enable_buttons(self, value):
# A helper method to give the user a cue which tools are available
for button in self.menu_buttons:
button.Enable(value)
if self.cut_axis_buttons: # Only STL Plater has cut axis buttons
for button in self.cut_axis_buttons:
button.SetValue(False)
button.Enable(value)
self.Refresh()
    def enable_cut_button(self, value):
        """Enable or disable the 'Process Cut' button."""
        self.cut_processbutton.Enable(value)
        self.Refresh()
def center(self, event):
i = self.l.GetSelection()
if i != -1:
m = self.models[self.l.GetString(i)]
centerx = self.build_dimensions[0] / 2 + self.build_dimensions[3]
centery = self.build_dimensions[1] / 2 + self.build_dimensions[4]
m.offsets = [centerx, centery, m.offsets[2]]
self.Refresh()
def snap(self, event):
i = self.l.GetSelection()
if i != -1:
m = self.models[self.l.GetString(i)]
m.offsets[2] = -m.dims[4]
self.Refresh()
    def delete(self, event):
        """Remove the selected model and select the last remaining entry."""
        i = self.l.GetSelection()
        if i != -1:
            del self.models[self.l.GetString(i)]
            self.l.Delete(i)
            self.l.Select(self.l.GetCount() - 1)
            if self.l.GetCount() < 1:
                # Nothing left on the plate: grey out the tools
                self.enable_buttons(False)
                if self.cut_axis_buttons:
                    self.enable_cut_button(False)
            self.Refresh()
    def add_model(self, name, model):
        """Register a loaded model under a unique list-box name."""
        newname = os.path.split(name.lower())[1]
        if not isinstance(newname, str):
            # presumably covers bytes filenames — TODO confirm call sites
            newname = str(newname, "utf-8")
        c = 1
        # Disambiguate duplicates as "name(1)", "name(2)", ...
        while newname in self.models:
            newname = os.path.split(name.lower())[1]
            newname = newname + "(%d)" % c
            c += 1
        self.models[newname] = model
        self.l.Append(newname)
        i = self.l.GetSelection()
        if i == wx.NOT_FOUND:
            self.l.Select(0)
        # Always highlight the freshly added model
        self.l.Select(self.l.GetCount() - 1)
        self.enable_buttons(True)
    def load(self, event):
        """Ask the user for a model file and load it onto the plate."""
        dlg = wx.FileDialog(self, _("Pick file to load"), self.basedir, style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        dlg.SetWildcard(self.load_wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            name = dlg.GetPath()
            self.enable_buttons(True)
            self.load_file(name)
        dlg.Destroy()
    def load_file(self, filename):
        """Load one model file; implemented by format-specific platers."""
        raise NotImplementedError
    def export(self, event):
        """Ask the user for a destination and export the plated scene."""
        dlg = wx.FileDialog(self, _("Pick file to save to"), self.basedir, style = wx.FD_SAVE)
        dlg.SetWildcard(self.save_wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            name = dlg.GetPath()
            self.export_to(name)
        dlg.Destroy()
    def export_to(self, name):
        """Write the plated scene to *name*; implemented by subclasses."""
        raise NotImplementedError
class Plater(wx.Dialog):
    """Standalone dialog shell for a plater panel (see make_plater)."""
    def __init__(self, **kwargs):
        # As a top-level dialog, close ourselves once plating is done
        self.destroy_on_done = True
        parent = kwargs.get("parent", None)
        size = kwargs.get("size", (800, 580))
        # prepare_ui does not accept "size", so strip it before forwarding
        if "size" in kwargs:
            del kwargs["size"]
        wx.Dialog.__init__(self, parent, title = _("STL Plate Builder"),
                           size = size, style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        self.SetIcon(wx.Icon(iconfile("plater.png"), wx.BITMAP_TYPE_PNG))
        self.prepare_ui(**kwargs)
        self.CenterOnParent()
def make_plater(panel_class):
    """Build a standalone dialog class from a plater panel class.

    The generated class combines Plater (the wx.Dialog shell) with the
    given panel implementation; its name is the panel's class name with
    the 'Panel' suffix stripped.
    """
    dialog_name = panel_class.__name__.replace("Panel", "")
    bases = (Plater, panel_class)
    return type(dialog_name, bases, {})
| 17,004 | Python | .py | 338 | 38.609467 | 136 | 0.575114 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,362 | packer.py | kliment_Printrun/printrun/packer.py | # Imported from python-rectangle-packer commit 32fce1aaba
# https://github.com/maxretter/python-rectangle-packer
#
# Python Rectangle Packer - Packs rectangles around a central point
# Copyright (C) 2013 Max Retter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import Polygon
import Polygon.Utils
class Vector2:
    """Minimal 2D vector / point type used by the rectangle packer."""

    def __init__(self, x=0, y=0):
        self.x = float(x)
        self.y = float(y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def add(self, other):
        """Component-wise sum."""
        return Vector2(self.x + other.x, self.y + other.y)

    def sub(self, other):
        """Component-wise difference."""
        return Vector2(self.x - other.x, self.y - other.y)

    def scale(self, factor):
        """Multiply both components by a scalar."""
        return Vector2(self.x * factor, self.y * factor)

    def magnitude(self):
        """Euclidean length of the vector."""
        return math.sqrt(self.dot_product(self))

    def unit(self):
        """Vector of length 1 pointing in the same direction."""
        return self.scale(1 / self.magnitude())

    def dot_product(self, other):
        """Scalar (dot) product with another vector."""
        return self.x * other.x + self.y * other.y

    def distance(self, other):
        """Euclidean distance to another point."""
        return other.sub(self).magnitude()
class Rect:
    """Axis-aligned rectangle positioned by its upper-left corner.

    data carries an arbitrary caller payload (the packer uses it to map a
    packed rectangle back to the model it represents).
    """

    def __init__(self, width, height, data=None):
        self.width = width
        self.height = height
        # Bug fix: the former mutable default argument (data={}) would be
        # shared by every Rect created without an explicit payload.
        self.data = {} if data is None else data
        # upper left
        self.position = Vector2()

    def half(self):
        """Half width and height as a vector."""
        return Vector2(
            self.width / 2,
            self.height / 2
        )

    def expand(self, width, height):
        """Builds a new rectangle based on this one with given offsets."""
        expanded = Rect(self.width + width, self.height + height)
        expanded.set_center(self.center())
        return expanded

    def point_list(self):
        """Corner points as a PointList, clockwise from the upper left."""
        top = self.position.y
        right = self.position.x + self.width
        bottom = self.position.y + self.height
        left = self.position.x
        return PointList([
            (left, top),
            (right, top),
            (right, bottom),
            (left, bottom),
        ])

    def center(self):
        """Center of rect calculated from position and dimensions."""
        return self.position.add(self.half())

    def set_center(self, center):
        """Set the position based on a new center point."""
        self.position = center.sub(self.half())

    def area(self):
        """Area: width * height."""
        return self.width * self.height
class PointList:
    """Methods for transforming a list of (x, y) point tuples."""

    def __init__(self, points=None):
        # Bug fix: the former mutable default argument ([]) was stored
        # directly, so every PointList created without arguments shared
        # (and could corrupt) the same list object.
        self.points = [] if points is None else points
        self._polygon = None

    def polygon(self):
        """Builds (and caches) a polygon from the set of points."""
        if not self._polygon:
            self._polygon = Polygon.Polygon(self.points)
        return self._polygon

    def segments(self):
        """Returns the closed outline as a list of LineSegment objects."""
        segs = []
        for i, point in enumerate(self.points[1:]):
            index = i + 1
            segs.append(LineSegment(
                Vector2(self.points[index - 1][0], self.points[index - 1][1]),
                Vector2(self.points[index][0], self.points[index][1])
            ))
        # Close the loop back from the last point to the first
        segs.append(LineSegment(
            Vector2(self.points[-1][0], self.points[-1][1]),
            Vector2(self.points[0][0], self.points[0][1]),
        ))
        return segs
class LineSegment:
    """Directed segment between two Vector2-like endpoints."""

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def length(self):
        """Length of the segment vector."""
        return self.end.sub(self.start).magnitude()

    def closest_point_to_point(self, point):
        """Point on this segment that lies closest to *point*.

        Projects *point* onto the segment's supporting line, then clamps
        the projection parameter to [0, 1] so the result stays on the
        segment.
        """
        segment_vector = self.end.sub(self.start)
        point_vector = point.sub(self.start)
        seg_mag = segment_vector.magnitude()
        # scalar position of the projection along the segment, in [0, 1]
        scalar = segment_vector.dot_product(point_vector) / seg_mag ** 2
        scalar = min(1.0, max(0.0, scalar))
        # walk from start along the unit direction by the clamped distance
        return self.start.add(segment_vector.unit().scale(scalar * seg_mag))

    def closest_distance_to_point(self, point):
        """Distance from *point* to the nearest point on this segment."""
        return self.closest_point_to_point(point).distance(point)
class Packer:
    """Packs rectangles as closely as possible around a central point."""

    def __init__(self):
        self._rects = []

    def add_rect(self, width, height, data=None):
        """Queue a width x height rectangle carrying an optional payload.

        Bug fix: the former mutable default argument (data={}) would have
        been shared between all rectangles added without a payload.
        """
        self._rects.append(Rect(width, height, {} if data is None else data))

    def pack(self, padding=0, center=None):
        """Place all queued rectangles around *center* and return them.

        Rectangles go biggest-first; each subsequent one is positioned on
        the closest edge of the union outline of everything already
        placed (each placed rect expanded by the new rect's size plus
        padding). center defaults to the origin; the former default of a
        single shared Vector2 instance is now created per call.
        """
        if center is None:
            center = Vector2()
        # init everything
        placed_rects = []
        sorted_rects = sorted(self._rects, key=lambda rect: -rect.area())
        # double padding due to halfing later on
        padding *= 2
        for rect in sorted_rects:
            if not placed_rects:
                # first rect, right on target.
                rect.set_center(center)
            else:
                # Expand each rectangle based on new rect size and padding
                # get a list of points
                # build a polygon
                point_lists = [
                    pr.expand(rect.width + padding, rect.height + padding).point_list().polygon()
                    for pr in placed_rects
                ]
                # take the union of all the polygons (relies on + operator override)
                # the [0] at the end returns the first "contour", which is the only one we need
                bounding_points = PointList(sum(
                    point_lists[1:],
                    point_lists[0]
                )[0])
                # find the closest segment
                closest_segments = sorted(
                    bounding_points.segments(),
                    key=lambda segment: segment.closest_distance_to_point(center)
                )
                # get the closest point
                place_point = closest_segments[0].closest_point_to_point(center)
                # set the rect position
                rect.set_center(place_point)
            placed_rects.append(rect)
        return placed_rects
| 7,060 | Python | .py | 175 | 31.165714 | 97 | 0.598536 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,363 | injectgcode.py | kliment_Printrun/printrun/injectgcode.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
from .gui.widgets import MacroEditor
from .utils import install_locale
install_locale('pronterface')
def injector(gcode, viz_layer, layer_idx):
    """Open a macro editor to inject G-code at the start of a layer."""
    def on_save(toadd):
        inject(gcode, viz_layer, layer_idx, toadd)
    layer_z = gcode.all_layers[layer_idx].z
    if layer_z is None:
        layer_z = 0
    MacroEditor(_("Inject G-Code at layer %d (Z = %.03f)") % (viz_layer, layer_z), "", on_save, True)
def injector_edit(gcode, viz_layer, layer_idx):
    """Open a macro editor pre-filled with a layer's G-code for editing."""
    def on_save(toadd):
        rewritelayer(gcode, viz_layer, layer_idx, toadd)
    layer = gcode.all_layers[layer_idx]
    layer_z = layer.z if layer.z is not None else 0
    raw_lines = [line.raw for line in layer]
    MacroEditor(_("Edit G-Code of layer %d (Z = %.03f)") % (viz_layer, layer_z), raw_lines, on_save, True)
def inject(gcode, viz_layer, layer_idx, toadd):
    """Prepend *toadd* G-code lines to the given layer and log the result."""
    # TODO: save modified gcode after injection ?
    added = gcode.prepend_to_layer(toadd, layer_idx)
    logging.info(_("Successfully injected %d lines at beginning of layer %d") % (len(added), viz_layer))
def rewritelayer(gcode, viz_layer, layer_idx, toadd):
    """Replace the given layer's G-code with *toadd* and log the result."""
    # TODO: save modified gcode after edit ?
    rewritten = gcode.rewrite_layer(toadd, layer_idx)
    logging.info(_("Successfully edited layer %d (which now contains %d lines)") % (viz_layer, len(rewritten)))
| 1,922 | Python | .py | 38 | 47.710526 | 103 | 0.721364 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,364 | stlplater.py | kliment_Printrun/printrun/stlplater.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import wx
import time
import logging
import threading
import math
import sys
import re
import traceback
import subprocess
from copy import copy
from printrun import stltool
from printrun.objectplater import make_plater, PlaterPanel
from .utils import install_locale
install_locale('pronterface')
# Set up Internationalization using gettext
# searching for installed locales on /usr/share; uses relative folder if not found (windows)
glview = '--no-gl' not in sys.argv
if glview:
try:
from printrun import stlview
except ImportError:
glview = False
logging.warning(_("Could not load 3D viewer for plater:") +
"\n" + traceback.format_exc())
def evalme(s):
    # Evaluates the text between the first "(" and the first ")" of s.
    # SECURITY NOTE(review): eval on arbitrary text is dangerous if s can
    # come from an untrusted file; consider ast.literal_eval if the
    # arguments are always literals — TODO confirm call sites.
    return eval(s[s.find("(") + 1:s.find(")")])
def transformation_matrix(model):
    """Compose the model's placement transform.

    Starting from the identity, applies in order: the center-offset
    translation, the rotation about Z, then the plate offsets — each
    only when it is non-trivial.
    """
    result = stltool.I
    steps = (
        (any(model.centeroffset), lambda: model.translation_matrix(model.centeroffset)),
        (model.rot, lambda: model.rotation_matrix([0, 0, model.rot])),
        (any(model.offsets), lambda: model.translation_matrix(model.offsets)),
    )
    for needed, make in steps:
        if needed:
            result = make().dot(result)
    return result
class showstl(wx.Window):
    def __init__(self, parent, size, pos):
        """2D fallback model viewer used when the OpenGL view is unavailable."""
        super().__init__(parent, size = size, pos = pos)
        self.i = 0  # accumulated wheel rotation steps
        self.parent = parent
        self.previ = 0  # last rotation step already applied
        self.Bind(wx.EVT_MOUSEWHEEL, self.rot)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.move)
        self.Bind(wx.EVT_PAINT, self.repaint)
        self.Bind(wx.EVT_KEY_DOWN, self.keypress)
        self.triggered = 0  # guards against piling up rotation worker threads
        self.initpos = None  # drag start position; None when not dragging
        self.prevsel = -1  # selection seen by the last wheel event
def prepare_model(self, m, scale):
m.bitmap = wx.Bitmap(800, 800, 32)
dc = wx.MemoryDC()
dc.SelectObject(m.bitmap)
dc.SetBackground(wx.Brush((0, 0, 0, 0)))
dc.SetBrush(wx.Brush((0, 0, 0, 255)))
dc.SetBrush(wx.Brush(wx.Colour(128, 255, 128)))
dc.SetPen(wx.Pen(wx.Colour(128, 128, 128)))
for i in m.facets:
dc.DrawPolygon([wx.Point(400 + scale * p[0], (400 - scale * p[1])) for p in i[1]])
dc.SelectObject(wx.NullBitmap)
m.bitmap.SetMask(wx.Mask(m.bitmap, wx.Colour(0, 0, 0, 255)))
def move_shape(self, delta):
"""moves shape (selected in l, which is list ListBox of shapes)
by an offset specified in tuple delta.
Positive numbers move to (right, down)"""
name = self.parent.l.GetSelection()
if name == wx.NOT_FOUND:
return False
name = self.parent.l.GetString(name)
model = self.parent.models[name]
model.offsets = [model.offsets[0] + delta[0],
model.offsets[1] + delta[1],
model.offsets[2]
]
self.Refresh()
return True
    def move(self, event):
        """Mouse handler: left-drag moves the selection, right-click is
        forwarded to the parent; a rubber-band line is drawn while
        dragging."""
        if event.ButtonUp(wx.MOUSE_BTN_LEFT):
            if self.initpos is not None:
                currentpos = event.GetPosition()
                # 0.5 pixel-to-mm factor; Y is inverted because screen Y
                # grows downwards
                delta = (0.5 * (currentpos[0] - self.initpos[0]),
                         -0.5 * (currentpos[1] - self.initpos[1])
                         )
                self.move_shape(delta)
                self.Refresh()
                self.initpos = None
        elif event.ButtonDown(wx.MOUSE_BTN_RIGHT):
            self.parent.right(event)
        elif event.Dragging():
            if self.initpos is None:
                self.initpos = event.GetPosition()
            self.Refresh()
            # Draw a line from the drag origin to the current position
            dc = wx.ClientDC(self)
            p = event.GetPosition()
            dc.DrawLine(self.initpos[0], self.initpos[1], p[0], p[1])
            del dc
        else:
            event.Skip()
    def rotate_shape(self, angle):
        """Rotate the active shape by *angle* steps (positive = clockwise).

        The rotation is applied asynchronously through cr()/rotateafter()
        so that a burst of wheel events is coalesced into one repaint.
        """
        self.i += angle
        if not self.triggered:
            self.triggered = 1
            threading.Thread(target = self.cr).start()
    def keypress(self, event):
        """Vim-style keys move (h/j/k/l) and rotate ([/]) the active shape.

        Holding Ctrl switches to fine-grained steps.
        NOTE(review): the original j/k comments were swapped relative to
        the keycodes (74 is 'J', 75 is 'K'); the code itself matches the
        vim convention since paint() draws positive Y offsets upwards.
        """
        keycode = event.GetKeyCode()
        step = 5
        angle = 18
        if event.ControlDown():
            step = 1
            angle = 1
        # h: move left
        if keycode == 72:
            self.move_shape((-step, 0))
        # l: move right
        if keycode == 76:
            self.move_shape((step, 0))
        # k (keycode 75): move up
        if keycode == 75:
            self.move_shape((0, step))
        # j (keycode 74): move down
        if keycode == 74:
            self.move_shape((0, -step))
        # [: rotate counter-clockwise
        if keycode == 91:
            self.rotate_shape(-angle)
        # ]: rotate clockwise
        if keycode == 93:
            self.rotate_shape(angle)
        event.Skip()
    def rotateafter(self):
        """Apply the rotation steps accumulated since the last repaint."""
        if self.i != self.previ:
            i = self.parent.l.GetSelection()
            if i != wx.NOT_FOUND:
                # 5 degrees per accumulated wheel step; minus = clockwise
                self.parent.models[self.parent.l.GetString(i)].rot -= 5 * (self.i - self.previ)
            self.previ = self.i
            self.Refresh()
    def cr(self):
        """Worker: debounce briefly, then apply the rotation on the UI thread."""
        time.sleep(0.01)
        wx.CallAfter(self.rotateafter)
        self.triggered = 0
    def rot(self, event):
        """Mouse-wheel handler: rotate the current selection by one step."""
        z = event.GetWheelRotation()
        s = self.parent.l.GetSelection()
        # Reset the accumulator when the selection changed
        if self.prevsel != s:
            self.i = 0
            self.prevsel = s
        self.rotate_shape(-1 if z < 0 else 1)
    def repaint(self, event):
        """wx.EVT_PAINT handler: redraw the bed grid and all models."""
        dc = wx.PaintDC(self)
        self.paint(dc = dc)
def paint(self, coord1 = "x", coord2 = "y", dc = None):
if dc is None:
dc = wx.ClientDC(self)
scale = 2
dc.SetPen(wx.Pen(wx.Colour(100, 100, 100)))
for i in range(20):
dc.DrawLine(0, i * scale * 10, 400, i * scale * 10)
dc.DrawLine(i * scale * 10, 0, i * scale * 10, 400)
dc.SetPen(wx.Pen(wx.Colour(0, 0, 0)))
for i in range(4):
dc.DrawLine(0, i * scale * 50, 400, i * scale * 50)
dc.DrawLine(i * scale * 50, 0, i * scale * 50, 400)
dc.SetBrush(wx.Brush(wx.Colour(128, 255, 128)))
dc.SetPen(wx.Pen(wx.Colour(128, 128, 128)))
dcs = wx.MemoryDC()
for m in self.parent.models.values():
b = m.bitmap
im = b.ConvertToImage()
imgc = wx.Point(im.GetWidth() / 2, im.GetHeight() / 2)
im = im.Rotate(math.radians(m.rot), imgc, 0)
bm = wx.BitmapFromImage(im)
dcs.SelectObject(bm)
bsz = bm.GetSize()
dc.Blit(scale * m.offsets[0] - bsz[0] / 2, 400 - (scale * m.offsets[1] + bsz[1] / 2), bsz[0], bsz[1], dcs, 0, 0, useMask = 1)
del dc
class StlPlaterPanel(PlaterPanel):
load_wildcard = _("STL files (*.stl;*.STL)|*.stl;*.STL|OpenSCAD files (*.scad)|*.scad")
save_wildcard = _("STL files (*.stl;*.STL)|*.stl;*.STL")
    def prepare_ui(self, filenames = [], callback = None,
                   parent = None, build_dimensions = None,
                   circular_platform = False,
                   simarrange_path = None,
                   antialias_samples = 0):
        """Build the STL plater UI on top of the generic plater panel.

        Picks the OpenGL viewer when available and falls back to the 2D
        showstl widget otherwise.
        NOTE(review): the mutable default filenames=[] is shared between
        calls; safe only as long as it is never mutated.
        """
        super().prepare_ui(filenames, callback, parent, build_dimensions, cutting_tool = True)
        # No cut is armed until the user toggles an axis button
        self.cutting = False
        self.cutting_axis = None
        self.cutting_dist = None
        if glview:
            viewer = stlview.StlViewPanel(self, wx.DefaultSize,
                                          build_dimensions = self.build_dimensions,
                                          circular = circular_platform,
                                          antialias_samples = antialias_samples)
        else:
            viewer = showstl(self, (580, 580), (0, 0))
        self.simarrange_path = simarrange_path
        self.set_viewer(viewer)
        self.enable_cut_button(False)
        self.SetMinClientSize(self.topsizer.CalcMin())
def start_cutting_tool(self, event, axis, direction):
toggle = event.EventObject
self.cutting = toggle.Value
if toggle.Value:
# Disable the other toggles
for button in self.cut_axis_buttons:
if button != toggle:
button.Value = False
self.cutting_axis = axis
self.cutting_direction = direction
else:
self.cutting_axis = None
self.cutting_direction = None
self.enable_cut_button(False)
self.cutting_dist = None
def end_cutting_tool(self):
self.cutting = False
self.cutting_dist = None
self.cutting_axis = None
self.cutting_direction = None
self.enable_cut_button(False)
for button in self.cut_axis_buttons:
button.SetValue(False)
    def cut_confirm(self, event):
        """Apply the pending cut to the currently selected model.

        Cuts the selection along the active axis/direction at the recorded
        distance, replaces the model with the cut half, and leaves cutting
        mode.
        """
        name = self.l.GetSelection()
        name = self.l.GetString(name)
        model = self.models[name]
        # Bake offsets/rotation into the mesh so the cut happens in world space
        transformation = transformation_matrix(model)
        transformed = model.transform(transformation)
        logging.info(_("Cutting %s alongside %s axis") % (name, self.cutting_axis.upper()))
        axes = ["x", "y", "z"]
        cut = transformed.cut(axes.index(self.cutting_axis),
                              self.cutting_direction,
                              self.cutting_dist)
        # Placement was baked in above, so the cut result starts untransformed
        cut.offsets = [0, 0, 0]
        cut.rot = 0
        cut.scale = model.scale
        cut.filename = model.filename
        cut.centeroffset = [0, 0, 0]
        self.s.prepare_model(cut, 2)
        self.models[name] = cut
        # Reset cutting-tool state and untoggle the axis buttons
        self.cutting = False
        self.cutting_axis = None
        self.cutting_dist = None
        self.cutting_direction = None
        for button in self.cut_axis_buttons:
            button.SetValue(False)
        self.enable_cut_button(False)
def clickcb(self, event, single = False):
if not isinstance(self.s, stlview.StlViewPanel):
return
if self.cutting:
self.clickcb_cut(event)
else:
self.clickcb_rebase(event)
def clickcb_cut(self, event):
axis = self.cutting_axis
self.cutting_dist, _, _ = self.s.get_cutting_plane(axis, None,
local_transform = True)
if self.cutting_dist is not None:
self.enable_cut_button(True)
    def clickcb_rebase(self, event):
        """Rebase the clicked model onto the facet under the mouse cursor.

        Casts a ray from the click position, finds the closest intersected
        facet across all models, and re-orients that model so the hit facet
        becomes its base.
        """
        x, y = event.GetPosition()
        ray_near, ray_far = self.s.mouse_to_ray(x, y, local_transform = True)
        best_match = None
        best_facet = None
        best_dist = float("inf")
        for key, model in self.models.items():
            transformation = transformation_matrix(model)
            transformed = model.transform(transformation)
            # Cheap bounding-box rejection before the per-facet intersection test
            if not transformed.intersect_box(ray_near, ray_far):
                logging.debug(_("Skipping %s for rebase search") % key)
                continue
            facet, facet_dist = transformed.intersect(ray_near, ray_far)
            if facet is not None and facet_dist < best_dist:
                best_match = key
                best_facet = facet
                best_dist = facet_dist
        if best_match is not None:
            logging.info(_("Rebasing %s") % best_match)
            model = self.models[best_match]
            newmodel = model.rebase(best_facet)
            newmodel.offsets = list(model.offsets)
            newmodel.rot = 0
            newmodel.scale = model.scale
            newmodel.filename = model.filename
            # Recenter on the rebased mesh's new bounding box
            newmodel.centeroffset = [-(newmodel.dims[1] + newmodel.dims[0]) / 2,
                                     -(newmodel.dims[3] + newmodel.dims[2]) / 2,
                                     0]
            self.s.prepare_model(newmodel, 2)
            self.models[best_match] = newmodel
        wx.CallAfter(self.Refresh)
def done(self, event, cb):
if not os.path.exists("tempstl"):
os.mkdir("tempstl")
name = "tempstl/" + str(int(time.time()) % 10000) + ".stl"
self.export_to(name)
if cb is not None:
cb(name)
if self.destroy_on_done:
self.Destroy()
def load_file(self, filename):
if filename.lower().endswith(".stl"):
try:
self.load_stl(filename)
except:
dlg = wx.MessageDialog(self, _("Loading STL file failed"),
_("Error:") + traceback.format_exc(),
wx.OK)
dlg.ShowModal()
logging.error(_("Loading STL file failed:") +
"\n" + traceback.format_exc())
elif filename.lower().endswith(".scad"):
try:
self.load_scad(filename)
except:
dlg = wx.MessageDialog(self, _("Loading OpenSCAD file failed"),
_("Error:") + traceback.format_exc(),
wx.OK)
dlg.ShowModal()
logging.error(_("Loading OpenSCAD file failed:") +
"\n" + traceback.format_exc())
    def load_scad(self, name):
        """Load the STL models referenced by a simple OpenSCAD file.

        Scans lines mentioning "stl" for translate()/rotate()/import()
        calls and loads each imported STL with the parsed placement.
        NOTE(review): assumes translate() and rotate() appear on the same
        line as import(); otherwise translate_list/rotate_list may be
        unbound (or stale from a previous line) below — TODO confirm.
        """
        with open(name) as lf:
            # Strip newlines and semicolons; keep only lines referencing STLs
            s = [i.replace("\n", "").replace("\r", "").replace(";", "") for i in lf if "stl" in i]
        for i in s:
            parts = i.split()
            for part in parts:
                if 'translate' in part:
                    translate_list = evalme(part)
            for part in parts:
                if 'rotate' in part:
                    rotate_list = evalme(part)
            for part in parts:
                if 'import' in part:
                    stl_file = evalme(part)
            # Disambiguate duplicate model names with a "(n)" suffix
            newname = os.path.split(stl_file.lower())[1]
            c = 1
            while newname in self.models:
                newname = os.path.split(stl_file.lower())[1]
                newname = newname + "(%d)" % c
                c += 1
            # Resolve the STL path relative to the .scad file's directory
            stl_path = os.path.join(os.path.split(name)[0:len(os.path.split(stl_file)) - 1])
            stl_full_path = os.path.join(stl_path[0], str(stl_file))
            # rotate_list[2] is the Z-axis rotation component
            self.load_stl_into_model(stl_full_path, stl_file, translate_list, rotate_list[2])
    def load_stl(self, name):
        """Load an STL file, cloning an already-loaded copy when possible."""
        if not os.path.exists(name):
            logging.error(_("Couldn't load non-existing file %s") % name)
            return
        path = os.path.split(name)[0]
        self.basedir = path
        if name.lower().endswith(".stl"):
            # If the same file was loaded before, clone it instead of re-parsing
            for model in self.models.values():
                if model.filename == name:
                    newmodel = copy(model)
                    newmodel.offsets = list(model.offsets)
                    newmodel.rot = model.rot
                    newmodel.scale = list(model.scale)
                    self.add_model(name, newmodel)
                    self.s.prepare_model(newmodel, 2)
                    break
            else:
                # Filter out the path, just show the STL filename.
                self.load_stl_into_model(name, name)
        wx.CallAfter(self.Refresh)
def load_stl_into_model(self, path, name, offset = None, rotation = 0, scale = [1.0, 1.0, 1.0]):
model = stltool.stl(path)
if offset is None:
offset = [self.build_dimensions[3], self.build_dimensions[4], 0]
model.offsets = list(offset)
model.rot = rotation
model.scale = list(scale)
model.filename = name
self.add_model(name, model)
model.centeroffset = [-(model.dims[1] + model.dims[0]) / 2,
-(model.dims[3] + model.dims[2]) / 2,
0]
self.s.prepare_model(model, 2)
    def export_to(self, name):
        """Export all models to one STL plus a matching OpenSCAD file.

        The .scad file records each model's placement as
        translate/rotate/import calls so the plate can be re-generated.
        """
        with open(name.replace(".", "_") + ".scad", "w") as sf:
            facets = []
            for model in self.models.values():
                r = model.rot
                o = model.offsets
                co = model.centeroffset
                sf.write("translate([%s, %s, %s])"
                         "rotate([0, 0, %s])"
                         "translate([%s, %s, %s])"
                         "import(\"%s\");\n" % (o[0], o[1], o[2],
                                                r,
                                                co[0], co[1], co[2],
                                                model.filename))
                # Bake placement into the mesh for the combined STL
                model = model.transform(transformation_matrix(model))
                facets += model.facets
        stltool.emitstl(name, facets, "plater_export")
        logging.info(_("Wrote plate to %s") % name)
def autoplate(self, event = None):
if self.simarrange_path:
try:
self.autoplate_simarrange()
except Exception as e:
logging.warning(_("Failed to use simarrange for plating, "
"falling back to the standard method. "
"The error was: ") + e)
super().autoplate()
else:
super().autoplate()
    def autoplate_simarrange(self):
        """Arrange models by running the external simarrange tool.

        Runs simarrange in dry-run mode and parses its stdout for each
        file's computed position and rotation.

        Raises:
            RuntimeError: if simarrange exits with a non-zero status.
        """
        logging.info(_("Autoplating using simarrange"))
        models = dict(self.models)
        files = [model.filename for model in models.values()]
        command = [self.simarrange_path, "--dryrun",
                   "-m",  # Pack around center
                   "-x", str(int(self.build_dimensions[0])),
                   "-y", str(int(self.build_dimensions[1]))] + files
        p = subprocess.Popen(command, stdout = subprocess.PIPE, universal_newlines = True)
        pos_regexp = re.compile("File: (.*) minx: ([0-9]+), miny: ([0-9]+), minrot: ([0-9]+)")
        for line in p.stdout:
            line = line.rstrip()
            if "Generating plate" in line:
                plateid = int(line.split()[-1])
                if plateid > 0:
                    # More than one plate means not everything fits
                    logging.error(_("Plate full, please remove some objects"))
                    break
            if "File:" in line:
                bits = pos_regexp.match(line).groups()
                filename = bits[0]
                x = float(bits[1])
                y = float(bits[2])
                rot = -float(bits[3])
                for name, model in list(models.items()):
                    # FIXME: not sure this is going to work superwell with utf8
                    if model.filename == filename:
                        model.offsets[0] = x + self.build_dimensions[3]
                        model.offsets[1] = y + self.build_dimensions[4]
                        model.rot = rot
                        del models[name]
                        break
        if p.wait() != 0:
            raise RuntimeError(_("simarrange failed"))
# Frame class wrapping StlPlaterPanel, produced by make_plater.
StlPlater = make_plater(StlPlaterPanel)
| 19,236 | Python | .py | 458 | 29.700873 | 137 | 0.534294 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,365 | stlview.py | kliment_Printrun/printrun/stlview.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
import time
import numpy
import pyglet
pyglet.options['debug_gl'] = True
from pyglet.gl import GL_AMBIENT_AND_DIFFUSE, glBegin, glClearColor, \
glColor3f, GL_CULL_FACE, GL_DEPTH_TEST, GL_DIFFUSE, GL_EMISSION, \
glEnable, glEnd, GL_FILL, GLfloat, GL_FRONT_AND_BACK, GL_LIGHT0, \
GL_LIGHT1, glLightfv, GL_LIGHTING, GL_LINE, glMaterialf, glMaterialfv, \
glMultMatrixd, glNormal3f, glPolygonMode, glPopMatrix, GL_POSITION, \
glPushMatrix, glRotatef, glScalef, glShadeModel, GL_SHININESS, \
GL_SMOOTH, GL_SPECULAR, glTranslatef, GL_TRIANGLES, glVertex3f, \
glGetDoublev, GL_MODELVIEW_MATRIX, GLdouble, glClearDepth, glDepthFunc, \
GL_LEQUAL, GL_BLEND, glBlendFunc, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, \
GL_LINE_LOOP, glGetFloatv, GL_LINE_WIDTH, glLineWidth, glDisable, \
GL_LINE_SMOOTH
from pyglet import gl
from .gl.panel import wxGLPanel
from .gl.trackball import build_rotmatrix
from .gl.libtatlin import actors
def vec(*args):
    """Pack *args* into a ctypes GLfloat array for pyglet GL calls."""
    array_type = GLfloat * len(args)
    return array_type(*args)
class stlview:
    """Indexed triangle mesh uploaded into a pyglet batch.

    Built from a list of facets, each of the form (normal, vertices).
    """

    def __init__(self, facets, batch):
        # Flatten the facets into parallel vertex/normal arrays; the facet
        # normal is repeated once per vertex of its triangle.
        vertices = []
        normals = []
        for facet in facets:
            normal = facet[0]
            for vertex in facet[1]:
                vertices.extend(vertex)
                normals.extend(normal)
        # One index per vertex, three per facet.
        indices = list(range(3 * len(facets)))
        self.vertex_list = batch.add_indexed(len(vertices) // 3,
                                             GL_TRIANGLES,
                                             None,  # group
                                             indices,
                                             ('v3f/static', vertices),
                                             ('n3f/static', normals))

    def delete(self):
        """Release the GPU vertex list."""
        self.vertex_list.delete()
class StlViewPanel(wxGLPanel):
do_lights = False
    def __init__(self, parent, size,
                 build_dimensions = None, circular = False,
                 antialias_samples = 0,
                 grid = (1, 10), perspective=False):
        """3D STL preview panel.

        parent: owning plater panel (models are read from parent.models).
        size: initial panel size.
        build_dimensions: [W, D, H, xoff, yoff, zoff]; defaults to
            200x200x100 at the origin.
        circular: draw a circular platform instead of a rectangular one.
        antialias_samples: MSAA sample count for the GL canvas.
        grid: (minor, major) platform grid spacing.
        perspective: if True, disable the orthographic projection.
        """
        # NOTE(review): self.orthographic is only assigned when perspective
        # is True; presumably the base class provides the default — confirm.
        if perspective:
            self.orthographic=False
        super().__init__(parent, wx.DefaultPosition, size, 0,
                         antialias_samples = antialias_samples)
        self.batches = []
        self.rot = 0
        self.canvas.Bind(wx.EVT_MOUSE_EVENTS, self.move)
        self.canvas.Bind(wx.EVT_MOUSEWHEEL, self.wheel)
        self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.double_click)
        self.initialized = True
        self.parent = parent
        self.initpos = None
        if build_dimensions:
            self.build_dimensions = build_dimensions
        else:
            self.build_dimensions = [200, 200, 100, 0, 0, 0]
        self.platform = actors.Platform(self.build_dimensions,
                                        circular = circular,
                                        grid = grid)
        # Initial camera distance scales with the platform footprint
        self.dist = max(self.build_dimensions[0], self.build_dimensions[1])
        # Identity trackball orientation quaternion
        self.basequat = [0, 0, 0, 1]
        wx.CallAfter(self.forceresize)  # why needed
        self.mousepos = (0, 0)
def OnReshape(self):
self.mview_initialized = False
super(StlViewPanel, self).OnReshape()
# ==========================================================================
# GLFrame OpenGL Event Handlers
# ==========================================================================
    def OnInitGL(self, call_reshape = True):
        '''Initialize OpenGL for use in the window.

        Sets up the pyglet context, depth/blend state, lights and default
        material, then performs the deferred first load of any files passed
        on the command line (which needs a live GL context).
        '''
        if self.GLinitialized:
            return
        self.GLinitialized = True
        # create a pyglet context for this panel
        self.pygletcontext = gl.Context(gl.current_context)
        self.pygletcontext.canvas = self
        self.pygletcontext.set_current()
        # normal gl init
        glClearColor(0, 0, 0, 1)
        glColor3f(1, 0, 0)
        glEnable(GL_DEPTH_TEST)
        glClearDepth(1.0)
        glDepthFunc(GL_LEQUAL)
        glEnable(GL_CULL_FACE)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        # Uncomment this line for a wireframe view
        # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        # Simple light setup. On Windows GL_LIGHT0 is enabled by default,
        # but this is not the case on Linux or Mac, so remember to always
        # include it.
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHT1)
        glLightfv(GL_LIGHT0, GL_POSITION, vec(.5, .5, 1, 0))
        glLightfv(GL_LIGHT0, GL_SPECULAR, vec(.5, .5, 1, 1))
        glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1, 1, 1, 1))
        glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 0, .5, 0))
        glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.5, .5, .5, 1))
        glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1, 1, 1, 1))
        glShadeModel(GL_SMOOTH)
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0, 0.3, 1))
        glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
        glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
        glMaterialfv(GL_FRONT_AND_BACK, GL_EMISSION, vec(0, 0.1, 0, 0.9))
        if call_reshape:
            self.OnReshape()
        # Deferred first load: command-line files need a live GL context
        if hasattr(self.parent, "filenames") and self.parent.filenames:
            for filename in self.parent.filenames:
                self.parent.load_file(filename)
            self.parent.autoplate()
            if hasattr(self.parent, "loadcb"):
                self.parent.loadcb()
            self.parent.filenames = None
def double_click(self, event):
if hasattr(self.parent, "clickcb") and self.parent.clickcb:
self.parent.clickcb(event)
def forceresize(self):
#print('forceresize')
x, y = self.GetClientSize()
#TODO: probably not needed
self.SetClientSize((x, y+1))
self.SetClientSize((x, y))
self.initialized = False
    def move(self, event):
        """react to mouse actions:
        no mouse: show red mousedrop
        LMB: move active object,
             with shift rotate viewport
        RMB: nothing
             with shift move viewport
        """
        self.mousepos = event.GetPosition()
        if event.Dragging():
            if event.LeftIsDown():
                self.handle_rotation(event)
            elif event.RightIsDown():
                self.handle_translation(event)
            self.Refresh(False)
        elif event.ButtonUp(wx.MOUSE_BTN_LEFT) or \
             event.ButtonUp(wx.MOUSE_BTN_RIGHT):
            # Drag finished: forget the drag anchor point
            self.initpos = None
        event.Skip()
def handle_wheel(self, event):
delta = event.GetWheelRotation()
factor = 1.05
x, y = event.GetPosition()
x, y, _ = self.mouse_to_3d(x, y, local_transform = True)
if delta > 0:
self.zoom(factor, (x, y))
else:
self.zoom(1 / factor, (x, y))
    def wheel(self, event):
        """react to mouse wheel actions:
        rotate object
        with shift zoom viewport
        """
        self.handle_wheel(event)
        # Refresh on the UI thread after the zoom is applied
        wx.CallAfter(self.Refresh)
    def keypress(self, event):
        """gets keypress events and moves/rotates active shape

        vi-style bindings: h/l move along X, j/k along Y, [/] rotate.
        Holding Ctrl switches to fine steps (1 unit / 1 degree).
        """
        keycode = event.GetKeyCode()
        step = 5
        angle = 18
        if event.ControlDown():
            step = 1
            angle = 1
        # h
        if keycode == 72:
            self.parent.move_shape((-step, 0))
        # l
        if keycode == 76:
            self.parent.move_shape((step, 0))
        # k (75 is ASCII 'K'; the previous comments had j and k swapped)
        if keycode == 75:
            self.parent.move_shape((0, step))
        # j (74 is ASCII 'J')
        if keycode == 74:
            self.parent.move_shape((0, -step))
        # [
        if keycode == 91:
            self.parent.rotate_shape(-angle)
        # ]
        if keycode == 93:
            self.parent.rotate_shape(angle)
        event.Skip()
        wx.CallAfter(self.Refresh)
    def anim(self, obj):
        """Bounce animation for *obj*: drop it, squash it, restore it.

        Simple explicit-Euler gravity integration; intended to run in a
        background thread (see the commented-out call in prepare_model).
        """
        g = 50 * 9.8
        v = 20
        dt = 0.05
        basepos = obj.offsets[2]
        obj.offsets[2] += obj.animoffset
        # Fall until just below the platform, squashing on contact
        while obj.offsets[2] > -1:
            time.sleep(dt)
            obj.offsets[2] -= v * dt
            v += g * dt
            if obj.offsets[2] < 0:
                obj.scale[2] *= 1 - 3 * dt
        # return
        # Rebound at a quarter of the impact speed, unsquashing on the way up
        v = v / 4
        while obj.offsets[2] < basepos:
            time.sleep(dt)
            obj.offsets[2] += v * dt
            v -= g * dt
            obj.scale[2] *= 1 + 5 * dt
        obj.scale[2] = 1.0
    def create_objects(self):
        '''create opengl objects when opengl is initialized'''
        if not self.platform.initialized:
            self.platform.init()
        self.initialized = 1
        # TODO: this probably creates constant redraw
        # create_objects is called during OnDraw, remove
        wx.CallAfter(self.Refresh)
def prepare_model(self, m, scale):
batch = pyglet.graphics.Batch()
stlview(m.facets, batch = batch)
m.batch = batch
# m.animoffset = 300
# threading.Thread(target = self.anim, args = (m, )).start()
wx.CallAfter(self.Refresh)
    def update_object_resize(self):
        '''called when the window receives only if opengl is initialized'''
        # Nothing to recompute on resize for this panel.
        pass
    def draw_objects(self):
        '''called in the middle of ondraw after the buffer has been cleared

        Draws, in order: the platform grid, a red marker under the mouse,
        every loaded model with its placement applied, and (when cutting)
        a translucent green cutting plane with an outline.
        '''
        self.create_objects()
        glPushMatrix()
        glTranslatef(0, 0, -self.dist)
        glMultMatrixd(build_rotmatrix(self.basequat))  # Rotate according to trackball
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.2, 0.2, 0.2, 1))
        glTranslatef(- self.build_dimensions[3] - self.platform.width / 2,
                     - self.build_dimensions[4] - self.platform.depth / 2, 0)  # Move origin to bottom left of platform
        # Draw platform
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        glDisable(GL_LIGHTING)
        self.platform.draw()
        glEnable(GL_LIGHTING)
        # Draw mouse
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        inter = self.mouse_to_plane(self.mousepos[0], self.mousepos[1],
                                    plane_normal = (0, 0, 1), plane_offset = 0,
                                    local_transform = False)
        if inter is not None:
            glPushMatrix()
            glTranslatef(inter[0], inter[1], inter[2])
            glBegin(GL_TRIANGLES)
            glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(1, 0, 0, 1))
            glNormal3f(0, 0, 1)
            glVertex3f(2, 2, 0)
            glVertex3f(-2, 2, 0)
            glVertex3f(-2, -2, 0)
            glVertex3f(2, -2, 0)
            glVertex3f(2, 2, 0)
            glVertex3f(-2, -2, 0)
            glEnd()
            glPopMatrix()
        # Draw objects
        glDisable(GL_CULL_FACE)
        glPushMatrix()
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.3, 0.7, 0.5, 1))
        for i in self.parent.models:
            model = self.parent.models[i]
            glPushMatrix()
            # Placement order: offset, then rotate, then recenter, then scale
            glTranslatef(*(model.offsets))
            glRotatef(model.rot, 0.0, 0.0, 1.0)
            glTranslatef(*(model.centeroffset))
            glScalef(*model.scale)
            model.batch.draw()
            glPopMatrix()
        glPopMatrix()
        glEnable(GL_CULL_FACE)
        # Draw cutting plane
        if self.parent.cutting:
            # FIXME: make this a proper Actor
            axis = self.parent.cutting_axis
            fixed_dist = self.parent.cutting_dist
            dist, plane_width, plane_height = self.get_cutting_plane(axis, fixed_dist)
            if dist is not None:
                glPushMatrix()
                # Orient the plane perpendicular to the cutting axis
                if axis == "x":
                    glRotatef(90, 0, 1, 0)
                    glRotatef(90, 0, 0, 1)
                    glTranslatef(0, 0, dist)
                elif axis == "y":
                    glRotatef(90, 1, 0, 0)
                    glTranslatef(0, 0, -dist)
                elif axis == "z":
                    glTranslatef(0, 0, dist)
                glDisable(GL_CULL_FACE)
                glBegin(GL_TRIANGLES)
                glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0, 0.9, 0.15, 0.3))
                glNormal3f(0, 0, self.parent.cutting_direction)
                glVertex3f(plane_width, plane_height, 0)
                glVertex3f(0, plane_height, 0)
                glVertex3f(0, 0, 0)
                glVertex3f(plane_width, 0, 0)
                glVertex3f(plane_width, plane_height, 0)
                glVertex3f(0, 0, 0)
                glEnd()
                glEnable(GL_CULL_FACE)
                # Outline the plane with a thick smooth line
                glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
                glEnable(GL_LINE_SMOOTH)
                orig_linewidth = (GLfloat)()
                glGetFloatv(GL_LINE_WIDTH, orig_linewidth)
                glLineWidth(4.0)
                glBegin(GL_LINE_LOOP)
                glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0, 0.8, 0.15, 1))
                glVertex3f(0, 0, 0)
                glVertex3f(0, plane_height, 0)
                glVertex3f(plane_width, plane_height, 0)
                glVertex3f(plane_width, 0, 0)
                glEnd()
                glLineWidth(orig_linewidth)
                glDisable(GL_LINE_SMOOTH)
                glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
                glPopMatrix()
        glPopMatrix()
# ==========================================================================
# Utils
# ==========================================================================
    def get_modelview_mat(self, local_transform):
        """Return the current modelview matrix as a 16-element GLdouble array.

        With local_transform, temporarily applies the same camera transform
        used by draw_objects (distance, trackball rotation, platform origin
        shift) before reading the matrix back.
        """
        mvmat = (GLdouble * 16)()
        if local_transform:
            glPushMatrix()
            # Rotate according to trackball
            glTranslatef(0, 0, -self.dist)
            glMultMatrixd(build_rotmatrix(self.basequat))  # Rotate according to trackball
            glTranslatef(- self.build_dimensions[3] - self.platform.width / 2,
                         - self.build_dimensions[4] - self.platform.depth / 2, 0)  # Move origin to bottom left of platform
            glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
            glPopMatrix()
        else:
            glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
        return mvmat
    def get_cutting_plane(self, cutting_axis, fixed_dist, local_transform = False):
        """Compute the cutting plane position and dimensions for an axis.

        Returns (dist, plane_width, plane_height) where dist is the distance
        along *cutting_axis*, derived from the mouse position unless
        *fixed_dist* is given. dist may be None when no usable intersection
        is found.
        """
        cutting_plane_sizes = {"x": (self.platform.depth, self.platform.height),
                               "y": (self.platform.width, self.platform.height),
                               "z": (self.platform.width, self.platform.depth)}
        plane_width, plane_height = cutting_plane_sizes[cutting_axis]
        if fixed_dist is not None:
            return fixed_dist, plane_width, plane_height
        ref_sizes = {"x": self.platform.width,
                     "y": self.platform.depth,
                     "z": self.platform.height,
                     }
        # Primary reference plane the mouse ray is intersected with
        ref_planes = {"x": (0, 0, 1),
                      "y": (0, 0, 1),
                      "z": (0, 1, 0)
                      }
        ref_offsets = {"x": 0,
                       "y": 0,
                       "z": - self.platform.depth / 2
                       }
        translate_axis = {"x": 0,
                          "y": 1,
                          "z": 2
                          }
        # Fallback plane used when the primary intersection is unusable
        fallback_ref_planes = {"x": (0, 1, 0),
                               "y": (1, 0, 0),
                               "z": (1, 0, 0)
                               }
        fallback_ref_offsets = {"x": - self.platform.height / 2,
                                "y": - self.platform.width / 2,
                                "z": - self.platform.width / 2,
                                }
        ref_size = ref_sizes[cutting_axis]
        ref_plane = ref_planes[cutting_axis]
        ref_offset = ref_offsets[cutting_axis]
        inter = self.mouse_to_plane(self.mousepos[0], self.mousepos[1],
                                    plane_normal = ref_plane,
                                    plane_offset = ref_offset,
                                    local_transform = local_transform)
        max_size = max((self.platform.width,
                        self.platform.depth,
                        self.platform.height))
        dist = None
        # Reject intersections absurdly far from the platform
        if inter is not None and numpy.fabs(inter).max() + max_size / 2 < 2 * max_size:
            dist = inter[translate_axis[cutting_axis]]
        if dist is None or dist < -0.5 * ref_size or dist > 1.5 * ref_size:
            # Try the fallback plane when the primary gave nothing usable
            ref_plane = fallback_ref_planes[cutting_axis]
            ref_offset = fallback_ref_offsets[cutting_axis]
            inter = self.mouse_to_plane(self.mousepos[0], self.mousepos[1],
                                        plane_normal = ref_plane,
                                        plane_offset = ref_offset,
                                        local_transform = False)
            if inter is not None and numpy.fabs(inter).max() + max_size / 2 < 2 * max_size:
                dist = inter[translate_axis[cutting_axis]]
        if dist is not None:
            # Clamp to a bit beyond the platform bounds
            dist = min(1.5 * ref_size, max(-0.5 * ref_size, dist))
        return dist, plane_width, plane_height
def main():
    """Standalone demo: open a frame containing an StlViewPanel."""
    app = wx.App(redirect = False)
    frame = wx.Frame(None, -1, "GL Window", size = (400, 400))
    # Fix: StlViewPanel requires a size argument; calling it with only the
    # frame raised a TypeError and made this demo entry point unusable.
    StlViewPanel(frame, wx.DefaultSize)
    frame.Show(True)
    app.MainLoop()
    app.Destroy()
if __name__ == "__main__":
main()
| 18,146 | Python | .py | 424 | 30.688679 | 123 | 0.538414 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,366 | pronsole.py | kliment_Printrun/printrun/pronsole.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import cmd
import glob
import os
import platform
import time
import threading
import sys
import shutil
import subprocess
import codecs
import argparse
import locale
import logging
import traceback
import re
from platformdirs import user_cache_dir, user_config_dir, user_data_dir
from serial import SerialException
from . import printcore
from .utils import install_locale, run_command, get_command_output, \
format_time, format_duration, RemainingTimeEstimator, \
get_home_pos, parse_build_dimensions, parse_temperature_report, \
setup_logging
install_locale('pronterface')
from .settings import Settings, BuildDimensionsSetting
from .power import powerset_print_start, powerset_print_stop
from printrun import gcoder
from .rpc import ProntRPC
from printrun.spoolmanager import spoolmanager
# Optional Windows-only registry access (used elsewhere for port scanning).
if os.name == "nt":
    try:
        import winreg
    except ImportError:  # was a bare except, which also hid unrelated errors
        pass
READLINE = True
try:
    import readline
    if os.name == "nt":  # config pyreadline on Windows
        readline.rl.mode.show_all_if_ambiguous = "on"
except ImportError:
    READLINE = False  # neither readline module is available
# Matches temperature fields like "T:", "T0:" in firmware replies.
# Raw string fixes the invalid "\d" escape in a normal string literal.
tempreading_exp = re.compile(r'\bT\d*:')
# Bitmask flags controlling which reports the monitor thread requests.
REPORT_NONE = 0
REPORT_POS = 1
REPORT_TEMP = 2
REPORT_MANUAL = 4
DEG = "\N{DEGREE SIGN}"
class Status:
    """Mutable snapshot of printer state: temperatures and job progress."""

    def __init__(self):
        self.extruder_temp = 0
        self.extruder_temp_target = 0
        self.bed_temp = 0
        self.bed_temp_target = 0
        self.print_job = None
        self.print_job_progress = 1.0

    def update_tempreading(self, tempstr):
        """Update temperatures from a firmware temperature report line."""
        temps = parse_temperature_report(tempstr)

        def hotend_value(index):
            # Prefer the explicit first-extruder slot, fall back to "T".
            for slot in ("T0", "T"):
                if slot in temps and temps[slot][index]:
                    return float(temps[slot][index])
            return None

        current = hotend_value(0)
        if current is not None:
            self.extruder_temp = current
        target = hotend_value(1)
        if target is not None:
            self.extruder_temp_target = target
        if "B" in temps and temps["B"][0]:
            self.bed_temp = float(temps["B"][0])
            bed_target = temps["B"][1]
            if bed_target:
                self.bed_temp_target = float(bed_target)

    @property
    def bed_enabled(self):
        return self.bed_temp != 0

    @property
    def extruder_enabled(self):
        return self.extruder_temp != 0
class RGSGCoder():
    """Bare alternative to gcoder.LightGCode which does not preload all lines in memory,
    but still allows run_gcode_script (hence the RGS) to be processed by do_print (checksum,threading,ok waiting)"""
    def __init__(self, line):
        # Truthy placeholder: do_print only checks that lines exist
        self.lines = True
        self.filament_length = 0.
        self.filament_length_multi = [0]
        # Start the script; its stdout is consumed lazily via read()
        self.proc = run_command(line, {"$s": 'str(self.filename)'}, stdout = subprocess.PIPE, universal_newlines = True)
        lr = gcoder.Layer([])
        lr.duration = 0.
        self.all_layers = [lr]
        self.read()  # empty layer causes division by zero during progress calculation
    def read(self):
        # Read one line from the script's stdout into the single layer;
        # returns the parsed PyLightLine, or None on EOF or a blank line.
        ln = self.proc.stdout.readline()
        if not ln:
            self.proc.stdout.close()
            return None
        ln = ln.strip()
        if not ln:
            return None
        pyLn = gcoder.PyLightLine(ln)
        self.all_layers[0].append(pyLn)
        return pyLn
    def has_index(self, i):
        # Pull lines from the subprocess until index i exists or EOF
        while i >= len(self.all_layers[0]) and not self.proc.stdout.closed:
            self.read()
        return i < len(self.all_layers[0])
    def __len__(self):
        return len(self.all_layers[0])
    def idxs(self, i):
        # Single-layer model: (layer, line) is always (0, i)
        return 0, i  # layer, line
class pronsole(cmd.Cmd):
    def __init__(self):
        """Set up printer core, callbacks, settings and console state."""
        cmd.Cmd.__init__(self)
        if not READLINE:
            self.completekey = None
        self.status = Status()
        self.dynamic_temp = False
        self.compute_eta = None
        self.statuscheck = False
        self.status_thread = None
        self.monitor_interval = 3
        # Printer connection and its event callbacks
        self.p = printcore.printcore()
        self.p.recvcb = self.recvcb
        self.p.startcb = self.startcb
        self.p.endcb = self.endcb
        self.p.layerchangecb = self.layer_change_cb
        self.p.process_host_command = self.process_host_command
        self.recvlisteners = []
        self.in_macro = False
        self.p.onlinecb = self.online
        self.p.errorcb = self.logError
        self.fgcode = None
        self.filename = None
        self.rpc_server = None
        self.curlayer = 0
        # SD card listing / printing state
        self.sdlisting = 0
        self.sdlisting_echo = 0
        self.sdfiles = []
        self.paused = False
        self.sdprinting = 0
        self.uploading = 0  # Unused, just for pronterface generalization
        # Temperature presets by material
        self.temps = {"PLA": "185", "ABS": "230", "Off": "0"}
        self.bedtemps = {"PLA": "60", "ABS": "110", "Off": "0"}
        self.percentdone = 0
        self.posreport = ""
        self.tempreadings = ""
        # Counters to tell user-issued M114/M105 apart from auto-polls
        self.userm114 = 0
        self.userm105 = 0
        self.m105_waitcycles = 0
        self.macros = {}
        self.rc_loaded = False
        self.processing_rc = False
        self.processing_args = False
        self.settings = Settings(self)
        self.settings._add(BuildDimensionsSetting("build_dimensions", "200x200x100+0+0+0+0+0+0", _("Build Dimensions:"), _("Dimensions of Build Platform\n & optional offset of origin\n & optional switch position\n\nExamples:\n   XXXxYYY\n   XXX,YYY,ZZZ\n   XXXxYYYxZZZ+OffX+OffY+OffZ\nXXXxYYYxZZZ+OffX+OffY+OffZ+HomeX+HomeY+HomeZ"), "Printer"), self.update_build_dimensions)
        self.settings._port_list = self.scanserial
        self.update_build_dimensions(None, self.settings.build_dimensions)
        self.update_tcp_streaming_mode(None, self.settings.tcp_streaming_mode)
        self.monitoring = 0
        self.starttime = 0
        self.extra_print_time = 0
        self.silent = False
        # First characters recognized as raw G-code in default()
        self.commandprefixes = 'MGTD$'
        self.promptstrs = {"offline": "%(bold)soffline>%(normal)s ",
                           "fallback": "%(bold)s%(red)s%(port)s%(white)s PC>%(normal)s ",
                           "macro": "%(bold)s..>%(normal)s ",
                           "online": "%(bold)s%(green)s%(port)s%(white)s %(extruder_temp_fancy)s%(progress_fancy)s>%(normal)s "}
        self.spool_manager = spoolmanager.SpoolManager(self)
        self.current_tool = 0  # Keep track of the extruder being used
        # Per-user cache/config/data directories
        self.cache_dir = os.path.join(user_cache_dir("Printrun"))
        self.history_file = os.path.join(self.cache_dir,"history")
        self.config_dir = os.path.join(user_config_dir("Printrun"))
        self.data_dir = os.path.join(user_data_dir("Printrun"))
        # Firmware chatter that should not be echoed to the console
        self.lineignorepattern=re.compile("ok ?\d*$|.*busy: ?processing|.*busy: ?heating|.*Active Extruder: ?\d*$")
# --------------------------------------------------------------
# General console handling
# --------------------------------------------------------------
def postloop(self):
self.p.disconnect()
cmd.Cmd.postloop(self)
def preloop(self):
self.log(_("Welcome to the printer console! Type \"help\" for a list of available commands."))
self.prompt = self.promptf()
cmd.Cmd.preloop(self)
# We replace this function, defined in cmd.py .
# It's default behavior with regards to Ctr-C
# and Ctr-D doesn't make much sense...
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.

        Overrides cmd.Cmd.cmdloop to handle Ctrl-C (clear the line) and
        Ctrl-D (clean exit) sensibly, and to load/save readline history
        from the per-user cache directory.
        """
        self.preloop()
        if self.use_rawinput and self.completekey:
            self.old_completer = readline.get_completer()
            readline.set_completer(self.complete)
            readline.parse_and_bind(self.completekey + ": complete")
            # Load history from the cache dir, creating it when missing
            history = (self.history_file)
            if not os.path.exists(history):
                if not os.path.exists(self.cache_dir):
                    os.makedirs(self.cache_dir)
                history = os.path.join(self.cache_dir, "history")
            if os.path.exists(history):
                readline.read_history_file(history)
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro) + "\n")
            stop = None
            while not stop:
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            # Ctrl-D: exit cleanly
                            self.log("")
                            self.do_exit("")
                        except KeyboardInterrupt:
                            # Ctrl-C: drop the current line, keep running
                            self.log("")
                            line = ""
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            line = ""
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                readline.set_completer(self.old_completer)
                readline.write_history_file(self.history_file)
def confirm(self):
y_or_n = input("y/n: ")
if y_or_n == "y":
return True
elif y_or_n != "n":
return self.confirm()
return False
def log(self, *msg):
msg = "".join(str(i) for i in msg)
logging.info(msg)
def logError(self, *msg):
msg = "".join(str(i) for i in msg)
logging.error(msg)
if not self.settings.error_command:
return
output = get_command_output(self.settings.error_command, {"$m": msg})
if output:
self.log(_("Error command output:"))
self.log(output.rstrip())
    def promptf(self):
        """A function to generate prompts so that we can do dynamic prompts.

        Selects a template by state (macro/offline/online/fallback) and
        fills in port, temperatures, progress and ANSI color codes.
        """
        if self.in_macro:
            promptstr = self.promptstrs["macro"]
        elif not self.p.online:
            promptstr = self.promptstrs["offline"]
        elif self.status.extruder_enabled:
            promptstr = self.promptstrs["online"]
        else:
            promptstr = self.promptstrs["fallback"]
        if "%" not in promptstr:
            return promptstr
        else:
            specials = {}
            specials["extruder_temp"] = str(int(self.status.extruder_temp))
            specials["extruder_temp_target"] = str(int(self.status.extruder_temp_target))
            # port: /dev/tty* | netaddress:port
            specials["port"] = self.settings.port.replace('/dev/', '')
            if self.status.extruder_temp_target == 0:
                specials["extruder_temp_fancy"] = str(int(self.status.extruder_temp)) + DEG
            else:
                specials["extruder_temp_fancy"] = "%s%s/%s%s" % (str(int(self.status.extruder_temp)), DEG, str(int(self.status.extruder_temp_target)), DEG)
            if self.p.printing:
                # Progress with one decimal place, derived from the queue index
                progress = int(1000 * float(self.p.queueindex) / len(self.p.mainqueue)) / 10
            elif self.sdprinting:
                progress = self.percentdone
            else:
                progress = 0.0
            specials["progress"] = str(progress)
            if self.p.printing or self.sdprinting:
                specials["progress_fancy"] = " " + str(round(progress, 2)) + "%"
            else:
                specials["progress_fancy"] = ""
            # ANSI escape codes for colored prompts
            specials["red"] = "\033[31m"
            specials["green"] = "\033[32m"
            specials["white"] = "\033[37m"
            specials["bold"] = "\033[01m"
            specials["normal"] = "\033[00m"
            return promptstr % specials
    def postcmd(self, stop, line):
        """ A hook we override to generate prompts after
            each command is executed, for the next prompt.
            We also use it to send M105 commands so that
            temp info gets updated for the prompt."""
        if self.p.online and self.dynamic_temp:
            self.p.send_now("M105")
        self.prompt = self.promptf()
        return stop
def kill(self):
self.statuscheck = False
if self.status_thread:
self.status_thread.join()
self.status_thread = None
if self.rpc_server is not None:
self.rpc_server.shutdown()
    def write_prompt(self):
        # Redraw the prompt immediately (used outside the normal cmd loop).
        sys.stdout.write(self.promptf())
        sys.stdout.flush()
    def help_help(self, l = ""):
        # "help help" just shows the general help listing.
        self.do_help("")
    def do_gcodes(self, l = ""):
        # Documentation-only command; raw G-code lines are handled in default().
        self.help_gcodes()
    def help_gcodes(self):
        """Explain that bare G-code lines are forwarded to the printer."""
        self.log(_("Gcodes are passed through to the printer as they are"))
def precmd(self, line):
if line.upper().startswith("M114"):
self.userm114 += 1
elif line.upper().startswith("M105"):
self.userm105 += 1
return line
    def help_shell(self):
        """Document the "!" (Python exec) console command."""
        self.log(_("Executes a python command. Example:"))
        self.log("! os.listdir('.')")
    def do_shell(self, l):
        """Execute *l* as python code in the current process.

        SECURITY NOTE: exec() of user-typed input is a deliberate REPL
        feature of this console; never expose it to untrusted input.
        """
        exec(l)
    def emptyline(self):
        """Called when an empty line is entered - do not remove.

        Overrides cmd.Cmd's default of repeating the previous command.
        """
        pass
def default(self, l):
if l[0].upper() in self.commandprefixes.upper():
if self.p and self.p.online:
if not self.p.loud:
self.log(_("SENDING:") + l.upper())
self.p.send_now(l.upper())
else:
self.logError(_("Printer is not online."))
return
elif l[0] == "@":
if self.p and self.p.online:
if not self.p.loud:
self.log(_("SENDING:") + l[1:])
self.p.send_now(l[1:])
else:
self.logError(_("Printer is not online."))
return
else:
cmd.Cmd.default(self, l)
    def do_exit(self, l):
        """Disable heaters, disconnect and exit the program.

        "exit force" skips the confirmation prompt shown while printing.
        """
        if self.p.printing and l != "force":
            self.log(_("Are you sure you want to exit while printing?\n\
Disables all heaters upon exit."))
            if not self.confirm():
                return
        if self.status.extruder_temp_target != 0:
            self.log(_("Setting extruder temp to 0"))
        # hotend off is sent unconditionally; bed off only when a bed exists
        self.p.send_now("M104 S0.0")
        if self.status.bed_enabled:
            if self.status.bed_temp_target != 0:
                self.log(_("Setting bed temp to 0"))
            self.p.send_now("M140 S0.0")
        self.log(_("Disconnecting from printer..."))
        self.log(_("Exiting program. Goodbye!"))
        self.p.disconnect()
        self.kill()
        sys.exit()
    def help_exit(self):
        """Print help for the exit command."""
        self.log(_("Disconnects from the printer and exits the program."))
# --------------------------------------------------------------
# Macro handling
# --------------------------------------------------------------
def complete_macro(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.macros.keys() if i.startswith(text)]
elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
return [i for i in ["/D", "/S"] + self.completenames(text) if i.startswith(text)]
else:
return []
    def hook_macro(self, l):
        """Temporary ``onecmd`` replacement used while recording a macro.

        Indented lines are appended to the macro body; a line with no
        leading whitespace ends the macro and is processed normally.
        """
        l = l.rstrip()
        ls = l.lstrip()
        ws = l[:len(l) - len(ls)]  # just leading whitespace
        if len(ws) == 0:
            self.end_macro()
            # pass the unprocessed line to regular command processor to not require empty line in .pronsolerc
            return self.onecmd(l)
        self.cur_macro_def += l + "\n"
    def end_macro(self):
        """Finish recording a macro.

        Restores the overridden ``onecmd``, compiles the recorded body,
        installs do_<name>/help_<name> handlers on the class so cmd.Cmd
        dispatches to the macro, and persists it to the rc file.
        """
        if "onecmd" in self.__dict__: del self.onecmd # remove override
        self.in_macro = False
        self.prompt = self.promptf()
        if self.cur_macro_def != "":
            self.macros[self.cur_macro_name] = self.cur_macro_def
            macro = self.compile_macro(self.cur_macro_name, self.cur_macro_def)
            # bind defaults in the lambdas to freeze the current macro values
            setattr(self.__class__, "do_" + self.cur_macro_name, lambda self, largs, macro = macro: macro(self, *largs.split()))
            setattr(self.__class__, "help_" + self.cur_macro_name, lambda self, macro_name = self.cur_macro_name: self.subhelp_macro(macro_name))
            if not self.processing_rc:
                self.log(_("Macro '") + self.cur_macro_name + _("' defined"))
                # save it
                if not self.processing_args:
                    macro_key = "macro " + self.cur_macro_name
                    macro_def = macro_key
                    if "\n" in self.cur_macro_def:
                        macro_def += "\n"
                    else:
                        macro_def += " "
                    macro_def += self.cur_macro_def
                    self.save_in_rc(macro_key, macro_def)
        else:
            self.logError(_("Empty macro - cancelled"))
        del self.cur_macro_name, self.cur_macro_def
def compile_macro_line(self, line):
line = line.rstrip()
ls = line.lstrip()
ws = line[:len(line) - len(ls)] # just leading whitespace
if ls == "" or ls.startswith('#'): return "" # no code
if ls.startswith('!'):
return ws + ls[1:] + "\n" # python mode
else:
ls = ls.replace('"', '\\"') # need to escape double quotes
ret = ws + 'self.precmd("' + ls + '".format(*arg))\n' # parametric command mode
return ret + ws + 'self.onecmd("' + ls + '".format(*arg))\n'
def compile_macro(self, macro_name, macro_def):
if macro_def.strip() == "":
self.logError(_("Empty macro - cancelled"))
return
macro = None
namespace={}
pycode = "def macro(self,*arg):\n"
if "\n" not in macro_def.strip():
pycode += self.compile_macro_line(" " + macro_def.strip())
else:
lines = macro_def.split("\n")
for l in lines:
pycode += self.compile_macro_line(l)
exec(pycode,namespace)
try:
macro=namespace['macro']
except:
pass
return macro
def start_macro(self, macro_name, prev_definition = "", suppress_instructions = False):
if not self.processing_rc and not suppress_instructions:
self.logError(_("Enter macro using indented lines, end with empty line"))
self.cur_macro_name = macro_name
self.cur_macro_def = ""
self.onecmd = self.hook_macro # override onecmd temporarily
self.in_macro = False
self.prompt = self.promptf()
def delete_macro(self, macro_name):
if macro_name in self.macros.keys():
delattr(self.__class__, "do_" + macro_name)
del self.macros[macro_name]
self.log(_("Macro '") + macro_name + _("' removed"))
if not self.processing_rc and not self.processing_args:
self.save_in_rc("macro " + macro_name, "")
else:
self.logError(_("Macro '") + macro_name + _("' is not defined"))
    def do_macro(self, args):
        """Define, list, show or delete user macros.

        "macro" alone lists the macros; "macro <name>" starts interactive
        definition; "macro <name> <def>" defines inline; the /d and /s
        suffixes delete and show a macro respectively.
        """
        if args.strip() == "":
            self.print_topics("User-defined macros", [str(k) for k in self.macros.keys()], 15, 80)
            return
        arglist = args.split(None, 1)
        macro_name = arglist[0]
        # refuse to shadow a built-in command with a macro
        if macro_name not in self.macros and hasattr(self.__class__, "do_" + macro_name):
            self.logError(_("Name '") + macro_name + _("' is being used by built-in command"))
            return
        if len(arglist) == 2:
            macro_def = arglist[1]
            if macro_def.lower() == "/d":
                self.delete_macro(macro_name)
                return
            if macro_def.lower() == "/s":
                self.subhelp_macro(macro_name)
                return
            # single-line definition: register it immediately
            self.cur_macro_def = macro_def
            self.cur_macro_name = macro_name
            self.end_macro()
            return
        if macro_name in self.macros:
            self.start_macro(macro_name, self.macros[macro_name])
        else:
            self.start_macro(macro_name)
def help_macro(self):
self.log(_("Define single-line macro: macro <name> <definition>"))
self.log(_("Define multi-line macro: macro <name>"))
self.log(_("Enter macro definition in indented lines. Use {0} .. {N} to substitute macro arguments"))
self.log(_("Enter python code, prefixed with ! Use arg[0] .. arg[N] to substitute macro arguments"))
self.log(_("Delete macro: macro <name> /d"))
self.log(_("Show macro definition: macro <name> /s"))
self.log(_("'macro' without arguments displays list of defined macros"))
def subhelp_macro(self, macro_name):
if macro_name in self.macros.keys():
macro_def = self.macros[macro_name]
if "\n" in macro_def:
self.log(_("Macro '") + macro_name + _("' defined as:"))
self.log(self.macros[macro_name] + "----------------")
else:
self.log(_("Macro '") + macro_name + _("' defined as: '") + macro_def + "'")
else:
self.logError(_("Macro '") + macro_name + _("' is not defined"))
# --------------------------------------------------------------
# Configuration handling
# --------------------------------------------------------------
    def set(self, var, str):
        """Set configuration variable *var* to *str* and persist the change.

        NOTE(review): the parameter name ``str`` shadows the builtin; it is
        kept because renaming it would change the keyword-call interface.
        """
        try:
            # remember the old type for the error message below
            t = type(getattr(self.settings, var))
            value = self.settings._set(var, str)
            if not self.processing_rc and not self.processing_args:
                self.save_in_rc("set " + var, "set %s %s" % (var, value))
        except AttributeError:
            logging.debug(_("Unknown variable '%s'") % var)
        except ValueError as ve:
            # validators attach from_validator to provide a specific message
            if hasattr(ve, "from_validator"):
                self.logError(_("Bad value %s for variable '%s': %s") % (str, var, ve.args[0]))
            else:
                self.logError(_("Bad value for variable '%s', expecting %s (%s)") % (var, repr(t)[1:-1], ve.args[0]))
def do_set(self, argl):
args = argl.split(None, 1)
if len(args) < 1:
for k in [kk for kk in dir(self.settings) if not kk.startswith("_")]:
self.log("%s = %s" % (k, str(getattr(self.settings, k))))
return
if len(args) < 2:
# Try getting the default value of the setting to check whether it
# actually exists
try:
getattr(self.settings, args[0])
except AttributeError:
logging.warning(_("Unknown variable '%s'") % args[0])
return
self.set(args[0], args[1])
def help_set(self):
self.log(_("Set variable: set <variable> <value>"))
self.log(_("Show variable: set <variable>"))
self.log(_("'set' without arguments displays all variables"))
def complete_set(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in dir(self.settings) if not i.startswith("_") and i.startswith(text)]
elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
return [i for i in self.settings._tabcomplete(line.split()[1]) if i.startswith(text)]
else:
return []
    def load_rc(self, rc_filename):
        """Execute every non-comment line of *rc_filename* as a console command.

        Sets processing_rc while running so executed commands do not try to
        re-save themselves into the rc file.
        """
        self.processing_rc = True
        try:
            rc = codecs.open(rc_filename, "r", "utf-8")
            self.rc_filename = os.path.abspath(rc_filename)
            for rc_cmd in rc:
                if not rc_cmd.lstrip().startswith("#"):
                    logging.debug(rc_cmd.rstrip())
                    self.onecmd(rc_cmd)
            rc.close()
            # a macro definition still open at EOF must be closed explicitly
            if hasattr(self, "cur_macro_def"):
                self.end_macro()
            self.rc_loaded = True
        finally:
            self.processing_rc = False
    def load_default_rc(self):
        """Load the default configuration file, creating it if missing."""
        # Check if a configuration file exists in an "old" location,
        # if not, use the "new" location provided by appdirs
        for f in '~/.pronsolerc', '~/printrunconf.ini':
            expanded = os.path.expanduser(f)
            if os.path.exists(expanded):
                config = expanded
                break
        else:
            # no legacy file found: use the platform config directory
            if not os.path.exists(self.config_dir):
                os.makedirs(self.config_dir)
            config_name = ('printrunconf.ini'
                           if platform.system() == 'Windows'
                           else 'pronsolerc')
            config = os.path.join(self.config_dir, config_name)
        logging.info('Loading config file ' + config)
        # Load the default configuration file
        try:
            self.load_rc(config)
        except FileNotFoundError:
            # Make sure the filename is initialized,
            # and create the file if it doesn't exist
            self.rc_filename = config
            open(self.rc_filename, 'a').close()
    def save_in_rc(self, key, definition):
        """
        Saves or updates macro or other definitions in .pronsolerc
        key is prefix that determines what is being defined/updated (e.g. 'macro foo')
        definition is the full definition (that is written to file). (e.g. 'macro foo move x 10')
        Set key as empty string to just add (and not overwrite)
        Set definition as empty string to remove it from .pronsolerc
        To delete line from .pronsolerc, set key as the line contents, and definition as empty string
        Only first definition with given key is overwritten.
        Updates are made in the same file position.
        Additions are made to the end of the file.
        """
        rci, rco = None, None
        if definition != "" and not definition.endswith("\n"):
            definition += "\n"
        try:
            written = False
            if os.path.exists(self.rc_filename):
                # rewrite through cache copies, then atomically move back
                if not os.path.exists(self.cache_dir):
                    os.makedirs(self.cache_dir)
                configcache = os.path.join(self.cache_dir, os.path.basename(self.rc_filename))
                configcachebak = configcache + "~bak"
                configcachenew = configcache + "~new"
                shutil.copy(self.rc_filename, configcachebak)
                rci = codecs.open(configcachebak, "r", "utf-8")
                rco = codecs.open(configcachenew, "w", "utf-8")
            if rci is not None:
                overwriting = False
                for rc_cmd in rci:
                    l = rc_cmd.rstrip()
                    ls = l.lstrip()
                    ws = l[:len(l) - len(ls)]  # just leading whitespace
                    # a non-indented line ends the block being overwritten
                    if overwriting and len(ws) == 0:
                        overwriting = False
                    # first line starting with key + whitespace is replaced
                    if not written and key != "" and rc_cmd.startswith(key) and (rc_cmd + "\n")[len(key)].isspace():
                        overwriting = True
                        written = True
                        rco.write(definition)
                    if not overwriting:
                        rco.write(rc_cmd)
                        if not rc_cmd.endswith("\n"): rco.write("\n")
            if not written:
                rco.write(definition)
            if rci is not None:
                rci.close()
            rco.close()
            shutil.move(configcachenew, self.rc_filename)
            # if definition != "":
            #     self.log("Saved '"+key+"' to '"+self.rc_filename+"'")
            # else:
            #     self.log("Removed '"+key+"' from '"+self.rc_filename+"'")
        except Exception as e:
            self.logError(_("Saving failed for "), key + ":", str(e))
        finally:
            del rci, rco
# --------------------------------------------------------------
# Configuration update callbacks
# --------------------------------------------------------------
    def update_build_dimensions(self, param, value):
        """Settings callback: reparse the build volume and refresh home position."""
        self.build_dimensions_list = parse_build_dimensions(value)
        self.p.analyzer.home_pos = get_home_pos(self.build_dimensions_list)
    def update_tcp_streaming_mode(self, param, value):
        """Settings callback: propagate the TCP streaming flag to printcore."""
        self.p.tcp_streaming_mode = self.settings.tcp_streaming_mode
def update_rpc_server(self, param, value):
if value:
if self.rpc_server is None:
self.rpc_server = ProntRPC(self)
else:
if self.rpc_server is not None:
self.rpc_server.shutdown()
self.rpc_server = None
# --------------------------------------------------------------
# Command line options handling
# --------------------------------------------------------------
    def add_cmdline_arguments(self, parser):
        """Register pronsole's command line options on *parser*."""
        parser.add_argument('-v', '--verbose', help = _("increase verbosity"), action = "store_true")
        parser.add_argument('-c', '--conf', '--config', help = _("load this file on startup instead of .pronsolerc ; you may chain config files, if so settings auto-save will use the last specified file"), action = "append", default = [])
        parser.add_argument('-e', '--execute', help = _("executes command after configuration/.pronsolerc is loaded ; macros/settings from these commands are not autosaved"), action = "append", default = [])
        parser.add_argument('filename', nargs='?', help = _("file to load"))
    def process_cmdline_arguments(self, args):
        """Apply parsed command line arguments in order.

        Config files first (falling back to the default rc), then -e
        commands (with auto-save suppressed), then the optional filename.
        """
        if args.verbose:
            logger = logging.getLogger()
            logger.setLevel(logging.DEBUG)
        for config in args.conf:
            try:
                self.load_rc(config)
            except EnvironmentError as err:
                print((_("ERROR: Unable to load configuration file: %s") %
                       str(err)[10:]))
                sys.exit(1)
        if not self.rc_loaded:
            self.load_default_rc()
        # -e commands must not be persisted back to the rc file
        self.processing_args = True
        for command in args.execute:
            self.onecmd(command)
        self.processing_args = False
        self.update_rpc_server(None, self.settings.rpc_server)
        if args.filename:
            self.cmdline_filename_callback(args.filename)
    def cmdline_filename_callback(self, filename):
        """Handle the positional filename argument by loading it."""
        self.do_load(filename)
def parse_cmdline(self, args):
parser = argparse.ArgumentParser(description = 'Printrun 3D printer interface')
self.add_cmdline_arguments(parser)
args = [arg for arg in args if not arg.startswith("-psn")]
args = parser.parse_args(args = args)
self.process_cmdline_arguments(args)
setup_logging(sys.stdout, self.settings.log_path, True)
# --------------------------------------------------------------
# Printer connection handling
# --------------------------------------------------------------
    def connect_to_printer(self, port, baud, dtr):
        """Connect printcore to *port* and start the status thread.

        Returns True on success, False when the connection failed.
        """
        try:
            self.p.connect(port, baud, dtr)
        except SerialException as e:
            # Currently, there is no errno, but it should be there in the future
            if e.errno == 2:
                self.logError(_("Error: You are trying to connect to a non-existing port."))
            elif e.errno == 8:
                self.logError(_("Error: You don't have permission to open %s.") % port)
                self.logError(_("You might need to add yourself to the dialout group."))
            else:
                self.logError(traceback.format_exc())
            # Kill the scope anyway
            return False
        except OSError as e:
            if e.errno == 2:
                self.logError(_("Error: You are trying to connect to a non-existing port."))
            else:
                self.logError(traceback.format_exc())
            return False
        # connection succeeded: start background status polling
        self.statuscheck = True
        self.status_thread = threading.Thread(target = self.statuschecker,
                                              name = 'status thread')
        self.status_thread.start()
        return True
    def do_connect(self, l):
        """Connect to the printer: "connect [port [baudrate]]".

        Falls back to the configured port (or first detected one) and
        baudrate, persisting any new values to the rc file.
        """
        a = l.split()
        p = self.scanserial()
        port = self.settings.port
        # configured port missing from the scan: prefer a detected one
        if (port == "" or port not in p) and len(p) > 0:
            port = p[0]
        baud = self.settings.baudrate or 115200
        if len(a) > 0:
            port = a[0]
        if len(a) > 1:
            try:
                baud = int(a[1])
            except:
                self.log(_("Bad baud value '") + a[1] + _("' ignored"))
        if len(p) == 0 and not port:
            self.log(_("No serial ports detected - please specify a port"))
            return
        if len(a) == 0:
            self.log(_("No port specified - connecting to %s at %dbps") % (port, baud))
        # persist changed connection settings
        if port != self.settings.port:
            self.settings.port = port
            self.save_in_rc("set port", "set port %s" % port)
        if baud != self.settings.baudrate:
            self.settings.baudrate = baud
            self.save_in_rc("set baudrate", "set baudrate %d" % baud)
        self.connect_to_printer(port, baud, self.settings.dtr)
def help_connect(self):
self.log(_("Connect to printer"))
self.log(_("connect <port> <baudrate>"))
self.log(_("If port and baudrate are not specified, connects to first detected port at 115200bps"))
ports = self.scanserial()
if ports:
self.log(_("Available ports: "), " ".join(ports))
else:
self.log(_("No serial ports were automatically found."))
def complete_connect(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.scanserial() if i.startswith(text)]
elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
return [i for i in ["2400", "9600", "19200", "38400", "57600", "115200", "250000"] if i.startswith(text)]
else:
return []
def scanserial(self):
"""scan for available ports. return a list of device names."""
baselist = []
if os.name == "nt":
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "HARDWARE\\DEVICEMAP\\SERIALCOMM")
i = 0
while(1):
baselist += [winreg.EnumValue(key, i)[1]]
i += 1
except:
pass
for g in ['/dev/ttyUSB*', '/dev/ttyACM*', "/dev/tty.*", "/dev/cu.*", "/dev/rfcomm*"]:
baselist += glob.glob(g)
if(sys.platform!="win32" and self.settings.devicepath):
baselist += glob.glob(self.settings.devicepath)
return [p for p in baselist if self._bluetoothSerialFilter(p)]
def _bluetoothSerialFilter(self, serial):
return not ("Bluetooth" in serial or "FireFly" in serial)
    def online(self):
        """Printcore callback: announce that the printer came online."""
        self.log("\r" + _("Printer is now online"))
        self.write_prompt()
    def do_disconnect(self, l):
        """Disconnect from the printer."""
        self.p.disconnect()
    def help_disconnect(self):
        """Print help for the disconnect command."""
        self.log(_("Disconnects from the printer"))
    def do_block_until_online(self, l):
        """Poll every 0.1 s until the printer reports online (may block forever)."""
        while not self.p.online:
            time.sleep(0.1)
def help_block_until_online(self, l):
self.log(_("Blocks until printer is online"))
self.log(_("Warning: if something goes wrong, this can block pronsole forever"))
# --------------------------------------------------------------
# Printer status monitoring
# --------------------------------------------------------------
    def statuschecker_inner(self, do_monitoring = True):
        """One iteration of the background status loop.

        Disconnects after repeated write failures, polls SD progress (M27)
        and temperatures (M105, every 10th cycle), then sleeps roughly
        monitor_interval seconds in interruptible 0.25 s steps.
        """
        if self.p.online:
            if self.p.writefailures >= 4:
                self.logError(_("Disconnecting after 4 failed writes."))
                self.status_thread = None
                self.p.disconnect()
                return
            if do_monitoring:
                if self.sdprinting and not self.paused:
                    self.p.send_now("M27")
                if self.m105_waitcycles % 10 == 0:
                    self.p.send_now("M105")
            self.m105_waitcycles += 1
        cur_time = time.time()
        wait_time = 0
        while time.time() < cur_time + self.monitor_interval - 0.25:
            if not self.statuscheck:
                break
            time.sleep(0.25)
            # Safeguard: if system time changes and goes back in the past,
            # we could get stuck almost forever
            wait_time += 0.25
            if wait_time > self.monitor_interval - 0.25:
                break
        # Always sleep at least a bit, if something goes wrong with the
        # system time we'll avoid freezing the whole app this way
        time.sleep(0.25)
    def statuschecker(self):
        """Body of the background status thread; loops until kill() clears the flag."""
        while self.statuscheck:
            self.statuschecker_inner()
# --------------------------------------------------------------
# File loading handling
# --------------------------------------------------------------
    def do_load(self, filename):
        """Load a gcode file."""
        self._do_load(filename)
    def _do_load(self, filename):
        """Load *filename* into self.fgcode, logging size and duration estimate."""
        if not filename:
            self.logError(_("No file name given."))
            return
        self.log(_("Loading file: %s") % filename)
        if not os.path.exists(filename):
            self.logError(_("File not found!"))
            return
        self.load_gcode(filename)
        self.log(_("Loaded %s, %d lines.") % (filename, len(self.fgcode)))
        self.log(_("Estimated duration: %d layers, %s") % self.fgcode.estimate_duration())
    def load_gcode(self, filename, layer_callback = None, gcode = None):
        """Parse *filename* into self.fgcode.

        When *gcode* is given it is used as the container; otherwise a
        deferred LightGCode is created.
        """
        if gcode is None:
            self.fgcode = gcoder.LightGCode(deferred = True)
        else:
            self.fgcode = gcode
        self.fgcode.prepare(open(filename, "r", encoding="utf-8"),
                            get_home_pos(self.build_dimensions_list),
                            layer_callback = layer_callback)
        self.fgcode.estimate_duration()
        self.filename = filename
def complete_load(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
else:
return glob.glob("*/") + glob.glob("*.g*")
    def help_load(self):
        """Print help for the load command."""
        self.log(_("Loads a gcode file (with tab-completion)"))
def do_slice(self, l):
l = l.split()
if len(l) == 0:
self.logError(_("No file name given."))
return
settings = 0
if l[0] == "set":
settings = 1
else:
self.log(_("Slicing file: %s") % l[0])
if not(os.path.exists(l[0])):
self.logError(_("File not found!"))
return
try:
if settings:
command = self.settings.slicecommandpath+self.settings.sliceoptscommand
self.log(_("Entering slicer settings: %s") % command)
run_command(command, blocking = True)
else:
command = self.settings.slicecommandpath+self.settings.slicecommand
stl_name = l[0]
gcode_name = stl_name.replace(".stl", "_export.gcode").replace(".STL", "_export.gcode")
run_command(command,
{"$s": stl_name,
"$o": gcode_name},
blocking = True)
self.log(_("Loading sliced file."))
self.do_load(l[0].replace(".stl", "_export.gcode"))
except Exception as e:
self.logError(_("Slicing failed: %s") % e)
def complete_slice(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.stl")]
else:
return glob.glob("*/") + glob.glob("*.stl")
def help_slice(self):
self.log(_("Creates a gcode file from an stl model using the slicer (with tab-completion)"))
self.log(_("slice filename.stl - create gcode file"))
self.log(_("slice filename.stl view - create gcode file and view using skeiniso (if using skeinforge)"))
self.log(_("slice set - adjust slicer settings"))
# --------------------------------------------------------------
# Print/upload handling
# --------------------------------------------------------------
    def do_upload(self, l):
        """Upload a gcode file to the SD card: "upload <file> <target8.3>".

        Streams the file via M28/M29 while showing progress; Ctrl-C
        interrupts and cancels (a partial file may remain on the card).
        """
        names = l.split()
        if len(names) == 2:
            filename = names[0]
            targetname = names[1]
        else:
            self.logError(_("Please enter target name in 8.3 format."))
            return
        if not self.p.online:
            self.logError(_("Not connected to printer."))
            return
        self._do_load(filename)
        self.log(_("Uploading as %s") % targetname)
        self.log(_("Uploading %s") % self.filename)
        # M28 switches the firmware into "write to SD" mode
        self.p.send_now("M28 " + targetname)
        self.log(_("Press Ctrl-C to interrupt upload."))
        self.p.startprint(self.fgcode)
        try:
            sys.stdout.write(_("Progress: ") + "00.0%")
            sys.stdout.flush()
            while self.p.printing:
                time.sleep(0.5)
                sys.stdout.write("\b\b\b\b\b%04.1f%%" % (100 * float(self.p.queueindex) / len(self.p.mainqueue),))
                sys.stdout.flush()
            # M29 closes the SD file
            self.p.send_now("M29 " + targetname)
            time.sleep(0.2)
            self.p.clear = True
            self._do_ls(False)
            self.log("\b\b\b\b\b100%.")
            self.log(_("Upload completed. %s should now be on the card.") % targetname)
            return
        except (KeyboardInterrupt, Exception) as e:
            if isinstance(e, KeyboardInterrupt):
                self.logError(_("...interrupted!"))
            else:
                self.logError(_("Something wrong happened while uploading:")
                              + "\n" + traceback.format_exc())
            self.p.pause()
            self.p.send_now("M29 " + targetname)
            time.sleep(0.2)
            self.p.cancelprint()
            self.logError(_("A partial file named %s may have been written to the sd card.") % targetname)
def complete_upload(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
else:
return glob.glob("*/") + glob.glob("*.g*")
    def help_upload(self):
        """Print help for the upload command."""
        self.log(_("Uploads a gcode file to the sd card"))
def help_print(self):
if not self.fgcode:
self.log(_("Send a loaded gcode file to the printer. Load a file with the load command first."))
else:
self.log(_("Send a loaded gcode file to the printer. You have %s loaded right now.") % self.filename)
    def do_print(self, l):
        """Start printing the currently loaded gcode file over the host link."""
        if not self.fgcode:
            self.logError(_("No file loaded. Please use load first."))
            return
        if not self.p.online:
            self.logError(_("Not connected to printer."))
            return
        self.log(_("Printing %s") % self.filename)
        self.log(_("You can monitor the print with the monitor command."))
        self.sdprinting = False
        self.p.startprint(self.fgcode)
def do_pause(self, l):
if self.sdprinting:
self.p.send_now("M25")
else:
if not self.p.printing:
self.logError(_("Not printing, cannot pause."))
return
self.p.pause()
self.paused = True
    def help_pause(self):
        """Print help for the pause command."""
        self.log(_("Pauses a running print"))
    def pause(self, event = None):
        """Event-handler style wrapper around do_pause."""
        return self.do_pause(None)
def do_resume(self, l):
if not self.paused:
self.logError(_("Not paused, unable to resume. Start a print first."))
return
self.paused = False
if self.sdprinting:
self.p.send_now("M24")
return
else:
self.p.resume()
    def help_resume(self):
        """Print help for the resume command."""
        self.log(_("Resumes a paused print."))
def listfiles(self, line):
if "Begin file list" in line:
self.sdlisting = 1
elif "End file list" in line:
self.sdlisting = 0
self.recvlisteners.remove(self.listfiles)
if self.sdlisting_echo:
self.log(_("Files on SD card:"))
self.log("\n".join(self.sdfiles))
elif self.sdlisting:
self.sdfiles.append(re.sub(" \d+$","",line.strip().lower()))
    def _do_ls(self, echo):
        """Request the SD file listing (M20) via the listfiles listener.

        When *echo* is true the listing is logged once it completes.
        """
        # FIXME: this was 2, but I think it should rather be 0 as in do_upload
        self.sdlisting = 0
        self.sdlisting_echo = echo
        self.sdfiles = []
        self.recvlisteners.append(self.listfiles)
        self.p.send_now("M20")
def do_ls(self, l):
if not self.p.online:
self.logError(_("Printer is not online. Please connect to it first."))
return
self._do_ls(True)
    def help_ls(self):
        """Print help for the ls command."""
        self.log(_("Lists files on the SD card"))
    def waitforsdresponse(self, l):
        """Receive-listener for SD printing responses.

        Handles file-open failures, print start (M24 after "File selected"),
        print completion, and M27 progress ("SD printing byte a/b") which
        updates self.percentdone.
        """
        if "file.open failed" in l:
            self.logError(_("Opening file failed."))
            self.recvlisteners.remove(self.waitforsdresponse)
            return
        if "File opened" in l:
            self.log(l)
        if "File selected" in l:
            self.log(_("Starting print"))
            self.p.send_now("M24")
            self.sdprinting = True
            # self.recvlisteners.remove(self.waitforsdresponse)
            return
        if "Done printing file" in l:
            self.log(l)
            self.sdprinting = False
            self.recvlisteners.remove(self.waitforsdresponse)
            return
        if "SD printing byte" in l:
            # M27 handler
            try:
                resp = l.split()
                vals = resp[-1].split("/")
                self.percentdone = 100.0 * int(vals[0]) / int(vals[1])
            except:
                # malformed progress line: keep the previous percentage
                pass
    def do_reset(self, l):
        """Reset the printer."""
        self.p.reset()
    def help_reset(self):
        """Print help for the reset command."""
        self.log(_("Resets the printer."))
    def do_sdprint(self, l):
        """Start printing a file that is already on the SD card.

        Refreshes the SD listing first, verifies the (lowercased) name is
        present, then selects it with M23; the waitforsdresponse listener
        issues the actual M24 start.
        """
        if not self.p.online:
            self.log(_("Printer is not online. Please connect to it first."))
            return
        self._do_ls(False)
        # wait for the listing listener to remove itself (listing complete)
        while self.listfiles in self.recvlisteners:
            time.sleep(0.1)
        if l.lower() not in self.sdfiles:
            self.log(_("File is not present on card. Please upload it first."))
            return
        self.recvlisteners.append(self.waitforsdresponse)
        self.p.send_now("M23 " + l.lower())
        self.log(_("Printing file: %s from SD card.") % l.lower())
        self.log(_("Requesting SD print..."))
        time.sleep(1)
    def help_sdprint(self):
        """Print help for the sdprint command."""
        self.log(_("Print a file from the SD card. Tab completes with available file names."))
        self.log(_("sdprint filename.g"))
def complete_sdprint(self, text, line, begidx, endidx):
if not self.sdfiles and self.p.online:
self._do_ls(False)
while self.listfiles in self.recvlisteners:
time.sleep(0.1)
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.sdfiles if i.startswith(text)]
# --------------------------------------------------------------
# Printcore callbacks
# --------------------------------------------------------------
    def startcb(self, resuming = False):
        """Printcore callback fired when a print starts or resumes.

        Records the start time, sets up the ETA estimator (host prints
        only), runs the configured start command and inhibits system sleep.
        """
        self.starttime = time.time()
        if resuming:
            self.log(_("Print resumed at: %s") % format_time(self.starttime))
        else:
            self.log(_("Print started at: %s") % format_time(self.starttime))
        if not self.sdprinting:
            self.compute_eta = RemainingTimeEstimator(self.fgcode)
        else:
            # SD prints report progress via M27, not the queue index
            self.compute_eta = None
        if self.settings.start_command:
            output = get_command_output(self.settings.start_command,
                                        {"$s": str(self.filename),
                                         "$t": format_time(time.time())})
            if output:
                self.log(_("Start command output:"))
                self.log(output.rstrip())
        try:
            powerset_print_start(reason = "Preventing sleep during print")
        except:
            self.logError(_("Failed to set power settings:")
                          + "\n" + traceback.format_exc())
    def endcb(self):
        """Printcore callback fired when a print ends.

        Restores power settings, logs the duration, updates filament usage
        totals and spool lengths, and runs the configured final command.
        """
        try:
            powerset_print_stop()
        except:
            self.logError(_("Failed to set power settings:")
                          + "\n" + traceback.format_exc())
        if self.p.queueindex == 0:
            print_duration = int(time.time() - self.starttime + self.extra_print_time)
            self.log(_("Print ended at: %(end_time)s and took %(duration)s") % {"end_time": format_time(time.time()),
                                                                               "duration": format_duration(print_duration)})
            # Update total filament length used
            if self.fgcode is not None:
                new_total = self.settings.total_filament_used + self.fgcode.filament_length
                self.set("total_filament_used", new_total)
                # Update the length of filament in the spools
                self.spool_manager.refresh()
                if(len(self.fgcode.filament_length_multi)>1):
                    # multi-extruder print: subtract per-extruder lengths
                    for i in enumerate(self.fgcode.filament_length_multi):
                        if self.spool_manager.getSpoolName(i[0]) != None:
                            self.spool_manager.editLength(
                                -i[1], extruder = i[0])
                else:
                    if self.spool_manager.getSpoolName(0) != None:
                        self.spool_manager.editLength(
                            -self.fgcode.filament_length, extruder = 0)
            if not self.settings.final_command:
                return
            output = get_command_output(self.settings.final_command,
                                        {"$s": str(self.filename),
                                         "$t": format_duration(print_duration)})
            if output:
                self.log(_("Final command output:"))
                self.log(output.rstrip())
    def recvcb_report(self, l):
        """Classify a received line as a position and/or temperature report.

        Returns a REPORT_* bitmask; REPORT_MANUAL is set when the report
        answers an explicit user M114/M105 (tracked by precmd counters).
        """
        isreport = REPORT_NONE
        if "ok C:" in l or " Count " in l \
           or ("X:" in l and len(gcoder.m114_exp.findall(l)) == 6):
            self.posreport = l
            isreport = REPORT_POS
            if self.userm114 > 0:
                self.userm114 -= 1
                isreport |= REPORT_MANUAL
        if "ok T:" in l or tempreading_exp.findall(l):
            self.tempreadings = l
            # NOTE(review): plain assignment discards any REPORT_POS/MANUAL
            # bits set above when a line carries both reports -- confirm
            # this is intended before changing it
            isreport = REPORT_TEMP
            if self.userm105 > 0:
                self.userm105 -= 1
                isreport |= REPORT_MANUAL
            else:
                self.m105_waitcycles = 0
        return isreport
    def recvcb_actions(self, l):
        """Handle firmware-initiated lines; return True when handled.

        "!!"-prefixed lines are fatal errors (pause + log); "//"-prefixed
        lines may carry "action:" commands (pause/cancel/resume/disconnect).
        """
        if l.startswith("!!"):
            self.do_pause(None)
            msg = l.split(" ", 1)
            if len(msg) > 1 and self.silent is False: self.logError(msg[1].ljust(15))
            sys.stdout.write(self.promptf())
            sys.stdout.flush()
            return True
        elif l.startswith("//"):
            command = l.split(" ", 1)
            if len(command) > 1:
                command = command[1]
                self.log(_("Received command %s") % command)
                command = command.split(":")
                if len(command) == 2 and command[0] == "action":
                    command = command[1]
                    if command in ["pause", "cancel"]:
                        self.do_pause(None)
                        sys.stdout.write(self.promptf())
                        sys.stdout.flush()
                        return True
                    elif command == "resume":
                        self.do_resume(None)
                        sys.stdout.write(self.promptf())
                        sys.stdout.flush()
                        return True
                    elif command == "disconnect":
                        self.do_disconnect(None)
                        sys.stdout.write(self.promptf())
                        sys.stdout.flush()
                        return True
        return False
    def recvcb(self, l):
        """Printcore receive callback: dispatch each incoming line.

        Feeds registered listeners, then firmware actions, then report
        classification; echoes noteworthy lines unless silenced.
        """
        l = l.rstrip()
        for listener in self.recvlisteners:
            listener(l)
        if not self.recvcb_actions(l):
            report_type = self.recvcb_report(l)
            if report_type & REPORT_TEMP:
                self.status.update_tempreading(l)
            # only echo lines that are not noise (wait/ignored patterns) and
            # either are not reports or were explicitly requested by the user
            if not self.lineignorepattern.match(l) and l[:4] != "wait" and not self.sdlisting \
               and not self.monitoring and (report_type == REPORT_NONE or report_type & REPORT_MANUAL):
                if l[:5] == "echo:":
                    l = l[5:].lstrip()
                if self.silent is False: self.log("\r" + l.ljust(15))
                sys.stdout.write(self.promptf())
                sys.stdout.flush()
def layer_change_cb(self, newlayer):
layerz = self.fgcode.all_layers[newlayer].z
if layerz is not None:
self.curlayer = layerz
if self.compute_eta:
secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
self.compute_eta.update_layer(newlayer, secondselapsed)
    def get_eta(self):
        """Return (seconds_remaining, seconds_estimate, progress).

        NOTE(review): progress is a 0..1 fraction for SD prints/uploads but
        the raw queue index for host prints -- callers appear to account
        for this; confirm before normalizing.
        """
        if self.sdprinting or self.uploading:
            if self.uploading:
                fractioncomplete = float(self.p.queueindex) / len(self.p.mainqueue)
            else:
                fractioncomplete = float(self.percentdone / 100.0)
            secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
            # Prevent division by zero
            secondsestimate = secondselapsed / max(fractioncomplete, 0.000001)
            secondsremain = secondsestimate - secondselapsed
            progress = fractioncomplete
        elif self.compute_eta is not None:
            secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
            secondsremain, secondsestimate = self.compute_eta(self.p.queueindex, secondselapsed)
            progress = self.p.queueindex
        else:
            # not printing: placeholder values
            secondsremain, secondsestimate, progress = 1, 1, 0
        return secondsremain, secondsestimate, progress
def do_eta(self, l):
if not self.p.printing:
self.logError(_("Printer is not currently printing. No ETA available."))
else:
secondsremain, secondsestimate, progress = self.get_eta()
eta = _("Est: %s of %s remaining") % (format_duration(secondsremain),
format_duration(secondsestimate))
self.log(eta.strip())
    def help_eta(self):
        """Help text for the "eta" command."""
        self.log(_("Displays estimated remaining print time."))
# --------------------------------------------------------------
# Temperature handling
# --------------------------------------------------------------
    def set_temp_preset(self, key, value):
        """Refresh the cached hotend or bed temperature presets.

        Keys starting with "bed" update the bed presets; anything else
        updates the hotend presets. Values are re-read from settings.
        """
        if not key.startswith("bed"):
            self.temps["PLA"] = str(self.settings.temperature_pla)
            self.temps["ABS"] = str(self.settings.temperature_abs)
            self.log(_("Hotend temperature presets updated, PLA:%s, ABS:%s") % (self.temps["PLA"], self.temps["ABS"]))
        else:
            self.bedtemps["PLA"] = str(self.settings.bedtemp_pla)
            self.bedtemps["ABS"] = str(self.settings.bedtemp_abs)
            self.log(_("Bed temperature presets updated, PLA:%s, ABS:%s") % (self.bedtemps["PLA"], self.bedtemps["ABS"]))
    def tempcb(self, l):
        """Receive listener: pretty-print M105 temperature report lines."""
        if "T:" in l:
            self.log(l.strip().replace("T", "Hotend").replace("B", "Bed").replace("ok ", ""))
def do_gettemp(self, l):
if "dynamic" in l:
self.dynamic_temp = True
if self.p.online:
self.p.send_now("M105")
time.sleep(0.75)
if not self.status.bed_enabled:
self.log(_("Hotend: %s%s/%s%s") % (self.status.extruder_temp, DEG, self.status.extruder_temp_target, DEG))
else:
self.log(_("Hotend: %s%s/%s%s") % (self.status.extruder_temp, DEG, self.status.extruder_temp_target, DEG))
self.log(_("Bed: %s%s/%s%s") % (self.status.bed_temp, DEG, self.status.bed_temp_target, DEG))
    def help_gettemp(self):
        """Help text for the "gettemp" command."""
        self.log(_("Read the extruder and bed temperature."))
    def do_settemp(self, l):
        """Set the hotend target temperature (M104).

        Accepts a number in degrees Celsius or one of the preset
        keywords from self.temps; asks for confirmation above 250.
        """
        l = l.lower().replace(", ", ".")
        # Substitute preset keywords (e.g. "pla") by their configured values.
        for i in self.temps.keys():
            l = l.replace(i, self.temps[i])
        try:
            f = float(l)
        except:
            self.logError(_("You must enter a temperature."))
            return
        if f >= 0:
            if f > 250:
                self.log(_("%s is a high temperature to set your extruder to. Are you sure you want to do that?") % f)
                if not self.confirm():
                    return
            if self.p.online:
                self.p.send_now("M104 S" + l)
                self.log(_("Setting hotend temperature to %s degrees Celsius.") % f)
            else:
                self.logError(_("Printer is not online."))
        else:
            self.logError(_("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0."))
    def help_settemp(self):
        """Help text for the "settemp" command, listing the presets."""
        self.log(_("Sets the hotend temperature to the value entered."))
        self.log(_("Enter either a temperature in celsius or one of the following keywords"))
        self.log(', '.join('%s (%s)'%kv for kv in self.temps.items()))
def complete_settemp(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.temps.keys() if i.startswith(text)]
def do_bedtemp(self, l):
f = None
try:
l = l.lower().replace(", ", ".")
for i in self.bedtemps.keys():
l = l.replace(i, self.bedtemps[i])
f = float(l)
except:
self.logError(_("You must enter a temperature."))
if f is not None and f >= 0:
if self.p.online:
self.p.send_now("M140 S" + l)
self.log(_("Setting bed temperature to %s degrees Celsius.") % f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the bed off entirely, set its temperature to 0."))
    def help_bedtemp(self):
        """Help text for the "bedtemp" command, listing the presets."""
        self.log(_("Sets the bed temperature to the value entered."))
        self.log(_("Enter either a temperature in celsius or one of the following keywords"))
        self.log(", ".join([i + "(" + self.bedtemps[i] + ")" for i in self.bedtemps.keys()]))
def complete_bedtemp(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.bedtemps.keys() if i.startswith(text)]
def do_monitor(self, l):
interval = 5
if not self.p.online:
self.logError(_("Printer is not online. Please connect to it first."))
return
if not (self.p.printing or self.sdprinting):
self.logError(_("Printer is not printing. Please print something before monitoring."))
return
self.log(_("Monitoring printer, use ^C to interrupt."))
if len(l):
try:
interval = float(l)
except:
self.logError(_("Invalid period given."))
self.log(_("Updating values every %f seconds.") % (interval,))
self.monitoring = 1
prev_msg_len = 0
try:
while True:
self.p.send_now("M105")
if self.sdprinting:
self.p.send_now("M27")
time.sleep(interval)
if self.p.printing:
preface = _("Print progress: ")
progress = 100 * float(self.p.queueindex) / len(self.p.mainqueue)
elif self.sdprinting:
preface = _("SD print progress: ")
progress = self.percentdone
prev_msg = preface + "%.1f%%" % progress
if self.silent is False:
sys.stdout.write("\r" + prev_msg.ljust(prev_msg_len))
sys.stdout.flush()
prev_msg_len = len(prev_msg)
except KeyboardInterrupt:
if self.silent is False: self.log(_("Done monitoring."))
self.monitoring = 0
    def help_monitor(self):
        """Help text for the "monitor" command."""
        self.log(_("Monitor a machine's temperatures and an SD print's status."))
        self.log(_("monitor - Reports temperature and SD print status (if SD printing) every 5 seconds"))
        self.log(_("monitor 2 - Reports temperature and SD print status (if SD printing) every 2 seconds"))
# --------------------------------------------------------------
# Manual printer controls
# --------------------------------------------------------------
def do_tool(self, l):
tool = None
try:
tool = int(l.lower().strip())
except:
self.logError(_("You must specify the tool index as an integer."))
if tool is not None and tool >= 0:
if self.p.online:
self.p.send_now("T%d" % tool)
self.log(_("Using tool %d.") % tool)
self.current_tool = tool
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative tool numbers."))
    def help_tool(self):
        """Help text for the "tool" command."""
        self.log(_("Switches to the specified tool (e.g. doing tool 1 will emit a T1 G-Code)."))
    def do_move(self, l):
        """Jog one axis: "move <axis> <distance> [feedrate]".

        Refuses to run while printing or offline. Uses the configured
        default feedrate for the axis unless a third argument is given,
        and wraps a relative G0 in G91/G90.
        """
        if len(l.split()) < 2:
            self.logError(_("No move specified."))
            return
        if self.p.printing:
            self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
            return
        if not self.p.online:
            self.logError(_("Printer is not online. Unable to move."))
            return
        l = l.split()
        if l[0].lower() == "x":
            feed = self.settings.xy_feedrate
            axis = "X"
        elif l[0].lower() == "y":
            feed = self.settings.xy_feedrate
            axis = "Y"
        elif l[0].lower() == "z":
            feed = self.settings.z_feedrate
            axis = "Z"
        elif l[0].lower() == "e":
            feed = self.settings.e_feedrate
            axis = "E"
        else:
            self.logError(_("Unknown axis."))
            return
        try:
            float(l[1])  # check if distance can be a float
        except:
            self.logError(_("Invalid distance"))
            return
        try:
            # Optional third argument overrides the default feedrate.
            feed = int(l[2])
        except:
            pass
        self.p.send_now("G91")
        self.p.send_now("G0 " + axis + str(l[1]) + " F" + str(feed))
        self.p.send_now("G90")
    def help_move(self):
        """Help text for the "move" command."""
        self.log(_("Move an axis. Specify the name of the axis and the amount. "))
        self.log(_("move X 10 - will move the X axis forward by 10 mm at %s mm/min (default XY speed)") % self.settings.xy_feedrate)
        self.log(_("move Y 10 5000 - will move the Y axis forward by 10 mm at 5000 mm/min"))
        self.log(_("move Z -1 - will move the Z axis down by 1mm at %s mm/min (default Z speed)") % self.settings.z_feedrate)
        self.log(_("Common amounts are in the tabcomplete list."))
def complete_move(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in ["X ", "Y ", "Z ", "E "] if i.lower().startswith(text)]
elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
base = line.split()[-1]
rlen = 0
if base.startswith("-"):
rlen = 1
if line[-1] == " ":
base = ""
return [i[rlen:] for i in ["-100", "-10", "-1", "-0.1", "100", "10", "1", "0.1", "-50", "-5", "-0.5", "50", "5", "0.5", "-200", "-20", "-2", "-0.2", "200", "20", "2", "0.2"] if i.startswith(base)]
else:
return []
    def do_extrude(self, l, override = None, overridefeed = 300):
        """Extrude filament: "extrude [length] [feedrate]".

        Negative lengths retract. *override*/*overridefeed* let other
        commands (see do_reverse) reuse the parsing with fixed values.
        """
        length = self.settings.default_extrusion  # default extrusion length
        feed = self.settings.e_feedrate  # default speed
        if not self.p.online:
            self.logError(_("Printer is not online. Unable to extrude."))
            return
        if self.p.printing:
            self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
            return
        ls = l.split()
        if len(ls):
            try:
                length = float(ls[0])
            except:
                self.logError(_("Invalid length given."))
        if len(ls) > 1:
            try:
                feed = int(ls[1])
            except:
                self.logError(_("Invalid speed given."))
        if override is not None:
            length = override
            feed = overridefeed
        self.do_extrude_final(length, feed)
def do_extrude_final(self, length, feed):
if length > 0:
self.log(_("Extruding %f mm of filament.") % (length,))
elif length < 0:
self.log(_("Reversing %f mm of filament.") % (-length,))
else:
self.log(_("Length is 0, not doing anything."))
self.p.send_now("G91")
self.p.send_now("G1 E" + str(length) + " F" + str(feed))
self.p.send_now("G90")
# Update the length of filament in the current spool
self.spool_manager.refresh()
if self.spool_manager.getSpoolName(self.current_tool) != None:
self.spool_manager.editLength(-length,
extruder = self.current_tool)
    def help_extrude(self):
        """Help text for the "extrude" command."""
        self.log(_("Extrudes a length of filament, 5 mm by default, or the number of mm given as a parameter"))
        self.log(_("extrude - extrudes 5 mm of filament at 300 mm/min (5 mm/s)"))
        self.log(_("extrude 20 - extrudes 20 mm of filament at 300 mm/min (5 mm/s)"))
        self.log(_("extrude -5 - REVERSES 5 mm of filament at 300 mm/min (5 mm/s)"))
        self.log(_("extrude 10 210 - extrudes 10 mm of filament at 210 mm/min (3.5 mm/s)"))
    def do_reverse(self, l):
        """Retract filament: "reverse [length] [feedrate]".

        Parses like do_extrude, then delegates to it with the length
        negated (a negative argument therefore extrudes).
        """
        length = self.settings.default_extrusion  # default extrusion length
        feed = self.settings.e_feedrate  # default speed
        if not self.p.online:
            self.logError(_("Printer is not online. Unable to reverse."))
            return
        if self.p.printing:
            self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
            return
        ls = l.split()
        if len(ls):
            try:
                length = float(ls[0])
            except:
                self.logError(_("Invalid length given."))
        if len(ls) > 1:
            try:
                feed = int(ls[1])
            except:
                self.logError(_("Invalid speed given."))
        self.do_extrude("", -length, feed)
    def help_reverse(self):
        """Help text for the "reverse" command."""
        self.log(_("Reverses the extruder, 5 mm by default, or the number of mm given as a parameter"))
        self.log(_("reverse - reverses 5 mm of filament at 300mm/min (5 mm/s)"))
        self.log(_("reverse 20 - reverses 20 mm of filament at 300mm/min (5 mm/s)"))
        self.log(_("reverse 10 210 - extrudes 10 mm of filament at 210 mm/min (3.5 mm/s)"))
        self.log(_("reverse -5 - EXTRUDES 5 mm of filament at 300 mm/min (5 mm/s)"))
    def do_home(self, l):
        """Home axes: "home [x][y][z][e]"; no argument homes everything.

        X/Y/Z are homed with G28; "e" only zeroes the extruder
        coordinate with G92 (there is no endstop to home against).
        """
        if not self.p.online:
            self.logError(_("Printer is not online. Unable to move."))
            return
        if self.p.printing:
            self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
            return
        if "x" in l.lower():
            self.p.send_now("G28 X0")
        if "y" in l.lower():
            self.p.send_now("G28 Y0")
        if "z" in l.lower():
            self.p.send_now("G28 Z0")
        if "e" in l.lower():
            self.p.send_now("G92 E0")
        if not len(l):
            self.p.send_now("G28")
            self.p.send_now("G92 E0")
    def help_home(self):
        """Help text for the "home" command."""
        self.log(_("Homes the printer"))
        self.log(_("home - homes all axes and zeroes the extruder(Using G28 and G92)"))
        self.log(_("home xy - homes x and y axes (Using G28)"))
        self.log(_("home z - homes z axis only (Using G28)"))
        self.log(_("home e - set extruder position to zero (Using G92)"))
        self.log(_("home xyze - homes all axes and zeroes the extruder (Using G28 and G92)"))
    def do_off(self, l):
        """Command wrapper around off()."""
        self.off()
    def off(self, ignore = None):
        """Shut the printer down: pause any running print, then disable
        motors (M84), heaters (M104/M140), fan (M107) and the power
        supply (M81).

        *ignore* exists so this can be used directly as an event
        callback; its value is unused.
        """
        if self.p.online:
            if self.p.printing: self.pause(None)
            self.log(_("; Motors off"))
            self.onecmd("M84")
            self.log(_("; Extruder off"))
            self.onecmd("M104 S0")
            self.log(_("; Heatbed off"))
            self.onecmd("M140 S0")
            self.log(_("; Fan off"))
            self.onecmd("M107")
            self.log(_("; Power supply off"))
            self.onecmd("M81")
        else:
            self.logError(_("Printer is not online. Unable to turn it off."))
    def help_off(self):
        """Help text for the "off" command."""
        self.log(_("Turns off everything on the printer"))
# --------------------------------------------------------------
# Host commands handling
# --------------------------------------------------------------
    def process_host_command(self, command):
        """Override host command handling.

        Lines of the form ";@<cmd>" embedded in the G-code stream are
        executed as pronsole commands on the host.
        """
        command = command.lstrip()
        if command.startswith(";@"):
            command = command[2:]
            self.log(_("G-Code calling host command \"%s\"") % command)
            self.onecmd(command)
def do_run_script(self, l):
p = run_command(l, {"$s": str(self.filename)}, stdout = subprocess.PIPE, universal_newlines = True)
for line in p.stdout.readlines():
self.log("<< " + line.strip())
    def help_run_script(self):
        """Help text for the "run_script" command."""
        self.log(_("Runs a custom script. Current gcode filename can be given using $s token."))
def do_run_gcode_script(self, l):
try:
self.fgcode = RGSGCoder(l)
self.do_print(None)
except BaseException as e:
self.logError(traceback.format_exc())
    def help_run_gcode_script(self):
        """Help text for the "run_gcode_script" command."""
        self.log(_("Runs a custom script which output gcode which will in turn be executed. Current gcode filename can be given using $s token."))
def complete_run_gcode_script(self, text, line, begidx, endidx):
words = line.split()
sep = os.path.sep
if len(words) < 2:
return ['.' + sep , sep]
corrected_text = words[-1] # text arg skips leading '/', include it
if corrected_text == '.':
return ['./'] # guide user that in linux, PATH does not include . and relative executed scripts must start with ./
prefix_len = len(corrected_text) - len(text)
res = [((f + sep) if os.path.isdir(f) else f)[prefix_len:] #skip unskipped prefix_len
for f in glob.glob(corrected_text + '*')]
return res
| 73,659 | Python | .py | 1,611 | 34.136561 | 374 | 0.536665 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,367 | stltool.py | kliment_Printrun/printrun/stltool.py | # coding: utf-8
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import struct
import math
import logging
import numpy
import numpy.linalg
def normalize(v):
    """Return the unit-length vector pointing in the direction of *v*."""
    length = numpy.linalg.norm(v)
    return v / length
def genfacet(v):
    """Build a (normal, vertices) facet tuple from three vertices.

    The normal is the normalized cross product of the two edge vectors;
    a degenerate (zero-area) triangle keeps the raw zero vector instead
    of dividing by zero.
    """
    edge_a = v[1] - v[0]
    edge_b = v[2] - v[1]
    raw_normal = numpy.cross(edge_a, edge_b)
    length = numpy.linalg.norm(raw_normal)
    if length == 0:
        length = 1
    return (raw_normal / length, v)
# 4x4 identity matrix, used as the default (no-op) transform below.
I = numpy.identity(4)
def homogeneous(v, w = 1):
    """Return *v* extended with an extra homogeneous coordinate *w*."""
    return numpy.append(v, w)
def applymatrix(facet, matrix = I):
    """Transform every vertex of *facet* by *matrix* and rebuild the
    facet (recomputing its normal)."""
    transformed = [matrix.dot(homogeneous(vertex))[:3] for vertex in facet[1]]
    return genfacet(transformed)
def ray_triangle_intersection(ray_near, ray_dir, v123):
    """
    Möller–Trumbore ray/triangle intersection in pure python.

    Returns (True, distance) when the ray hits the triangle strictly in
    front of its origin, (False, None) otherwise.
    Based on http://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
    """
    vert1, vert2, vert3 = v123
    epsilon = 0.000001
    edge1 = vert2 - vert1
    edge2 = vert3 - vert1
    p = numpy.cross(ray_dir, edge2)
    determinant = edge1.dot(p)
    # A near-zero determinant means the ray is parallel to the triangle.
    if abs(determinant) < epsilon:
        return False, None
    inv_det = 1. / determinant
    t_vec = ray_near - vert1
    u = t_vec.dot(p) * inv_det
    if u < 0. or u > 1.:
        return False, None
    q = numpy.cross(t_vec, edge1)
    v = ray_dir.dot(q) * inv_det
    if v < 0. or u + v > 1.:
        return False, None
    distance = edge2.dot(q) * inv_det
    if distance < epsilon:
        return False, None
    return True, distance
def ray_rectangle_intersection(ray_near, ray_dir, p0, p1, p2, p3):
    """Check whether a ray hits the quad (p0, p1, p2, p3) by splitting
    it into the two triangles (p0, p1, p2) and (p0, p2, p3)."""
    hit_a, _ = ray_triangle_intersection(ray_near, ray_dir, (p0, p1, p2))
    hit_b, _ = ray_triangle_intersection(ray_near, ray_dir, (p0, p2, p3))
    return hit_a or hit_b
def ray_box_intersection(ray_near, ray_dir, p0, p1):
    """Check whether a ray hits the axis-aligned box spanned by the
    opposite corners *p0* and *p1*."""
    x0, y0, z0 = p0[:]
    x1, y1, z1 = p1[:]
    # The six faces of the box, each given by its four corners.
    faces = [((x0, y0, z0), (x1, y0, z0), (x1, y1, z0), (x0, y1, z0)),
             ((x0, y0, z1), (x1, y0, z1), (x1, y1, z1), (x0, y1, z1)),
             ((x0, y0, z0), (x1, y0, z0), (x1, y0, z1), (x0, y0, z1)),
             ((x0, y1, z0), (x1, y1, z0), (x1, y1, z1), (x0, y1, z1)),
             ((x0, y0, z0), (x0, y1, z0), (x0, y1, z1), (x0, y0, z1)),
             ((x1, y0, z0), (x1, y1, z0), (x1, y1, z1), (x1, y0, z1))]
    for face in faces:
        corners = [numpy.array(point) for point in face]
        if ray_rectangle_intersection(ray_near, ray_dir, *corners):
            return True
    return False
def emitstl(filename, facets = None, objname = "stltool_export", binary = True):
    """Write *facets* to *filename* as an STL file.

    filename -- output path; nothing happens when None
    facets -- sequence of (normal, (v1, v2, v3)) facets
    objname -- solid name recorded in ASCII output
    binary -- write the 80-byte-header binary format when True,
              the "solid ... endsolid" ASCII format otherwise
    """
    if filename is None:
        return
    if facets is None:
        # Bugfix: the old signature used a mutable default argument ([]).
        facets = []
    if binary:
        with open(filename, "wb") as f:
            # 80 zero bytes of header, then the little-endian facet count.
            buf = b"".join([b"\0"] * 80)
            buf += struct.pack("<I", len(facets))
            # Each facet: 12 floats (normal + 3 vertices) + 2-byte attribute count.
            facetformat = struct.Struct("<ffffffffffffH")
            for facet in facets:
                l = list(facet[0][:])
                for vertex in facet[1]:
                    l += list(vertex[:])
                l.append(0)
                buf += facetformat.pack(*l)
            f.write(buf)
    else:
        with open(filename, "w") as f:
            f.write("solid " + objname + "\n")
            for i in facets:
                f.write(" facet normal " + " ".join(map(str, i[0])) + "\n  outer loop\n")
                for j in i[1]:
                    f.write("   vertex " + " ".join(map(str, j)) + "\n")
                f.write("  endloop" + "\n")
                f.write(" endfacet" + "\n")
            f.write("endsolid " + objname + "\n")
class stl:
    """In-memory STL model loaded from an ASCII or binary file.

    Facets are stored in self.facets as (normal, vertices) pairs;
    facetsminz/facetsmaxz additionally pair each facet with its lowest/
    highest Z coordinate (used by the slicing experiment in __main__).
    """

    # Cached bounding box, computed lazily by _get_dims.
    _dims = None

    def _get_dims(self):
        """Compute and cache [minx, maxx, miny, maxy, minz, maxz]."""
        if self._dims is None:
            minx = float("inf")
            miny = float("inf")
            minz = float("inf")
            maxx = float("-inf")
            maxy = float("-inf")
            maxz = float("-inf")
            for normal, facet in self.facets:
                for vert in facet:
                    if vert[0] < minx:
                        minx = vert[0]
                    if vert[1] < miny:
                        miny = vert[1]
                    if vert[2] < minz:
                        minz = vert[2]
                    if vert[0] > maxx:
                        maxx = vert[0]
                    if vert[1] > maxy:
                        maxy = vert[1]
                    if vert[2] > maxz:
                        maxz = vert[2]
            self._dims = [minx, maxx, miny, maxy, minz, maxz]
        return self._dims
    dims = property(_get_dims)

    def __init__(self, filename = None):
        """Parse *filename* when given; otherwise build an empty model.

        The file is sniffed for ASCII markers ("facet normal" / "outer
        loop" near the start); anything else is parsed as binary STL.
        """
        self.facet = (numpy.zeros(3), (numpy.zeros(3), numpy.zeros(3), numpy.zeros(3)))
        self.facets = []
        self.facetsminz = []
        self.facetsmaxz = []
        self.name = ""
        self.insolid = 0
        self.infacet = 0
        self.inloop = 0
        self.facetloc = 0
        if filename is None:
            return
        with open(filename,encoding="ascii",errors="ignore") as f:
            data = f.read()
            if "facet normal" in data[1:300] and "outer loop" in data[1:300]:
                lines = data.split("\n")
                for line in lines:
                    # parseline returns 0 on "endsolid", stopping the parse.
                    if not self.parseline(line):
                        return
            else:
                logging.warning(_("Not an ascii stl solid - attempting to parse as binary"))
                f = open(filename, "rb")
                # Read the 80-byte header plus the 4-byte facet count,
                # looping to cope with short reads.
                buf = f.read(84)
                while len(buf) < 84:
                    newdata = f.read(84 - len(buf))
                    if not len(newdata):
                        break
                    buf += newdata
                facetcount = struct.unpack_from("<I", buf, 80)
                # 12 little-endian floats + 2-byte attribute count per facet.
                facetformat = struct.Struct("<ffffffffffffH")
                for i in range(facetcount[0]):
                    buf = f.read(50)
                    while len(buf) < 50:
                        newdata = f.read(50 - len(buf))
                        if not len(newdata):
                            break
                        buf += newdata
                    fd = list(facetformat.unpack(buf))
                    # NOTE(review): "soloid" looks like a typo for "solid";
                    # the string ends up in ASCII exports of binary models.
                    self.name = "binary soloid"
                    facet = [fd[:3], [fd[3:6], fd[6:9], fd[9:12]]]
                    self.facets.append(facet)
                    self.facetsminz.append((min(x[2] for x in facet[1]), facet))
                    self.facetsmaxz.append((max(x[2] for x in facet[1]), facet))
                f.close()
                return

    def intersect_box(self, ray_near, ray_far):
        """Check whether the ray through *ray_near*/*ray_far* hits the
        model's bounding box."""
        ray_near = numpy.array(ray_near)
        ray_far = numpy.array(ray_far)
        ray_dir = normalize(ray_far - ray_near)
        x0, x1, y0, y1, z0, z1 = self.dims
        p0 = numpy.array([x0, y0, z0])
        p1 = numpy.array([x1, y1, z1])
        return ray_box_intersection(ray_near, ray_dir, p0, p1)

    def intersect(self, ray_near, ray_far):
        """Return (facet_index, distance) of the closest facet hit by
        the ray, or (None, inf) when nothing is hit."""
        ray_near = numpy.array(ray_near)
        ray_far = numpy.array(ray_far)
        ray_dir = normalize(ray_far - ray_near)
        best_facet = None
        best_dist = float("inf")
        for facet_i, (normal, facet) in enumerate(self.facets):
            match, dist = ray_triangle_intersection(ray_near, ray_dir, facet)
            if match and dist < best_dist:
                best_facet = facet_i
                best_dist = dist
        return best_facet, best_dist

    def rebase(self, facet_i):
        """Return a transformed copy of the model laid flat on facet
        *facet_i* (that facet's plane becomes Z=0, its first vertex the
        origin)."""
        normal, facet = self.facets[facet_i]
        # Gram-Schmidt orthonormal basis from the facet's edge vectors.
        u1 = facet[1] - facet[0]
        v2 = facet[2] - facet[0]
        n1 = u1.dot(u1)
        e1 = u1 / math.sqrt(n1)
        u2 = v2 - u1 * v2.dot(u1) / n1
        e2 = u2 / numpy.linalg.norm(u2)
        e3 = numpy.cross(e1, e2)
        # Ensure Z direction if opposed to the normal
        if normal.dot(e3) > 0:
            e2 = - e2
            e3 = - e3
        matrix = [[e1[0], e2[0], e3[0], 0],
                  [e1[1], e2[1], e3[1], 0],
                  [e1[2], e2[2], e3[2], 0],
                  [0, 0, 0, 1]]
        matrix = numpy.array(matrix)
        # Inverse change of basis matrix
        matrix = numpy.linalg.inv(matrix)
        # Set first vertex of facet as origin
        neworig = matrix.dot(homogeneous(facet[0]))
        matrix[:3, 3] = -neworig[:3]
        newmodel = self.transform(matrix)
        return newmodel

    def cut(self, axis, direction, dist):
        """Return a copy clipped against the plane axis=dist.

        direction 1 drops facets entirely beyond the plane in the
        positive direction (and clamps straddling vertices to it);
        -1 does the same on the negative side.
        """
        s = stl()
        s.facets = []
        f = min if direction == 1 else max
        for _, facet in self.facets:
            minval = f([vertex[axis] for vertex in facet])
            # Facet entirely beyond the cutting plane: discard it.
            if direction * minval > direction * dist:
                continue
            vertices = []
            for vertex in facet:
                vertex = numpy.copy(vertex)
                # Clamp vertices that stick out past the plane onto it.
                if direction * (vertex[axis] - dist) > 0:
                    vertex[axis] = dist
                vertices.append(vertex)
            s.facets.append(genfacet(vertices))
        s.insolid = 0
        s.infacet = 0
        s.inloop = 0
        s.facetloc = 0
        s.name = self.name
        for facet in s.facets:
            s.facetsminz += [(min(x[2] for x in facet[1]), facet)]
            s.facetsmaxz += [(max(x[2] for x in facet[1]), facet)]
        return s

    def translation_matrix(self, v):
        """Homogeneous 4x4 translation matrix for offset vector *v*."""
        matrix = [[1, 0, 0, v[0]],
                  [0, 1, 0, v[1]],
                  [0, 0, 1, v[2]],
                  [0, 0, 0, 1]
                  ]
        return numpy.array(matrix)

    def translate(self, v = [0, 0, 0]):
        """Return a copy of the model translated by *v*."""
        return self.transform(self.translation_matrix(v))

    def rotation_matrix(self, v):
        """Combined rotation matrix from the three angles (degrees) in *v*.

        NOTE(review): the local names are confusing — v[2] rotates about
        Z, but v[0] is used for the X-axis matrix (named "y") and v[1]
        for the Y-axis matrix (named "x"); confirm intended axis order
        before relying on it.
        """
        z = v[2]
        matrix1 = [[math.cos(math.radians(z)), -math.sin(math.radians(z)), 0, 0],
                   [math.sin(math.radians(z)), math.cos(math.radians(z)), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]
                   ]
        matrix1 = numpy.array(matrix1)
        y = v[0]
        matrix2 = [[1, 0, 0, 0],
                   [0, math.cos(math.radians(y)), -math.sin(math.radians(y)), 0],
                   [0, math.sin(math.radians(y)), math.cos(math.radians(y)), 0],
                   [0, 0, 0, 1]
                   ]
        matrix2 = numpy.array(matrix2)
        x = v[1]
        matrix3 = [[math.cos(math.radians(x)), 0, -math.sin(math.radians(x)), 0],
                   [0, 1, 0, 0],
                   [math.sin(math.radians(x)), 0, math.cos(math.radians(x)), 0],
                   [0, 0, 0, 1]
                   ]
        matrix3 = numpy.array(matrix3)
        return matrix3.dot(matrix2.dot(matrix1))

    def rotate(self, v = [0, 0, 0]):
        """Return a copy of the model rotated by the angles in *v*."""
        return self.transform(self.rotation_matrix(v))

    def scale_matrix(self, v):
        """Homogeneous 4x4 scaling matrix with per-axis factors *v*."""
        matrix = [[v[0], 0, 0, 0],
                  [0, v[1], 0, 0],
                  [0, 0, v[2], 0],
                  [0, 0, 0, 1]
                  ]
        return numpy.array(matrix)

    def scale(self, v = [0, 0, 0]):
        """Return a copy of the model scaled by the factors in *v*."""
        return self.transform(self.scale_matrix(v))

    def transform(self, m = I):
        """Return a new stl with every facet transformed by matrix *m*
        (normals are recomputed by applymatrix)."""
        s = stl()
        s.facets = [applymatrix(i, m) for i in self.facets]
        s.insolid = 0
        s.infacet = 0
        s.inloop = 0
        s.facetloc = 0
        s.name = self.name
        for facet in s.facets:
            s.facetsminz += [(min(x[2] for x in facet[1]), facet)]
            s.facetsmaxz += [(max(x[2] for x in facet[1]), facet)]
        return s

    def export(self, f = sys.stdout):
        """Write the model to file object *f* in ASCII STL format."""
        f.write("solid " + self.name + "\n")
        for i in self.facets:
            f.write(" facet normal " + " ".join(map(str, i[0])) + "\n")
            f.write("  outer loop" + "\n")
            for j in i[1]:
                f.write("   vertex " + " ".join(map(str, j)) + "\n")
            f.write("  endloop" + "\n")
            f.write(" endfacet" + "\n")
        f.write("endsolid " + self.name + "\n")
        f.flush()

    def parseline(self, l):
        """Consume one line of ASCII STL.

        Returns 0 on "endsolid" (to stop the caller's loop) and 1
        otherwise. Maintains the small state machine in
        insolid/infacet/facetloc.
        """
        l = l.strip()
        if l.startswith("solid"):
            self.insolid = 1
            self.name = l[6:]
        elif l.startswith("endsolid"):
            self.insolid = 0
            return 0
        elif l.startswith("facet normal"):
            l = l.replace(", ", ".")
            self.infacet = 1
            self.facetloc = 0
            normal = numpy.array([float(f) for f in l.split()[2:]])
            self.facet = (normal, (numpy.zeros(3), numpy.zeros(3), numpy.zeros(3)))
        elif l.startswith("endfacet"):
            self.infacet = 0
            self.facets.append(self.facet)
            facet = self.facet
            self.facetsminz += [(min(x[2] for x in facet[1]), facet)]
            self.facetsmaxz += [(max(x[2] for x in facet[1]), facet)]
        elif l.startswith("vertex"):
            l = l.replace(", ", ".")
            self.facet[1][self.facetloc][:] = numpy.array([float(f) for f in l.split()[1:]])
            self.facetloc += 1
        return 1
if __name__ == "__main__":
    # Ad-hoc smoke test / slicing experiment with a hard-coded input
    # path. NOTE(review): range(11, 11) is empty, so the slicing loop
    # below never actually runs.
    s = stl("../../Downloads/frame-vertex-neo-foot-x4.stl")
    for i in range(11, 11):
        working = s.facets[:]
        # Drop facets whose lowest point is above the cut height...
        for j in reversed(sorted(s.facetsminz)):
            if j[0] > i:
                working.remove(j[1])
            else:
                break
        # ...and facets whose highest point is below it.
        for j in (sorted(s.facetsmaxz)):
            if j[0] < i:
                working.remove(j[1])
            else:
                break
        print(i, len(working))
    emitstl("../../Downloads/frame-vertex-neo-foot-x4-a.stl", s.facets, "emitted_object")
# stl("../prusamendel/stl/mendelplate.stl")
| 13,809 | Python | .py | 357 | 27.87395 | 93 | 0.500932 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,368 | osx.py | kliment_Printrun/printrun/power/osx.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
#
# Imported from http://www.benden.us/journal/2014/OS-X-Power-Management-No-Sleep-Howto/
# Copyright (c) Joseph Benden 2014
import ctypes
import CoreFoundation
import objc
def SetUpIOFramework():
    """Load macOS IOKit via ctypes and declare the power-assertion
    function signatures used below. Returns the library handle."""
    # load the IOKit library
    framework = ctypes.cdll.LoadLibrary('/System/Library/Frameworks/IOKit.framework/IOKit')
    # declare parameters as described in IOPMLib.h
    framework.IOPMAssertionCreateWithName.argtypes = [
        ctypes.c_void_p,  # CFStringRef
        ctypes.c_uint32,  # IOPMAssertionLevel
        ctypes.c_void_p,  # CFStringRef
        ctypes.POINTER(ctypes.c_uint32)]  # IOPMAssertionID
    framework.IOPMAssertionRelease.argtypes = [
        ctypes.c_uint32]  # IOPMAssertionID
    return framework
def StringToCFString(string):
    """Convert a Python string to a CFString pointer usable by IOKit."""
    # we'll need to convert our strings before use
    try:
        encoding = CoreFoundation.kCFStringEncodingASCII
    except AttributeError:
        # Fallback constant when the binding doesn't expose the name
        # (0x600 — presumably kCFStringEncodingASCII; confirm against
        # CFString.h).
        encoding = 0x600
        string = string.encode('ascii')
    cfstring = CoreFoundation.CFStringCreateWithCString(None, string, encoding)
    return objc.pyobjc_id(cfstring.nsstring())
def AssertionCreateWithName(framework, a_type,
                            a_level, a_reason):
    """Create an IOKit power assertion; returns (error_code, assertion_id)."""
    # this method will create an assertion using the IOKit library
    # several parameters
    a_id = ctypes.c_uint32(0)
    a_type = StringToCFString(a_type)
    a_reason = StringToCFString(a_reason)
    a_error = framework.IOPMAssertionCreateWithName(
        a_type, a_level, a_reason, ctypes.byref(a_id))
    # we get back a 0 or stderr, along with a unique c_uint
    # representing the assertion ID so we can release it later
    return a_error, a_id
def AssertionRelease(framework, assertion_id):
    """Release a previously created power assertion."""
    # releasing the assertion is easy, and also returns a 0 on
    # success, or stderr otherwise
    return framework.IOPMAssertionRelease(assertion_id)
def inhibit_sleep_osx(reason):
    """Prevent macOS idle sleep, remembering the assertion for a later
    deinhibit_sleep_osx() call. Returns the IOKit error code (0 = ok)."""
    no_idle = "NoIdleSleepAssertion"
    # Initialize IOKit framework
    if inhibit_sleep_osx.framework is None:
        inhibit_sleep_osx.framework = SetUpIOFramework()
    framework = inhibit_sleep_osx.framework
    # Start inhibition
    ret, a_id = AssertionCreateWithName(framework, no_idle, 255, reason)
    inhibit_sleep_osx.assertion_id = a_id
    return ret
# Function attributes hold the lazily-loaded framework handle and the
# active assertion id between the inhibit/deinhibit calls.
inhibit_sleep_osx.framework = None
def deinhibit_sleep_osx():
    """Release the assertion created by inhibit_sleep_osx()."""
    return AssertionRelease(inhibit_sleep_osx.framework,
                            inhibit_sleep_osx.assertion_id)
| 3,096 | Python | .py | 71 | 38.788732 | 91 | 0.738885 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,369 | __init__.py | kliment_Printrun/printrun/power/__init__.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import platform
import logging
import os
# Define platform-appropriate inhibit_sleep(reason)/deinhibit_sleep()
# implementations: IOKit on macOS, SetThreadExecutionState on Windows,
# and the DBus screensaver interface elsewhere (no-ops when DBus setup
# fails).
if platform.system() == "Darwin":
    from .osx import inhibit_sleep_osx, deinhibit_sleep_osx
    inhibit_sleep = inhibit_sleep_osx
    deinhibit_sleep = deinhibit_sleep_osx
elif platform.system() == "Windows":
    import ctypes
    ES_CONTINUOUS = 0x80000000
    ES_SYSTEM_REQUIRED = 0x00000001
    def inhibit_sleep(reason):
        """Keep the system awake until deinhibit_sleep() is called."""
        mode = ES_CONTINUOUS | ES_SYSTEM_REQUIRED
        ctypes.windll.kernel32.SetThreadExecutionState(ctypes.c_int(mode))
    def deinhibit_sleep():
        """Restore the normal system sleep behavior."""
        ctypes.windll.kernel32.SetThreadExecutionState(ctypes.c_int(ES_CONTINUOUS))
else:
    try:
        import dbus
        inhibit_sleep_handler = None
        inhibit_sleep_token = None
        bus = dbus.SessionBus()
        try:
            if os.environ.get('DESKTOP_SESSION') == "mate":
                # Mate uses a special service
                service_name = "org.mate.ScreenSaver"
                object_path = "/org/mate/ScreenSaver"
            else:
                # standard service name
                service_name = "org.freedesktop.ScreenSaver"
                object_path = "/org/freedesktop/ScreenSaver"
            # GNOME and Mate use the right object path, try it first
            proxy = bus.get_object(service_name, object_path)
            inhibit_sleep_handler = dbus.Interface(proxy, service_name)
            # Do a test run
            token = inhibit_sleep_handler.Inhibit("printrun", "test")
            inhibit_sleep_handler.UnInhibit(token)
        except dbus.DBusException:
            # KDE uses /ScreenSaver object path, let's try it as well
            proxy = bus.get_object(service_name,
                                   "/ScreenSaver")
            inhibit_sleep_handler = dbus.Interface(proxy, service_name)
            token = inhibit_sleep_handler.Inhibit("printrun", "test")
            inhibit_sleep_handler.UnInhibit(token)
        def inhibit_sleep(reason):
            """Ask the screensaver service to inhibit sleep, keeping the token."""
            global inhibit_sleep_handler, inhibit_sleep_token
            inhibit_sleep_token = inhibit_sleep_handler.Inhibit("printrun", reason)
        def deinhibit_sleep():
            """Release the previously obtained inhibition token, if any."""
            global inhibit_sleep_handler, inhibit_sleep_token
            if inhibit_sleep_handler is None or inhibit_sleep_token is None:
                return
            inhibit_sleep_handler.UnInhibit(inhibit_sleep_token)
            inhibit_sleep_token = None
    except Exception as e:
        # No usable DBus session: sleep inhibition becomes a no-op.
        logging.warning("Could not setup DBus for sleep inhibition: %s" % e)
        def inhibit_sleep(reason):
            return
        def deinhibit_sleep():
            return
# Process-priority helpers built on psutil; when psutil is missing,
# powerset_print_start/stop degrade to no-ops.
try:
    import psutil
    def get_nice(nice, p = None):
        """Return the niceness of process *p* (default: this process).

        NOTE(review): the *nice* parameter is unused; the call site
        below passes a Process object as it — confirm intent.
        """
        if not p: p = psutil.Process(os.getpid())
        # Older psutil exposed .nice as an attribute, newer as a method.
        if callable(p.nice):
            return p.nice()
        else:
            return p.nice
    def set_nice(nice, p = None):
        """Set the niceness of process *p* (default: this process)."""
        if not p: p = psutil.Process(os.getpid())
        if callable(p.nice):
            p.nice(nice)
        else:
            p.nice = nice
    if platform.system() != "Windows":
        import resource
        if hasattr(psutil, "RLIMIT_NICE"):
            nice_limit, _ = resource.getrlimit(psutil.RLIMIT_NICE)
            high_priority_nice = 20 - nice_limit
        else:
            high_priority_nice = 0
            # RLIMIT_NICE is not available (probably OSX), let's probe
            # Try setting niceness to -20 .. -1
            p = psutil.Process(os.getpid())
            orig_nice = get_nice(p)
            for i in range(-20, 0):
                try:
                    set_nice(i, p)
                    high_priority_nice = i
                    break
                except psutil.AccessDenied as e:
                    pass
            set_nice(orig_nice, p)
    def set_priority():
        """Raise this process's scheduling priority where permitted."""
        if platform.system() == "Windows":
            set_nice(psutil.HIGH_PRIORITY_CLASS)
        else:
            if high_priority_nice < 0:
                set_nice(high_priority_nice)
    def reset_priority():
        """Return this process to normal scheduling priority."""
        if platform.system() == "Windows":
            set_nice(psutil.NORMAL_PRIORITY_CLASS)
        else:
            if high_priority_nice < 0:
                set_nice(0)
    def powerset_print_start(reason):
        """Boost priority and inhibit system sleep for a print."""
        set_priority()
        inhibit_sleep(reason)
    def powerset_print_stop():
        """Undo powerset_print_start()."""
        reset_priority()
        deinhibit_sleep()
except ImportError as e:
    logging.warning("psutil unavailable, could not import power utils:" + str(e))
    def powerset_print_start(reason):
        pass
    def powerset_print_stop():
        pass
9,370 | trackball.py | kliment_Printrun/printrun/gl/trackball.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import math
from pyglet.gl import GLdouble
def cross(v1, v2):
    """Return the cross product of two 3-component vectors as a list."""
    ax, ay, az = v1[0], v1[1], v1[2]
    bx, by, bz = v2[0], v2[1], v2[2]
    return [ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx]
def trackball(p1x, p1y, p2x, p2y, r):
    """Map a mouse drag from (p1x, p1y) to (p2x, p2y) in normalized device
    coordinates onto a virtual trackball of radius r and return the
    resulting rotation quaternion [x, y, z, w]."""
    if (p1x, p1y) == (p2x, p2y):
        # No movement: identity rotation.
        return [0.0, 0.0, 0.0, 1.0]
    # Lift both screen points onto the trackball surface.
    p1 = [p1x, p1y, project_to_sphere(r, p1x, p1y)]
    p2 = [p2x, p2y, project_to_sphere(r, p2x, p2y)]
    # Rotation axis is perpendicular to both lifted points.
    axis = cross(p2, p1)
    diff = [c1 - c2 for c1, c2 in zip(p1, p2)]
    t = math.sqrt(sum(c * c for c in diff)) / (2.0 * r)
    # Clamp to the domain of asin.
    if t > 1.0:
        t = 1.0
    if t < -1.0:
        t = -1.0
    return axis_to_quat(axis, 2.0 * math.asin(t))
def axis_to_quat(a, phi):
    """Convert a rotation of angle phi (radians) about axis a (any length)
    into a quaternion [x, y, z, w]."""
    length = math.sqrt(sum(c * c for c in a))
    half_sin = math.sin(phi / 2.0)
    # Normalize the axis, then scale by sin(phi/2).
    q = [c * (1 / length) * half_sin for c in a]
    q.append(math.cos(phi / 2.0))
    return q
def build_rotmatrix(q):
    """Expand quaternion q = [x, y, z, w] into a 4x4 rotation matrix stored
    in a flat 16-element GLdouble array (suitable for glMultMatrixd)."""
    x, y, z, w = q[0], q[1], q[2], q[3]
    m = (GLdouble * 16)()
    # First row of the 3x3 rotation part.
    m[0] = 1.0 - 2.0 * (y * y + z * z)
    m[1] = 2.0 * (x * y - z * w)
    m[2] = 2.0 * (z * x + y * w)
    m[3] = 0.0
    # Second row.
    m[4] = 2.0 * (x * y + z * w)
    m[5] = 1.0 - 2.0 * (z * z + x * x)
    m[6] = 2.0 * (y * z - x * w)
    m[7] = 0.0
    # Third row.
    m[8] = 2.0 * (z * x - y * w)
    m[9] = 2.0 * (y * z + x * w)
    m[10] = 1.0 - 2.0 * (y * y + x * x)
    m[11] = 0.0
    # Homogeneous part: no translation.
    m[12] = 0.0
    m[13] = 0.0
    m[14] = 0.0
    m[15] = 1.0
    return m
def project_to_sphere(r, x, y):
    """Return the z height of window point (x, y) projected onto a sphere of
    radius r, blended into a hyperbolic sheet away from the center
    (Bell's virtual trackball)."""
    d = math.sqrt(x * x + y * y)
    if d >= r * 0.70710678118654752440:
        # Outside sqrt(1/2)*r: use the hyperbola z = (r^2/2)/d.
        t = r / 1.41421356237309504880
        return t * t / d
    # Inside: plain sphere surface.
    return math.sqrt(r * r - d * d)
def mulquat(q1, rq):
    """Return the Hamilton product q1 * rq for quaternions stored as
    [x, y, z, w]."""
    x1, y1, z1, w1 = q1[0], q1[1], q1[2], q1[3]
    x2, y2, z2, w2 = rq[0], rq[1], rq[2], rq[3]
    return [w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
            w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2]
| 2,724 | Python | .py | 72 | 32.916667 | 74 | 0.524677 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,371 | panel.py | kliment_Printrun/printrun/gl/panel.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from threading import Lock
import logging
import traceback
import numpy
import numpy.linalg
import wx
from wx import glcanvas
import pyglet
pyglet.options['debug_gl'] = True
from pyglet.gl import glEnable, glDisable, GL_LIGHTING, glLightfv, \
GL_LIGHT0, GL_LIGHT1, GL_LIGHT2, GL_POSITION, GL_DIFFUSE, \
GL_AMBIENT, GL_SPECULAR, GL_COLOR_MATERIAL, \
glShadeModel, GL_SMOOTH, GL_NORMALIZE, \
GL_BLEND, glBlendFunc, glClear, glClearColor, \
glClearDepth, GL_COLOR_BUFFER_BIT, GL_CULL_FACE, \
GL_DEPTH_BUFFER_BIT, glDepthFunc, GL_DEPTH_TEST, \
GLdouble, glGetDoublev, glGetIntegerv, GLint, \
GL_LEQUAL, glLoadIdentity, glMatrixMode, GL_MODELVIEW, \
GL_MODELVIEW_MATRIX, GL_ONE_MINUS_SRC_ALPHA, glOrtho, \
GL_PROJECTION, GL_PROJECTION_MATRIX, glScalef, \
GL_SRC_ALPHA, glTranslatef, gluPerspective, gluUnProject, \
glViewport, GL_VIEWPORT, glPushMatrix, glPopMatrix, \
glBegin, glVertex2f, glVertex3f, glEnd, GL_LINE_LOOP, glColor3f, \
GL_LINE_STIPPLE, glColor4f, glLineStipple
from pyglet import gl
from .trackball import trackball, mulquat, axis_to_quat
from .libtatlin.actors import vec
from pyglet.gl.glu import gluOrtho2D
# When Subclassing wx.Window in Windows the focus goes to the wx.Window
# instead of GLCanvas and it does not draw the focus rectangle and
# does not consume used keystrokes
# BASE_CLASS = wx.Window
# Subclassing Panel solves problem In Windows
# Base widget wxGLPanel derives from; see the platform notes above.
BASE_CLASS = wx.Panel
# BASE_CLASS = wx.ScrolledWindow
# BASE_CLASS = glcanvas.GLCanvas
class wxGLPanel(BASE_CLASS):
    '''A simple class for using OpenGL with wxPython.'''

    # Rotate via orbit() (turntable style) instead of the free trackball.
    orbit_control = True
    # Use an orthographic projection (True) or perspective (False).
    orthographic = True
    # RGBA clear color of the viewport.
    color_background = (0.98, 0.98, 0.78, 1)
    # Whether setup_lights() configures the fixed-function lights.
    do_lights = True
    def __init__(self, parent, pos = wx.DefaultPosition,
                 size = wx.DefaultSize, style = 0,
                 antialias_samples = 0):
        """Create the panel and its child GLCanvas.

        antialias_samples > 0 requests an MSAA framebuffer when this
        wx build exposes WX_GL_SAMPLE_BUFFERS.
        """
        # Full repaint should not be a performance problem
        #TODO: test on windows, tested in Ubuntu
        style = style | wx.FULL_REPAINT_ON_RESIZE

        self.GLinitialized = False
        self.mview_initialized = False
        attribList = [glcanvas.WX_GL_RGBA,  # RGBA
                      glcanvas.WX_GL_DOUBLEBUFFER,  # Double Buffered
                      glcanvas.WX_GL_DEPTH_SIZE, 24  # 24 bit
                      ]
        if antialias_samples > 0 and hasattr(glcanvas, "WX_GL_SAMPLE_BUFFERS"):
            attribList += (glcanvas.WX_GL_SAMPLE_BUFFERS, 1,
                           glcanvas.WX_GL_SAMPLES, antialias_samples)

        # attribList must be zero-terminated.
        attribList.append(0)

        if BASE_CLASS is glcanvas.GLCanvas:
            super().__init__(parent, wx.ID_ANY, attribList, pos, size, style)
            self.canvas = self
        else:
            super().__init__(parent, wx.ID_ANY, pos, size, style)
            self.canvas = glcanvas.GLCanvas(self, wx.ID_ANY, attribList, pos, size, style)

        self.width = self.height = None

        self.context = glcanvas.GLContext(self.canvas)

        # Serializes concurrent updates of the view quaternion.
        self.rot_lock = Lock()
        self.basequat = [0, 0, 0, 1]
        self.zoom_factor = 1.0

        # Accumulated orbit angles (see orbit()).
        self.angle_z = 0
        self.angle_x = 0

        # Set when a GLException occurs; disables further GL painting.
        self.gl_broken = False

        # bind events
        self.canvas.Bind(wx.EVT_SIZE, self.processSizeEvent)
        if self.canvas is not self:
            self.Bind(wx.EVT_SIZE, self.OnScrollSize)
            # do not focus parent (panel like) but its canvas
            self.SetCanFocus(False)

        self.canvas.Bind(wx.EVT_ERASE_BACKGROUND, self.processEraseBackgroundEvent)
        # In wxWidgets 3.0.x there is a clipping bug during resizing
        # which could be affected by painting the container
        # self.Bind(wx.EVT_PAINT, self.processPaintEvent)
        # Upgrade to wxPython 4.1 recommended
        self.canvas.Bind(wx.EVT_PAINT, self.processPaintEvent)

        self.canvas.Bind(wx.EVT_SET_FOCUS, self.processFocus)
        self.canvas.Bind(wx.EVT_KILL_FOCUS, self.processKillFocus)
    def processFocus(self, ev):
        # Repaint so the focus outline (drawFocus) appears.
        # print('processFocus')
        self.Refresh(False)
        ev.Skip()
    def processKillFocus(self, ev):
        # Repaint so the focus outline disappears.
        # print('processKillFocus')
        self.Refresh(False)
        ev.Skip()
# def processIdle(self, event):
# print('processIdle')
# event.Skip()
    def Layout(self):
        # Plain delegation; kept as an explicit hook for debugging layout passes.
        return super().Layout()
    def Refresh(self, eraseback=True):
        # Request a repaint; eraseback is forwarded to wx.Window.Refresh.
        # print('Refresh')
        return super().Refresh(eraseback)
    def OnScrollSize(self, event):
        # Keep the child GLCanvas the same size as this container panel.
        self.canvas.SetSize(event.Size)
    def processEraseBackgroundEvent(self, event):
        '''Process the erase background event.'''
        pass  # Do nothing, to avoid flashing on MSWin
    def processSizeEvent(self, event):
        '''Process the resize event.'''
        # print('processSizeEvent frozen', self.IsFrozen(), event.Size.x, self.ClientSize.x)
        if not self.IsFrozen() and self.canvas.IsShownOnScreen():
            # Make sure the frame is shown before calling SetCurrent.
            self.canvas.SetCurrent(self.context)
            self.OnReshape()
            # self.Refresh(False)
            # print('Refresh')
        event.Skip()
    def processPaintEvent(self, event):
        '''Process the drawing event.'''
        # print('wxGLPanel.processPaintEvent', self.ClientSize.Width)
        self.canvas.SetCurrent(self.context)

        if not self.gl_broken:
            try:
                self.OnInitGL()
                self.DrawCanvas()
            except pyglet.gl.lib.GLException:
                # Permanently disable GL for this panel instead of failing
                # on every subsequent paint event.
                self.gl_broken = True
                logging.error(_("OpenGL failed, disabling it:")
                              + "\n" + traceback.format_exc())
        event.Skip()
    def Destroy(self):
        """Tear down the pyglet GL context before destroying the window."""
        # clean up the pyglet OpenGL context
        self.pygletcontext.destroy()
        # call the super method
        super().Destroy()
# ==========================================================================
# GLFrame OpenGL Event Handlers
# ==========================================================================
    def OnInitGL(self, call_reshape = True):
        '''Initialize OpenGL for use in the window.'''
        # Idempotent: only the first call does any work.
        if self.GLinitialized:
            return
        self.GLinitialized = True
        # create a pyglet context for this panel
        self.pygletcontext = gl.Context(gl.current_context)
        self.pygletcontext.canvas = self
        self.pygletcontext.set_current()
        # normal gl init
        glClearColor(*self.color_background)
        glClearDepth(1.0)  # set depth value to 1
        glDepthFunc(GL_LEQUAL)
        glEnable(GL_COLOR_MATERIAL)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_CULL_FACE)
        # Standard alpha blending for translucent geometry.
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        if call_reshape:
            self.OnReshape()
    def OnReshape(self):
        """Reshape the OpenGL viewport based on the size of the window"""
        size = self.GetClientSize() * self.GetContentScaleFactor()
        oldwidth, oldheight = self.width, self.height
        width, height = size.width, size.height
        # Ignore degenerate sizes during window construction.
        if width < 1 or height < 1:
            return
        self.width = max(float(width), 1.0)
        self.height = max(float(height), 1.0)
        self.OnInitGL(call_reshape = False)
        # print('glViewport', width)
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # NOTE(review): self.dist is not set in this class; it appears to be a
        # subclass-provided view distance — confirm against subclasses.
        if self.orthographic:
            glOrtho(-width / 2, width / 2, -height / 2, height / 2,
                    -5 * self.dist, 5 * self.dist)
        else:
            gluPerspective(60., float(width) / height, 10.0, 3 * self.dist)
            glTranslatef(0, 0, -self.dist)  # Move back
        glMatrixMode(GL_MODELVIEW)

        if not self.mview_initialized:
            self.reset_mview(0.9)
            self.mview_initialized = True
        elif oldwidth is not None and oldheight is not None:
            # On subsequent resizes rescale the view so the visible area
            # stays consistent, keeping the window center fixed.
            wratio = self.width / oldwidth
            hratio = self.height / oldheight

            factor = min(wratio * self.zoomed_width, hratio * self.zoomed_height)
            x, y, _ = self.mouse_to_3d(self.width / 2, self.height / 2)
            self.zoom(factor, (x, y))
            self.zoomed_width *= wratio / factor
            self.zoomed_height *= hratio / factor

        # Wrap text to the width of the window
        if self.GLinitialized:
            self.pygletcontext.set_current()
            self.update_object_resize()
def setup_lights(self):
if not self.do_lights:
return
glEnable(GL_LIGHTING)
glDisable(GL_LIGHT0)
glLightfv(GL_LIGHT0, GL_AMBIENT, vec(0.4, 0.4, 0.4, 1.0))
glLightfv(GL_LIGHT0, GL_SPECULAR, vec(0, 0, 0, 0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(0, 0, 0, 0))
glEnable(GL_LIGHT1)
glLightfv(GL_LIGHT1, GL_AMBIENT, vec(0, 0, 0, 1.0))
glLightfv(GL_LIGHT1, GL_SPECULAR, vec(0.6, 0.6, 0.6, 1.0))
glLightfv(GL_LIGHT2, GL_DIFFUSE, vec(0.8, 0.8, 0.8, 1))
glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 2, 3, 0))
glEnable(GL_LIGHT2)
glLightfv(GL_LIGHT2, GL_AMBIENT, vec(0, 0, 0, 1.0))
glLightfv(GL_LIGHT2, GL_SPECULAR, vec(0.6, 0.6, 0.6, 1.0))
glLightfv(GL_LIGHT2, GL_DIFFUSE, vec(0.8, 0.8, 0.8, 1))
glLightfv(GL_LIGHT2, GL_POSITION, vec(-1, -1, 3, 0))
glEnable(GL_NORMALIZE)
glShadeModel(GL_SMOOTH)
    def reset_mview(self, factor):
        """Reset the modelview matrix and scale the scene so `dist` world
        units fit the smaller window dimension, times `factor`."""
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        self.setup_lights()
        wratio = self.width / self.dist
        hratio = self.height / self.dist
        minratio = float(min(wratio, hratio))
        self.zoom_factor = 1.0
        # Aspect-corrected extents of the visible area, in dist units.
        self.zoomed_width = wratio / minratio
        self.zoomed_height = hratio / minratio
        glScalef(factor * minratio, factor * minratio, 1)
    def DrawCanvas(self):
        """Draw the window."""
        #import time
        #start = time.perf_counter()
        # print('DrawCanvas', self.canvas.GetClientRect())
        self.pygletcontext.set_current()
        glClearColor(*self.color_background)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        # Subclass hook does the actual scene rendering.
        self.draw_objects()

        # Overlay the focus rectangle when the canvas has keyboard focus.
        if self.canvas.HasFocus():
            self.drawFocus()
        self.canvas.SwapBuffers()
        #print('Draw took', '%.2f'%(time.perf_counter()-start))
    def drawFocus(self):
        """Draw a stippled focus rectangle just inside the canvas border."""
        glColor4f(0, 0, 0, 0.4)

        # Save modelview and projection; draw in pixel coordinates.
        glPushMatrix()
        glLoadIdentity()

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(0, self.width, 0, self.height)

        glLineStipple(1, 0xf0f0)
        glEnable(GL_LINE_STIPPLE)

        glBegin(GL_LINE_LOOP)
        glVertex2f(1, 0)
        glVertex2f(self.width, 0)
        glVertex2f(self.width, self.height-1)
        glVertex2f(1, self.height-1)
        glEnd()

        glDisable(GL_LINE_STIPPLE)

        glPopMatrix()  # restore PROJECTION
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
# ==========================================================================
# To be implemented by a sub class
# ==========================================================================
    def create_objects(self):
        '''create opengl objects when opengl is initialized'''
        # Subclass hook; the base implementation does nothing.
        pass
    def update_object_resize(self):
        '''called when the window receives only if opengl is initialized'''
        # Subclass hook; the base implementation does nothing.
        pass
    def draw_objects(self):
        '''called in the middle of ondraw after the buffer has been cleared'''
        # Subclass hook; the base implementation does nothing.
        pass
# ==========================================================================
# Utils
# ==========================================================================
def get_modelview_mat(self, local_transform):
mvmat = (GLdouble * 16)()
glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
return mvmat
    def mouse_to_3d(self, x, y, z = 1.0, local_transform = False):
        """Unproject window point (x, y) at depth z into world coordinates,
        returning an (x, y, z) tuple."""
        x = float(x)
        # Window y grows downward, GL y grows upward.
        y = self.height - float(y)
        # The following could work if we were not initially scaling to zoom on
        # the bed
        # if self.orthographic:
        #     return (x - self.width / 2, y - self.height / 2, 0)
        pmat = (GLdouble * 16)()
        mvmat = self.get_modelview_mat(local_transform)
        viewport = (GLint * 4)()
        px = (GLdouble)()
        py = (GLdouble)()
        pz = (GLdouble)()
        glGetIntegerv(GL_VIEWPORT, viewport)
        glGetDoublev(GL_PROJECTION_MATRIX, pmat)
        # NOTE(review): this overwrites the matrix fetched from
        # get_modelview_mat() above, so any subclass override of that helper
        # is ignored here — looks unintentional; verify before changing.
        glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
        gluUnProject(x, y, z, mvmat, pmat, viewport, px, py, pz)
        return (px.value, py.value, pz.value)
def mouse_to_ray(self, x, y, local_transform = False):
x = float(x)
y = self.height - float(y)
pmat = (GLdouble * 16)()
mvmat = (GLdouble * 16)()
viewport = (GLint * 4)()
px = (GLdouble)()
py = (GLdouble)()
pz = (GLdouble)()
glGetIntegerv(GL_VIEWPORT, viewport)
glGetDoublev(GL_PROJECTION_MATRIX, pmat)
mvmat = self.get_modelview_mat(local_transform)
gluUnProject(x, y, 1, mvmat, pmat, viewport, px, py, pz)
ray_far = (px.value, py.value, pz.value)
gluUnProject(x, y, 0., mvmat, pmat, viewport, px, py, pz)
ray_near = (px.value, py.value, pz.value)
return ray_near, ray_far
def mouse_to_plane(self, x, y, plane_normal, plane_offset, local_transform = False):
# Ray/plane intersection
ray_near, ray_far = self.mouse_to_ray(x, y, local_transform)
ray_near = numpy.array(ray_near)
ray_far = numpy.array(ray_far)
ray_dir = ray_far - ray_near
ray_dir = ray_dir / numpy.linalg.norm(ray_dir)
plane_normal = numpy.array(plane_normal)
q = ray_dir.dot(plane_normal)
if q == 0:
return None
t = - (ray_near.dot(plane_normal) + plane_offset) / q
if t < 0:
return None
return ray_near + t * ray_dir
    def zoom(self, factor, to = None):
        """Scale the modelview by `factor`, optionally keeping world point
        `to` = (x, y) fixed on screen."""
        glMatrixMode(GL_MODELVIEW)
        if to:
            delta_x = to[0]
            delta_y = to[1]
            glTranslatef(delta_x, delta_y, 0)
        glScalef(factor, factor, 1)
        self.zoom_factor *= factor
        if to:
            glTranslatef(-delta_x, -delta_y, 0)
        # For wxPython (<4.1) and GTK:
        # when you resize (enlarge) 3d view fast towards the log pane
        # sash garbage may remain in GLCanvas
        # The following refresh clears it at the cost of
        # doubled frame draws.
        # wx.CallAfter(self.Refresh)
        self.Refresh(False)
    def zoom_to_center(self, factor):
        """Zoom by `factor`, keeping the window center fixed."""
        self.canvas.SetCurrent(self.context)
        x, y, _ = self.mouse_to_3d(self.width / 2, self.height / 2)
        self.zoom(factor, (x, y))
def orbit(self, p1x, p1y, p2x, p2y):
rz = p2x-p1x
self.angle_z-=rz
rotz = axis_to_quat([0.0,0.0,1.0],self.angle_z)
rx = p2y-p1y
self.angle_x+=rx
rota = axis_to_quat([1.0,0.0,0.0],self.angle_x)
return mulquat(rotz,rota)
    def handle_rotation(self, event):
        """Update the view rotation from a mouse-drag event, using orbit
        (turntable) or free trackball depending on orbit_control."""
        content_scale_factor = self.GetContentScaleFactor()
        if self.initpos is None:
            # First event of the drag: just record the anchor point.
            self.initpos = event.GetPosition() * content_scale_factor
        else:
            p1 = self.initpos
            p2 = event.GetPosition() * content_scale_factor
            sz = self.GetClientSize() * content_scale_factor
            # Normalize both points into [-1, 1] device coordinates.
            p1x = p1[0] / (sz[0] / 2) - 1
            p1y = 1 - p1[1] / (sz[1] / 2)
            p2x = p2[0] / (sz[0] / 2) - 1
            p2y = 1 - p2[1] / (sz[1] / 2)
            quat = trackball(p1x, p1y, p2x, p2y, self.dist / 250.0)
            with self.rot_lock:
                if self.orbit_control:
                    self.basequat = self.orbit(p1x, p1y, p2x, p2y)
                else:
                    self.basequat = mulquat(self.basequat, quat)
            self.initpos = p2
    def handle_translation(self, event):
        """Pan the view according to a mouse-drag event."""
        content_scale_factor = self.GetContentScaleFactor()
        if self.initpos is None:
            # First event of the drag: just record the anchor point.
            self.initpos = event.GetPosition() * content_scale_factor
        else:
            p1 = self.initpos
            p2 = event.GetPosition() * content_scale_factor
            if self.orthographic:
                # Pan in world coordinates so the model tracks the cursor.
                x1, y1, _ = self.mouse_to_3d(p1[0], p1[1])
                x2, y2, _ = self.mouse_to_3d(p2[0], p2[1])
                glTranslatef(x2 - x1, y2 - y1, 0)
            else:
                glTranslatef(p2[0] - p1[0], -(p2[1] - p1[1]), 0)
            self.initpos = p2
| 17,232 | Python | .py | 403 | 33.826303 | 92 | 0.594013 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,372 | actors.py | kliment_Printrun/printrun/gl/libtatlin/actors.py | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Guillaume Seguin
# Copyright (C) 2011 Denis Kobozev
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import time
import numpy
import array
import math
import logging
import threading
from ctypes import sizeof
from pyglet.gl import glPushMatrix, glPopMatrix, glTranslatef, \
glGenLists, glNewList, GL_COMPILE, glEndList, glCallList, \
GL_ELEMENT_ARRAY_BUFFER, GL_UNSIGNED_INT, GL_TRIANGLES, GL_LINE_LOOP, \
GL_ARRAY_BUFFER, GL_STATIC_DRAW, glColor4f, glVertex3f, \
glBegin, glEnd, GL_LINES, glEnable, glDisable, glGetFloatv, \
GL_LINE_SMOOTH, glLineWidth, GL_LINE_WIDTH, GLfloat, GL_FLOAT, GLuint, \
glVertexPointer, glColorPointer, glDrawArrays, glDrawRangeElements, \
glEnableClientState, glDisableClientState, GL_VERTEX_ARRAY, GL_COLOR_ARRAY, \
GL_FRONT_AND_BACK, GL_FRONT, glMaterialfv, GL_SPECULAR, GL_EMISSION, \
glColorMaterial, GL_AMBIENT_AND_DIFFUSE, glMaterialf, GL_SHININESS, \
GL_NORMAL_ARRAY, glNormalPointer, GL_LIGHTING, glColor3f
from pyglet.graphics.vertexbuffer import create_buffer, VertexBufferObject
from printrun.utils import install_locale
install_locale('pronterface')
def vec(*args):
    """Pack the given numbers into a ctypes GLfloat array (for glLightfv etc.)."""
    return (GLfloat * len(args))(*args)
def compile_display_list(func, *options):
    """Record the GL calls made by func(*options) into a new display list
    and return the list id."""
    display_list = glGenLists(1)
    glNewList(display_list, GL_COMPILE)
    func(*options)
    glEndList()
    return display_list
def numpy2vbo(nparray, target = GL_ARRAY_BUFFER, usage = GL_STATIC_DRAW, use_vbos = True):
    """Upload a numpy array into a new (bound) pyglet buffer object."""
    vbo = create_buffer(nparray.nbytes, target = target, usage = usage, vbo = use_vbos)
    vbo.bind()
    # Copies straight from the numpy data pointer; nparray must stay alive
    # until this call returns.
    vbo.set_data(nparray.ctypes.data)
    return vbo
def triangulate_rectangle(i1, i2, i3, i4):
    """Split the quad (i1, i2, i3, i4) into two triangles, returned as a
    flat list of six vertex indices."""
    return [i1, i4, i3,
            i3, i2, i1]
def triangulate_box(i1, i2, i3, i4,
                    j1, j2, j3, j4):
    """Build the four side faces of a box spanning the bottom ring
    (i1..i4) and top ring (j1..j4); returns 24 indices (8 triangles)."""
    bottom = (i1, i2, i3, i4)
    top = (j1, j2, j3, j4)
    indices = []
    for k in range(4):
        # One quad per side, split into two triangles.
        b0, b1 = bottom[k], bottom[(k + 1) % 4]
        t0, t1 = top[k], top[(k + 1) % 4]
        indices += [b0, b1, t1, t1, t0, b0]
    return indices
class BoundingBox:
    """
    Axis-aligned cuboid enclosing a 3D model, defined by its two extreme
    corners (each indexable as [x, y, z]).
    """

    def __init__(self, upper_corner, lower_corner):
        self.upper_corner = upper_corner
        self.lower_corner = lower_corner

    def _extent(self, axis):
        # Absolute size along one axis, rounded to two decimals.
        return round(abs(self.upper_corner[axis] - self.lower_corner[axis]), 2)

    @property
    def width(self):
        """Size along the X axis."""
        return self._extent(0)

    @property
    def depth(self):
        """Size along the Y axis."""
        return self._extent(1)

    @property
    def height(self):
        """Size along the Z axis."""
        return self._extent(2)
class Platform:
    """
    Platform on which models are placed.

    Draws the print-bed grid (rectangular or circular) with immediate-mode GL.
    """

    def __init__(self, build_dimensions, light = False, circular = False, grid = (1, 10)):
        # light: skip minor grid lines entirely
        # circular: bed is a disc inscribed in the width x depth rectangle
        # grid: (line spacing, major-line interval)
        self.light = light
        self.circular = circular
        self.width = build_dimensions[0]
        self.depth = build_dimensions[1]
        self.height = build_dimensions[2]
        self.xoffset = build_dimensions[3]
        self.yoffset = build_dimensions[4]
        self.zoffset = build_dimensions[5]
        self.grid = grid

        # RGBA grid-line colors by rank: minor / intermediate / major.
        self.color_grads_minor = (0xaf / 255, 0xdf / 255, 0x5f / 255, 0.1)
        self.color_grads_interm = (0xaf / 255, 0xdf / 255, 0x5f / 255, 0.2)
        self.color_grads_major = (0xaf / 255, 0xdf / 255, 0x5f / 255, 0.33)

        self.initialized = False
        self.loaded = True

    def init(self):
        # Pre-record the drawing into a GL display list.
        self.display_list = compile_display_list(self.draw)
        self.initialized = True

    def draw(self):
        glPushMatrix()

        glTranslatef(self.xoffset, self.yoffset, self.zoffset)

        def color(i):
            # Select the line color for coordinate i; returns False when the
            # line should be skipped entirely (minor lines in light mode).
            if i % self.grid[1] == 0:
                glColor4f(*self.color_grads_major)
            elif i % (self.grid[1] // 2) == 0:
                glColor4f(*self.color_grads_interm)
            else:
                if self.light: return False
                glColor4f(*self.color_grads_minor)
            return True

        # draw the grid
        glBegin(GL_LINES)
        if self.circular:  # Draw a circular grid
            for i in numpy.arange(0, int(math.ceil(self.width + 1)), self.grid[0]):
                # Chord endpoints of the vertical line x = i on the disc.
                angle = math.asin(2 * float(i) / self.width - 1)
                x = (math.cos(angle) + 1) * self.depth / 2
                if color(i):
                    glVertex3f(float(i), self.depth - x, 0.0)
                    glVertex3f(float(i), x, 0.0)
            for i in numpy.arange(0, int(math.ceil(self.depth + 1)), self.grid[0]):
                # Chord endpoints of the horizontal line y = i on the disc.
                angle = math.acos(2 * float(i) / self.depth - 1)
                x = (math.sin(angle) + 1) * self.width / 2
                if color(i):
                    glVertex3f(self.width - x, float(i), 0.0)
                    glVertex3f(x, float(i), 0.0)
        else:  # Draw a rectangular grid
            for i in numpy.arange(0, int(math.ceil(self.width + 1)), self.grid[0]):
                if color(i):
                    glVertex3f(float(i), 0.0, 0.0)
                    glVertex3f(float(i), self.depth, 0.0)
            for i in numpy.arange(0, int(math.ceil(self.depth + 1)), self.grid[0]):
                if color(i):
                    glVertex3f(0, float(i), 0.0)
                    glVertex3f(self.width, float(i), 0.0)
        glEnd()

        if self.circular:
            # Outline of the circular bed, one segment per degree.
            glBegin(GL_LINE_LOOP)
            for i in range(0, 360):
                angle = math.radians(i)
                glVertex3f((math.cos(angle) + 1) * self.width / 2,
                           (math.sin(angle) + 1) * self.depth / 2, 0.0)
            glEnd()

        glPopMatrix()

    def display(self, mode_2d=False):
        # FIXME: using the list sometimes results in graphical corruptions
        # glCallList(self.display_list)
        self.draw()
class PrintHead:
    """Crosshair-style marker showing the current print-head position."""

    def __init__(self):
        self.color = (43. / 255, 0., 175. / 255, 1.0)  # RGBA
        self.scale = 5   # half-extent of the crosshair in X/Y
        self.height = 5  # Z height of the crosshair tips

        self.initialized = False
        self.loaded = True

    def init(self):
        # Pre-record the drawing into a GL display list.
        self.display_list = compile_display_list(self.draw)
        self.initialized = True

    def draw(self):
        glPushMatrix()

        glBegin(GL_LINES)
        glColor4f(*self.color)
        # Four lines from the origin to the corners of a scale x scale
        # square at z = height.
        for di in [-1, 1]:
            for dj in [-1, 1]:
                glVertex3f(0, 0, 0)
                glVertex3f(self.scale * di, self.scale * dj, self.height)
        glEnd()

        glPopMatrix()

    def display(self, mode_2d=False):
        glEnable(GL_LINE_SMOOTH)
        # Save the current line width so it can be restored afterwards.
        orig_linewidth = (GLfloat)()
        glGetFloatv(GL_LINE_WIDTH, orig_linewidth)
        glLineWidth(3.0)
        glCallList(self.display_list)
        glLineWidth(orig_linewidth)
        glDisable(GL_LINE_SMOOTH)
class Model:
    """
    Parent class for models that provides common functionality.
    """
    AXIS_X = (1, 0, 0)
    AXIS_Y = (0, 1, 0)
    AXIS_Z = (0, 0, 1)

    letter_axis_map = {
        'x': AXIS_X,
        'y': AXIS_Y,
        'z': AXIS_Z,
    }

    # Reverse mapping: axis tuple -> letter.
    axis_letter_map = dict([(v, k) for k, v in letter_axis_map.items()])

    lock = None

    def __init__(self, offset_x=0, offset_y=0):
        self.offset_x = offset_x
        self.offset_y = offset_y

        self.lock = threading.Lock()

        self.init_model_attributes()

    def init_model_attributes(self):
        """
        Set/reset saved properties.
        """
        self.invalidate_bounding_box()
        self.modified = False

    def invalidate_bounding_box(self):
        # Drop the cached box; it is recomputed lazily on next access.
        self._bounding_box = None

    @property
    def bounding_box(self):
        """
        Get a bounding box for the model.
        """
        if self._bounding_box is None:
            self._bounding_box = self._calculate_bounding_box()
        return self._bounding_box

    def _calculate_bounding_box(self):
        """
        Calculate an axis-aligned box enclosing the model.
        """
        # NOTE(review): assumes self.vertices is a flat numpy array of
        # x, y, z coordinates set by a subclass — confirm against subclasses.
        # swap rows and columns in our vertex arrays so that we can do max and
        # min on axis 1
        xyz_rows = self.vertices.reshape(-1, order='F').reshape(3, -1)
        lower_corner = xyz_rows.min(1)
        upper_corner = xyz_rows.max(1)
        box = BoundingBox(upper_corner, lower_corner)
        return box

    @property
    def width(self):
        return self.bounding_box.width

    @property
    def depth(self):
        return self.bounding_box.depth

    @property
    def height(self):
        return self.bounding_box.height

    def movement_color(self, move):
        """
        Return the color to use for particular type of movement.
        """
        # Extruding moves are colored per active tool (color_tool0..4 come
        # from subclasses such as GcodeModel); travels use color_travel.
        if move.extruding:
            if move.current_tool == 0:
                return self.color_tool0
            elif move.current_tool == 1:
                return self.color_tool1
            elif move.current_tool == 2:
                return self.color_tool2
            elif move.current_tool == 3:
                return self.color_tool3
            else:
                return self.color_tool4

        return self.color_travel
def movement_angle(src, dst, precision=0):
    """Angle in degrees (rounded to `precision` decimals) of the move from
    src to dst; x is negated so the angle grows clockwise."""
    dx = dst[0] - src[0]
    dy = dst[1] - src[1]
    return round(math.degrees(math.atan2(dy, -dx)), precision)
def get_next_move(gcode, layer_idx, gline_idx):
    """Return the first move gline strictly after position
    (layer_idx, gline_idx) in gcode.all_layers, or None if there is none."""
    gline_idx += 1
    for lidx in range(layer_idx, len(gcode.all_layers)):
        layer = gcode.all_layers[lidx]
        while gline_idx < len(layer):
            candidate = layer[gline_idx]
            if candidate.is_move:
                return candidate
            gline_idx += 1
        # Continue scanning from the start of the next layer.
        gline_idx = 0
    return None
def interpolate_arcs(gline, prev_gline):
    """Yield ((x, y, z), interpolated) points along `gline`.

    G2/G3 arcs are flattened into straight segments (about 0.5 mm each,
    capped at 100); every command yields its own endpoint last with
    interpolated=False. Z is interpolated linearly along the arc.
    """
    if gline.command in ("G2", "G3"):
        # I/J are offsets from the current position to the arc center.
        rx = gline.i if gline.i is not None else 0
        ry = gline.j if gline.j is not None else 0
        r = math.sqrt(rx * rx + ry * ry)
        cx = prev_gline.current_x + rx
        cy = prev_gline.current_y + ry
        a_start = math.atan2(-ry, -rx)
        dx = gline.current_x - cx
        dy = gline.current_y - cy
        a_end = math.atan2(dy, dx)
        a_delta = a_end - a_start
        # Force the sweep direction: G3 is CCW (positive), G2 CW (negative).
        if gline.command == "G3" and a_delta <= 0:
            a_delta += math.pi * 2
        elif gline.command == "G2" and a_delta >= 0:
            a_delta -= math.pi * 2
        z0 = prev_gline.current_z
        dz = gline.current_z - z0
        # max segment size: 0.5mm, max num of segments: 100
        segments = math.ceil(abs(a_delta) * r * 2 / 0.5)
        if segments > 100:
            segments = 100
        for step in range(segments):
            a = step / segments * a_delta + a_start
            yield ((cx + math.cos(a) * r,
                    cy + math.sin(a) * r,
                    z0 + step / segments * dz), True)
    # Endpoint of the command (the only point for non-arc moves).
    yield ((gline.current_x, gline.current_y, gline.current_z), False)
class GcodeModel(Model):
    """
    Model for displaying Gcode data.
    """

    # RGBA colors for travel moves and per-tool extrusion.
    color_travel = (0.6, 0.6, 0.6, 0.6)
    color_tool0 = (1.0, 0.0, 0.0, 1.0)
    color_tool1 = (0.67, 0.05, 0.9, 1.0)
    color_tool2 = (1.0, 0.8, 0., 1.0)
    color_tool3 = (1.0, 0., 0.62, 1.0)
    color_tool4 = (0., 1.0, 0.58, 1.0)
    # Colors for already-printed and currently-printing geometry.
    color_printed = (0.2, 0.75, 0, 1.0)
    color_current = (0, 0.9, 1.0, 1.0)
    color_current_printed = (0.1, 0.4, 0, 1.0)

    display_travels = True

    buffers_created = False
    use_vbos = True
    loaded = False
    fully_loaded = False

    gcode = None

    # Half cross-section of an extruded path.
    path_halfwidth = 0.2
    path_halfheight = 0.2
    def set_path_size(self, path_halfwidth, path_halfheight):
        # Update the extrusion-path cross-section under the model lock so a
        # concurrent load_data() sees a consistent pair of values.
        with self.lock:
            self.path_halfwidth = path_halfwidth
            self.path_halfheight = path_halfheight
def load_data(self, model_data, callback=None):
t_start = time.time()
self.gcode = model_data
self.count_travel_indices = count_travel_indices = [0]
self.count_print_indices = count_print_indices = [0]
self.count_print_vertices = count_print_vertices = [0]
# Some trivial computations, but that's mostly for documentation :)
# Not like 10 multiplications are going to cost much time vs what's
# about to happen :)
# Max number of values which can be generated per gline
# to store coordinates/colors/normals.
# Nicely enough we have 3 per kind of thing for all kinds.
coordspervertex = 3
buffered_color_len = 3 # 4th color component (alpha) is ignored
verticesperline = 8
coordsperline = coordspervertex * verticesperline
coords_count = lambda nlines: nlines * coordsperline
travelverticesperline = 2
travelcoordsperline = coordspervertex * travelverticesperline
travel_coords_count = lambda nlines: nlines * travelcoordsperline
trianglesperface = 2
facesperbox = 4
trianglesperbox = trianglesperface * facesperbox
verticespertriangle = 3
indicesperbox = verticespertriangle * trianglesperbox
boxperline = 2
indicesperline = indicesperbox * boxperline
indices_count = lambda nlines: nlines * indicesperline
nlines = len(model_data)
ntravelcoords = travel_coords_count(nlines)
ncoords = coords_count(nlines)
nindices = indices_count(nlines)
travel_vertices = self.travels = numpy.zeros(ntravelcoords, dtype = GLfloat)
travel_vertex_k = 0
vertices = self.vertices = numpy.zeros(ncoords, dtype = GLfloat)
vertex_k = 0
colors = self.colors = numpy.zeros(ncoords, dtype = GLfloat)
color_k = 0
normals = self.normals = numpy.zeros(ncoords, dtype = GLfloat)
indices = self.indices = numpy.zeros(nindices, dtype = GLuint)
index_k = 0
self.layer_idxs_map = {}
self.layer_stops = [0]
prev_move_normal_x = None
prev_move_normal_y = None
prev_move_angle = None
prev_pos = (0, 0, 0)
prev_gline = None
prev_extruding = False
layer_idx = 0
self.printed_until = 0
self.only_current = False
twopi = 2 * math.pi
processed_lines = 0
while layer_idx < len(model_data.all_layers):
with self.lock:
nlines = len(model_data)
remaining_lines = nlines - processed_lines
# Only reallocate memory which might be needed, not memory
# for everything
ntravelcoords = coords_count(remaining_lines) + travel_vertex_k
ncoords = coords_count(remaining_lines) + vertex_k
nindices = indices_count(remaining_lines) + index_k
if ncoords > vertices.size:
self.travels.resize(ntravelcoords, refcheck = False)
self.vertices.resize(ncoords, refcheck = False)
self.colors.resize(ncoords, refcheck = False)
self.normals.resize(ncoords, refcheck = False)
self.indices.resize(nindices, refcheck = False)
layer = model_data.all_layers[layer_idx]
has_movement = False
for gline_idx, gline in enumerate(layer):
if not gline.is_move:
continue
if gline.x is None and gline.y is None and gline.z is None and gline.j is None and gline.i is None:
continue
has_movement = True
for (current_pos, interpolated) in interpolate_arcs(gline, prev_gline):
if not gline.extruding:
if self.travels.size < (travel_vertex_k + 100 * 6):
# arc interpolation extra points allocation
# if not enough room for another 100 points now,
# allocate enough and 50% extra to minimize separate allocations
ratio = (travel_vertex_k + 100 * 6) / self.travels.size * 1.5
# print(f"gl realloc travel {self.travels.size} -> {int(self.travels.size * ratio)}")
self.travels.resize(int(self.travels.size * ratio), refcheck = False)
travel_vertices[travel_vertex_k:travel_vertex_k+3] = prev_pos
travel_vertices[travel_vertex_k + 3:travel_vertex_k + 6] = current_pos
travel_vertex_k += 6
else:
delta_x = current_pos[0] - prev_pos[0]
delta_y = current_pos[1] - prev_pos[1]
norm = delta_x * delta_x + delta_y * delta_y
if norm == 0: # Don't draw anything if this move is Z+E only
continue
norm = math.sqrt(norm)
move_normal_x = - delta_y / norm
move_normal_y = delta_x / norm
move_angle = math.atan2(delta_y, delta_x)
# FIXME: compute these dynamically
path_halfwidth = self.path_halfwidth * 1.2
path_halfheight = self.path_halfheight * 1.2
new_indices = []
new_vertices = []
new_normals = []
if prev_gline and prev_gline.extruding or prev_extruding:
# Store previous vertices indices
prev_id = vertex_k // 3 - 4
avg_move_normal_x = (prev_move_normal_x + move_normal_x) / 2
avg_move_normal_y = (prev_move_normal_y + move_normal_y) / 2
norm = avg_move_normal_x * avg_move_normal_x + avg_move_normal_y * avg_move_normal_y
if norm == 0:
avg_move_normal_x = move_normal_x
avg_move_normal_y = move_normal_y
else:
norm = math.sqrt(norm)
avg_move_normal_x /= norm
avg_move_normal_y /= norm
delta_angle = move_angle - prev_move_angle
delta_angle = (delta_angle + twopi) % twopi
fact = abs(math.cos(delta_angle / 2))
# If move is turning too much, avoid creating a big peak
# by adding an intermediate box
if fact < 0.5:
# FIXME: It looks like there's some heavy code duplication here...
hw = path_halfwidth
p1x = prev_pos[0] - hw * prev_move_normal_x
p2x = prev_pos[0] + hw * prev_move_normal_x
p1y = prev_pos[1] - hw * prev_move_normal_y
p2y = prev_pos[1] + hw * prev_move_normal_y
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
new_vertices.extend((p1x, p1y, prev_pos[2]))
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
new_vertices.extend((p2x, p2y, prev_pos[2]))
new_normals.extend((0, 0, 1))
new_normals.extend((-prev_move_normal_x, -prev_move_normal_y, 0))
new_normals.extend((0, 0, -1))
new_normals.extend((prev_move_normal_x, prev_move_normal_y, 0))
first = vertex_k // 3
# Link to previous
new_indices += triangulate_box(prev_id, prev_id + 1,
prev_id + 2, prev_id + 3,
first, first + 1,
first + 2, first + 3)
p1x = prev_pos[0] - hw * move_normal_x
p2x = prev_pos[0] + hw * move_normal_x
p1y = prev_pos[1] - hw * move_normal_y
p2y = prev_pos[1] + hw * move_normal_y
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
new_vertices.extend((p1x, p1y, prev_pos[2]))
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
new_vertices.extend((p2x, p2y, prev_pos[2]))
new_normals.extend((0, 0, 1))
new_normals.extend((-move_normal_x, -move_normal_y, 0))
new_normals.extend((0, 0, -1))
new_normals.extend((move_normal_x, move_normal_y, 0))
prev_id += 4
first += 4
# Link to previous
new_indices += triangulate_box(prev_id, prev_id + 1,
prev_id + 2, prev_id + 3,
first, first + 1,
first + 2, first + 3)
else:
hw = path_halfwidth / fact
# Compute vertices
p1x = prev_pos[0] - hw * avg_move_normal_x
p2x = prev_pos[0] + hw * avg_move_normal_x
p1y = prev_pos[1] - hw * avg_move_normal_y
p2y = prev_pos[1] + hw * avg_move_normal_y
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
new_vertices.extend((p1x, p1y, prev_pos[2]))
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
new_vertices.extend((p2x, p2y, prev_pos[2]))
new_normals.extend((0, 0, 1))
new_normals.extend((-avg_move_normal_x, -avg_move_normal_y, 0))
new_normals.extend((0, 0, -1))
new_normals.extend((avg_move_normal_x, avg_move_normal_y, 0))
first = vertex_k // 3
# Link to previous
new_indices += triangulate_box(prev_id, prev_id + 1,
prev_id + 2, prev_id + 3,
first, first + 1,
first + 2, first + 3)
else:
# Compute vertices normal to the current move and cap it
p1x = prev_pos[0] - path_halfwidth * move_normal_x
p2x = prev_pos[0] + path_halfwidth * move_normal_x
p1y = prev_pos[1] - path_halfwidth * move_normal_y
p2y = prev_pos[1] + path_halfwidth * move_normal_y
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
new_vertices.extend((p1x, p1y, prev_pos[2]))
new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
new_vertices.extend((p2x, p2y, prev_pos[2]))
new_normals.extend((0, 0, 1))
new_normals.extend((-move_normal_x, -move_normal_y, 0))
new_normals.extend((0, 0, -1))
new_normals.extend((move_normal_x, move_normal_y, 0))
first = vertex_k // 3
new_indices = triangulate_rectangle(first, first + 1,
first + 2, first + 3)
next_move = get_next_move(model_data, layer_idx, gline_idx)
next_is_extruding = interpolated or next_move and next_move.extruding
if not next_is_extruding:
# Compute caps and link everything
p1x = current_pos[0] - path_halfwidth * move_normal_x
p2x = current_pos[0] + path_halfwidth * move_normal_x
p1y = current_pos[1] - path_halfwidth * move_normal_y
p2y = current_pos[1] + path_halfwidth * move_normal_y
new_vertices.extend((current_pos[0], current_pos[1], current_pos[2] + path_halfheight))
new_vertices.extend((p1x, p1y, current_pos[2]))
new_vertices.extend((current_pos[0], current_pos[1], current_pos[2] - path_halfheight))
new_vertices.extend((p2x, p2y, current_pos[2]))
new_normals.extend((0, 0, 1))
new_normals.extend((-move_normal_x, -move_normal_y, 0))
new_normals.extend((0, 0, -1))
new_normals.extend((move_normal_x, move_normal_y, 0))
end_first = vertex_k // 3 + len(new_vertices) // 3 - 4
new_indices += triangulate_rectangle(end_first + 3, end_first + 2,
end_first + 1, end_first)
new_indices += triangulate_box(first, first + 1,
first + 2, first + 3,
end_first, end_first + 1,
end_first + 2, end_first + 3)
if self.indices.size < (index_k + len(new_indices) + 100 * indicesperline):
# arc interpolation extra points allocation
ratio = (index_k + len(new_indices) + 100 * indicesperline) / self.indices.size * 1.5
# print(f"gl realloc print {self.vertices.size} -> {int(self.vertices.size * ratio)}")
self.vertices.resize(int(self.vertices.size * ratio), refcheck = False)
self.colors.resize(int(self.colors.size * ratio), refcheck = False)
self.normals.resize(int(self.normals.size * ratio), refcheck = False)
self.indices.resize(int(self.indices.size * ratio), refcheck = False)
for new_i, item in enumerate(new_indices):
indices[index_k + new_i] = item
index_k += len(new_indices)
new_vertices_len = len(new_vertices)
vertices[vertex_k:vertex_k+new_vertices_len] = new_vertices
normals[vertex_k:vertex_k+new_vertices_len] = new_normals
vertex_k += new_vertices_len
new_vertices_count = new_vertices_len//coordspervertex
# settings support alpha (transparency), but it is ignored here
gline_color = self.movement_color(gline)[:buffered_color_len]
for vi in range(new_vertices_count):
colors[color_k:color_k+buffered_color_len] = gline_color
color_k += buffered_color_len
prev_move_normal_x = move_normal_x
prev_move_normal_y = move_normal_y
prev_move_angle = move_angle
prev_pos = current_pos
prev_extruding = gline.extruding
prev_gline = gline
prev_extruding = gline.extruding
count_travel_indices.append(travel_vertex_k // 3)
count_print_indices.append(index_k)
count_print_vertices.append(vertex_k // 3)
gline.gcview_end_vertex = len(count_print_indices) - 1
if has_movement:
self.layer_stops.append(len(count_print_indices) - 1)
self.layer_idxs_map[layer_idx] = len(self.layer_stops) - 1
self.max_layers = len(self.layer_stops) - 1
self.num_layers_to_draw = self.max_layers + 1
self.initialized = False
self.loaded = True
processed_lines += len(layer)
if callback:
callback(layer_idx + 1)
yield layer_idx
layer_idx += 1
with self.lock:
self.dims = ((model_data.xmin, model_data.xmax, model_data.width),
(model_data.ymin, model_data.ymax, model_data.depth),
(model_data.zmin, model_data.zmax, model_data.height))
self.travels.resize(travel_vertex_k, refcheck = False)
self.vertices.resize(vertex_k, refcheck = False)
self.colors.resize(color_k, refcheck = False)
self.normals.resize(vertex_k, refcheck = False)
self.indices.resize(index_k, refcheck = False)
self.layer_stops = array.array('L', self.layer_stops)
self.count_travel_indices = array.array('L', count_travel_indices)
self.count_print_indices = array.array('L', count_print_indices)
self.count_print_vertices = array.array('L', count_print_vertices)
self.max_layers = len(self.layer_stops) - 1
self.num_layers_to_draw = self.max_layers + 1
self.loaded = True
self.initialized = False
self.loaded = True
self.fully_loaded = True
t_end = time.time()
logging.debug(_('Initialized 3D visualization in %.2f seconds') % (t_end - t_start))
logging.debug(_('Vertex count: %d') % ((len(self.vertices) + len(self.travels)) // 3))
yield None
def copy(self):
copy = GcodeModel()
for var in ["vertices", "colors", "travels", "indices", "normals",
"max_layers", "num_layers_to_draw", "printed_until",
"layer_stops", "dims", "only_current",
"layer_idxs_map", "count_travel_indices",
"count_print_indices", "count_print_vertices",
"path_halfwidth", "path_halfheight",
"gcode"]:
setattr(copy, var, getattr(self, var))
copy.loaded = True
copy.fully_loaded = True
copy.initialized = False
return copy
def update_colors(self):
"""Rebuild gl color buffer without loading. Used after color settings edit"""
ncoords = self.count_print_vertices[-1]
colors = numpy.empty(ncoords*3, dtype = GLfloat)
cur_vertex = 0
gline_i = 1
for gline in self.gcode.lines:
if gline.gcview_end_vertex:
gline_color = self.movement_color(gline)[:3]
last_vertex = self.count_print_vertices[gline_i]
gline_i += 1
while cur_vertex < last_vertex:
colors[cur_vertex*3:cur_vertex*3+3] = gline_color
cur_vertex += 1
if self.vertex_color_buffer:
self.vertex_color_buffer.delete()
self.vertex_color_buffer = numpy2vbo(colors, use_vbos = self.use_vbos)
# ------------------------------------------------------------------------
# DRAWING
# ------------------------------------------------------------------------
    def init(self):
        """Upload the model arrays into OpenGL buffer objects.

        Must be called with a current GL context. Any previously created
        buffers are deleted first; once the model is fully loaded the
        host-side numpy arrays are dropped, since the data then lives
        only in the GPU buffers.
        """
        with self.lock:
            self.layers_loaded = self.max_layers
            self.initialized = True
            if self.buffers_created:
                # Release the previous GPU buffers before re-uploading
                self.travel_buffer.delete()
                self.index_buffer.delete()
                self.vertex_buffer.delete()
                self.vertex_color_buffer.delete()
                self.vertex_normal_buffer.delete()
            self.travel_buffer = numpy2vbo(self.travels, use_vbos = self.use_vbos)
            self.index_buffer = numpy2vbo(self.indices, use_vbos = self.use_vbos,
                                          target = GL_ELEMENT_ARRAY_BUFFER)
            self.vertex_buffer = numpy2vbo(self.vertices, use_vbos = self.use_vbos)
            self.vertex_color_buffer = numpy2vbo(self.colors, use_vbos = self.use_vbos)
            self.vertex_normal_buffer = numpy2vbo(self.normals, use_vbos = self.use_vbos)
            if self.fully_loaded:
                # Delete numpy arrays after creating VBOs after full load
                self.travels = None
                self.indices = None
                self.vertices = None
                self.colors = None
                self.normals = None
            self.buffers_created = True
    def display(self, mode_2d=False):
        """Render travel moves and extruded paths.

        mode_2d is accepted for interface compatibility but is not used
        by this implementation.
        """
        with self.lock:
            glPushMatrix()
            # Apply the model's placement offset on the bed
            glTranslatef(self.offset_x, self.offset_y, 0)
            glEnableClientState(GL_VERTEX_ARRAY)
            has_vbo = isinstance(self.vertex_buffer, VertexBufferObject)
            if self.display_travels:
                self._display_travels(has_vbo)
            # Extruded paths are lit 3D geometry with per-vertex normals
            # and colors; travels above are plain unlit lines
            glEnable(GL_LIGHTING)
            glEnableClientState(GL_NORMAL_ARRAY)
            glEnableClientState(GL_COLOR_ARRAY)
            glMaterialfv(GL_FRONT, GL_SPECULAR, vec(1, 1, 1, 1))
            glMaterialfv(GL_FRONT_AND_BACK, GL_EMISSION, vec(0, 0, 0, 0))
            glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
            glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
            self._display_movements(has_vbo)
            glDisable(GL_LIGHTING)
            glDisableClientState(GL_COLOR_ARRAY)
            glDisableClientState(GL_VERTEX_ARRAY)
            glDisableClientState(GL_NORMAL_ARRAY)
            glPopMatrix()
    def _display_travels(self, has_vbo):
        """Draw travel (non-extruding) moves as GL_LINES.

        has_vbo is accepted for symmetry with _display_movements; the
        buffer wrapper's ptr works for both VBO and array code paths.
        """
        self.travel_buffer.bind()
        glVertexPointer(3, GL_FLOAT, 0, self.travel_buffer.ptr)
        # Prevent race condition by using the number of currently loaded layers
        max_layers = self.layers_loaded
        # TODO: show current layer travels in a different color
        end = self.layer_stops[min(self.num_layers_to_draw, max_layers)]
        end_index = self.count_travel_indices[end]
        glColor4f(*self.color_travel)
        if self.only_current:
            # Restrict drawing to the currently selected layer's travels
            if self.num_layers_to_draw < max_layers:
                end_prev_layer = self.layer_stops[self.num_layers_to_draw - 1]
                start_index = self.count_travel_indices[end_prev_layer + 1]
                glDrawArrays(GL_LINES, start_index, end_index - start_index + 1)
        else:
            glDrawArrays(GL_LINES, 0, end_index)
        self.travel_buffer.unbind()
def _draw_elements(self, start, end, draw_type = GL_TRIANGLES):
# Don't attempt printing empty layer
if self.count_print_indices[end] == self.count_print_indices[start - 1]:
return
glDrawRangeElements(draw_type,
self.count_print_vertices[start - 1],
self.count_print_vertices[end] - 1,
self.count_print_indices[end] - self.count_print_indices[start - 1],
GL_UNSIGNED_INT,
sizeof(GLuint) * self.count_print_indices[start - 1])
    def _display_movements(self, has_vbo):
        """Draw the extruded geometry in up to four passes:
        already-printed geometry, unprinted geometry below the current
        layer, the current layer itself, and (when no specific layer is
        selected) the remainder.

        has_vbo is accepted for symmetry with _display_travels; the
        buffer wrappers' ptr works for both VBO and array code paths.
        """
        self.vertex_buffer.bind()
        glVertexPointer(3, GL_FLOAT, 0, self.vertex_buffer.ptr)
        self.vertex_color_buffer.bind()
        glColorPointer(3, GL_FLOAT, 0, self.vertex_color_buffer.ptr)
        self.vertex_normal_buffer.bind()
        glNormalPointer(GL_FLOAT, 0, self.vertex_normal_buffer.ptr)
        self.index_buffer.bind()
        # Prevent race condition by using the number of currently loaded layers
        max_layers = self.layers_loaded
        start = 1
        layer_selected = self.num_layers_to_draw <= max_layers
        if layer_selected:
            # Last gline index belonging to the layer below the current one
            end_prev_layer = self.layer_stops[self.num_layers_to_draw - 1]
        else:
            end_prev_layer = 0
        end = self.layer_stops[min(self.num_layers_to_draw, max_layers)]
        glDisableClientState(GL_COLOR_ARRAY)
        glColor3f(*self.color_printed[:-1])
        # Draw printed stuff until end or end_prev_layer
        cur_end = min(self.printed_until, end)
        if not self.only_current:
            if 1 <= end_prev_layer <= cur_end:
                self._draw_elements(1, end_prev_layer)
            elif cur_end >= 1:
                self._draw_elements(1, cur_end)
        glEnableClientState(GL_COLOR_ARRAY)
        # Draw nonprinted stuff until end_prev_layer
        start = max(cur_end, 1)
        if end_prev_layer >= start:
            if not self.only_current:
                self._draw_elements(start, end_prev_layer)
            cur_end = end_prev_layer
        # Draw current layer
        if layer_selected:
            glDisableClientState(GL_COLOR_ARRAY)
            glColor3f(*self.color_current_printed[:-1])
            if cur_end > end_prev_layer:
                self._draw_elements(end_prev_layer + 1, cur_end)
            glColor3f(*self.color_current[:-1])
            if end > cur_end:
                self._draw_elements(cur_end + 1, end)
            glEnableClientState(GL_COLOR_ARRAY)
        # Draw non printed stuff until end (if not ending at a given layer)
        start = max(self.printed_until, 1)
        if not layer_selected and end >= start:
            self._draw_elements(start, end)
        self.index_buffer.unbind()
        self.vertex_buffer.unbind()
        self.vertex_color_buffer.unbind()
        self.vertex_normal_buffer.unbind()
class GcodeModelLight(Model):
    """
    Model for displaying Gcode data.

    Lightweight variant: each move is drawn as a plain GL_LINES segment
    with per-vertex RGBA colors — no extruded geometry, no lighting.
    """
    # RGBA colors for travels and each tool's extrusion moves
    color_travel = (0.6, 0.6, 0.6, 0.6)
    color_tool0 = (1.0, 0.0, 0.0, 0.6)
    color_tool1 = (0.67, 0.05, 0.9, 0.6)
    color_tool2 = (1.0, 0.8, 0., 0.6)
    color_tool3 = (1.0, 0., 0.62, 0.6)
    color_tool4 = (0., 1.0, 0.58, 0.6)
    color_printed = (0.2, 0.75, 0, 0.6)
    color_current = (0, 0.9, 1.0, 0.8)
    color_current_printed = (0.1, 0.4, 0, 0.8)
    # Loading / GL state flags
    buffers_created = False
    use_vbos = True
    loaded = False
    fully_loaded = False
    gcode = None

    def load_data(self, model_data, callback=None):
        """Generator filling the vertex/color arrays from model_data.

        Yields the index of each processed layer (so the caller can
        update progress and redraw incrementally), then yields None
        when the whole model has been processed.
        """
        t_start = time.time()
        self.gcode = model_data
        self.layer_idxs_map = {}
        self.layer_stops = [0]
        prev_pos = (0, 0, 0)
        layer_idx = 0
        nlines = len(model_data)
        # Per segment: 2 endpoints (6 floats) and 2 RGBA colors (8 floats)
        vertices = self.vertices = numpy.zeros(nlines * 6, dtype = GLfloat)
        vertex_k = 0
        colors = self.colors = numpy.zeros(nlines * 8, dtype = GLfloat)
        color_k = 0
        self.printed_until = -1
        self.only_current = False
        prev_gline = None
        while layer_idx < len(model_data.all_layers):
            with self.lock:
                nlines = len(model_data)
                if nlines * 6 > vertices.size:
                    # The gcode grew while we were loading (e.g. live
                    # streaming): grow the buffers to match
                    self.vertices.resize(nlines * 6, refcheck = False)
                    self.colors.resize(nlines * 8, refcheck = False)
                layer = model_data.all_layers[layer_idx]
                has_movement = False
                for gline in layer:
                    if not gline.is_move:
                        continue
                    if gline.x is None and gline.y is None and gline.z is None:
                        continue
                    has_movement = True
                    for (current_pos, interpolated) in interpolate_arcs(gline, prev_gline):
                        if self.vertices.size < (vertex_k + 100 * 6):
                            # arc interpolation extra points allocation
                            ratio = (vertex_k + 100 * 6) / self.vertices.size * 1.5
                            # print(f"gl realloc lite {self.vertices.size} -> {int(self.vertices.size * ratio)}")
                            self.vertices.resize(int(self.vertices.size * ratio), refcheck = False)
                            self.colors.resize(int(self.colors.size * ratio), refcheck = False)
                        # One line segment from the previous to the
                        # current position, both endpoints sharing the
                        # move's color
                        vertices[vertex_k] = prev_pos[0]
                        vertices[vertex_k + 1] = prev_pos[1]
                        vertices[vertex_k + 2] = prev_pos[2]
                        vertices[vertex_k + 3] = current_pos[0]
                        vertices[vertex_k + 4] = current_pos[1]
                        vertices[vertex_k + 5] = current_pos[2]
                        vertex_k += 6
                        vertex_color = self.movement_color(gline)
                        colors[color_k] = vertex_color[0]
                        colors[color_k + 1] = vertex_color[1]
                        colors[color_k + 2] = vertex_color[2]
                        colors[color_k + 3] = vertex_color[3]
                        colors[color_k + 4] = vertex_color[0]
                        colors[color_k + 5] = vertex_color[1]
                        colors[color_k + 6] = vertex_color[2]
                        colors[color_k + 7] = vertex_color[3]
                        color_k += 8
                        prev_pos = current_pos
                        prev_gline = gline
                    gline.gcview_end_vertex = vertex_k // 3
                if has_movement:
                    self.layer_stops.append(vertex_k // 3)
                    self.layer_idxs_map[layer_idx] = len(self.layer_stops) - 1
                    self.max_layers = len(self.layer_stops) - 1
                    self.num_layers_to_draw = self.max_layers + 1
                    self.initialized = False
                    self.loaded = True
            if callback:
                callback(layer_idx + 1)
            yield layer_idx
            layer_idx += 1
        with self.lock:
            self.dims = ((model_data.xmin, model_data.xmax, model_data.width),
                         (model_data.ymin, model_data.ymax, model_data.depth),
                         (model_data.zmin, model_data.zmax, model_data.height))
            # Shrink the buffers down to the data actually produced
            self.vertices.resize(vertex_k, refcheck = False)
            self.colors.resize(color_k, refcheck = False)
            self.max_layers = len(self.layer_stops) - 1
            self.num_layers_to_draw = self.max_layers + 1
            self.initialized = False
            self.loaded = True
            self.fully_loaded = True
        t_end = time.time()
        logging.debug(_('Initialized 3D visualization in %.2f seconds') % (t_end - t_start))
        logging.debug(_('Vertex count: %d') % (len(self.vertices) // 3))
        yield None

    def copy(self):
        """Return a shallow clone sharing this model's loaded arrays;
        the clone rebuilds its own GL buffers on init()."""
        copy = GcodeModelLight()
        for var in ["vertices", "colors", "max_layers",
                    "num_layers_to_draw", "printed_until",
                    "layer_stops", "dims", "only_current",
                    "layer_idxs_map", "gcode"]:
            setattr(copy, var, getattr(self, var))
        copy.loaded = True
        copy.fully_loaded = True
        copy.initialized = False
        return copy

    # ------------------------------------------------------------------------
    # DRAWING
    # ------------------------------------------------------------------------

    def init(self):
        """Upload vertex and color arrays into GL buffers.

        Must be called with a current GL context; after a full load the
        numpy arrays are released since the data lives in the buffers.
        """
        with self.lock:
            self.layers_loaded = self.max_layers
            self.initialized = True
            if self.buffers_created:
                # Release previous buffers before re-uploading
                self.vertex_buffer.delete()
                self.vertex_color_buffer.delete()
            self.vertex_buffer = numpy2vbo(self.vertices, use_vbos = self.use_vbos)
            self.vertex_color_buffer = numpy2vbo(self.colors, use_vbos = self.use_vbos)  # each pair of vertices shares the color
            if self.fully_loaded:
                # Delete numpy arrays after creating VBOs after full load
                self.vertices = None
                self.colors = None
            self.buffers_created = True

    def display(self, mode_2d=False):
        """Render all movements as colored lines."""
        with self.lock:
            glPushMatrix()
            # Apply the model's placement offset on the bed
            glTranslatef(self.offset_x, self.offset_y, 0)
            glEnableClientState(GL_VERTEX_ARRAY)
            glEnableClientState(GL_COLOR_ARRAY)
            self._display_movements(mode_2d)
            glDisableClientState(GL_COLOR_ARRAY)
            glDisableClientState(GL_VERTEX_ARRAY)
            glPopMatrix()

    def _display_movements(self, mode_2d=False):
        """Draw the line segments in passes: printed geometry, unprinted
        geometry below the current layer, the current layer (with a
        thicker line), and the remainder."""
        self.vertex_buffer.bind()
        has_vbo = isinstance(self.vertex_buffer, VertexBufferObject)
        if has_vbo:
            glVertexPointer(3, GL_FLOAT, 0, None)
        else:
            glVertexPointer(3, GL_FLOAT, 0, self.vertex_buffer.ptr)
        self.vertex_color_buffer.bind()
        if has_vbo:
            glColorPointer(4, GL_FLOAT, 0, None)
        else:
            glColorPointer(4, GL_FLOAT, 0, self.vertex_color_buffer.ptr)
        # Prevent race condition by using the number of currently loaded layers
        max_layers = self.layers_loaded
        start = 0
        if self.num_layers_to_draw <= max_layers:
            end_prev_layer = self.layer_stops[self.num_layers_to_draw - 1]
        else:
            # -1 acts as a sentinel for "no specific layer selected"
            end_prev_layer = -1
        end = self.layer_stops[min(self.num_layers_to_draw, max_layers)]
        glDisableClientState(GL_COLOR_ARRAY)
        glColor4f(*self.color_printed)
        # Draw printed stuff until end or end_prev_layer
        cur_end = min(self.printed_until, end)
        if not self.only_current:
            if 0 <= end_prev_layer <= cur_end:
                glDrawArrays(GL_LINES, start, end_prev_layer)
            elif cur_end >= 0:
                glDrawArrays(GL_LINES, start, cur_end)
        glEnableClientState(GL_COLOR_ARRAY)
        # Draw nonprinted stuff until end_prev_layer
        start = max(cur_end, 0)
        if end_prev_layer >= start:
            if not self.only_current:
                glDrawArrays(GL_LINES, start, end_prev_layer - start)
            cur_end = end_prev_layer
        # Draw current layer
        if end_prev_layer >= 0:
            glDisableClientState(GL_COLOR_ARRAY)
            # Backup & increase line width
            orig_linewidth = (GLfloat)()
            glGetFloatv(GL_LINE_WIDTH, orig_linewidth)
            glLineWidth(2.0)
            glColor4f(*self.color_current_printed)
            if cur_end > end_prev_layer:
                glDrawArrays(GL_LINES, end_prev_layer, cur_end - end_prev_layer)
            glColor4f(*self.color_current)
            if end > cur_end:
                glDrawArrays(GL_LINES, cur_end, end - cur_end)
            # Restore line width
            glLineWidth(orig_linewidth)
            glEnableClientState(GL_COLOR_ARRAY)
        # Draw non printed stuff until end (if not ending at a given layer)
        start = max(self.printed_until, 0)
        end = end - start
        if end_prev_layer < 0 and end > 0 and not self.only_current:
            glDrawArrays(GL_LINES, start, end)
        self.vertex_buffer.unbind()
        self.vertex_color_buffer.unbind()
| 48,911 | Python | .py | 964 | 34.974066 | 129 | 0.522109 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,373 | controls.py | kliment_Printrun/printrun/gui/controls.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from .xybuttons import XYButtons, XYButtonsMini
from .zbuttons import ZButtons, ZButtonsMini
from .graph import Graph
from .widgets import TempGauge
from wx.lib.agw.floatspin import FloatSpin
from .utils import make_button, make_custom_button
class XYZControlsSizer(wx.GridBagSizer):
    """Sizer bundling the XY jog pad and the Z jog strip, exposing them
    on *root* as root.xyb and root.zb."""

    def __init__(self, root, parentpanel = None):
        super().__init__()
        panel = parentpanel if parentpanel else root.panel
        root.xyb = XYButtons(panel, root.moveXY, root.homeButtonClicked,
                             root.spacebarAction, root.bgcolor,
                             zcallback=root.moveZ)
        root.xyb.SetToolTip(_('[J]og controls. (Shift)+TAB ESC Shift/Ctrl+(arrows PgUp/PgDn)'))
        root.zb = ZButtons(panel, root.moveZ, root.bgcolor)
        self.Add(root.xyb, pos = (0, 1), flag = wx.ALIGN_CENTER)
        self.Add(root.zb, pos = (0, 2), flag = wx.ALIGN_CENTER)
def add_extra_controls(self, root, parentpanel, extra_buttons = None, mini_mode = False):
    """Populate *self* (a GridBagSizer) with the extra printer controls:
    hotend/bed temperature rows, speed and flow sliders, optional
    temperature gauges and graph, temperature readout, and extrusion
    controls.

    self: the sizer being filled
    root: the main window, providing settings, callbacks and panels
    parentpanel: parent widget for everything created here
    extra_buttons: when not None, enables standalone mode and these
                   buttons are placed instead of the standard ones
    mini_mode: use the compact layout
    """
    standalone_mode = extra_buttons is not None
    base_line = 1 if standalone_mode else 2
    # Compute the grid rows used by the gauges and the temperature
    # readout, which depend on the chosen layout
    if standalone_mode:
        gauges_base_line = base_line + 10
    elif mini_mode and root.display_graph:
        gauges_base_line = base_line + 7
    else:
        gauges_base_line = base_line + 6
    tempdisp_line = gauges_base_line + (2 if root.display_gauges else 0)
    if mini_mode and root.display_graph:
        e_base_line = base_line + 3
    else:
        e_base_line = base_line + 2
    # (row, column) grid position for each named widget
    pos_mapping = {
        "htemp_label": (base_line + 0, 0),
        "htemp_off": (base_line + 0, 2),
        "htemp_val": (base_line + 0, 3),
        "htemp_set": (base_line + 0, 4),
        "btemp_label": (base_line + 1, 0),
        "btemp_off": (base_line + 1, 2),
        "btemp_val": (base_line + 1, 3),
        "btemp_set": (base_line + 1, 4),
        "ebuttons": (e_base_line + 0, 0),
        "esettings": (e_base_line + 1, 0),
        "speedcontrol": (e_base_line + 2, 0),
        "flowcontrol": (e_base_line + 3, 0),
        "htemp_gauge": (gauges_base_line + 0, 0),
        "btemp_gauge": (gauges_base_line + 1, 0),
        "tempdisp": (tempdisp_line, 0),
        "extrude": (3, 0),
        "reverse": (3, 2),
    }
    # (rowspan, colspan) for each named widget
    span_mapping = {
        "htemp_label": (1, 2),
        "htemp_off": (1, 1),
        "htemp_val": (1, 1),
        "htemp_set": (1, 1 if root.display_graph else 2),
        "btemp_label": (1, 2),
        "btemp_off": (1, 1),
        "btemp_val": (1, 1),
        "btemp_set": (1, 1 if root.display_graph else 2),
        "ebuttons": (1, 5 if root.display_graph else 6),
        "esettings": (1, 5 if root.display_graph else 6),
        "speedcontrol": (1, 5 if root.display_graph else 6),
        "flowcontrol": (1, 5 if root.display_graph else 6),
        "htemp_gauge": (1, 5 if mini_mode else 6),
        "btemp_gauge": (1, 5 if mini_mode else 6),
        "tempdisp": (1, 5 if mini_mode else 6),
        "extrude": (1, 2),
        "reverse": (1, 3),
    }
    # The temperature graph sits in a different spot per layout
    if standalone_mode:
        pos_mapping["tempgraph"] = (base_line + 6, 0)
        span_mapping["tempgraph"] = (3, 2)
    elif mini_mode:
        pos_mapping["tempgraph"] = (base_line + 2, 0)
        span_mapping["tempgraph"] = (1, 5)
    else:
        pos_mapping["tempgraph"] = (base_line + 0, 5)
        span_mapping["tempgraph"] = (5, 1)
    if mini_mode:
        pos_mapping["etool_label"] = (0, 0)
        pos_mapping["etool_val"] = (0, 1)
        pos_mapping["edist_label"] = (0, 2)
        pos_mapping["edist_val"] = (0, 3)
        pos_mapping["edist_unit"] = (0, 4)
    else:
        pos_mapping["edist_label"] = (0, 0)
        pos_mapping["edist_val"] = (1, 0)
        pos_mapping["edist_unit"] = (1, 1)
        pos_mapping["efeed_label"] = (0, 2)
        pos_mapping["efeed_val"] = (1, 2)
        pos_mapping["efeed_unit"] = (1, 3)

    # Helper: add a widget at its mapped position/span, optionally into
    # a different container sizer
    def add(name, widget, *args, **kwargs):
        kwargs["pos"] = pos_mapping[name]
        if name in span_mapping:
            kwargs["span"] = span_mapping[name]
        if "container" in kwargs:
            container = kwargs["container"]
            del kwargs["container"]
        else:
            container = self
        container.Add(widget, *args, **kwargs)

    # Hotend & bed temperatures #

    # Hotend temp
    add("htemp_label", wx.StaticText(parentpanel, -1, _("Heat:")), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
    root.settoff = make_button(parentpanel, _("Off"), lambda e: root.do_settemp("0.0"), _("Switch Hotend Off"), size = (38, -1), style = wx.BU_EXACTFIT)
    root.printerControls.append(root.settoff)
    add("htemp_off", root.settoff)
    root.htemp = wx.ComboBox(parentpanel, style = wx.CB_DROPDOWN, size = (115, -1))
    root.htemp.SetToolTip(wx.ToolTip(_("Select Temperature for [H]otend")))
    root.htemp.Bind(wx.EVT_COMBOBOX, root.htemp_change)
    add("htemp_val", root.htemp)
    root.settbtn = make_button(parentpanel, _("Set"), root.do_settemp, _("Switch Hotend On"), size = (38, -1), style = wx.BU_EXACTFIT)
    root.printerControls.append(root.settbtn)
    add("htemp_set", root.settbtn, flag = wx.EXPAND)

    # Bed temp
    add("btemp_label", wx.StaticText(parentpanel, -1, _("Bed:")), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
    root.setboff = make_button(parentpanel, _("Off"), lambda e: root.do_bedtemp("0.0"), _("Switch Heated Bed Off"), size = (38, -1), style = wx.BU_EXACTFIT)
    root.printerControls.append(root.setboff)
    add("btemp_off", root.setboff)
    root.btemp = wx.ComboBox(parentpanel, style = wx.CB_DROPDOWN, size = (115, -1))
    root.btemp.SetToolTip(wx.ToolTip(_("Select Temperature for Heated [B]ed")))
    root.btemp.Bind(wx.EVT_COMBOBOX, root.btemp_change)
    add("btemp_val", root.btemp)
    root.setbbtn = make_button(parentpanel, _("Set"), root.do_bedtemp, _("Switch Heated Bed On"), size = (38, -1), style = wx.BU_EXACTFIT)
    root.printerControls.append(root.setbbtn)
    add("btemp_set", root.setbbtn, flag = wx.EXPAND)

    # Fill a temperature combobox with "temp (label)" choices, adding a
    # 'user' entry when the last-used temp isn't among the presets
    def set_labeled(temp, choices, widget):
        choices = [(float(p[1]), p[0]) for p in choices.items()]
        if not next((1 for p in choices if p[0] == temp), False):
            choices.append((temp, 'user'))
        choices = sorted(choices)
        widget.Items = ['%s (%s)'%tl for tl in choices]
        widget.Selection = next((i for i, tl in enumerate(choices) if tl[0] == temp), -1)
    set_labeled(root.settings.last_bed_temperature, root.bedtemps, root.btemp)
    set_labeled(root.settings.last_temperature, root.temps, root.htemp)

    # Speed control #
    speedpanel = root.newPanel(parentpanel)
    speedsizer = wx.BoxSizer(wx.HORIZONTAL)
    speedsizer.Add(wx.StaticText(speedpanel, -1, _("Print speed:")), flag = wx.ALIGN_CENTER_VERTICAL)
    root.speed_slider = wx.Slider(speedpanel, -1, 100, 1, 300)
    speedsizer.Add(root.speed_slider, 1, flag = wx.EXPAND)
    root.speed_spin = wx.SpinCtrlDouble(speedpanel, -1, initial = 100, min = 1, max = 300, style = wx.ALIGN_LEFT, size = (115, -1))
    root.speed_spin.SetDigits(0)
    speedsizer.Add(root.speed_spin, 0, flag = wx.ALIGN_CENTER_VERTICAL)
    root.speed_label = wx.StaticText(speedpanel, -1, _("%"))
    speedsizer.Add(root.speed_label, flag = wx.ALIGN_CENTER_VERTICAL)

    # Apply the chosen factor and clear the "pending change" highlight
    def speedslider_set(event):
        root.do_setspeed()
        root.speed_setbtn.SetBackgroundColour(wx.NullColour)
    root.speed_setbtn = make_button(speedpanel, _("Set"), speedslider_set, _("Set print speed factor"), size = (38, -1), style = wx.BU_EXACTFIT)
    root.printerControls.append(root.speed_setbtn)
    speedsizer.Add(root.speed_setbtn, flag = wx.ALIGN_CENTER)
    speedpanel.SetSizer(speedsizer)
    add("speedcontrol", speedpanel, flag = wx.EXPAND)

    # Keep slider and spinbox in sync; red button = unapplied change
    def speedslider_spin(event):
        value = root.speed_spin.GetValue()
        root.speed_setbtn.SetBackgroundColour("red")
        root.speed_slider.SetValue(int(value))
    root.speed_spin.Bind(wx.EVT_SPINCTRLDOUBLE, speedslider_spin)

    def speedslider_scroll(event):
        value = root.speed_slider.GetValue()
        root.speed_setbtn.SetBackgroundColour("red")
        root.speed_spin.SetValue(value)
    root.speed_slider.Bind(wx.EVT_SCROLL, speedslider_scroll)

    # Flow control #
    flowpanel = root.newPanel(parentpanel)
    flowsizer = wx.BoxSizer(wx.HORIZONTAL)
    flowsizer.Add(wx.StaticText(flowpanel, -1, _("Print flow:")), flag = wx.ALIGN_CENTER_VERTICAL)
    root.flow_slider = wx.Slider(flowpanel, -1, 100, 1, 300)
    flowsizer.Add(root.flow_slider, 1, flag = wx.EXPAND)
    root.flow_spin = wx.SpinCtrlDouble(flowpanel, -1, initial = 100, min = 1, max = 300, style = wx.ALIGN_LEFT, size = (115, -1))
    flowsizer.Add(root.flow_spin, 0, flag = wx.ALIGN_CENTER_VERTICAL)
    root.flow_label = wx.StaticText(flowpanel, -1, _("%"))
    flowsizer.Add(root.flow_label, flag = wx.ALIGN_CENTER_VERTICAL)

    # Apply the chosen factor and clear the "pending change" highlight
    def flowslider_set(event):
        root.do_setflow()
        root.flow_setbtn.SetBackgroundColour(wx.NullColour)
    root.flow_setbtn = make_button(flowpanel, _("Set"), flowslider_set, _("Set print flow factor"), size = (38, -1), style = wx.BU_EXACTFIT)
    root.printerControls.append(root.flow_setbtn)
    flowsizer.Add(root.flow_setbtn, flag = wx.ALIGN_CENTER)
    flowpanel.SetSizer(flowsizer)
    add("flowcontrol", flowpanel, flag = wx.EXPAND)

    # Keep slider and spinbox in sync; red button = unapplied change
    def flowslider_spin(event):
        value = root.flow_spin.GetValue()
        root.flow_setbtn.SetBackgroundColour("red")
        root.flow_slider.SetValue(int(value))
    root.flow_spin.Bind(wx.EVT_SPINCTRLDOUBLE, flowslider_spin)

    def flowslider_scroll(event):
        value = root.flow_slider.GetValue()
        root.flow_setbtn.SetBackgroundColour("red")
        root.flow_spin.SetValue(value)
    root.flow_slider.Bind(wx.EVT_SCROLL, flowslider_scroll)

    # Temperature gauges #

    if root.display_gauges:
        root.hottgauge = TempGauge(parentpanel, size = (-1, 24), title = _("Heater:"), maxval = 300, bgcolor = root.bgcolor)
        root.hottgauge.SetTarget(root.settings.last_temperature)
        # root.hsetpoint = root.settings.last_temperature
        add("htemp_gauge", root.hottgauge, flag = wx.EXPAND)
        root.bedtgauge = TempGauge(parentpanel, size = (-1, 24), title = _("Bed:"), maxval = 150, bgcolor = root.bgcolor)
        root.bedtgauge.SetTarget(root.settings.last_bed_temperature)
        # root.bsetpoint = root.settings.last_bed_temperature
        add("btemp_gauge", root.bedtgauge, flag = wx.EXPAND)

        # Mouse wheel over a gauge nudges its setpoint by one degree
        def scroll_gauge(rot, cmd, setpoint):
            if rot:
                temp = setpoint + (1 if rot > 0 else -1)
                cmd(str(max(0, temp)))

        def hotend_handler(e):
            scroll_gauge(e.WheelRotation, root.do_settemp, root.hsetpoint)

        def bed_handler(e):
            scroll_gauge(e.WheelRotation, root.do_bedtemp, root.bsetpoint)
        root.hottgauge.Bind(wx.EVT_MOUSEWHEEL, hotend_handler)
        root.bedtgauge.Bind(wx.EVT_MOUSEWHEEL, bed_handler)

        # Reflect combobox edits on the gauge target markers
        def updateGauge(e, gauge):
            gauge.SetTarget(float(e.String.split()[0]))
        root.htemp.Bind(wx.EVT_TEXT, lambda e: updateGauge(e, root.hottgauge))
        root.btemp.Bind(wx.EVT_TEXT, lambda e: updateGauge(e, root.bedtgauge))

    # Temperature (M105) feedback display #
    root.tempdisp = wx.StaticText(parentpanel, -1, "", style = wx.ST_NO_AUTORESIZE)

    def on_tempdisp_size(evt):
        root.tempdisp.Wrap(root.tempdisp.GetSize().width)
    root.tempdisp.Bind(wx.EVT_SIZE, on_tempdisp_size)

    # Re-wrap the text whenever the label is updated
    def tempdisp_setlabel(label):
        wx.StaticText.SetLabel(root.tempdisp, label)
        root.tempdisp.Wrap(root.tempdisp.GetSize().width)
        root.tempdisp.SetSize((-1, root.tempdisp.GetBestSize().height))
    root.tempdisp.SetLabel = tempdisp_setlabel
    add("tempdisp", root.tempdisp, flag = wx.EXPAND)

    # Temperature graph #

    if root.display_graph:
        root.graph = Graph(parentpanel, wx.ID_ANY, root)
        add("tempgraph", root.graph, flag = wx.EXPAND | wx.ALL, border = 5)
        root.graph.Bind(wx.EVT_LEFT_DOWN, root.graph.show_graph_window)

    # Extrusion controls #

    # Extrusion settings
    esettingspanel = root.newPanel(parentpanel)
    esettingssizer = wx.GridBagSizer()
    esettingssizer.SetEmptyCellSize((0, 0))
    root.edist = wx.SpinCtrlDouble(esettingspanel, -1, initial = root.settings.last_extrusion, min = 0, max = 1000, size = (135, -1))
    root.edist.SetDigits(1)
    root.edist.Bind(wx.EVT_SPINCTRLDOUBLE, root.setfeeds)
    root.edist.SetBackgroundColour((225, 200, 200))
    root.edist.SetForegroundColour("black")
    root.edist.Bind(wx.EVT_TEXT, root.setfeeds)
    add("edist_label", wx.StaticText(esettingspanel, -1, _("Length:")), container = esettingssizer, flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, border = 5)
    add("edist_val", root.edist, container = esettingssizer, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 5)
    unit_label = _("mm") if mini_mode else _("mm @")
    add("edist_unit", wx.StaticText(esettingspanel, -1, unit_label), container = esettingssizer, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 5)
    root.edist.SetToolTip(wx.ToolTip(_("Amount to Extrude or Retract (mm)")))
    if not mini_mode:
        # Extrusion feedrate is only shown in the full layout
        root.efeedc = wx.SpinCtrlDouble(esettingspanel, -1, initial = root.settings.e_feedrate, min = 0, max = 50000, size = (145, -1))
        root.efeedc.SetDigits(1)
        root.efeedc.Bind(wx.EVT_SPINCTRLDOUBLE, root.setfeeds)
        root.efeedc.SetToolTip(wx.ToolTip(_("Extrude / Retract speed (mm/min)")))
        root.efeedc.SetBackgroundColour((225, 200, 200))
        root.efeedc.SetForegroundColour("black")
        root.efeedc.Bind(wx.EVT_TEXT, root.setfeeds)
        add("efeed_val", root.efeedc, container = esettingssizer, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 5)
        add("efeed_label", wx.StaticText(esettingspanel, -1, _("Speed:")), container = esettingssizer, flag = wx.ALIGN_LEFT)
        add("efeed_unit", wx.StaticText(esettingspanel, -1, _("mm/\nmin")), container = esettingssizer, flag = wx.ALIGN_CENTER)
    else:
        root.efeedc = None
    esettingspanel.SetSizer(esettingssizer)
    add("esettings", esettingspanel, flag = wx.ALIGN_LEFT)

    if not standalone_mode:
        ebuttonspanel = root.newPanel(parentpanel)
        ebuttonssizer = wx.BoxSizer(wx.HORIZONTAL)
        if root.settings.extruders > 1:
            etool_sel_panel = esettingspanel if mini_mode else ebuttonspanel
            etool_label = wx.StaticText(etool_sel_panel, -1, _("Tool:"))
            if root.settings.extruders == 2:
                # Two extruders: a simple toggle button is enough
                root.extrudersel = wx.Button(etool_sel_panel, -1, "0", style = wx.BU_EXACTFIT)
                root.extrudersel.SetToolTip(wx.ToolTip(_("Click to switch current extruder")))

                def extrudersel_cb(event):
                    if root.extrudersel.GetLabel() == "1":
                        new = "0"
                    else:
                        new = "1"
                    root.extrudersel.SetLabel(new)
                    root.tool_change(event)
                root.extrudersel.Bind(wx.EVT_BUTTON, extrudersel_cb)
                # Give the button a combobox-like Get/SetValue interface
                root.extrudersel.GetValue = root.extrudersel.GetLabel
                root.extrudersel.SetValue = root.extrudersel.SetLabel
            else:
                choices = [str(i) for i in range(0, root.settings.extruders)]
                root.extrudersel = wx.ComboBox(etool_sel_panel, -1, choices = choices,
                                               style = wx.CB_DROPDOWN | wx.CB_READONLY,
                                               size = (50, -1))
                root.extrudersel.SetToolTip(wx.ToolTip(_("Select current extruder")))
                root.extrudersel.SetValue(choices[0])
                root.extrudersel.Bind(wx.EVT_COMBOBOX, root.tool_change)
            root.printerControls.append(root.extrudersel)
            if mini_mode:
                add("etool_label", etool_label, container = esettingssizer, flag = wx.ALIGN_CENTER)
                add("etool_val", root.extrudersel, container = esettingssizer)
            else:
                ebuttonssizer.Add(etool_label, flag = wx.ALIGN_CENTER)
                ebuttonssizer.Add(root.extrudersel)
        for key in ["extrude", "reverse"]:
            desc = root.cpbuttons[key]
            btn = make_custom_button(root, ebuttonspanel, desc,
                                     style = wx.BU_EXACTFIT)
            ebuttonssizer.Add(btn, 1, flag = wx.EXPAND)
        ebuttonspanel.SetSizer(ebuttonssizer)
        add("ebuttons", ebuttonspanel, flag = wx.EXPAND)
    else:
        for key, btn in extra_buttons.items():
            add(key, btn, flag = wx.EXPAND)
class ControlsSizer(wx.GridBagSizer):
    """Grid-bag sizer holding the printer motion (XYZ) and extrusion controls.

    Builds either the full layout (make_standard) or a compact one
    (make_mini) depending on mini_mode.  In standalone_mode the extrude /
    reverse buttons are exposed via self.extra_buttons instead of being
    placed in this sizer.
    """

    def __init__(self, root, parentpanel = None, standalone_mode = False, mini_mode = False):
        super(ControlsSizer, self).__init__()
        if not parentpanel: parentpanel = root.panel
        if mini_mode: self.make_mini(root, parentpanel)
        else: self.make_standard(root, parentpanel, standalone_mode)

    def make_standard(self, root, parentpanel, standalone_mode):
        """Build the full control layout: XYZ pad, feedrate spinners and
        (unless standalone_mode) the extra extrusion controls."""
        lltspanel = root.newPanel(parentpanel)
        llts = wx.BoxSizer(wx.HORIZONTAL)
        lltspanel.SetSizer(llts)
        self.Add(lltspanel, pos = (0, 0), span = (1, 6))
        xyzpanel = root.newPanel(parentpanel)
        self.xyzsizer = XYZControlsSizer(root, xyzpanel)
        xyzpanel.SetSizer(self.xyzsizer)
        self.Add(xyzpanel, pos = (1, 0), span = (1, 6), flag = wx.ALIGN_CENTER)
        self.extra_buttons = {}
        # Grid positions/spans for the buttons that are kept in this sizer.
        pos_mapping = {"extrude": (4, 0),
                       "reverse": (4, 2),
                       }
        span_mapping = {"extrude": (1, 2),
                        "reverse": (1, 3),
                        }
        for key, desc in root.cpbuttons.items():
            if not standalone_mode and key in ["extrude", "reverse"]:
                continue
            # "motorsoff" lives on the feedrate strip, everything else on the parent.
            panel = lltspanel if key == "motorsoff" else parentpanel
            btn = make_custom_button(root, panel, desc)
            if key == "motorsoff":
                llts.Add(btn)
            elif not standalone_mode:
                self.Add(btn, pos = pos_mapping[key], span = span_mapping[key], flag = wx.EXPAND)
            else:
                self.extra_buttons[key] = btn
        root.xyfeedc = wx.SpinCtrl(lltspanel, -1, str(root.settings.xy_feedrate), min = 0, max = 50000, size = (130, -1))
        root.xyfeedc.SetToolTip(wx.ToolTip(_("Set Maximum Speed for X & Y axes (mm/min)")))
        llts.Add(wx.StaticText(lltspanel, -1, _("XY:")), flag = wx.ALIGN_CENTER_VERTICAL)
        llts.Add(root.xyfeedc)
        llts.Add(wx.StaticText(lltspanel, -1, _("mm/min Z:")), flag = wx.ALIGN_CENTER_VERTICAL)
        root.zfeedc = wx.SpinCtrl(lltspanel, -1, str(root.settings.z_feedrate), min = 0, max = 50000, size = (130, -1))
        root.zfeedc.SetToolTip(wx.ToolTip(_("Set Maximum Speed for Z axis (mm/min)")))
        llts.Add(root.zfeedc,)
        # EVT_TEXT is bound in addition to EVT_SPINCTRL so typed values
        # update the feedrates too.
        root.xyfeedc.Bind(wx.EVT_SPINCTRL, root.setfeeds)
        root.zfeedc.Bind(wx.EVT_SPINCTRL, root.setfeeds)
        root.xyfeedc.Bind(wx.EVT_TEXT, root.setfeeds)
        root.zfeedc.Bind(wx.EVT_TEXT, root.setfeeds)
        root.zfeedc.SetBackgroundColour((180, 255, 180))
        root.zfeedc.SetForegroundColour("black")
        if not standalone_mode:
            add_extra_controls(self, root, parentpanel, None)

    def make_mini(self, root, parentpanel):
        """Build the compact (mini mode) layout: mini XY/Z pads plus the
        motors-off button and the mini extra controls."""
        root.xyb = XYButtonsMini(parentpanel, root.moveXY, root.homeButtonClicked,
                                 root.spacebarAction, root.bgcolor,
                                 zcallback = root.moveZ)
        self.Add(root.xyb, pos = (1, 0), span = (1, 4), flag = wx.ALIGN_CENTER)
        root.zb = ZButtonsMini(parentpanel, root.moveZ, root.bgcolor)
        self.Add(root.zb, pos = (0, 4), span = (2, 1), flag = wx.ALIGN_CENTER)
        wx.CallAfter(root.xyb.SetFocus)
        pos_mapping = {"motorsoff": (0, 0),
                       }
        span_mapping = {"motorsoff": (1, 4),
                        }
        btn = make_custom_button(root, parentpanel, root.cpbuttons["motorsoff"])
        self.Add(btn, pos = pos_mapping["motorsoff"], span = span_mapping["motorsoff"], flag = wx.EXPAND)
        add_extra_controls(self, root, parentpanel, None, True)
| 20,869 | Python | .py | 384 | 45.393229 | 181 | 0.632045 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,374 | bufferedcanvas.py | kliment_Printrun/printrun/gui/bufferedcanvas.py | """
BufferedCanvas -- flicker-free canvas widget
Copyright (C) 2005, 2006 Daniel Keep, 2011 Duane Johnson
To use this widget, just override or replace the draw method.
This will be called whenever the widget size changes, or when
the update method is explicitly called.
Please submit any improvements/bugfixes/ideas to the following
url:
http://wiki.wxpython.org/index.cgi/BufferedCanvas
2006-04-29: Added bugfix for a crash on Mac provided by Marc Jans.
"""
# Hint: try removing '.sp4msux0rz'
__author__ = 'Daniel Keep <daniel.keep.sp4msux0rz@gmail.com>'
__license__ = """
This file is part of the Printrun suite.
Printrun is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Printrun is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Printrun. If not, see <http://www.gnu.org/licenses/>.
"""
__all__ = ['BufferedCanvas']
import wx
class BufferedCanvas(wx.Panel):
    """
    Implements a flicker-free canvas widget.

    Standard usage is to subclass this class, and override the
    draw method. The draw method is passed a device context, which
    should be used to do your drawing.

    If you want to force a redraw (for whatever reason), you should
    call the update method. This is because the draw method is never
    called as a result of an EVT_PAINT event.
    """

    # These are our two buffers. Just be aware that when the buffers
    # are flipped, the REFERENCES are swapped. So I wouldn't want to
    # try holding onto explicit references to one or the other ;)
    buffer = None
    backbuffer = None

    def __init__(self,
                 parent,
                 ID=-1,
                 pos = wx.DefaultPosition,
                 size = wx.DefaultSize,
                 style = wx.NO_FULL_REPAINT_ON_RESIZE | wx.WANTS_CHARS):
        wx.Panel.__init__(self, parent, ID, pos, size, style)
        # Bind events
        self.Bind(wx.EVT_PAINT, self.onPaint)
        # Disable background erasing (flicker-licious): swallowing
        # EVT_ERASE_BACKGROUND prevents wx from clearing the window
        # before each paint, which is what causes flicker.
        def disable_event(*pargs, **kwargs):
            pass # the sauce, please
        self.Bind(wx.EVT_ERASE_BACKGROUND, disable_event)

    #
    # General methods
    #

    def draw(self, dc, w, h):
        """
        Stub: called when the canvas needs to be re-drawn.

        dc: device context to draw on
        w, h: current client width and height in pixels
        """
        pass

    def update(self):
        """
        Causes the canvas to be updated.
        """
        self.Refresh()

    def getWidthHeight(self):
        # Client size clamped to at least 1x1 so callers can safely
        # divide by width/height.
        width, height = self.GetClientSize()
        if width == 0:
            width = 1
        if height == 0:
            height = 1
        return (width, height)

    #
    # Event handlers
    #

    def onPaint(self, event):
        # Blit the front buffer to the screen; skip entirely while the
        # window has no usable client area.
        w, h = self.GetClientSize()
        if not w or not h:
            return
        else:
            dc = wx.BufferedPaintDC(self)
            self.draw(dc, w, h)
| 3,276 | Python | .py | 87 | 31.37931 | 72 | 0.666035 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,375 | toolbar.py | kliment_Printrun/printrun/gui/toolbar.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from .utils import make_autosize_button
def MainToolbar(root, parentpanel = None, use_wrapsizer = False):
    """Build the main toolbar and return its sizer.

    Creates the port/baud selectors and the Connect/Reset/Load/SD/Print/
    Pause/Off buttons, storing the widgets on *root* and registering them
    in root.statefulControls / root.printerControls as appropriate.

    root: main window object providing settings and button callbacks.
    parentpanel: panel to create the widgets on (defaults to root.panel).
    use_wrapsizer: use a wx.WrapSizer so the toolbar can wrap onto
        multiple lines instead of a single-row wx.BoxSizer.

    Returns the toolbar sizer; when root.settings.lockbox is set, returns
    an outer sizer that also holds the interface-lock checkbox.
    """
    if not parentpanel: parentpanel = root.panel
    if root.settings.lockbox:
        root.locker = wx.CheckBox(parentpanel, label = _("Lock") + " ")
        root.locker.Bind(wx.EVT_CHECKBOX, root.lock)
        root.locker.SetToolTip(wx.ToolTip(_("Lock graphical interface")))
        glob = wx.BoxSizer(wx.HORIZONTAL)
        parentpanel = root.newPanel(parentpanel)
        glob.Add(parentpanel, 1, flag = wx.EXPAND)
        glob.Add(root.locker, 0, flag = wx.ALIGN_CENTER)
    ToolbarSizer = wx.WrapSizer if use_wrapsizer else wx.BoxSizer
    self = ToolbarSizer(wx.HORIZONTAL)
    root.rescanbtn = make_autosize_button(parentpanel, _("Port"), root.rescanports, _("Communication Settings\nClick to rescan ports"))
    self.Add(root.rescanbtn, 0, wx.TOP | wx.LEFT, 0)
    root.serialport = wx.ComboBox(parentpanel, -1, choices = root.scanserial(),
                                  style = wx.CB_DROPDOWN)
    root.serialport.SetToolTip(wx.ToolTip(_("Select Port Printer is connected to")))
    root.rescanports()
    self.Add(root.serialport)
    self.Add(wx.StaticText(parentpanel, -1, "@"), 0, wx.RIGHT | wx.ALIGN_CENTER, 0)
    root.baud = wx.ComboBox(parentpanel, -1,
                            choices = ["2400", "9600", "19200", "38400",
                                       "57600", "115200", "250000", "500000", "1000000"],
                            style = wx.CB_DROPDOWN, size = (110, -1))
    root.baud.SetToolTip(wx.ToolTip(_("Select Baud rate for printer communication")))
    try:
        # Best effort: default to 115200, then apply the configured rate.
        root.baud.SetValue("115200")
        root.baud.SetValue(str(root.settings.baudrate))
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt.  Keep the best-effort behavior but only for
        # ordinary errors (e.g. missing/invalid baudrate setting).
        pass
    self.Add(root.baud)
    # Connect/Reset/Print/Pause buttons may already exist when the toolbar
    # is rebuilt; in that case reparent them instead of recreating.
    if not hasattr(root, "connectbtn"):
        root.connectbtn_cb_var = root.connect
        root.connectbtn = make_autosize_button(parentpanel, _("&Connect"), root.connectbtn_cb, _("Connect to the printer"))
        root.statefulControls.append(root.connectbtn)
    else:
        root.connectbtn.Reparent(parentpanel)
    self.Add(root.connectbtn)
    if not hasattr(root, "resetbtn"):
        root.resetbtn = make_autosize_button(parentpanel, _("Reset"), root.reset, _("Reset the printer"))
        root.statefulControls.append(root.resetbtn)
    else:
        root.resetbtn.Reparent(parentpanel)
    self.Add(root.resetbtn)
    self.AddStretchSpacer(prop = 1)
    root.loadbtn = make_autosize_button(parentpanel, _("Load file"), root.loadfile, _("Load a 3D model file"), self)
    root.sdbtn = make_autosize_button(parentpanel, _("SD"), root.sdmenu, _("SD Card Printing"), self)
    root.sdbtn.Reparent(parentpanel)
    root.printerControls.append(root.sdbtn)
    if not hasattr(root, "printbtn"):
        root.printbtn = make_autosize_button(parentpanel, _("Print"), root.printfile, _("Start Printing Loaded File"))
        root.statefulControls.append(root.printbtn)
    else:
        root.printbtn.Reparent(parentpanel)
    self.Add(root.printbtn)
    if not hasattr(root, "pausebtn"):
        root.pausebtn = make_autosize_button(parentpanel, _("Pause"), root.pause, _("Pause Current Print"))
        root.statefulControls.append(root.pausebtn)
    else:
        root.pausebtn.Reparent(parentpanel)
    self.Add(root.pausebtn)
    root.offbtn = make_autosize_button(parentpanel, _("Off"), root.off, _("Turn printer off"), self)
    root.printerControls.append(root.offbtn)
    self.AddStretchSpacer(prop = 4)
    if root.settings.lockbox:
        parentpanel.SetSizer(self)
        return glob
    else:
        return self
| 4,272 | Python | .py | 85 | 43.388235 | 135 | 0.682787 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,376 | graph.py | kliment_Printrun/printrun/gui/graph.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from math import log10, floor, ceil
from bisect import bisect_left
from printrun.utils import install_locale
install_locale('pronterface')
from .bufferedcanvas import BufferedCanvas
class GraphWindow(wx.Frame):
    """Detached top-level frame showing an enlarged temperature graph.

    The embedded Graph shares its temperature history lists with
    parent_graph, so both plot the same data.
    """
    def __init__(self, root, parent_graph = None, size = (600, 600)):
        super().__init__(None, title = _("Temperature graph"),
                         size = size)
        self.parentg = parent_graph
        panel = wx.Panel(self)
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.graph = Graph(panel, wx.ID_ANY, root, parent_graph = parent_graph)
        vbox.Add(self.graph, 1, wx.EXPAND)
        panel.SetSizer(vbox)

    def Destroy(self):
        # Stop the plotting timer and unlink from the parent graph before
        # tearing the window down, so the parent can reopen a new window.
        self.graph.StopPlotting()
        if self.parentg is not None:
            self.parentg.window=None
        return super().Destroy()

    def __del__(self):
        # Same unlink/stop as Destroy(), for the case where the object is
        # garbage-collected without Destroy() having run.
        if self.parentg is not None:
            self.parentg.window=None
        self.graph.StopPlotting()
class Graph(BufferedCanvas):
    '''A class to show a Graph with Pronterface.

    Plots histories of extruder/bed temperatures, target temperatures and
    fan power over the last xsteps samples.  When parent_graph is given,
    the history lists are shared with it (used by the detached window).
    '''

    def __init__(self, parent, id, root, pos = wx.DefaultPosition,
                 size = wx.Size(150, 80), style = 0, parent_graph = None):
        # Forcing a no full repaint to stop flickering
        style = style | wx.NO_FULL_REPAINT_ON_RESIZE
        super().__init__(parent, id, pos, size, style)
        self.root = root
        if parent_graph is not None:
            # Share the history lists (not copies) with the parent graph.
            self.extruder0temps = parent_graph.extruder0temps
            self.extruder0targettemps = parent_graph.extruder0targettemps
            self.extruder1temps = parent_graph.extruder1temps
            self.extruder1targettemps = parent_graph.extruder1targettemps
            self.bedtemps = parent_graph.bedtemps
            self.bedtargettemps = parent_graph.bedtargettemps
            self.fanpowers=parent_graph.fanpowers
        else:
            self.extruder0temps = [0]
            self.extruder0targettemps = [0]
            self.extruder1temps = [0]
            self.extruder1targettemps = [0]
            self.bedtemps = [0]
            self.bedtargettemps = [0]
            self.fanpowers= [0]
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.updateTemperatures, self.timer)
        self.Bind(wx.EVT_WINDOW_DESTROY, self.processDestroy)
        self.minyvalue = 0
        self.maxyvalue = 260
        self.rescaley = True # should the Y axis be rescaled dynamically?
        if self.rescaley:
            self._ybounds = Graph._YBounds(self)
        # If rescaley is set then ybars gives merely an estimate
        # Note that "bars" actually indicate the number of internal+external gridlines.
        self.ybars = 5
        self.xbars = 7 # One bar per 10 second
        self.xsteps = 60 # Covering 1 minute in the graph
        self.window = None
        # Rectangles already occupied by text labels in the current redraw,
        # used by the layout* methods to avoid overlapping labels.
        self.reserved = []

    def processDestroy(self, event):
        # print('processDestroy')
        self.StopPlotting()
        self.Unbind(wx.EVT_TIMER)
        event.Skip()

    def show_graph_window(self, event = None):
        """Open (or raise, if already open) the detached graph window."""
        if self.window is None or not self.window:
            self.window = GraphWindow(self.root, self)
            self.window.Show()
            if self.timer.IsRunning():
                self.window.graph.StartPlotting(self.timer.Interval)
        else:
            self.window.Raise()

    def __del__(self):
        if self.window: self.window.Close()

    def updateTemperatures(self, event):
        """Timer callback: extend each history with its latest value and redraw."""
        # print('updateTemperatures')
        self.AddBedTemperature(self.bedtemps[-1])
        self.AddBedTargetTemperature(self.bedtargettemps[-1])
        self.AddExtruder0Temperature(self.extruder0temps[-1])
        self.AddExtruder0TargetTemperature(self.extruder0targettemps[-1])
        self.AddExtruder1Temperature(self.extruder1temps[-1])
        self.AddExtruder1TargetTemperature(self.extruder1targettemps[-1])
        self.AddFanPower(self.fanpowers[-1])
        if self.rescaley:
            self._ybounds.update()
        self.Refresh()

    def drawgrid(self, dc, gc):
        """Draw the background grid, its temperature labels, and the
        "Graph offline" banner when the timer is stopped."""
        # cold, medium, hot = wx.Colour(0, 167, 223),\
        #     wx.Colour(239, 233, 119),\
        #     wx.Colour(210, 50.100)
        # col1 = wx.Colour(255, 0, 0, 255)
        # col2 = wx.Colour(255, 255, 255, 128)
        # b = gc.CreateLinearGradientBrush(0, 0, w, h, col1, col2)
        gc.SetPen(wx.Pen(wx.Colour(255, 0, 0, 0), 1))
        # gc.SetBrush(wx.Brush(wx.Colour(245, 245, 255, 52)))
        # gc.SetBrush(gc.CreateBrush(wx.Brush(wx.Colour(0, 0, 0, 255))))
        gc.SetPen(wx.Pen(wx.Colour(255, 0, 0, 255), 1))
        # gc.DrawLines(wx.Point(0, 0), wx.Point(50, 10))
        font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
        gc.SetFont(font, wx.Colour(self.root.settings.graph_color_text))
        # draw vertical bars
        dc.SetPen(wx.Pen(wx.Colour(self.root.settings.graph_color_grid), 1))
        xscale = float(self.width - 1) / (self.xbars - 1)
        for x in range(self.xbars + 1):
            x = x * xscale
            dc.DrawLine(int(x), 0, int(x), self.height)
        # draw horizontal bars
        spacing = self._calculate_spacing() # spacing between bars, in degrees
        yspan = self.maxyvalue - self.minyvalue
        ybars = int(yspan / spacing) # Should be close to self.ybars
        firstbar = int(ceil(self.minyvalue / spacing)) # in degrees
        dc.SetPen(wx.Pen(wx.Colour(self.root.settings.graph_color_grid), 1))
        for y in range(firstbar, firstbar + ybars + 1):
            # y_pos = y*(float(self.height)/self.ybars)
            degrees = y * spacing
            y_pos = self._y_pos(degrees)
            dc.DrawLine(0, int(y_pos), self.width, int(y_pos))
            label = str(y * spacing)
            label_y = y_pos - font.GetPointSize() / 2
            # Reserve the label's rectangle so curve labels won't overlap it.
            self.layoutText(label, 1, label_y, gc)
            gc.DrawText(label, 1, label_y)
        if not self.timer.IsRunning():
            font = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.BOLD)
            gc.SetFont(font, wx.Colour(3, 4, 4))
            gc.DrawText("Graph offline",
                        self.width / 2 - font.GetPointSize() * 3,
                        self.height / 2 - font.GetPointSize() * 1)
        # dc.DrawCircle(50, 50, 1)
        # gc.SetPen(wx.Pen(wx.Colour(255, 0, 0, 0), 1))
        # gc.DrawLines([[20, 30], [10, 53]])
        # dc.SetPen(wx.Pen(wx.Colour(255, 0, 0, 0), 1))

    def _y_pos(self, temperature):
        """Converts a temperature, in degrees, to a pixel position"""
        # fraction of the screen from the bottom
        frac = (float(temperature - self.minyvalue)
                / (self.maxyvalue - self.minyvalue))
        return int((1.0 - frac) * (self.height - 1))

    def _calculate_spacing(self):
        # Allow grids of spacings 1,2.5,5,10,25,50,100,etc
        yspan = float(self.maxyvalue - self.minyvalue)
        log_yspan = log10(yspan / self.ybars)
        exponent = int(floor(log_yspan))
        # calculate boundary points between allowed spacings
        log1_25 = log10(2) + log10(1) + log10(2.5) - log10(1 + 2.5)
        log25_5 = log10(2) + log10(2.5) + log10(5) - log10(2.5 + 5)
        log5_10 = log10(2) + log10(5) + log10(10) - log10(5 + 10)
        if log_yspan - exponent < log1_25:
            return 10 ** exponent
        elif log1_25 <= log_yspan - exponent < log25_5:
            return 25 * 10 ** (exponent - 1)
        elif log25_5 <= log_yspan - exponent < log5_10:
            return 5 * 10 ** exponent
        else:
            return 10 ** (exponent + 1)

    def drawtemperature(self, dc, gc, temperature_list,
                        text, text_xoffset, color):
        """Plot one history list as a polyline, greyed out when the timer
        is stopped, with an optional label placed near the newest point.

        NOTE(review): text_xoffset appears unused here — verify callers.
        """
        rgba = wx.Colour(color if self.timer.IsRunning() else '#80808080')
        dc.SetPen(wx.Pen(rgba, 1))
        x_add = float(self.width) / self.xsteps
        x_pos = 0.0
        lastxvalue = 0.0
        lastyvalue = temperature_list[-1]
        for temperature in temperature_list:
            y_pos = self._y_pos(temperature)
            if x_pos > 0: # One need 2 points to draw a line.
                dc.DrawLine(int(lastxvalue), int(lastyvalue), int(x_pos), int(y_pos))
            lastxvalue = x_pos
            x_pos += x_add
            lastyvalue = y_pos
        if text:
            font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
            # font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
            gc.SetFont(font, wx.Colour(rgba.RGB))
            pos = self.layoutText(text, lastxvalue, lastyvalue, gc)
            gc.DrawText(text, pos.x, pos.y)

    def layoutRect(self, rc):
        """Find a position for rectangle rc that does not overlap the
        already-reserved label rectangles, preferring a horizontal shift;
        the chosen rectangle is added to self.reserved and returned."""
        res = LtRect(rc)
        # Only rectangles that overlap rc vertically can collide with it.
        reserved = sorted((rs for rs in self.reserved
                           if not (rc.bottom < rs.top or rc.top > rs.bottom)),
                          key=wx.Rect.GetLeft)
        self.boundRect(res)
        # search to the left for gaps large enough to accommodate res
        rci = bisect_left(reserved, res)
        for i in range(rci, len(reserved)-1):
            res.x = reserved[i].right + 1
            if res.right < reserved[i+1].left:
                #found good res
                break
        else:
            # did not find gap to the right
            if reserved:
                #try to respect rc.x at the cost of a gap (50...Bed)
                if res.left < reserved[-1].right:
                    res.x = reserved[-1].right + 1
            if res.right >= self.width:
                #goes beyond window bounds
                # try to the left
                for i in range(min(rci, len(reserved)-1), 0, -1):
                    res.x = reserved[i].left - rc.width
                    if reserved[i-1].right < res.left:
                        break
                else:
                    # no horizontal slot at all: fall back to a vertical shift
                    res = LtRect(self.layoutRectY(rc))
        self.reserved.append(res)
        return res

    def boundRect(self, rc):
        # Clamp rc horizontally so it stays inside the canvas.
        rc.x = min(rc.x, self.width - rc.width)
        return rc

    def layoutRectY(self, rc):
        """Resolve a label collision by moving rc vertically: first above
        the colliding block, then below it, else give up and keep rc.y."""
        top = self.height
        bottom = 0
        collision = False
        res = LtRect(rc)
        res.x = max(self.gridLabelsRight+1, min(rc.x, self.width-rc.width))
        for rs in self.reserved:
            if not (res.right < rs.left or res.left > rs.right):
                collision = True
                top = min(top, rs.Top)
                bottom = max(bottom, rs.bottom)
        if collision:
            res.y = top - rc.height
            if res.y < 0:
                res.y = bottom+1
                if res.bottom >= self.height:
                    res.y = rc.y
        return res

    def layoutText(self, text, x, y, gc):
        """Measure text with gc and reserve a non-overlapping rectangle for it."""
        ext = gc.GetTextExtent(text)
        rc = self.layoutRect(wx.Rect(int(x), int(y), int(ext[0]), int(ext[1])))
        # print('layoutText', text, rc.TopLeft)
        return rc

    def drawfanpower(self, dc, gc):
        self.drawtemperature(dc, gc, self.fanpowers,
                             "Fan", 1, self.root.settings.graph_color_fan)

    def drawbedtemp(self, dc, gc):
        self.drawtemperature(dc, gc, self.bedtemps,
                             "Bed", 2, self.root.settings.graph_color_bedtemp)

    def drawbedtargettemp(self, dc, gc):
        self.drawtemperature(dc, gc, self.bedtargettemps,
                             "Bed Target", 2, self.root.settings.graph_color_bedtarget)

    def drawextruder0temp(self, dc, gc):
        self.drawtemperature(dc, gc, self.extruder0temps,
                             "Ex0", 1, self.root.settings.graph_color_ex0temp)

    def drawextruder0targettemp(self, dc, gc):
        self.drawtemperature(dc, gc, self.extruder0targettemps,
                             "Ex0 Target", 2, self.root.settings.graph_color_ex0target)

    def drawextruder1temp(self, dc, gc):
        self.drawtemperature(dc, gc, self.extruder1temps,
                             "Ex1", 3, self.root.settings.graph_color_ex1temp)

    def drawextruder1targettemp(self, dc, gc):
        self.drawtemperature(dc, gc, self.extruder1targettemps,
                             "Ex1 Target", 2, self.root.settings.graph_color_ex1target)

    # The Set* methods below replace the most recent sample in a history;
    # the Add* methods append a new sample and drop the oldest one once
    # the history exceeds xsteps entries.
    def SetFanPower(self, value):
        self.fanpowers.pop()
        self.fanpowers.append(value)

    def AddFanPower(self, value):
        self.fanpowers.append(value)
        if float(len(self.fanpowers) - 1) / self.xsteps > 1:
            self.fanpowers.pop(0)

    def SetBedTemperature(self, value):
        self.bedtemps.pop()
        self.bedtemps.append(value)

    def AddBedTemperature(self, value):
        self.bedtemps.append(value)
        if float(len(self.bedtemps) - 1) / self.xsteps > 1:
            self.bedtemps.pop(0)

    def SetBedTargetTemperature(self, value):
        self.bedtargettemps.pop()
        self.bedtargettemps.append(value)

    def AddBedTargetTemperature(self, value):
        self.bedtargettemps.append(value)
        if float(len(self.bedtargettemps) - 1) / self.xsteps > 1:
            self.bedtargettemps.pop(0)

    def SetExtruder0Temperature(self, value):
        self.extruder0temps.pop()
        self.extruder0temps.append(value)

    def AddExtruder0Temperature(self, value):
        self.extruder0temps.append(value)
        if float(len(self.extruder0temps) - 1) / self.xsteps > 1:
            self.extruder0temps.pop(0)

    def SetExtruder0TargetTemperature(self, value):
        self.extruder0targettemps.pop()
        self.extruder0targettemps.append(value)

    def AddExtruder0TargetTemperature(self, value):
        self.extruder0targettemps.append(value)
        if float(len(self.extruder0targettemps) - 1) / self.xsteps > 1:
            self.extruder0targettemps.pop(0)

    def SetExtruder1Temperature(self, value):
        self.extruder1temps.pop()
        self.extruder1temps.append(value)

    def AddExtruder1Temperature(self, value):
        self.extruder1temps.append(value)
        if float(len(self.extruder1temps) - 1) / self.xsteps > 1:
            self.extruder1temps.pop(0)

    def SetExtruder1TargetTemperature(self, value):
        self.extruder1targettemps.pop()
        self.extruder1targettemps.append(value)

    def AddExtruder1TargetTemperature(self, value):
        self.extruder1targettemps.append(value)
        if float(len(self.extruder1targettemps) - 1) / self.xsteps > 1:
            self.extruder1targettemps.pop(0)

    def StartPlotting(self, time):
        """Start the update timer (interval in ms), mirroring to the
        detached window if one is open."""
        self.Refresh()
        self.timer.Start(time)
        if self.window: self.window.graph.StartPlotting(time)

    def Destroy(self):
        # print(__class__, '.Destroy')
        self.StopPlotting()
        return super(BufferedCanvas, self).Destroy()

    def StopPlotting(self):
        """Stop the update timer here and in the detached window."""
        self.timer.Stop()
        #self.Refresh() # do not refresh when stopping in case the underlying object has been destroyed already
        if self.window: self.window.graph.StopPlotting()

    def draw(self, dc, w, h):
        """Full repaint: grid first (which reserves the axis-label space),
        then each temperature curve; extruder 1 only when in use."""
        dc.SetBackground(wx.Brush(self.root.settings.graph_color_background))
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        self.width = w
        self.height = h
        self.reserved.clear()
        self.drawgrid(dc, gc)
        self.gridLabelsRight = self.reserved[-1].Right
        self.drawbedtargettemp(dc, gc)
        self.drawbedtemp(dc, gc)
        self.drawfanpower(dc, gc)
        self.drawextruder0targettemp(dc, gc)
        self.drawextruder0temp(dc, gc)
        if self.extruder1targettemps[-1]>0 or self.extruder1temps[-1]>5:
            self.drawextruder1targettemp(dc, gc)
            self.drawextruder1temp(dc, gc)

    class _YBounds:
        """Small helper class to calculate y bounds dynamically"""

        def __init__(self, graph, minimum_scale=5.0, buffer=0.10):
            """_YBounds(Graph,float,float)

            graph           parent object to calculate scales for
            minimum_scale   minimum range to show on the graph
            buffer          amount of padding to add above & below the
                            displayed temperatures. Given as a fraction of the
                            total range. (Eg .05 to use 90% of the range for
                            temperatures)
            """
            self.graph = graph
            self.min_scale = minimum_scale
            self.buffer = buffer
            # Frequency to rescale the graph
            self.update_freq = 10
            # number of updates since last full refresh
            self._last_update = self.update_freq

        def update(self, forceUpdate=False):
            """Updates graph.minyvalue and graph.maxyvalue based on current
            temperatures """
            self._last_update += 1
            # TODO Smart update. Only do full calculation every 10s. Otherwise,
            # just look at current graph & expand if necessary
            if forceUpdate or self._last_update >= self.update_freq:
                self.graph.minyvalue, self.graph.maxyvalue = self.getBounds()
                self._last_update = 0
            else:
                bounds = self.getBoundsQuick()
                self.graph.minyvalue, self.graph.maxyvalue = bounds

        def getBounds(self):
            """
            Calculates the bounds based on the current temperatures

            Rules:
             * Include the full extruder0 history
             * Include the current target temp (but not necessarily old
               settings)
             * Include the extruder1 and/or bed temp if
                1) The target temp is >0
                2) The history has ever been above 5
             * Include at least min_scale
             * Include at least buffer above & below the extreme temps
            """
            extruder0_min = min(self.graph.extruder0temps)
            extruder0_max = max(self.graph.extruder0temps)
            extruder0_target = self.graph.extruder0targettemps[-1]
            extruder1_min = min(self.graph.extruder1temps)
            extruder1_max = max(self.graph.extruder1temps)
            extruder1_target = self.graph.extruder1targettemps[-1]
            bed_min = min(self.graph.bedtemps)
            bed_max = max(self.graph.bedtemps)
            bed_target = self.graph.bedtargettemps[-1]
            miny = min(extruder0_min, extruder0_target)
            maxy = max(extruder0_max, extruder0_target)
            if extruder1_target > 0 or extruder1_max > 5: # use extruder1
                miny = min(miny, extruder1_min, extruder1_target)
                maxy = max(maxy, extruder1_max, extruder1_target)
            if bed_target > 0 or bed_max > 5: # use HBP
                miny = min(miny, bed_min, bed_target)
                maxy = max(maxy, bed_max, bed_target)
            miny = min(0, miny)
            maxy = max(260, maxy)
            padding = (maxy - miny) * self.buffer / (1.0 - 2 * self.buffer)
            miny -= padding
            maxy += padding
            if maxy - miny < self.min_scale:
                extrapadding = (self.min_scale - maxy + miny) / 2.0
                miny -= extrapadding
                maxy += extrapadding
            return (miny, maxy)

        def getBoundsQuick(self):
            # Only look at current temps
            extruder0_min = self.graph.extruder0temps[-1]
            extruder0_max = self.graph.extruder0temps[-1]
            extruder0_target = self.graph.extruder0targettemps[-1]
            extruder1_min = self.graph.extruder1temps[-1]
            extruder1_max = self.graph.extruder1temps[-1]
            extruder1_target = self.graph.extruder1targettemps[-1]
            bed_min = self.graph.bedtemps[-1]
            bed_max = self.graph.bedtemps[-1]
            bed_target = self.graph.bedtargettemps[-1]
            miny = min(extruder0_min, extruder0_target)
            maxy = max(extruder0_max, extruder0_target)
            if extruder1_target > 0 or extruder1_max > 5: # use extruder1
                miny = min(miny, extruder1_min, extruder1_target)
                maxy = max(maxy, extruder1_max, extruder1_target)
            if bed_target > 0 or bed_max > 5: # use HBP
                miny = min(miny, bed_min, bed_target)
                maxy = max(maxy, bed_max, bed_target)
            miny = min(0, miny)
            maxy = max(260, maxy)
            # We have to rescale, so add padding
            bufratio = self.buffer / (1.0 - self.buffer)
            if miny < self.graph.minyvalue:
                padding = (self.graph.maxyvalue - miny) * bufratio
                miny -= padding
            if maxy > self.graph.maxyvalue:
                padding = (maxy - self.graph.minyvalue) * bufratio
                maxy += padding
            return (min(miny, self.graph.minyvalue),
                    max(maxy, self.graph.maxyvalue))
class LtRect(wx.Rect):
    """A wx.Rect that orders by its left edge, so rects can be sorted
    and bisected by horizontal position."""
    def __lt__(self, other):
        # Compare horizontal positions only.
        return other.x > self.x
| 21,456 | Python | .py | 453 | 36.06181 | 111 | 0.594004 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,377 | utils.py | kliment_Printrun/printrun/gui/utils.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
def make_button(parent, label, callback, tooltip, container = None, size = wx.DefaultSize, style = 0):
    """Create a wx.Button wired to *callback* and carrying *tooltip*.

    When *container* (a sizer) is given, the new button is also added to it.
    Returns the button.
    """
    btn = wx.Button(parent, -1, label, style = style, size = size)
    btn.SetToolTip(wx.ToolTip(tooltip))
    btn.Bind(wx.EVT_BUTTON, callback)
    if container:
        container.Add(btn)
    return btn
def make_autosize_button(*args):
    """make_button variant whose button auto-sizes to exactly fit its label."""
    autosize = {"size": (-1, -1), "style": wx.BU_EXACTFIT}
    return make_button(*args, **autosize)
def make_custom_button(root, parentpanel, i, style = 0):
    """Build a control-panel button from descriptor *i* and register it.

    The descriptor provides label, tooltip, background colour and command;
    the button is recorded in root.btndict (by command) and appended to
    root.printerControls.  Returns the button.
    """
    button = make_button(parentpanel, i.label, root.process_button,
                         i.tooltip, style = style)
    button.properties = i
    button.SetForegroundColour("black")
    button.SetBackgroundColour(i.background)
    root.printerControls.append(button)
    root.btndict[i.command] = button
    return button
| 1,466 | Python | .py | 33 | 40.818182 | 102 | 0.731281 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,378 | __init__.py | kliment_Printrun/printrun/gui/__init__.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
# Fail fast when wxPython is missing or too old: the GUI requires wx >= 4.
try:
    import wx
    if wx.VERSION < (4,):
        raise ImportError()
except:
    # NOTE(review): this bare except also catches SystemExit and
    # KeyboardInterrupt, and _() is used here although install_locale()
    # only runs below — presumably _ is installed into builtins by an
    # earlier import; verify, otherwise this handler raises NameError.
    logging.error(_("WX >= 4 is not installed. This program requires WX >= 4 to run."))
    raise

from printrun.utils import install_locale
install_locale('pronterface')
from .controls import ControlsSizer, add_extra_controls
from .viz import VizPane
from .log import LogPane
from .toolbar import MainToolbar
class ToggleablePane(wx.BoxSizer):
    """Horizontal sizer pairing a collapsible panel with its toggle button.

    Subclasses add the panel/button in their preferred order and implement
    on_show()/on_hide() to relayout their parent sizers.
    """

    def __init__(self, root, label, parentpanel, parentsizers):
        super(ToggleablePane, self).__init__(wx.HORIZONTAL)
        if not parentpanel:
            parentpanel = root.panel
        self.root = root
        self.parentpanel = parentpanel
        self.parentsizers = parentsizers
        self.visible = True
        self.panepanel = root.newPanel(parentpanel)
        # Small arrow-labelled button ("<" or ">") that collapses/expands the pane.
        self.button = wx.Button(parentpanel, -1, label, size = (35, 18), style = wx.BU_EXACTFIT)
        self.button.Bind(wx.EVT_BUTTON, self.toggle)

    def toggle(self, event):
        """Flip the pane between shown and hidden and swap the arrow label."""
        if self.visible:
            self.Hide(self.panepanel)
            self.on_hide()
        else:
            self.Show(self.panepanel)
            self.on_show()
        self.visible = not self.visible
        if self.button.GetLabel() == "<":
            self.button.SetLabel(">")
        else:
            self.button.SetLabel("<")
class LeftPaneToggleable(ToggleablePane):
    """Collapsible pane on the left side of the main window (pane first,
    toggle button on its right edge)."""
    def __init__(self, root, parentpanel, parentsizers):
        super().__init__(root, "<", parentpanel, parentsizers)
        self.Add(self.panepanel, 0, wx.EXPAND)
        self.Add(self.button, 0)

    def set_sizer(self, sizer):
        # Install the sizer holding the pane's actual content.
        self.panepanel.SetSizer(sizer)

    def on_show(self):
        for sizer in self.parentsizers:
            sizer.Layout()

    def on_hide(self):
        for sizer in self.parentsizers:
            # Expand right splitterwindow
            if isinstance(sizer, wx.SplitterWindow):
                # If the splitter is already shrunk to its button, push the
                # sash so only that button remains visible.
                if sizer.shrinked:
                    button_width = self.button.GetSize()[0]
                    sizer.SetSashPosition(sizer.GetSize()[0] - button_width)
            else:
                sizer.Layout()
class LogPaneToggleable(ToggleablePane):
    """Collapsible pane wrapping the log/console on the right side of a
    wx.SplitterWindow. Collapsing moves the splitter sash so that only
    the toggle button remains visible; expanding restores the geometry
    saved at collapse time."""

    def __init__(self, root, parentpanel, parentsizers):
        super(LogPaneToggleable, self).__init__(root, ">", parentpanel, parentsizers)
        self.Add(self.button, 0)
        pane = LogPane(root, self.panepanel)
        self.panepanel.SetSizer(pane)
        self.Add(self.panepanel, 1, wx.EXPAND)
        # The enclosing wx.SplitterWindow whose sash is moved on toggle
        self.splitter = self.parentpanel.GetParent()

    def on_show(self):
        # Restore the splitter geometry saved by on_hide(); on_show is only
        # ever reached after on_hide, so the orig_* attributes exist.
        self.splitter.shrinked = False
        self.splitter.SetSashPosition(self.splitter.GetSize()[0] - self.orig_width)
        self.splitter.SetMinimumPaneSize(self.orig_min_size)
        self.splitter.SetSashGravity(self.orig_gravity)
        # SetSashSize/SetSashInvisible are not available on all wx builds
        if getattr(self.splitter, 'SetSashSize', False):
            self.splitter.SetSashSize(self.orig_sash_size)
        getattr(self.splitter, 'SetSashInvisible', bool)(False)
        for sizer in self.parentsizers:
            sizer.Layout()

    def on_hide(self):
        # Save the current geometry first, then collapse the splitter so
        # only the toggle button stays visible.
        self.splitter.shrinked = True
        self.orig_width = self.splitter.GetSize()[0] - self.splitter.GetSashPosition()
        button_width = self.button.GetSize()[0]
        self.orig_min_size = self.splitter.GetMinimumPaneSize()
        self.orig_gravity = self.splitter.GetSashGravity()
        self.splitter.SetMinimumPaneSize(button_width)
        self.splitter.SetSashGravity(1)
        self.splitter.SetSashPosition(self.splitter.GetSize()[0] - button_width)
        if getattr(self.splitter, 'SetSashSize', False):
            # Hide the sash entirely where the wx build supports it
            self.orig_sash_size = self.splitter.GetSashSize()
            self.splitter.SetSashSize(0)
        getattr(self.splitter, 'SetSashInvisible', bool)(True)
        for sizer in self.parentsizers:
            sizer.Layout()
class MainWindow(wx.Frame):
    """Top-level frame for the Pronterface GUI.

    Offers two alternative layouts: a tabbed one (createTabbedGui) and a
    single-window one (createGui). Subclasses are expected to provide
    self.bgcolor, self.settings and the various callbacks referenced here
    (sendline, editbutton, platecb, cbuttons_reload, ...) before the GUI
    is built.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # this list will contain all controls that should be only enabled
        # when we're connected to a printer
        self.panel = wx.Panel(self)
        self.reset_ui()
        self.statefulControls = []

    def reset_ui(self):
        # Containers rebuilt on each GUI (re)creation: registered panels,
        # and the controls that require an active printer connection.
        self.panels = []
        self.printerControls = []

    def newPanel(self, parent, add_to_list = True):
        """Create a wx.Panel under *parent* with the standard background
        color, optionally registering it in self.panels."""
        panel = wx.Panel(parent)
        self.registerPanel(panel, add_to_list)
        return panel

    def registerPanel(self, panel, add_to_list = True):
        # self.bgcolor is set by the subclass before the GUI is built
        panel.SetBackgroundColour(self.bgcolor)
        if add_to_list:
            self.panels.append(panel)

    def createTabbedGui(self):
        """Build the tabbed layout: a 'Commands' page (toolbar + controls)
        and a 'Status' page (viz/log splitter), plus optional plater pages
        when the UI mode asks for them."""
        self.notesizer = wx.BoxSizer(wx.VERTICAL)
        self.notebook = wx.Notebook(self.panel)
        self.notebook.SetBackgroundColour(self.bgcolor)
        page1panel = self.newPanel(self.notebook)
        page2panel = self.newPanel(self.notebook)
        self.mainsizer_page1 = wx.BoxSizer(wx.VERTICAL)
        page1panel1 = self.newPanel(page1panel)
        page1panel2 = self.newPanel(page1panel)
        self.toolbarsizer = MainToolbar(self, page1panel1, use_wrapsizer = True)
        page1panel1.SetSizer(self.toolbarsizer)
        self.mainsizer_page1.Add(page1panel1, 0, wx.EXPAND)
        self.lowersizer = wx.BoxSizer(wx.HORIZONTAL)
        page1panel2.SetSizer(self.lowersizer)
        leftsizer = wx.BoxSizer(wx.VERTICAL)
        controls_sizer = ControlsSizer(self, page1panel2, True)
        leftsizer.Add(controls_sizer, 1, wx.ALIGN_CENTER)
        rightsizer = wx.BoxSizer(wx.VERTICAL)
        extracontrols = wx.GridBagSizer()
        add_extra_controls(extracontrols, self, page1panel2, controls_sizer.extra_buttons)
        rightsizer.AddStretchSpacer()
        rightsizer.Add(extracontrols, 0, wx.ALIGN_CENTER)
        self.lowersizer.Add(leftsizer, 0, wx.ALIGN_CENTER | wx.RIGHT, border = 10)
        self.lowersizer.Add(rightsizer, 1, wx.ALIGN_CENTER)
        self.mainsizer_page1.Add(page1panel2, 1)
        self.mainsizer = wx.BoxSizer(wx.HORIZONTAL)
        # Status page: visualization and log side by side in a splitter
        self.splitterwindow = wx.SplitterWindow(page2panel, style = wx.SP_3D)
        page2sizer1 = wx.BoxSizer(wx.HORIZONTAL)
        page2panel1 = self.newPanel(self.splitterwindow)
        page2sizer2 = wx.BoxSizer(wx.HORIZONTAL)
        page2panel2 = self.newPanel(self.splitterwindow)
        vizpane = VizPane(self, page2panel1)
        page2sizer1.Add(vizpane, 1, wx.EXPAND)
        page2sizer2.Add(LogPane(self, page2panel2), 1, wx.EXPAND)
        page2panel1.SetSizer(page2sizer1)
        page2panel2.SetSizer(page2sizer2)
        self.splitterwindow.SetMinimumPaneSize(1)
        self.splitterwindow.SetSashGravity(0.5)
        self.splitterwindow.SplitVertically(page2panel1, page2panel2,
                                            self.settings.last_sash_position)
        self.mainsizer.Add(self.splitterwindow, 1, wx.EXPAND)
        page1panel.SetSizer(self.mainsizer_page1)
        page2panel.SetSizer(self.mainsizer)
        self.notesizer.Add(self.notebook, 1, wx.EXPAND)
        self.notebook.AddPage(page1panel, _("Commands"))
        self.notebook.AddPage(page2panel, _("Status"))
        if self.settings.uimode == _("Tabbed with platers"):
            # Imported lazily: the plater panels are only needed in this mode
            from printrun.stlplater import StlPlaterPanel
            from printrun.gcodeplater import GcodePlaterPanel
            page3panel = StlPlaterPanel(parent = self.notebook,
                                        callback = self.platecb,
                                        build_dimensions = self.build_dimensions_list,
                                        circular_platform = self.settings.circular_bed,
                                        simarrange_path = self.settings.simarrange_path,
                                        antialias_samples = int(self.settings.antialias3dsamples))
            page4panel = GcodePlaterPanel(parent = self.notebook,
                                          callback = self.platecb,
                                          build_dimensions = self.build_dimensions_list,
                                          circular_platform = self.settings.circular_bed,
                                          antialias_samples = int(self.settings.antialias3dsamples))
            self.registerPanel(page3panel)
            self.registerPanel(page4panel)
            self.notebook.AddPage(page3panel, _("Plater"))
            self.notebook.AddPage(page4panel, _("G-Code Plater"))
        self.panel.SetSizer(self.notesizer)
        self.panel.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
        # Custom buttons
        self.cbuttonssizer = wx.WrapSizer(wx.HORIZONTAL)
        self.centerpanel = self.newPanel(page1panel2)
        self.centerpanel.SetSizer(self.cbuttonssizer)
        rightsizer.Add(self.centerpanel, 0, wx.ALIGN_CENTER)
        rightsizer.AddStretchSpacer()
        self.panel.SetSizerAndFit(self.notesizer)
        self.cbuttons_reload()
        minsize = self.lowersizer.GetMinSize() # lower pane
        minsize[1] = self.notebook.GetSize()[1]
        self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
        self.Fit()

    def createGui(self, compact = False, mini = False):
        """Build the single-window layout: toolbar on top, collapsible
        controls column on the left, viz and log on the right (stacked
        into the left column when *compact* is set)."""
        self.mainsizer = wx.BoxSizer(wx.VERTICAL)
        self.lowersizer = wx.BoxSizer(wx.HORIZONTAL)
        upperpanel = self.newPanel(self.panel, False)
        self.toolbarsizer = MainToolbar(self, upperpanel)
        lowerpanel = self.newPanel(self.panel)
        upperpanel.SetSizer(self.toolbarsizer)
        lowerpanel.SetSizer(self.lowersizer)
        leftpanel = self.newPanel(lowerpanel)
        left_pane = LeftPaneToggleable(self, leftpanel, [self.lowersizer])
        leftpanel.SetSizer(left_pane)
        left_real_panel = left_pane.panepanel
        controls_panel = self.newPanel(left_real_panel)
        controls_sizer = ControlsSizer(self, controls_panel, mini_mode = mini)
        controls_panel.SetSizer(controls_sizer)
        left_sizer = wx.BoxSizer(wx.VERTICAL)
        left_sizer.Add(controls_panel, 1, wx.EXPAND)
        left_pane.set_sizer(left_sizer)
        self.lowersizer.Add(leftpanel, 0, wx.EXPAND)
        if compact:
            vizpanel = self.newPanel(lowerpanel)
            logpanel = self.newPanel(left_real_panel)
        else:
            # Use a splitterwindow to group viz and log
            rightpanel = self.newPanel(lowerpanel)
            rightsizer = wx.BoxSizer(wx.VERTICAL)
            rightpanel.SetSizer(rightsizer)
            self.splitterwindow = wx.SplitterWindow(rightpanel, style = wx.SP_3D | wx.SP_LIVE_UPDATE)
            self.splitterwindow.SetMinimumPaneSize(150)
            self.splitterwindow.SetSashGravity(0.8)
            rightsizer.Add(self.splitterwindow, 1, wx.EXPAND)
            vizpanel = self.newPanel(self.splitterwindow)
            logpanel = self.newPanel(self.splitterwindow)
            self.splitterwindow.SplitVertically(vizpanel, logpanel,
                                                self.settings.last_sash_position)
            self.splitterwindow.shrinked = False
        viz_pane = VizPane(self, vizpanel)
        # Custom buttons
        self.cbuttonssizer = wx.WrapSizer(wx.HORIZONTAL)
        self.centerpanel = self.newPanel(vizpanel)
        self.centerpanel.SetSizer(self.cbuttonssizer)
        viz_pane.Add(self.centerpanel, 0, flag = wx.ALIGN_CENTER)
        vizpanel.SetSizer(viz_pane)
        if compact:
            log_pane = LogPane(self, logpanel)
        else:
            log_pane = LogPaneToggleable(self, logpanel, [self.lowersizer])
            left_pane.parentsizers.append(self.splitterwindow)
        logpanel.SetSizer(log_pane)
        if compact:
            left_sizer.Add(logpanel, 1, wx.EXPAND)
            self.lowersizer.Add(vizpanel, 1, wx.EXPAND)
        else:
            self.lowersizer.Add(rightpanel, 1, wx.EXPAND)
        self.mainsizer.Add(upperpanel, 0, wx.EXPAND)
        self.mainsizer.Add(lowerpanel, 1, wx.EXPAND)
        self.panel.SetSizer(self.mainsizer)
        self.panel.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
        self.mainsizer.Layout()
        # This prevents resizing below a reasonable value
        # We sum the lowersizer (left pane / viz / log) min size
        # the toolbar height and the statusbar/menubar sizes
        minsize = [0, 0]
        minsize[0] = self.lowersizer.GetMinSize()[0] # lower pane
        minsize[1] = max(viz_pane.GetMinSize()[1], controls_sizer.GetMinSize()[1])
        minsize[1] += self.toolbarsizer.GetMinSize()[1] # toolbar height
        # Clamp the minimum size to the available display area
        displaysize = wx.DisplaySize()
        minsize[0] = min(minsize[0], displaysize[0])
        minsize[1] = min(minsize[1], displaysize[1])
        self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
        self.cbuttons_reload()

    def gui_set_connected(self):
        """Enable all printer-dependent controls after connecting."""
        self.xyb.enable()
        self.zb.enable()
        for control in self.printerControls:
            control.Enable()

    def gui_set_disconnected(self):
        """Disable all printer-dependent controls after disconnecting."""
        self.printbtn.Disable()
        self.pausebtn.Disable()
        self.recoverbtn.Disable()
        for control in self.printerControls:
            control.Disable()
        self.xyb.disable()
        self.zb.disable()
| 13,721 | Python | .py | 282 | 38.521277 | 101 | 0.652226 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,379 | log.py | kliment_Printrun/printrun/gui/log.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from .utils import make_button
class LogPane(wx.BoxSizer):
    """Vertical sizer with the log output box on top and the manual
    command entry (text field + Send button) below. The created widgets
    are attached to *root* (root.logbox, root.commandbox, root.sendbtn)
    so the rest of the GUI can reach them."""

    def __init__(self, root, parentpanel = None):
        super().__init__(wx.VERTICAL)
        if not parentpanel:
            parentpanel = root.panel
        # Read-only log output
        root.logbox = wx.TextCtrl(parentpanel, style = wx.TE_MULTILINE, size = (350, -1))
        root.logbox.SetMinSize((100, -1))
        root.logbox.SetEditable(0)
        self.Add(root.logbox, 1, wx.EXPAND)
        # Command entry row: text field plus Send button
        bottom_panel = root.newPanel(parentpanel)
        bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
        root.commandbox = wx.TextCtrl(bottom_panel, style = wx.TE_PROCESS_ENTER)
        root.commandbox.SetToolTip(wx.ToolTip(_("Send commands to printer\n(Type 'help' for simple\nhelp function)")))
        root.commandbox.Hint = _("Command to send")
        root.commandbox.Bind(wx.EVT_TEXT_ENTER, root.sendline)
        root.commandbox.Bind(wx.EVT_CHAR, root.cbkey)

        def on_focus(event):
            # In Ubuntu 19.10, when focused, all text is selected
            end = root.commandbox.LastPosition
            wx.CallAfter(root.commandbox.SetSelection, end, end)
            event.Skip()

        root.commandbox.Bind(wx.EVT_SET_FOCUS, on_focus)
        root.commandbox.history = [""]
        root.commandbox.histindex = 1
        bottom_sizer.Add(root.commandbox, 1)
        root.sendbtn = make_button(bottom_panel, _("Send"), root.sendline, _("Send Command to Printer"), style = wx.BU_EXACTFIT, container = bottom_sizer)
        bottom_panel.SetSizer(bottom_sizer)
        self.Add(bottom_panel, 0, wx.EXPAND)
| 2,237 | Python | .py | 44 | 44.477273 | 146 | 0.687529 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,380 | zbuttons.py | kliment_Printrun/printrun/gui/zbuttons.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from printrun.gui.xybuttons import FocusCanvas
from printrun.utils import imagefile
def sign(n):
    """Return the sign of *n*: -1 for negative, 1 for positive, 0 for zero."""
    return (n > 0) - (n < 0)
class ZButtons(FocusCanvas):
    """Custom-drawn Z-axis jog control.

    The widget is a bitmap with stacked click zones above and below a
    vertical center; the distance of a click from the center selects the
    move magnitude (move_values) and its side the direction.
    """

    # Pixel distances from `center` delimiting the click zones
    button_ydistances = [7, 30, 55, 83] # ,112
    # Move magnitude for each zone index
    move_values = [0.1, 1, 10]
    # Pixel coordinates of the control's center in the background bitmap
    center = (30, 118)
    # zone index -> (x offset, y offset, radius) of the label circle overlay
    label_overlay_positions = {
        0: (1.1, 18, 9),
        1: (1.1, 41.5, 10.6),
        2: (1.1, 68, 13),
    }
    imagename = "control_z.png"

    def __init__(self, parent, moveCallback = None, bgcolor = "#FFFFFF", ID=-1):
        self.bg_bmp = wx.Image(imagefile(self.imagename), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        # Currently hovered zone index and direction (None when not hovering)
        self.range = None
        self.direction = None
        self.orderOfMagnitudeIdx = 0  # 0 means '1', 1 means '10', 2 means '100', etc.
        self.moveCallback = moveCallback
        self.enabled = False
        # Remember the last clicked value, so we can repeat when spacebar pressed
        self.lastValue = None
        self.bgcolor = wx.Colour()
        self.bgcolor.Set(bgcolor)
        # Semi-transparent version of the background, used to gray the
        # control out when disabled
        self.bgcolormask = wx.Colour(self.bgcolor.Red(), self.bgcolor.Green(), self.bgcolor.Blue(), 128)
        # On MS Windows super(style=WANTS_CHARS) prevents tab cycling
        # pass empty style explicitly
        super().__init__(parent, ID, size=self.bg_bmp.GetSize(), style=0)
        # Set up mouse and keyboard event capture
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
        self.Bind(wx.EVT_SET_FOCUS, self.RefreshFocus)
        self.Bind(wx.EVT_KILL_FOCUS, self.RefreshFocus)

    def RefreshFocus(self, evt):
        # Repaint so the focus rectangle follows focus changes
        self.Refresh()
        evt.Skip()

    def disable(self):
        self.Enabled = False  # prevents focus
        self.enabled = False
        self.update()

    def enable(self):
        self.Enabled = True
        self.enabled = True
        self.update()

    def repeatLast(self):
        """Re-issue the last clicked move (bound to spacebar elsewhere)."""
        if self.lastValue:
            self.moveCallback(self.lastValue)

    def clearRepeat(self):
        self.lastValue = None

    def lookupRange(self, ydist):
        """Map a vertical distance from center to a zone index.

        Returns -1 for the dead zone closest to the center, 0..n for the
        move zones, or None when the click is past the outermost zone.
        """
        idx = -1
        for d in self.button_ydistances:
            if ydist < d:
                return idx
            idx += 1
        return None

    def highlight(self, gc, rng, dir):
        """Draw the hover highlight for zone *rng* on side *dir* (+1 up, -1 down)."""
        assert(rng >= -1 and rng <= 3)
        assert(dir >= -1 and dir <= 1)
        # Inset the highlight horizontally within the bitmap
        fudge = 11
        x = 0 + fudge
        w = 59 - fudge * 2
        if rng >= 0:
            k = 1 if dir > 0 else 0
            y = self.center[1] - (dir * self.button_ydistances[rng + k])
            h = self.button_ydistances[rng + 1] - self.button_ydistances[rng]
            gc.DrawRoundedRectangle(x, y, w, h, 4)
            # gc.DrawRectangle(x, y, w, h)
        # self.drawPartialPie(dc, center, r1-inner_ring_radius, r2-inner_ring_radius, a1+fudge, a2-fudge)

    def getRangeDir(self, pos):
        """Return (zone index, direction) for mouse position *pos*."""
        ydelta = self.center[1] - pos[1]
        return (self.lookupRange(abs(ydelta)), sign(ydelta))

    def draw(self, dc, w, h):
        # Paint background bitmap, then either the hover/label overlays
        # (enabled) or a translucent mask (disabled), then the focus rect.
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        if self.bg_bmp:
            w, h = (self.bg_bmp.GetWidth(), self.bg_bmp.GetHeight())
            gc.DrawBitmap(self.bg_bmp, 0, 0, w, h)
        if self.enabled and self.IsEnabled():
            # Draw label overlays
            gc.SetPen(wx.Pen(wx.Colour(255, 255, 255, 128), 1))
            gc.SetBrush(wx.Brush(wx.Colour(255, 255, 255, 128 + 64)))
            for idx, kpos in self.label_overlay_positions.items():
                if idx != self.range:
                    r = kpos[2]
                    gc.DrawEllipse(self.center[0] - kpos[0] - r, self.center[1] - kpos[1] - r, r * 2, r * 2)
            # Top 'layer' is the mouse-over highlights
            gc.SetPen(wx.Pen(wx.Colour(100, 100, 100, 172), 4))
            gc.SetBrush(wx.Brush(wx.Colour(0, 0, 0, 128)))
            if self.range is not None and self.direction is not None:
                self.highlight(gc, self.range, self.direction)
        else:
            gc.SetPen(wx.Pen(self.bgcolor, 0))
            gc.SetBrush(wx.Brush(self.bgcolormask))
            gc.DrawRectangle(0, 0, w, h)
        self.drawFocusRect(dc)

    # ------ #
    # Events #
    # ------ #
    def OnMotion(self, event):
        if not self.enabled:
            return
        # Only repaint when the hovered zone actually changed
        oldr, oldd = self.range, self.direction
        mpos = event.GetPosition()
        self.range, self.direction = self.getRangeDir(mpos)
        if oldr != self.range or oldd != self.direction:
            self.update()

    def OnLeftDown(self, event):
        if not self.enabled:
            return
        mpos = event.GetPosition()
        r, d = self.getRangeDir(mpos)
        # r == -1 is the dead zone near the center; ignore clicks there
        if r is not None and r >= 0:
            value = d * self.move_values[r]
            if self.moveCallback:
                self.lastValue = value
                self.moveCallback(value)

    def OnLeaveWindow(self, evt):
        # Clear the hover highlight when the pointer leaves the control
        self.range = None
        self.direction = None
        self.update()
class ZButtonsMini(ZButtons):
    """Compact variant of ZButtons with a smaller bitmap and only two
    move magnitudes (0.1 and 10)."""
    button_ydistances = [7, 30, 55]
    center = (30, 84)
    label_overlay_positions = {
        0: (1, 18, 9),
        1: (1, 42.8, 12.9),
    }
    imagename = "control_z_mini.png"
    move_values = [0.1, 10]
| 6,048 | Python | .py | 151 | 31.609272 | 108 | 0.6 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,381 | widgets.py | kliment_Printrun/printrun/gui/widgets.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import re
import string # For determining whitespaces and punctuation marks
import platform # Used by get_space() for platform specific spacing
import logging
import wx
def get_space(key: str) -> int:
    '''
    Takes key (str), returns spacing value (int).
    Provides correct spacing in pixel for borders and sizers.
    '''
    spacing_values = {
        'major': 12,  # e.g. outer border of dialog boxes
        'minor': 8,  # e.g. border of inner elements
        'mini': 4,
        'stddlg': 4,  # Border around std dialog buttons.
        'stddlg-frame': 8,  # Border around std dialog buttons when used with frames.
        'staticbox': 0,  # Border between StaticBoxSizers and the elements inside.
        'settings': 16,  # How wide setting elements can be (multiples of this)
        'none': 0
    }
    # Platform specific overrides
    system = platform.system()
    if system == 'Windows':
        spacing_values['stddlg'] = 8
        spacing_values['staticbox'] = 4
    elif system == 'Darwin':
        spacing_values['stddlg-frame'] = 12
    if key in spacing_values:
        return spacing_values[key]
    logging.warning("get_space() cannot return spacing value, "
                    "will return 0 instead. No entry: %s" % key)
    return 0
class MacroEditor(wx.Dialog):
    """Really simple editor to edit macro definitions.

    When *gcode* is False the definition is a single indented string that
    is unindented for display and reindented on save; when True it is a
    list of G-code lines joined/split on newlines. The edited text is
    handed back through *callback* on save.
    """

    def __init__(self, macro_name, definition, callback, gcode = False):
        # Indentation prefix stripped/re-added around the macro body;
        # replaced by the definition's own leading whitespace in unindent()
        self.indent_chars = "  "
        title = "%s" if gcode else "Macro %s"
        self.gcode = gcode
        # Persisted find/replace settings: (matchcase, wholeword, down, find string)
        self.fr_settings = (False, False, True, '')
        wx.Dialog.__init__(self, None, title = title % macro_name,
                           style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        self.callback = callback
        panel = wx.Panel(self)
        panelsizer = wx.BoxSizer(wx.VERTICAL)
        titlesizer = wx.BoxSizer(wx.HORIZONTAL)
        # Shows search status messages ("No matches", ...)
        self.status_field = wx.StaticText(panel, -1, "")
        titlesizer.Add(self.status_field, 1, wx.ALIGN_CENTER_VERTICAL)
        self.findbtn = wx.Button(panel, -1, _("Find..."))  # New button for "Find" (Jezmy)
        self.findbtn.Bind(wx.EVT_BUTTON, self.on_find)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        titlesizer.Add(self.findbtn, 0, wx.ALIGN_CENTER_VERTICAL)
        panelsizer.Add(titlesizer, 0, wx.EXPAND | wx.ALL, get_space('minor'))
        self.text_box = wx.TextCtrl(panel,
                                    style = wx.HSCROLL | wx.TE_MULTILINE | wx.TE_RICH2 | wx.TE_NOHIDESEL,
                                    size = (400, 400))
        if not self.gcode:
            self.text_box.SetValue(self.unindent(definition))
        else:
            self.text_box.SetValue("\n".join(definition))
        panelsizer.Add(self.text_box, 1, wx.EXPAND)
        panel.SetSizer(panelsizer)
        topsizer = wx.BoxSizer(wx.VERTICAL)
        topsizer.Add(panel, 1, wx.EXPAND | wx.ALL, get_space('none'))
        # No StaticLine in this case bc the TextCtrl acts as a divider
        btnsizer = wx.StdDialogButtonSizer()
        self.savebtn = wx.Button(self, wx.ID_SAVE)
        self.savebtn.SetDefault()
        self.savebtn.Bind(wx.EVT_BUTTON, self.on_save)
        self.cancelbtn = wx.Button(self, wx.ID_CANCEL)
        self.cancelbtn.Bind(wx.EVT_BUTTON, self.on_close)
        btnsizer.AddButton(self.savebtn)
        btnsizer.AddButton(self.cancelbtn)
        btnsizer.Realize()
        topsizer.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, get_space('stddlg'))
        self.SetSizer(topsizer)
        self.SetMinClientSize((230, 150))  # TODO: Check if self.FromDIP() is needed
        topsizer.Fit(self)
        self.CentreOnParent()
        self.Show()
        self.text_box.SetFocus()

    def on_find(self, event):
        """Open the find/replace dialog, reusing an already-open one."""
        for window in self.GetChildren():
            if isinstance(window, wx.FindReplaceDialog):
                window.Show()
                window.Raise()
                return
        FindAndReplace(self.text_box, self.status_field, self.fr_settings, self.fr_callback)

    def fr_callback(self, val1, val2, val3, val4):
        # Remember the find/replace settings for the next search
        self.fr_settings = (val1, val2, val3, val4)

    def on_save(self, event):
        self.Destroy()
        if not self.gcode:
            self.callback(self.reindent(self.text_box.GetValue()))
        else:
            self.callback(self.text_box.GetValue().split("\n"))

    def on_close(self, event):
        self.Destroy()

    def ShowMessage(self, event, message):
        dlg = wx.MessageDialog(self, message,
                               "Info!", wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()

    def unindent(self, text):
        """Strip the first line's leading whitespace from every line,
        remembering it in self.indent_chars for reindent()."""
        self.indent_chars = text[:len(text) - len(text.lstrip())]
        if len(self.indent_chars) == 0:
            self.indent_chars = "  "
        unindented = ""
        lines = re.split(r"(?:\r\n?|\n)", text)
        if len(lines) <= 1:
            return text
        for line in lines:
            if line.startswith(self.indent_chars):
                unindented += line[len(self.indent_chars):] + "\n"
            else:
                unindented += line + "\n"
        return unindented

    def reindent(self, text):
        """Prefix every non-blank line with the remembered indentation."""
        lines = re.split(r"(?:\r\n?|\n)", text)
        if len(lines) <= 1:
            return text
        reindented = ""
        for line in lines:
            if line.strip() != "":
                reindented += self.indent_chars + line + "\n"
        return reindented
class FindAndReplace():
'''A dialogue that provides full functionality for finding
and replacing strings in a given target string.
'''
def __init__(self, text_cntrl: wx.TextCtrl,
statusbar: wx.StaticText,
settings: tuple = (False, False, True, ''),
settings_cb = None):
self.matchcase = settings[0] # wx.FR_MATCHCASE
self.wholeword = settings[1] # wx.FR_WHOLEWORD
self.down = settings[2] # wx.FR_DOWN
self.callback = settings_cb
self.statusbar = statusbar
self.text_cntrl = text_cntrl
self.find_str = settings[3]
self.replace_str = ""
self.target = ""
self.all_matches = 0
self.current_match = 0
if self.text_cntrl.IsEmpty():
self.statusbar.SetLabel(_("No content to search."))
return
# Initialise and hold search parameters in fr_data
self.fr_data = wx.FindReplaceData(self.bools_to_flags(settings))
selection = text_cntrl.GetStringSelection()
if selection and not len(selection) > 40 and selection not in ('\n', '\r'):
self.find_str = selection
self.fr_data.SetFindString(self.find_str)
self.fr_dialog = wx.FindReplaceDialog(self.text_cntrl,
self.fr_data, _("Find and Replace..."),
wx.FR_REPLACEDIALOG)
# Bind all button events
self.fr_dialog.Bind(wx.EVT_FIND, self.on_find)
self.fr_dialog.Bind(wx.EVT_FIND_NEXT, self.on_find_next)
self.fr_dialog.Bind(wx.EVT_FIND_REPLACE, self.on_replace)
self.fr_dialog.Bind(wx.EVT_FIND_REPLACE_ALL, self.on_replace_all)
self.fr_dialog.Bind(wx.EVT_FIND_CLOSE, self.on_cancel)
# Move the dialogue to the side of the editor where there is more space
display_size = wx.Display(self.fr_dialog).GetClientArea()
ed_x, ed_y, ed_width, ed_height = self.fr_dialog.GetParent().GetRect()
fr_x, fr_y, fr_width, fr_height = self.fr_dialog.GetRect()
if display_size[2] - ed_x - ed_width < fr_width:
fr_x = ed_x - fr_width
else:
fr_x = ed_x + ed_width - 16
self.fr_dialog.SetRect((fr_x, fr_y, fr_width, fr_height))
self.fr_dialog.Show()
def update_data(self):
'''Reads the current settings of the FindReplaceDialog and updates
all relevant strings of the search feature.
'''
# Update flags
flags_binary = bin(self.fr_data.GetFlags())[2:].zfill(3)
self.down = bool(int(flags_binary[2]))
self.wholeword = bool(int(flags_binary[1]))
self.matchcase = bool(int(flags_binary[0]))
# Update search data
self.find_str = self.fr_data.GetFindString()
self.replace_str = self.fr_data.GetReplaceString()
self.target = self.text_cntrl.GetRange(0, self.text_cntrl.GetLastPosition())
if not self.find_str:
self.statusbar.SetLabel("")
if not self.matchcase:
# When search is not case-sensitve, convert the whole string to lowercase
self.find_str = self.find_str.casefold()
self.target = self.target.casefold()
def find_next(self):
self.update_data()
if not self.update_all_matches():
return
# If the search string is already selected, move
# the InsertionPoint and then select the next match
idx = self.text_cntrl.GetInsertionPoint()
selection = self.get_selected_str()
if selection == self.find_str:
sel_from, sel_to = self.text_cntrl.GetSelection()
self.text_cntrl.SelectNone()
if self.down:
self.text_cntrl.SetInsertionPoint(sel_to)
idx = sel_to
else:
self.text_cntrl.SetInsertionPoint(sel_from)
idx = sel_from
self.select_next_match(idx)
def replace_next(self):
'''Replaces one time the next instance of the search string
in the defined direction.
'''
self.update_data()
if not self.update_all_matches():
return
# If the search string is already selected, replace it.
# Otherwise find the next match an replace that one.
# The while loop helps us with the wholeword checks
if self.get_selected_str() == self.find_str:
sel_from, sel_to = self.text_cntrl.GetSelection()
else:
sel_from = self.get_next_idx(self.text_cntrl.GetInsertionPoint())
sel_to = sel_from + len(self.find_str)
self.text_cntrl.SelectNone()
self.text_cntrl.Replace(sel_from, sel_to, self.replace_str)
# The text_cntrl object is changed directly so
# we need to update the copy in self.target
self.update_data()
self.all_matches -= 1
if not self.all_matches:
self.statusbar.SetLabel(_('No matches'))
return
self.select_next_match(sel_from)
def replace_all(self):
'''Goes through the whole file and replaces
every instance of the search string.
'''
position = self.text_cntrl.GetInsertionPoint()
self.update_data()
if not self.update_all_matches():
return
self.text_cntrl.SelectNone()
seek_idx = 0
for match in range(self.all_matches):
sel_from = self.get_next_idx(seek_idx)
sel_to = sel_from + len(self.find_str)
self.text_cntrl.Replace(sel_from, sel_to, self.replace_str)
seek_idx = sel_from
self.update_data()
self.statusbar.SetLabel(_('Replaced {} matches').format(self.all_matches))
self.all_matches = 0
self.text_cntrl.SetInsertionPoint(position)
self.text_cntrl.ShowPosition(position)
def bools_to_flags(self, bools) -> int:
'''Converts a tuple of bool settings into an integer
that is readable for wx.FindReplaceData'''
matchcase = wx.FR_MATCHCASE if bools[0] else 0
wholeword = wx.FR_WHOLEWORD if bools[1] else 0
down = wx.FR_DOWN if bools[2] else 0
return matchcase | wholeword | down
def get_selected_str(self) -> str:
'''Returns the currently selected string, accounting for matchcase.'''
selection = self.text_cntrl.GetStringSelection()
if not self.matchcase:
selection = selection.casefold()
return selection
def get_next_idx(self, position: int) -> int:
'''Searches for the next instance of the search string
in the defined direction.
Takes wholeword setting into account.
Returns index of the first character.
'''
while True:
if self.down:
next_idx = self.target.find(self.find_str, position)
if next_idx == -1:
next_idx = self.target.find(self.find_str, 0, position)
if not self.wholeword or (self.wholeword and self.is_wholeword(next_idx)):
break
position = next_idx + len(self.find_str)
else:
next_idx = self.target.rfind(self.find_str, 0, position)
if next_idx == -1:
next_idx = self.target.rfind(self.find_str, position)
if not self.wholeword or (self.wholeword and self.is_wholeword(next_idx)):
break
position = next_idx
return next_idx
def update_all_matches(self) -> bool:
'''Updates self.all_matches with the amount of search
string instances in the target string.
'''
self.all_matches = 0
if self.wholeword:
selection = self.text_cntrl.GetSelection()
self.text_cntrl.SetInsertionPoint(0)
seek_idx = 0
found_idx = 0
while found_idx != -1:
found_idx = self.target.find(self.find_str, seek_idx)
if found_idx == -1:
break
if self.is_wholeword(found_idx):
self.all_matches += 1
seek_idx = found_idx + len(self.find_str)
self.text_cntrl.SetSelection(selection[0], selection[1])
else:
self.all_matches = self.target.count(self.find_str)
if not self.all_matches:
self.statusbar.SetLabel(_('No matches'))
return False
return True
def select_next_match(self, position: int):
'''Selects the next match in the defined direction.'''
idx = self.get_next_idx(position)
self.text_cntrl.SetSelection(idx, idx + len(self.find_str))
self.text_cntrl.ShowPosition(idx)
self.update_current_match()
def update_current_match(self):
'''Updates the current match index.'''
self.current_match = 0
position = self.text_cntrl.GetInsertionPoint()
if self.wholeword:
selection = self.text_cntrl.GetSelection()
seek_idx = position
found_idx = 0
while found_idx != -1:
found_idx = self.target.rfind(self.find_str, 0, seek_idx)
if found_idx == -1:
break
if self.is_wholeword(found_idx):
self.current_match += 1
seek_idx = found_idx
self.current_match += 1 # We counted all matches before the current, therefore +1
self.text_cntrl.SetSelection(selection[0], selection[1])
else:
self.current_match = self.target.count(self.find_str, 0, position) + 1
self.statusbar.SetLabel(_('Match {} out of {}').format(self.current_match, self.all_matches))
def is_wholeword(self, index: int) -> bool:
'''Returns True if the search string is a whole word.
That is, if it is enclosed in spaces, line breaks, or
the very start or end of the target string.
'''
start_idx = index
delimiter = string.whitespace + string.punctuation
if start_idx != 0 and self.target[start_idx - 1] not in delimiter:
return False
end_idx = start_idx + len(self.find_str)
if not end_idx > len(self.target) and self.target[end_idx] not in delimiter:
return False
return True
def on_find_next(self, event):
self.find_next()
def on_find(self, event):
self.find_next()
def on_replace(self, event):
self.replace_next()
def on_replace_all(self, event):
self.replace_all()
def on_cancel(self, event):
self.statusbar.SetLabel("")
if self.callback:
self.update_data()
self.callback(self.matchcase, self.wholeword,
self.down, self.find_str)
self.fr_dialog.Destroy()
# Maps internal settings group identifiers to the (translated) tab titles
# shown in the options dialog.
SETTINGS_GROUPS = {"Printer": _("Printer Settings"),
                   "UI": _("User Interface"),
                   "Viewer": _("Viewer"),
                   "Colors": _("Colors"),
                   "External": _("External Commands")}
class PronterOptionsDialog(wx.Dialog):
    """Options editor.

    Builds one notebook page per settings group, laying out each
    setting's label/widget pair in a two-column grid.
    """
    def __init__(self, pronterface):
        wx.Dialog.__init__(self, parent = None, title = _("Edit Settings"),
                           size = wx.DefaultSize, style = wx.DEFAULT_DIALOG_STYLE)
        self.notebook = notebook = wx.Notebook(self)
        all_settings = pronterface.settings._all_settings()
        # Group settings by their .group attribute, keeping the known
        # groups first in a fixed order; unknown groups are appended.
        group_list = []
        groups = {}
        for group in ["Printer", "UI", "Viewer", "Colors", "External"]:
            group_list.append(group)
            groups[group] = []
        for setting in all_settings:
            if setting.group not in group_list:
                group_list.append(setting.group)
                groups[setting.group] = []
            groups[setting.group].append(setting)
        for group in group_list:
            grouppanel = wx.ScrolledWindow(notebook, -1, style = wx.VSCROLL)
            # Setting the scrollrate based on the systemfont
            fontsize = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT).GetPixelSize()
            grouppanel.SetScrollRate(fontsize.x, fontsize.y)
            notebook.AddPage(grouppanel, SETTINGS_GROUPS[group])
            settings = groups[group]
            grid = wx.GridBagSizer(hgap = get_space('minor'), vgap = get_space('mini'))
            current_row = 0
            # This gives the first entry on the page a tiny bit of extra space to the top
            grid.Add((12, get_space('mini')), pos = (current_row, 0), span = (1, 2))
            current_row += 1
            for setting in settings:
                # "separator_" pseudo-settings render as a horizontal rule
                # plus a bold section label
                if setting.name.startswith("separator_"):
                    sep = wx.StaticLine(grouppanel, size = (-1, 5), style = wx.LI_HORIZONTAL)
                    grid.Add(sep, pos = (current_row, 0), span = (1, 2),
                             border = get_space('mini'), flag = wx.ALIGN_CENTER | wx.ALL | wx.EXPAND)
                    current_row += 1
                label, widget = setting.get_label(grouppanel), setting.get_widget(grouppanel)
                if setting.name.startswith("separator_"):
                    font = label.GetFont()
                    font.SetWeight(wx.BOLD)
                    label.SetFont(font)
                grid.Add(label, pos = (current_row, 0), border = get_space('minor'),
                         flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.LEFT)
                # Fixed-size widgets should not stretch across the column
                expand = 0 if isinstance(widget, (wx.SpinCtrlDouble, wx.Choice, wx.ComboBox)) else wx.EXPAND
                grid.Add(widget, pos = (current_row, 1), border = get_space('minor'),
                         flag = wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | expand)
                if hasattr(label, "set_default"):
                    label.Bind(wx.EVT_MOUSE_EVENTS, label.set_default)
                    if hasattr(widget, "Bind"):
                        widget.Bind(wx.EVT_MOUSE_EVENTS, label.set_default)
                current_row += 1
            grid.AddGrowableCol(1)
            grouppanel.SetSizer(grid)
            # The size of the options dialog is determined by the first panel 'Printer settings'
            if group == group_list[0]:
                grouppanel.SetMinSize(grid.ComputeFittingWindowSize(grouppanel))
        topsizer = wx.BoxSizer(wx.VERTICAL)
        topsizer.Add(notebook, 1, wx.EXPAND | wx.ALL, get_space('minor'))
        topsizer.Add(wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL), 0, wx.EXPAND)
        topsizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL), 0, wx.ALIGN_RIGHT | wx.ALL, get_space('stddlg'))
        self.SetSizer(topsizer)
        self.Fit()
        self.CentreOnParent()
# Remembered across invocations so the dialog reopens on the last used tab
notebookSelection = 0

def PronterOptions(pronterface):
    """Show the modal settings dialog and apply the result.

    Settings whose value changed are written back into *pronterface*
    and reported in one batch via on_settings_change().
    """
    dialog = PronterOptionsDialog(pronterface)
    global notebookSelection
    dialog.notebook.Selection = notebookSelection
    if dialog.ShowModal() == wx.ID_OK:
        changed_settings = []
        for setting in pronterface.settings._all_settings():
            old_value = setting.value
            setting.update()
            if setting.value != old_value:
                pronterface.set(setting.name, setting.value)
                changed_settings.append(setting)
        pronterface.on_settings_change(changed_settings)
    notebookSelection = dialog.notebook.Selection
    dialog.Destroy()
class ButtonEdit(wx.Dialog):
    """Custom button edit dialog.

    Lets the user define a custom control button: its title, an optional
    background colour and the command (or macro) it runs.
    """
    def __init__(self, pronterface):
        """Build the dialog: title field, colour picker row and command/macro row."""
        wx.Dialog.__init__(self, None, title = _("Custom Button"),
                           style = wx.DEFAULT_DIALOG_STYLE)
        self.pronterface = pronterface
        panel = wx.Panel(self)
        grid = wx.FlexGridSizer(rows = 0, cols = 2, hgap = get_space('minor'), vgap = get_space('minor'))
        grid.AddGrowableCol(1, 1)
        # Title of the button
        grid.Add(wx.StaticText(panel, -1, _("Button Title:")), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        self.name = wx.TextCtrl(panel, -1, "")
        dlg_size = 260
        self.name.SetMinSize(wx.Size(dlg_size, -1))
        grid.Add(self.name, 1, wx.EXPAND)
        # Colour of the button
        grid.Add(wx.StaticText(panel, -1, _("Colour:")), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        coloursizer = wx.BoxSizer(wx.HORIZONTAL)
        self.use_colour = wx.CheckBox(panel, -1)
        # Picker stays disabled until the checkbox enables it (see toggle_colour)
        self.color = wx.ColourPickerCtrl(panel, colour=(255, 255, 255), style=wx.CLRP_USE_TEXTCTRL)
        self.color.Disable()
        self.use_colour.Bind(wx.EVT_CHECKBOX, self.toggle_colour)
        coloursizer.Add(self.use_colour, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, get_space('minor'))
        coloursizer.Add(self.color, 1, wx.EXPAND)
        grid.Add(coloursizer, 1, wx.EXPAND)
        # Enter commands or choose a macro
        macrotooltip = _("Type short commands directly, enter a name for a new macro or select an existing macro from the list.")
        commandfield = wx.StaticText(panel, -1, _("Command:"))
        commandfield.SetToolTip(wx.ToolTip(macrotooltip))
        grid.Add(commandfield, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        macrochoices = list(self.pronterface.macros.keys()) # add the available macros to the dropdown
        macrochoices.insert(0, "") # add an empty field, so that a new macro name can be entered
        self.command = wx.ComboBox(panel, -1, "", choices = macrochoices, style = wx.CB_DROPDOWN)
        self.command.SetToolTip(wx.ToolTip(macrotooltip))
        commandsizer = wx.BoxSizer(wx.HORIZONTAL)
        commandsizer.Add(self.command, 1, wx.EXPAND)
        self.command.Bind(wx.EVT_TEXT, self.macrob_enabler)
        self.macrobtn = wx.Button(panel, -1, "...", style = wx.BU_EXACTFIT)
        self.macrobtn.SetMinSize((self.macrobtn.GetTextExtent('AAA').width, -1))
        self.macrobtn.SetToolTip(wx.ToolTip(_("Create a new macro or edit an existing one.")))
        self.macrobtn.Bind(wx.EVT_BUTTON, self.macrob_handler)
        commandsizer.Add(self.macrobtn, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, get_space('minor'))
        grid.Add(commandsizer, 1, wx.EXPAND)
        panel.SetSizer(grid)
        topsizer = wx.BoxSizer(wx.VERTICAL)
        topsizer.Add(panel, 0, wx.EXPAND | wx.ALL, get_space('major'))
        topsizer.Add(wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL), 0, wx.EXPAND)
        topsizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL), 0, wx.ALIGN_RIGHT | wx.ALL, get_space('stddlg'))
        self.SetSizer(topsizer)
        topsizer.Fit(self)
        self.CentreOnParent()
        self.name.SetFocus()
    def macrob_enabler(self, event):
        """Enable the '...' macro button only for usable command values:
        empty string, an existing macro name, or a valid new macro name
        (alphanumerics/underscores not clashing with a do_* command)."""
        macro = self.command.GetValue()
        valid = False
        try:
            if macro == "":
                valid = True
            elif macro in self.pronterface.macros:
                valid = True
            elif hasattr(self.pronterface.__class__, "do_" + macro):
                valid = False
            elif len([c for c in macro if not c.isalnum() and c != "_"]):
                valid = False
            else:
                valid = True
        except:
            # NOTE(review): bare 'except' repeats the checks above minus the
            # hasattr test; presumably a guard against errors while probing
            # pronterface attributes -- confirm and narrow if possible.
            if macro == "":
                valid = True
            elif macro in self.pronterface.macros:
                valid = True
            elif len([c for c in macro if not c.isalnum() and c != "_"]):
                valid = False
            else:
                valid = True
        self.macrobtn.Enable(valid)
    def macrob_handler(self, event):
        """Open the macro editor for the current command value; adopt the
        macro name as button title if no title was set yet."""
        macro = self.command.GetValue()
        macro = self.pronterface.edit_macro(macro)
        self.command.SetValue(macro)
        if self.name.GetValue() == "":
            self.name.SetValue(macro)
    def toggle_colour(self, event):
        """Enable the colour picker only while its checkbox is ticked."""
        self.color.Enable(self.use_colour.GetValue())
class TempGauge(wx.Panel):
    """Custom-drawn horizontal temperature gauge.

    Paints the current temperature as a coloured bar over a cold-to-hot
    gradient background, draws black triangle markers at the setpoint
    and renders a textual "value/setpoint" readout plus a title.
    """
    def __init__(self, parent, size = (200, 22), title = "",
                 maxval = 240, gaugeColour = None, bgcolor = "#FFFFFF"):
        """Create the gauge.

        maxval      -- nominal full-scale temperature of the gauge
        gaugeColour -- currently unused override for the bar colour
        bgcolor     -- window background colour string
        """
        wx.Panel.__init__(self, parent, -1, size = size)
        self.Bind(wx.EVT_PAINT, self.paint)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.bgcolor = wx.Colour()
        self.bgcolor.Set(bgcolor)
        self.width, self.height = size
        self.title = title
        self.max = maxval
        self.gaugeColour = gaugeColour
        self.value = 0
        self.setpoint = 0
        self.recalc()
    def recalc(self):
        """Recompute the pixels-per-degree scale; gives 5% headroom when
        the setpoint exceeds the nominal maximum."""
        mmax = max(int(self.setpoint * 1.05), self.max)
        self.scale = float(self.width - 2) / float(mmax)
        # x pixel position of the cold->hot gradient midpoint
        self.ypt = max(16, int(self.scale * max(self.setpoint, self.max / 6)))
    def SetValue(self, value):
        """Update the displayed temperature; repaint via CallAfter so it
        can be invoked from non-GUI threads."""
        self.value = value
        wx.CallAfter(self.Refresh)
    def SetTarget(self, value):
        """Update the setpoint marker; repaint via CallAfter."""
        self.setpoint = value
        wx.CallAfter(self.Refresh)
    def interpolatedColour(self, val, vmin, vmid, vmax, cmin, cmid, cmax):
        """Piecewise-linear blend between three colours: cmin at vmin,
        cmid at vmid, cmax at vmax (clamped outside the range), darkened
        by a 0.8 factor."""
        if val < vmin:
            return cmin
        if val > vmax:
            return cmax
        if val <= vmid:
            lo, hi, val, valhi = cmin, cmid, val - vmin, vmid - vmin
        else:
            lo, hi, val, valhi = cmid, cmax, val - vmid, vmax - vmid
        vv = float(val) / valhi
        rgb = lo.Red() + (hi.Red() - lo.Red()) * vv, lo.Green() + (hi.Green() - lo.Green()) * vv, lo.Blue() + (hi.Blue() - lo.Blue()) * vv
        rgb = (int(x * 0.8) for x in rgb)
        return wx.Colour(*rgb)
    def paint(self, event):
        """Repaint the whole gauge: drop shadow, gradient background,
        value bar, setpoint markers and text readout."""
        self.width, self.height = self.GetClientSize()
        self.recalc()
        x0, y0, x1, y1, xE, yE = 1, 1, self.ypt + 1, 1, self.width + 1 - 2, 20
        dc = wx.PaintDC(self)
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        cold, medium, hot = wx.Colour(0, 167, 223), wx.Colour(239, 233, 119), wx.Colour(210, 50, 0)
        # gauge1, gauge2 = wx.Colour(255, 255, 210), (self.gaugeColour or wx.Colour(234, 82, 0))
        gauge1 = wx.Colour(255, 255, 210)
        shadow1, shadow2 = wx.Colour(110, 110, 110), self.bgcolor
        gc = wx.GraphicsContext.Create(dc)
        # draw shadow first
        # corners
        gc.SetBrush(gc.CreateRadialGradientBrush(xE - 7, 9, xE - 7, 9, 8, shadow1, shadow2))
        gc.DrawRectangle(xE - 7, 1, 8, 8)
        gc.SetBrush(gc.CreateRadialGradientBrush(xE - 7, 17, xE - 7, 17, 8, shadow1, shadow2))
        gc.DrawRectangle(xE - 7, 17, 8, 8)
        gc.SetBrush(gc.CreateRadialGradientBrush(x0 + 6, 17, x0 + 6, 17, 8, shadow1, shadow2))
        gc.DrawRectangle(0, 17, x0 + 6, 8)
        # edges
        gc.SetBrush(gc.CreateLinearGradientBrush(xE - 6, 0, xE + 1, 0, shadow1, shadow2))
        gc.DrawRectangle(xE - 7, 9, 8, 8)
        gc.SetBrush(gc.CreateLinearGradientBrush(x0, yE - 2, x0, yE + 5, shadow1, shadow2))
        gc.DrawRectangle(x0 + 6, yE - 2, xE - 12, 7)
        # draw gauge background
        gc.SetBrush(gc.CreateLinearGradientBrush(x0, y0, x1 + 1, y1, cold, medium))
        gc.DrawRoundedRectangle(x0, y0, x1 + 4, yE, 6)
        gc.SetBrush(gc.CreateLinearGradientBrush(x1 - 2, y1, xE, y1, medium, hot))
        gc.DrawRoundedRectangle(x1 - 2, y1, xE - x1, yE, 6)
        # draw gauge
        width = 12
        w1 = y0 + 9 - width / 2
        w2 = w1 + width
        value = x0 + max(10, min(self.width + 1 - 2, int(self.value * self.scale)))
        # gc.SetBrush(gc.CreateLinearGradientBrush(x0, y0 + 3, x0, y0 + 15, gauge1, gauge2))
        # gc.SetBrush(gc.CreateLinearGradientBrush(0, 3, 0, 15, wx.Colour(255, 255, 255), wx.Colour(255, 90, 32)))
        gc.SetBrush(gc.CreateLinearGradientBrush(x0, y0 + 3, x0, y0 + 15, gauge1, self.interpolatedColour(value, x0, x1, xE, cold, medium, hot)))
        val_path = gc.CreatePath()
        val_path.MoveToPoint(x0, w1)
        val_path.AddLineToPoint(value, w1)
        val_path.AddLineToPoint(value + 2, w1 + width / 4)
        val_path.AddLineToPoint(value + 2, w2 - width / 4)
        val_path.AddLineToPoint(value, w2)
        # val_path.AddLineToPoint(value-4, 10)
        val_path.AddLineToPoint(x0, w2)
        gc.DrawPath(val_path)
        # draw setpoint markers
        setpoint = x0 + max(10, int(self.setpoint * self.scale))
        gc.SetBrush(gc.CreateBrush(wx.Brush(wx.Colour(0, 0, 0))))
        setp_path = gc.CreatePath()
        setp_path.MoveToPoint(setpoint - 4, y0)
        setp_path.AddLineToPoint(setpoint + 4, y0)
        setp_path.AddLineToPoint(setpoint, y0 + 5)
        setp_path.MoveToPoint(setpoint - 4, yE)
        setp_path.AddLineToPoint(setpoint + 4, yE)
        setp_path.AddLineToPoint(setpoint, yE - 5)
        gc.DrawPath(setp_path)
        # draw readout
        text = "T\u00B0 %u/%u" % (self.value, self.setpoint)
        # gc.SetFont(gc.CreateFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD), wx.WHITE))
        # gc.DrawText(text, 29,-2)
        # white text drawn 1px offset below the dark text gives an embossed look
        gc.SetFont(gc.CreateFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD), wx.WHITE))
        gc.DrawText(self.title, x0 + 19, y0 + 4)
        gc.DrawText(text, x0 + 119, y0 + 4)
        gc.SetFont(gc.CreateFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)))
        gc.DrawText(self.title, x0 + 18, y0 + 3)
        gc.DrawText(text, x0 + 118, y0 + 3)
class SpecialButton:
    """Value object describing one custom control button."""

    # Class-level defaults mirroring the constructor arguments
    label = None
    command = None
    background = None
    tooltip = None
    custom = None

    def __init__(self, label, command, background = None,
                 tooltip = None, custom = False):
        for attribute, value in (("label", label),
                                 ("command", command),
                                 ("background", background),
                                 ("tooltip", tooltip),
                                 ("custom", custom)):
            setattr(self, attribute, value)
| 31,749 | Python | .py | 668 | 37.112275 | 145 | 0.599697 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import traceback
import logging
import wx
class BaseViz:
    """No-op interface shared by the G-code visualization widgets."""

    def clear(self, *args):
        pass

    def addfile_perlayer(self, gcode, showall = False):
        """Generator yielding one index per layer of *gcode*, then None
        as an end marker."""
        index = 0
        # len() is re-evaluated each pass so layers appended while we
        # iterate are still reported.
        while index < len(gcode.all_layers):
            yield index
            index += 1
        yield None

    def addfile(self, *args, **kwargs):
        pass

    def addgcodehighlight(self, *args, **kwargs):
        pass

    def setlayer(self, *args):
        pass

    def on_settings_change(self, changed_settings):
        pass
class NoViz(BaseViz):
    """Stand-in visualization used when the viewer is disabled in settings."""
    # Mirrors the attribute real viewers expose
    showall = False
    def Refresh(self, *a):
        pass
class NoVizWindow:
    """Stand-in for the standalone G-code viewer window."""
    def __init__(self):
        # 'p' mimics the panel attribute of the real viewer window
        self.p = NoViz()
    def Destroy(self):
        pass
class VizPane(wx.BoxSizer):
    """Sizer that instantiates the main G-code visualization widgets.

    Depending on root.settings.mainviz this builds no visualization at
    all, a 2D canvas or the OpenGL 3D viewer (root.gviz), plus the
    standalone viewer window (root.gwindow).  If 3D initialization
    fails, it falls back to the 2D widgets and logs the traceback.
    """
    def __init__(self, root, parentpanel = None):
        """Populate root.gviz / root.gwindow and lay out the chosen widget.

        root        -- main window object carrying settings and callbacks
        parentpanel -- parent for the embedded viewer (defaults to root.panel)
        """
        super(VizPane, self).__init__(wx.VERTICAL)
        if not parentpanel: parentpanel = root.panel
        if root.settings.mainviz == "None":
            root.gviz = NoViz()
            root.gwindow = NoVizWindow()
            return
        use2dview = root.settings.mainviz == "2D"
        if root.settings.mainviz == "3D":
            try:
                import printrun.gcview
                root.gviz = printrun.gcview.GcodeViewMainWrapper(
                    parentpanel,
                    root.build_dimensions_list,
                    root = root,
                    circular = root.settings.circular_bed,
                    antialias_samples = int(root.settings.antialias3dsamples),
                    grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                    perspective = root.settings.perspective)
                root.gviz.clickcb = root.show_viz_window
            # Fix: was a bare 'except:', which would also swallow
            # SystemExit/KeyboardInterrupt raised during GL setup.
            except Exception:
                use2dview = True
                logging.error(_("3D view mode requested, but we failed to initialize it.\n")
                              + _("Falling back to 2D view, and here is the backtrace:\n")
                              + traceback.format_exc())
        if use2dview:
            from printrun import gviz
            root.gviz = gviz.Gviz(parentpanel, (300, 300),
                                  build_dimensions = root.build_dimensions_list,
                                  grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                  extrusion_width = root.settings.preview_extrusion_width,
                                  bgcolor = root.bgcolor)
            root.gviz.SetToolTip(wx.ToolTip(_("Click to examine / edit\n layers of loaded file")))
            root.gviz.showall = 1
            root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
        use3dview = root.settings.viz3d
        if use3dview:
            try:
                import printrun.gcview
                objects = None
                if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
                    # Share the parsed model between embedded and windowed views
                    objects = root.gviz.objects
                root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, _('G-Code Viewer'),
                                                              size = (600, 600),
                                                              build_dimensions = root.build_dimensions_list,
                                                              objects = objects,
                                                              root = root,
                                                              circular = root.settings.circular_bed,
                                                              antialias_samples = int(root.settings.antialias3dsamples),
                                                              grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                                              perspective=root.settings.perspective)
            # Fix: narrowed from a bare 'except:' (see above)
            except Exception:
                use3dview = False
                logging.error(_("3D view mode requested, but we failed to initialize it.\n")
                              + _("Falling back to 2D view, and here is the backtrace:\n")
                              + traceback.format_exc())
        if not use3dview:
            from printrun import gviz
            root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,
                                           grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                           extrusion_width = root.settings.preview_extrusion_width,
                                           bgcolor = root.bgcolor)
        # Keep the external window around (hidden) instead of destroying it
        root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
        if not isinstance(root.gviz, NoViz):
            self.Add(root.gviz.widget, 1, flag = wx.EXPAND)
| 5,083 | Python | .py | 109 | 33.53211 | 119 | 0.576008 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
import math
from .bufferedcanvas import BufferedCanvas
from printrun.utils import imagefile
def sign(n):
    """Return -1, 0 or 1 matching the sign of *n*."""
    return (n > 0) - (n < 0)
# On/off pixel pattern for the dashed focus-rectangle pen
DASHES = [4, 7]
# Brush and pen for grey overlay when mouse hovers over
HOVER_PEN_COLOR = wx.Colour(100, 100, 100, 172)
HOVER_BRUSH_COLOR = wx.Colour(0, 0, 0, 128)
class FocusCanvas(BufferedCanvas):
    """Buffered canvas that repaints on focus gain and can draw a dashed
    focus rectangle around its client area."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.Bind(wx.EVT_SET_FOCUS, self.onFocus)

    def onFocus(self, event):
        # Redraw so the focus rectangle shows up immediately
        self.Refresh()
        event.Skip()

    def drawFocusRect(self, dc):
        """Draw a dashed border when the widget owns keyboard focus."""
        if not self.HasFocus():
            return
        dashed = wx.Pen(wx.BLACK, 1, wx.PENSTYLE_USER_DASH)
        dashed.SetDashes(DASHES)
        dc.Pen = dashed
        dc.Brush = wx.Brush(wx.TRANSPARENT_BRUSH)
        dc.DrawRectangle(self.ClientRect)
class XYButtons(FocusCanvas):
    """Round bitmap control for jogging the X/Y axes (and Z via keyboard).

    The bitmap is split into four quadrants (right/up/left/down) and
    concentric rings: a click's quadrant picks the move direction and its
    ring picks the distance (see getMovement).  The four bitmap corners
    and the centre disc fire cornerCallback with an axis to home, and the
    small keypad dots select the step size used for keyboard jogging.
    """
    # (x, y) pixel centres of the keyboard-step selector dots, keyed by keypad_idx
    keypad_positions = {
        0: (104, 99),
        1: (86, 83),
        2: (68, 65),
        3: (53, 50)
    }
    # hit-test radius around each keypad dot, in pixels
    keypad_radius = 9
    corner_size = (49, 49)
    corner_inset = (7, 13)
    # idx -> (x, y, radius) of the step-size label circles drawn on the bitmap
    label_overlay_positions = {
        1: (145, 98.5, 9),
        2: (160.5, 83.5, 10.6),
        3: (178, 66, 13),
        4: (197.3, 46.3, 13.3)
    }
    # Ring boundaries in pixels from the centre; ring i spans radii [i, i+1]
    concentric_circle_radii = [0, 17, 45, 69, 94, 115]
    concentric_inset = 11
    # centre of the rings within the bitmap, in pixels
    center = (124, 121)
    # dead zone (pixels) between quadrants where clicks are ignored
    spacer = 7
    imagename = "control_xy.png"
    # Corner index (-1 = centre disc) -> axis name passed to cornerCallback
    corner_to_axis = {
        -1: "center",
        0: "x",
        1: "z",
        2: "y",
        3: "all",
    }
    def __init__(self, parent, moveCallback = None, cornerCallback = None, spacebarCallback = None, bgcolor = "#FFFFFF", ID=-1, zcallback=None):
        """Load the bitmaps, store the callbacks and reset hover state.

        moveCallback(x, y)      -- called for relative X/Y jogs
        cornerCallback(axis)    -- called for corner/centre homing clicks
        spacebarCallback()      -- called when space is pressed
        zcallback(z)            -- called for keyboard Z jogs
        """
        self.bg_bmp = wx.Image(imagefile(self.imagename), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        self.keypad_bmp = wx.Image(imagefile("arrow_keys.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        # index of the selected keyboard-step dot, -1 when none selected
        self.keypad_idx = -1
        self.hovered_keypad = None
        self.quadrant = None
        self.concentric = None
        self.corner = None
        self.moveCallback = moveCallback
        self.cornerCallback = cornerCallback
        self.spacebarCallback = spacebarCallback
        self.zCallback = zcallback
        self.enabled = False
        # Remember the last clicked buttons, so we can repeat when spacebar pressed
        self.lastMove = None
        self.lastCorner = None
        self.bgcolor = wx.Colour()
        self.bgcolor.Set(bgcolor)
        self.bgcolormask = wx.Colour(self.bgcolor.Red(), self.bgcolor.Green(), self.bgcolor.Blue(), 128)
        super().__init__(parent, ID, size=self.bg_bmp.GetSize())
        self.bind_events()
    def bind_events(self):
        """Hook up mouse/keyboard handlers (overridden by XYButtonsMini)."""
        # Set up mouse and keyboard event capture
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
        self.Bind(wx.EVT_CHAR_HOOK, self.OnKey)
        self.Bind(wx.EVT_KILL_FOCUS, self.onKillFocus)
    def onKillFocus(self, evt):
        """Deselect the keyboard-step dot when focus is lost."""
        self.setKeypadIndex(-1)
        evt.Skip()
    def disable(self):
        self.Enabled = self.enabled = False
        self.update()
    def enable(self):
        self.Enabled = self.enabled = True
        self.update()
    def repeatLast(self):
        """Re-issue the most recent move or homing command (used by space)."""
        if self.lastMove:
            self.moveCallback(*self.lastMove)
        if self.lastCorner:
            self.cornerCallback(self.corner_to_axis[self.lastCorner])
    def clearRepeat(self):
        self.lastMove = None
        self.lastCorner = None
    def distanceToLine(self, pos, x1, y1, x2, y2):
        """Perpendicular distance from *pos* to the line through (x1,y1)-(x2,y2)."""
        xlen = x2 - x1
        ylen = y2 - y1
        pxlen = x1 - pos.x
        pylen = y1 - pos.y
        return abs(xlen * pylen - ylen * pxlen) / math.sqrt(xlen ** 2 + ylen ** 2)
    def distanceToPoint(self, x1, y1, x2, y2):
        """Euclidean distance between (x1,y1) and (x2,y2)."""
        return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    def cycleKeypadIndex(self, forward):
        """Next/previous keypad index; returns -1 past the end instead of
        wrapping, so Tab can leave the widget."""
        idx = self.keypad_idx + (1 if forward else -1)
        # do not really cycle to allow exiting of jog controls widget
        return idx if idx < len(self.keypad_positions) else -1
    def setKeypadIndex(self, idx):
        self.keypad_idx = idx
        self.update()
    def getMovement(self, event):
        """Map the hovered quadrant + ring to an (x, y, z) relative move.

        Ring i gives 10**(i-2) units; Shift doubles and Ctrl halves the
        step; Z moves are capped at 10 units.
        """
        xdir = [1, 0, -1, 0, 0, 0][self.quadrant]
        ydir = [0, 1, 0, -1, 0, 0][self.quadrant]
        zdir = [0, 0, 0, 0, 1, -1][self.quadrant]
        magnitude = math.pow(10, self.concentric - 2)
        magnitude *= event.ShiftDown() and 2 or event.ControlDown() and 0.5 or 1
        if zdir:
            magnitude = min(magnitude, 10)
        return (magnitude * xdir, magnitude * ydir, magnitude * zdir)
    def lookupConcentric(self, radius):
        """Return the ring index containing the given radius (in pixels)."""
        idx = 0
        for r in self.concentric_circle_radii[1:]:
            if radius < r:
                return idx
            idx += 1
        return len(self.concentric_circle_radii)
    def getQuadrantConcentricFromPosition(self, pos):
        """Return (quadrant, ring index) for a pixel position.

        Quadrants: 0 = right, 1 = up, 2 = left, 3 = down.
        """
        rel_x = pos[0] - self.center[0]
        rel_y = pos[1] - self.center[1]
        if rel_x > rel_y and rel_x > -rel_y:
            quadrant = 0  # Right
        elif rel_x <= rel_y and rel_x > -rel_y:
            quadrant = 3  # Down
        elif rel_x > rel_y and rel_x < -rel_y:
            quadrant = 1  # Up
        else:
            quadrant = 2  # Left
        radius = math.sqrt(rel_x ** 2 + rel_y ** 2)
        idx = self.lookupConcentric(radius)
        return (quadrant, idx)
    def mouseOverKeypad(self, mpos):
        """Return the index of the keypad dot under *mpos*, or None."""
        for idx, kpos in self.keypad_positions.items():
            radius = self.distanceToPoint(mpos[0], mpos[1], kpos[0], kpos[1])
            if radius < XYButtons.keypad_radius:
                return idx
        return None
    def drawPartialPie(self, gc, center, r1, r2, angle1, angle2):
        """Fill the annular sector between radii r1..r2 and angles angle1..angle2."""
        p1 = wx.Point(int(center.x + r1 * math.cos(angle1)), int(center.y + r1 * math.sin(angle1)))
        path = gc.CreatePath()
        path.MoveToPoint(p1.x, p1.y)
        path.AddArc(center.x, center.y, r1, angle1, angle2, True)
        path.AddArc(center.x, center.y, r2, angle2, angle1, False)
        path.AddLineToPoint(p1.x, p1.y)
        gc.DrawPath(path)
    def highlightQuadrant(self, gc, quadrant, concentric):
        """Shade the hovered ring segment of the given quadrant."""
        if not 0 <= quadrant <= 3:
            return
        assert(concentric >= 0 and concentric <= 4)
        inner_ring_radius = self.concentric_inset
        # fudge = math.pi*0.002
        # small angular gap so adjacent quadrant highlights don't touch
        fudge = -0.02
        center = wx.Point(self.center[0], self.center[1])
        if quadrant == 0:
            a1, a2 = (-math.pi * 0.25, math.pi * 0.25)
            center.x += inner_ring_radius
        elif quadrant == 1:
            a1, a2 = (math.pi * 1.25, math.pi * 1.75)
            center.y -= inner_ring_radius
        elif quadrant == 2:
            a1, a2 = (math.pi * 0.75, math.pi * 1.25)
            center.x -= inner_ring_radius
        elif quadrant == 3:
            a1, a2 = (math.pi * 0.25, math.pi * 0.75)
            center.y += inner_ring_radius
        r1 = self.concentric_circle_radii[concentric]
        r2 = self.concentric_circle_radii[concentric + 1]
        self.drawPartialPie(gc, center, r1 - inner_ring_radius, r2 - inner_ring_radius, a1 + fudge, a2 - fudge)
    def drawCorner(self, gc, x, y, angle = 0.0):
        """Draw one corner-highlight polygon centred at (x, y), rotated by *angle*."""
        w, h = self.corner_size
        gc.PushState()
        gc.Translate(x, y)
        gc.Rotate(angle)
        path = gc.CreatePath()
        path.MoveToPoint(-w / 2, -h / 2)
        path.AddLineToPoint(w / 2, -h / 2)
        path.AddLineToPoint(w / 2, -h / 2 + h / 4)
        path.AddLineToPoint(w / 12, h / 12)
        path.AddLineToPoint(-w / 2 + w / 4, h / 2)
        path.AddLineToPoint(-w / 2, h / 2)
        path.AddLineToPoint(-w / 2, -h / 2)
        gc.DrawPath(path)
        gc.PopState()
    def highlightCorner(self, gc, corner = 0):
        """Shade the hovered corner (0=TL, 1=TR, 2=BR, 3=BL)."""
        w, h = self.corner_size
        xinset, yinset = self.corner_inset
        cx, cy = self.center
        ww, wh = self.GetSize()
        if corner == 0:
            x, y = (cx - ww / 2 + xinset + 1, cy - wh / 2 + yinset)
            self.drawCorner(gc, x + w / 2, y + h / 2, 0)
        elif corner == 1:
            x, y = (cx + ww / 2 - xinset, cy - wh / 2 + yinset)
            self.drawCorner(gc, x - w / 2, y + h / 2, math.pi / 2)
        elif corner == 2:
            x, y = (cx + ww / 2 - xinset, cy + wh / 2 - yinset - 1)
            self.drawCorner(gc, x - w / 2, y - h / 2, math.pi)
        elif corner == 3:
            x, y = (cx - ww / 2 + xinset + 1, cy + wh / 2 - yinset - 1)
            self.drawCorner(gc, x + w / 2, y - h / 2, math.pi * 3 / 2)
    def drawCenteredDisc(self, gc, radius):
        """Shade a disc of the given radius around the widget centre."""
        cx, cy = self.center
        gc.DrawEllipse(cx - radius, cy - radius, radius * 2, radius * 2)
    def draw(self, dc, w, h):
        """Paint background bitmap plus hover highlights; grey out when disabled."""
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        if self.bg_bmp:
            w, h = (self.bg_bmp.GetWidth(), self.bg_bmp.GetHeight())
            gc.DrawBitmap(self.bg_bmp, 0, 0, w, h)
        if self.enabled and self.IsEnabled():
            gc.SetPen(wx.Pen(HOVER_PEN_COLOR, 4))
            gc.SetBrush(wx.Brush(HOVER_BRUSH_COLOR))
            if self.concentric is not None:
                if self.concentric < len(self.concentric_circle_radii):
                    if self.concentric == 0:
                        self.drawCenteredDisc(gc, self.concentric_circle_radii[1])
                    elif self.quadrant is not None:
                        self.highlightQuadrant(gc, self.quadrant, self.concentric)
                elif self.corner is not None:
                    self.highlightCorner(gc, self.corner)
            if self.keypad_idx >= 0:
                padw, padh = (self.keypad_bmp.GetWidth(), self.keypad_bmp.GetHeight())
                pos = self.keypad_positions[self.keypad_idx]
                pos = (pos[0] - padw / 2 - 3, pos[1] - padh / 2 - 3)
                gc.DrawBitmap(self.keypad_bmp, pos[0], pos[1], padw, padh)
            if self.hovered_keypad is not None and self.hovered_keypad != self.keypad_idx:
                pos = self.keypad_positions[self.hovered_keypad]
                r = XYButtons.keypad_radius
                gc.DrawEllipse(pos[0]-r/2, pos[1]-r/2, r, r)
            # Draw label overlays
            gc.SetPen(wx.Pen(wx.Colour(255, 255, 255, 128), 1))
            gc.SetBrush(wx.Brush(wx.Colour(255, 255, 255, 128 + 64)))
            for idx, kpos in self.label_overlay_positions.items():
                if idx != self.concentric:
                    r = kpos[2]
                    gc.DrawEllipse(kpos[0] - r, kpos[1] - r, r * 2, r * 2)
        else:
            gc.SetPen(wx.Pen(self.bgcolor, 0))
            gc.SetBrush(wx.Brush(self.bgcolormask))
            gc.DrawRectangle(0, 0, w, h)
        self.drawFocusRect(dc)
        # Used to check exact position of keypad dots, should we ever resize the bg image
        # for idx, kpos in self.label_overlay_positions.items():
        #     dc.DrawCircle(kpos[0], kpos[1], kpos[2])
    # ------ #
    # Events #
    # ------ #
    def OnKey(self, evt):
        """Keyboard jogging: Tab cycles the step-size dot, arrows jog X/Y,
        PageUp/PageDown jog Z, Esc deselects, space repeats the last action."""
        # print('XYButtons key', evt.GetKeyCode())
        if not self.enabled:
            evt.Skip()
            return
        key = evt.KeyCode
        if self.keypad_idx >= 0:
            if key == wx.WXK_TAB:
                keypad = self.cycleKeypadIndex(not evt.ShiftDown())
                self.setKeypadIndex(keypad)
                if keypad == -1:
                    # exit widget after largest step
                    # evt.Skip()
                    # On MS Windows if tab event is delivered,
                    # it is not handled
                    self.Navigate(not evt.ShiftDown())
                return
            elif key == wx.WXK_ESCAPE:
                self.setKeypadIndex(-1)
            elif key == wx.WXK_UP:
                self.quadrant = 1
            elif key == wx.WXK_DOWN:
                self.quadrant = 3
            elif key == wx.WXK_LEFT:
                self.quadrant = 2
            elif key == wx.WXK_RIGHT:
                self.quadrant = 0
            elif key == wx.WXK_PAGEUP:
                self.quadrant = 4
            elif key == wx.WXK_PAGEDOWN:
                self.quadrant = 5
            else:
                evt.Skip()
                return
            # selected dot determines the ring (and hence the step size)
            self.concentric = self.keypad_idx + 1
            if self.quadrant is not None:
                x, y, z = self.getMovement(evt)
                if (x or y) and self.moveCallback:
                    self.moveCallback(x, y)
                if z and self.zCallback:
                    self.zCallback(z)
            self.Refresh()
        elif key == wx.WXK_SPACE:
            self.spacebarCallback()
        elif key == wx.WXK_TAB:
            self.setKeypadIndex(len(self.keypad_positions)-1 if evt.ShiftDown() else 0)
        else:
            # handle arrows elsewhere
            evt.Skip()
    def OnMotion(self, event):
        """Track the hovered quadrant/ring/corner/keypad dot and refresh on change."""
        if not self.enabled:
            return
        oldcorner = self.corner
        oldq, oldc = self.quadrant, self.concentric
        old_hovered_keypad = self.hovered_keypad
        mpos = event.GetPosition()
        self.hovered_keypad = self.mouseOverKeypad(mpos)
        self.quadrant = None
        self.concentric = None
        if self.hovered_keypad is None:
            center = wx.Point(self.center[0], self.center[1])
            riseDist = self.distanceToLine(mpos, center.x - 1, center.y - 1, center.x + 1, center.y + 1)
            fallDist = self.distanceToLine(mpos, center.x - 1, center.y + 1, center.x + 1, center.y - 1)
            self.quadrant, self.concentric = self.getQuadrantConcentricFromPosition(mpos)
            # If mouse hovers in space between quadrants, don't commit to a quadrant
            if riseDist <= self.spacer or fallDist <= self.spacer:
                self.quadrant = None
        cx, cy = self.center
        if mpos.x < cx and mpos.y < cy:
            self.corner = 0
        if mpos.x >= cx and mpos.y < cy:
            self.corner = 1
        if mpos.x >= cx and mpos.y >= cy:
            self.corner = 2
        if mpos.x < cx and mpos.y >= cy:
            self.corner = 3
        if oldq != self.quadrant or oldc != self.concentric or oldcorner != self.corner \
           or old_hovered_keypad != self.hovered_keypad:
            self.update()
    def OnLeftDown(self, event):
        """Dispatch a click to centre homing, a quadrant move, a corner
        homing action or keypad-dot selection."""
        if not self.enabled:
            return
        # Take focus when clicked so that arrow keys can control movement
        self.SetFocus()
        mpos = event.GetPosition()
        idx = self.mouseOverKeypad(mpos)
        if idx is None:
            self.quadrant, self.concentric = self.getQuadrantConcentricFromPosition(mpos)
            if self.concentric is not None:
                if self.concentric < len(self.concentric_circle_radii):
                    if self.concentric == 0:
                        self.lastCorner = -1
                        self.lastMove = None
                        self.cornerCallback(self.corner_to_axis[-1])
                    elif self.quadrant is not None:
                        x, y, z = self.getMovement(event)
                        if self.moveCallback:
                            self.lastMove = (x, y)
                            self.lastCorner = None
                            self.moveCallback(x, y)
                elif self.corner is not None:
                    if self.cornerCallback:
                        self.lastCorner = self.corner
                        self.lastMove = None
                        self.cornerCallback(self.corner_to_axis[self.corner])
        else:
            # clicking the selected dot again deselects it
            self.setKeypadIndex(-1 if self.keypad_idx == idx else idx)
    def OnLeaveWindow(self, evt):
        """Clear all hover state when the mouse leaves the widget."""
        self.quadrant = None
        self.concentric = None
        self.update()
class XYButtonsMini(XYButtons):
    """Compact variant of XYButtons: one ring only, no keyboard jogging.

    The centre disc homes all axes; the four corners home individual axes
    per corner_to_axis.
    """
    imagename = "control_mini.png"
    center = (57, 56.5)
    concentric_circle_radii = [0, 30.3]
    corner_inset = (5, 5)
    corner_size = (50, 50)
    outer_radius = 31
    corner_to_axis = {
        0: "x",
        1: "z",
        2: "y",
        3: "center",
    }
    def bind_events(self):
        """Mouse handlers only -- the mini pad has no keyboard/focus handling."""
        # Set up mouse and keyboard event capture
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
    def OnMotion(self, event):
        """Track the hovered disc/corner and refresh on change."""
        if not self.enabled:
            return
        oldcorner = self.corner
        oldq, oldc = self.quadrant, self.concentric
        mpos = event.GetPosition()
        self.quadrant, self.concentric = self.getQuadrantConcentricFromPosition(mpos)
        cx, cy = XYButtonsMini.center
        if mpos.x < cx and mpos.y < cy:
            self.corner = 0
        if mpos.x >= cx and mpos.y < cy:
            self.corner = 1
        if mpos.x >= cx and mpos.y >= cy:
            self.corner = 2
        if mpos.x < cx and mpos.y >= cy:
            self.corner = 3
        if oldq != self.quadrant or oldc != self.concentric or oldcorner != self.corner:
            self.update()
    def OnLeftDown(self, event):
        """Centre disc homes all axes; corners home the axis in corner_to_axis."""
        if not self.enabled:
            return
        # Take focus when clicked so that arrow keys can control movement
        self.SetFocus()
        mpos = event.GetPosition()
        self.quadrant, self.concentric = self.getQuadrantConcentricFromPosition(mpos)
        if self.concentric is not None:
            if self.concentric < len(self.concentric_circle_radii):
                self.cornerCallback("all")
            elif self.corner is not None:
                if self.cornerCallback:
                    self.lastCorner = self.corner
                    self.lastMove = None
                    self.cornerCallback(self.corner_to_axis[self.corner])
    def drawCorner(self, gc, x, y, angle = 0.0):
        """Draw one corner highlight with a rounded inner edge."""
        w, h = self.corner_size
        gc.PushState()
        gc.Translate(x, y)
        gc.Rotate(angle)
        path = gc.CreatePath()
        path.MoveToPoint(-w / 2, -h / 2)
        path.AddLineToPoint(w / 2, -h / 2)
        path.AddLineToPoint(w / 2, -h / 2 + h / 4)
        # concave arc hugging the central disc
        path.AddArc(w / 2, h / 2, self.outer_radius, 3 * math.pi / 2, math.pi, False)
        path.AddLineToPoint(-w / 2, h / 2)
        path.AddLineToPoint(-w / 2, -h / 2)
        gc.DrawPath(path)
        gc.PopState()
    def draw(self, dc, w, h):
        """Paint the mini bitmap plus hover highlight; grey out when disabled."""
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        if self.bg_bmp:
            w, h = (self.bg_bmp.GetWidth(), self.bg_bmp.GetHeight())
            gc.DrawBitmap(self.bg_bmp, 0, 0, w, h)
        if self.enabled and self.IsEnabled():
            gc.SetPen(wx.Pen(HOVER_PEN_COLOR, 4))
            gc.SetBrush(wx.Brush(HOVER_BRUSH_COLOR))
            if self.concentric is not None:
                if self.concentric < len(self.concentric_circle_radii):
                    self.drawCenteredDisc(gc, self.concentric_circle_radii[-1])
                elif self.corner is not None:
                    self.highlightCorner(gc, self.corner)
        else:
            gc.SetPen(wx.Pen(self.bgcolor, 0))
            gc.SetBrush(wx.Brush(self.bgcolormask))
            gc.DrawRectangle(0, 0, w, h)
| 19,964 | Python | .py | 472 | 31.629237 | 144 | 0.562523 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2017 Rock Storm <rockstorm@gmx.com>
# This module indirectly depends of pronsole and settings but it does not
# import them
class SpoolManager():
"""
Back-end for the Spool Manager.
It is expected to be called from an object which has the contents of
settings.py and pronsole.py. This way the class is able to '_add' and
'set' settings.
This class basically handles a single variable called '_spool_list'. It is
    a list of spool_items. A spool_item is in turn a list of three elements: a
string, a float and an integer. Namely: the name of the spool, the
remaining length of filament and the extruder it is loaded to. E.g.:
spool_item = [string name, float length, int extruder]
_spool_list = [spool_item spool_1, ... , spool_item spool_n ]
'_spool_list' is somehow a Nx3 matrix where N is the number of recorded
spools. The first column contains the names of the spools, the second the
lengths of remaining filament and the third column contains which extruder
is the spool loaded for.
The variable '_spool_list' is saved in the configuration file using a
setting with the same name: 'spool_list'. It is saved as a single string.
It concatenates every item from the list and separates them by a comma and
a space. For instance, if the variable '_spool_list' was:
_spool_list = [["spool_1", 100.0, 0], ["spool_2", 200.0, -1]]
The 'spool_list' setting will look like:
"spool_1, 100.0, 0, spool_2, 200.0, -1"
"""
    def __init__(self, parent):
        """Store the settings-capable parent object and load the recorded
        spools from its configuration."""
        self.parent = parent
        self.refresh()
    def refresh(self):
        """
        Re-read the 'spool_list' setting from the configuration file and
        populate the in-memory list of recorded spools.
        """
        self._spool_list = self._readSetting(self.parent.settings.spool_list)
    def add(self, spool_name, spool_length):
        """Add the given spool to the list of recorded spools."""
        # New spools start unloaded (extruder field is -1)
        self._spool_list.append([spool_name, spool_length, -1])
        self._save()
    def load(self, spool_name, extruder):
        """Mark the given spool as loaded on 'extruder'.

        Any spool previously loaded on that extruder is unloaded first.
        NOTE(review): assumes 'spool_name' matches a recorded spool;
        otherwise _findByColumn returns -1 and the append below fails.
        """
        # If there was a spool already loaded for this extruder unload it
        previous_spool = self._findByColumn(extruder, 2)
        if previous_spool != -1:
            self.unload(extruder)
        # Load the given spool
        # Look the spool up before removing it, then re-append with the
        # extruder field set
        new_spool = self._findByColumn(spool_name, 0)
        self.remove(spool_name)
        self._spool_list.append([new_spool[0], new_spool[1], extruder])
        self._save()
    def remove(self, spool_name):
        """Remove the given spool item from the list of recorded spools.

        NOTE(review): assumes the name matches a recorded spool; otherwise
        _findByColumn returns -1 and list.remove raises ValueError.
        """
        spool_item = self._findByColumn(spool_name, 0)
        self._spool_list.remove(spool_item)
        self._save()
    def unload(self, extruder):
        """Unload whatever spool is loaded on the given extruder by setting
        its extruder field back to -1; no-op when nothing is loaded."""
        spool_item = self._findByColumn(extruder, 2)
        if spool_item != -1:
            self.remove(spool_item[0])
            self._spool_list.append([spool_item[0], spool_item[1], -1])
            self._save()
def isLoaded(self, spool_name):
"""
int isLoaded( string name )
Return the extruder that the given spool is loaded to. -1 if it is
not loaded for any extruder or None if the given name does not match
any known spool.
"""
spool_item = self._findByColumn(spool_name, 0)
if spool_item != -1:
return spool_item[2]
else:
return None
def isListed(self, spool_name):
"""Return 'True' if the given spool is on the list."""
spool_item = self._findByColumn(spool_name, 0)
if not spool_item == -1:
return True
else:
return False
def getSpoolName(self, extruder):
"""
string getSpoolName( int extruder )
Return the name of the spool loaded for the given extruder.
"""
spool_item = self._findByColumn(extruder, 2)
if spool_item != -1:
return spool_item[0]
else:
return None
def getRemainingFilament(self, extruder):
"""
float getRemainingFilament( int extruder )
Return the name of the spool loaded for the given extruder.
"""
spool_item = self._findByColumn(extruder, 2)
if spool_item != -1:
return spool_item[1]
else:
return float("NaN")
def editLength(self, increment, spool_name = None, extruder = -1):
"""
int editLength ( float increment, string spool_name, int extruder )
Add the given 'increment' amount to the length of filament of the
given spool. Spool can be specified either by name or by the extruder
it is loaded to.
"""
if spool_name != None:
spool_item = self._findByColumn(spool_name, 0)
elif extruder != -1:
spool_item = self._findByColumn(extruder, 2)
else:
return -1 # Not enough arguments
if spool_item == -1:
return -2 # No spool found for the given name or extruder
length = spool_item[1] + increment
self.remove(spool_item[0])
self.add(spool_item[0], length)
if spool_item[2] > -1:
self.load(spool_item[0], spool_item[2])
self._save()
return 0
    def getExtruderCount(self):
        """int getExtruderCount()"""
        # The extruder count is a user setting, not something queried from
        # the printer.
        return self.parent.settings.extruders
    def getSpoolCount(self):
        """
        int getSpoolCount()
        Return the number of currently recorded spools.
        """
        # One row of _spool_list per recorded spool.
        return len(self._spool_list)
def getSpoolList(self):
"""
[N][2] getSpoolList ()
Returns a list of the recorded spools. Returns a Nx2 matrix where N is
the number of recorded spools. The first column contains the names of
the spools and the second the lengths of remaining filament.
"""
slist = []
for i in range(self.getSpoolCount()):
item = [self._spool_list[i][0], self._spool_list[i][1]]
slist.append(item)
return slist
def _findByColumn(self, data, col = 0):
"""
Find which spool_item from the list contains certain data.
The 'col' argument specifies in which field from the spool_item to
look for. For instance, with the following list:
_spool_list = [["spool_1", 100.0, 1],
["spool_2", 200.0, 0],
.
.
.
["spool_10", 1000.0, 0]]
A call like: _findByColumn("spool_2", 0)
Will produce: ["spool_2", 200.0, 0]
col = 0, would look into the "name's column"
col = 1, would look into the "length's column"
col = 2, would look into the "extruder's column"
"""
for spool_item in self._spool_list:
if data == spool_item[col]:
return spool_item
return -1
    def _save(self):
        """Update the list of recorded spools in the configuration file."""
        # Serialization and the actual write are delegated to _setSetting().
        self._setSetting(self._spool_list, "spool_list")
def _setSetting(self, variable, setting):
"""
Write the given variable to the given setting of the configuration
file.
"""
n = 3 # number of fields in spool_item
string_list = []
for i in range(len(variable)):
for j in range(n):
string_list.append(str(variable[i][j]))
separator = ", "
self.parent.set(setting, separator.join(string_list))
def _readSetting(self, setting):
"""
Return the variable read.
"""
n = 3 # number of fields in spool_item
string_list = setting.split(", ")
variable = []
for i in range(len(string_list)//n):
variable.append(
[string_list[n*i],
float(string_list[n*i+1]),
int(string_list[n*i+2])])
return variable
| 8,798 | Python | .py | 208 | 33.346154 | 78 | 0.610122 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,385 | spoolmanager_gui.py | kliment_Printrun/printrun/spoolmanager/spoolmanager_gui.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2017 Rock Storm <rockstorm@gmx.com>
import wx
from printrun.gui.widgets import get_space
from printrun.utils import install_locale
install_locale('pronterface')
# Set up Internationalization using gettext: install_locale() presumably
# installs the translation function _() used throughout this module —
# confirm against printrun.utils.
class SpoolManagerMainWindow(wx.Frame):
    """
    Front-end for the Spool Manager.
    Main window which displays the currently loaded spools and the list of
    recorded ones with buttons to add, load, edit or delete them.
    """
    def __init__(self, parent, spool_manager):
        # parent: the Pronterface main window (wx parent, also supplies the
        #   icon); spool_manager: back-end object this GUI reads and writes.
        wx.Frame.__init__(self, parent,
                          title = _("Spool Manager"),
                          style = wx.DEFAULT_FRAME_STYLE)
        # An empty wx.Frame has a darker background on win, but filled with a panel it looks native
        self.panel = wx.Panel(self, -1)
        self.SetIcon(parent.GetIcon())
        # Initiate the back-end
        self.spool_manager = spool_manager
        self.spool_manager.refresh()
        # Generate the dialogs showing the current spools
        self.current_spools_dialog = CurrentSpoolDialog(self.panel,
                                                        self.spool_manager)
        # Check if any spools are loaded on non-existing extruders
        # (e.g. the extruder count setting was reduced since last run).
        for spool in self.spool_manager.getSpoolList():
            if self.spool_manager.isLoaded(spool[0]) > (spool_manager.getExtruderCount() - 1):
                spool_manager.unload(self.spool_manager.isLoaded(spool[0]))
        # Generate the list of recorded spools
        self.spool_list = SpoolListView(self.panel, self.spool_manager)
        # Generate the buttons
        self.panel.new_button = wx.Button(self.panel, wx.ID_ADD)
        self.panel.new_button.SetToolTip(_("Add a new spool"))
        self.panel.edit_button = wx.Button(self.panel, wx.ID_EDIT)
        self.panel.edit_button.SetToolTip(_("Edit the selected spool"))
        self.panel.edit_button.Disable()
        self.panel.delete_button = wx.Button(self.panel, wx.ID_DELETE)
        self.panel.delete_button.SetToolTip(_("Delete the selected spool"))
        self.panel.delete_button.Disable()
        # Instead of a real statusbar, a virtual statusbar is combined with the close button
        self.statusbar = wx.StaticText(self.panel, -1, "", style = wx.ST_ELLIPSIZE_END)
        statusfont = wx.Font(self.statusbar.GetFont())
        statusfont.MakeSmaller()
        self.statusbar.SetFont(statusfont)
        self.close_button = wx.Button(self.panel, wx.ID_CLOSE)
        self.bottom_button_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.bottom_button_sizer.Add(self.statusbar, 1, wx.ALIGN_CENTER_VERTICAL)
        self.bottom_button_sizer.Add(self.close_button, 0, wx.ALIGN_CENTER_VERTICAL)
        # "Program" the buttons
        self.panel.new_button.Bind(wx.EVT_BUTTON, self.onClickAdd)
        self.panel.edit_button.Bind(wx.EVT_BUTTON, self.onClickEdit)
        self.panel.delete_button.Bind(wx.EVT_BUTTON, self.onClickDelete)
        self.close_button.Bind(wx.EVT_BUTTON, self.onClickClose)
        # Layout
        ## Group the buttons
        self.list_button_sizer = wx.BoxSizer(wx.VERTICAL)
        self.list_button_sizer.Add(self.panel.new_button, 0,
                                   wx.FIXED_MINSIZE | wx.EXPAND | wx.LEFT | wx.BOTTOM, get_space('minor'))
        self.list_button_sizer.Add(self.panel.edit_button, 0,
                                   wx.FIXED_MINSIZE | wx.EXPAND | wx.LEFT | wx.BOTTOM, get_space('minor'))
        self.list_button_sizer.Add(self.panel.delete_button, 0,
                                   wx.FIXED_MINSIZE | wx.EXPAND | wx.LEFT, get_space('minor'))
        ## Group the buttons with the spool list
        self.list_sizer = wx.StaticBoxSizer(wx.HORIZONTAL, self.panel, label = _("Spool List"))
        self.list_sizer.Add(self.spool_list, 1,
                            wx.EXPAND | wx.LEFT | wx.TOP | wx.BOTTOM, get_space('staticbox'))
        self.list_sizer.Add(self.list_button_sizer, 0,
                            wx.ALIGN_TOP | wx.TOP | wx.RIGHT, get_space('staticbox'))
        ## Layout the whole thing
        widgetsizer = wx.BoxSizer(wx.VERTICAL)
        widgetsizer.Add(self.list_sizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, get_space('minor'))
        widgetsizer.Add(self.current_spools_dialog, 0,
                        wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, get_space('minor'))
        widgetsizer.Add(wx.StaticLine(self.panel, -1, style = wx.LI_HORIZONTAL), 0, wx.EXPAND)
        widgetsizer.Add(self.bottom_button_sizer, 0, wx.EXPAND | wx.ALL, get_space('stddlg-frame'))
        ## Make sure the frame has the right size when it opens, but can still be resized
        self.panel.SetSizer(widgetsizer)
        topsizer = wx.BoxSizer(wx.VERTICAL)
        topsizer.Add(self.panel, -1, wx.EXPAND)
        self.SetSizer(topsizer)
        self.SetMinClientSize(self.panel.GetEffectiveMinSize())
        self.Fit()
        self.CentreOnParent()
    def onClickAdd(self, event):
        """Open the window for customizing the new spool."""
        SpoolManagerAddWindow(self).ShowModal()
    def onClickLoad(self, event, extruder):
        """Load the selected spool to the correspondent extruder."""
        # Check whether there is a spool selected
        spool_index = self.spool_list.GetFirstSelected()
        if spool_index == -1:
            self.statusbar.SetLabel(
                _("Could not load the spool. No spool selected."))
            return
        spool_name = self.spool_list.GetItemText(spool_index)
        self.statusbar.SetLabel("")
        # If selected spool is already loaded, do nothing
        # NOTE: isLoaded() returns -1 for "listed but not loaded", so the
        # > -1 comparison catches every loaded extruder.
        spool_extruder = self.spool_manager.isLoaded(spool_name)
        if spool_extruder > -1:
            self.statusbar.SetLabel(
                _("Spool '%s' is already loaded for Extruder %d.") %
                (spool_name, spool_extruder))
            self.Layout() # Layout() is needed to ellipsize possible overlength status
            return
        # Load the selected spool and refresh the current spools dialog
        self.spool_manager.load(spool_name, extruder)
        self.current_spools_dialog.refreshDialog(self.spool_manager)
        self.current_spools_dialog.unload_button[extruder].Enable()
        self.statusbar.SetLabel(
            _("Loaded spool '%s' for Extruder %d.") % (spool_name, extruder))
        self.Layout() # Layout() is needed to ellipsize possible overlength status
    def onClickUnload(self, event, extruder):
        """Unload the spool from the correspondent extruder."""
        spool_name = self.spool_manager.getSpoolName(extruder)
        if spool_name is not None:
            self.spool_manager.unload(extruder)
            self.current_spools_dialog.refreshDialog(self.spool_manager)
            self.statusbar.SetLabel(
                _("Unloaded spool from Extruder %d.") % extruder)
            self.current_spools_dialog.unload_button[extruder].Disable()
        else:
            self.statusbar.SetLabel(
                _("There is no spool loaded for Extruder %d.") % extruder)
    def onClickEdit(self, event):
        """Open the window for editing the data of the selected spool."""
        # Check whether there is a spool selected
        spool_index = self.spool_list.GetFirstSelected()
        if spool_index == -1:
            self.statusbar.SetLabel(
                _("Could not edit the spool. No spool selected."))
            return
        # Open the edit window
        # Column 1 holds the length as displayed, i.e. "<number> mm".
        spool_name = self.spool_list.GetItemText(spool_index)
        spool_length = self.spool_list.GetItemText(spool_index, 1)
        SpoolManagerEditWindow(self, spool_name, spool_length).ShowModal()
        self.statusbar.SetLabel("")
    def onClickDelete(self, event):
        """Delete the selected spool."""
        # Get the selected spool
        spool_index = self.spool_list.GetFirstSelected()
        if spool_index == -1:
            self.statusbar.SetLabel(
                _("Could not delete the spool. No spool selected."))
            return
        spool_name = self.spool_list.GetItemText(spool_index)
        self.statusbar.SetLabel("")
        # Ask confirmation for deleting
        delete_dialog = wx.MessageDialog(self,
                                         message = _("Are you sure you want to delete the '%s' spool?") %
                                         spool_name, caption = _("Delete Spool"),
                                         style = wx.YES_NO | wx.ICON_EXCLAMATION)
        if delete_dialog.ShowModal() == wx.ID_YES:
            # Remove spool
            self.spool_manager.remove(spool_name)
            self.spool_list.refreshList(self.spool_manager)
            self.current_spools_dialog.refreshDialog(self.spool_manager)
            self.statusbar.SetLabel(
                _("Deleted spool '%s'.") % spool_name)
    def onClickClose(self, event):
        """Close and destroy the Spool Manager window."""
        self.Destroy()
class SpoolListView(wx.ListView):
    """
    List control showing one row per recorded spool: its name in column 0
    and the remaining filament (as "<length> mm") in column 1.
    """
    def __init__(self, parent, spool_manager):
        wx.ListView.__init__(self, parent,
                             style = wx.LC_REPORT | wx.LC_SINGLE_SEL)
        self.InsertColumn(0, _("Spool"), width = wx.LIST_AUTOSIZE_USEHEADER)
        self.InsertColumn(1, _("Filament"), width = wx.LIST_AUTOSIZE_USEHEADER)
        self.populateList(spool_manager)
        # "Program" the layout
        self.Bind(wx.EVT_SIZE, self.onResizeList)
        # Keep the parent's edit/delete buttons in sync with the selection;
        # insert/delete events also clear it, hence the shared handler.
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onItemSelect)
        self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.onItemDeselect)
        self.Bind(wx.EVT_LIST_DELETE_ITEM, self.onItemDeselect)
        self.Bind(wx.EVT_LIST_INSERT_ITEM, self.onItemDeselect)

    def populateList(self, spool_manager):
        """Fill the view with one row per spool known to *spool_manager*."""
        for name, length in spool_manager.getSpoolList():
            self.Append([name, "%s mm" % length])

    def refreshList(self, spool_manager):
        """Rebuild the rows from the Spool Manager's current list."""
        self.DeleteAllItems()
        self.populateList(spool_manager)

    def onResizeList(self, event):
        # Autosize the filament column, then give the name column whatever
        # width remains.
        total_width = self.GetSize().width
        self.SetColumnWidth(1, -2)
        self.SetColumnWidth(col = 0,
                            width = total_width - self.GetColumnWidth(1))
        event.Skip()

    def _setEditButtons(self, enabled):
        # Toggle the parent's edit and delete buttons together.
        self.Parent.edit_button.Enable(enabled)
        self.Parent.delete_button.Enable(enabled)

    def onItemSelect(self, event):
        self._setEditButtons(True)

    def onItemDeselect(self, event):
        self._setEditButtons(False)
class CurrentSpoolDialog(wx.Panel):
    """
    Custom wxStaticText object to display the currently loaded spools and
    their remaining filament.
    """
    def __init__(self, parent, spool_manager):
        # parent: panel of the SpoolManagerMainWindow; the main window itself
        # is reached through parent.Parent in the button handlers below.
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.extruders = spool_manager.getExtruderCount()
        # If the settings file has no entry, at least 1 extruder will be set
        if not self.extruders:
            self.extruders = 1
        csd_sizer = wx.BoxSizer(wx.VERTICAL)
        # Calculate the minimum size needed to properly display the
        # extruder information
        min_size = self.GetTextExtent("Default Very Long Spool Name")
        # Generate a dialog for every extruder
        self.extruder_dialog = []
        load_button = []
        self.unload_button = []
        button_sizer = []
        dialog_sizer = []
        for i in range(self.extruders):
            # Generate the dialog with the spool information
            textlabel = wx.StaticText(self, label = _("Name:\nRemaining filament:"),
                                      style = wx.ALIGN_RIGHT)
            self.extruder_dialog.append(
                wx.StaticText(self, style = wx.ST_ELLIPSIZE_END))
            self.extruder_dialog[i].SetMinSize(wx.Size(min_size.width, -1))
            # Generate the "load" and "unload" buttons
            load_button.append(wx.Button(self, label = _("Load")))
            load_button[i].SetToolTip(
                _("Load selected spool for Extruder %d") % i)
            self.unload_button.append(wx.Button(self, label = _("Unload")))
            self.unload_button[i].Disable()
            self.unload_button[i].SetToolTip(
                _("Unload the spool for Extruder %d") % i)
            # "Program" the buttons
            # NOTE: 'extruder=i' binds the loop value at definition time;
            # without the default argument every handler would see the last i.
            load_button[i].Bind(wx.EVT_BUTTON,
                                lambda event, extruder=i: parent.Parent.onClickLoad(event, extruder))
            self.unload_button[i].Bind(wx.EVT_BUTTON,
                                       lambda event, extruder=i: parent.Parent.onClickUnload(event, extruder))
            # Layout
            button_sizer.append(wx.BoxSizer(wx.HORIZONTAL))
            button_sizer[i].Add(load_button[i], 0,
                                wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.RIGHT, get_space('minor'))
            button_sizer[i].Add(self.unload_button[i], 0,
                                wx.FIXED_MINSIZE | wx.ALIGN_CENTER)
            dialog_sizer.append(wx.StaticBoxSizer(wx.HORIZONTAL,
                                                  self, label = _("Spool for Extruder %d:") % i))
            dialog_sizer[i].Add(textlabel, 0, wx.ALIGN_TOP | wx.ALL, get_space('staticbox'))
            dialog_sizer[i].AddSpacer(get_space('minor'))
            dialog_sizer[i].Add(self.extruder_dialog[i], 1, wx.ALIGN_TOP | wx.TOP, get_space('staticbox'))
            dialog_sizer[i].AddSpacer(get_space('major'))
            dialog_sizer[i].Add(button_sizer[i], 0, wx.EXPAND | wx.RIGHT, get_space('staticbox'))
            csd_sizer.Add(dialog_sizer[i], 0, wx.EXPAND | wx.TOP, get_space('minor'))
        self.refreshDialog(spool_manager)
        self.SetSizerAndFit(csd_sizer)
    def refreshDialog(self, spool_manager):
        """Retrieve the current spools from the Spool Manager."""
        for i in range(self.extruders):
            spool_name = spool_manager.getSpoolName(i)
            if spool_name is not None:
                self.unload_button[i].Enable()
            else:
                self.unload_button[i].Disable()
            # getRemainingFilament() returns NaN when nothing is loaded, so
            # an unloaded extruder shows "None" / "nan mm".
            spool_filament = spool_manager.getRemainingFilament(i)
            label = ("%s\n" % spool_name +
                     "%.2f mm" % spool_filament)
            self.extruder_dialog[i].SetLabelText(label)
# ---------------------------------------------------------------------------
def checkOverwrite(parent, spool_name):
    """Ask the user whether or not to overwrite the existing spool.

    Return True when the user confirms the overwrite.
    """
    # Message fixes: drop the stray second apostrophe after %s and add the
    # missing space between the two concatenated sentences.
    overwrite_dialog = wx.MessageDialog(parent,
                                        message = _("A spool with the name '%s' already exists.") %
                                        spool_name + " " +
                                        _("Do you wish to overwrite it?"),
                                        caption = _("Overwrite"),
                                        style = wx.YES_NO | wx.ICON_EXCLAMATION)
    return overwrite_dialog.ShowModal() == wx.ID_YES
def getFloat(parent, number):
    """
    Try to interpret *number* (a string) as a float. Return the float on
    success; on failure report the problem on the main window's statusbar
    (reached via parent.parent) and return False.
    """
    if ',' not in number:
        try:
            return float(number)
        except ValueError:
            message = _("Unrecognized number: %s") % number
    else:
        # Reject European-style decimal commas explicitly with a hint.
        message = _("Value contains a comma, please use a point for decimal values: %s") % number
    parent.parent.statusbar.SetLabel(message)
    parent.parent.Layout() # Layout() is needed to ellipsize possible overlength status
    return False
# ---------------------------------------------------------------------------
class SpoolManagerAddWindow(wx.Dialog):
    """Window for adding spools."""
    def __init__(self, parent):
        # parent: the SpoolManagerMainWindow; its statusbar, spool list and
        # back-end manager are updated from here.
        wx.Dialog.__init__(self, parent,
                           title = _("Add Spool"),
                           style = wx.DEFAULT_DIALOG_STYLE)
        self.parent = parent
        self.SetIcon(parent.GetIcon())
        # Generate the dialogs
        # The wx.TextCtrl variables need to be declared before the loop, empty
        self.name_dialog = wx.TextCtrl(self, -1)
        self.diameter_dialog = wx.TextCtrl(self, -1)
        self.weight_dialog = wx.TextCtrl(self, -1)
        self.density_dialog = wx.TextCtrl(self, -1)
        self.length_dialog = wx.TextCtrl(self, -1)
        # The list contains field-description, textctrl value, default value, unit, tooltip;
        name_dlg = [_("Name:"), self.name_dialog, _("Default Spool"), "", ""]
        diameter_dlg = [_("Diameter:"), self.diameter_dialog, "1.75", "mm",
                        _("Typically, either 1.75 mm or 2.85 mm")]
        weight_dlg = [_("Weight:"), self.weight_dialog, "1.0", "kg", ""]
        density_dlg = [_("Density:"), self.density_dialog, "1.25", "g/cm^3",
                       _("Typical densities are 1.25 g/cm^3 for PLA,\n1.27 g/cm^3 for PETG or 1.08 g/cm^3 for ABS")]
        length_dlg = [_("Length:"), self.length_dialog, "332601.35", "mm", ""]
        dialog_list = [name_dlg, diameter_dlg, weight_dlg, density_dlg, length_dlg]
        minwidth = self.GetTextExtent('Default Long Spool Name').width
        grid = wx.FlexGridSizer(rows = 0, cols = 3, hgap = get_space('minor'), vgap = get_space('minor'))
        for dialog in dialog_list:
            # Add a field-description label
            grid.Add(wx.StaticText(self, -1, dialog[0], size = (-1, -1)), 0,
                     wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
            # Give the TextCtrl the right value
            dialog[1].ChangeValue(dialog[2])
            dialog[1].SetMinSize((minwidth, -1))
            # Add a tooltip
            if dialog[4] != "":
                dialog[1].SetToolTip(dialog[4])
            grid.Add(dialog[1])
            # Add a label for the unit
            if dialog[3] == "":
                grid.Add((0, 0))
            else:
                grid.Add(wx.StaticText(self, -1, dialog[3], size = (-1, -1)), 0, wx.ALIGN_CENTER_VERTICAL)
        # "Program" the dialogs
        # Editing diameter/weight/density recomputes the length; editing the
        # length recomputes the weight. The handlers use ChangeValue(), which
        # does not emit EVT_TEXT, so no event feedback loop occurs.
        self.diameter_dialog.Bind(wx.EVT_TEXT, self.calculateLength)
        self.weight_dialog.Bind(wx.EVT_TEXT, self.calculateLength)
        self.density_dialog.Bind(wx.EVT_TEXT, self.calculateLength)
        self.length_dialog.Bind(wx.EVT_TEXT, self.calculateWeight)
        # Generate the bottom buttons
        self.add_button = wx.Button(self, wx.ID_ADD)
        self.cancel_button = wx.Button(self, wx.ID_CANCEL)
        # "Program" the bottom buttons
        self.add_button.Bind(wx.EVT_BUTTON, self.onClickAdd)
        self.add_button.SetDefault()
        self.SetAffirmativeId(wx.ID_ADD)
        self.cancel_button.Bind(wx.EVT_BUTTON, self.onClickCancel)
        # Layout
        ## Setup the bottom buttons
        self.bottom_buttons_sizer = wx.StdDialogButtonSizer()
        self.bottom_buttons_sizer.SetAffirmativeButton(self.add_button)
        self.bottom_buttons_sizer.AddButton(self.cancel_button)
        self.bottom_buttons_sizer.Realize()
        ## Group the whole window
        self.topsizer = wx.BoxSizer(wx.VERTICAL)
        self.topsizer.Add(grid, 1, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, get_space('major'))
        self.topsizer.Add(wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL), 0,
                          wx.EXPAND | wx.TOP, get_space('minor'))
        self.topsizer.Add(self.bottom_buttons_sizer, 0,
                          wx.ALIGN_RIGHT | wx.ALL, get_space('stddlg'))
        self.SetSizerAndFit(self.topsizer)
        self.CentreOnParent()
        self.name_dialog.SetFocus()
    def onClickAdd(self, ev):
        """Add the new spool and close the window."""
        spool_name = self.name_dialog.GetValue()
        spool_length = getFloat(self, self.length_dialog.GetValue())
        # Check whether the length is actually a number
        # NOTE(review): a length of exactly 0 is also falsy, so "0" is
        # reported as unrecognized here rather than as "zero or negative".
        if not spool_length:
            self.parent.statusbar.SetLabel(_("ERROR: Unrecognized length: %s.") %
                                           self.length_dialog.GetValue())
            self.parent.Layout() # Layout() is needed to ellipsize possible overlength status
            return
        # The remaining filament should always be a positive number
        if not spool_length > 0:
            self.parent.statusbar.SetLabel(_("ERROR: Length is zero or negative: %.2f.") %
                                           spool_length)
            self.parent.Layout() # Layout() is needed to ellipsize possible overlength status
            return
        # Check whether the name is already used. If it is used, prompt the
        # user before overwriting it
        if self.parent.spool_manager.isListed(spool_name):
            if checkOverwrite(self, spool_name):
                # Remove the "will be overwritten" spool
                self.parent.spool_manager.remove(spool_name)
            else:
                return
        # Add the new spool
        self.parent.spool_manager.add(spool_name, spool_length)
        self.parent.spool_list.refreshList(self.parent.spool_manager)
        self.parent.current_spools_dialog.refreshDialog(
            self.parent.spool_manager)
        self.parent.statusbar.SetLabel(
            _("Added new spool '%s'") % spool_name +
            _(" with %.2f mm of remaining filament.") % spool_length)
        self.parent.Layout() # Layout() is needed to ellipsize possible overlength status
        self.EndModal(True)
        self.Destroy()
    def onClickCancel(self, event):
        """Do nothing and close the window."""
        self.parent.statusbar.SetLabel("")
        self.EndModal(True)
        self.Destroy()
    def calculateLength(self, event):
        """
        Calculate the length of the filament given the mass, diameter and
        density of the filament. Set the 'Length' field to this quantity.
        """
        mass = getFloat(self, self.weight_dialog.GetValue())
        diameter = getFloat(self, self.diameter_dialog.GetValue())
        density = getFloat(self, self.density_dialog.GetValue())
        if mass and diameter and density:
            pi = 3.14159265359
            # length [mm] = volume / cross-section; the 4e6 factor converts
            # kg and g/cm^3 into mm-based units.
            length = 4e6 * mass / pi / diameter**2 / density
            self.parent.statusbar.SetLabel("")
            self.length_dialog.ChangeValue("%.2f" % length)
        else:
            self.length_dialog.ChangeValue("---")
    def calculateWeight(self, event):
        """
        Calculate the weight of the filament given the length, diameter and
        density of the filament. Set the 'Weight' field to this value.
        """
        length = getFloat(self, self.length_dialog.GetValue())
        diameter = getFloat(self, self.diameter_dialog.GetValue())
        density = getFloat(self, self.density_dialog.GetValue())
        if length and diameter and density:
            pi = 3.14159265359
            # Exact inverse of calculateLength(): mass [kg] from length [mm].
            mass = length * pi * diameter**2 * density / 4e6
            self.parent.statusbar.SetLabel("")
            self.weight_dialog.ChangeValue("%.2f" % mass)
        else:
            self.weight_dialog.ChangeValue("---")
# ---------------------------------------------------------------------------
class SpoolManagerEditWindow(wx.Dialog):
    """Window for editing the name or the length of a spool."""
    def __init__(self, parent, spool_name, spool_length):
        # parent: the SpoolManagerMainWindow. spool_length arrives as the
        # list-view display string "<number> mm", hence the replace() below.
        wx.Dialog.__init__(self, parent,
                           title = _("Edit Spool"),
                           style = wx.DEFAULT_DIALOG_STYLE)
        self.parent = parent
        self.SetIcon(parent.GetIcon())
        self.old_spool_name = spool_name
        self.old_spool_length = getFloat(self, spool_length.replace(" mm", ""))
        # Set how many millimeters will the buttons add or subtract
        self.quantities = [-100.0, -50.0, -10.0, 10.0, 50.0, 100.0]
        # Generate the name field
        self.name_title = wx.StaticText(self, -1, _("Name:"))
        minwidth = self.GetTextExtent('Default Very Long Spool Name').width
        self.name_field = wx.TextCtrl(self, -1, self.old_spool_name, style = wx.TE_RIGHT)
        self.name_field.SetMinSize((minwidth, -1))
        # Generate the length field
        self.length_title = wx.StaticText(self, label = _("Remaining filament:"),
                                          style = wx.ALIGN_RIGHT)
        self.length_field = wx.TextCtrl(self, -1, value = str(self.old_spool_length),
                                        style = wx.TE_RIGHT)
        self.length_field.SetMinSize((minwidth, -1))
        # Generate the buttons
        button_min_width = self.GetTextExtent(' +000.0 ').width
        self.minus3_button = wx.Button(self,
                                       label = str(self.quantities[0]))
        self.minus2_button = wx.Button(self,
                                       label = str(self.quantities[1]))
        self.minus1_button = wx.Button(self,
                                       label = str(self.quantities[2]))
        self.plus1_button = wx.Button(self,
                                      label = "+" + str(self.quantities[3]))
        self.plus2_button = wx.Button(self,
                                      label = "+" + str(self.quantities[4]))
        self.plus3_button = wx.Button(self,
                                      label = "+" + str(self.quantities[5]))
        self.minus3_button.SetSize((button_min_width, -1))
        self.minus2_button.SetSize((button_min_width, -1))
        self.minus1_button.SetSize((button_min_width, -1))
        self.plus1_button.SetSize((button_min_width, -1))
        self.plus2_button.SetSize((button_min_width, -1))
        self.plus3_button.SetSize((button_min_width, -1))
        # "Program" the length buttons
        # All six buttons share one handler; changeLength() reads the amount
        # to add from the pressed button's own label.
        self.minus3_button.Bind(wx.EVT_BUTTON, self.changeLength)
        self.minus2_button.Bind(wx.EVT_BUTTON, self.changeLength)
        self.minus1_button.Bind(wx.EVT_BUTTON, self.changeLength)
        self.plus1_button.Bind(wx.EVT_BUTTON, self.changeLength)
        self.plus2_button.Bind(wx.EVT_BUTTON, self.changeLength)
        self.plus3_button.Bind(wx.EVT_BUTTON, self.changeLength)
        # Generate the bottom buttons
        self.save_button = wx.Button(self, wx.ID_SAVE)
        self.cancel_button = wx.Button(self, wx.ID_CANCEL)
        # "Program" the bottom buttons
        self.save_button.Bind(wx.EVT_BUTTON, self.onClickSave)
        self.cancel_button.Bind(wx.EVT_BUTTON, self.onClickCancel)
        self.save_button.SetDefault()
        self.SetAffirmativeId(wx.ID_SAVE)
        # Layout
        ## Group the length field and its correspondent buttons
        self.btn_sizer = wx.StaticBoxSizer(wx.HORIZONTAL, self)
        self.btn_sizer.Add(self.minus3_button, 0,
                           wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.LEFT | wx.TOP | wx.BOTTOM,
                           get_space('staticbox'))
        self.btn_sizer.Add(self.minus2_button, 0,
                           wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, get_space('mini'))
        self.btn_sizer.Add(self.minus1_button, 0,
                           wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.RIGHT, get_space('mini'))
        self.btn_sizer.AddSpacer(get_space('major'))
        self.btn_sizer.Add(self.plus1_button, 0,
                           wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.LEFT, get_space('mini'))
        self.btn_sizer.Add(self.plus2_button, 0,
                           wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, get_space('mini'))
        self.btn_sizer.Add(self.plus3_button, 0,
                           wx.FIXED_MINSIZE | wx.ALIGN_CENTER | wx.RIGHT, get_space('staticbox'))
        ## Group the bottom buttons
        self.bottom_buttons_sizer = wx.StdDialogButtonSizer()
        self.bottom_buttons_sizer.AddButton(self.save_button)
        self.bottom_buttons_sizer.AddButton(self.cancel_button)
        self.bottom_buttons_sizer.Realize()
        ## Lay out the whole window
        grid = wx.GridBagSizer(hgap = get_space('minor'), vgap = get_space('minor'))
        # Gridbagsizer: pos = (row, col), span = (rowspan, colspan)
        grid.Add(self.name_title, pos = (0, 0), span = (1, 1), border = 0,
                 flag = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
        grid.Add(self.name_field, pos = (0, 1), span = (1, 1), border = 0,
                 flag = wx.ALIGN_LEFT)
        grid.Add(self.length_title, pos = (1, 0), span = (1, 1), border = 0,
                 flag = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
        grid.Add(self.length_field, pos = (1, 1), span = (1, 1), border = 0,
                 flag = wx.ALIGN_LEFT)
        grid.Add(self.btn_sizer, pos = (2, 0), span = (1, 2), border = 0,
                 flag = wx.ALIGN_CENTER | wx.EXPAND)
        topsizer = wx.BoxSizer(wx.VERTICAL)
        topsizer.Add(grid, 1, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, get_space('major'))
        topsizer.Add(wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL), 0,
                     wx.EXPAND | wx.TOP, get_space('minor'))
        topsizer.Add(self.bottom_buttons_sizer, 0, wx.ALIGN_RIGHT | wx.ALL, get_space('stddlg'))
        self.SetSizer(topsizer)
        self.Fit()
        self.CentreOnParent()
        self.name_field.SetFocus()
    def changeLength(self, event):
        """Add the pressed button's labelled amount to the length field."""
        new_length = getFloat(self, self.length_field.GetValue())
        if new_length:
            # float() parses the "+100.0" / "-100.0" button labels directly.
            new_length = new_length + float(event.GetEventObject().GetLabel())
            self.length_field.ChangeValue("%.2f" % new_length)
            self.parent.statusbar.SetLabel("")
    def onClickSave(self, event):
        """Validate the fields, replace the old spool and close the window."""
        new_spool_name = self.name_field.GetValue()
        new_spool_length = getFloat(self, self.length_field.GetValue())
        # Check whether the length is actually a number
        if not new_spool_length:
            self.parent.statusbar.SetLabel(
                _("ERROR: Unrecognized length: %s.") %
                self.length_field.GetValue())
            self.parent.Layout() # Layout() is needed to ellipsize possible overlength status
            return
        if not new_spool_length > 0:
            self.parent.statusbar.SetLabel(
                _("ERROR: Length is zero or negative: %.2f.") % new_spool_length)
            self.parent.Layout() # Layout() is needed to ellipsize possible overlength status
            return
        # Check whether the "old" spool was loaded
        # (-1 means "listed but not loaded"; the load() below restores the
        # previous extruder assignment either way)
        new_spool_extruder = self.parent.spool_manager.isLoaded(
            self.old_spool_name)
        # Check whether the name has changed
        if new_spool_name == self.old_spool_name:
            # Remove only the "old" spool
            self.parent.spool_manager.remove(self.old_spool_name)
        else:
            # Check whether the new name is already used
            if self.parent.spool_manager.isListed(new_spool_name):
                if checkOverwrite(self, new_spool_name):
                    # Remove the "old" and the "will be overwritten" spools
                    self.parent.spool_manager.remove(self.old_spool_name)
                    self.parent.spool_manager.remove(new_spool_name)
                else:
                    return
            else:
                # Remove only the "old" spool
                self.parent.spool_manager.remove(self.old_spool_name)
        # Add "new" or edited spool
        self.parent.spool_manager.add(new_spool_name, new_spool_length)
        self.parent.spool_manager.load(new_spool_name, new_spool_extruder)
        self.parent.spool_list.refreshList(self.parent.spool_manager)
        self.parent.current_spools_dialog.refreshDialog(
            self.parent.spool_manager)
        self.parent.statusbar.SetLabel(
            _("Edited spool '%s'") % new_spool_name +
            _(" with %.2f mm of remaining filament.") % new_spool_length)
        self.parent.Layout() # Layout() is needed to ellipsize possible overlength status
        self.EndModal(True)
        self.Destroy()
    def onClickCancel(self, event):
        """Discard any edits and close the window."""
        self.parent.statusbar.SetLabel("")
        self.EndModal(True)
        self.Destroy()
| 32,507 | Python | .py | 611 | 42.129296 | 121 | 0.613596 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,386 | sample.py | kliment_Printrun/printrun/plugins/sample.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from printrun.eventhandler import PrinterEventHandler
class SampleHandler(PrinterEventHandler):
    '''
    Sample event handler for printcore.

    Every callback prints its own name followed by the payload it received,
    so this class doubles as a template for real handlers and as a simple
    tracing aid while debugging.
    '''
    def __init__(self):
        pass

    def __echo(self, field, text = ""):
        # Single choke point for output so all callbacks share one format.
        print("%-15s - %s" % (field, text))

    def on_init(self):
        self.__echo("on_init")

    def on_send(self, command, gline):
        self.__echo("on_send", command)

    def on_recv(self, line):
        self.__echo("on_recv", line.strip())

    def on_connect(self):
        self.__echo("on_connect")

    def on_disconnect(self):
        self.__echo("on_disconnect")

    def on_error(self, error):
        self.__echo("on_error", error)

    def on_online(self):
        self.__echo("on_online")

    def on_temp(self, line):
        self.__echo("on_temp", line)

    def on_start(self, resume):
        self.__echo("on_start", "true" if resume else "false")

    def on_end(self):
        self.__echo("on_end")

    def on_layerchange(self, layer):
        self.__echo("on_layerchange", "%f" % (layer))

    def on_preprintsend(self, gline, index, mainqueue):
        self.__echo("on_preprintsend", gline)

    def on_printsend(self, gline):
        self.__echo("on_printsend", gline)
| 1,989 | Python | .py | 49 | 33.836735 | 70 | 0.664324 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,387 | __init__.py | kliment_Printrun/printrun/plugins/__init__.py | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
#from printrun.plugins.sample import SampleHandler
#
#PRINTCORE_HANDLER = [SampleHandler()]
# Module-level registry of PrinterEventHandler instances attached to
# printcore; empty by default (uncomment the lines above to enable the
# sample handler).
PRINTCORE_HANDLER = []
| 784 | Python | .py | 18 | 42.444444 | 70 | 0.782723 | kliment/Printrun | 2,360 | 997 | 202 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,388 | setup.py | s3tools_s3cmd/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
# Verify an ElementTree implementation is importable before doing anything
# else; abort the build with instructions if none is found.
try:
    import xml.etree.ElementTree
    print("Using xml.etree.ElementTree for XML processing")
except ImportError as e:
    sys.stderr.write(str(e) + "\n")
    # Fall back to the standalone "elementtree" package (pre-2.5 Pythons).
    try:
        import elementtree.ElementTree
        print("Using elementtree.ElementTree for XML processing")
    except ImportError as e:
        sys.stderr.write(str(e) + "\n")
        sys.stderr.write("Please install ElementTree module from\n")
        sys.stderr.write("http://effbot.org/zone/element-index.htm\n")
        sys.exit(1)
from setuptools import setup
import S3.PkgInfo
# Abort on unsupported interpreters: s3cmd requires Python 2.6 or newer.
# BUGFIX: the original compared float("%d.%d" % sys.version_info[:2]) < 2.6,
# which mis-orders any version with a two-digit minor number (e.g. "2.10"
# becomes 2.1 < 2.6).  Tuple comparison against sys.version_info is exact.
if sys.version_info < (2, 6):
    sys.stderr.write("Your Python version %d.%d.%d is not supported.\n" % sys.version_info[:3])
    sys.stderr.write("S3cmd requires Python 2.6 or newer.\n")
    sys.exit(1)
## Remove 'MANIFEST' file to force
## distutils to recreate it.
## Only in "sdist" stage. Otherwise
## it makes life difficult to packagers.
if len(sys.argv) > 1 and sys.argv[1] == "sdist":
    try:
        os.unlink("MANIFEST")
    except OSError as e:
        # MANIFEST may legitimately not exist yet; ignore that.
        pass
## Re-create the manpage
## (Beware! Perl script on the loose!!)
# Refuse to build an sdist whose man page is older than the s3cmd script.
# CLEANUP: os.stat() already returns an os.stat_result, so the original
# os.stat_result(os.stat(...)) wrapper was a redundant copy.
if len(sys.argv) > 1 and sys.argv[1] == "sdist":
    if os.stat("s3cmd.1").st_mtime < os.stat("s3cmd").st_mtime:
        sys.stderr.write("Re-create man page first!\n")
        sys.stderr.write("Run: ./s3cmd --help | ./format-manpage.pl > s3cmd.1\n")
        sys.exit(1)
## Skip installing manpages and docs entirely when $S3CMD_PACKAGING is set
## (a requirement of the Debian package maintainer).
if os.getenv("S3CMD_PACKAGING"):
    data_files = None
else:
    # Install destinations are overridable via environment variables.
    man_path = os.getenv("S3CMD_INSTPATH_MAN") or "share/man"
    doc_path = os.getenv("S3CMD_INSTPATH_DOC") or "share/doc/packages"
    data_files = [
        ("%s/s3cmd" % doc_path, ["README.md", "INSTALL.md", "LICENSE", "NEWS"]),
        ("%s/man1" % man_path, ["s3cmd.1"]),
    ]
## Main distutils info
setup(
## Content description
name=S3.PkgInfo.package,
version=S3.PkgInfo.version,
packages=['S3'],
scripts=['s3cmd'],
data_files=data_files,
test_suite='S3.PkgInfo',
## Packaging details
author="Michal Ludvig",
author_email="michal@logix.cz",
maintainer="github.com/fviard, github.com/matteobar",
maintainer_email="s3tools-bugs@lists.sourceforge.net",
url=S3.PkgInfo.url,
license=S3.PkgInfo.license,
description=S3.PkgInfo.short_description,
long_description="""
%s
Authors:
--------
Florent Viard <florent@sodria.com>
Michal Ludvig <michal@logix.cz>
Matt Domsch (github.com/mdomsch)
""" % (S3.PkgInfo.long_description),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
'Topic :: System :: Archiving',
'Topic :: Utilities',
],
install_requires=["python-dateutil", "python-magic"]
)
# vim:et:ts=4:sts=4:ai
| 4,198 | Python | .py | 111 | 32.207207 | 95 | 0.639558 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,389 | run-tests.py | s3tools_s3cmd/run-tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3cmd - testsuite
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import, print_function
import sys
import os
import re
import time
from subprocess import Popen, PIPE, STDOUT
import locale
import getpass
import S3.Exceptions
import S3.Config
from S3.ExitCodes import *
# Python 3 compatibility: alias the removed `unicode` builtin to `str`.
try:
    unicode
except NameError:
    # python 3 support
    # In python 3, unicode -> str, and str -> bytes
    unicode = str
# Server profiles with test-relevant behavioural differences.
ALLOWED_SERVER_PROFILES = ['aws', 'minio']
# Global test-result counters, updated by test() below.
count_pass = 0
count_fail = 0
count_skip = 0
test_counter = 0
run_tests = []        # test numbers selected on the command line
exclude_tests = []    # test numbers explicitly excluded
verbose = False
# The suite exercises non-ASCII file names, so a known locale encoding is
# mandatory; abort early if it cannot be determined.
encoding = locale.getpreferredencoding()
if not encoding:
    print("Guessing current system encoding failed. Consider setting $LANG variable.")
    sys.exit(1)
else:
    print("System encoding: " + encoding)
# NOTE(review): this shim duplicates the identical one a few lines above;
# it is idempotent and therefore harmless, but redundant.
try:
    unicode
except NameError:
    # python 3 support
    # In python 3, unicode -> str, and str -> bytes
    unicode = str
# Python 3 compatibility shim (idempotent; also defined near the top of the
# file) so this function is self-contained.
try:
    unicode
except NameError:
    unicode = str

def unicodise(string, encoding = "utf-8", errors = "replace"):
    """
    Convert 'string' to Unicode or raise an exception.
    Config can't use toolbox from Utils that is itself using Config

    Already-unicode input is returned unchanged; bytes are decoded with the
    given encoding/errors policy.
    """
    if type(string) == unicode:
        return string
    try:
        return unicode(string, encoding, errors)
    except UnicodeDecodeError:
        # BUGFIX: re-raise the original exception.  The previous code did
        # `raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)`,
        # but that constructor requires 5 arguments, so it raised a TypeError
        # instead of the intended UnicodeDecodeError.
        raise
# https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
def which(program):
    """Locate *program*: return its path if executable, else None.

    When *program* contains a directory component it is checked directly;
    otherwise each entry of $PATH is searched in order.
    """
    def runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        if runnable(program):
            return program
    else:
        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if runnable(candidate):
                return candidate
    return None
# Some tests fetch public URLs with curl; they are skipped when it is absent.
# (Simplified from an if/else flag assignment to a direct boolean expression.)
have_curl = which('curl') is not None
# Locate the default s3cmd configuration file for this platform:
#   POSIX:   $HOME/.s3cfg
#   Windows: %USERPROFILE%/<%APPDATA% or "Application Data">/s3cmd.ini
config_file = None
if os.getenv("HOME"):
    config_file = os.path.join(unicodise(os.getenv("HOME"), encoding),
                               ".s3cfg")
elif os.name == "nt" and os.getenv("USERPROFILE"):
    # CLEANUP: replaced the fragile `x and f(x) or default` idiom with an
    # explicit conditional expression (same result: empty/unset %APPDATA%
    # falls back to the legacy "Application Data" directory name).
    appdata = os.getenv("APPDATA")
    config_file = os.path.join(
        unicodise(os.getenv("USERPROFILE"), encoding),
        unicodise(appdata, encoding) if appdata else 'Application Data',
        "s3cmd.ini")
## Unpack testsuite/ directory
# Import-time side effect: fixture trees are extracted from tarballs on the
# first run; failures abort the whole suite.
if not os.path.isdir('testsuite') and os.path.isfile('testsuite.tar.gz'):
    os.system("tar -xz -f testsuite.tar.gz")
if not os.path.isdir('testsuite'):
    print("Something went wrong while unpacking testsuite.tar.gz")
    sys.exit(1)
os.system("tar -xf testsuite/checksum.tar -C testsuite")
if not os.path.isfile('testsuite/checksum/cksum33.txt'):
    print("Something went wrong while unpacking testsuite/checksum.tar")
    sys.exit(1)
## Fix up permissions for permission-denied tests
# 0o444 = read-only directory, 0o000 = unreadable file; both are exercised
# by the permission-denied upload tests later on.
os.chmod("testsuite/permission-tests/permission-denied-dir", 0o444)
os.chmod("testsuite/permission-tests/permission-denied.txt", 0o000)
## Patterns for Unicode tests
# File-name patterns used to exercise non-ASCII object keys, keyed by the
# locale encoding they require.
patterns = {}
patterns['UTF-8'] = u"ŪņЇЌœđЗ/☺ unicode € rocks ™"
patterns['GBK'] = u"12月31日/1-特色條目"
# Encoding-specific fixture files live in testsuite/encodings/<encoding>;
# unpack them on demand and remember whether they are available.
have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
if not have_encoding and os.path.isfile('testsuite/encodings/%s.tar.gz' % encoding):
    os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding)
    have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
if have_encoding:
    #enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding)
    enc_pattern = patterns[encoding]
else:
    print(encoding + " specific files not found.")
def unicodise(string):
    """Return *string* as unicode; bytes are decoded as UTF-8 with 'replace'."""
    if type(string) is not unicode:
        return unicode(string, "UTF-8", "replace")
    return string
def deunicodise(string):
    """Inverse of unicodise(): encode unicode text back to UTF-8 bytes."""
    if type(string) == unicode:
        return string.encode("UTF-8", "replace")
    return string
# Unpack fixture files with deliberately awkward names (only once).
if not os.path.isdir('testsuite/crappy-file-name'):
    os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz")
# TODO: also unpack if the tarball is newer than the directory timestamp
# for instance when a new version was pulled from SVN.
def test(label, cmd_args = [], retcode = 0, must_find = [], must_not_find = [],
         must_find_re = [], must_not_find_re = [], stdin = None,
         skip_if_profile = None, skip_if_not_profile = None):
    """
    Run one numbered test: execute cmd_args (stderr merged into stdout),
    check the exit status against `retcode` (an int or list of acceptable
    ints), then scan the output for literal patterns (must_find /
    must_not_find) and regular expressions (must_find_re / must_not_find_re).

    The test is skipped when its number is not selected on the command line,
    when cmd_args is empty, or when the active server profile matches
    skip_if_profile / is missing from skip_if_not_profile.

    NOTE: a failure prints diagnostics and terminates the whole suite via
    sys.exit(1).  The list defaults are never mutated here.
    """
    def command_output():
        # Dump the command line and its captured output for diagnostics.
        print("----")
        print(" ".join([" " in arg and "'%s'" % arg or arg for arg in cmd_args]))
        print("----")
        print(stdout)
        print("----")
    def failure(message = ""):
        global count_fail
        if message:
            message = u" (%r)" % message
        print(u"\x1b[31;1mFAIL%s\x1b[0m" % (message))
        count_fail += 1
        command_output()
        #return 1
        sys.exit(1)
    def success(message = ""):
        global count_pass
        if message:
            message = " (%r)" % message
        print("\x1b[32;1mOK\x1b[0m%s" % (message))
        count_pass += 1
        if verbose:
            command_output()
        return 0
    def skip(message = ""):
        global count_skip
        if message:
            message = " (%r)" % message
        print("\x1b[33;1mSKIP\x1b[0m%s" % (message))
        count_skip += 1
        return 0
    def compile_list(_list, regexps = False):
        # Literal patterns are escaped first; all matching is MULTILINE.
        if regexps == False:
            _list = [re.escape(item) for item in _list]
        return [re.compile(item, re.MULTILINE) for item in _list]
    global test_counter
    test_counter += 1
    print(("%3d %s " % (test_counter, label)).ljust(30, "."), end=' ')
    sys.stdout.flush()
    if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0:
        return skip()
    if not cmd_args:
        return skip()
    if skip_if_profile and server_profile in skip_if_profile:
        return skip()
    if skip_if_not_profile and server_profile not in skip_if_not_profile:
        return skip()
    p = Popen(cmd_args, stdin = stdin, stdout = PIPE, stderr = STDOUT, universal_newlines = True, close_fds = True)
    stdout, stderr = p.communicate()
    if type(retcode) not in [list, tuple]:
        retcode = [retcode]
    if p.returncode not in retcode:
        return failure("retcode: %d, expected one of: %s" % (p.returncode, retcode))
    # Normalize single patterns into lists so the scanning code is uniform.
    if type(must_find) not in [ list, tuple ]: must_find = [must_find]
    if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re]
    if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find]
    if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re]
    find_list = []
    find_list.extend(compile_list(must_find))
    find_list.extend(compile_list(must_find_re, regexps = True))
    find_list_patterns = []
    find_list_patterns.extend(must_find)
    find_list_patterns.extend(must_find_re)
    not_find_list = []
    not_find_list.extend(compile_list(must_not_find))
    not_find_list.extend(compile_list(must_not_find_re, regexps = True))
    not_find_list_patterns = []
    not_find_list_patterns.extend(must_not_find)
    not_find_list_patterns.extend(must_not_find_re)
    # BUGFIX/cleanup: decode the captured output exactly once, before any
    # scanning.  The original re-ran unicodise(stdout) on every must_find
    # iteration and never ran it at all when must_find was empty, so
    # must_not_find patterns were matched against the un-decoded text in
    # that case.
    stdout = unicodise(stdout)
    for index in range(len(find_list)):
        match = find_list[index].search(stdout)
        if not match:
            return failure("pattern not found: %s" % find_list_patterns[index])
    for index in range(len(not_find_list)):
        match = not_find_list[index].search(stdout)
        if match:
            return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0)))
    return success()
def test_s3cmd(label, cmd_args = [], **kwargs):
    """Like test(), but runs the local s3cmd through the Python interpreter
    and injects '-c <config_file>' when a config file is known.

    BUGFIX: the original inserted into cmd_args in place, mutating both the
    caller's list and the shared [] default argument; work on a copy instead.
    """
    cmd_args = list(cmd_args)
    if not cmd_args[0].endswith("s3cmd"):
        cmd_args.insert(0, "python")
        cmd_args.insert(1, "s3cmd")
    if config_file:
        cmd_args.insert(2, "-c")
        cmd_args.insert(3, config_file)
    return test(label, cmd_args, **kwargs)
def test_mkdir(label, dir_name):
    # Create dir_name (with parents) via an external 'mkdir' command so the
    # operation is counted and reported through test().
    # NOTE(review): on Windows ('nt') the ['mkdir', '-p'] command line looks
    # wrong — cmd.exe's mkdir is a shell builtin and takes no -p flag;
    # confirm whether the suite is ever run there.
    if os.name in ("posix", "nt"):
        cmd = ['mkdir', '-p']
    else:
        print("Unknown platform: %s" % os.name)
        sys.exit(1)
    cmd.append(dir_name)
    return test(label, cmd)
def test_rmdir(label, dir_name):
    """Recursively delete *dir_name* through test(); counts as a skip when
    the directory does not exist (empty command list)."""
    if not os.path.isdir(dir_name):
        return test(label, [])
    if os.name == "posix":
        cmd = ['rm', '-rf']
    elif os.name == "nt":
        cmd = ['rmdir', '/s/q']
    else:
        print("Unknown platform: %s" % os.name)
        sys.exit(1)
    cmd.append(dir_name)
    return test(label, cmd)
def test_flushdir(label, dir_name):
    # Recreate dir_name empty: remove it (if present), then make it again.
    test_rmdir(label + "(rm)", dir_name)
    return test_mkdir(label + "(mk)", dir_name)
def test_copy(label, src_file, dst_file):
    """Copy src_file to dst_file with the platform's copy command, via test()."""
    if os.name == "posix":
        copy_cmd = ['cp', '-f']
    elif os.name == "nt":
        copy_cmd = ['copy']
    else:
        print("Unknown platform: %s" % os.name)
        sys.exit(1)
    copy_cmd.extend([src_file, dst_file])
    return test(label, copy_cmd)
def test_curl_HEAD(label, src_file, **kwargs):
    """HEAD-request *src_file* with curl (silently, following redirects),
    reported through test()."""
    return test(label,
                ['curl', '--silent', '--head', '--include', '--location', src_file],
                **kwargs)
# Default bucket prefix is the current user name, to keep parallel runs from
# colliding on bucket names.
bucket_prefix = u"%s-" % getpass.getuser().lower()
server_profile = None
# Hand-rolled option parsing; options consume following argv items via pop().
argv = sys.argv[1:]
while argv:
    arg = argv.pop(0)
    if arg.startswith('--bucket-prefix='):
        print("Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'")
        sys.exit(0)
    if arg in ("-h", "--help"):
        print("%s A B K..O -N" % sys.argv[0])
        print("Run tests number A, B and K through to O, except for N")
        sys.exit(0)
    if arg in ("-c", "--config"):
        config_file = argv.pop(0)
        continue
    if arg in ("-l", "--list"):
        # Exclude everything so the loop below only prints test labels.
        exclude_tests = range(0, 999)
        break
    if arg in ("-v", "--verbose"):
        verbose = True
        continue
    if arg in ("-p", "--bucket-prefix"):
        try:
            bucket_prefix = argv.pop(0)
        except IndexError:
            print("Bucket prefix option must explicitly supply a bucket name prefix")
            sys.exit(0)
        continue
    if arg in ("-s", "--server-profile"):
        try:
            server_profile = argv.pop(0)
            server_profile = server_profile.lower()
        except IndexError:
            print("Server profile option must explicitly supply a server profile name")
            sys.exit(0)
        if server_profile not in ALLOWED_SERVER_PROFILES:
            print("Server profile value must be one of %r" % ALLOWED_SERVER_PROFILES)
            sys.exit(0)
        continue
    # Remaining forms: "A..B" (range), "-N" (exclude), "N" (include).
    if ".." in arg:
        range_idx = arg.find("..")
        range_start = arg[:range_idx] or 0
        range_end = arg[range_idx+2:] or 999
        run_tests.extend(range(int(range_start), int(range_end) + 1))
    elif arg.startswith("-"):
        exclude_tests.append(int(arg[1:]))
    else:
        run_tests.append(int(arg))
print("Using bucket prefix: '%s'" % bucket_prefix)
cfg = S3.Config.Config(config_file)
# Autodetect server profile if not set:
if server_profile is None:
    if 's3.amazonaws.com' in cfg.host_base:
        server_profile = 'aws'
print("Using server profile: '%s'" % server_profile)
# No explicit selection means "run everything".
if not run_tests:
    run_tests = range(0, 999)
# helper functions for generating bucket names
def bucket(tail, prefix = None):
    '''Return the test bucket name "<prefix>s3cmd-autotest-<tail>".

    *prefix* defaults to the module-level bucket_prefix.  (CLEANUP: removed
    a dead `if str(tail) == '3'` branch that re-assigned the identical
    label value.)
    '''
    if prefix is None:
        prefix = bucket_prefix
    return '%ss3cmd-autotest-%s' % (prefix, tail)
def pbucket(tail):
    '''Like bucket(), but prepends "s3://" for you'''
    return "s3://%s" % bucket(tail)
## ====== Remove test buckets
test_s3cmd("Remove test buckets", ['rb', '-r', '--force', pbucket(1), pbucket(2), pbucket(3)])
## ====== verify they were removed
test_s3cmd("Verify no test buckets", ['ls'],
must_not_find = [pbucket(1), pbucket(2), pbucket(3)])
## ====== Create one bucket (EU)
test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)],
must_find = "Bucket '%s/' created" % pbucket(1))
## ====== Create multiple buckets
test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)],
must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)])
## ====== Invalid bucket name
test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')],
retcode = EX_USAGE,
must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'),
must_not_find_re = "Bucket.*created")
## ====== Enable ACLs and public access to buckets
for idx, bpath in enumerate((pbucket(1), pbucket(2), pbucket(3))):
test_s3cmd("Enable ACLs for bucket %d" % idx, ['setownership', bpath, 'ObjectWriter'],
must_find = "%s/: Bucket Object Ownership updated" % bpath,
skip_if_profile = ['minio'])
test_s3cmd("Disable Block Public Access for bucket %d" % idx, ['setblockpublicaccess', bpath, ''],
must_find = "%s/: Block Public Access updated" % bpath,
skip_if_profile = ['minio'])
## ====== Buckets list
test_s3cmd("Buckets list", ["ls"],
must_find = [ pbucket(1), pbucket(2), pbucket(3) ], must_not_find_re = pbucket('EU'))
## ====== Directory for cache
test_flushdir("Create cache dir", "testsuite/cachetest")
## ====== Sync to S3
test_s3cmd("Sync to S3", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'testsuite/cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache'],
must_find = ["ERROR: Upload of 'testsuite/permission-tests/permission-denied.txt' is not possible (Reason: Permission denied)",
"WARNING: 32 non-printable characters replaced in: crappy-file-name/non-printables",
],
must_not_find_re = ["demo/", r"^(?!WARNING: Skipping).*\.png$", "permission-denied-dir"],
retcode = EX_PARTIAL)
## ====== Create new file and sync with caching enabled
test_mkdir("Create cache dir", "testsuite/cachetest/content")
if os.path.exists("testsuite/cachetest"):
with open("testsuite/cachetest/content/testfile", "w"):
pass
test_s3cmd("Sync to S3 with caching", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache' ],
must_find = "upload: 'testsuite/cachetest/content/testfile' -> '%s/xyz/cachetest/content/testfile'" % pbucket(1),
must_not_find = "upload 'testsuite/cachetest/.s3cmdcache'",
retcode = EX_PARTIAL)
## ====== Remove content and retry cached sync with --delete-removed
test_rmdir("Remove local file", "testsuite/cachetest/content")
test_s3cmd("Sync to S3 and delete removed with caching", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'testsuite/cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache', '--delete-removed'],
must_find = "delete: '%s/xyz/cachetest/content/testfile'" % pbucket(1),
must_not_find = "dictionary changed size during iteration",
retcode = EX_PARTIAL)
## ====== Remove cache directory and file
test_rmdir("Remove cache dir", "testsuite/cachetest")
## ====== Test empty directories
test_mkdir("Create empty dir", "testsuite/blahBlah/dirtest/emptydir")
test_s3cmd("Sync to S3 empty dir without keep dir", ['sync', 'testsuite/blahBlah', pbucket(1) + '/withoutdirs/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings'],
#must_find = "upload: 'testsuite/cachetest/content/testfile' -> '%s/xyz/cachetest/content/testfile'" % pbucket(1),
must_not_find = "upload: 'testsuite/blahBlah/dirtest/emptydir'")
test_s3cmd("Sync to S3 empty dir with keep dir", ['sync', 'testsuite/blahBlah', pbucket(1) + '/withdirs/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--keep-dirs'],
#must_find = "upload: 'testsuite/cachetest/content/testfile' -> '%s/xyz/cachetest/content/testfile'" % pbucket(1),
must_find = "upload: 'testsuite/blahBlah/dirtest/emptydir'")
## ====== Remove cache directory and file
test_rmdir("Remove cache dir", "testsuite/blahBlah/dirtest")
if have_encoding:
## ====== Sync UTF-8 / GBK / ... to S3
test_s3cmd(u"Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ],
must_find = [ u"'testsuite/encodings/%(encoding)s/%(pattern)s' -> '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ])
## ====== List bucket content
test_s3cmd("List bucket content", ['ls', '%s/xyz/' % pbucket(1) ],
must_find_re = [ u"DIR +%s/xyz/binary/$" % pbucket(1) , u"DIR +%s/xyz/etc/$" % pbucket(1) ],
must_not_find = [ u"random-crap.md5", u"/demo" ])
## ====== List bucket recursive
must_find = [ u"%s/xyz/binary/random-crap.md5" % pbucket(1) ]
if have_encoding:
must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
test_s3cmd("List bucket recursive", ['ls', '--recursive', pbucket(1)],
must_find = must_find,
must_not_find = [ "logo.png" ])
## ====== FIXME
test_s3cmd("Recursive put", ['put', '--recursive', 'testsuite/etc', '%s/xyz/' % pbucket(1) ])
## ====== Clean up local destination dir
test_flushdir("Clean testsuite-out/", "testsuite-out")
## ====== Put from stdin
f = open('testsuite/single-file/single-file.txt', 'r')
test_s3cmd("Put from stdin", ['put', '-', '%s/single-file/single-file.txt' % pbucket(1)],
must_find = ["'<stdin>' -> '%s/single-file/single-file.txt'" % pbucket(1)],
stdin = f)
f.close()
## ====== Multipart put
os.system('mkdir -p testsuite-out')
os.system('dd if=/dev/urandom of=testsuite-out/urandom.bin bs=1M count=16 > /dev/null 2>&1')
test_s3cmd("Put multipart", ['put', '--multipart-chunk-size-mb=5', 'testsuite-out/urandom.bin', '%s/urandom.bin' % pbucket(1)],
must_not_find = ['abortmp'])
## ====== Multipart put from stdin
f = open('testsuite-out/urandom.bin', 'r')
test_s3cmd("Multipart large put from stdin", ['put', '--multipart-chunk-size-mb=5', '-', '%s/urandom2.bin' % pbucket(1)],
must_find = ['%s/urandom2.bin' % pbucket(1)],
must_not_find = ['abortmp'],
stdin = f)
f.close()
## ====== Clean up local destination dir
test_flushdir("Clean testsuite-out/", "testsuite-out")
## ====== Moving things without trailing '/'
os.system('dd if=/dev/urandom of=testsuite-out/urandom1.bin bs=1k count=1 > /dev/null 2>&1')
os.system('dd if=/dev/urandom of=testsuite-out/urandom2.bin bs=1k count=1 > /dev/null 2>&1')
test_s3cmd("Put multiple files", ['put', 'testsuite-out/urandom1.bin', 'testsuite-out/urandom2.bin', '%s/' % pbucket(1)],
must_find = ["%s/urandom1.bin" % pbucket(1), "%s/urandom2.bin" % pbucket(1)])
test_s3cmd("Move without '/'", ['mv', '%s/urandom1.bin' % pbucket(1), '%s/urandom2.bin' % pbucket(1), '%s/dir' % pbucket(1)],
retcode = 64,
must_find = ['Destination must be a directory'])
test_s3cmd("Move recursive w/a '/'",
['-r', 'mv', '%s/dir1' % pbucket(1), '%s/dir2' % pbucket(1)],
retcode = 64,
must_find = ['Destination must be a directory'])
## ====== Moving multiple files into directory with trailing '/'
must_find = ["'%s/urandom1.bin' -> '%s/dir/urandom1.bin'" % (pbucket(1),pbucket(1)), "'%s/urandom2.bin' -> '%s/dir/urandom2.bin'" % (pbucket(1),pbucket(1))]
must_not_find = ["'%s/urandom1.bin' -> '%s/dir'" % (pbucket(1),pbucket(1)), "'%s/urandom2.bin' -> '%s/dir'" % (pbucket(1),pbucket(1))]
test_s3cmd("Move multiple files",
['mv', '%s/urandom1.bin' % pbucket(1), '%s/urandom2.bin' % pbucket(1), '%s/dir/' % pbucket(1)],
must_find = must_find,
must_not_find = must_not_find)
## ====== Clean up local destination dir
test_flushdir("Clean testsuite-out/", "testsuite-out")
## ====== Sync from S3
must_find = [ "'%s/xyz/binary/random-crap.md5' -> 'testsuite-out/xyz/binary/random-crap.md5'" % pbucket(1) ]
if have_encoding:
must_find.append(u"'%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' -> 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
test_s3cmd("Sync from S3", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
must_find = must_find)
## ====== Create 'emptydirtests' test directories
test_rmdir("Create 'emptytests/withoutdirs'", "testsuite-out/emptytests/withoutdirs/")
test_rmdir("Create 'emptytests/withdirs/'", "testsuite-out/emptytests/withdirs/")
test_s3cmd("Sync from S3 no empty dir", ['sync', '%s/withoutdirs/' % pbucket(1), 'testsuite-out/emptytests/withoutdirs/'],
must_not_find = ["mkdir: '%s/withoutdirs/blahBlah/dirtest/emptydir/'" % pbucket(1)])
test_s3cmd("Sync from S3 with empty dir", ['sync', '%s/withdirs/' % pbucket(1), 'testsuite-out/emptytests/withdirs/'],
must_find = ["mkdir: '%s/withdirs/blahBlah/dirtest/emptydir/'" % pbucket(1)])
## ====== Remove 'emptydirtests' directory
test_rmdir("Remove 'emptytests/'", "testsuite-out/emptytests/")
## ====== Remove 'demo' directory
test_rmdir("Remove 'dir-test/'", "testsuite-out/xyz/dir-test/")
## ====== Create dir with name of a file
test_mkdir("Create file-dir dir", "testsuite-out/xyz/dir-test/file-dir")
## ====== Skip dst dirs
test_s3cmd("Skip over dir", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
must_find = "ERROR: Download of 'xyz/dir-test/file-dir' failed (Reason: testsuite-out/xyz/dir-test/file-dir is a directory)",
retcode = EX_PARTIAL)
## ====== Clean up local destination dir
test_flushdir("Clean testsuite-out/", "testsuite-out")
## ====== Put public, guess MIME
test_s3cmd("Put public, guess MIME", ['put', '--guess-mime-type', '--acl-public', 'testsuite/etc/logo.png', '%s/xyz/etc/logo.png' % pbucket(1)],
must_find = [ "-> '%s/xyz/etc/logo.png'" % pbucket(1) ])
## ====== Retrieve from URL
if have_curl:
test_curl_HEAD("Retrieve from URL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
must_find_re = ['Content-Length: 22059'],
skip_if_profile = ['minio'])
## ====== Change ACL to Private
test_s3cmd("Change ACL to Private", ['setacl', '--acl-private', '%s/xyz/etc/l*.png' % pbucket(1)],
must_find = [ "logo.png: ACL set to Private" ],
skip_if_profile = ['minio'])
## ====== Verify Private ACL
if have_curl:
test_curl_HEAD("Verify Private ACL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
must_find_re = [ '403 Forbidden' ],
skip_if_profile = ['minio'])
## ====== Change ACL to Public
test_s3cmd("Change ACL to Public", ['setacl', '--acl-public', '--recursive', '%s/xyz/etc/' % pbucket(1) , '-v'],
must_find = [ "logo.png: ACL set to Public" ],
skip_if_profile = ['minio'])
## ====== Verify Public ACL
if have_curl:
test_curl_HEAD("Verify Public ACL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
must_find_re = [ '200 OK', 'Content-Length: 22059'],
skip_if_profile = ['minio'])
## ====== Sync more to S3
test_s3cmd("Sync more to S3", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt' ],
must_find = [ "'testsuite/demo/some-file.xml' -> '%s/xyz/demo/some-file.xml' " % pbucket(1) ],
must_not_find = [ "'testsuite/etc/linked.png' -> '%s/xyz/etc/linked.png'" % pbucket(1) ],
retcode = EX_PARTIAL)
## ====== Don't check MD5 sum on Sync
test_copy("Change file cksum1.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum1.txt")
test_copy("Change file cksum33.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum33.txt")
test_s3cmd("Don't check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--no-check-md5'],
must_find = [ "cksum33.txt" ],
must_not_find = [ "cksum1.txt" ],
retcode = EX_PARTIAL)
## ====== Check MD5 sum on Sync
test_s3cmd("Check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--check-md5'],
must_find = [ "cksum1.txt" ],
retcode = EX_PARTIAL)
## ====== Rename within S3
test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
must_find = [ "move: '%s/xyz/etc/logo.png' -> '%s/xyz/etc2/Logo.PNG'" % (pbucket(1), pbucket(1))])
## ====== Rename (NoSuchKey)
test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
retcode = EX_NOTFOUND,
must_find_re = [ 'Key not found' ],
must_not_find = [ "move: '%s/xyz/etc/logo.png' -> '%s/xyz/etc2/Logo.PNG'" % (pbucket(1), pbucket(1)) ])
## ====== Sync more from S3 (invalid src)
test_s3cmd("Sync more from S3 (invalid src)", ['sync', '--delete-removed', '%s/xyz/DOESNOTEXIST' % pbucket(1), 'testsuite-out'],
must_not_find = [ "delete: 'testsuite-out/logo.png'" ])
## ====== Sync more from S3
test_s3cmd("Sync more from S3", ['sync', '--delete-removed', '%s/xyz' % pbucket(1), 'testsuite-out'],
must_find = [ "'%s/xyz/etc2/Logo.PNG' -> 'testsuite-out/xyz/etc2/Logo.PNG'" % pbucket(1),
"'%s/xyz/demo/some-file.xml' -> 'testsuite-out/xyz/demo/some-file.xml'" % pbucket(1) ],
must_not_find_re = [ "not-deleted.*etc/logo.png", "delete: 'testsuite-out/logo.png'" ])
## ====== Make dst dir for get
test_rmdir("Remove dst dir for get", "testsuite-out")
## ====== Get multiple files
test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
retcode = EX_USAGE,
must_find = [ 'Destination must be a directory or stdout when downloading multiple sources.' ])
## ====== put/get non-ASCII filenames
test_s3cmd("Put unicode filenames", ['put', u'testsuite/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo', u'%s/xyz/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo' % pbucket(1)],
retcode = 0,
must_find = [ '->' ])
## ====== Make dst dir for get
test_mkdir("Make dst dir for get", "testsuite-out")
## ====== put/get non-ASCII filenames
test_s3cmd("Get unicode filenames", ['get', u'%s/xyz/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo' % pbucket(1), 'testsuite-out'],
retcode = 0,
must_find = [ '->' ])
## ====== Get multiple files
test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
must_find = [ u"-> 'testsuite-out/Logo.PNG'",
u"-> 'testsuite-out/AtomicClockRadio.ttf'" ])
## ====== Upload files differing in capitalisation
test_s3cmd("blah.txt / Blah.txt", ['put', '-r', 'testsuite/blahBlah', pbucket(1)],
must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)])
## ====== Copy between buckets
test_s3cmd("Copy between buckets", ['cp', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc2/logo.png' % pbucket(3)],
must_find = [ "remote copy: '%s/xyz/etc2/Logo.PNG' -> '%s/xyz/etc2/logo.png'" % (pbucket(1), pbucket(3)) ])
## ====== Recursive copy
test_s3cmd("Recursive copy, set ACL", ['cp', '-r', '--acl-public', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--exclude', 'demo/dir?/*.txt', '--exclude', 'non-printables*'],
must_find = [ "remote copy: '%s/xyz/etc2/Logo.PNG' -> '%s/copy/etc2/Logo.PNG'" % (pbucket(1), pbucket(2)),
"remote copy: '%s/xyz/blahBlah/Blah.txt' -> '%s/copy/blahBlah/Blah.txt'" % (pbucket(1), pbucket(2)),
"remote copy: '%s/xyz/blahBlah/blah.txt' -> '%s/copy/blahBlah/blah.txt'" % (pbucket(1), pbucket(2)) ],
must_not_find = [ "demo/dir1/file1-1.txt" ])
## ====== Verify ACL and MIME type
test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = [ "MIME type:.*image/png",
r"ACL:.*\*anon\*: READ",
"URL:.*https?://%s.%s/copy/etc2/Logo.PNG" % (bucket(2), cfg.host_base) ],
skip_if_profile = ['minio'])
# Minio does not support ACL checks
test_s3cmd("Verify MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = ["MIME type:.*image/png"],
skip_if_not_profile = ['minio'])
## ====== modify MIME type
test_s3cmd("Modify MIME type", ['modify', '--mime-type=binary/octet-stream', '%s/copy/etc2/Logo.PNG' % pbucket(2) ])
test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = [ "MIME type:.*binary/octet-stream",
r"ACL:.*\*anon\*: READ",
"URL:.*https?://%s.%s/copy/etc2/Logo.PNG" % (bucket(2), cfg.host_base) ],
skip_if_profile = ['minio'])
# Minio does not support ACL checks
test_s3cmd("Verify MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = ["MIME type:.*binary/octet-stream"],
skip_if_not_profile = ['minio'])
## ====== reset MIME type
test_s3cmd("Modify MIME type back", ['modify', '--mime-type=image/png', '%s/copy/etc2/Logo.PNG' % pbucket(2) ])
test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = [ "MIME type:.*image/png",
r"ACL:.*\*anon\*: READ",
"URL:.*https?://%s.%s/copy/etc2/Logo.PNG" % (bucket(2), cfg.host_base) ],
skip_if_profile = ['minio'])
# Minio does not support ACL checks
test_s3cmd("Verify MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = ["MIME type:.*image/png"],
skip_if_not_profile = ['minio'])
test_s3cmd("Add cache-control header", ['modify', '--add-header=cache-control: max-age=3600, public', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = [ "modify: .*" ])
if have_curl:
test_curl_HEAD("HEAD check Cache-Control present", 'http://%s.%s/copy/etc2/Logo.PNG' % (bucket(2), cfg.host_base),
must_find_re = [ "Cache-Control: max-age=3600" ],
skip_if_profile = ['minio'])
test_s3cmd("Remove cache-control header", ['modify', '--remove-header=cache-control', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = [ "modify: .*" ])
if have_curl:
test_curl_HEAD("HEAD check Cache-Control not present", 'http://%s.%s/copy/etc2/Logo.PNG' % (bucket(2), cfg.host_base),
must_not_find_re = [ "Cache-Control: max-age=3600" ],
skip_if_profile = ['minio'])
## ====== sign
test_s3cmd("sign string", ['sign', 's3cmd'], must_find_re = ["Signature:"])
test_s3cmd("signurl time", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), str(int(time.time()) + 60)], must_find_re = ["http://"])
test_s3cmd("signurl time offset", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), '+60'], must_find_re = ["https?://"])
test_s3cmd("signurl content disposition and type", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), '+60', '--content-disposition=inline; filename=video.mp4', '--content-type=video/mp4'], must_find_re = [ 'response-content-disposition', 'response-content-type' ] )
## ====== Rename within S3
test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/copy/etc/logo.png' % pbucket(2)],
must_find = [ "move: '%s/copy/etc2/Logo.PNG' -> '%s/copy/etc/logo.png'" % (pbucket(2), pbucket(2))])
## ====== Sync between buckets
test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'],
must_find = [ "remote copy: '%s/xyz/demo/dir1/file1-1.txt' -> '%s/copy/demo/dir1/file1-1.txt'" % (pbucket(1), pbucket(2)),
"remote copy: 'etc/logo.png' -> 'etc2/Logo.PNG'",
"delete: '%s/copy/etc/logo.png'" % pbucket(2) ],
must_not_find = [ "blah.txt" ])
## ====== Exclude directory
test_s3cmd("Exclude directory", ['put', '-r', 'testsuite/demo/', pbucket(1) + '/xyz/demo/', '--exclude', 'dir1/', '-d'],
must_find = ["'testsuite/demo/dir2/file2-1.bin' -> '%s/xyz/demo/dir2/file2-1.bin'" % pbucket(1),
"DEBUG: EXCLUDE: 'testsuite/demo/dir1/'"], # whole directory is excluded
must_not_find = ["'testsuite/demo/dir1/file1-1.txt' -> '%s/xyz/demo/dir1/file1-1.txt'" % pbucket(1),
"DEBUG: EXCLUDE: 'dir1/file1-1.txt'" # file is not synced, but also single file is not excluded
])
## ====== Don't Put symbolic link
test_s3cmd("Don't put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),],
retcode = EX_USAGE,
must_find = ["WARNING: Skipping over symbolic link: testsuite/etc/linked1.png"],
must_not_find_re = ["^(?!WARNING: Skipping).*linked1.png"])
## ====== Put symbolic link
test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),'--follow-symlinks' ],
must_find = [ "'testsuite/etc/linked1.png' -> '%s/xyz/linked1.png'" % pbucket(1)])
## ====== Sync symbolic links
test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks' ],
must_find = ["remote copy: 'etc2/Logo.PNG' -> 'etc/linked.png'"],
# Don't want to recursively copy linked directories!
must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt",
"etc/brokenlink.png"],
retcode = EX_PARTIAL)
## ====== Multi source move
test_s3cmd("Multi-source move", ['mv', '-r', '%s/copy/blahBlah/Blah.txt' % pbucket(2), '%s/copy/etc/' % pbucket(2), '%s/moved/' % pbucket(2)],
must_find = [ "move: '%s/copy/blahBlah/Blah.txt' -> '%s/moved/Blah.txt'" % (pbucket(2), pbucket(2)),
"move: '%s/copy/etc/AtomicClockRadio.ttf' -> '%s/moved/AtomicClockRadio.ttf'" % (pbucket(2), pbucket(2)),
"move: '%s/copy/etc/TypeRa.ttf' -> '%s/moved/TypeRa.ttf'" % (pbucket(2), pbucket(2)) ],
must_not_find = [ "blah.txt" ])
## ====== Verify move
test_s3cmd("Verify move", ['ls', '-r', pbucket(2)],
must_find = [ "%s/moved/Blah.txt" % pbucket(2),
"%s/moved/AtomicClockRadio.ttf" % pbucket(2),
"%s/moved/TypeRa.ttf" % pbucket(2),
"%s/copy/blahBlah/blah.txt" % pbucket(2) ],
must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2),
"%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2),
"%s/copy/etc/TypeRa.ttf" % pbucket(2) ])
## ====== List all
test_s3cmd("List all", ['la'],
must_find = [ "%s/urandom.bin" % pbucket(1)])
## ====== Simple delete
test_s3cmd("Simple delete", ['del', '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
must_find = [ "delete: '%s/xyz/etc2/Logo.PNG'" % pbucket(1) ])
## ====== Simple delete with rm
test_s3cmd("Simple delete with rm", ['rm', '%s/xyz/test_rm/TypeRa.ttf' % pbucket(1)],
must_find = [ "delete: '%s/xyz/test_rm/TypeRa.ttf'" % pbucket(1) ])
## ====== Create expiration rule with days and prefix
test_s3cmd("Create expiration rule with days and prefix", ['expire', pbucket(1), '--expiry-days=365', '--expiry-prefix=log/'],
must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
## ====== Create expiration rule with date and prefix
test_s3cmd("Create expiration rule with date and prefix", ['expire', pbucket(1), '--expiry-date=2030-12-31T00:00:00.000Z', '--expiry-prefix=log/'],
must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
## ====== Create expiration rule with days only
test_s3cmd("Create expiration rule with days only", ['expire', pbucket(1), '--expiry-days=365'],
must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
## ====== Create expiration rule with date only
test_s3cmd("Create expiration rule with date only", ['expire', pbucket(1), '--expiry-date=2030-12-31T00:00:00.000Z'],
must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
## ====== Get current expiration setting
test_s3cmd("Get current expiration setting", ['info', pbucket(1)],
must_find_re = [ "Expiration Rule: all objects in this bucket will expire in '2030-12-31T00:00:00(?:.000)?Z'"])
## ====== Delete expiration rule
test_s3cmd("Delete expiration rule", ['expire', pbucket(1)],
must_find = [ "Bucket '%s/': expiration configuration is deleted." % pbucket(1)])
## ====== set Requester Pays flag
test_s3cmd("Set requester pays", ['payer', '--requester-pays', pbucket(2)],
skip_if_profile=['minio'])
## ====== get Requester Pays flag
test_s3cmd("Get requester pays flag", ['info', pbucket(2)],
must_find = [ "Payer: Requester"],
skip_if_profile=['minio'])
## ====== ls using Requester Pays flag
test_s3cmd("ls using requester pays flag", ['ls', '--requester-pays', pbucket(2)],
skip_if_profile=['minio'])
## ====== clear Requester Pays flag
test_s3cmd("Clear requester pays", ['payer', pbucket(2)],
skip_if_profile=['minio'])
## ====== get Requester Pays flag
test_s3cmd("Get requester pays flag", ['info', pbucket(2)],
must_find = [ "Payer: BucketOwner"],
skip_if_profile=['minio'])
## ====== Recursive delete maximum exceed
test_s3cmd("Recursive delete maximum exceeded", ['del', '--recursive', '--max-delete=1', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
must_not_find = [ "delete: '%s/xyz/etc/TypeRa.ttf'" % pbucket(1) ])
## ====== Recursive delete
test_s3cmd("Recursive delete", ['del', '--recursive', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
must_find = [ "delete: '%s/xyz/etc/TypeRa.ttf'" % pbucket(1) ],
must_find_re = [ "delete: '.*/etc/logo.png'" ],
must_not_find = [ "AtomicClockRadio.ttf" ])
## ====== Recursive delete with rm
test_s3cmd("Recursive delete with rm", ['rm', '--recursive', '--exclude', 'Atomic*', '%s/xyz/test_rm' % pbucket(1)],
must_find = [ "delete: '%s/xyz/test_rm/more/give-me-more.txt'" % pbucket(1) ],
must_find_re = [ "delete: '.*/test_rm/logo.png'" ],
must_not_find = [ "AtomicClockRadio.ttf" ])
## ====== Recursive delete all
test_s3cmd("Recursive delete all", ['del', '--recursive', '--force', pbucket(1)],
must_find_re = [ "delete: '.*binary/random-crap'" ])
## ====== Remove empty bucket
test_s3cmd("Remove empty bucket", ['rb', pbucket(1)],
must_find = [ "Bucket '%s/' removed" % pbucket(1) ])
## ====== Remove remaining buckets
test_s3cmd("Remove remaining buckets", ['rb', '--recursive', pbucket(2), pbucket(3)],
must_find = [ "Bucket '%s/' removed" % pbucket(2),
"Bucket '%s/' removed" % pbucket(3) ])
# vim:et:ts=4:sts=4:ai
| 39,835 | Python | .py | 723 | 48.957123 | 331 | 0.612455 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,390 | Config.py | s3tools_s3cmd/S3/Config.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3 manager
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import
import logging
import datetime
import locale
import re
import os
import io
import sys
import json
import time
from logging import debug, warning
from .ExitCodes import EX_OSFILE
try:
import dateutil.parser
import dateutil.tz
except ImportError:
sys.stderr.write(u"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ImportError trying to import dateutil.parser and dateutil.tz.
Please install the python dateutil module:
$ sudo apt-get install python-dateutil
or
$ sudo yum install python-dateutil
or
$ pip install python-dateutil
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""")
sys.stderr.flush()
sys.exit(EX_OSFILE)
try:
# python 3 support
import httplib
except ImportError:
import http.client as httplib
try:
from configparser import (NoOptionError, NoSectionError,
MissingSectionHeaderError, ParsingError,
ConfigParser as PyConfigParser)
except ImportError:
# Python2 fallback code
from ConfigParser import (NoOptionError, NoSectionError,
MissingSectionHeaderError, ParsingError,
ConfigParser as PyConfigParser)
from . import Progress
from .SortedDict import SortedDict
from .BaseUtils import (s3_quote, getTreeFromXml, getDictFromTree,
base_unicodise, dateRFC822toPython)
try:
unicode
except NameError:
# python 3 support
# In python 3, unicode -> str, and str -> bytes
unicode = str
def is_bool_true(value):
"""Check to see if a string is true, yes, on, or 1
value may be a str, or unicode.
Return True if it is
"""
if type(value) == unicode:
return value.lower() in ["true", "yes", "on", "1"]
elif type(value) == bool and value == True:
return True
else:
return False
def is_bool_false(value):
"""Check to see if a string is false, no, off, or 0
value may be a str, or unicode.
Return True if it is
"""
if type(value) == unicode:
return value.lower() in ["false", "no", "off", "0"]
elif type(value) == bool and value == False:
return True
else:
return False
def is_bool(value):
"""Check a string value to see if it is bool"""
return is_bool_true(value) or is_bool_false(value)
class Config(object):
_instance = None
_parsed_files = []
_doc = {}
access_key = u""
secret_key = u""
access_token = u""
_access_token_refresh = True
_access_token_expiration = None
_access_token_last_update = None
host_base = u"s3.amazonaws.com"
host_bucket = u"%(bucket)s.s3.amazonaws.com"
kms_key = u"" #can't set this and Server Side Encryption at the same time
# simpledb_host looks useless, legacy? to remove?
simpledb_host = u"sdb.amazonaws.com"
cloudfront_host = u"cloudfront.amazonaws.com"
verbosity = logging.WARNING
progress_meter = sys.stdout.isatty()
progress_class = Progress.ProgressCR
send_chunk = 64 * 1024
recv_chunk = 64 * 1024
list_md5 = False
long_listing = False
human_readable_sizes = False
extra_headers = SortedDict(ignore_case = True)
force = False
server_side_encryption = False
enable = None
get_continue = False
put_continue = False
upload_id = u""
skip_existing = False
recursive = False
restore_days = 1
restore_priority = u"Standard"
acl_public = None
acl_grants = []
acl_revokes = []
proxy_host = u""
proxy_port = 3128
encrypt = False
dry_run = False
add_encoding_exts = u""
preserve_attrs = True
preserve_attrs_list = [
u'uname', # Verbose owner Name (e.g. 'root')
u'uid', # Numeric user ID (e.g. 0)
u'gname', # Group name (e.g. 'users')
u'gid', # Numeric group ID (e.g. 100)
u'atime', # Last access timestamp
u'mtime', # Modification timestamp
u'ctime', # Creation timestamp
u'mode', # File mode (e.g. rwxr-xr-x = 755)
u'md5', # File MD5 (if known)
#u'acl', # Full ACL (not yet supported)
]
keep_dirs = False
delete_removed = False
delete_after = False
delete_after_fetch = False
max_delete = -1
limit = -1
_doc['delete_removed'] = u"[sync] Remove remote S3 objects when local file has been deleted"
delay_updates = False # OBSOLETE
gpg_passphrase = u""
gpg_command = u""
gpg_encrypt = u"%(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
gpg_decrypt = u"%(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
use_https = True
ca_certs_file = u""
ssl_client_key_file = u""
ssl_client_cert_file = u""
check_ssl_certificate = True
check_ssl_hostname = True
bucket_location = u"US"
default_mime_type = u"binary/octet-stream"
guess_mime_type = True
use_mime_magic = True
mime_type = u""
enable_multipart = True
# Chunk size is at the same time the chunk size and the threshold
multipart_chunk_size_mb = 15 # MiB
# Maximum chunk size for s3-to-s3 copy is 5 GiB.
# But, use a lot lower value by default (1GiB)
multipart_copy_chunk_size_mb = 1 * 1024
# Maximum chunks on AWS S3, could be different on other S3-compatible APIs
multipart_max_chunks = 10000
# List of checks to be performed for 'sync'
sync_checks = ['size', 'md5'] # 'weak-timestamp'
# List of compiled REGEXPs
exclude = []
include = []
# Dict mapping compiled REGEXPs back to their textual form
debug_exclude = {}
debug_include = {}
encoding = locale.getpreferredencoding() or "UTF-8"
urlencoding_mode = u"normal"
log_target_prefix = u""
reduced_redundancy = False
storage_class = u""
follow_symlinks = False
# If too big, this value can be overridden by the OS socket timeouts max values.
# For example, on Linux, a connection attempt will automatically timeout after 120s.
socket_timeout = 300
invalidate_on_cf = False
# joseprio: new flags for default index invalidation
invalidate_default_index_on_cf = False
invalidate_default_index_root_on_cf = True
website_index = u"index.html"
website_error = u""
website_endpoint = u"http://%(bucket)s.s3-website-%(location)s.amazonaws.com/"
additional_destinations = []
files_from = []
cache_file = u""
add_headers = u""
remove_headers = []
expiry_days = u""
expiry_date = u""
expiry_prefix = u""
skip_destination_validation = False
signature_v2 = False
limitrate = 0
requester_pays = False
stop_on_error = False
content_disposition = u""
content_type = u""
stats = False
# Disabled by default because can create a latency with a CONTINUE status reply
# expected for every send file requests.
use_http_expect = False
signurl_use_https = False
# Maximum sleep duration for throttle / limitrate.
# s3 will timeout if a request/transfer is stuck for more than a short time
throttle_max = 100
public_url_use_https = False
connection_pooling = True
# How long in seconds a connection can be kept idle in the pool and still
# be alive. AWS s3 is supposed to close connections that are idle for 20
# seconds or more, but in real life, undocumented, it closes https conns
# after around 6s of inactivity.
connection_max_age = 5
# Not an AWS standard
# allow the listing results to be returned in unsorted order.
# This may be faster when listing very large buckets.
list_allow_unordered = False
# Maximum attempts of re-issuing failed requests
max_retries = 5
## Creating a singleton
def __new__(self, configfile = None, access_key=None, secret_key=None, access_token=None):
if self._instance is None:
self._instance = object.__new__(self)
return self._instance
def __init__(self, configfile = None, access_key=None, secret_key=None, access_token=None):
if configfile:
try:
self.read_config_file(configfile)
except IOError:
if 'AWS_CREDENTIAL_FILE' in os.environ or 'AWS_PROFILE' in os.environ:
self.aws_credential_file()
# override these if passed on the command-line
if access_key and secret_key:
self.access_key = access_key
self.secret_key = secret_key
if access_token:
self.access_token = access_token
# Do not refresh the IAM role when an access token is provided.
self._access_token_refresh = False
if len(self.access_key) == 0:
env_access_key = os.getenv('AWS_ACCESS_KEY') or os.getenv('AWS_ACCESS_KEY_ID')
env_secret_key = os.getenv('AWS_SECRET_KEY') or os.getenv('AWS_SECRET_ACCESS_KEY')
env_access_token = os.getenv('AWS_SESSION_TOKEN') or os.getenv('AWS_SECURITY_TOKEN')
if env_access_key:
if not env_secret_key:
raise ValueError(
"AWS_ACCESS_KEY environment variable is used but"
" AWS_SECRET_KEY variable is missing"
)
# py3 getenv returns unicode and py2 returns bytes.
self.access_key = base_unicodise(env_access_key)
self.secret_key = base_unicodise(env_secret_key)
if env_access_token:
# Do not refresh the IAM role when an access token is provided.
self._access_token_refresh = False
self.access_token = base_unicodise(env_access_token)
else:
self.role_config()
#TODO check KMS key is valid
if self.kms_key and self.server_side_encryption == True:
warning('Cannot have server_side_encryption (S3 SSE) and KMS_key set (S3 KMS). KMS encryption will be used. Please set server_side_encryption to False')
if self.kms_key and self.signature_v2 == True:
raise Exception('KMS encryption requires signature v4. Please set signature_v2 to False')
def role_config(self):
"""
Get credentials from IAM authentication and STS AssumeRole
"""
try:
role_arn = os.environ.get('AWS_ROLE_ARN')
if role_arn:
role_session_name = 'role-session-%s' % (int(time.time()))
params = {
'Action': 'AssumeRole',
'Version': '2011-06-15',
'RoleArn': role_arn,
'RoleSessionName': role_session_name,
}
web_identity_token_file = os.environ.get('AWS_WEB_IDENTITY_TOKEN_FILE')
if web_identity_token_file:
with open(web_identity_token_file) as f:
web_identity_token = f.read()
params['Action'] = 'AssumeRoleWithWebIdentity'
params['WebIdentityToken'] = web_identity_token
encoded_params = '&'.join([
'%s=%s' % (k, s3_quote(v, unicode_output=True))
for k, v in params.items()
])
sts_endpoint = "sts.amazonaws.com"
if os.environ.get("AWS_STS_REGIONAL_ENDPOINTS") == "regional":
# Check if the AWS_REGION variable is available to use as a region.
region = os.environ.get("AWS_REGION")
if not region:
# Otherwise use the bucket location
region = self.bucket_location
sts_endpoint = "sts.%s.amazonaws.com" % region
conn = httplib.HTTPSConnection(host=sts_endpoint,
timeout=2)
conn.request('POST', '/?' + encoded_params)
resp = conn.getresponse()
resp_content = resp.read()
if resp.status == 200 and len(resp_content) > 1:
tree = getTreeFromXml(resp_content)
result_dict = getDictFromTree(tree)
if tree.tag == "AssumeRoleResponse":
creds = result_dict['AssumeRoleResult']['Credentials']
elif tree.tag == "AssumeRoleWithWebIdentityResponse":
creds = result_dict['AssumeRoleWithWebIdentityResult']['Credentials']
else:
raise IOError("Unexpected XML message from STS server: <%s />" % tree.tag)
Config().update_option('access_key', creds['AccessKeyId'])
Config().update_option('secret_key', creds['SecretAccessKey'])
Config().update_option('access_token', creds['SessionToken'])
expiration = dateRFC822toPython(base_unicodise(creds['Expiration']))
# Add a timedelta to prevent any expiration if the EC2 machine is not at the right date
self._access_token_expiration = expiration - datetime.timedelta(minutes=15)
# last update date is not provided in STS responses
self._access_token_last_update = datetime.datetime.now(dateutil.tz.tzutc())
# Others variables : Code / Type
else:
raise IOError
else:
conn = httplib.HTTPConnection(host='169.254.169.254',
timeout=2)
# To use Instance Metadata Service (IMDSv2), we first need to obtain a token, then
# supply it with every IMDS HTTP call. More info:
#
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
#
# 60 seconds is arbitrary, but since we're just pulling small bits of data from the
# local instance, it should be plenty of time.
#
# There's a chance that there are "mostly AWS compatible" systems that might offer
# only IMDSv1 emulation, so we make this optional -- if we can't get the token, we
# just proceed without.
#
# More discussion at https://github.com/Hyperbase/hyperbase/pull/22259
#
imds_auth = {}
try:
imds_ttl = {"X-aws-ec2-metadata-token-ttl-seconds": "60"}
conn.request('PUT', "/latest/api/token", headers=imds_ttl)
resp = conn.getresponse()
resp_content = resp.read()
if resp.status == 200:
imds_token = base_unicodise(resp_content)
imds_auth = {"X-aws-ec2-metadata-token": imds_token}
except Exception:
# Ensure to close the connection in case of timeout or
# anything. This will avoid CannotSendRequest errors for
# the next request.
conn.close()
conn.request('GET', "/latest/meta-data/iam/security-credentials/", headers=imds_auth)
resp = conn.getresponse()
files = resp.read()
if resp.status == 200 and len(files) > 1:
conn.request('GET',
"/latest/meta-data/iam/security-credentials/%s" % files.decode('utf-8'),
headers=imds_auth)
resp=conn.getresponse()
if resp.status == 200:
resp_content = base_unicodise(resp.read())
creds = json.loads(resp_content)
Config().update_option('access_key', base_unicodise(creds['AccessKeyId']))
Config().update_option('secret_key', base_unicodise(creds['SecretAccessKey']))
Config().update_option('access_token', base_unicodise(creds['Token']))
expiration = dateRFC822toPython(base_unicodise(creds['Expiration']))
# Add a timedelta to prevent any expiration if the EC2 machine is not at the right date
self._access_token_expiration = expiration - datetime.timedelta(minutes=15)
self._access_token_last_update = dateRFC822toPython(base_unicodise(creds['LastUpdated']))
# Others variables : Code / Type
else:
raise IOError
else:
raise IOError
except Exception:
raise
def role_refresh(self):
if self._access_token_refresh:
now = datetime.datetime.now(dateutil.tz.tzutc())
if self._access_token_expiration \
and now < self._access_token_expiration \
and self._access_token_last_update \
and self._access_token_last_update <= now:
# current token is still valid. No need to refresh it
return
try:
self.role_config()
except Exception:
warning("Could not refresh role")
def aws_credential_file(self):
try:
aws_credential_file = os.path.expanduser('~/.aws/credentials')
credential_file_from_env = os.environ.get('AWS_CREDENTIAL_FILE')
if credential_file_from_env and \
os.path.isfile(credential_file_from_env):
aws_credential_file = base_unicodise(credential_file_from_env)
elif not os.path.isfile(aws_credential_file):
return
config = PyConfigParser()
debug("Reading AWS credentials from %s" % (aws_credential_file))
with io.open(aws_credential_file, "r",
encoding=getattr(self, 'encoding', 'UTF-8')) as fp:
config_string = fp.read()
try:
try:
# readfp is replaced by read_file in python3,
# but so far readfp it is still available.
config.readfp(io.StringIO(config_string))
except MissingSectionHeaderError:
# if header is missing, this could be deprecated
# credentials file format as described here:
# https://blog.csanchez.org/2011/05/
# then do the hacky-hack and add default header
# to be able to read the file with PyConfigParser()
config_string = u'[default]\n' + config_string
config.readfp(io.StringIO(config_string))
except ParsingError as exc:
raise ValueError(
"Error reading aws_credential_file "
"(%s): %s" % (aws_credential_file, str(exc)))
profile = base_unicodise(os.environ.get('AWS_PROFILE', "default"))
debug("Using AWS profile '%s'" % (profile))
# get_key - helper function to read the aws profile credentials
# including the legacy ones as described here:
# https://blog.csanchez.org/2011/05/
def get_key(profile, key, legacy_key, print_warning=True):
result = None
try:
result = config.get(profile, key)
except NoOptionError as e:
# we may want to skip warning message for optional keys
if print_warning:
warning("Couldn't find key '%s' for the AWS Profile "
"'%s' in the credentials file '%s'",
e.option, e.section, aws_credential_file)
# if the legacy_key defined and original one wasn't found,
# try read the legacy_key
if legacy_key:
try:
key = legacy_key
profile = "default"
result = config.get(profile, key)
warning(
"Legacy configuration key '%s' used, please use"
" the standardized config format as described "
"here: https://aws.amazon.com/blogs/security/a-new-and-standardized-way-to-manage-credentials-in-the-aws-sdks/",
key)
except NoOptionError as e:
pass
if result:
debug("Found the configuration option '%s' for the AWS "
"Profile '%s' in the credentials file %s",
key, profile, aws_credential_file)
return result
profile_access_key = get_key(profile, "aws_access_key_id",
"AWSAccessKeyId")
if profile_access_key:
Config().update_option('access_key',
base_unicodise(profile_access_key))
profile_secret_key = get_key(profile, "aws_secret_access_key",
"AWSSecretKey")
if profile_secret_key:
Config().update_option('secret_key',
base_unicodise(profile_secret_key))
profile_access_token = get_key(profile, "aws_session_token", None,
False)
if profile_access_token:
Config().update_option('access_token',
base_unicodise(profile_access_token))
except IOError as e:
warning("Errno %d accessing credentials file %s", e.errno,
aws_credential_file)
except NoSectionError as e:
warning("Couldn't find AWS Profile '%s' in the credentials file "
"'%s'", profile, aws_credential_file)
def option_list(self):
retval = []
for option in dir(self):
## Skip attributes that start with underscore or are not string, int or bool
option_type = type(getattr(Config, option))
if option.startswith("_") or \
not (option_type in (
type(u"string"), # str
type(42), # int
type(True))): # bool
continue
retval.append(option)
return retval
def read_config_file(self, configfile):
cp = ConfigParser(configfile)
for option in self.option_list():
_option = cp.get(option)
if _option is not None:
_option = _option.strip()
self.update_option(option, _option)
# allow acl_public to be set from the config file too, even though by
# default it is set to None, and not present in the config file.
if cp.get('acl_public'):
self.update_option('acl_public', cp.get('acl_public'))
if cp.get('add_headers'):
for option in cp.get('add_headers').split(","):
(key, value) = option.split(':', 1)
self.extra_headers[key.strip()] = value.strip()
self._parsed_files.append(configfile)
def dump_config(self, stream):
ConfigDumper(stream).dump(u"default", self)
def update_option(self, option, value):
if value is None:
return
#### Handle environment reference
if unicode(value).startswith("$"):
return self.update_option(option, os.getenv(value[1:]))
#### Special treatment of some options
## verbosity must be known to "logging" module
if option == "verbosity":
# support integer verboisities
try:
value = int(value)
except ValueError:
try:
# otherwise it must be a key known to the logging module
try:
# python 3 support
value = logging._levelNames[value]
except AttributeError:
value = logging._nameToLevel[value]
except KeyError:
raise ValueError("Config: verbosity level '%s' is not valid" % value)
elif option == "limitrate":
#convert kb,mb to bytes
if value.endswith("k") or value.endswith("K"):
shift = 10
elif value.endswith("m") or value.endswith("M"):
shift = 20
else:
shift = 0
try:
value = shift and int(value[:-1]) << shift or int(value)
except Exception:
raise ValueError("Config: value of option %s must have suffix m, k, or nothing, not '%s'" % (option, value))
## allow yes/no, true/false, on/off and 1/0 for boolean options
## Some options default to None, if that's the case check the value to see if it is bool
elif (type(getattr(Config, option)) is type(True) or # Config is bool
(getattr(Config, option) is None and is_bool(value))): # Config is None and value is bool
if is_bool_true(value):
value = True
elif is_bool_false(value):
value = False
else:
raise ValueError("Config: value of option '%s' must be Yes or No, not '%s'" % (option, value))
elif type(getattr(Config, option)) is type(42): # int
try:
value = int(value)
except ValueError:
raise ValueError("Config: value of option '%s' must be an integer, not '%s'" % (option, value))
elif option in ["host_base", "host_bucket", "cloudfront_host"]:
if value.startswith("http://"):
value = value[7:]
elif value.startswith("https://"):
value = value[8:]
setattr(Config, option, value)
class ConfigParser(object):
def __init__(self, file, sections = []):
self.cfg = {}
self.parse_file(file, sections)
def parse_file(self, file, sections = []):
debug("ConfigParser: Reading file '%s'" % file)
if type(sections) != type([]):
sections = [sections]
in_our_section = True
r_comment = re.compile(r'^\s*#.*')
r_empty = re.compile(r'^\s*$')
r_section = re.compile(r'^\[([^\]]+)\]')
r_data = re.compile(r'^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)')
r_quotes = re.compile(r'^"(.*)"\s*$')
with io.open(file, "r", encoding=self.get('encoding', 'UTF-8')) as fp:
for line in fp:
if r_comment.match(line) or r_empty.match(line):
continue
is_section = r_section.match(line)
if is_section:
section = is_section.groups()[0]
in_our_section = (section in sections) or (len(sections) == 0)
continue
is_data = r_data.match(line)
if is_data and in_our_section:
data = is_data.groupdict()
if r_quotes.match(data["value"]):
data["value"] = data["value"][1:-1]
self.__setitem__(data["key"], data["value"])
if data["key"] in ("access_key", "secret_key", "gpg_passphrase"):
print_value = ("%s...%d_chars...%s") % (data["value"][:2], len(data["value"]) - 3, data["value"][-1:])
else:
print_value = data["value"]
debug("ConfigParser: %s->%s" % (data["key"], print_value))
continue
warning("Ignoring invalid line in '%s': %s" % (file, line))
def __getitem__(self, name):
return self.cfg[name]
def __setitem__(self, name, value):
self.cfg[name] = value
def get(self, name, default = None):
if name in self.cfg:
return self.cfg[name]
return default
class ConfigDumper(object):
    """Serialize a Config object to an INI-style text stream."""
    def __init__(self, stream):
        # 'stream' must accept unicode text via write().
        self.stream = stream

    @staticmethod
    def _level_to_name(value):
        """Turn a numeric logging level back into its symbolic name.

        Unknown levels are returned unchanged so the dumped value stays valid.
        """
        try:
            try:
                # Python 2 exposes the mapping as logging._levelNames
                return logging._levelNames[value]
            except AttributeError:
                # Python 3 renamed it to logging._levelToName
                return logging._levelToName[value]
        except KeyError:
            # Level number with no symbolic name - keep it numeric.
            return value

    def dump(self, section, config):
        """Write '[section]' followed by every option of 'config'.

        'config' must provide option_list() and expose each option as an
        attribute.  The 'verbosity' option is converted from a level number
        back into a string when possible.
        """
        self.stream.write(u"[%s]\n" % section)
        for option in config.option_list():
            value = getattr(config, option)
            if option == "verbosity" and isinstance(value, int):
                value = self._level_to_name(value)
            self.stream.write(u"%s = %s\n" % (option, value))
# vim:et:ts=4:sts=4:ai
| 29,938 | Python | .py | 641 | 33.75975 | 168 | 0.550101 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,391 | ExitCodes.py | s3tools_s3cmd/S3/ExitCodes.py | # -*- coding: utf-8 -*-
# patterned on /usr/include/sysexits.h
EX_OK = 0
EX_GENERAL = 1
EX_PARTIAL = 2 # some parts of the command succeeded, while others failed
EX_SERVERMOVED = 10 # 301: Moved permanently & 307: Moved temp
EX_SERVERERROR = 11 # 400, 405, 411, 416, 417, 501: Bad request, 504: Gateway Time-out
EX_NOTFOUND = 12 # 404: Not found
EX_CONFLICT = 13 # 409: Conflict (ex: bucket error)
EX_PRECONDITION = 14 # 412: Precondition failed
EX_SERVICE = 15 # 503: Service not available or slow down
EX_USAGE = 64 # The command was used incorrectly (e.g. bad command line syntax)
EX_DATAERR = 65 # Failed file transfer, upload or download
EX_SOFTWARE = 70 # internal software error (e.g. S3 error of unknown specificity)
EX_OSERR = 71 # system error (e.g. out of memory)
EX_OSFILE = 72 # OS error (e.g. invalid Python version)
EX_IOERR = 74 # An error occurred while doing I/O on some file.
EX_TEMPFAIL = 75 # temporary failure (S3DownloadError or similar, retry later)
EX_ACCESSDENIED = 77 # Insufficient permissions to perform the operation on S3
EX_CONFIG = 78 # Configuration file error
EX_CONNECTIONREFUSED = 111 # TCP connection refused (e.g. connecting to a closed server port)
_EX_SIGNAL = 128
_EX_SIGINT = 2
EX_BREAK = _EX_SIGNAL + _EX_SIGINT # Control-C (KeyboardInterrupt raised)
class ExitScoreboard(object):
    """Tally per-item outcomes and derive the best overall exit code."""
    def __init__(self):
        self._success = 0
        self._notfound = 0
        self._failed = 0

    def success(self):
        """Record one successfully processed item."""
        self._success += 1

    def notfound(self):
        """Record one item that was not found."""
        self._notfound += 1

    def failed(self):
        """Record one item that failed."""
        self._failed += 1

    def rc(self):
        """Return the exit code that best summarizes the recorded tallies."""
        if self._success:
            if not self._failed and not self._notfound:
                return EX_OK
            if self._failed:
                return EX_PARTIAL
            # successes mixed with not-found entries only
            return EX_GENERAL
        if self._failed:
            return EX_GENERAL
        if self._notfound:
            return EX_NOTFOUND
        return EX_GENERAL
| 2,254 | Python | .py | 49 | 40.204082 | 94 | 0.590537 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,392 | FileDict.py | s3tools_s3cmd/S3/FileDict.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3 manager
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import
import logging
from .SortedDict import SortedDict
from .Crypto import hash_file_md5
from . import Utils
from . import Config
zero_length_md5 = "d41d8cd98f00b204e9800998ecf8427e"
cfg = Config.Config()
class FileDict(SortedDict):
    """SortedDict specialized for file lists.

    On top of the plain mapping (relative path -> file info dict) it keeps
    two indexes: md5 -> first file seen with that sum, and
    (dev, inode) -> md5 for hardlink detection.
    """
    def __init__(self, mapping = None, ignore_case = True, **kwargs):
        SortedDict.__init__(self, mapping = mapping or {}, ignore_case = ignore_case, **kwargs)
        self.hardlinks_md5 = dict() # { dev: { inode: md5 } }
        self.by_md5 = dict() # { md5: relative_file } - first file recorded with that md5
    def record_md5(self, relative_file, md5):
        """Remember 'relative_file' as the canonical holder of 'md5'.

        Empty names, unknown md5s and the md5 of a zero-length file are
        ignored; only the first file for a given md5 is kept.
        """
        if not relative_file:
            return
        if md5 is None:
            return
        if md5 == zero_length_md5:
            return
        if md5 not in self.by_md5:
            self.by_md5[md5] = relative_file
    def find_md5_one(self, md5):
        """Return the recorded file for 'md5', or None when unknown."""
        if not md5:
            return None
        return self.by_md5.get(md5, None)
    def get_md5(self, relative_file):
        """returns md5 if it can, or raises IOError if file is unreadable"""
        md5 = None
        if 'md5' in self[relative_file]:
            return self[relative_file]['md5']
        # Try to reuse a hardlink sibling's md5 before reading the file.
        md5 = self.get_hardlink_md5(relative_file)
        if md5 is None and 'md5' in cfg.sync_checks:
            logging.debug(u"doing file I/O to read md5 of %s" % relative_file)
            md5 = hash_file_md5(self[relative_file]['full_name'])
        self.record_md5(relative_file, md5)
        self[relative_file]['md5'] = md5
        return md5
    def record_hardlink(self, relative_file, dev, inode, md5, size):
        """Remember the md5 of the (dev, inode) pair backing 'relative_file'.

        Skipped for unknown md5s, zero-length files and on platforms that
        report dev/inode as 0 (Windows).
        """
        if md5 is None:
            return
        if size == 0:
            # don't record 0-length files
            return
        if dev == 0 or inode == 0:
            # Windows
            return
        if dev not in self.hardlinks_md5:
            self.hardlinks_md5[dev] = dict()
        if inode not in self.hardlinks_md5[dev]:
            self.hardlinks_md5[dev][inode] = md5
    def get_hardlink_md5(self, relative_file):
        """Return the md5 recorded for this file's (dev, inode), or None."""
        try:
            dev = self[relative_file]['dev']
            inode = self[relative_file]['inode']
            md5 = self.hardlinks_md5[dev][inode]
        except KeyError:
            md5 = None
        return md5
| 2,744 | Python | .py | 69 | 32.072464 | 95 | 0.569231 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,393 | Exceptions.py | s3tools_s3cmd/S3/Exceptions.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3 manager - Exceptions library
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import
from logging import debug, error
import sys
import S3.BaseUtils
import S3.Utils
from . import ExitCodes
if sys.version_info >= (3, 0):
PY3 = True
# In python 3, unicode -> str, and str -> bytes
unicode = str
else:
PY3 = False
## External exceptions
from ssl import SSLError as S3SSLError
try:
from ssl import CertificateError as S3SSLCertificateError
except ImportError:
class S3SSLCertificateError(Exception):
pass
try:
from xml.etree.ElementTree import ParseError as XmlParseError
except ImportError:
# ParseError was only added in python2.7, before ET was raising ExpatError
from xml.parsers.expat import ExpatError as XmlParseError
## s3cmd exceptions
class S3Exception(Exception):
    """Base class of all s3cmd exceptions; stores its message as unicode."""
    def __init__(self, message=""):
        self.message = S3.Utils.unicodise(message)

    def __str__(self):
        ## Go through __unicode__() because subclasses may override it.
        text = self.__unicode__()
        if PY3:
            return text
        # Python 2: __str__ must return encoded bytes.
        return S3.Utils.deunicodise(text)

    def __unicode__(self):
        return self.message

    ## (Base)Exception.message has been deprecated in Python 2.6
    @property
    def message(self):
        return self._message

    @message.setter
    def message(self, message):
        self._message = message
class S3Error(S3Exception):
    """Non-2xx HTTP response received from the S3 server.

    Parses the XML error document (when present) into self.info and exposes
    the convenience attributes status, reason, code, message and resource.
    """
    def __init__(self, response):
        # 'response' is the dict built by the HTTP layer: it must contain
        # "status" and "reason"; "headers" and "data" are optional.
        self.status = response["status"]
        self.reason = response["reason"]
        self.info = {
            "Code": "",
            "Message": "",
            "Resource": ""
        }
        debug("S3Error: %s (%s)" % (self.status, self.reason))
        if "headers" in response:
            for header in response["headers"]:
                debug("HttpHeader: %s: %s" % (header, response["headers"][header]))
        if "data" in response and response["data"]:
            try:
                tree = S3.BaseUtils.getTreeFromXml(response["data"])
            except XmlParseError:
                # Some servers return non-XML bodies; keep the empty defaults.
                debug("Not an XML response")
            else:
                try:
                    self.info.update(self.parse_error_xml(tree))
                except Exception as e:
                    error("Error parsing xml: %s. ErrorXML: %s" % (e, response["data"]))
        self.code = self.info["Code"]
        self.message = self.info["Message"]
        self.resource = self.info["Resource"]
    def __unicode__(self):
        # Format: "<status> (<code or reason>)[: <message>]"
        retval = u"%d " % (self.status)
        retval += (u"(%s)" % (self.code or self.reason))
        error_msg = self.message
        if error_msg:
            retval += (u": %s" % error_msg)
        return retval
    def get_error_code(self):
        """Map this error's HTTP status to an s3cmd process exit code."""
        if self.status in [301, 307]:
            return ExitCodes.EX_SERVERMOVED
        elif self.status in [400, 405, 411, 416, 417, 501, 504]:
            return ExitCodes.EX_SERVERERROR
        elif self.status == 403:
            return ExitCodes.EX_ACCESSDENIED
        elif self.status == 404:
            return ExitCodes.EX_NOTFOUND
        elif self.status == 409:
            return ExitCodes.EX_CONFLICT
        elif self.status == 412:
            return ExitCodes.EX_PRECONDITION
        elif self.status == 500:
            return ExitCodes.EX_SOFTWARE
        elif self.status in [429, 503]:
            return ExitCodes.EX_SERVICE
        else:
            return ExitCodes.EX_SOFTWARE
    @staticmethod
    def parse_error_xml(tree):
        """Extract the children of the <Error> element into a dict.

        Accepts either a tree rooted at <Error> or one containing it.
        Raises S3ResponseError when no <Error> element can be found.
        """
        info = {}
        error_node = tree
        if not error_node.tag == "Error":
            error_node = tree.find(".//Error")
        if error_node is not None:
            for child in error_node:
                if child.text != "":
                    debug("ErrorXML: " + child.tag + ": " + repr(child.text))
                    info[child.tag] = child.text
        else:
            raise S3ResponseError("Malformed error XML returned from remote server.")
        return info
class CloudFrontError(S3Error):
    """Error response from the CloudFront API."""
    pass
class S3UploadError(S3Exception):
    """Upload to S3 failed."""
    pass
class S3DownloadError(S3Exception):
    """Download from S3 failed."""
    pass
class S3RequestError(S3Exception):
    """Building or sending the request to S3 failed."""
    pass
class S3ResponseError(S3Exception):
    """The response received from S3 could not be understood."""
    pass
class InvalidFileError(S3Exception):
    """A local file is missing, unreadable or otherwise unusable."""
    pass
class ParameterError(S3Exception):
    """Invalid command-line argument or parameter combination."""
    pass
# vim:et:ts=4:sts=4:ai
| 4,852 | Python | .py | 134 | 28.529851 | 89 | 0.595904 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,394 | S3Uri.py | s3tools_s3cmd/S3/S3Uri.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3 manager
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import, print_function
import os
import re
import sys
from .Utils import unicodise, deunicodise, check_bucket_name_dns_support
from . import Config
PY3 = (sys.version_info >= (3, 0))
class S3Uri(object):
    """Base class and factory for the URI schemes understood by s3cmd.

    S3Uri(string) returns an instance of the first subclass whose __init__
    accepts the string (s3://, s3fs://, cf://, file:// or a plain path).
    """
    type = None
    _subclasses = None
    def __new__(self, string):
        # NOTE: 'self' is really the class here (this is __new__).
        if not self._subclasses:
            ## Generate a list of all subclasses of S3Uri
            self._subclasses = []
            dict = sys.modules[__name__].__dict__
            for something in dict:
                if type(dict[something]) is not type(self):
                    # not a class - skip
                    continue
                if issubclass(dict[something], self) and dict[something] != self:
                    self._subclasses.append(dict[something])
        for subclass in self._subclasses:
            try:
                instance = object.__new__(subclass)
                instance.__init__(string)
                return instance
            except ValueError:
                # Subclass __init__ raises ValueError when 'string' does not
                # match its URI scheme - try the next subclass.
                continue
        raise ValueError("%s: not a recognized URI" % string)
    def __str__(self):
        if PY3:
            return self.uri()
        else:
            # Python 2: __str__ must return encoded bytes.
            return deunicodise(self.uri())
    def __unicode__(self):
        return self.uri()
    def __repr__(self):
        return repr("<%s: %s>" % (self.__class__.__name__, self.__unicode__()))
    def public_url(self):
        # Subclasses with an anonymous HTTP representation override this.
        raise ValueError("This S3 URI does not have Anonymous URL representation")
    def basename(self):
        """Return the last path component of the URI."""
        return self.__unicode__().split("/")[-1]
class S3UriS3(S3Uri):
    """URI of the form s3://bucket[/object]."""
    type = "s3"
    _re = re.compile("^s3:///*([^/]*)/?(.*)", re.IGNORECASE | re.UNICODE)
    def __init__(self, string):
        match = self._re.match(string)
        if not match:
            raise ValueError("%s: not a S3 URI" % string)
        groups = match.groups()
        self._bucket = groups[0]
        self._object = groups[1]
    def bucket(self):
        return self._bucket
    def object(self):
        return self._object
    def has_bucket(self):
        return bool(self._bucket)
    def has_object(self):
        return bool(self._object)
    def uri(self):
        return u"/".join([u"s3:/", self._bucket, self._object])
    def is_dns_compatible(self):
        # True when the bucket name can be used as a DNS label
        # (virtual-host style addressing).
        return check_bucket_name_dns_support(Config.Config().host_bucket, self._bucket)
    def public_url(self):
        """Anonymous HTTP(S) URL of the object (virtual-host or path style)."""
        public_url_protocol = "http"
        if Config.Config().public_url_use_https:
            public_url_protocol = "https"
        if self.is_dns_compatible():
            return "%s://%s.%s/%s" % (public_url_protocol, self._bucket, Config.Config().host_base, self._object)
        else:
            return "%s://%s/%s/%s" % (public_url_protocol, Config.Config().host_base, self._bucket, self._object)
    def host_name(self):
        if self.is_dns_compatible():
            return "%s.s3.amazonaws.com" % (self._bucket)
        else:
            return "s3.amazonaws.com"
    @staticmethod
    def compose_uri(bucket, object = ""):
        """Build an s3:// URI string from 'bucket' and 'object'."""
        return u"s3://%s/%s" % (bucket, object)
    @staticmethod
    def httpurl_to_s3uri(http_url):
        """Convert an http(s) S3 endpoint URL into the equivalent S3Uri.

        Handles both path style (http://s3.amazonaws.com/bucket/object) and
        virtual-host style (http://bucket.s3.amazonaws.com/object) URLs.
        Raises ValueError when the hostname is not an S3 endpoint.
        """
        m = re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE | re.UNICODE)
        hostname, object = m.groups()[1:]
        hostname = hostname.lower()
        # Worst case scenario, we would like to be able to match something like
        # my.website.com.s3-fips.dualstack.us-west-1.amazonaws.com.cn
        m = re.match(r"(.*\.)?s3(?:\-[^\.]*)?(?:\.dualstack)?(?:\.[^\.]*)?\.amazonaws\.com(?:\.cn)?$",
                     hostname, re.IGNORECASE | re.UNICODE)
        if not m:
            raise ValueError("Unable to parse URL: %s" % http_url)
        bucket = m.groups()[0]
        if not bucket:
            ## old-style url: http://s3.amazonaws.com/bucket/object
            if "/" not in object:
                ## no object given
                bucket = object
                object = ""
            else:
                ## bucket/object
                bucket, object = object.split("/", 1)
        else:
            ## new-style url: http://bucket.s3.amazonaws.com/object
            bucket = bucket.rstrip('.')
        return S3Uri(
            u"s3://%(bucket)s/%(object)s" % {
                'bucket' : bucket,
                'object' : object
            }
        )
class S3UriS3FS(S3Uri):
    """URI of the form s3fs://filesystem/path."""
    type = "s3fs"
    _re = re.compile("^s3fs:///*([^/]*)/?(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        match = self._re.match(string)
        if match is None:
            raise ValueError("%s: not a S3fs URI" % string)
        fsname, path = match.groups()
        self._fsname = fsname
        self._path = path.split("/")

    def fsname(self):
        return self._fsname

    def path(self):
        return "/".join(self._path)

    def uri(self):
        return u"s3fs://%s/%s" % (self._fsname, self.path())
class S3UriFile(S3Uri):
    """Local filesystem path, optionally written as a file:// URI."""
    type = "file"
    _re = re.compile(r"^(\w+://)?(.*)", re.UNICODE)

    def __init__(self, string):
        scheme, rest = self._re.match(string).groups()
        if scheme not in (None, "file://"):
            raise ValueError("%s: not a file:// URI" % string)
        # Plain paths are split on the OS separator, file:// URIs on '/'.
        if scheme is None:
            self._path = rest.split(os.sep)
        else:
            self._path = rest.split("/")

    def path(self):
        return os.sep.join(self._path)

    def uri(self):
        return u"/".join([u"file:/"] + self._path)

    def isdir(self):
        return os.path.isdir(deunicodise(self.path()))

    def dirname(self):
        parent = os.path.dirname(deunicodise(self.path()))
        return unicodise(parent)

    def basename(self):
        leaf = os.path.basename(deunicodise(self.path()))
        return unicodise(leaf)
class S3UriCloudFront(S3Uri):
    """CloudFront URI: cf://<distribution-id>[/<invalidation-request-id>]."""
    type = "cf"
    _re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        match = self._re.match(string)
        if match is None:
            raise ValueError("%s: not a CloudFront URI" % string)
        dist, request = match.groups()
        self._dist_id = dist
        # An empty or lone-slash trailing part means "no request id".
        self._request_id = request if request and request != "/" else None

    def dist_id(self):
        return self._dist_id

    def request_id(self):
        return self._request_id

    def uri(self):
        if self._request_id:
            return u"cf://%s/%s" % (self._dist_id, self._request_id)
        return u"cf://" + self._dist_id
if __name__ == "__main__":
    # Ad-hoc smoke test: exercise each S3Uri subclass and print the parts
    # it parsed out of a sample URI.
    uri = S3Uri("s3://bucket/object")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("bucket =", uri.bucket())
    print("object =", uri.object())
    print()
    uri = S3Uri("s3://bucket")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("bucket =", uri.bucket())
    print()
    uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("path =", uri.path())
    print()
    uri = S3Uri("/path/to/local/file.txt")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("path =", uri.path())
    print()
    uri = S3Uri("cf://1234567890ABCD/")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("dist_id =", uri.dist_id())
    print()
# vim:et:ts=4:sts=4:ai
| 7,874 | Python | .py | 209 | 29.574163 | 113 | 0.542793 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,395 | FileLists.py | s3tools_s3cmd/S3/FileLists.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Create and compare lists of files/objects
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import
from .S3 import S3
from .Config import Config
from .S3Uri import S3Uri
from .FileDict import FileDict
from .BaseUtils import dateS3toUnix, dateRFC822toUnix, s3path
from .Utils import unicodise, deunicodise, deunicodise_s, replace_nonprintables
from .Exceptions import ParameterError
from .HashCache import HashCache
from logging import debug, info, warning
import os
import sys
import glob
import re
import errno
import io
from stat import S_ISDIR
PY3 = (sys.version_info >= (3, 0))
__all__ = ["fetch_local_list", "fetch_remote_list", "compare_filelists"]
def _os_walk_unicode(top):
    '''
    Reimplementation of python's os.walk to nicely support unicode in input as in output.
    '''
    try:
        entries = os.listdir(deunicodise(top))
    except Exception:
        # Unreadable/vanished directory: yield nothing, matching os.walk's
        # default of silently skipping listdir errors.
        return
    subdirs = []
    plain_files = []
    for entry in entries:
        entry = unicodise(entry)
        if os.path.isdir(deunicodise(os.path.join(top, entry))):
            # Excluded directories are pruned here so we never descend into them.
            if not handle_exclude_include_walk_dir(top, entry):
                subdirs.append(entry)
        else:
            plain_files.append(entry)
    yield top, subdirs, plain_files
    for entry in subdirs:
        child = os.path.join(top, entry)
        if not os.path.islink(deunicodise(child)):
            for triple in _os_walk_unicode(child):
                yield triple
def handle_exclude_include_walk_dir(root, dirname):
    '''
    Decide whether the directory root/dirname should be excluded from the walk.

    Only --exclude/--include patterns that explicitly target directories are
    considered here; pruning them early avoids recursing into trees that are
    known to be unwanted.  Returns True to exclude, False to include.
    '''
    cfg = Config()
    # python versions end their patterns (from globs) differently, test for different styles; check python3.6+ styles first
    directory_patterns = (u'/)$', u'/)\\Z', u'\\/$', u'\\/\\Z(?ms)')

    def _targets_directory(regex):
        # Only patterns whose compiled form ends like a directory match count.
        return any(regex.pattern.endswith(suffix) for suffix in directory_patterns)

    d = os.path.join(root, dirname, '')
    debug(u"CHECK: '%s'" % d)
    excluded = False
    for regex in cfg.exclude:
        if _targets_directory(regex) and regex.search(d):
            excluded = True
            debug(u"EXCL-MATCH: '%s'" % cfg.debug_exclude[regex])
            break
    if excluded:
        ## No need to check for --include if not excluded
        for regex in cfg.include:
            if not _targets_directory(regex):
                continue
            debug(u"INCL-TEST: '%s' ~ %s" % (d, regex.pattern))
            if regex.search(d):
                excluded = False
                debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[regex]))
                break
    if excluded:
        ## Still excluded - ok, action it
        debug(u"EXCLUDE: '%s'" % d)
    else:
        debug(u"PASS: '%s'" % d)
    return excluded
def _fswalk_follow_symlinks(path):
    '''
    Walk filesystem, following symbolic links (but without recursion), on python2.4 and later

    If a symlink directory loop is detected, emit a warning and skip.
    E.g.: dir1/dir2/sym-dir -> ../dir2
    '''
    assert os.path.isdir(deunicodise(path)) # only designed for directory argument
    walkdirs = set([path])
    # First pass: collect every symlinked directory that is safe to enter.
    for dirpath, dirnames, filenames in _os_walk_unicode(path):
        real_dirpath = unicodise(os.path.realpath(deunicodise(dirpath)))
        for dirname in dirnames:
            current = os.path.join(dirpath, dirname)
            real_current = unicodise(os.path.realpath(deunicodise(current)))
            if os.path.islink(deunicodise(current)):
                # Loop guard: skip symlinks pointing at the current directory
                # or at any of its ancestors.
                if (real_dirpath == real_current or
                    real_dirpath.startswith(real_current + os.path.sep)):
                    warning("Skipping recursively symlinked directory %s" % dirname)
                else:
                    walkdirs.add(current)
    # Second pass: walk the original tree plus each accepted symlink target.
    # NOTE(review): 'walkdirs' is a set, so the order in which these roots are
    # walked is not deterministic - confirm callers don't depend on ordering.
    for walkdir in walkdirs:
        for dirpath, dirnames, filenames in _os_walk_unicode(walkdir):
            yield (dirpath, dirnames, filenames)
def _fswalk_no_symlinks(path):
    '''
    Directory tree generator that never descends into symlinked directories.

    path (str) is the root of the directory tree to walk
    '''
    for triple in _os_walk_unicode(path):
        yield triple
def filter_exclude_include(src_list):
    """Apply the configured --exclude/--include patterns to 'src_list'.

    Entries matching an exclude (and no overriding include) are moved from
    src_list into a new FileDict.  Returns (src_list, exclude_list).
    """
    debug(u"Applying --exclude/--include")
    cfg = Config()
    exclude_list = FileDict(ignore_case = False)
    # NOTE(review): entries are deleted from src_list while iterating over
    # src_list.keys() - this relies on keys() returning a materialized list
    # (as SortedDict appears to provide), not a live dict view; confirm
    # before porting this code.
    for file in src_list.keys():
        debug(u"CHECK: '%s'" % file)
        excluded = False
        for r in cfg.exclude:
            if r.search(file):
                excluded = True
                debug(u"EXCL-MATCH: '%s'" % cfg.debug_exclude[r])
                break
        if excluded:
            ## No need to check for --include if not excluded
            for r in cfg.include:
                if r.search(file):
                    excluded = False
                    debug(u"INCL-MATCH: '%s'" % cfg.debug_include[r])
                    break
        if excluded:
            ## Still excluded - ok, action it
            debug(u"EXCLUDE: '%s'" % file)
            exclude_list[file] = src_list[file]
            del(src_list[file])
            continue
        else:
            debug(u"PASS: '%s'" % file)
    return src_list, exclude_list
def _get_filelist_from_file(cfg, local_path):
    """Read the --files-from file(s) and shape the result like os.walk().

    Each listed name is resolved relative to 'local_path'; '-' reads from
    stdin.  Unreadable files are skipped with a warning.  Returns a sorted
    list of (dirname, [], [basenames]) tuples.
    """
    collected = {}
    for fname in cfg.files_from:
        handle = None
        try:
            if fname == u'-':
                # Read names from stdin without taking ownership of the fd.
                handle = io.open(sys.stdin.fileno(), mode='r', closefd=False)
            else:
                try:
                    handle = io.open(deunicodise(fname), mode='r')
                except IOError as e:
                    warning(u"--files-from input file %s could not be opened for reading (%s), skipping." % (fname, e.strerror))
                    continue
            for line in handle:
                line = unicodise(line).strip()
                line = os.path.normpath(os.path.join(local_path, line))
                dirname = unicodise(os.path.dirname(deunicodise(line)))
                basename = unicodise(os.path.basename(deunicodise(line)))
                collected.setdefault(dirname, []).append(basename)
        finally:
            if handle:
                handle.close()
    # reformat to match os.walk()
    result = []
    for dirname in sorted(collected):
        names = collected[dirname]
        names.sort()
        result.append((dirname, [], names))
    return result
def fetch_local_list(args, is_src = False, recursive = None, with_dirs=False):
    """Compile the list of local files for 'args' (one path or a list).

    Returns a tuple (local_list, single_file, exclude_list, total_size):
    - local_list: FileDict of entries that passed --exclude/--include
    - single_file: True iff exactly one FILE URI (not a directory) was given
    - exclude_list: FileDict of entries rejected by the filters
    - total_size: accumulated byte size of the kept entries
    """
    def _fetch_local_list_info(loc_list):
        # stat() every entry, filling in size/mtime/dev/inode/uid/gid, and
        # resolve md5 sums via the HashCache (reading file contents only on
        # a cache miss).  Returns the total size in bytes.
        len_loc_list = len(loc_list)
        total_size = 0
        info(u"Running stat() and reading/calculating MD5 values on %d files, this may take some time..." % len_loc_list)
        counter = 0
        for relative_file in loc_list:
            counter += 1
            if counter % 1000 == 0:
                info(u"[%d/%d]" % (counter, len_loc_list))
            if relative_file == '-':
                # stdin pseudo-entry: nothing to stat
                continue
            loc_list_item = loc_list[relative_file]
            full_name = loc_list_item['full_name']
            is_dir = loc_list_item['is_dir']
            try:
                sr = os.stat_result(os.stat(deunicodise(full_name)))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # file was removed async to us getting the list
                    continue
                else:
                    raise
            if is_dir:
                size = 0
            else:
                size = sr.st_size
            loc_list[relative_file].update({
                'size' : size,
                'mtime' : sr.st_mtime,
                'dev' : sr.st_dev,
                'inode' : sr.st_ino,
                'uid' : sr.st_uid,
                'gid' : sr.st_gid,
                'sr': sr, # save it all, may need it in preserve_attrs_list
                ## TODO: Possibly more to save here...
            })
            # NOTE(review): directories store size 0 above but their stat
            # st_size still gets added to total_size here - confirm intended.
            total_size += sr.st_size
            if is_dir:
                # A md5 can't be calculated with a directory path
                continue
            if 'md5' in cfg.sync_checks:
                md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                if md5 is None:
                    try:
                        # this does the file I/O
                        md5 = loc_list.get_md5(relative_file)
                    except IOError:
                        # unreadable file: leave the entry without an md5
                        continue
                    cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
                loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5, sr.st_size)
        return total_size
    def _get_filelist_local(loc_list, local_uri, cache, with_dirs):
        # Populate loc_list with entries below local_uri (or the single file
        # it names).  Returns (loc_list, single_file).
        info(u"Compiling list of local files...")
        if local_uri.basename() == "-":
            # Special case: uploading from stdin.
            try:
                uid = os.geteuid()
                gid = os.getegid()
            except Exception:
                # Platforms without geteuid/getegid (e.g. Windows)
                uid = 0
                gid = 0
            loc_list["-"] = {
                'full_name' : '-',
                'size' : -1,
                'mtime' : -1,
                'uid' : uid,
                'gid' : gid,
                'dev' : 0,
                'inode': 0,
                'is_dir': False,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = local_uri.basename()
            local_path = local_uri.path()
            if is_src and len(cfg.files_from):
                # Only the source side honours --files-from.
                filelist = _get_filelist_from_file(cfg, local_path)
                single_file = False
            else:
                if cfg.follow_symlinks:
                    filelist = _fswalk_follow_symlinks(local_path)
                else:
                    filelist = _fswalk_no_symlinks(local_path)
                single_file = False
        else:
            local_base = ""
            local_path = local_uri.dirname()
            filelist = [( local_path, [], [local_uri.basename()] )]
            single_file = True
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            if not with_dirs:
                iter_elements = ((files, False),)
            else:
                # Also record directory entries (trailing-'/' keys).
                iter_elements = ((dirs, True), (files, False))
            for elements, is_dir in iter_elements:
                for f in elements:
                    full_name = os.path.join(root, f)
                    if not is_dir and not os.path.isfile(deunicodise(full_name)):
                        if os.path.exists(deunicodise(full_name)):
                            warning(u"Skipping over non regular file: %s" % full_name)
                        continue
                    if os.path.islink(deunicodise(full_name)):
                        if not cfg.follow_symlinks:
                            warning(u"Skipping over symbolic link: %s" % full_name)
                            continue
                    relative_file = os.path.join(rel_root, f)
                    if os.path.sep != "/":
                        # Convert non-unix dir separators to '/'
                        relative_file = "/".join(relative_file.split(os.path.sep))
                    if cfg.urlencoding_mode == "normal":
                        relative_file = replace_nonprintables(relative_file)
                    if relative_file.startswith('./'):
                        relative_file = relative_file[2:]
                    if is_dir and relative_file and relative_file[-1] != '/':
                        # Directory keys always carry a trailing slash.
                        relative_file += '/'
                    loc_list[relative_file] = {
                        'full_name' : full_name,
                        'is_dir': is_dir,
                    }
        return loc_list, single_file
    def _maintain_cache(cache, local_list):
        # if getting the file list from files_from, it is going to be
        # a subset of the actual tree. We should not purge content
        # outside of that subset as we don't know if it's valid or
        # not. Leave it to a non-files_from run to purge.
        if cfg.cache_file and len(cfg.files_from) == 0:
            cache.mark_all_for_purge()
            if PY3:
                local_list_val_iter = local_list.values()
            else:
                local_list_val_iter = local_list.itervalues()
            for f_info in local_list_val_iter:
                inode = f_info.get('inode', 0)
                if not inode:
                    continue
                cache.unmark_for_purge(f_info['dev'], inode, f_info['mtime'],
                                       f_info['size'])
            cache.purge()
            cache.save(cfg.cache_file)
    cfg = Config()
    cache = HashCache()
    if cfg.cache_file and os.path.isfile(deunicodise_s(cfg.cache_file)) and os.path.getsize(deunicodise_s(cfg.cache_file)) > 0:
        cache.load(cfg.cache_file)
    else:
        info(u"Cache file not found or empty, creating/populating it.")
    local_uris = []
    local_list = FileDict(ignore_case = False)
    single_file = False
    if type(args) not in (list, tuple, set):
        args = [args]
    if recursive == None:
        recursive = cfg.recursive
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError("Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" % arg)
        local_uris.append(uri)
    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache, with_dirs)
    ## Single file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. Ie it is False if the URI was of a DIR
    ## and that dir contained only one FILE. That's not
    ## a case of single_file==True.
    if len(local_list) > 1:
        single_file = False
    local_list, exclude_list = filter_exclude_include(local_list)
    total_size = _fetch_local_list_info(local_list)
    _maintain_cache(cache, local_list)
    return local_list, single_file, exclude_list, total_size
def fetch_remote_list(args, require_attribs = False, recursive = None, uri_params = {}):
    """Compile the list of remote S3 objects for 'args' (one URI or a list).

    Supports '*' and '?' wildcards in non-recursive mode.  Returns a tuple
    (remote_list, exclude_list, total_size) analogous to fetch_local_list().
    NOTE(review): 'uri_params' has a mutable default dict; it appears to be
    read-only here (only passed to bucket_list) - confirm before changing.
    """
    def _get_remote_attribs(uri, remote_item):
        # HEAD the object to fill in size/md5/timestamp; an md5 stored in
        # the 's3cmd-attrs' metadata takes precedence over the ETag.
        response = S3(cfg).object_info(uri)
        if not response.get('headers'):
            return
        remote_item.update({
            'size': int(response['headers']['content-length']),
            'md5': response['headers']['etag'].strip('"\''),
            'timestamp': dateRFC822toUnix(response['headers']['last-modified'])
        })
        try:
            md5 = response['s3cmd-attrs']['md5']
            remote_item.update({'md5': md5})
            debug(u"retrieved md5=%s from headers" % md5)
        except KeyError:
            pass
    def _get_filelist_remote(remote_uri, recursive = True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }
        info(u"Retrieving list of remote files for %s ..." % remote_uri)
        total_size = 0
        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(),
                                  recursive = recursive, uri_params = uri_params)
        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            # Strip the last path component so keys become relative to the
            # containing "directory".
            rem_base = rem_base[:rem_base.rfind('/')+1]
            remote_uri = S3Uri(u"s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case = False)
        break_now = False
        for object in response['list']:
            object_key = object['Key']
            object_size = int(object['Size'])
            is_dir = (object_key[-1] == '/')
            if object_key == rem_base_original and not is_dir:
                ## We asked for one file and we got that file :-)
                key = s3path.basename(object_key)
                object_uri_str = remote_uri_original.uri()
                break_now = True
                # Remove whatever has already been put to rem_list
                rem_list = FileDict(ignore_case = False)
            else:
                # Beware - this may be '' if object_key==rem_base !!
                key = object_key[rem_base_len:]
                object_uri_str = remote_uri.uri() + key
                if not key:
                    # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
                    warning(u"Found empty root object name on S3, ignoring.")
                    continue
            rem_list[key] = {
                'size' : object_size,
                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                'md5' : object['ETag'].strip('"\''),
                'object_key' : object_key,
                'object_uri_str' : object_uri_str,
                'base_uri' : remote_uri,
                'dev' : None,
                'inode' : None,
                'is_dir': is_dir,
            }
            if '-' in rem_list[key]['md5']: # always get it for multipart uploads
                _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
            md5 = rem_list[key]['md5']
            rem_list.record_md5(key, md5)
            total_size += object_size
            if break_now:
                break
        return rem_list, total_size
    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case = False)
    if type(args) not in (list, tuple, set):
        args = [args]
    if recursive == None:
        recursive = cfg.recursive
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)
    total_size = 0
    if recursive:
        for uri in remote_uris:
            objectlist, tmp_total_size = _get_filelist_remote(uri, recursive = True)
            total_size += tmp_total_size
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = uri.uri()
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            wildcard_split_result = re.split(r"\*|\?", uri_str, maxsplit=1)
            if len(wildcard_split_result) == 2:
                ## If wildcards found
                prefix, rest = wildcard_split_result
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = '/' in rest
                objectlist, tmp_total_size = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
                total_size += tmp_total_size
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = s3path.basename(uri.object())
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                is_dir = (key and key[-1] == '/')
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': uri.uri(),
                    'object_key': uri.object(),
                    'is_dir': is_dir,
                }
                if require_attribs:
                    _get_remote_attribs(uri, remote_item)
                remote_list[key] = remote_item
                md5 = remote_item.get('md5')
                if md5:
                    remote_list.record_md5(key, md5)
                total_size += remote_item.get('size', 0)
    remote_list, exclude_list = filter_exclude_include(remote_list)
    return remote_list, exclude_list, total_size
def compare_filelists(src_list, dst_list, src_remote, dst_remote):
    """Compare source and destination FileDicts and plan a sync.

    Returns (src_list, dst_list, update_list, copy_pairs):
      - src_list: new files still to be transferred
      - dst_list: files present only on the destination (delete candidates)
      - update_list: changed files, transferred after src_list
      - copy_pairs: files obtainable via a destination-side copy because an
        identical md5 already exists (or will exist) at the destination
    src_remote / dst_remote flag whether each side is S3 (True) or local.
    """
    def __direction_str(is_remote):
        # Human-readable side name, used only in the debug line below.
        return is_remote and "remote" or "local"

    def _compare(src_list, dst_lst, src_remote, dst_remote, file):
        """Return True if src_list[file] matches dst_list[file], else False"""
        attribs_match = True
        src_file = src_list.get(file)
        dst_file = dst_list.get(file)
        if not src_file or not dst_file:
            info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s"
                 % (file, bool(src_file), bool(dst_file)))
            return False

        ## check size first
        if 'size' in cfg.sync_checks:
            src_size = src_file.get('size')
            dst_size = dst_file.get('size')
            if dst_size is not None and src_size is not None and dst_size != src_size:
                debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_size, dst_size))
                attribs_match = False

        ## check md5
        compare_md5 = 'md5' in cfg.sync_checks
        # Multipart-uploaded files don't have a valid md5 sum - it ends with "...-nn"
        if compare_md5:
            if (src_remote == True and '-' in src_file['md5']) or (dst_remote == True and '-' in dst_file['md5']):
                compare_md5 = False
                info(u"disabled md5 check for %s" % file)
        if compare_md5 and src_file['is_dir'] == True:
            # For directories, nothing to do if they already exist
            compare_md5 = False
        if attribs_match and compare_md5:
            try:
                src_md5 = src_list.get_md5(file)
                dst_md5 = dst_list.get_md5(file)
            except (IOError, OSError):
                # md5 sum verification failed - ignore that file altogether
                debug(u"IGNR: %s (disappeared)" % (file))
                warning(u"%s: file disappeared, ignoring." % (file))
                raise

            if src_md5 != dst_md5:
                ## checksums are different.
                attribs_match = False
                debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" % (file, src_md5, dst_md5))

        return attribs_match

    # we don't support local->local sync, use 'rsync' or something like that instead ;-)
    assert(not(src_remote == False and dst_remote == False))

    info(u"Verifying attributes...")
    cfg = Config()
    ## Items left on src_list will be transferred
    ## Items left on update_list will be transferred after src_list
    ## Items left on copy_pairs will be copied from dst1 to dst2
    update_list = FileDict(ignore_case = False)
    ## Items left on dst_list will be deleted
    copy_pairs = {}

    debug("Comparing filelists (direction: %s -> %s)" % (__direction_str(src_remote), __direction_str(dst_remote)))

    src_dir_cache = set()
    for relative_file in src_list.keys():
        debug(u"CHECK: '%s'" % relative_file)

        if src_remote:
            # Most of the time, there will not be dir objects on the remote side
            # we still need to have a "virtual" list of them to not think that there
            # are unmatched dirs with the local side.
            dir_idx = relative_file.rfind('/')
            if dir_idx > 0:
                path = relative_file[:dir_idx+1]
                while path and path not in src_dir_cache:
                    src_dir_cache.add(path)
                    # Also add to cache, all the parent dirs
                    try:
                        path = path[:path.rindex('/', 0, -1)+1]
                    except ValueError:
                        # No more '/' in path: no parent left to record.
                        continue

        if relative_file in dst_list:
            ## Was --skip-existing requested?
            if cfg.skip_existing:
                debug(u"IGNR: '%s' (used --skip-existing)" % relative_file)
                del(src_list[relative_file])
                del(dst_list[relative_file])
                continue

            try:
                same_file = _compare(src_list, dst_list, src_remote, dst_remote, relative_file)
            except (IOError,OSError):
                # File vanished between listing and md5 computation: skip it.
                debug(u"IGNR: '%s' (disappeared)" % relative_file)
                warning(u"%s: file disappeared, ignoring." % relative_file)
                del(src_list[relative_file])
                del(dst_list[relative_file])
                continue

            if same_file:
                debug(u"IGNR: '%s' (transfer not needed)" % relative_file)
                del(src_list[relative_file])
                del(dst_list[relative_file])
            else:
                # look for matching file in src
                try:
                    md5 = src_list.get_md5(relative_file)
                except IOError:
                    md5 = None
                if md5 is not None and md5 in dst_list.by_md5:
                    # Found one, we want to copy
                    copy_src_file = dst_list.find_md5_one(md5)
                    debug(u"DST COPY src: '%s' -> '%s'" % (copy_src_file, relative_file))
                    src_item = src_list[relative_file]
                    src_item["md5"] = md5
                    src_item["copy_src"] = copy_src_file
                    copy_pairs[relative_file] = src_item
                    del(src_list[relative_file])
                    del(dst_list[relative_file])
                else:
                    # record that we will get this file transferred to us (before all the copies), so if we come across it later again,
                    # we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
                    dst_list.record_md5(relative_file, md5)
                    update_list[relative_file] = src_list[relative_file]
                    del src_list[relative_file]
                    del dst_list[relative_file]
        else:
            # dst doesn't have this file
            # look for matching file elsewhere in dst
            try:
                md5 = src_list.get_md5(relative_file)
            except IOError:
                md5 = None
            copy_src_file = dst_list.find_md5_one(md5)
            if copy_src_file is not None:
                # Found one, we want to copy
                debug(u"DST COPY dst: '%s' -> '%s'" % (copy_src_file, relative_file))
                src_item = src_list[relative_file]
                src_item["md5"] = md5
                src_item["copy_src"] = copy_src_file
                copy_pairs[relative_file] = src_item
                del(src_list[relative_file])
            else:
                # we don't have this file, and we don't have a copy of this file elsewhere. Get it.
                # record that we will get this file transferred to us (before all the copies), so if we come across it later again,
                # we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
                dst_list.record_md5(relative_file, md5)

    for f in dst_list.keys():
        if f in src_list or f in update_list or f in src_dir_cache:
            # leave only those not on src_list + update_list + src_dir_cache
            del dst_list[f]

    return src_list, dst_list, update_list, copy_pairs
# vim:et:ts=4:sts=4:ai
| 28,822 | Python | .py | 634 | 32.908517 | 135 | 0.539255 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,396 | S3.py | s3tools_s3cmd/S3/S3.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3 manager
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import, division
import sys
import os
import time
import errno
import mimetypes
import io
import pprint
from xml.sax import saxutils
from socket import timeout as SocketTimeoutException
from logging import debug, info, warning, error
from stat import ST_SIZE, ST_MODE, S_ISDIR, S_ISREG
try:
# python 3 support
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import select
from .BaseUtils import (getListFromXml, getTextFromXml, getRootTagName,
decode_from_s3, encode_to_s3, md5, s3_quote)
from .Utils import (convertHeaderTupleListToDict, unicodise,
deunicodise, check_bucket_name,
check_bucket_name_dns_support, getHostnameFromBucket)
from .SortedDict import SortedDict
from .AccessLog import AccessLog
from .ACL import ACL, GranteeLogDelivery
from .BidirMap import BidirMap
from .Config import Config
from .Exceptions import *
from .MultiPart import MultiPartUpload
from .S3Uri import S3Uri
from .ConnMan import ConnMan
from .Crypto import (sign_request_v2, sign_request_v4, checksum_sha256_file,
checksum_sha256_buffer, generate_content_md5,
hash_file_md5, calculateChecksum, format_param_str)
# Probe which flavour of a "magic" MIME-detection library is installed and
# define mime_magic_file(file) -> raw MIME string accordingly.  Three
# incompatible python bindings exist; each is detected by the exceptions its
# API raises.  If none is usable, fall back to extension-based guessing.
try:
    from ctypes import ArgumentError
    import magic
    try:
        ## https://github.com/ahupp/python-magic
        ## Always expect unicode for python 2
        ## (has Magic class but no "open()" function)
        magic_ = magic.Magic(mime=True)
        def mime_magic_file(file):
            return magic_.from_file(file)
    except TypeError:
        try:
            ## file-5.11 built-in python bindings
            ## Sources: http://www.darwinsys.com/file/
            ## Expects unicode since version 5.19, encoded strings before
            ## we can't tell if a given copy of the magic library will take a
            ## filesystem-encoded string or a unicode value, so try first
            ## with the unicode, then with the encoded string.
            ## (has Magic class and "open()" function)
            magic_ = magic.open(magic.MAGIC_MIME)
            magic_.load()
            def mime_magic_file(file):
                try:
                    return magic_.file(file)
                except (UnicodeDecodeError, UnicodeEncodeError, ArgumentError):
                    return magic_.file(deunicodise(file))
        except AttributeError:
            ## http://pypi.python.org/pypi/filemagic
            ## Accept gracefully both unicode and encoded
            ## (has Magic class but not "mime" argument and no "open()" function )
            magic_ = magic.Magic(flags=magic.MAGIC_MIME)
            def mime_magic_file(file):
                return magic_.id_filename(file)

except AttributeError:
    ## Older python-magic versions doesn't have a "Magic" method
    ## Only except encoded strings
    ## (has no Magic class but "open()" function)
    magic_ = magic.open(magic.MAGIC_MIME)
    magic_.load()
    def mime_magic_file(file):
        return magic_.file(deunicodise(file))

except (ImportError, OSError) as e:
    # No usable magic library at all: remember why, warn once on first use,
    # then guess from the filename extension.
    error_str = str(e)
    if 'magic' in error_str:
        magic_message = "Module python-magic is not available."
    else:
        magic_message = "Module python-magic can't be used (%s)." % error_str
    magic_message += " Guessing MIME types based on file extensions."
    magic_warned = False
    def mime_magic_file(file):
        global magic_warned
        if (not magic_warned):
            warning(magic_message)
            magic_warned = True
        return mimetypes.guess_type(file)[0]
def mime_magic(file):
## NOTE: So far in the code, "file" var is already unicode
def _mime_magic(file):
magictype = mime_magic_file(file)
return magictype
result = _mime_magic(file)
if result is not None:
if isinstance(result, str):
if ';' in result:
mimetype, charset = result.split(';')
charset = charset[len('charset'):]
result = (mimetype, charset)
else:
result = (result, None)
if result is None:
result = (None, None)
return result
# Seconds to wait for a "100 Continue" reply before sending the body anyway.
EXPECT_CONTINUE_TIMEOUT = 2
# One megabyte; used in multipart-upload size arithmetic below.
SIZE_1MB = 1024 * 1024

# Nothing is exported on "from S3 import *".
__all__ = []
class S3Request(object):
    """A single S3 HTTP request (method, resource, headers, params, body)
    plus the v2/v4 signing logic.

    Class-level caches shared by every request:
      - region_map: bucket name -> region used for SigV4 signing
      - redir_map:  bucket name -> hostname learned from server redirects
    """
    region_map = {}

    ## S3 sometimes sends HTTP-301, HTTP-307 response
    redir_map = {}

    def __init__(self, s3, method_string, resource, headers, body, params = None):
        self.s3 = s3
        self.headers = SortedDict(headers or {}, ignore_case = True)
        if len(self.s3.config.access_token)>0:
            # Temporary (role/STS) credentials: refresh and attach the token.
            self.s3.config.role_refresh()
            self.headers['x-amz-security-token']=self.s3.config.access_token
        self.resource = resource
        self.method_string = method_string
        self.params = params or {}
        self.body = body

        self.requester_pays()

    def requester_pays(self):
        # Add the requester-pays header on data-access methods when enabled.
        if self.s3.config.requester_pays and self.method_string in ("GET", "POST", "PUT", "HEAD"):
            self.headers['x-amz-request-payer'] = 'requester'

    def update_timestamp(self):
        # Drop any stale "date" header and stamp a fresh x-amz-date; the
        # signature covers this value, so it must be set just before signing.
        if "date" in self.headers:
            del(self.headers["date"])
        self.headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())

    def use_signature_v2(self):
        """Decide whether this request must be signed with legacy SigV2."""
        if self.s3.endpoint_requires_signature_v4:
            return False
        if self.s3.config.signature_v2 or self.s3.fallback_to_signature_v2:
            return True
        return False

    def sign(self):
        # Compute the request signature headers (in place) for v2 or v4.
        bucket_name = self.resource.get('bucket')
        if self.use_signature_v2():
            debug("Using signature v2")
            if bucket_name:
                resource_uri = "/%s%s" % (bucket_name, self.resource['uri'])
            else:
                resource_uri = self.resource['uri']
            self.headers = sign_request_v2(self.method_string, resource_uri, self.params, self.headers)
        else:
            debug("Using signature v4")
            hostname = self.s3.get_hostname(self.resource['bucket'])

            ## Default to bucket part of DNS.
            ## If bucket is not part of DNS assume path style to complete the request.
            ## Like for format_uri, take care that redirection could be to base path
            if bucket_name and (
                (bucket_name in S3Request.redir_map
                 and not S3Request.redir_map.get(bucket_name, '').startswith("%s."% bucket_name))
                or (bucket_name not in S3Request.redir_map
                    and not check_bucket_name_dns_support(Config().host_bucket, bucket_name))
            ):
                resource_uri = "/%s%s" % (bucket_name, self.resource['uri'])
            else:
                resource_uri = self.resource['uri']

            bucket_region = S3Request.region_map.get(self.resource['bucket'], Config().bucket_location)
            ## Sign the data.
            self.headers = sign_request_v4(self.method_string, hostname, resource_uri, self.params,
                                           bucket_region, self.headers, self.body)

    def get_triplet(self):
        """Finalize the request and return (method, resource, headers)."""
        self.update_timestamp()
        self.sign()

        resource = dict(self.resource)  ## take a copy
        # URL Encode the uri for the http request
        resource['uri'] = s3_quote(resource['uri'], quote_backslashes=False, unicode_output=True)
        # Get the final uri by adding the uri parameters
        resource['uri'] += format_param_str(self.params)
        return (self.method_string, resource, self.headers)
class S3(object):
    # Operation encoding: low byte is the HTTP method, high byte the target
    # kind; an operation code is the OR of one of each.
    http_methods = BidirMap(
        GET = 0x01,
        PUT = 0x02,
        HEAD = 0x04,
        DELETE = 0x08,
        POST = 0x10,
        MASK = 0x1F,
        )

    # NOTE(review): MASK (0x0700) does not cover BATCH (0x0800) - looks
    # suspicious; confirm whether the mask is actually used against BATCH.
    targets = BidirMap(
        SERVICE = 0x0100,
        BUCKET = 0x0200,
        OBJECT = 0x0400,
        BATCH = 0x0800,
        MASK = 0x0700,
        )

    operations = BidirMap(
        UNDEFINED = 0x0000,
        LIST_ALL_BUCKETS = targets["SERVICE"] | http_methods["GET"],
        BUCKET_CREATE = targets["BUCKET"] | http_methods["PUT"],
        BUCKET_LIST = targets["BUCKET"] | http_methods["GET"],
        BUCKET_DELETE = targets["BUCKET"] | http_methods["DELETE"],
        OBJECT_PUT = targets["OBJECT"] | http_methods["PUT"],
        OBJECT_GET = targets["OBJECT"] | http_methods["GET"],
        OBJECT_HEAD = targets["OBJECT"] | http_methods["HEAD"],
        OBJECT_DELETE = targets["OBJECT"] | http_methods["DELETE"],
        OBJECT_POST = targets["OBJECT"] | http_methods["POST"],
        BATCH_DELETE = targets["BATCH"] | http_methods["POST"],
        )

    # Friendly messages for well-known S3 error codes; %s is the bucket name.
    codes = {
        "NoSuchBucket" : "Bucket '%s' does not exist",
        "AccessDenied" : "Access to bucket '%s' was denied",
        "BucketAlreadyExists" : "Bucket '%s' already exists",
        }
def __init__(self, config):
self.config = config
self.fallback_to_signature_v2 = False
self.endpoint_requires_signature_v4 = False
self.expect_continue_not_supported = False
def storage_class(self):
# Note - you cannot specify GLACIER here
# https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
cls = 'STANDARD'
if self.config.storage_class != "":
return self.config.storage_class
if self.config.reduced_redundancy:
cls = 'REDUCED_REDUNDANCY'
return cls
    def get_hostname(self, bucket):
        """Return the host to contact for *bucket*.

        Precedence: a redirect recorded via set_hostname() wins, then the
        virtual-host form when the bucket name is DNS-compatible, else the
        configured base host.
        """
        if bucket and bucket in S3Request.redir_map:
            host = S3Request.redir_map[bucket]
        elif bucket and check_bucket_name_dns_support(self.config.host_bucket, bucket):
            host = getHostnameFromBucket(bucket)
        else:
            host = self.config.host_base.lower()
        # The following hack is needed because it looks like that some servers
        # are not respecting the HTTP spec and so will fail the signature check
        # if the port is specified in the "Host" header for default ports.
        # STUPIDIEST THING EVER FOR A SERVER...
        # See: https://github.com/minio/minio/issues/9169
        # (Strip only the scheme's default port: 443 for https, 80 for http.)
        if self.config.use_https:
            if host.endswith(':443'):
                host = host[:-4]
        elif host.endswith(':80'):
            host = host[:-3]
        debug('get_hostname(%s): %s' % (bucket, host))
        return host
    def set_hostname(self, bucket, redir_hostname):
        # Remember the (lower-cased) redirect host for *bucket*; it is used
        # by get_hostname() for all subsequent requests to that bucket.
        S3Request.redir_map[bucket] = redir_hostname.lower()
    def format_uri(self, resource, base_path=None):
        """Build the request path for *resource*.

        Uses path-style addressing ("/<bucket>/<object>") when the bucket was
        redirected to a host that is not "<bucket>.something", or when the
        bucket name is not DNS-compatible; otherwise the bucket lives in the
        hostname and only the object uri is used.  With a plain-HTTP proxy the
        absolute URL is returned as the request target.
        """
        bucket_name = resource.get('bucket')
        if bucket_name and (
            (bucket_name in S3Request.redir_map
             and not S3Request.redir_map.get(bucket_name, '').startswith("%s."% bucket_name))
            or (bucket_name not in S3Request.redir_map
                and not check_bucket_name_dns_support(self.config.host_bucket, bucket_name))
        ):
            uri = "/%s%s" % (s3_quote(bucket_name, quote_backslashes=False,
                                      unicode_output=True),
                             resource['uri'])
        else:
            uri = resource['uri']
        if base_path:
            uri = "%s%s" % (base_path, uri)
        if self.config.proxy_host != "" and not self.config.use_https:
            uri = "http://%s%s" % (self.get_hostname(bucket_name), uri)
        debug('format_uri(): ' + uri)
        return uri
## Commands / Actions
def list_all_buckets(self):
request = self.create_request("LIST_ALL_BUCKETS")
response = self.send_request(request)
response["list"] = getListFromXml(response["data"], "Bucket")
return response
def bucket_list(self, bucket, prefix = None, recursive = None, uri_params = None, limit = -1):
item_list = []
prefixes = []
for truncated, dirs, objects in self.bucket_list_streaming(bucket, prefix, recursive, uri_params, limit):
item_list.extend(objects)
prefixes.extend(dirs)
response = {}
response['list'] = item_list
response['common_prefixes'] = prefixes
response['truncated'] = truncated
return response
    def bucket_list_streaming(self, bucket, prefix = None, recursive = None, uri_params = None, limit = -1):
        """ Generator that produces <dir_list>, <object_list> pairs of groups of content of a specified bucket. """
        def _list_truncated(data):
            ## <IsTruncated> can either be "true" or "false" or be missing completely
            is_truncated = getTextFromXml(data, ".//IsTruncated") or "false"
            return is_truncated.lower() != "false"

        def _get_contents(data):
            return getListFromXml(data, "Contents")

        def _get_common_prefixes(data):
            return getListFromXml(data, "CommonPrefixes")

        def _get_next_marker(data, current_elts, key):
            # Continuation point: explicit NextMarker if present, else the
            # last element of the current page.
            return getTextFromXml(response["data"], "NextMarker") or current_elts[-1][key]

        uri_params = uri_params and uri_params.copy() or {}
        truncated = True
        prefixes = []

        num_objects = 0
        num_prefixes = 0
        max_keys = limit
        while truncated:
            response = self.bucket_list_noparse(bucket, prefix, recursive,
                                                uri_params, max_keys)
            current_list = _get_contents(response["data"])
            current_prefixes = _get_common_prefixes(response["data"])
            num_objects += len(current_list)
            num_prefixes += len(current_prefixes)
            if limit > num_objects + num_prefixes:
                # Ask only for the remainder of the requested limit.
                max_keys = limit - (num_objects + num_prefixes)
            truncated = _list_truncated(response["data"])
            if truncated:
                if limit == -1 or num_objects + num_prefixes < limit:
                    if current_list:
                        uri_params['marker'] = \
                            _get_next_marker(response["data"], current_list, "Key")
                    elif current_prefixes:
                        uri_params['marker'] = \
                            _get_next_marker(response["data"], current_prefixes, "Prefix")
                    else:
                        # Unexpectedly, the server lied, and so the previous
                        # response was not truncated. So, no new key to get.
                        yield False, current_prefixes, current_list
                        break
                    debug("Listing continues after '%s'" % uri_params['marker'])
                else:
                    # Limit reached: report the page as truncated and stop.
                    yield truncated, current_prefixes, current_list
                    break

            yield truncated, current_prefixes, current_list
def bucket_list_noparse(self, bucket, prefix = None, recursive = None, uri_params = None, max_keys = -1):
if uri_params is None:
uri_params = {}
if prefix:
uri_params['prefix'] = prefix
if not self.config.recursive and not recursive:
uri_params['delimiter'] = "/"
if max_keys != -1:
uri_params['max-keys'] = str(max_keys)
if self.config.list_allow_unordered:
uri_params['allow-unordered'] = "true"
request = self.create_request("BUCKET_LIST", bucket = bucket, uri_params = uri_params)
response = self.send_request(request)
#debug(response)
return response
    def bucket_create(self, bucket, bucket_location = None, extra_headers = None):
        """PUT a new bucket, optionally carrying a LocationConstraint body.

        "US"/"us-east-1" are the default region and must not be sent as a
        constraint; any other region also requires a DNS-strict bucket name.
        """
        headers = SortedDict(ignore_case = True)
        if extra_headers:
            headers.update(extra_headers)

        body = ""
        if bucket_location and bucket_location.strip().upper() != "US" and bucket_location.strip().lower() != "us-east-1":
            bucket_location = bucket_location.strip()
            if bucket_location.upper() == "EU":
                # Legacy alias (normalized back to eu-west-1 when read).
                bucket_location = bucket_location.upper()
            body = "<CreateBucketConfiguration><LocationConstraint>"
            body += bucket_location
            body += "</LocationConstraint></CreateBucketConfiguration>"
            debug("bucket_location: " + body)
            check_bucket_name(bucket, dns_strict = True)
        else:
            check_bucket_name(bucket, dns_strict = False)
        if self.config.acl_public:
            headers["x-amz-acl"] = "public-read"
            # AWS suddenly changed the default "ownership" control value mid 2023.
            # ACL are disabled by default, so obviously the bucket can't be public.
            # See: https://aws.amazon.com/fr/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/
            # To be noted: "Block Public Access" flags should also be disabled after the bucket creation to be able to set a "public" acl for an object.
            headers["x-amz-object-ownership"] = 'ObjectWriter'

        request = self.create_request("BUCKET_CREATE", bucket = bucket, headers = headers, body = body)
        response = self.send_request(request)
        return response
def bucket_delete(self, bucket):
request = self.create_request("BUCKET_DELETE", bucket = bucket)
response = self.send_request(request)
return response
def get_bucket_location(self, uri, force_us_default=False):
bucket = uri.bucket()
request = self.create_request("BUCKET_LIST", bucket = uri.bucket(),
uri_params = {'location': None})
saved_redir_map = S3Request.redir_map.get(bucket, '')
saved_region_map = S3Request.region_map.get(bucket, '')
try:
if force_us_default and not (saved_redir_map and saved_region_map):
S3Request.redir_map[bucket] = self.config.host_base
S3Request.region_map[bucket] = 'us-east-1'
response = self.send_request(request)
finally:
if bucket in saved_redir_map:
S3Request.redir_map[bucket] = saved_redir_map
elif bucket in S3Request.redir_map:
del S3Request.redir_map[bucket]
if bucket in saved_region_map:
S3Request.region_map[bucket] = saved_region_map
elif bucket in S3Request.region_map:
del S3Request.region_map[bucket]
location = getTextFromXml(response['data'], "LocationConstraint")
if not location or location in [ "", "US" ]:
location = "us-east-1"
elif location == "EU":
location = "eu-west-1"
return location
def get_bucket_requester_pays(self, uri):
request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
uri_params={'requestPayment': None})
response = self.send_request(request)
resp_data = response.get('data', '')
if resp_data:
payer = getTextFromXml(resp_data, "Payer")
else:
payer = None
return payer
def set_bucket_ownership(self, uri, ownership):
headers = SortedDict(ignore_case=True)
body = '<OwnershipControls xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' \
'<Rule>' \
'<ObjectOwnership>%s</ObjectOwnership>' \
'</Rule>' \
'</OwnershipControls>'
body = body % ownership
debug(u"set_bucket_ownership(%s)" % body)
headers['content-md5'] = generate_content_md5(body)
request = self.create_request("BUCKET_CREATE", uri = uri,
headers = headers, body = body,
uri_params = {'ownershipControls': None})
response = self.send_request(request)
return response
def get_bucket_ownership(self, uri):
request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
uri_params={'ownershipControls': None})
response = self.send_request(request)
resp_data = response.get('data', '')
if resp_data:
ownership = getTextFromXml(resp_data, ".//Rule//ObjectOwnership")
else:
ownership = None
return ownership
def set_bucket_public_access_block(self, uri, flags):
headers = SortedDict(ignore_case=True)
body = '<PublicAccessBlockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
for tag in ('BlockPublicAcls', 'IgnorePublicAcls', 'BlockPublicPolicy', 'RestrictPublicBuckets'):
val = flags.get(tag, False) and "true" or "false"
body += '<%s>%s</%s>' % (tag, val, tag)
body += '</PublicAccessBlockConfiguration>'
debug(u"set_bucket_public_access_block(%s)" % body)
headers['content-md5'] = generate_content_md5(body)
request = self.create_request("BUCKET_CREATE", uri = uri,
headers = headers, body = body,
uri_params = {'publicAccessBlock': None})
response = self.send_request(request)
return response
def get_bucket_public_access_block(self, uri):
request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
uri_params={'publicAccessBlock': None})
response = self.send_request(request)
resp_data = response.get('data', '')
if resp_data:
flags = {
"BlockPublicAcls": getTextFromXml(resp_data, "BlockPublicAcls") == "true",
"IgnorePublicAcls": getTextFromXml(resp_data, "IgnorePublicAcls") == "true",
"BlockPublicPolicy": getTextFromXml(resp_data, "BlockPublicPolicy") == "true",
"RestrictPublicBuckets": getTextFromXml(resp_data, "RestrictPublicBuckets") == "true",
}
else:
flags = {}
return flags
def bucket_info(self, uri):
response = {}
response['bucket-location'] = self.get_bucket_location(uri)
for key, func in (('requester-pays', self.get_bucket_requester_pays),
('versioning', self.get_versioning),
('ownership', self.get_bucket_ownership)):
try:
response[key] = func(uri)
except S3Error as e:
response[key] = None
try:
response['public-access-block'] = self.get_bucket_public_access_block(uri)
except S3Error as e:
response['public-access-block'] = {}
return response
def website_info(self, uri, bucket_location = None):
bucket = uri.bucket()
request = self.create_request("BUCKET_LIST", bucket = bucket,
uri_params = {'website': None})
try:
response = self.send_request(request)
response['index_document'] = getTextFromXml(response['data'], ".//IndexDocument//Suffix")
response['error_document'] = getTextFromXml(response['data'], ".//ErrorDocument//Key")
response['website_endpoint'] = self.config.website_endpoint % {
"bucket" : uri.bucket(),
"location" : self.get_bucket_location(uri)}
return response
except S3Error as e:
if e.status == 404:
debug("Could not get /?website - website probably not configured for this bucket")
return None
raise
    def website_create(self, uri, bucket_location = None):
        """PUT /?website enabling static hosting with the configured index
        document (and optional error document)."""
        bucket = uri.bucket()
        body = '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
        body += ' <IndexDocument>'
        body += (' <Suffix>%s</Suffix>' % self.config.website_index)
        body += ' </IndexDocument>'
        if self.config.website_error:
            body += ' <ErrorDocument>'
            body += (' <Key>%s</Key>' % self.config.website_error)
            body += ' </ErrorDocument>'
        body += '</WebsiteConfiguration>'

        request = self.create_request("BUCKET_CREATE", bucket = bucket, body = body,
                                      uri_params = {'website': None})
        response = self.send_request(request)
        debug("Received response '%s'" % (response))
        return response
def website_delete(self, uri, bucket_location = None):
bucket = uri.bucket()
request = self.create_request("BUCKET_DELETE", bucket = bucket,
uri_params = {'website': None})
response = self.send_request(request)
debug("Received response '%s'" % (response))
if response['status'] != 204:
raise S3ResponseError("Expected status 204: %s" % response)
return response
    def expiration_info(self, uri, bucket_location = None):
        """Return the bucket's lifecycle expiration details, or None.

        None means: lifecycle not configured (404), not implemented by the
        server (501), or an unexpected (non-LifecycleConfiguration) answer.
        """
        bucket = uri.bucket()
        request = self.create_request("BUCKET_LIST", bucket=bucket,
                                      uri_params={'lifecycle': None})
        try:
            response = self.send_request(request)
        except S3Error as e:
            if e.status == 404:
                debug("Could not get /?lifecycle - lifecycle probably not "
                      "configured for this bucket")
                return None
            elif e.status == 501:
                debug("Could not get /?lifecycle - lifecycle support not "
                      "implemented by the server")
                return None
            raise

        root_tag_name = getRootTagName(response['data'])
        if root_tag_name != "LifecycleConfiguration":
            debug("Could not get /?lifecycle - unexpected xml response: "
                  "%s", root_tag_name)
            return None
        response['prefix'] = getTextFromXml(response['data'],
                                            ".//Rule//Prefix")
        response['date'] = getTextFromXml(response['data'],
                                          ".//Rule//Expiration//Date")
        response['days'] = getTextFromXml(response['data'],
                                          ".//Rule//Expiration//Days")
        return response
    def expiration_set(self, uri, bucket_location = None):
        """Set or delete the bucket's lifecycle expiration rule from config.

        Exactly one of expiry_date / expiry_days selects a PUT; neither
        (and no expiry_prefix) selects a DELETE of the lifecycle config.

        Raises:
            ParameterError: when both date and days are given, or when only
                a prefix is given without a date/days rule.
        """
        if self.config.expiry_date and self.config.expiry_days:
            raise ParameterError("Expect either --expiry-day or --expiry-date")
        if not (self.config.expiry_date or self.config.expiry_days):
            if self.config.expiry_prefix:
                # A prefix alone makes no sense without a date/days rule.
                raise ParameterError("Expect either --expiry-day or --expiry-date")
            debug("del bucket lifecycle")
            bucket = uri.bucket()
            request = self.create_request("BUCKET_DELETE", bucket = bucket,
                                          uri_params = {'lifecycle': None})
        else:
            request = self._expiration_set(uri)
        response = self.send_request(request)
        debug("Received response '%s'" % (response))
        return response
    def _expiration_set(self, uri):
        """Build (but do not send) the PUT /?lifecycle request carrying the
        expiration rule XML assembled from the config."""
        debug("put bucket lifecycle")
        body = '<LifecycleConfiguration>'
        body += ' <Rule>'
        body += ' <Filter>'
        body += ' <Prefix>%s</Prefix>' % self.config.expiry_prefix
        body += ' </Filter>'
        body += ' <Status>Enabled</Status>'
        body += ' <Expiration>'
        if self.config.expiry_date:
            body += ' <Date>%s</Date>' % self.config.expiry_date
        elif self.config.expiry_days:
            body += ' <Days>%s</Days>' % self.config.expiry_days
        body += ' </Expiration>'
        body += ' </Rule>'
        body += '</LifecycleConfiguration>'

        headers = SortedDict(ignore_case = True)
        # Lifecycle PUTs require a Content-MD5 of the body.
        headers['content-md5'] = generate_content_md5(body)
        bucket = uri.bucket()
        request = self.create_request("BUCKET_CREATE", bucket = bucket,
                                      headers = headers, body = body,
                                      uri_params = {'lifecycle': None})
        return (request)
def _guess_content_type(self, filename):
content_type = self.config.default_mime_type
content_charset = None
if filename == "-" and not self.config.default_mime_type:
raise ParameterError("You must specify --mime-type or --default-mime-type for files uploaded from stdin.")
if self.config.guess_mime_type:
if self.config.follow_symlinks:
filename = unicodise(os.path.realpath(deunicodise(filename)))
if self.config.use_mime_magic:
(content_type, content_charset) = mime_magic(filename)
else:
(content_type, content_charset) = mimetypes.guess_type(filename)
if not content_type:
content_type = self.config.default_mime_type
return (content_type, content_charset)
def stdin_content_type(self):
content_type = self.config.mime_type
if not content_type:
content_type = self.config.default_mime_type
content_type += "; charset=" + self.config.encoding.upper()
return content_type
def content_type(self, filename=None, is_dir=False):
# explicit command line argument always wins
content_type = self.config.mime_type
content_charset = None
if filename == u'-':
return self.stdin_content_type()
if is_dir:
content_type = 'application/x-directory'
elif not content_type:
(content_type, content_charset) = self._guess_content_type(filename)
## add charset to content type
if not content_charset:
content_charset = self.config.encoding.upper()
if self.add_encoding(filename, content_type) and content_charset is not None:
content_type = content_type + "; charset=" + content_charset
return content_type
def add_encoding(self, filename, content_type):
if 'charset=' in content_type:
return False
exts = self.config.add_encoding_exts.split(',')
if exts[0]=='':
return False
parts = filename.rsplit('.',2)
if len(parts) < 2:
return False
ext = parts[1]
if ext in exts:
return True
else:
return False
    def object_put(self, filename, uri, extra_headers = None, extra_label = ""):
        """Upload a local file, stdin ("-"), or a directory marker to *uri*.

        Chooses between a plain PUT and a multipart upload based on size or
        stdin input; honours put_continue by skipping files whose size and
        md5 already match the remote object.

        Raises:
            ValueError: *uri* is not an s3:// URI.
            InvalidFileError: unreadable path or not a regular file/dir.
            ParameterError: multipart constraints violated.
        """
        # TODO TODO
        # Make it consistent with stream-oriented object_get()
        if uri.type != "s3":
            raise ValueError("Expected URI type 's3', got '%s'" % uri.type)

        try:
            is_dir = False
            size = 0
            if filename == "-":
                is_stream = True
                src_stream = io.open(sys.stdin.fileno(), mode='rb', closefd=False)
                src_stream.stream_name = u'<stdin>'
            else:
                is_stream = False
                filename_bytes = deunicodise(filename)
                stat = os.stat(filename_bytes)
                mode = stat[ST_MODE]
                if S_ISDIR(mode):
                    is_dir = True
                    # Dirs are represented as empty objects on S3
                    src_stream = io.BytesIO(b'')
                elif not S_ISREG(mode):
                    raise InvalidFileError(u"Not a regular file")
                else:
                    # Standard normal file
                    src_stream = io.open(filename_bytes, mode='rb')
                    size = stat[ST_SIZE]
                src_stream.stream_name = filename
        except (IOError, OSError) as e:
            raise InvalidFileError(u"%s" % e.strerror)

        headers = SortedDict(ignore_case=True)
        if extra_headers:
            headers.update(extra_headers)

        ## Set server side encryption
        if self.config.server_side_encryption:
            headers["x-amz-server-side-encryption"] = "AES256"

        ## Set kms headers (overrides plain SSE when a key is configured)
        if self.config.kms_key:
            headers['x-amz-server-side-encryption'] = 'aws:kms'
            headers['x-amz-server-side-encryption-aws-kms-key-id'] = self.config.kms_key

        ## MIME-type handling
        headers["content-type"] = self.content_type(filename=filename, is_dir=is_dir)

        ## Other Amazon S3 attributes
        if self.config.acl_public:
            headers["x-amz-acl"] = "public-read"
        headers["x-amz-storage-class"] = self.storage_class()

        ## Multipart decision
        multipart = False
        if not self.config.enable_multipart and is_stream:
            raise ParameterError("Multi-part upload is required to upload from stdin")
        if self.config.enable_multipart:
            if size > self.config.multipart_chunk_size_mb * SIZE_1MB or is_stream:
                multipart = True
                if size > self.config.multipart_max_chunks * self.config.multipart_chunk_size_mb * SIZE_1MB:
                    raise ParameterError("Chunk size %d MB results in more than %d chunks. Please increase --multipart-chunk-size-mb" % \
                                         (self.config.multipart_chunk_size_mb, self.config.multipart_max_chunks))
        if multipart:
            # Multipart requests are quite different... drop here
            return self.send_file_multipart(src_stream, headers, uri, size, extra_label)

        ## Not multipart...
        if self.config.put_continue:
            # Note, if input was stdin, we would be performing multipart upload.
            # So this will always work as long as the file already uploaded was
            # not uploaded via MultiUpload, in which case its ETag will not be
            # an md5.
            try:
                info = self.object_info(uri)
            except Exception:
                info = None

            if info is not None:
                remote_size = int(info['headers']['content-length'])
                remote_checksum = info['headers']['etag'].strip('"\'')
                if size == remote_size:
                    checksum = calculateChecksum('', src_stream, 0, size, self.config.send_chunk)
                    if remote_checksum == checksum:
                        warning("Put: size and md5sum match for %s, skipping." % uri)
                        return
                    else:
                        warning("MultiPart: checksum (%s vs %s) does not match for %s, reuploading."
                                % (remote_checksum, checksum, uri))
                else:
                    warning("MultiPart: size (%d vs %d) does not match for %s, reuploading."
                            % (remote_size, size, uri))

        headers["content-length"] = str(size)
        request = self.create_request("OBJECT_PUT", uri = uri, headers = headers)
        labels = { 'source' : filename, 'destination' : uri.uri(), 'extra' : extra_label }
        response = self.send_file(request, src_stream, labels)
        return response
def object_get(self, uri, stream, dest_name, start_position = 0, extra_label = ""):
    """Download the object at *uri* into *stream* (resumable via start_position)."""
    if uri.type != "s3":
        raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
    download_labels = {
        'source': uri.uri(),
        'destination': dest_name,
        'extra': extra_label,
    }
    get_request = self.create_request("OBJECT_GET", uri=uri)
    return self.recv_file(get_request, stream, download_labels, start_position)
def object_batch_delete(self, remote_list):
    """Batch delete given a remote_list (mapping of items carrying 'object_uri_str')."""
    uri_strs = []
    for item_key in remote_list:
        uri_strs.append(remote_list[item_key]['object_uri_str'])
    return self.object_batch_delete_uri_strs(uri_strs)
def object_batch_delete_uri_strs(self, uris):
    """Batch delete given a list of object uri strings.

    All URIs must be s3:// object URIs from the same bucket; raises
    ValueError otherwise, or when the list is empty.
    """
    def compose_batch_del_xml(bucket, key_list):
        # Build the XML payload for the S3 multi-object Delete API.
        body = u"<?xml version=\"1.0\" encoding=\"UTF-8\"?><Delete>"
        for key in key_list:
            uri = S3Uri(key)
            if uri.type != "s3":
                raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
            if not uri.has_object():
                raise ValueError("URI '%s' has no object" % key)
            if uri.bucket() != bucket:
                raise ValueError("The batch should contain keys from the same bucket")
            # Escape so '&', '<', '>' in key names can't corrupt the XML.
            # (local renamed from 'object' to avoid shadowing the builtin)
            object_key = saxutils.escape(uri.object())
            body += u"<Object><Key>%s</Key></Object>" % object_key
        body += u"</Delete>"
        body = encode_to_s3(body)
        return body

    batch = uris
    if len(batch) == 0:
        raise ValueError("Key list is empty")
    bucket = S3Uri(batch[0]).bucket()
    request_body = compose_batch_del_xml(bucket, batch)
    # content-md5 is mandatory for the multi-object delete API.
    headers = SortedDict({'content-md5': generate_content_md5(request_body),
                          'content-type': 'application/xml'}, ignore_case=True)
    request = self.create_request("BATCH_DELETE", bucket = bucket,
                                  headers = headers, body = request_body,
                                  uri_params = {'delete': None})
    response = self.send_request(request)
    return response
def object_delete(self, uri):
    """Delete a single object; returns the raw response dict."""
    if uri.type != "s3":
        raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
    delete_request = self.create_request("OBJECT_DELETE", uri=uri)
    return self.send_request(delete_request)
def object_restore(self, uri):
    """POST ?restore to thaw an archived (Glacier-class) object.

    Days and priority come from config; both are validated first.
    """
    if uri.type != "s3":
        raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
    if self.config.restore_days < 1:
        raise ParameterError("You must restore a file for 1 or more days")
    if self.config.restore_priority not in ['Standard', 'Expedited', 'Bulk']:
        raise ParameterError("Valid restoration priorities: bulk, standard, expedited")
    body_parts = [
        '<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
        ' <Days>%s</Days>' % self.config.restore_days,
        ' <GlacierJobParameters>',
        ' <Tier>%s</Tier>' % self.config.restore_priority,
        ' </GlacierJobParameters>',
        '</RestoreRequest>',
    ]
    restore_request = self.create_request("OBJECT_POST", uri=uri,
                                          body=''.join(body_parts),
                                          uri_params={'restore': None})
    response = self.send_request(restore_request)
    debug("Received response '%s'" % (response))
    return response
def _sanitize_headers(self, headers):
to_remove = [
# from http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
'date',
'content-length',
'last-modified',
'content-md5',
'x-amz-version-id',
'x-amz-delete-marker',
# other headers returned from object_info() we don't want to send
'accept-ranges',
'connection',
'etag',
'server',
'x-amz-id-2',
'x-amz-request-id',
# Cloudflare's R2 header we don't want to send
'cf-ray',
# Other headers that are not copying by a direct copy
'x-amz-storage-class',
## We should probably also add server-side encryption headers
]
for h in to_remove + self.config.remove_headers:
if h.lower() in headers:
del headers[h.lower()]
return headers
def object_copy(self, src_uri, dst_uri, extra_headers=None,
                src_size=None, extra_label="", replace_meta=False):
    """Remote copy an object and eventually set metadata

    Note: A little memo description of the nightmare for performance here:
    ** FOR AWS, 2 cases:
    - COPY will copy the metadata of the source to dest, but you can't
    modify them. Any additional header will be ignored anyway.
    - REPLACE will set the additional metadata headers that are provided
    but will not copy any of the source headers.
    So, to add to existing meta during copy, you have to do an object_info
    to get original source headers, then modify, then use REPLACE for the
    copy operation.
    ** For Minio and maybe other implementations:
    - if additional headers are sent, they will be set to the destination
    on top of source original meta in all cases COPY and REPLACE.
    It is a nice behavior except that it is different of the aws one.
    As it was still too easy, there is another catch:
    In all cases, for multipart copies, metadata data are never copied
    from the source.
    """
    if src_uri.type != "s3":
        raise ValueError("Expected URI type 's3', got '%s'" % src_uri.type)
    if dst_uri.type != "s3":
        raise ValueError("Expected URI type 's3', got '%s'" % dst_uri.type)
    if self.config.acl_public is None:
        # No explicit ACL requested: fetch the source ACL now so it can be
        # re-applied to the destination after the copy.
        try:
            acl = self.get_acl(src_uri)
        except S3Error as exc:
            # Ignore the exception and don't fail the copy
            # if the server doesn't support setting ACLs
            if exc.status != 501:
                raise exc
            acl = None
    multipart = False
    headers = None
    if extra_headers or self.config.mime_type:
        # Force replace, that will force getting meta with object_info()
        replace_meta = True
    if replace_meta:
        src_info = self.object_info(src_uri)
        headers = src_info['headers']
        src_size = int(headers["content-length"])
    if self.config.enable_multipart:
        # Get size of remote source only if multipart is enabled and that no
        # size info was provided
        src_headers = headers
        if src_size is None:
            src_info = self.object_info(src_uri)
            src_headers = src_info['headers']
            src_size = int(src_headers["content-length"])

        # If we are over the grand maximum size for a normal copy/modify
        # (> 5GB) go nuclear and use multipart copy as the only option to
        # modify an object.
        # Reason is an aws s3 design bug. See:
        # https://github.com/aws/aws-sdk-java/issues/367
        if src_uri is dst_uri:
            # optimisation in the case of modify
            threshold = MultiPartUpload.MAX_CHUNK_SIZE_MB * SIZE_1MB
        else:
            threshold = self.config.multipart_copy_chunk_size_mb * SIZE_1MB

        if src_size > threshold:
            # Sadly, s3 has a bad logic as metadata will not be copied for
            # multipart copy unlike what is done for direct copies.
            # TODO: Optimize by re-using the object_info request done
            # earlier earlier at fetch remote stage, and preserve headers.
            if src_headers is None:
                src_info = self.object_info(src_uri)
                src_headers = src_info['headers']
                src_size = int(src_headers["content-length"])
            headers = src_headers
            multipart = True

    if headers:
        # Remove response-only headers before re-sending source metadata.
        self._sanitize_headers(headers)
        headers = SortedDict(headers, ignore_case=True)
    else:
        headers = SortedDict(ignore_case=True)

    # Following meta data are updated even in COPY by aws
    if self.config.acl_public:
        headers["x-amz-acl"] = "public-read"
    headers["x-amz-storage-class"] = self.storage_class()
    ## Set server side encryption
    if self.config.server_side_encryption:
        headers["x-amz-server-side-encryption"] = "AES256"
    ## Set kms headers
    if self.config.kms_key:
        headers['x-amz-server-side-encryption'] = 'aws:kms'
        headers['x-amz-server-side-encryption-aws-kms-key-id'] = \
            self.config.kms_key

    # Following meta data are not updated in simple COPY by aws.
    if extra_headers:
        headers.update(extra_headers)
    if self.config.mime_type:
        headers["content-type"] = self.config.mime_type

    # "COPY" or "REPLACE"
    if not replace_meta:
        headers['x-amz-metadata-directive'] = "COPY"
    else:
        headers['x-amz-metadata-directive'] = "REPLACE"

    if multipart:
        # Multipart decision. Only do multipart copy for remote s3 files
        # bigger than the multipart copy threshold.
        # Multipart requests are quite different... delegate
        response = self.copy_file_multipart(src_uri, dst_uri, src_size,
                                            headers, extra_label)
    else:
        # Not multipart... direct request
        headers['x-amz-copy-source'] = s3_quote(
            "/%s/%s" % (src_uri.bucket(), src_uri.object()),
            quote_backslashes=False, unicode_output=True)
        request = self.create_request("OBJECT_PUT", uri=dst_uri,
                                      headers=headers)
        response = self.send_request(request)
        if response["data"] and getRootTagName(response["data"]) == "Error":
            # http://doc.s3.amazonaws.com/proposals/copy.html
            # Error during copy, status will be 200, so force error code 500
            response["status"] = 500
            error("Server error during the COPY operation. Overwrite response "
                  "status to 500")
            raise S3Error(response)

    if self.config.acl_public is None and acl:
        # Re-apply the preserved source ACL onto the destination.
        try:
            self.set_acl(dst_uri, acl)
        except S3Error as exc:
            # Ignore the exception and don't fail the copy
            # if the server doesn't support setting ACLs
            if exc.status != 501:
                raise exc
    return response
def object_modify(self, src_uri, dst_uri, extra_headers=None,
                  src_size=None, extra_label=""):
    """In-place metadata update: copy the object onto itself with REPLACE.

    *dst_uri* is accepted for signature compatibility, but the copy is
    deliberately src->src so object_copy can pick the larger multipart
    threshold reserved for the modify case.
    """
    return self.object_copy(src_uri, src_uri, extra_headers, src_size,
                            extra_label, replace_meta=True)
def object_move(self, src_uri, dst_uri, extra_headers=None,
                src_size=None, extra_label=""):
    """Server-side move: remote copy, then delete the source on success."""
    copy_response = self.object_copy(src_uri, dst_uri, extra_headers,
                                     src_size, extra_label)
    debug("Object %s copied to %s" % (src_uri, dst_uri))
    copy_data = copy_response["data"]
    # Only remove the source once the copy result looks sane.
    if not copy_data or getRootTagName(copy_data) in [
            "CopyObjectResult", "CompleteMultipartUploadResult"]:
        self.object_delete(src_uri)
        debug("Object '%s' deleted", src_uri)
    else:
        warning("Object '%s' NOT deleted because of an unexpected "
                "response data content.", src_uri)
    return copy_response
def object_info(self, uri):
    """HEAD an object and return the response dict."""
    head_request = self.create_request("OBJECT_HEAD", uri=uri)
    try:
        return self.send_request(head_request)
    except S3Error as exc:
        # A HEAD response carries no XML body even on error, so a 404
        # arrives without the usual details; synthesize them so callers see
        # the same error shape as for a GET.
        if exc.status == 404 and not exc.code:
            exc.code = 'NoSuchKey'
            exc.message = 'The specified key does not exist.'
            exc.resource = uri
        raise exc
def get_acl(self, uri):
    """Fetch the ACL of an object, or of the bucket when no object is given."""
    if uri.has_object():
        acl_request = self.create_request("OBJECT_GET", uri=uri,
                                          uri_params={'acl': None})
    else:
        acl_request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
                                          uri_params={'acl': None})
    return ACL(self.send_request(acl_request)['data'])
def set_acl(self, uri, acl):
    """Push *acl* (serialized to XML) onto an object or a bucket."""
    body = u"%s" % acl
    debug(u"set_acl(%s): acl-xml: %s" % (uri, body))
    headers = SortedDict({'content-type': 'application/xml'}, ignore_case=True)
    if uri.has_object():
        put_request = self.create_request("OBJECT_PUT", uri=uri,
                                          headers=headers, body=body,
                                          uri_params={'acl': None})
    else:
        put_request = self.create_request("BUCKET_CREATE", bucket=uri.bucket(),
                                          headers=headers, body=body,
                                          uri_params={'acl': None})
    return self.send_request(put_request)
def set_versioning(self, uri, enabled):
    """Enable (True) or suspend (anything else) versioning on a bucket."""
    if enabled is True:
        status = "Enabled"
    else:
        status = "Suspended"
    body = ('<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
            '<Status>%s</Status>'
            '</VersioningConfiguration>') % status
    debug(u"set_versioning(%s)" % body)
    headers = SortedDict(ignore_case=True)
    headers['content-md5'] = generate_content_md5(body)
    versioning_request = self.create_request("BUCKET_CREATE", uri=uri,
                                             headers=headers, body=body,
                                             uri_params={'versioning': None})
    return self.send_request(versioning_request)
def get_versioning(self, uri):
    """Return the text of the bucket versioning Status element."""
    versioning_request = self.create_request("BUCKET_LIST", uri=uri,
                                             uri_params={'versioning': None})
    response = self.send_request(versioning_request)
    return getTextFromXml(response['data'], "Status")
def get_policy(self, uri):
    """Fetch the bucket policy document as decoded text."""
    policy_request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
                                         uri_params={'policy': None})
    return decode_from_s3(self.send_request(policy_request)['data'])
def set_object_legal_hold(self, uri, legal_hold_status):
    """Set the Object Lock LegalHold status of an object.

    legal_hold_status is sent verbatim as the <Status> value.
    """
    body = ('<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
            '<Status>%s</Status>'
            '</LegalHold>') % legal_hold_status
    headers = SortedDict(ignore_case=True)
    headers['content-type'] = 'application/xml'
    headers['content-md5'] = generate_content_md5(body)
    hold_request = self.create_request("OBJECT_PUT", uri=uri,
                                       headers=headers, body=body,
                                       uri_params={'legal-hold': None})
    return self.send_request(hold_request)
def set_object_retention(self, uri, mode, retain_until_date):
    """Set the Object Lock retention (Mode + RetainUntilDate) of an object."""
    body = ('<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
            '<Mode>%s</Mode>'
            '<RetainUntilDate>%s</RetainUntilDate>'
            '</Retention>') % (mode, retain_until_date)
    headers = SortedDict(ignore_case=True)
    headers['content-type'] = 'application/xml'
    headers['content-md5'] = generate_content_md5(body)
    retention_request = self.create_request("OBJECT_PUT", uri=uri,
                                            headers=headers, body=body,
                                            uri_params={'retention': None})
    return self.send_request(retention_request)
def set_policy(self, uri, policy):
    """Attach *policy* (a JSON document, passed through as-is) to a bucket."""
    # TODO check policy is proper json string
    headers = SortedDict({'content-type': 'application/json'},
                         ignore_case=True)
    policy_request = self.create_request("BUCKET_CREATE", uri=uri,
                                         headers=headers, body=policy,
                                         uri_params={'policy': None})
    return self.send_request(policy_request)
def delete_policy(self, uri):
    """Remove the policy attached to a bucket."""
    debug(u"delete_policy(%s)" % uri)
    delete_request = self.create_request("BUCKET_DELETE", uri=uri,
                                         uri_params={'policy': None})
    return self.send_request(delete_request)
def get_cors(self, uri):
    """Fetch the CORS configuration of a bucket as decoded text."""
    cors_request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
                                       uri_params={'cors': None})
    return decode_from_s3(self.send_request(cors_request)['data'])
def set_cors(self, uri, cors):
    """Set the CORS configuration (an XML document) on a bucket."""
    headers = SortedDict(ignore_case = True)
    # NOTE: the payload is XML (see content-type below), not json as an old
    # TODO here claimed; its validity is not checked before sending.
    headers['content-type'] = 'application/xml'
    headers['content-md5'] = generate_content_md5(cors)
    request = self.create_request("BUCKET_CREATE", uri = uri,
                                  headers=headers, body = cors,
                                  uri_params = {'cors': None})
    response = self.send_request(request)
    return response
def delete_cors(self, uri):
    """Remove the CORS configuration from a bucket."""
    debug(u"delete_cors(%s)" % uri)
    delete_request = self.create_request("BUCKET_DELETE", uri=uri,
                                         uri_params={'cors': None})
    return self.send_request(delete_request)
def set_lifecycle_policy(self, uri, policy):
    """Install a lifecycle policy (XML document) on a bucket."""
    debug(u"set_lifecycle_policy(%s): policy-xml: %s" % (uri, policy))
    headers = SortedDict(ignore_case=True)
    headers['content-md5'] = generate_content_md5(policy)
    lifecycle_request = self.create_request("BUCKET_CREATE", uri=uri,
                                            headers=headers, body=policy,
                                            uri_params={'lifecycle': None})
    return self.send_request(lifecycle_request)
def set_payer(self, uri):
    """Set the bucket request-payment configuration from config.requester_pays."""
    headers = SortedDict(ignore_case=True)
    headers['content-type'] = 'application/xml'
    if self.config.requester_pays:
        payer_line = '<Payer>Requester</Payer>\n'
    else:
        payer_line = '<Payer>BucketOwner</Payer>\n'
    body = ('<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">\n'
            + payer_line
            + '</RequestPaymentConfiguration>\n')
    payer_request = self.create_request("BUCKET_CREATE", uri=uri, body=body,
                                        uri_params={'requestPayment': None})
    return self.send_request(payer_request)
def get_lifecycle_policy(self, uri):
    """Fetch the lifecycle policy of a bucket (raw response dict)."""
    debug(u"get_lifecycle_policy(%s)" % uri)
    lifecycle_request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
                                            uri_params={'lifecycle': None})
    response = self.send_request(lifecycle_request)
    debug(u"%s: Got Lifecycle Policy" % response['status'])
    return response
def delete_lifecycle_policy(self, uri):
    """Remove the lifecycle policy of a bucket."""
    debug(u"delete_lifecycle_policy(%s)" % uri)
    delete_request = self.create_request("BUCKET_DELETE", uri=uri,
                                         uri_params={'lifecycle': None})
    return self.send_request(delete_request)
def set_notification_policy(self, uri, policy):
    """Install a notification configuration (XML document) on a bucket."""
    headers = SortedDict(ignore_case=True)
    # Optionally ask S3 to skip validating the notification destinations.
    if self.config.skip_destination_validation:
        headers["x-amz-skip-destination-validation"] = "True"
    debug(u"set_notification_policy(%s): policy-xml: %s" % (uri, policy))
    notification_request = self.create_request("BUCKET_CREATE", uri=uri,
                                               headers=headers, body=policy,
                                               uri_params={'notification': None})
    return self.send_request(notification_request)
def get_notification_policy(self, uri):
    """Fetch the notification configuration of a bucket (raw response dict)."""
    debug(u"get_notification_policy(%s)" % uri)
    notification_request = self.create_request("BUCKET_LIST",
                                               bucket=uri.bucket(),
                                               uri_params={'notification': None})
    response = self.send_request(notification_request)
    debug(u"%s: Got notification Policy" % response['status'])
    return response
def delete_notification_policy(self, uri):
    """Clear bucket notifications by uploading an empty configuration.

    There is no DELETE for ?notification; overwriting with an empty
    NotificationConfiguration removes all destinations.
    """
    return self.set_notification_policy(
        uri, '<NotificationConfiguration></NotificationConfiguration>')
def set_tagging(self, uri, tagsets):
    """Set the tag set on an object or bucket.

    tagsets: iterable of (key, value) pairs.

    Fix: keys and values are now XML-escaped (mirroring what
    object_batch_delete_uri_strs already does for key names), so tags
    containing '&', '<' or '>' no longer produce a malformed XML payload.
    """
    if uri.type != "s3":
        raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
    body = '<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
    body += '<TagSet>'
    for (key, val) in tagsets:
        body += '<Tag>'
        # Escape so special XML characters in tag data can't break the body.
        body += (' <Key>%s</Key>' % saxutils.escape(key))
        body += (' <Value>%s</Value>' % saxutils.escape(val))
        body += '</Tag>'
    body += '</TagSet>'
    body += '</Tagging>'
    headers = SortedDict(ignore_case=True)
    # content-md5 is required by the PutObjectTagging/PutBucketTagging APIs.
    headers['content-md5'] = generate_content_md5(body)
    if uri.has_object():
        request = self.create_request("OBJECT_PUT", uri=uri,
                                      headers=headers, body=body,
                                      uri_params={'tagging': None})
    else:
        request = self.create_request("BUCKET_CREATE", bucket=uri.bucket(),
                                      headers=headers, body=body,
                                      uri_params={'tagging': None})
    debug(u"set_tagging(%s): tagset-xml: %s" % (uri, body))
    response = self.send_request(request)
    return response
def get_tagging(self, uri):
    """Return the tag set of an object or bucket as a list of parsed Tags."""
    debug(u"get_tagging(%s)" % uri)
    if uri.has_object():
        tagging_request = self.create_request("OBJECT_GET", uri=uri,
                                              uri_params={'tagging': None})
    else:
        tagging_request = self.create_request("BUCKET_LIST",
                                              bucket=uri.bucket(),
                                              uri_params={'tagging': None})
    response = self.send_request(tagging_request)
    # extract list of tag sets
    tagsets = getListFromXml(response["data"], "Tag")
    debug(u"%s: Got object tagging" % response['status'])
    return tagsets
def delete_tagging(self, uri):
    """Delete all tags from an object or bucket."""
    debug(u"delete_tagging(%s)" % uri)
    if uri.has_object():
        delete_request = self.create_request("OBJECT_DELETE", uri=uri,
                                             uri_params={'tagging': None})
    else:
        delete_request = self.create_request("BUCKET_DELETE",
                                             bucket=uri.bucket(),
                                             uri_params={'tagging': None})
    return self.send_request(delete_request)
def get_multipart(self, uri, uri_params=None, limit=-1):
    """Collect all in-progress multipart uploads of a bucket into one list."""
    uploads = []
    for _truncated, page in self.get_multipart_streaming(uri, uri_params,
                                                         limit):
        uploads += page
    return uploads
def get_multipart_streaming(self, uri, uri_params=None, limit=-1):
    """Generator listing in-progress multipart uploads of a bucket.

    Yields (truncated, upload_list) pages until the server reports no more
    results or *limit* entries have been produced (-1 means no limit).
    Pagination state is carried via 'key-marker'/'upload-id-marker'.
    """
    uri_params = uri_params and uri_params.copy() or {}
    bucket = uri.bucket()
    truncated = True
    num_objects = 0
    max_keys = limit
    # It is the "uploads: None" in uri_params that will change the
    # behavior of bucket_list to return multiparts instead of keys
    uri_params['uploads'] = None
    while truncated:
        response = self.bucket_list_noparse(bucket, recursive=True,
                                            uri_params=uri_params,
                                            max_keys=max_keys)
        xml_data = response["data"]
        # extract list of info of uploads
        upload_list = getListFromXml(xml_data, "Upload")
        num_objects += len(upload_list)
        if limit > num_objects:
            # Shrink the page size so we never fetch more than requested.
            max_keys = limit - num_objects

        xml_truncated = getTextFromXml(xml_data, ".//IsTruncated")
        if not xml_truncated or xml_truncated.lower() == "false":
            truncated = False

        if truncated:
            if limit == -1 or num_objects < limit:
                if upload_list:
                    next_key = getTextFromXml(xml_data, "NextKeyMarker")
                    if not next_key:
                        next_key = upload_list[-1]["Key"]
                    uri_params['key-marker'] = next_key

                    upload_id_marker = getTextFromXml(
                        xml_data, "NextUploadIdMarker")
                    if upload_id_marker:
                        uri_params['upload-id-marker'] = upload_id_marker
                    elif 'upload-id-marker' in uri_params:
                        # Clear any pre-existing value
                        del uri_params['upload-id-marker']
                else:
                    # Unexpectedly, the server lied, and so the previous
                    # response was not truncated. So, no new key to get.
                    yield False, upload_list
                    break
                debug("Listing continues after '%s'" %
                      uri_params['key-marker'])
            else:
                # Reached the caller-imposed limit; stop after this page.
                yield truncated, upload_list
                break
        yield truncated, upload_list
def list_multipart(self, uri, upload_id, uri_params=None, limit=-1):
    """Collect every already-uploaded part of one multipart upload."""
    parts = []
    for _truncated, page in self.list_multipart_streaming(uri, upload_id,
                                                          uri_params, limit):
        parts += page
    return parts
def list_multipart_streaming(self, uri, upload_id, uri_params=None,
                             limit=-1):
    """Generator listing the parts of one in-progress multipart upload.

    Yields (truncated, part_list) pages until the server reports no more
    results or *limit* parts have been produced (-1 means no limit).
    Pagination state is carried via 'part-number-marker'.
    """
    uri_params = uri_params and uri_params.copy() or {}
    truncated = True
    num_objects = 0
    max_parts = limit
    while truncated:
        response = self.list_multipart_noparse(uri, upload_id,
                                               uri_params, max_parts)
        xml_data = response["data"]
        # extract list of multipart upload parts
        part_list = getListFromXml(xml_data, "Part")
        num_objects += len(part_list)
        if limit > num_objects:
            # Shrink the page size so we never fetch more than requested.
            max_parts = limit - num_objects

        xml_truncated = getTextFromXml(xml_data, ".//IsTruncated")
        if not xml_truncated or xml_truncated.lower() == "false":
            truncated = False

        if truncated:
            if limit == -1 or num_objects < limit:
                if part_list:
                    next_part_number = getTextFromXml(
                        xml_data, "NextPartNumberMarker")
                    if not next_part_number:
                        next_part_number = part_list[-1]["PartNumber"]
                    uri_params['part-number-marker'] = next_part_number
                else:
                    # Unexpectedly, the server lied, and so the previous
                    # response was not truncated. So, no new part to get.
                    yield False, part_list
                    break
                debug("Listing continues after Part '%s'" %
                      uri_params['part-number-marker'])
            else:
                # Reached the caller-imposed limit; stop after this page.
                yield truncated, part_list
                break
        yield truncated, part_list
def list_multipart_noparse(self, uri, upload_id, uri_params=None,
                           max_parts=-1):
    """Single ListParts request; returns the raw (unparsed) response dict."""
    params = {} if uri_params is None else uri_params
    if max_parts != -1:
        params['max-parts'] = str(max_parts)
    params['uploadId'] = upload_id
    parts_request = self.create_request("OBJECT_GET", uri=uri,
                                        uri_params=params)
    return self.send_request(parts_request)
def abort_multipart(self, uri, id):
    """Abort an in-progress multipart upload identified by its uploadId."""
    abort_request = self.create_request("OBJECT_DELETE", uri=uri,
                                        uri_params={'uploadId': id})
    return self.send_request(abort_request)
def get_accesslog(self, uri):
    """Fetch the bucket's access-logging configuration as an AccessLog."""
    logging_request = self.create_request("BUCKET_LIST", bucket=uri.bucket(),
                                          uri_params={'logging': None})
    return AccessLog(self.send_request(logging_request)['data'])
def set_accesslog_acl(self, uri):
    """Grant the S3 log-delivery group READ_ACP and WRITE on the log target bucket."""
    acl = self.get_acl(uri)
    debug("Current ACL(%s): %s" % (uri.uri(), acl))
    for permission in ("READ_ACP", "WRITE"):
        acl.appendGrantee(GranteeLogDelivery(permission))
    debug("Updated ACL(%s): %s" % (uri.uri(), acl))
    self.set_acl(uri, acl)
def set_accesslog(self, uri, enable, log_target_prefix_uri = None, acl_public = False):
    """Enable or disable server access logging on a bucket.

    Returns (accesslog, response). If the target bucket lacks the
    log-delivery ACL, sets it up and retries once.
    """
    accesslog = AccessLog()
    if enable:
        accesslog.enableLogging(log_target_prefix_uri)
        accesslog.setAclPublic(acl_public)
    else:
        accesslog.disableLogging()

    body = "%s" % accesslog
    debug(u"set_accesslog(%s): accesslog-xml: %s" % (uri, body))

    request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(),
                                  body = body, uri_params = {'logging': None})
    try:
        response = self.send_request(request)
    except S3Error as e:
        if e.info['Code'] == "InvalidTargetBucketForLogging":
            # Target bucket refuses log delivery: grant the log-delivery
            # group the needed permissions, then retry the same request.
            info("Setting up log-delivery ACL for target bucket.")
            self.set_accesslog_acl(S3Uri(u"s3://%s" % log_target_prefix_uri.bucket()))
            response = self.send_request(request)
        else:
            raise
    return accesslog, response
def create_request(self, operation, uri = None, bucket = None, object = None, headers = None, body = "", uri_params = None):
    """Build an S3Request for *operation*.

    Either pass *uri* (an S3Uri) or *bucket*/*object* strings, not both.
    Raises ValueError when both forms are supplied.
    """
    resource = { 'bucket' : None, 'uri' : "/" }
    if uri and (bucket or object):
        raise ValueError("Both 'uri' and either 'bucket' or 'object' parameters supplied")
    ## If URI is given use that instead of bucket/object parameters
    if uri:
        bucket = uri.bucket()
        object = uri.has_object() and uri.object() or None
    if bucket:
        resource['bucket'] = bucket
    if object:
        resource['uri'] = "/" + object

    # Map the operation constant to its HTTP method name (the low bits of
    # the operation code select the method via the MASK).
    method_string = S3.http_methods.getkey(S3.operations[operation] & S3.http_methods["MASK"])

    request = S3Request(self, method_string, resource, headers, body, uri_params)

    debug("CreateRequest: resource[uri]=%s", resource['uri'])
    return request
def _fail_wait(self, retries):
# Wait a few seconds. The more it fails the more we wait.
return (self.config.max_retries - retries + 1) * 3
def _http_redirection_handler(self, request, response, fn, *args, **kwargs):
    """Follow an S3 301/307 redirect by re-pointing the bucket hostname
    (and region, when known) and re-invoking *fn* with the given args.

    Raises S3Error when the server gave no usable redirection target.
    """
    # Region info might already be available through the x-amz-bucket-region header
    redir_region = response['headers'].get('x-amz-bucket-region')

    if 'data' in response and len(response['data']) > 0:
        # Normal case: the redirect target is in the XML error body.
        redir_bucket = getTextFromXml(response['data'], ".//Bucket")
        redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
        self.set_hostname(redir_bucket, redir_hostname)
        info(u'Redirected to: %s', redir_hostname)
        if redir_region:
            S3Request.region_map[redir_bucket] = redir_region
            info(u'Redirected to region: %s', redir_region)
        return fn(*args, **kwargs)
    elif request.method_string == 'HEAD':
        # Head is a special case, redirection info usually are in the body
        # but there is no body for an HEAD request.
        location_url = response['headers'].get('location')
        if location_url:
            # Sometimes a "location" http header could be available and
            # can help us deduce the redirection path.
            # It is the case of "dns-style" syntax, but not for "path-style" syntax.
            if location_url.startswith("http://"):
                location_url = location_url[7:]
            elif location_url.startswith("https://"):
                location_url = location_url[8:]
            location_url = urlparse('https://' + location_url).hostname
            redir_bucket = request.resource['bucket']
            self.set_hostname(redir_bucket, location_url)
            info(u'Redirected to: %s', location_url)
            if redir_region:
                S3Request.region_map[redir_bucket] = redir_region
                info(u'Redirected to region: %s', redir_region)
            return fn(*args, **kwargs)

    warning(u'Redirection error: No info provided by the server to where should be forwarded the request (HEAD request). (Hint target region: %s)', redir_region)
    raise S3Error(response)
def _http_400_handler(self, request, response, fn, *args, **kwargs):
    """
    Handle known recoverable HTTP 400 causes (wrong region, signature
    version mismatch) by adjusting state and re-invoking *fn*.

    Returns None if no handler available for the specific error code
    """
    # AWS response AuthorizationHeaderMalformed means we sent the request to the wrong region
    # get the right region out of the response and send it there.
    if 'data' in response and len(response['data']) > 0:
        failureCode = getTextFromXml(response['data'], 'Code')
        if failureCode == 'AuthorizationHeaderMalformed':
            # we sent the request to the wrong region
            region = getTextFromXml(response['data'], 'Region')
            if region is not None:
                S3Request.region_map[request.resource['bucket']] = region
                info('Forwarding request to %s', region)
                return fn(*args, **kwargs)
            else:
                warning(u'Could not determine bucket the location. Please consider using the --region parameter.')

        elif failureCode == 'InvalidRequest':
            message = getTextFromXml(response['data'], 'Message')
            if message == 'The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.':
                debug(u'Endpoint requires signature v4')
                self.endpoint_requires_signature_v4 = True
                return fn(*args, **kwargs)

        elif failureCode == 'InvalidArgument':
            # returned by DreamObjects on send_request and send_file,
            # which doesn't support signature v4. Retry with signature v2
            if not request.use_signature_v2() and not self.fallback_to_signature_v2: # have not tried with v2 yet
                debug(u'Falling back to signature v2')
                self.fallback_to_signature_v2 = True
                return fn(*args, **kwargs)
    else:
        # returned by DreamObjects on recv_file, which doesn't support signature v4. Retry with signature v2
        if not request.use_signature_v2() and not self.fallback_to_signature_v2:
            # have not tried with v2 yet
            debug(u'Falling back to signature v2')
            self.fallback_to_signature_v2 = True
            return fn(*args, **kwargs)

    return None
def _http_403_handler(self, request, response, fn, *args, **kwargs):
    """Handle HTTP 403: fall back to signature v2 for servers that reject
    v4 with a specific AccessDenied message (Eucalyptus walrus); otherwise
    raise S3Error.
    """
    if 'data' in response and len(response['data']) > 0:
        failureCode = getTextFromXml(response['data'], 'Code')
        if failureCode == 'AccessDenied':
            # traditional HTTP 403
            message = getTextFromXml(response['data'], 'Message')
            if message == 'AWS authentication requires a valid Date or x-amz-date header': # message from an Eucalyptus walrus server
                if not request.use_signature_v2() and not self.fallback_to_signature_v2: # have not tried with v2 yet
                    debug(u'Falling back to signature v2')
                    self.fallback_to_signature_v2 = True
                    return fn(*args, **kwargs)

    raise S3Error(response)
def update_region_inner_request(self, request):
    """Get and update region for the request if needed.

    Signature v4 needs the region of the bucket or the request will fail
    with the indication of the correct region.
    We are trying to avoid this failure by pre-emptively getting the
    correct region to use, if not provided by the user.
    """
    # Only bother when v4 signing is in play and the region is still the
    # "US" default (i.e. not yet resolved for this bucket).
    if request.resource.get('bucket') and not request.use_signature_v2() \
       and S3Request.region_map.get(
            request.resource['bucket'], Config().bucket_location
       ) == "US":
        debug("===== SEND Inner request to determine the bucket region "
              "=====")
        try:
            s3_uri = S3Uri(u's3://' + request.resource['bucket'])
            # "force_us_default" should prevent infinite recursivity because
            # it will set the region_map dict.
            region = self.get_bucket_location(s3_uri, force_us_default=True)
            if region is not None:
                S3Request.region_map[request.resource['bucket']] = region
            debug("===== SUCCESS Inner request to determine the bucket "
                  "region (%r) =====", region)
        except Exception as exc:
            # Ignore errors, it is just an optimisation, so nothing critical
            debug("getlocation inner request failure reason: %s", exc)
            debug("===== FAILED Inner request to determine the bucket "
                  "region =====")
def send_request(self, request, retries=None):
    """Send *request* and return a dict with status/reason/headers/data.

    Retries transient network failures and retryable S3 error codes with a
    progressive back-off (_fail_wait), and delegates 3xx/400/403 responses
    to the dedicated handlers. Raises S3Error / S3RequestError on failure.
    """
    if retries is None:
        retries = self.config.max_retries
    # Signature v4 needs the correct bucket region; resolve it up-front.
    self.update_region_inner_request(request)

    request.body = encode_to_s3(request.body)
    headers = request.headers

    method_string, resource, headers = request.get_triplet()
    response = {}
    debug("Processing request, please wait...")

    conn = None
    try:
        conn = ConnMan.get(self.get_hostname(resource['bucket']))
        # TODO: Check what was supposed to be the usage of conn.path here
        # Currently this is always "None" all the time as not defined in ConnMan
        uri = self.format_uri(resource, conn.path)
        debug("Sending request method_string=%r, uri=%r, headers=%r, body=(%i bytes)" % (method_string, uri, headers, len(request.body or "")))
        conn.c.request(method_string, uri, request.body, headers)
        http_response = conn.c.getresponse()
        response["status"] = http_response.status
        response["reason"] = http_response.reason
        response["headers"] = convertHeaderTupleListToDict(http_response.getheaders())
        response["data"] = http_response.read()
        if "x-amz-meta-s3cmd-attrs" in response["headers"]:
            attrs = parse_attrs_header(response["headers"]["x-amz-meta-s3cmd-attrs"])
            response["s3cmd-attrs"] = attrs
        ConnMan.put(conn)
    except (S3SSLError, S3SSLCertificateError):
        # In case of failure to validate the certificate for a ssl
        # connection,no need to retry, abort immediately
        raise
    except (IOError, Exception) as e:
        debug("Response:\n" + pprint.pformat(response))
        # Re-raise immediately for errors that a retry cannot fix; broken
        # pipe/connection-reset/timeout style errors fall through to retry.
        if ((hasattr(e, 'errno') and e.errno
             and e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT))
            or "[Errno 104]" in str(e)
            or "[Errno 32]" in str(e)
           ) and not isinstance(e, SocketTimeoutException):
            raise
        # When the connection is broken, BadStatusLine is raised with py2
        # and RemoteDisconnected is raised by py3 with a trap:
        # RemoteDisconnected has an errno field with a None value.
        # close the connection and re-establish
        ConnMan.close(conn)
        if retries:
            warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
            warning("Waiting %d sec..." % self._fail_wait(retries))
            time.sleep(self._fail_wait(retries))
            return self.send_request(request, retries - 1)
        else:
            raise S3RequestError("Request failed for: %s" % resource['uri'])
    except:
        # Only KeyboardInterrupt and SystemExit will not be covered by Exception
        debug("Response:\n" + pprint.pformat(response))
        raise

    debug("Response:\n" + pprint.pformat(response))

    if response["status"] in [301, 307]:
        ## RedirectTemporary or RedirectPermanent
        return self._http_redirection_handler(request, response, self.send_request, request)

    if response["status"] == 400:
        handler_fn = self._http_400_handler(request, response, self.send_request, request)
        if handler_fn:
            return handler_fn
        err = S3Error(response)
        # These 400-level codes are transient and worth retrying.
        if retries and err.code in ['BadDigest', 'OperationAborted',
                                    'TokenRefreshRequired', 'RequestTimeout']:
            warning(u"Retrying failed request: %s (%s)" % (resource['uri'], err))
            warning("Waiting %d sec..." % self._fail_wait(retries))
            time.sleep(self._fail_wait(retries))
            return self.send_request(request, retries - 1)
        raise err

    if response["status"] == 403:
        return self._http_403_handler(request, response, self.send_request, request)
    if response["status"] == 405: # Method Not Allowed. Don't retry.
        raise S3Error(response)

    # Server errors (5xx) and throttling (429) are retried with back-off.
    if response["status"] >= 500 or response["status"] == 429:
        e = S3Error(response)
        if response["status"] == 501:
            ## NotImplemented server error - no need to retry
            retries = 0
        if retries:
            warning(u"Retrying failed request: %s (%s)" % (resource['uri'], e))
            warning("Waiting %d sec..." % self._fail_wait(retries))
            time.sleep(self._fail_wait(retries))
            return self.send_request(request, retries - 1)
        else:
            raise e

    if response["status"] < 200 or response["status"] > 299:
        raise S3Error(response)

    return response
def send_request_with_progress(self, request, labels, operation_size=0):
    """Run send_request while driving a progress meter.

    Wrapper for requests that may be slow, so the user gets feedback.
    When the progress meter is disabled, a notice is logged and the
    request is simply delegated to send_request.
    """
    if not self.config.progress_meter:
        info("Sending slow request, please wait...")
        return self.send_request(request)

    labels.setdefault(u'action', u'request')
    meter = self.config.progress_class(labels, operation_size)
    try:
        result = self.send_request(request)
    except Exception:
        meter.done("failed")
        raise
    meter.update(current_position=operation_size)
    meter.done("done")
    return result
def send_file(self, request, stream, labels, buffer = '', throttle = 0,
              retries = None, offset = 0, chunk_size = -1,
              use_expect_continue = None):
    """Upload the request body from `stream` (or from `buffer` if non-empty).

    Handles HTTP 100-continue negotiation, bandwidth limiting
    (config.limitrate) and throttling, retries with back-off, 3xx/4xx
    handlers and MD5 verification of the uploaded data against the ETag.
    Returns the parsed response dict; raises S3UploadError/S3Error on
    unrecoverable failures.
    """
    if retries is None:
        retries = self.config.max_retries
    self.update_region_inner_request(request)

    if use_expect_continue is None:
        use_expect_continue = self.config.use_http_expect
    if self.expect_continue_not_supported and use_expect_continue:
        use_expect_continue = False

    headers = request.headers

    size_left = size_total = int(headers["content-length"])
    filename = stream.stream_name

    if self.config.progress_meter:
        labels[u'action'] = u'upload'
        progress = self.config.progress_class(labels, size_total)
    else:
        info("Sending file '%s', please wait..." % filename)
    timestamp_start = time.time()

    # Pre-compute the payload hash needed for sigv4 signing.
    if buffer:
        sha256_hash = checksum_sha256_buffer(buffer, offset, size_total)
    else:
        sha256_hash = checksum_sha256_file(stream, offset, size_total)
    request.body = sha256_hash

    if use_expect_continue:
        if not size_total:
            # No point negotiating 100-continue for an empty body.
            use_expect_continue = False
        else:
            headers['expect'] = '100-continue'

    method_string, resource, headers = request.get_triplet()
    try:
        conn = ConnMan.get(self.get_hostname(resource['bucket']))
        conn.c.putrequest(method_string, self.format_uri(resource, conn.path))
        for header in headers.keys():
            conn.c.putheader(encode_to_s3(header), encode_to_s3(headers[header]))
        conn.c.endheaders()
    except ParameterError as e:
        raise
    except Exception as e:
        if self.config.progress_meter:
            progress.done("failed")
        if retries:
            warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
            warning("Waiting %d sec..." % self._fail_wait(retries))
            time.sleep(self._fail_wait(retries))
            # Connection error -> same throttle value
            return self.send_file(request, stream, labels, buffer, throttle, retries - 1, offset, chunk_size)
        else:
            raise S3UploadError("Upload failed for: %s" % resource['uri'])
    if buffer == '':
        stream.seek(offset)
    md5_hash = md5()

    try:
        http_response = None
        if use_expect_continue:
            # Wait for the 100-Continue before sending the content
            readable, writable, exceptional = select.select([conn.c.sock],[], [], EXPECT_CONTINUE_TIMEOUT)
            if readable:
                # 100-CONTINUE STATUS RECEIVED, get it before continuing.
                http_response = conn.c.getresponse()
            elif not writable and not exceptional:
                # No reply at all within the timeout: remember that the
                # server/proxy doesn't support Expect and stop using it.
                warning("HTTP Expect Continue feature disabled because of no reply of the server in %.2fs.", EXPECT_CONTINUE_TIMEOUT)
                self.expect_continue_not_supported = True
                use_expect_continue = False

        if not use_expect_continue or (http_response and http_response.status == ConnMan.CONTINUE):
            if http_response:
                # CONTINUE case. Reset the response
                http_response.read()
                conn.c._HTTPConnection__state = ConnMan._CS_REQ_SENT

            while size_left > 0:
                #debug("SendFile: Reading up to %d bytes from '%s' - remaining bytes: %s" % (self.config.send_chunk, filename, size_left))
                l = min(self.config.send_chunk, size_left)
                if buffer == '':
                    data = stream.read(l)
                else:
                    data = buffer
                if not data:
                    raise InvalidFileError("File smaller than expected. Was the file truncated?")

                if self.config.limitrate > 0:
                    start_time = time.time()

                md5_hash.update(data)
                conn.c.wrapper_send_body(data)
                if self.config.progress_meter:
                    progress.update(delta_position = len(data))
                size_left -= len(data)

                #throttle
                limitrate_throttle = throttle
                if self.config.limitrate > 0:
                    real_duration = time.time() - start_time
                    expected_duration = float(l) / self.config.limitrate
                    # Stretch the sleep so the effective rate stays under limitrate.
                    limitrate_throttle = max(expected_duration - real_duration, limitrate_throttle)
                if limitrate_throttle:
                    time.sleep(min(limitrate_throttle, self.config.throttle_max))

            md5_computed = md5_hash.hexdigest()

            http_response = conn.c.getresponse()

        response = {}
        response["status"] = http_response.status
        response["reason"] = http_response.reason
        response["headers"] = convertHeaderTupleListToDict(http_response.getheaders())
        response["data"] = http_response.read()
        response["size"] = size_total
        ConnMan.put(conn)
        debug(u"Response:\n" + pprint.pformat(response))
    except ParameterError as e:
        raise
    except InvalidFileError as e:
        if self.config.progress_meter:
            progress.done("failed")
        raise
    except Exception as e:
        if self.config.progress_meter:
            progress.done("failed")
        if retries:
            known_error = False
            if ((hasattr(e, 'errno') and e.errno
                 and e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT))
                or "[Errno 104]" in str(e) or "[Errno 32]" in str(e)
               ) and not isinstance(e, SocketTimeoutException):
                # We have to detect these errors by looking at the error string
                # Connection reset by peer and Broken pipe
                # The server broke the connection early with an error like
                # in a HTTP Expect Continue case even if asked nothing.
                try:
                    http_response = conn.c.getresponse()
                    response = {}
                    response["status"] = http_response.status
                    response["reason"] = http_response.reason
                    response["headers"] = convertHeaderTupleListToDict(http_response.getheaders())
                    response["data"] = http_response.read()
                    response["size"] = size_total
                    known_error = True
                except Exception:
                    error("Cannot retrieve any response status before encountering an EPIPE or ECONNRESET exception")
            if not known_error:
                warning("Upload failed: %s (%s)" % (resource['uri'], e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                # Connection error -> same throttle value
                return self.send_file(request, stream, labels, buffer, throttle,
                                      retries - 1, offset, chunk_size, use_expect_continue)
        else:
            debug("Giving up on '%s' %s" % (filename, e))
            raise S3UploadError("Upload failed for: %s" % resource['uri'])

    timestamp_end = time.time()
    response["elapsed"] = timestamp_end - timestamp_start
    response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)

    if self.config.progress_meter:
        ## Finalising the upload takes some time -> update() progress meter
        ## to correct the average speed. Otherwise people will complain that
        ## 'progress' and response["speed"] are inconsistent ;-)
        progress.update()
        progress.done("done")

    if response["status"] in [301, 307]:
        ## RedirectTemporary or RedirectPermanent
        return self._http_redirection_handler(request, response,
                                              self.send_file, request, stream, labels, buffer, offset = offset, chunk_size = chunk_size, use_expect_continue = use_expect_continue)

    if response["status"] == 400:
        handler_fn = self._http_400_handler(request, response,
                                            self.send_file, request, stream, labels, buffer, offset = offset, chunk_size = chunk_size, use_expect_continue = use_expect_continue)
        if handler_fn:
            return handler_fn
        err = S3Error(response)
        if err.code not in ['BadDigest', 'OperationAborted',
                            'TokenRefreshRequired', 'RequestTimeout']:
            raise err
        # else the error will be handled later with a retry

    if response["status"] == 403:
        return self._http_403_handler(request, response,
                                      self.send_file, request, stream, labels, buffer, offset = offset, chunk_size = chunk_size, use_expect_continue = use_expect_continue)

    if response["status"] == 417 and retries:
        # Expect 100-continue not supported by proxy/server
        self.expect_continue_not_supported = True
        return self.send_file(request, stream, labels, buffer, throttle,
                              retries - 1, offset, chunk_size, use_expect_continue = False)

    # S3 from time to time doesn't send ETag back in a response :-(
    # Force re-upload here.
    if 'etag' not in response['headers']:
        response['headers']['etag'] = ''

    if response["status"] < 200 or response["status"] > 299:
        try_retry = False
        if response["status"] >= 500:
            # AWS internal error - retry
            try_retry = True
            if response["status"] == 503:
                ## SlowDown error
                throttle = throttle and throttle * 5 or 0.01
            elif response["status"] == 507:
                # Not an AWS error, but s3 compatible server possible error:
                # InsufficientStorage
                try_retry = False
        elif response["status"] == 429:
            # Not an AWS error, but s3 compatible server possible error:
            # TooManyRequests/Busy/slowdown
            try_retry = True
            throttle = throttle and throttle * 5 or 0.01
        elif response["status"] >= 400:
            err = S3Error(response)
            ## Retriable client error?
            if err.code in ['BadDigest', 'OperationAborted', 'TokenRefreshRequired', 'RequestTimeout']:
                try_retry = True

        err = S3Error(response)
        if try_retry:
            if retries:
                warning("Upload failed: %s (%s)" % (resource['uri'], err))
                if throttle:
                    warning("Retrying on lower speed (throttle=%0.2f)" % throttle)
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                return self.send_file(request, stream, labels, buffer, throttle,
                                      retries - 1, offset, chunk_size, use_expect_continue)
            else:
                warning("Too many failures. Giving up on '%s'" % filename)
                raise S3UploadError("%s" % err)

        ## Non-recoverable error
        raise err

    debug("MD5 sums: computed=%s, received=%s" % (md5_computed, response["headers"].get('etag', '').strip('"\'')))
    ## when using KMS encryption, MD5 etag value will not match
    md5_from_s3 = response["headers"].get("etag", "").strip('"\'')
    # Multipart ETags contain '-' and are not plain MD5s - skip those.
    if ('-' not in md5_from_s3) and (md5_from_s3 != md5_hash.hexdigest()) and response["headers"].get("x-amz-server-side-encryption") != 'aws:kms':
        warning("MD5 Sums don't match!")
        if retries:
            warning("Retrying upload of %s" % (filename))
            return self.send_file(request, stream, labels, buffer, throttle,
                                  retries - 1, offset, chunk_size, use_expect_continue)
        else:
            warning("Too many failures. Giving up on '%s'" % filename)
            raise S3UploadError("MD5 sums of sent and received files don't match!")

    return response
def send_file_multipart(self, stream, headers, uri, size, extra_label=""):
    """Upload `size` bytes from `stream` to `uri` via S3 multipart upload.

    Returns the CompleteMultipartUpload response dict augmented with
    'elapsed', 'size' and 'speed' entries. Raises S3UploadError when the
    completion body carries an <Error> document (which S3 may return
    even with a 200 status).
    """
    timestamp_start = time.time()
    upload = MultiPartUpload(self, stream, uri, headers, size)
    upload.upload_all_parts(extra_label)
    response = upload.complete_multipart_upload()
    timestamp_end = time.time()
    response["elapsed"] = timestamp_end - timestamp_start
    response["size"] = size
    response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
    if response["data"] and getRootTagName(response["data"]) == "Error":
        #http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
        # Error Complete Multipart UPLOAD, status may be 200
        # raise S3UploadError
        raise S3UploadError(getTextFromXml(response["data"], 'Message'))
    return response
def copy_file_multipart(self, src_uri, dst_uri, size, headers,
                        extra_label=""):
    """Server-side multipart copy of src_uri to dst_uri.

    Delegates to send_file_multipart with src_uri passed in the `stream`
    position - presumably MultiPartUpload detects an S3 URI source and
    issues copy-part requests instead of reading a local stream;
    TODO(review): confirm against MultiPartUpload's implementation.
    """
    return self.send_file_multipart(src_uri, headers, dst_uri, size,
                                    extra_label)
def recv_file(self, request, stream, labels, start_position=0, retries=None):
    """Download the object addressed by `request` into `stream`.

    Supports resuming via a Range header when start_position > 0,
    bandwidth limiting, retries with back-off, redirect/403/400 handlers
    and MD5 verification. Returns the response dict with 'md5',
    'md5match', 'elapsed', 'size' and 'speed' entries added.
    """
    if retries is None:
        retries = self.config.max_retries
    self.update_region_inner_request(request)

    method_string, resource, headers = request.get_triplet()
    filename = stream.stream_name
    if self.config.progress_meter:
        labels[u'action'] = u'download'
        progress = self.config.progress_class(labels, 0)
    else:
        info("Receiving file '%s', please wait..." % filename)
    timestamp_start = time.time()

    conn = None
    try:
        conn = ConnMan.get(self.get_hostname(resource['bucket']))
        conn.c.putrequest(method_string, self.format_uri(resource, conn.path))
        for header in headers.keys():
            conn.c.putheader(encode_to_s3(header), encode_to_s3(headers[header]))
        if start_position > 0:
            debug("Requesting Range: %d .. end" % start_position)
            conn.c.putheader("Range", "bytes=%d-" % start_position)
        conn.c.endheaders()
        response = {}
        http_response = conn.c.getresponse()
        response["status"] = http_response.status
        response["reason"] = http_response.reason
        response["headers"] = convertHeaderTupleListToDict(http_response.getheaders())
        if "x-amz-meta-s3cmd-attrs" in response["headers"]:
            # Restore the file attributes s3cmd stored at upload time.
            attrs = parse_attrs_header(response["headers"]["x-amz-meta-s3cmd-attrs"])
            response["s3cmd-attrs"] = attrs
        debug("Response:\n" + pprint.pformat(response))
    except ParameterError as e:
        raise
    except (IOError, Exception) as e:
        if self.config.progress_meter:
            progress.done("failed")
        if ((hasattr(e, 'errno') and e.errno and
             e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT))
            or "[Errno 104]" in str(e) or "[Errno 32]" in str(e)
           ) and not isinstance(e, SocketTimeoutException):
            raise
        # close the connection and re-establish
        ConnMan.close(conn)
        if retries:
            warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
            warning("Waiting %d sec..." % self._fail_wait(retries))
            time.sleep(self._fail_wait(retries))
            # Connection error -> same throttle value
            return self.recv_file(request, stream, labels, start_position,
                                  retries=retries - 1)
        else:
            raise S3DownloadError("Download failed for: %s" % resource['uri'])

    if response["status"] < 200 or response["status"] > 299:
        # In case of error, we still need to flush the read buffer to be able to reuse
        # the connection
        response['data'] = http_response.read()

    if response["status"] in [301, 307]:
        ## RedirectPermanent or RedirectTemporary
        return self._http_redirection_handler(request, response,
                                              self.recv_file, request,
                                              stream, labels, start_position)

    if response["status"] == 400:
        handler_fn = self._http_400_handler(request, response, self.recv_file,
                                            request, stream, labels, start_position)
        if handler_fn:
            return handler_fn
        raise S3Error(response)

    if response["status"] == 403:
        return self._http_403_handler(request, response, self.recv_file,
                                      request, stream, labels, start_position)

    if response["status"] < 200 or response["status"] > 299:
        try_retry = False
        if response["status"] == 429:
            # Not an AWS error, but s3 compatible server possible error:
            # TooManyRequests/Busy/slowdown
            try_retry = True
        elif response["status"] == 503:
            # SlowDown error
            try_retry = True

        if try_retry:
            resource_uri = resource['uri']
            if retries:
                retry_delay = self._fail_wait(retries)
                warning("Retrying failed request: %s (%s)"
                        % (resource_uri, S3Error(response)))
                warning("Waiting %d sec..." % retry_delay)
                time.sleep(retry_delay)
                return self.recv_file(request, stream, labels, start_position,
                                      retries=retries - 1)
            else:
                warning("Too many failures. Giving up on '%s'" % resource_uri)
                raise S3DownloadError("Download failed for: %s" % resource_uri)

        # Non-recoverable error
        raise S3Error(response)

    if start_position == 0:
        # Only compute MD5 on the fly if we're downloading from beginning
        # Otherwise we'd get a nonsense.
        md5_hash = md5()
    size_left = int(response["headers"]["content-length"])
    size_total = start_position + size_left
    current_position = start_position

    if self.config.progress_meter:
        progress.total_size = size_total
        progress.initial_position = current_position
        progress.current_position = current_position

    try:
        # Fix for issue #432. Even when content size is 0, httplib expect the response to be read.
        if size_left == 0:
            data = http_response.read(1)
            # It is not supposed to be some data returned in that case
            assert(len(data) == 0)
        while (current_position < size_total):
            this_chunk = size_left > self.config.recv_chunk and self.config.recv_chunk or size_left

            if self.config.limitrate > 0:
                start_time = time.time()

            data = http_response.read(this_chunk)
            if len(data) == 0:
                raise S3ResponseError("EOF from S3!")

            #throttle
            if self.config.limitrate > 0:
                real_duration = time.time() - start_time
                expected_duration = float(this_chunk) / self.config.limitrate
                if expected_duration > real_duration:
                    time.sleep(expected_duration - real_duration)

            stream.write(data)
            if start_position == 0:
                md5_hash.update(data)
            current_position += len(data)
            ## Call progress meter from here...
            if self.config.progress_meter:
                progress.update(delta_position = len(data))
        ConnMan.put(conn)
    except OSError:
        raise
    except (IOError, Exception) as e:
        if self.config.progress_meter:
            progress.done("failed")
        if ((hasattr(e, 'errno') and e.errno
             and e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT))
            or "[Errno 104]" in str(e) or "[Errno 32]" in str(e)
           ) and not isinstance(e, SocketTimeoutException):
            raise
        # close the connection and re-establish
        ConnMan.close(conn)
        if retries:
            warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
            warning("Waiting %d sec..." % self._fail_wait(retries))
            time.sleep(self._fail_wait(retries))
            # Connection error -> same throttle value
            # Resume from whatever we already wrote to the stream.
            return self.recv_file(request, stream, labels, current_position,
                                  retries=retries - 1)
        else:
            raise S3DownloadError("Download failed for: %s" % resource['uri'])

    stream.flush()
    timestamp_end = time.time()

    if self.config.progress_meter:
        ## The above stream.flush() may take some time -> update() progress meter
        ## to correct the average speed. Otherwise people will complain that
        ## 'progress' and response["speed"] are inconsistent ;-)
        progress.update()
        progress.done("done")

    md5_from_s3 = response["headers"].get("etag", "").strip('"\'')
    if not 'x-amz-meta-s3tools-gpgenc' in response["headers"]:
        # we can't trust our stored md5 because we
        # encrypted the file after calculating it but before
        # uploading it.
        try:
            md5_from_s3 = response["s3cmd-attrs"]["md5"]
        except KeyError:
            pass
    # we must have something to compare against to bother with the calculation
    if '-' not in md5_from_s3:
        if start_position == 0:
            # Only compute MD5 on the fly if we were downloading from the beginning
            response["md5"] = md5_hash.hexdigest()
        else:
            # Otherwise try to compute MD5 of the output file
            try:
                response["md5"] = hash_file_md5(filename)
            except IOError as e:
                if e.errno != errno.ENOENT:
                    warning("Unable to open file: %s: %s" % (filename, e))
                warning("Unable to verify MD5. Assume it matches.")

    response["md5match"] = response.get("md5") == md5_from_s3
    response["elapsed"] = timestamp_end - timestamp_start
    response["size"] = current_position
    response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
    if response["size"] != start_position + int(response["headers"]["content-length"]):
        warning("Reported size (%s) does not match received size (%s)" % (
            start_position + int(response["headers"]["content-length"]), response["size"]))
    debug("ReceiveFile: Computed MD5 = %s" % response.get("md5"))
    # avoid ETags from multipart uploads that aren't the real md5
    if ('-' not in md5_from_s3 and not response["md5match"]) and (response["headers"].get("x-amz-server-side-encryption") != 'aws:kms'):
        warning("MD5 signatures do not match: computed=%s, received=%s" % (
            response.get("md5"), md5_from_s3))
    return response
# Export the S3 class as part of the module's public API.
__all__.append("S3")
def parse_attrs_header(attrs_header):
    """Parse an 'x-amz-meta-s3cmd-attrs' header value into a dict.

    The header encodes file attributes as "key1:value1/key2:value2/...".
    Splitting on the first ':' only keeps values that themselves contain
    a ':' intact instead of raising ValueError.
    """
    return dict(attr.split(":", 1) for attr in attrs_header.split("/"))
# vim:et:ts=4:sts=4:ai
| 104,820 | Python | .py | 2,033 | 37.701918 | 183 | 0.563025 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,397 | ACL.py | s3tools_s3cmd/S3/ACL.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon S3 - Access Control List representation
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import, print_function
import sys
from .BaseUtils import getTreeFromXml, encode_to_s3, decode_from_s3
from .Utils import deunicodise
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
PY3 = (sys.version_info >= (3, 0))
class Grantee(object):
    """A single ACL grant: who (tag/name) gets which permission.

    `tag` is one of ID / URI / EmailAddress, `name` the matching value,
    `permission` e.g. READ, WRITE or FULL_CONTROL.
    """
    ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
    LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"

    def __init__(self):
        self.xsi_type = None
        self.tag = None
        self.name = None
        self.display_name = ''
        self.permission = None

    def __repr__(self):
        return repr('Grantee("%s", "%s", "%s")'
                    % (self.tag, self.name, self.permission))

    def isAllUsers(self):
        """True when this grant targets the global AllUsers group."""
        return self.tag == "URI" and self.name == Grantee.ALL_USERS_URI

    def isAnonRead(self):
        """True when anonymous users may read."""
        return self.isAllUsers() and self.permission in ("READ", "FULL_CONTROL")

    def isAnonWrite(self):
        """True when anonymous users may write."""
        return self.isAllUsers() and self.permission in ("WRITE", "FULL_CONTROL")

    def getElement(self):
        """Serialize this grant back to a <Grant /> Element."""
        grant_el = ET.Element("Grant")
        grantee_el = ET.SubElement(grant_el, "Grantee", {
            'xmlns:xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:type' : self.xsi_type
        })
        name_el = ET.SubElement(grantee_el, self.tag)
        name_el.text = self.name
        perm_el = ET.SubElement(grant_el, "Permission")
        perm_el.text = self.permission
        return grant_el
class GranteeAnonRead(Grantee):
    """Canned grant giving READ permission to the AllUsers group."""
    def __init__(self):
        super(GranteeAnonRead, self).__init__()
        self.xsi_type = "Group"
        self.tag = "URI"
        self.name = Grantee.ALL_USERS_URI
        self.permission = "READ"
class GranteeLogDelivery(Grantee):
    """Canned grant for the S3 LogDelivery group.

    `permission` must be either READ_ACP or WRITE.
    """
    def __init__(self, permission):
        super(GranteeLogDelivery, self).__init__()
        self.xsi_type = "Group"
        self.tag = "URI"
        self.name = Grantee.LOG_DELIVERY_URI
        self.permission = permission
class ACL(object):
    """In-memory representation of an S3 AccessControlPolicy XML document.

    Parses the owner and the list of Grantee entries from XML and can
    modify grants and serialize the policy back to XML.
    """
    EMPTY_ACL = b"<AccessControlPolicy><Owner><ID></ID></Owner><AccessControlList></AccessControlList></AccessControlPolicy>"

    def __init__(self, xml = None):
        # Start from an empty policy when no XML is supplied.
        if not xml:
            xml = ACL.EMPTY_ACL

        self.grantees = []
        self.owner_id = ""
        self.owner_nick = ""

        tree = getTreeFromXml(encode_to_s3(xml))
        self.parseOwner(tree)
        self.parseGrants(tree)

    def parseOwner(self, tree):
        # Extract the bucket owner's canonical id and display name.
        self.owner_id = tree.findtext(".//Owner//ID")
        self.owner_nick = tree.findtext(".//Owner//DisplayName")

    def parseGrants(self, tree):
        # Build one Grantee object per <Grant> element.
        for grant in tree.findall(".//Grant"):
            grantee = Grantee()
            g = grant.find(".//Grantee")
            grantee.xsi_type = g.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
            grantee.permission = grant.find('Permission').text
            for el in g:
                if el.tag == "DisplayName":
                    grantee.display_name = el.text
                else:
                    # The remaining child is ID / URI / EmailAddress.
                    grantee.tag = el.tag
                    grantee.name = el.text
            self.grantees.append(grantee)

    def getGrantList(self):
        """Return a list of {'grantee', 'permission'} dicts for display."""
        acl = []
        for grantee in self.grantees:
            if grantee.display_name:
                user = grantee.display_name
            elif grantee.isAllUsers():
                user = "*anon*"
            else:
                user = grantee.name
            acl.append({'grantee': user, 'permission': grantee.permission})
        return acl

    def getOwner(self):
        """Return the owner's canonical id and display name."""
        return { 'id' : self.owner_id, 'nick' : self.owner_nick }

    def isAnonRead(self):
        # True when any grant allows anonymous read.
        for grantee in self.grantees:
            if grantee.isAnonRead():
                return True
        return False

    def isAnonWrite(self):
        # True when any grant allows anonymous write.
        for grantee in self.grantees:
            if grantee.isAnonWrite():
                return True
        return False

    def grantAnonRead(self):
        # Add an AllUsers/READ grant, unless anonymous read already allowed.
        if not self.isAnonRead():
            self.appendGrantee(GranteeAnonRead())

    def revokeAnonRead(self):
        self.grantees = [g for g in self.grantees if not g.isAnonRead()]

    def revokeAnonWrite(self):
        self.grantees = [g for g in self.grantees if not g.isAnonWrite()]

    def appendGrantee(self, grantee):
        self.grantees.append(grantee)

    def hasGrant(self, name, permission):
        """True when `name` already holds `permission` (FULL_CONTROL implies all)."""
        name = name.lower()
        permission = permission.upper()

        for grantee in self.grantees:
            if grantee.name.lower() == name:
                if grantee.permission == "FULL_CONTROL":
                    return True
                elif grantee.permission.upper() == permission:
                    return True

        return False

    def grant(self, name, permission):
        """Grant `permission` to `name` (id, email address or group URI)."""
        if self.hasGrant(name, permission):
            return

        permission = permission.upper()

        if "ALL" == permission:
            permission = "FULL_CONTROL"

        if "FULL_CONTROL" == permission:
            # Full control supersedes any other grant for the same user.
            self.revoke(name, "ALL")

        grantee = Grantee()
        grantee.name = name
        grantee.permission = permission

        # Pick the grantee type from the shape of the name.
        if '@' in name:
            grantee.name = grantee.name.lower()
            grantee.xsi_type = "AmazonCustomerByEmail"
            grantee.tag = "EmailAddress"
        elif 'http://acs.amazonaws.com/groups/' in name:
            grantee.xsi_type = "Group"
            grantee.tag = "URI"
        else:
            grantee.name = grantee.name.lower()
            grantee.xsi_type = "CanonicalUser"
            grantee.tag = "ID"

        self.appendGrantee(grantee)

    def revoke(self, name, permission):
        """Remove grants for `name`; "ALL" drops every permission it holds."""
        name = name.lower()
        permission = permission.upper()

        if "ALL" == permission:
            self.grantees = [g for g in self.grantees if not (g.name.lower() == name or (g.display_name is not None and g.display_name.lower() == name))]
        else:
            self.grantees = [g for g in self.grantees if not (((g.display_name is not None and g.display_name.lower() == name) or g.name.lower() == name)
                                                              and g.permission.upper() == permission)]

    def get_printable_tree(self):
        # Rebuild the AccessControlPolicy Element from owner + grants.
        tree = getTreeFromXml(ACL.EMPTY_ACL)
        tree.attrib['xmlns'] = "http://s3.amazonaws.com/doc/2006-03-01/"
        owner = tree.find(".//Owner//ID")
        owner.text = self.owner_id
        acl = tree.find(".//AccessControlList")
        for grantee in self.grantees:
            acl.append(grantee.getElement())
        return tree

    def __unicode__(self):
        return decode_from_s3(ET.tostring(self.get_printable_tree()))

    def __str__(self):
        if PY3:
            # Return unicode
            return ET.tostring(self.get_printable_tree(), encoding="unicode")
        else:
            # Return bytes
            return ET.tostring(self.get_printable_tree())
# Simple self-test / demo: parse a sample ACL, toggle the anonymous-read
# grant and print the resulting policy XML.
if __name__ == "__main__":
    xml = b"""<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>12345678901234567890</ID>
<DisplayName>owner-nickname</DisplayName>
</Owner>
<AccessControlList>
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
<ID>12345678901234567890</ID>
<DisplayName>owner-nickname</DisplayName>
</Grantee>
<Permission>FULL_CONTROL</Permission>
</Grant>
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group">
<URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>
</Grantee>
<Permission>READ</Permission>
</Grant>
</AccessControlList>
</AccessControlPolicy>
"""
    acl = ACL(xml)
    print("Grants:", acl.getGrantList())
    acl.revokeAnonRead()
    print("Grants:", acl.getGrantList())
    acl.grantAnonRead()
    print("Grants:", acl.getGrantList())
    print(acl)
# vim:et:ts=4:sts=4:ai
| 8,601 | Python | .py | 218 | 30.738532 | 153 | 0.589661 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,398 | CloudFront.py | s3tools_s3cmd/S3/CloudFront.py | # -*- coding: utf-8 -*-
## --------------------------------------------------------------------
## Amazon CloudFront support
##
## Authors : Michal Ludvig <michal@logix.cz> (https://www.logix.cz/michal)
## Florent Viard <florent@sodria.com> (https://www.sodria.com)
## Copyright : TGRMN Software, Sodria SAS and contributors
## License : GPL Version 2
## Website : https://s3tools.org
## --------------------------------------------------------------------
from __future__ import absolute_import
import sys
import time
import random
from collections import defaultdict
from datetime import datetime
from logging import debug, info, warning, error
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from .S3 import S3
from .Config import Config
from .Exceptions import CloudFrontError, ParameterError
from .ExitCodes import EX_OK, EX_GENERAL, EX_PARTIAL
from .BaseUtils import (getTreeFromXml, appendXmlTextNode, getDictFromTree,
dateS3toPython, encode_to_s3, decode_from_s3)
from .Utils import (getBucketFromHostname, getHostnameFromBucket, deunicodise, convertHeaderTupleListToDict)
from .Crypto import sign_string_v2
from .S3Uri import S3Uri, S3UriS3
from .ConnMan import ConnMan
from .SortedDict import SortedDict
PY3 = (sys.version_info >= (3, 0))
cloudfront_api_version = "2010-11-01"
cloudfront_resource = "/%(api_ver)s/distribution" % { 'api_ver' : cloudfront_api_version }
def output(message):
    """Write `message` plus a trailing newline to stdout."""
    sys.stdout.write("%s\n" % message)
def pretty_output(label, message):
    """Print `message` behind a left-aligned "label:" column, 15 chars wide."""
    padded = ("%s:" % label).ljust(15)
    output("%s %s" % (padded, message))
class DistributionSummary(object):
    """One <DistributionSummary /> entry from a CloudFront DistributionList.

    Example:

      <DistributionSummary>
        <Id>1234567890ABC</Id>
        <Status>Deployed</Status>
        <LastModifiedTime>2009-01-16T11:49:02.189Z</LastModifiedTime>
        <DomainName>blahblahblah.cloudfront.net</DomainName>
        <S3Origin>
          <DNSName>example.bucket.s3.amazonaws.com</DNSName>
        </S3Origin>
        <CNAME>cdn.example.com</CNAME>
        <CNAME>img.example.com</CNAME>
        <Comment>What Ever</Comment>
        <Enabled>true</Enabled>
      </DistributionSummary>

    Parsed fields are exposed in `self.info`; `Enabled` is normalised to
    a bool and `CNAME` to a list (the XML repeats the element per CNAME).
    """
    def __init__(self, tree):
        if tree.tag != "DistributionSummary":
            raise ValueError("Expected <DistributionSummary /> xml, got: <%s />" % tree.tag)
        self.parse(tree)

    def parse(self, tree):
        self.info = getDictFromTree(tree)
        self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
        # A single CNAME parses to a plain value - normalise to a list.
        # (isinstance instead of type() comparison; behavior unchanged.)
        if "CNAME" in self.info and not isinstance(self.info['CNAME'], list):
            self.info['CNAME'] = [self.info['CNAME']]

    def uri(self):
        """Return the cf://<distribution-id> URI for this distribution."""
        return S3Uri(u"cf://%s" % self.info['Id'])
class DistributionList(object):
    """Parsed CloudFront <DistributionList /> response.

    Example:

      <DistributionList xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
        <Marker />
        <MaxItems>100</MaxItems>
        <IsTruncated>false</IsTruncated>
        <DistributionSummary>
          ... handled by DistributionSummary() class ...
        </DistributionSummary>
      </DistributionList>

    Raw fields are exposed in `self.info` (with `IsTruncated` normalised
    to a bool); `self.dist_summs` holds one DistributionSummary per
    listed distribution.
    """
    def __init__(self, xml):
        root = getTreeFromXml(xml)
        if root.tag != "DistributionList":
            raise ValueError("Expected <DistributionList /> xml, got: <%s />" % root.tag)
        self.parse(root)

    def parse(self, tree):
        self.info = getDictFromTree(tree)
        ## Normalise some items
        self.info['IsTruncated'] = (self.info['IsTruncated'].lower() == "true")

        self.dist_summs = [DistributionSummary(node)
                           for node in tree.findall(".//DistributionSummary")]
class Distribution(object):
    """Parsed CloudFront <Distribution /> response."""
    ## Example:
    ##
    ## <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
    ##  <Id>1234567890ABC</Id>
    ##  <Status>InProgress</Status>
    ##  <LastModifiedTime>2009-01-16T13:07:11.319Z</LastModifiedTime>
    ##  <DomainName>blahblahblah.cloudfront.net</DomainName>
    ##  <DistributionConfig>
    ##  ... handled by DistributionConfig() class ...
    ##  </DistributionConfig>
    ## </Distribution>

    def __init__(self, xml):
        tree = getTreeFromXml(xml)
        if tree.tag != "Distribution":
            raise ValueError("Expected <Distribution /> xml, got: <%s />" % tree.tag)
        self.parse(tree)

    def parse(self, tree):
        self.info = getDictFromTree(tree)
        ## Normalise some items
        # LastModifiedTime string -> datetime; nested config -> DistributionConfig object.
        self.info['LastModifiedTime'] = dateS3toPython(self.info['LastModifiedTime'])
        self.info['DistributionConfig'] = DistributionConfig(tree = tree.find(".//DistributionConfig"))

    def uri(self):
        # cf://<distribution-id> URI for this distribution.
        return S3Uri(u"cf://%s" % self.info['Id'])
class DistributionConfig(object):
    """Parsed/serializable CloudFront <DistributionConfig /> document."""
    ## Example:
    ##
    ## <DistributionConfig>
    ##  <Origin>somebucket.s3.amazonaws.com</Origin>
    ##  <CallerReference>s3://somebucket/</CallerReference>
    ##  <Comment>http://somebucket.s3.amazonaws.com/</Comment>
    ##  <Enabled>true</Enabled>
    ##  <Logging>
    ##    <Bucket>bu.ck.et</Bucket>
    ##    <Prefix>/cf-somebucket/</Prefix>
    ##  </Logging>
    ## </DistributionConfig>

    EMPTY_CONFIG = "<DistributionConfig><S3Origin><DNSName/></S3Origin><CallerReference/><Enabled>true</Enabled></DistributionConfig>"
    xmlns = "http://cloudfront.amazonaws.com/doc/%(api_ver)s/" % { 'api_ver' : cloudfront_api_version }

    def __init__(self, xml = None, tree = None):
        # Accept either raw XML or an already-parsed Element; default to
        # an empty, enabled config.
        if xml is None:
            xml = DistributionConfig.EMPTY_CONFIG
        if tree is None:
            tree = getTreeFromXml(xml)
        if tree.tag != "DistributionConfig":
            raise ValueError("Expected <DistributionConfig /> xml, got: <%s />" % tree.tag)
        self.parse(tree)

    def parse(self, tree):
        self.info = getDictFromTree(tree)
        self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
        # Normalise CNAME to a lowercase list (0, 1 or many elements in XML).
        if "CNAME" not in self.info:
            self.info['CNAME'] = []
        if type(self.info['CNAME']) != list:
            self.info['CNAME'] = [self.info['CNAME']]
        self.info['CNAME'] = [cname.lower() for cname in self.info['CNAME']]
        if "Comment" not in self.info:
            self.info['Comment'] = ""
        if "DefaultRootObject" not in self.info:
            self.info['DefaultRootObject'] = ""
        ## Figure out logging - complex node not parsed by getDictFromTree()
        logging_nodes = tree.findall(".//Logging")
        if logging_nodes:
            logging_dict = getDictFromTree(logging_nodes[0])
            logging_dict['Bucket'], success = getBucketFromHostname(logging_dict['Bucket'])
            if not success:
                warning("Logging to unparsable bucket name: %s" % logging_dict['Bucket'])
            self.info['Logging'] = S3UriS3(u"s3://%(Bucket)s/%(Prefix)s" % logging_dict)
        else:
            self.info['Logging'] = None

    def get_printable_tree(self):
        # Serialize back to an Element; CloudFront requires this element order.
        tree = ET.Element("DistributionConfig")
        tree.attrib['xmlns'] = DistributionConfig.xmlns

        ## Retain the order of the following calls!
        s3org = appendXmlTextNode("S3Origin", '', tree)
        appendXmlTextNode("DNSName", self.info['S3Origin']['DNSName'], s3org)
        appendXmlTextNode("CallerReference", self.info['CallerReference'], tree)
        for cname in self.info['CNAME']:
            appendXmlTextNode("CNAME", cname.lower(), tree)
        if self.info['Comment']:
            appendXmlTextNode("Comment", self.info['Comment'], tree)
        appendXmlTextNode("Enabled", str(self.info['Enabled']).lower(), tree)
        # don't create a empty DefaultRootObject element as it would result in a MalformedXML error
        if str(self.info['DefaultRootObject']):
            appendXmlTextNode("DefaultRootObject", str(self.info['DefaultRootObject']), tree)
        if self.info['Logging']:
            logging_el = ET.Element("Logging")
            appendXmlTextNode("Bucket", getHostnameFromBucket(self.info['Logging'].bucket()), logging_el)
            appendXmlTextNode("Prefix", self.info['Logging'].object(), logging_el)
            tree.append(logging_el)
        return tree

    def __unicode__(self):
        return decode_from_s3(ET.tostring(self.get_printable_tree()))

    def __str__(self):
        if PY3:
            # Return unicode
            return ET.tostring(self.get_printable_tree(), encoding="unicode")
        else:
            # Return bytes
            return ET.tostring(self.get_printable_tree())
class Invalidation(object):
    """Wrapper around the <Invalidation> document returned by CloudFront.

    Example:
        <Invalidation xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
          <Id>id</Id>
          <Status>status</Status>
          <CreateTime>date</CreateTime>
          <InvalidationBatch>
            <Path>/image1.jpg</Path>
            <Path>/image2.jpg</Path>
            <Path>/videos/movie.flv</Path>
            <CallerReference>my-batch</CallerReference>
          </InvalidationBatch>
        </Invalidation>
    """
    def __init__(self, xml):
        root = getTreeFromXml(xml)
        if root.tag != "Invalidation":
            raise ValueError("Expected <Invalidation /> xml, got: <%s />" % root.tag)
        self.parse(root)

    def parse(self, tree):
        """Flatten the XML tree into the self.info dictionary."""
        self.info = getDictFromTree(tree)

    def __str__(self):
        return str(self.info)
class InvalidationList(object):
    """Wrapper around the <InvalidationList> document returned by CloudFront.

    Example:
        <InvalidationList>
          <Marker/>
          <NextMarker>Invalidation ID</NextMarker>
          <MaxItems>2</MaxItems>
          <IsTruncated>true</IsTruncated>
          <InvalidationSummary>
            <Id>[Second Invalidation ID]</Id>
            <Status>Completed</Status>
          </InvalidationSummary>
          <InvalidationSummary>
            <Id>[First Invalidation ID]</Id>
            <Status>Completed</Status>
          </InvalidationSummary>
        </InvalidationList>
    """
    def __init__(self, xml):
        root = getTreeFromXml(xml)
        if root.tag != "InvalidationList":
            raise ValueError("Expected <InvalidationList /> xml, got: <%s />" % root.tag)
        self.parse(root)

    def parse(self, tree):
        """Flatten the XML tree into the self.info dictionary."""
        self.info = getDictFromTree(tree)

    def __str__(self):
        return str(self.info)
class InvalidationBatch(object):
    """Builder for the <InvalidationBatch> request document.

    Example:
        <InvalidationBatch>
          <Path>/image1.jpg</Path>
          <Path>/image2.jpg</Path>
          <Path>/videos/movie.flv</Path>
          <Path>/sound%20track.mp3</Path>
          <CallerReference>my-batch</CallerReference>
        </InvalidationBatch>
    """
    def __init__(self, reference = None, distribution = None, paths = None):
        """Create a batch for `paths`.

        reference: unique caller reference; when not supplied it is generated
                   from the distribution id, a timestamp and a random number.
        """
        if reference:
            self.reference = reference
        else:
            if not distribution:
                distribution = "0"
            self.reference = "%s.%s.%s" % (distribution,
                                           datetime.strftime(datetime.now(),"%Y%m%d%H%M%S"),
                                           random.randint(1000,9999))
        self.paths = []
        # Fix: avoid the mutable default argument idiom; None means "no paths".
        self.add_objects(paths if paths is not None else [])

    def add_objects(self, paths):
        """Queue additional paths for invalidation."""
        self.paths.extend(paths)

    def get_reference(self):
        """Return the caller reference for this batch."""
        return self.reference

    def get_printable_tree(self):
        """Serialize the batch to an ElementTree; paths are made absolute."""
        tree = ET.Element("InvalidationBatch")

        for path in self.paths:
            if len(path) < 1 or path[0] != "/":
                path = "/" + path
            appendXmlTextNode("Path", path, tree)
        appendXmlTextNode("CallerReference", self.reference, tree)
        return tree

    def __unicode__(self):
        # Python 2 text representation of the serialized document.
        return decode_from_s3(ET.tostring(self.get_printable_tree()))

    def __str__(self):
        if PY3:
            # Return unicode
            return ET.tostring(self.get_printable_tree(), encoding="unicode")
        else:
            # Return bytes
            return ET.tostring(self.get_printable_tree())
class CloudFront(object):
    """Thin client for the CloudFront REST API (distributions & invalidations).

    Every public method returns the raw ``response`` dict from send_request(),
    augmented with parsed objects ('distribution', 'dist_config', ...).
    """
    operations = {
        "CreateDist" : { 'method' : "POST", 'resource' : "" },
        "DeleteDist" : { 'method' : "DELETE", 'resource' : "/%(dist_id)s" },
        "GetList" : { 'method' : "GET", 'resource' : "" },
        "GetDistInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s" },
        "GetDistConfig" : { 'method' : "GET", 'resource' : "/%(dist_id)s/config" },
        "SetDistConfig" : { 'method' : "PUT", 'resource' : "/%(dist_id)s/config" },
        "Invalidate" : { 'method' : "POST", 'resource' : "/%(dist_id)s/invalidation" },
        "GetInvalList" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation" },
        "GetInvalInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation/%(request_id)s" },
    }

    # Class-level cache: bucket name -> set of cf:// distribution uris,
    # filled lazily by get_dist_name_for_bucket().
    dist_list = None

    def __init__(self, config):
        self.config = config

    ## --------------------------------------------------
    ## Methods implementing CloudFront API
    ## --------------------------------------------------

    def GetList(self):
        """List all distributions of the account."""
        response = self.send_request("GetList")
        response['dist_list'] = DistributionList(response['data'])
        if response['dist_list'].info['IsTruncated']:
            raise NotImplementedError("List is truncated. Ask s3cmd author to add support.")
        ## TODO: handle Truncated
        return response

    def CreateDistribution(self, uri, cnames_add = None, comment = None, logging = None, default_root_object = None):
        """Create a new distribution whose S3 origin is the bucket of `uri`."""
        if cnames_add is None:
            cnames_add = []
        dist_config = DistributionConfig()
        dist_config.info['Enabled'] = True
        dist_config.info['S3Origin']['DNSName'] = uri.host_name()
        dist_config.info['CallerReference'] = str(uri)
        dist_config.info['DefaultRootObject'] = default_root_object
        if comment is None:
            dist_config.info['Comment'] = uri.public_url()
        else:
            dist_config.info['Comment'] = comment
        for cname in cnames_add:
            if dist_config.info['CNAME'].count(cname) == 0:
                dist_config.info['CNAME'].append(cname)
        if logging:
            dist_config.info['Logging'] = S3UriS3(logging)
        request_body = str(dist_config)
        debug("CreateDistribution(): request_body: %s" % request_body)
        response = self.send_request("CreateDist", body = request_body)
        response['distribution'] = Distribution(response['data'])
        return response

    def ModifyDistribution(self, cfuri, cnames_add = None, cnames_remove = None,
                           comment = None, enabled = None, logging = None,
                           default_root_object = None):
        """Fetch the current config of `cfuri`, patch it and upload it back."""
        if cnames_add is None:
            cnames_add = []
        if cnames_remove is None:
            cnames_remove = []
        if cfuri.type != "cf":
            raise ValueError("Expected CFUri instead of: %s" % cfuri)
        # Get current dist status (enabled/disabled) and Etag
        info("Checking current status of %s" % cfuri)
        response = self.GetDistConfig(cfuri)
        dc = response['dist_config']
        if enabled is not None:
            dc.info['Enabled'] = enabled
        if comment is not None:
            dc.info['Comment'] = comment
        if default_root_object is not None:
            dc.info['DefaultRootObject'] = default_root_object
        for cname in cnames_add:
            if dc.info['CNAME'].count(cname) == 0:
                dc.info['CNAME'].append(cname)
        for cname in cnames_remove:
            while dc.info['CNAME'].count(cname) > 0:
                dc.info['CNAME'].remove(cname)
        if logging is not None:
            # False explicitly disables logging; any other value is an s3:// uri.
            if logging is False:
                dc.info['Logging'] = False
            else:
                dc.info['Logging'] = S3UriS3(logging)
        response = self.SetDistConfig(cfuri, dc, response['headers']['etag'])
        return response

    def DeleteDistribution(self, cfuri):
        """Delete a distribution, disabling it first if necessary."""
        if cfuri.type != "cf":
            raise ValueError("Expected CFUri instead of: %s" % cfuri)
        # Get current dist status (enabled/disabled) and Etag
        info("Checking current status of %s" % cfuri)
        response = self.GetDistConfig(cfuri)
        if response['dist_config'].info['Enabled']:
            info("Distribution is ENABLED. Disabling first.")
            response['dist_config'].info['Enabled'] = False
            response = self.SetDistConfig(cfuri, response['dist_config'],
                                          response['headers']['etag'])
            warning("Waiting for Distribution to become disabled.")
            warning("This may take several minutes, please wait.")
            # Poll until the change is fully deployed; deletion of an
            # enabled/deploying distribution is rejected by CloudFront.
            while True:
                response = self.GetDistInfo(cfuri)
                d = response['distribution']
                if d.info['Status'] == "Deployed" and d.info['Enabled'] == False:
                    info("Distribution is now disabled")
                    break
                warning("Still waiting...")
                time.sleep(10)
        headers = SortedDict(ignore_case = True)
        headers['if-match'] = response['headers']['etag']
        response = self.send_request("DeleteDist", dist_id = cfuri.dist_id(),
                                     headers = headers)
        return response

    def GetDistInfo(self, cfuri):
        """Fetch full information about one distribution."""
        if cfuri.type != "cf":
            raise ValueError("Expected CFUri instead of: %s" % cfuri)
        response = self.send_request("GetDistInfo", dist_id = cfuri.dist_id())
        response['distribution'] = Distribution(response['data'])
        return response

    def GetDistConfig(self, cfuri):
        """Fetch the DistributionConfig of one distribution."""
        if cfuri.type != "cf":
            raise ValueError("Expected CFUri instead of: %s" % cfuri)
        response = self.send_request("GetDistConfig", dist_id = cfuri.dist_id())
        response['dist_config'] = DistributionConfig(response['data'])
        return response

    def SetDistConfig(self, cfuri, dist_config, etag = None):
        """Upload a modified DistributionConfig, guarded by If-Match etag."""
        if etag is None:
            debug("SetDistConfig(): Etag not set. Fetching it first.")
            etag = self.GetDistConfig(cfuri)['headers']['etag']
        debug("SetDistConfig(): Etag = %s" % etag)
        request_body = str(dist_config)
        debug("SetDistConfig(): request_body: %s" % request_body)
        headers = SortedDict(ignore_case = True)
        headers['if-match'] = etag
        response = self.send_request("SetDistConfig", dist_id = cfuri.dist_id(),
                                     body = request_body, headers = headers)
        return response

    def InvalidateObjects(self, uri, paths, default_index_file, invalidate_default_index_on_cf, invalidate_default_index_root_on_cf):
        """Create an invalidation batch for `paths` on every distribution of `uri`.

        Returns a list of response dicts, one per matching distribution.
        """
        # joseprio: if the user doesn't want to invalidate the default index
        # path, or if the user wants to invalidate the root of the default
        # index, we need to process those paths
        if default_index_file is not None and (not invalidate_default_index_on_cf or invalidate_default_index_root_on_cf):
            new_paths = []
            default_index_suffix = '/' + default_index_file
            for path in paths:
                if path.endswith(default_index_suffix) or path == default_index_file:
                    if invalidate_default_index_on_cf:
                        new_paths.append(path)
                    if invalidate_default_index_root_on_cf:
                        new_paths.append(path[:-len(default_index_file)])
                else:
                    new_paths.append(path)
            paths = new_paths

        # uri could be either cf:// or s3:// uri
        cfuris = self.get_dist_name_for_bucket(uri)
        if len(paths) > 999:
            try:
                tmp_filename = Utils.mktmpfile()
                with open(deunicodise(tmp_filename), "w") as fp:
                    fp.write(deunicodise("\n".join(paths)+"\n"))
                warning("Request to invalidate %d paths (max 999 supported)" % len(paths))
                warning("All the paths are now saved in: %s" % tmp_filename)
            except Exception:
                # Best effort only: saving the path list must not mask the error below.
                pass
            raise ParameterError("Too many paths to invalidate")
        responses = []
        for cfuri in cfuris:
            invalbatch = InvalidationBatch(distribution = cfuri.dist_id(), paths = paths)
            debug("InvalidateObjects(): request_body: %s" % invalbatch)
            response = self.send_request("Invalidate", dist_id = cfuri.dist_id(),
                                         body = str(invalbatch))
            response['dist_id'] = cfuri.dist_id()
            if response['status'] == 201:
                inval_info = Invalidation(response['data']).info
                response['request_id'] = inval_info['Id']
            debug("InvalidateObjects(): response: %s" % response)
            responses.append(response)
        return responses

    def GetInvalList(self, cfuri):
        """List the invalidation requests of one distribution."""
        if cfuri.type != "cf":
            raise ValueError("Expected CFUri instead of: %s" % cfuri)
        response = self.send_request("GetInvalList", dist_id = cfuri.dist_id())
        response['inval_list'] = InvalidationList(response['data'])
        return response

    def GetInvalInfo(self, cfuri):
        """Fetch the status of one invalidation request."""
        if cfuri.type != "cf":
            raise ValueError("Expected CFUri instead of: %s" % cfuri)
        if cfuri.request_id() is None:
            raise ValueError("Expected CFUri with Request ID")
        response = self.send_request("GetInvalInfo", dist_id = cfuri.dist_id(), request_id = cfuri.request_id())
        response['inval_status'] = Invalidation(response['data'])
        return response

    ## --------------------------------------------------
    ## Low-level methods for handling CloudFront requests
    ## --------------------------------------------------

    def send_request(self, op_name, dist_id = None, request_id = None, body = None, headers = None, retries = None):
        """Send one API request, retrying 5xx replies with increasing delays.

        Returns a dict with 'status', 'reason', 'headers' and 'data'.
        Raises CloudFrontError on a final non-2xx answer.
        """
        if retries is None:
            retries = self.config.max_retries
        if headers is None:
            headers = SortedDict(ignore_case = True)
        operation = self.operations[op_name]
        if body:
            headers['content-type'] = 'text/plain'
        request = self.create_request(operation, dist_id, request_id, headers)
        conn = self.get_connection()
        debug("send_request(): %s %s" % (request['method'], request['resource']))
        conn.c.request(request['method'], request['resource'], body, request['headers'])
        http_response = conn.c.getresponse()
        response = {}
        response["status"] = http_response.status
        response["reason"] = http_response.reason
        response["headers"] = convertHeaderTupleListToDict(http_response.getheaders())
        response["data"] = http_response.read()
        ConnMan.put(conn)
        debug("CloudFront: response: %r" % response)

        if response["status"] >= 500:
            e = CloudFrontError(response)
            if retries:
                warning(u"Retrying failed request: %s (%s)" % (op_name, e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                # Bug fix: the retry previously dropped 'request_id' and
                # 'headers', which broke retried GetInvalInfo calls (resource
                # formatted with request_id=None) and lost If-Match etags.
                return self.send_request(op_name, dist_id,
                                         request_id = request_id,
                                         body = body, headers = headers,
                                         retries = retries - 1)
            else:
                raise e

        if response["status"] < 200 or response["status"] > 299:
            raise CloudFrontError(response)

        return response

    def create_request(self, operation, dist_id = None, request_id = None, headers = None):
        """Build the resource path, date and auth headers for one operation."""
        resource = cloudfront_resource + (
            operation['resource'] % { 'dist_id' : dist_id, 'request_id' : request_id })

        if not headers:
            headers = SortedDict(ignore_case = True)

        if "date" in headers:
            if "x-amz-date" not in headers:
                headers["x-amz-date"] = headers["date"]
            del(headers["date"])

        if "x-amz-date" not in headers:
            headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())

        if len(self.config.access_token)>0:
            self.config.role_refresh()
            headers['x-amz-security-token']=self.config.access_token

        signature = self.sign_request(headers)
        headers["Authorization"] = "AWS "+self.config.access_key+":"+signature

        request = {}
        request['resource'] = resource
        request['headers'] = headers
        request['method'] = operation['method']

        return request

    def sign_request(self, headers):
        """Sign the x-amz-date header with AWS signature v2."""
        string_to_sign = headers['x-amz-date']
        signature = decode_from_s3(sign_string_v2(encode_to_s3(string_to_sign)))
        debug(u"CloudFront.sign_request('%s') = %s" % (string_to_sign, signature))
        return signature

    def get_connection(self):
        """Get a pooled https connection to the CloudFront endpoint."""
        conn = ConnMan.get(self.config.cloudfront_host, ssl = True)
        return conn

    def _fail_wait(self, retries):
        # Wait a few seconds. The more it fails the more we wait.
        return (self.config.max_retries - retries + 1) * 3

    def get_dist_name_for_bucket(self, uri):
        """Map an s3:// (or cf://) uri to the set of cf:// distribution uris.

        The distribution list is fetched once and cached at class level.
        """
        if uri.type == "cf":
            return [uri]
        if uri.type != "s3":
            raise ParameterError("CloudFront or S3 URI required instead of: %s" % uri)

        debug("_get_dist_name_for_bucket(%r)" % uri)
        if CloudFront.dist_list is None:
            response = self.GetList()
            CloudFront.dist_list = {}
            for d in response['dist_list'].dist_summs:
                distListIndex = ""
                if "S3Origin" in d.info:
                    distListIndex = getBucketFromHostname(d.info['S3Origin']['DNSName'])[0]
                elif "CustomOrigin" in d.info:
                    # Aral: This used to skip over distributions with CustomOrigin, however, we mustn't
                    #       do this since S3 buckets that are set up as websites use custom origins.
                    #       Thankfully, the custom origin URLs they use start with the URL of the
                    #       S3 bucket. Here, we make use this naming convention to support this use case.
                    distListIndex = getBucketFromHostname(d.info['CustomOrigin']['DNSName'])[0]
                    distListIndex = distListIndex[:len(uri.bucket())]
                else:
                    # Aral: I'm not sure when this condition will be reached, but keeping it in there.
                    continue

                if CloudFront.dist_list.get(distListIndex, None) is None:
                    CloudFront.dist_list[distListIndex] = set()
                CloudFront.dist_list[distListIndex].add(d.uri())

            debug("dist_list: %s" % CloudFront.dist_list)
        try:
            return CloudFront.dist_list[uri.bucket()]
        except Exception as e:
            debug(e)
            raise ParameterError("Unable to translate S3 URI to CloudFront distribution name: %s" % uri)
class Cmd(object):
    """
    Class that implements CloudFront commands
    """
    class Options(object):
        # Defaults for the cf* command line options; update_option()
        # overwrites them on the shared 'Cmd.options' instance.
        cf_cnames_add = []
        cf_cnames_remove = []
        cf_comment = None
        cf_enable = None
        cf_logging = None
        cf_default_root_object = None

        def option_list(self):
            """Return the names of all CloudFront-related option attributes."""
            return [opt for opt in dir(self) if opt.startswith("cf_")]

        def update_option(self, option, value):
            """Set 'option' on the shared Cmd.options instance."""
            setattr(Cmd.options, option, value)

    options = Options()

    @staticmethod
    def _parse_args(args):
        """Resolve each cf:// or s3:// argument to CloudFront distribution uris."""
        cf = CloudFront(Config())
        cfuris = []
        for arg in args:
            uris = cf.get_dist_name_for_bucket(S3Uri(arg))
            cfuris.extend(uris)
        return cfuris

    @staticmethod
    def info(args):
        """'cfinfo' command: print a summary of all distributions, or
        detailed information about the distributions given as arguments."""
        cf = CloudFront(Config())
        if not args:
            response = cf.GetList()
            for d in response['dist_list'].dist_summs:
                if "S3Origin" in d.info:
                    origin = S3UriS3.httpurl_to_s3uri(d.info['S3Origin']['DNSName'])
                elif "CustomOrigin" in d.info:
                    origin = "http://%s/" % d.info['CustomOrigin']['DNSName']
                else:
                    origin = "<unknown>"
                pretty_output("Origin", origin)
                pretty_output("DistId", d.uri())
                pretty_output("DomainName", d.info['DomainName'])
                if "CNAME" in d.info:
                    pretty_output("CNAMEs", ", ".join(d.info['CNAME']))
                pretty_output("Status", d.info['Status'])
                pretty_output("Enabled", d.info['Enabled'])
                output("")
        else:
            cfuris = Cmd._parse_args(args)
            for cfuri in cfuris:
                response = cf.GetDistInfo(cfuri)
                d = response['distribution']
                dc = d.info['DistributionConfig']
                if "S3Origin" in dc.info:
                    origin = S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName'])
                elif "CustomOrigin" in dc.info:
                    origin = "http://%s/" % dc.info['CustomOrigin']['DNSName']
                else:
                    origin = "<unknown>"
                pretty_output("Origin", origin)
                pretty_output("DistId", d.uri())
                pretty_output("DomainName", d.info['DomainName'])
                if "CNAME" in dc.info:
                    pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
                pretty_output("Status", d.info['Status'])
                pretty_output("Comment", dc.info['Comment'])
                pretty_output("Enabled", dc.info['Enabled'])
                pretty_output("DfltRootObject", dc.info['DefaultRootObject'])
                pretty_output("Logging", dc.info['Logging'] or "Disabled")
                pretty_output("Etag", response['headers']['etag'])

    @staticmethod
    def create(args):
        """'cfcreate' command: create a distribution for each bucket uri."""
        cf = CloudFront(Config())
        buckets = []
        for arg in args:
            uri = S3Uri(arg)
            if uri.type != "s3":
                raise ParameterError("Distribution can only be created from a s3:// URI instead of: %s" % arg)
            if uri.object():
                raise ParameterError("Use s3:// URI with a bucket name only instead of: %s" % arg)
            if not uri.is_dns_compatible():
                raise ParameterError("CloudFront can only handle lowercase-named buckets.")
            buckets.append(uri)
        if not buckets:
            raise ParameterError("No valid bucket names found")
        for uri in buckets:
            info("Creating distribution from: %s" % uri)
            response = cf.CreateDistribution(uri, cnames_add = Cmd.options.cf_cnames_add,
                                             comment = Cmd.options.cf_comment,
                                             logging = Cmd.options.cf_logging,
                                             default_root_object = Cmd.options.cf_default_root_object)
            d = response['distribution']
            dc = d.info['DistributionConfig']
            output("Distribution created:")
            pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName']))
            pretty_output("DistId", d.uri())
            pretty_output("DomainName", d.info['DomainName'])
            pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
            pretty_output("Comment", dc.info['Comment'])
            pretty_output("Status", d.info['Status'])
            pretty_output("Enabled", dc.info['Enabled'])
            pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
            pretty_output("Etag", response['headers']['etag'])

    @staticmethod
    def delete(args):
        """'cfdelete' command: delete the given distributions."""
        cf = CloudFront(Config())
        cfuris = Cmd._parse_args(args)
        for cfuri in cfuris:
            response = cf.DeleteDistribution(cfuri)
            if response['status'] >= 400:
                error("Distribution %s could not be deleted: %s" % (cfuri, response['reason']))
            output("Distribution %s deleted" % cfuri)

    @staticmethod
    def modify(args):
        """'cfmodify' command: apply the Cmd.options changes to one distribution."""
        cf = CloudFront(Config())
        if len(args) > 1:
            raise ParameterError("Too many parameters. Modify one Distribution at a time.")
        try:
            cfuri = Cmd._parse_args(args)[0]
        except IndexError:
            raise ParameterError("No valid Distribution URI found.")
        response = cf.ModifyDistribution(cfuri,
                                         cnames_add = Cmd.options.cf_cnames_add,
                                         cnames_remove = Cmd.options.cf_cnames_remove,
                                         comment = Cmd.options.cf_comment,
                                         enabled = Cmd.options.cf_enable,
                                         logging = Cmd.options.cf_logging,
                                         default_root_object = Cmd.options.cf_default_root_object)
        if response['status'] >= 400:
            error("Distribution %s could not be modified: %s" % (cfuri, response['reason']))
        output("Distribution modified: %s" % cfuri)
        response = cf.GetDistInfo(cfuri)
        d = response['distribution']
        dc = d.info['DistributionConfig']
        pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName']))
        pretty_output("DistId", d.uri())
        pretty_output("DomainName", d.info['DomainName'])
        pretty_output("Status", d.info['Status'])
        pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
        pretty_output("Comment", dc.info['Comment'])
        pretty_output("Enabled", dc.info['Enabled'])
        pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
        pretty_output("Etag", response['headers']['etag'])

    @staticmethod
    def invalinfo(args):
        """'cfinvalinfo' command: show the status of invalidation requests."""
        cf = CloudFront(Config())
        cfuris = Cmd._parse_args(args)
        requests = []
        for cfuri in cfuris:
            if cfuri.request_id():
                requests.append(str(cfuri))
            else:
                inval_list = cf.GetInvalList(cfuri)
                try:
                    for i in inval_list['inval_list'].info['InvalidationSummary']:
                        requests.append("/".join(["cf:/", cfuri.dist_id(), i["Id"]]))
                except Exception:
                    continue
        for req in requests:
            cfuri = S3Uri(req)
            inval_info = cf.GetInvalInfo(cfuri)
            st = inval_info['inval_status'].info
            # A single <Path> is parsed as a plain string, multiple as a list.
            paths = st['InvalidationBatch']['Path']
            nr_of_paths = len(paths) if isinstance(paths, list) else 1
            pretty_output("URI", str(cfuri))
            pretty_output("Status", st['Status'])
            pretty_output("Created", st['CreateTime'])
            pretty_output("Nr of paths", nr_of_paths)
            pretty_output("Reference", st['InvalidationBatch']['CallerReference'])
            output("")

    @staticmethod
    def invalidate(args):
        """'cfinval' command: invalidate the given s3:// objects or prefixes.

        Returns EX_OK, EX_PARTIAL or EX_GENERAL depending on how many
        invalidation requests succeeded.
        """
        cfg = Config()
        cf = CloudFront(cfg)
        s3 = S3(cfg)
        bucket_paths = defaultdict(list)
        for arg in args:
            uri = S3Uri(arg)
            uobject = uri.object()
            if not uobject:
                # If object is not defined, we want to invalidate the whole bucket
                uobject = '*'
            elif uobject[-1] == '/':
                # If object is folder (ie prefix), we want to invalidate the whole content
                uobject += '*'
            bucket_paths[uri.bucket()].append(uobject)

        ret = EX_OK
        params = []
        for bucket, paths in bucket_paths.items():
            base_uri = S3Uri(u's3://%s' % bucket)
            cfuri = next(iter(cf.get_dist_name_for_bucket(base_uri)))
            default_index_file = None
            if cfg.invalidate_default_index_on_cf or cfg.invalidate_default_index_root_on_cf:
                info_response = s3.website_info(base_uri, cfg.bucket_location)
                if info_response:
                    default_index_file = info_response['index_document']
                    if not default_index_file:
                        default_index_file = None
            if cfg.dry_run:
                fulluri_paths = [S3UriS3.compose_uri(bucket, path) for path in paths]
                output(u"[--dry-run] Would invalidate %r" % fulluri_paths)
                continue
            params.append((bucket, paths, base_uri, cfuri, default_index_file))

        if cfg.dry_run:
            warning(u"Exiting now because of --dry-run")
            return EX_OK

        nb_success = 0
        first = True
        for bucket, paths, base_uri, cfuri, default_index_file in params:
            if not first:
                output("")
            else:
                first = False
            results = cf.InvalidateObjects(
                cfuri, paths, default_index_file,
                cfg.invalidate_default_index_on_cf, cfg.invalidate_default_index_root_on_cf
            )
            dist_id = cfuri.dist_id()
            pretty_output("URI", str(base_uri))
            pretty_output("DistId", dist_id)
            pretty_output("Nr of paths", len(paths))
            for result in results:
                result_code = result['status']
                if result_code != 201:
                    pretty_output("Status", "Failed: %d" % result_code)
                    ret = EX_GENERAL
                    continue
                request_id = result['request_id']
                nb_success += 1
                pretty_output("Status", "Created")
                pretty_output("RequestId", request_id)
                pretty_output("Info", u"Check progress with: s3cmd cfinvalinfo %s/%s"
                              % (dist_id, request_id))
            if ret != EX_OK and cfg.stop_on_error:
                error(u"Exiting now because of --stop-on-error")
                break
        if ret != EX_OK and nb_success:
            ret = EX_PARTIAL
        return ret
# vim:et:ts=4:sts=4:ai
| 37,587 | Python | .py | 795 | 36.396226 | 134 | 0.575226 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |
9,399 | Custom_httplib3x.py | s3tools_s3cmd/S3/Custom_httplib3x.py | from __future__ import absolute_import, print_function
import os
import sys
import http.client as httplib
from http.client import (_CS_REQ_SENT, _CS_REQ_STARTED, CONTINUE, UnknownProtocol,
CannotSendHeader, NO_CONTENT, NOT_MODIFIED, EXPECTATION_FAILED,
HTTPMessage, HTTPException)
from io import StringIO
from .BaseUtils import encode_to_s3
_METHODS_EXPECTING_BODY = ['PATCH', 'POST', 'PUT']
# Fixed python 2.X httplib to be able to support
# Expect: 100-Continue http feature
# Inspired by:
# http://bugs.python.org/file26357/issue1346874-273.patch
def _encode(data, name='data'):
    """Encode *data* as latin-1, raising a more helpful error on failure."""
    try:
        return data.encode("latin-1")
    except UnicodeEncodeError as err:
        # Re-raise with a clearer message; clearing __cause__ emulates
        # `raise ... from None` while staying byte-compilable on python 2.
        new_exc = UnicodeEncodeError(
            err.encoding, err.object, err.start, err.end,
            "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
            "if you want to send it encoded in UTF-8." %
            (name.title(), data[err.start:err.end], name))
        new_exc.__cause__ = None
        raise new_exc
def httpresponse_patched_begin(self):
    """ Re-implemented httplib begin function
    to not loop over "100 CONTINUE" status replies
    but to report it to higher level so it can be processed.
    """
    if self.headers is not None:
        # we've already started reading the response
        return

    # read only one status even if we get a non-100 response
    version, status, reason = self._read_status()

    self.code = self.status = status
    self.reason = reason.strip()
    if version in ('HTTP/1.0', 'HTTP/0.9'):
        # Some servers might still return "0.9", treat it as 1.0 anyway
        self.version = 10
    elif version.startswith('HTTP/1.'):
        self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
    else:
        raise UnknownProtocol(version)

    self.headers = self.msg = httplib.parse_headers(self.fp)

    if self.debuglevel > 0:
        for hdr in self.headers:
            print("header:", hdr, end=" ")

    # are we using the chunked-style of transfer encoding?
    tr_enc = self.headers.get('transfer-encoding')
    if tr_enc and tr_enc.lower() == "chunked":
        self.chunked = True
        self.chunk_left = None
    else:
        self.chunked = False

    # will the connection close at the end of the response?
    self.will_close = self._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
    self.length = None
    length = self.headers.get('content-length')
    if length and not self.chunked:
        try:
            self.length = int(length)
        except ValueError:
            self.length = None
        else:
            if self.length < 0:  # ignore nonsensical negative lengths
                self.length = None
    else:
        self.length = None

    # does the body have a fixed length? (of zero)
    if (status == NO_CONTENT or status == NOT_MODIFIED or
            100 <= status < 200 or      # 1xx codes
            self._method == 'HEAD'):
        self.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if (not self.will_close and
            not self.chunked and
            self.length is None):
        self.will_close = True
# No need to override httplib with this one, as it is only used by send_request
def httpconnection_patched_get_content_length(body, method):
    """Get the content-length based on the body.

    If the body is None, we set Content-Length: 0 for methods that expect
    a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
    any method if the body is a str or bytes-like object and not a file.
    (Reimplemented here because it is new in recent httplib but needed by
    send_request.)
    """
    if body is None:
        # Explicit None check: distinguishes "unset" from "set but empty".
        return 0 if method.upper() in _METHODS_EXPECTING_BODY else None

    if hasattr(body, 'read'):
        # file-like object: length unknown up-front
        return None

    try:
        # buffer protocol (bytes, bytearray, array)?
        return memoryview(body).nbytes
    except TypeError:
        pass

    if isinstance(body, str):
        return len(body)

    return None
def httpconnection_patched_send_request(self, method, url, body, headers,
                                        encode_chunked=False):
    """Patched HTTPConnection._send_request with Expect: 100-continue support.

    When the caller supplied an "Expect: 100-continue" header, the body is
    only transmitted after the server answered "100 Continue"; a 417 reply
    raises ExpectationFailed instead.
    """
    # Honor explicitly requested Host: and Accept-Encoding: headers.
    header_names = dict.fromkeys([k.lower() for k in headers])
    skips = {}
    if 'host' in header_names:
        skips['skip_host'] = 1
    if 'accept-encoding' in header_names:
        skips['skip_accept_encoding'] = 1

    expect_continue = False
    for hdr, value in headers.items():
        if 'expect' == hdr.lower() and '100-continue' in value.lower():
            expect_continue = True

    self.putrequest(method, url, **skips)

    # chunked encoding will happen if HTTP/1.1 is used and either
    # the caller passes encode_chunked=True or the following
    # conditions hold:
    # 1. content-length has not been explicitly set
    # 2. the body is a file or iterable, but not a str or bytes-like
    # 3. Transfer-Encoding has NOT been explicitly set by the caller
    if 'content-length' not in header_names:
        # only chunk body if not explicitly set for backwards
        # compatibility, assuming the client code is already handling the
        # chunking
        if 'transfer-encoding' not in header_names:
            # if content-length cannot be automatically determined, fall
            # back to chunked encoding
            encode_chunked = False
            content_length = httpconnection_patched_get_content_length(body, method)
            if content_length is None:
                if body is not None:
                    if self.debuglevel > 0:
                        print('Unable to determine size of %r' % body)
                    encode_chunked = True
                    self.putheader('Transfer-Encoding', 'chunked')
            else:
                self.putheader('Content-Length', str(content_length))
    else:
        encode_chunked = False

    for hdr, value in headers.items():
        self.putheader(encode_to_s3(hdr), encode_to_s3(value))
    if isinstance(body, str):
        # RFC 2616 Section 3.7.1 says that text default has a
        # default charset of iso-8859-1.
        body = _encode(body, 'body')

    # If an Expect: 100-continue was sent, we need to check for a 417
    # Expectation Failed to avoid unnecessarily sending the body
    # See RFC 2616 8.2.3
    if not expect_continue:
        self.endheaders(body, encode_chunked=encode_chunked)
    else:
        if not body:
            raise HTTPException("A body is required when expecting "
                                "100-continue")
        self.endheaders()
        resp = self.getresponse()
        resp.read()
        # Force connection state forward: the preliminary reply was consumed.
        self._HTTPConnection__state = _CS_REQ_SENT
        if resp.status == EXPECTATION_FAILED:
            raise ExpectationFailed()
        elif resp.status == CONTINUE:
            self.wrapper_send_body(body, encode_chunked)
def httpconnection_patched_endheaders(self, message_body=None, encode_chunked=False):
    """Indicate that the last header line has been sent to the server.

    This sends the buffered request to the server; the optional
    message_body is transmitted along with it. (Reimplemented because the
    encode_chunked argument was added after python 3.4.)
    """
    # Guard clause: only legal while a request is started but not yet sent.
    if self._HTTPConnection__state != _CS_REQ_STARTED:
        raise CannotSendHeader()
    self._HTTPConnection__state = _CS_REQ_SENT
    self._send_output(message_body, encode_chunked=encode_chunked)
def httpconnection_patched_read_readable(self, readable):
    """Yield successive 8 KiB blocks read from a file-like object,
    encoding text streams as iso-8859-1. (Reimplemented because it was
    added to httplib after python 3.4 but is needed by send_output.)
    """
    blocksize = 8192
    if self.debuglevel > 0:
        print("sendIng a read()able")
    encode = self._is_textIO(readable)
    if encode and self.debuglevel > 0:
        print("encoding file using iso-8859-1")
    while True:
        block = readable.read(blocksize)
        if not block:
            return
        yield block.encode("iso-8859-1") if encode else block
def httpconnection_patched_send_output(self, message_body=None,
                                       encode_chunked=False):
    """Send the currently buffered request and clear the buffer.

    Appends an extra \\r\\n to the buffer; a message_body may be specified
    to be appended to the request. (Reimplemented because the
    encode_chunked parameter was added after python 3.4.)
    """
    buffered = self._buffer
    buffered.extend((b"", b""))
    header_block = b"\r\n".join(buffered)
    del buffered[:]
    self.send(header_block)

    if message_body is not None:
        self.wrapper_send_body(message_body, encode_chunked)
class ExpectationFailed(HTTPException):
    """Raised when the server answers 417 to an Expect: 100-continue request."""
# Wrappers #
def httpconnection_patched_wrapper_send_body(self, message_body, encode_chunked=False):
    """Send `message_body` over the connection, optionally applying
    HTTP/1.1 chunked transfer encoding.

    Accepts file-like objects, buffer-API objects (bytes, bytearray, ...)
    and generic iterables of chunks.
    """
    # create a consistent interface to message_body
    if hasattr(message_body, 'read'):
        # Let file-like take precedence over byte-like. This
        # is needed to allow the current position of mmap'ed
        # files to be taken into account.
        chunks = self._read_readable(message_body)
    else:
        try:
            # this is solely to check to see if message_body
            # implements the buffer API. it /would/ be easier
            # to capture if PyObject_CheckBuffer was exposed
            # to Python.
            memoryview(message_body)
        except TypeError:
            try:
                chunks = iter(message_body)
            except TypeError:
                raise TypeError("message_body should be a bytes-like "
                                "object or an iterable, got %r"
                                % type(message_body))
        else:
            # the object implements the buffer interface and
            # can be passed directly into socket methods
            chunks = (message_body,)

    for chunk in chunks:
        if not chunk:
            if self.debuglevel > 0:
                print('Zero length chunk ignored')
            continue

        if encode_chunked and self._http_vsn == 11:
            # chunked encoding: prefix each chunk with its hex size.
            chunk = '{:X}\r\n'.format(len(chunk)).encode('ascii') + chunk \
                + b'\r\n'
        self.send(chunk)

    if encode_chunked and self._http_vsn == 11:
        # end chunked transfer
        self.send(b'0\r\n\r\n')
# Install the patched implementations into http.client so the rest of s3cmd
# transparently gains "Expect: 100-continue" support.
# NOTE(review): the generator is installed as `_send_readable`, yet
# wrapper_send_body above calls `self._read_readable` — on interpreters
# lacking a native HTTPConnection._read_readable this patched copy would
# never be found under that name; verify the intended attribute name.
httplib.HTTPResponse.begin = httpresponse_patched_begin
httplib.HTTPConnection.endheaders = httpconnection_patched_endheaders
httplib.HTTPConnection._send_readable = httpconnection_patched_read_readable
httplib.HTTPConnection._send_output = httpconnection_patched_send_output
httplib.HTTPConnection._send_request = httpconnection_patched_send_request

# Interfaces added to httplib.HTTPConnection:
httplib.HTTPConnection.wrapper_send_body = httpconnection_patched_wrapper_send_body
| 11,505 | Python | .py | 268 | 34.589552 | 87 | 0.643783 | s3tools/s3cmd | 4,533 | 903 | 301 | GPL-2.0 | 9/5/2024, 5:10:46 PM (Europe/Amsterdam) |