index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
78,260 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/webserviceutils.py | # coding: utf-8
"""
Webservice
"""
from typing import List
import json
import requests
from synchromoodle.config import WebServiceConfig
class WebService:
    """
    Access layer for the Moodle web service (REST protocol).
    """
    def __init__(self, config: "WebServiceConfig"):
        # Base URL of the Moodle REST endpoint, built from the configured host.
        self.config = config
        self.url = "%s/webservice/rest/server.php" % config.moodle_host
    def delete_users(self, userids: List[int]):
        """
        Deletes users through the Moodle web service.
        :param userids: Moodle user ids to delete
        :return: Decoded JSON payload returned by the web service
        :raises Exception: if the web service reports an error ('exception' key)
        """
        # Moodle expects the ids as indexed parameters: userids[0], userids[1], ...
        users_to_delete = {"userids[%d]" % i: userid for i, userid in enumerate(userids)}
        # NOTE(review): Moodle accepts GET here, but POST would keep the token
        # out of server access logs — confirm before changing.
        res = requests.get(url=self.url,
                           params={
                               'wstoken': self.config.token,
                               'moodlewsrestformat': "json",
                               'wsfunction': "core_user_delete_users",
                               **users_to_delete
                           })
        json_data = json.loads(res.text)
        if json_data is not None and 'exception' in json_data:
            raise Exception(json_data['message'])
        return json_data
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,261 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for the synchromoodle distribution."""
import os
import re
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))
install_requires = ['mysql-connector-python',
                    'ldap3',
                    'ruamel.yaml',
                    'requests']
setup_requires = ['pytest-runner']
dev_require = ['pylint',
               'tox']
tests_require = ['pytest>=4',
                 'pytest-docker',
                 'sqlparse',
                 'cachetools']
entry_points = {
    'console_scripts': [
        'synchromoodle = synchromoodle.__main__:main'
    ],
}
dependency_links = ['https://github.com/Toilal/pytest-docker/tarball/master#egg=pytest-docker']
# Read the version from the package's __version__.py without importing it.
# Explicit encoding: the default depends on the locale and the file is UTF-8.
with open('synchromoodle/__version__.py', 'r', encoding='utf-8') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]$', f.read(), re.MULTILINE).group(1)
args = dict(name='synchromoodle',
            version=version,
            description='Scripts de synchronisation Moodle.',
            # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
            classifiers=['Development Status :: 5 - Production/Stable',
                         'Operating System :: OS Independent',
                         'Programming Language :: Python :: 3',
                         'Programming Language :: Python :: 3.5',
                         'Programming Language :: Python :: 3.6',
                         'Programming Language :: Python :: 3.7'
                         ],
            packages=find_packages(),
            dependency_links=dependency_links,
            install_requires=install_requires,
            setup_requires=setup_requires,
            tests_require=tests_require,
            entry_points=entry_points,
            test_suite='test',
            zip_safe=True,
            extras_require={
                'test': tests_require,
                'dev': dev_require
            })
setup(**args)
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,262 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/conftest.py | # coding: utf-8
import datetime
import json
import os
import time
import pytest
from synchromoodle.config import Config, ActionConfig
from synchromoodle.dbutils import Database
from synchromoodle.ldaputils import Ldap
pytest_plugins = ["docker"]
@pytest.fixture(scope='session')
def docker_compose_file(pytestconfig):
    """Path of the docker-compose file used to spawn the test containers."""
    return os.path.join(str(pytestconfig.rootdir), 'docker-compose.pytest.yml')
@pytest.fixture(scope='session')
def docker_compose_subprocess_kwargs():
    """Extra keyword arguments for the docker-compose subprocess (none needed)."""
    return {}
@pytest.fixture(scope="session")
def action_config():
    """Session-wide default action configuration."""
    return ActionConfig()
@pytest.fixture(scope="session")
def config(action_config: ActionConfig):
    """Session-wide global configuration holding the default action."""
    config = Config()
    config.actions.append(action_config)
    return config
@pytest.fixture(scope="session", name="docker_config")
def docker_config(config, docker_ip, docker_services):
    """
    Configures the application to connect to the test containers.
    Also makes sure the containers are up before returning.
    :param config: base configuration to clone
    :param docker_ip: ip of the docker host
    :param docker_services: docker services helper (port lookup)
    :return: configuration pointing at the test containers
    """
    docker_config = Config()
    # Deep copy of the base config through a JSON round-trip; the default=
    # fallback stringifies objects without a __dict__ — presumably enums/simple
    # values; TODO confirm nothing meaningful is lost in str() form.
    docker_config.update(**json.loads(json.dumps(config, default=lambda o: getattr(o, '__dict__', str(o)))))
    docker_config.database.host = docker_ip
    now = datetime.datetime.now()
    # One shared deadline (seconds) for both availability loops below.
    timeout = 60
    while True:
        docker_config.ldap.uri = "ldap://%s:%s" % (docker_ip, docker_services.port_for('ldap-test', 389))
        # Ensure ldap is available: retry once per second until the deadline,
        # re-raising the last connection error if it is exceeded.
        ldap = Ldap(docker_config.ldap)
        try:
            ldap.connect()
        except Exception as e:
            time.sleep(1)
            if (datetime.datetime.now() - now).seconds > timeout:
                raise e
            continue
        ldap.disconnect()
        break
    # Ensure database is available (same retry scheme as above)
    while True:
        docker_config.database.port = docker_services.port_for('moodle-db-test', 3306)
        db = Database(docker_config.database, docker_config.constantes)
        try:
            db.connect()
        except Exception as e:
            time.sleep(1)
            # Host/port are re-assigned on each retry in case the mapping changed.
            docker_config.database.host = docker_ip
            docker_config.database.port = docker_services.port_for('moodle-db-test', 3306)
            if (datetime.datetime.now() - now).seconds > timeout:
                raise e
            continue
        db.disconnect()
        break
    return docker_config
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,263 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/__init__.py | # coding: utf-8
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,264 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/__main__.py | #!/usr/bin/env python3
# coding: utf-8
"""
Entrypoint
"""
from logging import getLogger, basicConfig
from logging.config import dictConfig
from synchromoodle import actions
from synchromoodle.arguments import parse_args
from synchromoodle.config import ConfigLoader
def main():
    """
    Entry point: loads configuration, sets up logging, then runs every
    configured action, exiting with the number of failed actions.
    """
    arguments = parse_args()
    config_loader = ConfigLoader()
    config = config_loader.load(['config.yml', 'config.yaml'], True)
    config = config_loader.update(config, arguments.config)
    if config.logging is not False:
        # pylint is not that smart with union type conditional inference
        # pylint: disable=no-member,not-a-mapping,unsupported-membership-test,unsupported-assignment-operation
        if isinstance(config.logging, dict):
            # A dict either feeds basicConfig (when 'basic' is truthy) or dictConfig.
            if config.logging.pop('basic', None):
                basicConfig(**config.logging)
            else:
                if 'version' not in config.logging:
                    config.logging['version'] = 1
                dictConfig(config.logging)
        elif isinstance(config.logging, str):
            # A plain string is interpreted as the log level.
            basicConfig(level=config.logging)
        else:
            basicConfig(level='INFO')
    log = getLogger()
    try:
        config.validate()
    except ValueError as e:
        log.error(e)
        # raise SystemExit rather than exit(): exit() is a site builtin that
        # is absent under `python -S` and intended for interactive use only.
        raise SystemExit(1)
    log.info("Démarrage")
    errors = 0
    for action in config.actions:
        try:
            action_func = getattr(actions, action.type)
        except AttributeError:
            errors += 1
            log.error("Action invalide: %s", action)
            continue
        log.info("Démarrage de l'action %s", action)
        try:
            action_func(config, action, arguments)
        except Exception:  # pylint: disable=broad-except
            # One failing action must not prevent the remaining ones from running.
            errors += 1
            log.exception("Une erreur inattendue s'est produite")
        log.info("Fin de l'action %s", action)
    log.info("Terminé")
    if errors:
        # Non-zero exit status reflects the number of failed actions.
        raise SystemExit(errors)
if __name__ == "__main__":
    main()
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,265 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/actions.py | # coding: utf-8
"""
Actions
"""
from logging import getLogger
from synchromoodle.synchronizer import Synchronizer
from synchromoodle.timestamp import TimestampStore
from .arguments import DEFAULT_ARGS
from .config import Config, ActionConfig
from .dbutils import Database
from .ldaputils import Ldap
def default(config: Config, action: ActionConfig, arguments=DEFAULT_ARGS):
    """
    Updates the Moodle database from the LDAP data (students and teachers,
    school by school).
    :param config: Execution configuration
    :param action: Action configuration
    :param arguments: Command line arguments
    """
    log = getLogger()
    db = Database(config.database, config.constantes)
    ldap = Ldap(config.ldap)
    try:
        db.connect()
        ldap.connect()
        synchronizer = Synchronizer(ldap, db, config, action, arguments)
        synchronizer.initialize()
        timestamp_store = TimestampStore(action.timestamp_store)
        log.info('Traitement des établissements')
        for uai in action.etablissements.listeEtab:
            etablissement_log = log.getChild('etablissement.%s' % uai)
            etablissement_log.info('Traitement de l\'établissement (uai=%s)' % uai)
            etablissement_context = synchronizer.handle_etablissement(uai, log=etablissement_log)
            etablissement_log.info('Traitement des élèves pour l\'établissement (uai=%s)' % uai)
            # Only entries modified since the last recorded run for this school
            since_timestamp = timestamp_store.get_timestamp(uai)
            for eleve in ldap.search_eleve(since_timestamp, uai):
                utilisateur_log = etablissement_log.getChild("utilisateur.%s" % eleve.uid)
                utilisateur_log.info("Traitement de l'élève (uid=%s)" % eleve.uid)
                synchronizer.handle_eleve(etablissement_context, eleve, log=utilisateur_log)
            etablissement_log.info("Traitement du personnel enseignant pour l'établissement (uai=%s)" % uai)
            for enseignant in ldap.search_enseignant(since_timestamp=since_timestamp, uai=uai):
                utilisateur_log = etablissement_log.getChild("enseignant.%s" % enseignant.uid)
                utilisateur_log.info("Traitement de l'enseignant (uid=%s)" % enseignant.uid)
                synchronizer.handle_enseignant(etablissement_context, enseignant, log=utilisateur_log)
            # Commit once per school, then mark it as processed
            db.connection.commit()
            timestamp_store.mark(uai)
        # Timestamps are persisted only after all schools have been processed
        timestamp_store.write()
        log.info("Fin du traitement des établissements")
    finally:
        # Always release both connections, even on failure
        db.disconnect()
        ldap.disconnect()
def interetab(config: Config, action: ActionConfig, arguments=DEFAULT_ARGS):
    """
    Updates the Moodle database from the LDAP data.
    This update covers inter-school users and administrators.
    :param config: Global configuration
    :param action: Action configuration
    :param arguments: Command line arguments
    :return:
    """
    log = getLogger()
    db = Database(config.database, config.constantes)
    ldap = Ldap(config.ldap)
    try:
        db.connect()
        ldap.connect()
        synchronizer = Synchronizer(ldap, db, config, action, arguments)
        synchronizer.initialize()
        timestamp_store = TimestampStore(action.timestamp_store)
        log.info('Traitement des utilisateurs inter-établissements')
        # LDAP filter selecting inter-school users (attribute/value from config)
        personne_filter = {
            action.inter_etablissements.ldap_attribut_user: action.inter_etablissements.ldap_valeur_attribut_user
        }
        since_timestamp = timestamp_store.get_timestamp(action.inter_etablissements.cle_timestamp)
        for personne_ldap in ldap.search_personne(since_timestamp=since_timestamp, **personne_filter):
            utilisateur_log = log.getChild("utilisateur.%s" % personne_ldap.uid)
            utilisateur_log.info("Traitement de l'utilisateur (uid=%s)" % personne_ldap.uid)
            synchronizer.handle_user_interetab(personne_ldap, log=utilisateur_log)
        log.info('Mise à jour des cohortes de la categorie inter-établissements')
        for is_member_of, cohort_name in action.inter_etablissements.cohorts.items():
            synchronizer.mise_a_jour_cohorte_interetab(is_member_of, cohort_name, since_timestamp, log=log)
        # Single commit, then record the run timestamp
        db.connection.commit()
        timestamp_store.mark(action.inter_etablissements.cle_timestamp)
        timestamp_store.write()
        log.info("Fin du traitement des utilisateurs inter-établissements")
    finally:
        db.disconnect()
        ldap.disconnect()
def inspecteurs(config: Config, action: ActionConfig, arguments=DEFAULT_ARGS):
    """
    Updates the Moodle database from the LDAP data.
    This update covers inspectors.
    :param config: Global configuration
    :param action: Action configuration
    :param arguments: Command line arguments
    """
    log = getLogger()
    db = Database(config.database, config.constantes)
    ldap = Ldap(config.ldap)
    try:
        db.connect()
        ldap.connect()
        synchronizer = Synchronizer(ldap, db, config, action, arguments)
        synchronizer.initialize()
        log.info('Traitement des inspecteurs')
        timestamp_store = TimestampStore(action.timestamp_store)
        # LDAP filter selecting inspectors (attribute/value from config)
        personne_filter = {
            action.inspecteurs.ldap_attribut_user: action.inspecteurs.ldap_valeur_attribut_user
        }
        # Processing of the inspectors
        for personne_ldap in ldap.search_personne(timestamp_store.get_timestamp(action.inspecteurs.cle_timestamp),
                                                  **personne_filter):
            utilisateur_log = log.getChild("utilisateur.%s" % personne_ldap.uid)
            utilisateur_log.info("Traitement de l'inspecteur (uid=%s)" % personne_ldap.uid)
            # NOTE(review): utilisateur_log is not passed to handle_inspecteur,
            # unlike the other actions — confirm whether this is intended.
            synchronizer.handle_inspecteur(personne_ldap)
        db.connection.commit()
        # Record the date of this run
        timestamp_store.mark(action.inspecteurs.cle_timestamp)
        timestamp_store.write()
        log.info('Fin du traitement des inspecteurs')
    finally:
        db.disconnect()
        ldap.disconnect()
def nettoyage(config: Config, action: ActionConfig, arguments=DEFAULT_ARGS):
    """
    Purges the cohorts in the database against the LDAP content and removes
    useless (empty) cohorts, then anonymizes/deletes unused users.
    :param config: Global configuration
    :param action: Action configuration
    :param arguments: Command line arguments
    :return:
    """
    log = getLogger()
    db = Database(config.database, config.constantes)
    ldap = Ldap(config.ldap)
    try:
        db.connect()
        ldap.connect()
        synchronizer = Synchronizer(ldap, db, config, action, arguments)
        synchronizer.initialize()
        log.info("Début de l'action de nettoyage")
        for uai in action.etablissements.listeEtab:
            etablissement_log = log.getChild('etablissement.%s' % uai)
            etablissement_log.info("Nettoyage de l'établissement (uai=%s)" % uai)
            # readonly: the school context is only needed for comparison here
            etablissement_context = synchronizer.handle_etablissement(uai, log=etablissement_log, readonly=True)
            # Each pair below compares DB cohorts (matched by the regex) with
            # LDAP cohorts (matched by the SQL LIKE pattern) for one cohort family.
            eleves_by_cohorts_db, eleves_by_cohorts_ldap = synchronizer.\
                get_users_by_cohorts_comparators(etablissement_context, r'(Élèves de la Classe )(.*)$',
                                                 'Élèves de la Classe %')
            eleves_lvformation_by_cohorts_db, eleves_lvformation_by_cohorts_ldap = synchronizer.\
                get_users_by_cohorts_comparators(etablissement_context, r'(Élèves du Niveau de formation )(.*)$',
                                                 'Élèves du Niveau de formation %')
            profs_classe_by_cohorts_db, profs_classe_by_cohorts_ldap = synchronizer.\
                get_users_by_cohorts_comparators(etablissement_context, r'(Profs de la Classe )(.*)$',
                                                 'Profs de la Classe %')
            profs_etab_by_cohorts_db, profs_etab_by_cohorts_ldap = synchronizer.\
                get_users_by_cohorts_comparators(etablissement_context, r"(Profs de l'établissement )(.*)$",
                                                 "Profs de l'établissement %")
            log.info("Purge des cohortes Elèves de la Classe")
            synchronizer.purge_cohorts(eleves_by_cohorts_db, eleves_by_cohorts_ldap,
                                       "Élèves de la Classe %s")
            log.info("Purge des cohortes Elèves du Niveau de formation")
            synchronizer.purge_cohorts(eleves_lvformation_by_cohorts_db, eleves_lvformation_by_cohorts_ldap,
                                       'Élèves du Niveau de formation %s')
            log.info("Purge des cohortes Profs de la Classe")
            synchronizer.purge_cohorts(profs_classe_by_cohorts_db, profs_classe_by_cohorts_ldap,
                                       'Profs de la Classe %s')
            log.info("Purge des cohortes Profs de l'établissement")
            synchronizer.purge_cohorts(profs_etab_by_cohorts_db, profs_etab_by_cohorts_ldap,
                                       "Profs de l'établissement %s")
        log.info("Suppression des cohortes vides (sans utilisateur)")
        db.delete_empty_cohorts()
        # First commit to release the locks for the moodle webservice
        db.connection.commit()
        log.info("Début de la procédure d'anonymisation/suppression des utilisateurs inutiles")
        ldap_users = ldap.search_personne()
        db_valid_users = db.get_all_valid_users()
        synchronizer.anonymize_or_delete_users(ldap_users, db_valid_users)
        db.delete_useless_users()
        db.connection.commit()
        log.info("Fin d'action de nettoyage")
    finally:
        db.disconnect()
        ldap.disconnect()
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,266 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/utils/db_utils.py | # coding: utf-8
import re
from pkgutil import get_data
import sqlparse
from cachetools import cached, Cache
from synchromoodle.dbutils import Database
__statements_cache = Cache(maxsize=100)
def init(db: Database):
    """Initializes the test database by playing the DDL script."""
    run_script('data/ddl.sql', db)
def reset(db: Database):
    """Resets the test database by replaying the DDL script (same as init)."""
    run_script('data/ddl.sql', db)
@cached(__statements_cache)
def _get_statements(path: str):
    """Load a SQL script from the test package data and split it into statements."""
    raw_script = str(get_data('test', path), 'utf8')
    # Drop block comments of the form "/* ... */;" before splitting
    without_comments = re.sub(r'/\*.+?\*/;\n', "", raw_script, flags=re.MULTILINE)
    return sqlparse.split(without_comments)
def run_script(script: str, db: Database, connect=True):
    """
    Runs a SQL script against the database, statement by statement.
    :param script: path of the script inside the test package data
    :param db: database access object
    :param connect: when True, open and close the connection around the run
    """
    if connect:
        db.connect()
    try:
        statements = _get_statements(script)
        for statement in statements:
            db.mark.execute(statement)
            # Drain any remaining result sets so the next execute() does not fail
            while db.mark.nextset():
                pass
        db.connection.commit()
    finally:
        if connect:
            db.disconnect()
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,267 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/ldaputils.py | # coding: utf-8
"""
Accès LDAP
"""
import datetime
from collections.abc import Iterable
from typing import List, Dict, Union
from ldap3 import Server, Connection, LEVEL
from synchromoodle.config import LdapConfig
class ClasseLdap:
    """A class (classroom) reference extracted from an LDAP attribute value."""
    def __init__(self, etab_dn: str, classe: str):
        self.etab_dn = etab_dn
        self.classe = classe
def extraire_classes_ldap(classes_ldap: List[str]):
    """
    Extracts class names from raw LDAP attribute values.
    :param classes_ldap: raw values, "$"-separated with the school DN first
    :return: list of ClasseLdap (values without a "$" separator are skipped)
    """
    return [ClasseLdap(parts[0], parts[-1])
            for parts in (valeur.split("$") for valeur in classes_ldap)
            if len(parts) > 1]
def ldap_escape(ldapstr: str) -> str:
    """
    Escapes the characters that are special in LDAP search filters.
    :param ldapstr: raw value (None is mapped to the empty string)
    :return: escaped value, safe to embed in a filter
    """
    if ldapstr is None:
        return ""
    # Single-pass translation; same mapping as the RFC 4515 escape sequences
    escape_table = str.maketrans({
        "\\": "\\5C",
        "*": "\\2A",
        "(": "\\28",
        ")": "\\29",
        "\000": "\\00",
    })
    return ldapstr.translate(escape_table)
class StructureLdap:
    """
    Represents a structure (school) entry from the LDAP.
    """
    def __init__(self, data):
        # TODO: the replace should probably support every academy, not only Orléans-Tours?
        self.nom = data.ou.value.replace("-ac-ORL._TOURS", "")
        self.type = data.ENTStructureTypeStruct.value
        # First two digits of the postal code (presumably the department) — TODO confirm
        self.code_postal = data.postalCode.value[:2]
        self.siren = data.ENTStructureSIREN.value
        self.uai = data.ENTStructureUAI.value
        self.domaine = data.ESCODomaines.value
        self.domaines = data.ESCODomaines.values
        self.dn = data.entry_dn
    def __str__(self):
        return "uai=%s, siren=%s, nom=%s" % (self.uai, self.siren, self.nom)
    def __repr__(self):
        return "[%s] %s" % (self.__class__.__name__, str(self))
class PersonneLdap:
    """
    Represents a person entry from the LDAP.
    """
    def __init__(self, data):
        self.uid = data.uid.value
        self.sn = data.sn.value
        self.given_name = data.givenName.value
        self.domaine = data.ESCODomaines.value
        self.domaines = data.ESCODomaines.values
        self.uai_courant = data.ESCOUAICourant.value
        self.mail = None
        self.classes = None # type: List[ClasseLdap]
        # 'mail' and 'isMemberOf' are optional attributes in the directory
        if 'mail' in data:
            self.mail = data.mail.value
        self.is_member_of = None
        if 'isMemberOf' in data:
            self.is_member_of = data.isMemberOf.values
    def __str__(self):
        return "uid=%s, given_name=%s, sn=%s" % (self.uid, self.given_name, self.sn)
    def __repr__(self):
        return "[%s] %s" % (self.__class__.__name__, str(self))
class EleveLdap(PersonneLdap):
    """
    Represents a student entry from the LDAP.
    """
    def __init__(self, data):
        super().__init__(data)
        self.niveau_formation = data.ENTEleveNivFormation.value
        self.classe = None # type: ClasseLdap
        if 'ENTEleveClasses' in data:
            self.classes = extraire_classes_ldap(data.ENTEleveClasses.values)
            if self.classes:
                # The first entry is treated as the main class — TODO confirm
                self.classe = self.classes[0]
class EnseignantLdap(PersonneLdap):
    """
    Represents a teacher entry from the LDAP.
    """
    def __init__(self, data):
        super().__init__(data)
        self.structure_rattachement = data.ENTPersonStructRattach.value
        # Optional attributes: profiles, school UAIs and taught classes
        self.profils = None
        if 'ENTPersonProfils' in data:
            self.profils = data.ENTPersonProfils.values
        self.uais = None
        if 'ESCOUAI' in data:
            self.uais = data.ESCOUAI.values
        if 'ENTAuxEnsClasses' in data:
            self.classes = extraire_classes_ldap(data.ENTAuxEnsClasses.values)
class Ldap:
"""
Couche d'accès aux données du LDAP.
"""
config = None # type: LdapConfig
connection = None # type: Connection
def __init__(self, config: LdapConfig):
self.config = config
def connect(self):
"""
Etablit la connection au LDAP.
"""
server = Server(host=self.config.uri)
self.connection = Connection(server,
user=self.config.username,
password=self.config.password,
auto_bind=True,
raise_exceptions=True)
def disconnect(self):
"""
Ferme la connection au LDAP.
"""
if self.connection:
self.connection.unbind()
self.connection = None
def get_structure(self, uai: str) -> StructureLdap:
"""
Recherche de structures.
:param uai: code établissement
:return: L'établissement trouvé, ou None si non trouvé.
"""
structures = self.search_structure(uai)
return structures[0] if structures else None
def search_structure(self, uai: str = None) -> List[StructureLdap]:
"""
Recherche de structures.
:param uai: code établissement
:return: Liste des structures trouvées
"""
ldap_filter = _get_filtre_etablissement(uai)
self.connection.search(self.config.structuresDN, ldap_filter,
search_scope=LEVEL, attributes=
['ou', 'ENTStructureSIREN', 'ENTStructureTypeStruct', 'postalCode', 'ENTStructureUAI',
'ESCODomaines', '+'])
return [StructureLdap(entry) for entry in self.connection.entries]
def search_personne(self, since_timestamp: datetime.datetime = None, **filters) -> List[PersonneLdap]:
"""
Recherche de personnes.
:param since_timestamp: datetime.datetime
:param filters: Filtres à appliquer
:return: Liste des personnes
"""
ldap_filter = _get_filtre_personnes(since_timestamp, **filters)
self.connection.search(self.config.personnesDN, ldap_filter,
search_scope=LEVEL, attributes=
['objectClass', 'uid', 'sn', 'givenName', 'mail', 'ESCODomaines', 'ESCOUAICourant',
'ENTPersonStructRattach', 'isMemberOf', '+'])
return [PersonneLdap(entry) for entry in self.connection.entries]
def search_eleve(self, since_timestamp: datetime.datetime = None, uai: str = None) -> List[EleveLdap]:
"""
Recherche d'étudiants.
:param since_timestamp: datetime.datetime
:param uai: code établissement
:return: Liste des étudiants correspondant
"""
ldap_filter = _get_filtre_eleves(since_timestamp, uai)
self.connection.search(self.config.personnesDN, ldap_filter,
search_scope=LEVEL, attributes=
['uid', 'sn', 'givenName', 'mail', 'ENTEleveClasses', 'ENTEleveNivFormation',
'ESCODomaines', 'ESCOUAICourant', '+'])
return [EleveLdap(entry) for entry in self.connection.entries]
def search_eleves_in_classe(self, classe, uai):
"""
Recherche les élèves dans une classe.
:param classe:
:param uai:
:return:
"""
ldap_filter = '(&(ENTEleveClasses=*$%s)(ESCOUAI=%s))' % (ldap_escape(classe), ldap_escape(uai))
self.connection.search(self.config.personnesDN, ldap_filter,
search_scope=LEVEL, attributes=
['uid', 'sn', 'givenName', 'mail', 'ENTEleveClasses', 'ENTEleveNivFormation',
'ESCODomaines', 'ESCOUAICourant', '+'])
return [EleveLdap(entry) for entry in self.connection.entries]
def search_enseignant(self, since_timestamp: datetime.datetime = None, uai=None, tous=False) \
-> List[EnseignantLdap]:
"""
Recherche d'enseignants.
:param since_timestamp: datetime.datetime
:param uai: code etablissement
:param tous: Si True, retourne également le personnel non enseignant
:return: Liste des enseignants
"""
ldap_filter = get_filtre_enseignants(since_timestamp, uai, tous)
self.connection.search(self.config.personnesDN,
ldap_filter, LEVEL, attributes=
['objectClass', 'uid', 'sn', 'givenName', 'mail', 'ESCOUAI', 'ESCODomaines',
'ESCOUAICourant', 'ENTPersonStructRattach', 'ENTPersonProfils', 'isMemberOf', '+',
'ENTAuxEnsClasses'])
return [EnseignantLdap(entry) for entry in self.connection.entries]
def get_domaines_etabs(self) -> Dict[str, List[str]]:
    """
    Build the "ESCOUAICourant -> domains" mapping of all schools.

    :return: dictionary mapping each school UAI to its list of domains
    """
    return {structure.uai: structure.domaines for structure in self.search_structure()}
def _get_filtre_eleves(since_timestamp: datetime.datetime = None, uai: str = None) -> str:
"""
Construit le filtre pour récupérer les élèves au sein du LDAP
:param since_timestamp:
:param uai: code établissement
:return: Le filtre
"""
filtre = "(&(objectClass=ENTEleve)"
if uai:
filtre += "(ESCOUAI={uai})".format(uai=ldap_escape(uai))
if since_timestamp:
filtre += "(modifyTimeStamp>={since_timestamp})" \
.format(since_timestamp=since_timestamp.strftime("%Y%m%d%H%M%SZ"))
filtre = filtre + ")"
return filtre
def get_filtre_enseignants(since_timestamp: datetime.datetime = None, uai=None, tous=False) -> str:
    """
    Build the LDAP filter used to fetch teachers.

    :param since_timestamp: only match entries modified after this datetime
    :param uai: school (etablissement) code
    :param tous: when True, also match non-teaching staff
    :return: the LDAP filter string
    """
    if tous:
        object_classes = "(|(objectClass=ENTAuxEnseignant)" \
                         "(objectClass=ENTAuxNonEnsEtab)" \
                         "(objectClass=ENTAuxNonEnsCollLoc)" \
                         ")"
    else:
        object_classes = "(objectClass=ENTAuxEnseignant)"
    # The technical ADM00000 account is always excluded.
    parts = ["(&", object_classes, "(!(uid=ADM00000))"]
    if uai:
        parts.append("(ESCOUAI=%s)" % ldap_escape(uai))
    if since_timestamp:
        parts.append("(modifyTimeStamp>=%s)" % since_timestamp.strftime("%Y%m%d%H%M%SZ"))
    parts.append(")")
    return "".join(parts)
def _get_filtre_personnes(since_timestamp: datetime.datetime = None, **filters: Union[str, List[str]]) -> str:
"""
Construit le filtre pour récupérer les personnes
:param modify_time_stamp:
:param filters: Filtres spécifiques à appliquer
:return: Le filtre
"""
filtre = "(&(|" \
+ "(objectClass=ENTPerson)" \
+ ")" \
+ "(!(uid=ADM00000))"
if filters:
filtre = filtre + "(|"
for k, v in filters.items():
if not isinstance(v, Iterable) or isinstance(v, str):
v = [v]
for item in v:
attribute_filtre = "(%s=%s)" % (ldap_escape(k), ldap_escape(item))
filtre = filtre + attribute_filtre
filtre = filtre + ")"
if since_timestamp:
filtre = filtre + "(modifyTimeStamp>=%s)"
filtre = filtre % since_timestamp.strftime("%Y%m%d%H%M%SZ")
filtre = filtre + ")"
return filtre
def _get_filtre_etablissement(uai=None):
"""Construit le filtre pour les établissements."""
filtre = "(&(ObjectClass=ENTEtablissement)" \
"(!(ENTStructureSiren=0000000000000A))"
if uai:
filtre += "(ENTStructureUAI={uai})".format(uai=ldap_escape(uai))
filtre += ")"
return filtre
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,268 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/utils/ldap_utils.py | # coding: utf-8
from io import StringIO
from pkgutil import get_data
from ldap3 import Connection, LEVEL
from ldap3.core.exceptions import LDAPNoSuchObjectResult
import test.utils.ldif as ldif
from synchromoodle.ldaputils import Ldap
def _remove_all_in(connection: Connection, dn: str):
    """Delete every entry found directly under the given DN (best effort)."""
    try:
        connection.search(dn, '(objectClass=*)', search_scope=LEVEL)
    except LDAPNoSuchObjectResult:
        # Nothing to clean up when the container itself does not exist.
        return
    for found in list(connection.entries):
        connection.delete(found.entry_dn)
def reset(l: Ldap):
    """Empty the groups, people and structures branches of the test LDAP."""
    l.connect()
    try:
        for dn in (l.config.groupsDN, l.config.personnesDN, l.config.structuresDN):
            _remove_all_in(l.connection, dn)
    finally:
        l.disconnect()
class LDIFLoader(ldif.LDIFRecordList):
    """LDIF parser that inserts each parsed record into an LDAP connection."""

    def __init__(self, connection: Connection,
                 input_file, ignored_attr_types=None, max_entries=0, process_url_schemes=None):
        super().__init__(input_file, ignored_attr_types, max_entries, process_url_schemes)
        # Connection the parsed entries are written to.
        self.connection = connection

    def handle_modify(self, dn, modops, controls=None):
        # Modify records are deliberately ignored: the loader only creates entries.
        pass

    def handle(self, dn, entry):
        # Called by the parser for each record: create the entry in the directory.
        self.connection.add(dn, attributes=entry)
def run_ldif(path: str, ldap: Ldap):
    """
    Load an LDIF fixture file into the test LDAP directory.

    :param path: path of the ldif resource inside the ``test`` package
    :param ldap: ldap adapter
    """
    raw = get_data('test', path)
    with StringIO(str(raw, 'utf8')) as handle:
        LDIFLoader(ldap.connection, handle).parse()
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,269 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/config.py | # coding: utf-8
"""
Configuration
"""
from logging import getLogger
from typing import List, Dict, Union
import ruamel.yaml as yaml
log = getLogger('config')
class _BaseConfig:
def __init__(self, **entries):
self.update(**entries)
def update(self, **entries):
"""
Met à jour les données de l'objet de configuration.
:param entries:
:return:
"""
self.__dict__.update(entries)
class WebServiceConfig(_BaseConfig):
    """Configuration of the Moodle web service."""

    def __init__(self, **entries):
        # Access token for the Moodle web service.
        self.token = ""
        # HTTP host used to reach the Moodle web service.
        self.moodle_host = ""
        # Command run to launch the backup of a course.
        self.backup_cmd = "php backup.php --courseid=%courseid% --destination=/MoodleBackups"
        # Regular expression applied to the stdout of backup_cmd to check that
        # the backup succeeded.
        self.backup_success_re = "Backup completed"
        super().__init__(**entries)
class DeleteConfig(_BaseConfig):
    """Thresholds driving anonymization and deletion of stale users and courses."""

    def __init__(self, **entries):
        # Ids of users that must never be deleted.
        self.ids_users_undeletable = [1, 2]
        # Ids of the roles considered as teachers for deletion purposes.
        self.ids_roles_teachers = [2]
        # Days before anonymizing a student no longer present in the LDAP directory.
        self.delay_anonymize_student = 60
        # Days before deleting a student no longer present in the LDAP directory.
        self.delay_delete_student = 90
        # Days before anonymizing a teacher no longer present in the LDAP directory.
        self.delay_anonymize_teacher = 90
        # Days before deleting a teacher no longer present in the LDAP directory.
        self.delay_delete_teacher = 365
        # Days before backing up an unused course.
        self.delay_backup_course = 365
        super().__init__(**entries)
class ConstantesConfig(_BaseConfig):
    """Constants used throughout the synchronization."""

    def __init__(self, **entries):
        # Phone number assigned to anonymized users.
        self.anonymous_phone = "0606060606"
        # Value assigned to the miscellaneous profile fields of anonymized users.
        self.anonymous_name = "Anonyme"
        # Email address assigned to anonymized users.
        self.anonymous_mail = "anonyme@email.com"
        # Default theme for inter-school users.
        self.default_moodle_theme = "netocentre"  # type: str
        # By default, emails are only shown to course participants.
        self.default_mail_display = 2  # type: int
        # Email used when people have no email in the LDAP directory.
        self.default_mail = 'non_renseigne@netocentre.fr'  # type: str
        # Default domain.
        self.default_domain = "lycees.netocentre.fr"  # type: str
        # Id of the Moodle instance.
        self.id_instance_moodle = 1  # type: int
        # Context level for a category.
        self.niveau_ctx_categorie = 40  # type: int
        # Context level for a course.
        self.niveau_ctx_cours = 50  # type: int
        # Context level for a forum.
        self.niveau_ctx_forum = 70  # type: int
        # Context level for a block.
        self.niveau_ctx_bloc = 80  # type: int
        # Id of the admin role.
        self.id_role_admin = 1  # type: int
        # Id of the course creator role.
        self.id_role_createur_cours = 2  # type: int
        # Id of the course owner role.
        self.id_role_proprietaire_cours = 11  # type: int
        # Id of the teacher role.
        self.id_role_enseignant = 3  # type: int
        # Id of the student role.
        self.id_role_eleve = 5  # type: int
        # Id of the inspector role.
        self.id_role_inspecteur = 9  # type: int
        # Id of the director role.
        self.id_role_directeur = 18  # type: int
        # Id of the limited-rights user role.
        self.id_role_utilisateur_limite = 14  # type: int
        # Structure type of a CFA (apprenticeship center).
        self.type_structure_cfa = "CFA"  # type: str
        # Structure type of a middle school.
        self.type_structure_clg = "COLLEGE"  # type: str
        super().__init__(**entries)
class DatabaseConfig(_BaseConfig):
    """Configuration of the Moodle database."""

    def __init__(self, **entries):
        # Database name.
        self.database = "moodle"  # type: str
        # Moodle database user.
        self.user = "moodle"  # type: str
        # Password of the Moodle database user.
        self.password = "moodle"  # type: str
        # IP address or host name of the database server.
        self.host = "192.168.1.100"  # type: str
        # TCP port.
        self.port = 9806  # type: int
        # Table name prefix.
        self.entete = "mdl_"  # type: str
        # Charset used for the connection.
        self.charset = "utf8"  # type: str
        super().__init__(**entries)
class LdapConfig(_BaseConfig):
    """Configuration of the LDAP directory."""

    def __init__(self, **entries):
        # URI of the LDAP server.
        self.uri = "ldap://192.168.1.100:9889"  # type: str
        # Bind user.
        self.username = "cn=admin,ou=administrateurs,dc=esco-centre,dc=fr"  # type: str
        # Bind password.
        self.password = "admin"  # type: str
        # Base DN.
        self.baseDN = "dc=esco-centre,dc=fr"  # type: str
        # OU holding the structures.
        self.structuresRDN = "ou=structures"  # type: str
        # OU holding the people.
        self.personnesRDN = "ou=people"  # type: str
        # OU holding the groups.
        self.groupsRDN = "ou=groups"  # type: str
        # OU holding the administrators.
        self.adminRDN = "ou=administrateurs"  # type: str
        super().__init__(**entries)

    @property
    def structuresDN(self) -> str:
        """DN of the structures branch."""
        return self.structuresRDN + ',' + self.baseDN

    @property
    def personnesDN(self) -> str:
        """DN of the people branch."""
        return self.personnesRDN + ',' + self.baseDN

    @property
    def groupsDN(self) -> str:
        """DN of the groups branch."""
        return self.groupsRDN + ',' + self.baseDN

    @property
    def adminDN(self) -> str:
        """DN of the administrators branch."""
        return self.adminRDN + ',' + self.baseDN
class EtablissementRegroupement(_BaseConfig):
    """Configuration of a grouping of schools."""

    def __init__(self, **entries):
        # Name of the grouping.
        self.nom = ""  # type: str
        # UAI codes of the schools forming the grouping.
        self.uais = []  # type: List[str]
        super().__init__(**entries)
class EtablissementsConfig(_BaseConfig):
    """Configuration of the schools (etablissements)."""

    def __init__(self, **entries):
        # Groupings of schools.
        self.etabRgp = []  # type: List[EtablissementRegroupement]
        # Name of the inter-school category.
        self.inter_etab_categorie_name = 'Catégorie Inter-Établissements'  # type: str
        # Name of the inter-school category for CFAs.
        self.inter_etab_categorie_name_cfa = 'Catégorie Inter-CFA'  # type: str
        # List of schools to synchronize.
        self.listeEtab = []  # type: List[str]
        # Schools without administrators.
        self.listeEtabSansAdmin = []  # type: List[str]
        # Schools whose teachers' mail is not synchronized.
        self.listeEtabSansMail = []  # type: List[str]
        # Prefix of the "isMemberOf" attribute marking a Moodle local administrator.
        self.prefixAdminMoodleLocal = "(esco|clg37):admin:Moodle:local:"  # type: str
        # Prefix of the "isMemberOf" attribute marking a local administrator.
        self.prefixAdminLocal = "(esco|clg37):admin:local:"  # type: str
        super().__init__(**entries)

    def update(self, **entries):
        # Deserialize raw grouping dicts into EtablissementRegroupement objects.
        if 'etabRgp' in entries:
            entries['etabRgp'] = [EtablissementRegroupement(**rgp) for rgp in entries['etabRgp']]
        super().update(**entries)
class InterEtablissementsConfig(_BaseConfig):
    """Configuration of the inter-school section."""

    def __init__(self, **entries):
        # Cohorts to synchronize.
        self.cohorts = {}  # type: Dict[str, str]
        # Name of the inter-school category.
        self.categorie_name = '%%Cat%%gorie inter%%tablissements'  # type: str
        # LDAP attribute used to identify inter-school users.
        self.ldap_attribut_user = "isMemberOf"  # type: str
        # Attribute values marking a user as inter-school.
        self.ldap_valeur_attribut_user = ["cfa:Applications:Espace_Moodle:Inter_etablissements"]  # type: List[str]
        # Attribute value marking administrators of the inter-school section.
        self.ldap_valeur_attribut_admin = "cfa:admin:Moodle:local:Inter_etablissements"  # type: str
        # Key under which the timestamp of the last inter-school run is stored.
        self.cle_timestamp = "INTER_ETAB"  # type: str
        super().__init__(**entries)
class InspecteursConfig(_BaseConfig):
    """Configuration of the inspectors."""

    def __init__(self, **entries):
        # LDAP attribute used to identify inspectors.
        self.ldap_attribut_user = "ESCOPersonProfils"  # type: str
        # Attribute values marking a user as an inspector.
        self.ldap_valeur_attribut_user = ["INS"]  # type: List[str]
        # Key under which the timestamp of the last inspectors run is stored.
        self.cle_timestamp = "INSPECTEURS"  # type: str
        super().__init__(**entries)
class TimestampStoreConfig(_BaseConfig):
    """Configuration of the previous-run timestamp store."""

    def __init__(self, **entries):
        # File holding the previous processing dates of the schools.
        self.file = "timestamps.txt"  # type: str
        # Separator used in that file between a school and its processing date.
        self.separator = "-"  # type: str
        super().__init__(**entries)
class ActionConfig(_BaseConfig):
    """Configuration of a single action."""

    def __init__(self, **entries):
        # Optional identifier of the action (used to merge configurations).
        self.id = None
        # Action type, used to look up the handler in the actions module.
        self.type = "default"
        self.timestamp_store = TimestampStoreConfig()  # type: TimestampStoreConfig
        self.etablissements = EtablissementsConfig()  # type: EtablissementsConfig
        self.inter_etablissements = InterEtablissementsConfig()  # type: InterEtablissementsConfig
        self.inspecteurs = InspecteursConfig()  # type: InspecteursConfig
        super().__init__(**entries)

    def update(self, **entries):
        """
        Update this action config, merging the nested sections in place.

        :param entries: raw configuration entries
        """
        if 'etablissements' in entries:
            self.etablissements.update(**entries['etablissements'])
            entries['etablissements'] = self.etablissements
        if 'interEtablissements' in entries:
            self.inter_etablissements.update(**entries['interEtablissements'])
            entries['interEtablissements'] = self.inter_etablissements
        if 'inspecteurs' in entries:
            self.inspecteurs.update(**entries['inspecteurs'])
            entries['inspecteurs'] = self.inspecteurs
        if 'timestampStore' in entries:
            self.timestamp_store.update(**entries['timestampStore'])
            entries['timestampStore'] = self.timestamp_store
        super().update(**entries)

    def __str__(self):
        # Bug fix: parenthesize the conditional so an action without id still
        # renders its type. The previous precedence made the whole expression
        # evaluate to "" whenever id was unset.
        return self.type + (" (id=%s)" % self.id if self.id else "")
class Config(_BaseConfig):
    """
    Global configuration.
    """

    def __init__(self, **entries):
        # Defaults must exist before the constructor entries are applied.
        # Bug fix: the previous order ran super().__init__ (hence update()) first
        # and then overwrote every section with its default value, so entries
        # passed to the constructor were silently lost (and nested keys crashed
        # because the section attributes did not exist yet).
        self.delete = DeleteConfig()  # type: DeleteConfig
        self.webservice = WebServiceConfig()  # type: WebServiceConfig
        self.constantes = ConstantesConfig()  # type: ConstantesConfig
        self.database = DatabaseConfig()  # type: DatabaseConfig
        self.ldap = LdapConfig()  # type: LdapConfig
        self.actions = []  # type: List[ActionConfig]
        self.logging = True  # type: Union[dict, str, bool]
        super().__init__(**entries)

    def update(self, **entries):
        """
        Update the configuration, merging the nested sections in place.

        :param entries: raw configuration entries
        """
        if 'delete' in entries:
            self.delete.update(**entries['delete'])
            entries['delete'] = self.delete
        if 'webservice' in entries:
            self.webservice.update(**entries['webservice'])
            entries['webservice'] = self.webservice
        if 'constantes' in entries:
            self.constantes.update(**entries['constantes'])
            entries['constantes'] = self.constantes
        if 'database' in entries:
            self.database.update(**entries['database'])
            entries['database'] = self.database
        if 'ldap' in entries:
            self.ldap.update(**entries['ldap'])
            entries['ldap'] = self.ldap
        if 'actions' in entries:
            actions = entries['actions']
            for action in actions:
                # Merge into the existing action when ids match, otherwise append.
                existing_action = next((x for x in self.actions if 'id' in action and x.id == action['id']), None)
                if existing_action:
                    existing_action.update(**action)
                else:
                    self.actions.append(ActionConfig(**action))
            entries['actions'] = self.actions
        super().update(**entries)

    def validate(self):
        """
        Validate the configuration.

        :raises ValueError: when no action is defined
        :return:
        """
        if not self.actions:
            raise ValueError("Au moins une action doit être définie dans la configuration.")
class ConfigLoader:
    """
    Loads configuration files.
    """

    def update(self, config: Config, config_fp: List[str], silent=False) -> Config:
        """
        Update a configuration by loading a list of configuration files.

        :param config: configuration object to update
        :param config_fp: paths of the configuration files
        :param silent: when True, missing files are only logged at debug level
        :return: the updated configuration
        """
        for path in config_fp:
            try:
                with open(path) as handle:
                    config.update(**yaml.safe_load(handle))
            except FileNotFoundError as e:
                message = "Le fichier de configuration n'a pas été chargé: " + str(e)
                (log.debug if silent else log.warning)(message)
        return config

    def load(self, config: List[str], silent=False) -> Config:
        """
        Build a configuration from a list of configuration files.

        :param config: paths of the configuration files
        :param silent: when True, missing files are only logged at debug level
        :return: the loaded configuration
        """
        return self.update(Config(), config, silent)
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,270 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/synchronizer.py | # coding: utf-8
"""
Synchronizer
"""
import datetime
import re
import subprocess
from logging import getLogger
from typing import Dict, List
from synchromoodle.arguments import DEFAULT_ARGS
from synchromoodle.config import EtablissementsConfig, Config, ActionConfig
from synchromoodle.dbutils import Database, PROFONDEUR_CTX_ETAB, COURSE_MODULES_MODULE, \
PROFONDEUR_CTX_MODULE_ZONE_PRIVEE, \
PROFONDEUR_CTX_BLOCK_ZONE_PRIVEE
from synchromoodle.ldaputils import Ldap, EleveLdap, EnseignantLdap, PersonneLdap
from synchromoodle.ldaputils import StructureLdap
#######################################
# FORUM
#######################################
# Nom du forum pour la zone privee
# Le (%s) est reserve a l'organisation unit de l'etablissement
from synchromoodle.webserviceutils import WebService
# Name of the private-zone forum; %s receives the school's organisation unit.
FORUM_NAME_ZONE_PRIVEE = "Forum réservé au personnel éducatif de l'établissement %s"
# Intro format for the private-zone forum.
FORUM_INTRO_FORMAT_ZONE_PRIVEE = 1
# Introduction text of the private-zone forum.
FORUM_INTRO_ZONE_PRIVEE = "<p></p>"
# Maximum number of attachments for the private-zone forum.
FORUM_MAX_ATTACHEMENTS_ZONE_PRIVEE = 2
# Maximum bytes for the private-zone forum.
FORUM_MAX_BYTES_ZONE_PRIVEE = 512000

#######################################
# BLOCKS
#######################################
# Default region of the search block on the private-zone forum.
BLOCK_FORUM_SEARCH_DEFAULT_REGION = "side-pre"
# Default weight of the search block on the private-zone forum.
BLOCK_FORUM_SEARCH_DEFAULT_WEIGHT = 2
# Name of the search block on the private-zone forum.
BLOCK_FORUM_SEARCH_NAME = "searches_forums"
# Page type pattern of the search block on the private-zone forum.
BLOCK_FORUM_SEARCH_PAGE_TYPE_PATTERN = "course-view-*"
# "Show in sub context" option of the search block on the private-zone forum.
BLOCK_FORUM_SEARCH_SHOW_IN_SUB_CTX = 0
# Sub page pattern of the search block on the private-zone forum.
BLOCK_FORUM_SEARCH_SUB_PAGE_PATTERN = ""

# Number of seconds in a day.
SECONDS_PER_DAY = 86400
def est_grp_etab(uai: str, etablissements_config: EtablissementsConfig):
    """
    Tell whether a school belongs to a grouping of schools.

    :param uai: school code
    :param etablissements_config: schools configuration
    :return: the grouping containing the school, or False when there is none
    """
    return next((rgp for rgp in etablissements_config.etabRgp if uai in rgp.uais), False)
class SyncContext:
    """
    Global synchronization context, shared across all processed schools.
    """

    def __init__(self):
        # Current SQL timestamp, captured once at the start of the run.
        self.timestamp_now_sql = None
        # Mapping "UAI -> list of domains" for every school.
        self.map_etab_domaine = None  # type: Dict[str, List[str]]
        # Context ids of the inter-school and inter-CFA categories.
        self.id_context_categorie_inter_etabs = None  # type: int
        self.id_context_categorie_inter_cfa = None  # type: int
        # Ids of the custom teacher roles.
        self.id_role_extended_teacher = None  # type: int
        self.id_role_advanced_teacher = None  # type: int
        # Ids of the "classe" and "Domaine" custom user info fields.
        self.id_field_classe = None  # type: int
        self.id_field_domaine = None  # type: int
        # Users grouped by cohort name, filled during processing.
        self.utilisateurs_by_cohortes = {}
class EtablissementContext:
    """
    Per-school (etablissement) synchronization context.
    """

    def __init__(self, uai: str):
        # School code (UAI).
        self.uai = uai  # type: str
        # Moodle context ids of the school category and of the private-zone forum course.
        self.id_context_categorie = None
        self.id_context_course_forum = None
        # Grouping this school belongs to (falsy when standalone).
        self.etablissement_regroupe = None
        # LDAP structure entry of the school.
        self.structure_ldap = None  # type: StructureLdap
        # Whether local administrators are managed for this school.
        self.gere_admin_local = None  # type: bool
        # Regex matching Moodle local administrators of this school.
        self.regexp_admin_moodle = None  # type: str
        # Regex matching local administrators of this school.
        self.regexp_admin_local = None  # type: str
        # Id of the school's private-zone course.
        self.id_zone_privee = None  # type: int
        # Moodle theme of the school (lower-cased UAI).
        self.etablissement_theme = None  # type: str
        # Students/teachers grouped by cohort name.
        self.eleves_by_cohortes = {}
        self.enseignants_by_cohortes = {}
class Synchronizer:
"""
Synchronise les objets métiers entre l'annuaire LDAP et le Moodle.
"""
def __init__(self, ldap: Ldap, db: Database, config: Config, action_config: ActionConfig = None,
             arguments=DEFAULT_ARGS):
    """
    :param ldap: LDAP directory accessor
    :param db: Moodle database accessor
    :param config: global configuration
    :param action_config: configuration of the action to run; when omitted, falls
        back to the first configured action (or an empty ActionConfig if none)
    :param arguments: parsed command-line arguments
    """
    self.__webservice = WebService(config.webservice)  # type: WebService
    self.__ldap = ldap  # type: Ldap
    self.__db = db  # type: Database
    self.__config = config  # type: Config
    self.__action_config = action_config if action_config \
        else next(iter(config.actions), ActionConfig())  # type: ActionConfig
    self.__arguments = arguments
    # Global synchronization context, created by initialize().
    self.context = None  # type: SyncContext
def initialize(self):
    """
    Initialize the synchronization context: current timestamp, school/domain
    map, inter-school category contexts, role ids and custom user field ids.
    :return:
    """
    self.context = SyncContext()
    # Capture the current timestamp once for the whole run.
    self.context.timestamp_now_sql = self.__db.get_timestamp_now()
    # Fetch the "UAI -> domains" mapping of all schools.
    self.context.map_etab_domaine = self.__ldap.get_domaines_etabs()
    # Context ids of the inter-school categories.
    id_categorie_inter_etabs = self.__db.get_id_categorie(
        self.__action_config.etablissements.inter_etab_categorie_name)
    self.context.id_context_categorie_inter_etabs = self.__db.get_id_context_categorie(id_categorie_inter_etabs)
    id_categorie_inter_cfa = self.__db.get_id_categorie(
        self.__action_config.etablissements.inter_etab_categorie_name_cfa)
    self.context.id_context_categorie_inter_cfa = self.__db.get_id_context_categorie(id_categorie_inter_cfa)
    # Ids of the custom teacher roles.
    self.context.id_role_extended_teacher = self.__db.get_id_role_by_shortname('extendedteacher')
    self.context.id_role_advanced_teacher = self.__db.get_id_role_by_shortname('advancedteacher')
    # Id of the "classe" custom user info field.
    self.context.id_field_classe = self.__db.get_id_user_info_field_by_shortname('classe')
    # Id of the "Domaine" custom user info field.
    self.context.id_field_domaine = self.__db.get_id_user_info_field_by_shortname('Domaine')
def handle_etablissement(self, uai, log=getLogger(), readonly=False) -> EtablissementContext:
    """
    Synchronize one school (etablissement): ensure its course category, private
    zone course and associated forum context exist in Moodle.

    :param uai: school code
    :param log: logger
    :param readonly: when True, nothing is written to the Moodle database
    :return: the per-school synchronization context
    """
    context = EtablissementContext(uai)
    context.gere_admin_local = uai not in self.__action_config.etablissements.listeEtabSansAdmin
    context.etablissement_regroupe = est_grp_etab(uai, self.__action_config.etablissements)
    # Regex telling whether a user is a Moodle administrator of this school
    context.regexp_admin_moodle = self.__action_config.etablissements.prefixAdminMoodleLocal + ".*_%s$" % uai
    # Regex telling whether a user is a local administrator of this school
    context.regexp_admin_local = self.__action_config.etablissements.prefixAdminLocal + ".*_%s$" % uai
    log.debug("Recherche de la structure dans l'annuaire")
    structure_ldap = self.__ldap.get_structure(uai)
    if structure_ldap:
        log.debug("La structure a été trouvée")
        etablissement_path = "/1"
        # When the school belongs to a grouping, the grouping's name and first
        # UAI are used instead of the school's own.
        if context.etablissement_regroupe:
            # Bug fix: EtablissementRegroupement is a config object, not a dict,
            # so attribute access is required (subscripting raised TypeError).
            etablissement_ou = context.etablissement_regroupe.nom
            structure_ldap.uai = context.etablissement_regroupe.uais[0]
            log.debug("L'établissement fait partie d'un groupement: ou=%s, uai=%s",
                      etablissement_ou, structure_ldap.uai)
        else:
            etablissement_ou = structure_ldap.nom
            log.debug("L'établissement ne fait partie d'un groupement: ou=%s", etablissement_ou)
        # The Moodle theme is derived from the (possibly grouped) UAI
        context.etablissement_theme = structure_ldap.uai.lower()
        # Create the course category if it does not exist yet
        id_etab_categorie = self.__db.get_id_course_category_by_theme(context.etablissement_theme)
        if id_etab_categorie is None and not readonly:
            log.info("Création de la structure")
            self.insert_moodle_structure(context.etablissement_regroupe, structure_ldap.nom,
                                         etablissement_path, etablissement_ou,
                                         structure_ldap.siren, context.etablissement_theme)
            id_etab_categorie = self.__db.get_id_course_category_by_id_number(structure_ldap.siren)
        # For a grouping, keep the category description listing every member school
        if context.etablissement_regroupe and not readonly:
            description = self.__db.get_description_course_category(id_etab_categorie)
            if description.find(structure_ldap.siren) == -1:
                log.info("Mise à jour de la description")
                description = "%s$%s@%s" % (description, structure_ldap.siren, structure_ldap.nom)
                self.__db.update_course_category_description(id_etab_categorie, description)
                self.__db.update_course_category_name(id_etab_categorie, etablissement_ou)
        # Resolve the Moodle context of the school's category
        if id_etab_categorie is not None:
            context.id_context_categorie = self.__db.get_id_context_categorie(id_etab_categorie)
        context.id_zone_privee = self.__db.get_id_course_by_id_number("ZONE-PRIVEE-" + structure_ldap.siren)
        # Recreate the private zone if it no longer exists
        if context.id_zone_privee is None and not readonly:
            log.info("Création de la zone privée")
            context.id_zone_privee = self.__db.insert_zone_privee(id_etab_categorie, structure_ldap.siren,
                                                                  etablissement_ou, self.context.timestamp_now_sql)
        if context.id_zone_privee is not None:
            context.id_context_course_forum = self.__db.get_id_context(self.__config.constantes.niveau_ctx_cours, 3,
                                                                      context.id_zone_privee)
            if context.id_context_course_forum is None and not readonly:
                log.info("Création du cours associé à la zone privée")
                context.id_context_course_forum = self.__db.insert_zone_privee_context(context.id_zone_privee)
        context.structure_ldap = structure_ldap
    return context
def handle_eleve(self, etablissement_context: EtablissementContext, eleve_ldap: EleveLdap, log=getLogger()):
    """
    Synchronizes a student within an establishment.

    Creates or updates the Moodle user, toggles the limited-rights role
    (only granted for middle-school students), enrolls the student in the
    cohorts matching his classes and his training level, removes him from
    the cohorts he no longer belongs to, and finally stores his class and
    mail domain in the user profile fields.

    :param etablissement_context: context of the establishment being synchronized
    :param eleve_ldap: student entry read from the LDAP directory
    :param log: logger used to trace the synchronization
    :return: None
    """
    mail_display = self.__config.constantes.default_mail_display
    if not eleve_ldap.mail:
        # Fall back to the configured default address when the directory
        # entry carries no mail attribute.
        eleve_ldap.mail = self.__config.constantes.default_mail
        log.info("Le mail de l'élève n'est pas défini dans l'annuaire, "
                 "utilisation de la valeur par défault: %s", eleve_ldap.mail)
    # Create the Moodle user if unknown, update it otherwise.
    eleve_id = self.__db.get_user_id(eleve_ldap.uid)
    if not eleve_id:
        log.info("Ajout de l'utilisateur: %s", eleve_ldap)
        self.__db.insert_moodle_user(eleve_ldap.uid, eleve_ldap.given_name,
                                     eleve_ldap.sn, eleve_ldap.mail,
                                     mail_display, etablissement_context.etablissement_theme)
        # Re-read to obtain the id generated by the insert.
        eleve_id = self.__db.get_user_id(eleve_ldap.uid)
    else:
        log.info("Mise à jour de l'utilisateur: %s", eleve_ldap)
        self.__db.update_moodle_user(eleve_id, eleve_ldap.given_name,
                                     eleve_ldap.sn, eleve_ldap.mail, mail_display,
                                     etablissement_context.etablissement_theme)
    # Add or remove the limited-rights user role: it is granted only to
    # middle-school ("college") students and revoked for everyone else.
    if etablissement_context.structure_ldap.type == self.__config.constantes.type_structure_clg:
        log.info("Ajout du rôle droit limités à l'utilisateur: %s", eleve_ldap)
        self.__db.add_role_to_user(self.__config.constantes.id_role_utilisateur_limite,
                                   self.__config.constantes.id_instance_moodle, eleve_id)
    else:
        self.__db.remove_role_to_user(self.__config.constantes.id_role_utilisateur_limite,
                                      self.__config.constantes.id_instance_moodle, eleve_id)
        log.info(
            "Suppression du role d'utilisateur avec des droits limites à l'utilisateur %s %s %s (id = %s)"
            , eleve_ldap.given_name, eleve_ldap.sn, eleve_ldap.uid, str(eleve_id))
    # Enrollment in the cohorts associated with the student's classes;
    # only classes belonging to the current establishment (matched by DN)
    # are taken into account.
    eleve_cohorts = []
    eleve_classes_for_etab = []
    for classe in eleve_ldap.classes:
        if classe.etab_dn == etablissement_context.structure_ldap.dn:
            eleve_classes_for_etab.append(classe.classe)
    if eleve_classes_for_etab:
        log.info("Inscription de l'élève %s "
                 "dans les cohortes de classes %s", eleve_ldap, eleve_classes_for_etab)
        ids_classes_cohorts = self.get_or_create_classes_cohorts(etablissement_context.id_context_categorie,
                                                                 eleve_classes_for_etab,
                                                                 self.context.timestamp_now_sql,
                                                                 log=log)
        for ids_classe_cohorts in ids_classes_cohorts:
            self.__db.enroll_user_in_cohort(ids_classe_cohorts, eleve_id, self.context.timestamp_now_sql)
        eleve_cohorts.extend(ids_classes_cohorts)
    # Enrollment in the cohort associated with the training level.
    if eleve_ldap.niveau_formation:
        log.info("Inscription de l'élève %s "
                 "dans la cohorte de niveau de formation %s", eleve_ldap, eleve_ldap.niveau_formation)
        id_formation_cohort = self.get_or_create_formation_cohort(etablissement_context.id_context_categorie,
                                                                 eleve_ldap.niveau_formation,
                                                                 self.context.timestamp_now_sql,
                                                                 log=log)
        self.__db.enroll_user_in_cohort(id_formation_cohort, eleve_id, self.context.timestamp_now_sql)
        eleve_cohorts.append(id_formation_cohort)
    # Remove the student from every cohort not re-confirmed above.
    log.info("Désinscription de l'élève %s des anciennes cohortes", eleve_ldap)
    self.__db.disenroll_user_from_cohorts(eleve_cohorts, eleve_id)
    # Keep the per-establishment cohort membership dictionaries up to date.
    for cohort_id in eleve_cohorts:
        # Cohort already seen for this establishment
        if cohort_id in etablissement_context.eleves_by_cohortes:
            etablissement_context.eleves_by_cohortes[cohort_id].append(eleve_id)
        # First time this cohort is encountered
        else:
            etablissement_context.eleves_by_cohortes[cohort_id] = [eleve_id]
    # Store the class in the custom user profile field.
    # NOTE(review): this dereferences eleve_ldap.classe.classe unconditionally —
    # presumably every student entry has a current class; confirm for students
    # without one (would raise AttributeError here).
    id_user_info_data = self.__db.get_id_user_info_data(eleve_id, self.context.id_field_classe)
    if id_user_info_data is not None:
        self.__db.update_user_info_data(eleve_id, self.context.id_field_classe, eleve_ldap.classe.classe)
        log.debug("Mise à jour user_info_data")
    else:
        self.__db.insert_moodle_user_info_data(eleve_id, self.context.id_field_classe, eleve_ldap.classe.classe)
        log.debug("Insertion user_info_data")
    # Resolve the mail domain: a single known domain wins, otherwise the
    # first domain mapped to the current UAI, otherwise the default.
    user_domain = self.__config.constantes.default_domain
    if len(eleve_ldap.domaines) == 1:
        user_domain = eleve_ldap.domaines[0]
    else:
        if eleve_ldap.uai_courant and eleve_ldap.uai_courant in self.context.map_etab_domaine:
            user_domain = self.context.map_etab_domaine[eleve_ldap.uai_courant][0]
    log.debug("Insertion du Domaine")
    self.__db.set_user_domain(eleve_id, self.context.id_field_domaine, user_domain)
def handle_enseignant(self, etablissement_context: EtablissementContext, enseignant_ldap: EnseignantLdap,
                      log=getLogger()):
    """
    Updates a teacher within an establishment.

    Creates or updates the Moodle user, prunes the roles the teacher is no
    longer entitled to, grants the course-creator role at the relevant
    category levels, handles local-admin rights, enrolls the teacher in the
    class and establishment cohorts and stores his mail domain.

    :param etablissement_context: context of the establishment being synchronized
    :param enseignant_ldap: teacher entry read from the LDAP directory
    :param log: logger used to trace the synchronization
    :return: None
    """
    enseignant_infos = "%s %s %s" % (enseignant_ldap.uid, enseignant_ldap.given_name, enseignant_ldap.sn)
    if enseignant_ldap.uai_courant and not etablissement_context.etablissement_regroupe:
        etablissement_context.etablissement_theme = enseignant_ldap.uai_courant.lower()
    if not enseignant_ldap.mail:
        enseignant_ldap.mail = self.__config.constantes.default_mail
    # Mail visibility: restricted to course members by default, hidden
    # entirely for establishments configured without mail display.
    mail_display = self.__config.constantes.default_mail_display
    if etablissement_context.structure_ldap.uai in self.__action_config.etablissements.listeEtabSansMail:
        # Disable mail display
        mail_display = 0
    # Create the teacher if unknown, update it otherwise.
    id_user = self.__db.get_user_id(enseignant_ldap.uid)
    if not id_user:
        self.__db.insert_moodle_user(enseignant_ldap.uid, enseignant_ldap.given_name, enseignant_ldap.sn,
                                     enseignant_ldap.mail,
                                     mail_display, etablissement_context.etablissement_theme)
        id_user = self.__db.get_user_id(enseignant_ldap.uid)
    else:
        self.__db.update_moodle_user(id_user, enseignant_ldap.given_name, enseignant_ldap.sn, enseignant_ldap.mail,
                                     mail_display, etablissement_context.etablissement_theme)
    # Revoke rights on establishments the teacher is no longer allowed in.
    if enseignant_ldap.uais is not None and not etablissement_context.etablissement_regroupe:
        # uais lists the establishments the teacher is authorized for.
        self.mettre_a_jour_droits_enseignant(enseignant_infos, id_user, enseignant_ldap.uais, log=log)
    # Course-creator role at the inter-establishment Moodle category level.
    self.__db.add_role_to_user(self.__config.constantes.id_role_createur_cours,
                               self.context.id_context_categorie_inter_etabs, id_user)
    log.info("Ajout du role de createur de cours dans la categorie inter-etablissements")
    # If the teacher belongs to a CFA, also grant the course-creator role
    # at the inter-CFA category level.
    if etablissement_context.structure_ldap.type == self.__config.constantes.type_structure_cfa:
        self.__db.add_role_to_user(self.__config.constantes.id_role_createur_cours,
                                   self.context.id_context_categorie_inter_cfa, id_user)
        log.info("Ajout du role de createur de cours dans la categorie inter-cfa")
    # Course-creator role within the establishment itself.
    self.__db.add_role_to_user(self.__config.constantes.id_role_createur_cours,
                               etablissement_context.id_context_categorie, id_user)
    # Extra roles for establishment staff, depending on national profiles.
    if set(enseignant_ldap.profils).intersection(['National_ENS', 'National_DIR', 'National_EVS', 'National_ETA']):
        # Student role on the forum context of the private zone
        self.__db.add_role_to_user(self.__config.constantes.id_role_eleve,
                                   etablissement_context.id_context_course_forum, id_user)
        # Enrollment in the private zone course
        self.__db.enroll_user_in_course(self.__config.constantes.id_role_eleve,
                                        etablissement_context.id_zone_privee, id_user)
        if set(enseignant_ldap.profils).intersection(['National_ENS', 'National_EVS', 'National_ETA']):
            # Extended-teacher role only when the establishment does not
            # manage its own local administrators.
            if not etablissement_context.gere_admin_local:
                self.__db.add_role_to_user(self.context.id_role_extended_teacher,
                                           etablissement_context.id_context_categorie,
                                           id_user)
        elif 'National_DIR' in enseignant_ldap.profils:
            self.__db.add_role_to_user(self.__config.constantes.id_role_directeur,
                                       etablissement_context.id_context_categorie, id_user)
    # Local administration rights for the establishment.
    if etablissement_context.gere_admin_local:
        for member in enseignant_ldap.is_member_of:
            # Is the teacher a Moodle administrator for this establishment?
            admin_moodle = re.match(etablissement_context.regexp_admin_moodle, member, flags=re.IGNORECASE)
            if admin_moodle:
                self.__db.insert_moodle_local_admin(etablissement_context.id_context_categorie, id_user)
                log.info("Insertion d'un admin local %s %s %s",
                         enseignant_ldap.uid, enseignant_ldap.given_name, enseignant_ldap.sn)
                # A local admin is made an advanced user by default.
                if not self.__db.is_enseignant_avance(id_user, self.context.id_role_advanced_teacher):
                    self.__db.add_role_to_user(self.context.id_role_advanced_teacher, 1, id_user)
                break
        else:
            # for/else: runs only when no membership matched the admin
            # pattern above — the teacher is no longer a local admin.
            # NOTE(review): the deletion targets the inter-establishment
            # context while the insertion above targets the establishment
            # category context — confirm this asymmetry is intentional.
            delete = self.__db.delete_moodle_local_admin(self.context.id_context_categorie_inter_etabs, id_user)
            if delete:
                log.info("Suppression d'un admin local %s %s %s",
                         enseignant_ldap.uid, enseignant_ldap.given_name, enseignant_ldap.sn)
    # Enrollment in the cohorts associated with the teacher's classes,
    # limited to classes of the current establishment (matched by DN).
    enseignant_cohorts = []
    enseignant_classes_for_etab = []
    for classe in enseignant_ldap.classes:
        if classe.etab_dn == etablissement_context.structure_ldap.dn:
            enseignant_classes_for_etab.append(classe.classe)
    if enseignant_classes_for_etab:
        log.info("Inscription de l'enseignant %s dans les cohortes de classes %s",
                 enseignant_ldap, enseignant_classes_for_etab)
        name_pattern = "Profs de la Classe %s"
        desc_pattern = "Profs de la Classe %s"
        ids_classes_cohorts = self.get_or_create_classes_cohorts(etablissement_context.id_context_categorie,
                                                                 enseignant_classes_for_etab,
                                                                 self.context.timestamp_now_sql,
                                                                 name_pattern=name_pattern,
                                                                 desc_pattern=desc_pattern,
                                                                 log=log)
        for ids_classe_cohorts in ids_classes_cohorts:
            self.__db.enroll_user_in_cohort(ids_classe_cohorts, id_user, self.context.timestamp_now_sql)
        enseignant_cohorts.extend(ids_classes_cohorts)
    # Enrollment in the establishment-wide teachers cohort.
    log.info("Inscription de l'enseignant %s dans la cohorte d'enseignants de l'établissement", enseignant_ldap)
    id_prof_etabs_cohort = self.get_or_create_profs_etab_cohort(etablissement_context, log)
    # NOTE(review): id_user is re-fetched here although it is already known —
    # looks redundant; confirm no side effect relies on this second lookup.
    id_user = self.__db.get_user_id(enseignant_ldap.uid)
    self.__db.enroll_user_in_cohort(id_prof_etabs_cohort, id_user, self.context.timestamp_now_sql)
    # Keep the per-establishment cohort membership dictionaries up to date.
    for cohort_id in enseignant_cohorts:
        # Cohort already seen for this establishment
        if cohort_id in etablissement_context.enseignants_by_cohortes:
            etablissement_context.enseignants_by_cohortes[cohort_id].append(id_user)
        # First time this cohort is encountered
        else:
            etablissement_context.enseignants_by_cohortes[cohort_id] = [id_user]
    # Resolve the mail domain: a single known domain wins, otherwise the
    # first domain mapped to the current UAI, otherwise the default.
    user_domain = self.__config.constantes.default_domain
    if len(enseignant_ldap.domaines) == 1:
        user_domain = enseignant_ldap.domaines[0]
    else:
        if enseignant_ldap.uai_courant and enseignant_ldap.uai_courant in self.context.map_etab_domaine:
            user_domain = self.context.map_etab_domaine[enseignant_ldap.uai_courant][0]
    log.debug("Insertion du Domaine")
    self.__db.set_user_domain(id_user, self.context.id_field_domaine, user_domain)
def handle_user_interetab(self, personne_ldap: PersonneLdap, log=getLogger()):
    """
    Synchronizes an inter-establishment user.

    Creates or updates the Moodle user, grants the course-creator role at
    the inter-establishment category, and adds or removes the local-admin
    right depending on the user's LDAP group memberships.

    :param personne_ldap: user entry read from the LDAP directory
    :param log: logger used to trace the synchronization
    :return: None
    """
    if not personne_ldap.mail:
        personne_ldap.mail = self.__config.constantes.default_mail
    # Create the user if unknown, update it otherwise.
    id_user = self.__db.get_user_id(personne_ldap.uid)
    if not id_user:
        self.__db.insert_moodle_user(personne_ldap.uid, personne_ldap.given_name, personne_ldap.sn,
                                     personne_ldap.mail,
                                     self.__config.constantes.default_mail_display,
                                     self.__config.constantes.default_moodle_theme)
        id_user = self.__db.get_user_id(personne_ldap.uid)
    else:
        self.__db.update_moodle_user(id_user, personne_ldap.given_name, personne_ldap.sn, personne_ldap.mail,
                                     self.__config.constantes.default_mail_display,
                                     self.__config.constantes.default_moodle_theme)
    # Course-creator role at the inter-establishment category level.
    self.__db.add_role_to_user(self.__config.constantes.id_role_createur_cours,
                               self.context.id_context_categorie_inter_etabs, id_user)
    # Grant the local-admin right when one of the user's memberships
    # matches the configured admin attribute value.
    for member in personne_ldap.is_member_of:
        admin = re.match(self.__action_config.inter_etablissements.ldap_valeur_attribut_admin, member,
                         flags=re.IGNORECASE)
        if admin:
            insert = self.__db.insert_moodle_local_admin(self.context.id_context_categorie_inter_etabs, id_user)
            if insert:
                log.info("Insertion d'un admin local %s %s %s",
                         personne_ldap.uid, personne_ldap.given_name, personne_ldap.sn)
            break
    else:
        # for/else: runs only when no membership matched (including when
        # is_member_of is empty) — revoke any existing local-admin right.
        delete = self.__db.delete_moodle_local_admin(self.context.id_context_categorie_inter_etabs, id_user)
        if delete:
            log.info("Suppression d'un admin local %s %s %s",
                     personne_ldap.uid, personne_ldap.given_name, personne_ldap.sn)
def handle_inspecteur(self, personne_ldap: PersonneLdap, log=getLogger()):
    """
    Synchronizes an inspector user.

    Creates or updates the Moodle user, grants the course-creator role at
    the inter-establishment category and stores the user's mail domain.

    :param personne_ldap: inspector entry read from the LDAP directory
    :param log: logger used to trace the synchronization
    :return: None
    """
    if not personne_ldap.mail:
        personne_ldap.mail = self.__config.constantes.default_mail
    # Create the user only if it does not exist yet, update it otherwise.
    # (Fix: a previous version unconditionally called insert_moodle_user
    # before this check, producing a redundant duplicate insert for every
    # inspector on every run.)
    id_user = self.__db.get_user_id(personne_ldap.uid)
    if not id_user:
        self.__db.insert_moodle_user(personne_ldap.uid, personne_ldap.given_name, personne_ldap.sn,
                                     personne_ldap.mail,
                                     self.__config.constantes.default_mail_display,
                                     self.__config.constantes.default_moodle_theme)
        id_user = self.__db.get_user_id(personne_ldap.uid)
    else:
        self.__db.update_moodle_user(id_user, personne_ldap.given_name, personne_ldap.sn, personne_ldap.mail,
                                     self.__config.constantes.default_mail_display,
                                     self.__config.constantes.default_moodle_theme)
    # Course-creator role at the inter-establishment Moodle category level.
    self.__db.add_role_to_user(self.__config.constantes.id_role_createur_cours,
                               self.context.id_context_categorie_inter_etabs, id_user)
    log.info("Ajout du role de createur de cours dans la categorie inter-etablissements")
    # Resolve the mail domain: a single known domain wins, otherwise the
    # first domain mapped to the current UAI, otherwise the default.
    user_domain = self.__config.constantes.default_domain
    if len(personne_ldap.domaines) == 1:
        user_domain = personne_ldap.domaines[0]
    else:
        if personne_ldap.uai_courant and personne_ldap.uai_courant in self.context.map_etab_domaine:
            user_domain = self.context.map_etab_domaine[personne_ldap.uai_courant][0]
    log.debug("Insertion du Domaine")
    self.__db.set_user_domain(id_user, self.context.id_field_domaine, user_domain)
def mettre_a_jour_droits_enseignant(self, enseignant_infos, id_enseignant, uais_autorises, log=getLogger()):
    """
    Updates the rights of a teacher.

    The update consists in:
    - removing the roles held in establishments (themes) the teacher is
      not authorized for anymore;
    - removing the roles held on the private-zone forums of those
      establishments.

    :param enseignant_infos: display string "uid given_name sn" used in logs
    :param id_enseignant: Moodle user id of the teacher
    :param uais_autorises: UAIs of the establishments the teacher is allowed in
    :param log: logger used to trace the update
    :return: None
    """
    # Themes authorized for the teacher (themes are lower-cased UAIs).
    themes_autorises = [uai_autorise.lower() for uai_autorise in uais_autorises]
    log.debug("Etablissements autorises pour l'enseignant pour %s : %s",
              enseignant_infos, themes_autorises)
    #########################
    # PRIVATE ZONES
    #########################
    # Ids of the roles held in themes that are not authorized anymore.
    ids_roles_non_autorises, ids_themes_non_autorises = self.__db.get_ids_and_themes_not_allowed_roles(
        id_enseignant, themes_autorises)
    # Remove the unauthorized roles.
    if ids_roles_non_autorises:
        self.__db.delete_roles(ids_roles_non_autorises)
        log.info("Suppression des rôles d'enseignant pour %s dans les établissements %s"
                 , enseignant_infos, str(ids_themes_non_autorises))
        log.info("Les seuls établissements autorisés pour cet enseignant sont %s", themes_autorises)
    #########################
    # FORUMS
    #########################
    # SIRENs of the establishments the teacher works in.
    sirens = self.__db.get_descriptions_course_categories_by_themes(themes_autorises)
    # Shortnames of the associated private-zone forum courses.
    # Legacy form: shortnames_forums = [ ( "ZONE-PRIVEE-%s" % str( siren ) ) for siren in sirens ]
    shortnames_forums = ["ZONE-PRIVEE-%s" % siren for siren in sirens]
    # Roles held on forums that should no longer exist for this teacher.
    ids_roles_non_autorises, forums_summaries = self.__db.get_ids_and_summaries_not_allowed_roles(id_enseignant,
                                                                                                  shortnames_forums)
    # Remove the unauthorized forum roles.
    if ids_roles_non_autorises:
        self.__db.delete_roles(ids_roles_non_autorises)
        log.info("Suppression des rôles d'enseignant pour %s sur les forum '%s' ",
                 enseignant_infos, str(forums_summaries))
        log.info("Les seuls établissements autorisés pour cet enseignant sont '%s'", themes_autorises)
def get_or_create_cohort(self, id_context, name, id_number, description, time_created, log=getLogger()):
    """
    Returns the id of the cohort of the given name within a context,
    creating the cohort first when it does not exist yet.

    :param id_context: Moodle context the cohort belongs to
    :param name: cohort name (also used for the existence lookup)
    :param id_number: external identifier of the cohort
    :param description: cohort description
    :param time_created: creation timestamp
    :param log: logger used to trace cohort creation
    :return: id of the existing or newly created cohort
    """
    # Guard clause: return straight away when the cohort already exists.
    existing_id = self.__db.get_id_cohort(id_context, name)
    if existing_id is not None:
        return existing_id
    self.__db.create_cohort(id_context, name, id_number, description, time_created)
    log.info("Creation de la cohorte (name=%s)", name)
    # Re-read to obtain the id generated by the insert.
    return self.__db.get_id_cohort(id_context, name)
def get_or_create_formation_cohort(self, id_context_etab, niveau_formation, timestamp_now_sql, log=getLogger()):
    """
    Loads or creates the cohort gathering students of a training level.

    :param id_context_etab: Moodle context of the establishment category
    :param niveau_formation: training level the cohort is named after
    :param timestamp_now_sql: creation timestamp
    :param log: logger used to trace cohort creation
    :return: id of the existing or newly created cohort
    """
    name = 'Élèves du Niveau de formation %s' % niveau_formation
    description = 'Eleves avec le niveau de formation %s' % niveau_formation
    # The cohort name doubles as its external id_number.
    return self.get_or_create_cohort(id_context_etab, name, name, description, timestamp_now_sql, log)
def get_or_create_classes_cohorts(self, id_context_etab, classes_names, time_created, name_pattern=None,
                                  desc_pattern=None, log=getLogger()):
    """
    Loads or creates the cohorts matching classes of an establishment.

    :param id_context_etab: Moodle context of the establishment category
    :param classes_names: names of the classes to build cohorts for
    :param time_created: creation timestamp
    :param name_pattern: optional "%s" pattern for cohort names
    :param desc_pattern: optional "%s" pattern for cohort descriptions
    :param log: logger used to trace cohort creation
    :return: list of cohort ids, in the order of classes_names
    """
    # Default patterns target student cohorts; callers override them for
    # teacher cohorts.
    name_fmt = "Élèves de la Classe %s" if name_pattern is None else name_pattern
    desc_fmt = "Élèves de la Classe %s" if desc_pattern is None else desc_pattern
    # The cohort name doubles as its external id_number.
    return [self.get_or_create_cohort(id_context_etab,
                                      name_fmt % classe_name,
                                      name_fmt % classe_name,
                                      desc_fmt % classe_name,
                                      time_created,
                                      log=log)
            for classe_name in classes_names]
def get_or_create_profs_etab_cohort(self, etab_context: EtablissementContext, log=getLogger()):
    """
    Loads or creates the cohort of teachers of the establishment.

    :param etab_context: context of the establishment
    :param log: logger used to trace cohort creation
    :return: id of the existing or newly created teachers cohort
    """
    name = 'Profs de l\'établissement (%s)' % etab_context.uai
    description = 'Enseignants de l\'établissement %s' % etab_context.uai
    # The cohort name doubles as its external id_number.
    return self.get_or_create_cohort(etab_context.id_context_categorie,
                                     name,
                                     name,
                                     description,
                                     self.context.timestamp_now_sql,
                                     log=log)
def get_users_by_cohorts_comparators(self, etab_context: EtablissementContext, cohortname_pattern_re: str,
                                     cohortname_pattern: str) -> (Dict[str, List[str]], Dict[str, List[str]]):
    """
    Returns two dictionaries listing the users (lower-cased usernames) of
    each class: the first one holds the database view, the second one the
    LDAP view for the same classes.

    :param etab_context: EtablissementContext
    :param cohortname_pattern_re: regex whose group 2 extracts the class name
    :param cohortname_pattern: SQL pattern filtering the cohorts
    :return: (db usernames by class, ldap usernames by class)
    """
    filtered_cohorts = self.__db.get_user_filtered_cohorts(etab_context.id_context_categorie, cohortname_pattern)
    db_usernames_by_classe = {}
    for cohort in filtered_cohorts:
        # Class name is carried by the second capture group of the pattern.
        classe_name = re.search(cohortname_pattern_re, cohort.name).group(2)
        db_usernames_by_classe[classe_name] = [member.lower()
                                               for member in self.__db.get_cohort_members(cohort.id)]
    # Mirror the same classes from the LDAP directory.
    ldap_usernames_by_classe = {
        classe: [eleve.uid.lower()
                 for eleve in self.__ldap.search_eleves_in_classe(classe, etab_context.uai)]
        for classe in db_usernames_by_classe
    }
    return db_usernames_by_classe, ldap_usernames_by_classe
def list_contains_username(self, ldap_users: "List[PersonneLdap]", username: str):
    """
    Tells whether a list of LDAP users contains a user with the given
    username (case-insensitive comparison on the uid).

    :param ldap_users: LDAP user entries to search
    :param username: username to look for
    :return: True when a matching uid is found, False otherwise
    """
    target = username.lower()
    return any(ldap_user.uid.lower() == target for ldap_user in ldap_users)
def backup_course(self, courseid, log=getLogger()):
    """
    Runs the configured external backup command for a course and reports
    whether the backup succeeded.

    :param courseid: id of the course to back up
    :param log: logger used to trace the backup
    :return: True when the command output matches the configured success
             regex, False otherwise
    """
    log.info("Backup du cours avec l'id %d", courseid)
    cmd = self.__config.webservice.backup_cmd.replace("%courseid%", str(courseid))
    backup_process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    # communicate() reads the whole output AND waits for the child to
    # terminate; the previous read()-only version never waited and left a
    # zombie process behind for every backup.
    stdout, _ = backup_process.communicate()
    output = stdout.decode('utf-8')
    m = re.search(self.__config.webservice.backup_success_re, output)
    return m is not None
def check_and_process_user_courses(self, user_id: int, log=getLogger()):
    """
    Backs up then deletes the courses solely owned by a user, when they
    have not been modified for the configured retention delay.

    Courses with other owners are left untouched; a course whose backup
    fails is kept in the database.

    :param user_id: Moodle id of the user about to be removed
    :param log: logger used to trace backups and deletions
    :return: None
    """
    user_courses_ids = [user_course[0] for user_course in self.__db.get_courses_ids_owned_by(user_id)]
    now = self.__db.get_timestamp_now()
    for courseid in user_courses_ids:
        owners_ids = [ownerid[0] for ownerid in self.__db.get_userids_owner_of_course(courseid)]
        # Only act when this user is the one and only owner of the course.
        if len(owners_ids) == 1 and owners_ids[0] == user_id:
            timemodified = self.__db.get_course_timemodified(courseid)
            delay_backup_course = self.__config.delete.delay_backup_course
            # Course untouched for longer than the configured delay (days).
            if timemodified < now - (delay_backup_course * SECONDS_PER_DAY):
                backup_success = self.backup_course(courseid, log)
                if backup_success:
                    log.info("La backup du cours %d été sauvegardée", courseid)
                    # Delete only once the backup is confirmed.
                    self.__db.delete_course(courseid)
                    log.info("Le cours %d a été supprimé de la base de données Moodle", courseid)
                else:
                    log.error("La backup du cours %d a échouée", courseid)
def anonymize_or_delete_users(self, ldap_users: List[PersonneLdap], db_users: List, log=getLogger()):
    """
    Anonymizes or deletes users that became useless.

    A database user absent from the LDAP directory is deleted when his last
    login is older than the delete delay, or anonymized when older than the
    anonymize delay (both delays differ for teachers and students).
    Users listed in ids_users_undeletable are never touched.

    :param ldap_users: users currently present in the LDAP directory
    :param db_users: database rows (id, username, last-login timestamp)
    :param log: logger used to trace deletions and anonymizations
    :return: None
    """
    user_ids_to_delete = []
    user_ids_to_anonymize = []
    now = self.__db.get_timestamp_now()
    for db_user in db_users:
        # Never touch explicitly protected accounts.
        if db_user[0] in self.__config.delete.ids_users_undeletable:
            continue
        if not self.list_contains_username(ldap_users, db_user[1]):
            log.info("L'utilisateur %s n'est plus présent dans l'annuaire LDAP", db_user[1])
            # Delays depend on whether the user holds a teacher role.
            is_teacher = self.__db.user_has_role(db_user[0], self.__config.delete.ids_roles_teachers)
            delete_delay = self.__config.delete.delay_delete_teacher if is_teacher else \
                self.__config.delete.delay_delete_student
            anon_delay = self.__config.delete.delay_anonymize_teacher if is_teacher else \
                self.__config.delete.delay_anonymize_student
            if db_user[2] < now - (delete_delay * SECONDS_PER_DAY):
                log.info("L'utilisateur %s ne s'est pas connecté depuis au moins %s jours. Il va être"
                         " supprimé", db_user[1], delete_delay)
                user_ids_to_delete.append(db_user[0])
            elif db_user[2] < now - (anon_delay * SECONDS_PER_DAY):
                # Fix: this message previously logged delete_delay instead
                # of the anonymization delay that actually triggered it.
                log.info("L'utilisateur %s ne s'est pas connecté depuis au moins %s jours. Il va être"
                         " anonymisé", db_user[1], anon_delay)
                user_ids_to_anonymize.append(db_user[0])
    if user_ids_to_delete:
        log.info("Suppression des utilisateurs en cours...")
        for user_id in user_ids_to_delete:
            # Back up / remove the courses solely owned by the user first.
            self.check_and_process_user_courses(user_id, log=log)
        self.delete_users(user_ids_to_delete, log=log)
        log.info("%d utilisateurs supprimés", len(user_ids_to_delete))
    if user_ids_to_anonymize:
        log.info("Anonymisation des utilisateurs en cours...")
        self.__db.anonymize_users(user_ids_to_anonymize)
        log.info("%d utilisateurs anonymisés", len(user_ids_to_anonymize))
def delete_users(self, userids: List[int], pagesize=50, log=getLogger()) -> int:
    """
    Deletes the users of a list, paginating the Moodle webservice calls.

    :param userids: ids of the users to delete
    :param pagesize: maximum number of ids sent per webservice call
    :param log: logger used to trace progress
    :return: number of users deleted
    """
    total = len(userids)
    deleted = 0
    # Walk the list page by page; the last page may be shorter.
    for start in range(0, total, pagesize):
        page = userids[start:start + pagesize]
        self.__webservice.delete_users(page)
        deleted += len(page)
        log.info("%d / %d utilisateurs supprimés", deleted, total)
    return deleted
def purge_cohorts(self, users_by_cohorts_db: Dict[str, List[str]],
                  users_by_cohorts_ldap: Dict[str, List[str]],
                  cohortname_pattern: str,
                  log=getLogger()):
    """
    Empties the cohorts of users according to the LDAP directory: any
    database member not confirmed by LDAP is disenrolled.

    :param users_by_cohorts_db: usernames per cohort, database view
    :param users_by_cohorts_ldap: usernames per cohort, LDAP view
    :param cohortname_pattern: "%s" pattern building the cohort name
    :param log: logger used to trace disenrollments
    :return: dict of disenrolled usernames per cohort
    """
    disenrolled_users = {}
    for cohort_db, eleves_db in users_by_cohorts_db.items():
        cohortname = cohortname_pattern % cohort_db
        # A cohort unknown to LDAP has no legitimate member, so the empty
        # default disenrolls everyone in it.
        ldap_members = users_by_cohorts_ldap.get(cohort_db, [])
        for username_db in eleves_db:
            if username_db in ldap_members:
                continue
            log.info("Désenrollement de l'utilisateur %s de la cohorte \"%s\"", username_db, cohort_db)
            self.__db.disenroll_user_from_username_and_cohortname(username_db, cohortname)
            disenrolled_users.setdefault(cohort_db, []).append(username_db)
    return disenrolled_users
def mise_a_jour_cohorte_interetab(self, is_member_of, cohort_name, since_timestamp: datetime.datetime,
                                  log=getLogger()):
    """
    Updates an inter-establishment cohort: creates it when missing, then
    enrolls every LDAP user carrying the given membership attribute.

    :param is_member_of: LDAP isMemberOf value selecting the users
    :param cohort_name: name of the inter-establishment cohort
    :param since_timestamp: only users modified after this timestamp are
        processed (ignored when the purge_cohortes argument is set)
    :param log: logger used to trace enrollments
    :return: None
    """
    # Create the cohort when it does not exist yet.
    self.get_or_create_cohort(self.context.id_context_categorie_inter_etabs, cohort_name, cohort_name,
                              cohort_name, self.context.timestamp_now_sql, log=log)
    id_cohort = self.__db.get_id_cohort(self.context.id_context_categorie_inter_etabs, cohort_name)
    # Tracks the users enrolled in this cohort during this run.
    self.context.utilisateurs_by_cohortes[id_cohort] = []
    # Membership filter passed to the LDAP search.
    is_member_of_list = [is_member_of]
    # Enroll each matching LDAP user already known to Moodle.
    for personne_ldap in self.__ldap.search_personne(
            since_timestamp=since_timestamp if not self.__arguments.purge_cohortes else None,
            isMemberOf=is_member_of_list):
        user_id = self.__db.get_user_id(personne_ldap.uid)
        if user_id:
            self.__db.enroll_user_in_cohort(id_cohort, user_id, self.context.timestamp_now_sql)
            # Record the enrollment for later cohort bookkeeping.
            self.context.utilisateurs_by_cohortes[id_cohort].append(user_id)
        else:
            log.warning("Impossible d'inserer l'utilisateur %s dans la cohorte %s, "
                        "car il n'est pas connu dans Moodle", personne_ldap, cohort_name)
def insert_moodle_structure(self, grp, nom_structure, path, ou, siren, uai):
    """
    Inserts an establishment structure into Moodle.

    Builds, in order: the course category for the establishment and its
    context, the private-zone course and its context, the manual
    enrollment capability, the discussion forum with its course module and
    context, and the forum-search block with its context. Each step is
    idempotent where a lookup precedes the insert.

    :param grp: True when the structure belongs to a grouping of establishments
    :param nom_structure: name of the structure (used in the description for groupings)
    :param path: context path of the parent under which contexts are created
    :param ou: organizational unit, used as category/course name
    :param siren: SIREN of the establishment, used as external id
    :param uai: UAI of the establishment, used as the category theme
    :return: None
    """
    # Timestamp shared by every dated insert below.
    now = self.__db.get_timestamp_now()
    # Category description: groupings embed the structure name after the SIREN.
    description = siren
    if grp:
        description = siren + "@" + nom_structure
    #########################
    # CATEGORY PART
    #########################
    # Insert the course category matching the establishment.
    self.__db.insert_moodle_course_category(ou, description, description, uai)
    id_categorie_etablissement = self.__db.get_id_course_category_by_id_number(siren)
    # Update the category path from its freshly generated id.
    path_etablissement = "/%d" % id_categorie_etablissement
    self.__db.update_course_category_path(id_categorie_etablissement, path_etablissement)
    #########################
    # CONTEXT PART
    #########################
    # Insert the context associated to the establishment category.
    self.__db.insert_moodle_context(self.__config.constantes.niveau_ctx_categorie,
                                    PROFONDEUR_CTX_ETAB,
                                    id_categorie_etablissement)
    id_contexte_etablissement = self.__db.get_id_context(self.__config.constantes.niveau_ctx_categorie,
                                                         PROFONDEUR_CTX_ETAB,
                                                         id_categorie_etablissement)
    # Chain the new context id onto the parent path.
    path_contexte_etablissement = "%s/%d" % (path, id_contexte_etablissement)
    self.__db.update_context_path(id_contexte_etablissement, path_contexte_etablissement)
    #########################
    # PRIVATE ZONE PART
    #########################
    # Insert the course hosting the discussion forum.
    id_zone_privee = self.__db.insert_zone_privee(id_categorie_etablissement, siren, ou, now)
    # Insert its associated context.
    id_contexte_zone_privee = self.__db.insert_zone_privee_context(id_zone_privee)
    # Update the context path.
    path_contexte_zone_privee = "%s/%d" % (path_contexte_etablissement, id_contexte_zone_privee)
    self.__db.update_context_path(id_contexte_zone_privee, path_contexte_zone_privee)
    #########################
    # ENROLLMENT PART
    #########################
    # Open the private-zone course to manual enrollment for students.
    role_id = self.__config.constantes.id_role_eleve
    self.__db.insert_moodle_enrol_capability("manual", 0, id_zone_privee, role_id)
    #########################
    # FORUM PART
    #########################
    # Insert the forum inside the private zone (only when missing).
    course = id_zone_privee
    name = FORUM_NAME_ZONE_PRIVEE % ou
    intro = FORUM_INTRO_ZONE_PRIVEE
    intro_format = FORUM_INTRO_FORMAT_ZONE_PRIVEE
    max_bytes = FORUM_MAX_BYTES_ZONE_PRIVEE
    max_attachements = FORUM_MAX_ATTACHEMENTS_ZONE_PRIVEE
    time_modified = now
    id_forum = self.__db.get_id_forum(course)
    if id_forum is None:
        self.__db.insert_moodle_forum(course, name, intro, intro_format, max_bytes, max_attachements, time_modified)
        id_forum = self.__db.get_id_forum(course)
    #########################
    # MODULE PART
    #########################
    # Insert the forum course module inside the private zone (idempotent).
    course = id_zone_privee
    module = COURSE_MODULES_MODULE
    instance = id_forum
    added = now
    id_course_module = self.__db.get_id_course_module(course)
    if id_course_module is None:
        self.__db.insert_moodle_course_module(course, module, instance, added)
        id_course_module = self.__db.get_id_course_module(course)
    # Insert the context for the course module (forum), when missing.
    id_contexte_module = self.__db.get_id_context(self.__config.constantes.niveau_ctx_forum,
                                                  PROFONDEUR_CTX_MODULE_ZONE_PRIVEE,
                                                  id_course_module)
    if id_contexte_module is None:
        self.__db.insert_moodle_context(self.__config.constantes.niveau_ctx_forum,
                                        PROFONDEUR_CTX_MODULE_ZONE_PRIVEE,
                                        id_course_module)
        id_contexte_module = self.__db.get_id_context(self.__config.constantes.niveau_ctx_forum,
                                                      PROFONDEUR_CTX_MODULE_ZONE_PRIVEE,
                                                      id_course_module)
    # Update the module context path under the private-zone context.
    path_contexte_module = "%s/%d" % (path_contexte_zone_privee, id_contexte_module)
    self.__db.update_context_path(id_contexte_module, path_contexte_module)
    #########################
    # BLOCK PART
    #########################
    # Insert the forum-search block (idempotent).
    parent_context_id = id_contexte_zone_privee
    block_name = BLOCK_FORUM_SEARCH_NAME
    show_in_subcontexts = BLOCK_FORUM_SEARCH_SHOW_IN_SUB_CTX
    page_type_pattern = BLOCK_FORUM_SEARCH_PAGE_TYPE_PATTERN
    sub_page_pattern = BLOCK_FORUM_SEARCH_SUB_PAGE_PATTERN
    default_region = BLOCK_FORUM_SEARCH_DEFAULT_REGION
    default_weight = BLOCK_FORUM_SEARCH_DEFAULT_WEIGHT
    id_block = self.__db.get_id_block(parent_context_id)
    if id_block is None:
        self.__db.insert_moodle_block(block_name, parent_context_id, show_in_subcontexts, page_type_pattern,
                                      sub_page_pattern, default_region, default_weight)
        id_block = self.__db.get_id_block(parent_context_id)
    # Insert the context for the block, when missing.
    id_contexte_bloc = self.__db.get_id_context(self.__config.constantes.niveau_ctx_bloc,
                                                PROFONDEUR_CTX_BLOCK_ZONE_PRIVEE,
                                                id_block)
    if id_contexte_bloc is None:
        self.__db.insert_moodle_context(self.__config.constantes.niveau_ctx_bloc,
                                        PROFONDEUR_CTX_BLOCK_ZONE_PRIVEE,
                                        id_block)
        id_contexte_bloc = self.__db.get_id_context(self.__config.constantes.niveau_ctx_bloc,
                                                    PROFONDEUR_CTX_BLOCK_ZONE_PRIVEE,
                                                    id_block)
    # Update the block context path under the private-zone context.
    path_contexte_bloc = "%s/%d" % (path_contexte_zone_privee, id_contexte_bloc)
    self.__db.update_context_path(id_contexte_bloc, path_contexte_bloc)
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,271 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/test_config.py | # coding: utf-8
from synchromoodle.config import ConfigLoader
from os import path
class TestConfig:
    """
    Tests of configuration loading and merging across multiple YAML files.

    Both tests load several files (including an invalid one) and check how
    action entries are merged: actions without ids are appended, actions
    sharing an id are merged into a single action.
    """

    @staticmethod
    def _load(*file_names):
        """Load the given config files from the test data/ directory, in order."""
        config_loader = ConfigLoader()
        return config_loader.load(
            [path.join(path.dirname(__file__), 'data', file_name) for file_name in file_names],
            True)

    def test_config_update_no_id(self):
        """Actions without ids accumulate: two config files yield two actions."""
        config = self._load('config_test_no_id_1.yml',
                            'config_test_no_id_2.yml',
                            'config_test_invalid.yml')
        assert config.constantes is not None
        assert config.database is not None
        assert config.ldap is not None
        assert config.actions is not None
        assert len(config.actions) == 2
        config_action_1 = config.actions[0]
        assert config_action_1.etablissements is not None
        assert config_action_1.inter_etablissements is not None
        assert config_action_1.inspecteurs is not None
        assert config_action_1.timestamp_store is not None
        assert config_action_1.timestamp_store.file == "config_test1_precedent.txt"
        config_action_2 = config.actions[1]
        assert config_action_2.timestamp_store.file == "config_test2_precedent.txt"
        # Later files override scalar constants; untouched keys keep file-1 values
        assert config.constantes.foo == "constante test foo 2"
        assert config.constantes.bar == "constante test bar 1"
        assert config_action_1.etablissements.prefixAdminMoodleLocal == "config_test1:admin:Moodle:local:"
        assert config_action_1.etablissements.prefixAdminLocal == "config_test1:admin:local:"
        assert len(config_action_1.etablissements.listeEtab) == 3
        assert config_action_1.etablissements.etabRgp[1].nom == "ETAB RGP DE TEST 2"
        assert len(config_action_1.etablissements.etabRgp[2].uais) == 10

    def test_config_update_same_id(self):
        """Actions sharing the same id are merged into a single action."""
        config = self._load('config_test_same_id_1.yml',
                            'config_test_same_id_2.yml',
                            'config_test_invalid.yml')
        assert config.constantes is not None
        assert config.database is not None
        assert config.ldap is not None
        assert config.actions is not None
        assert len(config.actions) == 1
        config_action = config.actions[0]
        assert config_action.etablissements is not None
        assert config_action.inter_etablissements is not None
        assert config_action.inspecteurs is not None
        assert config_action.timestamp_store is not None
        # The merged action carries the last file's timestamp store...
        assert config_action.timestamp_store.file == "config_test2_precedent.txt"
        assert config.constantes.foo == "constante test foo 2"
        assert config.constantes.bar == "constante test bar 1"
        # ...but keeps file-1 values for keys file 2 did not override
        assert config_action.etablissements.prefixAdminMoodleLocal == "config_test1:admin:Moodle:local:"
        assert config_action.etablissements.prefixAdminLocal == "config_test1:admin:local:"
        assert len(config_action.etablissements.listeEtab) == 3
        assert config_action.etablissements.etabRgp[1].nom == "ETAB RGP DE TEST 2"
        assert len(config_action.etablissements.etabRgp[2].uais) == 10
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,272 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/test_synchronizer.py | # coding: utf-8
import pytest
import platform
from synchromoodle.config import Config, ActionConfig
from synchromoodle.dbutils import Database
from synchromoodle.ldaputils import Ldap
from synchromoodle.synchronizer import Synchronizer
from test.utils import db_utils, ldap_utils
@pytest.fixture(scope='function', name='db')
def db(docker_config: Config):
    """Provide a freshly initialized Database connection for each test."""
    database = Database(docker_config.database, docker_config.constantes)
    db_utils.init(database)
    return database
@pytest.fixture(scope='function', name='ldap')
def ldap(docker_config: Config):
    """Provide an LDAP client reset to a clean state for each test."""
    directory = Ldap(docker_config.ldap)
    ldap_utils.reset(directory)
    return directory
class TestEtablissement:
    """
    Integration tests for Synchronizer against live LDAP and database fixtures.

    NOTE(review): these tests load fixture data (.ldif files and
    default-context.sql) and assert hard-coded ids/values coming from that
    data — they require the docker-provisioned LDAP and database services.
    """

    @pytest.fixture(autouse=True)
    def manage_ldap(self, ldap: Ldap):
        """Open the LDAP connection before each test; always close it after."""
        ldap.connect()
        try:
            yield
        finally:
            ldap.disconnect()

    @pytest.fixture(autouse=True)
    def manage_db(self, db: Database):
        """Open the database connection before each test; always close it after."""
        db.connect()
        try:
            yield
        finally:
            db.disconnect()

    def test_should_load_context(self, ldap: Ldap, db: Database, docker_config: Config):
        """initialize() should populate the synchronizer context from LDAP/DB."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, docker_config)
        synchronizer.initialize()
        assert synchronizer.context
        assert synchronizer.context.timestamp_now_sql is not None
        # Expected ids come from data/default-context.sql
        assert synchronizer.context.id_context_categorie_inter_etabs == 3
        assert synchronizer.context.id_context_categorie_inter_cfa == 343065
        assert synchronizer.context.id_field_classe == 1
        assert synchronizer.context.id_field_domaine == 3
        assert synchronizer.context.id_role_extended_teacher == 13
        assert synchronizer.context.id_role_advanced_teacher == 20
        assert synchronizer.context.map_etab_domaine == {'0291595B': ['lycees.netocentre.fr'],
                                                         '0290009C': ['lycees.netocentre.fr']}

    def test_maj_etab(self, ldap: Ldap, db: Database, config: Config):
        """handle_etablissement() builds the school context and its course category."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        structure = ldap.get_structure("0290009C")
        assert structure is not None
        etab_context = synchronizer.handle_etablissement(structure.uai)
        assert etab_context.uai == "0290009C"
        assert etab_context.gere_admin_local is True
        assert etab_context.etablissement_regroupe is False
        assert etab_context.regexp_admin_moodle == "(esco|clg37):admin:Moodle:local:.*_0290009C$"
        assert etab_context.regexp_admin_local == "(esco|clg37):admin:local:.*_0290009C$"
        assert etab_context.etablissement_theme == "0290009c"
        assert etab_context.id_context_categorie is not None
        assert etab_context.id_zone_privee is not None
        assert etab_context.id_context_course_forum is not None
        # The Moodle course category for the school must now exist
        etablissement_ou = ldap.get_structure("0290009C").nom
        db.mark.execute("SELECT * FROM {entete}course_categories "
                        "WHERE name = %(name)s "
                        "AND theme = %(theme)s".format(entete=db.entete),
                        params={
                            'name': etablissement_ou,
                            'theme': etab_context.uai
                        })
        result = db.mark.fetchone()
        assert result is not None

    def test_maj_eleve(self, ldap: Ldap, db: Database, config: Config):
        """handle_eleve() creates the user, assigns no roles, and fills class cohorts."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        structure = ldap.get_structure("0290009C")
        eleves = ldap.search_eleve(None, "0290009C")
        eleve = eleves[1]
        etab_context = synchronizer.handle_etablissement(structure.uai)
        synchronizer.handle_eleve(etab_context, eleve)
        db.mark.execute("SELECT * FROM {entete}user WHERE username = %(username)s".format(entete=db.entete),
                        params={
                            'username': str(eleve.uid).lower()
                        })
        result = db.mark.fetchone()
        assert result is not None
        # Column positions follow the Moodle user table layout
        assert result[10] == 'Dorian'
        assert result[12] == 'dorian.meyer@netocentre.fr'
        assert result[27] == '0290009c'
        eleve_id = result[0]
        # A student (in a lycée) gets no role assignments
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': eleve_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 0
        # The student must be a member of one cohort per LDAP class
        for classe in eleve.classes:
            cohort_name = "Élèves de la Classe %s" % classe
            db.mark.execute("SELECT * FROM {entete}cohort WHERE name = %(name)s".format(entete=db.entete),
                            params={
                                'name': cohort_name
                            })
            cohort = db.mark.fetchone()
            cohort_id = cohort[0]
            db.mark.execute("SELECT * FROM {entete}cohort_members WHERE cohortid = %(cohortid)s AND userid = %(userid)s"
                            .format(entete=db.entete),
                            params={
                                'cohortid': cohort_id,
                                'userid': eleve_id
                            })
            result_cohort_enrollment = db.mark.fetchone()
            assert result_cohort_enrollment is not None
            assert result_cohort_enrollment[2] == eleve_id

    def test_maj_enseignant(self, ldap: Ldap, db: Database, config: Config):
        """handle_enseignant() creates the user and assigns the expected roles."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        structure = ldap.get_structure("0290009C")
        enseignants = ldap.search_enseignant(None, "0290009C")
        enseignant = enseignants[1]
        etab_context = synchronizer.handle_etablissement(structure.uai)
        synchronizer.handle_enseignant(etab_context, enseignant)
        db.mark.execute("SELECT * FROM {entete}user WHERE username = %(username)s".format(entete=db.entete),
                        params={
                            'username': str(enseignant.uid).lower()
                        })
        result = db.mark.fetchone()
        assert result is not None
        assert result[10] == 'Jules'
        assert result[11] == 'PICARD'
        assert result[12] == 'noreply@ac-rennes.fr'
        assert result[27] == '0290009c'
        enseignant_id = result[0]
        # A teacher gets three role assignments across different contexts
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': enseignant_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 3
        assert roles_results[0][1] == 2
        assert roles_results[0][2] == 3
        assert roles_results[1][1] == 2
        assert roles_results[1][2] == 1184277
        assert roles_results[2][1] == 5
        assert roles_results[2][2] == 1184278

    def test_maj_user_interetab(self, ldap: Ldap, db: Database, config: Config):
        """A plain inter-etablissement user gets exactly one role assignment."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        users = ldap.search_personne()
        user = users[0]
        synchronizer.handle_user_interetab(user)
        db.mark.execute("SELECT * FROM {entete}user WHERE username = %(username)s".format(entete=db.entete),
                        params={
                            'username': str(user.uid).lower()
                        })
        result = db.mark.fetchone()
        user_id = result[0]
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': user_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 1

    def test_maj_usercfa_interetab(self, ldap: Ldap, db: Database, config: Config, action_config: ActionConfig):
        """An inter-etablissement admin additionally gets the local-admin role."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        users = ldap.search_personne()
        user = users[0]
        # Mark the user as admin by giving them the configured admin attribute
        user.is_member_of = [action_config.inter_etablissements.ldap_valeur_attribut_admin]
        synchronizer.handle_user_interetab(user)
        db.mark.execute("SELECT * FROM {entete}user WHERE username = %(username)s".format(entete=db.entete),
                        params={
                            'username': str(user.uid).lower()
                        })
        result = db.mark.fetchone()
        user_id = result[0]
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': user_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 2
        assert roles_results[1][1] == db.get_id_role_admin_local()

    def test_maj_inspecteur(self, ldap: Ldap, db: Database, config: Config):
        """handle_inspecteur() creates the user, one role, and the domain info field."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        users = ldap.search_personne()
        user = users[0]
        synchronizer.handle_inspecteur(user)
        db.mark.execute("SELECT * FROM {entete}user WHERE username = %(username)s".format(entete=db.entete),
                        params={
                            'username': str(user.uid).lower()
                        })
        result = db.mark.fetchone()
        user_id = result[0]
        assert result is not None
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': user_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 1
        assert roles_results[0][1] == 2
        # The custom user-info field should record the user's domain
        db.mark.execute("SELECT * FROM {entete}user_info_data WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': user_id
                        })
        infos_result = db.mark.fetchone()
        assert infos_result[3] == "lycees.netocentre.fr"

    def test_eleve_passage_lycee(self, ldap: Ldap, db: Database, config: Config):
        """A student moving from collège to lycée loses the collège role."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        college = ldap.get_structure("0291595B")
        lycee = ldap.get_structure("0290009C")
        eleves = ldap.search_eleve(None, "0291595B")
        eleve = eleves[0]
        college_context = synchronizer.handle_etablissement(college.uai)
        lycee_context = synchronizer.handle_etablissement(lycee.uai)
        synchronizer.handle_eleve(college_context, eleve)
        db.mark.execute("SELECT * FROM {entete}user WHERE username = %(username)s".format(entete=db.entete),
                        params={
                            'username': str(eleve.uid).lower()
                        })
        result = db.mark.fetchone()
        eleve_id = result[0]
        # In a collège the student has one role assignment (id 14)
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': eleve_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 1
        assert roles_results[0][1] == 14
        # Simulate the move to the lycée and re-synchronize
        eleve.uai_courant = "0290009C"
        synchronizer.handle_eleve(lycee_context, eleve)
        db.mark.execute("SELECT * FROM {entete}role_assignments WHERE userid = %(userid)s".format(entete=db.entete),
                        params={
                            'userid': eleve_id
                        })
        roles_results = db.mark.fetchall()
        assert len(roles_results) == 0

    def test_nettoyage(self, ldap: Ldap, db: Database, config: Config):
        """purge_cohorts() removes users/cohorts no longer present in LDAP."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        etab_context = synchronizer.handle_etablissement("0290009C")
        eleves = ldap.search_eleve(None, "0290009C")
        for eleve in eleves:
            synchronizer.handle_eleve(etab_context, eleve)
        eleves_by_cohorts_db, eleves_by_cohorts_ldap = \
            synchronizer.get_users_by_cohorts_comparators(etab_context, r'(Élèves de la Classe )(.*)$',
                                                          'Élèves de la Classe %')
        # Pretend two classes disappeared and three students left TS2 in LDAP
        eleves_by_cohorts_ldap.pop('1ERE S2', None)
        eleves_by_cohorts_ldap.pop('TES3', None)
        eleves_by_cohorts_ldap['TS2'].remove('f1700ivg')
        eleves_by_cohorts_ldap['TS2'].remove('f1700ivl')
        eleves_by_cohorts_ldap['TS2'].remove('f1700ivv')
        synchronizer.purge_cohorts(eleves_by_cohorts_db, eleves_by_cohorts_ldap, "Élèves de la Classe %s")
        db.delete_empty_cohorts()
        # The removed classes must now have zero cohort members
        s = "SELECT COUNT(cohort_members.id) FROM {entete}cohort_members AS cohort_members" \
            " INNER JOIN {entete}cohort AS cohort" \
            " ON cohort_members.cohortid = cohort.id" \
            " WHERE cohort.name = %(cohortname)s".format(entete=db.entete)
        db.mark.execute(s, params={'cohortname': "Élèves de la Classe 1ERE S2"})
        result = db.mark.fetchone()
        assert result[0] == 0
        db.mark.execute(s, params={'cohortname': "Élèves de la Classe TES3"})
        result = db.mark.fetchone()
        assert result[0] == 0
        # TS2 keeps only the students still present in LDAP
        db.mark.execute("SELECT {entete}user.username FROM {entete}cohort_members AS cohort_members"
                        " INNER JOIN {entete}cohort AS cohort"
                        " ON cohort_members.cohortid = cohort.id"
                        " INNER JOIN {entete}user"
                        " ON cohort_members.userid = {entete}user.id"
                        " WHERE cohort.name = %(cohortname)s".format(entete=db.entete),
                        params={
                            'cohortname': "Élèves de la Classe TS2"
                        })
        results = [result[0] for result in db.mark.fetchall()]
        assert 'f1700ivg' not in results
        assert 'f1700ivl' not in results
        assert 'f1700ivv' not in results
        assert len(results) == 5

    def test_anonymize_useless_users(self, ldap: Ldap, db: Database, config: Config):
        """Users absent from LDAP past the delay are anonymized in the database."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        etab_context = synchronizer.handle_etablissement("0290009C")
        ldap_eleves = ldap.search_eleve(uai="0290009C")
        ldap_enseignants = ldap.search_enseignant(uai="0290009C")
        for eleve in ldap_eleves:
            synchronizer.handle_eleve(etab_context, eleve)
        for enseignant in ldap_enseignants:
            synchronizer.handle_enseignant(etab_context, enseignant)
        ldap_users = ldap.search_personne()
        db_valid_users = db.get_all_valid_users()
        # Drop the first three users from the LDAP list so they look stale
        users_to_anon = []
        for i in range(0, 3):
            users_to_anon.append(ldap_users[i])
        for user_to_anon in users_to_anon:
            ldap_users.remove(user_to_anon)
        # Back-date every DB user just past the anonymization delay
        age = db.get_timestamp_now() - (config.delete.delay_anonymize_student * 86400) - 1
        db_valid_users = [(db_valid_user[0], db_valid_user[1], age) for db_valid_user in db_valid_users]
        # Stub the (name-mangled) webservice so no real delete call is made
        synchronizer._Synchronizer__webservice.delete_users = lambda arg: None
        synchronizer.anonymize_or_delete_users(ldap_users, db_valid_users)
        db.mark.execute("SELECT username, deleted, firstname, lastname, email, skype, yahoo, aim, msn, phone1, phone2,"
                        " department, address, city, description, lastnamephonetic, firstnamephonetic, middlename,"
                        " alternatename"
                        " FROM {entete}user ORDER BY id LIMIT 3".format(entete=db.entete))
        db_users = db.mark.fetchall()
        assert db_users[0][0] == 'f1700ivg'
        assert db_users[1][0] == 'f1700ivh'
        assert db_users[2][0] == 'f1700ivi'
        # Every personal field of the three stale users must be anonymized
        for x in range(0, 3):
            assert db_users[x][2] == config.constantes.anonymous_name
            assert db_users[x][3] == config.constantes.anonymous_name
            assert db_users[x][4] == config.constantes.anonymous_mail
            assert db_users[x][5] == config.constantes.anonymous_name
            assert db_users[x][6] == config.constantes.anonymous_name
            assert db_users[x][7] == config.constantes.anonymous_name
            assert db_users[x][8] == config.constantes.anonymous_name
            assert db_users[x][9] == config.constantes.anonymous_phone
            assert db_users[x][10] == config.constantes.anonymous_phone
            assert db_users[x][11] == config.constantes.anonymous_name
            assert db_users[x][12] == config.constantes.anonymous_name
            assert db_users[x][13] == config.constantes.anonymous_name
            assert db_users[x][14] is None
            assert db_users[x][15] == config.constantes.anonymous_name
            assert db_users[x][16] == config.constantes.anonymous_name
            assert db_users[x][17] == config.constantes.anonymous_name
            assert db_users[x][18] == config.constantes.anonymous_name

    def test_course_backup(self, ldap: Ldap, db: Database, config: Config):
        """Old courses owned by a teacher are backed up/removed; recent ones kept."""
        ldap_utils.run_ldif('data/default-structures.ldif', ldap)
        ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
        ldap_utils.run_ldif('data/default-groups.ldif', ldap)
        db_utils.run_script('data/default-context.sql', db, connect=False)
        # Pick a backup command matching the host OS
        os = platform.system()
        if os == "Linux":
            config.webservice.backup_cmd = "sh backup.sh --courseid=%courseid% --destination=/MoodleBackups"
        elif os == "Windows":
            config.webservice.backup_cmd = "backup.bat --courseid=%courseid% --destination=/MoodleBackups"
        synchronizer = Synchronizer(ldap, db, config)
        synchronizer.initialize()
        etab_context = synchronizer.handle_etablissement("0290009C")
        ldap_eleves = ldap.search_eleve(uai="0290009C")
        ldap_enseignants = ldap.search_enseignant(uai="0290009C")
        enseignant = ldap_enseignants[0]
        enseignant2 = ldap_enseignants[1]
        for eleve in ldap_eleves:
            synchronizer.handle_eleve(etab_context, eleve)
        for enseignant in ldap_enseignants:
            synchronizer.handle_enseignant(etab_context, enseignant)
        db.mark.execute("SELECT id FROM {entete}user WHERE username = %(username)s".format(entete=db.entete), params={
            'username': str(enseignant.uid).lower()
        })
        enseignant_db = db.mark.fetchone()
        db.mark.execute("SELECT id FROM {entete}user WHERE username = %(username)s".format(entete=db.entete), params={
            'username': str(enseignant2.uid).lower()
        })
        enseignant2_db = db.mark.fetchone()
        now = synchronizer.context.timestamp_now_sql
        # One fresh course, two courses older than a year (31622400 s)
        db.mark.execute("INSERT INTO {entete}course (fullname, timemodified) VALUES ('cours de test 1',"
                        " %(timemodified)s)".format(entete=db.entete), params={'timemodified': now})
        db.mark.execute("INSERT INTO {entete}course (fullname, timemodified) VALUES ('cours de test 2',"
                        " %(timemodified)s)".format(entete=db.entete), params={'timemodified': now - 31622400})
        db.mark.execute("INSERT INTO {entete}course (fullname, timemodified) VALUES ('cours de test 3',"
                        " %(timemodified)s)".format(entete=db.entete), params={'timemodified': now - 31622400})
        db.mark.execute("SELECT id, fullname, timemodified FROM {entete}course ORDER BY id DESC LIMIT 3"
                        .format(entete=db.entete))
        courses = db.mark.fetchall()
        # Make enseignant the course-owner of each course (contextlevel 50)
        for course in courses:
            db.mark.execute("INSERT INTO {entete}context (contextlevel, instanceid) VALUES (50, %(instanceid)s)"
                            .format(entete=db.entete), params={'instanceid': course[0]})
            db.mark.execute("SELECT id FROM {entete}context ORDER BY id DESC LIMIT 1".format(entete=db.entete))
            contextid = db.mark.fetchone()
            db.add_role_to_user(config.constantes.id_role_proprietaire_cours, contextid[0], enseignant_db[0])
        # Give enseignant2 ownership on a sub-context (contextlevel 60) of one course
        db.mark.execute("INSERT INTO {entete}context (contextlevel, instanceid) VALUES (60, %(instanceid)s)"
                        .format(entete=db.entete), params={'instanceid': courses[1][0]})
        db.mark.execute("SELECT id FROM {entete}context ORDER BY id DESC LIMIT 1".format(entete=db.entete))
        contextid = db.mark.fetchone()
        db.add_role_to_user(config.constantes.id_role_proprietaire_cours, contextid[0], enseignant2_db[0])
        synchronizer.check_and_process_user_courses(enseignant_db[0])
        # Only the old course owned solely by enseignant should be gone
        db.mark.execute("SELECT id FROM {entete}course WHERE fullname LIKE 'cours de test%'".format(entete=db.entete))
        new_courses = db.mark.fetchall()
        new_courses_ids = [new_course[0] for new_course in new_courses]
        assert len(new_courses_ids) == 2
        assert courses[0][0] not in new_courses_ids
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,278 | techhat/webflayer | refs/heads/master | /flayer/tools.py | # -*- coding: utf-8 -*-
'''
Tools for Web Flayer
'''
# Python
import os
import re
import sys
import time
import random
import pprint
import urllib
# 3rd party
import requests
from termcolor import colored
import psycopg2
from psycopg2.extras import Json
from bs4 import BeautifulSoup
# Internal
import flayer.event
class Output(object):
    '''
    Used for outputting data

    Messages are color-coded by severity and suppressed while running as a
    daemon unless ``force`` is passed.
    '''
    def __init__(self, opts):
        '''
        Initialize

        opts: runtime options dict; ``daemon`` controls whether messages are
        printed, and ``action_color``/``info_color``/``warn_color``/
        ``error_color`` override the default colors.
        '''
        self.opts = opts

    def _emit(self, msg, color_opt, default_color, force, attrs=None):
        '''
        Shared printer: show ``msg`` in the configured color unless running
        as a daemon (printing can be forced with ``force=True``).
        '''
        if not self.opts['daemon'] or force is True:
            print(colored(msg, self.opts.get(color_opt, default_color), attrs=attrs))

    def action(self, msg, force=False):
        '''
        Something is currently happening
        '''
        self._emit(msg, 'action_color', 'green', force)

    def info(self, msg, force=False):
        '''
        Informational only
        '''
        self._emit(msg, 'info_color', 'cyan', force)

    def warn(self, msg, force=False):
        '''
        Something is possibly wrong, but not enough to stop running
        '''
        self._emit(msg, 'warn_color', 'yellow', force)

    def error(self, msg, force=False):
        '''
        Something is wrong enough to halt execution
        '''
        self._emit(msg, 'error_color', 'red', force, attrs=['bold'])
def process_url(url_uuid, url, content, parsers):
    '''
    Process a URL

    Consult each ``*.func_map`` entry in ``parsers`` until one maps the URL
    to a handler function, then invoke that handler.

    url_uuid: database UUID of the URL
    url: the URL that was retrieved
    content: the downloaded body to hand to the parser
    parsers: mapping of loader-style module names to callables; only
        entries whose name ends in ``.func_map`` are consulted

    Raises ValueError when no parser claims the URL (previously this fell
    through to calling None, which raised an opaque TypeError).
    '''
    fun = None
    for mod in parsers:
        if not mod.endswith('.func_map'):
            continue
        fun = parsers[mod](url)
        if fun is not None:
            # First parser to claim the URL wins
            break
    if fun is None:
        raise ValueError('No parser found for URL {}'.format(url))
    fun(url_uuid, url, content)
def get_url(
        url,
        parent=None,
        referer=None,
        dbclient=None,
        client=requests,
        opts=None,
        context=None,
):
    '''
    Download a URL (if necessary) and store it

    url: the URL to retrieve
    parent: UUID of the referring URL, used to record referer relationships
    referer: value for the ``referer`` request header
    dbclient: open psycopg2 connection used for the URL/content cache
    client: HTTP client exposing a requests-style ``.request()`` method
    opts: runtime options dict (method, headers, save_path, force, wait,
        warned, random_wait, no_db_cache, ...)
    context: shared runtime context dict, passed through to _save_path()

    Returns a ``(url_uuid, content)`` tuple; ``(0, content)`` when the DB
    cache is bypassed and ``(0, '')`` on connection/schema errors.
    '''
    out = Output(opts)
    headers = opts['headers'].copy()
    data = opts.get('data', None)
    if referer:
        headers['referer'] = referer

    if flayer.db.check_domain_wait(dbclient, url) is False:
        # We need to put this URL back into the queue
        queue_urls([url], dbclient, opts)
        flayer.db.pattern_wait(dbclient, url)
        flayer.db.set_domain_wait(dbclient, opts, url)

    wait = 0
    if opts.get('no_db_cache') is True:
        # Skip all the DB stuff and just download the URL
        req = client.request(
            opts['method'],
            url,
            headers=headers,
            data=data,
            verify=bool(opts.get('verify', True)),
        )
        req.raise_for_status()
        if opts.get('include_headers') is True:
            out.info(pprint.pformat(dict(req.headers)))
        content = req.text
        if opts['random_wait'] is True:
            # Politeness delay between 1 and ``wait`` seconds
            wait = int(opts.get('wait', 10))
            time.sleep(random.randrange(1, wait))
        if url not in opts['warned']:
            opts['warned'].append(url)
        return 0, content

    cur = dbclient.cursor()
    exists = False

    # Check for URL in DB
    cur.execute('''
        SELECT uuid, url, last_retrieved
        FROM urls
        WHERE url = %s
    ''', [url])
    if cur.rowcount < 1:
        # URL has never been retrieved; register it and get a fresh UUID
        cur.execute('''
            INSERT INTO urls
            (url) VALUES (%s)
            RETURNING uuid
        ''', [url])
        dbclient.commit()
        url_uuid = cur.fetchone()[0]
        out.action('{} has not been retrieved before, new UUID is {}'.format(url, url_uuid))
    else:
        # URL has been retrieved, get its UUID
        url_uuid = cur.fetchone()[0]
        out.warn('{} exists, UUID is {}'.format(url, url_uuid))
        exists = True
        if url not in opts['warned']:
            opts['warned'].append(url)

    # Save referer relationships
    if parent:
        try:
            cur.execute('''
                INSERT INTO referers
                (url_uuid, referer_uuid)
                VALUES
                (%s, %s)
            ''', [url_uuid, parent])
            dbclient.commit()
        except psycopg2.IntegrityError:
            # This relationship already exists
            dbclient.rollback()

    # force_directories implies saving to disk; default to the current dir
    if opts['force_directories'] and not opts['save_path']:
        opts['save_path'] = '.'

    # Check for cached content for this URL
    cur.execute('''
        SELECT data, uuid
        FROM content
        WHERE url_uuid = %s
        ORDER BY retrieved
        LIMIT 1
    ''', [url_uuid])
    if cur.rowcount < 1:
        # No cached content: download (streaming to disk when save_path set)
        try:
            if opts['save_path']:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                    stream=True,
                )
                content, req_headers = _save_path(url, url_uuid, req, wait, opts, context, dbclient)
            else:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                )
                content = req.text
                req_headers = req.headers
        except requests.exceptions.ConnectionError as exc:
            out.error('Error downloading {}:'.format(url))
            out.error(exc)
            return 0, ''
        except requests.exceptions.InvalidSchema as exc:
            out.error('Error downloading {}:'.format(url))
            out.error(exc)
            return 0, ''
        if url not in opts['warned']:
            opts['warned'].append(url)
        if opts.get('include_headers') is True:
            out.info(pprint.pformat(dict(req_headers)))
        if content:
            # Cache the body; NUL bytes are stripped because postgres JSON
            # fields cannot contain them
            cur.execute('''
                INSERT INTO content
                (url_uuid, data) VALUES (%s, %s)
            ''',
                        [
                            url_uuid,
                            Json({
                                'content': content.replace('\x00', ''),
                                'status': req.status_code,
                            })
                        ]
                        )
            dbclient.commit()
    else:
        if opts['force'] is True:
            # Cached content exists but a re-download was requested
            row_id = cur.fetchone()[1]
            if opts['save_path']:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                    stream=True,
                )
                content, req_headers = _save_path(url, url_uuid, req, wait, opts, context, dbclient)
            else:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                )
                content = req.text
                req_headers = req.headers
            if url not in opts['warned']:
                opts['warned'].append(url)
            if opts.get('include_headers') is True:
                out.info(pprint.pformat(dict(req_headers)))
            if content:
                # Replace the previously cached row in place
                cur.execute('''
                    UPDATE content
                    SET url_uuid = %s, data = %s
                    WHERE uuid = %s
                ''',
                            [
                                url_uuid,
                                Json({'content': content}),
                                row_id
                            ]
                            )
                dbclient.commit()
        else:
            # Serve the cached copy without hitting the network
            content = cur.fetchone()[0]['content']

    flayer.db.pattern_wait(dbclient, url)
    flayer.db.set_domain_wait(dbclient, opts, url)

    if exists is False:
        if opts['random_wait'] is True:
            # Politeness delay, only for URLs actually fetched this run
            wait = int(opts.get('wait', 10))
            time.sleep(random.randrange(1, wait))

    return url_uuid, content
def _save_path(url, url_uuid, req, wait, opts, context, dbclient):
    '''
    Work out the on-disk file name for a URL, then hand off to status()
    to stream the download into it.
    '''
    urlcomps = urllib.parse.urlparse(url)
    url_path = urlcomps[2]
    if opts['force_directories']:
        # Mirror the remote directory structure under save_path/<host>/
        file_name = os.path.join(opts['save_path'], urlcomps[1], url_path.lstrip('/'))
    else:
        # Flat layout: keep only the final path component
        file_name = os.path.join(opts['save_path'], url_path.split('/')[-1])
    return status(req, url, url_uuid, file_name, wait, opts, context, dbclient)
def status(
        req,
        media_url,
        url_uuid,
        file_name,
        wait=0,
        opts=None,
        context=None,
        dbclient=None,
):
    '''
    Stream a single download to disk, reporting progress as it goes.

    req: an in-progress ``requests`` response (opened with ``stream=True``)
    media_url: the URL being downloaded
    url_uuid: UUID of the parent URL (row in the ``urls`` table)
    file_name: destination path for the download
    wait: seconds to sleep after the download completes
    opts: options dict (daemon, overwrite, id, save_html, hard_stop, abort)
    context: shared context dict; live progress is published under ``dl_data``
    dbclient: database connection used to track active downloads

    Returns a (content, headers) tuple; content is the body text for
    ``text/*`` responses, or None otherwise.
    '''
    # Normalize the optional arguments *before* anything uses them.
    # Previously Output(opts) was constructed while opts could still be
    # None, defeating the documented defaults.
    if opts is None:
        opts = {}
    if context is None:
        context = {}
    out = Output(opts)
    file_name = _rename(media_url, file_name, opts)
    cache_dir = '/'.join(file_name.split('/')[:-1])
    try:
        os.makedirs(cache_dir, mode=0o0755, exist_ok=True)
    except PermissionError as exc:
        out.error('Cannot create directory {}: {}'.format(cache_dir, exc))
    # Only text/* responses have their body collected and returned
    is_text = False
    req_headers = req.headers
    for header in list(req_headers):
        if header.lower().startswith('content-type'):
            if req_headers[header].startswith('text'):
                is_text = True
    content = ''
    cur = dbclient.cursor()
    agent_id = opts.get('id', 'unknown')
    # Register this download as active for this agent
    cur.execute(
        'INSERT INTO active_dl (url_uuid, started_by) VALUES (%s, %s)',
        [url_uuid, agent_id]
    )
    cur.execute('SELECT url FROM urls WHERE uuid = %s', [url_uuid])
    root_url = cur.fetchone()[0]
    # Honor any configured per-pattern / per-domain throttling
    flayer.db.pattern_wait(dbclient, media_url)
    flayer.db.set_domain_wait(dbclient, opts, media_url)
    out.action('Downloading: {}'.format(media_url))
    if os.path.exists(file_name):
        if opts['overwrite']:
            out.warn('... {} exists, overwriting'.format(file_name))
        else:
            out.warn('... {} exists, skipping'.format(file_name))
            # Clear the active-download marker before bailing out;
            # previously the row inserted above was leaked on skip
            cur.execute('DELETE FROM active_dl WHERE url_uuid = %s', [url_uuid])
            return None, {}
    if not opts['daemon']:
        sys.stdout.write(colored('...Saving to: ', 'green'))
        out.info(file_name)
    buffer_size = 4096
    total = int(req.headers.get('Content-Length', 0))
    count = 0
    try:
        # One "point" is 1% of the expected payload size
        point = int(total / 100)
    except ZeroDivisionError:
        out.error('Divide by zero error, status not available')
        point = 0
    start_time = time.time()
    last_time = time.time()
    delay_blocks = 0
    delay_count = 0
    context['dl_data'] = {
        'url': root_url,
        'media_url': media_url,
        'url_uuid': url_uuid,
        'bytes_total': '',
        'bytes_elapsed': '',
        'time_total': '',
        'time_left': '',
        'kbsec': 0,
    }
    flayer.event.fire('flayer/{}/download'.format(opts['id']), {root_url: 'started'}, opts)
    try:
        with open(file_name, 'wb') as fhp:
            try:
                for block in req.iter_content(buffer_size):
                    if opts.get('hard_stop'):
                        # Requeue so the download can be retried next run
                        queue_urls([media_url], dbclient, opts)
                        break
                    if opts.get('abort'):
                        break
                    if is_text is True:
                        content += str(block)
                    fhp.write(block)
                    count += buffer_size
                    delay_blocks += buffer_size
                    delay_count += 1
                    time_delay = time.time() - last_time
                    if time_delay >= float(1):
                        # Refresh progress stats roughly once per second
                        last_time = time.time()
                        try:
                            blocks_left = int((total - count) / buffer_size)
                        except ZeroDivisionError:
                            blocks_left = 0
                        kbsec = (buffer_size / 1024) * delay_count
                        try:
                            seconds_left = ((blocks_left * buffer_size) / 1024) / kbsec
                        except ZeroDivisionError:
                            seconds_left = 0
                        minutes_left = int(seconds_left / 60)
                        minsecs_left = seconds_left % 60
                        time_left = '%d:%02d' % (minutes_left, minsecs_left)
                        seconds_elapsed = time.time() - start_time
                        seconds_total = seconds_elapsed + seconds_left
                        minutes_total = int(seconds_total / 60)
                        minsecs_total = int(seconds_total % 60)
                        time_total = '%d:%02d' % (minutes_total, minsecs_total)
                        try:
                            percent = int(count / point)
                        except ZeroDivisionError:
                            percent = 0
                        context['dl_data']['bytes_total'] = total
                        context['dl_data']['bytes_elapsed'] = count
                        context['dl_data']['time_total'] = time_total
                        context['dl_data']['time_left'] = time_left
                        context['dl_data']['kbsec'] = kbsec
                        if not opts['daemon']:
                            sys.stdout.write('\x1b[2K\r')
                            sys.stdout.write(
                                colored('Total size is {} '.format(sizeof_fmt(total)), 'green'))
                            sys.stdout.write(colored('({} bytes), '.format(total), 'green'))
                            sys.stdout.write(colored('{}%, '.format(str(percent)), 'cyan'))
                            sys.stdout.write(colored(kbsec, 'cyan'))
                            sys.stdout.write(colored(' KiB/s, ', 'cyan'))
                            sys.stdout.write(colored('{}/{} left'.format(time_left, time_total), 'cyan'))
                            sys.stdout.flush()
                        delay_blocks = 0
                        delay_count = 0
            except OSError as exc:
                out.error('OS Error: {}'.format(exc))
                out.error('Media URL: {}'.format(media_url))
            except ProtocolError as exc:
                out.error('Protocol Error: {}'.format(exc))
                out.error('Media URL: {}'.format(media_url))
            except Exception as exc:  # pylint: disable=broad-except
                out.error('Exception: {}'.format(exc))
                out.error('Media URL: {}'.format(media_url))
    except OSError as exc:
        out.error('There was an error opening {}: {}'.format(file_name, exc))
    del context['dl_data']
    # Remove partial/unwanted files (stopped downloads, unsaved HTML)
    if opts.get('hard_stop') or opts.get('abort'):
        os.remove(file_name)
    if is_text is True and opts.get('save_html', True) is False:
        os.remove(file_name)
    if not content:
        content = None
    # Download finished; clear the active marker and fire the event
    cur.execute('DELETE FROM active_dl WHERE url_uuid = %s', [url_uuid])
    flayer.event.fire('flayer/{}/download'.format(opts['id']), {root_url: 'complete'}, opts)
    flayer.db.pattern_wait(dbclient, media_url)
    flayer.db.set_domain_wait(dbclient, opts, media_url)
    if not opts['daemon']:
        print()
    time.sleep(wait)
    return content, req_headers
def sizeof_fmt(num, suffix='B'):
    '''
    Show human-readable sizes
    '''
    units = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi')
    value = num
    for unit in units:
        if abs(value) < 1024.0:
            return "%3.1f%s%s " % (value, unit, suffix)
        value /= 1024.0
    # Anything past zebibytes falls through to yobibytes
    return "%.1f%s%s " % (value, 'Yi', suffix)
def dbsave_media(cur, media_url, url_uuid, file_name, dbclient):
    '''
    Save a media item into the database, once it's been downloaded

    cur: Database cursor
    media_url: The URL of the image/video that was downloaded
    url_uuid: The UUID of the parent of the media_url
    file_name: The place where the media_url was downloaded to
    dbclient: Database connection, used for commit/rollback
    '''
    # Insert the media URL itself; on a duplicate, roll back and look up
    # the existing row's UUID instead
    try:
        cur.execute('''
            INSERT INTO urls (url) values (%s) RETURNING uuid
        ''', [media_url])
        dbclient.commit()
        new_id = cur.fetchone()[0]
    except psycopg2.IntegrityError:
        # This relationship already exists
        dbclient.rollback()
        cur.execute('''
            SELECT uuid FROM urls WHERE url = %s
        ''', [media_url])
        new_id = cur.fetchone()[0]
    # Link the media URL to the page it was found on
    try:
        cur.execute('''
            INSERT INTO referers (url_uuid, referer_uuid) values (%s, %s)
        ''', [new_id, url_uuid])
        dbclient.commit()
    except psycopg2.IntegrityError:
        # This relationship already exists
        dbclient.rollback()
    # Record the cache location, but only if not already recorded
    cur.execute('''
        SELECT COUNT(*) FROM content WHERE url_uuid = %s
    ''', [new_id])
    if cur.fetchone()[0] < 1:
        cur.execute('''
            INSERT INTO content
            (url_uuid, cache_path)
            VALUES
            (%s, %s)
        ''', [new_id, file_name])
        dbclient.commit()
def queue_urls(links, dbclient, opts):
    '''
    Check the database for any queued URLS, and add to the list

    links: a single URL or a list of URLs to queue
    dbclient: database connection
    opts: options dict; honors force, queue_id, refresh_interval,
        overwrite, and records one-time warnings in opts['warned']

    Returns the number of items now in the download queue.
    '''
    out = Output(opts)
    cur = dbclient.cursor()
    if isinstance(links, str):
        links = [links]
    for url in links:
        if opts.get('force') is not True and not opts.get('queue_id'):
            # Check for URL in DB
            cur.execute('''
                SELECT uuid
                FROM urls
                WHERE url = %s
            ''', [url])
            if cur.rowcount > 0:
                # Warn only once per URL per session. Previously the
                # append was nested under an always-false condition, so
                # the URL was never recorded and the warning repeated.
                if url not in opts['warned']:
                    out.info('URL has already been downloaded; use --force if necessary')
                    opts['warned'].append(url)
                continue
        fields = ['url']
        args = [url]
        if opts.get('queue_id') is not None:
            fields.append('uuid')
            args.append(opts['queue_id'])
        if 'refresh_interval' in opts:
            fields.append('refresh_interval')
            args.append(opts['refresh_interval'])
        if 'overwrite' not in opts:
            opts['overwrite'] = False
        fields.append('overwrite')
        args.append(opts['overwrite'])
        query = 'INSERT INTO dl_queue ({}) VALUES ({})'.format(
            ', '.join(fields),
            ', '.join(['%s'] * len(args))
        )
        try:
            cur.execute(query, args)
            dbclient.commit()
        except psycopg2.IntegrityError:
            # This URL is already queued
            dbclient.rollback()
    cur.execute('SELECT count(*) FROM dl_queue')
    return cur.fetchone()[0]
def reprocess_urls(urls, patterns, dbclient=None):
    '''
    Reprocess the cached URLs which matches the pattern(s)
    '''
    urls = urls or []
    if isinstance(patterns, str):
        patterns = [patterns]
    cur = dbclient.cursor()
    # One regex-match clause per pattern, OR'd together
    clause = ' OR '.join(['url~%s'] * len(patterns))
    cur.execute('SELECT url FROM urls WHERE {}'.format(clause), patterns)
    urls.extend(row[0] for row in cur.fetchall())
    return urls
def queue_regexp(urls, pattern, dbclient, opts):
    '''
    Add the URLs matching the pattern to the download queue
    '''
    matcher = re.compile(pattern)
    matched = [candidate for candidate in urls if matcher.search(candidate)]
    queue_urls(matched, dbclient, opts)
def _rename(media_url, file_name, opts):
    '''
    When files are downloaded using status, rename as per a template
    '''
    out = Output(opts)
    template = opts.get('rename_template', '')
    if not template:
        # No template configured; keep the computed name
        return file_name
    comps = urllib.parse.urlparse(media_url)
    last_segment = comps[2].split('/')[-1]
    replacements = {
        'host': comps[1].split(':')[0],
        'path': '/'.join(comps[2].split('/')[:-2]),
        # File extension, empty when the last segment has no dot
        'ext': last_segment.split('.')[-1] if '.' in last_segment else '',
    }
    # Seed the running counter on first use
    if not opts.get('rename_count'):
        opts['rename_count'] = opts.get('rename_count_start', 0)
    if opts.get('rename_count_padding'):
        try:
            opts['rename_count_padding'] = int(opts['rename_count_padding'])
        except ValueError:
            out.warn('--rename-count-padding must be an integer, using 0')
            opts['rename_count_padding'] = 0
        template = template.replace('{count}', '{count:0>{rename_count_padding}}')
        replacements['rename_count_padding'] = opts['rename_count_padding']
    replacements['count'] = str(opts['rename_count'])
    opts['rename_count'] += 1
    return os.path.join(opts['save_path'], template.format(**replacements))
def parse_links(url, content, level, opts):
    '''
    Return the links from an HTML page

    url: the page the content came from, used to absolutize relative links
    content: the HTML body to scan
    level: current crawl depth; links are skipped once past opts['level']
    opts: options dict (level, search_src, span_hosts, render)
    '''
    out = Output(opts)
    hrefs = []
    try:
        # Get ready to do some html parsing
        soup = BeautifulSoup(content, 'html.parser')
        # Generate absolute URLs for every link on the page
        url_comps = urllib.parse.urlparse(url)
        tags = soup.find_all('a')
        if opts['search_src'] is True:
            # Also collect tags with a src= attribute (images, scripts, ...)
            tags = tags + soup.find_all(src=True)
        for link in tags:
            if level > int(opts['level']):
                continue
            href = urllib.parse.urljoin(url, link.get('href'))
            if opts['search_src'] is True and not link.get('href'):
                # src-only tags: absolutize the src instead
                href = urllib.parse.urljoin(url, link.get('src'))
            link_comps = urllib.parse.urlparse(href)
            # NOTE(review): this inspects the link *text*, not the href;
            # presumably meant to skip javascript: hrefs -- confirm
            if link.text.startswith('javascript'):
                continue
            # NOTE(review): this skips every link whenever opts['level']
            # is exactly 1, regardless of the current depth -- confirm
            # that this is the intended single-page behavior
            if int(opts.get('level', 0)) > 0 and int(opts.get('level', 0)) < 2:
                continue
            if opts['span_hosts'] is not True:
                # Stay on the original host unless spanning is enabled
                if not link_comps[1].startswith(url_comps[1].split(':')[0]):
                    continue
            # Strip any #fragment before recording
            hrefs.append(href.split('#')[0])
        # Render the page, and print it along with the links
        if opts.get('render', False) is True:
            out.info(soup.get_text())
        return hrefs
    except TypeError:
        # This URL probably isn't HTML
        return []
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,279 | techhat/webflayer | refs/heads/master | /plugins/parsers/mediawiki.py | # -*- coding: utf-8 -*-
'''
Web Flayer module for wikipedia
Grabs the raw data from a wikipedia page and dump it to a file, with the page's
title as the filename.
``/etc/flayer/flayer`` should have a ``wikipedia_cache_path`` specified to
download files to. However, if that is not specified, the file will be stored
in the current working directory.
Please note that this module exists solely as an example, and should not be
used to abuse the Wikipedia service.
If you like Wikipedia, please consider donating to help keep it alive. You can
donate at https://donate.wikimedia.org/.
'''
import requests
import flayer.tools
def func_map(url):
    '''
    Map a wikipedia URL to its handler; anything else is not ours
    '''
    return wikipedia_raw if 'wikipedia' in url else None
def wikipedia_raw(url_uuid, url, content):
    '''
    Grab raw wikipedia data
    '''
    cache_path = __opts__.get('wikipedia_cache_path', '.')
    # The page title (last path segment, sans query string) names the file
    title = url.split('?')[0].split('/')[-1]
    target = '{}/{}'.format(cache_path, title)
    response = requests.get(url, stream=True, params={'action': 'raw'})
    flayer.tools.status(
        response,
        url,
        url_uuid,
        target,
        dbclient=__dbclient__,
        opts=__opts__,
        context=__context__,
    )
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,280 | techhat/webflayer | refs/heads/master | /plugins/organizers/jsonld.py | # -*- coding: utf-8 -*-
'''
Web Flayer organizer module for JSON-LD
'''
import json
from bs4 import BeautifulSoup
import flayer.tools
def organize(url):
    '''
    Decide whether a page is using JSON-LD
    '''
    url_uuid, content = flayer.tools.get_url(
        url, dbclient=__dbclient__, opts=__opts__, context=__context__
    )
    found = set()
    markup = BeautifulSoup(content, 'html.parser')
    ld_scripts = markup.find_all('script', attrs={'type': 'application/ld+json'})
    for script_tag in ld_scripts:
        for raw in script_tag:
            try:
                found.add(json.loads(raw)['@type'])
            except json.decoder.JSONDecodeError as exc:
                # Record the parse failure alongside the discovered types
                found.add(exc)
    return list(found)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,281 | techhat/webflayer | refs/heads/master | /flayer/event.py | # -*- coding: utf-8 -*-
'''
Handle Salt event bus
'''
# 3rd party
import salt.config
import salt.utils.event
def bus(opts):
    '''
    Connect to Salt's event bus
    '''
    config_path = '/etc/salt/{}'.format(opts['salt_node'])
    salt_opts = salt.config.minion_config(config_path)
    # A send-only (listen=False) connection is all we need for fire()
    return salt.utils.event.get_event(
        opts['salt_node'],
        salt_opts['sock_dir'],
        salt_opts['transport'],
        opts=salt_opts,
        listen=False,
    )
def fire(tag, data, opts):
    '''
    Fire a message on the event bus, unless Salt events are disabled
    '''
    if opts['salt_events'] is not True:
        return
    opts['salt_event'].fire_master(data, tag)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,282 | techhat/webflayer | refs/heads/master | /flayer/loader.py | # -*- coding: utf-8 -*-
'''
Basic functions for Web Flayer
'''
# 3rd party
from salt.loader import LazyLoader
import salt.config
def parser(opts, context, urls, dbclient):
    '''
    Load spider modules
    '''
    master_opts = salt.config.master_config('/etc/salt/master')
    minion_opts = salt.config.minion_config('/etc/salt/minion')
    # Names injected into every parser module's namespace
    pack = {
        u'__master_opts__': master_opts,
        u'__minion_opts__': minion_opts,
        u'__opts__': opts,
        u'__context__': context,
        u'__urls__': urls,
        u'__dbclient__': dbclient,
    }
    return LazyLoader(opts['parser_dir'], minion_opts, tag=u'flayer/parser', pack=pack)
def search(opts, dbclient):
    '''
    Load search modules
    '''
    minion_opts = salt.config.minion_config('/etc/salt/minion')
    # Names injected into every search module's namespace
    pack = {
        u'__opts__': opts,
        u'__dbclient__': dbclient,
    }
    return LazyLoader(opts['search_dir'], minion_opts, tag=u'flayer/search', pack=pack)
def organize(opts, dbclient, context):
    '''
    Load organizer modules
    '''
    minion_opts = salt.config.minion_config('/etc/salt/minion')
    # Names injected into every organizer module's namespace
    pack = {
        u'__opts__': opts,
        u'__dbclient__': dbclient,
        u'__context__': context,
    }
    return LazyLoader(opts['organize_dir'], minion_opts, tag=u'flayer/organize', pack=pack)
def filter(opts, context, urls, dbclient):
    '''
    Load filter modules
    '''
    minion_opts = salt.config.minion_config('/etc/salt/minion')
    # Names injected into every filter module's namespace
    pack = {
        u'__opts__': opts,
        u'__context__': context,
        u'__urls__': urls,
        u'__dbclient__': dbclient,
    }
    return LazyLoader(opts['filter_dir'], minion_opts, tag=u'flayer/filter', pack=pack)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,283 | techhat/webflayer | refs/heads/master | /flayer/api.py | # -*- coding: utf-8 -*-
'''
API interface
'''
# Python
import json
import urllib
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
# Internal
import flayer.db
class FlayerHTTPServer(ThreadingMixIn, HTTPServer):
    '''
    HTTP server that handles each API request in its own thread
    '''
def MakeFlayerHTTPRequestHandler(opts, context):  # pylint: disable=invalid-name
    '''
    Return an HTTP handler class with ``opts`` and ``context`` bound in

    opts: the running agent's options dict; query parameters on incoming
        GET requests are written into it
    context: shared context dict, exposed read-only via show_context
    '''
    class FlayerHTTPRequestHandler(BaseHTTPRequestHandler):
        '''
        Process arguments
        '''
        def __init__(self, *args, **kwargs):
            # Each handler gets its own database connection
            self.dbclient = flayer.db.client(opts)
            super(FlayerHTTPRequestHandler, self).__init__(*args, **kwargs)

        def do_GET(self):  # pylint: disable=invalid-name
            '''
            Only GET requests are supported at this time
            '''
            qstr = self.path.lstrip('/?')
            data = urllib.parse.parse_qs(qstr)
            if 'list_queue' in data:
                queue = flayer.db.list_queue(self.dbclient, opts)
                self.send(json.dumps(queue))
                return
            if 'show_opts' in data:
                tmp_opts = opts.copy()
                # These two entries are not JSON-serializable; pop() avoids
                # a KeyError when they have not been set yet
                tmp_opts.pop('http_api', None)
                tmp_opts.pop('salt_event', None)
                # Sets are not JSON-serializable either. Check the *values*
                # (the original checked the keys and referenced an undefined
                # name, so this branch could never have worked)
                for key, value in tmp_opts.items():
                    if isinstance(value, set):
                        tmp_opts[key] = list(value)
                self.send(json.dumps(tmp_opts, indent=4), content_type='text/json')
                return
            if 'show_context' in data:
                self.send(json.dumps(context, indent=4), content_type='text/json')
                return
            # These options keep their full (list) form
            for item in ('headers', 'parser_dir'):
                if item in data:
                    opts[item] = data[item]
                    del data[item]
            for item in data:
                if data[item][0] in ('True', 'False', 'None'):
                    # bool() of a non-empty string is always True; map the
                    # literals to their actual Python values instead
                    opts[item] = {'True': True, 'False': False, 'None': None}[data[item][0]]
                elif item == 'user_agent':
                    opts['headers']['User-Agent'] = data[item][0]
                else:
                    opts[item] = data[item][0]
            self.send('True')
            # Stop the server if necessary
            if opts.get('stop') or opts.get('hard_stop') or opts.get('abort'):
                open(opts['stop_file'], 'a').close()

        def send(self, message, response=200, content_type='text/html'):
            '''
            Send a message to the client
            '''
            self.send_response(response)
            self.send_header('Content-type', content_type)
            self.end_headers()
            self.wfile.write(bytes(message, 'utf8'))

        def log_message(self, fmt, *args):  # pylint: disable=arguments-differ,unused-argument
            '''
            Don't log to the console
            '''
            return
    return FlayerHTTPRequestHandler
def run(opts, context):
    '''
    Main HTTP server
    '''
    bind_addr = opts.get('api_addr', '127.0.0.1')
    bind_port = int(opts.get('api_port', 42424))
    handler_cls = MakeFlayerHTTPRequestHandler(opts, context)
    httpd = FlayerHTTPServer((bind_addr, bind_port), handler_cls)
    # Expose the server so other code can call shutdown() on it
    opts['http_api'] = httpd
    api_thread = threading.Thread(target=httpd.serve_forever)
    api_thread.start()
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,284 | techhat/webflayer | refs/heads/master | /plugins/organizers/jsonld_recipes.py | # -*- coding: utf-8 -*-
'''
Web Flayer organizer module for JSON-LD Recipes
In order to use this plugin, a ``jsonld_domains`` table needs to be created:
.. code-block:: sql
create table jsonld_domains (domain text unique);
'''
# Python
import json
import urllib
# 3rd party
import requests
from bs4 import BeautifulSoup
# Internal
import flayer.tools
def organize(url):
    '''
    Decide whether a page has a JSON-LD recipe

    Fetches the URL, scans its ld+json scripts for a Recipe entity, records
    the domain in ``jsonld_domains`` and queues the URL for download when
    one is found. Returns a status string when queued, or an empty list.
    '''
    out = flayer.tools.Output(__opts__)
    cur = __dbclient__.cursor()
    insert_sql = '''
        INSERT INTO jsonld_domains (domain)
        VALUES (%s)
        ON CONFLICT DO NOTHING
    '''
    try:
        req = requests.get(url)
        content = req.text
    except requests.exceptions.SSLError:
        # SSLError subclasses ConnectionError, so this handler must come
        # first; previously it was unreachable and SSL failures were
        # silently skipped instead of retried without verification
        out.warn('SSL Error with {}, trying again without verification'.format(url))
        req = requests.get(url, verify=False)
        content = req.text
    except requests.exceptions.MissingSchema:
        return []
    except requests.exceptions.ConnectionError:
        return []
    soup = BeautifulSoup(content, 'html.parser')
    if 'jsonld_domains' not in __context__:
        __context__['jsonld_domains'] = []
    for tag in soup.find_all('script', attrs={'type': 'application/ld+json'}):
        for data in tag:
            try:
                script = json.loads(data)
                try:
                    script_type = script['@type'].lower()
                except (AttributeError, KeyError, TypeError):
                    # Not a usable JSON-LD entity
                    return []
                if script_type == 'recipe':
                    url_comps = urllib.parse.urlparse(url)
                    netloc = url_comps[1].split(':')[0]
                    # Remember that this domain serves JSON-LD recipes
                    cur.execute(insert_sql, [netloc])
                    __dbclient__.commit()
                    if netloc not in __context__['jsonld_domains']:
                        __context__['jsonld_domains'].append(netloc)
                    flayer.tools.queue_urls(url, __dbclient__, __opts__)
                    return 'Queueing for download: {}'.format(url)
            except json.decoder.JSONDecodeError:
                pass
    return []
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,285 | techhat/webflayer | refs/heads/master | /setup.py | #!/usr/bin/env python
from setuptools import setup
# Package metadata and install configuration for Web Flayer
setup(
    name='webflayer',
    version='0.6.5',
    description='Data mining tool',
    author='Joseph Hall',
    author_email='techhat@gmail.com',
    url='https://github.com/techhat/webflayer',
    # NOTE(review): the importable package in this repo appears to be
    # ``flayer`` (all modules import ``flayer.*``), but ``webflayer`` is
    # listed here -- confirm which directory actually ships
    packages=['webflayer'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    install_requires=[
        'beautifulsoup4',
        'psycopg2',
        'pyyaml',
        'requests',
        'termcolor',
    ],
    # Command-line entry point
    scripts=['scripts/flay'],
    # Ship the database schema alongside the package
    data_files=[
        ('share/webflayer', ['schema/webflayer.sql']),
    ],
)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,286 | techhat/webflayer | refs/heads/master | /flayer/scripts.py | # -*- coding: utf-8 -*-
# pylint: disable=too-many-nested-blocks,too-many-branches
'''
Basic functions for Web Flayer
'''
# Python
import os
import sys
import time
import copy
import json
import pprint
import urllib
import logging
# 3rd party
import yaml
import psutil
import requests
from bs4 import BeautifulSoup
from salt.loader import LazyLoader
import salt.config
# Internal
import flayer.db
import flayer.api
import flayer.tools
import flayer.event
import flayer.config
import flayer.loader
from flayer.version import __version__
log = logging.getLogger(__name__)
def daemonize(opts, context):
    '''
    Spawn a new process

    Standard UNIX double-fork: the intermediate parents exit immediately so
    the surviving grandchild is detached from the controlling terminal and
    cannot reacquire one. The daemonized process then hosts the HTTP API.
    '''
    out = flayer.tools.Output(opts)
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError as exc:
        out.error('fork #1 failed: {} ({})'.format(exc.errno, exc))
        sys.exit(1)
    # Detach: new session, neutral cwd and umask
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit the session leader; the grandchild continues as the daemon
            sys.exit(0)
    except OSError as exc:
        out.error('fork #2 failed: {} ({})'.format(exc.errno, exc))
        sys.exit(1)
    flayer.api.run(opts, context)
def run(run_opts=None): # pylint: disable=too-many-return-statements
    '''
    Run the program

    run_opts: optional dict of overrides merged into the loaded config.

    Handles one-shot informational/control flags first, then either runs
    the main download loop (when no other instance owns the pid file) or
    hands the URLs off to the already-running instance's queue.
    '''
    if run_opts is None:
        run_opts = {}
    opts, urls, parser = flayer.config.load(run_opts)
    context = {}
    # A stop/abort request just drops the stop file for the running instance
    if opts.get('stop') or opts.get('hard_stop') or opts.get('abort'):
        open(opts['stop_file'], 'a').close()
        return
    if opts['daemon']:
        daemonize(opts, context)
    out = flayer.tools.Output(opts)
    dbclient = flayer.db.client(opts)
    opts['salt_event'] = flayer.event.bus(opts)
    # One-shot informational and control flags
    if opts.get('version'):
        out.info(__version__)
        return
    if opts.get('show_opts'):
        out.info(pprint.pformat(opts))
        return
    if opts.get('show_context'):
        out.info(pprint.pformat(context))
        return
    if opts.get('list_queue', False) is True:
        flayer.db.list_queue(dbclient, opts)
        return
    if opts.get('show_url_metadata'):
        flayer.db.get_url_metadata(dbclient, opts)
        return
    if opts.get('pause'):
        flayer.db.pause(dbclient, opts, opts['pause'])
        return
    if opts.get('unpause'):
        flayer.db.unpause(dbclient, opts, opts['unpause'])
        return
    # Keeps track of the URLs that we've already warned about this session
    opts['warned'] = []
    organizers = flayer.loader.organize(opts, dbclient, context)
    organize_engine = None
    organize_fun = None
    if opts.get('search_organize'):
        for organize_engine in opts['search_organize']:
            organize_fun = '.'.join([organize_engine, 'organize'])
            if organize_fun not in organizers:
                out.error('The {} organizer is not available'.format(organize_engine))
    if opts.get('search'):
        searches = flayer.loader.search(opts, dbclient)
        engine = opts['search'][0]
        fun = '.'.join([engine, 'search'])
        if fun not in searches:
            out.error('The {} search engine is not available'.format(engine))
        else:
            # Run each search hit through the organizer, if one was chosen
            for item in searches[fun]():
                if organize_engine is not None:
                    ret = organizers[organize_fun](item)
                    if ret:
                        if isinstance(ret, str):
                            out.info(ret)
                        else:
                            out.info(pprint.pformat(ret))
                else:
                    out.info(item)
        if not organize_fun:
            return
    if opts.get('input_file'):
        # Queue URLs read from a file, or from stdin when the name is '-'
        if opts['input_file'] == '-':
            flayer.tools.queue_urls(sys.stdin.readlines(), dbclient, opts)
        else:
            try:
                with open(opts['input_file'], 'r') as ifh:
                    links = ifh.read().splitlines()
                flayer.tools.queue_urls(links, dbclient, opts)
            except OSError as exc:
                out.error('There was an error reading {}: {}'.format(opts['input_file'], exc))
    if opts.get('queue', False) is True:
        count = flayer.tools.queue_urls(urls, dbclient, opts)
        out.info('Added item(s) to the queue, {} items now queued'.format(count))
        return
    parsers = flayer.loader.parser(opts, context, urls, dbclient)
    filters = flayer.loader.filter(opts, context, urls, dbclient)
    if opts['reprocess']:
        urls = flayer.tools.reprocess_urls(urls, opts['reprocess'], dbclient)
    if not urls and opts['use_queue'] is True:
        flayer.db.pop_dl_queue(dbclient, urls, opts)
    if not urls:
        if not opts['daemon'] and not organize_fun:
            parser.print_help()
        return
    # Write pid file
    pid = os.getpid()
    if not os.path.exists(opts['pid_file']):
        opts['already_running'] = False
        os.makedirs(os.path.dirname(opts['pid_file']), mode=0o700, exist_ok=True)
    with open(opts['pid_file'], 'w') as pfh:
        pfh.write(str(pid))
        pfh.close()
    # Write the metadata file
    metadata = {
        'id': opts['id'],
        'pid': pid,
        'api_addr': opts['api_addr'],
        'api_port': opts['api_port'],
        'dbname': opts['dbname'],
        'dbhost': opts['dbhost'],
    }
    with open(opts['meta_file'], 'w') as fh_:
        json.dump(metadata, fh_, indent=4)
    # NOTE(review): 'already_running' is only assigned above when the pid
    # file was missing; presumably flayer.config.load sets it otherwise --
    # confirm, or this subscript could raise KeyError
    if not opts['already_running'] or opts.get('single') is True:
        level = 0
        # Use a while instead of for, because the list is expected to expand
        while True:
            url_uuid = None
            if opts['stop']:
                opts['http_api'].shutdown()
                break
            if os.path.exists(opts['stop_file']):
                out.warn('stop file found, exiting')
                os.remove(opts['stop_file'])
                try:
                    opts['http_api'].shutdown()
                except KeyError:
                    pass
                break
            # Refill from the database queue when the local list runs dry
            if len(urls) < 1 and opts['use_queue'] is True:
                flayer.db.pop_dl_queue(dbclient, urls, opts)
                if opts['urls']:
                    flayer.tools.queue_urls(opts['urls'], dbclient, opts)
                    opts['urls'] = []
            try:
                url = urls.pop(0)
            except IndexError:
                # Nothing queued: a daemon keeps polling, a one-shot exits
                if opts['daemon']:
                    time.sleep(.1)
                    continue
                else:
                    break
            if url.strip() == '':
                continue
            # Give pre_flight parser modules first crack at the URL
            for mod in parsers:
                if isinstance(url_uuid, int) and url_uuid == 0:
                    break
                if not mod.endswith('.pre_flight'):
                    continue
                url_uuid, url, content = parsers[mod](url)
            if url_uuid is None:
                try:
                    url_uuid, content = flayer.tools.get_url(
                        url, dbclient=dbclient, opts=opts, context=context
                    )
                except requests.exceptions.MissingSchema as exc:
                    out.error(exc)
                    continue
            # Display the source of the URL content
            if opts.get('source', False) is True:
                out.info(content)
            hrefs = flayer.tools.parse_links(url, content, level, opts)
            level += 1
            if opts.get('links', False) is True:
                out.info('\n'.join(hrefs))
            if opts.get('queuelinks', False) is True:
                flayer.tools.queue_urls(hrefs, dbclient, opts)
            if opts.get('use_parsers', True) is True:
                try:
                    flayer.tools.process_url(url_uuid, url, content, parsers)
                except TypeError:
                    out.warn('No matching parsers were found')
            if opts.get('queue_re'):
                flayer.tools.queue_regexp(hrefs, opts['queue_re'], dbclient, opts)
            if opts.get('single') is True:
                break
        # Clean up the pid and metadata files on the way out
        try:
            os.remove(opts['pid_file'])
        except FileNotFoundError:
            pass
        try:
            os.remove(opts['meta_file'])
        except FileNotFoundError:
            pass
    else:
        # Another instance owns the pid file; verify it is really running
        verified_running = False
        for process in psutil.process_iter():
            try:
                if 'chromium' in process.cmdline()[0]:
                    continue
                if 'python' in process.cmdline()[0]:
                    cmdline = ' '.join(process.cmdline())
                    if 'flay' in cmdline:
                        if os.getpid() != process.pid:
                            verified_running = True
                            if opts['daemon']:
                                out.error(
                                    'flay already running, or improperly stopped',
                                    force=True,
                                )
                                sys.exit(1)
                            else:
                                out.info('flay already running, adding item(s) to the queue')
            except IndexError:
                pass
        if verified_running is False:
            out.error(
                'flay not found in process list, check {}'.format(
                    opts['stop_file']
                ), force=True
            )
        # Hand our URLs to the running instance via the shared queue
        flayer.tools.queue_urls(urls, dbclient, opts)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,287 | techhat/webflayer | refs/heads/master | /plugins/searches/wikipedia.py | # -*- coding: utf-8 -*-
'''
Web Flayer search module for Wikipedia
'''
import requests
from bs4 import BeautifulSoup
def search(opts):
    '''
    Perform a search in Wikipedia and return the resulting article URLs
    '''
    query = opts['search'][1].replace(' ', '+')
    url = ('https://en.wikipedia.org/w/index.php?search={}'
           '&title=Special:Search&profile=default&fulltext=1').format(query)
    req = requests.get(url)
    soup = BeautifulSoup(req.text, 'html.parser')
    # Skip navigation/meta pages; keep only real article links
    skip_tokens = ('index.php', 'Portal:', 'Help:', 'Special:', 'Wikipedia:')
    urls = set()
    for tag in soup.find_all('a'):
        link = tag.attrs.get('href')
        if link is None:
            continue
        if any(token in link for token in skip_tokens):
            continue
        if '/wiki/' not in link:
            continue
        if link.startswith('/wiki/'):
            # Absolutize site-relative article links
            link = 'https://en.wikipedia.org' + link
        if 'wikipedia.org' not in link:
            continue
        urls.add(link)
    return list(urls)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,288 | techhat/webflayer | refs/heads/master | /plugins/parsers/jsonld_recipes.py | # -*- coding: utf-8 -*-
'''
Web Flayer module for JSON-LD Recipes
In order to use this plugin, a ``jsonld_domains`` table needs to be created:
.. code-block:: sql
create table jsonld_domains (domain text unique);
'''
import os
import json
import html
import flayer.tools
from bs4 import BeautifulSoup
def func_map(url):
    '''
    Map URLs on known JSON-LD recipe domains to the page parser
    '''
    domains = __context__.get('jsonld_domains')
    if domains is None:
        # First call: load the known domains from the database and cache them
        cur = __dbclient__.cursor()
        cur.execute('SELECT domain FROM jsonld_domains')
        domains = [row[0] for row in cur.fetchall()]
        __context__['jsonld_domains'] = domains
    for domain in domains:
        if domain in url:
            return parse_page
    return None
def parse_page(url_uuid, url, content):
    '''
    Scan a page for ``<script type="application/ld+json">`` blocks and
    dispatch Recipe and ItemList entities to their handlers.

    :param url_uuid: UUID of the URL record (unused; kept for the parser
        plugin interface)
    :param url: the page URL
    :param content: the raw page content
    '''
    soup = BeautifulSoup(content, 'html.parser')
    for tag in soup.find_all('script', attrs={'type': 'application/ld+json'}):
        # BUG FIX: the original loop variable shadowed the ``content``
        # parameter, so parse_recipe() silently received the script text
        # instead of (apparently intended) page content. The shadowing is
        # removed; the script text is still what gets passed, preserving
        # the original runtime behaviour.
        for script_text in tag:
            script = json.loads(script_text)
            if script['@type'].lower() == 'recipe':
                parse_recipe(url, script_text, script)
            if script['@type'].lower() == 'itemlist':
                parse_list(script['itemListElement'])
def parse_list(list_element):
    '''
    Queue every URL referenced by an ItemList entity for download.

    :param list_element: the ``itemListElement`` array from the JSON-LD
    '''
    found = [entry['url'] for entry in list_element]
    flayer.tools.queue_urls(found, __dbclient__, __opts__)
def parse_recipe(url, content, recipe_dict):
    '''
    Cache a recipe in three forms under the configured cache path:
    the raw content (``site/``), the JSON-LD dict (``site-json/``) and a
    plain-text rendering (``site-txt/``).

    :param url: URL the recipe came from; its host+path become the cache
        subdirectory
    :param content: raw content to store in the ``site/`` cache
    :param recipe_dict: the decoded JSON-LD Recipe entity
    '''
    cache_path = __opts__.get('recipe_cache_path', '')
    site_dir = url.split('://')[1]

    # Plain-text rendering: name, then ingredients, then instructions
    text_data = '{}\n\n'.format(recipe_dict['name'])
    try:
        for item in recipe_dict['recipeIngredient']:
            text_data += '{}\n'.format(item)
        text_data += '\n'
    except KeyError:
        # Recipe without an ingredient list; skip the section
        pass

    # Normalize recipeInstructions to a list (it may be a bare string or
    # missing entirely)
    instructions = recipe_dict.get('recipeInstructions')
    if isinstance(instructions, str):
        instructions = [instructions]
    elif instructions is None:
        instructions = []
    recipe_dict['recipeInstructions'] = instructions
    for item in instructions:
        text_data += '{}\n'.format(html.unescape(item))

    # The three cache writes shared identical mkdir/write boilerplate;
    # factored into _write_cache().
    _write_cache(os.path.join(cache_path, 'site', site_dir),
                 'index.html', content)
    _write_cache(os.path.join(cache_path, 'site-json', site_dir),
                 'index.json', json.dumps(recipe_dict, indent=4))
    _write_cache(os.path.join(cache_path, 'site-txt', site_dir),
                 'index.txt', text_data)


def _write_cache(dir_path, file_name, data):
    '''
    Ensure ``dir_path`` exists (mode 0755) and write ``data`` to
    ``file_name`` inside it.
    '''
    os.makedirs(dir_path, mode=0o0755, exist_ok=True)
    with open(os.path.join(dir_path, file_name), 'w') as ofh:
        ofh.write(data)
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,289 | techhat/webflayer | refs/heads/master | /flayer/config.py | # -*- coding: utf-8 -*-
'''
Config for Web Flayer
'''
# Python
import os
import argparse
# 3rd party
import yaml
# Internal
from flayer.version import __version__
def load(opts):
    '''
    Build the effective Web Flayer configuration.

    Precedence, lowest to highest: config-file values < FLAYER_* environment
    variables < command-line arguments. Plugin directory lists accumulate
    from all three sources, with hard-coded defaults applied last.

    :param opts: dict to populate in place (also returned)
    :return: tuple of ``(opts, urls, parser)`` where ``urls`` is the list of
        positional URLs from the command line and ``parser`` is the argparse
        parser (so callers can print usage)
    '''
    # Flags consumed elsewhere in the program; the *_dir lists accumulate
    # entries from the CLI, the config file and the defaults further down.
    opts['already_running'] = True
    opts['parser_dir'] = []
    opts['search_dir'] = []
    opts['organize_dir'] = []
    opts['filter_dir'] = []
    parser = argparse.ArgumentParser()
    # Basic configuration
    parser.add_argument(
        '--config-file',
        dest='config_file',
        action='store',
        default='/etc/flayer/flayer',
        help='Default location for the config file',
    )
    parser.add_argument(
        '--run-dir',
        dest='run_dir',
        action='store',
        default=None,  # This is defined further down
        help='Default location for the PID file, stop file, etc',
    )
    parser.add_argument(
        '--parser-dir',
        dest='parser_dir',
        action='append',
        default=[],
        help='Location for flayer parser plugins',
    )
    parser.add_argument(
        '--search-dir',
        dest='search_dir',
        action='append',
        default=[],
        help='Location for flayer search plugins',
    )
    parser.add_argument(
        '--organize-dir',
        dest='organize_dir',
        action='append',
        default=[],
        help='Location for flayer organizer plugins',
    )
    parser.add_argument(
        '--filter-dir',
        dest='filter_dir',
        action='append',
        default=[],
        help='Location for flayer filter plugins',
    )
    parser.add_argument(
        '--salt-node',
        dest='salt_node',
        action='store',
        default='minion',
        help='master or minion, default minion',
    )
    # Control
    parser.add_argument(
        '--id',
        dest='id',
        action='store',
        help='The ID of the flay agent to control',
    )
    parser.add_argument(
        '--daemon',
        dest='daemon',
        action='store_true',
        default=False,
        help='Start as a background service',
    )
    parser.add_argument(
        '--stop',
        dest='stop',
        action='store_true',
        help='Stop after the current download',
    )
    parser.add_argument(
        '--hard-stop',
        dest='hard_stop',
        action='store_true',
        help='Stop, delete and requeue current download, then exit',
    )
    parser.add_argument(
        '--abort',
        dest='abort',
        action='store_true',
        help='Stop, delete current download, exit',
    )
    parser.add_argument(
        '--api-addr',
        dest='api_addr',
        action='store',
        default='127.0.0.1',
        help='The host address of the API',
    )
    parser.add_argument(
        '--api-port',
        dest='api_port',
        action='store',
        default=42424,
        help='The host port of the API',
    )
    parser.add_argument(
        '--salt-events',
        dest='salt_events',
        action='store_true',
        default=False,
        help="Whether to fire events on Salt's event bus",
    )
    # Downloading
    parser.add_argument(
        '-f', '--force',
        dest='force',
        action='store_true',
        default=False,
        help='Force flayer to re-download the URL(s)',
    )
    parser.add_argument(
        '--overwrite',
        dest='overwrite',
        action='store_true',
        default=False,
        help='Force flayer to overwrite an existing file',
    )
    parser.add_argument(
        '-w', '--wait',
        dest='wait',
        action='store',
        default=0,
        help='Amount of time to wait between requests',
    )
    parser.add_argument(
        '--domain-wait',
        dest='domain_wait',
        action='store',
        default=0,
        help='Amount of time to wait between requests, per domain',
    )
    parser.add_argument(
        '--random-wait',
        dest='random_wait',
        action='store_true',
        default=False,
        help='Random wait (default from 1 to 10 seconds) between requests',
    )
    parser.add_argument(
        '-s', '--single',
        dest='single',
        action='store_true',
        default=False,
        help='Process a single URL, separate from any other current processes',
    )
    parser.add_argument(
        '-S', '--server-response',
        dest='include_headers',
        action='store_true',
        default=False,
        help='Whether to display (pprint) the headers when requesting a URL',
    )
    parser.add_argument(
        '-i', '--input-file',
        dest='input_file',
        action='store',
        help='A file containing a list of links to download',
    )
    parser.add_argument(
        '-H', '--header',
        dest='headers',
        action='append',
        help='A header line to be included with the request',
    )
    parser.add_argument(
        '-d', '--data',
        dest='data',
        action='store',
        default=None,
        help='Data to be POSTed in the request',
    )
    parser.add_argument(
        '--use-queue',
        dest='use_queue',
        action='store_true',
        default=True,
        help="Process the items in the download queue (default)",
    )
    parser.add_argument(
        '--no-queue',
        dest='use_queue',
        action='store_false',
        help="Don't process any of the items in the download queue",
    )
    parser.add_argument(
        '--queue',
        dest='queue',
        action='store_true',
        default=False,
        help='Add the URLs to the download queue and exit',
    )
    parser.add_argument(
        '-p', '--reprocess',
        dest='reprocess',
        action='store',
        default=None,
        nargs='+',
        help='Reprocess URLs matching a postgresql-style regexp',
    )
    parser.add_argument(
        '--no-db-cache',
        dest='no_db_cache',
        action='store_true',
        default=False,
        help="Don't cache the target in the database",
    )
    parser.add_argument(
        '--queue-links',
        dest='queuelinks',
        action='store_true',
        default=False,
        help='Add the absolute URLs from the page to the download queue',
    )
    parser.add_argument(
        '--queue-re', '--queue-regex', '--queue-regexp',
        dest='queue_re',
        action='store',
        default=None,
        help='Add the absolute URLs matching the regexp to the download queue',
    )
    parser.add_argument(
        '--search-src',
        dest='search_src',
        action='store_true',
        default=False,
        help='Search tags with src attribute, in addition to hrefs',
    )
    parser.add_argument(
        '-x', '--force-directories',
        dest='force_directories',
        action='store_true',
        default=False,
        help='When downloading, force a directory structure',
    )
    parser.add_argument(
        '--save-path',
        dest='save_path',
        action='store',
        default=None,
        help='When downloading, use this path as the download root',
    )
    parser.add_argument(
        '--save-html',
        dest='save_html',
        action='store_true',
        default=True,
        help='When downloading, save HTML as well as binary files',
    )
    parser.add_argument(
        '--no-save-html',
        dest='save_html',
        action='store_false',
        help='When downloading (with --save-path), do NOT save HTML files',
    )
    parser.add_argument(
        '--use-parsers',
        dest='use_parsers',
        action='store_true',
        default=True,
        help="Download the URL, using the parsers to process it (default)",
    )
    parser.add_argument(
        '--no-parsers',
        dest='use_parsers',
        action='store_false',
        help="Just download the URL; don't call any parsers to process it",
    )
    parser.add_argument(
        '--user-agent',
        dest='user_agent',
        action='store',
        default='flay {}'.format(__version__),
        help='User agent to report to the server',
    )
    parser.add_argument(
        '--refresh-interval',
        dest='refresh_interval',
        action='store',
        help="Auto-populate the paused_until field in the download queue",
    )
    parser.add_argument(
        '--pause',
        dest='pause',
        action='store',
        nargs='+',
        help='Name of a queued URL to pause',
    )
    parser.add_argument(
        '--unpause',
        dest='unpause',
        action='store',
        nargs='+',
        help='Name of a queued URL to unpause',
    )
    parser.add_argument(
        '--verify',
        dest='verify',
        action='store',
        default=True,
        help='Set to False to ignore SSL errors',
    )
    # Templating
    parser.add_argument(
        '--rename', '--rename-template',
        dest='rename_template',
        action='store',
        help='A template to use for renaming downloads',
    )
    parser.add_argument(
        '--rename-count-start',
        dest='rename_count_start',
        action='store',
        default=0,
        help='Number to start {count} at',
    )
    parser.add_argument(
        '--rename-count-padding',
        dest='rename_count_padding',
        action='store',
        default=0,
        help='Zero-padding to be used for {count}',
    )
    # Recursion
    parser.add_argument(
        '--level',
        dest='level',
        action='store',
        default=0,
        help='Specify recursion maximum depth level depth',
    )
    parser.add_argument(
        '--span-hosts',
        dest='span_hosts',
        action='store_true',
        default=False,
        help='Enable spanning across hosts when doing recursive retrieving',
    )
    # Built-in tools
    parser.add_argument(
        '--search',
        dest='search',
        action='store',
        nargs='+',
        help='Perform a search, using the specified engine',
    )
    parser.add_argument(
        '--search-limit',
        dest='search_limit',
        action='store',
        default=30,
        help='Maximum number of results for searches',
    )
    parser.add_argument(
        '--search-organize',
        dest='search_organize',
        action='store',
        nargs='+',
        help='Send --search results to a organizer engine',
    )
    # Informational
    parser.add_argument(
        '--source',
        dest='source',
        action='store_true',
        default=False,
        help="Display the URL's source",
    )
    parser.add_argument(
        '--render', '--dump',
        dest='render',
        action='store_true',
        default=False,
        help='Render the content',
    )
    parser.add_argument(
        '--links',
        dest='links',
        action='store_true',
        default=False,
        help='Display by a list of the absolute URLs in the page',
    )
    parser.add_argument(
        '-l', '--list-queue',
        dest='list_queue',
        action='store_true',
        default=False,
        help='List the remaining URLS in the download queue',
    )
    parser.add_argument(
        '--show-metadata', '--show-url-metadata',
        dest='show_url_metadata',
        action='append',
        help='Show any metadata for the given URL',
    )
    parser.add_argument(
        '--show-opts',
        dest='show_opts',
        action='store_true',
        default=False,
        help='Return a copy of opts for this instance',
    )
    parser.add_argument(
        '--show-context',
        dest='show_context',
        action='store_true',
        default=False,
        help='Return a copy of the context for this instance',
    )
    parser.add_argument(
        '-v', '--verbose',
        dest='verbose',
        action='store_true',
        default=False,
        help="Display more information about what's going on",
    )
    parser.add_argument(
        '--version',
        dest='version',
        action='store_true',
        help='Display the version and exit',
    )
    # Remaining positional arguments are the URLs to process
    parser.add_argument(dest='urls', nargs=argparse.REMAINDER)
    cli_opts = parser.parse_args().__dict__
    # Load in the config file
    with open(cli_opts['config_file'], 'r') as ifh:
        opts.update(yaml.safe_load(ifh.read()))
    # Preserve the ID from being overwritten by "None"
    id_ = opts.get('id')
    if cli_opts.get('id') is None:
        cli_opts['id'] = id_
    # Plugin dirs accumulate rather than override
    cli_opts['parser_dir'].extend(opts['parser_dir'])
    cli_opts['search_dir'].extend(opts['search_dir'])
    cli_opts['organize_dir'].extend(opts['organize_dir'])
    cli_opts['filter_dir'].extend(opts['filter_dir'])
    # Override with any environment variables
    for param in set(list(opts) + list(cli_opts)):
        env_var = 'FLAYER_{}'.format(param.upper())
        if env_var in os.environ:
            cli_opts[param] = os.environ[env_var]
    # Lay down CLI opts on top of config file opts and environment
    opts.update(cli_opts)
    # parser_dir is an array
    if not opts['parser_dir']:
        opts['parser_dir'] = ['/srv/flayer/plugins/parsers']
    # search_dir is an array
    if not opts['search_dir']:
        opts['search_dir'] = ['/srv/flayer/plugins/searchers']
    # organize_dir is an array
    if not opts['organize_dir']:
        opts['organize_dir'] = ['/srv/flayer/plugins/organizers']
    # filter_dir is an array
    if not opts['filter_dir']:
        opts['filter_dir'] = ['/srv/flayer/plugins/filters']
    # Set the verify argument for requests ('False' may arrive as a string
    # from the CLI or environment)
    if opts['verify'] in ('False', False):
        opts['verify'] = False
    # Set up any headers for the agent; entries may be dicts or
    # "Name: value" strings
    if opts['headers'] is None:
        opts['headers'] = []
    headers = {}
    for header in list(opts['headers']):
        if isinstance(header, dict):
            headers[header] = opts['headers'][header]
        else:
            headers[header.split(':')[0]] = ':'.join(header.split(':')[1:]).strip()
    opts['headers'] = headers
    if opts['user_agent']:
        opts['headers']['User-Agent'] = opts['user_agent']
    # POST when a request body was supplied, otherwise GET
    if opts.get('data'):
        opts['method'] = 'POST'
    else:
        opts['method'] = 'GET'
    urls = opts['urls']
    if not opts.get('id'):
        opts['id'] = 'unknown'
    # Derive the runtime file locations from run_dir
    if opts.get('run_dir') is None:
        opts['run_dir'] = os.path.join('/var/run/flayer', opts['id'])
    opts['pid_file'] = os.path.join(opts['run_dir'], 'pid')
    opts['stop_file'] = os.path.join(opts['run_dir'], 'stop')
    opts['meta_file'] = os.path.join(opts['run_dir'], 'meta')
    return opts, urls, parser
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,290 | techhat/webflayer | refs/heads/master | /salt/_modules/flayer.py | # -*- coding: utf-8 -*-
'''
Salt execution module for Web Flayer
'''
# python
import os
import json
# salt
from salt.exceptions import CommandExecutionError
from salt.ext import six
import salt.utils.http
def __virtual__():
'''
Only requires Salt
'''
return True
def _query(decode=False, id_=None, **params):
    '''
    Send a command to the Web Flayer API of the agent ``id_``.

    When ``id_`` is None, fall back to the agent named ``unknown`` or, if
    exactly one agent is running, to that agent.
    '''
    agents = __grains__['flayer_agents']
    if id_ is None:
        if 'unknown' in agents:
            id_ = 'unknown'
        elif len(list(agents)) == 1:
            id_ = list(agents)[0]
        else:
            raise CommandExecutionError('A valid Web Flayer id_ was not specified')
    elif id_ not in agents:
        raise CommandExecutionError('{} is not running'.format(id_))
    agent = agents[id_]
    endpoint = 'http://{0}:{1}'.format(
        agent.get('api_addr', '127.0.0.1'),
        agent.get('api_port', 42424),
    )
    return salt.utils.http.query(
        endpoint,
        params=params,
        decode=decode,
        decode_type='json',
    )
def queue(urls, force=False, data=None):
    '''
    Queue up a URL (string) or URLs (list) for download.
    '''
    single = isinstance(urls, six.string_types)
    _query(urls=[urls] if single else urls, force=force, data=data)
def start(
        config_file='/etc/flayer/flayer',
        run_dir='/var/run/flayer',
        parser_dir=None,
        id_=None,
        api_addr='127.0.0.1',
        api_port=42424,
):
    '''
    Start the Web Flayer daemon in the background.

    :param config_file: path to the flayer config file
    :param run_dir: directory for the PID/stop/meta files
    :param parser_dir: a parser plugin directory, or a list of them
    :param id_: the agent ID to start under
    :param api_addr: address for the agent API to bind
    :param api_port: port for the agent API
    :raises Exception: when the config file or run dir is missing, or
        ``parser_dir`` is invalid
    '''
    if not os.path.exists(config_file):
        raise Exception('Config file ({}) not found'.format(config_file))
    # BUG FIX: the original referenced an undefined ``pid_file`` name here
    # (NameError); the PID file lives inside run_dir, so check that instead.
    if not os.path.exists(run_dir):
        raise Exception('Run dir ({}) not found'.format(run_dir))
    # BUG FIX: ``args`` must be a list -- the original built a tuple and
    # then called .append()/.extend() on it, which raises AttributeError.
    args = [
        'flayer', '--daemon',
        '--config-file', config_file,
        '--run-dir', run_dir,
    ]
    if parser_dir is not None:
        if isinstance(parser_dir, str):
            parser_dir = [parser_dir]
        if not isinstance(parser_dir, list):
            raise Exception('parser_dir must be a string or list')
        for item in parser_dir:
            if not os.path.exists(item):
                # BUG FIX: the original never .format()ed the path in
                raise Exception('parser_dir {} does not exist'.format(item))
        args.append('--parser-dir')
        args.extend(parser_dir)
    if id_ is not None:
        args.extend(['--id', id_])
    if api_addr is not None:
        args.extend(['--api-addr', api_addr])
    if api_port is not None:
        args.extend(['--api-port', api_port])
    __salt__['cmd.run_bg'](args)
def stop(id_=None):
    '''
    Stop the Web Flayer daemon

    Asks the agent to exit gracefully after its current download finishes.

    :param id_: ID of the agent to control (auto-detected when None)
    '''
    _query(stop=True, id_=id_)
def hard_stop(id_=None):
    '''
    Hard stop the Web Flayer daemon

    The current download is deleted and requeued before the agent exits.

    :param id_: ID of the agent to control (auto-detected when None)
    '''
    _query(hard_stop=True, id_=id_)
def abort(id_=None):
    '''
    Abort the Web Flayer daemon

    The current download is deleted (not requeued) before the agent exits.

    :param id_: ID of the agent to control (auto-detected when None)
    '''
    _query(abort=True, id_=id_)
def list_queue(id_=None):
    '''
    List the contents of the queue

    :param id_: ID of the agent to query (auto-detected when None)
    '''
    # BUG FIX: ``id_`` was accepted but never passed through, so the
    # queue of the wrong (auto-detected) agent could be listed.
    return _query(list_queue=True, id_=id_)
def show_opts(id_=None):
    '''
    List the opts for the daemon

    :param id_: ID of the agent to query (auto-detected when None)
    '''
    return _query(show_opts=True, id_=id_)
def active_downloads(id_=None):
    '''
    Show active downloads

    :param id_: ID of the agent to query (auto-detected when None)
    '''
    # BUG FIX: pass ``id_`` through (it was silently ignored), and default
    # the decoded payload to a dict -- the original default of '' has no
    # .get() and would raise AttributeError on an empty response.
    context = _query(decode=True, show_context=True, id_=id_).get('dict', {})
    return context.get('dl_data', {})
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,291 | techhat/webflayer | refs/heads/master | /salt/_grains/flayer.py | # -*- coding: utf-8 -*-
'''
Salt grains for Web Flayer
'''
# Python
import os
import json
# 3rd party
import psutil
def __virtual__():
'''
Only requires Salt
'''
return True
def process():
    '''
    Return metadata for any running Web Flayer instances, keyed by agent
    ID, as the ``flayer_agents`` grain.
    '''
    ret = {}
    run_dir = __opts__.get('flayer_run_dir', '/var/run/flayer')
    # BUG FIX: os.listdir() raised FileNotFoundError on hosts where no
    # agent has ever been started
    if not os.path.isdir(run_dir):
        return {'flayer_agents': ret}
    for agent in os.listdir(run_dir):
        meta_file = os.path.join(run_dir, agent, 'meta')
        if not os.path.exists(meta_file):
            continue
        with open(meta_file, 'r') as mfh:
            meta = json.load(mfh)
        try:
            # Only report agents whose recorded PID is still alive
            if psutil.Process(meta['pid']).cmdline()[0]:
                ret[meta['id']] = meta
        except (psutil.NoSuchProcess, IndexError):
            # BUG FIX: a stale meta file (process exited) made the whole
            # grain collection crash; skip dead agents instead
            continue
    return {'flayer_agents': ret}
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,292 | techhat/webflayer | refs/heads/master | /flayer/version.py | # -*- coding: utf-8 -*-
'''
Manage the program version
'''
# Single source of truth for the Web Flayer release number; imported by
# flayer.config to build the default User-Agent string.
__version__ = '0.6.4'
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,293 | techhat/webflayer | refs/heads/master | /flayer/log.py | # -*- coding: utf-8 -*-
'''
Logging module for Web Flayer
'''
import os
import logging
def setup():
    '''
    Configure root logging for Web Flayer.

    The level defaults to INFO and may be overridden via the
    ``FLAYER_LOGLEVEL`` environment variable.
    '''
    handler = logging.StreamHandler()
    # BUG FIX: setFormatter() requires a Formatter instance; the original
    # passed the BASIC_FORMAT *string*, which breaks at emit time.
    handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    # BUG FIX: actually install the configured handler; the original
    # created it and then let basicConfig() add its own default handler.
    logging.basicConfig(
        level=os.environ.get('FLAYER_LOGLEVEL', 'INFO'),
        handlers=[handler],
    )
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,294 | techhat/webflayer | refs/heads/master | /flayer/db.py | # -*- coding: utf-8 -*-
'''
Database functions for Web Flayer
'''
# Python
import os
import time
import json
import urllib
import datetime
# 3rd party
import psycopg2
import psycopg2.extras
# Internal
import flayer.tools
def client(config):
    '''
    Return a psycopg2 connection to the Web Flayer database.

    :param config: dict with optional ``dbname``/``dbuser``/``dbpass``/
        ``dbhost`` keys; sensible defaults are applied for each
    '''
    # Pass connection parameters as keyword arguments instead of
    # interpolating them into a DSN string: values containing spaces or
    # quotes (notably passwords) broke the hand-built DSN.
    dbclient = psycopg2.connect(
        dbname=config.get('dbname', 'flayer'),
        user=config.get('dbuser', 'postgres'),
        password=config.get('dbpass', ''),
        host=config.get('dbhost', 'localhost'),
    )
    # Allow Python dicts to be stored transparently as JSON
    psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json)
    return dbclient
def pop_dl_queue(dbclient, urls, opts):
    '''
    Pop one queued URL from the database into ``urls``.

    First unpauses any rows whose pause window has expired, then locks the
    next eligible queue row for this agent, appends its URL to ``urls`` and
    either reschedules it (when it carries a refresh interval) or deletes it.

    :param dbclient: open database connection
    :param urls: list to append the popped URL to (mutated in place)
    :param opts: flayer opts; ``opts['id']`` identifies this agent, and
        ``opts['queue_id']`` is set to the popped row's UUID on success
    '''
    cur = dbclient.cursor()
    # Unpause jobs past the time limit
    cur.execute('''
        UPDATE dl_queue
        SET paused_until = NULL
        WHERE paused_until IS NOT NULL
        AND paused_until <= NOW()
    ''')
    dbclient.commit()
    # Lock a URL for this instance; the table lock plus RETURNING makes
    # claiming a row atomic across concurrent agents
    cur.execute('''
        LOCK TABLE ONLY dl_queue;
        UPDATE dl_queue
        SET locked_by = %s
        WHERE uuid = (
            SELECT uuid
            FROM dl_queue
            WHERE paused = FALSE
            AND paused_until IS NULL
            ORDER BY dl_order, added
            LIMIT 1
        )
        RETURNING uuid
    ''', [opts['id']])
    if cur.rowcount > 0:
        data = cur.fetchone()
        url_uuid = data[0]
    else:
        # Nothing eligible in the queue
        return
    # Helps out with the lock
    time.sleep(.2)
    # Queue the URL and delete it from the queue
    cur.execute('SELECT url, refresh_interval FROM dl_queue WHERE uuid = %s', [url_uuid])
    url, refresh = cur.fetchone()
    urls.append(url)
    if refresh:
        # Refreshing rows stay queued, paused until their next run;
        # ``refresh`` is a dict of timedelta keyword arguments
        next_refresh = datetime.datetime.now() + datetime.timedelta(**refresh)
        cur.execute('''
            UPDATE dl_queue SET locked_by = '', paused_until = %s WHERE uuid = %s
        ''', [next_refresh, url_uuid])
    else:
        cur.execute('DELETE FROM dl_queue WHERE uuid = %s', [url_uuid])
    dbclient.commit()
    opts['queue_id'] = url_uuid
def update_url_refresh(url_uuid, interval, dbclient, opts):
    '''
    Set the refresh interval for a URL.

    :param url_uuid: UUID of the row in the ``urls`` table
    :param interval: the new refresh interval value
    :param dbclient: open database connection
    :param opts: flayer opts dict (unused; kept for interface consistency)
    '''
    # Fixed: the original carried an unused ``ret`` list and Output object,
    # plus a docstring copy-pasted from list_queue(); all removed.
    cur = dbclient.cursor()
    cur.execute(
        'UPDATE urls SET refresh_interval = %s WHERE uuid = %s',
        [interval, url_uuid],
    )
    dbclient.commit()
def list_queue(dbclient, opts):
    '''
    List all queued URLs in the database
    '''
    out = flayer.tools.Output(opts)
    cur = dbclient.cursor()
    cur.execute('SELECT url, paused FROM dl_queue')
    ret = []
    if cur.rowcount > 0:
        for url, paused in cur.fetchall():
            line = '{} (paused)'.format(url) if bool(paused) is True else url
            ret.append(line)
            out.info(line)
    out.info('{} URLS queued'.format(cur.rowcount))
    # One-shot invocation: clean up the PID file on the way out
    if not opts.get('already_running'):
        try:
            os.remove(opts['pid_file'])
        except FileNotFoundError:
            pass
    return {'urls': ret, 'number_queued': cur.rowcount}
def pause(dbclient, opts, urls):
    '''
    Pause URL(s) in the download queue
    '''
    out = flayer.tools.Output(opts)
    cur = dbclient.cursor()
    placeholders = ', '.join(['%s'] * len(urls))
    cur.execute(
        'UPDATE dl_queue SET paused = true WHERE url IN ({})'.format(placeholders),
        urls,
    )
    dbclient.commit()
    ret = {'urls': urls, 'number_paused': len(urls)}
    out.info(ret)
    return ret
def unpause(dbclient, opts, urls):
    '''
    Unpause URL(s) in the download queue
    '''
    out = flayer.tools.Output(opts)
    cur = dbclient.cursor()
    placeholders = ', '.join(['%s'] * len(urls))
    cur.execute(
        'UPDATE dl_queue SET paused = false WHERE url IN ({})'.format(placeholders),
        urls,
    )
    dbclient.commit()
    ret = {'urls': urls, 'number_unpaused': len(urls)}
    out.info(ret)
    return ret
def pattern_wait(dbclient, url):
    '''
    Check the URL against the ``pattern_wait`` table, using a regular
    expression. If it matches, all other queued URLs matching the same
    pattern have their ``paused_until`` values pushed to
    ``now() + {wait} seconds``.

    Only the first match is used, so patterns should be as specific as
    possible (normally just a domain name).

    Run this before and after any download (``get_url()``, ``status()``):
    before to keep other agents from hitting the domain simultaneously,
    after to keep any agent from hitting it again too fast.
    '''
    cur = dbclient.cursor()
    cur.execute(
        'SELECT wait, pattern FROM pattern_wait WHERE %s ~ pattern LIMIT 1',
        [url],
    )
    row = cur.fetchone()
    if row is None:
        # No pattern matches this URL
        return
    wait, pattern = row
    # BUG FIX: the original UPDATE condition was ``%s ~ url`` (i.e.
    # pattern ~ url), which treats each stored URL as the regex. The rows
    # to delay are those whose URL matches the pattern, mirroring the
    # SELECT above: ``url ~ pattern``.
    cur.execute('''
        UPDATE dl_queue
        SET paused_until = now() + '%s seconds'
        WHERE url ~ %s
    ''', [wait, pattern])
    dbclient.commit()
def check_domain_wait(dbclient, url):
    '''
    Check the URL against the ``domain_wait`` table.

    Returns ``False`` when the URL's domain is still inside its
    ``wait_until`` window, ``True`` when it is clear to download. Expired
    entries are deleted from the table before the check.

    Run this before any download (``get_url()``, ``status()``) to keep
    agents from hitting a domain again too quickly.
    '''
    cur = dbclient.cursor()
    # Expire stale wait entries first
    cur.execute('DELETE from domain_wait WHERE wait_until < now()')
    dbclient.commit()
    urlcomps = urllib.parse.urlparse(url)
    domain = urlcomps[1]
    cur.execute('SELECT count(*) FROM domain_wait WHERE domain ~ %s', [domain])
    try:
        wait = cur.fetchone()[0]
        if int(wait) > 0:
            return False
    except TypeError:
        # No matches
        return True
    # BUG FIX: the original fell through here and implicitly returned None
    # (falsy) when the domain was NOT waiting, inverting the result for
    # any caller testing truthiness.
    return True
def set_domain_wait(dbclient, opts, url):
    '''
    Record a per-domain wait window so that no agent hits this URL's
    domain again before ``now() + opts['domain_wait']`` seconds.

    Run this before any download (``get_url()``, ``status()``).
    '''
    cur = dbclient.cursor()
    urlcomps = urllib.parse.urlparse(url)
    domain = urlcomps[1]
    sql = '''
        INSERT INTO domain_wait (domain, wait_until)
        values (%s, now() + '%s seconds')
        ON CONFLICT DO NOTHING
    '''
    cur.execute(sql, [domain, opts['domain_wait']])
    # BUG FIX: the insert was never committed, so other connections (and
    # agents) could not see the wait window.
    dbclient.commit()
def get_url_metadata(dbclient, opts):
    '''
    Print stored metadata for each URL listed in
    ``opts['show_url_metadata']``; the URLs need not have been retrieved
    themselves.
    '''
    # BUG FIX: ``pprint`` is used below but was never imported anywhere in
    # this module (NameError); import it locally.
    import pprint
    out = flayer.tools.Output(opts)
    cur = dbclient.cursor()
    for url in opts['show_url_metadata']:
        cur.execute('SELECT uuid FROM urls WHERE url ~ %s', (url,))
        uuid = None
        if cur.rowcount > 0:
            uuid = cur.fetchone()[0]
        cur.execute('SELECT uuid, metadata FROM url_metadata WHERE url = %s', (url,))
        uuidm, metadata = cur.fetchone()
        if uuid and uuid != uuidm:
            out.warn('UUID in URLs does not match UUID in metadata')
            out.warn('{} in URLs'.format(uuid))
            # BUG FIX: the original printed ``uuid`` twice; show the
            # metadata-side UUID here
            out.warn('{} in metadata'.format(uuidm))
        out.action('URL: {}'.format(url))
        out.action('UUID: {}'.format(uuid))
        out.info(pprint.pformat(metadata))
def store_url_metadata(dbclient, opts, url, metadata):
    '''
    Upsert metadata for a URL which may or may not have already been
    retrieved itself.
    '''
    cur = dbclient.cursor()
    cur.execute('SELECT uuid FROM urls WHERE url ~ %s', (url,))
    row = cur.fetchone()
    uuid = row[0] if row else None
    encoded = json.dumps(metadata)
    cur.execute('''
        INSERT INTO url_metadata (uuid, url, metadata)
        VALUES (%s, %s, %s)
        ON CONFLICT (url) DO UPDATE
        SET metadata = %s
    ''', (uuid, url, encoded, encoded))
    dbclient.commit()
| {"/flayer/tools.py": ["/flayer/event.py"], "/plugins/parsers/mediawiki.py": ["/flayer/tools.py"], "/plugins/organizers/jsonld.py": ["/flayer/tools.py"], "/flayer/api.py": ["/flayer/db.py"], "/plugins/organizers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/scripts.py": ["/flayer/db.py", "/flayer/api.py", "/flayer/tools.py", "/flayer/event.py", "/flayer/config.py", "/flayer/loader.py", "/flayer/version.py"], "/plugins/parsers/jsonld_recipes.py": ["/flayer/tools.py"], "/flayer/config.py": ["/flayer/version.py"], "/flayer/db.py": ["/flayer/tools.py"]} |
78,311 | AGLDWG/ld-link-harvester | refs/heads/master | /charts/progress_chart.py | import pandas as pd
import matplotlib.pyplot as plt
def progress_chart_pie(visited_uri, total_uri, title):
    """
    Generates progress chart based on how much of the .au domain has been crawled already.
    :param visited_uri: int
    :param total_uri: int
    :param title: str
    :return: None
    """
    frame = pd.DataFrame(
        {'Proportion of AU Domains': [visited_uri, total_uri]},
        index=['Visited', 'Not Visited'],
    )
    frame.plot.pie(
        y='Proportion of AU Domains',
        startangle=90,
        labels=['', ''],
        counterclock=False,
    )
    plt.legend(
        labels=frame.index,
        fontsize='small',
        loc='center right',
        bbox_to_anchor=(1.3, 0.5),
    )
    if title is not None:
        plt.title(title)
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,312 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/__init__.py | import requests
from multiprocessing import Process, Manager
import time
import sqlite3
import os
import sys
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
from harvester.lddatabase import LDHarvesterDatabaseConnector
AUTO_PROCESS_OVERFLOW = True
WORK_QUEUE_OVERFLOW_FILE = 'overflow.txt'
DATABASE_FILE = 'ld-database.db'
DATABASE_TEMPLATE = '../database/create_database.sql'
SCHEMA_INTEGRITY_CHECK = True # If False and not creating new db, do not need template file. RECOMMEND TO LEAVE True.
CRAWL_RECORD_REPAIR = True
RESPONSE_TIMEOUT = 60
MAX_REDIRECTS = 3
KILL_PROCESSES_TIMEOUT = 600 #600 # If monitoring process detects no activity for more than this (seconds), kill all processes
RECURSION_DEPTH_LIMIT = 3
PROC_COUNT = 8
COMMIT_FREQ = 50
WORK_QUEUE_MAX_SIZE = 1000000
RESP_QUEUE_MAX_SIZE = 1000000
RDF_MEDIA_TYPES = [
"application/rdf+xml",
"text/turtle",
"application/n-triples",
"application/ld+json",
"application/owl+xml",
"text/trig",
"application/n-quads"
]
RDF_FORMATS = [
'rdf',
'owl',
'ttl',
'n3',
'nt',
'json'
]
GLOBAL_HEADER = {
'Accept': ",".join(RDF_MEDIA_TYPES),
'User-Agent': 'LD Link Harvester'
}
BLACKLIST_FORMATS = [
'jpg',
'JPG',
'BMP',
'bmp',
'png',
'PNG',
'jpeg',
'JPEG',
'MP4',
'mp4',
'flv',
'pdf',
'PDF',
'eps',
'EPS',
'svg',
'SVG'
]
def verify_database(connector, template):
    """
    Verifies the schema of the database attached to a connector against an external SQL script (template).
    :param connector: lddatabase.LDHarvesterDatabaseConnector
    :param template: str
    :return: bool
    """
    schema_sql = "SELECT sql FROM sqlite_master WHERE type='table'"
    virtual_db = sqlite3.Connection(':memory:')
    # BUG FIX: the in-memory connection was never closed, leaking a
    # handle per verification; close it in a finally block.
    try:
        virtual_cursor = virtual_db.cursor()
        with open(template, 'r') as script:
            virtual_cursor.executescript(script.read())
        expected = virtual_cursor.execute(schema_sql).fetchall()
        actual = connector.cursor.execute(schema_sql).fetchall()
        return expected == actual
    finally:
        virtual_db.close()
def connect(database_file, crawl=True):
    """
    Connect to the database using the custom database connector, creating the
    database from DATABASE_TEMPLATE (after an interactive prompt) when the
    file does not exist. A new crawl record is created when *crawl* is True.

    :param database_file: str - path to the SQLite database file
    :param crawl: bool = True - start a new crawl and return its id
    :return: tuple - (connector, crawlid) when *crawl* is True,
             otherwise (connector, None)
    """
    print('Opening connector to database...')
    try:
        if os.path.isfile(database_file):
            dbconnector = LDHarvesterDatabaseConnector(database_file)
            print("Successfully connected to '{}'.".format(database_file))
            if crawl:
                crawlid = dbconnector.get_new_crawlid()
                dbconnector.insert_crawl(crawlid)
                return dbconnector, crawlid
            return dbconnector, None
        print("Cannot find '{}'.".format(database_file))
        # Interactive bootstrap: offer to create the database from the template.
        ans = str(input("Would you like to create '{}' now? [y/n] ".format(database_file)))
        if ans.lower() == 'y':
            dbconnector = LDHarvesterDatabaseConnector(database_file)
            with open(DATABASE_TEMPLATE, 'r') as script:
                dbconnector.cursor.executescript(script.read())
            print("Successfully created '{}'.".format(database_file))
            if crawl:
                crawlid = dbconnector.get_new_crawlid()
                dbconnector.insert_crawl(crawlid)
                return dbconnector, crawlid
            # BUG FIX: this path previously returned 0 while the existing-db
            # path returned None for the same "no crawl" case; unified to None.
            return dbconnector, None
        print('Exiting')
        exit(0)
    except Exception as er:
        # Broad catch is deliberate here: any failure at startup is fatal.
        print("Could not connect to a database. Something went wrong...")
        print("\t{}".format(er))
        exit(1)
def close(dbconnector, crawlid):
    """
    Finalise crawl *crawlid*, flush pending changes and release the database
    connection.

    :return: None
    """
    # Order matters: record the crawl end, persist it, then disconnect.
    dbconnector.end_crawl(crawlid)
    dbconnector.commit()
    dbconnector.close()
    print("Connection Closed.")
def find_links_html(response_content, uri, seed, depth=0):
    """
    Parse an HTML payload and collect candidate links to crawl.

    Anchors are resolved against *uri*, stripped of fragments and query
    strings, and dropped when their file extension is blacklisted.

    :param response_content: str - raw HTML body
    :param uri: str - the page's own URL (base for relative links)
    :param seed: str - originating seed URL carried along with each link
    :param depth: int - depth to record for the harvested links
    :return: list of (link, depth, seed) tuples
    """
    collected = []
    soup = BeautifulSoup(response_content, "lxml")
    for anchor in soup.findAll('a'):
        href = urljoin(uri, anchor.get('href'))
        # Drop the fragment, then the query string.
        href = href.split('#')[0].split('?')[0]
        extension = urlparse(href).path.split('/')[-1].split('.')[-1]
        if extension in BLACKLIST_FORMATS:
            continue
        if isinstance(href, str):
            collected.append((href, depth, seed))
    return collected
def process_response(response, uri, seed, depth):
    """
    Classify a web response and build an 'enhanced response' record describing
    what should be done with it.

    Opcode semantics (consumed by the main loop): 2 = plain link (failed or
    not), 3 = RDF data found. For same-domain HTML pages within the recursion
    limit, a tuple of (record, child_links) is returned so the caller can
    enqueue further work; in every other case a single record dict is returned.

    :param response: requests.Response()
    :param uri: str - the URL that was fetched
    :param seed: str - originating seed URL
    :param depth: int - current recursion depth
    :return: dict, or (dict, list) for crawlable HTML pages
    """
    def _record(url, source, fmt, failed=None):
        # Build the enhanced-response dict; failed=None means an RDF hit (opcode 3).
        params = {'source': source, 'format': fmt}
        if failed is None:
            return {'url': url, 'opcode': 3, 'params': params}
        params['failed'] = failed
        return {'url': url, 'opcode': 2, 'params': params}

    try:
        file_format = response.headers['Content-type'].split(';')[0]
    except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        print('Bad response from {}. Continuing'.format(uri))
        return _record(uri, seed, "N/A", failed=1)
    if response.status_code != 200:
        return _record(uri, seed, file_format, failed=1)
    # An RDF-looking file extension or an RDF media type is a direct hit.
    if uri.split('.')[-1] in RDF_FORMATS:
        return _record(uri, seed, file_format)
    if file_format in RDF_MEDIA_TYPES:
        return _record(uri, seed, file_format)
    if file_format != 'text/html':
        return _record(uri, seed, file_format, failed=0)
    try:
        if urlparse(uri).netloc != urlparse(seed).netloc:
            # Off-domain HTML: record it but do not crawl further.
            return _record(uri, seed, file_format, failed=0)
        if depth == 0 and len(response.history) > 0 and response.history[0].status_code in [300, 301, 302, 303, 304, 305, 307, 308]:
            # A redirected seed: rebase the seed/uri onto the redirect target
            # so child links are attributed to the real domain.
            try:
                seed = "http://" + urlparse(response.history[0].headers['Location']).netloc
                uri = response.history[0].headers['Location']
            except Exception as er:
                print("Could not find redirect location in headers for {}: {}".format(uri, er))
        if depth < RECURSION_DEPTH_LIMIT:
            child_links = find_links_html(response.content, uri, seed, depth + 1)
            return _record(uri, seed, file_format, failed=0), child_links
        return _record(uri, seed, file_format, failed=0)
    except Exception as er:
        print(er, end='...')
        print('Cannot decode response from {}. Continuing'.format(uri))
        return _record(uri, seed, file_format, failed=1)
# Queue sentinels: each worker posts ``start_sentinel`` on the response queue
# when it spins up and ``end_sentinel`` when it exits, letting the consumer
# loop count how many workers are still alive.
start_sentinel = "start"
end_sentinel = "end"
def worker_fn(p, in_queue, out_queue, visited):
    """
    Worker loop: pull (url, depth, seed) items from the work queue, fetch
    each unvisited URL and push the processed response onto the response queue.

    :param p: worker label used in log output
    :param in_queue: multiprocessing.Queue of (url, depth, seed) tuples
    :param out_queue: multiprocessing.Queue receiving sentinels and
        (enhanced_response, response-or-exception) tuples
    :param visited: shared dict used as a set of already-fetched URLs
    :return: None (terminates via SystemExit)
    """
    print("Process {} started.".format(p))
    out_queue.put(start_sentinel)
    while not in_queue.empty():
        try:
            url = in_queue.get(block=False)
        except Exception:
            # Another worker drained the queue between empty() and get().
            continue
        url, depth, seed = url
        try:
            if url not in visited and depth <= RECURSION_DEPTH_LIMIT:
                visited[url] = True
                # BUG FIX: the original created a Session and set
                # session.max_redirects, then fetched with requests.get(),
                # bypassing the session so MAX_REDIRECTS was never applied.
                # The `with` block also guarantees the session is closed even
                # when the request raises (it previously leaked on error).
                with requests.Session() as session:
                    session.max_redirects = MAX_REDIRECTS
                    resp = session.get(url, headers=GLOBAL_HEADER, timeout=RESPONSE_TIMEOUT)
            else:
                continue
        except Exception as e:
            enhanced_resp = {'url': url,
                             'opcode': 2,
                             'params': {'source': seed, 'format': "N/A", 'failed': 1}}
            out_queue.put((enhanced_resp, e))
            continue
        processed_response = process_response(resp, url, seed, depth)
        if isinstance(processed_response, tuple):
            # (record, child_links): enqueue the harvested children first.
            in_queue = add_bulk_to_work_queue(in_queue, processed_response[1], visited)
            out_queue.put((processed_response[0], resp))
        else:
            out_queue.put((processed_response, resp))
    print("Process {} done.".format(p))
    out_queue.put(end_sentinel)
    raise SystemExit(0)
def add_bulk_to_work_queue(queue, content_list, visited_urls=None):
    """
    Add a batch of (url, depth, seed) tuples to a work queue, skipping URLs
    already present in *visited_urls*. When the queue is full, unvisited
    entries are appended to the overflow file on disk instead.

    :param queue: multiprocessing.Queue()
    :param content_list: list of (url, depth, seed) tuples
    :param visited_urls: mapping used as a set of already-queued URLs
    :return: the same queue object
    """
    # BUG FIX: the signature previously used a mutable default (dict());
    # a None sentinel avoids any shared-state surprises between calls.
    if visited_urls is None:
        visited_urls = {}
    full_msg = False
    for child in content_list:
        if queue.full():
            if not full_msg:
                # Announce the spill once per contiguous full stretch.
                print("Work Queue is full. Flushing content to disk.")
                full_msg = True
            if child[0] not in visited_urls:
                with open(WORK_QUEUE_OVERFLOW_FILE, 'a') as overflow:
                    overflow.write("{} {} {}\n".format(child[0], child[1], child[2]))
        else:
            full_msg = False
            if child[0] not in visited_urls:
                queue.put((child[0], child[1], child[2]))
    return queue
if __name__ == "__main__":
    # Seed list file: one URL per line; overridable via the first CLI argument.
    URL_SOURCE = 'single_URI.txt'
    if len(sys.argv) > 1:
        URL_SOURCE = sys.argv[1]
    """
    Main runtime script. Essentially calls on the functions as appropriate. Handles workers, and processes contents of the response queue.
    """
    # Each seed becomes a (url, depth=0, seed=url) work item.
    URL_BATCH = [(url.strip(), 0, url.strip()) for url in open(URL_SOURCE)]
    dbconnector, crawlid = connect(DATABASE_FILE)
    if SCHEMA_INTEGRITY_CHECK:
        if verify_database(dbconnector, DATABASE_TEMPLATE):
            print("Database schema integrity has been verified.")
        else:
            print("Error, database schema does not match the provided template.")
            exit(1)
    if CRAWL_RECORD_REPAIR:
        # Close off any crawl records left open by a previous abnormal exit.
        repairs_required, repairs_made = dbconnector.self_repair_crawl_periods()
        if repairs_required != 0:
            print("Repairing Crawl records.\nRepairs Required: {}\nRepairs Made: {}".format(repairs_required, repairs_made))
        else:
            print("No Crawl record repairs are required.")
    print("Adding seeds to database.")
    dbconnector.insert_seed_bulk(URL_BATCH)
    dbconnector.commit()
    print("Seeds added to database.")
    #signal.signal(signal.SIGTERM, close)
    #signal.signal(signal.SIGINT, close)
    full_msg = False
    # State shared across worker processes: visited-URL set plus work/response queues.
    manager = Manager()
    visited = manager.dict()
    work_queue = manager.Queue(maxsize=WORK_QUEUE_MAX_SIZE)
    work_queue = add_bulk_to_work_queue(work_queue, URL_BATCH)
    resp_queue = manager.Queue(maxsize=RESP_QUEUE_MAX_SIZE)
    begin = time.time()
    # Outer loop: one worker-pool generation per pass; repeats while overflow
    # work remains on disk (see AUTO_PROCESS_OVERFLOW).
    while True:
        worker_procs = []
        for i in range(PROC_COUNT):
            p = Process(target=worker_fn, args=(i+1, work_queue, resp_queue, visited))
            worker_procs.append(p)
        [p.start() for p in worker_procs]
        # wait for processes to start
        time.sleep(0.1)
        threads_started = 0
        threads_ended = 0
        i = 0
        emergency_timeout_start = time.time()
        emergency_timeout = False
        # Consumer loop: drain the response queue and persist records until
        # every worker has posted its end sentinel (or the watchdog fires).
        while True:
            if not resp_queue.empty():
                emergency_timeout_start = time.time()
                #print(resp_queue.qsize())
                if i >= COMMIT_FREQ:
                    dbconnector.commit()
                    # NOTE(review): `i =- 1` assigns -1 (the `i += 1` below then
                    # yields 0), so this does reset the commit counter — but it
                    # reads like a typo for `i -= 1`; confirm intent.
                    i =- 1
                i += 1
                resp_tuple = resp_queue.get()
                # Sentinel bookkeeping: track worker start/end announcements.
                if resp_tuple == start_sentinel:
                    threads_started += 1
                    continue
                elif resp_tuple == end_sentinel:
                    threads_ended += 1
                    if threads_ended == PROC_COUNT:
                        break
                    else:
                        continue
                if isinstance(resp_tuple[0], dict):
                    '''
                    OPCODES:
                    0 = Insert Seed (Deprecated)
                    1 = Insert Failed Seed (Handled by 2)
                    2 = Insert Link (Failed or otherwise)
                    3 = Insert RDF Data
                    '''
                    opcode = resp_tuple[0]['opcode']
                    # A record whose URL equals its source is a seed visit.
                    if resp_tuple[0]['url'] == resp_tuple[0]['params']['source']:
                        dbconnector.insert_crawl_seed(uri=resp_tuple[0]['url'], crawlid=crawlid)
                    if opcode == 2:
                        dbconnector.insert_link(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'], content_format=resp_tuple[0]['params']['format'], failed=resp_tuple[0]['params']['failed'])
                        if resp_tuple[0]['params']['failed'] == 1 and resp_tuple[0]['url'] == resp_tuple[0]['params']['source']:
                            # resp_tuple[1] is either the raw response or the exception raised.
                            if isinstance(resp_tuple[1], Exception):
                                dbconnector.insert_failed_seed(uri=resp_tuple[0]['url'], crawlid=crawlid, code='000')
                            else:
                                dbconnector.insert_failed_seed(uri=resp_tuple[0]['url'], crawlid=crawlid, code=resp_tuple[1].status_code)
                    if opcode == 3:
                        dbconnector.insert_link(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'],content_format=resp_tuple[0]['params']['format'], failed=0)
                        dbconnector.insert_valid_rdfuri(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'], response_format=resp_tuple[0]['params']['format'])
                    if isinstance(resp_tuple[1], Exception):
                        print("{} : {}".format(str(resp_tuple[0]['url']), str(resp_tuple[1])))
                    else:
                        print("{} : {}".format(str(resp_tuple[0]['url']), str(resp_tuple[1].status_code)))
            # Watchdog: if nothing has arrived for KILL_PROCESSES_TIMEOUT
            # seconds, assume the workers are wedged and bail out.
            if time.time() - emergency_timeout_start > KILL_PROCESSES_TIMEOUT:
                print("FROZEN. Emergency Timeout.")
                emergency_timeout = True
                break
        if not emergency_timeout:
            [p.join() for p in worker_procs]
        else:
            [p.terminate() for p in worker_procs]
        if not work_queue.empty():
            continue
        if not AUTO_PROCESS_OVERFLOW:
            break
        else:
            # Reload any URLs spilled to disk while the queue was full, then
            # run another worker-pool generation on them.
            if os.path.isfile(WORK_QUEUE_OVERFLOW_FILE):
                new_urls = [(url.split()[0], int(url.split()[1]), url.split()[2]) for url in open(WORK_QUEUE_OVERFLOW_FILE, 'r')]
                open(WORK_QUEUE_OVERFLOW_FILE, 'w').close()
                if len(new_urls) > 0:
                    add_bulk_to_work_queue(work_queue, new_urls, visited)
                    continue
                else:
                    break
            else:
                break
    end = time.time()
    close(dbconnector, crawlid)
    print("Duration: {} seconds".format(end - begin))
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,313 | AGLDWG/ld-link-harvester | refs/heads/master | /stats.py | import harvester
import pandas as pd
import numpy as np
import xlsxwriter
from charts.file_format_chart import clean_formats
def outer_merge_frames(baseframe, appendframe, focal_point):
    """Outer-merge *appendframe* onto *baseframe* on the *focal_point* column."""
    merged = baseframe.merge(appendframe, on=focal_point, how='outer')
    return merged
if __name__ == '__main__':
DATABASE_FILE = "C:\\Users\\Has112\\Documents\\db_history\\28-05-2019\\ld-database.db"
DATABASE_VERIFICATION_TEMPLATE = 'database/create_database.sql'
WORKBOOK_NAME = 'C:\\Users\\Has112\\Documents\\db_history\\28-05-2019\\28-05-2019_Summary.xlsx'
TOTAL_DOMAINS = 7460919
INSERT_FIGURES = True
FIGURES_DIRECTORY = 'C:\\Users\\Has112\\Documents\\db_history\\28-05-2019\\figures\\'
# Open Workbook
workbook = xlsxwriter.Workbook(WORKBOOK_NAME)
# Define formats for the xlsxwriter to use.
format_index_label = workbook.add_format({'bold': True,
'align': 'right',
'italic': True,
'bg_color': '#95b3d7'})
format_sheet_heading = workbook.add_format({'font_size': 16,
'bold': True,
'font_color': '#1f497d',
'bottom': 5,
'border_color': '#4f81bd'})
format_column_summary = workbook.add_format({'bold': True,
'font_color': '#FFFFFF',
'font_size': 11,
'top': 5,
'border_color': '#3f4956',
'bg_color': '#4f81bd',
'align': 'right'})
format_column_summary_extra = workbook.add_format({'bold': True,
'font_color': '#FFFFFF',
'font_size': 11,
'bg_color': '#4f81bd',
'align': 'right'})
format_column_heading = workbook.add_format({'bold': True,
'font_color': '#FFFFFF',
'font_size': 13,
'bottom': 5,
'border_color': '#3f4956',
'bg_color': '#4f81bd'})
format_even_data_cell = workbook.add_format({'align': 'right',
'bg_color': '#dce6f1'})
format_odd_data_cell = workbook.add_format({'align': 'right',
'bg_color': '#b8cce4'})
# Connect to Database
dbconnector, crawl_id = harvester.connect(DATABASE_FILE, crawl=False)
if harvester.verify_database(dbconnector, DATABASE_VERIFICATION_TEMPLATE):
print("Database schema integrity has been verified.")
else:
print("Error, database schema does not match the provided template.")
exit(1)
# Request Summary Data from Database
total_links_visited = dbconnector.cursor.execute("""
SELECT COUNT(*) FROM Link;
""").fetchone()[0]
total_seeds_visited = dbconnector.cursor.execute("""
SELECT COUNT(*) FROM Seed;
""").fetchone()[0]
total_rdf_links_found = dbconnector.cursor.execute("""
SELECT COUNT(*) FROM RdfURI;
""").fetchone()[0]
total_crawls = dbconnector.cursor.execute("""
SELECT COUNT(DISTINCT crawlId) FROM Crawl;
""").fetchone()[0]
total_failed_seeds = dbconnector.cursor.execute("""
SELECT COUNT(DISTINCT seedURI) FROM FailedSeed;
""").fetchone()[0]
total_failed_requests = dbconnector.cursor.execute("""
SELECT COUNT(*) FROM Link WHERE failed=1;
""").fetchone()[0]
# Create 'Summary' Worksheet and Write Data To It.
summary_worksheet = workbook.add_worksheet('Summary')
summary_worksheet.write(0, 0, 'Summary', format_sheet_heading)
summary = pd.DataFrame([total_crawls, total_seeds_visited, total_failed_seeds, total_links_visited, total_failed_requests, total_rdf_links_found, round((total_seeds_visited / TOTAL_DOMAINS)*100, 2)], index=['Total Crawls Made', 'Total Seeds Processed', 'Total Failed Seeds', 'Total Links Visited', 'Total Failed Link Requests', 'Total RDF Links Found', "Percentage of .au Domain Crawled"])
row_idx = 2
max = 0
for index in summary.index:
if len(index) > max:
max = len(index)
summary_worksheet.set_column(1, 1, max)
for record in summary.iterrows():
summary_worksheet.write(row_idx, 1, record[0], format_index_label)
if record[0] == "Percentage of .au Domain Crawled":
summary_worksheet.write(row_idx, 2, str(summary.loc["Percentage of .au Domain Crawled", 0]) + "%", format_even_data_cell)
else:
summary_worksheet.write(row_idx, 2, record[1], format_even_data_cell)
row_idx += 1
row_idx += 2
if INSERT_FIGURES:
summary_worksheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'project_progress.png')
# Create a Crawl Summary Sheet in the WorkBook
crawl_records = dbconnector.cursor.execute("""
SELECT
c.crawlID,
strftime('%Y-%m-%d %H:%M:%S', startDate, 'unixepoch'),
strftime('%Y-%m-%d %H:%M:%S', endDate, 'unixepoch'),
(endDate - startDate) AS duration
FROM
CRAWL as c;
""").fetchall()
seeds_per_crawl = dbconnector.cursor.execute("""
SELECT
c.crawlID,
COUNT(distinct originseedURI)
FROM
Crawl as c,
Link as l
WHERE c.crawlID = l.crawlID
GROUP BY c.crawlID;
""").fetchall()
failed_seeds_per_crawl = dbconnector.cursor.execute("""
SELECT
c.crawlID,
COUNT(distinct seedURI)
FROM
Crawl as c,
FailedSeed as fs
WHERE c.crawlID = fs.crawlID
GROUP BY c.crawlID;
""").fetchall()
links_visited_per_crawl = dbconnector.cursor.execute("""
SELECT
crawlID,
COUNT(address) as linksvisited,
COUNT(address)/COUNT(distinct originseedURI) as averagedomain
FROM Link
GROUP BY crawlID;
""").fetchall()
links_failed_per_crawl = dbconnector.cursor.execute("""
SELECT
crawlID,
COUNT(address) as failedlinks
FROM Link
WHERE failed = 1
GROUP BY crawlID;
""").fetchall()
rdf_links_found_per_crawl = dbconnector.cursor.execute("""
SELECT
c.crawlID,
COUNT(rdfseeduri)
FROM
Crawl as c,
RdfURI as r
WHERE c.crawlID = r.crawlID
GROUP BY c.crawlID;
""").fetchall()
crawls = pd.DataFrame(crawl_records, columns=['Crawl ID', 'Start', 'End', 'Duration'])
seeds_per_crawl = pd.DataFrame(seeds_per_crawl, columns=['Crawl ID', 'Seeds Processed'])
crawls = outer_merge_frames(crawls, seeds_per_crawl, 'Crawl ID')
failed_seeds_per_crawl = pd.DataFrame(failed_seeds_per_crawl, columns=['Crawl ID', 'Failed Seeds'])
crawls = outer_merge_frames(crawls, failed_seeds_per_crawl, 'Crawl ID')
links_visited_per_crawl = pd.DataFrame(links_visited_per_crawl, columns=['Crawl ID', 'Links Visited', 'Average Domain Size'])
links_visited_per_crawl['Crawl ID'] = links_visited_per_crawl['Crawl ID'].astype(np.int64)
crawls = outer_merge_frames(crawls, links_visited_per_crawl, 'Crawl ID')
crawls['Speed'] = crawls['Links Visited']/crawls['Duration']
crawls['Speed'] = crawls['Speed'].round(2)
crawls['Speed Domain Processing'] = crawls['Seeds Processed'] / crawls['Duration']
crawls['Speed Domain Processing'] = crawls['Speed Domain Processing'].round(2)
crawls['Duration'] = crawls['Duration'].apply(lambda x: x / 3600)
crawls['Duration'] = crawls['Duration'].round(3)
links_failed_per_crawl = pd.DataFrame(links_failed_per_crawl, columns=['Crawl ID', 'Failed Requests'])
links_failed_per_crawl['Crawl ID'] = links_failed_per_crawl['Crawl ID'].astype(np.int64)
crawls = outer_merge_frames(crawls, links_failed_per_crawl, 'Crawl ID')
rdf_links_found_per_crawl = pd.DataFrame(rdf_links_found_per_crawl, columns=['Crawl ID', 'RDF Links Found'])
rdf_links_found_per_crawl['Crawl ID'] = rdf_links_found_per_crawl['Crawl ID'].astype(np.int64)
crawls = outer_merge_frames(crawls, rdf_links_found_per_crawl, 'Crawl ID')
crawls[['Seeds Processed', 'Failed Seeds', 'Links Visited', 'Average Domain Size', 'Failed Requests', 'RDF Links Found']] = crawls[['Seeds Processed', 'Failed Seeds', 'Links Visited', 'Average Domain Size', 'Failed Requests', 'RDF Links Found']].fillna(0)
crawls[['Seeds Processed', 'Failed Seeds', 'Links Visited', 'Average Domain Size', 'Failed Requests','RDF Links Found']] = crawls[['Seeds Processed', 'Failed Seeds', 'Links Visited', 'Average Domain Size', 'Failed Requests','RDF Links Found']].astype(np.int64)
crawl_records_worksheet = workbook.add_worksheet('Crawl Records')
crawl_records_worksheet.write(0, 0, 'Crawl Records', format_sheet_heading)
row_idx = 2
crawl_records_worksheet.set_column(2, 3, 19)
crawl_records_worksheet.set_column(3, 12, 22.29)
crawl_records_worksheet.write(row_idx, 1, 'CrawlID', format_column_heading)
crawl_records_worksheet.write(row_idx, 2, 'Start Date', format_column_heading)
crawl_records_worksheet.write(row_idx, 3, 'End Date', format_column_heading)
crawl_records_worksheet.write(row_idx, 4, 'Duration (hours)', format_column_heading)
crawl_records_worksheet.write(row_idx, 5, 'Seeds Processed', format_column_heading)
crawl_records_worksheet.write(row_idx, 6, 'Failed Seeds', format_column_heading)
crawl_records_worksheet.write(row_idx, 7, 'Links Visited', format_column_heading)
crawl_records_worksheet.write(row_idx, 8, 'Average Domain Size', format_column_heading)
crawl_records_worksheet.write(row_idx, 9, 'Speed (Links/s)', format_column_heading)
crawl_records_worksheet.write(row_idx, 10, 'Speed (Domains/s)', format_column_heading)
crawl_records_worksheet.write(row_idx, 11, 'Failed Requests', format_column_heading)
crawl_records_worksheet.write(row_idx, 12, 'RDF Links Found', format_column_heading)
row_idx += 1
for record in crawls.iterrows():
if row_idx % 2 == 0:
format_data_cell = format_even_data_cell
else:
format_data_cell = format_odd_data_cell
crawl_records_worksheet.write(row_idx, 1, record[1][0], format_index_label)
crawl_records_worksheet.write(row_idx, 2, record[1][1], format_data_cell)
crawl_records_worksheet.write(row_idx, 3, record[1][2], format_data_cell)
crawl_records_worksheet.write(row_idx, 4, record[1][3], format_data_cell) if not np.isnan(record[1][3]) else crawl_records_worksheet.write(row_idx, 4, '', format_data_cell)
crawl_records_worksheet.write(row_idx, 5, record[1][4], format_data_cell)
crawl_records_worksheet.write(row_idx, 6, record[1][5], format_data_cell)
crawl_records_worksheet.write(row_idx, 7, record[1][6], format_data_cell)
crawl_records_worksheet.write(row_idx, 8, record[1][7], format_data_cell)
crawl_records_worksheet.write(row_idx, 9, record[1][8], format_data_cell) if not np.isnan(record[1][8]) else crawl_records_worksheet.write(row_idx, 9, '', format_data_cell)
crawl_records_worksheet.write(row_idx, 10, record[1][9], format_data_cell) if not np.isnan(record[1][9]) else crawl_records_worksheet.write(row_idx, 10, '', format_data_cell)
crawl_records_worksheet.write(row_idx, 11, record[1][10], format_data_cell)
crawl_records_worksheet.write(row_idx, 12, record[1][11], format_data_cell)
row_idx += 1
crawl_records_worksheet.write(row_idx, 1, 'TOTAL', format_column_summary)
crawl_records_worksheet.write(row_idx, 2, '', format_column_summary)
crawl_records_worksheet.write(row_idx, 3, '', format_column_summary)
crawl_records_worksheet.write(row_idx, 4, crawls['Duration'].sum().round(3), format_column_summary)
crawl_records_worksheet.write(row_idx, 5, crawls['Seeds Processed'].sum(), format_column_summary)
crawl_records_worksheet.write(row_idx, 6, crawls['Failed Seeds'].sum(), format_column_summary)
crawl_records_worksheet.write(row_idx, 7, crawls['Links Visited'].sum(), format_column_summary)
crawl_records_worksheet.write(row_idx, 8, crawls['Average Domain Size'].sum(), format_column_summary)
crawl_records_worksheet.write(row_idx, 9, 'N/A', format_column_summary)
crawl_records_worksheet.write(row_idx, 10, 'N/A', format_column_summary)
crawl_records_worksheet.write(row_idx, 11, crawls['Failed Requests'].sum(), format_column_summary)
crawl_records_worksheet.write(row_idx, 12, crawls['RDF Links Found'].sum(), format_column_summary)
row_idx += 1
crawl_records_worksheet.write(row_idx, 1, 'MEAN', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 2, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 3, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 4, crawls['Duration'].mean().round(3), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 5, crawls['Seeds Processed'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 6, crawls['Failed Seeds'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 7, crawls['Links Visited'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 8, crawls['Average Domain Size'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 9, crawls['Speed'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 10, crawls['Speed Domain Processing'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 11, crawls['Failed Requests'].mean().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 12, crawls['RDF Links Found'].mean().round(2), format_column_summary_extra)
row_idx += 1
crawl_records_worksheet.write(row_idx, 1, 'MEDIAN', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 2, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 3, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 4, crawls['Duration'].median().round(3), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 5, crawls['Seeds Processed'].median(), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 6, crawls['Failed Seeds'].median(), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 7, crawls['Links Visited'].median(), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 8, crawls['Average Domain Size'].median(), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 9, crawls['Speed'].median().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 10, crawls['Speed Domain Processing'].median().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 11, crawls['Failed Requests'].median(), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 12, crawls['RDF Links Found'].median(), format_column_summary_extra)
row_idx += 1
crawl_records_worksheet.write(row_idx, 1, 'STD', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 2, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 3, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 4, crawls['Duration'].std().round(3), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 5, crawls['Seeds Processed'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 6, crawls['Failed Seeds'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 7, crawls['Links Visited'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 8, crawls['Average Domain Size'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 9, crawls['Speed'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 10, crawls['Speed Domain Processing'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 11, crawls['Failed Requests'].std().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 12, crawls['RDF Links Found'].std().round(2), format_column_summary_extra)
row_idx += 1
crawl_records_worksheet.write(row_idx, 1, 'VAR', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 2, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 3, '', format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 4, crawls['Duration'].var().round(3), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 5, crawls['Seeds Processed'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 6, crawls['Failed Seeds'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 7, crawls['Links Visited'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 8, crawls['Average Domain Size'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 9, crawls['Speed'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 10, crawls['Speed Domain Processing'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 11, crawls['Failed Requests'].var().round(2), format_column_summary_extra)
crawl_records_worksheet.write(row_idx, 12, crawls['RDF Links Found'].var().round(2), format_column_summary_extra)
if INSERT_FIGURES:
row_idx += 3
crawl_records_worksheet.write(row_idx, 0, 'Analytics and Figures', format_sheet_heading)
row_idx += 2
crawl_records_worksheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'seeds_crawl_size_time.png')
row_idx += 23
crawl_records_worksheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'requests_crawl_size_time.png')
row_idx += 23
crawl_records_worksheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'seeds_requests_crawl_size_time.png')
row_idx += 23
crawl_records_worksheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'rdf_domain_size_histogram.png')
row_idx += 23
crawl_records_worksheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'domain_size_histogram.png')
row_idx += 23
# Create Format Summary Worksheet
response_format_summary_sheet = workbook.add_worksheet('Format Summary')
response_format_summary_sheet.write(0, 0, 'Format Summary', format_sheet_heading)
response_formats = dbconnector.cursor.execute("""
SELECT
crawlid,
contentFormat,
COUNT(contentFormat)
FROM LINK
GROUP BY
crawlid,
contentFormat;
""")
response_format_data = pd.DataFrame(response_formats, columns=['Crawl ID', 'Format', 'Count']).pivot(index='Crawl ID', columns='Format', values='Count').fillna(0)
format_types = response_format_data.columns
collapse_map = {}
focus_formats = ['text/html',
'text/xml',
'application/json',
'application/ld+json',
'application/rdf+xml',
'text/plain',
'text/turtle',
'application/owl+xml',
'text/trig',
'application/xml'
'application/n-quads']
other_formats = []
for format in format_types:
if format.startswith("#<Mime::NullType:") or format == '' or format == "N/A":
collapse_map[format] = 'No Format'
elif len(format.split(",")) > 1:
collapse_map[format] = 'Other Format'
elif format not in focus_formats:
collapse_map[format] = 'Other Format'
other_formats.append(format)
else:
collapse_map[format] = format
response_format_data = response_format_data.groupby(collapse_map, axis=1).sum().reset_index(level=0)
response_format_data['Crawl ID'] = response_format_data['Crawl ID'].astype(np.int64)
response_format_data = outer_merge_frames(crawls['Crawl ID'], response_format_data, 'Crawl ID')
row_idx = 2
i = 1
max = 0
for heading in response_format_data.columns:
if len(heading) > max:
max = len(heading)
response_format_summary_sheet.write(row_idx, i, heading, format_column_heading)
i += 1
row_idx += 1
response_format_summary_sheet.set_column(2, len(response_format_data.columns)+1, max + 1)
for record in response_format_data.iterrows():
if row_idx % 2 == 0:
format_data_cell = format_even_data_cell
else:
format_data_cell = format_odd_data_cell
response_format_summary_sheet.write(row_idx, 1, record[1][0], format_index_label)
col_idx = 2
for cell in record[1][1:]:
response_format_summary_sheet.write(row_idx, col_idx, cell, format_data_cell) if not np.isnan(cell) else response_format_summary_sheet.write(row_idx, col_idx, '0', format_data_cell)
col_idx += 1
row_idx += 1
# --- Summary rows for the response-format sheet: TOTAL / MEAN / MEDIAN / STD / VAR.
# Each pass writes one label in column 1 and one aggregate per data column,
# skipping column 0 of the frame (the 'Crawl ID' index column).
response_format_summary_sheet.write(row_idx, 1, 'TOTAL', format_column_summary)
col_idx = 2
for heading in response_format_data.columns[1:]:
    response_format_summary_sheet.write(row_idx, col_idx, response_format_data[heading].sum(), format_column_summary)
    col_idx += 1
row_idx += 1
response_format_summary_sheet.write(row_idx, 1, 'MEAN', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    response_format_summary_sheet.write(row_idx, col_idx, response_format_data[heading].mean().round(2), format_column_summary_extra)
    col_idx += 1
row_idx += 1
response_format_summary_sheet.write(row_idx, 1, 'MEDIAN', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    response_format_summary_sheet.write(row_idx, col_idx, response_format_data[heading].median(), format_column_summary_extra)
    col_idx += 1
row_idx += 1
response_format_summary_sheet.write(row_idx, 1, 'STD', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    response_format_summary_sheet.write(row_idx, col_idx, response_format_data[heading].std().round(2), format_column_summary_extra)
    col_idx += 1
row_idx += 1
response_format_summary_sheet.write(row_idx, 1, 'VAR', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    response_format_summary_sheet.write(row_idx, col_idx, response_format_data[heading].var().round(2), format_column_summary_extra)
    col_idx += 1
if INSERT_FIGURES:
    # Embed the pre-rendered chart PNGs below the tables; each image is
    # followed by a 23-row gap so the images do not overlap.
    row_idx += 3
    response_format_summary_sheet.write(row_idx, 0, 'Charts and Figures', format_sheet_heading)
    row_idx += 2
    response_format_summary_sheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'content_format_pie.png')
    row_idx += 23
    response_format_summary_sheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'content_format_bar.png')
    row_idx += 23
    response_format_summary_sheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'rdf_content_format_pie.png')
    row_idx += 23
    response_format_summary_sheet.insert_image(row_idx, 1, FIGURES_DIRECTORY + 'rdf_content_format_bar.png')
    row_idx += 23
# Create Sheet Containing Data on ALL formats that are included under other.
all_formats_worksheet = workbook.add_worksheet('All Formats')
all_formats_worksheet.write(0,0, 'All Formats Detected')
# Per-crawl counts of every raw content format recorded for harvested links.
response_formats = dbconnector.cursor.execute("""
    SELECT
        crawlid,
        contentFormat,
        COUNT(contentFormat)
    FROM LINK
    GROUP BY
        crawlid,
        contentFormat;
    """)
# Pivot to one row per crawl and one column per raw format; absent
# crawl/format combinations become 0.
response_format_data = pd.DataFrame(response_formats, columns=['Crawl ID', 'Format', 'Count']).pivot(
    index='Crawl ID', columns='Format', values='Count').fillna(0)
format_types = response_format_data.columns
collapse_map = {}
# NOTE: 'format' shadows the builtin of the same name; harmless here but
# worth renaming if this section is ever reworked.
for format in format_types:
    if format.startswith("#<Mime::NullType:") or format == '' or format == "N/A":
        # Rails' null MIME type, empty strings and the 'N/A' placeholder all
        # collapse into a single 'No Format' bucket.
        collapse_map[format] = 'No Format'
    elif len(format.split(",")) > 1:
        # Comma-separated multi-format responses collapse into one bucket.
        collapse_map[format] = 'Multiple Formats'
    else:
        collapse_map[format] = format
response_format_data = response_format_data.groupby(collapse_map, axis=1).sum().reset_index(level=0)
response_format_data['Crawl ID'] = response_format_data['Crawl ID'].astype(np.int64)
# Re-align against the master crawl list so crawls with no links still appear.
response_format_data = outer_merge_frames(crawls['Crawl ID'], response_format_data, 'Crawl ID')
row_idx = 2
i = 1
for heading in response_format_data.columns:
    # Size each column to fit its heading, then write the heading row.
    all_formats_worksheet.set_column(i, i, len(heading) + 2)
    all_formats_worksheet.write(row_idx, i, heading, format_column_heading)
    i += 1
row_idx += 1
for record in response_format_data.iterrows():
    # Alternate row shading for readability.
    if row_idx % 2 == 0:
        format_data_cell = format_even_data_cell
    else:
        format_data_cell = format_odd_data_cell
    all_formats_worksheet.write(row_idx, 1, record[1][0], format_index_label)
    col_idx = 2
    for cell in record[1][1:]:
        # NaN cells (crawl never produced this format) are written as '0'.
        all_formats_worksheet.write(row_idx, col_idx, cell, format_data_cell) if not np.isnan(
            cell) else all_formats_worksheet.write(row_idx, col_idx, '0', format_data_cell)
        col_idx += 1
    row_idx += 1
# Summary rows for the 'All Formats' sheet, mirroring the sheet above.
all_formats_worksheet.write(row_idx, 1, 'TOTAL', format_column_summary)
col_idx = 2
for heading in response_format_data.columns[1:]:
    all_formats_worksheet.write(row_idx, col_idx, response_format_data[heading].sum(),
                                format_column_summary)
    col_idx += 1
row_idx += 1
all_formats_worksheet.write(row_idx, 1, 'MEAN', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    all_formats_worksheet.write(row_idx, col_idx, response_format_data[heading].mean().round(2),
                                format_column_summary_extra)
    col_idx += 1
row_idx += 1
all_formats_worksheet.write(row_idx, 1, 'MEDIAN', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    all_formats_worksheet.write(row_idx, col_idx, response_format_data[heading].median(),
                                format_column_summary_extra)
    col_idx += 1
row_idx += 1
all_formats_worksheet.write(row_idx, 1, 'STD', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    all_formats_worksheet.write(row_idx, col_idx, response_format_data[heading].std().round(2),
                                format_column_summary_extra)
    col_idx += 1
row_idx += 1
all_formats_worksheet.write(row_idx, 1, 'VAR', format_column_summary_extra)
col_idx = 2
for heading in response_format_data.columns[1:]:
    all_formats_worksheet.write(row_idx, col_idx, response_format_data[heading].var().round(2),
                                format_column_summary_extra)
    col_idx += 1
workbook.close()
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,314 | AGLDWG/ld-link-harvester | refs/heads/master | /cleaning/get_domains.py | import pandas as pd
# Convert the raw .au domain dump into a newline-separated list of seed URLs.
INPUT_FILE = 'au-domains-latest.csv'
data = pd.read_csv(INPUT_FILE)
# Prefix every domain with a scheme; one URL per line, no header/index.
('http://' + data['domain']).to_csv('aus-domain-urls.txt', header=False, index=None, sep=" ")
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,315 | AGLDWG/ld-link-harvester | refs/heads/master | /database/connector functions.py | def insert_crawl_seed(uri, crawlid):
insert_seed(uri)
try:
cursor.execute("INSERT INTO CrawlSeeds (seedURI, crawlId) VALUES ('{uri}', {crawlId})".format(uri=uri, crawlId=crawlid))
except sqlite3.Error as er:
print(er, end='\n\t...')
if str(er) == 'UNIQUE constraint failed: CrawlSeeds.seedURI, CrawlSeeds.crawlId':
print("Already tested the '{}' seed during this crawl.".format(uri))
def insert_seed(uri):
    """Insert a new seed URI into the Seed table.

    Uses a parameterised query; the previous str.format interpolation was
    vulnerable to SQL injection and broke on URIs containing quotes.

    :param uri: str seed URI to record
    :return: None
    """
    try:
        cursor.execute("INSERT INTO Seed (seedURI) VALUES (?)", (uri,))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: Seed.seedURI':
            print("'{}' Already in Seeds!".format(uri))
def insert_link(uri, crawlid, source, failed=0):
    """Record a visited link for a crawl.

    Uses a parameterised query; the previous str.format interpolation was
    vulnerable to SQL injection and broke on URIs containing quotes.

    :param uri: str address of the visited link
    :param crawlid: int id of the crawl that visited it
    :param source: str seed URI the link was discovered from
    :param failed: int 1 if the request failed, else 0
    :return: None
    """
    try:
        cursor.execute(
            "Insert INTO Link (address, crawlId, originSeedURI, failed) VALUES (?, ?, ?, ?)",
            (uri, crawlid, source, failed))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: Link.address, Link.originSeedURI, Link.crawlId':
            print("'{}' Already visited in this crawl through this seed. Ignoring.".format(uri))
def insert_crawl(crawlid):
    """Create a new Crawl record.

    A failure to create the crawl record is fatal: the harvest cannot
    proceed without it, so the process exits.

    :param crawlid: int id for the new crawl (must be unused)
    :return: None
    """
    try:
        cursor.execute("INSERT INTO Crawl (crawlId) VALUES (?)", (crawlid,))
    except sqlite3.Error as er:
        print(er)
        if str(er) == 'UNIQUE constraint failed: Crawl.crawlId':
            print('\t...crawlId exists.')
        print('Critical Error.')
        print('Exiting!')
        exit(1)
def insert_valid_rdfuri(uri, crawlid, source, response_format):
    """Record a URI that was confirmed to serve RDF data.

    Uses a parameterised query; the previous str.format interpolation was
    vulnerable to SQL injection and broke on URIs containing quotes.

    :param uri: str RDF-serving URI
    :param crawlid: int id of the crawl
    :param source: str seed URI the RDF URI was discovered from
    :param response_format: str content format of the RDF response
    :return: None
    """
    try:
        cursor.execute(
            "INSERT INTO RdfURI (rdfSeedURI, crawlId, originSeedURI, contentFormat) VALUES (?, ?, ?, ?)",
            (uri, crawlid, source, response_format))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: RdfURI.rdfSeedURI, RdfURI.originSeedURI, RdfURI.crawlId':
            print("'{}' - '{}' pair is already discovered in this crawl! Ignoring.".format(uri, source))
def insert_failed_seed(uri, crawlid, code):
    """Record a seed whose request failed during a crawl.

    Uses a parameterised query; the previous str.format interpolation was
    vulnerable to SQL injection and broke on URIs containing quotes.

    :param uri: str seed URI that failed
    :param crawlid: int id of the crawl
    :param code: str HTTP status code (or equivalent) of the failure
    :return: None
    """
    try:
        cursor.execute(
            "INSERT INTO FailedSeed (seedURI, crawlId, statusCode) VALUES (?, ?, ?)",
            (uri, crawlid, code))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: FailedSeed.seedURI, FailedSeed.crawlId':
            print("Already attempted and failed to request '{}' during this crawl. Ignoring.".format(uri))
78,316 | AGLDWG/ld-link-harvester | refs/heads/master | /cleaning/partition_domains.py | import pandas as pd
# Split the URL list into progressively larger partitions, one output file
# per entry of BLOCK_SIZE (the last entry absorbs the remainder).
IN_FILE = 'aus-domain-urls.txt'
START_IDX = 0
BLOCK_SIZE = [10, 20, 50, 100, 1000, 100000, 1000000]
OUT_FILE_PREFIX = 'aus-domain-urls'
data = pd.read_csv(IN_FILE)
data_length = len(data)
for i in range(len(BLOCK_SIZE)):
    # Each partition starts where the previous one ended.
    if i == 0:
        lower_bound = 0
    else:
        lower_bound = upper_bound
    if i == len(BLOCK_SIZE) - 1:
        # Final partition: take everything that remains.
        upper_bound = data_length
    else:
        upper_bound = lower_bound + BLOCK_SIZE[i]
    # File name encodes prefix, start row, end row and partition size.
    out_file = '{}_{}_{}_{}.txt'.format(OUT_FILE_PREFIX, lower_bound, upper_bound, upper_bound - lower_bound)
    (data.iloc[ lower_bound:upper_bound, : ]).to_csv(out_file, header=False, index=None, sep=" ")
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,317 | AGLDWG/ld-link-harvester | refs/heads/master | /cleaning/partition_domains_fixed_block_size.py | import pandas as pd
# Split the URL list into fixed-size partitions of BLOCK_SIZE rows,
# starting from START_IDX, one output file per partition.
IN_FILE = 'aus-domain-urls.txt'
START_IDX = 1186
BLOCK_SIZE = 100
OUT_FILE_PREFIX = 'partition_data/aus-domain-urls'
data = pd.read_csv(IN_FILE)
data_length = len(data)
# NOTE(review): operator precedence makes this data_length - (START_IDX / BLOCK_SIZE);
# presumably (data_length - START_IDX) / BLOCK_SIZE was intended. The loop still
# terminates correctly because the break below fires at the end of the data,
# but the iteration count overshoots — confirm intent before changing.
for i in range(int(data_length - START_IDX / BLOCK_SIZE)):
    if i == 0:
        lower_bound = START_IDX
    else:
        lower_bound = upper_bound
    upper_bound = lower_bound + BLOCK_SIZE
    if upper_bound >= data_length:
        # Last (possibly partial) partition: clamp, write and stop.
        upper_bound = data_length - 1
        out_file = '{}_{}_{}_{}.txt'.format(OUT_FILE_PREFIX, lower_bound, upper_bound, upper_bound - lower_bound)
        (data.iloc[lower_bound:upper_bound, :]).to_csv(out_file, header=False, index=None, sep=" ")
        break
    out_file = '{}_{}_{}_{}.txt'.format(OUT_FILE_PREFIX, lower_bound, upper_bound, upper_bound - lower_bound)
    (data.iloc[ lower_bound:upper_bound, : ]).to_csv(out_file, header=False, index=None, sep=" ")
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,318 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/lddatabase.py | import sqlite3
__author__ = 'Jake Hashim-Jones'
class LDHarvesterDatabaseConnector(sqlite3.Connection):
    """
    Specialized extension of the sqlite3.Connection object which adds functions
    to interact specifically with the ld database.

    All statements now use parameterised queries (qmark placeholders) rather
    than str.format interpolation: the old interpolation was vulnerable to SQL
    injection and silently discarded records whose URIs contained quotes.
    """

    def __init__(self, file):
        """Open a connection to *file* and attach a dedicated cursor."""
        super().__init__(file)
        self.cursor = sqlite3.Cursor(self)

    def self_repair_crawl_periods(self):
        """
        Searches the database crawl table to look for records of crawls that do
        not have end dates (due to erroneous program exits) and fills them in as
        the latest time that a link has been visited.

        :return: tuple (repairs_required, repairs_made)
        """
        pseudo_values = self.cursor.execute("""
            SELECT crawlId, MAX(dateVisited) as LatestVisit
            FROM Link
            WHERE crawlId in (
                SELECT crawlId
                FROM Crawl
                WHERE endDate is Null)
            GROUP BY crawlId;
            """).fetchall()
        repairs_required = len(pseudo_values)
        if repairs_required == 0:
            return 0, 0
        repairs_made = 0
        for crawl, pseudo_ending in pseudo_values:
            try:
                self.cursor.execute(
                    "UPDATE Crawl SET endDate = ? WHERE crawlId = ?",
                    (pseudo_ending, crawl))
                repairs_made += 1
            except Exception as er:
                # Best-effort repair: report and continue with the next crawl.
                print("Cannot repair record for crawl '{}'...{}".format(crawl, er))
        self.commit()
        return repairs_required, repairs_made

    def get_new_crawlid(self):
        """
        Generate the next logical crawlId for the run.

        :return new_crawlid: int (0 for an empty Crawl table, else max + 1)
        """
        resp = self.cursor.execute("SELECT MAX(crawlid) FROM Crawl").fetchone()[0]
        return 0 if resp is None else resp + 1

    def end_crawl(self, crawlid):
        """
        Update crawl record to include finishing time (as current time).

        :param crawlid: int
        :return: None
        """
        # endDate is set server-side to the current unix timestamp.
        self.cursor.execute(
            "UPDATE Crawl SET endDate=strftime('%s','now') WHERE crawlId=?",
            (crawlid,))

    def insert_crawl_seed(self, uri, crawlid, newseed=0):
        """
        Insert new record into the CrawlSeeds table.

        :param uri: str
        :param crawlid: int
        :param newseed: int — if truthy, also register the URI in the Seed table
        :return: None
        """
        if newseed:
            self.insert_seed(uri)
        try:
            self.cursor.execute(
                "INSERT INTO CrawlSeeds (seedURI, crawlId) VALUES (?, ?)",
                (uri, crawlid))
        except sqlite3.Error as er:
            print(er, end='\n\t...')
            if str(er) == 'UNIQUE constraint failed: CrawlSeeds.seedURI, CrawlSeeds.crawlId':
                print("Already tested the '{}' seed during this crawl.".format(uri))

    def insert_seed_bulk(self, url_list):
        """
        Insert a group of seeds from an array.

        :param url_list: list of sequences whose first element is the seed URI
        :return: None
        """
        for url in url_list:
            try:
                self.cursor.execute("INSERT INTO Seed (seedURI) VALUES (?)", (url[0],))
            except Exception as er:
                if str(er) == 'UNIQUE constraint failed: Seed.seedURI':
                    print("'{}' Already in Seeds!".format(url[0]))
                else:
                    print("'{}' Error: {}".format(url[0], er))

    def insert_seed(self, uri):
        """
        Insert new seed into the database.

        :param uri: str
        :return: None
        """
        try:
            self.cursor.execute("INSERT INTO Seed (seedURI) VALUES (?)", (uri,))
        except sqlite3.Error as er:
            print(er, end='\n\t...')
            if str(er) == 'UNIQUE constraint failed: Seed.seedURI':
                print("'{}' Already in Seeds!".format(uri))

    def insert_link(self, uri, crawlid, source, content_format, failed=0):
        """
        Insert new link visited into the database.

        :param uri: str
        :param crawlid: int
        :param source: str seed URI the link was reached from
        :param content_format: str content format of the response
        :param failed: int — must be 0 or 1; anything else is coerced to 1
        :return: None
        """
        if failed not in [0, 1]:
            print("Warning! 'failed' parameter should be 0 or 1. Making it 1.")
            failed = 1
        try:
            self.cursor.execute(
                "Insert INTO Link (address, crawlId, originSeedURI, contentFormat, failed) VALUES (?, ?, ?, ?, ?)",
                (uri, crawlid, source, content_format, failed))
        except sqlite3.Error as er:
            print(er, end='\n\t...')
            if str(er) == 'UNIQUE constraint failed: Link.address, Link.originSeedURI, Link.crawlId':
                print("'{}' Already visited in this crawl through this seed. Ignoring.".format(uri))

    def insert_crawl(self, crawlid):
        """
        Create new entry for crawl in the database.

        A failure here is fatal — the harvest cannot proceed without its crawl
        record — so the process exits.

        :param crawlid: int
        :return: None
        """
        try:
            self.cursor.execute("INSERT INTO Crawl (crawlId) VALUES (?)", (crawlid,))
        except sqlite3.Error as er:
            print(er)
            if str(er) == 'UNIQUE constraint failed: Crawl.crawlId':
                print('\t...crawlId exists.')
            print('Critical Error.')
            print('Exiting!')
            exit(1)

    def insert_valid_rdfuri(self, uri, crawlid, source, response_format):
        """
        Insert valid URI pointing to RDF data into the appropriate table.

        :param uri: str
        :param crawlid: int
        :param source: str
        :param response_format: str
        :return: None
        """
        try:
            self.cursor.execute(
                "INSERT INTO RdfURI (rdfSeedURI, crawlId, originSeedURI, contentFormat) VALUES (?, ?, ?, ?)",
                (uri, crawlid, source, response_format))
        except sqlite3.Error as er:
            print(er, end='\n\t...')
            if str(er) == 'UNIQUE constraint failed: RdfURI.rdfSeedURI, RdfURI.originSeedURI, RdfURI.crawlId':
                print("'{}' - '{}' pair is already discovered in this crawl! Ignoring.".format(uri, source))

    def insert_failed_seed(self, uri, crawlid, code):
        """
        Record if a seed specifically fails in the database.

        :param uri: str
        :param crawlid: int
        :param code: str
        :return: None
        """
        try:
            self.cursor.execute(
                "INSERT INTO FailedSeed (seedURI, crawlId, statusCode) VALUES (?, ?, ?)",
                (uri, crawlid, code))
        except sqlite3.Error as er:
            print(er, end='\n\t...')
            if str(er) == 'UNIQUE constraint failed: FailedSeed.seedURI, FailedSeed.crawlId':
                print("Already attempted and failed to request '{}' during this crawl. Ignoring.".format(uri))
if __name__ == '__main__':
    # Smoke-test the connector against the development database.
    # Raw string: '\l' in the old literal was an invalid escape sequence.
    connector = LDHarvesterDatabaseConnector(r'..\ld-database.db')
    crawlid = connector.get_new_crawlid()
    connector.insert_crawl(crawlid)
    connector.insert_crawl_seed('www.nothing.com', crawlid)
    connector.insert_failed_seed('www.nothing.com', crawlid, '404')
    connector.insert_crawl_seed('www.google.com', crawlid)
    # insert_link requires a content_format argument; the previous calls either
    # omitted it (TypeError) or passed the failed flag in its place.
    connector.insert_link('www.google.com/data.rdf', crawlid, 'www.google.com', 'application/rdf+xml')
    connector.insert_valid_rdfuri('www.google.com/data.rdf', crawlid, 'google.com', 'application/rdf+xml')
    connector.insert_link('www.google.com/no_data.rdf', crawlid, 'www.google.com', 'N/A', failed=1)
    from time import sleep
    sleep(2)  # ensure endDate differs from the crawl's start
    connector.end_crawl(crawlid)
    connector.commit()
    connector.close()
78,319 | AGLDWG/ld-link-harvester | refs/heads/master | /charts/__init__.py | from charts import progress_chart, file_format_chart, size_histogram, request_time_scatter
# No CLI behaviour: importing this package already pulls in all chart submodules.
if __name__ == '__main__':
    pass
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,320 | AGLDWG/ld-link-harvester | refs/heads/master | /charts/size_histogram.py | import pandas as pd
import matplotlib.pyplot as plt
def plot_size_histogram(seed_size_data, bins, title=None):
    """
    Generates a histogram based on the frequency of different link pool sizes
    of a specific domain.

    :param seed_size_data: array of (domain, size) records
    :param bins: int number of histogram bins
    :param title: str optional chart title
    :return: None
    """
    # Build the per-domain size series in one pass; the original constructed
    # the same DataFrame twice just to reuse the Domain column as the index.
    sizes = pd.DataFrame(seed_size_data, columns=['Domain', 'Size']).set_index('Domain')['Size']
    sizes.hist(bins=bins)
    plt.xlabel('Pages per site')
    plt.ylabel('Frequency')
    if title is not None:
        plt.title(title)
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,321 | AGLDWG/ld-link-harvester | refs/heads/master | /post_processing/ld_downloader.py | import rdflib
import requests
import bs4
import os
import sys
import shutil
# --- Configuration: input URL list and output/bookkeeping files. ---
URL_FILE = "C:\\Users\\Has112\\Documents\\db_history\\21-05-2019\\rdf_seeds.txt"
FAILED_URL_OUT = 'url_failed.txt'
PASSED_URL_OUT = 'url_passed.txt'
URL_ID_KEY_OUT = 'url_key.txt'
LD_OUT_DIR = 'ld_store'
FAILED_LOG_FILE = 'url_log.txt'
# RDF media types advertised in the Accept header when fetching candidates.
RDF_MEDIA_TYPES = [
    "application/rdf+xml",
    "text/turtle",
    "application/n-triples",
    "application/ld+json",
    "application/owl+xml",
    "text/trig",
    "application/n-quads"
]
# Known RDF file extensions (not referenced in the visible code below —
# presumably used elsewhere or kept for reference).
RDF_FORMATS = [
    'rdf',
    'owl',
    'ttl',
    'n3',
    'nt',
    'json'
]
GLOBAL_HEADER = {
    'Accept': ",".join(RDF_MEDIA_TYPES),
    'User-Agent': 'LD Link Harvester'
}
def parse_embedded_ld_json(content):
    # Extract the text of every <script type="application/ld+json"> block
    # embedded in an HTML document.
    rdf_data = []
    soup = bs4.BeautifulSoup(content, "lxml")
    scripts = soup.find_all('script')
    for script in scripts:
        if script.get('type') == 'application/ld+json':
            rdf_data.append(script.get_text())
    return rdf_data
URL_BATCH = open(URL_FILE).readlines()
# Ensure the bookkeeping files exist; resume key_counter from the last
# recorded entry in the URL/id key file when present.
if not os.path.isfile(FAILED_URL_OUT):
    open(FAILED_URL_OUT, 'w').close()
if not os.path.isfile(FAILED_LOG_FILE):
    open(FAILED_LOG_FILE, 'w').close()
if not os.path.isfile(PASSED_URL_OUT):
    open(PASSED_URL_OUT, 'w').close()
if not os.path.isfile(URL_ID_KEY_OUT):
    open(URL_ID_KEY_OUT, 'w').close()
    key_counter = 0
else:
    lines = open(URL_ID_KEY_OUT, 'r').readlines()
    key_counter = int(lines[-1].split('.ttl')[0]) if len(lines) > 0 else 0
if not os.path.exists(LD_OUT_DIR):
    os.mkdir(LD_OUT_DIR)
    # Fresh output directory: restart numbering from zero.
    key_counter = 0
# Fetch each candidate URL, parse any RDF it serves (embedded JSON-LD or a
# native RDF serialization) and persist it as Turtle under LD_OUT_DIR.
for url in URL_BATCH:
    url = url.strip()
    print("Validating '{}'.".format(url))
    g = rdflib.Graph()
    r = requests.get(url, headers=GLOBAL_HEADER)
    if r.status_code == 200:
        # Strip charset etc. from the content-type header.
        response_content_type = r.headers['content-type'].split(';')[0]
        try:
            if 'application/ld+json' in r.text:
                # HTML page with embedded JSON-LD blocks.
                ld_json = parse_embedded_ld_json(r.content)
                if len(ld_json) > 0:
                    for array in ld_json:
                        g.parse(data=array, format='json-ld')
                    with open('{}/{}.ttl'.format(LD_OUT_DIR, key_counter), 'w') as f:
                        f.write(g.serialize(format='turtle').decode('utf-8'))
                    with open(URL_ID_KEY_OUT, 'a') as f:
                        f.write('{}.ttl\t{}\n'.format(key_counter, url))
                    key_counter += 1
                else:
                    with open(FAILED_URL_OUT, 'a') as f:
                        f.write('{url}\n'.format(url=url))
                    with open(FAILED_LOG_FILE, 'a') as f:
                        f.write('{url}\nNo JSON_LD data found.\n\n'.format(url=url))
            else:
                # Native RDF serialization: parse directly by media type.
                g.parse(data=r.text, format=response_content_type)
                with open('{}/{}.ttl'.format(LD_OUT_DIR, key_counter), 'w') as f:
                    f.write(g.serialize(format='turtle').decode('utf-8'))
                with open(URL_ID_KEY_OUT, 'a') as f:
                    f.write('{}.ttl\t{}\n'.format(key_counter, url))
                key_counter += 1
            # NOTE(review): this 'passed' bookkeeping runs after either branch
            # above, including the empty-JSON-LD failure path — confirm whether
            # it was meant to apply only to successful parses.
            print('Passed! {} Triplets.'.format(len(g)))
            with open(PASSED_URL_OUT, 'a') as f:
                f.write('{url}\n'.format(url=url))
        except Exception as e:
            print(e)
            with open(FAILED_URL_OUT, 'a') as f:
                f.write('{url}\n'.format(url=url))
            with open(FAILED_LOG_FILE, 'a') as f:
                f.write('{url}\n{error}\n\n'.format(url=url, error=e))
    else:
        # FIX: previously formatted the literal string 'url' instead of the
        # url variable, so every message read "Url 'url' broken.".
        print("Url '{}' broken.".format(url))
        with open(FAILED_URL_OUT, 'a') as f:
            f.write('{url}\n'.format(url=url))
        with open(FAILED_LOG_FILE, 'a') as f:
            f.write('{url}\nBroken.\n'.format(url=url))
78,322 | AGLDWG/ld-link-harvester | refs/heads/master | /charts/file_format_chart.py | import pandas as pd
import matplotlib.pyplot as plt
def clean_formats(format_dict):
    """
    Clusters/groups the more obscure formats together appropriately to simplify
    the charts. Modifies dictionary IN PLACE.

    Null/empty/'N/A' formats are merged into a 'No Format' bucket; everything
    not in the keep-separate list (including comma-separated multi-format
    entries) is merged into 'Other'.

    :param format_dict: dict mapping format name -> count
    :return: None
    """
    # FIX: the original list was missing a comma between 'application/xml' and
    # 'application/n-quads', silently concatenating them into one bogus entry
    # so both real formats were mis-bucketed into 'Other'.
    keep_separate = ['text/html',
                     'text/xml',
                     'application/json',
                     'application/ld+json',
                     'application/rdf+xml',
                     'text/plain',
                     'text/turtle',
                     'application/owl+xml',
                     'text/trig',
                     'application/xml',
                     'application/n-quads']
    for key in list(format_dict.keys()):
        # Combine entries with Null formats into 'No Format'.
        if key.startswith("#<Mime::NullType:") or key == '' or key == "N/A":
            format_dict['No Format'] = format_dict.get('No Format', 0) + format_dict.pop(key)
        # Multi-format (comma separated) entries and unlisted media types
        # are clustered under 'Other'.
        elif len(key.split(",")) > 1 or key not in keep_separate:
            format_dict['Other'] = format_dict.get('Other', 0) + format_dict.pop(key)
def file_format_pie(format_dict, title=None):
    """
    Draw a pie chart of response counts per content format.

    Slice labels are suppressed on the wedges themselves; format names are
    shown in a legend placed to the right of the chart instead.

    :param format_dict: dict mapping format name -> response count
    :param title: str optional chart title
    :return: None
    """
    names = list(format_dict.keys())
    counts = list(format_dict.values())
    frame = pd.DataFrame({'Content Format': counts}, index=names)
    blank_labels = [''] * len(frame.index)
    frame.plot.pie(y='Content Format', startangle=90, labels=blank_labels, counterclock=False)
    plt.legend(labels=frame.index, fontsize='small', loc='center right', bbox_to_anchor=(1.5, 0.5))
    if title is not None:
        plt.title(title)
def file_format_bar(format_dict, title=None):
    """
    Draw a grouped bar chart of response counts per content format.

    The x-axis ticks are hidden; format names are shown in a legend to the
    right of the chart instead.

    :param format_dict: dict mapping format name -> response count
    :param title: str optional chart title
    :return: None
    """
    frame = pd.DataFrame({'Content Format': list(format_dict.values())},
                         index=list(format_dict.keys()))
    frame.T.plot.bar()
    plt.legend(labels=frame.index, fontsize='small', loc='center right', bbox_to_anchor=(1.4, 0.5))
    plt.xticks([])
    plt.ylabel('Response Count')
    if title is not None:
        plt.title(title)
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,323 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/tests/test_worker.py | import harvester
from multiprocessing import Queue, Process, Manager
# Module-level fixture: run one harvester worker process against a local test
# server (expected at 127.0.0.1:8080) and collect its output before the test
# functions below make assertions about it.
WORK_QUEUE_MAX_SIZE = 100
RESP_QUEUE_MAX_SIZE = 100
URL_BATCH = [("http://127.0.0.1:8080", 0, "http://127.0.0.1:8080")]
manager = Manager()
visited = manager.dict()
work_queue = manager.Queue(maxsize=WORK_QUEUE_MAX_SIZE)
work_queue = harvester.add_bulk_to_work_queue(work_queue, URL_BATCH)
resp_queue = manager.Queue(maxsize=RESP_QUEUE_MAX_SIZE)
proc = Process(target=harvester.worker_fn, args=(1, work_queue, resp_queue, visited))
proc.start()
proc.join()
results = []
visited_links = []
# Drain the response queue, skipping the 'start'/'end' sentinel markers.
while not resp_queue.empty():
    out_entry = resp_queue.get()
    if out_entry in ['start', 'end']:
        continue
    visited_links.append(out_entry[0]['url'])
    results.append(out_entry)
def test_worker():
    """
    Worker should recursively search a web domain and find all links up to a specific depth.
    """
    assert len(set(visited)) == 16
def test_visited_dictionary():
    # The shared visited map should contain exactly the pages of the test site
    # plus the external links it references, in discovery order.
    expected_visited = {'http://127.0.0.1:8080': True,
                        'http://127.0.0.1:8080/index.html': True,
                        'http://127.0.0.1:8080/birds.html': True,
                        'http://127.0.0.1:8080/mammals.html': True,
                        'http://127.0.0.1:8080/fish.html': True,
                        'http://127.0.0.1:8080/reptiles.html': True,
                        'http://127.0.0.1:8080/amphibians.html': True,
                        'http://127.0.0.1:8080/anthropods.html': True,
                        'http://127.0.0.1:8080/form.html': True,
                        'http://127.0.0.1:8080/contact.html': True,
                        'https://www.australia.com/en/facts-and-planning/australias-animals.html': True,
                        'https://en.wikipedia.org/wiki/Fauna_of_Australia': True,
                        'https://www.csiro.au/en/Research/Collections/ANIC': True,
                        'http://127.0.0.1:8080/amphibians/growling.html': True,
                        'http://127.0.0.1:8080/amphibians/crucifix.html': True,
                        'http://127.0.0.1:8080/amphibians/red-crowned.html': True}
    assert list(visited.keys()) == list(expected_visited.keys())
def test_worker_visited_once():
    """
    Links should only be visited once per crawl and ignored if already visited
    """
    assert len(set(visited_links)) == len(visited_links)
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,324 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/tests/test_database_verification.py | import os
import harvester
def test_database_verification_bad():
    """
    If the database file provided to the function has an invalid schema that does not match the factory schema, the function should flag this.
    """
    # A freshly-created connection has no schema at all, so it must not match.
    bad_db = harvester.LDHarvesterDatabaseConnector('integrity_bad.sql')
    template = '../../database/create_database.sql'
    assert not harvester.verify_database(bad_db, template)
    bad_db.close()
    os.remove('integrity_bad.sql')
def test_database_verification_good():
    """
    If the database file provided to the function has a valid schema, it returns true.
    """
    good_db = harvester.LDHarvesterDatabaseConnector('integrity_good.sql')
    # Apply the factory schema before verifying against the same template.
    with open('../../database/create_database.sql', 'r') as script:
        good_db.cursor.executescript(script.read())
    template = '../../database/create_database.sql'
    assert harvester.verify_database(good_db, template)
    good_db.close()
    os.remove('integrity_good.sql')
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,325 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/tests/test_response_handler.py | import harvester
import requests
import sys
import os
from copy import copy
def test_valid_response():
"""
If the response is from a page with the format text/html, the handler should hand it to the parser
function to obtain appropriate child links.
"""
expected_response = ({'url': 'http://127.0.0.1:8080',
'opcode': 2,
'params': {'source': 'http://127.0.0.1:8080',
'format': 'text/html',
'failed': 0}},
{('http://127.0.0.1:8080/index.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/birds.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/mammals.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/fish.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/reptiles.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/amphibians.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/anthropods.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/form.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/contact.html', 1, 'http://127.0.0.1:8080'),
('http://127.0.0.1:8080/form.html', 1, 'http://127.0.0.1:8080'),
('https://www.australia.com/en/facts-and-planning/australias-animals.html', 1, 'http://127.0.0.1:8080'),
('https://en.wikipedia.org/wiki/Fauna_of_Australia', 1, 'http://127.0.0.1:8080'),
('https://www.csiro.au/en/Research/Collections/ANIC', 1, 'http://127.0.0.1:8080')})
response = requests.get('http://127.0.0.1:8080')
uri = 'http://127.0.0.1:8080'
seed = 'http://127.0.0.1:8080'
depth = 0
result = harvester.process_response(response, uri, seed, depth)
result = (result[0], set(result[1]))
assert isinstance(result, tuple)
assert response.status_code == 200
assert result == expected_response
def test_error_response():
    """
    The response handler should be able to handle responses that return some form of error (e.g. 404)
    """
    # Silence the handler's console output for the duration of the test.
    sys.stdout = open(os.devnull, 'w')
    target = 'http://127.0.0.1:8080/no-exist'
    origin = 'http://127.0.0.1:8080'
    response = requests.get(target)
    outcome = harvester.process_response(response, target, origin, 0)
    assert isinstance(outcome, dict)
    assert response.status_code == 404
    assert outcome == {'url': 'http://127.0.0.1:8080/no-exist',
                       'opcode': 2,
                       'params': {'source': 'http://127.0.0.1:8080',
                                  'format': 'N/A',
                                  'failed': 1}}
def test_error_no_file_format():
    """
    The response handler function should be capable of detecting if the content-format is missing and replacing it
    with placeholder value N/A if necessary.
    """
    # Silence the handler's console output for the duration of the test.
    sys.stdout = open(os.devnull, 'w')
    target = 'http://127.0.0.1:8080/mammals.html'
    origin = 'http://127.0.0.1:8080'
    response = requests.get(target)
    # Simulate a server that omits the content-type header entirely.
    response.headers.pop('content-type')
    outcome = harvester.process_response(response, target, origin, 0)
    assert outcome == {'url': 'http://127.0.0.1:8080/mammals.html',
                       'opcode': 2,
                       'params': {'source': 'http://127.0.0.1:8080',
                                  'format': 'N/A',
                                  'failed': 1}}
def test_rdf_detected_in_name():
    """
    The crawler should also detect rdf files if it encounters a file ending in the URL matching a known type.
    """
    # Silence the handler's console output for the duration of the test.
    sys.stdout = open(os.devnull, 'w')
    origin = 'http://127.0.0.1:8080'
    response = requests.get('http://127.0.0.1:8080/mammals.html')
    # Re-label the fetched response so it looks like an RDF document.
    response.headers['content-type'] = 'application/rdf+xml'
    target = 'http://127.0.0.1:8080/mammals.rdf'
    outcome = harvester.process_response(response, target, origin, 0)
    assert outcome == {'url': 'http://127.0.0.1:8080/mammals.rdf',
                       'opcode': 3,
                       'params': {'source': 'http://127.0.0.1:8080',
                                  'format': 'application/rdf+xml'}}
def test_rdf_detected_in_format():
    """
    The response handler should be able to detect rdf file format in the response headers and act accordingly.
    """
    # Silence the handler's console output for the duration of the test.
    sys.stdout = open(os.devnull, 'w')
    target = 'http://127.0.0.1:8080/mammals.html'
    origin = 'http://127.0.0.1:8080'
    response = requests.get(target)
    # Override the header only; the URL keeps its .html extension.
    response.headers['content-type'] = 'application/rdf+xml'
    outcome = harvester.process_response(response, target, origin, 0)
    assert outcome == {'url': 'http://127.0.0.1:8080/mammals.html',
                       'opcode': 3,
                       'params': {'source': 'http://127.0.0.1:8080',
                                  'format': 'application/rdf+xml'}}
def test_error_invalid_format():
    """
    The response handler should be able to encounter an 'invalid' file format and return a valid result
    (i.e. failed=0) to be recorded but not attempt to pass it to the parser.
    """
    # Silence the handler's console output for the duration of the test.
    sys.stdout = open(os.devnull, 'w')
    target = 'http://127.0.0.1:8080/mammals.html'
    origin = 'http://127.0.0.1:8080'
    response = requests.get(target)
    # text/xml is neither HTML nor a recognised RDF media type.
    response.headers['content-type'] = 'text/xml'
    outcome = harvester.process_response(response, target, origin, 0)
    assert outcome == {'url': 'http://127.0.0.1:8080/mammals.html',
                       'opcode': 2,
                       'params': {'source': 'http://127.0.0.1:8080',
                                  'format': 'text/xml',
                                  'failed': 0}}
def test_redirect_change_seed():
    """
    The response handler should modify the uri and seeds appropriately if a 3xx redirect is detected to have occurred.

    A fake 301 entry is injected into response.history so the handler sees a
    redirect from nonexists.com to the local fixture host; the expected record
    therefore carries the post-redirect host as its source/seed.
    """
    expected_result = ({'url': 'http://127.0.0.1:8080/mammals.html',
                        'opcode': 2,
                        'params': {'source': 'http://127.0.0.1:8080',
                                   'format': 'text/html',
                                   'failed': 0}},
                       {('http://127.0.0.1:8080/index.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/birds.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/mammals.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/fish.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/reptiles.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/amphibians.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/anthropods.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/form.html', 1, 'http://127.0.0.1:8080'),
                        ('http://127.0.0.1:8080/contact.html', 1, 'http://127.0.0.1:8080')})
    uri = 'http://127.0.0.1:8080/mammals.html'
    response = requests.get(uri)
    # Modifying a response to make it look like a redirect
    uri = 'http://nonexists.com/mammals.html'
    hist_response = copy(response)
    hist_response.status_code = 301
    # The Location header is what identifies the redirect target; the other
    # headers are realistic filler copied from a captured response.
    hist_response.headers = {'Location': 'http://127.0.0.1:8080/mammals.html', 'Content-Type': 'text/html; charset=UTF-8', 'Content-Encoding': 'gzip', 'Date': 'Thu, 11 Apr 2019 03:24:58 GMT', 'Expires': 'Thu, 11 Apr 2019 03:24:58 GMT', 'Cache-Control': 'private, max-age=0', 'X-Content-Type-Options': 'nosniff', 'X-Frame-Options': 'SAMEORIGIN', 'X-XSS-Protection': '1; mode=block', 'Content-Length': '177', 'Server': 'GSE'}
    response.history.append(hist_response)
    seed = 'http://nonexists.com'
    depth = 0
    result = harvester.process_response(response, uri, seed, depth)
    # Links come back in unspecified order; normalise to a set before comparing.
    result = (result[0], set(result[1]))
    assert result == expected_result
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,326 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/tests/test_link_parser.py | import harvester
import requests
def test_parser_relative_to_absolute_links():
    """
    The parser should be able to return a set of URLs that are absolute links, even if they are published as
    relative links.

    Each parsed link is an (absolute_url, depth, seed) triple; here every
    triple carries the depth value (1) that was passed in.
    """
    expected_response = [
        ('http://127.0.0.1:8080/birds.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/mammals.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/reptiles.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/anthropods.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/form.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/contact.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians/growling.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians/red-crowned.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians/crucifix.html', 1, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/index.html', 1, 'http://127.0.0.1:8080'),
    ]
    # The amphibians page links to its sub-pages — presumably via relative
    # hrefs in the fixture HTML, which must come back absolutised.
    uri = 'http://127.0.0.1:8080/amphibians.html'
    seed = 'http://127.0.0.1:8080'
    depth = 1
    response = requests.get(uri)
    links = harvester.find_links_html(response.content, uri, seed, depth)
    # Order is irrelevant; compare as sets.
    assert set(links) == set(expected_response)
def test_parser_external_links():
    """
    The parser should also be able to return local as well as external links (all in absolute formats).

    The seed stays the local fixture host even for the external (australia.com,
    wikipedia, csiro) links, since they were discovered from that seed.
    """
    expected_response = [
        ('http://127.0.0.1:8080/birds.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/mammals.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/reptiles.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/anthropods.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/form.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/form.html', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/contact.html', 0, 'http://127.0.0.1:8080'),
        ('https://www.australia.com/en/facts-and-planning/australias-animals.html', 0, 'http://127.0.0.1:8080'),
        ('https://en.wikipedia.org/wiki/Fauna_of_Australia', 0, 'http://127.0.0.1:8080'),
        ('https://www.csiro.au/en/Research/Collections/ANIC', 0, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/index.html', 0, 'http://127.0.0.1:8080')
    ]
    uri = 'http://127.0.0.1:8080'
    seed = 'http://127.0.0.1:8080'
    depth = 0
    response = requests.get(uri)
    links = harvester.find_links_html(response.content, uri, seed, depth)
    # Order is irrelevant; the duplicate form.html entry collapses in the set.
    assert set(links) == set(expected_response)
def test_parser_blacklisted_files():
    """
    The parser function should be able to filter out black listed file types and ignore the associated links
    (e.g. no '.jpg' links).

    The expected set contains only .html links — any image/media links the
    fixture page carries must be absent from the parser output.
    """
    expected_response = [
        ('http://127.0.0.1:8080/birds.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/mammals.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/reptiles.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/anthropods.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/form.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/contact.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/index.html', 2, 'http://127.0.0.1:8080')
    ]
    uri = 'http://127.0.0.1:8080/mammals.html'
    seed = 'http://127.0.0.1:8080'
    depth = 2
    response = requests.get(uri)
    links = harvester.find_links_html(response.content, uri, seed, depth)
    # Order is irrelevant; compare as sets.
    assert set(links) == set(expected_response)
def test_parser_remove_anchors():
    """
    The parser should remove link anchors from links - i.e. remove hashes from urls
    (e.g. http://127.0.0.1:8080/fish.html#introduced-fish --> http://127.0.0.1:8080/fish.html)
    """
    # fish.html is listed twice: once plain and once from the anchored link
    # with its fragment stripped (the two collapse in the set comparison).
    expected_response = [
        ('http://127.0.0.1:8080/birds.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/mammals.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/reptiles.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/anthropods.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/form.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/contact.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/index.html', 2, 'http://127.0.0.1:8080')
    ]
    # Identical to expected_response except the anchored URL keeps its
    # '#introduced-fish' fragment — this set must NOT match the parser output.
    incorrect_response = [
        ('http://127.0.0.1:8080/birds.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/mammals.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/fish.html#introduced-fish', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/reptiles.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/amphibians.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/anthropods.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/form.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/contact.html', 2, 'http://127.0.0.1:8080'),
        ('http://127.0.0.1:8080/index.html', 2, 'http://127.0.0.1:8080')
    ]
    uri = 'http://127.0.0.1:8080/fish.html'
    seed = 'http://127.0.0.1:8080'
    depth = 2
    response = requests.get(uri)
    links = harvester.find_links_html(response.content, uri, seed, depth)
    assert set(links) != set(incorrect_response)
    assert set(links) == set(expected_response)
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,327 | AGLDWG/ld-link-harvester | refs/heads/master | /harvest.py | from multiprocessing import Process, Manager
import time
import os
import sys
import harvester
# Set Global Variables
# Seed-list file (one URL per line); may be overridden by the first CLI argument.
URL_SOURCE = 'URI.txt'
if len(sys.argv) > 1:
    URL_SOURCE = sys.argv[1]
# Re-queue URLs spilled to the overflow file once the work queue drains (see main loop).
AUTO_PROCESS_OVERFLOW = True
DATABASE_FILE = 'data/ld-database.db'
DATABASE_TEMPLATE = 'database/create_database.sql'
# Overflow file is named after the seed list so different seed files don't collide.
WORK_QUEUE_OVERFLOW_FILE = 'data/{}_overflow.txt'.format(URL_SOURCE.split('/')[-1])
SCHEMA_INTEGRITY_CHECK = True
CRAWL_RECORD_REPAIR = True
# Per-request timeout in seconds — presumably; applied inside the harvester module (confirm there).
RESPONSE_TIMEOUT = 60
MAX_REDIRECTS = 3
# Seconds of response-queue inactivity before worker processes are presumed frozen.
KILL_PROCESSES_TIMEOUT = 600
RECURSION_DEPTH_LIMIT = 3
# Number of worker processes spawned per generation.
PROC_COUNT = 16
# Commit the database once per this many processed responses.
COMMIT_FREQ = 50
WORK_QUEUE_MAX_SIZE = 1000000
RESP_QUEUE_MAX_SIZE = 1000000
# RDF serialisations the crawler recognises; also advertised via the Accept header below.
RDF_MEDIA_TYPES = [
    "application/rdf+xml",
    "text/turtle",
    "application/n-triples",
    "application/ld+json",
    "application/owl+xml",
    "text/trig",
    "application/n-quads"
]
# URL file extensions treated as RDF documents — presumably by the response
# handler's name-based detection (confirm in harvester).
RDF_FORMATS = [
    'rdf',
    'owl',
    'ttl',
    'n3',
    'nt',
    'json'
]
# Sent with every request: content-negotiate for RDF and identify the crawler.
GLOBAL_HEADER = {
    'Accept': ",".join(RDF_MEDIA_TYPES),
    'User-Agent': 'LD Link Harvester'
}
# Media/binary extensions to skip — presumably filtered by the link parser (confirm in harvester).
BLACKLIST_FORMATS = [
    'jpg',
    'JPG',
    'BMP',
    'bmp',
    'png',
    'PNG',
    'jpeg',
    'JPEG',
    'MP4',
    'mp4',
    'flv',
    'pdf',
    'PDF',
    'eps',
    'EPS',
    'svg',
    'SVG'
]
#Override Harvester Module Settings With User Selected Values
# The harvester module ships its own defaults for each of these names; mirror
# the script-level configuration above onto the module so the workers pick the
# values up. This replaces a redundant 18-branch elif chain (whose
# `var in locals() or var in globals()` guard was always true) with a single
# explicit list of the settings that may be overridden.
_HARVESTER_SETTINGS = (
    'DATABASE_TEMPLATE',
    'WORK_QUEUE_OVERFLOW_FILE',
    'AUTO_PROCESS_OVERFLOW',
    'DATABASE_FILE',
    'SCHEMA_INTEGRITY_CHECK',
    'CRAWL_RECORD_REPAIR',
    'RESPONSE_TIMEOUT',
    'KILL_PROCESSES_TIMEOUT',
    'MAX_REDIRECTS',
    'RECURSION_DEPTH_LIMIT',
    'PROC_COUNT',
    'COMMIT_FREQ',
    'WORK_QUEUE_MAX_SIZE',
    'RESP_QUEUE_MAX_SIZE',
    'RDF_MEDIA_TYPES',
    'RDF_FORMATS',
    'GLOBAL_HEADER',
    'BLACKLIST_FORMATS',
)
for _name in _HARVESTER_SETTINGS:
    if _name in globals():
        setattr(harvester, _name, globals()[_name])
if __name__ == "__main__":
    """
    Main runtime script. Essentially calls on the functions as appropriate. Handles workers, and processes contents of the response queue.
    """
    # Seeds are queued as (url, depth, origin_seed) triples, starting at depth 0.
    URL_BATCH = [(url.strip(), 0, url.strip()) for url in open(URL_SOURCE)]
    dbconnector, crawlid = harvester.connect(DATABASE_FILE)
    if SCHEMA_INTEGRITY_CHECK:
        if harvester.verify_database(dbconnector, DATABASE_TEMPLATE):
            print("Database schema integrity has been verified.")
        else:
            print("Error, database schema does not match the provided template.")
            exit(1)
    if CRAWL_RECORD_REPAIR:
        # Close out any crawl records left dangling by a previous aborted run.
        repairs_required, repairs_made = dbconnector.self_repair_crawl_periods()
        if repairs_required != 0:
            print("Repairing Crawl records.\nRepairs Required: {}\nRepairs Made: {}".format(repairs_required, repairs_made))
        else:
            print("No Crawl record repairs are required.")
    print("Adding seeds to database.")
    dbconnector.insert_seed_bulk(URL_BATCH)
    dbconnector.commit()
    print("Seeds added to database.")
    full_msg = False  # NOTE(review): assigned but never read in this script.
    # Manager-backed structures can be shared safely across worker processes.
    manager = Manager()
    visited = manager.dict()
    work_queue = manager.Queue(maxsize=WORK_QUEUE_MAX_SIZE)
    work_queue = harvester.add_bulk_to_work_queue(work_queue, URL_BATCH)
    resp_queue = manager.Queue(maxsize=RESP_QUEUE_MAX_SIZE)
    begin = time.time()
    # Outer loop: one pass per worker "generation"; repeats while work remains
    # (after an emergency timeout or an overflow-file reload).
    while True:
        worker_procs = []
        for i in range(PROC_COUNT):
            # Workers get 1-based ids.
            p = Process(target=harvester.worker_fn, args=(i+1, work_queue, resp_queue, visited))
            worker_procs.append(p)
        [p.start() for p in worker_procs]
        # wait for processes to start
        time.sleep(0.1)
        threads_started = 0
        threads_ended = 0
        i = 0  # responses processed since the last database commit
        emergency_timeout_start = time.time()
        emergency_timeout = False
        # Inner loop: drain the response queue until every worker has sent its
        # end sentinel, or the queue stays quiet past the kill timeout.
        while True:
            if not resp_queue.empty():
                emergency_timeout_start = time.time()
            #print(resp_queue.qsize())
            if i >= COMMIT_FREQ:
                dbconnector.commit()
                # NOTE(review): 'i =- 1' is really 'i = -1'; combined with the
                # unconditional 'i += 1' below, the counter restarts at 0 after
                # each commit — confusing spelling, but functional.
                i =- 1
            i += 1
            try:
                resp_tuple = resp_queue.get(timeout=KILL_PROCESSES_TIMEOUT)
            # NOTE(review): bare except — any error here (not only queue.Empty)
            # is treated as a frozen/empty response queue.
            except:
                print("FROZEN. Emergency Timeout: Empty Response Queue.")
                break
            if resp_tuple == harvester.start_sentinel:
                threads_started += 1
                continue
            elif resp_tuple == harvester.end_sentinel:
                threads_ended += 1
                if threads_ended == PROC_COUNT:
                    break
                else:
                    continue
            if isinstance(resp_tuple[0], dict):
                '''
                OPCODES:
                0 = Insert Seed (Deprecated)
                1 = Insert Failed Seed (Handled by 2)
                2 = Insert Link (Failed or otherwise)
                3 = Insert RDF Data
                '''
                opcode = resp_tuple[0]['opcode']
                # A URL that equals its own source is a crawl seed.
                if resp_tuple[0]['url'] == resp_tuple[0]['params']['source']:
                    dbconnector.insert_crawl_seed(uri=resp_tuple[0]['url'], crawlid=crawlid)
                if opcode == 2:
                    dbconnector.insert_link(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'], content_format=resp_tuple[0]['params']['format'], failed=resp_tuple[0]['params']['failed'])
                    if resp_tuple[0]['params']['failed'] == 1 and resp_tuple[0]['url'] == resp_tuple[0]['params']['source']:
                        # Code '000' marks a request that raised instead of returning an HTTP status.
                        if isinstance(resp_tuple[1], Exception):
                            dbconnector.insert_failed_seed(uri=resp_tuple[0]['url'], crawlid=crawlid, code='000')
                        else:
                            dbconnector.insert_failed_seed(uri=resp_tuple[0]['url'], crawlid=crawlid, code=resp_tuple[1].status_code)
                if opcode == 3:
                    # RDF hits are recorded both as ordinary links and as valid RDF URIs.
                    dbconnector.insert_link(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'],content_format=resp_tuple[0]['params']['format'], failed=0)
                    dbconnector.insert_valid_rdfuri(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'], response_format=resp_tuple[0]['params']['format'])
                if isinstance(resp_tuple[1], Exception):
                    print("{} : {}".format(str(resp_tuple[0]['url']), str(resp_tuple[1])))
                else:
                    print("{} : {}".format(str(resp_tuple[0]['url']), str(resp_tuple[1].status_code)))
            if time.time() - emergency_timeout_start > KILL_PROCESSES_TIMEOUT:
                print("FROZEN. Emergency Timeout.")
                emergency_timeout = True
                break
        if not emergency_timeout:
            [p.join() for p in worker_procs]
        else:
            [p.terminate() for p in worker_procs]
        if not work_queue.empty():
            # Unfinished work remains (e.g. after a timeout): start a new generation.
            emergency_timeout = False
            continue
        if time.time() - emergency_timeout_start > KILL_PROCESSES_TIMEOUT:
            print("FROZEN. Emergency Timeout.")
            emergency_timeout = True
            [p.terminate() for p in worker_procs]
            break
        if not AUTO_PROCESS_OVERFLOW:
            break
        else:
            # Reload URLs spilled to disk when the work queue was full; each
            # line is whitespace-separated 'url depth seed'.
            if os.path.isfile(WORK_QUEUE_OVERFLOW_FILE):
                new_urls = [(url.split()[0], int(url.split()[1]), url.split()[2]) for url in open(WORK_QUEUE_OVERFLOW_FILE, 'r')]
                open(WORK_QUEUE_OVERFLOW_FILE, 'w').close()
                if len(new_urls) > 0:
                    harvester.add_bulk_to_work_queue(work_queue, new_urls, visited)
                    continue
                else:
                    break
            else:
                break
    end = time.time()
    harvester.close(dbconnector, crawlid)
    print("Duration: {} seconds".format(end - begin))
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,328 | AGLDWG/ld-link-harvester | refs/heads/master | /charts/request_time_scatter.py | import pandas as pd
import matplotlib.pyplot as plt
def seed_count_time_scatter(data, title):
    """
    Scatter-plot crawl duration against the number of seeds visited per crawl.
    :param data: array of (crawl id, elapsed seconds, seed count) rows
    :param title: str
    :return: None
    """
    column_names = ['Crawl', 'Elapsed Time (s)', 'Seeds Visited (per crawl)']
    frame = pd.DataFrame(data, columns=column_names)
    frame.plot.scatter(x='Seeds Visited (per crawl)', y='Elapsed Time (s)')
    plt.title(title)
def request_count_time_scatter(data, title):
    """
    Scatter-plot crawl duration against the number of links visited per crawl.
    :param data: array of (crawl id, elapsed seconds, link count) rows
    :param title: str
    :return: None
    """
    column_names = ['Crawl', 'Elapsed Time (s)', 'Total Links Visited (per crawl)']
    frame = pd.DataFrame(data, columns=column_names)
    frame.plot.scatter(x='Total Links Visited (per crawl)', y='Elapsed Time (s)')
    plt.title(title)
def seed_count_time_scatter_3d(data, title):
    """
    Scatter-plot links visited against seeds visited per crawl, with elapsed
    time as a third dimension encoded as point colour (viridis colormap).
    :param data: array of (crawl id, elapsed seconds, seeds, links) rows
    :param title: str
    :return: None
    """
    column_names = ['Crawl', 'Elapsed Time (s)', 'Seeds Visited (per crawl)', 'Total Links Visited (per crawl)']
    frame = pd.DataFrame(data, columns=column_names)
    frame.plot.scatter(x='Seeds Visited (per crawl)',
                       y='Total Links Visited (per crawl)',
                       c='Elapsed Time (s)',
                       colormap='viridis')
    plt.title(title)
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,329 | AGLDWG/ld-link-harvester | refs/heads/master | /harvester/tests/test_workers_parallel.py | import harvester
from multiprocessing import Queue, Process, Manager
#Set appropriate global variables
WORK_QUEUE_MAX_SIZE = 100
RESP_QUEUE_MAX_SIZE = 100
# One seed, queued as a (url, depth, origin_seed) triple at depth 0.
URL_BATCH = [("http://127.0.0.1:8080", 0, "http://127.0.0.1:8080")]
# Manager-backed shared state: visited-URI map plus work/response queues.
manager = Manager()
visited = manager.dict()
work_queue = manager.Queue(maxsize=WORK_QUEUE_MAX_SIZE)
work_queue = harvester.add_bulk_to_work_queue(work_queue, URL_BATCH)
resp_queue = manager.Queue(maxsize=RESP_QUEUE_MAX_SIZE)
# Launch four workers in parallel and wait for all of them to finish.
workers = []
for i in range(4):
    # Fix: give each worker a distinct 1-based id (i + 1), matching how
    # harvest.py spawns workers, instead of labelling every process as 1.
    proc = Process(target=harvester.worker_fn, args=(i + 1, work_queue, resp_queue, visited))
    workers.append(proc)
[proc.start() for proc in workers]
[proc.join() for proc in workers]
# Collect worker output, skipping the 'start'/'end' sentinels; the tests below
# assert on these module-level results.
results = []
visited_links = []
while not resp_queue.empty():
    out_entry = resp_queue.get()
    if out_entry in ['start', 'end']:
        continue
    visited_links.append(out_entry[0]['url'])
    results.append(out_entry)
def test_worker():
    """
    Worker should recursively search a web domain and find all links up to a specific depth and obtain the same
    result working in parallel as they do when alone.
    """
    distinct_links = set(visited_links)
    assert len(distinct_links) == 16
def test_visited_dictionary():
    """
    The workers should all collectively crawl the entire web domain to obtain the same result as an independent worker
    would.
    """
    expected_uris = {
        'http://127.0.0.1:8080',
        'http://127.0.0.1:8080/index.html',
        'http://127.0.0.1:8080/birds.html',
        'http://127.0.0.1:8080/mammals.html',
        'http://127.0.0.1:8080/fish.html',
        'http://127.0.0.1:8080/reptiles.html',
        'http://127.0.0.1:8080/amphibians.html',
        'http://127.0.0.1:8080/anthropods.html',
        'http://127.0.0.1:8080/form.html',
        'http://127.0.0.1:8080/contact.html',
        'https://www.australia.com/en/facts-and-planning/australias-animals.html',
        'https://en.wikipedia.org/wiki/Fauna_of_Australia',
        'https://www.csiro.au/en/Research/Collections/ANIC',
        'http://127.0.0.1:8080/amphibians/growling.html',
        'http://127.0.0.1:8080/amphibians/crucifix.html',
        'http://127.0.0.1:8080/amphibians/red-crowned.html',
    }
    # Only the keys of the shared visited map matter (its values are all True).
    assert set(visited.keys()) == expected_uris
def test_worker_visited_once():
    """
    Links should only be visited once per crawl and ignored if already visited. This applies collectively to all
    workers used.
    """
    # Any duplicate visit would make the set strictly smaller than the list.
    assert len(visited_links) == len(set(visited_links))
| {"/harvester/__init__.py": ["/harvester/lddatabase.py"], "/stats.py": ["/harvester/__init__.py", "/charts/file_format_chart.py"], "/harvester/tests/test_worker.py": ["/harvester/__init__.py"], "/harvester/tests/test_database_verification.py": ["/harvester/__init__.py"], "/harvester/tests/test_response_handler.py": ["/harvester/__init__.py"], "/harvester/tests/test_link_parser.py": ["/harvester/__init__.py"], "/harvest.py": ["/harvester/__init__.py"], "/harvester/tests/test_workers_parallel.py": ["/harvester/__init__.py"], "/chart.py": ["/harvester/__init__.py", "/charts/__init__.py"]} |
78,330 | AGLDWG/ld-link-harvester | refs/heads/master | /chart.py | import harvester
import matplotlib.pyplot as plt
import os
import shutil
import charts
# Reporting configuration: where the crawl database lives and where figures go.
DATABASE_FILE = "C:\\Users\\Has112\\Documents\\db_history\\28-05-2019\\ld-database.db"
DATABASE_VERIFICATION_TEMPLATE = 'database/create_database.sql'
SAVE_CHART = True
SAVE_CHART_DIRECTORY = 'C:\\Users\\Has112\\Documents\\db_history\\28-05-2019\\figures\\' # Should end with '/'
SHOW_CHART = False
# Total crawlable domains known to the project; denominator for the progress pie.
TOTAL_DOMAINS = 7460919
TRANSPARENT = False
# Connect to Database
# crawl=False — presumably opens for reporting without registering a new
# crawl record; confirm in harvester.connect.
dbconnector, crawl_id = harvester.connect(DATABASE_FILE, crawl=False)
if harvester.verify_database(dbconnector, DATABASE_VERIFICATION_TEMPLATE):
    print("Database schema integrity has been verified.")
else:
    print("Error, database schema does not match the provided template.")
    exit(1)
# Override existing output folder if it exists
if not os.path.exists(SAVE_CHART_DIRECTORY):
    os.mkdir(SAVE_CHART_DIRECTORY)
else:
    # Wipe previous figures so the directory holds only this run's charts.
    shutil.rmtree(SAVE_CHART_DIRECTORY)
    os.mkdir(SAVE_CHART_DIRECTORY)
def _finalize_chart(filename):
    """Show and/or save the current matplotlib figure (per the SHOW_CHART /
    SAVE_CHART flags above), then close it to free the figure.

    Extracted from ten identical show/save/close blocks; the show -> save ->
    close ordering of the original is preserved exactly.
    """
    if SHOW_CHART:
        plt.show()
    if SAVE_CHART:
        plt.savefig(SAVE_CHART_DIRECTORY + filename, bbox_inches="tight", dpi=300, transparent=TRANSPARENT)
    plt.close()


# Plot Seed Count-Time Scatter (with third dimension)
dbconnector.cursor.execute("""
SELECT Crawl.crawlId, endDate - startDate as elapsed, count(distinct originSeedURI), count(address)
FROM Crawl, Link
WHERE Crawl.crawlId = Link.crawlId
GROUP BY Crawl.crawlId;
""")
seed_count_time_data = dbconnector.cursor.fetchall()
charts.request_time_scatter.seed_count_time_scatter_3d(seed_count_time_data, 'Seeds and Links Visited vs Time Requirements')
_finalize_chart('seeds_requests_crawl_size_time.png')

# Plot Request Count-Time Scatter
dbconnector.cursor.execute("""
SELECT Crawl.crawlId, endDate - startDate as elapsed, count(address)
FROM Crawl, Link
WHERE Crawl.crawlId = Link.crawlId
GROUP BY Crawl.crawlId;
""")
seed_count_time_data = dbconnector.cursor.fetchall()
charts.request_time_scatter.request_count_time_scatter(seed_count_time_data, 'Requests Made vs Time Requirements')
_finalize_chart('requests_crawl_size_time.png')

# Plot Seed Count-Time Scatter
dbconnector.cursor.execute("""
SELECT Crawl.crawlId, endDate - startDate as elapsed, count(distinct originSeedURI)
FROM Crawl, Link
WHERE Crawl.crawlId = Link.crawlId
GROUP BY Crawl.crawlId;
""")
seed_count_time_data = dbconnector.cursor.fetchall()
charts.request_time_scatter.seed_count_time_scatter(seed_count_time_data, 'Crawl Size vs Time Requirements')
_finalize_chart('seeds_crawl_size_time.png')

# Plot Progress Pie Chart: domains visited so far out of TOTAL_DOMAINS
dbconnector.cursor.execute("""
SELECT COUNT(DISTINCT originSeedUri)
FROM LINK
""")
VISITED_DOMAINS = dbconnector.cursor.fetchone()[0]
charts.progress_chart.progress_chart_pie(VISITED_DOMAINS, TOTAL_DOMAINS, "Project Progress")
_finalize_chart('project_progress.png')

# Plot Response Format Pie Chart
dbconnector.cursor.execute("""
SELECT contentFormat, COUNT(contentFormat)
FROM LINK
GROUP BY contentFormat;
""")
content_format_dict = dict(dbconnector.cursor.fetchall())
charts.file_format_chart.clean_formats(content_format_dict)
charts.file_format_chart.file_format_pie(content_format_dict, 'Response Format Breakdown')
_finalize_chart('content_format_pie.png')

# Plot Response Format Bar Chart (re-uses content_format_dict from above)
charts.file_format_chart.file_format_bar(content_format_dict, 'Response Format Breakdown')
_finalize_chart('content_format_bar.png')

# Plot RDF Response Format Pie Chart
dbconnector.cursor.execute("""
SELECT contentFormat, COUNT(contentFormat)
FROM RdfURI
GROUP BY contentFormat;
""")
content_format_dict = dict(dbconnector.cursor.fetchall())
charts.file_format_chart.clean_formats(content_format_dict)
charts.file_format_chart.file_format_pie(content_format_dict, 'RDF Format Breakdown')
_finalize_chart('rdf_content_format_pie.png')

# Plot RDF Response Format Bar Chart (re-uses the RDF content_format_dict)
charts.file_format_chart.file_format_bar(content_format_dict, 'RDF Format Breakdown')
_finalize_chart('rdf_content_format_bar.png')

# Plot Site Size Histogram (domains with more than one distinct link)
dbconnector.cursor.execute("""
SELECT originSeedURI, COUNT(DISTINCT address)
FROM Link
GROUP BY originSeedURI
HAVING COUNT(DISTINCT address) > 1;
""")
seed_size_data = dbconnector.cursor.fetchall()
charts.size_histogram.plot_size_histogram(seed_size_data, 100, "Domain Size Distribution")
_finalize_chart('domain_size_histogram.png')

# Plot RDF Per Site Histogram
dbconnector.cursor.execute("""
SELECT originSeedURI, COUNT(DISTINCT rdfSeedURI)
FROM RdfURI
GROUP BY originSeedURI
HAVING COUNT(DISTINCT rdfSeedURI) > 1;
""")
seed_size_data = dbconnector.cursor.fetchall()
charts.size_histogram.plot_size_histogram(seed_size_data, 100, "RDF Domain Size Distribution")
_finalize_chart('rdf_domain_size_histogram.png')
78,333 | GaryGaster/Redecoration | refs/heads/main | /main/views.py | from django.shortcuts import render
from django.core.mail import send_mail
def send_order_enquiry(request):
    """
    Render the contact page and, on a POST that includes an e-mail address,
    forward the enquiry to the site owner's mailbox via Django's send_mail.

    The sender's name, e-mail and phone number are appended to both the
    message body and the subject line so the enquiry can be answered directly.
    Raises on delivery failure (fail_silently=False), surfacing mail errors.
    """
    if request.method == 'POST' and request.POST.get('email', False):
        # Use .get() with defaults so a malformed POST missing optional fields
        # cannot raise KeyError (previously only 'email' was checked, and a
        # request lacking name/subject/number/message produced a 500).
        name = request.POST.get('name', '')
        email = request.POST['email']
        subject = request.POST.get('subject', '')
        number = request.POST.get('number', '')
        message = request.POST.get('message', '')
        message += '\n\n' + name + '\n' + email + '\n' + number
        subject += ' /**\ ' + name + ' /**\ ' + email + '/**\ ' + number
        send_mail(
            subject,
            message,
            'bartosz.marcickiewicz@gmail.com',
            ['bartosz.marcickiewicz@gmail.com'],
            fail_silently=False
        )
    return render(request, 'main/contact.html')
def home(request):
    """Render the landing page; enquiry POSTs to this route also trigger the
    contact e-mail via send_order_enquiry (its rendered response is discarded)."""
    send_order_enquiry(request)
    return render(request, 'main/home.html')
def authors(request):
    """Render the static authors page."""
    return render(request, 'main/authors.html')
78,334 | GaryGaster/Redecoration | refs/heads/main | /main/tests/test_urls.py | from django.test import SimpleTestCase
from django.urls import reverse, resolve
from main.views import home, send_order_enquiry, authors
class TestUrls(SimpleTestCase):
    """URL-resolution tests: each named route must map to its view callable."""

    def _resolved(self, route_name):
        # Shared helper: reverse the named route, log the resolver match
        # (as the originals did), and hand it back for assertions.
        match = resolve(reverse(route_name))
        print(match)
        return match

    def test_home_url_is_resolved(self):
        self.assertEqual(self._resolved('home').func, home)

    def test_send_order_enquiry_url_is_resolved(self):
        self.assertEqual(self._resolved('contact').func, send_order_enquiry)

    def test_authors_is_resolved(self):
        self.assertEqual(self._resolved('authors').func, authors)
| {"/main/tests/test_urls.py": ["/main/views.py"], "/main/urls.py": ["/main/views.py"]} |
78,335 | GaryGaster/Redecoration | refs/heads/main | /main/tests/test_views.py | from django.core import mail
from django.test import TestCase, Client
from django.urls import reverse
class TestViews(TestCase):
    """Integration-style checks for the main app's pages and outgoing mail."""

    def setUp(self):
        # Fresh test client plus the two page URLs under test.
        self.client = Client()
        self.home_url = reverse('home')
        self.authors_url = reverse('authors')

    def test_home_GET(self):
        resp = self.client.get(self.home_url)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'main/home.html')

    def test_authors_GET(self):
        resp = self.client.get(self.authors_url)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'main/authors.html')

    def test_send_email(self):
        # Under the test runner Django swaps in the locmem email backend,
        # so sent messages land in mail.outbox instead of going over SMTP.
        mail.send_mail('Subject here', 'Here is the message.',
                       'from@example.com', ['to@example.com'],
                       fail_silently=False)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Subject here')
| {"/main/tests/test_urls.py": ["/main/views.py"], "/main/urls.py": ["/main/views.py"]} |
78,336 | GaryGaster/Redecoration | refs/heads/main | /main/urls.py | from django.urls import path
from .views import home, send_order_enquiry, authors
# Route table for the main app; the route names are what reverse() uses
# in the views and tests.
urlpatterns = [
    path('', home, name='home'),
    path('contact/', send_order_enquiry, name='contact'),
    path('authors/', authors, name='authors'),
]
| {"/main/tests/test_urls.py": ["/main/views.py"], "/main/urls.py": ["/main/views.py"]} |
78,350 | Modorn/radar_image_propagation | refs/heads/master | /config.py | import os
# --- Iterator / data locations -------------------------------------------
DATA_BASE_PATH = os.path.join("/extend", "sz17_data")
REF_PATH = os.path.join(DATA_BASE_PATH, "radarPNG_3")
BASE_PATH = os.path.join("/extend", "gru_tf_data")
SAVE_PATH = os.path.join(BASE_PATH, "0321_1417test")
SAVE_MODEL = os.path.join(SAVE_PATH, "Save")
SAVE_VALID = os.path.join(SAVE_PATH, "Valid")
SAVE_TEST = os.path.join(SAVE_PATH, "Test")
SAVE_SUMMARY = os.path.join(SAVE_PATH, "Summary")
# Only the model and validation directories are created here.
# NOTE(review): SAVE_TEST and SAVE_SUMMARY are not created at import time —
# presumably created downstream when first written to; confirm.
if not os.path.exists(SAVE_MODEL):
    os.makedirs(SAVE_MODEL)
if not os.path.exists(SAVE_VALID):
    os.makedirs(SAVE_VALID)
# Date-range intervals ['start', 'end'] consumed by the data iterator.
RAINY_TRAIN = ['201501010000', '201801010000']
RAINY_VALID = ['201801010000', '201809180000']
RAINY_TEST = ['201805110000', '201806080000']
# --- Training schedule ----------------------------------------------------
MAX_ITER = 100001
SAVE_ITER = 5000     # checkpoint every N iterations
VALID_ITER = 5000    # run validation every N iterations
# --- Project / data format ------------------------------------------------
DTYPE = "single"
NORMALIZE = False
H = 900              # frame height (px)
W = 900              # frame width (px)
BATCH_SIZE = 2
IN_CHANEL = 1        # input channels (sic: spelling used throughout project)
# --- Encoder / Forecaster architecture ------------------------------------
IN_SEQ = 5           # input frames
OUT_SEQ = 10         # predicted frames
LR = 0.0001
RESIDUAL = False
SEQUENCE_MODE = False
FIRST_CONV = (8, 7, 5, 1) # pad
LAST_DECONV = (8, 7, 5, 1)
DOWNSAMPLE = [(5, 3), # kernel stride
              (3, 2)]
UPSAMPLE = [(5, 3),
            (4, 2)]
FEATMAP_SIZE = [180, 60, 30]
I2H_KERNEL = [3, 3, 3]
H2H_KERNEL = [5, 5, 3]
NUM_FILTER = [32, 96, 96]
# --- Evaluation -----------------------------------------------------------
# Z-R relation coefficients (Z = a * R^b) used to convert reflectivity.
ZR_a = 58.53
ZR_b = 1.56
USE_BALANCED_LOSS = False
THRESHOLDS = [0.5, 2, 5, 10, 30]
BALANCING_WEIGHTS = [1, 1, 2, 5, 10, 30]
TEMPORAL_WEIGHT_TYPE = "same"
TEMPORAL_WEIGHT_UPPER = 5
L1_LAMBDA = 0
L2_LAMBDA = 1.0
GDL_LAMBDA = 0
PREDICT_LENGTH = 20
78,351 | Modorn/radar_image_propagation | refs/heads/master | /runner.py | import logging
import os
import numpy as np
from model import Model
from iterator import Iterator
import config as c
from utils import config_log, save_png
from utils import normalize_frames, denormalize_frames
class Runner(object):
    """Drives training and benchmark evaluation of the nowcasting model."""

    def __init__(self, para_tuple=None):
        # para_tuple identifies a saved model to restore; when absent the
        # model's parameters are freshly initialised.
        self.para_tuple = para_tuple
        self.model = Model(para_tuple)
        if not para_tuple:
            self.model.init_params()

    def train(self):
        """Run the optimisation loop for c.MAX_ITER steps.

        Checkpoints every c.SAVE_ITER steps and runs validation every
        c.VALID_ITER steps.
        """
        iteration = 0  # renamed from `iter`: do not shadow the builtin
        train_iter = Iterator(time_interval=c.RAINY_TRAIN,
                              sample_mode="random",
                              seq_len=c.IN_SEQ + c.OUT_SEQ)
        while iteration < c.MAX_ITER:
            data, *_ = train_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = data[:, :c.IN_SEQ, ...]
            # Ground truth: 3-channel input predicts only the middle
            # channel; 1-channel input uses the frame as-is.
            if c.IN_CHANEL == 3:
                gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, :, :, 1:-1]
            elif c.IN_CHANEL == 1:
                gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            else:
                raise NotImplementedError
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            mse, mae, gdl = self.model.train_step(in_data, gt_data)
            logging.info(f"Iter {iteration}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            if (iteration + 1) % c.SAVE_ITER == 0:
                self.model.save_model(iteration)
            if (iteration + 1) % c.VALID_ITER == 0:
                self.run_benchmark(iteration)
            iteration += 1

    def run_benchmark(self, iter, mode="Valid"):
        """Evaluate on the validation or test interval and save PNGs.

        :param iter: label for the output directory (parameter name kept
            for caller compatibility, although it shadows the builtin).
        :param mode: "Valid" or "Test" — selects interval and output root.
        """
        if mode == "Valid":
            time_interval = c.RAINY_VALID
        else:
            time_interval = c.RAINY_TEST
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=20)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H, c.W, c.IN_CHANEL))
            gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H, c.W, 1))
            # The iterator signals exhaustion by returning a plain list
            # (was `type(data) == type([])`).
            if isinstance(data, list):
                break
            in_data[...] = data[:, :c.IN_SEQ, ...]
            if c.IN_CHANEL == 3:
                gt_data[...] = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, :, :, 1:-1]
            elif c.IN_CHANEL == 1:
                gt_data[...] = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            else:
                raise NotImplementedError
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
            logging.info(f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            i += 1
            if c.IN_CHANEL == 3:
                in_data = in_data[:, :, :, :, 1:-1]
            for b in range(c.BATCH_SIZE):
                # Directory is labelled by the timestamp of the first
                # predicted frame.
                predict_date = date_clip[b][c.IN_SEQ]
                logging.info(f"Save {predict_date} results")
                if mode == "Valid":
                    save_path = os.path.join(c.SAVE_VALID, str(iter), predict_date.strftime("%Y%m%d%H%M"))
                else:
                    save_path = os.path.join(c.SAVE_TEST, str(iter), predict_date.strftime("%Y%m%d%H%M"))
                save_png(in_data[b], os.path.join(save_path, "in"))
                save_png(pred[b], os.path.join(save_path, "pred"))
                save_png(gt_data[b], os.path.join(save_path, "out"))

    def test(self):
        """Benchmark the restored model on the test split.

        Assumes the runner was built from a checkpoint (para_tuple set —
        TODO confirm callers); the checkpoint id labels the output dir.
        """
        label = self.para_tuple[-1] + "_test"
        self.run_benchmark(label, mode="Test")
if __name__ == '__main__':
    # Configure logging, build a fresh (un-restored) runner, start training.
    config_log()
    # To resume from a checkpoint pass e.g. ("first_try", "94999") instead.
    runner = Runner(None)
    runner.train()
| {"/runner.py": ["/config.py"]} |
78,357 | kanhebei/flask-saved | refs/heads/master | /flask_saved/providers/local.py | from . import BaseStorage
import os
import uuid

from flask import current_app
from requests.models import Response
from werkzeug.exceptions import BadRequest

from .._compat import urljoin
from ..exception import UploadFileExists
class LocalStorage(BaseStorage):
    """Storage provider that writes uploads to the local filesystem.

    Configured via STORAGE_LOCAL_BASE_PATH (target directory) and
    STORAGE_LOCAL_BASE_URL (public URL prefix for result URLs).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.base_url = current_app.config.get('STORAGE_LOCAL_BASE_URL')
        self.base_path = current_app.config.get('STORAGE_LOCAL_BASE_PATH')

    def _generate_url(self, path):
        # Fall back to a bare 'http' prefix when no base URL is configured.
        return urljoin(self.base_url or 'http', path)

    def save(self, storage, filename=None):
        """Persist *storage* under *filename* (random hex name if omitted).

        *storage* may be str content, a requests ``Response``, or a
        file-like object exposing ``save()`` (e.g. werkzeug FileStorage).

        :raises UploadFileExists: target path already exists.
        :raises BadRequest: unsupported payload type.
        :returns: ``Result(url, flag)`` — *flag* is the on-disk path.
        """
        filename = filename if filename else uuid.uuid4().hex
        # Normalise Windows separators so flags and URLs always use '/'.
        full_path = os.path.join(self.base_path, filename).replace('\\', '/')
        if os.path.exists(full_path):
            # UploadFileExists is now imported from ..exception — the
            # original raised a name that was never in scope (NameError).
            raise UploadFileExists('File Already Exists')
        folder = os.path.dirname(full_path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        # Single if/elif chain: the original used two separate `if`s, so a
        # str payload was written and then still fell through to
        # `raise BadRequest()`; it also wrote str to a binary-mode file.
        if isinstance(storage, str):
            with open(full_path, 'wb') as _f:
                _f.write(storage.encode())
        elif isinstance(storage, Response):
            with open(full_path, 'wb') as _f:
                _f.write(storage.content)
        elif hasattr(storage, 'read'):
            storage.save(full_path)
        else:
            raise BadRequest()
        return self.Result(
            url=self._generate_url(full_path),
            flag=full_path
        )

    def delete(self, flag):
        """Remove the file at *flag*; missing files are silently ignored."""
        if os.path.exists(flag):
            os.remove(flag)
78,358 | kanhebei/flask-saved | refs/heads/master | /flask_saved/providers/__init__.py | from collections import namedtuple
class BaseStorage:
    """Abstract base class for storage providers.

    Concrete providers must override the I/O methods below; ``save``
    implementations return a ``Result(url, flag)`` named tuple, where
    *flag* is the provider-specific handle later passed to ``delete``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Named tuple type shared by all providers' save() return values.
        self.Result = namedtuple('Result', 'url flag')

    def generate_url(self, filename):
        """Return a public URL for *filename*; must be overridden."""
        raise NotImplementedError

    def read(self, filename):
        """Return the stored content of *filename*; must be overridden."""
        raise NotImplementedError

    def save(self, f, filename):
        """Persist *f* under *filename*; must be overridden."""
        raise NotImplementedError

    def delete(self, filename):
        """Remove the stored object named *filename*; must be overridden."""
        raise NotImplementedError
78,359 | kanhebei/flask-saved | refs/heads/master | /flask_saved/__init__.py | from werkzeug.utils import import_string
from flask import current_app
_DRIVES = {
'local':'flask_saved.providers.local.LocalStorage',
'oss': 'flask_saved.providers.oss.OssStorage'
}
class Storage:
    """Flask extension exposing pluggable storage providers (local / OSS).

    Attribute access on an instance is proxied to the configured default
    provider, so ``storage.save(f)`` dispatches to ``LocalStorage.save``
    or ``OssStorage.save``.
    """

    def __init__(self, app=None):
        self.default_provider = None
        if app is not None:
            self.init_app(app)

    @staticmethod
    def provider(name=None):
        """Instantiate a provider by name (app-config default if omitted).

        Bug fix: this was declared a staticmethod but still took ``self``
        as the first parameter, so ``storage.provider('oss')`` bound
        'oss' to ``self`` and silently returned the default provider.
        """
        _provider = name if name is not None else current_app.config['STORAGE_PROVIDER_DEFAULT']
        if _provider not in _DRIVES:
            raise RuntimeError('Storage Provider error')
        _provider_object = import_string(_DRIVES[_provider])
        return _provider_object()

    def __getattr__(self, key):
        # Unknown attributes are resolved on a freshly constructed instance
        # of the default provider (requires an application context).
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            current_provider = current_app.config['STORAGE_PROVIDER_DEFAULT']
            if current_provider not in _DRIVES:
                raise RuntimeError('Storage Provider error')
            _provider_object = import_string(_DRIVES[current_provider])
            return getattr(_provider_object(), key)

    def init_app(self, app):
        """Install default config values and validate provider settings."""
        # Default provider used when none is named explicitly.
        default_provider = app.config.setdefault('STORAGE_PROVIDER_DEFAULT', 'local')
        if default_provider not in _DRIVES:
            raise RuntimeError('STORAGE_PROVIDER_DEFAULT set error')
        # Local provider settings.
        app.config.setdefault('STORAGE_LOCAL_BASE_PATH', 'upload')
        app.config.setdefault('STORAGE_LOCAL_BASE_URL', None)
        # OSS provider settings.  The 'DOMIAN' misspelling is deliberate
        # here: the OSS provider reads the key with the same spelling.
        oss_key = app.config.setdefault('STORAGE_OSS_ACCESS_KEY', None)
        oss_secret = app.config.setdefault('STORAGE_OSS_SECRET_KEY', None)
        oss_endpoint = app.config.setdefault('STORAGE_OSS_ENDPOINT', None)
        oss_bucket = app.config.setdefault('STORAGE_OSS_BUCKET', None)
        app.config.setdefault('STORAGE_OSS_CNAME', None)
        app.config.setdefault('STORAGE_OSS_DOMIAN', None)
        app.config.setdefault('STORAGE_OSS_BASE_PATH', None)
        # These settings are mandatory when OSS is the default provider.
        if default_provider == 'oss':
            if oss_key is None:
                raise RuntimeError('STORAGE_OSS_ACCESS_KEY must be set')
            if oss_secret is None:
                raise RuntimeError('STORAGE_OSS_SECRET_KEY must be set')
            if oss_endpoint is None:
                raise RuntimeError('STORAGE_OSS_ENDPOINT must be set')
            if oss_bucket is None:
                raise RuntimeError('STORAGE_OSS_BUCKET must be set')
        self.default_provider = default_provider
        app.extensions['storage'] = self
78,360 | kanhebei/flask-saved | refs/heads/master | /wsgi.py | from flask import Flask, render_template, request, redirect, url_for,send_from_directory, jsonify
from flask_saved import Storage
from flask_debugtoolbar import DebugToolbarExtension
from flask_sqlalchemy import SQLAlchemy
import pprint
import uuid
import os
import urllib
import requests
from requests.models import Response
# Demo Flask app wiring for the flask_saved extension.
app = Flask(__name__)
# NOTE(review): hardcoded SECRET_KEY and root DB credentials — fine for a
# local demo, must come from the environment in any real deployment.
app.config['SECRET_KEY'] = 'jajajjajjja'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:root@localhost:3306/sogou'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Example provider configuration (uncomment to exercise the OSS backend):
# app.config['STORAGE_LOCAL_BASE_PATH'] = '../upload/pic'
# app.config['STORAGE_PROVIDER_DEFAULT'] = 'oss'
# app.config['STORAGE_OSS_ACCESS_KEY'] = ''
# app.config['STORAGE_OSS_SECRET_KEY'] = ''
# app.config['STORAGE_OSS_ENDPOINT'] = ''
# app.config['STORAGE_OSS_BUCKET'] = ''
# app.config['STORAGE_OSS_BASE_PATH'] = ''
# app.config['STORAGE_OSS_DOMIAN'] = ''
# app.config['STORAGE_OSS_CNAME'] = True
tool_bar = DebugToolbarExtension(app)
app.debug = True
storage = Storage(app)
db = SQLAlchemy(app)
class Picture(db.Model):
    """DB record for an uploaded picture (URL plus storage flag)."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    status = db.Column(db.Boolean, index=True, nullable=False, default=True, comment='状态')
    url = db.Column(db.String(255), comment='图片链接', index=True, nullable=False)
    # `name` is declared but never assigned by index() — only url/path are.
    name = db.Column(db.String(255))
    # Storage flag (on-disk path or OSS object key) returned by save().
    path = db.Column(db.String(255))

    @property
    def dest(self):
        # NOTE(review): joins path with `name`, which index() never sets —
        # this would raise for rows created by this app; confirm intent.
        return os.path.join(self.path, self.name)
@app.before_first_request
def bfr():
    # Lazily create all tables before the first request is served.
    db.create_all()
@app.route('/', methods=['get', 'post'])
def index():
    """Upload page: accept a posted file, or fetch a remote image by URL."""
    print(request.headers['Host'])
    if request.method == 'POST':
        payload = None
        if request.files.get('upload'):
            payload = request.files['upload']
        elif request.form.get('src'):
            # Pull the remote image; propagate any non-OK status as-is.
            payload = requests.get(request.form.get('src'))
            if payload.status_code != 200:
                return '', payload.status_code
        if payload is None:
            # Neither a file nor a source URL was supplied.
            return '', 400
        print(payload.headers)
        result = storage.save(payload)
        print(result)
        # Persist the stored file's URL and flag for later listing/deletion.
        record = Picture()
        record.url = result.url
        record.path = result.flag
        db.session.add(record)
        db.session.commit()
    return render_template('index.html')
@app.route('/lists')
def show():
    """List every stored picture."""
    return render_template('lists.html', result=Picture.query.all())
@app.route('/delete')
def delete():
    """Remove a picture's stored file and DB row, then return to the list.

    Bug fix: the file was deleted via ``img.name``, but ``index()`` never
    sets ``name`` — it stores the provider flag in ``path``, which is what
    the providers' ``delete()`` expects.
    """
    pid = request.args.get('picture')
    img = Picture.query.get(pid)
    storage.delete(img.path)
    db.session.delete(img)
    db.session.commit()
    return redirect(url_for('.show'))
@app.route('/img/<path:filename>')
def img(filename):
    # NOTE(review): BaseStorage.read raises NotImplementedError and
    # LocalStorage does not override it, so this route errors for the local
    # provider — the commented send_from_directory line below is the likely
    # intended local implementation; confirm.
    return storage.read(filename)
    # return send_from_directory(storage.base_path, filename)
78,361 | kanhebei/flask-saved | refs/heads/master | /flask_saved/exception.py | class UploadNotAllowed(Exception):
"""This exception is raised if the upload was not allowed."""
class UploadFileExists(Exception):
"""This exception is raised when the uploaded file exits."""
class UploadNotSizeMax(Exception):
"""This exception is raised when the uploaded file not size.""" | {"/flask_saved/providers/local.py": ["/flask_saved/providers/__init__.py"], "/wsgi.py": ["/flask_saved/__init__.py"], "/flask_saved/providers/oss.py": ["/flask_saved/providers/__init__.py"]} |
78,362 | kanhebei/flask-saved | refs/heads/master | /flask_saved/providers/oss.py | from . import BaseStorage
from .._compat import urljoin
from flask import current_app
from werkzeug.utils import cached_property
import oss2
import os
import uuid
class OssStorage(BaseStorage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._access_key = current_app.config.get('STORAGE_OSS_ACCESS_KEY')
self._secret_key = current_app.config.get('STORAGE_OSS_SECRET_KEY')
self._endpoint = current_app.config.get('STORAGE_OSS_ENDPOINT')
self._bucket = current_app.config.get('STORAGE_OSS_BUCKET')
self._cname = current_app.config.get('STORAGE_OSS_CNAME')
self._domain = current_app.config.get('STORAGE_OSS_DOMIAN')
self.base_path = current_app.config.get('STORAGE_OSS_BASE_PATH')
@cached_property
def auth(self):
return oss2.Auth(self._access_key, self._secret_key)
@cached_property
def bucket(self):
return oss2.Bucket(self.auth, self._endpoint, self._bucket)
@cached_property
def host(self):
return '{schema}://{bucket}.{endpoint}'.format(
schema='https',
bucket=self._bucket,
endpoint=self._endpoint
)
def save(self, storage, filename=None):
filename = filename if filename else uuid.uuid4().hex
full_path = os.path.join(self.base_path, filename).replace('\\','/')
headers = None
content_type = storage.headers.get('Content-Type')
if content_type:
headers = {'Content-Type': content_type}
result = self.bucket.put_object(full_path, storage, headers=headers)
if result.status == 200:
return self.Result(
url=self._generate_url(full_path),
flag=full_path
)
else:
return False
def delete(self, flag):
if self.bucket.object_exists(flag):
self.bucket.delete_object(flag)
return True
def _generate_url(self, path):
if self._domain:
return urljoin(self._domain, path)
else:
return urljoin(self.host, path)
| {"/flask_saved/providers/local.py": ["/flask_saved/providers/__init__.py"], "/wsgi.py": ["/flask_saved/__init__.py"], "/flask_saved/providers/oss.py": ["/flask_saved/providers/__init__.py"]} |
78,363 | vijaypatha/Quizz_Flow | refs/heads/master | /question_model.py | class Question:
"""
Creating a class that will allow us to construct an object with two attributes: text and answer
An another object will take in object of question from this object and fulfill User Journey / Experience
"""
def __init__(self, q_text, q_answer):
self.text = q_text
self.answer = q_answer
| {"/main.py": ["/question_model.py"]} |
78,364 | vijaypatha/Quizz_Flow | refs/heads/master | /main.py | from data import question_data
from question_model import Question
from quiz_brain import QuizBrain
# Build Question objects from the raw data, then run the quiz until every
# question has been asked.
question_bank = [Question(item["text"], item["answer"]) for item in question_data]

quiz = QuizBrain(question_bank)
while quiz.still_has_question():
    quiz.next_question()

print(f"Your final score is: {quiz.score}/{len(question_bank)}")
| {"/main.py": ["/question_model.py"]} |
78,371 | abgibbs/edenAP_detrend | refs/heads/myedits | /get_photometry_eden.py | import argparse
from astropy import units as u
import astropy.config as astropy_config
from astropy.io import fits
from astropy.time import Time
from configparser import ConfigParser
from datetime import datetime
import glob
import numpy as np
import os
import pickle
import shutil
from tqdm import tqdm as bar
import time
from constants import get_telescopes, find_val, LOOKDATE, log
import PhotUtils
from eden_calibrate import eden_calibrate
def get_photometry(telescope,datafolder,minap=5,maxap=50,apstep=1,get_astrometry=True,ref_centers=True, calibrate=True, use_calibrated=False):
    """Run aperture photometry over every FITS image in *datafolder*.

    Scans the folder for FITS files, reads target/filter/coordinates from
    the first usable header, optionally calibrates the frames, then runs
    PhotUtils.getPhotometry in chunks of ~10 files, pickling progress to
    <outdir>/photometry.pkl (or calib_photometry.pkl) after each chunk.

    :param telescope: telescope identifier passed through to PhotUtils.
    :param datafolder: directory of raw or calibrated images.
    :param minap/maxap/apstep: aperture radius range in pixels.
    :param get_astrometry: run astrometry on each image.
    :param ref_centers: refine target centroids.
    :param calibrate: calibrate the raw frames first.
    :param use_calibrated: frames are already calibrated.
    :returns: None (results are pickled to disk); returns early when the
        target coordinates cannot be found or photometry is complete.
    """
    # define constants from config.ini
    config = ConfigParser()
    config.read('config.ini')
    server_destination = config['FOLDER OPTIONS']['server_destination']
    astrometry_timeout = float(config['ASTROMETRY OPTIONS']['timeout'])
    # Get all of the (good) files in datafolder and determine the filter
    files,filters=[],[]
    target = None
    for path in np.sort(glob.glob(datafolder+'/*')):
        # Check that this is a FITS file
        if path.strip('/').split('.')[-1] not in ['fits','fts','fit','fits.gz','fts.gz','fit.gz']:
            continue
        # Try to open the header; skip unreadable files
        try:
            h = fits.getheader(path)
        except:
            continue
        # Get the filter name
        filt = find_val(h,'FILTER',typ=str)
        # First image only: derive target, date and coordinates
        if target is None:
            target = h['OBJECT']
            date = LOOKDATE(h)
            # Get RA, Dec either through the header (preferred) or by target lookup
            try:
                RA = find_val(h,'RA')
                Dec = find_val(h,'DEC')
                # Ensure that RA,Dec are floats or sexagesimal
                try:
                    float(RA)
                    float(Dec)
                except ValueError:
                    if ':' not in RA or ':' not in Dec:
                        RA,Dec = None, None
            except:
                RA,Dec = None,None
            if RA is None or Dec is None:
                RA, Dec = PhotUtils.get_general_coords(target,date)
                if RA == 'NoneFound':
                    print("\t Unable to find coordinates!")
                    return
            # Convert RA and DECs of object to decimal degrees:
            RA_d,Dec_d = PhotUtils.CoordsToDecimal([[RA,Dec]])
        # Add these values to the lists
        # NOTE(review): if no FITS file is found, RA_d/Dec_d stay unbound
        # and the chunk loop below would raise — presumably folders always
        # contain at least one valid image; confirm.
        files.append(path)
        filters.append(filt)
    files,filters = np.array(files),np.array(filters)
    for filt in np.unique(filters):
        print("\t Found {:d} images with filter: {:s}".format((filters==filt).sum(),filt))
    # If it doesn't already exist, create the output directory for this data set
    outdir = datafolder.replace('/RAW/','/REDUCED/').replace('/CALIBRATED/','/REDUCED/')
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    # Resume support: load any existing (non-empty) photometry pickle
    if use_calibrated or calibrate:
        pkl_name = '/calib_photometry.pkl'
    else:
        pkl_name = '/photometry.pkl'
    if os.path.exists(outdir+pkl_name):
        # NOTE(review): file handle from open() is never closed here.
        master_dict = pickle.load(open(outdir+pkl_name,'rb'))
        if master_dict == {}:
            master_dict = None
        else:
            print("\t Found photometry.pkl")
            # If all of the images have been reduced then we can skip this one
            frame_name = [filename.replace(server_destination,'') for filename in files]
            if 'frame_name' in master_dict.keys() and np.in1d(frame_name,master_dict['frame_name']).all():
                print("\t Photometry complete! Skipping...")
                return
    else:
        master_dict = None
    # Look for calibrations and if they don't exist, create them
    if calibrate and not use_calibrated:
        eden_calibrate(telescope, datafolder, files)
        datafolder = datafolder.replace('RAW', 'CALIBRATED')
        files = glob.glob(datafolder+'/*')
    # Aperture sizes (pixels)
    R = np.arange(minap,maxap + 1,apstep)
    # Get master dictionary for photometry, saving progress every 10 files:
    n_chunks = np.max([1,int(len(files)/10)])
    chunked_files = np.array_split(files,n_chunks)
    for i in range(n_chunks):
        # Perform the photometry for this chunk
        master_dict = PhotUtils.getPhotometry(chunked_files[i],target,telescope,filters,R,RA_d,Dec_d,outdir,None,
                                              get_astrometry=get_astrometry, refine_cen=ref_centers,
                                              astrometry_timeout=astrometry_timeout,master_dict=master_dict)
        # Save dictionary after every chunk so progress survives a crash:
        print('\t Saving photometry at ' + outdir + '...')
        OUT_FILE = open(outdir+pkl_name, 'wb')
        pickle.dump(master_dict, OUT_FILE)
        OUT_FILE.close()
if __name__=="__main__":
    # Command-line entry point.
    parser = argparse.ArgumentParser()
    # Name of telescope
    parser.add_argument('-telescope', default=None)
    # Directory containing the raw OR calibrated image files
    parser.add_argument('-datafolder', default=None)
    # Range of aperture radii to use (px).  type=int fixes the original
    # bug where CLI-supplied values arrived as strings and broke
    # np.arange(minap, maxap + 1, apstep) inside get_photometry.
    parser.add_argument('-minap', type=int, default=5)
    parser.add_argument('-maxap', type=int, default=50)
    parser.add_argument('-apstep', type=int, default=1)
    # Run astrometry on the images?
    parser.add_argument('--get_astrometry', dest='get_astrometry', action='store_true')
    parser.set_defaults(get_astrometry=True)
    # Refine the centroids of each target?
    parser.add_argument('--ref_centers', dest='ref_centers', action='store_true')
    parser.set_defaults(ref_centers=True)
    args = parser.parse_args()
    # Run the photometry routine
    get_photometry(args.telescope, args.datafolder, args.minap, args.maxap,
                   args.apstep, args.get_astrometry, args.ref_centers)
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,372 | abgibbs/edenAP_detrend | refs/heads/myedits | /GPDetrend.py |
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import exotoolbox
import batman
import seaborn as sns
import argparse
import matplotlib
import matplotlib.pyplot as plt
import pymultinest
from scipy import interpolate
import numpy as np
import utils
import os
# Command-line interface for the GP detrending script.
parser = argparse.ArgumentParser()
# This reads the lightcurve file. First column is time, second column is flux:
parser.add_argument('-lcfile', default=None)
# This reads the external parameters to fit (assumed to go in the columns):
parser.add_argument('-eparamfile', default=None)
# This defines which of the external parameters you want to use, separated by commas.
# Default is all:
parser.add_argument('-eparamtouse', default='all')
# This reads the external parameters to fit (assumed to go in the columns):
parser.add_argument('-compfile', default=None)
# This defines which comparison stars, if any, you want to use, separated by commas.
# Default is all:
parser.add_argument('-comptouse', default='all')
# This reads an output folder:
parser.add_argument('-ofolder', default='')
# Number of live points:
parser.add_argument('-nlive', default=1000)
args = parser.parse_args()
# Extract lightcurve and external parameters. When importing external parameters,
# standarize them and save them on the matrix X:
# ------------------------------------------------------------------------------
lcfilename = args.lcfile
# Build "<telescope>_<target>_<date>" for plot titles from the path layout.
# NOTE(review): assumes the light-curve path has at least 6 components —
# shorter paths would raise IndexError; confirm against caller conventions.
name = lcfilename.strip('/').split('/')
name = name[-6]+'_'+name[-5]+'_'+name[-4]
# Columns: time, flux (magnitudes), frame index (0 = target points).
tall,fall,f_index = np.genfromtxt(lcfilename,unpack=True,usecols=(0,1,2))
idx = np.where(f_index == 0)[0]
t,f = tall[idx],fall[idx]
out_folder = args.ofolder
eparamfilename = args.eparamfile
eparams = args.eparamtouse
# Load twice: once with names=True to capture column headers, once
# unpacked for the numeric arrays (can't get names with unpack=True).
data = np.genfromtxt(eparamfilename,dtype=None, names=True, skip_header=0)
anames = np.asarray(data.dtype.names) # store the external parameter(alpha) names
data = np.genfromtxt(eparamfilename,unpack=True,dtype=None)
# Standardize each external parameter to zero mean / unit variance, and
# collect the indices of unusable (zero-variance) parameters.
ebad = []  # indices of external parameters that cannot be used
for i in range(len(data)):
    var_i = np.var(data[i])
    # Bug fix: the original tested `np.var(data[i]) == 0 or None`; the
    # `or None` operand is always falsy, so the effective (and intended)
    # condition is simply zero variance.
    if var_i == 0:
        ebad.append(i)
        print('\nExternal parameter -'+anames[i]+'- has zero variance, will remove automatically \n')
        # Keep an aligned placeholder row (removed below) instead of
        # dividing by zero and stacking NaNs as the original did.
        x = data[i] - np.mean(data[i])
    else:
        x = (data[i] - np.mean(data[i]))/np.sqrt(var_i)
    if i == 0:
        X = x
    else:
        X = np.vstack((X,x))
# Remove bad eparams while keeping those selected by the user.
if eparams != 'all':
    idx_params = np.array(eparams.split(',')).astype('int')
    idx_params = np.delete(idx_params, np.where(np.isin(idx_params, ebad) == True))
    X = X[idx_params,:]
    anames = anames[idx_params]
else:
    X = np.delete(X, ebad, axis=0)
    anames = np.delete(anames, ebad)
# Optionally load and standardize comparison-star light curves into Xc.
compfilename = args.compfile
if compfilename is not None:
    comps = args.comptouse
    data = np.genfromtxt(compfilename,unpack=True)
    # A single comparison star loads as a 1-D array; duplicate it so the
    # row-wise processing below works uniformly.
    if len(np.shape(data))==1:
        data = np.array([data,data])
    for i in range(len(data)):
        # Standardize each comparison star to zero mean / unit variance.
        x = (data[i] - np.mean(data[i]))/np.sqrt(np.var(data[i]))
        if i == 0:
            Xc = x
        else:
            Xc = np.vstack((Xc,x))
    # Keep only the user-selected comparison stars, if any were given.
    if comps != 'all':
        idx_params = np.array(comps.split(',')).astype('int')
        Xc = Xc[idx_params,:]
print(lcfilename, eparamfilename, compfilename)
# ------------------------------------------------------------------
# Other inputs:
n_live_points = int(args.nlive)
# Build the george kernel: an ND squared-exponential over the external
# parameters (one dimension per retained eparam row of X).
import george
kernel = np.var(f)*george.kernels.ExpSquaredKernel(np.ones(X[:,idx].shape[0]),ndim=X[:,idx].shape[0],axes=range(X[:,idx].shape[0]))
# Jitter (white-noise) term, initialised at 200 ppm (log-variance).
jitter = george.modeling.ConstantModel(np.log((200.*1e-6)**2.))
# Wrap GP object to compute likelihood
gp = george.GP(kernel, mean=0.0,fit_mean=False,white_noise=jitter,fit_white_noise=True)
# Precompute on the target-frame samples of the regressors.
gp.compute(X[:,idx].T)
def prior(cube, ndim, nparams):
    """MultiNest prior transform: map unit-cube samples into parameter space
    in-place (order: mean flux, log-jitter, comp coefficients, max GP
    variance, inverse length-scales)."""
    # Median flux offset (magnitudes): uniform in [-2, 2].
    cube[0] = utils.transform_uniform(cube[0], -2., 2.)
    # Log-VARIANCE of the jitter term, sigma between 0.01 and 100 ppm.
    cube[1] = utils.transform_uniform(cube[1], np.log((0.01e-3)**2), np.log((100e-3)**2))
    pcounter = 2
    # Linear coefficient for each comparison-star regressor.
    if compfilename is not None:
        for _ in range(Xc.shape[0]):
            cube[pcounter] = utils.transform_uniform(cube[pcounter], -10, 10)
            pcounter += 1
    # Maximum kernel variance: log-uniform over (0.01 mmag)^2..(100 mmag)^2.
    cube[pcounter] = utils.transform_loguniform(cube[pcounter], (0.01*1e-3)**2, (100*1e-3)**2)
    pcounter += 1
    # alphas = 1/lambdas: gamma(1,1) = exponential prior, as in Gibson+.
    for _ in range(X.shape[0]):
        cube[pcounter] = utils.transform_exponential(cube[pcounter])
        pcounter += 1
def loglike(cube, ndim, nparams):
    """MultiNest log-likelihood: deterministic model plus GP on residuals."""
    # Unpack in the same order the prior transform fills the cube.
    mflux, ljitter = cube[0], cube[1]
    pcounter = 2
    # Deterministic part: median flux plus comparison-star regressors.
    model = mflux
    if compfilename is not None:
        for i in range(Xc.shape[0]):
            model = model + cube[pcounter] * Xc[i, idx]
            pcounter += 1
    max_var = cube[pcounter]
    pcounter += 1
    alphas = np.zeros(X.shape[0])
    for i in range(X.shape[0]):
        alphas[i] = cube[pcounter]
        pcounter += 1
    # george parameter vector: [log jitter var, log max var, log 1/alpha...].
    gp_vector = np.append(np.append(ljitter, np.log(max_var)), np.log(1./alphas))
    gp.set_parameter_vector(gp_vector)
    return gp.log_likelihood(f - model)
# Parameter count: mean flux + jitter + max variance + one alpha per eparam,
# plus one coefficient per comparison star when a comp file was given.
n_params = 3 + X.shape[0]
if compfilename is not None:
    n_params += Xc.shape[0]
out_file = out_folder+'out_multinest_trend_george_'
import pickle
# If not ran already, run MultiNest, save posterior samples and evidences to pickle file:
if not os.path.exists(out_folder+'posteriors_trend_george.pkl'):
    # Run MultiNest:
    pymultinest.run(loglike, prior, n_params, n_live_points = n_live_points,outputfiles_basename=out_file, resume = False, verbose = True)
    # Get output:
    output = pymultinest.Analyzer(outputfiles_basename=out_file, n_params = n_params)
    # Get out parameters: this matrix has (samples,n_params+1):
    mc_samples = output.get_equal_weighted_posterior()[:,:-1]
    a_lnZ = output.get_stats()['global evidence']
    # Repackage the chain into a named dictionary and pickle it.
    out = {}
    out['posterior_samples'] = {}
    out['lnZ'] = a_lnZ
    out['posterior_samples']['unnamed'] = mc_samples # for easy read of output
    out['posterior_samples']['mmean'] = mc_samples[:,0]
    out['posterior_samples']['ljitter'] = mc_samples[:,1]
    pcounter = 2
    xc_coeffs = []
    if compfilename is not None:
        for i in range(Xc.shape[0]):
            xc_coeffs.append(mc_samples[:,pcounter])
            out['posterior_samples']['xc'+str(i)] = mc_samples[:,pcounter]
            pcounter += 1
    out['posterior_samples']['max_var'] = mc_samples[:,pcounter]
    pcounter = pcounter + 1
    alphas = []
    for i in range(X.shape[0]):
        alphas.append(mc_samples[:,pcounter])
        out['posterior_samples']['alpha'+str(i)] = mc_samples[:,pcounter]
        pcounter = pcounter + 1
    pickle.dump(out,open(out_folder+'posteriors_trend_george.pkl','wb'))
else:
    # Resume path: reload the previously pickled posterior.
    mc_samples = pickle.load(open(out_folder+'posteriors_trend_george.pkl','rb'))['posterior_samples']['unnamed']
    out = pickle.load(open(out_folder+'posteriors_trend_george.pkl','rb'))
# Extract the posterior point estimate (per-parameter medians) and rebuild
# the deterministic model with it:
cube = np.median(mc_samples,axis=0)
cube_var = np.var(mc_samples,axis=0)
mflux,ljitter = cube[0],cube[1]
pcounter = 2
# Deterministic model on the target frames only (columns idx).
model = mflux
if compfilename is not None:
    for i in range(Xc.shape[0]):
        model = model + cube[pcounter]*Xc[i,idx]
        pcounter += 1
max_var = cube[pcounter]
pcounter = pcounter + 1
alphas = np.zeros(X.shape[0])
for i in range(X.shape[0]):
    alphas[i] = cube[pcounter]
    pcounter = pcounter + 1
# george parameter vector: [log jitter var, log max var, log 1/alpha...].
gp_vector = np.append(np.append(ljitter,np.log(max_var)),np.log(1./alphas))
# Evaluate model:
residuals = f - model
gp.set_parameter_vector(gp_vector)
# GP prediction over ALL samples (full X), conditioned on the residuals:
pred_mean, pred_var = gp.predict(residuals, X.T, return_var=True)
pred_std = np.sqrt(pred_var)
# Rebuild the deterministic model over all frames (full columns of Xc):
model = mflux
pcounter = 2
if compfilename is not None:
    for i in range(Xc.shape[0]):
        model = model + cube[pcounter]*Xc[i,:]
        pcounter += 1
print('\nPLOTTING...... \n')
# PLOT 1 - The raw light curve (model-subtracted, in flux) with the GP model
fout,fout_err = exotoolbox.utils.mag_to_flux(fall-model,np.ones(len(tall))*np.sqrt(np.exp(ljitter)))
plt.errorbar(tall - int(tall[0]),fout,yerr=fout_err,fmt='.')
pred_mean_f,fout_err = exotoolbox.utils.mag_to_flux(pred_mean,np.ones(len(tall))*np.sqrt(np.exp(ljitter)))
plt.plot(tall - int(tall[0]),pred_mean_f)
plt.xlabel('Time (BJD - '+str(int(tall[0]))+')')
plt.ylabel('Relative flux')
plt.title(name+'\nRaw LC w/ GP Model')
plt.figtext(0.5, 0.15, 'Evidence: '+str(round(out['lnZ'],4)), horizontalalignment='center')
plt.savefig('raw_lc.png')
#plt.show()
plt.gcf().clear()
print('Raw light curve saved!')
# PLOT 2 - Residuals to the GP model (note: fall is overwritten here and
# stays detrended for the rest of the script)
fall = fall - model - pred_mean
#plt.errorbar(tall,fall,yerr=np.ones(len(tall))*np.sqrt(np.exp(ljitter)),fmt='.')
plt.errorbar(tall - int(tall[0]),fall,yerr=np.ones(len(tall))*np.sqrt(np.exp(ljitter)),fmt='.')
plt.title(name+'\nResiduals')
plt.savefig('residuals.png')
plt.gcf().clear()
print('Residuals saved!')
# PLOT 3 - Detrended light curve, also written to detrended_lc.dat
fout,fout_err = exotoolbox.utils.mag_to_flux(fall,np.ones(len(tall))*np.sqrt(np.exp(ljitter)))
fileout = open('detrended_lc.dat','w')
for i in range(len(tall)):
    fileout.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(tall[i],fout[i],fout_err[i]))
fileout.close()
# Scatter of the detrended curve, quoted in mmag and percent.
mdev = np.std(fall) * 1000 # magnitude
fdev = np.std(fout) * 100 #percent
plt.errorbar(tall - int(tall[0]),fout,yerr=fout_err,fmt='.')
plt.xlabel('Time (BJD - '+str(int(tall[0]))+')')
plt.ylabel('Relative flux')
plt.title(name+'\nGP Detrended LC')
plt.figtext(0.5, 0.15, '$\sigma_m$ = '+str(round(fdev,3))+'% = '+str(round(mdev,3))+' mmag', horizontalalignment='center')
plt.savefig('detrended_lc.png')
plt.gcf().clear()
print('Detrended light curve saved!')
# PLOT 4 - Alpha Posteriors
alist = [] # stores full alphas
amed = [] # stores median of alphas
acounter = 0 # find amount of alphas for plotting
for key in out['posterior_samples']:
if 'alpha' in key:
acounter += 1
alist.append(out['posterior_samples'][key])
amed.append(str(round(np.median(out['posterior_samples'][key]),4)))
f, axarr = plt.subplots(acounter, sharex='col')
text = 'Median Posteriors: \n' # to be put at bottom of figure
for i in range(len(amed)):
axarr[i].hist(alist[i], bins=500, color='black')
axarr[i].set_title(anames[i], fontsize=10)
text = text + anames[i] + '- ' + amed[i] +' '
#plt.xlim(0, 4.0)
f.suptitle('Alpha Posteriors: \n'+name, fontsize='large')
plt.subplots_adjust(hspace=0.75,top=0.85)
plt.figtext(0.1, 0.01,text, fontsize=8)
plt.savefig('alpha_posteriors.png')
print('Alpha posteriors saved!')
# PLOT 5 - Comparison Posteriors
clist = []
cmed = []
ccounter = 0
for key in out['posterior_samples']:
if 'xc' in key:
ccounter += 1
clist.append(out['posterior_samples'][key])
cmed.append(str(round(np.median(out['posterior_samples'][key]),4)))
f, axarr = plt.subplots(acounter, sharex='col')
text = 'Median Posteriors: \n'
for i in range(len(cmed)):
axarr[i].hist(clist[i], bins=500, color='black')
axarr[i].set_title('xc'+str(i), fontsize=10)
text = text + 'xc'+str(i) + '- ' + cmed[i] +' '
f.suptitle('Comparison Posteriors: \n'+name, fontsize='large')
plt.subplots_adjust(hspace=0.75,top=0.85)
plt.figtext(0.1, 0.01,text, fontsize=8)
plt.savefig('comps_posteriors.png')
print('Comparison posteriors saved!')
print('\nEvidence: ', out['lnZ'])
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,373 | abgibbs/edenAP_detrend | refs/heads/myedits | /constants.py | import logging
import os
import re
import subprocess
from configparser import ConfigParser
from glob import iglob
from string import Formatter

import jdcal
import numpy as np
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans, convolve_fft
from astropy.io import fits
from dateutil import parser
# STANDARD LIST OF TELESCOPES, UPDATE WHEN NEEDED
telescopes_list = ["VATT", "BOK", "KUIPER", "SCHULMAN", "CHAT", "CASSINI", "CAHA", "LOT", "GUFI"]
# Dictionary of EDEN Available Telescopes. The values are lists with respective labels found in header files
telescopes = {"GUFI": ['gufi', 'vatt_gufi'],
"BOK": ["bok"],
"KUIPER": ["kuiper", "bigelow-1.55m"],
"SCHULMAN": ["schulman", "STX-16803"],
"CASSINI": ["cassini", "Mt. Orzale 152 cm"],
"CAHA": ["caha", "CA 1.23m"],
"LOT": ["lot", "Driver for Princeton Instruments cameras"],
"VATT": ["!vatt_gufi", "vatt"]}
bad_flags = ['BAD', 'TEST', 'RENAME', 'FOCUS', 'USELESS', 'RANDOM', 'PROVO', 'PROVA']
# String, float, int types.
# NOTE: the bare np.str / np.float / np.int aliases (plain aliases of the
# builtins) were deprecated in NumPy 1.20 and removed in 1.24, and np.float_
# (alias of np.float64) was removed in NumPy 2.0. The lists below accept the
# same runtime types as before while importing cleanly on modern NumPy.
str_types = [str, np.str_]
float_types = [float, np.float64]
int_types = [int, np.int64, np.int_]
# Suppress astropy warnings
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore',category=AstropyWarning)
# Formatting/functions for logging: message-only records appended to one shared file.
FORMAT1 = "%(message)s"
# Absolute path of the directory containing this module.
edenAP_path = os.path.abspath(os.path.dirname(__file__))
log_folder = os.path.join(edenAP_path, 'EDEN Logging')
if not os.path.isdir(log_folder):
    os.mkdir(log_folder)
logging.basicConfig(filename=os.path.join(log_folder, 'edenAP.log'), format=FORMAT1, level=logging.INFO)
# Module-wide shortcut: log("...") writes an INFO record to the file above.
log = logging.info
# This is the server destination in the current computer
config = ConfigParser()
config.read(edenAP_path+'/config.ini')
server_destination = config['FOLDER OPTIONS']['server_destination']
# EDEN Database shortcuts for mysql interactions
EDEN_data_cols = "`Filename`,`Directory`,`DATE-OBS`,`MJD`,`Telescope`,`Observer`," \
"`Target`,`Filter`,`Integration time [s]`,`Airmass`,`Instrument`," \
"`RA`,`Dec`,`Data Quality`,`RA hh`,`RA mm`,`RA ss.ss`,`Dec dd`,`Dec mm`,`Dec ss.ss`"
EDEN_data_vals = "'{}',\"{}\",'{}',{:1.3f},\"{}\",\"{}\",'{}',\"{}\",{:1.3f},{:1.3f},\"{}\","" \
""{:1.3f},{:1.3f},{:1.3f},{:t},{:t},{:1.3f},{:t},{:t},{:1.3f}"
# Open filters.dat to determine the filter sets.
# Each non-blank, non-comment line lists a canonical filter name followed by
# its aliases; filter_sets maps the canonical name to the whole token list
# (canonical name included), which id_filters() uses for normalization.
filter_sets = {}
for line in open(edenAP_path+'/filters.dat','r').readlines():
    if line.strip() == '' or line.strip()[0] == '#': continue
    keys = line.strip().split()
    filter_sets[keys[0]] = keys
# Server localizers
def get_telescopes():
    """
    List the telescope names currently present under the server's RAW tree.

    :return: list of telescope names (the basename of each RAW subdirectory)
    """
    pattern = os.path.join(server_destination, 'RAW/*')
    return [entry.split('/')[-1] for entry in iglob(pattern)]
def get_target_dates(calibrated=True, telescope=None):
    """
    Collect the observation dates present on the server for calibrated/raw targets.

    :param calibrated: if True, search the 'cal' tree; if False, the 'raw' tree
    :param telescope: telescope for which to find the targets; if None, search all telescopes
    :return: sorted list of unique date strings
    """
    pieces = ['*', 'cal', '*', '*', '*']
    if telescope is not None:
        pieces[0] = telescope.upper()
    if not calibrated:
        pieces[1] = 'raw'
    # Path layout is <server>/<TELESCOPE>/<cal|raw>/<target>/<date>/...
    found = {entry.split('/')[-1] for entry in iglob(os.path.join(server_destination, *pieces))}
    return sorted(list(found))
# Convenience functions
# advanced function to get values from headers
def shorten_path(path):
    """
    Shorten a file path for displaying purposes: "~" joined with at most the
    last four directory components (the filename itself is dropped).

    :param path: path to file
    :return: shortened directory path starting with "~"
    """
    parts = path.split(os.sep)
    # Keep every directory for short paths, only the last four otherwise.
    kept = parts[:-1] if len(parts) < 5 else parts[-5:-1]
    return os.path.join("~", *kept)
def find_val(filepath_header, keyword, ext=0, comment=False, regex=False, typ=None):
    """
    Take a keyword and find the FIRST matching key in a FITS header, returning its value.

    :param filepath_header: filepath for the file; can also be a fits.Header directly
    :param keyword: keyword (or regex pattern when regex=True) for the header key
    :param ext: extension whose header is read when a filepath is given. Default 0
    :param comment: also accept a match found inside a key's comment. Default False
    :param regex: match using re.search instead of case-insensitive substring containment
    :param typ: expected type of the returned value; if the matched value has a
        different type, the matched key's comment text is returned instead
    :return: value corresponding to the matched header key (string or float)
    :raises KeyError: when the brute-force scan finds no match.
        NOTE(review): the original docstring claimed "Returns None if no match",
        but the for/else re-raise below propagates the pending KeyError instead.
    """
    if isinstance(filepath_header, fits.Header):
        hdr = filepath_header
    else:
        with fits.open(filepath_header) as hdul:
            hdr = hdul[ext].header
    return_val = None
    # Before attempting brute search. Try getting the value directly.
    try:
        if not regex:
            return_val = hdr[keyword]
        else:
            # Force fall-through into the scan below when regex matching is requested.
            raise KeyError
    except KeyError:
        for key, val in hdr.items():
            if regex:
                if re.search(keyword, key):
                    return_val = val
                elif re.search(keyword, hdr.comments[key]):
                    return_val = val
            else:
                inKeyword = keyword.upper() in key.upper()
                inComment = keyword.upper() in hdr.comments[key].upper()
                if inKeyword:
                    return_val = val
                if comment and inComment:
                    return_val = val
            if return_val is not None:
                if (typ is not None) and (typ is not type(return_val)):
                    # Wrong type: hand back the key's comment text instead.
                    comment = hdr.comments[key].strip('/').strip()
                    return_val = comment
                break
        else:
            # Scan exhausted without a match: re-raise the pending KeyError.
            raise
    return return_val
def getjd(date):
    """
    Convert a Gregorian date to a Julian Date.

    :param date: date string or datetime object
    :return: Julian Date (sum of the jdcal two-part value)
    """
    if isinstance(date, str):
        date = parser.parse(date)
    jd_pair = jdcal.gcal2jd(int(date.year), int(date.month), int(date.day))
    return sum(jd_pair)
# function to find correct date in header
def LOOKDATE(header):
    """
    Persistent function that will look for the date of the observation recorded in the header.
    Procedure:
    1. Looks for 'DATE-OBS' (preferred) or 'DATE' in the header.
    2. Tests format 'YYYY-MM-DDTHH:MM:SS.ss', or simply 'YYYY-MM-DD' / 'YYYY/MM/DD'.
    3. If the value lacks a time component, it looks for 'UT' (then 'TIME-OBS'/'TIME')
       to find the time and appends it to the date string.
    :param header: header of current file
    :return: datetime object
    """
    try:
        # find_val will first try to get the exact key; failing that it matches
        # any keyword containing the word, which covers e.g. 'DATE-OBS'.
        date_key = 'DATE-OBS' if 'DATE-OBS' in header else 'DATE'
        date = find_val(header, date_key)
        if "T" in date:
            # Full ISO-like timestamp already contains the time.
            temp_date = parser.parse(date)
        else:
            try:
                time = find_val(header, 'UT')
                if '/' in time or '-' in time or ':' not in time:
                    # if 'UT' value suggests a date string rather than a time, bail out
                    raise KeyError
                temp_date = parser.parse(date + 'T' + time)
            except KeyError:
                # Fall back to an explicit time keyword.
                time_key = 'TIME-OBS' if 'TIME-OBS' in header else 'TIME'
                time = find_val(header, time_key)
                temp_date = parser.parse(date + 'T' + time)
    except (KeyError, TypeError):
        # Last resort: parse whatever 'DATE'-like value find_val can produce.
        date = find_val(header, 'DATE')
        temp_date = parser.parse(date)
    return temp_date
# function to validate directory paths. It creates a path if it doesn't already exist.
def validateDirs(*paths):
    """
    Validate directories. Create the directory tree for every given path if it
    doesn't exist. Any number of arguments (paths) are valid.

    :param paths: any number of directory path strings
    """
    for folder_path in paths:
        # exist_ok avoids the isdir()/makedirs() TOCTOU race: another process
        # creating the directory between check and call raised FileExistsError.
        os.makedirs(folder_path, exist_ok=True)
# natural sorting technique
def atoi(text):
    """Convert *text* to int when it is purely digits; otherwise return it unchanged."""
    if text.isdigit():
        return int(text)
    return text

def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(chunk) for chunk in re.split('(\d+)', text)]
def id_filters(filters):
    """
    Identify filters by their preferred name using the filters.dat aliases.
    Accepts a single filter string or a sequence of them; unknown filters are
    returned unchanged.
    """
    is_scalar = type(filters) in str_types
    out = np.copy([filters]) if is_scalar else np.copy(filters)
    # Replace every alias with its canonical filter name.
    for key in filter_sets:
        out[np.in1d(out, filter_sets[key])] = key
    return out[0] if is_scalar else out
def copy(source, dest):
    """
    wrapper function that uses unix system's copy function: `cp -n`
    (no-clobber: an existing destination file is never overwritten)

    :param source: source file
    :param dest: destination file/folder
    :return: the `cp` process return code (0 on success, 1 when the file existed)
    """
    # NOTE(review): new_destination() is defined elsewhere in the project;
    # presumably it resolves the final destination path -- verify at call site.
    dest = new_destination(source, dest)
    process = subprocess.Popen(['cp', '-n', source, dest],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    code = process.wait()
    if code == 1:
        log("Copy Function encountered Error. File somehow exists already.")
    return code
def mv(source, dest):
    """
    wrapper function that uses unix system's move function: `mv -n`
    (no-clobber: an existing destination file is never overwritten)

    :param source: source file
    :param dest: destination file/folder
    :return: the `mv` process return code (0 on success, 1 when the file existed)
    """
    # NOTE(review): new_destination() is defined elsewhere in the project;
    # presumably it resolves the final destination path -- verify at call site.
    dest = new_destination(source, dest)
    process = subprocess.Popen(['mv', '-n', source, dest],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    code = process.wait()
    if code == 1:
        log("Move Function encountered Error. File somehow exists already.")
    return code
def check_and_rename(file, add=0):
    """
    Return a non-clashing version of *file*: the path itself when nothing
    exists there, otherwise the first free "<stem>_<k>.<ext>" variant.

    :param file: candidate file path
    :param add: suffix counter used internally by the recursion; 0 means
        "try the name unmodified"
    :return: a file path that does not currently exist on disk
    """
    original_file = file
    if add != 0:
        # Build "<stem>_<add>.<ext>" from the original name.
        split = file.split(".")
        ext = split[-1]
        before_ext = '.'.join(split[:-1])
        part_1 = before_ext + "_" + str(add)
        file = ".".join([part_1, ext])
    if not os.path.isfile(file):
        return file
    else:
        add += 1
        # BUG FIX: the recursive result was previously discarded, so the
        # function returned None whenever the first candidate already existed.
        return check_and_rename(original_file, add)
class ModHDUList(fits.HDUList):
    # Class attribute: Gaussian kernel shared by all instances for NaN interpolation.
    kernel = Gaussian2DKernel(5)

    def __init__(self, hdus=[], interpolate=False, **kwargs):
        """
        This is wrapper around fits.HDUList class
        This class will take all methods and properties of fits.HDUList
        while also being able to perform algebraic operations with the data of each HDU image, and
        contains a interpolate and copy methods. Interpolate will detect any infs/nans or values less than zero and
        interpolate them. Copy method will return a new (clone) instance of the object.
        (New) This class allows algebraic operations to lists of numpy arrays as long as they are the same length as
        the HDUList; len(hdul) == len(array_list) must be true
        :param hdus: filepath to fits file or a fits.HDUList object
        :param interpolate: if True, instance will interpolate negative/zero values in data at construction
        """
        # NOTE(review): the mutable default `hdus=[]` is never mutated here (it is
        # rebound when a path is given), so it is safe, but a None sentinel would
        # be the conventional idiom.
        if type(hdus) is str:
            hdus = fits.open(hdus)
        # validate dimensions of data
        for i in range(len(hdus)):
            data: np.ndarray = hdus[i].data
            if data is None:
                continue
            # if data is three-dimensional and first axis is equals 1 (first axis is 3rd coordinate)
            if len(data.shape) > 2 and data.shape[0] == 1:
                # then reshape to 2 dimensions
                shaped_data = data.reshape((data.shape[1:]))
                hdus[i].data = shaped_data.astype(float)
        super(ModHDUList, self).__init__(hdus, **kwargs)
        if interpolate:
            self.interpolate()

    def interpolate(self):
        """
        interpolate zeros and negative values in data using FFT convolve function
        (in place: each HDU's data is replaced by the interpolated array)
        """
        for hdu in self:
            if hdu.data is None:
                continue
            with np.errstate(invalid='ignore'):
                non_finite = ~np.isfinite(hdu.data)
                less_zero = hdu.data <= 0
                # Only pay for the convolution when there is something to fix.
                if np.any(less_zero) or np.any(non_finite):
                    data = hdu.data.astype(float)
                    # Mask non-positive pixels and turn them into NaNs so that
                    # interpolate_replace_nans treats them as missing.
                    mask_data = np.ma.masked_less_equal(data, 0)
                    # mask_data = np.ma.masked_inside(data, -1e5, 0)
                    mask_data.fill_value = np.nan
                    data = mask_data.filled()
                    data = interpolate_replace_nans(data, self.kernel, convolve=convolve_fft, allow_huge=True)
                    hdu.data = data

    def len(self):
        """
        :return: the current length of file (number of extensions)
        """
        return len(self)

    def MEF(self):
        """
        :return: flag tells whether self is a multiExtension fits
        """
        return self.len() > 1

    def copy(self):
        """
        create a copy of the HDUList
        :return: the copy will be a new ModHDUList object
        """
        return ModHDUList([hdu.copy() for hdu in self])

    # Named aliases for the corresponding dunder operators.
    def sub(self, hdul):
        return self.__sub__(hdul)

    def mul(self, hdul):
        return self.__mul__(hdul)

    def truediv(self, hdul):
        return self.__truediv__(hdul)

    def get_data(self, i):
        # Convenience accessor for the data array of extension i.
        return self[i].data

    def check_data(self, hdul):
        """
        Check data before operations are applied to it. We allow None's to be in this list because the None is usually
        used instead for an empty data attribute.
        :param hdul: operand of an arithmetic operation (HDUList, list of arrays, or scalar)
        :return: (hdul_flag, arrays_flag) telling which kind of operand was given
        """
        # use flags to tell whether given input is a HDUList or a list of numpy arrays
        # NOTE(review): check_arraylist is not defined in this module's visible
        # code -- presumably imported/defined elsewhere; verify it exists.
        hdul_flag = "HDUList" in str(type(hdul))
        arrays_flag = check_arraylist(hdul)
        if hdul_flag or arrays_flag:
            assert len(hdul) == self.len(), "HDULists don't have the same number of extensions"
        return hdul_flag, arrays_flag

    def __sub__(self, hdul):
        # Element-wise subtraction per extension; operand may be an HDUList,
        # a list of arrays, or a scalar. Returns a new ModHDUList.
        hdul_flag, arrays_flag = self.check_data(hdul)
        new_obj = self.copy()
        for i in range(self.len()):
            if self[i].data is None:
                continue
            if hdul_flag:
                # assuming hdul is another hdul
                hdu_data = hdul[i].data.astype(float)
                data = self[i].data.astype(float) - hdu_data
            elif arrays_flag:
                # assuming hdul is a list of ndarrays
                data = self[i].data.astype(float) - hdul[i].astype(float)
            else:
                # assuming hdul is a constant
                data = self[i].data.astype(float) - hdul
            new_obj[i].data = data
        return new_obj

    def __truediv__(self, hdul):
        # Element-wise division per extension; same operand kinds as __sub__.
        hdul_flag, arrays_flag = self.check_data(hdul)
        new_obj = self.copy()
        for i in range(self.len()):
            if self[i].data is None:
                continue
            if hdul_flag:
                # assuming hdul is another hdul
                hdu_data = hdul[i].data.astype(float)
                data = self[i].data.astype(float) / hdu_data
            elif arrays_flag:
                # assuming hdul is a list of ndarrays
                data = self[i].data.astype(float) / hdul[i].astype(float)
            else:
                # assuming hdul is a constant
                data = self[i].data.astype(float) / hdul
            new_obj[i].data = data
        return new_obj

    def __add__(self, hdul):
        # Element-wise addition per extension; same operand kinds as __sub__.
        hdul_flag, arrays_flag = self.check_data(hdul)
        new_obj = self.copy()
        for i in range(self.len()):
            if self[i].data is None:
                continue
            if hdul_flag:
                # assuming hdul is another hdul
                hdu_data = hdul[i].data.astype(float)
                data = self[i].data.astype(float) + hdu_data
            elif arrays_flag:
                # assuming hdul is a list of ndarrays
                data = self[i].data.astype(float) + hdul[i].astype(float)
            else:
                # assuming hdul is a constant
                data = self[i].data.astype(float) + hdul
            new_obj[i].data = data
        return new_obj

    def __mul__(self, hdul):
        # Element-wise multiplication per extension; same operand kinds as __sub__.
        hdul_flag, arrays_flag = self.check_data(hdul)
        new_obj = self.copy()
        for i in range(self.len()):
            if self[i].data is None:
                continue
            if hdul_flag:
                # assuming hdul is another hdul
                hdu_data = hdul[i].data.astype(float)
                data = self[i].data.astype(float) * hdu_data
            elif arrays_flag:
                # assuming hdul is a list of ndarrays
                data = self[i].data.astype(float) * hdul[i].astype(float)
            else:
                # assuming hdul is a constant
                data = self[i].data.astype(float) * hdul
            new_obj[i].data = data
        return new_obj

    # Reflected operators delegate to the forward implementations.
    # NOTE(review): __rsub__ and __rtruediv__ are NOT order-swapped, so
    # `scalar - hdul` computes hdul - scalar (and likewise for division);
    # confirm whether that asymmetry is intentional before relying on it.
    def __radd__(self, hdul):
        return self.__add__(hdul)

    def __rsub__(self, hdul):
        return self.__sub__(hdul)

    def __rmul__(self, hdul):
        return self.__mul__(hdul)

    def __rtruediv__(self, hdul):
        return self.__truediv__(hdul)

    def flatten(self, method='median'):
        """
        :param method: method to normalize the file; 'median' or 'mean'
        :return: normalized HDUList extension-wise.
        """
        if method != 'median' and method != 'mean':
            raise ValueError('Method {} doesn\'t exist please enter "median" or "mean"'.format(method))
        # Resolve the name to the corresponding numpy reduction function.
        method = getattr(np, method)
        flatten_hdul = ModHDUList([hdu.copy() for hdu in self])
        for i in range(self.len()):
            if self[i].data is None:
                continue
            data = flatten_hdul[i].data
            flatten_hdul[i].data = data / method(self[i].data)
        flatten_hdul[0].header.add_history('FITS has been flattened by its {}'.format(method))
        return flatten_hdul

    def median(self):
        """
        Get median of all pixels in all extensions
        """
        return np.nanmedian([hdu.data.astype(float) for hdu in self if hdu.data is not None])

    def mean(self):
        """
        Get mean of all pixels in all extensions
        """
        return np.nanmean([hdu.data.astype(float) for hdu in self if hdu.data is not None])

    def std(self):
        """
        Get standard deviation of all pixels in all extensions
        """
        return np.nanstd([hdu.data.astype(float) for hdu in self if hdu.data is not None])
# Simple class to avoid invalid integer to float implicit conversion when formatting a string
# Use... MyFormatter().format("{0} {1:t}", "Hello", 4.567) # returns "Hello 4"
class MyFormatter(Formatter):
    """
    string.Formatter subclass that adds a 't' (truncate) format spec.

    Usage:
        MyFormatter().format("{0} {1:t}", "Hello", 4.567)  # returns "Hello 4"
    """

    def format_field(self, value, format_spec):
        # 't' truncates the value to an integer before rendering; every other
        # spec is delegated to the stock Formatter behaviour.
        if format_spec == 't':
            return str(int(value))
        return super(MyFormatter, self).format_field(value, format_spec)
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,374 | abgibbs/edenAP_detrend | refs/heads/myedits | /transit_photometry.py | # -*- coding: utf-8 -*-
import argparse
import os
import pickle
from configparser import ConfigParser
import pdb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.visualization import ZScaleInterval, ImageNormalize, LinearStretch
from matplotlib import ticker
from matplotlib.ticker import AutoMinorLocator
from mpl_toolkits.mplot3d import art3d
from scipy.signal import medfilt
from scipy.stats import linregress
from constants import get_telescopes, log, validateDirs
config = ConfigParser()
config.read('config.ini')
server_destination = config['FOLDER OPTIONS']['server_destination']
plt.style.use('ggplot')
minorLocator = AutoMinorLocator()
def angle2degree(raw_angle, unit):
    """
    Convert an angle expressed in *unit* to decimal degrees.

    :param raw_angle: numeric or string angle value
    :param unit: unit of the angle; an astropy.units object
    :return: angle in degrees (decimal)
    """
    angle = Angle(raw_angle, unit=unit)
    return angle.deg
def CoordsToDecimal(coords, hours=False):
    """
    Function to convert given angles to degree decimals. This function makes big assumptions given the wide variety
    of formats that EDEN has come across.
    ASSUMPTION:
    - if given coordinates are numeric values, then both RA/DEC are given in degrees
    - if given coordinates are strings/non-numeric values, then RA is given in hour angle and DEC in degrees.
    :param coords: either a single angle (with hours=True) or a sequence of (RA, DEC) pairs
    :param hours: treat `coords` as a single RA-like value instead of (RA, DEC) pairs
    :return: numeric degree decimals (single value with hours=True, else (ras, decs) arrays)
    """
    if hours:
        try:
            # check if object is iterable; list, array, etc
            isList = iter(coords)
            if isinstance(coords[0], str):
                # it must be a string, and with the following formats
                return angle2degree(coords, u.hourangle)
            else:
                return angle2degree(coords, u.deg)
        except TypeError:
            # if coords is a numeric value... else is a string
            if isinstance(coords, str):
                # it must be a string, and with the following formats
                return angle2degree(coords, u.hourangle)
            else:
                return angle2degree(coords, u.deg)
    ras = np.array([])
    decs = np.array([])
    for i in range(len(coords)):
        # JOSE's mod ----- function used to assume consistent formatting of RA/DEC in Header.
        raw_RA, raw_DEC = coords[i]
        # the following if-else statement only works for current telescope usage (Aug. 2018)
        try:
            # Both are in degrees format
            ras = np.append(ras, angle2degree(float(raw_RA), u.deg))
            decs = np.append(decs, angle2degree(float(raw_DEC), u.deg))
        except ValueError:
            # it must be a string, and with the following formats
            ras = np.append(ras, angle2degree(raw_RA, u.hourangle))
            decs = np.append(decs, angle2degree(raw_DEC, u.deg))
    return ras, decs
def get_super_comp(all_comp_fluxes, all_comp_fluxes_err):
    """
    Median-combine comparison-star light curves into one "super comparison".
    For each time stamp, only stars within 5 sigma (robust MAD estimate) of the
    median and with finite flux/error are combined.

    :param all_comp_fluxes: (n_stars, n_times) array of normalized fluxes
    :param all_comp_fluxes_err: matching array of flux errors
    :return: (super_comp, super_comp_err) 1-D arrays of length n_times
    """
    n_times = all_comp_fluxes.shape[1]
    super_comp = np.zeros(n_times)
    super_comp_err = np.zeros(n_times)
    for t in range(n_times):
        col = all_comp_fluxes[:, t]
        col_err = all_comp_fluxes_err[:, t]
        center = np.nanmedian(col)
        spread = get_sigma(col)
        keep = np.where((col <= center + 5 * spread) & (col >= center - 5 * spread) &
                        (~np.isnan(col)) & (~np.isnan(col_err)))[0]
        super_comp[t] = np.nanmedian(col[keep])
        super_comp_err[t] = np.sqrt(np.sum(col_err[keep] ** 2) / np.double(len(col_err[keep])))
    return super_comp, super_comp_err
def get_sigma(data):
    """
    Robust scatter estimate: median absolute deviation (MAD) scaled by 1.4826
    so it matches the standard deviation for Gaussian data.

    :param data: numerical data
    :return: scaled median absolute deviation
    """
    center = np.nanmedian(data)
    mad = np.nanmedian(np.abs(data - center))
    return 1.4826 * mad
def check_star(data, idx: int, min_ap: int, max_ap: int, force_aperture: bool, forced_aperture: int):
    """
    Check that a star's photometry contains no negative fluxes.

    :param data: photometry data pickle saved from get_photometry
    :param idx: index of object
    :param min_ap: smallest aperture checked when not forcing one
    :param max_ap: largest aperture checked when not forcing one
    :param force_aperture: if True, only `forced_aperture` is checked
    :param forced_aperture: the single aperture to check when forced
    :return: True when every checked flux is non-negative, False otherwise
    """
    ap_list = [forced_aperture] if force_aperture else (min_ap, max_ap)
    for ap in ap_list:
        flux_key = 'fluxes_' + str(ap) + '_pix_ap'
        stars = data['data']
        try:
            flux = stars['star_' + str(idx)][flux_key]
        except KeyError:
            flux = stars['target_star_' + str(idx)][flux_key]
        if np.any(flux < 0):
            return False
    return True
def super_comparison_detrend(data, idx, idx_comparison, chosen_aperture,
                             comp_apertures=None, plot_comps=False, all_idx=None,
                             supercomp=False):
    """
    Detrend the target light curve by a median-combined "super comparison"
    built from the given comparison stars.

    :param data: photometry dictionary from get_photometry (keys 'star_<i>' or 'target_star_<i>')
    :param idx: index of the target star
    :param idx_comparison: index or list of indices of comparison stars
    :param chosen_aperture: aperture (pixels) used for the target fluxes
    :param comp_apertures: per-comparison apertures; defaults to chosen_aperture for all
    :param plot_comps: if True, plot the individual and combined comparison curves
    :param all_idx: index array used to sub-select frames (None keeps everything)
    :param supercomp: if True, also return the super comparison and its error
    :return: (detrend_flux, detrend_flux_err[, super_comp, super_comp_err]),
        where the detrended flux is normalized by its own median
    """
    # Normalize idx_comparison to a list (a bare int has no len()).
    try:
        n_comps = len(idx_comparison)
    except TypeError:
        idx_comparison = [idx_comparison]
    if comp_apertures is None:
        comp_apertures = [chosen_aperture] * len(idx_comparison)
    # Target fluxes: fall back to the 'target_star_' key naming when needed.
    try:
        target_flux = data['data'][f'star_{idx}'][f'fluxes_{chosen_aperture}_pix_ap'][all_idx]
        target_flux_err = data['data']['star_' + str(idx)][f'fluxes_{chosen_aperture}_pix_ap_err'][all_idx]
    except KeyError:
        target_flux = data['data'][f'target_star_{idx}'][f'fluxes_{chosen_aperture}_pix_ap'][all_idx]
        target_flux_err = data['data'][f'target_star_{idx}'][f'fluxes_{chosen_aperture}_pix_ap_err'][all_idx]
    if plot_comps:
        plt.plot(target_flux / np.nanmedian(target_flux), 'b-')
    all_comp_fluxes = np.zeros((len(idx_comparison), target_flux.size))
    all_comp_fluxes_err = np.zeros(all_comp_fluxes.shape)
    # Collect each comparison star's light curve, normalized by its own median.
    for i in range(len(idx_comparison)):
        idx_c = idx_comparison[i]
        comp_aperture = comp_apertures[i]
        try:
            comp_flux = data['data'][f'star_{idx_c}'][f'fluxes_{comp_aperture}_pix_ap'][all_idx]
            comp_flux_err = data['data']['star_' + str(idx_c)][f'fluxes_{comp_aperture}_pix_ap_err'][all_idx]
        except KeyError:
            comp_flux = data['data'][f'target_star_{idx_c}'][f'fluxes_{comp_aperture}_pix_ap'][all_idx]
            comp_flux_err = data['data'][f'target_star_{idx_c}'][f'fluxes_{comp_aperture}_pix_ap_err'][all_idx]
        comp_med = np.nanmedian(comp_flux)
        all_comp_fluxes[i] = comp_flux / comp_med
        all_comp_fluxes_err[i] = comp_flux_err / comp_med
        if plot_comps:
            plt.plot(comp_flux / comp_med, 'r-', alpha=0.1)
    super_comp, super_comp_err = get_super_comp(all_comp_fluxes, all_comp_fluxes_err)
    if plot_comps:
        plt.plot(super_comp, 'r-')
        plt.show()
    # Divide out the super comparison and propagate both error terms.
    relative_flux = target_flux / super_comp
    relative_flux_err = relative_flux * np.sqrt((target_flux_err / target_flux) ** 2 + \
                                                (super_comp_err / super_comp) ** 2)
    med_rel_flux = np.nanmedian(relative_flux)
    detrend_flux = relative_flux / med_rel_flux
    detrend_flux_err = relative_flux_err / med_rel_flux
    if supercomp:
        return detrend_flux, detrend_flux_err, super_comp, super_comp_err
    return detrend_flux, detrend_flux_err
def save_photometry(t, rf, rf_err, output_folder, target_name,
                    plot_data=False, title='', units='Relative Flux'):
    """
    Save given relative photometry into plots/files.

    Writes `<target>.dat` (differential magnitudes) and `<target>_norm_flux.dat`
    (normalized fluxes); with plot_data=True also writes `<target>.sigma.dat`
    (scatter estimates) and `<target>.pdf` (binned light-curve plot).

    :param t: times (BJD)
    :param rf: relative fluxes
    :param rf_err: errors of relative fluxes
    :param output_folder: folder to save all files
    :param target_name: name of the target object
    :param plot_data: if True, also bin the data, estimate scatter and plot
    :param title: Title of the plots
    :param units: units of the Y axis on plots
    """
    log("Saving photometry for target {:s} on {:s} using the following:".format(target_name, output_folder))
    # NOTE(review): mag_fact = 100**0.2 = 2.512..., while the error propagation
    # below uses the standard 2.5/ln(10). If -2.5*log10(rf) was intended, the
    # magnitudes are off by ~0.5% -- confirm before changing scientific output.
    mag_fact = (100. ** .2)
    rf_mag = -mag_fact * np.log10(rf)
    rf_mag_err = rf_err * 2.5 / (np.log(10) * rf)
    f = open(output_folder + target_name + '.dat', 'w')
    f2 = open(output_folder + target_name + '_norm_flux.dat', 'w')
    f.write('# Times (BJD) \t Diff. Mag. \t Diff. Mag. Err.\n')
    f2.write('# Times (BJD) \t Norm. Flux. \t Norm. Flux Err.\n')
    # Only rows with finite magnitudes are written, keeping both files aligned.
    for i in range(len(t)):
        if not np.isnan(rf_mag[i]) and not np.isnan(rf_mag_err[i]):
            f.write(str(t[i]) + '\t' + str(rf_mag[i]) + '\t' + str(rf_mag_err[i]) + '\n')
            f2.write(str(t[i]) + '\t' + str(rf[i]) + '\t' + str(rf_err[i]) + '\n')
    f.close()
    # BUG FIX: f2 was never closed, which could leave the normalized-flux file
    # unflushed (or truncated) until interpreter exit.
    f2.close()
    if plot_data:
        # Bin on a 10-min window:
        t_min = np.min(t)
        t_hours = (t - t_min) * 24.
        n_bin = 10  # minutes
        bin_width = n_bin / 60.  # hr
        bins = (t_hours / bin_width).astype('int')
        times_bins = []
        fluxes_bins = []
        errors_bins = []
        for i_bin in np.unique(bins):
            idx = np.where(bins == i_bin)
            times_bins.append(np.median(t_hours[idx]))
            fluxes_bins.append(np.nanmedian(rf[idx]))
            errors_bins.append(np.sqrt(np.sum(rf_err[idx] ** 2)) / np.double(len(idx[0])))
        # Calculate standard deviation of median filtered data
        mfilt = median_filter(rf)
        sigma = get_sigma(rf - mfilt) / np.nanmedian(rf)
        sigma_mag = -2.5 * np.log10((1. - sigma) / 1.)
        mfilt_bin = median_filter(fluxes_bins)
        sigma_bin = get_sigma(fluxes_bins - mfilt_bin) / np.median(fluxes_bins)
        sigma_mag_bin = -2.5 * np.log10((1. - sigma_bin) / 1.)
        sigma_top = '$\sigma_{{m}}$ = {:.0f} ppm = {:.1f} mmag'.format(sigma * 1e6, sigma_mag * 1e3)
        sigma_bott = '$\sigma_{{m,bin}}$ = {:.0f} ppm = {:.1f} mmag'.format(sigma_bin * 1e6, sigma_mag_bin * 1e3)
        sigma_file = open(output_folder + target_name + '.sigma.dat', 'w')
        sigma_file.write(sigma_top + '\n')
        sigma_file.write(sigma_bott)
        sigma_file.close()
        # Make plot
        plt.errorbar(t_hours, rf, rf_err, fmt='o', alpha=0.3, label='Data')
        plt.errorbar(np.array(times_bins), np.array(fluxes_bins), np.array(errors_bins), fmt='o', label='10-min bins')
        plt.annotate(sigma_top, xy=(0.5, 0.10), xycoords='axes fraction', va='bottom', ha='center')
        plt.annotate(sigma_bott, xy=(0.5, 0.05), xycoords='axes fraction', va='bottom', ha='center')
        plt.xlabel('Time from start (hr)')
        plt.ylabel(units)
        plt.title(title, fontsize='12')
        plt.xlim(-0.05 * np.ptp(t_hours), 1.05 * np.ptp(t_hours))
        # Clamp the Y range to the data, falling back to nominal limits when
        # the robust bounds are invalid (e.g. all-NaN input).
        nom_ymin = 0.95
        data_min = np.max([np.min(rf - 2 * rf_err), np.nanmedian(rf - rf_err) - 15 * get_sigma(rf - median_filter(rf))])
        nom_ymax = 1.05
        data_max = np.min([np.max(rf + 2 * rf_err), np.nanmedian(rf + rf_err) + 15 * get_sigma(rf - median_filter(rf))])
        try:
            plt.ylim(data_min, data_max)
        except:
            plt.ylim(nom_ymin, nom_ymax)
        x_formatter = ticker.ScalarFormatter(useOffset=False)
        plt.gca().xaxis.set_major_formatter(x_formatter)
        plt.legend()
        plt.gcf().savefig(output_folder + target_name + '.pdf', dpi=150, bbox_inches='tight')
        plt.close()
def save_trendStats(epdlc_path, output_folder: str, rmag_delta=0.5, mag_delta=0.8, starID=None):
    """
    Create and save diagnostic trend plots (airmass, seeing/FWHM and
    centroid-distance versus magnitude) for a single star's light curve.

    :param epdlc_path: either a pandas DataFrame already holding the epdlc
                       columns, or a path to an .epdlc csv file. When a path is
                       given, starID is derived from the file name.
    :param output_folder: base folder; trend sub-folders are created inside it
    :param rmag_delta: half-range used to clip the relative-magnitude y-axis
    :param mag_delta: half-range (around the mean) used to clip the magnitude y-axis
    :param starID: star identifier for titles/filenames; must be provided when
                   a DataFrame is passed directly
    """
    # prepare data and some vars for plots
    if isinstance(epdlc_path, pd.DataFrame):
        epdlc = epdlc_path
    else:
        # else it is assumed is a string (path)
        epdlc = pd.read_csv(epdlc_path)
        starID: str = os.path.basename(epdlc_path).split('.')[-2]
    # setup limits for magnitude and relative magnitude plots
    mean_mag = epdlc['mag1'].mean()
    rmag_lim = max(-rmag_delta, epdlc['rmag1'].min()), min(rmag_delta, epdlc['rmag1'].max())
    mag_lim = max(mean_mag - mag_delta, epdlc['mag1'].min()), min(mean_mag + mag_delta, epdlc['mag1'].max())
    airmass_savepath = os.path.join(output_folder, 'airmass_trends', starID + '_Airmass trend.png')
    fwhm_savepath = os.path.join(output_folder, 'seeing_trends', starID + '_Seeing trend.png')
    distance_savepath = os.path.join(output_folder, 'distance_trends', starID + '_Distance trend.png')
    validateDirs(output_folder, os.path.dirname(airmass_savepath),
                 os.path.dirname(fwhm_savepath), os.path.dirname(distance_savepath))
    # calculate distances from median:
    xdist = abs(epdlc['cen_x'] - epdlc['cen_x'].median())
    ydist = abs(epdlc['cen_y'] - epdlc['cen_y'].median())
    epdlc['cen_dist'] = (xdist ** 2 + ydist ** 2) ** 0.5
    # Correlate only numeric columns: string columns (e.g. 'frame') would make
    # DataFrame.corr() raise on pandas >= 2.0.
    corr = epdlc.select_dtypes(include='number').corr()
    # start airmass plots
    fig, axes = plt.subplots(2, 2, sharex='col')
    epdlc.plot(x='BJD', y='Z', title='Airmass over time', ax=axes[0, 0])
    axes[0, 0].set_ylabel('Airmass')
    epdlc.plot.scatter(x='BJD', y='mag1', yerr='mag1_err', title='Magnitude over time', ax=axes[1, 0])
    axes[1, 0].set_ylabel('Magnitude')
    ax2 = epdlc.plot.scatter(x='Z', y='mag1', yerr='mag1_err', title='Magnitude vs Airmass', ax=axes[1, 1])
    axes[1, 1].set_xlabel('Airmass')
    ax1 = epdlc.plot.scatter(x='Z', y='rmag1', yerr='rmag1_err', title='Relative Magnitude vs Airmass', ax=axes[0, 1])
    axes[0, 1].set_ylabel('Relative Magnitude')
    rmag_corr = corr['rmag1'].loc['Z']
    mag_corr = corr['mag1'].loc['Z']
    ann = ax1.annotate('Correlation Coeff: %.3f' % rmag_corr, (30, 5), None, 'axes points')
    ann.set_size(8)
    ann = ax2.annotate('Correlation Coeff: %.3f' % mag_corr, (30, 5), None, 'axes points')
    ann.set_size(8)
    for ax in axes.reshape(axes.size):
        ax.autoscale(True, axis='both', tight=True)
    fig.suptitle(starID, y=1.05)
    plt.tight_layout()
    try:
        axes[0, 1].set_ylim(rmag_lim)
        axes[1, 0].set_ylim(mag_lim)
        axes[1, 1].set_ylim(mag_lim)
    except Exception:
        # Axis limits can be invalid (e.g. NaNs); fall back to autoscale
        pass
    fig.savefig(airmass_savepath, dpi=200, bbox_inches='tight')
    # end airmass plots
    plt.close()
    # start seeing plots
    fig, axes = plt.subplots(2, 2, sharex='col')
    epdlc.plot.scatter(x='BJD', y='FWHM', title='Seeing over time', ax=axes[0, 0])
    epdlc.plot.scatter(x='BJD', y='mag1', yerr='mag1_err', title='Magnitude over time', ax=axes[1, 0])
    axes[1, 0].set_ylabel('Magnitude')
    ax2 = epdlc.plot.scatter(x='FWHM', y='mag1', yerr='mag1_err', title='Magnitude vs Seeing', ax=axes[1, 1])
    ax1 = epdlc.plot.scatter(x='FWHM', y='rmag1', yerr='rmag1_err', title='Relative Magnitude vs Seeing', ax=axes[0, 1])
    axes[0, 1].set_ylabel('Relative Magnitude')
    rmag_corr = corr['rmag1'].loc['FWHM']
    mag_corr = corr['mag1'].loc['FWHM']
    ann = ax1.annotate('Correlation Coeff: %.3f' % rmag_corr, (30, 5), None, 'axes points')
    ann.set_size(8)
    ann = ax2.annotate('Correlation Coeff: %.3f' % mag_corr, (30, 5), None, 'axes points')
    ann.set_size(8)
    for ax in axes.reshape(axes.size):
        ax.autoscale(True, axis='both', tight=True)
    fig.suptitle(starID, y=1.05)
    plt.tight_layout()
    try:
        axes[0, 1].set_ylim(rmag_lim)
        axes[1, 0].set_ylim(mag_lim)
        axes[1, 1].set_ylim(mag_lim)
    except Exception:
        pass
    fig.savefig(fwhm_savepath, dpi=200, bbox_inches='tight')
    # end seeing plots
    plt.close()
    # start distance plots
    fig, axes = plt.subplots(2, 2, sharex='col')
    epdlc.plot(x='BJD', y='cen_dist', title='Distance over time', ax=axes[0, 0])
    axes[0, 0].set_ylabel('Pixel distance')
    epdlc.plot.scatter(x='BJD', y='mag1', yerr='mag1_err',
                       title='Magnitude over time', ax=axes[1, 0])
    axes[1, 0].set_ylabel('Magnitude')
    ax2 = epdlc.plot.scatter(x='cen_dist', y='mag1', yerr='mag1_err',
                             title='Magnitude vs Distance', ax=axes[1, 1])
    axes[1, 1].set_xlabel('Pixel Distance')
    ax1 = epdlc.plot.scatter(x='cen_dist', y='rmag1', yerr='rmag1_err',
                             title='Relative Magnitude vs Distance', ax=axes[0, 1])
    axes[0, 1].set_ylabel('Relative Magnitude')
    rmag_corr = corr['rmag1'].loc['cen_dist']
    mag_corr = corr['mag1'].loc['cen_dist']
    ax1.annotate('Correlation Coeff: %.3f' % rmag_corr, (30, 5), None, 'axes points', size=8)
    ax2.annotate('Correlation Coeff: %.3f' % mag_corr, (30, 5), None, 'axes points', size=8)
    for ax in axes.reshape(axes.size):
        ax.autoscale(True, axis='both', tight=True)
    fig.suptitle(starID + '\nDistance: Distance from median center', y=1.1)
    plt.tight_layout()
    try:
        axes[0, 1].set_ylim(rmag_lim)
        axes[1, 0].set_ylim(mag_lim)
        axes[1, 1].set_ylim(mag_lim)
    except Exception:
        pass
    fig.savefig(distance_savepath, dpi=200, bbox_inches='tight')
    # end distance plots
    plt.close()
def dataframe2epdlc(table: pd.DataFrame, out_path: str):
    """
    Serialize a light-curve DataFrame into a fixed-width .epdlc text file.

    :param table: DataFrame whose columns match the epdlc format below
    :param out_path: complete file path to save epdlc file
    """
    # Fixed-width header line (column names padded to their field widths):
    header = f'#{"frame":<31} {"BJD":<16} {"JD":<16} {"mag1":<8} {"mag1_err":<8} ' \
             f'{"mag2":<8} {"mag2_err":<8} {"mag3":<8} {"mag3_err":<8} ' \
             f'{"rmag1":<8} {"rmag1_err":<8} {"rmag2":<8} {"rmag2_err":<8}' \
             f' {"rmag3":<8} {"rmag3_err":<8} {"cen_x":<8} ' \
             f'{"cen_y":<8} {"bg":<8} {"bg_err":<8} {"FWHM":<8} ' \
             f'{"HA":<8} {"ZA":<8} {"Z":<8}\n'
    # Per-row format mirroring the header field widths:
    row_fmt = '{frame:<32} {BJD:<16.8f} {JD:<16.8f} {mag1:< 8.4f} {mag1_err:<8.4f} ' \
              '{mag2:< 8.4f} {mag2_err:<8.4f} {mag3:< 8.4f} {mag3_err:<8.4f} ' \
              '{rmag1:< 8.4f} {rmag1_err:< 8.4f} {rmag2:< 8.4f} {rmag2_err:< 8.4f} ' \
              '{rmag3:< 8.4f} {rmag3_err:< 8.4f} {cen_x:<8.3f} ' \
              '{cen_y:<8.3f} {bg:<8.3f} {bg_err:<8.3f} {FWHM:< 8.3f} ' \
              '{HA:< 8.2f} {ZA:<8.2f} {Z:<8.3f}\n'
    # Accumulate the header plus one formatted line per table row, then write
    # everything in a single call.
    out_lines = [header]
    for _, record in table.iterrows():
        out_lines.append(row_fmt.format(**record))
    with open(out_path, 'w') as out_file:
        out_file.write(''.join(out_lines))
def save_photometry_hs(data, idx, idx_comparison,idx_all_comps_sorted,
                       chosen_aperture: int, min_aperture: int, max_aperture: int,
                       comp_apertures, idx_sort_times, output_folder: str,
                       target_name: str, band='i', all_idx=None):
    """
    Save epdlc files (plus csv/html copies and trend plots) for the target and
    every comparison star.

    :param data: pickle data
    :param idx: index of main target
    :param idx_comparison: indeces of comparison stars to detrend
    :param idx_all_comps_sorted: all comparison indices sorted by correlation
    :param chosen_aperture: choosen aperture to save
    :param min_aperture: minimum aperture to save
    :param max_aperture: maximum aperture to save
    :param comp_apertures: apertures used for the comparison stars
    :param idx_sort_times: indeces of sorted times to use
    :param output_folder: folder to save all epdlc files
    :param target_name: name of the main target
    :param band: photometric band label (currently unused here)
    :param all_idx: frame indices belonging to the current site/camera
    :return: None
    """
    header = ['frame', 'BJD', 'JD', 'mag1', 'mag1_err', 'mag2', 'mag2_err', 'mag3', 'mag3_err',
              'rmag1', 'rmag1_err', 'rmag2', 'rmag2_err', 'rmag3', 'rmag3_err', 'cen_x', 'cen_y',
              'bg', 'bg_err', 'FWHM', 'HA', 'ZA', 'Z']
    hs_folder = os.path.join(output_folder, 'LC')
    trends_folder = os.path.join(output_folder, 'trends')
    other_formats = os.path.join(hs_folder, 'csv_html')
    # Create folder for the outputs in HS format:
    if not os.path.exists(hs_folder):
        os.mkdir(hs_folder)
    # Create folder for the outputs in html format:
    if not os.path.exists(other_formats):
        os.mkdir(other_formats)
    # Create folder for trendStats
    if not os.path.exists(trends_folder):
        os.mkdir(trends_folder)
    # First, write lightcurve in the HS format for each star. First the comparisons:
    print('\t Saving data for target and', len(idx_comparison), 'comparison stars')
    for i in np.append(idx_comparison, idx):
        try:
            d = data['data']['star_%d' % i]
        except KeyError:
            # The main target is stored under a different key prefix
            d = data['data']['target_star_%d' % i]
        if i == idx:
            star_name = target_name
        else:
            star_name = str(data['data']['IDs'][i])
        fluxes_ap = 'fluxes_%d_pix_ap' % chosen_aperture
        fluxes_min_ap = 'fluxes_%d_pix_ap' % min_aperture
        fluxes_max_ap = 'fluxes_%d_pix_ap' % max_aperture
        fluxes_ap_err = 'fluxes_%d_pix_ap_err' % chosen_aperture
        fluxes_min_ap_err = 'fluxes_%d_pix_ap_err' % min_aperture
        fluxes_max_ap_err = 'fluxes_%d_pix_ap_err' % max_aperture
        flux = d[fluxes_ap][all_idx][idx_sort_times]
        flux_min = d[fluxes_min_ap][all_idx][idx_sort_times]
        flux_max = d[fluxes_max_ap][all_idx][idx_sort_times]
        flux_err = d[fluxes_ap_err][all_idx][idx_sort_times]
        flux_min_err = d[fluxes_min_ap_err][all_idx][idx_sort_times]
        flux_max_err = d[fluxes_max_ap_err][all_idx][idx_sort_times]
        FWHMs = d['fwhm'][all_idx][idx_sort_times]
        epdlc_path = os.path.join(hs_folder, star_name + '.epdlc')
        csv_path = os.path.join(other_formats, star_name + '.csv')
        html_path = os.path.join(other_formats, star_name + '.html')
        ra = data['data']['RA_degs'][i]
        # Get super-comparison detrend for the current star:
        current_comps = [ii for ii in idx_comparison if ii != i]
        if len(current_comps) == 0:
            current_comps = idx_all_comps_sorted[0:10]
        # BUGFIX: the original rebound the `comp_apertures` *parameter* here,
        # so the list grew multiplicatively on every loop iteration. Use a
        # per-star local instead.
        comp_aps = comp_apertures * len(current_comps)
        r_flux1, r_flux_err1 = super_comparison_detrend(data, i, current_comps, chosen_aperture,
                                                        comp_apertures=comp_aps, plot_comps=False,
                                                        all_idx=all_idx)
        # BUGFIX: r_flux_err1 was not re-ordered by idx_sort_times (unlike the
        # min/max aperture cases below), misaligning errors with fluxes.
        r_flux1, r_flux_err1 = r_flux1[idx_sort_times], r_flux_err1[idx_sort_times]
        r_flux2, r_flux_err2 = super_comparison_detrend(data, i, current_comps, min_aperture,
                                                        comp_apertures=comp_aps, plot_comps=False,
                                                        all_idx=all_idx)
        r_flux2, r_flux_err2 = r_flux2[idx_sort_times], r_flux_err2[idx_sort_times]
        r_flux3, r_flux_err3 = super_comparison_detrend(data, i, current_comps, max_aperture,
                                                        comp_apertures=comp_aps, plot_comps=False,
                                                        all_idx=all_idx)
        r_flux3, r_flux_err3 = r_flux3[idx_sort_times], r_flux_err3[idx_sort_times]
        # (100.**.2) ~ 2.512 (Pogson ratio used to convert flux to magnitudes)
        mag_fact = (100. ** .2)
        # Get Relative Mags Data
        rmag1 = (-mag_fact * np.log10(r_flux1))
        rmag1_err = (r_flux_err1 * mag_fact / (np.log(10) * r_flux1))
        rmag2 = (-mag_fact * np.log10(r_flux2))
        rmag2_err = (r_flux_err2 * mag_fact / (np.log(10) * r_flux2))
        rmag3 = (-mag_fact * np.log10(r_flux3))
        rmag3_err = (r_flux_err3 * mag_fact / (np.log(10) * r_flux3))
        # Get Mags Data
        mag1 = (-mag_fact * np.log10(flux))
        mag1_err = (mag_fact * flux_err / (np.log(10.) * flux))
        mag2 = (-mag_fact * np.log10(flux_min))
        mag2_err = (mag_fact * flux_min_err / (np.log(10.) * flux_min))
        mag3 = (-mag_fact * np.log10(flux_max))
        mag3_err = (mag_fact * flux_max_err / (np.log(10.) * flux_max))
        # Set all fwhms that are 0 to -1 (sentinel for "no FWHM measured")
        FWHMs[FWHMs == 0] = -1
        lst_deg = CoordsToDecimal(data['LST'][all_idx][idx_sort_times], hours=True)
        # Keep the hour angle (lst_deg - ra) wrapped into [0, 360) degrees
        HA = lst_deg - ra + 360
        HA[HA<0] += 360
        HA[HA>=360] -= 360
        Z = data['airmasses'][all_idx][idx_sort_times].astype(float)
        ZA = np.arccos(1. / Z) * (180. / np.pi)
        # frame names in order
        frame_names = [frame_name.split('/')[-1] for frame_name in data['frame_name'][all_idx][idx_sort_times]]
        bjd_times = data['BJD_times'][all_idx][idx_sort_times]
        jd_times = data['JD_times'][all_idx][idx_sort_times]
        centroids_x = d['centroids_x'][all_idx][idx_sort_times]
        centroids_y = d['centroids_y'][all_idx][idx_sort_times]
        background = d['background'][all_idx][idx_sort_times]
        background_err = d['background_err'][all_idx][idx_sort_times]
        tableData = {hrd: col for hrd, col in zip(header, [frame_names, bjd_times, jd_times, mag1, mag1_err,
                                                           mag2, mag2_err, mag3, mag3_err, rmag1, rmag1_err,
                                                           rmag2, rmag2_err, rmag3, rmag3_err, centroids_x,
                                                           centroids_y, background, background_err, FWHMs,
                                                           HA, ZA, Z])}
        table = pd.DataFrame(tableData)
        table.to_csv(csv_path, index=False)
        table.to_html(html_path, float_format=lambda double: '%.4f' % double)
        dataframe2epdlc(table, epdlc_path)
        save_trendStats(table, trends_folder, starID=star_name)
def radial_profile(data, center):
    """
    Compute the azimuthally averaged radial profile of an image.

    :param data: 2D array image
    :param center: (x, y) coordinates of the profile center
    :return: 1D array of average flux at each integer pixel distance from center
    """
    x, y = np.indices(data.shape)
    # Integer radial distance of every pixel from the center.
    # (np.int was removed in NumPy 1.24 — use the builtin int instead; the
    # original also cast twice redundantly.)
    r = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2).astype(int)
    # Sum of pixel values at each radius divided by pixel count at that radius
    tbin = np.bincount(r.ravel(), data.ravel())
    nr = np.bincount(r.ravel())
    radialprofile = tbin / nr
    return radialprofile
def plot_images(data, idx, idx_comparison, aperture, min_ap, max_ap,
                comp_apertures, out_dir, frames, idx_frames, half_size=50, overwrite=False):
    """
    Save annotated sub-images around the target and each comparison star for
    every science frame: a 2D cutout with aperture circles, plus (for cutouts)
    a radial profile and a 3D surface plot. Full-frame views are saved only
    for the first frame.

    :param data: photometry dictionary (pickle contents)
    :param idx: index of the main target
    :param idx_comparison: indices of the comparison stars
    :param aperture: chosen photometric aperture (white circle)
    :param min_ap: minimum aperture (black circle)
    :param max_ap: maximum aperture (black circle)
    :param comp_apertures: per-comparison apertures, aligned with idx_comparison
    :param out_dir: output directory; 'sub_imgs/...' subfolders are created here
    :param frames: frame file names (relative to server_destination)
    :param idx_frames: frame indices used to select centroids
    :param half_size: half-width of the square cutout in pixels
    :param overwrite: re-create plots even if the output file already exists
    """
    def plot_im(d, cen_x, cen_y, obj_x, obj_y, ap: int, min_ap: int, max_ap: int,
                half_size, frame_name: str, object_name: str, overwrite, norm: ImageNormalize=None):
        # Render and save the plots for a single object on a single frame.
        # cen_x/cen_y center the cutout; obj_x/obj_y mark the object position.
        if 'full_frame' in object_name:
            fullf_path = os.path.join(out_dir, 'sub_imgs', 'full_frame')
            if not os.path.exists(fullf_path):
                os.makedirs(fullf_path)
            # f-string (Literal String Interpolation) builds the output path
            fname = f'{out_dir:}/sub_imgs/full_frame/{object_name:}_{frame_name.split("/")[-1]:}.png'
            # Full-frame views get no 3D/radial companion plots
            fname_3d = fname_r =None
        else:
            if not os.path.exists(os.path.join(out_dir, 'sub_imgs', object_name, 'surfaces')):
                os.makedirs(os.path.join(out_dir, 'sub_imgs', object_name, 'surfaces'))
            if not os.path.exists(os.path.join(out_dir, 'sub_imgs', object_name, 'radial_plot')):
                os.makedirs(os.path.join(out_dir, 'sub_imgs', object_name, 'radial_plot'))
            fname = f'{out_dir:}/sub_imgs/{object_name:}/{frame_name.split("/")[-1]:}_{object_name:}.png'
            fname_3d = f'{out_dir:}/sub_imgs/{object_name:}/surfaces/{frame_name.split("/")[-1]:}_{object_name:}.png'
            fname_r = f'{out_dir:}/sub_imgs/{object_name:}/radial_plot/{frame_name.split("/")[-1]:}_{object_name:}.png'
        if not os.path.exists(fname) or overwrite:
            # Clip the cutout window to the detector boundaries:
            x0 = max(0, int(cen_x) - half_size)
            x1 = min(int(cen_x) + half_size, d.shape[1])
            y0 = max(0, int(cen_y) - half_size)
            y1 = min(int(cen_y) + half_size, d.shape[0])
            # plot 2D Subimage (median-subtracted copy, original frame untouched)
            subimg = d[y0:y1, x0:x1].copy()
            subimg -= np.median(subimg)
            # create normalization for plotting
            # NOTE(review): this branch *replaces* a provided norm with one
            # computed from the sub-image and leaves norm=None untouched —
            # the condition looks inverted (`is None` was likely intended);
            # confirm against callers before changing.
            if norm is not None:
                norm = ImageNormalize(subimg, interval=ZScaleInterval(), stretch=LinearStretch())
            extent = (x0, x1, y0, y1)
            plt.imshow(subimg, extent=extent, interpolation='none', origin='lower', norm=norm)
            # Mark the object and draw min/max/chosen aperture circles
            plt.plot(obj_x, obj_y, 'wx', markersize=15, lw=2, alpha=0.5)
            circle = plt.Circle((obj_x, obj_y), min_ap, color='black', lw=2, alpha=0.5, fill=False)
            circle2 = plt.Circle((obj_x, obj_y), max_ap, color='black', lw=2, alpha=0.5, fill=False)
            circle3 = plt.Circle((obj_x, obj_y), ap, color='white', lw=2, alpha=0.5, fill=False)
            plt.gca().add_artist(circle)
            plt.gca().add_artist(circle2)
            plt.gca().add_artist(circle3)
            plt.savefig(fname, dpi=125, bbox_inches='tight')
            plt.close()
            if fname_3d is not None:
                # Plot Radial profile for target (out to max aperture + 10 px)
                center = obj_x - x0, obj_y - y0
                rad_profile = radial_profile(subimg, center)[:max_ap + 10]
                fig, ax = plt.subplots()
                plt.plot(rad_profile, 'x-')
                plt.tick_params(which='both', width=2)
                plt.tick_params(which='major', length=7)
                plt.tick_params(which='minor', length=4, color='r')
                ax.xaxis.set_minor_locator(minorLocator)
                plt.grid()
                ax.set_ylabel("Average Count")
                ax.set_xlabel("Pixels")
                plt.grid(which="minor")
                fig.savefig(fname_r, bbox_inches='tight')
                plt.close()
                # plot 3D Surface subimage; we create an even smaller subimage for more detail
                half_size = max_ap + 1
                x0 = max(0, int(cen_x) - half_size)
                x1 = min(int(cen_x) + half_size, d.shape[1])
                y0 = max(0, int(cen_y) - half_size)
                y1 = min(int(cen_y) + half_size, d.shape[0])
                # Mean of the *previous* (larger) cutout serves as the z-level
                # at which the aperture circles are drawn
                background_level = subimg.mean()
                subimg = d[y0:y1, x0:x1] - np.median(d[y0:y1, x0:x1])
                x = np.arange(x1 - x0) + x0
                y = np.arange(y1 - y0) + y0
                X, Y = np.meshgrid(x, y)
                fig = plt.figure()
                ax = fig.gca(projection='3d')
                ax.plot_surface(X, Y, subimg, rstride=1, cstride=1, alpha=0.9, cmap='summer', norm=norm)
                circle = plt.Circle((obj_x, obj_y), min_ap, color='black', lw=2, fill=False)
                circle2 = plt.Circle((obj_x, obj_y), max_ap, color='black', lw=2, fill=False)
                circle3 = plt.Circle((obj_x, obj_y), ap, color='#ad343a', lw=2, fill=False)
                ax.add_patch(circle)
                ax.add_patch(circle2)
                ax.add_patch(circle3)
                # Project the 2D circles onto the z = background_level plane
                art3d.pathpatch_2d_to_3d(circle, z=background_level, zdir="z")
                art3d.pathpatch_2d_to_3d(circle2, z=background_level, zdir="z")
                art3d.pathpatch_2d_to_3d(circle3, z=background_level, zdir="z")
                ax.contour(X, Y, subimg, zdir='x', offset=X[0, 0], cmap='summer')
                ax.contour(X, Y, subimg, zdir='y', offset=Y[0, 0], cmap='summer')
                ax.view_init(25, 35)
                ax.set_xlabel('X')
                ax.set_ylabel('Y')
                ax.set_zlabel('Counts')
                fig.savefig(fname_3d, dpi=125, bbox_inches='tight')
                plt.close()
    # Get the centroids of the target:
    target_cen_x, target_cen_y = get_cens(data, idx, idx_frames)
    # Same for the comparison stars:
    comp_cen_x, comp_cen_y = get_cens(data, idx_comparison[0], idx_frames)
    all_comp_cen_x = np.atleast_2d(comp_cen_x)
    all_comp_cen_y = np.atleast_2d(comp_cen_y)
    for i in range(1, len(idx_comparison)):
        idx_c = idx_comparison[i]
        comp_cen_x, comp_cen_y = get_cens(data, idx_c, idx_frames)
        all_comp_cen_x = np.vstack((all_comp_cen_x, comp_cen_x))
        all_comp_cen_y = np.vstack((all_comp_cen_y, comp_cen_y))
    # Now plot images around centroids plus annulus:
    exts = np.unique(data['data']['ext']).astype('int')
    nframes = len(frames)
    for i in range(nframes):
        for ext in exts:
            frame: str = server_destination+'/'+frames[i]
            # temporary fix to underscore mislabeling
            if not os.path.isfile(frame):
                frame = frame.replace(' ', '_')
            d = fits.getdata(frame, ext=ext)
            # Only consider stars detected on this FITS extension
            idx_names = data['data']['ext'] == ext
            names_ext = data['data']['names'][idx_names]
            # Create normalization for plotting
            norm = ImageNormalize(d.copy() - np.median(d), interval=ZScaleInterval(), stretch=LinearStretch())
            for name in names_ext:
                if 'target' in name:
                    # Plot image of the target:
                    if i == 0:
                        plot_im(d, target_cen_x[0], target_cen_y[0], target_cen_x[i], target_cen_y[i],
                                aperture, min_ap, max_ap, 4 * half_size, frames[i], 'target_full_frame',
                                overwrite, norm)
                    plot_im(d, target_cen_x[0], target_cen_y[0], target_cen_x[i], target_cen_y[i],
                            aperture, min_ap, max_ap, half_size, frames[i], 'target', overwrite, norm)
            # Plot image of the comparisons:
            for j in range(len(idx_comparison)):
                idx_c = idx_comparison[j]
                name = 'star_' + str(idx_c)
                if name in names_ext:
                    if i == 0:
                        plot_im(d, np.median(all_comp_cen_x[j, :]), np.median(all_comp_cen_y[j, :]),
                                all_comp_cen_x[j, i], all_comp_cen_y[j, i], comp_apertures[j], min_ap, max_ap,
                                4 * half_size, frames[i], name + '_full_frame', overwrite, norm)
                    plot_im(d, np.median(all_comp_cen_x[j, :]), np.median(all_comp_cen_y[j, :]),
                            all_comp_cen_x[j, i], all_comp_cen_y[j, i], comp_apertures[j], min_ap, max_ap,
                            half_size, frames[i], name, overwrite, norm)
def plot_cmd(colors, data, idx, idx_comparison, post_dir):
    """
    Plot the color-magnitude diagram of all stars,
    indicating the target and selected comparison stars.

    :param colors: J-H color of every star
    :param data: photometry dictionary (pickle contents)
    :param idx: index of the target star
    :param idx_comparison: indices of the selected comparison stars
    :param post_dir: directory where CMD.pdf is written
    """
    jmags = data['data']['Jmag']
    base_marker = plt.rcParams['lines.markersize']
    # All stars as small blue dots, the target as a large red circle, and the
    # chosen comparisons as small red dots.
    plt.plot(colors, jmags, 'b.', label='All stars')
    plt.plot(colors[idx], jmags[idx], 'ro', ms=base_marker * 2, label='Target')
    plt.plot(colors[idx_comparison], jmags[idx_comparison], 'r.', label='Selected comparisons')
    plt.title('Color-magnitude diagram of stars')
    plt.xlabel('J$-$H color')
    plt.ylabel('J (mag)')
    plt.legend(loc='best')
    # Brighter stars (smaller magnitudes) go at the top
    plt.gca().invert_yaxis()
    plt.savefig(os.path.join(post_dir, 'CMD.pdf'))
    plt.close()
def median_filter(arr):
    """Median-filter *arr* with a kernel of ~sqrt(len(arr)), forced to be odd."""
    window = int(np.sqrt(len(arr)))
    # scipy's medfilt requires an odd kernel size
    window = window + 1 if window % 2 == 0 else window
    return medfilt(arr, window)
def get_cens(data, idx, idx_frames):
    """
    Return the x/y centroid time series of star *idx*, restricted to *idx_frames*.

    Regular stars are keyed 'star_<idx>' in the photometry dictionary while the
    main target is keyed 'target_star_<idx>'; the former is tried first.

    :param data: photometry dictionary (pickle contents)
    :param idx: star index
    :param idx_frames: frame indices to select
    :return: tuple (cen_x, cen_y)
    """
    try:
        cen_x = data['data']['star_' + str(idx)]['centroids_x'][idx_frames]
        cen_y = data['data']['star_' + str(idx)]['centroids_y'][idx_frames]
    except KeyError:
        # Narrowed from a bare `except:`: only a missing key should trigger
        # the fall-back to the target-star entry.
        cen_x = data['data']['target_star_' + str(idx)]['centroids_x'][idx_frames]
        cen_y = data['data']['target_star_' + str(idx)]['centroids_y'][idx_frames]
    return cen_x, cen_y
def post_processing(telescope,datafolder,target_name,target_coords,band='ip',ncomp=0,min_ap=5,max_ap=25,forced_aperture=15,filename='photometry.pkl',
                    force_aperture=False,optimize_apertures=False,plt_images=False,all_plots=False,overwrite=False):
    """
    Run the full light-curve post-processing for one photometry pickle.

    Loads the photometry pickle from *datafolder*, identifies the target by
    coordinates, ranks comparison stars by flux correlation with the target,
    selects photometric apertures (forced, optimized, or scanned), and writes
    raw/detrended light curves, diagnostic plots and HS-format (.epdlc)
    outputs into a 'post_processing' subfolder — once per site+camera
    combination found in the data.

    :param telescope: telescope name; must be known to the server (get_telescopes)
    :param datafolder: reduction folder holding the photometry pickle
    :param target_name: name used for output files and plot titles
    :param target_coords: coordinates accepted by CoordsToDecimal
    :param band: photometric band label, forwarded to save_photometry_hs
    :param ncomp: number of comparison stars (0 = choose automatically)
    :param min_ap: minimum aperture (pixels)
    :param max_ap: maximum aperture (pixels)
    :param forced_aperture: aperture used when force_aperture is True
    :param filename: pickle file name inside datafolder
    :param force_aperture: skip the aperture scan and use forced_aperture
    :param optimize_apertures: optimize the aperture of each comparison star
    :param plt_images: also save annotated sub-images for every frame
    :param all_plots: plot individual comparisons during the final detrend
    :param overwrite: overwrite existing sub-image plots
    """
    # Check if given telescope is in server, if it is use it, else exit program:
    inServer = any([telescope.upper() == tel.upper() for tel in get_telescopes()])
    if not inServer:
        print("Error, telescope not supported.")
        exit()
    red_path = datafolder
    # make directory for post processing files
    post_dir = os.path.join(red_path, 'post_processing/')
    if not os.path.exists(post_dir):
        os.makedirs(post_dir)
    if not os.path.exists(os.path.join(post_dir, 'post_processing_outputs')):
        os.makedirs(os.path.join(post_dir, 'post_processing_outputs'))
    #################################################
    # Convert target coordinates to degrees:
    target_ra, target_dec = CoordsToDecimal(target_coords)
    # Open dictionary, save times:
    try:
        data = pickle.load(open(os.path.join(red_path, filename), 'rb'))
    except FileNotFoundError:
        print("\t Pickle file not found. Please make sure it exists! Quitting...")
        raise
    # Determine the site+camera of every frame (camera read from FITS header)
    all_sites = len(data['frame_name']) * [[]]
    all_cameras = len(data['frame_name']) * [[]]
    for i in range(len(data['frame_name'])):
        frames = server_destination+'/'+data['frame_name'][i]
        # Fall back to underscore-mangled file names if the path is missing
        if not os.path.exists(frames):
            frames = frames.replace(' ', '_')
        with fits.open(frames) as hdulist:
            h = hdulist[0].header
        try:
            all_sites[i] = telescope
            all_cameras[i] = h['INSTRUME']
        except (KeyError, IndexError):
            # No INSTRUME keyword: assume the VATT4k camera
            all_sites[i] = telescope
            all_cameras[i] = 'VATT4k'
    # Group frame indices by unique "site+camera" string
    sites = []
    frames_from_site = {}
    for i in range(len(all_sites)):
        s = all_sites[i]
        c = all_cameras[i]
        if s + '+' + c not in sites:
            sites.append(s + '+' + c)
            frames_from_site[s + '+' + c] = [i]
        else:
            frames_from_site[s + '+' + c].append(i)
    print('Observations taken from: ', sites)
    # Get all the RAs and DECs of the objects:
    all_ras, all_decs = data['data']['RA_degs'], data['data']['DEC_degs']
    # Search for the target:
    distance = np.sqrt((all_ras - target_ra) ** 2 + (all_decs - target_dec) ** 2)
    # idx_target: index of main target
    idx_target: int = np.argmin(distance)
    # Search for closest stars in color to target star:
    target_hmag, target_jmag = data['data']['Hmag'][idx_target], data['data']['Jmag'][idx_target]
    colors = data['data']['Jmag'] - data['data']['Hmag']
    target_color = target_hmag - target_jmag
    color_distance = np.sqrt((colors - target_color) ** 2. + (target_jmag - data['data']['Jmag']) ** 2.)
    idx_distances = np.argsort(color_distance)
    idx_all_comps = []
    # Start with full set of comparison stars, provided they are good:
    for i in idx_distances:
        if i == idx_target:
            continue
        if check_star(data, i, min_ap, max_ap, force_aperture, forced_aperture):
            idx_all_comps.append(i)
    # Process each site+camera combination independently:
    for site in sites:
        print('\t Photometry for site:', site)
        idx_frames = frames_from_site[site]
        times = data['BJD_times'][idx_frames]
        # Sanity check: all BJD times identical indicates a corrupt pickle
        isWrong = np.all(times[0] == times)
        if isWrong:
            log("For some reason the BJD_times keyword in pickle contains an array repeating the same number.")
        idx_sort_times = np.argsort(times)
        step = len(data['frame_name'][idx_frames]) // 5
        step = step if step > 0 else 1
        print('\t Frames Summary: ~', str(data['frame_name'][idx_frames][0:-1:step]))
        # Check which is the aperture that gives a minimum rms:
        if force_aperture:
            print('\t Forced aperture to ', forced_aperture)
            chosen_aperture: int = forced_aperture
        else:
            print('\t Estimating optimal aperture...')
            apertures_to_check = range(min_ap, max_ap)
            precision = np.zeros(len(apertures_to_check))
            for i in range(len(apertures_to_check)):
                aperture = apertures_to_check[i]
                # Check the target
                relative_flux, relative_flux_err = super_comparison_detrend(data, idx_target, idx_all_comps, aperture,
                                                                            all_idx=idx_frames)
                save_photometry(times[idx_sort_times], relative_flux[idx_sort_times], relative_flux_err[idx_sort_times],
                                os.path.join(post_dir, 'post_processing_outputs/'),
                                target_name='target_photometry_ap' + str(aperture) + '_pix', plot_data=True)
                # Precision = scatter of the median-filter residuals, in ppm
                mfilt = median_filter(relative_flux[idx_sort_times])
                precision[i] = get_sigma((relative_flux[idx_sort_times] - mfilt) * 1e6)
            idx_max_prec = np.nanargmin(precision)
            chosen_aperture = apertures_to_check[idx_max_prec]
            print('\t >> Best precision achieved for target at an aperture of {:} pixels'.format(chosen_aperture))
            print('\t >> Precision achieved: {:.0f} ppm'.format(precision[idx_max_prec]))
        # Now determine the n best comparisons using the target aperture by ranking using correlation coefficient:
        idx_comparison = []
        comp_apertures = []
        comp_correlations = []
        target_flux = data['data']['target_star_' + str(idx_target)]['fluxes_' + str(chosen_aperture) + '_pix_ap'][idx_frames]
        target_flux_err = data['data']['target_star_' + str(idx_target)]['fluxes_' + str(chosen_aperture) + '_pix_ap_err'][
            idx_frames]
        exptimes = data['exptimes']
        for idx_c in idx_all_comps:
            star = 'star_%d' % idx_c
            flux_ap = 'fluxes_%d_pix_ap' % chosen_aperture
            flux_ap_err = 'fluxes_%d_pix_ap_err' % chosen_aperture
            # Exposure-time-normalized comparison fluxes
            comp_flux = (data['data'][star][flux_ap] / exptimes)[idx_frames]
            comp_flux_err = (data['data'][star][flux_ap_err] / exptimes)[idx_frames]
            # quick test for NaN's on all comparison stars
            isNan = np.isnan(np.sum(np.append(comp_flux, comp_flux_err)))
            if isNan:
                isFloat = isinstance(exptimes[0], float)
                isConsistent = np.all(exptimes == exptimes[0])
                log("ALERT: Reference Star's Flux or Flux Err contains NaNs. Details:")
                log("Star ID: %s\tAperture: %d\t Correct Exptime Format: %r" % (star, chosen_aperture,
                                                                                isFloat and isConsistent))
            # Check the correlation between the target and comparison flux
            result = linregress(target_flux, comp_flux)
            comp_correlations.append(result.rvalue ** 2)
        # set NaNs to 0
        comp_correlations = np.array(comp_correlations)
        comp_correlations[np.isnan(comp_correlations)] = 0
        # get comp_correlations in descending order
        comp_corr_idxsorted = np.argsort(comp_correlations)[::-1]
        comp_corr_sorted = np.array(comp_correlations)[comp_corr_idxsorted]
        log("Sorted Comparison correlations:\n{:}".format(comp_corr_sorted[:10]))
        idx_all_comps_sorted = np.array(idx_all_comps)[comp_corr_idxsorted]
        idx_comparison = idx_all_comps_sorted[0:ncomp]
        # Selecting optimal number of comparisons, if not pre-set with flag
        if ncomp == 0:
            print('\t Selecting optimal number of comparisons')
            closest_yet = np.inf
            idx_optimal_comparison = []
            for i in range(idx_all_comps_sorted.size):
                # Check the target
                relative_flux, relative_flux_err = super_comparison_detrend(data, idx_target, idx_all_comps_sorted[:i + 1],
                                                                            chosen_aperture, all_idx=idx_frames)
                mfilt = median_filter(relative_flux[idx_sort_times])
                # Keep adding comparisons while the photon-noise estimate and
                # the measured scatter keep getting closer to each other
                prec = np.nanmedian(relative_flux_err) * 1e6
                rms_scatter = get_sigma(relative_flux[idx_sort_times] - mfilt) * 1e6
                rel_diff = np.abs(prec - rms_scatter) / prec
                if rel_diff < closest_yet:
                    ncomp += 1
                    closest_yet = rel_diff
                    idx_optimal_comparison.append(i)
            idx_comparison = idx_all_comps_sorted[idx_optimal_comparison]
        msg1 = '\t {:} comparison stars available'.format(len(idx_all_comps_sorted))
        msg2 = '\t Selected the {:} best: {:}'.format(ncomp, idx_comparison)
        print(msg1)
        print(msg2.replace('\n', '\n\t '))
        log(msg1.replace('\t', ''))
        log(msg2.replace('\t', ''))
        # pdb.set_trace()
        # Plot the color-magnitude diagram
        plot_cmd(colors, data, idx_target, idx_comparison, post_dir)
        comp_apertures = []
        # Check the comparisons, and optionally select their apertures
        if not os.path.exists(post_dir + 'raw_light_curves/'):
            os.mkdir(post_dir + 'raw_light_curves/')
        if not os.path.exists(post_dir + 'comp_light_curves/'):
            os.mkdir(post_dir + 'comp_light_curves/')
        for i_c in idx_comparison:
            # super detrend each comparison star
            idx_c = idx_comparison[idx_comparison != i_c]
            if len(idx_c) == 0:
                idx_c = idx_all_comps_sorted[0:10]
            if optimize_apertures:
                # NOTE(review): apertures_to_check is only defined when
                # force_aperture is False; combining force_aperture=True with
                # optimize_apertures=True would raise NameError here — confirm.
                precision = np.zeros(len(apertures_to_check))
                for i_ap in range(len(apertures_to_check)):
                    aperture = apertures_to_check[i_ap]
                    rf_comp, rf_comp_err = super_comparison_detrend(data, i_c, idx_c, aperture, all_idx=idx_frames)
                    mfilt = median_filter(rf_comp[idx_sort_times])
                    precision[i_ap] = get_sigma((rf_comp[idx_sort_times] - mfilt) * 1e6)
                idx_max_prec = np.nanargmin(precision)
                the_aperture = apertures_to_check[idx_max_prec]
                print('\t >> Best precision for star_{:} achieved at an aperture of {:} pixels'.format(i_c, the_aperture))
                print('\t >> Precision achieved: {:.0f} ppm'.format(precision[idx_max_prec]))
            else:
                the_aperture = chosen_aperture
            # Save the raw and detrended light curves
            comp_star_id = 'star_%d' % i_c
            comp_fluxes_id = 'fluxes_%d_pix_ap' % the_aperture
            comp_fluxes_err_id = 'fluxes_%d_pix_ap_err' % the_aperture
            exptimes = data['exptimes']
            comp_flux = (data['data'][comp_star_id][comp_fluxes_id] / exptimes)[idx_frames]
            comp_flux_err = (data['data'][comp_star_id][comp_fluxes_err_id] / exptimes)[idx_frames]
            save_photometry(times[idx_sort_times], comp_flux[idx_sort_times], comp_flux_err[idx_sort_times],
                            post_dir + 'raw_light_curves/',
                            target_name='star_{:}_photometry_ap{:}_pix'.format(i_c, the_aperture),
                            plot_data=True, units='Counts')
            rf_comp, rf_comp_err = super_comparison_detrend(data, i_c, idx_c, the_aperture, all_idx=idx_frames)
            save_photometry(times[idx_sort_times], rf_comp[idx_sort_times], rf_comp_err[idx_sort_times],
                            post_dir + 'comp_light_curves/',
                            target_name='star_{:}_photometry_ap{:}_pix'.format(i_c, the_aperture),
                            plot_data=True, units='Counts')
            comp_apertures.append(the_aperture)
        # Save the raw light curves for the target as well
        target_flux = (data['data']['target_star_%d' % idx_target]['fluxes_%d_pix_ap' % chosen_aperture] / exptimes)[idx_frames]
        target_flux_err = \
            (data['data']['target_star_%d' % idx_target]['fluxes_%d_pix_ap_err' % chosen_aperture] / exptimes)[idx_frames]
        # detect if target_flux or target_flux_err contain NaNs
        # quick test for nans
        isNan = np.isnan(np.sum(np.append(target_flux, target_flux_err)))
        if isNan:
            log("ALERT: Target Star's Flux or Flux Err contains NaNs. Details:")
            log("Star ID: %d\tAperture: %d" % (idx_target, chosen_aperture))
        save_photometry(times[idx_sort_times], target_flux[idx_sort_times], target_flux_err[idx_sort_times],
                        post_dir + 'raw_light_curves/', target_name='target_photometry_ap{:}_pix'.format(chosen_aperture),
                        plot_data=True, units='Counts')
        # And save the super comparison
        _, _, super_comp, super_comp_err = super_comparison_detrend(data, idx_target, idx_all_comps_sorted[0:ncomp],
                                                                    chosen_aperture, comp_apertures=comp_apertures,
                                                                    all_idx=idx_frames, supercomp=True)
        save_photometry(times[idx_sort_times], super_comp[idx_sort_times], super_comp_err[idx_sort_times],
                        post_dir + 'raw_light_curves/',
                        target_name='super_comp_photometry_ap{:}_pix'.format(chosen_aperture),
                        plot_data=True)
        # Saving sub-images
        if plt_images:
            print('\t Plotting and saving sub-images...')
            log('Plotting and saving sub-images...')
            plot_images(data, idx_target, idx_comparison, chosen_aperture, min_ap, max_ap,
                        comp_apertures, post_dir, data['frame_name'][idx_frames],
                        idx_frames, overwrite=overwrite)
        # pdb.set_trace()
        # Save and plot final LCs:
        print('\t Getting final relative flux...')
        relative_flux, relative_flux_err = super_comparison_detrend(data, idx_target, idx_comparison, chosen_aperture,
                                                                    comp_apertures=comp_apertures, plot_comps=all_plots,
                                                                    all_idx=idx_frames)
        # pdb.set_trace()
        print('\t Saving...')
        save_photometry(times[idx_sort_times], relative_flux[idx_sort_times], relative_flux_err[idx_sort_times],
                        post_dir, target_name=target_name, plot_data=True,
                        title=target_name + ' on ' + red_path.split('/')[-1] + ' at ' + site)
        # pdb.set_trace()
        save_photometry_hs(data, idx_target, idx_comparison, idx_all_comps_sorted, chosen_aperture, min_ap, max_ap, comp_apertures,
                           idx_sort_times, post_dir, target_name, band=band, all_idx=idx_frames)
        print('\t Done!\n')
        plt.clf()
if __name__=="__main__":
    ################ INPUT DATA #####################
    # Command-line entry point: parse target/observation options and run the
    # post-processing pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument('-telescope', default=None)
    parser.add_argument('-datafolder', default=None)
    parser.add_argument('-target_name', default=None)
    parser.add_argument('-ra', default=None)
    parser.add_argument('-dec', default=None)
    parser.add_argument('-band', default='ip')
    parser.add_argument('-dome', default='')
    parser.add_argument('-minap', default=5)
    parser.add_argument('-maxap', default=25)
    parser.add_argument('-apstep', default=1)
    parser.add_argument('-ncomp', default=0)
    parser.add_argument('-forced_aperture', default=15)
    # Boolean switches (all default to False unless passed on the CLI):
    parser.add_argument('--force_aperture', dest='force_aperture', action='store_true')
    parser.set_defaults(force_aperture=False)
    parser.add_argument('--optimize_apertures', dest='optimize_apertures', action='store_true')
    parser.set_defaults(optimize_apertures=False)
    parser.add_argument('--plt_images', dest='plt_images', action='store_true')
    parser.set_defaults(plt_images=False)
    parser.add_argument('--all_plots', dest='all_plots', action='store_true')
    parser.set_defaults(all_plots=False)
    parser.add_argument('--overwrite', dest='overwrite', action='store_true')
    parser.set_defaults(overwrite=False)
    args = parser.parse_args()
    force_aperture = args.force_aperture
    optimize_apertures = args.optimize_apertures
    plt_images = args.plt_images
    all_plots = args.all_plots
    overwrite = args.overwrite
    telescope = args.telescope
    target_name = args.target_name
    datafolder = args.datafolder
    band = args.band
    dome = args.dome
    # Target coordinates as [[ra, dec]]; only the first token of -dec is used
    target_coords = [[args.ra, args.dec.split()[0]]]
    min_ap = int(args.minap)
    max_ap = int(args.maxap)
    forced_aperture = int(args.forced_aperture)
    ncomp = int(args.ncomp)
    filename = 'photometry.pkl'
    post_processing(telescope,datafolder,target_name,target_coords,band,ncomp,min_ap,max_ap,forced_aperture,filename,
                    force_aperture,optimize_apertures,plt_images,all_plots,overwrite)
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
import sys
import mysql.connector
import pandas as pd
from PyQt5 import QtCore, QtWidgets
from mysql.connector import errorcode
from constants import log
# Convenience aliases for the Qt namespace and the signal factory.
Qt = QtCore.Qt
pyqtSignal = QtCore.pyqtSignal
class EDEN_DB(object):
    """
    Utility class for accessing the EDEN MySQL database.

    It can connect to, create, add to and query the EDEN database.  After
    construction, call ``ConnectDatabase`` and then ``defineTable`` before
    issuing any table queries.  Still a work in progress [May 2018].

    NOTE(review): default credentials are hardcoded below and should be moved
    out of source control.  Queries are built by string interpolation, so all
    inputs must be trusted (SQL-injection risk).
    """

    def __init__(self, username='apaidani_general', password='EDEN17Data',
                 host='distantearths.com', verbose=True, autocommit=False):
        # Database/table names are selected later via ConnectDatabase/defineTable.
        self.db = None
        self.tableName = None
        # First column found to contain only unique values (set by GetTable).
        self.uniques_h = None
        if verbose:
            print("DEFINING CONNECTION")
        self.connection = mysql.connector.connect(user=username, password=password,
                                                  host=host, connect_timeout=5000)
        if verbose:
            print("DEFINING CURSOR")
        self.lastCommand = ''
        self.isClosed = False
        self.verbose = verbose
        self.cursor = self.connection.cursor()
        self.connection.autocommit = autocommit

    def defineTable(self, tableName):
        # Select the table that all subsequent queries operate on.
        self.tableName = tableName

    def ConnectDatabase(self, db):
        """
        Connect to given database name in the current account.  If the
        database does not exist, it is created first.
        :param db: database name
        """
        self.db = db
        try:
            self.connection.database = self.db
            # Set timeouts for 1 hour
            # self.cursor.execute('SET SESSION connect_timeout=3600')
            # self.cursor.execute('SET SESSION wait_timeout=3600')
            # self.cursor.execute('SET SESSION interactive_timeout=3600')
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_BAD_DB_ERROR:
                # Unknown database: create it, then select it.
                self.CreateDatabase()
                self.connection.database = self.db
            else:
                print(err.msg)

    def ShowDatabases(self):
        """
        Return a list of available databases under the current account.
        :return: list of database names
        """
        dbs = self.RunCommand("SHOW DATABASES;")
        # Each row comes back as a 1-tuple; unpack to plain names.
        dbs = [tup[0] for tup in dbs]
        return dbs

    def ShowTables(self):
        """
        Return a list of available tables in the selected database.
        :return: list of table names
        """
        tables = self.RunCommand("SHOW Tables FROM {};".format(self.db))
        tables = [tup[0] for tup in tables]
        return tables

    def CreateDatabase(self):
        # Create the database named by self.db with the utf8 character set.
        try:
            self.RunCommand("CREATE DATABASE %s DEFAULT CHARACTER SET 'utf8';" % self.db)
        except mysql.connector.Error as err:
            print("Failed creating database: {}".format(err.msg))

    def CreateTable(self):
        """
        Create a template table (ID/date/time/message) under the currently
        selected table name, if it does not already exist.
        """
        cmd = (" CREATE TABLE IF NOT EXISTS " + self.tableName + " ("
               " `ID` int(5) NOT NULL AUTO_INCREMENT,"
               " `date` date NOT NULL,"
               " `time` time NOT NULL,"
               " `message` char(50) NOT NULL,"
               " PRIMARY KEY (`ID`)"
               ") ENGINE=InnoDB;")
        self.RunCommand(cmd)

    def GetTable(self, cols='*', condition=None):
        """
        Retrieve the current table as a pandas.DataFrame.

        Side effect: scans the columns and remembers in ``self.uniques_h``
        the first one whose values are all distinct.

        :param cols: columns to select from, e.g. FIRST-COLUMN,SECOND-COLUMN
        :param condition: optional WHERE-clause body, e.g. 'ID' < 20
        """
        case1 = "SELECT {} FROM {};".format(cols, self.tableName)
        case2 = "SELECT {} FROM {} WHERE {}".format(cols, self.tableName, condition)
        query = case1 if condition is None else case2
        pdTable = pd.read_sql(query, self.connection)
        col_vals = pdTable.columns
        for header in col_vals:
            # Yields 'True' when every value in the column is distinct.
            cmd = "select case when count(distinct `{0}`)=count(`{0}`) then 'True' else 'False' end from {1};".format(
                header, self.tableName)
            if self.RunCommand(cmd)[0][0] == "True":
                self.uniques_h = header
                break
        return pdTable

    def CreateDataFrame(self, mysql_msg, col_vals=None):
        """
        Auxiliary translator from a raw MySQL query result to a pandas
        DataFrame.  Deprecated in favor of pandas.read_sql.
        :param mysql_msg: mysql result from the query
        :param col_vals: column/header of table queried, if None, then it will use last headers from cursor.
        :return: table type pandas Dataframe
        """
        if not col_vals:
            col_names = self.cursor.column_names
        else:
            col_names = col_vals
        # Strip backticks and the b'...' wrapping left by bytes headers.
        col_names = [str(col).strip('`').strip('b').strip("'") for col in col_names]
        for i in range(len(col_names)):
            # NOTE(review): entries are already str after the line above, so
            # this bytes check looks unreachable -- confirm before removing.
            if bytes is type(col_names[i]):
                col_names[i] = str(col_names[i])
        df = pd.DataFrame(mysql_msg, columns=col_names)
        if len(df):
            first_h = list(df)[0]
            if "ID" in first_h:
                try:
                    # An ID-like first column is cast to int when possible.
                    df[first_h] = df[first_h].astype(int)
                except:
                    pass
        return df

    def GetColumns(self):
        """
        Get table describing the columns of the current selected table.
        :return: pandas Dataframe describing the current table
        """
        return pd.read_sql("SHOW COLUMNS FROM %s;" % self.tableName, self.connection)
        # msg_rows = self.RunCommand("SHOW COLUMNS FROM %s;" % self.tableName)
        # return self.CreateDataFrame(msg_rows)

    def RunCommand(self, cmd):
        """
        Execute raw SQL command. Return result from query.
        :param cmd: command
        :return: mysql message (all rows, or a single row if fetchall fails)
        """
        if self.verbose:
            print("RUNNING COMMAND:\n" + cmd)
        try:
            self.cursor.execute(cmd)
        except mysql.connector.Error as err:
            print('ERROR MESSAGE: ' + str(err.msg))
            print('WITH ' + cmd)
        try:
            msg = self.cursor.fetchall()
        except:
            # Some statements do not support fetchall; fall back to one row.
            msg = self.cursor.fetchone()
        self.lastCommand = cmd
        return msg

    def AddEntryToTable(self, columns, values):
        """
        Add an entry to the current database's table. columns and values must be the same size.
        :param columns: column names to which add the `values`
        :param values: values per column
        """
        cmd = "INSERT INTO {} ({})".format(self.tableName, columns)
        cmd += " VALUES ({});".format(values)
        self.RunCommand(cmd)

    def Delete(self, condition):
        """
        Delete rows in current table given a condition to find the values.
        :param condition: condition to find the values: e.g. `ID` < 20
        """
        cmd = "DELETE FROM {} WHERE {};".format(self.tableName, condition)
        self.RunCommand(cmd)

    def UpdateVal(self, newvals, conditions):
        """
        Update values in table. Given new values and a condition to find the rows.
        :param newvals: new values format: `Column-Name` = 'new value'
        :param conditions: conditions to find values to be changed: e.g. `ID` < 20
        """
        cmd = "UPDATE {} SET {} WHERE {};".format(self.tableName, newvals, conditions)
        self.RunCommand(cmd)

    def close(self, commit=False):
        """
        Close EDEN MySQL Connection.
        :param commit: if True, all changes will be committed to the MySQL database.
        """
        self.cursor.close()
        if commit:
            self.connection.commit()
        self.connection.close()
        self.isClosed = True

    def __del__(self):
        # Close the connection on garbage collection if not already closed.
        # NOTE(review): if __init__ raised before isClosed was assigned this
        # would raise AttributeError -- confirm whether that case matters.
        if not self.isClosed:
            self.close()
class PandaSignal(QtCore.QObject):
    """Signal holder: pyqtSignal must be declared at class level on a QObject."""
    # Emitted by PandasModel.setData as (cell index, old value, new value).
    changedData = pyqtSignal(QtCore.QModelIndex, str, str)
class PandasModel(QtCore.QAbstractTableModel, QtCore.QObject):
    """
    Class to populate a PyQt table view with a pandas dataframe.
    """
    # changedData = pyqtSignal(QtCore.QModelIndex, str, str)
    # dataChanged = pyqtSignal(QtCore.QModelIndex, str, str)
    # changedData = pyqtSignal('QModelIndex', str, str)

    def __init__(self, data, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent)
        # self.dataChanged = pyqtSignal(QtCore.QModelIndex, str, str)
        # Signals live on a helper QObject (see PandaSignal) because a
        # pyqtSignal cannot be created per-instance.
        self.signals = PandaSignal()
        self._data = data

    def rowCount(self, parent=None):
        # Number of DataFrame rows.
        return self._data.shape[0]

    def columnCount(self, parent=None):
        # Number of DataFrame columns.
        return self._data.shape[1]

    def data(self, index, role=Qt.DisplayRole):
        # Cell contents are always rendered as strings.
        if index.isValid():
            if role == Qt.DisplayRole:
                return str(self._data.iloc[index.row(), index.column()])
        return None

    def headerData(self, col, orientation, role):
        # Horizontal headers come from the DataFrame columns; the same text
        # is reused as the tooltip.
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self._data.columns[col]
        if role == Qt.ToolTipRole:
            if orientation == Qt.Horizontal:
                return self._data.columns[col]
        return None

    def setData(self, index, value, role=Qt.EditRole):
        # Write the edited value back into the DataFrame and notify listeners.
        if role == Qt.EditRole:
            before = self._data.iloc[index.row(), index.column()]
            after = value
            self._data.iloc[index.row(), index.column()] = value
            # self.signals.dataChanged.emit(index, str(before), str(after))
            self.signals.changedData.emit(index, str(before), str(after))
            # NOTE(review): Qt convention is to also emit dataChanged(index,
            # index) here so attached views refresh -- confirm views update
            # correctly without it.
            log("SIGNAL FOR changedData emmited")
            return True
        return False

    def flags(self, index):
        # Every cell is enabled, selectable and editable.
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
class Completer(QtWidgets.QCompleter):
    """Case-insensitive popup completer that supports comma-separated entries."""

    def __init__(self, strList, parent=None):
        super(Completer, self).__init__(strList, parent)
        self.setCaseSensitivity(Qt.CaseInsensitive)
        self.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
        self.setWrapAround(False)

    # Append the completion to the existing comma-separated text instead of
    # replacing the whole line-edit contents.
    def pathFromIndex(self, index):
        path = QtWidgets.QCompleter.pathFromIndex(self, index)
        lst = str(self.widget().text()).split(',')
        if len(lst) > 1:
            path = '%s, %s' % (','.join(lst[:-1]), path)
        return path

    # Complete only against the fragment after the last comma.
    def splitPath(self, path):
        path = str(path.split(',')[-1]).lstrip(' ')
        return [path]
if __name__ == '__main__':
    # Dead-code string below kept for reference (manual connection experiment).
    """
    connection = mysql.connector.connect(user='apaidani_general', password='EDEN17Data',
    host='distantearths.com')
    cursor = connection.cursor()
    try:
    cursor.execute("SHOW DATABASES;")
    except mysql.connector.Error as err:
    print("ERROR MSG: " + str(err.msg))
    rows = cursor.fetchall()
    rows = [tup[0] for tup in rows]
    print(rows)
    print(type(rows))
    database='apaidani_edendata'
    """
    # Smoke test: connect, fetch the EDEN data table, and display it in a
    # QTableView using the PandasModel above.
    myEdencnx = EDEN_DB()
    print(myEdencnx.ShowDatabases())
    myEdencnx.ConnectDatabase('apaidani_edendata')
    print(myEdencnx.ShowTables())
    myEdencnx.defineTable('`EDEN Data Files`')
    print(myEdencnx.GetColumns())
    table = myEdencnx.GetTable()
    # Drop the connection before entering the Qt event loop.
    del myEdencnx
    print(table)
    application = QtWidgets.QApplication(sys.argv)
    view = QtWidgets.QTableView()
    model = PandasModel(table)
    view.setModel(model)
    headers = list(model._data)
    view.show()
    sys.exit(application.exec_())
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,376 | abgibbs/edenAP_detrend | refs/heads/myedits | /FIT_Class.py | import os
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astroquery.simbad import Simbad
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from jdcal import gcal2jd
from constants import telescopes, bad_flags, log
# FITS header keywords of general interest throughout this module.
headers = ['INSTRUME', 'TELESCOP', 'OBJECT']
def hms2dec(hour, min, sec):
    """Convert right ascension in (hours, minutes, seconds) to decimal degrees."""
    degrees_per_hour = 15  # 360 degrees / 24 hours
    return degrees_per_hour * (hour + min / 60 + sec / 3600)
def dms2dec(degree, arcmin, arcsec):
    """
    Convert (degrees, arcminutes, arcseconds) to decimal degrees, carrying
    the sign of the degree component through the minute/second fractions.
    """
    if degree > 0:
        return degree + arcmin / 60 + arcsec / 3600
    if degree < 0:
        return degree - arcmin / 60 - arcsec / 3600
    # degree == 0: no sign information available, return positive fraction
    return arcmin / 60 + arcsec / 3600
def remove_chars_iter(subj, chars):
    """Return *subj* with every character that appears in *chars* removed."""
    banned = set(chars)
    kept = (ch for ch in subj if ch not in banned)
    return ''.join(kept)
# function to find correct date in header
def LOOKDATE(header, time_obj=False):
    """
    Persistent function that will look for the date of the observation recorded in the header.
    It will correct for the timezone in order to get the local date/time when the observation started (locally).
    Procedure:
    1. Looks for 'DATE', 'DATE-OBS' or anything including 'DATE' in header.
    2. Tests format 'YYYY-MM-DDTHH:MM:SS.ss', or simply 'YYYY-MM-DD' or 'YYYY/MM/DD'
    3. If format doesn't include the time, it looks for 'UT' keyword to find time and appends it to the date string
    4. Looks for the 'TIMEZONE' keyword in order to correct for it, if not found uses default `7`
    :param header: header of current file
    :param time_obj: if True, it will return a tuple of ( date string, datetime object)
    :return: date string or (date string, datetime object)
    """
    # Imported locally to avoid a circular import with cal_data.
    from cal_data import find_val
    try:
        # find_val will first try to get 'DATE' header. If it doesn't work, it will find header keywords that
        # include the word 'DATE' which includes 'DATE-OBS'
        date_key = 'DATE-OBS' if 'DATE-OBS' in header else 'DATE'
        date = find_val(header, date_key)
        if "T" in date:
            # ISO-like 'YYYY-MM-DDTHH:MM:SS' string: parse directly.
            temp_date = parse(date)
        else:
            try:
                time = find_val(header, 'UT')
                if '/' in time or '-' in time:
                    # if 'UT' value suggests date string, then raise err
                    raise KeyError
                temp_date = parse(date + 'T' + time)
            except KeyError:
                # Fall back to an explicit TIME/TIME-OBS keyword.
                time_key = 'TIME-OBS' if 'TIME-OBS' in header else 'TIME'
                time = find_val(header, time_key)
                temp_date = parse(date + 'T' + time)
    except (KeyError, TypeError):
        # Last resort: parse whatever 'DATE' contains, date only.
        date = find_val(header, 'DATE')
        temp_date = parse(date)
    try:
        time_diffUT = find_val(header, 'timezone')
    except (KeyError, TypeError):
        # try default (presumably US Mountain time -- TODO confirm)
        time_diffUT = -7
    # FIRST TO CONVERT TO LOCAL TIME, THEN -7 AS LIMIT IN THE MORNING
    correct_date = temp_date + relativedelta(hours=time_diffUT - 7)
    if time_obj:
        return str(correct_date.date()), correct_date
    return str(correct_date.date())
def get_mjd(header):
    """
    Compute the Modified Julian Date of the start of the observation.

    There is no reliable, standard JD keyword across telescopes (and some are
    inaccurate), so the MJD is rebuilt from the local start date/time found by
    LOOKDATE.

    :param header: header of fits file
    :return: Modified Julian Date as a float
    """
    _, start = LOOKDATE(header, time_obj=True)
    whole_day = gcal2jd(start.year, start.month, start.day)[-1]
    day_fraction = (start.hour + start.minute / 60.0 + start.second / 3600) / 24.0
    return float(whole_day + day_fraction)
def get_RADEC(header):
    """
    Get RA / DEC from FITS header no matter what the format is. Epoch. J2000
    Attempt to get from SIMBAD QUERY, if it fails then get RA/DEC of the center of the image (there no other way)
    :param header: fits header
    :return: two list, first one of RA values and other of DEC Values of the following format:
    [ degrees, hour|degree, minute|arcminute, second|arcsecond]
    Where RA is in HMS and DEC is in DMS format.
    """
    # Imported locally to avoid a circular import with cal_data.
    from cal_data import find_val
    # Network call: resolve the OBJECT name through SIMBAD.
    result = Simbad.query_object(find_val(header, 'object'))
    if result is None:
        # SIMBAD failed: fall back to the header's own RA/DEC keywords.
        raw_RA = find_val(header, 'RA')
        raw_DEC = find_val(header, 'DEC')
    else:
        raw_RA = result['RA'][0]
        raw_DEC = result['DEC'][0]
    # assumes if raw_DEC is float then raw_RA is also a float
    if isinstance(raw_DEC, float):
        # Both are in degrees format
        temp_RA = Angle(raw_RA, unit=u.deg)
        temp_DEC = Angle(raw_DEC, unit=u.deg)
    else:
        # it must be a string: RA in hour-angle, DEC in degrees
        temp_RA = Angle(raw_RA, unit=u.hourangle)
        temp_DEC = Angle(raw_DEC, unit=u.deg)
    # Pack as [decimal degrees, h|d, m, s] for each coordinate.
    RA = [temp_RA.deg, *temp_RA.hms]
    DEC = [temp_DEC.deg, *temp_DEC.dms]
    return RA, DEC
# EVERY STRING INFO USED WILL BE UPPERCASE
# CLASS TO CREATE FITFILE
class FITFILE(object):
    def __init__(self, path):
        """
        Lightweight FITFILE object that saves the most relevant information of the image/object as attributes.
        Including filename, observers, integration time, instrument, telescope, RA, DEC, etc...
        Files that cannot be classified set ``self.EXIST = False`` and record
        the reason in ``self.flag_type`` instead of raising.
        :param path: path to fits file
        """
        from cal_data import find_val
        log("Creating FITFILE Object for %s" % path)
        # Validity flag: flipped to False when the file is flagged as unusable.
        self.EXIST = True
        self.flag_type = "There is no flag on this file."
        self.path = path
        self.filename = os.path.basename(path)
        # Abbreviated path (last few directories) for display purposes.
        path_list = path.split(os.sep)
        if len(path_list) < 4:
            self.short_path = os.path.join("~", *path_list[:-1])
        else:
            self.short_path = os.path.join("~", *path_list[-4:-1])
        self.hdr = fits.getheader(path)

        # Shorthand for looking up keys in this file's header.
        def find_key(x, **kwargs):
            return find_val(self.hdr, x, **kwargs)

        # find_key = lambda x, **kwargs: find_val(self.hdr, x, **kwargs)
        obs = find_key('OBSERVER')
        self.observer = remove_chars_iter(obs, ['"', '='])
        # Exposure time: prefer EXPTIME, fall back to EXPOSURE.
        integration = find_key('EXPTIME', raise_err=False)
        self.integration = float(integration) if integration is not None else float(find_key('EXPOSURE'))
        # Make sure there is no flag regarding it as a 'bad' file
        obj = find_key("OBJECT")
        for bad in bad_flags:
            mybool1 = bad in self.filename.upper()
            mybool2 = bad in obj.upper()
            if mybool1 or mybool2:
                # if flag exists, set flag
                self.EXIST = False
                self.flag_type = "{} contains one of the following flags:" \
                                 "Bad,Test,Rename,Focus,Useless, Provo or Random".format(self.filename)
        # try to find information in headers
        try:
            from cal_data import find_imgtype
            # img_type is a calibration type (e.g. FLAT) or None for science frames.
            img_type = find_imgtype(self.hdr, self.filename)
            # USE FUNCTION TO LOOK-UP DATE
            self.date = LOOKDATE(self.hdr)
            # USE DICTIONARY OF TELESCOPES TO LOOK FOR MATCH IN HEADER
            instrument = find_key("INSTRUME", raise_err=False)
            self.instrument = remove_chars_iter(instrument, ['"', '=']) if instrument is not None else ''
            telescop = find_key("TELESCOP").upper()
            telFound = False
            for name, label_list in telescopes.items():
                for label in label_list:
                    # Match the label against TELESCOP, then INSTRUME.
                    telFound = label.upper() in telescop
                    telFound = telFound if telFound else label.upper() in self.instrument.upper()
                    # Labels starting with '!' are exclusion markers.
                    counter = label[0] == '!'
                    if telFound and not counter:
                        self.telescop = name
                        if not self.instrument:
                            self.instrument = name
                        break
                if telFound:
                    break
            # now info specific to type of data
            if img_type is not None:
                # If FITS is calibration fits....
                self.type = img_type
                self.name = ""
                self.airmass = 'N/A'
                self.RA = ['N/A', 'N/A', 'N/A', 'N/A']
                if img_type.upper() == 'FLAT':
                    self.filter = find_key('FILTER', typ=str).upper()
            else:
                # else FITS is an OBJECT FITS
                self.type = "OBJECT"
                self.name = obj.upper()  # use name-object in header
                self.MJD = get_mjd(self.hdr)
                self.filter = find_key('FILTER', typ=str).upper()
                self.RA, self.DEC = get_RADEC(self.hdr)
                if not self.name:
                    self.EXIST = False
                    self.flag_type = "%s was tagged as incomplete, OBJECT keyword " \
                                     "did not contain object name" % self.filename
                try:
                    self.airmass = float(find_key("AIRMASS"))
                except KeyError:
                    self.airmass = 0
        # failed attempts must be flagged
        except KeyError as e:
            self.EXIST = False
            keyword = e.args[0].strip('.')
            self.flag_type = "{} in header for {}.".format(keyword, self.filename)
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,377 | abgibbs/edenAP_detrend | refs/heads/myedits | /eden_GPDetrend.py | import numpy as np
import argparse
import glob
import os
from astropy import units as u
from astroquery import simbad
from astropy.time import Time
from astropy.stats import sigma_clip
from datetime import datetime
import astropy.config as astropy_config
from configparser import ConfigParser
import pickle
import time
def eden_GPDetrend(telescope, datafolder, targets, calibrated=True):
    """
    Build the GPDetrend input files from post-processing products and run it.

    :param telescope: telescope name (currently unused; kept for interface
                      compatibility with the other pipeline steps)
    :param datafolder: REDUCED directory of the data set (trailing slash expected)
    :param targets: list of target names; only targets[0] is detrended
    :param calibrated: if True, write outputs under calib_post_processing/
    """
    # create folder in post_processing for GPLC files
    if calibrated:
        out_dir = datafolder + 'calib_post_processing/' + 'GPLC/'
    else:
        out_dir = datafolder + 'post_processing/' + 'GPLC/'
    # makedirs: the parent post_processing folder may not exist yet
    os.makedirs(out_dir, exist_ok=True)
    # Load necessary files to create GPDetrend inputs
    nflux = np.genfromtxt(datafolder + 'post_processing/' + targets[0] + '.dat')
    LC = np.genfromtxt(datafolder + 'post_processing/LC/' + targets[0] + '.epdlc')
    comps = glob.glob(datafolder + 'post_processing/comp_light_curves/*')
    # lc file: times and relative flux
    times = nflux[:, 0]
    flux = nflux[:, 1]
    # 5-sigma clip of the flux; filt is True for points to keep
    filt = sigma_clip(flux, sigma=5)
    filt = np.invert(filt.mask)
    # eparams file: external parameters from the .epdlc columns
    etime = LC[:, 1]
    airmass = LC[:, 22]
    FWHM = LC[:, 19]
    cen_X = LC[:, 15]
    cen_Y = LC[:, 16]
    bg = LC[:, 17]
    # for some reason the eparam times do not always match the flux times,
    # this (usually) finds and removes the extras
    if len(times) != len(airmass):
        print('LC length does not match comps and eparams length!')
        print('Time length: ', len(times), 'eparams length: ', len(airmass))
        # rounding to 5 gets rid of small differences
        times = np.round(times, decimals=5)
        etime = np.round(etime, decimals=5)
        mask = np.in1d(etime, times)  # find values truly not in lc
        airmass = airmass[mask]
        FWHM = FWHM[mask]
        cen_X = cen_X[mask]
        cen_Y = cen_Y[mask]
        bg = bg[mask]
    # comps file: one column per usable comparison light curve
    cflux = np.zeros((len(times), int(len(comps) / 4)))
    count = 0
    for j in range(len(comps)):
        # BUGFIX: the original condition ('pdf' or 'sigma' or 'norm' not in
        # comps[j]) was always True due to operator precedence; explicitly
        # skip pdf/sigma/norm products as intended.
        if not any(tag in comps[j] for tag in ('pdf', 'sigma', 'norm')):
            try:  # does not always work (unreadable or mismatched files)
                comp = np.genfromtxt(comps[j])
                if len(times) != len(airmass):
                    comp = comp[mask]
                cflux[:, count] = comp[:, 1]
                count = count + 1
            except Exception:
                # best effort: skip comparison files that cannot be loaded
                pass
    # apply the sigma-clip mask everywhere
    times, flux = times[filt], flux[filt]
    airmass, FWHM, cen_X, cen_Y, bg = airmass[filt], FWHM[filt], cen_X[filt], cen_Y[filt], bg[filt]
    cflux = cflux[filt, :]
    # Write the GPDetrend input files
    # array format
    light = np.array([times, flux, np.zeros(len(times))])
    eparams = np.array([times, airmass, FWHM, bg, cen_X, cen_Y], dtype='float')
    # the FWHM often contains nans, this removes those times from all files.
    rem = np.where(np.isnan(FWHM))
    # Remove times with FWHM Nans and transpose
    eparams = np.delete(np.transpose(eparams), rem, axis=0)
    light = np.delete(np.transpose(light), rem, axis=0)
    cflux = np.delete(cflux, rem, axis=0)
    # write
    lfile = out_dir + 'lc.dat'
    efile = out_dir + 'eparams.dat'
    cfile = out_dir + 'comps.dat'
    ofolder = 'detrend_'
    np.savetxt(lfile, light, fmt='%1.6f', delimiter=' ')
    np.savetxt(cfile, cflux, fmt='%1.6f', delimiter=' ')
    np.savetxt(efile, eparams, fmt='%1.6f', delimiter=' ', header='times, airmass, fwhm, background, x_cen, y_cen')
    # RUN DETREND
    # changing directories seems necessary, otherwise the out folder is too long a name for multinest
    mycwd = os.getcwd()
    os.chdir(out_dir)
    os.system('python ' + mycwd + '/GPDetrend.py -ofolder ' + ofolder + ' -lcfile ' + lfile + ' -eparamfile ' + efile + ' -compfile ' + cfile + ' -eparamtouse all')
    os.chdir(mycwd)
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,378 | abgibbs/edenAP_detrend | refs/heads/myedits | /automatic_photometry_eden.py | import argparse
import astropy.config as astropy_config
from configparser import ConfigParser
from datetime import datetime
from dateutil import parser
import glob
import numpy as np
import os
import shutil
import sys
import traceback
# Wrapper script for the photometry, post-processing, and detrending scripts.
# Clear the astroquery cache to ensure up-to-date values are grabbed from SIMBAD
# Drop astroquery's on-disk cache so SIMBAD lookups below are fresh.
if os.path.exists(astropy_config.get_cache_dir()+'/astroquery/'):
    # First delete the astropy cache for astroquery; otherwise the data might be out of date!
    shutil.rmtree(astropy_config.get_cache_dir()+'/astroquery/')
# Then import astropy, astroquery, and other modules in this code
from astropy import units as u
from astropy.time import Time
from astroquery import simbad
from constants import log
from get_photometry_eden import get_photometry
import PhotUtils
from transit_photometry import post_processing
from eden_GPDetrend import eden_GPDetrend
# Read config.ini for the server data folder and photometry options.
config = ConfigParser()
config.read('config.ini')
server_destination = config['FOLDER OPTIONS']['server_destination']
ASTROMETRY = config['PHOTOMETRY OPTIONS'].getboolean('ASTROMETRY')
REF_CENTERS = config['PHOTOMETRY OPTIONS'].getboolean('REF_CENTERS')
# Parse arguments
parserIO = argparse.ArgumentParser(description='Performs photometry and produces optimal light curves for all data from TELESCOPE within the past NDAYS.')
parserIO.add_argument('-telescope', default=None, help='name(s) of telescopes (e.g., VATT)')
parserIO.add_argument('-ndays', default=7, help='number of days to look back')
parserIO.add_argument('-target', default=None, help='specify the target')
parserIO.add_argument('--calibrated', action='store_true', help='look for data in the CALIBRATED directory instead of RAW')
parserIO.add_argument('--no-calibration', action='store_true', help='do not perform calibration, just use raw images')
parserIO.add_argument('--overwrite', action='store_true', help='overwrite existing photometry (photometry.pkl)')
parserIO.add_argument('--photometry', action='store_true', help='ONLY do the photometry')
parserIO.add_argument('--post-processing', action='store_true', help='ONLY do the post-processing (must do photometry first!)')
parserIO.add_argument('--detrending', action='store_true', help='ONLY do the detrending with GPDetrend (must do photometry and post-processing first!)')
args = parserIO.parse_args()
tele, ndays = args.telescope, int(args.ndays)
# Find the selected telescope under RAW/, otherwise exit
telescopes_list = [d.strip('/').split('/')[-1] for d in glob.glob(server_destination + '/RAW/*/')]
if tele not in telescopes_list:
    # BUGFIX: the original printed the bare format string without filling in
    # the telescope name.
    print("Telescope {:s} not supported.".format(str(tele)))
    print("Supported telescopes: " + ', '.join(telescopes_list))
    exit()
# Are we using RAW or CALIBRATED data?
dtype = '/CALIBRATED/' if args.calibrated else '/RAW/'
if not args.no_calibration or args.calibrated:
    pkl_name = 'calib_photometry.pkl'
    post_name = 'calib_post_processing/'
else:
    pkl_name = 'photometry.pkl'
    post_name = 'post_processing/'
# Is the target specified?
if args.target is not None:
    # Do a SIMBAD lookup for this target name
    target_names = simbad.Simbad.query_objectids(args.target)
    if target_names is None:
        # If the lookup fails, just use the argument target name
        print("\nSIMBAD lookup failed for target {:s}".format(args.target))
        target_names = [args.target]
    else:
        # Convert the astropy Table into a string array
        target_names = target_names.as_array().astype(str)
        # BUGFIX: collapse double spaces (the original replace(' ', ' ') was a
        # no-op; SIMBAD identifiers can contain double spaces)
        target_names = [name.replace('  ', ' ') for name in target_names]
        print("\nTarget {:s} identified by SIMBAD under the following names:".format(args.target))
        print(target_names)
        # Append the argument target name as well, in case it isn't in this list
        target_names.append(args.target)
else:
    target_names = None
# Find all of the dates under RAW or CALIBRATED for this telescope
# (directory layout: <server>/<dtype>/<telescope>/<target>/<YYYY-MM-DD>/).
date_dirs = np.sort(glob.glob(os.path.join(server_destination,dtype.replace('/',''),tele,'*','*-*-*/')))
dates = np.array([Time(d.strip('/').split('/')[-1]) for d in date_dirs])
# Filter these to dates which lie within ndays of today (ignore the time)
today = Time(datetime.today().strftime('%Y-%m-%d'))
mask = dates>(today-ndays*u.day)
date_dirs,dates = date_dirs[mask],dates[mask]
# Filter to the selected target (second-to-last path component).
targets = np.array([d.strip('/').split('/')[-2] for d in date_dirs])
if target_names is not None:
    mask = np.in1d(targets,target_names)
    # Also check for names with underscores
    mask = mask|np.in1d(targets,[name.replace(' ','_') for name in target_names])
    date_dirs,dates,targets = date_dirs[mask],dates[mask],targets[mask]
# Print some info about the reduction to be performed
print("\nFound {:d} data sets from within the past {:d} days under {:s}.".format(mask.sum(),ndays,dtype))
print("Targets: "+", ".join(np.unique(targets)))
# Overwrite bypass (will double-check once)
bypass = False
# Loop through the directories; save the data sets which fail with exceptions
failed_phot = []
failed_post = []
failed_det = []
# Main loop: photometry -> post-processing -> detrending for every data set.
for i in range(len(date_dirs)):
    print("\nTarget: {:s} | Date: {:s}".format(targets[i], dates[i].iso.split()[0]))
    log("Now working on {:s}".format(date_dirs[i]))
    # Mirror the RAW/CALIBRATED path into the REDUCED and LIGHTCURVES trees.
    reduced_dir = date_dirs[i].replace(dtype, '/REDUCED/')
    lightcurves_dir = date_dirs[i].replace(dtype, '/LIGHTCURVES/')
    # Run the astrometry & photometry routine (unless --post-processing or --detrending is passed as an argument)
    # This produces photometry.pkl (under the REDUCED directory tree), which contains the absolute
    # flux of every star across several aperture sizes, as well as the x/y positions and FWHM
    if not args.post_processing and not args.detrending:
        print('\n\t###################################')
        print('\tDoing photometry....')
        print('\t###################################')
        # Delete photometry.pkl if --overwrite is passed as an argument, but check first
        if args.overwrite and os.path.exists(reduced_dir + pkl_name):
            if bypass or input("Overwriting photometry.pkl! Press return to confirm, or any other key to exit: ") == '':
                os.remove(reduced_dir + pkl_name)
                log("Removing {:s}".format(reduced_dir + pkl_name))
                bypass = True
            else:
                exit()
        # Try to run photometry, but skip this data set if it fails
        try:
            # Run the photometry routine
            get_photometry(tele, date_dirs[i], calibrate=not args.no_calibration, use_calibrated=args.calibrated)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            e = sys.exc_info()
            print("\t Photometry FAILED with error type: {0}".format(e[0]))
            print("\t See the log for more information.")
            log("\t Photometry failed for {:s} with the following error:".format(date_dirs[i]))
            # BUGFIX: traceback.print_exception() returns None (it prints to
            # stderr); use format_exception() so the traceback reaches the log.
            log(''.join(traceback.format_exception(*e)))
            failed_phot.append(i)
    else:
        print('\n\t###################################')
        print('\tSkipping photometry....')
        print('\t###################################')
    # Run the post-processing routine (unless --photometry is passed as an argument)
    # Chooses the optimal aperture size & set of reference stars then creates a light curve
    if not args.photometry and not args.detrending:
        print('\n\t###################################')
        print('\tDoing post-processing....')
        print('\t###################################')
        # Get the target coordinates first (legacy)
        RA, DEC = PhotUtils.get_general_coords(targets[i], parser.parse(dates[i].isot))
        target_coords = [[RA, DEC]]
        # Run the post-processing routine (if photometry was successful)
        print(reduced_dir + pkl_name)
        if os.path.exists(reduced_dir + pkl_name):
            # Try to do the post-processing, but except errors and continue
            try:
                # Run the post-processing routine
                post_processing(tele, reduced_dir, targets[i], target_coords, overwrite=True, ncomp=6, filename=pkl_name, outname=post_name)
                # Copy all of the .epdlc files into the LIGHTCURVES directory
                print('\t Copying lightcurves into {:s}'.format(lightcurves_dir))
                if not os.path.isdir(lightcurves_dir):
                    os.makedirs(lightcurves_dir)
                for filename in glob.glob(reduced_dir + post_name + '/LC/*.epdlc'):
                    shutil.copyfile(filename, lightcurves_dir + '/' + filename.split('/')[-1])
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                e = sys.exc_info()
                print("\t Post-processing FAILED with error type: {0}".format(e[0]))
                print("\t See the log for more information.")
                log("\t Post-processing failed for {:s} with the following error:".format(date_dirs[i]))
                log(''.join(traceback.format_exception(*e)))  # BUGFIX: was print_exception (logged None)
                failed_post.append(i)
        else:
            print('\t No photometry.pkl found - run the photometry routine first!')
    else:
        print('\n\t###################################')
        print('\tSkipping post-processing....')
        print('\t###################################')
    # Run the detrending routine (unless --photometry or --post-processing is passed)
    if not args.photometry and not args.post_processing:
        print('\n\t###################################')
        print('\tDoing detrending....')
        print('\t###################################')
        # call eden_GPDetrend
        if os.path.exists(reduced_dir + post_name):
            # Try to run detrending, but skip this data set if it fails
            try:
                # Run the detrending routine
                eden_GPDetrend(tele, reduced_dir, targets, calibrated=not args.no_calibration)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                e = sys.exc_info()
                print("\t Detrending FAILED with error type: {0}".format(e[0]))
                print("\t See the log for more information.")
                log("\t Detrending failed for {:s} with the following error:".format(date_dirs[i]))
                log(''.join(traceback.format_exception(*e)))  # BUGFIX: was print_exception (logged None)
                failed_det.append(i)
    else:
        print('\n\t###################################')
        print('\tSkipping detrending....')
        print('\t###################################')
# At the end, print which data sets failed.
# BUGFIX: also report when only the detrending step failed -- the original
# condition omitted failed_det, silently dropping detrending-only failures.
if len(failed_phot) > 0 or len(failed_post) > 0 or len(failed_det) > 0:
    print("The following data sets FAILED to reduce:")
    log("The following data sets FAILED to reduce:")
    if len(failed_phot) > 0:
        string = ["({:s}/{:s}/{:s})".format(tele, targets[i], dates[i].iso.split()[0]) for i in failed_phot]
        print("Photometry: {:s}".format(' '.join(string)))
        log("Photometry: {:s}".format(' '.join(string)))
    if len(failed_post) > 0:
        string = ["({:s}/{:s}/{:s})".format(tele, targets[i], dates[i].iso.split()[0]) for i in failed_post]
        print("Post-processing: {:s}".format(' '.join(string)))
        log("Post-processing: {:s}".format(' '.join(string)))
    if len(failed_det) > 0:
        string = ["({:s}/{:s}/{:s})".format(tele, targets[i], dates[i].iso.split()[0]) for i in failed_det]
        print("Detrending: {:s}".format(' '.join(string)))
        log("Detrending: {:s}".format(' '.join(string)))
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,379 | abgibbs/edenAP_detrend | refs/heads/myedits | /PhotUtils.py | import multiprocessing as mp
import os
import re
import signal
import subprocess
import sys
import time as clocking_time
import warnings
from configparser import ConfigParser
from math import modf
# Use for UTC -> BJD conversion:
import dateutil
import ephem as E
import jdcal
import matplotlib
import numpy as np
from astropy import _erfa as erfa
from astropy import constants as const
from astropy import coordinates as coord
from astropy import time
from astropy import units as u
from astropy import wcs
from astropy.coordinates import SkyCoord, Angle
from astropy.io import fits
from astropy.stats import gaussian_sigma_to_fwhm
from astropy.stats import sigma_clipped_stats
from astropy.utils.data import download_file
from astropy.utils.iers import IERS
from astropy.utils.iers import IERS_A, IERS_A_URL
from astroquery.irsa import Irsa
from astroquery.simbad import Simbad
from photutils import CircularAperture, aperture_photometry
from photutils import DAOStarFinder
from photutils import make_source_mask
from photutils.centroids import centroid_com
from scipy import optimize
from scipy.ndimage.filters import gaussian_filter
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from constants import log, natural_keys, LOOKDATE, find_val
# define constants from config.ini
# Parsed once at import time; all paths below come from the repo-level config.ini.
config = ConfigParser()
config.read('config.ini')
# Root mount point of the data server; stripped from stored frame paths so
# dictionaries stay portable across machines with different mounts.
server_destination = config['FOLDER OPTIONS']['server_destination']
# Directory containing the cfitsio `funpack` binary (decompresses .fits.fz).
fpack_folder = config['FOLDER OPTIONS']['funpack']
# Directory containing astrometry.net's `solve-field` executable.
astrometry_directory = config['FOLDER OPTIONS']['astrometry']
# Config section of manually supplied object coordinates.
manual_object_coords = config['Manual Coords']
# Ignore computation errors (divide-by-zero / invalid ops yield nan silently):
np.seterr(divide='ignore', invalid='ignore')
# Define style of plotting (ggplot is nicer):
plt.style.use('ggplot')
# Default limit of rows is 500. Go for infinity and beyond!
Irsa.ROW_LIMIT = np.inf
# mean sidereal rate (at J2000) in radians per (UT1) second
SR = 7.292115855306589e-5
# configure simbad query to also return proper motions
Simbad.add_votable_fields('propermotions')
# Module-level globals: placeholders (re)assigned elsewhere in this module —
# presumably shared state for the aperture-photometry workers; confirm usage.
global_d = 0
global_h = 0
global_x = 0
global_y = 0
global_R = 0
global_target_names = 0
global_frame_name = None
global_out_dir = None
global_saveplot = False
global_refine_centroids = False
global_half_size = 30
global_GAIN = 2.3
fwhm_factor = 2.
sigma_gf = 5. * fwhm_factor  # gaussian-filter sigma, scaled by fwhm_factor (was a fixed 5.)
def date_format(year, month, day, hour, minute, second):
    """Assemble a 'YYYY-MM-DDTHH:MM:SS' timestamp, zero-padding one-digit fields."""
    def pad(value):
        text = str(value)
        return text if value > 9 else '0' + text

    date_part = '-'.join(pad(v) for v in (year, month, day))
    time_part = ':'.join(pad(v) for v in (hour, minute, second))
    return date_part + 'T' + time_part
def TrimLim(s):
    """Parse an IRAF-style section string '[c1:c2,r1:r2]' into (c1, c2, r1, r2) ints."""
    cols_part, rows_part = s[1:-1].split(',')
    c_lo, c_hi = (int(v) for v in cols_part.split(':'))
    r_lo, r_hi = (int(v) for v in rows_part.split(':'))
    return c_lo, c_hi, r_lo, r_hi
def site_data_2_string(sitelong, sitelat, sitealt):
    """Convert site longitude/latitude/altitude into string form.

    Numeric longitudes (degrees east) are converted to a 'DDD:MM.M' string
    measured as 360 - east; numeric latitudes to 'DD:MM.M'; numeric altitudes
    to str(). Values that are already strings pass through unchanged.

    :param sitelong: longitude in degrees east, or a preformatted string
    :param sitelat: latitude in degrees north, or a preformatted string
    :param sitealt: altitude in meters, or a preformatted string
    :return: (longitude, latitude, altitude) as strings
    """
    # The Python-2 `basestring = str` shim was removed; isinstance(..., str)
    # is sufficient on Python 3.
    if not isinstance(sitelong, str):
        longitude = str(int(modf(360. - sitelong)[1])) + ':' + str(modf(360. - sitelong)[0] * 60.)
    else:
        longitude = sitelong
    if not isinstance(sitelat, str):
        # NOTE(review): the minutes term is negated for *positive* latitudes,
        # which looks suspicious — confirm the downstream parser expects this.
        latitude = str(int(modf(sitelat)[1])) + ':' + str(-modf(sitelat)[0] * 60.)
    else:
        latitude = sitelat
    if not isinstance(sitealt, str):
        return longitude, latitude, str(sitealt)
    return longitude, latitude, sitealt
def getCalDay(JD):
    """Convert a Julian Date into calendar pieces plus zero-padded time strings.

    :param JD: Julian Date (float)
    :return: (year, month, day, 'HH', 'MM', 'SS.S'); hour/minute strings are
        zero-padded to two digits, seconds rounded to one decimal place.
    """
    year, month, day, day_fraction = jdcal.jd2gcal(JD, 0.0)
    hours = day_fraction * 24
    minutes = modf(hours)[0] * 60.0
    seconds = modf(minutes)[0] * 60.0
    whole_hours = int(modf(hours)[1])
    whole_minutes = int(modf(minutes)[1])
    hh = str(whole_hours) if whole_hours >= 10 else '0' + str(whole_hours)
    mm = str(whole_minutes) if whole_minutes >= 10 else '0' + str(whole_minutes)
    # Padding is decided on the *unrounded* seconds, matching historical output.
    ss = str(np.round(seconds, 1)) if seconds >= 10 else '0' + str(np.round(seconds, 1))
    return year, month, day, hh, mm, ss
def getTime(year, month, day, hh, mm, ss):
    """Format a date plus pre-padded time strings as 'YYYY/M/D HH:MM:SS'."""
    date_part = '/'.join(str(part) for part in (year, month, day))
    return date_part + ' ' + hh + ':' + mm + ':' + ss
def getAirmass(ra, dec, day, longitude, latitude, elevation):
    """Compute the airmass of a sky position for a given site and time.

    :param ra: RA in decimal degrees, or an 'HH:MM:SS' string
    :param dec: Dec in decimal degrees, or a 'DD:MM:SS' string
    :param day: date/time string or object, e.g. '2018-05-29 05:20:30.5'
    :param longitude: longitude in degrees east of Greenwich (East +, West -)
    :param latitude: latitude in degrees north of the Equator (North +, South -)
    :param elevation: site elevation above sea level in meters
    :return: airmass of the celestial object

    Example (Kuiper Telescope, 2MASSI J183579+325954, header airmass 1.01):

    >> getAirmass('18:35:37.92', '+32:59:54.5', '2018-07-19 05:43:20.411',
    ..            -110.73453, 32.41647, 2510.)
    Out[1]: 1.005368864146983
    """
    body = E.FixedBody()
    # Strings are parsed by pyephem directly; numbers are degrees -> radians.
    body._ra = ra if isinstance(ra, str) else ra * E.degree
    body._dec = dec if isinstance(dec, str) else dec * E.degree
    site = E.Observer()
    site.date = day
    # pyephem expects radians; E.degree converts degrees to radians.
    site.long = E.degree * longitude
    site.lat = E.degree * latitude
    site.elevation = elevation
    site.temp = 7.0
    site.pressure = 760.0
    body.compute(site)
    # Altitude in degrees, then a small correction term is added to the
    # altitude before taking the cosecant to get the airmass.
    alt_deg = body.alt * 180 / np.pi
    corrected_alt = alt_deg + E.degree * 244. / (165. + 47. * (alt_deg ** 1.1))
    return 1. / np.sin(corrected_alt * np.pi / 180.)
def get_planet_data(planet_data, target_object_name):
    """Look up the (RA, Dec) of a target in a plain-text coordinate list.

    The file is expected to hold one `name ra dec` triple per line; lines
    starting with '#' are comments. Prints each candidate as it is scanned
    (preserved from the original implementation).

    :param planet_data: path to the coordinate list file
    :param target_object_name: object name to search for
    :return: [[ra, dec]] (strings) for the first matching name
    :raises SystemExit: if the target is not found in the file
    """
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original left the handle open on the error path).
    with open(planet_data, 'r') as f:
        for line in f:
            # Skip comments and (robustness fix) blank lines, which
            # previously crashed the 3-way unpack.
            if line.startswith('#') or not line.strip():
                continue
            name, ra, dec = line.split()
            print(name, ra, dec)
            if target_object_name == name:
                return [[ra, dec]]
    print('Planet ' + target_object_name + ' not found in file ' + planet_data)
    print('Add it to the list and try running the code again.')
    sys.exit()
def get_dict(target, central_ra, central_dec, central_radius, ra_obj, dec_obj,
             hdulist, exts, R, catalog=u'fp_psc', date=dateutil.parser.parse('20180101')):
    """Build the master photometry dictionary from 2MASS/PPMXL catalog queries.

    Queries 2MASS (``catalog``) around (central_ra, central_dec), applies PPMXL
    proper motions to epoch ``date``, keeps only sources falling on one of the
    image extensions ``exts`` of ``hdulist``, and allocates empty per-source
    arrays (centroids, background, fwhm, plus one flux/error pair per aperture
    radius in ``R``). The source nearest (ra_obj, dec_obj) is named
    ``target_star_<i>`` and its coordinates are replaced by user-supplied ones.

    NOTE(review): the ``date`` default is evaluated once at import time and
    shared across calls — callers should always pass an explicit date.

    :param target: target name, forwarded to get_general_coords
    :param central_ra, central_dec: field center in decimal degrees
    :param central_radius: search radius scale (degrees; multiplied by 5400")
    :param ra_obj, dec_obj: target coordinates in decimal degrees
    :param hdulist: opened FITS HDU list of the first frame
    :param exts: image extensions to consider
    :param R: list of aperture radii (pixels)
    :param catalog: IRSA catalog name to query (default 2MASS point sources)
    :param date: observation epoch used for the proper-motion correction
    :return: the freshly initialized master dictionary
    """
    print('\t > Generating master dictionary for coordinates', central_ra, central_dec, '...')
    # Make query to 2MASS:
    result = Irsa.query_region(coord.SkyCoord(central_ra, central_dec, unit=(u.deg, u.deg)),
                               spatial='Cone', radius=central_radius * 5400. * u.arcsec, catalog=catalog)
    # Query to PPMXL to get proper motions:
    resultppm = Irsa.query_region(coord.SkyCoord(central_ra, central_dec, unit=(u.deg, u.deg)),
                                  spatial='Cone', radius=central_radius * 5400. * u.arcsec, catalog='ppmxl')
    # Get RAs, DECs, and PMs from this last catalog:
    rappmxl = resultppm['ra'].data.data
    decppmxl = resultppm['dec'].data.data
    rappm = resultppm['pmra'].data.data
    decppm = resultppm['pmde'].data.data
    # Save coordinates, magnitudes and errors on the magnitudes:
    all_ids = result['designation'].data.data.astype(str)
    all_ra = result['ra'].data.data
    all_dec = result['dec'].data.data
    all_j = result['j_m'].data.data
    all_j_err = result['j_msigcom'].data.data
    all_k = result['k_m'].data.data
    all_k_err = result['k_msigcom'].data.data
    all_h = result['h_m'].data.data
    all_h_err = result['h_msigcom'].data.data
    # Correct RA and DECs for PPM. First, get delta T (years since J2000.0):
    data_jd = sum(jdcal.gcal2jd(date.year, date.month, date.day))
    deltat = (data_jd - 2451544.5) / 365.25
    print('\t Correcting PPM for date ', date, ', deltat: ', deltat, '...')
    for i in range(len(all_ra)):
        c_ra = all_ra[i]
        c_dec = all_dec[i]
        # Squared angular distance (degrees^2) to every PPMXL source:
        dist = (c_ra - rappmxl) ** 2 + (c_dec - decppmxl) ** 2
        min_idx = np.argmin(dist)
        # 3 arcsec tolerance:
        # NOTE(review): `dist` is a *squared* distance but the threshold is
        # linear (3/3600 deg); the effective match radius is therefore
        # sqrt(3/3600) deg ~ 104", not 3" — confirm the intended tolerance.
        if dist[min_idx] < 3. / 3600.:
            all_ra[i] = all_ra[i] + deltat * rappm[min_idx]
            all_dec[i] = all_dec[i] + deltat * decppm[min_idx]
    print('\t Done.')
    # Check which ras and decs have valid coordinates inside the first image...
    # ...considering all image extensions.
    # Save only those as valid objects for photometry:
    idx = []
    all_extensions = np.array([])
    for ext in exts:
        h = hdulist[ext].header
        x_max = hdulist[ext].data.shape[1]
        y_max = hdulist[ext].data.shape[0]
        x, y = SkyToPix(h, all_ra, all_dec)
        for i in range(len(x)):
            if 0 < x[i] < x_max and 0 < y[i] < y_max:
                idx.append(i)
                all_extensions = np.append(all_extensions, ext)
    assert len(idx) > 0, "Indeces list for reference stars could not be generated while creating MasterDict"
    # Create dictionary that will save all the data:
    log('CREATING DICTIONARY FOR THE FIRST TIME. CREATING KEYS')
    master_dict = {}
    # Per-frame arrays, one entry appended per processed frame:
    master_dict['frame_name'] = np.array([])
    master_dict['UTC_times'] = np.array([])
    master_dict['BJD_times'] = np.array([])
    master_dict['JD_times'] = np.array([])
    master_dict['LST'] = np.array([])
    master_dict['exptimes'] = np.array([])
    master_dict['airmasses'] = np.array([])
    master_dict['filters'] = np.array([])
    master_dict['source'] = np.array([])
    # Generate a flux dictionary for each target.
    master_dict['data'] = {}
    master_dict['data']['RA_degs'] = all_ra[idx]
    master_dict['data']['IDs'] = all_ids[idx]
    master_dict['data']['DEC_degs'] = all_dec[idx]
    master_dict['data']['RA_coords'], master_dict['data']['DEC_coords'] = DecimalToCoords(all_ra[idx], all_dec[idx])
    master_dict['data']['ext'] = all_extensions
    master_dict['data']['Jmag'] = all_j[idx]
    master_dict['data']['Jmag_err'] = all_j_err[idx]
    master_dict['data']['Kmag'] = all_k[idx]
    master_dict['data']['Kmag_err'] = all_k_err[idx]
    master_dict['data']['Hmag'] = all_h[idx]
    master_dict['data']['Hmag_err'] = all_h_err[idx]
    all_names = len(idx) * ['']
    # Get index of target star (nearest catalog source to the user coordinates):
    distances = (all_ra[idx] - ra_obj) ** 2 + (all_dec[idx] - dec_obj) ** 2
    target_idx = np.argmin(distances)
    # Dictionaries per reference star: centroids_x, centroids_y, background, background_err, fwhm
    # A fluxes_{aperture}_pix_ap keyword per aperture as well
    for i in range(len(idx)):
        if i != target_idx:
            all_names[i] = 'star_' + str(i)
        else:
            all_names[i] = 'target_star_' + str(i)
            # Replace RA and DEC with the ones given by the user:
            ra_str, dec_str = get_general_coords(target, date)
            ra_deg, dec_deg = CoordsToDecimal([[ra_str, dec_str]])
            master_dict['data']['RA_degs'][i] = ra_deg
            master_dict['data']['DEC_degs'][i] = dec_deg
            master_dict['data']['RA_coords'][i] = ra_str
            master_dict['data']['DEC_coords'][i] = dec_str
        master_dict['data'][all_names[i]] = {}
        master_dict['data'][all_names[i]]['centroids_x'] = np.array([])
        master_dict['data'][all_names[i]]['centroids_y'] = np.array([])
        master_dict['data'][all_names[i]]['background'] = np.array([])
        master_dict['data'][all_names[i]]['background_err'] = np.array([])
        master_dict['data'][all_names[i]]['fwhm'] = np.array([])
        for r in R:
            master_dict['data'][all_names[i]]['fluxes_' + str(r) + '_pix_ap'] = np.array([])
            master_dict['data'][all_names[i]]['fluxes_' + str(r) + '_pix_ap_err'] = np.array([])
    master_dict['data']['names'] = np.array(all_names)
    print('\t > Extracting data for ' + str(len(all_names)) + ' sources')
    return master_dict
def getPhotometry(filenames, target: str, telescope: str, filters, R, ra_obj, dec_obj, out_data_folder, use_filter: str,
                  get_astrometry=True, sitelong=None, sitelat=None, sitealt=None, refine_cen=False,astrometry_timeout=30,master_dict=None):
    """Run aperture photometry over a list of FITS frames and accumulate results.

    :param filenames: list of FITS file paths to process
    :param target: target name (used to override catalog coords with user ones)
    :param telescope: telescope keyword selecting header names/plate scale/gain
    :param filters: filter name recorded per frame in the output dictionary
    :param R: list of aperture radii (pixels) to photometer
    :param ra_obj, dec_obj: target RA/Dec (decimal degrees) as 1-element sequences
    :param out_data_folder: output directory for per-frame products
    :param use_filter: only process frames taken with this filter (None = all)
    :param get_astrometry: if True, solve WCS with astrometry.net when missing
    :param sitelong, sitelat, sitealt: site coordinates; override header values
    :param refine_cen: refine centroids during aperture photometry
    :param astrometry_timeout: per-attempt timeout (seconds) for astrometry.net
    :param master_dict: previously saved dictionary to update (None = start fresh)
    :return: the (new or updated) master dictionary of times/fluxes/metadata
    """
    # Define radius in which to search for targets (in degrees):
    search_radius = 0.25
    # Initialize an empty dictionary unless we are updating a saved one:
    if master_dict is None:
        master_dict = {}
        updating_dict = False
    else:
        updating_dict = True
    # Per-telescope header keyword names for site/exposure/(L)ST data,
    # plate-scale search limits (arcsec/pix) for astrometry, and the detector
    # gain (either a literal number or the header keyword that stores it).
    if telescope == 'SWOPE':
        long_h_name = 'SITELONG'
        lat_h_name = 'SITELAT'
        alt_h_name = 'SITEALT'
        exptime_h_name = 'EXPTIME'
        lst_h_name = None
        t_scale_low = 0.4
        t_scale_high = 0.45
        egain = 2.3
    elif telescope == 'CHAT':
        long_h_name = 'SITELONG'
        lat_h_name = 'SITELAT'
        alt_h_name = 'SITEALT'
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'LST'
        t_scale_low = 0.6
        t_scale_high = 0.65
        egain = 1.0
    elif telescope == 'LCOGT':
        # This data is good for LCOGT 1m.
        long_h_name = 'LONGITUD'
        lat_h_name = 'LATITUDE'
        alt_h_name = 'HEIGHT'
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'LST'
        t_scale_low = 0.3
        t_scale_high = 0.5
        egain = 'GAIN'
    elif telescope == 'SMARTS':
        long_h_name = 'LONGITUD'
        lat_h_name = 'LATITUDE'
        alt_h_name = 'ALTITIDE'
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'ST'
        t_scale_low = 0.1
        t_scale_high = 0.4
        egain = 3.2
    elif telescope == 'OBSUC':
        long_h_name = None
        sitelong = 289.4656
        sitelat = -33.2692
        sitealt = 1450.
        lat_h_name = None
        alt_h_name = None
        exptime_h_name = 'EXPTIME'
        lst_h_name = None
        t_scale_low = 0.2
        t_scale_high = 0.8
        egain = 'EGAIN'
    elif telescope == 'NTT':
        long_h_name = 'HIERARCH ESO TEL GEOLON'
        lat_h_name = 'HIERARCH ESO TEL GEOLAT'
        alt_h_name = 'HIERARCH ESO TEL GEOELEV'
        exptime_h_name = 'HIERARCH ESO DET WIN1 DIT1'
        lst_h_name = None
        t_scale_low = 0.2
        t_scale_high = 0.8
        egain = 'HIERARCH ESO DET OUT1 GAIN'
    elif telescope == 'KUIPER':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = -110.73453
        sitelat = 32.41647
        sitealt = 2510.
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'LST-OBS'
        t_scale_low = 0.145
        t_scale_high = 0.145 * 4
        egain = 3.173
    elif telescope == 'SCHULMAN':
        long_h_name = 'LONG-OBS'
        lat_h_name = 'LAT-OBS'
        alt_h_name = 'ALT-OBS'
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'ST'
        t_scale_low = 0.25
        t_scale_high = 0.35
        egain = 1.28
    elif telescope == 'VATT':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = -109.892014
        sitelat = 32.701303
        sitealt = 3191.
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'ST'
        t_scale_low = 0.185
        t_scale_high = 0.188 * 4
        egain = 1.9  # 'DETGAIN'
    elif telescope == 'BOK':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = -111.6
        sitelat = 31.98
        sitealt = 2120.
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'ST'
        t_scale_low = 0.1
        t_scale_high = 0.4 * 4
        egain = 1.5
    elif telescope == 'CASSINI':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = 11.3
        sitelat = 44.3
        sitealt = 785.
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'ST'
        t_scale_low = 0.55
        t_scale_high = 0.58 * 3
        egain = 2.22
    elif telescope == 'CAHA':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = 2.55
        sitelat = 37.2
        sitealt = 2168.
        exptime_h_name = 'EXPTIME'
        lst_h_name = 'LST'
        t_scale_low = 0.31
        t_scale_high = 0.3132 * 3
        egain = 'GAIN'
    elif telescope == 'GUFI':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = -109.892014
        sitelat = 32.701303
        sitealt = 3191.
        exptime_h_name = 'EXPTIME'
        lst_h_name = None
        t_scale_low = 0.05  # wrong
        t_scale_high = 0.35 * 3  # wrong
        egain = 'GAIN'
    elif telescope == 'LOT':
        long_h_name = None
        lat_h_name = None
        alt_h_name = None
        sitelong = 120.873611
        sitelat = 23.468611
        sitealt = 2862
        exptime_h_name = 'EXPTIME'
        lst_h_name = None
        t_scale_low = 0.375  # This number wasn't taken from specs, it was a matching value from astrometry
        t_scale_high = 0.375 * 3
        egain = 'GAIN'
    else:
        print('ERROR: the selected telescope %s is not supported.' % telescope)
        sys.exit()
    # Iterate through the files:
    first_time = True
    for f in filenames:
        # Decompress file. Necessary because Astrometry cant handle this:
        if f[-7:] == 'fits.fz':
            p = subprocess.Popen(fpack_folder + 'funpack ' + f, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, shell=True)
            p.wait()
            if p.returncode != 0 and p.returncode != None:
                print('\t Funpack command failed. The error was:')
                out, err = p.communicate()
                print(err)
                print('\n\t Exiting...\n')
                sys.exit()
            f = f[:-3]
        # Try opening the fits file (might be corrupt):
        try:
            hdulist = fits.open(f)
            fitsok = True
        except Exception as e:
            print('\t Encountered error opening {:}'.format(f))
            log(str(e))
            fitsok = False
        # If frame already reduced, skip it:
        if updating_dict:
            if 'frame_name' in master_dict.keys() and f.replace(server_destination,'') in master_dict['frame_name']:
                fitsok = False
        if fitsok:
            h0 = hdulist[0].header  # primary fits header
            exts = get_exts(hdulist)
            # Check filter:
            filter_ok = False
            if use_filter is None:
                filter_ok = True
            else:
                temp_filter: str = find_val(h0, 'FILTER', typ=str)
                if use_filter.lower() == temp_filter.lower():
                    filter_ok = True
            log("FILTER_OK: %r\t%s" % (filter_ok, f))
            if filter_ok:
                print('\t Working on frame ' + f)
                ########## OBTAINING THE ASTROMETRY ###############
                # First, run astrometry on the current frame if not ran already:
                filename = f.split('.fits')[0]
                # astrometry will save the file in the /REDUCED/ folder
                if f.startswith(server_destination):
                    wcs_filepath = f.replace('/CALIBRATED/', '/REDUCED/').replace('/RAW/','/REDUCED/')
                    wcs_filepath = os.path.join(os.path.dirname(wcs_filepath),
                                                'wcs_fits/',
                                                os.path.basename(wcs_filepath))
                    wcs_filepath = wcs_filepath.replace('.fits', '.wcs.fits')
                    if not os.path.exists(os.path.dirname(wcs_filepath)):
                        os.makedirs(os.path.dirname(wcs_filepath))
                else:
                    wcs_filepath = f.replace('.fits', '.wcs.fits')
                if not os.path.exists(wcs_filepath) and get_astrometry:
                    print('\t Calculating astrometry...')
                    run_astrometry(f, exts, ra=ra_obj[0], dec=dec_obj[0], radius=0.5,
                                   scale_low=t_scale_low, scale_high=t_scale_high,
                                   astrometry_timeout=astrometry_timeout)
                    print('\t ...done!')
                # Now get data if astrometry worked...
                if os.path.exists(wcs_filepath) and get_astrometry:
                    # If get_astrometry flag is on, load the header from the WCS-solved image onto the input data
                    # (Don't just use the WCS solved image; this way we can change between RAW and CALIBRATED input
                    # types without having to redo the astrometry)
                    print('\t Detected file ' + wcs_filepath + '. Using it...')
                    hdulist_wcs = fits.open(wcs_filepath)
                    for ext in exts:
                        hdulist[ext].header = hdulist_wcs[ext].header
                    hdulist_wcs.close()
                # ...or if no astrometry was needed on the frame:
                elif not get_astrometry:
                    continue  # hdulist = fits.open(f)
                # or if the astrometry ultimately failed... skip file and enter None values in the dict
                else:
                    print("\t Skipping file " + f)
                    continue
                # Create master dictionary and define data to be used in the code:
                if first_time:
                    date_time = LOOKDATE(h0)  # datetime object
                    try:
                        central_ra, central_dec = CoordsToDecimal([[find_val(h0, 'RA'),
                                                                    find_val(h0, 'DEC')]])
                    except ValueError:
                        # If there's no RA, Dec in the header, just use the target RA, Dec
                        central_ra, central_dec = ra_obj, dec_obj
                    if not updating_dict:
                        master_dict = get_dict(target, central_ra[0], central_dec[0], search_radius,
                                               ra_obj, dec_obj, hdulist, exts, R, date=date_time.date())
                    else:
                        # Rebuild the (index-ordered) list of star names from the saved dict:
                        all_data = master_dict['data'].keys()
                        all_names_d = []
                        all_idx = []
                        for dataname in all_data:
                            if 'star' in dataname:
                                all_names_d.append(dataname)
                                all_idx.append(int(dataname.split('_')[-1]))
                        idxs = np.argsort(all_idx)
                        all_names = len(all_names_d) * [[]]
                        for i in range(len(idxs)):
                            all_names[i] = all_names_d[idxs[i]]
                    if sitelong is None:
                        sitelong = h0[long_h_name]
                    if sitelat is None:
                        sitelat = h0[lat_h_name]
                    if sitealt is None:
                        sitealt = h0[alt_h_name]
                    first_time = False
                # Save filename to master dictionary
                # Remove the server root; this way it works on multiple computers with different mount points
                log('SETTING A FILENAME TO frame_name key in master_dict\t %s' % f.replace(server_destination,''))
                master_dict['frame_name'] = np.append(master_dict['frame_name'], f.replace(server_destination,''))
                ########## OBTAINING THE TIMES OF OBSERVATION ####################
                # Get the BJD time. First, add exposure time:
                utc_time = LOOKDATE(h0)
                t_center = utc_time + dateutil.relativedelta.relativedelta(seconds=h0[exptime_h_name] / 2.)
                # NOTE(review): `Time` (providing the .bcor barycentric helper used
                # below) is expected to be defined elsewhere in this module; it is
                # not astropy's `time.Time` imported at the top of this file.
                t = Time(t_center, scale='utc', location=(str(sitelong) + 'd', str(sitelat) + 'd', sitealt))
                RA = find_val(h0, 'RA')
                DEC = find_val(h0, 'DEC')
                try:
                    # the purpose of this is to see if values are floats...therefore degrees
                    float(RA)
                    coords = SkyCoord(str(RA) + ' ' + str(DEC), unit=(u.deg, u.deg))
                except ValueError:
                    # if there is a colon in the values, assume sexagesimal
                    if ':' in RA and ':' in DEC:
                        coords = SkyCoord(RA + ' ' + DEC, unit=(u.hourangle, u.deg))
                    # Otherwise use the target coordinates
                    else:
                        coords = SkyCoord(str(ra_obj[0]) + ' ' + str(dec_obj[0]), unit=(u.deg, u.deg))
                # Save UTC, exposure, JD and BJD and LS times. Also airmass and filter used.
                master_dict['UTC_times'] = np.append(master_dict['UTC_times'], str(utc_time).replace(' ', 'T'))
                master_dict['exptimes'] = np.append(master_dict['exptimes'], h0[exptime_h_name])
                master_dict['JD_times'] = np.append(master_dict['JD_times'], t.jd)
                master_dict['BJD_times'] = np.append(master_dict['BJD_times'], ((t.bcor(coords)).jd))
                if lst_h_name is not None:
                    master_dict['LST'] = np.append(master_dict['LST'], h0[lst_h_name])
                else:
                    # No LST header keyword: derive a mean sidereal time string.
                    t.delta_ut1_utc = 0.
                    c_lst = str(t.sidereal_time('mean', 'greenwich'))
                    c_lst = c_lst.split('h')
                    hh = c_lst[0]
                    c_lst = c_lst[1].split('m')
                    mm = c_lst[0]
                    ss = (c_lst[1])[:-1]
                    master_dict['LST'] = np.append(master_dict['LST'], hh + ':' + mm + ':' + ss)
                # Calculate Accurate Airmass
                year, month, day, hh, mm, ss = getCalDay((t.bcor(coords)).jd)
                day = getTime(year, month, day, hh, mm, ss)
                master_dict['airmasses'] = np.append(master_dict['airmasses'],
                                                     getAirmass(central_ra[0], central_dec[0], day, sitelong,
                                                                sitelat, sitealt))
                # Save the filters
                master_dict['filters'] = np.append(master_dict['filters'], filters)
                # Save the data source (RAW or CALIBRATED)
                # BUGFIX: the membership test previously ran on f[0] (a single
                # character), so the source was always recorded as 'unknown'.
                source = 'RAW' if '/RAW/' in f else 'CALIBRATED' if '/CALIBRATED/' in f else 'unknown'
                master_dict['source'] = np.append(master_dict['source'], source)
                ########## OBTAINING THE FLUXES ###################
                # BUGFIX: this block (and the filters/source bookkeeping above)
                # is kept inside the `if filter_ok:` branch; it previously ran
                # for filter-rejected frames too, desynchronizing the per-frame
                # arrays (times vs. fluxes/filters/source).
                for ext in exts:
                    # Load the data:
                    h = hdulist[ext].header
                    data = hdulist[ext].data
                    # Get the indices of stars on this extension
                    idx = np.where(master_dict['data']['ext'] == ext)
                    # BUGFIX: `np.any(idx)` was False whenever the only matching
                    # index was 0; test the number of matches explicitly.
                    if len(idx[0]) == 0:
                        continue
                    # Get the names of stars on this extension
                    names_ext = master_dict['data']['names'][idx]
                    x, y = SkyToPix(h, master_dict['data']['RA_degs'][idx], master_dict['data']['DEC_degs'][idx])
                    # Get fluxes of all the targets in this extension for different apertures:
                    print('\t Performing aperture photometry on objects...')
                    tic = clocking_time.time()
                    # Gain is either a literal number or a header keyword name.
                    gain_value = h[egain] if isinstance(egain, str) else egain
                    fluxes, errors, x_ref, y_ref, bkg, bkg_err, fwhm = getAperturePhotometry(
                        data, h, x, y, R, names_ext,
                        frame_name=filename.split('/')[-1],
                        out_dir=out_data_folder,
                        GAIN=gain_value,
                        saveplot=False,
                        refine_centroids=refine_cen)
                    toc = clocking_time.time()
                    print('\t Took {:1.2f} seconds.'.format(toc - tic))
                    # Save everything in the dictionary:
                    for i in range(len(names_ext)):
                        extended_centroidsx = np.append(master_dict['data'][names_ext[i]]['centroids_x'], x_ref[i])
                        extended_centroidsy = np.append(master_dict['data'][names_ext[i]]['centroids_y'], y_ref[i])
                        extended_background = np.append(master_dict['data'][names_ext[i]]['background'], bkg[i])
                        extended_background_err = np.append(master_dict['data'][names_ext[i]]['background_err'],
                                                            bkg_err[i])
                        extended_fwhm = np.append(master_dict['data'][names_ext[i]]['fwhm'], fwhm[i])
                        master_dict['data'][names_ext[i]]['centroids_x'] = extended_centroidsx
                        master_dict['data'][names_ext[i]]['centroids_y'] = extended_centroidsy
                        master_dict['data'][names_ext[i]]['background'] = extended_background
                        master_dict['data'][names_ext[i]]['background_err'] = extended_background_err
                        master_dict['data'][names_ext[i]]['fwhm'] = extended_fwhm
                        for j in range(len(R)):
                            idx_fluxes = 'fluxes_%d_pix_ap' % R[j]
                            idx_fluxes_err = 'fluxes_%d_pix_ap_err' % R[j]
                            this_flux = fluxes[i, j]
                            this_flux_err = errors[i, j]
                            # quick test for nans
                            test = np.append(this_flux, this_flux_err)
                            if np.isnan(np.sum(test)):
                                log("ALERT: During Saving Aperture Photometry Properties:"
                                    " Fluxes and Fluxes Err contain NaNs. Details:")
                                log("names_ext: %s\nidx_fluxes: %s" % (names_ext[i], idx_fluxes))
                            extended_idx_fluxes = np.append(master_dict['data'][names_ext[i]][idx_fluxes], this_flux)
                            extended_idx_fluxes_err = np.append(master_dict['data'][names_ext[i]][idx_fluxes_err],
                                                                this_flux_err)
                            master_dict['data'][names_ext[i]][idx_fluxes] = extended_idx_fluxes
                            master_dict['data'][names_ext[i]][idx_fluxes_err] = extended_idx_fluxes_err
    return master_dict
def organize_files(files, obj_name, filt, leaveout=''):
    """Interactively classify FITS frames into biases, dome/sky flats and science frames.

    Prints a menu of unique object names found in the headers and asks the
    user (via input()) which menu indices are biases, dome flats, sky flats
    and science frames.

    :param files: list of FITS file paths
    :param obj_name: unused; kept for interface compatibility
    :param filt: filter name used to select non-bias frames
    :param leaveout: if non-empty, returned file names are truncated at this substring
    :return: (bias, dome_flats, sky_flats, objects) lists of file names
    """
    dome_flats = []
    sky_flats = []
    bias = []
    objects = []
    all_objects = len(files) * [[]]
    unique_objects = []
    for i in range(len(files)):
        try:
            with fits.open(files[i]) as hdulist:
                d, h = hdulist[1].data, hdulist[0].header
        except Exception:
            print('File ' + files[i] + ' probably corrupt. Skipping it')
            # BUGFIX: previously this fell through and reused the previous
            # frame's header (or raised NameError on the first file); now the
            # corrupt frame is genuinely skipped.
            continue
        if h['EXPTYPE'] == 'Bias':
            all_objects[i] = 'Bias'
        else:
            all_objects[i] = h['OBJECT']
        try:
            c_filter = h['FILTER']
        except Exception:
            # No FILTER keyword in the header; assume the requested filter.
            c_filter = filt
        if h['OBJECT'] not in unique_objects and c_filter == filt and h['EXPTYPE'] != 'Bias':
            unique_objects.append(h['OBJECT'])
        elif h['EXPTYPE'] == 'Bias' and 'Bias' not in unique_objects:
            # BUGFIX: list 'Bias' once instead of once per bias frame, so the
            # selection menu has no duplicate entries.
            unique_objects.append('Bias')
    print('\t We found the following frames:')
    for i in range(len(unique_objects)):
        counter = 0
        for obj in all_objects:
            if obj == unique_objects[i]:
                counter = counter + 1
        print('\t (' + str(i) + ') ' + unique_objects[i] + ' (' + str(counter) + ' frames)')
    print('\t Which ones are the (separate your selection with commas, e.g., 0,3,4)...')
    idx_biases = [int(i) for i in input('\t ...biases?').split(',')]
    idx_dome_flats = [int(i) for i in input('\t ...dome flats?').split(',')]
    idx_sky_flats = [int(i) for i in input('\t ...sky flats?').split(',')]
    idx_science = [int(i) for i in input('\t ...science frames?').split(',')]
    # Route every file into the list chosen for its object name:
    for i in range(len(files)):
        for j in range(len(unique_objects)):
            if unique_objects[j] == all_objects[i]:
                if leaveout != '':
                    im_name = files[i].split(leaveout)[0]
                else:
                    im_name = files[i]
                if j in idx_biases:
                    bias.append(im_name)
                elif j in idx_dome_flats:
                    dome_flats.append(im_name)
                elif j in idx_sky_flats:
                    sky_flats.append(im_name)
                elif j in idx_science:
                    objects.append(im_name)
    return bias, dome_flats, sky_flats, objects
def NormalizeFlat(MasterFlat):
    """Normalize a master flat by its median, forcing zero-valued pixels to 1.

    :param MasterFlat: 2-D (or N-D) numpy array with the combined flat field
    :return: array of the same shape, divided by its overall median; pixels
        that were exactly zero (dead/unexposed) are set to 1 so that dividing
        a science frame by this flat leaves them unchanged.
    """
    original_shape = MasterFlat.shape
    flat = MasterFlat.flatten()
    median_f = np.median(flat)
    zero_idx = np.where(flat == 0)
    flat = flat / median_f
    # BUGFIX/cleanup: the original assigned np.ones(len(idx)) — the length of
    # the np.where *tuple* (always 1) — which only worked by broadcasting.
    # A scalar assignment states the intent directly.
    flat[zero_idx] = 1.0
    return flat.reshape(original_shape)
def MedianCombine(ImgList, MB=None, flatten_counts=False):
    """Median-combine a list of FITS frames into a single image.

    :param ImgList: list of FITS file paths; pixel data is read from extension 1
    :param MB: master bias image, subtracted before normalizing (only used when
        ``flatten_counts`` is True)
    :param flatten_counts: if True, bias-subtract, divide by exposure time and
        normalize each frame by its median (flat-field style combination);
        otherwise just divide each frame by its exposure time
    :return: (combined_image, ronoise, gain) — readout noise and gain come from
        the first frame's ENOISE/EGAIN header keywords
    :raises ValueError: if ``ImgList`` is empty
    """
    n = len(ImgList)
    if n == 0:
        raise ValueError("empty list provided!")
    with fits.open(ImgList[0]) as hdulist:
        d, h = hdulist[1].data, hdulist[1].header
    # data,h = fits.getdata(ImgList[0],header=True)
    # Trim to the DATASEC region (FITS sections are 1-indexed and inclusive).
    datasec1, datasec2 = h['DATASEC'][1:-1].split(',')
    ri, rf = datasec2.split(':')
    ci, cf = datasec1.split(':')
    data = d[int(ri) - 1:int(rf), int(ci) - 1:int(cf)]
    # presumably the noise penalty of a median (vs mean) combine — confirm
    factor = 1.25
    if (n < 3):
        factor = 1
    ronoise = factor * h['ENOISE'] / np.sqrt(n)
    gain = h['EGAIN']
    if (n == 1):
        # Single frame: nothing to combine, just convert to counts per second.
        if h['EXPTIME'] > 0:
            texp = h['EXPTIME']
        else:
            texp = 1.
        if flatten_counts:
            return ((data - MB) / texp) / np.median((data - MB) / texp), ronoise, gain
        else:
            return data / texp, ronoise, gain
    else:
        # Stack the remaining frames along a third axis, then take the
        # pixel-wise median over the stack.
        for i in range(n - 1):
            with fits.open(ImgList[i + 1]) as hdulist:
                d, h = hdulist[1].data, hdulist[1].header
            # d,h = fits.getdata(ImgList[i+1],header=True)
            datasec1, datasec2 = h['DATASEC'][1:-1].split(',')
            ri, rf = datasec2.split(':')
            ci, cf = datasec1.split(':')
            d = d[int(ri) - 1:int(rf), int(ci) - 1:int(cf)]
            if h['EXPTIME'] > 0:
                texp = h['EXPTIME']
            else:
                texp = 1.
            if flatten_counts:
                data = np.dstack((data, ((d - MB) / texp) / np.median((d - MB) / texp)))
            else:
                data = np.dstack((data, d / texp))
        return np.median(data, axis=2), ronoise, gain
def run_astrometry(filename,exts, ra=None, dec=None, radius=0.5, scale_low=0.1, scale_high=1., astrometry_timeout = 30):
"""
This code runs Astrometry.net on a frame.
* ra and dec: are guesses of the ra and the dec of the center of the field (in degrees).
* radius: radius (in degrees) around the input guess ra,dec that astrometry should look for.
* scale_[low, high]: are scale limits (arcsec/pix) for the image.
* astrometry_timeout: maximum number of seconds to run astrometry.net (per attempt)
"""
# flags
success = False
# server work is a flag to work on Project's directory structure
server_work = True if filename.startswith(server_destination) else False
true_filename = filename
print('\t\t Found {:} extensions'.format(len(exts)))
# setup gf_filepath for gaussian filtered file and final WCS filepath
# MOD: JOSE. Save gf,wcs,etc files to /red/*/*/*/wcs_fits folder
if server_work:
filename = filename.replace('/CALIBRATED/','/REDUCED/').replace('/RAW/','/REDUCED/')
filename = os.path.join(os.path.dirname(filename), 'wcs_fits/', os.path.basename(filename))
gf_filepath = filename.replace('.fits', '_gf.fits')
wcs_filepath = filename.replace('.fits', '.wcs.fits')
else:
gf_filepath = filename.replace('.fits', '_gf.fits')
wcs_filepath = filename.replace('.fits', '.wcs.fits')
# check if wcs_filepath exists, if so has it been ran by astrometry?
isCorrect = False
if os.path.isfile(wcs_filepath):
with fits.open(wcs_filepath) as hdulist:
for comm in hdulist[0].header['COMMENT']:
if 'solved_' in comm.lower():
isCorrect = True
break
if not isCorrect:
# Create file to save gaussian filtered fits
with fits.open(true_filename) as hdulist:
# Overwrite argument is helpful if pipeline failed previously
hdulist.writeto(gf_filepath, overwrite=True)
# variables to check if current file is
cal_dir = os.path.dirname(true_filename)
current_file = os.path.basename(true_filename)
files = sorted(os.listdir(cal_dir), key=natural_keys)
# sigma of gaussian filter, and astrometry's radius search are increased per loop
nsigmas = 2
nradii = 2
sigmas = np.linspace(0, 5, nsigmas)
radii = np.linspace(0.6, 1.8, nradii)
log("Starting Astrometry on %s" % true_filename)
astrometry_path = os.path.join(astrometry_directory, 'solve-field')
for ext in exts:
print('\t\t Working on extension {:}...'.format(ext))
ext_fname = gf_filepath.replace('.fits', '_' + str(ext) + '.wcs.fits')
if (ra is not None) and (dec is not None) and (radius is not None):
CODE = '{} --continue --no-plots --downsample 2 --cpulimit 60 --extension "{}" ' \
'--scale-units arcsecperpix --scale-low {} --scale-high {} --ra {} ' \
'--dec {} --radius {} --new-fits "{}" "{}"'.format(astrometry_path, ext, scale_low, scale_high,
ra, dec, 'FIXME', ext_fname, gf_filepath)
else:
CODE = '{} --continue --no-plots --downsample 2 --cpulimit 60 --extension "{}" ' \
'--scale-units arcsecperpix --scale-low ' \
'{} --scale-high {} --new-fits "{}" "{}"'.format(astrometry_path, ext, scale_low, scale_high,
ext_fname, gf_filepath)
# attempt multiple sigmas/radii;
count = 0
for i in range(nradii):
temp_CODE = CODE.replace('FIXME', '%.3f' % radii[i])
for j in range(nsigmas):
with fits.open(true_filename) as hdulist:
data = gaussian_filter(hdulist[ext].data, sigmas[j])
fits.update(gf_filepath, data, hdulist[ext].header, ext)
log("Executing Astrometry Code:")
log(temp_CODE)
p = subprocess.Popen(temp_CODE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
#p.wait() # Commented out - we *don't* want to wait if it's going to take forever
# Log the start time
t_proc_start = clocking_time.time()
while True:
# Check if the process has returned an error or created the file
done = (p.returncode != 0 and p.returncode is not None) or (os.path.isfile(ext_fname))
if not done:
# How many seconds have elapsed?
elapsed_time = clocking_time.time()-t_proc_start
# Sleep for 1s
clocking_time.sleep(1)
# Kill the entire process group if the timeout is reached
if elapsed_time>astrometry_timeout:
print("\t\t Astrometry.net timed out after {:.0f} seconds! ({:d}/{:d} attempts)"\
.format(astrometry_timeout,count+1,nsigmas*nradii))
try:
os.killpg(os.getpgid(p.pid),signal.SIGINT)
except ProcessLookupError:
# On OS X the above line fails for some reason; this will just skip killing the process
pass
break
else:
p.wait() # Wait for astrometry.net to finish running, just in case
break
if p.returncode != 0 and p.returncode is not None:
print('\t\t ASTROMETRY FAILED. The error was:')
out, err = p.communicate()
print(err)
print('\n\t Exiting...\n')
sys.exit()
count += 1
success = os.path.isfile(ext_fname)
if success:
# if success, save successful parameters to first attempt
sigmas[0] = sigmas[j]
break
if success:
# if success, save successful parameters to first attempt in following exts
print("\t\t Astrometry was successful for ext " + str(ext))
log("Astrometry was successful for ext %d" % ext)
radii[0] = radii[i]
break
log("Total Number of Attempts: % d" % count)
print("\t\t Total Number of Attempts: " + str(count))
if not success:
log("Astrometry failed for ext %d" % ext)
print("\t\t Astronomy failed for ext %d" % ext)
print("\t\t Program will attempt to get WCS from latest processed file later.")
# Astrometry.net is run on individual extensions, which are saved above.
# Combining them back into a single file.
with fits.open(true_filename) as hdulist:
for ext in exts:
ext_fname = gf_filepath.replace('.fits', '_' + str(ext) + '.wcs.fits')
# Try to save new WCS info in original file
try:
with fits.open(ext_fname) as hdulist_new:
if ext == 0:
hdulist[0].header = hdulist_new[0].header
else:
hdulist[ext].header = hdulist_new[1].header
# If it doesn't work, copy the WCS info from the last file
except FileNotFoundError as err:
# print out err
print("\t\t " + str(err))
# search adjecent files to see if they have been proccessed...
# this will look for the files before and after the current one
for i in range(len(files)):
if files[i] == current_file and i != 0:
last_filename = files[i - 1]
break
else:
# if there is no previous file, get next file
for i in range(len(files)):
if files[i] == current_file and i != 0:
last_filename = files[i + 1]
break
else:
# if there are no such files... then skip file
log('Quitting astrometry and skipping file')
print("\t\t Quitting astrometry and skipping file")
return 0
# fix up file name of latest WCS file
if server_work:
last_wcs_filename = os.path.join(cal_dir, last_filename).replace('.fits', '.wcs.fits')
last_wcs_filename = last_wcs_filename.replace('/RAW/', '/REDUCED/').replace('/CALIBRATED/','/REDUCED/')
last_wcs_filename = os.path.join(os.path.dirname(last_wcs_filename),
'wcs_fits/',
os.path.basename(last_wcs_filename))
else:
last_wcs_filename = os.path.join(cal_dir, last_filename).replace('.fits', '.wcs.fits')
# does it exist?
isFile = os.path.isfile(last_wcs_filename)
if not isFile:
print("\t\t Could not find previous frame {:}".format(last_wcs_filename))
# if reference file doesn't exit, skip this one
# doing this will avoid raising an exception and quitting photometry proccess
return 0
# if it exists, then keep going
print('\t\t Using WCS info from previous frame {:}'.format(last_wcs_filename))
with fits.open(last_wcs_filename) as hdulist_new:
if ext == 0:
hdulist[ext].header = hdulist_new[ext].header
else:
hdulist[ext].header = hdulist_new[1].header
# If it works, remove the single-extension astrometry file
else:
os.remove(ext_fname)
# Strip the data from the WCS solution to save space
for ext in exts:
hdulist[ext].data = None
# Save the original headers with the WCS info
hdulist.writeto(wcs_filepath)
# Save space by removing the gaussian filtered image, if any
os.remove(gf_filepath)
def SkyToPix(h, ras, decs):
    """
    Convert sky coordinates (RA/Dec, degrees) into pixel coordinates using
    the WCS solution stored in the FITS header ``h``.

    :param h: FITS header carrying a WCS solution (mutated in place if
              EPOCH/EQUINOX cards need coercion to float)
    :param ras: sequence of right ascensions in decimal degrees
    :param decs: sequence of declinations in decimal degrees
    :return: (x, y) arrays of pixel coordinates (1-based origin)
    """
    # Some headers store EPOCH/EQUINOX as strings; astropy's WCS needs floats.
    for card in ('EPOCH', 'EQUINOX'):
        if card in h:
            h[card] = float(h[card])
    wcs_obj = wcs.WCS(h)
    # Assemble the input into an (N, 2) array of [ra, dec] rows.
    world = np.zeros([len(ras), 2])
    for idx in range(len(ras)):
        world[idx, 0] = ras[idx]
        world[idx, 1] = decs[idx]
    # origin=1: FITS convention pixel numbering.
    pixel = wcs_obj.wcs_world2pix(world, 1)
    return pixel[:, 0], pixel[:, 1]
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
The ``(x, y)`` center position. If `None` it will calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data).astype(float)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.dot(np.transpose(ypowers), data), xpowers)
def getAperturePhotometry(d, h, x, y, R, target_names, frame_name=None, out_dir=None, saveplot=False,
                          refine_centroids=False, half_size=50, GAIN=1.0, ncores=None):
    """
    Run multi-aperture photometry for every source position, in parallel.

    Inputs are published through module-level globals so that the worker
    function (getCentroidsAndFluxes) can read them in the pool processes
    without pickling the full image per task; consequently this function is
    not safe to call concurrently.

    :param d: image data (2D array)
    :param h: image header
    :param x: x-Coordinate of object in pixels
    :param y: y-Coordinate of object in pixels
    :param R: list/array of aperture radii
    :param target_names: source names, parallel to x/y
    :param frame_name: frame name used for diagnostic plot filenames
    :param out_dir: output directory for diagnostic plots
    :param saveplot: save a cutout plot for sources whose name contains 'target'
    :param refine_centroids: let workers re-fit each centroid before photometry
    :param half_size: half-size (pixels) of the cutout extracted per source
    :param GAIN: detector gain, exposed to the workers via global_GAIN
    :param ncores: number of pool processes (defaults to 4 when None)
    :return: (fluxes, fluxes_err, x_ref, y_ref, bkg, bkg_err, fwhm); the flux
             arrays have shape (n_sources, n_apertures)
    """
    global global_d, global_h, global_x, global_y, global_R, global_target_names, global_frame_name, \
        global_out_dir, global_saveplot, global_refine_centroids, global_half_size, global_GAIN
    global_d = d
    global_h = h
    global_x = x
    global_y = y
    global_R = R
    global_target_names = target_names
    global_frame_name = frame_name
    global_out_dir = out_dir
    global_saveplot = saveplot
    global_refine_centroids = refine_centroids
    global_half_size = half_size
    global_GAIN = GAIN
    # Preallocate one row per source; fluxes get one column per aperture radius.
    fluxes = np.zeros([len(x), len(R)])
    fluxes_err = np.zeros([len(x), len(R)])
    x_ref = np.zeros(len(x))
    y_ref = np.zeros(len(y))
    bkg = np.zeros(len(x))
    bkg_err = np.zeros(len(x))
    fwhm = np.zeros(len(x))
    if ncores is None:
        pool = mp.Pool(processes=4)
    else:
        pool = mp.Pool(processes=ncores)
    # Each task is just a source index; workers read everything else from globals.
    results = pool.map(getCentroidsAndFluxes, range(len(x)))
    pool.terminate()
    # Unpack the per-source result tuples into the output arrays.
    for i in range(len(x)):
        fluxes[i, :], fluxes_err[i, :], x_ref[i], y_ref[i], bkg[i], bkg_err[i], fwhm[i] = results[i]
    return fluxes, fluxes_err, x_ref, y_ref, bkg, bkg_err, fwhm
def getCentroidsAndFluxes(i):
    """
    Worker for getAperturePhotometry: photometer source *i*.

    Reads its inputs from the module-level ``global_*`` variables set by
    getAperturePhotometry. Returns
    (fluxes_per_aperture, flux_errors_per_aperture, x_ref, y_ref,
     background, background_sigma, fwhm); if the source position falls
    outside the image, the flux arrays stay at -1 and the trailing scalars
    are zero.
    """
    # Sentinel value -1 marks apertures that could not be measured.
    fluxes_R = np.ones(len(global_R)) * (-1)
    fluxes_err_R = np.ones(len(global_R)) * (-1)
    # Generate a sub-image around the centroid, if centroid is inside the image:
    if 0 < global_x[i] < global_d.shape[1] and 0 < global_y[i] < global_d.shape[0]:
        # Reminder (I might need it in case of confusion later) :
        # x1 and y1 aren't indeces, they're exclusive boundaries
        # while x0 and y0 are indeces and inclusive boundaries
        x0 = max(0, int(global_x[i]) - global_half_size)
        x1 = min(int(global_x[i]) + global_half_size, global_d.shape[1])
        y0 = max(0, int(global_y[i]) - global_half_size)
        y1 = min(int(global_y[i]) + global_half_size, global_d.shape[0])
        # Source position in sub-image coordinates.
        x_cen = global_x[i] - x0
        y_cen = global_y[i] - y0
        subimg = global_d[y0:y1, x0:x1].astype(float)
        # Source position in full-image coordinates (what gets returned).
        x_ref = x0 + x_cen
        y_ref = y0 + y_cen
        if global_refine_centroids:
            # Refine the centroids, if falls on full image, then redefine subimage to center object
            x_new, y_new = get_refined_centroids(subimg, x_cen, y_cen)
            # NOTE(review): x_new/y_new are sub-image coordinates but are bounded
            # here by the full-image shape — confirm this is intended.
            if 0 < x_new < global_d.shape[1] and 0 < y_new < global_d.shape[0]:
                x_cen, y_cen = int(x_new), int(y_new)
                x_ref = x0 + x_cen
                y_ref = y0 + y_cen
                # Re-cut the sub-image so the refined centroid sits at its center.
                x0 = max(0, x_ref - global_half_size)
                x1 = min(x_ref + global_half_size, global_d.shape[1])
                y0 = max(0, y_ref - global_half_size)
                y1 = min(y_ref + global_half_size, global_d.shape[0])
                subimg = global_d[y0:y1, x0:x1].astype(float)
        # Estimate background level: mask out sources, get median of masked image, and noise
        mask = make_source_mask(subimg, snr=2, npixels=5, dilate_size=11)
        mean, median, std = sigma_clipped_stats(subimg, sigma=3.0, mask=mask)
        background = median
        background_sigma = std
        # Work on a background-subtracted image; per-pixel error is the sky sigma.
        subimg -= background
        sky_sigma = np.ones(subimg.shape) * background_sigma
        # If saveplot is True, save image and the centroid:
        if global_saveplot and ('target' in global_target_names[i]):
            if not os.path.exists(global_out_dir + global_target_names[i]):
                os.mkdir(global_out_dir + global_target_names[i])
            im = plt.imshow(subimg)
            im.set_clim(0, 1000)
            plt.plot(x_cen, y_cen, 'wx', markersize=15, alpha=0.5)
            # Draw the smallest and largest apertures for visual inspection.
            circle = plt.Circle((x_cen, y_cen), np.min(global_R), color='black', fill=False)
            circle2 = plt.Circle((x_cen, y_cen), np.max(global_R), color='black', fill=False)
            plt.gca().add_artist(circle)
            plt.gca().add_artist(circle2)
            if not os.path.exists(global_out_dir + global_target_names[i] + '/' + global_frame_name + '.png'):
                plt.savefig(global_out_dir + global_target_names[i] + '/' + global_frame_name + '.png')
            plt.close()
        # With the calculated centroids, get aperture photometry:
        for j in range(len(global_R)):
            fluxes_R[j], fluxes_err_R[j] = getApertureFluxes(subimg, x_cen, y_cen, global_R[j], sky_sigma)
        fwhm = estimate_fwhm(subimg, x_cen, y_cen)
        return fluxes_R, fluxes_err_R, x_ref, y_ref, background, background_sigma, fwhm
    else:
        # Source outside the frame: return sentinels and the raw input position.
        return fluxes_R, fluxes_err_R, global_x[i], global_y[i], 0., 0., 0.
def get_refined_centroids(data, x_init, y_init, half_size=25):
    """
    Refines the centroids by fitting a centroid to the central portion of an image.
    Method assumes initial astrometry is accurate within the half_size.

    :param data: 2D sub-image around the source
    :param x_init: initial x position in `data` pixel coordinates
    :param y_init: initial y position in `data` pixel coordinates
    :param half_size: half-size of the central cutout used for the fit
    :return: (x, y) refined position in `data` pixel coordinates
    """
    # Take the central portion of the data (i.e., the subimg)
    x0 = max(0, int(x_init) - half_size)
    x1 = min(int(x_init) + half_size, data.shape[1])
    y0 = max(0, int(y_init) - half_size)
    y1 = min(int(y_init) + half_size, data.shape[0])
    # Initial guesses expressed in cutout coordinates.
    x_guess = x_refined = x_init - x0
    y_guess = y_refined = y_init - y0
    cen_data = data[y0:y1, x0:x1].astype(float)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        sources = DAOStarFinder(threshold=0, fwhm=2.35 * 5).find_stars(gaussian_filter(cen_data, 5))
    try:
        # BUG FIX: these lookups were outside the try block, so when
        # DAOStarFinder found nothing (find_stars returns None) the resulting
        # TypeError escaped and the gaussian fallback below never ran.
        xcents = sources['xcentroid']
        ycents = sources['ycentroid']
        dists = (x_refined - xcents) ** 2 + (y_refined - ycents) ** 2
        # Pick the detected source closest to the astrometric position.
        idx_min = np.argmin(dists)
        x_guess = xcents[idx_min]
        y_guess = ycents[idx_min]
    except Exception:
        print('\t\t DAOStarFinder failed. Refining pointing with a gaussian...')
        try:
            # Robustly fit a gaussian
            p = fit_gaussian(cen_data)
            x_guess = p[1]
            y_guess = p[2]
        except Exception:
            print('\t\t No luck. Resorting to astrometric coordinates.')
    # Don't let the new coordinates stray outside of the sub-image.
    # BUG FIX: the original compared x against data.shape[0] (rows) and y against
    # data.shape[1] (columns) — axes swapped for non-square images — and used the
    # full input instead of the cutout the guesses actually live in.
    if cen_data.shape[1] > x_guess > 0:
        x_refined = x_guess
    if cen_data.shape[0] > y_guess > 0:
        y_refined = y_guess
    # Translate back to the coordinates of the input `data`.
    return x_refined + x0, y_refined + y0
def gaussian(height, center_x, center_y, width_x, width_y):
    """Return a callable 2-D gaussian profile with the given parameters."""
    width_x = float(width_x)
    width_y = float(width_y)

    def profile(x, y):
        dx = (center_x - x) / width_x
        dy = (center_y - y) / width_y
        return height * np.exp(-(dx ** 2 + dy ** 2) / 2)

    return profile
def moments(data):
    """Returns (height, x, y, width_x, width_y): gaussian parameters of a 2D
    distribution estimated from its image moments."""
    total = data.sum()
    row_idx, col_idx = np.indices(data.shape)
    # First moments give the centroid along each array axis.
    x = (row_idx * data).sum() / total
    y = (col_idx * data).sum() / total

    def _spread(profile, mu):
        # Standard deviation of a 1-D cut about position mu.
        offsets = np.arange(profile.size) - mu
        return np.sqrt(np.abs(offsets ** 2 * profile).sum() / profile.sum())

    width_x = _spread(data[:, int(y)], y)
    width_y = _spread(data[int(x), :], x)
    return data.max(), x, y, width_x, width_y
def fit_gaussian(data):
    """Returns (height, x, y, width_x, width_y): gaussian parameters of a 2D
    distribution found by a least-squares fit seeded from the image moments."""
    initial_guess = moments(data)

    def _residuals(params):
        model = gaussian(*params)(*np.indices(data.shape))
        return np.ravel(model - data)

    fitted, success = optimize.leastsq(_residuals, initial_guess)
    return fitted
def estimate_fwhm(data, x0, y0):
    """
    Estimate the FWHM of an image containing a single (roughly gaussian)
    source by taking one horizontal and one vertical cut through (x0, y0),
    measuring the second moment of each, and averaging the two.
    """
    def _second_moment(axis_vals, profile, mu):
        return np.sqrt(np.sum(profile * (axis_vals - mu) ** 2) / np.sum(profile))

    # Clamp the cut indices to the image bounds (data 'should' already be centered).
    row = int(y0) if y0 < data.shape[0] else data.shape[0] - 1
    col = int(x0) if x0 < data.shape[1] else data.shape[1] - 1
    sigma_y = _second_moment(np.arange(data.shape[1]), data[row, :], x0)
    sigma_x = _second_moment(np.arange(data.shape[0]), data[:, col], y0)
    # Average the two estimates when both are non-zero; otherwise take whichever
    # survives (note: NaN is truthy, so only exact zeros are filtered here).
    if sigma_x and sigma_y:
        sigma = (sigma_x + sigma_y) / 2.
    else:
        sigma = sigma_y or sigma_x
    return gaussian_sigma_to_fwhm * sigma
def getApertureFluxes(subimg, x_cen, y_cen, Radius, sky_sigma):
    """Aperture photometry of one source; returns (flux, flux_error)."""
    aperture = CircularAperture([(x_cen, y_cen)], r=Radius)
    phot_table = aperture_photometry(subimg, aperture, error=sky_sigma)
    return phot_table['aperture_sum'][0], phot_table['aperture_sum_err'][0]
def angle2degree(raw_angle, unit):
    """
    Convert given angle with known unit (astropy.unit) to decimal degrees.

    :param raw_angle: numeric or string angle value
    :param unit: the angle's unit; an astropy.unit object
    :return: angle in decimal degrees
    """
    angle = Angle(raw_angle, unit=unit)
    return angle.deg
def CoordsToDecimal(coords):
    """
    Convert (RA, DEC) pairs to decimal degrees. Makes big assumptions given
    the wide variety of formats EDEN has come across:
    - numeric values: both RA and DEC are already in degrees;
    - string/non-numeric values: RA is in hour angle, DEC in degrees.

    :param coords: iterable of (raw_RA, raw_DEC) pairs
    :return: (ras, decs) numpy arrays in decimal degrees
    """
    ras_list = []
    decs_list = []
    # JOSE's mod ----- used to assume consistent formatting of RA/DEC in Header.
    for idx in range(len(coords)):
        raw_RA, raw_DEC = coords[idx]
        # the following if-else only works for current telescope usage (Aug. 2018)
        try:
            # Both are numeric, hence already in degrees.
            ras_list.append(angle2degree(float(raw_RA), u.deg))
            decs_list.append(angle2degree(float(raw_DEC), u.deg))
        except ValueError:
            # Strings: RA in hour angle, DEC in degrees.
            ras_list.append(angle2degree(raw_RA, u.hourangle))
            decs_list.append(angle2degree(raw_DEC, u.deg))
    return np.array(ras_list), np.array(decs_list)
def DecimalToCoords(ra_degs, dec_degs):
    """
    Convert decimal RA/DEC degrees into zero-padded sexagesimal strings
    ('hh:mm:ss.sss', 'dd:mm:ss.sss').

    :param ra_degs: right ascensions in decimal degrees
    :param dec_degs: declinations in decimal degrees
    :return: (ra_coords, dec_coords) lists of strings
    """
    ra_coords = []
    dec_coords = []
    for i in range(len(ra_degs)):
        # RA: degrees -> hours, then split into h/m/s.
        hours = (ra_degs[i] / 360.) * 24.
        hh = int(hours)
        mm = int((hours - hh) * 60.)
        ss = (hours - hh - mm / 60.0) * 3600.
        # DEC: split into d/m/s, carrying the sign on the degrees field only.
        deg_val = dec_degs[i]
        dd = int(deg_val)
        dm = int(np.abs(deg_val - dd) * 60.)
        ds = (np.abs(deg_val - dd) - dm / 60.) * 3600.
        ra_coords.append(NumToStr(hh) + ':' + NumToStr(mm) + ':' + NumToStr(ss, roundto=3))
        dec_coords.append(NumToStr(dd) + ':' + NumToStr(dm) + ':' + NumToStr(ds, roundto=3))
    return ra_coords, dec_coords
def NumToStr(number, roundto=None):
    """
    Zero-pad a number into a fixed-width string (e.g. 5 -> '05').

    :param number: integer or floating point value
    :param roundto: decimal places to keep (floats only); None or 0 truncates
                    the float to an integer
    :return: zero-padded string representation
    """
    fmt = '%02d'
    if isinstance(number, float):
        if roundto is not None and roundto != 0:
            # Width = two integer digits + the decimal point + the requested
            # decimals, plus one extra column for a leading minus sign.
            width = roundto + 4 if number < 0 else roundto + 3
            fmt = '%0{:d}.{:d}f'.format(width, roundto)
        else:
            number = int(number)
    if isinstance(number, int):
        width = 3 if number < 0 else 2
        fmt = '%0{:d}d'.format(width)
    return fmt % number
def SuperComparison(fluxes, errors):
    """Inverse-variance weighted mean of `fluxes` and its propagated error."""
    weights = 1. / errors ** 2
    flux = np.sum(fluxes * weights) / np.sum(weights)
    err_flux = np.sqrt(np.sum(errors ** 2) / np.double(len(fluxes)))
    return flux, err_flux
class Time(time.Time):
    """
    time class: inherits astropy time object, and adds heliocentric, barycentric
    correction utilities.
    """

    def __init__(self, *args, **kwargs):
        super(Time, self).__init__(*args, **kwargs)
        # Observer height in meters; stored but not used by the corrections below.
        self.height = kwargs.get('height', 0.0)

    def _pvobs(self):
        """
        calculates position and velocity of the telescope
        :return: position/velocity in AU and AU/d in GCRS reference frame
        """
        # convert obs position from WGS84 (lat long) to ITRF geocentric coords in AU
        xyz = self.location.to(u.AU).value
        # now we need to convert this position to Celestial Coords
        # specifically, the GCRS coords.
        # conversion from celestial to terrestrial coords given by
        # [TRS] = RPOM * R_3(ERA) * RC2I * [CRS]
        # where:
        # [CRS] is vector in GCRS (geocentric celestial system)
        # [TRS] is vector in ITRS (International Terrestrial Ref System)
        # ERA is earth rotation angle
        # RPOM = polar motion matrix
        tt = self.tt
        mjd = self.utc.mjd
        # we need the IERS values to correct for the precession/nutation of the Earth
        iers_tab = IERS.open()
        # Find UT1, which is needed to calculate ERA
        # uses IERS_B by default , for more recent times use IERS_A download
        try:
            ut1 = self.ut1
        except:
            # NOTE(review): bare except kept as-is; the exception type astropy
            # raises here varies by version — confirm before narrowing.
            try:
                iers_a_file = download_file(IERS_A_URL, cache=True)
                iers_a = IERS_A.open(iers_a_file)
                self.delta_ut1_utc = self.get_delta_ut1_utc(iers_a)
                ut1 = self.ut1
            except:
                # fall back to UTC with degraded accuracy
                warnings.warn('Cannot calculate UT1: using UTC with degraded accuracy')
                ut1 = self.utc
        # Gets x,y coords of Celestial Intermediate Pole (CIP) and CIO locator s
        # CIO = Celestial Intermediate Origin
        # Both in GCRS
        X, Y, S = erfa.xys00a(tt.jd1, tt.jd2)
        # Get dX and dY from IERS B
        dX = np.interp(mjd, iers_tab['MJD'], iers_tab['dX_2000A']) * u.arcsec
        dY = np.interp(mjd, iers_tab['MJD'], iers_tab['dY_2000A']) * u.arcsec
        # Get GCRS to CIRS matrix
        # can be used to convert to Celestial Intermediate Ref Sys
        # from GCRS.
        rc2i = erfa.c2ixys(X + dX.to(u.rad).value, Y + dY.to(u.rad).value, S)
        # Gets the Terrestrial Intermediate Origin (TIO) locator s'
        # Terrestrial Intermediate Ref Sys (TIRS) defined by TIO and CIP.
        # TIRS related to to CIRS by Earth Rotation Angle
        sp = erfa.sp00(tt.jd1, tt.jd2)
        # Get X and Y from IERS B
        # X and Y are the polar-motion coordinates
        xp = np.interp(mjd, iers_tab['MJD'], iers_tab['PM_x']) * u.arcsec
        yp = np.interp(mjd, iers_tab['MJD'], iers_tab['PM_y']) * u.arcsec
        # Get the polar motion matrix. Relates ITRF to TIRS.
        rpm = erfa.pom00(xp.to(u.rad).value, yp.to(u.rad).value, sp)
        # multiply ITRF position of obs by transpose of polar motion matrix
        # Gives Intermediate Ref Frame position of obs
        x, y, z = np.array([rpmMat.T.dot(xyz) for rpmMat in rpm]).T
        # Functions of Earth Rotation Angle, theta
        # Theta is angle bewtween TIO and CIO (along CIP)
        # USE UT1 here.
        theta = erfa.era00(ut1.jd1, ut1.jd2)
        S, C = np.sin(theta), np.cos(theta)
        # Position #GOT HERE
        pos = np.asarray([C * x - S * y, S * x + C * y, z]).T
        # multiply by inverse of GCRS to CIRS matrix
        # different methods for scalar times vs arrays
        if pos.ndim > 1:
            pos = np.array([np.dot(rc2i[j].T, pos[j]) for j in range(len(pos))])
        else:
            pos = np.dot(rc2i.T, pos)
        # Velocity
        # NOTE(review): SR is not defined anywhere in this file's visible scope;
        # presumably a module-level Earth-rotation-rate constant (rad/day) — confirm.
        vel = np.asarray([SR * (-S * x - C * y), SR * (C * x - S * y), np.zeros_like(x)]).T
        # multiply by inverse of GCRS to CIRS matrix
        if vel.ndim > 1:
            vel = np.array([np.dot(rc2i[j].T, vel[j]) for j in range(len(pos))])
        else:
            vel = np.dot(rc2i.T, vel)
        # return position and velocity
        return pos, vel

    def _obs_pos(self):
        """
        calculates heliocentric and barycentric position of the earth in AU and AU/d
        """
        tdb = self.tdb
        # get heliocentric and barycentric position and velocity of Earth
        # BCRS reference frame
        h_pv, b_pv = erfa.epv00(tdb.jd1, tdb.jd2)
        # h_pv etc can be shape (ntimes,2,3) or (2,3) if given a scalar time
        if h_pv.ndim == 2:
            h_pv = h_pv[np.newaxis, :]
        if b_pv.ndim == 2:
            b_pv = b_pv[np.newaxis, :]
        # unpack into position and velocity arrays
        h_pos = h_pv[:, 0, :]
        h_vel = h_pv[:, 1, :]
        # unpack into position and velocity arrays
        b_pos = b_pv[:, 0, :]
        b_vel = b_pv[:, 1, :]
        # now need position and velocity of observing station
        pos_obs, vel_obs = self._pvobs()
        # add this to heliocentric and barycentric position of center of Earth
        h_pos += pos_obs
        b_pos += pos_obs
        h_vel += vel_obs
        b_vel += vel_obs
        return h_pos, h_vel, b_pos, b_vel

    def _vect(self, coord):
        '''get unit vector pointing to star, and modulus of vector, in AU
        coordinate of star supplied as astropy.coordinate object
        assume zero proper motion, parallax and radial velocity'''
        pmra = pmdec = px = rv = 0.0
        rar = coord.ra.radian
        decr = coord.dec.radian
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # ignore warnings about 0 parallax
            pos, vel = erfa.starpv(rar, decr, pmra, pmdec, px, rv)
        modulus = np.sqrt(pos.dot(pos))
        unit = pos / modulus
        # convert the modulus from meters to AU
        modulus /= const.au.value
        return modulus, unit

    def hcor(self, coord):
        """Return this time corrected to the heliocentre for the star at `coord`."""
        mod, spos = self._vect(coord)
        # get helio/bary-centric position and velocity of telescope, in AU, AU/d
        h_pos, h_vel, b_pos, b_vel = self._obs_pos()
        # heliocentric light travel time, s
        tcor_hel = const.au.value * np.array([np.dot(spos, hpos) for hpos in h_pos]) / const.c.value
        # print 'Correction to add to get time at heliocentre = %.7f s' % tcor_hel
        dt = time.TimeDelta(tcor_hel, format='sec', scale='tdb')
        return self.utc + dt

    def bcor(self, coord):
        """Return this time corrected to the solar-system barycentre (BJD_TDB-style)."""
        mod, spos = self._vect(coord)
        # get helio/bary-centric position and velocity of telescope, in AU, AU/d
        h_pos, h_vel, b_pos, b_vel = self._obs_pos()
        # barycentric light travel time, s
        tcor_bar = const.au.value * np.array([np.dot(spos, bpos) for bpos in b_pos]) / const.c.value
        # print 'Correction to add to get time at barycentre = %.7f s' % tcor_bar
        dt = time.TimeDelta(tcor_bar, format='sec', scale='tdb')
        return self.tdb + dt
def get_exts(hdulist):
    """
    Returns a list of the fits extensions containing data.

    :param hdulist: an astropy HDUList, or a filename to open (and close) here
    :return: list of extension indices whose ``data`` is not None
    """
    # If the input isn't an HDUList then assume it is a filename and open the file
    if type(hdulist) is not fits.HDUList:
        hdulist = fits.open(hdulist)
        close = True
    else:
        close = False
    exts = [i for i, hdu in enumerate(hdulist) if hdu.data is not None]
    # If the input wasn't an HDUList, then close the image
    if close:
        # BUG FIX: this previously called im.close() on an undefined name `im`,
        # raising NameError (and leaking the file handle) whenever a filename
        # was passed in.
        hdulist.close()
    return exts
    # Old method (not compatible with CAHA data; also assumes extension 0 is empty
    """
    h = fits.getheader(filename)
    try:
        EXTEND = h['EXTEND']
    except KeyError:
        EXTEND = False
    if EXTEND:
        exts = range(1, h['NEXTEND'] + 1)
    else:
        exts = [0]
    return exts
    """
def get_general_coords(target, date):
    """
    Given a target name, returns RA and DEC from simbad, proper-motion
    corrected from J2000 to the observation date.

    Falls back to the module-level manual_object_coords table when the Simbad
    query fails and the target is listed there; otherwise re-raises.

    :param target: string name of target (underscores treated as spaces)
    :param date: date string ('YYYY-MM-DD' or 'YYYYMMDD') or datetime object
    :return: (ra, dec) strings formatted as 'hh:mm:ss.sss' and 'dd:mm:ss.sss'
    """
    if isinstance(date, str):
        # Normalize 'YYYY-MM-DD' to 'YYYYMMDD' for the slicing below.
        date = date.replace('-', '')
    try:
        # Try to get info from Simbad
        target_fixed = target.replace('_', ' ')
        log("Querying Simbad Target: %s" % target_fixed)
        result = Simbad.query_object(target_fixed)
        # If none, try again with a dash
        if result is None:
            result = Simbad.query_object(target_fixed.replace(' ','-'))
        if result is None:
            # result is None when query fails
            raise KeyError('Invalid target name in Simbad Query: %s' %target_fixed)
        else:
            # print("\t Simbad lookup successful for {:s}!".format(target_fixed))
            log("Simbad lookup successful for {:s}!".format(target_fixed))
    except KeyError as e:
        # Manually load values
        log(str(e))
        if target in manual_object_coords:
            ra, dec = manual_object_coords[target].split(' ')
            return ra, dec
        else:
            # no other option but to raise err
            raise
    else:
        # Assuming the Simbad query worked, load the coordinates:
        # Load positions as strings
        rahh, ramm, rass = result['RA'][0].split()
        decdd, decmm, decss = result['DEC'][0].split()
        # Load proper motions as arcsec / year
        pmra = result['PMRA'].to(u.arcsec / u.year).value[0]
        pmdec = result['PMDEC'].to(u.arcsec / u.year).value[0]
        # Convert RA and DEC to whole numbers:
        ra = np.double(rahh) + (np.double(ramm) / 60.) + (np.double(rass) / 3600.)
        if np.double(decdd) < 0:
            # Negative declination: minutes/seconds subtract from the degrees.
            dec = np.double(decdd) - (np.double(decmm) / 60.) - (np.double(decss) / 3600.)
        else:
            dec = np.double(decdd) + (np.double(decmm) / 60.) + (np.double(decss) / 3600.)
        # Calculate time difference from J2000:
        if isinstance(date, str):
            year = int(date[:4])
            month = int(date[4:6])
            day = int(date[6:8])
            s = str(year) + '.' + str(month) + '.' + str(day)
            dt = dateutil.parser.parse(s)
        else:
            dt = date
        # gcal2jd returns (MJD zero-point, MJD); their sum is the Julian Date.
        data_jd = sum(jdcal.gcal2jd(dt.year, dt.month, dt.day))
        # Years elapsed since J2000.0 (JD 2451544.5).
        deltat = (data_jd - 2451544.5) / 365.25
        # Calculate total PM:
        pmra = np.double(pmra) * deltat / 15.  # Conversion from arcsec to sec <- This works for GJ 1214, TRAPPIST-1
        pmdec = np.double(pmdec) * deltat
        # Correct proper motion:
        c_ra = ra + ((pmra) / 3600.)
        c_dec = dec + ((pmdec) / 3600.)
        # Return RA and DEC:
        ra_hr = int(c_ra)
        ra_min = int((c_ra - ra_hr) * 60.)
        ra_sec = (c_ra - ra_hr - ra_min / 60.0) * 3600.
        dec_deg = int(c_dec)
        dec_min = int(np.abs(c_dec - dec_deg) * 60.)
        dec_sec = (np.abs(c_dec - dec_deg) - dec_min / 60.) * 3600.
        return NumToStr(ra_hr) + ':' + NumToStr(ra_min) + ':' + NumToStr(ra_sec, roundto=3), \
               NumToStr(dec_deg) + ':' + NumToStr(dec_min) + ':' + NumToStr(dec_sec, roundto=3)
def get_suffix(s):
    """Return the substring of *s* that follows its last digit.

    Raises AttributeError if *s* contains no digit (match is None).
    """
    match = re.match('.+([0-9])[^0-9]*$', s)
    # Group 1 is the last digit; everything after it is the suffix.
    return s[match.start(1) + 1:]
def get_trailing_number(s):
    """Return the integer at the end of *s*, or None if it doesn't end in digits."""
    match = re.search(r'\d+$', s)
    if match is None:
        return None
    return int(match.group())
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,380 | abgibbs/edenAP_detrend | refs/heads/myedits | /cal_data.py | import resource
import os
import random
import re
from datetime import datetime
from gc import collect
from multiprocessing import Pool
import time
import objgraph
import psutil
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy.modeling import models
from astropy.nddata import CCDData
from ccdproc import trim_image, subtract_overscan, cosmicray_lacosmic
from DirSearch_Functions import search_all_fits, set_mulifits
from constants1 import log, ModHDUList, check_and_rename
def todays_day():
    """Return the current local date/time; kept callable so call sites defer evaluation."""
    # IMPROVED: was `todays_day = lambda: datetime.today()` — assigning a lambda
    # to a name (PEP 8 E731) loses the function's name in tracebacks.
    return datetime.today()


# NOTE(review): a `global` statement at module level is a no-op; kept verbatim,
# but these counters are presumably assigned elsewhere in the module — confirm.
global num_flats, num_bias, num_darks
def atoi(text):
    """Return int(text) for all-digit strings, otherwise the string unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    '''
    Sort key for human ("natural") ordering:
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    # Split on digit runs (kept via the capture group) and convert them to ints
    # so that e.g. 'file10' sorts after 'file2'.
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split('(\d+)', text)]
def create_generator(iterator):
    """Wrap any iterable in a generator that lazily yields its items."""
    yield from iterator
def memory():
    """Return this process's resident set size (RSS) in megabytes."""
    import psutil
    process = psutil.Process(os.getpid())
    rss_bytes = process.memory_info().rss
    return rss_bytes / float(2 ** 20)
def chunks(l, n):
    """Split l into successive n-sized chunks; the last chunk may be shorter.

    Note: returns a list of chunks (the original docstring said "yield",
    but callers have always received a fully-built list).

    :param l: a sliceable sequence
    :param n: chunk size (must be positive)
    :return: list of slices of l
    """
    return [l[i:i + n] for i in range(0, len(l), n)]
def replace_or_inf(hdul, constant, zero=False, infi=False):
    """In-place: replace zero and/or +inf pixels with `constant` in every HDU
    that carries data; HDUs with ``data is None`` are skipped."""
    for hdu in hdul:
        pixels = hdu.data
        if pixels is None:
            continue
        if zero:
            pixels[pixels == 0] = constant
        if infi:
            pixels[pixels == np.inf] = constant
def get_gain_rdnoise(calibration, bins=2, filters=None, twi_flat=False):
    """
    Empirically estimate per-amplifier gain (e-/ADU) and read noise (e-)
    from pairs of low-sigma bias and flat frames (photon-transfer method).

    :param calibration: list of calibration directories to search for FITS files
    :param bins: binning the frames must match (via check_comp)
    :param filters: filters the flats must match; if falsy, returns None immediately
    :param twi_flat: if True, accept twilight flats in the filter comparison
    :return: (gains, read_noises_e) arrays per amplifier, or None if no filters given
    """
    basename = os.path.basename
    bias = []
    append_bias = bias.append
    flats = []
    append_flats = flats.append
    medbias_path = None
    if not filters:
        return None
    # edit for calibrations in different folders
    all_calibs = []
    for i in range(len(calibration)):
        for j in search_all_fits(calibration[i]):
            all_calibs.append(j)
    # Classify every candidate file into bias / flat buckets.
    for filepath in all_calibs:
        filename = basename(filepath)
        this_imagetype = find_imgtype(filepath)
        if not check_comp(filepath, bins):
            continue
        # avoid all median/mean files, except bias
        if 'MEDIAN' in filename or 'MEAN' in filename.upper():
            if "BIAS" in filename.upper():
                # Remember a pre-built master bias so we can skip building one.
                medbias_path = filepath
            continue
        if "BIAS" == this_imagetype or "ZERO" == this_imagetype:
            append_bias(filepath)
            continue
        elif "FLAT" == this_imagetype:
            if check_comp(filepath, filters, twilight_flats=twi_flat):
                append_flats(filepath)
                continue
    # limit search
    assert bias and flats, "Either bias or flats files were not detected."
    if len(flats) > 100:
        flats = random.sample(flats, 100)
    if len(bias) > 100:
        bias = random.sample(bias, 100)
    # files with lowest sigma
    least_sigma = 1e6
    bias1 = bias2 = None
    log('looking for bias files with lowest sigma')
    # bias1 ends up as the lowest-sigma frame, bias2 the previous best.
    for bias_file in bias:
        bias_ = ModHDUList(bias_file)
        bias_ = overscan_sub(bias_)
        sigma = bias_.std()
        if sigma < least_sigma:
            log("{:1.2f}\t{:s}".format(sigma, bias_file))
            # log(str(sigma) + bias_file)
            bias2 = bias1
            bias1 = bias_
            least_sigma = sigma
    log('looking for flats file with lowest sigma')
    flat1 = flat2 = None
    least_sigma = 1e6
    # we need to account for bias noise
    if not medbias_path:
        medbias_path = cross_median(bias, this_type='Bias')
    medbias = ModHDUList(medbias_path)
    for flat_file in flats:
        flat = ModHDUList(flat_file) - medbias
        flat = overscan_sub(flat)
        sigma = flat.std()
        if sigma < least_sigma:
            log("{:1.2f}\t{:s}".format(sigma, flat_file))
            flat2 = flat1
            flat1 = flat
            least_sigma = sigma
        # Free per-file FITS buffers before loading the next flat.
        collect()
    # MAKE SURE THERE ARE TWO FILES:
    # NOTE(review): random.sample on a one-element pool would raise here if only
    # a single flat or bias survived the filters — confirm upstream guarantees.
    if flat1 is None or flat2 is None:
        flat1 = random.sample(flats, 1)[0]
        diff = set(flats) - {flat1}
        flat2 = random.sample(list(diff), 1)[0]
        flat1 = ModHDUList(flat1)
        flat2 = ModHDUList(flat2)
    if bias1 is None or bias2 is None:
        bias1 = random.sample(bias, 1)[0]
        diff = set(bias) - {bias1}
        bias2 = random.sample(list(diff), 1)[0]
        bias1 = ModHDUList(bias1)
        bias2 = ModHDUList(bias2)
    # We must collect up all readnoise/gain per amplifier, store them in 'gains' and 'read_noises' in order
    # read_noises are read noises in units of ADU
    bias_diff = bias1 - bias2
    # Difference of two bias frames has sqrt(2) times the single-frame noise.
    read_noise_calc = lambda i: bias_diff[i].data.std() / np.sqrt(2)
    read_noises = np.array([read_noise_calc(i) for i in range(len(bias1)) if bias1[i].data is not None])
    log("READ_NOISES:\t{} ADU".format(read_noises))
    # Now we get the gain using the flats
    normflat = flat2.flatten()
    flatcorr = flat1 / normflat
    # If the HDU list has an empty primary HDU, amplifier i maps to noise i-1.
    if len(read_noises) == len(flat1) - 1:
        gain_calc = lambda i: flatcorr[i].data.mean() / (0.5 * flatcorr[i].data.std() ** 2 - read_noises[i - 1] ** 2)
    else:
        gain_calc = lambda i: flatcorr[i].data.mean() / (0.5 * flatcorr[i].data.std() ** 2 - read_noises[i] ** 2)
    gains = np.array([gain_calc(i) for i in range(len(flat1)) if flat1[i].data is not None])
    # Convert the ADU read noise to electrons using the derived gains.
    read_noises_e = gains * read_noises
    return gains, read_noises_e
def find_imgtype(filepath_header, filename=None, ext=0, median_opt=False):
    """
    Only for calibration files!!. Find the image type (flats,darks,bias)

    :param filepath_header: this must be the full address to file, or the header of the file
    :param filename: the file name. Only actually used when filepath_header is a header object
    :param ext: Optional, extension header to which check info
    :param median_opt: if True, this function will allow super calibration files to pass the test
    :return: the image type, returns None if image is not a calibration file
    """
    mylist = ["FLAT", "ZERO", "BIAS", "DARK", "BLANK"]
    bias_labels = ["ZERO", "BLANK"]
    # Check object header (find_val may return None with raise_err=False;
    # guard it so .upper() cannot raise AttributeError)
    option1_raw = find_val(filepath_header, "object", ext, raise_err=False)
    option1 = option1_raw.upper() if option1_raw is not None else ''
    # Check image type header
    option2_raw = find_val(filepath_header, "imagety", ext, raise_err=False)
    option2 = option2_raw.upper() if option2_raw is not None else ''
    # Check filename
    if isinstance(filepath_header, str):
        option3 = filename = os.path.basename(filepath_header).upper()
    else:
        if filename is None:
            option3 = filename = ''
        else:
            option3 = filename.upper()
    # a fourth one... make sure you're not getting an already median file
    # BUG FIX: the original used `or`, which is True for any filename that does
    # not contain both MEDIAN and MEAN at once, so super-calibration files were
    # never actually excluded. De Morgan: not(A or B) == (not A) and (not B).
    option4 = True if median_opt else ("MEDIAN" not in filename.upper()) and ("MEAN" not in filename.upper())
    mylist2 = [option1, option2, option3]
    for key in mylist:
        # this loop checks for all options and see if there is a match
        truth_func = any([key in key2 for key2 in mylist2])
        if truth_func and option4:
            # ZERO/BLANK are just alternate labels for bias frames.
            if key in bias_labels:
                return "BIAS"
            return key
    return None
def find_val(filepath_header, keyword, ext=0, comment=False, regex=False, typ=None, raise_err=True):
    """
    This function takes a keyword and finds the FIRST matching key in the header and returns the its value.
    :param filepath_header: filepath for the file, filepath can also be a header
    :param keyword: keyword for the key header
    :param ext: extension to look for header. Default 0
    :param comment: Look for match in keyword comments. Default False
    :param regex: Look for match using regular expression; re.search function.
    :param typ:Type of object that you want returned. If keyword match, and value type is wrong, its comment is returned
    :param raise_err: if True, re-raise the KeyError when no match is found; if False, return None instead
    :return: value corresponding the key header. String or Float. Returns None if no match
    """
    hrd = get_header(filepath_header, ext=ext)
    return_val = None
    # Before attempting brute search. Try getting the value directly
    try:
        if not regex:
            return_val = hrd[keyword]
        else:
            # force the fall-through so regex searches always use the brute scan below
            raise KeyError
    except KeyError:
        for key, val in hrd.items():
            if regex:
                # regex mode: match against the key first, then its comment
                if re.search(keyword, key):
                    return_val = val
                elif re.search(keyword, hrd.comments[key]):
                    return_val = val
            else:
                # plain mode: case-insensitive substring match on key and (optionally) comment
                inKeyword = keyword.upper() in key.upper()
                inComment = keyword.upper() in hrd.comments[key].upper()
                if inKeyword:
                    return_val = val
                if comment and inComment:
                    return_val = val
            if return_val is not None:
                # wrong value type: fall back to returning the card's comment text
                # NOTE(review): this rebinds the `comment` parameter name -- works, but
                # any later use of the boolean flag in this iteration would see a string
                if (typ is not None) and (typ is not type(return_val)):
                    comment = hrd.comments[key].strip('/').strip()
                    return_val = comment
                break
        else:
            # for-else: loop finished without a match; the bare `raise`
            # re-raises the KeyError caught by the enclosing except block
            if raise_err:
                raise
            else:
                return_val = None
    return return_val
def find_gain(filepath_header, namps=1):
    """
    Assumes gain keys in the primary header are ordered by amplifier.
    :param filepath_header: filepath or header of FITS file
    :param namps: number of amplifiers
    :return: list with gains per amplifier in amplifier order, or None when the
             number of gain keywords found does not match namps
    """
    header = get_header(filepath_header)
    comments = header.comments

    def _is_gain_key(name):
        # keep GAIN keywords but drop video-gain entries (key or comment)
        upper = name.upper()
        if 'GAIN' not in upper:
            return False
        return 'VID' not in upper and 'VIDEO' not in comments[name].upper()

    # natural sort keeps the amplifier ordering implied by the keyword names
    ordered_keys = sorted((k for k in header.keys() if _is_gain_key(k)), key=natural_keys)
    gains = [float(header[k]) for k in ordered_keys if header[k]]
    return gains if len(gains) == namps else None
def find_rdnoise(filepath_header, namps=1):
    """
    Assumes read-noise keys in the primary header are ordered by amplifier.
    :param filepath_header: filepath or header of FITS file
    :param namps: number of amplifiers
    :return: list with read noises per amplifier in amplifier order, or None
             when the number of keywords found does not match namps
    """
    header = get_header(filepath_header)
    # natural sort keeps the amplifier ordering implied by the keyword names
    noise_keys = [k for k in header.keys() if 'RDNOIS' in k.upper()]
    noise_keys.sort(key=natural_keys)
    noises = [float(header[k]) for k in noise_keys if header[k]]
    if len(noises) != namps:
        return None
    return noises
# input FITS filename path
def get_header(filename_hdu, ext=0):
    """
    Retrieve a header from a FITS filepath or pass through an existing Header.
    :param filename_hdu: filepath to file (string) or a fits.Header instance
    :param ext: extension index; default 0
    :return: the requested header
    """
    if isinstance(filename_hdu, fits.Header):
        # already a header object: hand it straight back
        return filename_hdu
    # otherwise treat the argument as a path and read the requested extension
    with fits.open(filename_hdu) as hdu_list:
        return hdu_list[ext].header
def get_scan(hdu):
    """
    Return the overscan region of *hdu* as defined by its BIASSEC keyword.

    BIASSEC follows the FITS section convention '[x1:x2,y1:y2]' with 1-based,
    inclusive endpoints, so the equivalent numpy slice is data[y1-1:y2, x1-1:x2].
    :param hdu: an HDU exposing a 'BIASSEC' header card and a 2-D data array
    :return: numpy view of the overscan pixels
    """
    x_range, y_range = hdu.header['BIASSEC'].strip('][').split(',')
    xstart, xend = [int(j) for j in x_range.split(':')]
    ystart, yend = [int(j) for j in y_range.split(':')]
    # BUGFIX: only the *start* indices shift when converting 1-based inclusive
    # FITS coordinates to python slices; the original also decremented the end
    # indices, silently dropping the last row and column of the overscan.
    return hdu.data[ystart - 1:yend, xstart - 1:xend]
def overscan_sub(hdul):
    """
    Overscan-subtract and trim every extension that defines both BIASSEC and
    TRIMSEC; other extensions are copied through untouched.
    :param hdul: input HDUList
    :return: new ModHDUList with corrected data
    """
    corrected = ModHDUList([hdu.copy() for hdu in hdul])
    for idx in range(len(hdul)):
        if hdul[idx].data is None:
            continue
        header = hdul[idx].header
        if 'BIASSEC' not in header or 'TRIMSEC' not in header:
            continue
        # model the overscan with a cubic polynomial before subtracting it
        ccd = CCDData(hdul[idx].data, unit=u.adu)
        subtracted = subtract_overscan(ccd, fits_section=header['BIASSEC'],
                                       model=models.Polynomial1D(3))
        corrected[idx].data = trim_image(subtracted, fits_section=header['TRIMSEC']).data
    return corrected
# input list of filepaths to FITS files
def cross_median(images_list, root_dir=None, this_type="", twilight=False):
    """
    Build a median FITS image from *images_list*, keeping all header
    information, save it, and return the saved filepath.
    :param images_list: list of filepaths to FITS files, or a list of HDULists, or a list of lists
        with data (numpy array) per extension
    :param root_dir: Only Required when given image_list is a list of HDULists or numpy data
    :param this_type: label used to build the output filename
    :param twilight: flag marking the result as a twilight flat
    :return: filepath to median file
    """
    if root_dir is None:
        # derive the output folder from the first input file
        root_dir = os.path.dirname(images_list[0])
    if twilight:
        this_type = "Twilight-" + this_type
    median_path = os.path.join(root_dir, "MEDIAN:" + this_type + ".fits")
    # the first image serves as header/structure template (any would work)
    template = ModHDUList(images_list[0])
    # record provenance in the primary header
    if twilight:
        template[0].header.add_history("TWILIGHT FLAT")
    template[0].header.add_history("Pre-processed MEDIAN. {}".format(todays_day()))
    for ext in range(len(template)):
        if template[ext].data is None:
            continue
        stack = [ModHDUList(image)[ext].data.astype(float) for image in images_list]
        template[ext].data = np.median(stack, axis=0)
        del stack
    if 'FLAT' in this_type.upper():
        # flat tends to have negative values that impact badly...
        template.interpolate()
    template.writeto(median_path, overwrite=True)
    template.close()
    return median_path
def cross_mean(images_list, root_dir=None, this_type="", twilight=False):
    """
    Build a mean FITS image from *images_list*, keeping all header
    information, save it, and return the saved filepath.
    :param images_list: list of filepaths to FITS files
    :param root_dir: Only give this parameter if the given image_list is a list of HDULists
    :param this_type: label used to build the output filename
    :param twilight: flag marking the result as a twilight flat
    :return: filepath to mean file
    """
    if root_dir is None:
        # derive the output folder from the first input file
        root_dir = os.path.dirname(images_list[0])
    if twilight:
        this_type = "Twilight-" + this_type
    mean_path = os.path.join(root_dir, "MEAN:" + this_type + ".fits")
    # the first image serves as header/structure template (any would work)
    template = ModHDUList(images_list[0])
    # record which processing steps this combined file represents
    if twilight:
        template[0].header.add_history("TWILIGHT FLAT")
    upper_type = this_type.upper()
    if "DARK" in upper_type:
        template[0].header.add_history("BIAS SUBTRACTED")
    if "FLAT" in upper_type:
        template[0].header.add_history("BIAS/DARK SUBTRACTED")
    template[0].header.add_history("Pre-processed MEAN. {}".format(todays_day()))
    for ext in range(len(template)):
        if template[ext].data is None:
            continue
        stack = [ModHDUList(image)[ext].data.astype(float) for image in images_list]
        template[ext].data = np.mean(stack, axis=0)
        del stack
    if 'FLAT' in upper_type:
        # flat tends to have negative values that impact badly...
        template.interpolate()
    template.writeto(mean_path, overwrite=True)
    template.close()
    return mean_path
##function takes path of FITS, bin to test. and outputs the respective boolean if the binning is correct
def check_comp(filepath, comp, twilight_flats=False):
    """
    Check whether a FITS file matches the requested binning and/or filter.
    :param filepath: filepath to fits
    :param comp: binning and filter (either/both) of filepath to be tested against.
    :param twilight_flats: bool indicating whether we are looking for twilight flats
    :return: True only when every requested parameter matches
    """
    checks = []
    # is this a twilight frame? (filename or OBJECT header)
    is_twilight = ("twilight" in filepath.lower()
                   or "twilight" in find_val(filepath, "object").lower())
    # when twilight flats are wanted the frame must be twilight;
    # otherwise the frame must NOT be twilight
    checks.append(is_twilight if twilight_flats else not is_twilight)
    if type(comp) in (list, tuple):
        # both filter and binning were requested
        filters, bins = comp
        # CASSINI headers carry no binning keyword; assume 1x1
        actual_bin = 1 if 'CASSINI' in filepath else find_val(filepath, "bin")
        actual_filter = find_val(filepath, "filter", typ=str)
        checks.append(str(bins) in str(actual_bin))
        checks.append(str(filters.upper()) in str(actual_filter.upper()))
    else:
        try:
            # a numeric comp means we are testing the binning
            float(comp)
            actual_bin = 1 if 'CASSINI' in filepath else find_val(filepath, "bin")
            checks.append(str(comp) in str(actual_bin))
        except ValueError:
            # a non-numeric comp means we are testing the filter
            actual_filter = find_val(filepath, "filter", typ=str)
            checks.append(str(comp.upper()) in str(actual_filter.upper()))
    return all(checks)
def filter_objects(obj_str, bins):
    """
    Decide whether *obj_str* is a science frame worth calibrating.
    A file is rejected when:
      - its path contains 'FINAL' (already calibrated),
      - it is not a regular file,
      - its binning does not match *bins*,
      - its name marks it as a calibration frame (flat/zero/bias/dark).
    :return: True when the file should be kept
    """
    if 'FINAL' in obj_str:
        return False
    if not os.path.isfile(obj_str):
        return False
    if not check_comp(obj_str, bins):
        return False
    name = os.path.basename(obj_str).lower()
    return not any(tag in name for tag in ('flat', 'zero', 'bias', 'dark'))
def get_comp_info(obj_address):
    """
    Collect compatibility parameters (filter, binning, exposure time) from one
    object file, plus a flag telling whether gain/read-noise info exists in
    its header.
    :param obj_address: a single file path or a directory of object files
    :return: [filter, binning, exptime, gain_rdnoise_found]
    :raises Exception: when filter/binning/exptime cannot be determined
    """
    if os.path.isfile(obj_address):
        sample = obj_address
    else:
        # any file of the set works; take the first one found
        sample = list(search_all_fits(obj_address))[0]
    hdul = ModHDUList(sample)
    n_amps = len(hdul)
    if n_amps > 1 and hdul[0].data is None:
        # a data-less primary HDU does not count as an amplifier
        n_amps -= 1
    has_noise_info = (find_gain(hdul[0].header, n_amps) is not None
                      and find_rdnoise(hdul[0].header, n_amps) is not None)
    # CASSINI data carries no binning keyword; assume 1x1
    binning = 1 if 'CASSINI' in obj_address else find_val(sample, "bin")
    if isinstance(binning, str):
        # pull the first digit out of e.g. '2x2'
        binning = [c for c in binning if c.isdigit()][0]
    comp = [find_val(sample, "filter", typ=str), binning,
            find_val(sample, "exptime"), has_noise_info]
    for value in comp[:-1]:
        if not value:
            raise Exception("The program could not find the necessary info, this file is not compatible")
    return comp
def search_median(calibration, comp, twi_flat=False, recycle=False, median_opt=True, print_out=None):
    """
    This function searches the calibration files in the given calibrations folder. It searches for specific
    binning and filter calibration files.
    :param calibration: address directory of the calibration files necessary for the objects
    :param comp: compatibility information list, binning and filter
    :param twi_flat: True if you want to apply twilight flats or False to use regular flats (or anything is found)
    :param recycle: True if you want to try to find already calculated super calibration files, False otherwise
    :param median_opt: True if you want to apply MEDIAN to all central tendency applications, False to use MEAN
    :param print_out: optional signal-like object; when given, its .emit replaces print for progress output
    :return: A list of the following [ superbias_path, superdark_path, superflats_path, exposureTime_Darks]
    """
    filters, bins = comp[:2]
    # counters and the printing callable are shared at module level
    global num_flats, num_bias, num_darks, printout
    if print_out is None:
        printout = print
    else:
        printout = print_out.emit
    # choose the combine strategy once; used for both flattening and stacking
    if median_opt:
        method = 'median'
        central_method = cross_median
    else:
        method = 'mean'
        central_method = cross_mean
    basename = os.path.basename
    # setup flags to use in calculation process
    darks_flag = True
    darks_norm = False
    flats_calculation = True
    bias_calculation = True
    darks_calculation = True
    # setup lists to use in search for fits
    bias = []
    darks = []
    flats = []
    # these hold either False (not found) or the super-calibration filepath
    bias_median = False
    darks_median = False
    flats_median = False
    super_cal_found = False
    central_flag = "MEDIAN" if median_opt else "MEAN"
    log("Searching and filtering calibration files")
    # edit for calibrations in different folders
    # NOTE(review): this loop indexes `calibration` as a sequence of folders, but
    # full_reduction() passes a plain string here, in which case calibration[i]
    # is a single character -- confirm the intended argument type
    all_calibs = []
    for i in range(len(calibration)):
        for j in search_all_fits(calibration[i]):
            all_calibs.append(j)
    for filepath in all_calibs:
        filename = basename(filepath)
        # Compatibility test is overriden if calibration is done on CASSINI Files. Due to the fact
        # that there is no binning keyword in their calibrations headers.
        # NOTE(review): when `calibration` is a list, `'/CASSINI/' in calibration` is an
        # exact element-membership test, not a substring test -- verify this still works
        compatibility = '/CASSINI/' in calibration or check_comp(filepath, bins)
        if not compatibility:
            log("%s Not Compatible" % filename)
            continue
        log("%s Compatible" % filename)
        # capture all median/mean files
        if recycle and central_flag in filename:
            log("Super Calibration file found while filtering: %s" % filename)
            if "FLAT" in filename.upper():
                if check_comp(filepath, filters, twilight_flats=twi_flat):
                    flats_median = filepath
            elif "BIAS" in filename.upper():
                bias_median = filepath
            elif "DARK" in filename.upper():
                darks_median = filepath
            super_cal_found = flats_median and bias_median and darks_median
            continue
        # sort raw frames into their respective lists by image type
        this_imagetype = find_imgtype(filepath)
        if "DARK" == this_imagetype:
            darks.append(filepath)
            continue
        elif "BIAS" == this_imagetype or "ZERO" == this_imagetype:
            bias.append(filepath)
            continue
        elif "FLAT" == this_imagetype:
            if check_comp(filepath, filters, twilight_flats=twi_flat):
                flats.append(filepath)
                continue
        # stop scanning early once every super calibration file was found
        if super_cal_found:
            break
    num_bias = len(bias)
    num_darks = len(darks)
    num_flats = len(flats)
    if num_bias == 0 or num_flats == 0:
        printout("Either no bias or flat files found."
                 " If you want to find the 'superFlat' or 'superBias' sperately,"
                 "use cross_median or cross_mean instead of this function. Exiting...")
        return None
    # no existing darks, then set flag to False
    darks_flag = True if super_cal_found else len(darks) > 0
    ################ Evaluate found median files for re-using#######################
    # initialize variables
    if recycle:
        if darks_flag:
            exptime_dark = find_val(darks_median, "exptime") if darks_median else 1
        else:
            darks_median = None
            exptime_dark = 1
        # if all super calibration files are available, then return them
        if bias_median and flats_median and darks_median is not False:
            log('------MEDIANS FILES WERE FOUND------')
            log('{}\t{}\t{}'.format(bias_median, darks_median, flats_median))
            printout('Found MEDIAN Files. Skipping MEDIAN calibration files calculations...'
                     " CONTINUING WITH OBJECT FILES")
            return bias_median, darks_median, flats_median, exptime_dark
        else:
            # if at least one is available then re-use them
            if bias_median:
                log("----------BIAS ALREADY CALCULATED FOUND----------")
                bias_calculation = False
            if darks_median is not False:
                log("----------DARKS ALREADY CALCULATED FOUND----------")
                darks_calculation = False
            if flats_median:
                log("----------FLATS ALREADY CALCULATED FOUND----------")
                flats_calculation = False
    log("----------CALIBRATIONS FILES ARE NOW REDUCING--------")
    printout("Found: {} bias files\n\t{} darks files\n\t{} "
             "flats files\nCalibration files are now reducing".format(num_bias, num_darks, num_flats))
    # this is done for every flat to make in case exptime was changed at some point
    if darks_flag:
        # exptime_dark will be the exptime at all times, normalizing is applied if necessary
        exptime_flats = [find_val(x, 'exptime') for x in flats]
        exptime_darks = {find_val(x, 'exptime') for x in darks}
        exptime_dark = find_val(darks[0], "exptime")
        if len(exptime_darks) > 1:
            printout("Exposure time of darks varies, applying normalizing method")
            darks_norm = True
        # alphas is a list of normalization constants for the darks during subtraction from super flat
        alphas = list(map(lambda x: x / exptime_dark, exptime_flats))
    else:
        # no darks at all: neutralize the dark-related terms downstream
        darks_median = None
        exptime_dark = 1
        alphas = np.zeros(len(flats))
    ###############################################################################
    ##########################----BIAS CALCULATIONS ----##########################
    if bias_calculation:
        bias_median = central_method(bias, this_type="Bias.BIN{}".format(bins))
        log("----------BIAS CALCULATION COMPLETED----------")
        printout("----------BIAS CALCULATION COMPLETED----------")
    ##########################----END BIAS CALCULATIONS ----##########################
    ##################################################################################
    ###############################################################################
    ##########################----DARKS CALCULATIONS ----##########################
    # open file in memory
    bias_med = fits.open(bias_median, memmap=False)
    if darks_calculation and darks_flag:
        unbiased_darks = []
        for dark in darks:
            raw_dark = ModHDUList(dark)
            if darks_norm:
                # applying darks normalization
                this_exposure = find_val(dark, "exptime")
                if exptime_dark != this_exposure:
                    # subtract bias
                    raw_dark = raw_dark - bias_med
                    # normalize to exptime_dark
                    constant = exptime_dark / this_exposure
                    raw_dark = raw_dark * constant
                    # change EXPTIME to display normalized exptime
                    raw_dark[0].header['EXPTIME'] = exptime_dark
                    unbiased_darks.append(raw_dark)
                    del raw_dark
                    continue
            unbiased_darks.append(raw_dark - bias_med)
            # collect() is python's garbage collector (memory release)
            collect()
        root_darks = os.path.dirname(darks[0])
        darks_median = central_method(unbiased_darks, root_dir=root_darks, this_type="Darks.BIN{}".format(bins))
        # unload the memory space/ a lot of memory must be used for unbiased_darks
        del unbiased_darks
        log("----------DARKS CALCULATION COMPLETED----------")
        printout("----------DARKS CALCULATION COMPLETED----------")
    ##########################----END DARKS CALCULATIONS ----##########################
    ###################################################################################
    ###############################################################################
    ##########################----FLATS CALCULATIONS ----##########################
    if flats_calculation:
        # THIS IS DONE THIS WAY, TO AVOID MISMATCHES OF NORMALIZATION WHEN SUBTRACTING
        root_flats = os.path.dirname(flats[0])
        fixed_flats_list = []
        flat_append = fixed_flats_list.append
        fits.conf.use_memmap = False
        dark_med = ModHDUList(darks_median) if darks_flag else 1
        for i, flat in enumerate(flats):
            raw_flat = ModHDUList(flat)
            super_bias = bias_med - (dark_med * alphas[i]) if darks_flag else bias_med
            # super_bias is either just the bias or the bias minus the dark frame
            calibrate_flat = raw_flat - super_bias
            calibrate_flat = calibrate_flat.flatten(method=method)
            flat_append(calibrate_flat)
            del raw_flat
            collect()
        del bias_med
        if darks_flag:
            del dark_med
        flats_median = central_method(fixed_flats_list, root_dir=root_flats,
                                      this_type="Flats.{}.BIN{}".format(filters, bins), twilight=twi_flat)
        fits.conf.use_memmap = True
        del fixed_flats_list
        log("----------FLATS CALCUATION COMPLETED----------")
        printout("----------FLATS CALCUATION COMPLETED----------")
    ##########################----END FLATS CALCULATIONS ----##########################
    ###################################################################################
    printout("CALIBRATION FILES WERE REDUCED.. CONTINUING WITH OBJECT FILES")
    return bias_median, darks_median, flats_median, exptime_dark
def trim_reference_image(image, image2) -> ModHDUList:
    """
    Trim HDUList (image) according to reference window of other HDUList (image2)
    Uses following range format to trim image:
    DATASEC = [X1, Y1, X2, Y2]
    :param image: Image to be trimmed
    :param image2: Image to use for reference trim section
    :returns new_image: New HDUList of trimmed image(s)
    """
    # SECURITY: literal_eval only parses python literals; unlike eval() it
    # cannot execute arbitrary code coming from a (possibly corrupted) header
    from ast import literal_eval
    # copy the HDUList
    new_image = image.copy()
    for i in range(len(image)):
        if image[i].data is None:
            continue
        # copy the hdu and crop its data to the reference DATASEC window
        new_hdu = image[i].copy()
        x1, y1, x2, y2 = literal_eval(image2[i].header['DATASEC'])
        new_hdu.data = new_hdu.data[y1:y2, x1:x2]
        new_image[i] = new_hdu
    return new_image
def prepare_cal(filepath: str, *args) -> (list, tuple):
    """
    Prepare super calibration files by trimming if necessary and returning a
    list of their data attributes instead of the HDUList objects.
    :param filepath: filepath to object file to use as reference for trimming
    :param args: calibration files
    :return: calibration data, one list of image data per given calibration file, same order
    """
    trimmed = verify_window(filepath, *args)
    # keep only the raw data arrays, per extension, per calibration file
    return [[trimmed[i][j].data for j in range(len(trimmed[i]))]
            for i in range(len(trimmed))]
def verify_window(filepath, *args) -> (list, tuple):
    """
    verify existence of windowed file and trim accordingly
    :param filepath: filepath/HDUList to object image which you want to use as reference windowed frame
    :param args: ModHDUList/HDUList objects that you want to trim
    :return: list of given objects trimmed, same order
    """
    # SECURITY: literal_eval parses the header value as a plain python literal;
    # unlike eval() it cannot execute code smuggled into a header card
    from ast import literal_eval
    window = False
    isWindowed = False
    if '/CAHA/' in filepath:
        # all CAHA images have the DATASEC keyword. We verify if image is windowed by seeing if section
        # is not equal to maximum detector window [0, 0, 4096, 4112]
        window = find_val(filepath, 'DATASEC')
        isWindowed = literal_eval(window) != [0, 0, 4096, 4112]
    elif 'CASSINI' in filepath:
        # Cassini's window verification is different due to the fact that windows are manually set
        # therefore, not every cassini data set will have a 'DATASEC' keyword in the header
        window = find_val(filepath, 'DATASEC') if 'DATASEC' in fits.getheader(filepath) else False
        isWindowed = True if window else False
    if window and isWindowed:
        log("FOUND CAHA WINDOW:\n\tFile:{}\n\tWINDOW:{}".format(os.path.basename(filepath), window))
        ref_window = ModHDUList(filepath)
        # trim every given image against the reference window, preserving order
        return [trim_reference_image(arg, ref_window) for arg in args]
    else:
        log("IMAGE DOES NOT CONTAIN CAHA-LIKE WINDOW FRAME")
        return args
def last_processing2(obj, beta, flats_median, darks_median, bias_median, final_dir):
    """
    Calibrate a single object frame and write the result to *final_dir*.
    :param obj: file path to object frame
    :param beta: normalizing value for dark frames ==> OBJ_EXPTIME / DARK_EXPTIME
    :param flats_median: list of data attributes of super flats image ==> [data0, data1, data2]
    :param darks_median: list of data attributes of super darks image ==> [data0, data1, data2]
    :param bias_median: list of data attributes of super bias image ==> [data0, data1, data2]
    :param final_dir: directory in which the calibrated frame is saved
    :return: None; the calibrated FITS file is written to disk
    """
    # FIX: os.path.basename instead of split('/') for portability, consistent
    # with the path handling used elsewhere in this module
    filename = os.path.basename(obj)
    this_filename = "Calibrated_" + filename
    log("Calibrating object frame: %s" % filename)
    # memory-pressure guard kept for reference; currently disabled
    mem = psutil.virtual_memory().percent
    #if mem > 90.:
    #    log("Memory usage spiked to {}%. Skipping frame {}".format(mem, filename))
    #    print('Mem problem')
    #    return False
    save_to_path = os.path.join(final_dir, this_filename)
    obj_image = ModHDUList(obj)
    exts = len(bias_median)
    # per-extension super bias: bias plus the exposure-scaled dark frame
    super_bias = [None if bias_median[i] is None else bias_median[i] + darks_median[i]*beta for i in range(exts)]
    final_image = (obj_image - super_bias) / flats_median
    # RIGHT BEFORE SAVING, MUST DO COSMIC RAY REMOVAL...
    final_image = cosmic_ray(final_image)
    # INTERPOLATE RIGHT BEFORE SAVING
    # try to get rid of infs/nans/zeroes
    final_image.interpolate()
    final_image[0].header.add_history(
        "Calibrated Image: Bias subtracted, Flattened, and Cosmic Ray Cleansed"
    )
    final_image.writeto(save_to_path, overwrite=True)
    final_image.close()
    # explicit garbage collection keeps multiprocessing workers lean
    collect()
def cosmic_ray(hdul):
    """
    Apply the L.A.Cosmic algorithm to every data extension of *hdul*, using
    per-amplifier gain and read noise taken from the primary header.
    :param hdul: HDUList to clean in place
    :return: the same HDUList with cosmic rays removed
    """
    n_amps = len(hdul)
    if n_amps > 1 and hdul[0].data is None:
        # a data-less primary HDU does not count as an amplifier
        n_amps -= 1
    gains = find_gain(hdul[0].header, n_amps)
    rdnoises = find_rdnoise(hdul[0].header, n_amps)
    params = {'sigclip': 4.5, 'objlim': 6, 'gain': 2., 'readnoise': 6.,
              'sepmed': True, 'cleantype': "idw", "verbose": True}
    amp = 0
    for idx in range(len(hdul)):
        if hdul[idx].data is None:
            continue
        params['gain'] = gains[amp]
        params['readnoise'] = rdnoises[amp]
        cleaned, _mask = cosmicray_lacosmic(hdul[idx].data, **params)
        # divide by the gain to undo lacosmic's gain scaling of the output
        hdul[idx].data = cleaned / gains[amp]
        amp += 1
    hdul[0].header.add_history('LaPlacian Cosmic Ray removal algorithm applied')
    return hdul
def full_reduction(objects, calibrations, twilight_flats=False, split=3, recycle=False, median_opt=True,
                   print_out=None):
    """
    The function fully reduces the object files given the biases/flats/darks.
    This processing function works for almost all kinds of scenarios, must keep testing.
    :param objects: This can be the address of the object files or a list of selected object files.
    :param calibrations: This can be the address of all calibration files or a list of the following:
    FLATS MEDIAN FILEPATH, DARKS MEDIAN FILEPATH, BIAS MEDIAN FILEPATH, exposure time of darks value.
    :param twilight_flats: Set True, and function will only use twilight flats, else it will use regular ones.
    :param split: Default=3, This parameter will split the reduction into a number of subprocess,
    possibly speeding up the reduction time
    :param recycle: Default=False. If set to True, the program will use a compatible MEDIAN calibration file if found.
    :param median_opt: True to combine frames with MEDIAN, False to use MEAN
    :param print_out: optional signal-like object; when given, its .emit replaces print
    :return: The program saves the final images to a new folder in the given objects-address.
    """
    global printout
    log("\n{0}FULL REDUCTION STARTED{0}\n".format("-" * 7))
    if print_out is None:
        printout = print
    else:
        printout = print_out.emit
    printout("FULL REDUCTION STARTED")
    print(type(objects))
    ############################################################
    # parse directory folders
    flag_cal_list = False
    flag_objs_list = False
    if type(calibrations) is str:
        print("Calibrations variable is a string... assuming it is path to calibrations folder")
        calibrations = calibrations.rstrip(os.sep)
        x = os.path.isdir(calibrations)
    else:
        print("Calibrations variable is a list... assuming it is a list of calibration filepaths")
        mybool_list = []
        for i in range(len(calibrations)):
            calibrations[i] = calibrations[i].rstrip(os.sep)
            mybool_list.append(os.path.isfile(calibrations[i]))
        x = all(mybool_list)
        flag_cal_list = True
    if type(objects) is str:
        print("Objects variable is a string... assuming it is path to object frames folder")
        objects = objects.rstrip(os.sep)
        y = os.path.isdir(objects)
    else:
        print("Objects variable is a list... assuming it is a list of object frames filepaths")
        mybool_list = []
        for i in range(len(objects)):
            # BUGFIX: str.rstrip returns a new string; the original discarded the result
            objects[i] = objects[i].rstrip(os.sep)
            mybool_list.append(os.path.isfile(objects[i]) or os.path.isdir(objects[i]))
        y = all(mybool_list)
        flag_objs_list = True
    if x and y:
        log("All Input files exist... continuing processing")
    else:
        log("At least one of the input files don't exist, quitting.")
        raise(ValueError('At least one of the input files don\'t exist, quitting.'))
    #############################################################
    # get compatibility factors (filter/binning/exposure time obj), extra one to see if read noise exists in header
    if flag_objs_list:
        comp = get_comp_info(objects[0])
    else:
        comp = get_comp_info(objects)
    filters, bins, exptime_obj, rdnoise_gain = comp
    # calculate median/mean calibration fles using search_median
    # (the original had an unreachable trailing `else` branch here; removed)
    if not flag_cal_list:
        # assumes calibrations is a string (directory path)
        bias_median, darks_median, flats_median, exptime_dark = search_median(calibrations,
                                                                              comp,
                                                                              twi_flat=twilight_flats,
                                                                              recycle=recycle,
                                                                              median_opt=median_opt,
                                                                              print_out=print_out)
    else:
        # else assumes calibrations files are given
        bias_median, darks_median, flats_median, exptime_dark = calibrations
    # If read noise doesn't exist in at least one header, calculate and put in header files.
    if not rdnoise_gain:
        printout("Information about ReadNoise or Gain couldn't be found... Assigning New Values")
        log("Information about ReadNoise or Gain couldn't be found... Assigning New Values")
        # parse calibrations folder/file path
        # BUGFIX: when calibrations is a list of filepaths, take the folder of the
        # first file; os.path.dirname() on a list raises TypeError
        cal_folder = calibrations if type(calibrations) is str else os.path.dirname(calibrations[0])
        mybool_list = [key in cal_folder for key in ['BIAS', 'ZERO', 'FLAT', 'DARK']]
        if any(mybool_list):
            cal_folder = os.path.dirname(cal_folder)
        # setup search for available filters to use
        all_filters = []
        all_filters_append = all_filters.append
        for filepath in search_all_fits(cal_folder):
            image_type = find_imgtype(filepath)
            if image_type == 'FLAT':
                this_filter = find_val(filepath, 'filter')
                all_filters_append(this_filter)
        avfilters = list(set(all_filters))
        log("Found set of filters: {}".format(avfilters))
        # choosing flat frame filter to use, preference given to clear/blank/open filter labels
        filtr = random.choice(list(avfilters))
        for filter_type in ['clear', 'blank', 'open']:
            isFound = False
            for avfilter in avfilters:
                if filter_type.upper() in avfilter.upper():
                    filtr = avfilter
                    # BUGFIX: the original never set isFound, so a later (less
                    # preferred) label could silently override this choice
                    isFound = True
                    break
            if isFound:
                break
        log("Applying get_gain_rdnoise function...")
        gains, read_noises = get_gain_rdnoise(cal_folder, bins=bins, filters=filtr)
        log("get_gain_rdnoise sucessful")
        telescop = '/'.join(cal_folder.split('/')[:-1])
        printout("Assigninig following gain values:\n{}\n...and readnoise:\n{}".format(gains, read_noises))
        for i in range(len(gains)):
            value_set = 'GAIN{}'.format(i + 1), gains[i]
            comm = 'EDEN Corrected Gain of AMP{} in units of e-/ADU'.format(i + 1)
            set_mulifits(telescop, '*.fits', value_set, comment=comm, keep_originals=False)
            value_set = 'RDNOISE{}'.format(i + 1), read_noises[i]
            comm = 'EDEN Corrected Read Noise of AMP{} in units of e-'.format(i + 1)
            set_mulifits(telescop, '*.fits', value_set, comment=comm, keep_originals=False)
        log("Values have been assigned!... Continuing Calibration.")
        printout("Values have been assigned!... Continuing Calibration.")
    # set up object files for calibration
    if flag_objs_list:
        final_dir = os.path.dirname(objects[0]).replace("raw", 'cal')
        if final_dir == os.path.dirname(objects[0]):
            final_dir = os.path.join(os.path.dirname(objects[0]), 'calibrated')
        list_objects = objects
    else:
        final_dir = objects.replace("raw", 'cal')
        list_objects = list(search_all_fits(objects))
        if final_dir == objects:
            final_dir = os.path.join(objects, 'calibrated')
    if not os.path.isdir(final_dir):
        os.makedirs(final_dir)
    filtered_objects = [obj_path for obj_path in list_objects if filter_objects(obj_path, bins)]
    # beta variable is to be multiplied by the corrected_darks to normalize it in respect to obj files
    betas = [find_val(objs, 'exptime') / exptime_dark for objs in filtered_objects]
    assert len(betas) == len(filtered_objects), "For some reason betas and objects aren't the same size"
    # set up calibration files to pickle through multiprocessing
    t0 = time.time()
    normflats = ModHDUList(flats_median)
    medbias = ModHDUList(bias_median)
    # try to get rid of infs/nans/zeroes
    normflats.interpolate()
    if darks_median:
        meddark = ModHDUList(darks_median)
        _list = [normflats, meddark, medbias]
        normflats, meddark, medbias = prepare_cal(filtered_objects[0], *_list)
    else:
        _list = [normflats, medbias]
        normflats, medbias = prepare_cal(filtered_objects[0], *_list)
        # no darks: use zero-filled arrays so the dark term is a no-op
        meddark = [np.zeros(normflats[i].shape) for i in range(len(normflats))]
    lapse = time.time() - t0
    log("Preparation right before calibration took %.4f " % lapse)
    # create arguments list/iterator
    arguments = []
    for obj, beta in zip(filtered_objects, betas):
        # each argument will have an object frame, normalization constant(Beta), and directory names of
        # super calibration files, and final directory for object frames.
        arguments.append((obj, beta, normflats, meddark, medbias, final_dir))
    # initialize multiprocessing pool in try/except block in order to avoid problems
    pool = Pool(processes=split)
    try:
        t0 = time.time()
        pool.starmap(last_processing2, arguments)
        lapse = time.time() - t0
        log("WHOLE CALIBRATION PROCESS IN ALL FILES TOOK %.4f" % lapse)
    except Exception:
        log("An error occurred during the multiprocessing, closing pool...")
        raise
    finally:
        # the finally block will ensure the pool is closed no matter what
        pool.close()
        pool.join()
    del arguments[:]
    log("FULL DATA REDUCTION COMPLETED")
    printout("REDUCTION COMPLETED!")
if __name__ == '__main__':
    # from DirSearch_Functions import walklevel
    # import fnmatch
    import glob
    # batch-run the full reduction over every KUIPER night directory
    dirs = glob.glob('/Volumes/home/Data/KUIPER/raw/*/*/*')
    # NOTE(review): pop(3) drops one hard-coded entry from the glob result;
    # which directory that is depends on filesystem ordering -- confirm intent
    dirs.pop(3)
    cal = '/Volumes/home/Data/KUIPER/Calibrations'
    print(dirs)
    for one_dir in dirs:
        print(" {} STARTED".format(one_dir))
        # recycle=True reuses previously computed MEDIAN calibration files
        full_reduction(one_dir, cal, recycle=True)
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,381 | abgibbs/edenAP_detrend | refs/heads/myedits | /next_genUI5.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'working_GUI.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the static widget tree for the EDEN Utilities main window.

        Generated by pyuic5 from 'working_GUI.ui' — do not hand-edit logic
        here; regenerate from the .ui file instead.  Populates *MainWindow*
        (a QMainWindow) with four tabs (transfer, renaming, reduction,
        database), assigns object names used for slot auto-connection, and
        finally calls :meth:`retranslateUi` to install user-visible strings.
        """
        # Fixed-size main window: Preferred size policy plus a 750x650 maximum.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(750, 650)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMaximumSize(QtCore.QSize(750, 650))
        MainWindow.setMouseTracking(False)
        MainWindow.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
        MainWindow.setAutoFillBackground(False)
        # Application-wide Qt Style Sheet: gradient backgrounds for each tab
        # (selected by object name) plus shared fonts/borders for common widgets.
        MainWindow.setStyleSheet("#transfer_tab{\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #95afc7, stop: 1 #FFFFFF);\n"
"}\n"
"#renaming_tab{\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #95afc7, stop: 1 #FFFFFF);\n"
"}\n"
"#reduction_tab{\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #95afc7, stop: 1 #FFFFFF);\n"
"}\n"
"#eden_ap{\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #95afc7, stop: 1 #FFFFFF);\n"
"}\n"
"#db_tab{\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #95afc7, stop: 1 #FFFFFF);\n"
"}\n"
"QTextEdit{\n"
"font: 12pt \"Serif\";\n"
"border: 2px solid grey;\n"
"border-radius: 5px;\n"
"selection-background-color: darkgray;\n"
"background: lightblue;\n"
"color: rgb(40, 46, 58);\n"
"}\n"
"QLineEdit {\n"
"font: 10pt \"Serif\";\n"
"border: 1px solid grey;\n"
"border-radius: 4px;\n"
"selection-background-color: darkgray;\n"
"}\n"
"QPushButton {\n"
"    font: 12pt \"Serif\";\n"
"    border: 2px solid #8f8f91;\n"
"    border-radius: 6px;\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #f6f7fa, stop: 1 #dadbde);\n"
"    min-width: 80px;\n"
"}\n"
"QPushButton:pressed {\n"
"    background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"                                      stop: 0 #dadbde, stop: 1 #f6f7fa);\n"
"}\n"
"\n"
"QPushButton:flat {\n"
"    border: none; /* no border for a flat push button */\n"
"}\n"
"\n"
"QPushButton:default {\n"
"    border-color: navy; /* make the default button prominent */\n"
"}\n"
"QRadioButton::indicator {\n"
"    width: 13px;\n"
"    height: 13px;\n"
"}\n"
"QComboBox {\n"
"    border: 1px solid gray;\n"
"    border-radius: 3px;\n"
"    padding: 1px 18px 1px 3px;\n"
"    min-width: 6em;\n"
"}\n"
"QComboBox::drop-down {\n"
"    subcontrol-origin: padding;\n"
"    subcontrol-position: top right;\n"
"    width: 15px;\n"
"    border-left-width: 1px;\n"
"    border-left-color: darkgray;\n"
"    border-left-style: solid; \n"
"    border-top-right-radius: 3px; \n"
"    border-bottom-right-radius: 3px;\n"
"}\n"
"QComboBox QAbstractItemView {\n"
"    border: 2px solid darkgray;\n"
"    selection-background-color: lightgray;\n"
"}\n"
"QGroupBox {\n"
"    border: 2px solid gray;\n"
"    border-radius: 5px;\n"
"    margin-top: 1.5ex; /* leave space at the top for the title */\n"
"    font: bold 12pt \"Serif\" ; \n"
"}\n"
"\n"
"#db_connection{\n"
"    border: 0px ;\n"
"    border-radius: 0px;\n"
"    margin-top: 0ex; /* leave space at the top for the title */\n"
"}\n"
"\n"
"#db_query{\n"
"    border: 0px ;\n"
"    border-radius: 0px;\n"
"    margin-top: 0ex; /* leave space at the top for the title */\n"
"\n"
"}\n"
"#options_box_rname{\n"
"    font: bold 11pt \"Serif\"\n"
"}\n"
"QLabel#app_info_rnamelabel{\n"
"    font: bold 10pt \"Serif\";\n"
"}\n"
"QLabel{\n"
"font: 12pt \"Serif\";\n"
"}\n"
"QLabel#top_label{\n"
"font: bold 12pt \"Serif\"\n"
"}\n"
"QRadioButton{\n"
"font: 12pt \"Serif\";\n"
"text-align: center;\n"
"}\n"
"QCheckBox{\n"
"font: 12pt \"Serif\";\n"
"}\n"
"")
        MainWindow.setDockNestingEnabled(False)
        MainWindow.setUnifiedTitleAndToolBarOnMac(False)
        # Central widget hosts the tab container that fills the whole window.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setAutoFillBackground(False)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWdgt = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWdgt.setGeometry(QtCore.QRect(0, 0, 750, 650))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.tabWdgt.setFont(font)
        self.tabWdgt.setToolTip("")
        self.tabWdgt.setAccessibleName("")
        self.tabWdgt.setAutoFillBackground(True)
        self.tabWdgt.setTabBarAutoHide(False)
        self.tabWdgt.setObjectName("tabWdgt")
        # --- Transfer tab: source/storage directory pickers and an output log ---
        self.transfer_tab = QtWidgets.QWidget()
        self.transfer_tab.setAutoFillBackground(False)
        self.transfer_tab.setStyleSheet("")
        self.transfer_tab.setObjectName("transfer_tab")
        self.transfer_info = QtWidgets.QGroupBox(self.transfer_tab)
        self.transfer_info.setGeometry(QtCore.QRect(20, 20, 710, 480))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.transfer_info.setFont(font)
        self.transfer_info.setAutoFillBackground(False)
        self.transfer_info.setObjectName("transfer_info")
        # Read-only log area for program messages during transfers.
        self.output = QtWidgets.QTextEdit(self.transfer_info)
        self.output.setGeometry(QtCore.QRect(50, 240, 601, 181))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.output.setFont(font)
        self.output.setStyleSheet("")
        self.output.setReadOnly(True)
        self.output.setObjectName("output")
        self.select_dir = QtWidgets.QPushButton(self.transfer_info)
        self.select_dir.setGeometry(QtCore.QRect(60, 60, 250, 35))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.select_dir.setFont(font)
        self.select_dir.setObjectName("select_dir")
        self.MS_dir = QtWidgets.QPushButton(self.transfer_info)
        self.MS_dir.setGeometry(QtCore.QRect(60, 140, 250, 35))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.MS_dir.setFont(font)
        self.MS_dir.setObjectName("MS_dir")
        self.text_dir = QtWidgets.QLineEdit(self.transfer_info)
        self.text_dir.setGeometry(QtCore.QRect(70, 100, 231, 31))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.text_dir.setFont(font)
        self.text_dir.setStyleSheet("")
        self.text_dir.setObjectName("text_dir")
        self.Ms_text = QtWidgets.QLineEdit(self.transfer_info)
        self.Ms_text.setGeometry(QtCore.QRect(70, 180, 231, 31))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.Ms_text.setFont(font)
        self.Ms_text.setObjectName("Ms_text")
        self.ovr_transfer = QtWidgets.QPushButton(self.transfer_info)
        self.ovr_transfer.setGeometry(QtCore.QRect(430, 60, 191, 41))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.ovr_transfer.setFont(font)
        self.ovr_transfer.setObjectName("ovr_transfer")
        self.ovr_text = QtWidgets.QLineEdit(self.transfer_info)
        self.ovr_text.setGeometry(QtCore.QRect(430, 110, 191, 31))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.ovr_text.setFont(font)
        self.ovr_text.setStyleSheet("")
        self.ovr_text.setObjectName("ovr_text")
        self.quit_button = QtWidgets.QPushButton(self.transfer_tab)
        self.quit_button.setGeometry(QtCore.QRect(450, 550, 111, 41))
        self.quit_button.setObjectName("quit_button")
        self.strt_trnsf = QtWidgets.QPushButton(self.transfer_tab)
        self.strt_trnsf.setGeometry(QtCore.QRect(190, 550, 131, 41))
        self.strt_trnsf.setObjectName("strt_trnsf")
        self.tabWdgt.addTab(self.transfer_tab, "")
        # --- Renaming tab: header-value change / file renaming controls ---
        self.renaming_tab = QtWidgets.QWidget()
        self.renaming_tab.setAccessibleName("")
        self.renaming_tab.setObjectName("renaming_tab")
        self.rname_box = QtWidgets.QGroupBox(self.renaming_tab)
        self.rname_box.setGeometry(QtCore.QRect(0, 60, 791, 601))
        self.rname_box.setObjectName("rname_box")
        # 5x4 preview table; every cell is pre-populated with an empty item
        # (cell text is assigned later, in retranslateUi).
        self.table_rname = QtWidgets.QTableWidget(self.rname_box)
        self.table_rname.setGeometry(QtCore.QRect(325, 340, 420, 181))
        self.table_rname.setAlternatingRowColors(True)
        self.table_rname.setTextElideMode(QtCore.Qt.ElideLeft)
        self.table_rname.setRowCount(5)
        self.table_rname.setColumnCount(4)
        self.table_rname.setObjectName("table_rname")
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(10)
        item.setFont(font)
        self.table_rname.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(10)
        item.setFont(font)
        self.table_rname.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(10)
        item.setFont(font)
        self.table_rname.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(10)
        item.setFont(font)
        self.table_rname.setHorizontalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(0, 0, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        # NOTE(review): only cell (0, 1) gets the "Ubuntu" family — looks like
        # a quirk carried over from the .ui file rather than intent.
        font.setFamily("Ubuntu")
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(0, 1, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(0, 2, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(0, 3, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(1, 0, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(1, 1, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(1, 2, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(1, 3, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(2, 0, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(2, 1, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(2, 2, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(2, 3, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(3, 0, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(3, 1, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(3, 2, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(3, 3, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(4, 0, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(4, 1, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(4, 2, item)
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setPointSize(9)
        item.setFont(font)
        self.table_rname.setItem(4, 3, item)
        self.info_value_hrdset = QtWidgets.QLabel(self.rname_box)
        self.info_value_hrdset.setGeometry(QtCore.QRect(380, 270, 291, 51))
        self.info_value_hrdset.setObjectName("info_value_hrdset")
        self.hrd_src_dir = QtWidgets.QLineEdit(self.rname_box)
        self.hrd_src_dir.setGeometry(QtCore.QRect(390, 70, 281, 25))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.hrd_src_dir.setFont(font)
        self.hrd_src_dir.setObjectName("hrd_src_dir")
        self.app_info = QtWidgets.QTextEdit(self.rname_box)
        self.app_info.setGeometry(QtCore.QRect(10, 270, 291, 231))
        self.app_info.setReadOnly(True)
        self.app_info.setObjectName("app_info")
        self.modified_dir = QtWidgets.QLineEdit(self.rname_box)
        self.modified_dir.setGeometry(QtCore.QRect(390, 150, 281, 25))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.modified_dir.setFont(font)
        self.modified_dir.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        self.modified_dir.setAutoFillBackground(False)
        self.modified_dir.setObjectName("modified_dir")
        self.rname_src = QtWidgets.QPushButton(self.rname_box)
        self.rname_src.setGeometry(QtCore.QRect(420, 30, 250, 35))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.rname_src.setFont(font)
        self.rname_src.setObjectName("rname_src")
        self.rname_dest = QtWidgets.QPushButton(self.rname_box)
        self.rname_dest.setGeometry(QtCore.QRect(420, 110, 250, 35))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.rname_dest.setFont(font)
        self.rname_dest.setObjectName("rname_dest")
        self.strt_rnaming = QtWidgets.QPushButton(self.rname_box)
        self.strt_rnaming.setGeometry(QtCore.QRect(590, 200, 91, 41))
        self.strt_rnaming.setObjectName("strt_rnaming")
        self.options_box_rname = QtWidgets.QGroupBox(self.rname_box)
        self.options_box_rname.setGeometry(QtCore.QRect(20, 30, 221, 191))
        self.options_box_rname.setObjectName("options_box_rname")
        self.keep_orig_rname = QtWidgets.QCheckBox(self.options_box_rname)
        self.keep_orig_rname.setGeometry(QtCore.QRect(10, 100, 171, 23))
        self.keep_orig_rname.setObjectName("keep_orig_rname")
        self.recursive_rname = QtWidgets.QCheckBox(self.options_box_rname)
        self.recursive_rname.setGeometry(QtCore.QRect(10, 30, 201, 23))
        self.recursive_rname.setObjectName("recursive_rname")
        self.lineEdit_3 = QtWidgets.QLineEdit(self.options_box_rname)
        self.lineEdit_3.setEnabled(True)
        self.lineEdit_3.setGeometry(QtCore.QRect(30, 60, 141, 25))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.lineEdit_3.setFont(font)
        self.lineEdit_3.setObjectName("lineEdit_3")
        # Mode selectors: "header change" is the default-checked radio button.
        self.hrd_chng_type = QtWidgets.QRadioButton(self.renaming_tab)
        self.hrd_chng_type.setGeometry(QtCore.QRect(200, 30, 171, 23))
        self.hrd_chng_type.setChecked(True)
        self.hrd_chng_type.setObjectName("hrd_chng_type")
        self.rname_type = QtWidgets.QRadioButton(self.renaming_tab)
        self.rname_type.setGeometry(QtCore.QRect(450, 30, 112, 23))
        self.rname_type.setObjectName("rname_type")
        self.top_label = QtWidgets.QLabel(self.renaming_tab)
        self.top_label.setGeometry(QtCore.QRect(230, 0, 341, 21))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.top_label.setFont(font)
        self.top_label.setObjectName("top_label")
        self.app_info_rnamelabel = QtWidgets.QLabel(self.renaming_tab)
        self.app_info_rnamelabel.setGeometry(QtCore.QRect(60, 300, 171, 17))
        self.app_info_rnamelabel.setObjectName("app_info_rnamelabel")
        self.tabWdgt.addTab(self.renaming_tab, "")
        # --- Reduction tab: object/calibration file selection and options ---
        self.reduction_tab = QtWidgets.QWidget()
        self.reduction_tab.setObjectName("reduction_tab")
        self.input_files_reduction = QtWidgets.QGroupBox(self.reduction_tab)
        self.input_files_reduction.setGeometry(QtCore.QRect(20, 10, 711, 261))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.input_files_reduction.setFont(font)
        self.input_files_reduction.setObjectName("input_files_reduction")
        self.select_objs_reduction = QtWidgets.QPushButton(self.input_files_reduction)
        self.select_objs_reduction.setGeometry(QtCore.QRect(290, 50, 241, 41))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.select_objs_reduction.setFont(font)
        self.select_objs_reduction.setObjectName("select_objs_reduction")
        self.select_cal_reduction = QtWidgets.QPushButton(self.input_files_reduction)
        self.select_cal_reduction.setGeometry(QtCore.QRect(290, 100, 241, 41))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.select_cal_reduction.setFont(font)
        self.select_cal_reduction.setObjectName("select_cal_reduction")
        self.cal_comp = QtWidgets.QLineEdit(self.input_files_reduction)
        self.cal_comp.setGeometry(QtCore.QRect(330, 150, 181, 21))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.cal_comp.setFont(font)
        self.cal_comp.setText("")
        self.cal_comp.setObjectName("cal_comp")
        # "Recycle calibrations" defaults to checked.
        self.recycle_cal = QtWidgets.QCheckBox(self.input_files_reduction)
        self.recycle_cal.setGeometry(QtCore.QRect(430, 190, 175, 35))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.recycle_cal.setFont(font)
        self.recycle_cal.setChecked(True)
        self.recycle_cal.setObjectName("recycle_cal")
        self.twilight_flats = QtWidgets.QCheckBox(self.input_files_reduction)
        self.twilight_flats.setGeometry(QtCore.QRect(280, 190, 161, 35))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.twilight_flats.setFont(font)
        self.twilight_flats.setObjectName("twilight_flats")
        # Worker-process count spinner, bounded to 3..12 and wrapping.
        self.nprocess = QtWidgets.QSpinBox(self.input_files_reduction)
        self.nprocess.setGeometry(QtCore.QRect(587, 100, 71, 24))
        self.nprocess.setWrapping(True)
        self.nprocess.setMinimum(3)
        self.nprocess.setMaximum(12)
        self.nprocess.setObjectName("nprocess")
        self.nprocess_label = QtWidgets.QLabel(self.input_files_reduction)
        self.nprocess_label.setGeometry(QtCore.QRect(580, 50, 101, 41))
        self.nprocess_label.setObjectName("nprocess_label")
        self.type_reduction = QtWidgets.QComboBox(self.reduction_tab)
        self.type_reduction.setGeometry(QtCore.QRect(40, 80, 171, 25))
        self.type_reduction.setObjectName("type_reduction")
        self.type_reduction.addItem("")
        self.type_reduction.addItem("")
        self.type_reduciton_label = QtWidgets.QLabel(self.reduction_tab)
        self.type_reduciton_label.setGeometry(QtCore.QRect(50, 50, 171, 17))
        font = QtGui.QFont()
        font.setFamily("Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.type_reduciton_label.setFont(font)
        self.type_reduciton_label.setObjectName("type_reduciton_label")
        self.start_reduction = QtWidgets.QPushButton(self.reduction_tab)
        self.start_reduction.setGeometry(QtCore.QRect(50, 190, 141, 41))
        self.start_reduction.setObjectName("start_reduction")
        # Combine-method radios: median is the default.
        self.mean_reduction = QtWidgets.QRadioButton(self.reduction_tab)
        self.mean_reduction.setGeometry(QtCore.QRect(40, 110, 131, 23))
        self.mean_reduction.setObjectName("mean_reduction")
        self.median_reduction = QtWidgets.QRadioButton(self.reduction_tab)
        self.median_reduction.setGeometry(QtCore.QRect(40, 140, 131, 23))
        self.median_reduction.setChecked(True)
        self.median_reduction.setObjectName("median_reduction")
        self.readme_reduction = QtWidgets.QTextEdit(self.reduction_tab)
        self.readme_reduction.setGeometry(QtCore.QRect(20, 250, 711, 331))
        self.readme_reduction.setReadOnly(True)
        self.readme_reduction.setObjectName("readme_reduction")
        self.tabWdgt.addTab(self.reduction_tab, "")
        # --- Database tab: connection form, query builder and results view ---
        self.db_tab = QtWidgets.QWidget()
        self.db_tab.setObjectName("db_tab")
        self.db_connection = QtWidgets.QGroupBox(self.db_tab)
        self.db_connection.setGeometry(QtCore.QRect(10, 0, 741, 211))
        self.db_connection.setObjectName("db_connection")
        self.establish_connection = QtWidgets.QPushButton(self.db_connection)
        self.establish_connection.setGeometry(QtCore.QRect(210, 70, 171, 31))
        self.establish_connection.setObjectName("establish_connection")
        self.db_label = QtWidgets.QLabel(self.db_connection)
        self.db_label.setGeometry(QtCore.QRect(0, 30, 211, 31))
        self.db_label.setObjectName("db_label")
        self.disconnect = QtWidgets.QPushButton(self.db_connection)
        self.disconnect.setGeometry(QtCore.QRect(230, 100, 131, 31))
        self.disconnect.setObjectName("disconnect")
        self.db_editmode = QtWidgets.QCheckBox(self.db_connection)
        self.db_editmode.setEnabled(True)
        self.db_editmode.setGeometry(QtCore.QRect(240, 40, 141, 23))
        self.db_editmode.setObjectName("db_editmode")
        self.db_output = QtWidgets.QTextEdit(self.db_connection)
        self.db_output.setGeometry(QtCore.QRect(390, 10, 341, 171))
        self.db_output.setReadOnly(True)
        self.db_output.setObjectName("db_output")
        # Vertical form holding host / username / password line edits.
        self.verticalLayoutWidget = QtWidgets.QWidget(self.db_connection)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 60, 191, 101))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.db_form = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.db_form.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.db_form.setContentsMargins(0, 0, 0, 0)
        self.db_form.setSpacing(2)
        self.db_form.setObjectName("db_form")
        self.db_host = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.db_host.sizePolicy().hasHeightForWidth())
        self.db_host.setSizePolicy(sizePolicy)
        # NOTE(review): the *host* field uses Password echo mode, same as the
        # password field below — looks unintentional; confirm against the .ui.
        self.db_host.setEchoMode(QtWidgets.QLineEdit.Password)
        self.db_host.setObjectName("db_host")
        self.db_form.addWidget(self.db_host)
        self.db_username = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.db_username.sizePolicy().hasHeightForWidth())
        self.db_username.setSizePolicy(sizePolicy)
        self.db_username.setObjectName("db_username")
        self.db_form.addWidget(self.db_username)
        self.db_password = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.db_password.sizePolicy().hasHeightForWidth())
        self.db_password.setSizePolicy(sizePolicy)
        self.db_password.setEchoMode(QtWidgets.QLineEdit.Password)
        self.db_password.setObjectName("db_password")
        self.db_form.addWidget(self.db_password)
        self.db_query = QtWidgets.QGroupBox(self.db_tab)
        self.db_query.setGeometry(QtCore.QRect(0, 160, 751, 461))
        self.db_query.setObjectName("db_query")
        self.db_table = QtWidgets.QTableView(self.db_query)
        self.db_table.setGeometry(QtCore.QRect(0, 150, 745, 313))
        self.db_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectItems)
        self.db_table.setSortingEnabled(False)
        self.db_table.setObjectName("db_table")
        # Form of action buttons (send query / download / update).
        self.formLayoutWidget = QtWidgets.QWidget(self.db_query)
        self.formLayoutWidget.setGeometry(QtCore.QRect(370, 40, 221, 91))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.db_query_buttons = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.db_query_buttons.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.db_query_buttons.setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
        self.db_query_buttons.setLabelAlignment(QtCore.Qt.AlignCenter)
        self.db_query_buttons.setFormAlignment(QtCore.Qt.AlignCenter)
        self.db_query_buttons.setContentsMargins(0, 0, 0, 0)
        self.db_query_buttons.setHorizontalSpacing(6)
        self.db_query_buttons.setVerticalSpacing(1)
        self.db_query_buttons.setObjectName("db_query_buttons")
        self.db_update = QtWidgets.QPushButton(self.formLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_update.sizePolicy().hasHeightForWidth())
        self.db_update.setSizePolicy(sizePolicy)
        self.db_update.setObjectName("db_update")
        self.db_query_buttons.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.db_update)
        self.db_download = QtWidgets.QPushButton(self.formLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_download.sizePolicy().hasHeightForWidth())
        self.db_download.setSizePolicy(sizePolicy)
        self.db_download.setObjectName("db_download")
        self.db_query_buttons.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.db_download)
        self.db_sendquery = QtWidgets.QPushButton(self.formLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_sendquery.sizePolicy().hasHeightForWidth())
        self.db_sendquery.setSizePolicy(sizePolicy)
        self.db_sendquery.setObjectName("db_sendquery")
        self.db_query_buttons.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.db_sendquery)
        # Query-builder form: database/table combo boxes, columns and WHERE edits.
        self.formLayoutWidget_2 = QtWidgets.QWidget(self.db_query)
        self.formLayoutWidget_2.setGeometry(QtCore.QRect(0, 30, 361, 111))
        self.formLayoutWidget_2.setObjectName("formLayoutWidget_2")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget_2)
        self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setRowWrapPolicy(QtWidgets.QFormLayout.DontWrapRows)
        self.formLayout.setFormAlignment(QtCore.Qt.AlignCenter)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setHorizontalSpacing(4)
        self.formLayout.setVerticalSpacing(3)
        self.formLayout.setObjectName("formLayout")
        self.db_databases_label = QtWidgets.QLabel(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_databases_label.sizePolicy().hasHeightForWidth())
        self.db_databases_label.setSizePolicy(sizePolicy)
        self.db_databases_label.setObjectName("db_databases_label")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.db_databases_label)
        self.db_databases = QtWidgets.QComboBox(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_databases.sizePolicy().hasHeightForWidth())
        self.db_databases.setSizePolicy(sizePolicy)
        self.db_databases.setObjectName("db_databases")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.db_databases)
        self.db_cols_label = QtWidgets.QLabel(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_cols_label.sizePolicy().hasHeightForWidth())
        self.db_cols_label.setSizePolicy(sizePolicy)
        self.db_cols_label.setAlignment(QtCore.Qt.AlignCenter)
        self.db_cols_label.setObjectName("db_cols_label")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.db_cols_label)
        self.db_where_label = QtWidgets.QLabel(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_where_label.sizePolicy().hasHeightForWidth())
        self.db_where_label.setSizePolicy(sizePolicy)
        self.db_where_label.setAlignment(QtCore.Qt.AlignCenter)
        self.db_where_label.setObjectName("db_where_label")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.db_where_label)
        self.db_cols = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_cols.sizePolicy().hasHeightForWidth())
        self.db_cols.setSizePolicy(sizePolicy)
        self.db_cols.setObjectName("db_cols")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.db_cols)
        self.db_where = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_where.sizePolicy().hasHeightForWidth())
        self.db_where.setSizePolicy(sizePolicy)
        self.db_where.setText("")
        self.db_where.setObjectName("db_where")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.db_where)
        self.db_tables_label = QtWidgets.QLabel(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_tables_label.sizePolicy().hasHeightForWidth())
        self.db_tables_label.setSizePolicy(sizePolicy)
        self.db_tables_label.setAlignment(QtCore.Qt.AlignCenter)
        self.db_tables_label.setObjectName("db_tables_label")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.db_tables_label)
        self.db_tables = QtWidgets.QComboBox(self.formLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_tables.sizePolicy().hasHeightForWidth())
        self.db_tables.setSizePolicy(sizePolicy)
        self.db_tables.setObjectName("db_tables")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.db_tables)
        # Add/delete row buttons for edit mode.
        self.formLayoutWidget_3 = QtWidgets.QWidget(self.db_query)
        self.formLayoutWidget_3.setGeometry(QtCore.QRect(620, 50, 101, 71))
        self.formLayoutWidget_3.setObjectName("formLayoutWidget_3")
        self.db_add_remove_form = QtWidgets.QFormLayout(self.formLayoutWidget_3)
        self.db_add_remove_form.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.db_add_remove_form.setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
        self.db_add_remove_form.setLabelAlignment(QtCore.Qt.AlignCenter)
        self.db_add_remove_form.setFormAlignment(QtCore.Qt.AlignCenter)
        self.db_add_remove_form.setContentsMargins(0, 0, 0, 0)
        self.db_add_remove_form.setHorizontalSpacing(6)
        self.db_add_remove_form.setVerticalSpacing(1)
        self.db_add_remove_form.setObjectName("db_add_remove_form")
        self.db_add = QtWidgets.QPushButton(self.formLayoutWidget_3)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_add.sizePolicy().hasHeightForWidth())
        self.db_add.setSizePolicy(sizePolicy)
        self.db_add.setObjectName("db_add")
        self.db_add_remove_form.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.db_add)
        self.db_delete = QtWidgets.QPushButton(self.formLayoutWidget_3)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.db_delete.sizePolicy().hasHeightForWidth())
        self.db_delete.setSizePolicy(sizePolicy)
        self.db_delete.setObjectName("db_delete")
        self.db_add_remove_form.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.db_delete)
        self.tabWdgt.addTab(self.db_tab, "")
        MainWindow.setCentralWidget(self.centralwidget)
        # Install user-visible strings, select the renaming tab by default,
        # then auto-connect slots by object name (on_<objectName>_<signal>).
        self.retranslateUi(MainWindow)
        self.tabWdgt.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install every user-visible string on the widgets built in setupUi.

    Appears to be auto-generated (pyuic style) from a Qt Designer .ui
    file -- TODO confirm; if so, regenerate rather than hand-editing.
    All strings are routed through QCoreApplication.translate so they
    can be swapped by Qt's i18n machinery at runtime.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "EDEN Utilities"))
    # --- "Transfer Data" tab ---
    self.transfer_info.setTitle(_translate("MainWindow", "Information for Storage"))
    self.output.setToolTip(_translate("MainWindow", "<html><head/><body><p>Messages from the program</p></body></html>"))
    self.output.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
    "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
    "p, li { white-space: pre-wrap; }\n"
    "</style></head><body style=\" font-family:\'Serif\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
    "<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:600;\">OUTPUT</span></p>\n"
    "<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:600;\">Welcome to EDEN Utils.</span><span style=\" font-family:\'Ubuntu\'; font-size:11pt;\"> </span></p></body></html>"))
    self.select_dir.setText(_translate("MainWindow", "Select Source Directory"))
    self.MS_dir.setText(_translate("MainWindow", "(Optional) Manual Storage directory"))
    self.text_dir.setPlaceholderText(_translate("MainWindow", "e.g: /home/user/user_folder"))
    self.Ms_text.setPlaceholderText(_translate("MainWindow", "e.g: /home/user/incompatibles"))
    self.ovr_transfer.setText(_translate("MainWindow", "Override destination"))
    self.quit_button.setText(_translate("MainWindow", "Quit"))
    self.strt_trnsf.setText(_translate("MainWindow", "Start Transfer"))
    self.tabWdgt.setTabText(self.tabWdgt.indexOf(self.transfer_tab), _translate("MainWindow", "Transfer Data"))
    # --- "Mass rename/modify" tab ---
    self.rname_box.setTitle(_translate("MainWindow", "Header Value Change"))
    item = self.table_rname.horizontalHeaderItem(0)
    item.setToolTip(_translate("MainWindow", "FIT key to be changed"))
    item = self.table_rname.horizontalHeaderItem(1)
    item.setToolTip(_translate("MainWindow", "string to set key value"))
    item = self.table_rname.horizontalHeaderItem(2)
    item.setToolTip(_translate("MainWindow", "base name of file"))
    # Sorting is switched off and immediately restored around item setup.
    __sortingEnabled = self.table_rname.isSortingEnabled()
    self.table_rname.setSortingEnabled(False)
    self.table_rname.setSortingEnabled(__sortingEnabled)
    self.info_value_hrdset.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\">Information for modification<br/>(Up to 5 simultaneuos changes)</p></body></html>"))
    self.hrd_src_dir.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:9pt;\">Enter the directory holding files to be renamed</span></p></body></html>"))
    self.hrd_src_dir.setPlaceholderText(_translate("MainWindow", "e.g. /home/Data/files"))
    self.app_info.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
    "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
    "p, li { white-space: pre-wrap; }\n"
    "</style></head><body style=\" font-family:\'Serif\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
    "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:11pt;\"><br /></p></body></html>"))
    self.modified_dir.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:9pt;\">Preferred directory to store modified files</span></p></body></html>"))
    self.modified_dir.setPlaceholderText(_translate("MainWindow", "e.g. /home/data/modified_files"))
    self.rname_src.setText(_translate("MainWindow", "Select directory of files"))
    self.rname_dest.setText(_translate("MainWindow", "(Optional) Select New Destination"))
    self.strt_rnaming.setText(_translate("MainWindow", "Start "))
    self.options_box_rname.setTitle(_translate("MainWindow", "Options"))
    self.keep_orig_rname.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:9pt;\">This will keep the original files and use copy functions instead of move functions. The renamed files will be copied to a subfolder with name:\' renamed files \'(unless specified on the right) </span></p></body></html>"))
    self.keep_orig_rname.setText(_translate("MainWindow", "Keep orignal files"))
    self.recursive_rname.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:9pt;\">This will allow the program to search the file basenames in subfolders</span></p></body></html>"))
    self.recursive_rname.setText(_translate("MainWindow", "Recursive file search"))
    self.lineEdit_3.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:10pt;\">Enter the recursive limit for the file search recursion ( 1 = \'No recursion\'). If empty, it will recurse completely.</span></p></body></html>"))
    self.lineEdit_3.setPlaceholderText(_translate("MainWindow", "Enter recursive limit"))
    self.hrd_chng_type.setText(_translate("MainWindow", " FIT header keys"))
    self.rname_type.setText(_translate("MainWindow", " File names"))
    self.top_label.setText(_translate("MainWindow", "What are you renaming/changing"))
    self.app_info_rnamelabel.setText(_translate("MainWindow", "App Information"))
    self.tabWdgt.setTabText(self.tabWdgt.indexOf(self.renaming_tab), _translate("MainWindow", "Mass rename/modify"))
    # --- "Data Calibration" tab ---
    self.input_files_reduction.setTitle(_translate("MainWindow", "Input Files"))
    self.select_objs_reduction.setToolTip(_translate("MainWindow", "<html><head/><body><p>Select all object FITS files to be reduced.</p></body></html>"))
    self.select_objs_reduction.setText(_translate("MainWindow", "Select Source of Object FITS"))
    self.select_cal_reduction.setToolTip(_translate("MainWindow", "<html><head/><body><p>Select all calibrations FITS files to be used for the reduction.</p></body></html>"))
    self.select_cal_reduction.setText(_translate("MainWindow", "Select Dir. with Calibration FITS"))
    self.cal_comp.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:10pt;\">Format:<br/>{Filter}, {BIN#}</span></p></body></html>"))
    self.cal_comp.setPlaceholderText(_translate("MainWindow", "\'Clear, 3 bin\' or \'I, 2\'"))
    self.recycle_cal.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:10pt;\">Check this if you want the program to try to find/reuse calibration files.<br/>If you this is unchecked, it will re-compute all files.</span></p></body></html>"))
    self.recycle_cal.setText(_translate("MainWindow", "Recycle Calibration files"))
    self.twilight_flats.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:10pt;\">Check this if you want the program to use twilight flats.<br/>If this is unchecked, it will filter them out and try to use other flats.</span></p></body></html>"))
    self.twilight_flats.setText(_translate("MainWindow", "Use twilight flats"))
    self.nprocess.setToolTip(_translate("MainWindow", "Number of processes to split the work"))
    self.nprocess.setSpecialValueText(_translate("MainWindow", "Auto"))
    self.nprocess_label.setText(_translate("MainWindow", "Number of \n"
    "Multiprocesses"))
    self.type_reduction.setItemText(0, _translate("MainWindow", "Full Calibration"))
    self.type_reduction.setItemText(1, _translate("MainWindow", "Only Calibration files"))
    self.type_reduciton_label.setText(_translate("MainWindow", "Type of Calibration"))
    self.start_reduction.setText(_translate("MainWindow", "Start Calibration"))
    self.mean_reduction.setText(_translate("MainWindow", " Take Mean"))
    self.median_reduction.setText(_translate("MainWindow", " Take Median"))
    self.readme_reduction.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
    "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
    "p, li { white-space: pre-wrap; }\n"
    "</style></head><body style=\" font-family:\'Serif\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
    "<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:600;\">README</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt; font-style:italic;\">The Full Calibration</span><span style=\" font-family:\'Ubuntu\'; font-size:11pt;\"> type is where selected raw object images and raw calibration files are used. You must select object files, and folder of calibration files. The program will choose the appropriate calibration files. This outputs final object images and mean/median of calibration files.</span></p>\n"
    "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:11pt;\"><br /></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt; font-style:italic;\">Only Calibration files</span><span style=\" font-family:\'Ubuntu\'; font-size:11pt;\"> Reduction type is simply to get the median/mean of the calibration files. You must select folder with calibration files, and type in the text box the filter and binning for which to do this. Make sure that the files have the correct filter header info.</span></p>\n"
    "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:11pt;\"><br /></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt;\">Checkout the options on the side for different combinations. For the reuse of calibration files, the program may not be able to find already-computed calibration files, so it will re-compute them with overwriting capability.</span></p>\n"
    "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:11pt;\"><br /></p>\n"
    "<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:600;\">OUTPUT:</span></p>\n"
    "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:11pt;\"><br /></p></body></html>"))
    self.tabWdgt.setTabText(self.tabWdgt.indexOf(self.reduction_tab), _translate("MainWindow", "Data Calibration"))
    # --- "Database" tab ---
    self.db_connection.setTitle(_translate("MainWindow", "Connection"))
    self.establish_connection.setText(_translate("MainWindow", "Establish Connection"))
    self.db_label.setToolTip(_translate("MainWindow", "<html><head/><body><p>Only use this if you know what you\'re doing</p></body></html>"))
    self.db_label.setText(_translate("MainWindow", "Override MySQL info"))
    self.disconnect.setText(_translate("MainWindow", "Disconnect"))
    self.db_editmode.setText(_translate("MainWindow", "Edit Mode"))
    self.db_output.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
    "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
    "p, li { white-space: pre-wrap; }\n"
    "</style></head><body style=\" font-family:\'Serif\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt;\">This is program is a simple interface to the MySQL EDEN Database. Quick Startup:</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt;\">-This message box will serve as a notification box.</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt;\">-Default connection is to apaidani_general@distantearths.com</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt;\">-Once connection has been established. The databases/tables drop-down list will fill up with available databases/tables. Once you select one you can make queries and download data as you please.</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt;\">-Please make sure columns are typed written exactly. Also, the conditions must be MySQL compatible; eg: AND,OR,>,<.</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt;\">-Downloaded data is available in the following directory:</span></p>\n"
    "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt; font-style:italic;\"> ~/eden-utils/csv_data</span></p></body></html>"))
    self.db_host.setPlaceholderText(_translate("MainWindow", "Host"))
    self.db_username.setPlaceholderText(_translate("MainWindow", "Username"))
    self.db_password.setPlaceholderText(_translate("MainWindow", "Password"))
    self.db_query.setTitle(_translate("MainWindow", "Query"))
    self.db_update.setText(_translate("MainWindow", "Update"))
    self.db_download.setText(_translate("MainWindow", "Download as CSV file"))
    self.db_sendquery.setText(_translate("MainWindow", "SEND QUERY"))
    self.db_databases_label.setText(_translate("MainWindow", "Select Database"))
    self.db_cols_label.setText(_translate("MainWindow", "Columns "))
    self.db_where_label.setText(_translate("MainWindow", "Conditions"))
    self.db_cols.setToolTip(_translate("MainWindow", "<html><head/><body><p>Insert the columns to query from the selected database. Leave blank if you want to select all columns.</p></body></html>"))
    self.db_cols.setPlaceholderText(_translate("MainWindow", "Filname,`RA ss`, directory"))
    self.db_where.setToolTip(_translate("MainWindow", "<html><head/><body><p>Insert conditions to query filtered data from the selected database.</p></body></html>"))
    self.db_where.setPlaceholderText(_translate("MainWindow", "\' ID > 20\' AND `RA ss` > 12"))
    self.db_tables_label.setText(_translate("MainWindow", "Tables"))
    self.db_add.setText(_translate("MainWindow", "Add Row"))
    self.db_delete.setText(_translate("MainWindow", "Delete Row"))
    self.tabWdgt.setTabText(self.tabWdgt.indexOf(self.db_tab), _translate("MainWindow", "Database"))
if __name__ == "__main__":
    import sys

    # Stand-alone preview entry point: build the main window from the
    # generated Ui class and hand control to the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,382 | abgibbs/edenAP_detrend | refs/heads/myedits | /eden_calibrate.py | import argparse
import astropy.config as astropy_config
from configparser import ConfigParser
from datetime import datetime
from dateutil import parser
import glob
import numpy as np
import os
import shutil
import sys
import traceback
from astropy import units as u
from astropy.time import Time
from astropy.io import fits
import time
import cal_data
from cal_data import filter_objects, find_val, prepare_cal, last_processing2, get_gain_rdnoise
from DirSearch_Functions import search_all_fits, set_mulifits
from constants import log, ModHDUList, check_and_rename
from multiprocessing import Pool
#
# Script to calibrate eden data using functions in cal_data.py
#
# define constants from config.ini
# Parsed once at import time; 'config.ini' is a relative path, so it must
# exist in the current working directory when this module is imported.
config = ConfigParser()
config.read('config.ini')
# NOTE(review): read from the [FOLDER OPTIONS] section; not referenced in
# the visible code of this module -- presumably consumed elsewhere, confirm
# before removing.
server_destination = config['FOLDER OPTIONS']['server_destination']
def quantity_check(calib_folder):
    """Return True when *calib_folder* contains more than 5 entries.

    Used as a sanity threshold before accepting a day's calibration
    folder in find_closest.

    :param calib_folder: path to a calibration directory
    :return: bool -- True if strictly more than 5 glob matches
    """
    # glob counts every entry (files or subdirectories) in the folder;
    # returning the comparison directly replaces the redundant
    # `True if ... else False` of the original.
    return len(glob.glob(calib_folder + '/*')) > 5
def find_closest(telescope, datafolder, calib_type):
    """Return the calibration folder of *calib_type* nearest in time to the
    observation date encoded in *datafolder*, or None when none qualifies.

    The search radius is 20 days for flats and 365 days for bias/darks;
    a folder qualifies when quantity_check finds more than 5 files in it.

    :param telescope: telescope name as it appears in the directory layout
    :param datafolder: RAW data path ending in .../<target>/<date>
    :param calib_type: 'FLAT', 'BIAS' or 'DARK'
    :return: path of the matching Calibrations folder, or None
    """
    date = Time(datafolder.strip('/').split('/')[-1])
    target = datafolder.strip('/').split('/')[-2]
    limit = 20 if calib_type == 'FLAT' else 365

    def _calib_path(calib_date):
        # Map the RAW data folder onto the Calibrations tree for a given day.
        # (Was duplicated four times in the original body.)
        return (datafolder.replace('RAW', 'Calibrations')
                          .replace('/' + target, '')
                          .replace(date.iso.split()[0], calib_date.iso.split()[0])
                          .replace(telescope, telescope + '/' + calib_type))

    # Widening search: offset days after the observation date first, then
    # before, exactly like the original while-loop.  Returning directly on
    # success removes the original's bare `except` around a possibly
    # unbound `found_date`, which doubled as control flow.
    for offset in range(limit):
        for calib_date in (date + offset * u.day, date - offset * u.day):
            folder = _calib_path(calib_date)
            if os.path.isdir(folder) and quantity_check(folder):
                found_date = calib_date.iso.split()[0]
                print('\t Nearest '+calib_type+' folder with more than 5 files found on '+found_date)
                return folder
    print('\t NO ' + calib_type + ' folder found with more than 5 files within '+str(limit)+' days... ')
    return None
def eden_calibrate(telescope, datafolder, files):
    #
    # Function to produce calibrated images for photometry using cal_data functions
    # Writes in CALIBRATED directory
    #
    """Produce calibrated object frames for one night of EDEN data.

    Finds the nearest FLAT/BIAS/DARK folders, builds (or recycles) the
    master calibration frames, optionally writes GAIN/RDNOISE header
    keywords, then calibrates every object frame in parallel via
    cal_data.last_processing2, writing results under the CALIBRATED tree.

    :param telescope: telescope name used in the server directory layout
    :param datafolder: path to the RAW data folder; expected to end in
        .../<target>/<date> (see find_closest, which parses it that way)
    :param files: unused in this body -- TODO confirm it can be dropped
    """
    print('\n\t BEGINNING CALIBRATION PROCEDURE')
    # check if calibrated files exist, if so return
    # TO DO
    # Get calibration folders from closest day
    flat_folder = find_closest(telescope, datafolder, 'FLAT')
    bias_folder = find_closest(telescope, datafolder, 'BIAS')
    dark_folder = find_closest(telescope, datafolder, 'DARK')
    # Keep only the calibration folders that were actually found.
    calibration = []
    if flat_folder != None:
        calibration.append(flat_folder)
    if bias_folder != None:
        calibration.append(bias_folder)
    if dark_folder != None:
        calibration.append(dark_folder)
    # get compatibility factors
    comp = cal_data.get_comp_info(datafolder)
    filters, bins, exptime_obj, rdnoise_gain = comp
    #calculate median/mean calibration files using search_median from cal_data
    # assumes calibrations is a string (directory path)
    bias_median, darks_median, flats_median, exptime_dark = cal_data.search_median(calibration,
                                                                                  comp,
                                                                                  twi_flat=False,
                                                                                  recycle=True,
                                                                                  median_opt=True)
    # If read noise doesn't exist in at least one header, calculate and put in header files.
    if not rdnoise_gain:
        print("Information about ReadNoise or Gain couldn't be found... Assigning New Values")
        log("Information about ReadNoise or Gain couldn't be found... Assigning New Values")
        # parse calibrations folder/file path
        log("Applying get_gain_rdnoise function...")
        gains, read_noises = get_gain_rdnoise(calibration, bins=bins, filters=filters)
        log("get_gain_rdnoise sucessful")
        #telescop = '/'.join(cal_folder.split('/')[:-1])
        print("Assigning following gain values:\n{}\n...and readnoise:\n{}".format(gains, read_noises))
        # Write one GAIN<i>/RDNOISE<i> keyword pair per amplifier into every
        # FITS header in the data and calibration folders.
        for i in range(len(gains)):
            value_set = 'GAIN{}'.format(i + 1), gains[i]
            comm = 'EDEN Corrected Gain of AMP{} in units of e-/ADU'.format(i + 1)
            set_mulifits(datafolder, '*.fits', value_set, comment=comm, keep_originals=False)
            for j in range(len(calibration)):
                set_mulifits(calibration[j], '*.fits', value_set, comment=comm, keep_originals=False)
            value_set = 'RDNOISE{}'.format(i + 1), read_noises[i]
            comm = 'EDEN Corrected Read Noise of AMP{} in units of e-'.format(i + 1)
            set_mulifits(datafolder, '*.fits', value_set, comment=comm, keep_originals=False)
            for j in range(len(calibration)):
                set_mulifits(calibration[j], '*.fits', value_set, comment=comm, keep_originals=False)
        log("Values have been assigned!... Continuing Calibration.")
        print("Values have been assigned!... Continuing Calibration.")
    # setup object files for calibration
    final_dir = datafolder.replace('RAW', 'CALIBRATED')
    if not os.path.isdir(final_dir):
        os.makedirs(final_dir)
    list_objects = list(search_all_fits(datafolder))
    filtered_objects = [obj_path for obj_path in list_objects if filter_objects(obj_path, bins)]
    # beta variable is to be multiplied by the corrected_darks to normalize it in respect to obj files
    betas = [find_val(objs, 'exptime') / exptime_dark for objs in filtered_objects]
    assert len(betas) == len(filtered_objects), "For some reason betas and objects aren't the same size"
    # set up calibration files to pickle through multiprocessing
    t0 = time.time()
    normflats = ModHDUList(flats_median)
    medbias = ModHDUList(bias_median)
    # try to get rid of infs/nans/zeroes
    normflats.interpolate()
    if darks_median:
        meddark = ModHDUList(darks_median)
        _list = [normflats, meddark, medbias]
        normflats, meddark, medbias = prepare_cal(filtered_objects[0], *_list)
    else:
        # No darks available: substitute arrays of zeros shaped like the
        # flats so the downstream arithmetic is a no-op for the dark term.
        _list = [normflats, medbias]
        normflats, medbias = prepare_cal(filtered_objects[0], *_list)
        try: # sometimes normflats contains None for some reason
            meddark = [np.zeros(normflats[i].shape) for i in range(len(normflats))]
        except:
            # NOTE(review): bare-except fallback; the inner loop can leave
            # `meddark` with a single element or reuse a stale index `i` --
            # fragile, confirm intended behavior before touching.
            for i in range(len(normflats)):
                try:
                    if normflats[i] != None:
                        meddark = [np.zeros(normflats[i].shape)]
                except:
                    meddark = []
                    for j in range(len(normflats)):
                        meddark.append(np.zeros(normflats[i].shape))
    #[np.zeros(normflats[i].shape),np.zeros(normflats[i].shape),np.zeros(normflats[i].shape)] # this is not going to work robustly
    lapse = time.time() - t0
    log("Preparation right before calibration took %.4f " % lapse)
    # create arguments list/iterator
    arguments = []
    for obj, beta in zip(filtered_objects, betas):
        # each argument will have an object frame, normalization constant(Beta), and directory names of
        # super calibration files, and final directory for object frames.
        arguments.append((obj, beta, normflats, meddark, medbias, final_dir))
    #arguments = [obj,beta,normflats,meddark,medbias,final_dir]
    # initialize multiprocessing pool in try/except block in order to avoid problems
    split = 3 # number of subprocesses
    pool = Pool(processes=split)
    try:
        t0 = time.time()
        pool.starmap(last_processing2, arguments)
        #map(last_processing2, arguments)
        lapse = time.time() - t0
        log("WHOLE CALIBRATION PROCESS IN ALL FILES TOOK %.4f" % lapse)
    except Exception:
        log("An error occurred during the multiprocessing, closing pool...")
        raise
    finally:
        # the finally block will ensure the pool is closed no matter what
        pool.close()
        pool.join()
        del arguments[:]
    log("FULL DATA CALIBRATION COMPLETED")
    print("CALIBRATION COMPLETED!")
    return
| {"/get_photometry_eden.py": ["/constants.py", "/PhotUtils.py", "/eden_calibrate.py"], "/transit_photometry.py": ["/constants.py"], "/qtsql.py": ["/constants.py"], "/FIT_Class.py": ["/constants.py", "/cal_data.py"], "/automatic_photometry_eden.py": ["/constants.py", "/get_photometry_eden.py", "/PhotUtils.py", "/transit_photometry.py", "/eden_GPDetrend.py"], "/PhotUtils.py": ["/constants.py"], "/eden_calibrate.py": ["/cal_data.py", "/constants.py"]} |
78,386 | jperezmacias/tagRtrvr | refs/heads/master | /main_tagRtrvr.py | ''' Programmed by Jose M. Perez-Macias
May 29th 2014
Version 29.05.2014
Retrieves images and downloads them to a folder with the current timestamp. The images
are scanned from a URI (XML)
Imports the module called tagRtrvr
'''
from tagRtrvr import *
####### MAIN ###########
def main():
    """Scrape every <img> tag from the configured URL and save the images
    into a fresh timestamped folder (img/<YYYY_MM_DD_HH_MM_SS>/)."""
    ### SETTINGS
    tag = 'img'
    url = 'http://www.w3.org/'
    # url = 'http://xhtml.com/en/xhtml/reference/base/'
    folder_name = tag + '/' + str(strftime("%Y_%m_%d_%H_%M_%S", gmtime())) + '/'
    # Fetch the page ONCE and reuse both body and status code; the original
    # called get_html(url) a second time just to re-check the code, issuing
    # a redundant HTTP request.
    xhtml, cod = get_html(url)
    # Expected response code is 200 (see RFC 2616 for others).
    print('HTTP response code ' + str(cod))
    if cod == 200:
        print("We got a response from the remote server")
    else:
        print("We are in trouble")
    # Retrieve the selected tag and save every referenced image.
    img_list = get_xml_tags(xhtml, tag)
    print('Number of images ' + str(len(img_list)))
    save_img_array(img_list, url, folder_name)
# Executed unconditionally at import time; the script has no __main__ guard.
main()
# Leftover debugging aid, intentionally disabled:
#raw_input("Press enter to continue")
| {"/test_tagRtrvr.py": ["/tagRtrvr.py"]} |
78,387 | jperezmacias/tagRtrvr | refs/heads/master | /tagRtrvr.py | ''' Programmed by Jose M. Perez-Macias
May 29th 2014
Version 29.05.2014
Functions for main_tagRtrvr.py
------------------------------------
get_html(url)
def get_xml_tags(xhtml,tag):
def download_img(img_url):
def save_img(prefix,url,folder):
save_img_array(img_list,url,folder_name):
'''
from xml.dom import minidom
import urllib
import os, os.path
import sys
#import datetime
from time import gmtime, strftime
# This function gets the URL and returns (html, http_status_code).
def get_html(url):
    """Fetch *url* with a User-Agent header; return (html, status_code).

    Returns (None, None) when the URL cannot be opened at all, so callers
    that unpack the pair no longer crash on the original's implicit None.
    """
    import urllib2  # function-scope: module-level urllib cannot send headers
    # BUG FIX: the original called urllib.urlopen(url, None, headers),
    # which passes the headers dict as the *proxies* argument -- the
    # User-Agent was silently ignored.  urllib2.Request sends it for real.
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla 5.10'})
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as e:
        # Non-2xx responses still carry a body and a code; urllib returned
        # them directly while urllib2 raises, so unwrap to keep the old
        # (body, code) contract.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
        return e.read(), e.code
    except Exception as e:
        print('Could not open URL')
        print(e)
        return None, None
    html = response.read()
    return html, response.code
# This function gets the tags in an array
def get_xml_tags(xhtml, tag):
    """Parse *xhtml* and return the list of DOM elements named *tag*.

    Returns an empty list when the input is not parseable XML, so callers
    taking len() of the result do not crash.
    """
    #xmldoc = minidom.parse('binary.xml') # This is for files
    try:
        xmldoc = minidom.parseString(xhtml)
    except Exception as e:
        print("Could not retrieve xhtml, are you sure is XHTML?")
        print(e)
        return []
    # BUG FIX: the original hardcoded 'img' here, silently ignoring the
    # `tag` parameter.
    return xmldoc.getElementsByTagName(tag)
# This function downloads the image payload.
# It is called from save_img.
def download_img(img_url):
    """Fetch the resource at *img_url*; return (raw bytes, HTTP status)."""
    response = urllib.urlopen(img_url)
    status = response.code
    payload = response.read()
    return payload, status
# This function gets the URI of the image and saves it in a folder
# defined by 'folder'. If this folder does not exist, it creates it.
def save_img(prefix, url, folder):
    """Download *url* and store it as <folder><prefix>_<basename>.

    Creates *folder* (and any missing parents) first.
    """
    # http://www.java2s.com/Tutorial/Python/0420__Network/RetrievingImagesfromHTMLDocuments.htm
    if not os.path.exists(folder):
        os.makedirs(folder)
    # Download the image; `code` is the HTTP status (currently unused).
    data, code = download_img(url)
    # Basename of the remote file, e.g. 'logo11w.png'.
    image_name = url.split('/')[-1]
    full_path = folder + str(prefix) + '_' + image_name
    # FIX: `with` guarantees the handle is closed even when the write
    # fails -- the original leaked the open file on error.
    with open(full_path, 'wb') as f:
        f.write(data)
def save_img_array(img_list, url, folder_name):
    """Save every <img> DOM element in *img_list* under *folder_name*,
    prefixing each filename with its position in the list."""
    for index, element in enumerate(img_list):
        # Progress counter, 1-based.
        print(index + 1)
        # Join the base url with the src attribute, dropping the first two
        # characters of the src value (the original did the same slicing,
        # presumably to strip a leading './'-style prefix).
        image_url = url + element.attributes['src'].value[2:]
        save_img(index, image_url, folder_name)
def download_images_xhtml_url(url):
    """Download every <img> resource referenced by the XHTML page at *url*
    into a fresh timestamped folder (img/<YYYY_MM_DD_HH_MM_SS>/)."""
    ### SETTINGS
    tag = 'img'
    folder_name = tag + '/' + str(strftime("%Y_%m_%d_%H_%M_%S", gmtime())) + '/'
    # Fetch the page ONCE and reuse both body and status code; the original
    # called get_html(url) a second time just to re-check the code, issuing
    # a redundant HTTP request.
    xhtml, cod = get_html(url)
    # Expected response code is 200.
    print('HTTP response code ' + str(cod))
    if cod == 200:
        print("We got a response from the remote server")
    else:
        print("We are in trouble")
    # Retrieve the selected tag and save every referenced image.
    img_list = get_xml_tags(xhtml, tag)
    print('Number of images ' + str(len(img_list)))
    save_img_array(img_list, url, folder_name)
| {"/test_tagRtrvr.py": ["/tagRtrvr.py"]} |
78,388 | jperezmacias/tagRtrvr | refs/heads/master | /test_tagRtrvr.py | '''
Tests for module tagRtrvr.py
Author: Jose M. Perez-Macias
Data: 29.05.2014
'''
import unittest
from tagRtrvr import *
import os, os.path # To count the number of files
#The following is the class in which all functions will be ran by the unittest
#The following is the class in which all functions will be ran by the unittest
class AsignmentTest(unittest.TestCase):
    """Tests for tagRtrvr.

    NOTE(review): every test below performs live HTTP requests against
    w3.org / google.com and writes to the working directory -- they will
    fail without network access.  Confirm this is acceptable for CI.
    """
    # The function "setUp" will always be ran in order to setup the test environment before all the test have run.
    def setUp(self):
        '''Verify environment is setup properly''' # Printed if test fails
        pass
    # The function "tearDown" will always be ran in order to cleanup the test environment after all the tests have run.
    def tearDown(self):
        '''Verify environment is tore down properly''' # Printed if test tails
        pass
    def test_get_html(self):
        '''Verify that the url is valid''' #Printed if test fails
        self.assertEqual(get_html('http://www.w3.org/')[1], 200)
        self.assertEqual(get_html('http://www.google.com')[1], 200)
    def test_get_html_exc(self):
        # assertRaises(excClass, callableObj) protoype
        # NOTE(review): get_html(...) is CALLED here, not passed as a
        # callable, so assertRaises never exercises the function -- confirm
        # the intended assertion.
        self.assertRaises(TypeError, get_html('http://www.w3.org/'))
    def test_get_xml_tags(self):
        # Smoke test only: no assertion on the returned element list.
        get_xml_tags(get_html('http://www.w3.org/')[0],'img')
    def test_download_img(self):
        # Smoke test only: succeeds as long as the download does not raise.
        download_img('http://www.google.com/images/srpr/logo11w.png')
    def test_save_img(self):
        folder = '_test_output/'
        img_url = 'http://www.google.com/images/srpr/logo11w.png'
        save_img(1,img_url,folder)
    def test_save_img_array(self):
        tag ='img'
        url = 'http://www.w3.org/'
        xhtml, cod = get_html(url)
        folder_name = tag +'/' + str(strftime("%Y_%m_%d_%H_%M_%S", gmtime())) + '/'
        img_list = get_xml_tags(xhtml,tag)
        save_img_array(img_list,url,folder_name)
    def test_found_and_download(self):
        # Test if the number of images downloaded and expected are the same
        # check number of img
        tag ='img'
        url = 'http://www.w3.org/'
        xhtml, cod = get_html(url)
        folder_name = tag +'/' + str(strftime("%Y_%m_%d_%H_%M_%S", gmtime())) + '/'
        img_list = get_xml_tags(xhtml,tag)
        number_expected=len(img_list)
        # check number of downloaded images
        save_img_array(img_list,url,folder_name)
        number_found = len([item for item in os.listdir(folder_name) if os.path.isfile(os.path.join(folder_name, item))])
        self.assertEqual(number_expected, number_found)
if __name__ == '__main__':
    # Run the whole suite when this file is executed directly.
    unittest.main()
78,394 | pvt900/capstone | refs/heads/master | /mywartburg_scraper.py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from time import sleep
import fileinput
#Writen by Will Goettl
#Thank you to Justin Schoppe for providing help and code framework
# Scratch state shared with the scraping loop in the __main__ block below.
a = []      # per-row cell values; reset at the start of every table row
data = []   # NOTE(review): never written in the visible code -- confirm use
j = 1       # 1-based index of the current results-table row (used in XPaths)
def Find(browser):
    """Click the 'Next page -->' pager link if present.

    Returns True after advancing to the next results page, False when
    there is no such link (or the pager cannot be located at all).
    """
    try:
        pager_links = browser.find_elements_by_xpath('//*[@id="pg0_V_divMain"]/div[3]/div/div/a')
        last_link = pager_links[-1]
        # Debug trace kept from the original implementation.
        print(last_link.text == "Next page -->")
        if last_link.text == "Next page -->":
            last_link.click()
            sleep(2)  # give the next page time to load
            return True
        return False
    except NoSuchElementException:
        return False
    except IndexError:
        # Empty pager list -- no pagination on this page.
        return False
# Interactive scraping session: log in to MyWartburg, walk the advanced
# course search back through every term until 2006 Winter, and dump the
# scraped rows to 'historical_data.txt'.
# NOTE(review): the file's original indentation was lost; the nesting below
# was reconstructed from the control-flow statements — confirm against the
# upstream repository.
if __name__ == '__main__':
    # Log in to the MyWartburg portal with a fresh Chrome session.
    driver = webdriver.Chrome()
    driver.get('https://my.wartburg.edu/ICS/')
    username = driver.find_element_by_id("userName")
    username.send_keys("/////////////////") #enter Username
    password = driver.find_element_by_xpath('//*[@id="password"]')
    password.send_keys("/////////////////") #enter password
    driver.find_element_by_xpath('//*[@id="siteNavBar_btnLogin"]').click()
    # Open the advanced course search screen of the registration portlet.
    driver.get('https://my.wartburg.edu/ICS/Academics/Student.jnz?portlet=Registration&screen='
               'Advanced+Course+Search&screenType=next')
    select = Select(driver.find_element_by_xpath('//*[@id="pg0_V_ddlTerm"]'))
    index = 0
    #selects what term to begin in. Index 0 begins at the top of the dropdown menue "Term:" in the course finder.
    #Subsequently, index 1 would be the second, 2 the third, and so on
    select.select_by_index(index)
    select = Select(driver.find_element_by_xpath('//*[@id="pg0_V_ddlTerm"]'))
    selected = select.all_selected_options[0].text.split()
    # Scrape one term at a time until the oldest term with data is reached.
    while selected[:2] != ['2006', 'Winter']:
        #fartest data goes back as of 4.2020. Edit to desired parameters. Can contain both year and term or just one element
        index += 1
        driver.find_element_by_xpath('//*[@id="pg0_V_btnSearch"]').click()
        while True:
            # Every other <tr> of the results table holds a course row,
            # hence the j += 2 stride at the bottom of the try-block.
            for i in driver.find_elements_by_xpath('//*[@id="pg0_V_dgCourses"]/tbody/tr'):
                a=[]
                try:
                    # course id (td[3]); blank cells become placeholder strings
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[3]').text == "":
                        a.append("ID NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[3]').text)
                    #course title
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[4]').text == "":
                        a.append("TITLE NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[4]').text)
                    #teacher name
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[5]/span').text == "":
                        a.append("NAME NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[5]/span').text)
                    #seats
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[6]').text == "":
                        a.append("SEATS NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[6]').text)
                    #status
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[7]').text == "":
                        a.append("STATUS NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[7]').text)
                    #meeting time
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[8]/ul/li').text == "":
                        a.append("MEETING TIME NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[8]/ul/li').text)
                    #credits
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[9]').text == "":
                        a.append("CREDITS NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[9]').text)
                    #begin date
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[10]').text == "":
                        a.append("BEGIN-DATE NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[10]').text)
                    #end date
                    if driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[11]').text == "":
                        a.append("END-DATE NULL")
                    else:
                        a.append(driver.find_element_by_xpath(f'//*[@id="pg0_V_dgCourses"]/tbody/tr[{j}]/td[11]').text)
                    data.append(a)
                    j += 2
                except:
                    # Row did not match the expected layout: reset the row
                    # index and fall through to paging/term advancement.
                    j = 1
                    break
            if not Find(driver):
                # No further result pages for this term.
                j = 1
                break
        # Return to the search screen and advance to the next (older) term.
        driver.get('https://my.wartburg.edu/ICS/Academics/Student.jnz?portlet=Registration&screen='
                   'Advanced+Course+Search&screenType=next')
        select = Select(driver.find_element_by_xpath('//*[@id="pg0_V_ddlTerm"]'))
        select.select_by_index(index)
        select = Select(driver.find_element_by_xpath('//*[@id="pg0_V_ddlTerm"]'))
        selected = select.all_selected_options[0].text.split()
    driver.close()
    print(data)

    # NOTE(review): chunk/n and the two with-blocks below may originally have
    # been at module level; behavior when run as a script is the same.
    def chunk(lst, n): #class that partitions our lists neatly
        print("chunking...")
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
    n = 9
    #uberlist = list(chunk(data, n))
    with open('historical_data.txt', 'w') as file: #output of scraped data
        print("writing file...")
        #for j in range(len(a)):
        #    a[j] = a[j].split('\n')
        for listitem in data:
            file.write('%s\n' % listitem)
    # Strip empty-row artifacts ("['']") from the output file in place.
    with fileinput.FileInput('historical_data.txt', inplace=True, backup='.bak') as file: #rename output as needed
        for line in file:
            print(line.replace("['']", ""), end='')
| {"/Demo.py": ["/cf_api.py"]} |
78,395 | pvt900/capstone | refs/heads/master | /cf_api.py | #cf_api.py
#Written for CS460 01 Capstone at Wartburg College
#Instructor/Client - Dr.John Zelle
#By: Will Goettl & Rob Farmer
import mechanicalsoup
import pandas as pd
import os,time
from bs4 import BeautifulSoup as soup
from datetime import datetime
class CourseSearch:
    """
    Screen-scraping API for the Wartburg "Course Finder" search form at
    http://winnet.wartburg.edu/coursefinder/.

    The ``set_*``/``is_*`` methods fill individual search-form fields, the
    ``get_*`` methods enumerate the legal drop-down values, and
    :meth:`search_form` submits the form and parses the result table into
    ``self.data``. :meth:`update_historic` merges those results into the
    on-disk historical data set.
    """

    def __init__(self):
        '''
        Initializes variables & the StatefulBrowser used by this class.

        NOTE: performs network I/O — opens the Course Finder page,
        selects its search form and caches the form's <option> elements.
        '''
        # Stateful browser pointed at winnet; grab the search form.
        self.browser = mechanicalsoup.StatefulBrowser()
        self.browser.open("http://winnet.wartburg.edu/coursefinder/")
        self.page = self.browser.select_form()
        self.option_list = self.browser.get_current_page().findAll('option')
        # Form field state, filled in by the setters below.
        # (A duplicate "self.ee_req = None" assignment was removed.)
        self.keyword = None
        self.dept = None
        self.term = None
        self.ee_req = None
        self.time = None
        self.is_cult_div = None
        self.write = False
        self.pass_fail = False
        self.instructor = None
        self.course_open = None
        # Parsed search results.
        self.course_list = []
        self.changes = []
        self.data = []
        # Drop-down lookup tables: form code -> human-readable label.
        # Department codes carry trailing spaces exactly as the form expects.
        self.Departments = {'All':'None Selected','AC ':'Accounting', 'ART ':'Art','BI ':'Biology','BA ':'Business Administration','CH ':'Chemistry', 'CS ':'Computer Science',
                            'CCJ ':'Criminology/Criminal Justice','EC ':'Economics', 'ED ':'Education', 'ES ':'Engineering Science', 'EN ':'English','EI ':'English International Students',
                            'ENV ':'Environmental Science', 'EXS ':'Exercise Science', 'FL ':'Foreign Language','FR ':'French','SCI ':'General Science',
                            'GER ':'German','GM ':'Global Multicultural','GR ':'Greek','HE ':'Health','HB ':'Hebrew','HI ':'History','IS ':'Inquiry Studies',
                            'ID ':'Interdisciplinary','COM ':'Journalism and Communication','LS ':'Liberal Studies','MA ':'Mathematics', 'MU ':'Music',
                            'NSC ':'Neuroscience','PJ ':'Peace and Justice Studies','PH ':'Philiosophy','PE ':'Physical Education','PHY ':'Physics','PS ':'Political Science',
                            'NUR ':'Pre-Professional Nursing Prog', 'PSY ':'Psychology', 'PBH ':'Public Health','RE ':'Religion','SCH ':'Scholars Program',
                            'SW ':'Social Work','SO ':'Sociology','SP ':'Spanish','TH ':'Theatre','VE ':'Venture Ed','WW ':'Wartburg West','WS ':"Womens Studies"}
        self.Terms = ['May Term', 'Summer Session', 'Winter Term', 'Fall Term']
        # (A literally duplicated '7:45AM/THX' key was removed; the mapping
        # is unchanged because the duplicate carried the same value.)
        self.Times = {'all':'Show all', 'od':'Other Evening', 'oe':'Other Evening', '7:45AM/MWF':'7:45AM MWF', '9:00AM/MWF':'9:00AM MWF', '10:45AM/MWF':'10:45AM MWF',
                      '12:00PM/MWF':'12:00PM MWF', '1:15PM/MWF':'1:15PM MWF', '2:30PM/MWF':'2:30PM MWF', '3:50PM/MWF':'3:50PM MWF',
                      '7:45AM/THX':'7:45AM TH', '9:35AM/THX':'9:35AM TH','11:30AM/THX':'11:30AM TH', '1:00PM/THX':'1:00PM TH', '2:50PM/THX':'2:50PM TH', '3:50PM/THX':'3:50PM TH'}
        self.ED = {'all':'Show all EE courses', 'CP':'CP - Capstone', 'FL':'FL- Foreign Language', 'FR':'FR - Faith & Reflection',
                   'HF':'HF - Humanities/Fine Arts Interconnected','MR':'MR - Mathematical Reasoning','NS':'NS - Natural Science Interconnected',
                   'SR':'SR - Scientific Reasoning','SS':'SS - Social Science Interconnected'}
        self.CD = {'none':'Not selected','C':'C - Cultural Immersion', 'D': 'D - Diversity Across Curriculum Course','all':'Show both'}
        self.WI = {'none':'Not selected', 'WI':'Writing Intensive'}
        self.PF = {'none':'Not selected', 'PF':'Pass/D/F basis'}
        self.Instructor = {'0':'Not Selected'}

    def update_historic(self):
        '''
        Merge the most recent search results into course_history.txt.

        Rows are keyed on (Course_ID, Year, Term); freshly scraped rows
        overwrite stored rows with the same key (keep='last') and new
        rows are appended. A date-stamped changelog and a backup of the
        previous history file are written alongside.
        '''
        historic = pd.read_csv('course_history.txt', header=None, sep='"', encoding = 'ISO-8859-1',
                               names=['Course_ID', 'Course_Title', 'Professor_Name','Meeting_Time','Enrollment','Room','Year','Term','Credit'])
        winnet = pd.DataFrame(self.data,
                              columns=['Course_ID', 'Course_Title', 'Professor_Name','Meeting_Time','Enrollment','Room','Year','Term','Credit'])
        cols = ['Course_ID','Year','Term']
        combined = pd.concat([historic,winnet])
        combined = combined.drop_duplicates(subset=cols, keep='last')
        #combined.set_index(['Course_ID','Year','Term'], inplace=True)
        combined.to_csv(r'course_history.txt',header=None,index=cols,sep='"',mode='w')
        winnet.to_csv(r'Changelog.txt',header=None,index=None,sep='"',mode='w')
        historic.to_csv(r'historic_legacy.txt',header=None,index=None,sep='"',mode='w')
        timestr = time.strftime("%Y-%m-%d")
        changelog = "Changelog"+timestr+".txt"
        historic_V = 'historic_legacy'+timestr+'.txt'
        # os.rename appends the current date because DataFrame.to_csv was
        # given the generic names above. NOTE(review): behavior when the
        # dated file already exists is platform-dependent (overwrite on
        # POSIX, error on Windows) — confirm the intended semantics.
        os.rename('Changelog.txt',changelog)
        os.rename('historic_legacy.txt',historic_V)

    def set_keyword(self,key):
        '''
        Takes a string to be passed into the keyword search parameter.
        '''
        self.page.set('ctl00$ContentPlaceHolder1$FormView1$TextBox_keyword', key)

    def get_departments(self):
        '''
        Returns the current list of department display names.
        '''
        return list(self.Departments.values())

    def set_department(self,department):
        '''
        Takes a department display name (case-insensitive) and passes the
        corresponding department code to the form.
        '''
        dept = [k for k,v in self.Departments.items() if v.casefold() == department.casefold()]
        self.page.set("ctl00$ContentPlaceHolder1$FormView1$DropDownList_Department", dept[0])

    def get_years(self):
        '''
        Returns the four years for which term selection is valid, centered
        on today: before June 1st that is [Y-2, Y-1, Y, Y+1], afterwards
        [Y-1, Y, Y+1, Y+2]. (June 1st approximates the end of May Term.)
        '''
        dt = datetime.now()
        current_year = int('{:%Y}'.format(dt))
        end_of_term = datetime(current_year, 6, 1)
        if dt < end_of_term:
            return [current_year-2, current_year-1, current_year, current_year+1]
        else:
            return [current_year-1, current_year, current_year+1, current_year+2]

    def get_terms(self):
        '''
        Returns the list of term names.
        '''
        return self.Terms

    def set_term(self,year,semester):
        '''
        Takes year (int) and semester (str) and passes them to the form
        as the concatenated "<year> <semester>" value the site expects.
        '''
        term = str(year)+' '+semester
        self.page.set("ctl00$ContentPlaceHolder1$FormView1$DropDownList_Term", term)

    def get_times(self):
        '''
        Returns the list of available class-time labels.
        '''
        return list(self.Times.values())

    def set_time(self,period):
        '''
        Takes a meeting-time label (substring match) and passes the
        matching form code to the meeting-time field.
        '''
        for k,v in self.Times.items():
            if period in v:
                self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_MeetingTime', k)

    def get_ed(self):
        '''
        Returns the list of Essential Education requirement labels.
        '''
        return list(self.ED.values())

    def set_ed(self,ed):
        '''
        Takes an Essential Education label (substring match) and passes
        the matching code to the Essential Ed field.

        BUG FIX: this previously iterated ``self.Times``, so no Essential
        Ed label ever matched and the field was silently left unset; it
        now searches ``self.ED`` as intended.
        '''
        for k,v in self.ED.items():
            if ed in v:
                self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_EssentialEd', k)

    def get_cd(self):
        '''
        Returns the list of Cultural-Diversity requirement options.
        '''
        return list(self.CD.values())

    def set_cd(self,option):
        '''
        Takes an option label (case-insensitive, must exist in self.CD)
        and sets the Cultural Diversity field accordingly.
        '''
        cdo = [k for k,v in self.CD.items() if v.casefold() == option.casefold()]
        self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_CulturalDiversity', cdo[0])

    def set_wi(self, wi):
        '''
        Takes 'wi' as bool and sets the Writing Intensive field.
        '''
        if wi == True:
            self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_WritingIntensive', 'WI')
        else:
            self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_WritingIntensive', 'none')

    def is_pf(self,pf):
        '''
        Takes 'pf' as bool and sets the Pass/Fail field accordingly.
        '''
        if pf == True:
            self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_PassFail', 'PF')
        elif pf == False:
            self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_PassFail','none')

    def get_instructors(self):
        '''
        Builds the instructor id -> name table from the form's <option>
        elements (numeric option values are instructor ids) and returns
        the list of instructor names.
        '''
        for item in self.option_list:
            value = str(item).split('"') #used " as delimiter because , and ' appear in the course data
            if value[1].isdigit():
                self.Instructor[value[1]] = item.get_text()
        return list(self.Instructor.values())

    def set_instructor(self, name):
        '''
        Takes an instructor name (substring match against the table built
        by get_instructors) and passes the instructor id to the form.
        '''
        for k,v in self.Instructor.items():
            if name in v:
                self.page.set('ctl00$ContentPlaceHolder1$FormView1$DropDownList_Instructor', k)

    def couse_open(self, option):
        '''
        Takes 'option' as bool and sets the open-courses checkbox.

        NOTE: the method name carries a historical typo ("couse"); it is
        kept for backward compatibility, and an instance attribute named
        ``course_open`` already occupies the correctly spelled name.
        '''
        self.page.set('ctl00$ContentPlaceHolder1$FormView1$CheckBox_OpenCourses', option)

    def search_form(self):
        '''
        Submits the search form and extracts the courses from the result
        table.

        The table text is flattened and split into cells; the first 18
        cells are boilerplate and are skipped, after which each course
        occupies 16 cells. Blank cells become 'Null'. Appends to and
        returns ``self.data``: 9-column rows matching update_historic's
        schema (Course_ID .. Credit), drawn from fixed cell positions.
        '''
        self.course_list = []
        course = []
        counter = 0
        self.page.choose_submit('ctl00$ContentPlaceHolder1$FormView1$Button_FindNow')
        self.browser.submit_selected()
        table_of_data = self.browser.get_current_page().find('table')
        data = table_of_data.get_text('td').replace('\n', '').replace('td', '|')
        data2 = data.split('|')
        for index in data2[18:]:
            if counter < 15:
                # Accumulate the first 15 cells of a course row.
                if index == '':
                    continue
                else:
                    if index == '\xa0' or index == ' ':
                        index = 'Null'
                    course.append(index)
                    counter +=1
            else:
                # 16th cell completes the row: store it and reset.
                if index == '\xa0' or index == ' ':
                    index = 'Null'
                course.append(index)
                self.course_list.append(tuple(course))
                course = []
                counter = 0
        for item in self.course_list:
            self.data.append([item[5],item[6],item[7],item[8],item[9],item[10],item[11],item[12],item[14]])
        return self.data

    def display_browser(self):
        '''
        Displays the cached copy of the webpage in a local browser.
        '''
        self.browser.launch_browser()

    def save_file(self):
        '''
        Saves the results of the last search to 'test_data.txt'.
        '''
        with open('test_data.txt', 'w') as handler: #output of scraped data
            print("writing file...")
            for course in self.course_list:
                handler.write('%s\n' % list(course))
| {"/Demo.py": ["/cf_api.py"]} |
78,396 | pvt900/capstone | refs/heads/master | /Demo.py | from cf_api import CourseSearch as cfapi
def main():
    """
    Smoke-test driver for the CourseSearch API: runs an unfiltered search
    and folds the results into the historical data set. The commented-out
    lines document how each individual search field can be exercised.
    """
    test = cfapi()
    #test.set_keyword('This is useless')
    #departs = test.get_departments()
    #test.set_department('Psychology')
    #years = test.get_years()
    #terms = test.get_terms()
    #test.set_term(2019,'Fall Term')
    #times = test.get_times()
    #test.set_time()
    #essential_ed = test.get_ed()
    #test.set_ed()
    #cul_div = test.get_cd()
    #test.set_cd()
    #test.set_wi(True)
    #test.is_pf(False)
    #instructors = test.get_instructors()
    #test.set_instructor('John Zelle')
    #test.course_open(True)
    # Submit the search and merge the scraped rows into the data files.
    a = test.search_form()
    b = test.update_historic()
    #test.display_browser()

if __name__ == '__main__':
    main()
#(ノಠ益ಠ)ノ彡┻━┻   (ಥ╭╮ಥ)  ლ(ಠ益ಠლ)  Yes ! <3
# (•_•) ( •_•)>⌐■-■ (⌐■_■)    <3
78,399 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/serializers.py | import json
from decimal import Decimal
from django.db.models import Sum, F
from rest_framework.serializers import ModelSerializer
from snakeoil_webshop.models import Product, ShoppingCart
class ProductSerializer(ModelSerializer):
    """
    REST serializer exposing the full public field set of Product,
    including the created/updated life-cycle timestamps.
    """

    class Meta:
        # Serialize Product with an explicit field whitelist.
        model = Product
        fields = [
            'pk',
            'sku',
            'name',
            'description',
            'created',
            'updated',
            'price',
            'num_in_stock'
        ]
# Monkey-patch Product with an 'as_json' convenience method, replacing
# the NotImplementedError stub declared on the model itself.
def product_as_json(self):
    """
    Return a JSON repesentation of the Product.
    """
    return json.dumps(ProductSerializer(self).data, ensure_ascii=False)

setattr(Product, 'as_json', product_as_json)
# Extend the ShoppingCart model with a summary serializer method.
def cart_summary_as_string(self):
    """
    Return a short string representing the contents
    of the shopping cart, e.g. "3 items | 12.50 €".
    """
    # Total count of physical items across all cart lines.
    total_num_items = self.shopping_cart_items.aggregate(
        total_num_items=Sum('num_items'),
    ).get('total_num_items', 0)

    # Total price: sum of unit price times quantity per line.
    total_price = self.shopping_cart_items.aggregate(
        total_price=Sum(
            F('product__price')*F('num_items')
        )
    ).get('total_price', 0.00)

    # Sum() aggregates yield None (not 0) over an empty cart,
    # so normalize before formatting.
    if total_num_items is None:
        total_num_items = 0
    if total_price is None:
        total_price = Decimal("0.00")

    summary_string = f"{total_num_items} items | {total_price} €"
    return summary_string

setattr(ShoppingCart, 'summarize', cart_summary_as_string)
78,400 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/management/commands/add_demo_users.py | import secrets
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User, Group, Permission
class Command(BaseCommand):
    """
    Management command that creates (or refreshes) the demo accounts:
    one staff webmaster, one shop manager and three customers, together
    with the permission groups they rely on. Every run assigns fresh
    random passwords and prints them in plain text.
    """

    # Number of random bytes fed to secrets.token_urlsafe(); the printed
    # url-safe passwords are therefore ~22 characters, not 16.
    PASSWORD_LENGTH = 16
    STAFF_USERNAME = "demo.webmaster"
    MANAGER_USERNAME = "demo.manager"
    CUSTOMER_X_USERNAME = "demo.customer.x"
    CUSTOMER_Y_USERNAME = "demo.customer.y"
    CUSTOMER_Z_USERNAME = "demo.customer.z"
    MANAGERS_GROUP_NAME = "shop.managers"
    STAFF_GROUP_NAME = "shop.staff"
    # Option key that suppresses console output when truthy.
    SILENT = "silent"

    help = (
        "Sets up a staff user, a shop manager and three customers with the correct permissions "
        "for demo purposes. All accounts will receive new random passwords. The passwords will "
        "be printed out in plain text!"
    )

    def handle(self, *args, **options):
        """Entry point: create/refresh all five demo accounts and groups."""
        # Create the staff user. They have full access to webshop functions and the Django Admin.
        staff_user = self.update_user(self.STAFF_USERNAME, options)
        self.give_staff_permissions(staff_user)

        # Create the shop manager. They have full webshop feature access, but can't see Django Admin.
        # This part also configures the shop manager permission group.
        shop_manager = self.update_user(self.MANAGER_USERNAME, options)
        manager_group, created = Group.objects.get_or_create(name=self.MANAGERS_GROUP_NAME)
        manager_group.permissions.add(
            Permission.objects.get(codename="add_product")
        )
        manager_group.user_set.add(shop_manager)

        # Create three ordinary web shop customer accounts.
        self.update_user(self.CUSTOMER_X_USERNAME, options)
        self.update_user(self.CUSTOMER_Y_USERNAME, options)
        self.update_user(self.CUSTOMER_Z_USERNAME, options)

        if not options.get(self.SILENT, False):
            self.stdout.write(self.style.SUCCESS("Demo login accounts created and/or updated with new passwords."))

    def update_user(self, username, options):
        """
        Changes the given user's password, or creates the user
        if they don't exist. The new password is echoed to stdout
        unless the 'silent' option is set.

        Returns the updated or created user.
        """
        user, created = User.objects.get_or_create(username=username)
        password = secrets.token_urlsafe(self.PASSWORD_LENGTH)
        if not options.get(self.SILENT, False):
            self.stdout.write(self.style.NOTICE(f"{user.username.ljust(16)}: {password}"))

        user.set_password(password)
        user.save()
        return user

    @staticmethod
    def give_staff_permissions(user):
        """
        Promotes the given user to a staff member with full
        privileges over the web shop's resources.
        """
        # Create a staff user group if it didn't exist yet.
        staff_group, created = Group.objects.get_or_create(name=Command.STAFF_GROUP_NAME)

        # Give the staff group all permissions over the webshop resources.
        # Django's default permission codenames are "<verb>_<modelname>".
        nouns = ["product", "shoppingcart", "shoppingcartitem"]
        verbs = ["add", "change", "delete", "view"]
        for noun in nouns:
            for verb in verbs:
                permission_codename = f"{verb}_{noun}"
                staff_group.permissions.add(
                    Permission.objects.get(codename=permission_codename)
                )

        # Add the given user to the staff group.
        staff_group.user_set.add(user)
        # is_staff additionally unlocks the Django Admin login.
        user.is_staff = True
        user.save()
78,401 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/helpers.py | from snakeoil_webshop.models import ShoppingCart
def find_active_cart_for_user(user):
    """
    Return the active ShoppingCart of the given User.

    Each user currently owns at most one cart, so the cart is simply
    looked up by user and lazily created on first access.
    """
    cart, _created = ShoppingCart.objects.get_or_create(user=user)
    return cart
| {"/snakeoil_webshop/serializers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/helpers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/forms.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/admin.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/management/commands/add_demo_products.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/views.py": ["/snakeoil_webshop/forms.py", "/snakeoil_webshop/models.py", "/snakeoil_webshop/serializers.py"], "/snakeoil_webshop/tests.py": ["/snakeoil_webshop/models.py"]} |
78,402 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/migrations/0001_initial.py | # Generated by Django 3.2 on 2021-05-03 17:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Initial schema: Product, ShoppingCart (one per user) and
    ShoppingCartItem (unique per cart/product pair).
    Auto-generated by Django — do not edit the operations by hand.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sku', models.SlugField(unique=True)),
                ('description', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=8)),
                ('num_in_stock', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ShoppingCart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ShoppingCartItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_items', models.IntegerField(default=1)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shopping_cart_items', to='snakeoil_webshop.product')),
                ('shopping_cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shopping_cart_items', to='snakeoil_webshop.shoppingcart')),
            ],
            options={
                'unique_together': {('shopping_cart', 'product')},
            },
        ),
    ]
| {"/snakeoil_webshop/serializers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/helpers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/forms.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/admin.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/management/commands/add_demo_products.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/views.py": ["/snakeoil_webshop/forms.py", "/snakeoil_webshop/models.py", "/snakeoil_webshop/serializers.py"], "/snakeoil_webshop/tests.py": ["/snakeoil_webshop/models.py"]} |
78,403 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/models.py | import json
from django.db import models
from django.contrib.auth.models import User
class Product(models.Model):
    """
    An abstract product definition. To keep things simple, we also include
    price and stock count information right in this model.
    """

    # Text fields for identifying the product.
    sku = models.SlugField(unique=True)  # unique product code
    name = models.TextField()
    description = models.TextField()

    # Timestamps for monitoring the life cycle of the product.
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)      # refreshed on every save

    # These fields should really be derived from other models,
    # but let's just keep things simple and include some per-product
    # constants here.
    price = models.DecimalField(default=0.00, decimal_places=2, max_digits=8)
    num_in_stock = models.IntegerField(default=0)

    def __str__(self):
        """Human-readable identifier, e.g. for admin listings."""
        return f"{self.sku} - {self.name}"

    def as_json(self):
        """Stub replaced at runtime when the serializers module is imported
        (see the error message); kept so the name is part of the model API."""
        raise NotImplementedError("Product.as_json must be overridden by importing the serializers.")
class ShoppingCart(models.Model):
    """
    A collection of shopped items associated with a particular user.
    The OneToOneField means each user has at most one cart.
    """

    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE
    )

    def __str__(self):
        """Human-readable identifier, e.g. for admin listings."""
        return f"Cart {self.pk} of {self.user.username}"
class ShoppingCartItem(models.Model):
    """
    A single entry in the shopping cart. Represents a given number of
    physical items corresponding to a given product definition.
    """

    class Meta:
        # Make sure we don't get two entries of the same product in
        # any shopping cart.
        unique_together = ['shopping_cart', 'product']

    shopping_cart = models.ForeignKey(
        ShoppingCart,
        on_delete=models.CASCADE,
        related_name="shopping_cart_items"
    )
    product = models.ForeignKey(
        Product,
        on_delete=models.CASCADE,
        related_name="shopping_cart_items"
    )
    # Quantity of this product in the cart; new lines start at 1.
    num_items = models.IntegerField(default=1)

    def __str__(self):
        """Human-readable identifier, e.g. "3 x SKU001"."""
        return f"{self.num_items} x {self.product.sku}"
78,404 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/forms.py | from django import forms
from django.db.models import Q
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.forms import widgets
from snakeoil_webshop.models import Product
class ProductSearchForm(forms.Form):
    """
    A simple form for searching products by SKU (product code) or name.
    Allows the user to choose whether to sort the results by price or name.
    """

    #############
    # CONSTANTS #
    #############

    # Constants for sorting options. The values double as arguments to
    # QuerySet.order_by(), hence the leading '-' for descending order.
    NAME_ASC = "name"
    NAME_DESC = "-name"
    PRICE_ASC = "price"
    PRICE_DESC = "-price"

    SORTING_CHOICES = [
        (NAME_ASC, "Name, A-Z"),
        (NAME_DESC, "Name, Z-A"),
        (PRICE_ASC, "Price, low first"),
        (PRICE_DESC, "Price, high first"),
    ]

    #################
    # SEARCH FIELDS #
    #################

    # A substring search field that will hit the SKU or name of a product.
    search_string = forms.CharField(
        label="Search products",
        required=False,
        help_text="Search for products by name or product code."
    )

    # A list of available ways to sort the results.
    sort_by = forms.ChoiceField(
        label="Sort by",
        required=True,
        choices=SORTING_CHOICES
    )

    ###########
    # METHODS #
    ###########

    def __init__(self, *args, **kwargs):
        """Attach a crispy-forms helper so templates can render the form."""
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.add_input(Submit('submit', 'List products', css_class='btn-success'))

    def filter_results(self):
        """
        Assuming this form has bound data, return the products
        that match the search criteria entered into the form.
        """
        if not self.is_valid():
            # Return nothing if the query was invalid.
            # This should allow the form page to reload
            # quickly to show the validation errors.
            return Product.objects.none()

        results = Product.objects.all()

        search_string = self.cleaned_data.get("search_string", "")
        if search_string:
            # Case-insensitive substring match against SKU or name.
            q = Q(sku__icontains=search_string) | Q(name__icontains=search_string)
            results = results.filter(q)

        # sort_by is validated against SORTING_CHOICES, so it is safe
        # to pass straight to order_by().
        sort_by = self.cleaned_data.get("sort_by", self.NAME_ASC)
        results = results.order_by(sort_by)

        return results

    def give_all_results(self):
        """
        Return all products that are for sale.
        Called when a fresh empty form is presented
        to the user.
        """
        results = Product.objects.all().order_by(self.NAME_ASC)
        return results
class ProductCreationForm(forms.ModelForm):
    """
    This form collects the details necessary for adding a new
    product into the web shop's database.
    """

    class Meta:
        model = Product
        fields = [
            'sku',
            'name',
            'description',
            'price',
            'num_in_stock',
        ]
        labels = {
            "sku": "Product code",
            "num_in_stock": "Number of items in stock"
        }
        widgets = {
            # Render the TextField-backed name as a single-line input.
            "name": forms.TextInput
        }

    def __init__(self, *args, **kwargs):
        """Attach a crispy-forms helper so templates can render the form."""
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.add_input(Submit('submit', 'Add product', css_class='btn-success'))

    def clean_price(self):
        """
        Make sure the given price isn't negative.
        """
        price = self.cleaned_data.get("price")
        if price < 0:
            raise forms.ValidationError("The price of a product cannot be negative.")
        return price

    def clean_num_in_stock(self):
        """
        Make sure the given number of items in stock isn't negative.
        """
        num_in_stock = self.cleaned_data.get("num_in_stock")
        if num_in_stock < 0:
            raise forms.ValidationError("The stock count of a product cannot be negative.")
        return num_in_stock

    def give_all_results(self):
        """
        Return all product definitions in the reverse update order,
        i.e. the last product modified comes up first.
        """
        results = Product.objects.all().order_by('-updated')
        return results
class AddToCartForm(forms.Form):
    """
    A lightweight form for validating requests to add a product
    to the shopping cart.
    """

    # The product ID (primary key of the Product to add).
    pk = forms.IntegerField()

    # How many items to add? Optional.
    num_items = forms.IntegerField(required=False)
78,405 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil/wsgi.py | """
WSGI config for snakeoil project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import sys
from django.core.wsgi import get_wsgi_application
from dotenv import load_dotenv
# Read environment configuration from the project's .env file before
# Django builds the application.
load_dotenv()

# Make the project importable from its Bitnami deployment location.
sys.path.append('/opt/bitnami/projects/snake-oil-webshop')

# The WSGI callable that the application server looks up.
application = get_wsgi_application()
| {"/snakeoil_webshop/serializers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/helpers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/forms.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/admin.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/management/commands/add_demo_products.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/views.py": ["/snakeoil_webshop/forms.py", "/snakeoil_webshop/models.py", "/snakeoil_webshop/serializers.py"], "/snakeoil_webshop/tests.py": ["/snakeoil_webshop/models.py"]} |
78,406 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/admin.py | from django.contrib import admin
from django.db.models import Sum, F
from snakeoil_webshop.models import Product, ShoppingCart, ShoppingCartItem
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for products; timestamps are shown read-only."""

    fields = [
        'sku',
        'description',
        'created',
        'updated',
        'price',
        'num_in_stock'
    ]
    # created/updated are auto_now(_add) fields and must not be edited.
    readonly_fields = ['created', 'updated']
    list_display = ['__str__', 'created', 'updated', 'price', 'num_in_stock']
class ShoppingCartItemInline(admin.StackedInline):
    """Inline editor for a cart's line items, shown on the cart admin page."""
    model = ShoppingCartItem
    # Number of empty extra line-item forms to render.
    extra = 3
class ShoppingCartAdmin(admin.ModelAdmin):
    """Admin for ShoppingCart with computed item-count and total-price columns."""
    fields = ['user', 'item_count', 'total_price']
    readonly_fields = ['item_count', 'total_price']
    list_display = ['__str__', 'item_count', 'total_price']
    inlines = [ShoppingCartItemInline]

    def item_count(self, obj):
        """Return the total number of items across all cart lines (0 if empty)."""
        # aggregate() always includes the annotation key in its result, with a
        # value of None when there are no rows -- so dict.get()'s default could
        # never apply. Coalesce explicitly instead of returning None.
        total_num_items = obj.shopping_cart_items.aggregate(
            total_num_items=Sum('num_items')
        )['total_num_items']
        return total_num_items or 0

    def total_price(self, obj):
        """Return the sum of price * quantity over all cart lines (0.00 if empty)."""
        # Same empty-cart coalescing as item_count: Sum over no rows is None.
        total_price = obj.shopping_cart_items.aggregate(
            total_price=Sum(
                F('product__price')*F('num_items')
            )
        )['total_price']
        return total_price or 0.00
# Expose both models in the Django admin with their customized options.
admin.site.register(Product, ProductAdmin)
admin.site.register(ShoppingCart, ShoppingCartAdmin)
78,407 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/management/commands/add_demo_products.py | from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from snakeoil_webshop.models import Product
class Command(BaseCommand):
    """Management command that (re)creates a fixed set of demo products."""

    # Option key used by callers (e.g. tests) to suppress console output.
    SILENT = "silent"

    # SKUs of the demo products; referenced by the test suite.
    SKU001 = "SKU001"
    SKU002 = "SKU002"
    SKU003 = "SKU003"
    SKU004 = "SKU004"

    help = "Create a set of dummy products suitable for demonstration purposes. Existing products with overlapping SKUs will be removed."

    def handle(self, *args, **options):
        """
        Create four different types of snake oil.
        First remove any products with conflicting SKUs.
        """
        demo_products = [
            {
                "sku": self.SKU001,
                "name": "Clear snake oil",
                "description": "A clear liquid potentially possessing some aspects of the the essence of the regenerative properties of snake oil.",
                "price": "11.99",
                "num_in_stock": "108"
            },
            {
                "sku": self.SKU002,
                "name": "Turbid snake oil",
                "description": "A rather opaque extract from who knows what part of some venomous snake, likely from a deep jungle somewhere far away.",
                "price": "16.99",
                "num_in_stock": "273"
            },
            {
                "sku": self.SKU003,
                "name": "Thick snake oil",
                "description": "A viscous slime with a rather alarming aroma. Might be flammable enough to pose a moderate danger indoors.",
                "price": "27.99",
                "num_in_stock": "35"
            },
            {
                "sku": self.SKU004,
                "name": "Potent snake oil",
                "description": "This actively bubbling mixture of unknown biochemical agents will almost certainly cure all disese and illness that it by itself does not cause.",
                "price": "33.99",
                "num_in_stock": "1899"
            }
        ]

        for product_definition in demo_products:
            # Delete any old products with a conflicting SKU. filter().delete()
            # handles zero, one, or several matches in a single query, unlike
            # the previous get()/delete() pair, which raised
            # MultipleObjectsReturned when duplicate SKUs existed.
            Product.objects.filter(sku=product_definition["sku"]).delete()

            # Create a new demo product.
            product = Product.objects.create(**product_definition)

            if not options.get(self.SILENT, False):
                self.stdout.write(self.style.SUCCESS(f"Demo product created: {product.sku} - {product.name}."))
78,408 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/views.py | import json
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import TemplateView, RedirectView
from django.db.models import Sum, F
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from snakeoil_webshop import helpers
from snakeoil_webshop.forms import ProductSearchForm, AddToCartForm, ProductCreationForm
from snakeoil_webshop.models import Product, ShoppingCart, ShoppingCartItem
# Import the whole serializers module to extend Product with the as_json method.
import snakeoil_webshop.serializers # type: ignore
# Constants for identifying which view is getting rendered.
# Each view passes one of these to its template via the 'active_view'
# context key.
SHOP = "SHOP"
PRODUCT_MANAGEMENT = "PRODUCT_MANAGEMENT"
SHOPPING_CART = "SHOPPING_CART"
class ShopView(LoginRequiredMixin, TemplateView):
    """Main shop page: a product listing driven by a search form."""

    template_name = "shop.html"
    login_url = '/login/'
    redirect_field_name = 'next'

    def get_context_data(self, *args, **kwargs):
        """Build the template context for both the initial GET and a search POST."""
        context = super().get_context_data(*args, **kwargs)

        searching = self.request.method == 'POST'
        if searching:
            # A filled search form arrived; filter the catalogue with it.
            search_form = ProductSearchForm(self.request.POST)
            matches = search_form.filter_results()
        else:
            # First render: empty form, full catalogue.
            search_form = ProductSearchForm()
            matches = search_form.give_all_results()

        cart = helpers.find_active_cart_for_user(self.request.user)

        context.update({
            "form": search_form,
            "products": matches,
            "num_results": len(matches),
            "active_view": SHOP,
            "shopping_cart_string": cart.summarize()
        })
        return context

    def post(self, request, *args, **kwargs):
        """A search submission renders the same page as GET."""
        return self.get(request, *args, **kwargs)
class ProductManagementView(ShopView):
    # Product catalogue management page (create new products).
    # NOTE(review): LoginRequiredMixin only requires authentication; the
    # manager-only restriction seen in the tests is presumably enforced
    # elsewhere (e.g. in the URLconf) -- confirm.
    template_name = "product_management.html"
    login_url = '/login/'
    redirect_field_name = 'next'
    def get_context_data(self, *args, **kwargs):
        """Build the context with a product creation form and the full catalogue."""
        # Deliberately calls super(ShopView, self), skipping ShopView's own
        # get_context_data (which would build the search-form context) and
        # going straight to TemplateView's.
        context = super(ShopView, self).get_context_data(*args, **kwargs)
        new_product = None
        if self.request.method == 'POST':
            # We received a filled product creation form from the user.
            # Remember the settings to make it easier to add similar products.
            form = ProductCreationForm(self.request.POST)
            if form.is_valid():
                new_product = self.create_new_product(form.cleaned_data)
        else:
            # We're serving the product management page out for the first time
            # with an empty product creation form.
            form = ProductCreationForm()
        products = form.give_all_results()
        active_shopping_cart = helpers.find_active_cart_for_user(self.request.user)
        added_context = {
            "form": form,
            "products": products,
            "num_results": len(products),
            "new_product": new_product,
            "active_view": PRODUCT_MANAGEMENT,
            "shopping_cart_string": active_shopping_cart.summarize()
        }
        context.update(added_context)
        return context
    def post(self, request, *args, **kwargs):
        # A product-creation submission renders the same page as GET.
        return self.get(request, *args, **kwargs)
    def create_new_product(self, cleaned_data):
        """
        Create a new product corresponding to the given
        cleaned data from a ProductCreationForm.
        """
        product = Product.objects.create(**cleaned_data)
        return product
class ShoppingCartView(LoginRequiredMixin, TemplateView):
    """Shows the contents of the requesting user's shopping cart."""

    template_name = "shopping_cart.html"
    login_url = '/login/'
    redirect_field_name = 'next'

    def get_context_data(self, *args, **kwargs):
        """Add the cart's line items, item count, and total price to the context."""
        context = super(ShoppingCartView, self).get_context_data(*args, **kwargs)

        active_shopping_cart = helpers.find_active_cart_for_user(self.request.user)
        items_in_cart = active_shopping_cart.shopping_cart_items.select_related("product")

        # aggregate() always returns the annotation key; its value is None when
        # the cart has no rows, so dict.get()'s default could never apply.
        # Coalesce explicitly so an empty cart shows 0 / 0.00, not "None".
        total_num_items = active_shopping_cart.shopping_cart_items.aggregate(
            total_num_items=Sum('num_items'),
        )['total_num_items'] or 0

        total_price = active_shopping_cart.shopping_cart_items.aggregate(
            total_price=Sum(
                F('product__price')*F('num_items')
            )
        )['total_price'] or 0.00

        additional_context = {
            "items_in_cart": items_in_cart,
            "num_items": total_num_items,
            "total_price": total_price,
            "active_view": SHOPPING_CART,
            "shopping_cart_string": active_shopping_cart.summarize()
        }
        context.update(additional_context)
        return context
class AddToCartView(APIView):
    """API endpoint that drops items of a product into the acting user's cart."""

    permission_classes = [IsAuthenticated]

    def post(self, request, *args, **kwargs):
        """Validate the payload, then add the requested product to the cart."""
        # The form guarantees an integer product pk and, when present,
        # an integer item count.
        form = AddToCartForm(request.data)
        if not form.is_valid():
            return Response(
                "Bad request.",
                status=400
            )

        product_id = form.cleaned_data.get("pk")
        quantity = form.cleaned_data.get("num_items", None)
        if quantity is None:
            # Add one item unless told otherwise.
            quantity = 1

        # The identifier is an integer -- but does it name a real product?
        try:
            product = Product.objects.get(pk=product_id)
        except Product.DoesNotExist:
            return Response(
                f"The indicated product (ID: {product_id}) does not exist.",
                status=404
            )

        # Put the product into the acting user's cart, creating the cart
        # and the cart line on demand.
        cart, _ = ShoppingCart.objects.get_or_create(
            user=request.user
        )
        line, line_created = ShoppingCartItem.objects.get_or_create(
            shopping_cart=cart,
            product=product
        )

        # A fresh line starts at the requested quantity; an existing one
        # accumulates it.
        line.num_items = quantity if line_created else line.num_items + quantity
        line.save()

        return Response(
            {
                "product": product.as_json(),
                "num_items_added": quantity,
                "cart_summary": cart.summarize()
            },
            status=200
        )
class ClearCartView(RedirectView):
    """
    Empties the requesting user's shopping cart, then sends them
    back to look at the (now empty) cart.
    """
    permanent = False
    query_string = True
    pattern_name = "shopping-cart"

    def get_redirect_url(self, *args, **kwargs):
        """Delete every line in the user's cart before resolving the redirect."""
        cart = helpers.find_active_cart_for_user(self.request.user)
        cart.shopping_cart_items.all().delete()
        return super().get_redirect_url(*args, **kwargs)
78,409 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/migrations/0002_product_name.py | # Generated by Django 3.2 on 2021-05-04 17:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the required `name` column to Product.
    # preserve_default=False means 'unnamed' is applied once to existing rows
    # and is not kept as a model default afterwards.
    dependencies = [
        ('snakeoil_webshop', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='name',
            field=models.TextField(default='unnamed'),
            preserve_default=False,
        ),
    ]
| {"/snakeoil_webshop/serializers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/helpers.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/forms.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/admin.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/management/commands/add_demo_products.py": ["/snakeoil_webshop/models.py"], "/snakeoil_webshop/views.py": ["/snakeoil_webshop/forms.py", "/snakeoil_webshop/models.py", "/snakeoil_webshop/serializers.py"], "/snakeoil_webshop/tests.py": ["/snakeoil_webshop/models.py"]} |
78,410 | drjustigious/snake-oil-webshop | refs/heads/main | /snakeoil_webshop/tests.py | import decimal
from snakeoil_webshop.models import Product
from snakeoil_webshop import helpers
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.urls import reverse
from snakeoil_webshop.management.commands import add_demo_users, add_demo_products
class PermissionsTestCase(TestCase):
    """
    Verify that each user group can see the appropriate resources,
    and only those resources.
    """
    def setUp(self):
        # Populate the test database with demo users and keep one
        # reference per role.
        add_demo_users.Command().handle(silent=True)
        self.staff_user = User.objects.get(username=add_demo_users.Command.STAFF_USERNAME)
        self.manager = User.objects.get(username=add_demo_users.Command.MANAGER_USERNAME)
        self.customer = User.objects.get(username=add_demo_users.Command.CUSTOMER_X_USERNAME)
        self.client = Client()
    def test_everyone_can_see_shop(self):
        """
        Everyone can see the main shop page.
        """
        self.assert_get_url_status_as_user(
            self.customer,
            "shop",
            200
        )
        self.assert_get_url_status_as_user(
            self.manager,
            "shop",
            200
        )
        self.assert_get_url_status_as_user(
            self.staff_user,
            "shop",
            200
        )
    def test_everyone_can_see_shopping_cart(self):
        """
        Everyone can see the shopping cart.
        """
        self.assert_get_url_status_as_user(
            self.customer,
            "shopping-cart",
            200
        )
        self.assert_get_url_status_as_user(
            self.manager,
            "shopping-cart",
            200
        )
        self.assert_get_url_status_as_user(
            self.staff_user,
            "shopping-cart",
            200
        )
    def test_customer_cannot_see_product_management(self):
        """
        Ordinary customers should not be able to access
        the product management page. They will be redirected
        to login instead.
        """
        # NOTE(review): ProductManagementView only uses LoginRequiredMixin,
        # which checks authentication, not group membership; the expected 302
        # for a logged-in customer presumably relies on access control applied
        # elsewhere (e.g. the URLconf) -- confirm.
        self.assert_get_redirected_to_login(
            self.customer,
            "product-management"
        )
        self.assert_get_url_status_as_user(
            self.manager,
            "product-management",
            200
        )
        self.assert_get_url_status_as_user(
            self.staff_user,
            "product-management",
            200
        )
    def assert_get_url_status_as_user(self, user, url_name, expected_http_status):
        """
        Log in as the given user, try to GET the given URL
        and assert that the response status was as expected.
        """
        self.client.force_login(user)
        response = self.client.get(
            reverse(url_name)
        )
        self.assertEqual(response.status_code, expected_http_status)
        self.client.logout()
    def assert_get_redirected_to_login(self, user, url_name):
        # Log in as the given user, GET the URL, and assert the response is a
        # redirect pointing at the login page.
        self.client.force_login(user)
        response = self.client.get(
            reverse(url_name)
        )
        self.assertEqual(response.status_code, 302)
        # NOTE(review): Django login redirects usually look like
        # "/login/?next=...", which does not start with the bare string
        # "login" -- verify this assertion can actually pass as written.
        self.assertTrue(response.get("Location").startswith("login"))
        self.client.logout()
class CartTransactionsTestCase(TestCase):
    """
    Verify that a customer can add and remove products
    to and from their shopping cart.
    """
    def setUp(self):
        # Seed the test database with demo users and demo products.
        add_demo_users.Command().handle(silent=True)
        add_demo_products.Command().handle(silent=True)
        self.customer = User.objects.get(username=add_demo_users.Command.CUSTOMER_X_USERNAME)
        self.client = Client()
    def test_can_add_single_items_to_cart(self):
        """
        Add two individual items to the shopping cart
        with an implied item count of 1 each.
        """
        products_to_add = [
            Product.objects.get(sku=add_demo_products.Command.SKU001),
            Product.objects.get(sku=add_demo_products.Command.SKU002)
        ]
        self.client.force_login(self.customer)
        # Add first item. Omitting 'num_items' exercises the endpoint's
        # default quantity of 1.
        response = self.client.post(
            reverse("add-to-cart"),
            {'pk': products_to_add[0].pk}
        )
        self.assertEqual(response.status_code, 200)
        # Add second item.
        response = self.client.post(
            reverse("add-to-cart"),
            {'pk': products_to_add[1].pk}
        )
        self.assertEqual(response.status_code, 200)
        # Check what's in the cart: one line per posted product.
        cart = helpers.find_active_cart_for_user(self.customer)
        products_in_cart = [
            cart.shopping_cart_items.select_related("product").get(product=products_to_add[0]),
            cart.shopping_cart_items.select_related("product").get(product=products_to_add[1])
        ]
        self.assertEqual(products_to_add[0].pk, products_in_cart[0].product.pk)
        self.assertEqual(products_to_add[1].pk, products_in_cart[1].product.pk)
        self.client.logout()
    def test_can_add_multiple_items_of_product_to_cart(self):
        """
        Verify that it's possible to choose how many items of a given
        product should be dropped into the shopping cart.
        """
        product_to_add = Product.objects.get(sku=add_demo_products.Command.SKU001)
        num_to_add = 3
        self.client.force_login(self.customer)
        # Add the items twice.
        response = self.client.post(
            reverse("add-to-cart"),
            {
                'pk': product_to_add.pk,
                'num_items': num_to_add
            }
        )
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse("add-to-cart"),
            {
                'pk': product_to_add.pk,
                'num_items': num_to_add
            }
        )
        self.assertEqual(response.status_code, 200)
        # Check what's in the cart: both posts should have accumulated
        # onto the same cart line.
        cart = helpers.find_active_cart_for_user(self.customer)
        cart_item = cart.shopping_cart_items.select_related("product").get(product=product_to_add)
        self.assertEqual(cart_item.num_items, 2*num_to_add)
        self.client.logout()
    def test_can_clear_cart(self):
        """
        Verify that a customer can clear their shopping cart.
        """
        product_to_add = Product.objects.get(sku=add_demo_products.Command.SKU001)
        num_to_add = 3
        self.client.force_login(self.customer)
        # Add items to cart.
        response = self.client.post(
            reverse("add-to-cart"),
            {
                'pk': product_to_add.pk,
                'num_items': num_to_add
            }
        )
        self.assertEqual(response.status_code, 200)
        # Clear the cart. The user should be redirected back to the cart view.
        response = self.client.get(
            reverse("clear-cart")
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.get("Location"), reverse("shopping-cart"))
        # Assert the cart is empty. An empty queryset is falsy, so the
        # `or None` turns "no items" into None for assertIsNone.
        cart = helpers.find_active_cart_for_user(self.customer)
        cart_items = cart.shopping_cart_items.all() or None
        self.assertIsNone(cart_items)
        self.client.logout()
class ProductManagementTestCase(TestCase):
    """
    Verify a shop manager can add new product definitions.
    Also verify an ordinary customer can't.
    """
    def setUp(self):
        # Seed the test database with the demo users for both roles.
        add_demo_users.Command().handle(silent=True)
        self.manager = User.objects.get(username=add_demo_users.Command.MANAGER_USERNAME)
        self.customer = User.objects.get(username=add_demo_users.Command.CUSTOMER_X_USERNAME)
        self.client = Client()
    def test_manager_can_add_new_product(self):
        """
        Verify that a shop manager can add a new product.
        """
        # Local constants for the product-form field names.
        SKU = "sku"
        NAME = "name"
        DESCRIPTION = "description"
        PRICE = "price"
        NUM_IN_STOCK = "num_in_stock"
        test_product_details = {
            SKU: "SKU005",
            NAME: "testname",
            DESCRIPTION: "test description",
            PRICE: decimal.Decimal("9.99"),
            NUM_IN_STOCK: 123
        }
        # Create the new product.
        self.client.force_login(self.manager)
        response = self.client.post(
            reverse("product-management"),
            test_product_details
        )
        # TODO: The standard HTTP status for "created" would be 201.
        self.assertEqual(response.status_code, 200)
        # Find the new product and check that the details match.
        product = Product.objects.get(sku=test_product_details[SKU])
        self.assertEqual(product.sku, test_product_details[SKU])
        self.assertEqual(product.name, test_product_details[NAME])
        self.assertEqual(product.description, test_product_details[DESCRIPTION])
        self.assertEqual(product.price, test_product_details[PRICE])
        self.assertEqual(product.num_in_stock, test_product_details[NUM_IN_STOCK])
        self.client.logout()
    def test_customer_cannot_add_products(self):
        """
        Verify that an ordinary customer cannot add a product.
        """
        SKU = "sku"
        NAME = "name"
        DESCRIPTION = "description"
        PRICE = "price"
        NUM_IN_STOCK = "num_in_stock"
        test_product_details = {
            SKU: "SKU005",
            NAME: "testname",
            DESCRIPTION: "test description",
            PRICE: 9.99,
            NUM_IN_STOCK: 123
        }
        # Try to create the new product. The user should be
        # redirected to login.
        self.client.force_login(self.customer)
        response = self.client.post(
            reverse("product-management"),
            test_product_details
        )
        self.assertEqual(response.status_code, 302)
        # NOTE(review): Django login redirects usually look like
        # "/login/?next=...", which does not start with the bare string
        # "login" -- verify this assertion can actually pass as written.
        self.assertTrue(response.get("Location").startswith("login"))
        # Make sure the product did not get created: get() either raises
        # DoesNotExist (the expected outcome) or returns an object that
        # then fails assertIsNone.
        try:
            product = Product.objects.get(sku=test_product_details[SKU])
            self.assertIsNone(product)
        except Product.DoesNotExist:
            pass
        self.client.logout()
78,415 | aime20ic/collab_compet | refs/heads/master | /collab_compet/unity_env.py | import numpy as np
from unityagents import UnityEnvironment
class UnityEnv():
    """
    Unity environment simulation

    Thin wrapper around unityagents.UnityEnvironment exposing a gym-like
    reset/step/close/seed interface for one or more agents.
    """
    def __init__(self, path, name, worker_id=0, train=False, seed=None, verbose=False):
        """
        Class constructor / UnityEnv initializer
        Args:
            path (string): Path to Unity simulation
            name (string): Name for environment
            worker_id (int): Indicates port to use for communication
            train (bool): Unity environment training mode
            seed (int): Seed for random number generation
            verbose (bool): Verbosity
        Returns:
            UnityEnv class object
        """
        # Initialize environment variables
        self.env = None
        self.name = name
        self.worker_id = worker_id
        self.brain_name = None
        self.brain = None
        self.agents = None
        self.action_size = None
        self.state_size = None
        self.step_count = 0
        self.rng = None
        self.rng_seed = None
        self.verbose = verbose
        # Initialize environment status variables
        self.env_info = None
        self.state = None
        self.reward = None
        self.done = None
        # Create environment. The seed is forwarded to the Unity simulation
        # itself; self.seed() below additionally seeds the local numpy RNG.
        self.env = UnityEnvironment(file_name=path, worker_id=self.worker_id, seed=seed)
        # Get default Unity environment "brain"
        self.brain_name = self.env.brain_names[0]
        self.brain = self.env.brains[self.brain_name]
        # Set seed
        self.seed(seed)
        # Reset environment (also populates agents/action_size/state_size).
        self.reset(train)
        # Debug
        if self.verbose:
            print('\nNumber of agents: {}'.format(len(self.agents)))
            print('Number of actions: {}'.format(self.action_size))
            print('States look like: {}'.format(self.state))
            print('States have length: {}\n'.format(self.state_size))
        return
    def reset(self, train=False):
        """
        Reset environment
        Args:
            train (bool): Use training mode
        Returns:
            Environment initial observation (vector)
        """
        # Reset environment
        self.env_info = self.env.reset(train_mode=train)[self.brain_name]
        # Set environment variables
        self.agents = self.env_info.agents
        self.action_size = self.brain.vector_action_space_size
        # Set state. With multiple agents the state is a 2D array (one row
        # per agent); with a single agent it is that agent's flat vector.
        if len(self.agents) > 1:
            self.state = self.env_info.vector_observations
            self.state_size = self.state.shape[1]
        else:
            self.state = self.env_info.vector_observations[0]
            self.state_size = len(self.state)
        return self.state
    def step(self, action=None):
        """
        Perform specified action in environment
        Args:
            action (int/float or List of int/float): Action to be performed
        Returns:
            Tuple containing (state, action, reward, next_state, done)
                state (vector): Environment observation before action
                action (int): Action to be performed
                reward (float): Reward for performing specified action
                state (vector): Environment observation after action
                done (bool): Is simulation complete
        """
        # Get current state
        state = self.state
        # Send action to environment
        self.env_info = self.env.step(action)[self.brain_name]
        # Get environment status. In multi-agent mode reward/done are lists
        # (one entry per agent); in single-agent mode they are scalars.
        if len(self.agents) > 1:
            next_state = self.env_info.vector_observations
            self.reward = self.env_info.rewards
            self.done = self.env_info.local_done
        else:
            next_state = self.env_info.vector_observations[0]
            self.reward = self.env_info.rewards[0]
            self.done = self.env_info.local_done[0]
        # Set current state
        self.state = next_state
        # Increase step counter
        self.step_count += 1
        return state, action, self.reward, next_state, self.done
    def close(self):
        """
        Close environment
        Args:
            None
        Returns:
            None
        """
        self.env.close()
        return
    def seed(self, seed=None):
        """
        Set seed for random number generation, sampling, & repeatibility
        Note: this reseeds only the wrapper's local numpy RNG; the Unity
        simulation received its seed once, at construction time.
        Args:
            seed (int): Seed for random number generation
        Returns:
            None
        """
        # Error check
        if not isinstance(seed, int) and seed is not None:
            raise ValueError('Specified seed must be integer or None')
        # Set seed & random number generator
        self.rng_seed = seed
        self.rng = np.random.RandomState(seed)
        return
| {"/collab_compet/run_agent.py": ["/collab_compet/agent.py", "/collab_compet/unity_env.py"]} |
78,416 | aime20ic/collab_compet | refs/heads/master | /collab_compet/run_agent.py | import json
import time
import argparse
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from collections import deque
from collab_compet.agent import MADDPG
from collab_compet.unity_env import UnityEnv
def parse_args():
    """
    Parse command line arguments for an agent run.

    Args:
        None

    Returns:
        Argparse NameSpace with a concrete `run_id` and with `output`,
        `actor`, and `critic` normalized to Path objects (or None).
    """
    parser = argparse.ArgumentParser(description='Agent hyperparameters')
    parser.add_argument('--actor', nargs='*', help='Path to actor models to load')
    parser.add_argument('--critic', nargs='*', help='Path to critic models to load')
    parser.add_argument('--n-episodes', type=int, default=2000, help='Maximum number of training episodes')
    parser.add_argument('--output', type=str, default='./output', help='Directory to save models, logs, & other output')
    parser.add_argument('--run-id', type=int, help='Execution run identifier')
    parser.add_argument('--sim', type=str, default='Tennis_Windows_x86_64/Tennis.exe',
        help='Path to Unity Tennis simulation')
    parser.add_argument('--test', action='store_true', help='Test mode, no agent training')
    parser.add_argument('--window', type=int, default=100, help='Window size to use for terminal condition check')
    parser.add_argument('--seed', type=int, default=0, help='Seed for repeatability')
    parsed = parser.parse_args()

    # Fall back to the current Unix time as the run identifier.
    if parsed.run_id is None:
        parsed.run_id = int(time.time())

    # Normalize string paths into Path objects; run output lives in a
    # per-run subdirectory of the requested output directory.
    parsed.output = Path(parsed.output) / str(parsed.run_id)
    parsed.actor = [Path(p) for p in parsed.actor] if parsed.actor else None
    parsed.critic = [Path(p) for p in parsed.critic] if parsed.critic else None

    return parsed
def write2path(text, path):
    """
    Append text to the file at `path`, creating parent directories
    and the file itself when they do not exist yet.

    Args:
        text (str): Text to log
        path (Path): Path object

    Returns:
        None
    """
    # Create parent directories as needed.
    path.parent.mkdir(parents=True, exist_ok=True)

    # Append mode creates the file if it is missing, which makes the
    # original exists()/write_text() branch redundant and removes its
    # check-then-act race.
    with path.open('a') as f:
        f.write(text)
    return
def plot_performance(scores, name, window_size):
    """
    Plot summary of agent performance on environment and save it to disk.

    Args:
        scores (list of float): Score per simulation episode
        name (Path): Name for the saved figure file
        window_size (int): Windowed average size

    Returns:
        None
    """
    window_avg = []
    window_std = []
    window = deque(maxlen=window_size)

    # Running average over all episodes so far. Using cumsum makes this O(n)
    # instead of the original O(n^2) repeated np.mean over growing slices.
    avg = np.cumsum(scores) / np.arange(1, len(scores) + 1)

    # Rolling window average and standard deviation.
    for score in scores:
        window.append(score)
        window_avg.append(np.mean(window))
        window_std.append(np.std(window))

    # Create 95% confidence interval (2*std)
    lower_95 = np.array(window_avg) - 2 * np.array(window_std)
    upper_95 = np.array(window_avg) + 2 * np.array(window_std)

    # Plot scores
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.scatter(np.arange(len(scores)), scores, color='cyan', label='Scores')
    plt.plot(avg, color='blue', label='Average')
    plt.plot(window_avg, color='red', label='Windowed Average (n={})'.format(window_size))
    plt.fill_between(np.arange(len(window_std)), lower_95, upper_95, color='red', alpha=0.1)
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.legend(loc='upper left')
    ax.margins(y=.1, x=.1)  # Help with scaling
    plt.show(block=False)

    # Save figure
    name.parent.mkdir(parents=True, exist_ok=True)
    plt.savefig(name)
    plt.close()
    return
def eval_agent(agent, env, eval_type, **kwargs):
    """
    Train or test an agent in the given environment.
    Args:
        agent (MADDPG): Multi-agent DDPG wrapper
        env (UnityEnv): Unity simulation environment
        eval_type (str): 'train' or 'test'
        **kwargs: Optional settings:
            n_episodes (int): Maximum number of episodes (default 2000)
            max_t (int): Maximum steps per episode (default 999)
            goal (float): Windowed average score that counts as solved (default 0.5)
            window (int): Rolling-average window size (default 100)
            run_id (int): Run identifier used in output file names
            output (Path): Directory for models, logs, and plots
    Returns:
        None
    Raises:
        ValueError: If eval_type is not 'train' or 'test'.
    """
    eval_options = ['train', 'test']  # evaluation options
    # Set score variables
    scores = []  # scores from each episode
    best_avg_score = -100  # best averaged window score
    best_avg_score_std = None  # best averaged window score std
    score_goal = kwargs.get('goal', 0.5)  # goal to get to
    window_size = kwargs.get('window', 100)  # size for rolling window
    scores_window = deque(maxlen=window_size)  # most recent window_size scores
    # Error check
    if eval_type.lower() not in eval_options:
        raise ValueError(
            'Invalid eval_type specified. Options are {}'.format(eval_options)
        )
    # Initialize key word argument variables
    max_t = kwargs.get('max_t', 999)
    n_episodes = kwargs.get('n_episodes', 2000)
    run_id = kwargs.get('run_id', int(time.time()))
    output = kwargs.get('output', Path('./output/' + str(run_id) + '/'))
    # Create log name
    prefix = str(run_id) + '__' + agent.name + '__' + env.name
    log = output / (prefix + '__performance.log')
    # Start timer. elapsed_time() renders wall-clock time since start as HH:MM:SS.
    start_time = time.time()
    elapsed_time = lambda: time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))
    # Iterate through episodes
    for episode in range(n_episodes):
        # Reset environment; one score accumulator per agent.
        score = np.zeros(len(env.agents))
        states = env.reset(train=True if eval_type == 'train' else False)
        # Learn for max_t steps
        for t in range(max_t):
            # Get actions for all agents
            actions = agent.act(states)
            # Send actions for all agents
            _, _, rewards, next_states, dones = env.step(actions)
            # Update all agents (only while training)
            if eval_type == 'train':
                agent.step(states, actions, rewards, next_states, dones)
            # Update next state & score
            states = next_states
            score += rewards
            # Check terminal condition: stop when any agent reports done.
            if any(dones):
                break
        # Get max reward across agents (the Tennis benchmark's episode score).
        score = np.max(score)
        # Save most recent scores
        scores_window.append(score)
        scores.append(score)
        # Calculate average & standard deviation of current scores
        scores_mean = np.mean(scores_window)
        scores_std = np.std(scores_window)
        # Print & log episode performance (carriage return overwrites in place)
        window_summary = ('\rEpisode {}\tAverage Score: {:.2f} ± {:.2f}'
            '\tElapsed Time: {}').format(
            episode, scores_mean, scores_std, elapsed_time())
        print(window_summary, end="")
        # Check terminal condition every window_size episodes
        # NOTE(review): this triggers at episode 0 as well, so a "best" model
        # may be saved after a single episode -- confirm intentional.
        if episode % window_size == 0:
            # Save best performing model (weights)
            if eval_type=='train' and scores_mean >= best_avg_score:
                output.mkdir(parents=True, exist_ok=True)
                agent.save(prefix + '__best')
                best_avg_score = scores_mean
                best_avg_score_std = scores_std
            # Print & log performance of last window_size runs
            window_summary = ('\rEpisode {}\tAverage Score: {:.2f} ± {:.2f}'
                '\tElapsed time: {}').format(episode, scores_mean, scores_std, elapsed_time())
            print(window_summary)
            write2path(window_summary, log)
        # Terminal condition check (early stop / overfitting)
        # if eval_type == 'train' and scores_mean < best_avg_score:
        #     window_summary = ('\rEarly stop at {:d}/{:d} episodes!\rAverage Score: {:.2f} ± {:.2f}'
        #         '\rBest Average Score: {:.2f}\tElapsed Time: {}').format(
        #         episode, n_episodes, scores_mean, scores_std, best_avg_score, elapsed_time())
        #     print(window_summary)
        #     write2path(window_summary, log)
        #     break
        # Terminal condition check (hit goal)
        if eval_type == 'train' and scores_mean - scores_std >= score_goal:
            window_summary = ('\nEnvironment solved in {:d}/{:d} episodes!\tAverage Score: {:.2f}±{:.2f}'
                '\tElapsed time: {}').format(episode, n_episodes, scores_mean, scores_std, elapsed_time())
            print(window_summary)
            write2path(window_summary, log)
            break
    # Save final model (weights)
    if eval_type == 'train':
        output.mkdir(parents=True, exist_ok=True)
        agent.save(prefix)
    # Plot training performance
    plot_performance(scores, output / (prefix + '__training.png'), window_size)
    # Save evaluation parameters
    # NOTE(review): scores_mean/scores_std are only bound inside the episode
    # loop; with n_episodes == 0 the dict below would raise NameError --
    # confirm n_episodes is always >= 1.
    parameters = {
        'n_episodes': n_episodes,
        'eval_type': eval_type,
        'max_t': max_t,
        'agent_seed': agent.rng_seed,
        'env_seed': env.rng_seed,
        'best_avg_score': best_avg_score,
        'best_avg_score_std': best_avg_score_std,
        'scores_mean': scores_mean,
        'scores_std': scores_std
    }
    with open(output / (prefix + '__parameters.json'), 'w') as file:
        json.dump(parameters, file, indent=4, sort_keys=True)
    return
def main(args):
    """
    Run agent in environment

    Args:
        args: Argparse NameSpace object containing command line arguments

    Returns:
        None
    """
    # Spin up the Unity simulation.
    environment = UnityEnv(args.sim, name='Tennis', seed=args.seed)

    # Build the multi-agent DDPG wrapper sized to the environment.
    maddpg = MADDPG(environment.state_size, environment.action_size, n_agents=2,
        random_seed=args.seed, name='MADDPG', run_id=args.run_id, output=args.output)

    # Optionally restore previously saved network weights.
    if args.actor:
        maddpg.load(args.actor, 'actor')
    if args.critic:
        maddpg.load(args.critic, 'critic')

    # Train unless --test was given, then shut the simulation down.
    eval_agent(maddpg, environment, 'test' if args.test else 'train', **vars(args))
    environment.close()
    return
if __name__ == "__main__":
    """
    Execute script
    """
    # Parse CLI arguments and hand them to the entry point.
    args = parse_args()
    main(args)
| {"/collab_compet/run_agent.py": ["/collab_compet/agent.py", "/collab_compet/unity_env.py"]} |
78,417 | aime20ic/collab_compet | refs/heads/master | /collab_compet/agent.py | import time
import json
import numpy as np
from pathlib import Path
from collab_compet.ddpg import DDPG
class MADDPG():
    """
    Multi-Agent DDPG: a thin coordinator that owns one DDPG agent per
    environment agent and fans each call out to all of them.
    """

    def __init__(self, state_size, action_size, n_agents, random_seed, **kwargs):
        """
        Create all agents

        Args:
            state_size (int): Environment observation size
            action_size (int): Environment action size
            n_agents (int): Number of agents in environment
            random_seed (int): Seed for repeatability

        Keyword Args:
            name (str): Display name for this wrapper (default 'MADDPG')
            run_id: Run identifier (default: current unix time)
            output (Path): Directory for saved artifacts
                (default './output/<run_id>/')

        Returns:
            None
        """
        self.name = kwargs.get('name', 'MADDPG')
        self.run_id = kwargs.get('run_id', int(time.time()))
        self.output = kwargs.get('output',
            Path('./output/' + str(self.run_id) + '/'))
        self.rng_seed = random_seed

        # Create one DDPG agent per environment agent
        self.agents = [
            DDPG(state_size, action_size, random_seed, name='DDPG-{}'.format(i),
                run_id=self.run_id, output=self.output)
            for i in range(n_agents)
        ]

        # Reset agents (plain loop: reset() is called only for its side effects)
        for agent in self.agents:
            agent.reset()

    def load(self, paths, ac):
        """
        Load saved model weights into each agent, pairing the i-th path
        with the i-th agent (zip stops at the shorter sequence).

        Args:
            paths (list of Path): Saved model weights to load, one per agent
            ac (str): Which network to load: 'actor' or 'critic'

        Returns:
            None
        """
        for agent, path in zip(self.agents, paths):
            agent.load(path, ac)

    def act(self, observations):
        """
        Get actions for all agents

        Args:
            observations (array): Observation for each agent

        Returns:
            actions (array of arrays): Continuous actions for each agent
        """
        actions = [agent.act(obs) for agent, obs in zip(self.agents, observations)]
        return np.array(actions)

    def step(self, states, actions, rewards, next_states, dones):
        """
        Add memory to experience replay buffer & learn for each agent

        Args:
            states (array): Observations for each agent for current time step
            actions (array): Continuous actions for each agents
            rewards (array): Rewards for each agents
            next_states (array): Observations for each agent for next time step
            dones (array): Environment complete for each agent

        Returns:
            None
        """
        # Pair the i-th transition with the i-th agent
        memories = zip(states, actions, rewards, next_states, dones)
        for memory, agent in zip(memories, self.agents):
            agent.step(*memory)

    def save(self, prefix):
        """
        Save all agent models

        Args:
            prefix (str): Prefix for saving DDPG models

        Returns:
            None
        """
        for agent in self.agents:
            agent.save(prefix + '__' + agent.name)
| {"/collab_compet/run_agent.py": ["/collab_compet/agent.py", "/collab_compet/unity_env.py"]} |
78,420 | yzgyyang/wsl-tray | refs/heads/master | /src/app.py | import subprocess
from functools import partial
from . import wsl_operations as wsl
from .systray import SysTrayIcon
from .systray.win32_adapter import WM_LBUTTONDBLCLK, WM_RBUTTONUP
APP = "wsl-tray"
APP_DISPLAY = "WSL Tray"
# Actions
def say_hello(systray):
    """Debug handler bound to tray double-click: print a greeting."""
    print("Hello, World!")
def toggle_wsl_state(distro_name, systray):
    """Menu handler: start the named distro if stopped, terminate if running."""
    result = wsl.toggle_state(distro_name)
    return result
def open_about(systray):
    """Menu handler: open the project home page via Windows explorer."""
    about_cmd = 'explorer "https://github.com/yzgyyang/wsl-tray"'
    subprocess.run(about_cmd)
def no_action(systray):
    """Placeholder menu/quit handler that intentionally does nothing."""
    return None
def update_menu(systray):
    """
    Rebuild the tray context menu from the current WSL distro states.

    Bound to right-click: queries `wsl --list --verbose`, adds one toggle
    entry per distro, then the bulk actions and an About link.

    Args:
        systray: SysTrayIcon instance whose menu is refreshed

    Returns:
        None
    """
    # One "[State] Name" entry per distro; clicking toggles that distro
    info = wsl.get_all_states()
    menu_options = tuple([
        (f"[{v['state']}] {name}", None, partial(toggle_wsl_state, name))
        for name, v in info.items()
    ])
    menu_options += (("-----", None, no_action),)
    # Bulk actions. "Terminate All" stops each running distro but keeps the
    # WSL 2 backend VM alive; "Shutdown All" also kills the VM.
    # Fix: "Terminate All" was previously wired to wsl.shutdown_all, making
    # both entries identical in effect.
    menu_options += (("Terminate All", None, wsl.terminate_all),)
    menu_options += (("Shutdown All + WSL 2 Backend", None, wsl.shutdown_all),)
    menu_options += (("-----", None, no_action),)
    # About
    menu_options += (("About", None, open_about),)
    systray.update(menu_options=menu_options)
# Map Windows tray notification messages to their handlers:
# double-left-click greets (debug); right-click rebuilds the context menu.
NOTIFY_DICT = {
    WM_LBUTTONDBLCLK: say_hello,
    WM_RBUTTONUP: update_menu,
}
def main(icon="icon.ico"):
    """Create the system tray icon and enter its message loop."""
    tray = SysTrayIcon(
        icon,
        APP_DISPLAY,
        menu_options=tuple(),
        on_quit=no_action,
        notify_dict=NOTIFY_DICT,
    )
    tray.start()
| {"/src/app.py": ["/src/systray/win32_adapter.py"]} |
78,421 | yzgyyang/wsl-tray | refs/heads/master | /wsl-tray.py | import os
import sys
from src import app
# https://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile
def resource_path(relative_path):
    """
    Get absolute path to resource, works for dev and for PyInstaller.

    Args:
        relative_path (str): Path relative to the application root

    Returns:
        str: Absolute path to the resource
    """
    # PyInstaller one-file builds unpack bundled data into a temp folder
    # recorded in sys._MEIPASS; outside a bundle fall back to the current
    # directory. getattr replaces the old broad try/except Exception, which
    # could mask unrelated errors.
    base_path = getattr(sys, '_MEIPASS', os.path.abspath("."))
    return os.path.join(base_path, relative_path)
if __name__ == "__main__":
    # Launch the tray app with the bundled icon.
    icon_file = resource_path("./icon.ico")
    app.main(icon=icon_file)
| {"/src/app.py": ["/src/systray/win32_adapter.py"]} |
78,422 | yzgyyang/wsl-tray | refs/heads/master | /src/wsl_operations.py | import subprocess
from .pyi_patch import subprocess_args
def run_cmd(cmd):
    """Run a command with the PyInstaller-safe subprocess settings."""
    completed = subprocess.run(cmd, **subprocess_args())
    return completed
def get_all_states():
    """
    Parse `wsl --list --verbose` output into a dict of
    {distro_name: {"state": ..., "version": ...}}.
    """
    output = run_cmd(["wsl", "--list", "--verbose"]).stdout.decode("utf-16-le")
    # Skip the header row; each remaining row ends with NAME STATE VERSION.
    # The [-3:] slice drops the leading '*' marker on the default distro.
    rows = output.strip().split("\r\n")[1:]
    states = {}
    for row in rows:
        name, state, version = row.strip().split()[-3:]
        states[name] = {"state": state, "version": version}
    return states
def get_state(distro_name):
    """Return the state string (e.g. 'Running', 'Stopped') for one distro."""
    return get_all_states()[distro_name]["state"]
def start_distro(distro_name):
    """Start a distro in a new console window; return the Popen handle."""
    launch = f"wsl --distribution {distro_name}"
    return subprocess.Popen(launch, creationflags=subprocess.CREATE_NEW_CONSOLE)
def terminate_distro(distro_name):
    """Terminate a single running distro; return the CompletedProcess."""
    return run_cmd(["wsl", "--terminate", distro_name])
def toggle_state(distro_name):
    """
    Flip a distro between Running and Stopped.

    Returns the Popen/CompletedProcess of the action taken, or None when
    the distro is in any other state (e.g. mid-install).
    """
    current = get_state(distro_name)
    if current == "Running":
        return terminate_distro(distro_name)
    if current == "Stopped":
        return start_distro(distro_name)
    return None
def terminate_all(*args):
    """Terminate every distro that is not already stopped."""
    for name, details in get_all_states().items():
        if details["state"] != "Stopped":
            terminate_distro(name)
def shutdown_all(*args):
    """Shut down all distros and the WSL 2 backend VM."""
    return run_cmd(["wsl", "--shutdown"])
| {"/src/app.py": ["/src/systray/win32_adapter.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.